Wed, 10 Apr 2019 11:38:47 +0200
8221355: Performance regression after JDK-8155635 backport into 8u
Reviewed-by: shade, roland
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_COMPILE_HPP
#define SHARE_VM_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compilerOracle.hpp"
#include "compiler/compileBroker.hpp"
#include "libadt/dict.hpp"
#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phasetype.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/vmThread.hpp"
#include "trace/tracing.hpp"
#include "utilities/ticks.hpp"

class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
class Node_Stack;
struct Final_Reshape_Counts;

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
  friend class VMStructs;

 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1, // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2, // pseudo-index, aliases to everything
    AliasIdxRaw = 3  // hard-wired index for TypeRawPtr::BOTTOM
  };
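
  // Illustrative lookup against the fixed indices above (a sketch, not a
  // call site from this file): flattened address types map to slice indices
  // via get_alias_index(), declared further down in this class.
  //
  //   int idx = C->get_alias_index(TypeRawPtr::BOTTOM); // yields AliasIdxRaw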

  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
    const char* _phase_name;
    bool        _dolog;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };
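
  // Intended use, as a sketch (the phase name and accumulator below are
  // illustrative): the constructor starts the timer and opens a log bracket;
  // the destructor closes both when the scope exits.
  //
  //   {
  //     TracePhase t2("optimizer", &_t_optimizer, true);
  //     Optimize();
  //   }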

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int            _index;         // unique index, used with MergeMemNode
    const TypePtr* _adr_type;      // normalized address type
    ciField*       _field;         // relevant instance field, or null if none
    const Type*    _element;       // relevant array element type, or null if none
    bool           _is_rewritable; // false if the memory is write-once only
    int            _general_index; // if this type is an instance, the general
                                   // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int            index()         const { return _index; }
    const TypePtr* adr_type()      const { return _adr_type; }
    ciField*       field()         const { return _field; }
    const Type*    element()       const { return _element; }
    bool           is_rewritable() const { return _is_rewritable; }
    bool           is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int            general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final() || f->is_stable()) {
        // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops.
        _is_rewritable = false;
      }
    }
    void set_element(const Type* e) {
      assert(_element == NULL, "");
      _element = e;
    }

    BasicType basic_type() const;

    void print_on(outputStream* st) PRODUCT_RETURN;
  };
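
  // Sketch of how AliasType entries are typically consumed (assuming the
  // accessors declared later in this class): a memory operation's address
  // type selects a slice, whose index picks the matching MergeMemNode input.
  //
  //   int alias_idx = C->get_alias_index(n->adr_type());
  //   Node* slice   = mergemem->memory_at(alias_idx);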

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1 << logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; }; // simple duple type
  enum {
    trapHistLength = MethodData::_trap_hist_limit
  };

  // Constant entry of the constant table.
  class Constant {
   private:
    BasicType _type;
    union {
      jvalue    _value;
      Metadata* _metadata;
    } _v;
    int       _offset;        // offset of this constant (in bytes) relative to the constant table base.
    float     _freq;
    bool      _can_be_reused; // true (default) if the value can be shared with other users.

   public:
    Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _v._value.l = 0; }
    Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) :
      _type(type),
      _offset(-1),
      _freq(freq),
      _can_be_reused(can_be_reused)
    {
      assert(type != T_METADATA, "wrong constructor");
      _v._value = value;
    }
    Constant(Metadata* metadata, bool can_be_reused = true) :
      _type(T_METADATA),
      _offset(-1),
      _freq(0.0f),
      _can_be_reused(can_be_reused)
    {
      _v._metadata = metadata;
    }

    bool operator==(const Constant& other);

    BasicType type()      const { return _type; }

    jlong   get_jlong()   const { return _v._value.j; }
    jfloat  get_jfloat()  const { return _v._value.f; }
    jdouble get_jdouble() const { return _v._value.d; }
    jobject get_jobject() const { return _v._value.l; }

    Metadata* get_metadata() const { return _v._metadata; }

    int  offset() const { return _offset; }
    void set_offset(int offset) { _offset = offset; }

    float freq() const { return _freq; }
    void  inc_freq(float freq) { _freq += freq; }

    bool can_be_reused() const { return _can_be_reused; }
  };

  // Constant table.
  class ConstantTable {
   private:
    GrowableArray<Constant> _constants;   // Constants of this table.
    int  _size;              // Size in bytes the emitted constant table takes (including padding).
    int  _table_base_offset; // Offset of the table base that gets added to the constant offsets.
    int  _nof_jump_tables;   // Number of jump-tables in this constant table.

    static int qsort_comparator(Constant* a, Constant* b);

    // We use negative frequencies to keep jump-tables in the order in
    // which they were added.  Otherwise we get into trouble with
    // relocation.
    float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); }

   public:
    ConstantTable() :
      _size(-1),
      _table_base_offset(-1), // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
      _nof_jump_tables(0)
    {}

    int size() const { assert(_size != -1, "not calculated yet"); return _size; }

    int calculate_table_base_offset() const; // AD specific
    void set_table_base_offset(int x) { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; }
    int table_base_offset() const { assert(_table_base_offset != -1, "not set yet"); return _table_base_offset; }

    void emit(CodeBuffer& cb);

    // Returns the offset of the last entry (the top) of the constant table.
    int top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); }

    void calculate_offsets_and_size();
    int  find_offset(Constant& con) const;

    void     add(Constant& con);
    Constant add(MachConstantNode* n, BasicType type, jvalue value);
    Constant add(Metadata* metadata);
    Constant add(MachConstantNode* n, MachOper* oper);
    Constant add(MachConstantNode* n, jfloat f) {
      jvalue value; value.f = f;
      return add(n, T_FLOAT, value);
    }
    Constant add(MachConstantNode* n, jdouble d) {
      jvalue value; value.d = d;
      return add(n, T_DOUBLE, value);
    }

    // Jump-table
    Constant add_jump_table(MachConstantNode* n);
    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
  };
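
  // Sketch of spilling a float literal into the constant table and
  // computing its displacement from the table base (mach_con_node stands in
  // for some MachConstantNode*; actual call sites differ).
  //
  //   Constant con  = C->constant_table().add(mach_con_node, 1.5f);
  //   int      disp = C->constant_table().table_base_offset() + con.offset();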

 private:
  // Fixed parameters to this compilation.
  const int       _compile_id;
  const bool      _save_argument_registers; // save/restore arg regs for trampolines
  const bool      _subsume_loads;           // Load can be matched as part of a larger op.
  const bool      _do_escape_analysis;      // Do escape analysis.
  const bool      _eliminate_boxing;        // Do boxing elimination.
  ciMethod*       _method;                  // The method being compiled.
  int             _entry_bci;               // entry bci for osr methods.
  const TypeFunc* _tf;                      // My kind of signature
  InlineTree*     _ilt;                     // Ditto (temporary).
  address         _stub_function;           // VM entry for stub being compiled, or NULL
  const char*     _stub_name;               // Name of stub or adapter being compiled, or NULL
  address         _stub_entry_point;        // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int             _num_loop_opts;           // Number of iterations for doing loop optimizations
  int             _max_inline_size;         // Max inline size for this compilation
  int             _freq_inline_size;        // Max hot method inline size for this compilation
  int             _fixed_slots;             // count of frame slots not allocated by the register
                                            // allocator i.e. locks, original deopt pc, etc.
  uintx           _max_node_limit;          // Max unique node count during a single compilation.
  // For deopt
  int             _orig_pc_slot;
  int             _orig_pc_slot_offset_in_bytes;

  int             _major_progress;          // Count of something big happening
  bool            _inlining_progress;       // progress doing incremental inlining?
  bool            _inlining_incrementally;  // Are we doing incremental inlining (post parse)
  bool            _has_loops;               // True if the method _may_ have some loops
  bool            _has_split_ifs;           // True if the method _may_ have some split-if
  bool            _has_unsafe_access;       // True if the method _may_ produce faults in unsafe loads or stores.
  bool            _has_stringbuilder;       // True if StringBuffers or StringBuilders are allocated
  bool            _has_boxed_value;         // True if a boxed object is allocated
  int             _max_vector_size;         // Maximum size of generated vectors
  uint            _trap_hist[trapHistLength]; // Cumulative traps
  bool            _trap_can_recompile;      // Have we emitted a recompiling trap?
  uint            _decompile_count;         // Cumulative decompilation counts.
  bool            _do_inlining;             // True if we intend to do inlining
  bool            _do_scheduling;           // True if we intend to do scheduling
  bool            _do_freq_based_layout;    // True if we intend to do frequency based block layout
  bool            _do_count_invocations;    // True if we generate code to count invocations
  bool            _do_method_data_update;   // True if we generate code to update MethodData*s
  int             _AliasLevel;              // Locally-adjusted version of AliasLevel flag.
  bool            _print_assembly;          // True if we should dump assembly code for this compilation
  bool            _print_inlining;          // True if we should print inlining for this compilation
  bool            _print_intrinsics;        // True if we should print intrinsics for this compilation
#ifndef PRODUCT
  bool            _trace_opto_output;
  bool            _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif
  bool            _has_irreducible_loop;    // Found irreducible loops
  // JSR 292
  bool            _has_method_handle_invokes; // True if this method has MethodHandle invokes.
  RTMState        _rtm_state;               // State of Restricted Transactional Memory usage

  // Compilation environment.
  Arena           _comp_arena;              // Arena with lifetime equivalent to Compile
  ciEnv*          _env;                     // CI interface
  CompileLog*     _log;                     // from CompilerThread
  const char*     _failure_reason;          // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;       // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;       // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;   // List of Opaque1 nodes for the loop predicates.
  GrowableArray<Node*>* _expensive_nodes;   // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
  GrowableArray<Node*>* _range_check_casts; // List of CastII nodes with a range check dependency
  ConnectionGraph* _congraph;
#ifndef PRODUCT
  IdealGraphPrinter* _printer;
#endif

  // Node management
  uint            _unique;                  // Counter for unique Node indices
  VectorSet       _dead_node_list;          // Set of dead nodes
  uint            _dead_node_count;         // Number of dead nodes; VectorSet::Size() is O(N).
                                            // So use this to keep count and make the call O(1).
  debug_only(static int _debug_idx;)        // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena           _node_arena;              // Arena for new-space Nodes
  Arena           _old_arena;               // Arena for old-space Nodes, lifetime during xform
  RootNode*       _root;                    // Unique root of compilation, or NULL after bail-out.
  Node*           _top;                     // Unique top node.  (Reset by various phases.)

  Node*           _immutable_memory;        // Initial memory state

  Node*           _recent_alloc_obj;
  Node*           _recent_alloc_ctl;

  // Constant table
  ConstantTable   _constant_table;          // The constant table for this compile.
  MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1 << _log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*     _default_node_notes;      // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena           _Compile_types;           // Arena for all types
  Arena*          _type_arena;              // Alias for _Compile_types except in Initialize_shared()
  Dict*           _type_dict;               // Intern table
  void*           _type_hwm;                // Last allocation (see Type::operator new/delete)
  size_t          _type_last_size;          // Last allocation size (see Type::operator new/delete)
  ciMethod*       _last_tf_m;               // Cache for
  const TypeFunc* _last_tf;                 //  TypeFunc::make
  AliasType**     _alias_types;             // List of alias types seen so far.
  int             _num_alias_types;         // Logical length of _alias_types
  int             _max_alias_types;         // Physical length of _alias_types
  AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*         _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List* _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*     _warm_calls;            // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after
                                                      // main parsing has finished.
  GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations

  GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations

  int  _late_inlines_pos;          // Where in the queue should the next late inlining candidate go (emulate depth first inlining)
  uint _number_of_mh_late_inlines; // number of method handle late inlining still pending

  // Inlining may not happen in parse order which would make
  // PrintInlining output confusing. Keep track of PrintInlining
  // pieces in order.
  class PrintInliningBuffer : public ResourceObj {
   private:
    CallGenerator* _cg;
    stringStream*  _ss;

   public:
    PrintInliningBuffer()
      : _cg(NULL) { _ss = new stringStream(); }

    stringStream*  ss() const { return _ss; }
    CallGenerator* cg() const { return _cg; }
    void set_cg(CallGenerator* cg) { _cg = cg; }
  };

  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  int _print_inlining_idx;

  // Only keep nodes in the expensive node list that need to be optimized
  void cleanup_expensive_nodes(PhaseIterGVN &igvn);
  // Use for sorting expensive nodes to bring similar nodes together
  static int cmp_expensive_nodes(Node** n1, Node** n2);
  // Expensive nodes list already sorted?
  bool expensive_nodes_sorted() const;
  // Remove the speculative part of types and clean up the graph
  void remove_speculative_types(PhaseIterGVN &igvn);

  void* _replay_inline_data; // Pointer to data loaded from file

 public:

  outputStream* print_inlining_stream() const {
    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
  }

  void print_inlining_skip(CallGenerator* cg) {
    if (_print_inlining) {
      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
      _print_inlining_idx++;
      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
    }
  }

  void print_inlining_insert(CallGenerator* cg) {
    if (_print_inlining) {
      for (int i = 0; i < _print_inlining_list->length(); i++) {
        if (_print_inlining_list->adr_at(i)->cg() == cg) {
          _print_inlining_list->insert_before(i + 1, PrintInliningBuffer());
          _print_inlining_idx = i + 1;
          _print_inlining_list->adr_at(i)->set_cg(NULL);
          return;
        }
      }
      ShouldNotReachHere();
    }
  }

  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
    stringStream ss;
    CompileTask::print_inlining(&ss, method, inline_level, bci, msg);
    print_inlining_stream()->print("%s", ss.as_string());
  }

  void* replay_inline_data() const { return _replay_inline_data; }

  // Dump inlining replay data to the stream.
  void dump_inline_data(outputStream* out);

 private:
  // Matching, CFG layout, allocation, code generation
  PhaseCFG*       _cfg;                     // Results of CFG finding
  bool            _select_24_bit_instr;     // We selected an instruction with a 24-bit result
  bool            _in_24_bit_fp_mode;       // We are emitting instructions with 24-bit results
  int             _java_calls;              // Number of java calls in the method
  int             _inner_loops;             // Number of inner loops in the method
  Matcher*        _matcher;                 // Engine to map ideal to machine instructions
  PhaseRegAlloc*  _regalloc;                // Results of register allocation.
  int             _frame_slots;             // Size of total frame in stack slots
  CodeOffsets     _code_offsets;            // Offsets into the code for various interesting entries
  RegMask         _FIRST_STACK_mask;        // All stack slots usable for spills (depends on frame layout)
  Arena*          _indexSet_arena;          // control IndexSet allocation within PhaseChaitin
  void*           _indexSet_free_block_list; // free list of IndexSet bit blocks
  int             _interpreter_frame_size;

  uint            _node_bundling_limit;
  Bundle*         _node_bundling_base;      // Information for instruction bundling

  // Instruction bits passed off to the VM
  int             _method_size;             // Size of nmethod code segment in bytes
  CodeBuffer      _code_buffer;             // Where the code is assembled
  int             _first_block_size;        // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;     // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;        // Table of implicit null checks in native code
  OopMapSet*      _oop_map_set;             // Table of oop maps (one for each safepoint location)
  static int      _CompiledZap_count;       // counter compared against CompileZap[First/Last]
  BufferBlob*     _scratch_buffer_blob;     // For temporary code buffers.
  relocInfo*      _scratch_locs_memory;     // For temporary code buffers.
  int             _scratch_const_size;      // For temporary code buffers.
  bool            _in_scratch_emit_size;    // true when in scratch_emit_size.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }
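
  // Only meaningful on a compiler thread with an active compilation, e.g.:
  //
  //   Compile* C = Compile::current();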

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int compile_id() const { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool subsume_loads() const { return _subsume_loads; }
  /** Do escape analysis. */
  bool do_escape_analysis() const { return _do_escape_analysis; }
  /** Do boxing elimination. */
  bool eliminate_boxing() const { return _eliminate_boxing; }
  /** Do aggressive boxing elimination. */
  bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; }
  bool save_argument_registers() const { return _save_argument_registers; }

  // Other fixed compilation parameters.
  ciMethod* method() const { return _method; }
  int entry_bci() const { return _entry_bci; }
  bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; }
  bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc* tf() const { assert(_tf != NULL, ""); return _tf; }
  void init_tf(const TypeFunc* tf) { assert(_tf == NULL, ""); _tf = tf; }
  InlineTree* ilt() const { return _ilt; }
  address stub_function() const { return _stub_function; }
  const char* stub_name() const { return _stub_name; }
  address stub_entry_point() const { return _stub_entry_point; }

  // Control of this compilation.
  int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void set_fixed_slots(int n) { _fixed_slots = n; }
  int major_progress() const { return _major_progress; }
  void set_inlining_progress(bool z) { _inlining_progress = z; }
  int inlining_progress() const { return _inlining_progress; }
  void set_inlining_incrementally(bool z) { _inlining_incrementally = z; }
  int inlining_incrementally() const { return _inlining_incrementally; }
  void set_major_progress() { _major_progress++; }
  void clear_major_progress() { _major_progress = 0; }
  int num_loop_opts() const { return _num_loop_opts; }
  void set_num_loop_opts(int n) { _num_loop_opts = n; }
  int max_inline_size() const { return _max_inline_size; }
  void set_freq_inline_size(int n) { _freq_inline_size = n; }
  int freq_inline_size() const { return _freq_inline_size; }
  void set_max_inline_size(int n) { _max_inline_size = n; }
  bool has_loops() const { return _has_loops; }
  void set_has_loops(bool z) { _has_loops = z; }
  bool has_split_ifs() const { return _has_split_ifs; }
  void set_has_split_ifs(bool z) { _has_split_ifs = z; }
  bool has_unsafe_access() const { return _has_unsafe_access; }
  void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
  bool has_stringbuilder() const { return _has_stringbuilder; }
  void set_has_stringbuilder(bool z) { _has_stringbuilder = z; }
  bool has_boxed_value() const { return _has_boxed_value; }
  void set_has_boxed_value(bool z) { _has_boxed_value = z; }
  int max_vector_size() const { return _max_vector_size; }
  void set_max_vector_size(int s) { _max_vector_size = s; }
  void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool trap_can_recompile() const { return _trap_can_recompile; }
  void set_trap_can_recompile(bool z) { _trap_can_recompile = z; }
  uint decompile_count() const { return _decompile_count; }
  void set_decompile_count(uint c) { _decompile_count = c; }
  bool allow_range_check_smearing() const;
  bool do_inlining() const { return _do_inlining; }
  void set_do_inlining(bool z) { _do_inlining = z; }
  bool do_scheduling() const { return _do_scheduling; }
  void set_do_scheduling(bool z) { _do_scheduling = z; }
  bool do_freq_based_layout() const { return _do_freq_based_layout; }
  void set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool do_count_invocations() const { return _do_count_invocations; }
  void set_do_count_invocations(bool z) { _do_count_invocations = z; }
  bool do_method_data_update() const { return _do_method_data_update; }
  void set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int AliasLevel() const { return _AliasLevel; }
  bool print_assembly() const { return _print_assembly; }
  void set_print_assembly(bool z) { _print_assembly = z; }
  bool print_inlining() const { return _print_inlining; }
  void set_print_inlining(bool z) { _print_inlining = z; }
  bool print_intrinsics() const { return _print_intrinsics; }
  void set_print_intrinsics(bool z) { _print_intrinsics = z; }
  RTMState rtm_state() const { return _rtm_state; }
  void set_rtm_state(RTMState s) { _rtm_state = s; }
  bool use_rtm() const { return (_rtm_state & NoRTM) == 0; }
  bool profile_rtm() const { return _rtm_state == ProfileRTM; }
  uint max_node_limit() const { return (uint)_max_node_limit; }
  void set_max_node_limit(uint n) { _max_node_limit = n; }

  // check the CompilerOracle for special behaviours for this compile
  bool method_has_option(const char* option) {
    return method() != NULL && method()->has_option(option);
  }
  template<typename T>
  bool method_has_option_value(const char* option, T& value) {
    return method() != NULL && method()->has_option_value(option, value);
  }
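
  // Sketch of an oracle query (per-method options are set with
  // -XX:CompileCommand=option,<method>,<name>; the name below is one
  // example used by the RTM setup code):
  //
  //   if (method_has_option("NoRTMLockEliding")) {
  //     // suppress RTM lock eliding for this method only
  //   }
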
#ifndef PRODUCT
  bool trace_opto_output() const { return _trace_opto_output; }
  bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
  int _in_dump_cnt; // Required for dumping ir nodes.
#endif
  bool has_irreducible_loop() const { return _has_irreducible_loop; }
  void set_has_irreducible_loop(bool z) { _has_irreducible_loop = z; }

  // JSR 292
  bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  Ticks _latest_stage_start_counter;

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void print_method(CompilerPhaseType cpt, int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) cpt);
      event.set_compileID(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }

#ifndef PRODUCT
    if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
#endif
    C->_latest_stage_start_counter.stamp();
  }

  void end_method(int level = 1) {
    EventCompilerPhase event;
    if (event.should_commit()) {
      event.set_starttime(C->_latest_stage_start_counter);
      event.set_phase((u1) PHASE_END);
      event.set_compileID(C->_compile_id);
      event.set_phaseLevel(level);
      event.commit();
    }
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }

  int macro_count() const { return _macro_nodes->length(); }
  int predicate_count() const { return _predicate_opaqs->length(); }
  int expensive_count() const { return _expensive_nodes->length(); }
  Node* macro_node(int idx) const { return _macro_nodes->at(idx); }
  Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx); }
  Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); }
  ConnectionGraph* congraph() { return _congraph; }
  void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node* n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_expensive_node(Node* n);
  void remove_expensive_node(Node* n) {
    if (_expensive_nodes->contains(n)) {
      _expensive_nodes->remove(n);
    }
  }
  void add_predicate_opaq(Node* n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }

  // Range check dependent CastII nodes that can be removed after loop optimizations
  void add_range_check_cast(Node* n);
  void remove_range_check_cast(Node* n) {
    if (_range_check_casts->contains(n)) {
      _range_check_casts->remove(n);
    }
  }
  Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx); }
  int range_check_cast_count() const { return _range_check_casts->length(); }
  // Remove all range check dependent CastIINodes.
  void remove_range_check_casts(PhaseIterGVN &igvn);

  // remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node* n) {
    return _predicate_opaqs->contains(n);
  }

  // Are there candidate expensive nodes for optimization?
  bool should_optimize_expensive_nodes(PhaseIterGVN &igvn);
  // Check whether n1 and n2 are similar
  static int cmp_expensive_nodes(Node* n1, Node* n2);
  // Sort expensive nodes to locate similar expensive nodes
  void sort_expensive_nodes();

  // Compilation environment.
  Arena* comp_arena() { return &_comp_arena; }
  ciEnv* env() const { return _env; }
  CompileLog* log() const { return _log; }
  bool failing() const { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason() { return _failure_reason; }
  bool failure_reason_is(const char* r) { return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (live_nodes() + margin > max_node_limit()) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
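
  // Typical bail-out pattern, as a sketch (margin and message vary by
  // caller; NodeLimitFudgeFactor is a globals.hpp flag):
  //
  //   if (C->check_node_count(NodeLimitFudgeFactor, "out of nodes")) {
  //     return; // compilation has already been marked as failing
  //   }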

  // Node management
  uint unique() const { return _unique; }
  uint next_unique() { return _unique++; }
  void set_unique(uint i) { _unique = i; }
  static int debug_idx() { return debug_only(_debug_idx)+0; }
  static void set_debug_idx(int i) { debug_only(_debug_idx = i); }
  Arena* node_arena() { return &_node_arena; }
  Arena* old_arena() { return &_old_arena; }
  RootNode* root() const { return _root; }
  void set_root(RootNode* r) { _root = r; }
  StartNode* start() const; // (Derived from root.)
  void init_start(StartNode* s);
  Node* immutable_memory();

  Node* recent_alloc_ctl() const { return _recent_alloc_ctl; }
  Node* recent_alloc_obj() const { return _recent_alloc_obj; }
  void set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }
  void record_dead_node(uint idx) {
    if (_dead_node_list.test_set(idx)) return;
    _dead_node_count++;
  }
  bool is_dead_node(uint idx) { return _dead_node_list.test(idx) != 0; }
  uint dead_node_count() { return _dead_node_count; }
  void reset_dead_node_list() {
    _dead_node_list.Reset();
    _dead_node_count = 0;
  }
  uint live_nodes() const {
    int val = _unique - _dead_node_count;
    assert(val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _dead_node_count, _unique));
    return (uint) val;
  }
#ifdef ASSERT
  uint count_live_nodes_by_graph_walk();
  void print_missing_nodes();
#endif

  // Constant table
  ConstantTable& constant_table() { return _constant_table; }

  MachConstantBaseNode* mach_constant_base_node();
  bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }
  // Generated by adlc, true if CallNode requires MachConstantBase.
  bool needs_clone_jvms();

  // Handy undefined Node
  Node* top() const { return _top; }

  // these are used by code that needs to know about creation and transformation of top:
  Node* cached_top_node() { return _top; }
  void set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const { return _default_node_notes; }
  void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes* node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena* type_arena() { return _type_arena; }
  Dict* type_dict() { return _type_dict; }
  void* type_hwm() { return _type_hwm; }
  size_t type_last_size() { return _type_last_size; }
  int num_alias_types() { return _num_alias_types; }

  void init_type_arena() { _type_arena = &_Compile_types; }
  void set_type_arena(Arena* a) { _type_arena = a; }
  void set_type_dict(Dict* d) { _type_dict = d; }
  void set_type_hwm(void* p) { _type_hwm = p; }
  void set_type_last_size(size_t sz) { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }

  AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool have_alias_type(const TypePtr* adr_type);
  AliasType* alias_type(ciField* field);

  int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); }
  const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); }
  int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); }

  // Building nodes
  void rethrow_exceptions(JVMState* jvms);
  void return_values(JVMState* jvms);
  JVMState* build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch,
                                JVMState* jvms, bool allow_inline, float profile_factor,
                                ciKlass* speculative_receiver_type = NULL,
                                bool allow_intrinsics = true, bool delayed_forbidden = false);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) {
    return should_delay_string_inlining(call_method, jvms) ||
           should_delay_boxing_inlining(call_method, jvms);
  }
  bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms);
  bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms);

  // Helper functions to identify inlining potential at call-site
  ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass,
                                  ciKlass* holder, ciMethod* callee,
                                  const TypeOopPtr* receiver_type, bool is_virtual,
                                  bool& call_does_dispatch, int& vtable_index,
                                  bool check_access = true);
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod* callee, const TypeOopPtr* receiver_type,
                              bool check_access = true);

  // Report if there were too many traps at a current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Return a bitset with the reasons where deoptimization is allowed,
  // i.e., where there were not too many uncommon traps.
  int _allowed_reasons;
  int allowed_deopt_reasons() { return _allowed_reasons; }
  void set_allowed_deopt_reasons();

  // Parsing, optimization
  PhaseGVN* initial_gvn() { return _initial_gvn; }
  Unique_Node_List* for_igvn() { return _for_igvn; }
  inline void record_for_igvn(Node* n); // Body is after class Unique_Node_List.
  void set_initial_gvn(PhaseGVN* gvn) { _initial_gvn = gvn; }
  void set_for_igvn(Unique_Node_List* for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void identify_useful_nodes(Unique_Node_List& useful);
  void update_dead_node_list(Unique_Node_List& useful);
  void remove_useless_nodes(Unique_Node_List& useful);

  WarmCallInfo* warm_calls() const { return _warm_calls; }
  void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void add_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(_late_inlines_pos, cg);
    _late_inlines_pos++;
  }

  void prepend_late_inline(CallGenerator* cg) {
    _late_inlines.insert_before(0, cg);
  }

  void add_string_late_inline(CallGenerator* cg) {
    _string_late_inlines.push(cg);
  }

  void add_boxing_late_inline(CallGenerator* cg) {
    _boxing_late_inlines.push(cg);
  }

  void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List& useful);

  void dump_inlining();

  bool over_inlining_cutoff() const {
    if (!inlining_incrementally()) {
      return unique() > (uint)NodeCountInliningCutoff;
    } else {
      return live_nodes() > (uint)LiveNodeCountInliningCutoff;
    }
  }

  void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; }
  void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; }
  bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; }

  void inline_incrementally_one(PhaseIterGVN& igvn);
  void inline_incrementally(PhaseIterGVN& igvn);
  void inline_string_calls(bool parse_time);
  void inline_boxing_calls(PhaseIterGVN& igvn);

  // Matching, CFG layout, allocation, code generation
  PhaseCFG* cfg() { return _cfg; }
  bool select_24_bit_instr() const { return _select_24_bit_instr; }
  bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; }
  bool has_java_calls() const { return _java_calls > 0; }
  int java_calls() const { return _java_calls; }
  int inner_loops() const { return _inner_loops; }
  Matcher* matcher() { return _matcher; }
  PhaseRegAlloc* regalloc() { return _regalloc; }
  int frame_slots() const { return _frame_slots; }
  int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  int frame_size_in_bytes() const { return _frame_slots << LogBytesPerInt; }
  RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; }
  Arena* indexSet_arena() { return _indexSet_arena; }
  void* indexSet_free_block_list() { return _indexSet_free_block_list; }
  uint node_bundling_limit() { return _node_bundling_limit; }
  Bundle* node_bundling_base() { return _node_bundling_base; }
  void set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool starts_bundle(const Node* n) const;
  bool need_stack_bang(int frame_size_in_bytes) const;
  bool need_register_stack_bang() const;

  void update_interpreter_frame_size(int size) {
    if (_interpreter_frame_size < size) {
      _interpreter_frame_size = size;
    }
  }
  int bang_size_in_bytes() const;

  void set_matcher(Matcher* m) { _matcher = m; }
  //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void set_indexSet_arena(Arena* a) { _indexSet_arena = a; }
  void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode = mode;
  }

  void set_java_calls(int z) { _java_calls = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int code_size() { return _method_size; }
  CodeBuffer* code_buffer() { return &_code_buffer; }
  int first_block_size() { return _first_block_size; }
  void set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable* handler_table() { return &_handler_table; }
  ImplicitExceptionTable* inc_table() { return &_inc_table; }
  OopMapSet* oop_map_set() { return _oop_map_set; }
  DebugInformationRecorder* debug_info() { return env()->debug_info(); }
  Dependencies* dependencies() { return env()->dependencies(); }
  static int CompiledZap_count() { return _CompiledZap_count; }
  BufferBlob* scratch_buffer_blob() { return _scratch_buffer_blob; }
  void init_scratch_buffer_blob(int const_size);
  void clear_scratch_buffer_blob();
  void set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo* scratch_locs_memory() { return _scratch_locs_memory; }
  void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint scratch_emit_size(const Node* n);
  void set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; }
  bool in_scratch_emit_size() const { return _in_scratch_emit_size; }

  enum ScratchBufferBlob {
    MAX_inst_size = 1024,
    MAX_locs_size = 128, // number of relocInfo elements
    MAX_const_size = 128,
    MAX_stubs_size = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis,
          bool eliminate_boxing);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc* (*gen)(),
          address stub_function, const char* stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc* tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc* tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node* n);
  bool valid_bundle_info(const Node* n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray(int idx, MachSafePointNode* sfpt,
                    Node* local, GrowableArray<ScopeValue*>* array,
                    GrowableArray<ScopeValue*>* objs);

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*>* objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*>* objs,
                                     ObjectValue* sv);

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode* mach, int code_offset);

  // Initialize code buffer
  CodeBuffer* init_buffer(uint* blk_starts);

  // Write out basic block data to code buffer
  void fill_buffer(CodeBuffer* cb, uint* blk_starts);

  // Determine which variable sized branches can be shortened
  void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size);

  // Compute the size of first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint* call_returns, uint* inct_starts, Label* blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
  static bool is_node_getting_a_safepoint(Node*);
  void Insert_zap_nodes();
  Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif

 private:
  // Phase control:
  void Init(int aliaslevel);     // Prepare for a single compilation
  int Inline_Warm();             // Find more inlining work.
  void Finish_Warm();            // Give up on further inlines.
  void Optimize();               // Given a graph, optimize it
  void Code_Gen();               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr* flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void register_library_intrinsics();                             // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor
  int intrinsic_insertion_index(ciMethod* m, bool is_virtual);    // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);    // query fn
  void register_intrinsic(CallGenerator* cg);                     // update fn

#ifndef PRODUCT
  static juint _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif
  // Function calls made by the public function final_graph_reshaping.
  // No need to be made public as they are not called elsewhere.
  void final_graph_reshaping_impl(Node* n, Final_Reshape_Counts& frc);
  void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc);
  void eliminate_redundant_card_marks(Node* n);

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum { // flag bits:
    _intrinsic_worked = 1,   // succeeded at least once
    _intrinsic_failed = 2,   // tried it but it failed
    _intrinsic_disabled = 4, // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,  // was seen in the virtual form (rare)
    _intrinsic_both = 16     // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // Verify GC barrier patterns
  void verify_barriers() PRODUCT_RETURN;

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
  void dump_asm(int* pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
  void dump_pc(int* pcs, int pc_limit, Node* n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();

  // Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
  static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);

  // Auxiliary method for randomized fuzzing/stressing
  static bool randomized_select(int count);
};

#endif // SHARE_VM_OPTO_COMPILE_HPP