Fri, 20 Dec 2013 13:51:14 +0100
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
Summary: Add ConstantTableBase node edge after parameters and before jvms. Adapt jvms offsets.
Reviewed-by: kvn
duke@435 | 1 | /* |
sla@5237 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_OPTO_COMPILE_HPP |
stefank@2314 | 26 | #define SHARE_VM_OPTO_COMPILE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "asm/codeBuffer.hpp" |
stefank@2314 | 29 | #include "ci/compilerInterface.hpp" |
stefank@2314 | 30 | #include "code/debugInfoRec.hpp" |
stefank@2314 | 31 | #include "code/exceptionHandlerTable.hpp" |
stefank@2314 | 32 | #include "compiler/compilerOracle.hpp" |
roland@4357 | 33 | #include "compiler/compileBroker.hpp" |
stefank@2314 | 34 | #include "libadt/dict.hpp" |
stefank@2314 | 35 | #include "libadt/port.hpp" |
stefank@2314 | 36 | #include "libadt/vectset.hpp" |
stefank@2314 | 37 | #include "memory/resourceArea.hpp" |
stefank@2314 | 38 | #include "opto/idealGraphPrinter.hpp" |
sla@5237 | 39 | #include "opto/phasetype.hpp" |
stefank@2314 | 40 | #include "opto/phase.hpp" |
stefank@2314 | 41 | #include "opto/regmask.hpp" |
stefank@2314 | 42 | #include "runtime/deoptimization.hpp" |
stefank@2314 | 43 | #include "runtime/vmThread.hpp" |
sla@5237 | 44 | #include "trace/tracing.hpp" |
mgronlun@6131 | 45 | #include "utilities/ticks.hpp" |
stefank@2314 | 46 | |
duke@435 | 47 | class Block; |
duke@435 | 48 | class Bundle; |
duke@435 | 49 | class C2Compiler; |
duke@435 | 50 | class CallGenerator; |
duke@435 | 51 | class ConnectionGraph; |
duke@435 | 52 | class InlineTree; |
duke@435 | 53 | class Int_Array; |
duke@435 | 54 | class Matcher; |
twisti@2350 | 55 | class MachConstantNode; |
twisti@2350 | 56 | class MachConstantBaseNode; |
duke@435 | 57 | class MachNode; |
twisti@2350 | 58 | class MachOper; |
kvn@473 | 59 | class MachSafePointNode; |
duke@435 | 60 | class Node; |
duke@435 | 61 | class Node_Array; |
duke@435 | 62 | class Node_Notes; |
duke@435 | 63 | class OptoReg; |
duke@435 | 64 | class PhaseCFG; |
duke@435 | 65 | class PhaseGVN; |
cfang@1607 | 66 | class PhaseIterGVN; |
duke@435 | 67 | class PhaseRegAlloc; |
duke@435 | 68 | class PhaseCCP; |
duke@435 | 69 | class PhaseCCP_DCE; |
duke@435 | 70 | class RootNode; |
duke@435 | 71 | class relocInfo; |
duke@435 | 72 | class Scope; |
duke@435 | 73 | class StartNode; |
duke@435 | 74 | class SafePointNode; |
duke@435 | 75 | class JVMState; |
vlivanov@5658 | 76 | class Type; |
duke@435 | 77 | class TypeData; |
duke@435 | 78 | class TypePtr; |
twisti@4414 | 79 | class TypeOopPtr; |
duke@435 | 80 | class TypeFunc; |
duke@435 | 81 | class Unique_Node_List; |
duke@435 | 82 | class nmethod; |
duke@435 | 83 | class WarmCallInfo; |
bharadwaj@4315 | 84 | class Node_Stack; |
bharadwaj@4315 | 85 | struct Final_Reshape_Counts; |
duke@435 | 86 | |
duke@435 | 87 | //------------------------------Compile---------------------------------------- |
duke@435 | 88 | // This class defines a top-level Compiler invocation. |
duke@435 | 89 | |
duke@435 | 90 | class Compile : public Phase { |
never@3138 | 91 | friend class VMStructs; |
never@3138 | 92 | |
duke@435 | 93 | public: |
duke@435 | 94 | // Fixed alias indexes. (See also MergeMemNode.) |
duke@435 | 95 | enum { |
duke@435 | 96 | AliasIdxTop = 1, // pseudo-index, aliases to nothing (used as sentinel value) |
duke@435 | 97 | AliasIdxBot = 2, // pseudo-index, aliases to everything |
duke@435 | 98 | AliasIdxRaw = 3 // hard-wired index for TypeRawPtr::BOTTOM |
duke@435 | 99 | }; |
duke@435 | 100 | |
duke@435 | 101 | // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler); |
duke@435 | 102 | // Integrated with logging. If logging is turned on, and dolog is true, |
duke@435 | 103 | // then brackets are put into the log, with time stamps and node counts. |
duke@435 | 104 | // (The time collection itself is always conditionalized on TimeCompiler.) |
duke@435 | 105 | class TracePhase : public TraceTime { |
duke@435 | 106 | private: |
duke@435 | 107 | Compile* C; |
duke@435 | 108 | CompileLog* _log; |
bharadwaj@4315 | 109 | const char* _phase_name; |
bharadwaj@4315 | 110 | bool _dolog; |
duke@435 | 111 | public: |
duke@435 | 112 | TracePhase(const char* name, elapsedTimer* accumulator, bool dolog); |
duke@435 | 113 | ~TracePhase(); |
duke@435 | 114 | }; |
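  // Illustrative sketch, not part of the original header: a TracePhase is a
  // scoped (RAII) timer, so a compilation phase typically brackets its work
  // like this (the phase name and accumulator below are assumptions, not
  // taken from this file):
  //
  //   {
  //     Compile::TracePhase tp("examplePhase", &example_accumulator, true /*dolog*/);
  //     // ... run the phase; when 'tp' is destroyed the elapsed time is added
  //     // to 'example_accumulator' and, if logging is on, a bracket with time
  //     // stamp and node counts is written to the CompileLog.
  //   }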
duke@435 | 115 | |
duke@435 | 116 | // Information per category of alias (memory slice) |
duke@435 | 117 | class AliasType { |
duke@435 | 118 | private: |
duke@435 | 119 | friend class Compile; |
duke@435 | 120 | |
duke@435 | 121 | int _index; // unique index, used with MergeMemNode |
duke@435 | 122 | const TypePtr* _adr_type; // normalized address type |
duke@435 | 123 | ciField* _field; // relevant instance field, or null if none |
vlivanov@5658 | 124 | const Type* _element; // relevant array element type, or null if none |
duke@435 | 125 | bool _is_rewritable; // false if the memory is write-once only |
duke@435 | 126 | int _general_index; // if this type is an instance, the general |
duke@435 | 127 | // type that this is an instance of |
duke@435 | 128 | |
duke@435 | 129 | void Init(int i, const TypePtr* at); |
duke@435 | 130 | |
duke@435 | 131 | public: |
duke@435 | 132 | int index() const { return _index; } |
duke@435 | 133 | const TypePtr* adr_type() const { return _adr_type; } |
duke@435 | 134 | ciField* field() const { return _field; } |
vlivanov@5658 | 135 | const Type* element() const { return _element; } |
duke@435 | 136 | bool is_rewritable() const { return _is_rewritable; } |
duke@435 | 137 | bool is_volatile() const { return (_field ? _field->is_volatile() : false); } |
duke@435 | 138 | int general_index() const { return (_general_index != 0) ? _general_index : _index; } |
duke@435 | 139 | |
duke@435 | 140 | void set_rewritable(bool z) { _is_rewritable = z; } |
duke@435 | 141 | void set_field(ciField* f) { |
duke@435 | 142 | assert(!_field,""); |
duke@435 | 143 | _field = f; |
vlivanov@5658 | 144 | if (f->is_final() || f->is_stable()) { |
vlivanov@5658 | 145 | // In the case of @Stable, multiple writes are possible but may be assumed to be no-ops. |
vlivanov@5658 | 146 | _is_rewritable = false; |
vlivanov@5658 | 147 | } |
vlivanov@5658 | 148 | } |
vlivanov@5658 | 149 | void set_element(const Type* e) { |
vlivanov@5658 | 150 | assert(_element == NULL, ""); |
vlivanov@5658 | 151 | _element = e; |
duke@435 | 152 | } |
duke@435 | 153 | |
duke@435 | 154 | void print_on(outputStream* st) PRODUCT_RETURN; |
duke@435 | 155 | }; |
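  // Illustrative sketch, not part of the original header: alias types map an
  // address type to a memory-slice index; a typical lookup during graph
  // construction looks roughly like this (everything other than the accessors
  // declared in this file is an assumption):
  //
  //   const TypePtr* adr_type  = mem_op->adr_type();            // address type of a memory op (assumed accessor)
  //   int            alias_idx = C->get_alias_index(adr_type);  // slice index used with MergeMemNode
  //   bool           writable  = C->alias_type(adr_type)->is_rewritable();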
duke@435 | 156 | |
duke@435 | 157 | enum { |
duke@435 | 158 | logAliasCacheSize = 6, |
duke@435 | 159 | AliasCacheSize = (1<<logAliasCacheSize) |
duke@435 | 160 | }; |
duke@435 | 161 | struct AliasCacheEntry { const TypePtr* _adr_type; int _index; }; // simple duple type |
duke@435 | 162 | enum { |
coleenp@4037 | 163 | trapHistLength = MethodData::_trap_hist_limit |
duke@435 | 164 | }; |
duke@435 | 165 | |
twisti@2350 | 166 | // Constant entry of the constant table. |
twisti@2350 | 167 | class Constant { |
twisti@2350 | 168 | private: |
twisti@2350 | 169 | BasicType _type; |
coleenp@4037 | 170 | union { |
kvn@4199 | 171 | jvalue _value; |
coleenp@4037 | 172 | Metadata* _metadata; |
coleenp@4037 | 173 | } _v; |
twisti@2350 | 174 | int _offset; // offset of this constant (in bytes) relative to the constant table base. |
twisti@3310 | 175 | float _freq; |
twisti@2350 | 176 | bool _can_be_reused; // true (default) if the value can be shared with other users. |
twisti@2350 | 177 | |
twisti@2350 | 178 | public: |
coleenp@4037 | 179 | Constant() : _type(T_ILLEGAL), _offset(-1), _freq(0.0f), _can_be_reused(true) { _v._value.l = 0; } |
twisti@3310 | 180 | Constant(BasicType type, jvalue value, float freq = 0.0f, bool can_be_reused = true) : |
twisti@2350 | 181 | _type(type), |
twisti@2350 | 182 | _offset(-1), |
twisti@3310 | 183 | _freq(freq), |
twisti@2350 | 184 | _can_be_reused(can_be_reused) |
coleenp@4037 | 185 | { |
coleenp@4037 | 186 | assert(type != T_METADATA, "wrong constructor"); |
coleenp@4037 | 187 | _v._value = value; |
coleenp@4037 | 188 | } |
coleenp@4037 | 189 | Constant(Metadata* metadata, bool can_be_reused = true) : |
coleenp@4037 | 190 | _type(T_METADATA), |
coleenp@4037 | 191 | _offset(-1), |
coleenp@4037 | 192 | _freq(0.0f), |
coleenp@4037 | 193 | _can_be_reused(can_be_reused) |
coleenp@4037 | 194 | { |
coleenp@4037 | 195 | _v._metadata = metadata; |
coleenp@4037 | 196 | } |
twisti@2350 | 197 | |
twisti@2350 | 198 | bool operator==(const Constant& other); |
twisti@2350 | 199 | |
twisti@2350 | 200 | BasicType type() const { return _type; } |
twisti@2350 | 201 | |
coleenp@4037 | 202 | jlong get_jlong() const { return _v._value.j; } |
coleenp@4037 | 203 | jfloat get_jfloat() const { return _v._value.f; } |
coleenp@4037 | 204 | jdouble get_jdouble() const { return _v._value.d; } |
coleenp@4037 | 205 | jobject get_jobject() const { return _v._value.l; } |
coleenp@4037 | 206 | |
coleenp@4037 | 207 | Metadata* get_metadata() const { return _v._metadata; } |
twisti@2350 | 208 | |
twisti@2350 | 209 | int offset() const { return _offset; } |
twisti@2350 | 210 | void set_offset(int offset) { _offset = offset; } |
twisti@2350 | 211 | |
twisti@3310 | 212 | float freq() const { return _freq; } |
twisti@3310 | 213 | void inc_freq(float freq) { _freq += freq; } |
twisti@3310 | 214 | |
twisti@2350 | 215 | bool can_be_reused() const { return _can_be_reused; } |
twisti@2350 | 216 | }; |
twisti@2350 | 217 | |
twisti@2350 | 218 | // Constant table. |
twisti@2350 | 219 | class ConstantTable { |
twisti@2350 | 220 | private: |
twisti@2350 | 221 | GrowableArray<Constant> _constants; // Constants of this table. |
twisti@2350 | 222 | int _size; // Size in bytes the emitted constant table takes (including padding). |
twisti@2350 | 223 | int _table_base_offset; // Offset of the table base that gets added to the constant offsets. |
twisti@3310 | 224 | int _nof_jump_tables; // Number of jump-tables in this constant table. |
twisti@3310 | 225 | |
twisti@3310 | 226 | static int qsort_comparator(Constant* a, Constant* b); |
twisti@3310 | 227 | |
twisti@3310 | 228 | // We use negative frequencies to keep the jump-tables in the |
twisti@3310 | 229 | // order in which they were added. Otherwise we get into |
twisti@3310 | 230 | // trouble with relocation. |
twisti@3310 | 231 | float next_jump_table_freq() { return -1.0f * (++_nof_jump_tables); } |
twisti@2350 | 232 | |
twisti@2350 | 233 | public: |
twisti@2350 | 234 | ConstantTable() : |
twisti@2350 | 235 | _size(-1), |
twisti@3310 | 236 | _table_base_offset(-1), // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit). |
twisti@3310 | 237 | _nof_jump_tables(0) |
twisti@2350 | 238 | {} |
twisti@2350 | 239 | |
twisti@3310 | 240 | int size() const { assert(_size != -1, "not calculated yet"); return _size; } |
twisti@2350 | 241 | |
twisti@3310 | 242 | int calculate_table_base_offset() const; // AD specific |
twisti@3310 | 243 | void set_table_base_offset(int x) { assert(_table_base_offset == -1 || x == _table_base_offset, "can't change"); _table_base_offset = x; } |
twisti@3310 | 244 | int table_base_offset() const { assert(_table_base_offset != -1, "not set yet"); return _table_base_offset; } |
twisti@2350 | 245 | |
twisti@2350 | 246 | void emit(CodeBuffer& cb); |
twisti@2350 | 247 | |
twisti@2350 | 248 | // Returns the offset of the last entry (the top) of the constant table. |
twisti@3310 | 249 | int top_offset() const { assert(_constants.top().offset() != -1, "not bound yet"); return _constants.top().offset(); } |
twisti@2350 | 250 | |
twisti@2350 | 251 | void calculate_offsets_and_size(); |
twisti@2350 | 252 | int find_offset(Constant& con) const; |
twisti@2350 | 253 | |
twisti@2350 | 254 | void add(Constant& con); |
twisti@3310 | 255 | Constant add(MachConstantNode* n, BasicType type, jvalue value); |
coleenp@4037 | 256 | Constant add(Metadata* metadata); |
twisti@3310 | 257 | Constant add(MachConstantNode* n, MachOper* oper); |
twisti@3310 | 258 | Constant add(MachConstantNode* n, jfloat f) { |
twisti@2350 | 259 | jvalue value; value.f = f; |
twisti@3310 | 260 | return add(n, T_FLOAT, value); |
twisti@2350 | 261 | } |
twisti@3310 | 262 | Constant add(MachConstantNode* n, jdouble d) { |
twisti@2350 | 263 | jvalue value; value.d = d; |
twisti@3310 | 264 | return add(n, T_DOUBLE, value); |
twisti@2350 | 265 | } |
twisti@2350 | 266 | |
twisti@3310 | 267 | // Jump-table |
twisti@3310 | 268 | Constant add_jump_table(MachConstantNode* n); |
twisti@3310 | 269 | void fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const; |
twisti@2350 | 270 | }; |
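  // Illustrative sketch, not part of the original header: back ends request
  // constant-table entries through the add() overloads above and later turn an
  // entry's offset into a displacement from the table base, roughly:
  //
  //   Constant con  = C->constant_table().add(mach_con_node, 2.5f);           // records a T_FLOAT entry
  //   int      disp = C->constant_table().table_base_offset() + con.offset(); // displacement from table base
  //
  // (mach_con_node stands for some MachConstantNode; how 'disp' is encoded is
  // platform-specific and only sketched here.)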
twisti@2350 | 271 | |
duke@435 | 272 | private: |
duke@435 | 273 | // Fixed parameters to this compilation. |
duke@435 | 274 | const int _compile_id; |
duke@435 | 275 | const bool _save_argument_registers; // save/restore arg regs for trampolines |
duke@435 | 276 | const bool _subsume_loads; // Load can be matched as part of a larger op. |
kvn@473 | 277 | const bool _do_escape_analysis; // Do escape analysis. |
kvn@5110 | 278 | const bool _eliminate_boxing; // Do boxing elimination. |
duke@435 | 279 | ciMethod* _method; // The method being compiled. |
duke@435 | 280 | int _entry_bci; // entry bci for osr methods. |
duke@435 | 281 | const TypeFunc* _tf; // My kind of signature |
duke@435 | 282 | InlineTree* _ilt; // Ditto (temporary). |
duke@435 | 283 | address _stub_function; // VM entry for stub being compiled, or NULL |
duke@435 | 284 | const char* _stub_name; // Name of stub or adapter being compiled, or NULL |
duke@435 | 285 | address _stub_entry_point; // Compiled code entry for generated stub, or NULL |
duke@435 | 286 | |
duke@435 | 287 | // Control of this compilation. |
duke@435 | 288 | int _num_loop_opts; // Number of iterations for doing loop optimizations |
duke@435 | 289 | int _max_inline_size; // Max inline size for this compilation |
duke@435 | 290 | int _freq_inline_size; // Max hot method inline size for this compilation |
duke@435 | 291 | int _fixed_slots; // count of frame slots not allocated by the register |
duke@435 | 292 | // allocator i.e. locks, original deopt pc, etc. |
duke@435 | 293 | // For deopt |
duke@435 | 294 | int _orig_pc_slot; |
duke@435 | 295 | int _orig_pc_slot_offset_in_bytes; |
duke@435 | 296 | |
duke@435 | 297 | int _major_progress; // Count of something big happening |
roland@4409 | 298 | bool _inlining_progress; // progress doing incremental inlining? |
roland@4409 | 299 | bool _inlining_incrementally;// Are we doing incremental inlining (post parse) |
duke@435 | 300 | bool _has_loops; // True if the method _may_ have some loops |
duke@435 | 301 | bool _has_split_ifs; // True if the method _may_ have some split-if |
duke@435 | 302 | bool _has_unsafe_access; // True if the method _may_ produce faults in unsafe loads or stores. |
never@1515 | 303 | bool _has_stringbuilder; // True if StringBuffers or StringBuilders are allocated |
kvn@5110 | 304 | bool _has_boxed_value; // True if a boxed object is allocated |
kvn@4103 | 305 | int _max_vector_size; // Maximum size of generated vectors |
duke@435 | 306 | uint _trap_hist[trapHistLength]; // Cumulative traps |
duke@435 | 307 | bool _trap_can_recompile; // Have we emitted a recompiling trap? |
duke@435 | 308 | uint _decompile_count; // Cumulative decompilation counts. |
duke@435 | 309 | bool _do_inlining; // True if we intend to do inlining |
duke@435 | 310 | bool _do_scheduling; // True if we intend to do scheduling |
rasbold@853 | 311 | bool _do_freq_based_layout; // True if we intend to do frequency based block layout |
duke@435 | 312 | bool _do_count_invocations; // True if we generate code to count invocations |
coleenp@4037 | 313 | bool _do_method_data_update; // True if we generate code to update MethodData*s |
duke@435 | 314 | int _AliasLevel; // Locally-adjusted version of AliasLevel flag. |
duke@435 | 315 | bool _print_assembly; // True if we should dump assembly code for this compilation |
kvn@5763 | 316 | bool _print_inlining; // True if we should print inlining for this compilation |
kvn@5763 | 317 | bool _print_intrinsics; // True if we should print intrinsics for this compilation |
duke@435 | 318 | #ifndef PRODUCT |
duke@435 | 319 | bool _trace_opto_output; |
never@802 | 320 | bool _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing |
duke@435 | 321 | #endif |
duke@435 | 322 | |
twisti@1700 | 323 | // JSR 292 |
twisti@1700 | 324 | bool _has_method_handle_invokes; // True if this method has MethodHandle invokes. |
twisti@1700 | 325 | |
duke@435 | 326 | // Compilation environment. |
duke@435 | 327 | Arena _comp_arena; // Arena with lifetime equivalent to Compile |
duke@435 | 328 | ciEnv* _env; // CI interface |
duke@435 | 329 | CompileLog* _log; // from CompilerThread |
duke@435 | 330 | const char* _failure_reason; // for record_failure/failing pattern |
duke@435 | 331 | GrowableArray<CallGenerator*>* _intrinsics; // List of intrinsics. |
duke@435 | 332 | GrowableArray<Node*>* _macro_nodes; // List of nodes which need to be expanded before matching. |
cfang@1607 | 333 | GrowableArray<Node*>* _predicate_opaqs; // List of Opaque1 nodes for the loop predicates. |
roland@4589 | 334 | GrowableArray<Node*>* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common |
duke@435 | 335 | ConnectionGraph* _congraph; |
duke@435 | 336 | #ifndef PRODUCT |
duke@435 | 337 | IdealGraphPrinter* _printer; |
duke@435 | 338 | #endif |
duke@435 | 339 | |
sla@5237 | 340 | |
duke@435 | 341 | // Node management |
duke@435 | 342 | uint _unique; // Counter for unique Node indices |
bharadwaj@4315 | 343 | VectorSet _dead_node_list; // Set of dead nodes |
bharadwaj@4315 | 344 | uint _dead_node_count; // Number of dead nodes; VectorSet::Size() is O(N). |
bharadwaj@4315 | 345 | // So use this to keep count and make the call O(1). |
duke@435 | 346 | debug_only(static int _debug_idx;) // Monotonic counter (not reset), use -XX:BreakAtNode=<idx> |
duke@435 | 347 | Arena _node_arena; // Arena for new-space Nodes |
duke@435 | 348 | Arena _old_arena; // Arena for old-space Nodes, lifetime during xform |
duke@435 | 349 | RootNode* _root; // Unique root of compilation, or NULL after bail-out. |
duke@435 | 350 | Node* _top; // Unique top node. (Reset by various phases.) |
duke@435 | 351 | |
duke@435 | 352 | Node* _immutable_memory; // Initial memory state |
duke@435 | 353 | |
duke@435 | 354 | Node* _recent_alloc_obj; |
duke@435 | 355 | Node* _recent_alloc_ctl; |
duke@435 | 356 | |
twisti@2350 | 357 | // Constant table |
twisti@2350 | 358 | ConstantTable _constant_table; // The constant table for this compile. |
twisti@2350 | 359 | MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton. |
twisti@2350 | 360 | |
twisti@2350 | 361 | |
duke@435 | 362 | // Blocked array of debugging and profiling information, |
duke@435 | 363 | // tracked per node. |
duke@435 | 364 | enum { _log2_node_notes_block_size = 8, |
duke@435 | 365 | _node_notes_block_size = (1<<_log2_node_notes_block_size) |
duke@435 | 366 | }; |
duke@435 | 367 | GrowableArray<Node_Notes*>* _node_note_array; |
duke@435 | 368 | Node_Notes* _default_node_notes; // default notes for new nodes |
duke@435 | 369 | |
duke@435 | 370 | // After parsing and every bulk phase we hang onto the Root instruction. |
duke@435 | 371 | // The RootNode instruction is where the whole program begins. It produces |
duke@435 | 372 | // the initial Control and BOTTOM for everybody else. |
duke@435 | 373 | |
duke@435 | 374 | // Type management |
duke@435 | 375 | Arena _Compile_types; // Arena for all types |
duke@435 | 376 | Arena* _type_arena; // Alias for _Compile_types except in Initialize_shared() |
duke@435 | 377 | Dict* _type_dict; // Intern table |
duke@435 | 378 | void* _type_hwm; // Last allocation (see Type::operator new/delete) |
duke@435 | 379 | size_t _type_last_size; // Last allocation size (see Type::operator new/delete) |
duke@435 | 380 | ciMethod* _last_tf_m; // Cache for |
duke@435 | 381 | const TypeFunc* _last_tf; // TypeFunc::make |
duke@435 | 382 | AliasType** _alias_types; // List of alias types seen so far. |
duke@435 | 383 | int _num_alias_types; // Logical length of _alias_types |
duke@435 | 384 | int _max_alias_types; // Physical length of _alias_types |
duke@435 | 385 | AliasCacheEntry _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking |
duke@435 | 386 | |
duke@435 | 387 | // Parsing, optimization |
duke@435 | 388 | PhaseGVN* _initial_gvn; // Results of parse-time PhaseGVN |
duke@435 | 389 | Unique_Node_List* _for_igvn; // Initial work-list for next round of Iterative GVN |
duke@435 | 390 | WarmCallInfo* _warm_calls; // Sorted work-list for heat-based inlining. |
duke@435 | 391 | |
roland@4409 | 392 | GrowableArray<CallGenerator*> _late_inlines; // List of CallGenerators to be revisited after |
roland@4409 | 393 | // main parsing has finished. |
roland@4409 | 394 | GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations |
roland@4409 | 395 | |
kvn@5110 | 396 | GrowableArray<CallGenerator*> _boxing_late_inlines; // same but for boxing operations |
kvn@5110 | 397 | |
roland@4409 | 398 | int _late_inlines_pos; // Where in the queue should the next late inlining candidate go (emulate depth first inlining) |
roland@4409 | 399 | uint _number_of_mh_late_inlines; // number of method handle late inlining still pending |
roland@4409 | 400 | |
never@1515 | 401 | |
roland@4357 | 402 | // Inlining may not happen in parse order which would make |
roland@4357 | 403 | // PrintInlining output confusing. Keep track of PrintInlining |
roland@4357 | 404 | // pieces in order. |
roland@4357 | 405 | class PrintInliningBuffer : public ResourceObj { |
roland@4357 | 406 | private: |
roland@4357 | 407 | CallGenerator* _cg; |
roland@4357 | 408 | stringStream* _ss; |
roland@4357 | 409 | |
roland@4357 | 410 | public: |
roland@4357 | 411 | PrintInliningBuffer() |
roland@4357 | 412 | : _cg(NULL) { _ss = new stringStream(); } |
roland@4357 | 413 | |
roland@4357 | 414 | stringStream* ss() const { return _ss; } |
roland@4357 | 415 | CallGenerator* cg() const { return _cg; } |
roland@4357 | 416 | void set_cg(CallGenerator* cg) { _cg = cg; } |
roland@4357 | 417 | }; |
roland@4357 | 418 | |
roland@4357 | 419 | GrowableArray<PrintInliningBuffer>* _print_inlining_list; |
kvn@5763 | 420 | int _print_inlining_idx; |
roland@4357 | 421 | |
roland@4589 | 422 | // Only keep nodes in the expensive node list that need to be optimized |
roland@4589 | 423 | void cleanup_expensive_nodes(PhaseIterGVN &igvn); |
roland@4589 | 424 | // Use for sorting expensive nodes to bring similar nodes together |
roland@4589 | 425 | static int cmp_expensive_nodes(Node** n1, Node** n2); |
roland@4589 | 426 | // Expensive nodes list already sorted? |
roland@4589 | 427 | bool expensive_nodes_sorted() const; |
roland@5991 | 428 | // Remove the speculative part of types and clean up the graph |
roland@5991 | 429 | void remove_speculative_types(PhaseIterGVN &igvn); |
roland@4589 | 430 | |
roland@5981 | 431 | // Are we within a PreserveJVMState block? |
roland@5981 | 432 | int _preserve_jvm_state; |
roland@5981 | 433 | |
roland@4357 | 434 | public: |
roland@4357 | 435 | |
roland@4357 | 436 | outputStream* print_inlining_stream() const { |
kvn@5763 | 437 | return _print_inlining_list->adr_at(_print_inlining_idx)->ss(); |
roland@4357 | 438 | } |
roland@4357 | 439 | |
roland@4357 | 440 | void print_inlining_skip(CallGenerator* cg) { |
kvn@5763 | 441 | if (_print_inlining) { |
kvn@5763 | 442 | _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg); |
kvn@5763 | 443 | _print_inlining_idx++; |
kvn@5763 | 444 | _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer()); |
roland@4357 | 445 | } |
roland@4357 | 446 | } |
roland@4357 | 447 | |
roland@4357 | 448 | void print_inlining_insert(CallGenerator* cg) { |
kvn@5763 | 449 | if (_print_inlining) { |
roland@4357 | 450 | for (int i = 0; i < _print_inlining_list->length(); i++) { |
kvn@5763 | 451 | if (_print_inlining_list->adr_at(i)->cg() == cg) { |
roland@4357 | 452 | _print_inlining_list->insert_before(i+1, PrintInliningBuffer()); |
kvn@5763 | 453 | _print_inlining_idx = i+1; |
kvn@5763 | 454 | _print_inlining_list->adr_at(i)->set_cg(NULL); |
roland@4357 | 455 | return; |
roland@4357 | 456 | } |
roland@4357 | 457 | } |
roland@4357 | 458 | ShouldNotReachHere(); |
roland@4357 | 459 | } |
roland@4357 | 460 | } |
roland@4357 | 461 | |
roland@4357 | 462 | void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) { |
roland@4357 | 463 | stringStream ss; |
roland@4357 | 464 | CompileTask::print_inlining(&ss, method, inline_level, bci, msg); |
roland@4357 | 465 | print_inlining_stream()->print(ss.as_string()); |
roland@4357 | 466 | } |
roland@4357 | 467 | |
roland@4357 | 468 | private: |
duke@435 | 469 | // Matching, CFG layout, allocation, code generation |
duke@435 | 470 | PhaseCFG* _cfg; // Results of CFG finding |
duke@435 | 471 | bool _select_24_bit_instr; // We selected an instruction with a 24-bit result |
duke@435 | 472 | bool _in_24_bit_fp_mode; // We are emitting instructions with 24-bit results |
kvn@1294 | 473 | int _java_calls; // Number of java calls in the method |
kvn@1294 | 474 | int _inner_loops; // Number of inner loops in the method |
duke@435 | 475 | Matcher* _matcher; // Engine to map ideal to machine instructions |
duke@435 | 476 | PhaseRegAlloc* _regalloc; // Results of register allocation. |
duke@435 | 477 | int _frame_slots; // Size of total frame in stack slots |
duke@435 | 478 | CodeOffsets _code_offsets; // Offsets into the code for various interesting entries |
duke@435 | 479 | RegMask _FIRST_STACK_mask; // All stack slots usable for spills (depends on frame layout) |
duke@435 | 480 | Arena* _indexSet_arena; // control IndexSet allocation within PhaseChaitin |
duke@435 | 481 | void* _indexSet_free_block_list; // free list of IndexSet bit blocks |
duke@435 | 482 | |
duke@435 | 483 | uint _node_bundling_limit; |
duke@435 | 484 | Bundle* _node_bundling_base; // Information for instruction bundling |
duke@435 | 485 | |
duke@435 | 486 | // Instruction bits passed off to the VM |
duke@435 | 487 | int _method_size; // Size of nmethod code segment in bytes |
duke@435 | 488 | CodeBuffer _code_buffer; // Where the code is assembled |
duke@435 | 489 | int _first_block_size; // Size of unvalidated entry point code / OSR poison code |
duke@435 | 490 | ExceptionHandlerTable _handler_table; // Table of native-code exception handlers |
duke@435 | 491 | ImplicitExceptionTable _inc_table; // Table of implicit null checks in native code |
duke@435 | 492 | OopMapSet* _oop_map_set; // Table of oop maps (one for each safepoint location) |
duke@435 | 493 | static int _CompiledZap_count; // counter compared against CompileZap[First/Last] |
duke@435 | 494 | BufferBlob* _scratch_buffer_blob; // For temporary code buffers. |
duke@435 | 495 | relocInfo* _scratch_locs_memory; // For temporary code buffers. |
twisti@2350 | 496 | int _scratch_const_size; // For temporary code buffers. |
twisti@2350 | 497 | bool _in_scratch_emit_size; // true when in scratch_emit_size. |
duke@435 | 498 | |
duke@435 | 499 | public: |
duke@435 | 500 | // Accessors |
duke@435 | 501 | |
duke@435 | 502 | // The Compile instance currently active in this (compiler) thread. |
duke@435 | 503 | static Compile* current() { |
duke@435 | 504 | return (Compile*) ciEnv::current()->compiler_data(); |
duke@435 | 505 | } |
duke@435 | 506 | |
duke@435 | 507 | // ID for this compilation. Useful for setting breakpoints in the debugger. |
duke@435 | 508 | int compile_id() const { return _compile_id; } |
duke@435 | 509 | |
duke@435 | 510 | // Does this compilation allow instructions to subsume loads? User |
duke@435 | 511 | // instructions that subsume a load may result in an unschedulable |
duke@435 | 512 | // instruction sequence. |
duke@435 | 513 | bool subsume_loads() const { return _subsume_loads; } |
kvn@5110 | 514 | /** Do escape analysis. */ |
kvn@473 | 515 | bool do_escape_analysis() const { return _do_escape_analysis; } |
kvn@5110 | 516 | /** Do boxing elimination. */ |
kvn@5110 | 517 | bool eliminate_boxing() const { return _eliminate_boxing; } |
kvn@5110 | 518 | /** Do aggressive boxing elimination. */ |
kvn@5110 | 519 | bool aggressive_unboxing() const { return _eliminate_boxing && AggressiveUnboxing; } |
duke@435 | 520 | bool save_argument_registers() const { return _save_argument_registers; } |
duke@435 | 521 | |
duke@435 | 522 | |
duke@435 | 523 | // Other fixed compilation parameters. |
duke@435 | 524 | ciMethod* method() const { return _method; } |
duke@435 | 525 | int entry_bci() const { return _entry_bci; } |
duke@435 | 526 | bool is_osr_compilation() const { return _entry_bci != InvocationEntryBci; } |
duke@435 | 527 | bool is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); } |
duke@435 | 528 | const TypeFunc* tf() const { assert(_tf!=NULL, ""); return _tf; } |
duke@435 | 529 | void init_tf(const TypeFunc* tf) { assert(_tf==NULL, ""); _tf = tf; } |
duke@435 | 530 | InlineTree* ilt() const { return _ilt; } |
duke@435 | 531 | address stub_function() const { return _stub_function; } |
duke@435 | 532 | const char* stub_name() const { return _stub_name; } |
duke@435 | 533 | address stub_entry_point() const { return _stub_entry_point; } |
duke@435 | 534 | |
duke@435 | 535 | // Control of this compilation. |
duke@435 | 536 | int fixed_slots() const { assert(_fixed_slots >= 0, ""); return _fixed_slots; } |
duke@435 | 537 | void set_fixed_slots(int n) { _fixed_slots = n; } |
duke@435 | 538 | int major_progress() const { return _major_progress; } |
roland@4409 | 539 | void set_inlining_progress(bool z) { _inlining_progress = z; } |
roland@4409 | 540 | int inlining_progress() const { return _inlining_progress; } |
roland@4409 | 541 | void set_inlining_incrementally(bool z) { _inlining_incrementally = z; } |
roland@4409 | 542 | int inlining_incrementally() const { return _inlining_incrementally; } |
duke@435 | 543 | void set_major_progress() { _major_progress++; } |
duke@435 | 544 | void clear_major_progress() { _major_progress = 0; } |
duke@435 | 545 | int num_loop_opts() const { return _num_loop_opts; } |
duke@435 | 546 | void set_num_loop_opts(int n) { _num_loop_opts = n; } |
duke@435 | 547 | int max_inline_size() const { return _max_inline_size; } |
duke@435 | 548 | void set_freq_inline_size(int n) { _freq_inline_size = n; } |
duke@435 | 549 | int freq_inline_size() const { return _freq_inline_size; } |
duke@435 | 550 | void set_max_inline_size(int n) { _max_inline_size = n; } |
duke@435 | 551 | bool has_loops() const { return _has_loops; } |
duke@435 | 552 | void set_has_loops(bool z) { _has_loops = z; } |
duke@435 | 553 | bool has_split_ifs() const { return _has_split_ifs; } |
duke@435 | 554 | void set_has_split_ifs(bool z) { _has_split_ifs = z; } |
duke@435 | 555 | bool has_unsafe_access() const { return _has_unsafe_access; } |
duke@435 | 556 | void set_has_unsafe_access(bool z) { _has_unsafe_access = z; } |
never@1515 | 557 | bool has_stringbuilder() const { return _has_stringbuilder; } |
never@1515 | 558 | void set_has_stringbuilder(bool z) { _has_stringbuilder = z; } |
kvn@5110 | 559 | bool has_boxed_value() const { return _has_boxed_value; } |
kvn@5110 | 560 | void set_has_boxed_value(bool z) { _has_boxed_value = z; } |
kvn@4103 | 561 | int max_vector_size() const { return _max_vector_size; } |
kvn@4103 | 562 | void set_max_vector_size(int s) { _max_vector_size = s; } |
duke@435 | 563 | void set_trap_count(uint r, uint c) { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; } |
duke@435 | 564 | uint trap_count(uint r) const { assert(r < trapHistLength, "oob"); return _trap_hist[r]; } |
duke@435 | 565 | bool trap_can_recompile() const { return _trap_can_recompile; } |
duke@435 | 566 | void set_trap_can_recompile(bool z) { _trap_can_recompile = z; } |
duke@435 | 567 | uint decompile_count() const { return _decompile_count; } |
duke@435 | 568 | void set_decompile_count(uint c) { _decompile_count = c; } |
duke@435 | 569 | bool allow_range_check_smearing() const; |
duke@435 | 570 | bool do_inlining() const { return _do_inlining; } |
duke@435 | 571 | void set_do_inlining(bool z) { _do_inlining = z; } |
duke@435 | 572 | bool do_scheduling() const { return _do_scheduling; } |
duke@435 | 573 | void set_do_scheduling(bool z) { _do_scheduling = z; } |
rasbold@853 | 574 | bool do_freq_based_layout() const{ return _do_freq_based_layout; } |
rasbold@853 | 575 | void set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; } |
duke@435 | 576 | bool do_count_invocations() const{ return _do_count_invocations; } |
duke@435 | 577 | void set_do_count_invocations(bool z){ _do_count_invocations = z; } |
duke@435 | 578 | bool do_method_data_update() const { return _do_method_data_update; } |
duke@435 | 579 | void set_do_method_data_update(bool z) { _do_method_data_update = z; } |
duke@435 | 580 | int AliasLevel() const { return _AliasLevel; } |
duke@435 | 581 | bool print_assembly() const { return _print_assembly; } |
duke@435 | 582 | void set_print_assembly(bool z) { _print_assembly = z; } |
kvn@5763 | 583 | bool print_inlining() const { return _print_inlining; } |
kvn@5763 | 584 | void set_print_inlining(bool z) { _print_inlining = z; } |
kvn@5763 | 585 | bool print_intrinsics() const { return _print_intrinsics; } |
kvn@5763 | 586 | void set_print_intrinsics(bool z) { _print_intrinsics = z; } |
duke@435 | 587 | // check the CompilerOracle for special behaviours for this compile |
duke@435 | 588 | bool method_has_option(const char * option) { |
duke@435 | 589 | return method() != NULL && method()->has_option(option); |
duke@435 | 590 | } |
duke@435 | 591 | #ifndef PRODUCT |
duke@435 | 592 | bool trace_opto_output() const { return _trace_opto_output; } |
never@802 | 593 | bool parsed_irreducible_loop() const { return _parsed_irreducible_loop; } |
never@802 | 594 | void set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; } |
goetz@6488 | 595 | int _in_dump_cnt; // Required for dumping ir nodes. |
duke@435 | 596 | #endif |
duke@435 | 597 | |
twisti@1700 | 598 | // JSR 292 |
twisti@1700 | 599 | bool has_method_handle_invokes() const { return _has_method_handle_invokes; } |
twisti@1700 | 600 | void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; } |
twisti@1700 | 601 | |
mgronlun@6131 | 602 | Ticks _latest_stage_start_counter; |
sla@5237 | 603 | |
duke@435 | 604 | void begin_method() { |
duke@435 | 605 | #ifndef PRODUCT |
duke@435 | 606 | if (_printer) _printer->begin_method(this); |
duke@435 | 607 | #endif |
mgronlun@6131 | 608 | C->_latest_stage_start_counter.stamp(); |
duke@435 | 609 | } |
sla@5237 | 610 | |
sla@5237 | 611 | void print_method(CompilerPhaseType cpt, int level = 1) { |
mgronlun@6131 | 612 | EventCompilerPhase event; |
sla@5237 | 613 | if (event.should_commit()) { |
sla@5237 | 614 | event.set_starttime(C->_latest_stage_start_counter); |
sla@5237 | 615 | event.set_phase((u1) cpt); |
sla@5237 | 616 | event.set_compileID(C->_compile_id); |
sla@5237 | 617 | event.set_phaseLevel(level); |
sla@5237 | 618 | event.commit(); |
sla@5237 | 619 | } |
sla@5237 | 620 | |
sla@5237 | 621 | |
duke@435 | 622 | #ifndef PRODUCT |
sla@5237 | 623 | if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level); |
duke@435 | 624 | #endif |
mgronlun@6131 | 625 | C->_latest_stage_start_counter.stamp(); |
duke@435 | 626 | } |
sla@5237 | 627 | |
sla@5237 | 628 | void end_method(int level = 1) { |
mgronlun@6131 | 629 | EventCompilerPhase event; |
sla@5237 | 630 | if (event.should_commit()) { |
sla@5237 | 631 | event.set_starttime(C->_latest_stage_start_counter); |
sla@5237 | 632 | event.set_phase((u1) PHASE_END); |
sla@5237 | 633 | event.set_compileID(C->_compile_id); |
sla@5237 | 634 | event.set_phaseLevel(level); |
sla@5237 | 635 | event.commit(); |
sla@5237 | 636 | } |
duke@435 | 637 | #ifndef PRODUCT |
duke@435 | 638 | if (_printer) _printer->end_method(); |
duke@435 | 639 | #endif |
duke@435 | 640 | } |
duke@435 | 641 | |
kvn@5110 | 642 | int macro_count() const { return _macro_nodes->length(); } |
kvn@5110 | 643 | int predicate_count() const { return _predicate_opaqs->length();} |
kvn@5110 | 644 | int expensive_count() const { return _expensive_nodes->length(); } |
kvn@5110 | 645 | Node* macro_node(int idx) const { return _macro_nodes->at(idx); } |
kvn@5110 | 646 | Node* predicate_opaque1_node(int idx) const { return _predicate_opaqs->at(idx);} |
kvn@5110 | 647 | Node* expensive_node(int idx) const { return _expensive_nodes->at(idx); } |
duke@435 | 648 | ConnectionGraph* congraph() { return _congraph;} |
kvn@1989 | 649 | void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;} |
duke@435 | 650 | void add_macro_node(Node * n) { |
duke@435 | 651 | //assert(n->is_macro(), "must be a macro node"); |
duke@435 | 652 | assert(!_macro_nodes->contains(n), " duplicate entry in expand list"); |
duke@435 | 653 | _macro_nodes->append(n); |
duke@435 | 654 | } |
duke@435 | 655 | void remove_macro_node(Node * n) { |
duke@435 | 656 | // this function may be called twice for a node so check |
duke@435 | 657 | // that the node is in the array before attempting to remove it |
duke@435 | 658 | if (_macro_nodes->contains(n)) |
duke@435 | 659 | _macro_nodes->remove(n); |
cfang@1607 | 660 | // remove from _predicate_opaqs list also if it is there |
cfang@1607 | 661 | if (predicate_count() > 0 && _predicate_opaqs->contains(n)){ |
cfang@1607 | 662 | _predicate_opaqs->remove(n); |
cfang@1607 | 663 | } |
duke@435 | 664 | } |
roland@4589 | 665 | void add_expensive_node(Node * n); |
roland@4589 | 666 | void remove_expensive_node(Node * n) { |
roland@4589 | 667 | if (_expensive_nodes->contains(n)) { |
roland@4589 | 668 | _expensive_nodes->remove(n); |
roland@4589 | 669 | } |
roland@4589 | 670 | } |
cfang@1607 | 671 | void add_predicate_opaq(Node * n) { |
cfang@1607 | 672 | assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1"); |
cfang@1607 | 673 | assert(_macro_nodes->contains(n), "should have already been in macro list"); |
cfang@1607 | 674 | _predicate_opaqs->append(n); |
cfang@1607 | 675 | } |
cfang@1607 | 676 | // remove the opaque nodes that protect the predicates so that the unused checks and |
cfang@1607 | 677 | // uncommon traps will be eliminated from the graph. |
cfang@1607 | 678 | void cleanup_loop_predicates(PhaseIterGVN &igvn); |
kvn@2727 | 679 | bool is_predicate_opaq(Node * n) { |
kvn@2727 | 680 | return _predicate_opaqs->contains(n); |
kvn@2727 | 681 | } |
duke@435 | 682 | |
roland@4589 | 683 | // Are there candidate expensive nodes for optimization? |
roland@4589 | 684 | bool should_optimize_expensive_nodes(PhaseIterGVN &igvn); |
roland@4589 | 685 | // Check whether n1 and n2 are similar |
roland@4589 | 686 | static int cmp_expensive_nodes(Node* n1, Node* n2); |
roland@4589 | 687 | // Sort expensive nodes to locate similar expensive nodes |
roland@4589 | 688 | void sort_expensive_nodes(); |
roland@4589 | 689 | |
duke@435 | 690 | // Compilation environment. |
duke@435 | 691 | Arena* comp_arena() { return &_comp_arena; } |
duke@435 | 692 | ciEnv* env() const { return _env; } |
duke@435 | 693 | CompileLog* log() const { return _log; } |
duke@435 | 694 | bool failing() const { return _env->failing() || _failure_reason != NULL; } |
bharadwaj@4315 | 695 | const char* failure_reason() { return _failure_reason; } |
duke@435 | 696 | bool failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); } |
duke@435 | 697 | |
duke@435 | 698 | void record_failure(const char* reason); |
duke@435 | 699 | void record_method_not_compilable(const char* reason, bool all_tiers = false) { |
duke@435 | 700 | // All bailouts cover "all_tiers" when TieredCompilation is off. |
duke@435 | 701 | if (!TieredCompilation) all_tiers = true; |
duke@435 | 702 | env()->record_method_not_compilable(reason, all_tiers); |
duke@435 | 703 | // Record failure reason. |
duke@435 | 704 | record_failure(reason); |
duke@435 | 705 | } |
duke@435 | 706 | void record_method_not_compilable_all_tiers(const char* reason) { |
duke@435 | 707 | record_method_not_compilable(reason, true); |
duke@435 | 708 | } |
duke@435 | 709 | bool check_node_count(uint margin, const char* reason) { |
bharadwaj@4315 | 710 | if (live_nodes() + margin > (uint)MaxNodeLimit) { |
duke@435 | 711 | record_method_not_compilable(reason); |
duke@435 | 712 | return true; |
duke@435 | 713 | } else { |
duke@435 | 714 | return false; |
duke@435 | 715 | } |
duke@435 | 716 | } |
duke@435 | 717 | |
duke@435 | 718 | // Node management |
bharadwaj@4315 | 719 | uint unique() const { return _unique; } |
bharadwaj@4315 | 720 | uint next_unique() { return _unique++; } |
bharadwaj@4315 | 721 | void set_unique(uint i) { _unique = i; } |
bharadwaj@4315 | 722 | static int debug_idx() { return debug_only(_debug_idx)+0; } |
bharadwaj@4315 | 723 | static void set_debug_idx(int i) { debug_only(_debug_idx = i); } |
bharadwaj@4315 | 724 | Arena* node_arena() { return &_node_arena; } |
bharadwaj@4315 | 725 | Arena* old_arena() { return &_old_arena; } |
bharadwaj@4315 | 726 | RootNode* root() const { return _root; } |
bharadwaj@4315 | 727 | void set_root(RootNode* r) { _root = r; } |
bharadwaj@4315 | 728 | StartNode* start() const; // (Derived from root.) |
duke@435 | 729 | void init_start(StartNode* s); |
bharadwaj@4315 | 730 | Node* immutable_memory(); |
duke@435 | 731 | |
bharadwaj@4315 | 732 | Node* recent_alloc_ctl() const { return _recent_alloc_ctl; } |
bharadwaj@4315 | 733 | Node* recent_alloc_obj() const { return _recent_alloc_obj; } |
bharadwaj@4315 | 734 | void set_recent_alloc(Node* ctl, Node* obj) { |
duke@435 | 735 | _recent_alloc_ctl = ctl; |
duke@435 | 736 | _recent_alloc_obj = obj; |
bharadwaj@4315 | 737 | } |
bharadwaj@4315 | 738 | void record_dead_node(uint idx) { if (_dead_node_list.test_set(idx)) return; |
bharadwaj@4315 | 739 | _dead_node_count++; |
bharadwaj@4315 | 740 | } |
kvn@4695 | 741 | bool is_dead_node(uint idx) { return _dead_node_list.test(idx) != 0; } |
bharadwaj@4315 | 742 | uint dead_node_count() { return _dead_node_count; } |
bharadwaj@4315 | 743 | void reset_dead_node_list() { _dead_node_list.Reset(); |
bharadwaj@4315 | 744 | _dead_node_count = 0; |
bharadwaj@4315 | 745 | } |
roland@4357 | 746 | uint live_nodes() const { |
bharadwaj@4315 | 747 | int val = _unique - _dead_node_count; |
bharadwaj@4315 | 748 | assert (val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _unique, _dead_node_count)); |
bharadwaj@4315 | 749 | return (uint) val; |
bharadwaj@4315 | 750 | } |
bharadwaj@4315 | 751 | #ifdef ASSERT |
bharadwaj@4315 | 752 | uint count_live_nodes_by_graph_walk(); |
bharadwaj@4315 | 753 | void print_missing_nodes(); |
bharadwaj@4315 | 754 | #endif |
duke@435 | 755 | |
twisti@2350 | 756 | // Constant table |
twisti@2350 | 757 | ConstantTable& constant_table() { return _constant_table; } |
twisti@2350 | 758 | |
twisti@2350 | 759 | MachConstantBaseNode* mach_constant_base_node(); |
twisti@2350 | 760 | bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; } |
goetz@6499 | 761 | // Generated by adlc, true if CallNode requires MachConstantBase. |
goetz@6499 | 762 | bool needs_clone_jvms(); |
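  // Illustrative note, not part of the original header: on platforms that
  // materialize constants via a table base (e.g. PPC64), needs_clone_jvms()
  // indicates that call nodes carry an extra MachConstantBase input between
  // the argument edges and the JVMState (debug) edges, which is why the jvms
  // offsets are adapted in this change (see the summary above). A sketch of
  // the idea, with the adjustment itself left abstract:
  //
  //   uint extra_edges = needs_clone_jvms() ? 1 : 0;  // one extra ConstantTableBase input
  //   // debug/JVMState inputs of such a call then start 'extra_edges' later
  //   // than they would on platforms without a constant table base.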
twisti@2350 | 763 | |
duke@435 | 764 | // Handy undefined Node |
duke@435 | 765 | Node* top() const { return _top; } |
duke@435 | 766 | |
duke@435 | 767 | // these are used by guys who need to know about creation and transformation of top: |
duke@435 | 768 | Node* cached_top_node() { return _top; } |
duke@435 | 769 | void set_cached_top_node(Node* tn); |
duke@435 | 770 | |
duke@435 | 771 | GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; } |
duke@435 | 772 | void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; } |
duke@435 | 773 | Node_Notes* default_node_notes() const { return _default_node_notes; } |
duke@435 | 774 | void set_default_node_notes(Node_Notes* n) { _default_node_notes = n; } |
duke@435 | 775 | |
duke@435 | 776 | Node_Notes* node_notes_at(int idx) { |
duke@435 | 777 | return locate_node_notes(_node_note_array, idx, false); |
duke@435 | 778 | } |
duke@435 | 779 | inline bool set_node_notes_at(int idx, Node_Notes* value); |
duke@435 | 780 | |
duke@435 | 781 | // Copy notes from source to dest, if they exist. |
duke@435 | 782 | // Overwrite dest only if source provides something. |
duke@435 | 783 | // Return true if information was moved. |
duke@435 | 784 | bool copy_node_notes_to(Node* dest, Node* source); |
duke@435 | 785 | |
duke@435 | 786 | // Workhorse function to sort out the blocked Node_Notes array: |
duke@435 | 787 | inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr, |
duke@435 | 788 | int idx, bool can_grow = false); |
duke@435 | 789 | |
duke@435 | 790 | void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by); |
duke@435 | 791 | |
duke@435 | 792 | // Type management |
duke@435 | 793 | Arena* type_arena() { return _type_arena; } |
duke@435 | 794 | Dict* type_dict() { return _type_dict; } |
duke@435 | 795 | void* type_hwm() { return _type_hwm; } |
duke@435 | 796 | size_t type_last_size() { return _type_last_size; } |
duke@435 | 797 | int num_alias_types() { return _num_alias_types; } |
duke@435 | 798 | |
duke@435 | 799 | void init_type_arena() { _type_arena = &_Compile_types; } |
duke@435 | 800 | void set_type_arena(Arena* a) { _type_arena = a; } |
duke@435 | 801 | void set_type_dict(Dict* d) { _type_dict = d; } |
duke@435 | 802 | void set_type_hwm(void* p) { _type_hwm = p; } |
duke@435 | 803 | void set_type_last_size(size_t sz) { _type_last_size = sz; } |
duke@435 | 804 | |
duke@435 | 805 | const TypeFunc* last_tf(ciMethod* m) { |
duke@435 | 806 | return (m == _last_tf_m) ? _last_tf : NULL; |
duke@435 | 807 | } |
duke@435 | 808 | void set_last_tf(ciMethod* m, const TypeFunc* tf) { |
duke@435 | 809 | assert(m != NULL || tf == NULL, ""); |
duke@435 | 810 | _last_tf_m = m; |
duke@435 | 811 | _last_tf = tf; |
duke@435 | 812 | } |
duke@435 | 813 | |
duke@435 | 814 | AliasType* alias_type(int idx) { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; } |
never@2658 | 815 | AliasType* alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); } |
duke@435 | 816 | bool have_alias_type(const TypePtr* adr_type); |
duke@435 | 817 | AliasType* alias_type(ciField* field); |
duke@435 | 818 | |
duke@435 | 819 | int get_alias_index(const TypePtr* at) { return alias_type(at)->index(); } |
duke@435 | 820 | const TypePtr* get_adr_type(uint aidx) { return alias_type(aidx)->adr_type(); } |
duke@435 | 821 | int get_general_index(uint aidx) { return alias_type(aidx)->general_index(); } |
duke@435 | 822 | |
duke@435 | 823 | // Building nodes |
duke@435 | 824 | void rethrow_exceptions(JVMState* jvms); |
duke@435 | 825 | void return_values(JVMState* jvms); |
duke@435 | 826 | JVMState* build_start_state(StartNode* start, const TypeFunc* tf); |
duke@435 | 827 | |
duke@435 | 828 | // Decide how to build a call. |
duke@435 | 829 | // The profile factor is a discount to apply to this site's interp. profile. |
roland@5981 | 830 | CallGenerator* call_generator(ciMethod* call_method, int vtable_index, bool call_does_dispatch, |
roland@5991 | 831 | JVMState* jvms, bool allow_inline, float profile_factor, ciKlass* speculative_receiver_type = NULL, |
roland@5991 | 832 | bool allow_intrinsics = true, bool delayed_forbidden = false); |
kvn@5110 | 833 | bool should_delay_inlining(ciMethod* call_method, JVMState* jvms) { |
kvn@5110 | 834 | return should_delay_string_inlining(call_method, jvms) || |
kvn@5110 | 835 | should_delay_boxing_inlining(call_method, jvms); |
kvn@5110 | 836 | } |
kvn@5110 | 837 | bool should_delay_string_inlining(ciMethod* call_method, JVMState* jvms); |
kvn@5110 | 838 | bool should_delay_boxing_inlining(ciMethod* call_method, JVMState* jvms); |
duke@435 | 839 | |
twisti@4414 | 840 | // Helper functions to identify inlining potential at call-site |
twisti@4414 | 841 | ciMethod* optimize_virtual_call(ciMethod* caller, int bci, ciInstanceKlass* klass, |
twisti@4414 | 842 | ciMethod* callee, const TypeOopPtr* receiver_type, |
twisti@4414 | 843 | bool is_virtual, |
twisti@4414 | 844 | bool &call_does_dispatch, int &vtable_index); |
twisti@4414 | 845 | ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass, |
twisti@4414 | 846 | ciMethod* callee, const TypeOopPtr* receiver_type); |
twisti@4414 | 847 | |
duke@435 | 848 | // Report if there were too many traps at a current method and bci. |
duke@435 | 849 | // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded. |
duke@435 | 850 | // If there is no MDO at all, report no trap unless told to assume it. |
duke@435 | 851 | bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason); |
duke@435 | 852 | // This version, unspecific to a particular bci, asks if |
duke@435 | 853 | // PerMethodTrapLimit was exceeded for all inlined methods seen so far. |
duke@435 | 854 | bool too_many_traps(Deoptimization::DeoptReason reason, |
duke@435 | 855 | // Privately used parameter for logging: |
duke@435 | 856 | ciMethodData* logmd = NULL); |
duke@435 | 857 | // Report if there were too many recompiles at a method and bci. |
duke@435 | 858 | bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason); |
goetz@6490 | 859 | // Return a bitset with the reasons where deoptimization is allowed, |
goetz@6490 | 860 | // i.e., where there were not too many uncommon traps. |
goetz@6490 | 861 | int _allowed_reasons; |
goetz@6490 | 862 | int allowed_deopt_reasons() { return _allowed_reasons; } |
goetz@6490 | 863 | void set_allowed_deopt_reasons(); |
duke@435 | 864 | |
duke@435 | 865 | // Parsing, optimization |
duke@435 | 866 | PhaseGVN* initial_gvn() { return _initial_gvn; } |
duke@435 | 867 | Unique_Node_List* for_igvn() { return _for_igvn; } |
duke@435 | 868 | inline void record_for_igvn(Node* n); // Body is after class Unique_Node_List. |
duke@435 | 869 | void set_initial_gvn(PhaseGVN *gvn) { _initial_gvn = gvn; } |
duke@435 | 870 | void set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; } |
duke@435 | 871 | |
never@1515 | 872 | // Replace n by nn using initial_gvn, calling hash_delete and |
never@1515 | 873 | // record_for_igvn as needed. |
never@1515 | 874 | void gvn_replace_by(Node* n, Node* nn); |
never@1515 | 875 | |
never@1515 | 876 | |
duke@435 | 877 | void identify_useful_nodes(Unique_Node_List &useful); |
bharadwaj@4315 | 878 | void update_dead_node_list(Unique_Node_List &useful); |
roland@4357 | 879 | void remove_useless_nodes (Unique_Node_List &useful); |
duke@435 | 880 | |
duke@435 | 881 | WarmCallInfo* warm_calls() const { return _warm_calls; } |
duke@435 | 882 | void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; } |
duke@435 | 883 | WarmCallInfo* pop_warm_call(); |
duke@435 | 884 | |
never@1515 | 885 | // Record this CallGenerator for inlining at the end of parsing. |
roland@4409 | 886 | void add_late_inline(CallGenerator* cg) { |
roland@4409 | 887 | _late_inlines.insert_before(_late_inlines_pos, cg); |
roland@4409 | 888 | _late_inlines_pos++; |
roland@4409 | 889 | } |
roland@4409 | 890 | |
roland@4409 | 891 | void prepend_late_inline(CallGenerator* cg) { |
roland@4409 | 892 | _late_inlines.insert_before(0, cg); |
roland@4409 | 893 | } |
roland@4409 | 894 | |
roland@4409 | 895 | void add_string_late_inline(CallGenerator* cg) { |
roland@4409 | 896 | _string_late_inlines.push(cg); |
roland@4409 | 897 | } |
roland@4409 | 898 | |
kvn@5110 | 899 | void add_boxing_late_inline(CallGenerator* cg) { |
kvn@5110 | 900 | _boxing_late_inlines.push(cg); |
kvn@5110 | 901 | } |
kvn@5110 | 902 | |
roland@4409 | 903 | void remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful); |
never@1515 | 904 | |
roland@4357 | 905 | void dump_inlining(); |
roland@4357 | 906 | |
roland@4409 | 907 | bool over_inlining_cutoff() const { |
roland@4409 | 908 | if (!inlining_incrementally()) { |
roland@4409 | 909 | return unique() > (uint)NodeCountInliningCutoff; |
roland@4409 | 910 | } else { |
roland@4409 | 911 | return live_nodes() > (uint)LiveNodeCountInliningCutoff; |
roland@4409 | 912 | } |
roland@4409 | 913 | } |
roland@4409 | 914 | |
roland@4409 | 915 | void inc_number_of_mh_late_inlines() { _number_of_mh_late_inlines++; } |
roland@4409 | 916 | void dec_number_of_mh_late_inlines() { assert(_number_of_mh_late_inlines > 0, "_number_of_mh_late_inlines < 0 !"); _number_of_mh_late_inlines--; } |
roland@4409 | 917 | bool has_mh_late_inlines() const { return _number_of_mh_late_inlines > 0; } |
roland@4409 | 918 | |
roland@4409 | 919 | void inline_incrementally_one(PhaseIterGVN& igvn); |
roland@4409 | 920 | void inline_incrementally(PhaseIterGVN& igvn); |
roland@4409 | 921 | void inline_string_calls(bool parse_time); |
kvn@5110 | 922 | void inline_boxing_calls(PhaseIterGVN& igvn); |
roland@4409 | 923 | |
duke@435 | 924 | // Matching, CFG layout, allocation, code generation |
duke@435 | 925 | PhaseCFG* cfg() { return _cfg; } |
duke@435 | 926 | bool select_24_bit_instr() const { return _select_24_bit_instr; } |
duke@435 | 927 | bool in_24_bit_fp_mode() const { return _in_24_bit_fp_mode; } |
kvn@1294 | 928 | bool has_java_calls() const { return _java_calls > 0; } |
kvn@1294 | 929 | int java_calls() const { return _java_calls; } |
kvn@1294 | 930 | int inner_loops() const { return _inner_loops; } |
duke@435 | 931 | Matcher* matcher() { return _matcher; } |
duke@435 | 932 | PhaseRegAlloc* regalloc() { return _regalloc; } |
duke@435 | 933 | int frame_slots() const { return _frame_slots; } |
duke@435 | 934 | int frame_size_in_words() const; // frame_slots in units of the polymorphic 'words' |
duke@435 | 935 | RegMask& FIRST_STACK_mask() { return _FIRST_STACK_mask; } |
duke@435 | 936 | Arena* indexSet_arena() { return _indexSet_arena; } |
duke@435 | 937 | void* indexSet_free_block_list() { return _indexSet_free_block_list; } |
duke@435 | 938 | uint node_bundling_limit() { return _node_bundling_limit; } |
duke@435 | 939 | Bundle* node_bundling_base() { return _node_bundling_base; } |
duke@435 | 940 | void set_node_bundling_limit(uint n) { _node_bundling_limit = n; } |
duke@435 | 941 | void set_node_bundling_base(Bundle* b) { _node_bundling_base = b; } |
duke@435 | 942 | bool starts_bundle(const Node *n) const; |
duke@435 | 943 | bool need_stack_bang(int frame_size_in_bytes) const; |
duke@435 | 944 | bool need_register_stack_bang() const; |
duke@435 | 945 | |
duke@435 | 946 | void set_matcher(Matcher* m) { _matcher = m; } |
duke@435 | 947 | //void set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; } |
duke@435 | 948 | void set_indexSet_arena(Arena* a) { _indexSet_arena = a; } |
duke@435 | 949 | void set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; } |
duke@435 | 950 | |
duke@435 | 951 | // Remember if this compilation changes hardware mode to 24-bit precision |
duke@435 | 952 | void set_24_bit_selection_and_mode(bool selection, bool mode) { |
duke@435 | 953 | _select_24_bit_instr = selection; |
duke@435 | 954 | _in_24_bit_fp_mode = mode; |
duke@435 | 955 | } |
duke@435 | 956 | |
kvn@1294 | 957 | void set_java_calls(int z) { _java_calls = z; } |
kvn@1294 | 958 | void set_inner_loops(int z) { _inner_loops = z; } |
duke@435 | 959 | |
duke@435 | 960 | // Instruction bits passed off to the VM |
duke@435 | 961 | int code_size() { return _method_size; } |
duke@435 | 962 | CodeBuffer* code_buffer() { return &_code_buffer; } |
duke@435 | 963 | int first_block_size() { return _first_block_size; } |
duke@435 | 964 | void set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); } |
duke@435 | 965 | ExceptionHandlerTable* handler_table() { return &_handler_table; } |
duke@435 | 966 | ImplicitExceptionTable* inc_table() { return &_inc_table; } |
duke@435 | 967 | OopMapSet* oop_map_set() { return _oop_map_set; } |
duke@435 | 968 | DebugInformationRecorder* debug_info() { return env()->debug_info(); } |
duke@435 | 969 | Dependencies* dependencies() { return env()->dependencies(); } |
duke@435 | 970 | static int CompiledZap_count() { return _CompiledZap_count; } |
duke@435 | 971 | BufferBlob* scratch_buffer_blob() { return _scratch_buffer_blob; } |
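duke@435 | 971 | // Manage the blob used for trial code emission (see scratch_emit_size); const_size sizes its constant table section (cf. MAX_const_size below).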
twisti@2350 | 972 | void init_scratch_buffer_blob(int const_size); |
twisti@2350 | 973 | void clear_scratch_buffer_blob(); |
duke@435 | 974 | void set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; } |
duke@435 | 975 | relocInfo* scratch_locs_memory() { return _scratch_locs_memory; } |
duke@435 | 976 | void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; } |
duke@435 | 977 | |
duke@435 | 978 | // emit to scratch blob, report resulting size |
duke@435 | 979 | uint scratch_emit_size(const Node* n); |
twisti@2350 | 980 | void set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; } |
twisti@2350 | 981 | bool in_scratch_emit_size() const { return _in_scratch_emit_size; } |
duke@435 | 982 | |
duke@435 | 983 | enum ScratchBufferBlob { |
duke@435 | 984 | MAX_inst_size = 1024, |
duke@435 | 985 | MAX_locs_size = 128, // number of relocInfo elements |
duke@435 | 986 | MAX_const_size = 128, |
duke@435 | 987 | MAX_stubs_size = 128 |
duke@435 | 988 | }; |
duke@435 | 989 | |
duke@435 | 990 | // Major entry point. Given a Scope, compile the associated method. |
duke@435 | 991 | // For normal compilations, entry_bci is InvocationEntryBci. For on stack |
duke@435 | 992 | // replacement, entry_bci indicates the bytecode for which to compile a |
duke@435 | 993 | // continuation. |
duke@435 | 994 | Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, |
kvn@5110 | 995 | int entry_bci, bool subsume_loads, bool do_escape_analysis, |
kvn@5110 | 996 | bool eliminate_boxing); |
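kvn@5110 | 996 | // Illustrative call site, roughly how the C2 compiler driver constructs a compilation:
kvn@5110 | 996 | //   Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis, eliminate_boxing);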
duke@435 | 997 | |
duke@435 | 998 | // Second major entry point. From the TypeFunc signature, generate code |
duke@435 | 999 | // to pass arguments from the Java calling convention to the C calling |
duke@435 | 1000 | // convention. |
duke@435 | 1001 | Compile(ciEnv* ci_env, const TypeFunc *(*gen)(), |
duke@435 | 1002 | address stub_function, const char *stub_name, |
duke@435 | 1003 | int is_fancy_jump, bool pass_tls, |
duke@435 | 1004 | bool save_arg_registers, bool return_pc); |
duke@435 | 1005 | |
duke@435 | 1006 | // From the TypeFunc signature, generate code to pass arguments |
duke@435 | 1007 | // from Compiled calling convention to Interpreter's calling convention |
duke@435 | 1008 | void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry); |
duke@435 | 1009 | |
duke@435 | 1010 | // From the TypeFunc signature, generate code to pass arguments |
duke@435 | 1011 | // from Interpreter's calling convention to Compiler's calling convention |
duke@435 | 1012 | void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf); |
duke@435 | 1013 | |
duke@435 | 1014 | // Are we compiling a method? |
duke@435 | 1015 | bool has_method() { return method() != NULL; } |
duke@435 | 1016 | |
duke@435 | 1017 | // Maybe print some information about this compile. |
duke@435 | 1018 | void print_compile_messages(); |
duke@435 | 1019 | |
duke@435 | 1020 | // Final graph reshaping, a post-pass after the regular optimizer is done. |
duke@435 | 1021 | bool final_graph_reshaping(); |
duke@435 | 1022 | |
duke@435 | 1023 | // returns true if adr is completely contained in the given alias category |
duke@435 | 1024 | bool must_alias(const TypePtr* adr, int alias_idx); |
duke@435 | 1025 | |
duke@435 | 1026 | // returns true if adr overlaps with the given alias category |
duke@435 | 1027 | bool can_alias(const TypePtr* adr, int alias_idx); |
duke@435 | 1028 | |
duke@435 | 1029 | // Driver for converting compiler's IR into machine code bits |
duke@435 | 1030 | void Output(); |
duke@435 | 1031 | |
duke@435 | 1032 | // Accessors for node bundling info. |
duke@435 | 1033 | Bundle* node_bundling(const Node *n); |
duke@435 | 1034 | bool valid_bundle_info(const Node *n); |
duke@435 | 1035 | |
duke@435 | 1036 | // Schedule and Bundle the instructions |
duke@435 | 1037 | void ScheduleAndBundle(); |
duke@435 | 1038 | |
duke@435 | 1039 | // Build OopMaps for each GC point |
duke@435 | 1040 | void BuildOopMaps(); |
kvn@498 | 1041 | |
kvn@498 | 1042 | // Append debug info for the node "local" at safepoint node "sfpt" to the |
kvn@498 | 1043 | // "array". May also consult and add to "objs", which describes the
kvn@498 | 1044 | // scalar-replaced objects. |
kvn@498 | 1045 | void FillLocArray( int idx, MachSafePointNode* sfpt, |
kvn@498 | 1046 | Node *local, GrowableArray<ScopeValue*> *array, |
kvn@498 | 1047 | GrowableArray<ScopeValue*> *objs ); |
kvn@498 | 1048 | |
kvn@498 | 1049 | // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL. |
kvn@498 | 1050 | static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id); |
kvn@498 | 1051 | // Requires that "objs" does not contain an ObjectValue whose id matches
kvn@498 | 1052 | // that of "sv". Appends "sv".
kvn@498 | 1053 | static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs, |
kvn@498 | 1054 | ObjectValue* sv ); |
duke@435 | 1055 | |
duke@435 | 1056 | // Process an OopMap Element while emitting nodes |
duke@435 | 1057 | void Process_OopMap_Node(MachNode *mach, int code_offset); |
duke@435 | 1058 | |
kvn@3049 | 1059 | // Initialize code buffer |
kvn@3049 | 1060 | CodeBuffer* init_buffer(uint* blk_starts); |
kvn@3049 | 1061 | |
duke@435 | 1062 | // Write out basic block data to code buffer |
kvn@3049 | 1063 | void fill_buffer(CodeBuffer* cb, uint* blk_starts); |
duke@435 | 1064 | |
duke@435 | 1065 | // Determine which variable sized branches can be shortened |
kvn@3049 | 1066 | void shorten_branches(uint* blk_starts, int& code_size, int& reloc_size, int& stub_size); |
kvn@3049 | 1067 | |
duke@435 | 1068 | // Compute the size of first NumberOfLoopInstrToAlign instructions |
duke@435 | 1069 | // at the head of a loop. |
duke@435 | 1070 | void compute_loop_first_inst_sizes(); |
duke@435 | 1071 | |
duke@435 | 1072 | // Compute the information for the exception tables |
duke@435 | 1073 | void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels); |
duke@435 | 1074 | |
duke@435 | 1075 | // Stack slots that may be unused by the calling convention but must |
duke@435 | 1076 | // otherwise be preserved. On Intel this includes the return address. |
duke@435 | 1077 | // On PowerPC it includes the 4 words holding the old TOC & LR glue. |
duke@435 | 1078 | uint in_preserve_stack_slots(); |
duke@435 | 1079 | |
duke@435 | 1080 | // "Top of Stack" slots that may be unused by the calling convention but must |
duke@435 | 1081 | // otherwise be preserved. |
duke@435 | 1082 | // On Intel these are not necessary and the value can be zero. |
duke@435 | 1083 | // On Sparc this describes the words reserved for storing a register window |
duke@435 | 1084 | // when an interrupt occurs. |
duke@435 | 1085 | static uint out_preserve_stack_slots(); |
duke@435 | 1086 | |
duke@435 | 1087 | // Number of outgoing stack slots killed above the out_preserve_stack_slots |
duke@435 | 1088 | // for calls to C. Supports the var-args backing area for register parms. |
duke@435 | 1089 | uint varargs_C_out_slots_killed() const; |
duke@435 | 1090 | |
duke@435 | 1091 | // Number of Stack Slots consumed by a synchronization entry |
duke@435 | 1092 | int sync_stack_slots() const; |
duke@435 | 1093 | |
duke@435 | 1094 | // Compute the name of old_SP. See <arch>.ad for frame layout. |
duke@435 | 1095 | OptoReg::Name compute_old_SP(); |
duke@435 | 1096 | |
duke@435 | 1097 | #ifdef ENABLE_ZAP_DEAD_LOCALS |
duke@435 | 1098 | static bool is_node_getting_a_safepoint(Node*); |
duke@435 | 1099 | void Insert_zap_nodes(); |
duke@435 | 1100 | Node* call_zap_node(MachSafePointNode* n, int block_no); |
duke@435 | 1101 | #endif |
duke@435 | 1102 | |
duke@435 | 1103 | private: |
duke@435 | 1104 | // Phase control: |
duke@435 | 1105 | void Init(int aliaslevel); // Prepare for a single compilation |
duke@435 | 1106 | int Inline_Warm(); // Find more inlining work. |
duke@435 | 1107 | void Finish_Warm(); // Give up on further inlines. |
duke@435 | 1108 | void Optimize(); // Given a graph, optimize it |
duke@435 | 1109 | void Code_Gen(); // Generate code from a graph |
duke@435 | 1110 | |
duke@435 | 1111 | // Management of the AliasType table. |
duke@435 | 1112 | void grow_alias_types(); |
duke@435 | 1113 | AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type); |
duke@435 | 1114 | const TypePtr *flatten_alias_type(const TypePtr* adr_type) const; |
never@2658 | 1115 | AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field); |
duke@435 | 1116 | |
duke@435 | 1117 | void verify_top(Node*) const PRODUCT_RETURN; |
duke@435 | 1118 | |
duke@435 | 1119 | // Intrinsic setup. |
duke@435 | 1120 | void register_library_intrinsics(); // initializer |
duke@435 | 1121 | CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual); // constructor |
duke@435 | 1122 | int intrinsic_insertion_index(ciMethod* m, bool is_virtual); // helper |
duke@435 | 1123 | CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual); // query fn |
duke@435 | 1124 | void register_intrinsic(CallGenerator* cg); // update fn |
duke@435 | 1125 | |
duke@435 | 1126 | #ifndef PRODUCT |
duke@435 | 1127 | static juint _intrinsic_hist_count[vmIntrinsics::ID_LIMIT]; |
duke@435 | 1128 | static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT]; |
duke@435 | 1129 | #endif |
bharadwaj@4315 | 1130 | // Helpers called by the public function final_graph_reshaping.
bharadwaj@4315 | 1131 | // They need not be public as they are not called elsewhere.
bharadwaj@4315 | 1132 | void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc); |
bharadwaj@4315 | 1133 | void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ); |
bharadwaj@4315 | 1134 | void eliminate_redundant_card_marks(Node* n); |
duke@435 | 1135 | |
duke@435 | 1136 | public: |
duke@435 | 1137 | |
duke@435 | 1138 | // Note: Histogram array size is about 1 KB.
duke@435 | 1139 | enum { // flag bits: |
duke@435 | 1140 | _intrinsic_worked = 1, // succeeded at least once |
duke@435 | 1141 | _intrinsic_failed = 2, // tried it but it failed |
duke@435 | 1142 | _intrinsic_disabled = 4, // was requested but disabled (e.g., -XX:-InlineUnsafeOps) |
duke@435 | 1143 | _intrinsic_virtual = 8, // was seen in the virtual form (rare) |
duke@435 | 1144 | _intrinsic_both = 16 // was seen in the non-virtual form (usual) |
duke@435 | 1145 | }; |
duke@435 | 1146 | // Update histogram. Return true if this is a first-time occurrence.
duke@435 | 1147 | static bool gather_intrinsic_statistics(vmIntrinsics::ID id, |
duke@435 | 1148 | bool is_virtual, int flags) PRODUCT_RETURN0; |
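duke@435 | 1148 | // (Illustrative: an intrinsic that inlined successfully would call gather_intrinsic_statistics(id, is_virtual, _intrinsic_worked).)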
duke@435 | 1149 | static void print_intrinsic_statistics() PRODUCT_RETURN; |
duke@435 | 1150 | |
duke@435 | 1151 | // Graph verification code |
duke@435 | 1152 | // Walk the node list, verifying that there is a one-to-one |
duke@435 | 1153 | // correspondence between Use-Def edges and Def-Use edges.
duke@435 | 1154 | // The option no_dead_code enables stronger checks that the |
duke@435 | 1155 | // graph is strongly connected from root in both directions. |
duke@435 | 1156 | void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN; |
duke@435 | 1157 | |
iveresov@6070 | 1158 | // Verify GC barrier patterns |
iveresov@6070 | 1159 | void verify_barriers() PRODUCT_RETURN; |
iveresov@6070 | 1160 | |
duke@435 | 1161 | // End-of-run dumps. |
duke@435 | 1162 | static void print_statistics() PRODUCT_RETURN; |
duke@435 | 1163 | |
duke@435 | 1164 | // Dump formatted assembly |
duke@435 | 1165 | void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN; |
duke@435 | 1166 | void dump_pc(int *pcs, int pc_limit, Node *n); |
duke@435 | 1167 | |
duke@435 | 1168 | // Verify ADLC assumptions during startup |
duke@435 | 1169 | static void adlc_verification() PRODUCT_RETURN; |
duke@435 | 1170 | |
duke@435 | 1171 | // Definitions of pd methods |
duke@435 | 1172 | static void pd_compiler2_init(); |
shade@4691 | 1173 | |
shade@4691 | 1174 | // Auxiliary method for randomized fuzzing/stressing |
shade@4691 | 1175 | static bool randomized_select(int count); |
roland@5981 | 1176 | |
roland@5981 | 1177 | // enter a PreserveJVMState block |
roland@5981 | 1178 | void inc_preserve_jvm_state() { |
roland@5981 | 1179 | _preserve_jvm_state++; |
roland@5981 | 1180 | } |
roland@5981 | 1181 | |
roland@5981 | 1182 | // exit a PreserveJVMState block |
roland@5981 | 1183 | void dec_preserve_jvm_state() { |
roland@5981 | 1184 | _preserve_jvm_state--; |
roland@5981 | 1185 | assert(_preserve_jvm_state >= 0, "_preserve_jvm_state shouldn't be negative"); |
roland@5981 | 1186 | } |
roland@5981 | 1187 | |
roland@5981 | 1188 | bool has_preserve_jvm_state() const { |
roland@5981 | 1189 | return _preserve_jvm_state > 0; |
roland@5981 | 1190 | } |
duke@435 | 1191 | }; |
stefank@2314 | 1192 | |
stefank@2314 | 1193 | #endif // SHARE_VM_OPTO_COMPILE_HPP |