Mon, 23 Jun 2014 13:33:23 +0200
8046289: compiler/6340864/TestLongVect.java timeout with
Reviewed-by: iveresov, vlivanov
duke@435 | 1 | /* |
drchase@6680 | 2 | * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
twisti@4318 | 26 | #include "asm/macroAssembler.hpp" |
twisti@4318 | 27 | #include "asm/macroAssembler.inline.hpp" |
kvn@6217 | 28 | #include "ci/ciReplay.hpp" |
stefank@2314 | 29 | #include "classfile/systemDictionary.hpp" |
stefank@2314 | 30 | #include "code/exceptionHandlerTable.hpp" |
stefank@2314 | 31 | #include "code/nmethod.hpp" |
stefank@2314 | 32 | #include "compiler/compileLog.hpp" |
twisti@4318 | 33 | #include "compiler/disassembler.hpp" |
stefank@2314 | 34 | #include "compiler/oopMap.hpp" |
stefank@2314 | 35 | #include "opto/addnode.hpp" |
stefank@2314 | 36 | #include "opto/block.hpp" |
stefank@2314 | 37 | #include "opto/c2compiler.hpp" |
stefank@2314 | 38 | #include "opto/callGenerator.hpp" |
stefank@2314 | 39 | #include "opto/callnode.hpp" |
stefank@2314 | 40 | #include "opto/cfgnode.hpp" |
stefank@2314 | 41 | #include "opto/chaitin.hpp" |
stefank@2314 | 42 | #include "opto/compile.hpp" |
stefank@2314 | 43 | #include "opto/connode.hpp" |
stefank@2314 | 44 | #include "opto/divnode.hpp" |
stefank@2314 | 45 | #include "opto/escape.hpp" |
stefank@2314 | 46 | #include "opto/idealGraphPrinter.hpp" |
stefank@2314 | 47 | #include "opto/loopnode.hpp" |
stefank@2314 | 48 | #include "opto/machnode.hpp" |
stefank@2314 | 49 | #include "opto/macro.hpp" |
stefank@2314 | 50 | #include "opto/matcher.hpp" |
rbackman@5927 | 51 | #include "opto/mathexactnode.hpp" |
stefank@2314 | 52 | #include "opto/memnode.hpp" |
stefank@2314 | 53 | #include "opto/mulnode.hpp" |
stefank@2314 | 54 | #include "opto/node.hpp" |
stefank@2314 | 55 | #include "opto/opcodes.hpp" |
stefank@2314 | 56 | #include "opto/output.hpp" |
stefank@2314 | 57 | #include "opto/parse.hpp" |
stefank@2314 | 58 | #include "opto/phaseX.hpp" |
stefank@2314 | 59 | #include "opto/rootnode.hpp" |
stefank@2314 | 60 | #include "opto/runtime.hpp" |
stefank@2314 | 61 | #include "opto/stringopts.hpp" |
stefank@2314 | 62 | #include "opto/type.hpp" |
stefank@2314 | 63 | #include "opto/vectornode.hpp" |
stefank@2314 | 64 | #include "runtime/arguments.hpp" |
stefank@2314 | 65 | #include "runtime/signature.hpp" |
stefank@2314 | 66 | #include "runtime/stubRoutines.hpp" |
stefank@2314 | 67 | #include "runtime/timer.hpp" |
sla@5237 | 68 | #include "trace/tracing.hpp" |
stefank@2314 | 69 | #include "utilities/copy.hpp" |
stefank@2314 | 70 | #ifdef TARGET_ARCH_MODEL_x86_32 |
stefank@2314 | 71 | # include "adfiles/ad_x86_32.hpp" |
stefank@2314 | 72 | #endif |
stefank@2314 | 73 | #ifdef TARGET_ARCH_MODEL_x86_64 |
stefank@2314 | 74 | # include "adfiles/ad_x86_64.hpp" |
stefank@2314 | 75 | #endif |
stefank@2314 | 76 | #ifdef TARGET_ARCH_MODEL_sparc |
stefank@2314 | 77 | # include "adfiles/ad_sparc.hpp" |
stefank@2314 | 78 | #endif |
stefank@2314 | 79 | #ifdef TARGET_ARCH_MODEL_zero |
stefank@2314 | 80 | # include "adfiles/ad_zero.hpp" |
stefank@2314 | 81 | #endif |
bobv@2508 | 82 | #ifdef TARGET_ARCH_MODEL_arm |
bobv@2508 | 83 | # include "adfiles/ad_arm.hpp" |
bobv@2508 | 84 | #endif |
goetz@6441 | 85 | #ifdef TARGET_ARCH_MODEL_ppc_32 |
goetz@6441 | 86 | # include "adfiles/ad_ppc_32.hpp" |
goetz@6441 | 87 | #endif |
goetz@6441 | 88 | #ifdef TARGET_ARCH_MODEL_ppc_64 |
goetz@6441 | 89 | # include "adfiles/ad_ppc_64.hpp" |
bobv@2508 | 90 | #endif |
duke@435 | 91 | |
twisti@2350 | 92 | |
twisti@2350 | 93 | // -------------------- Compile::mach_constant_base_node ----------------------- |
twisti@2350 | 94 | // Constant table base node singleton. |
twisti@2350 | 95 | MachConstantBaseNode* Compile::mach_constant_base_node() { |
twisti@2350 | 96 | if (_mach_constant_base_node == NULL) { |
twisti@2350 | 97 | _mach_constant_base_node = new (C) MachConstantBaseNode(); |
twisti@2350 | 98 | _mach_constant_base_node->add_req(C->root()); |
twisti@2350 | 99 | } |
twisti@2350 | 100 | return _mach_constant_base_node; |
twisti@2350 | 101 | } |
twisti@2350 | 102 | |
twisti@2350 | 103 | |
duke@435 | 104 | /// Support for intrinsics. |
duke@435 | 105 | |
duke@435 | 106 | // Return the index at which m must be inserted (or already exists). |
duke@435 | 107 | // The sort order is by the address of the ciMethod, with is_virtual as minor key. |
duke@435 | 108 | int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) { |
duke@435 | 109 | #ifdef ASSERT |
duke@435 | 110 | for (int i = 1; i < _intrinsics->length(); i++) { |
duke@435 | 111 | CallGenerator* cg1 = _intrinsics->at(i-1); |
duke@435 | 112 | CallGenerator* cg2 = _intrinsics->at(i); |
duke@435 | 113 | assert(cg1->method() != cg2->method() |
duke@435 | 114 | ? cg1->method() < cg2->method() |
duke@435 | 115 | : cg1->is_virtual() < cg2->is_virtual(), |
duke@435 | 116 | "compiler intrinsics list must stay sorted"); |
duke@435 | 117 | } |
duke@435 | 118 | #endif |
duke@435 | 119 | // Binary-search the sorted list, narrowing the interval [lo, hi]. |
duke@435 | 120 | int lo = 0, hi = _intrinsics->length()-1; |
duke@435 | 121 | while (lo <= hi) { |
duke@435 | 122 | int mid = (uint)(hi + lo) / 2; |
duke@435 | 123 | ciMethod* mid_m = _intrinsics->at(mid)->method(); |
duke@435 | 124 | if (m < mid_m) { |
duke@435 | 125 | hi = mid-1; |
duke@435 | 126 | } else if (m > mid_m) { |
duke@435 | 127 | lo = mid+1; |
duke@435 | 128 | } else { |
duke@435 | 129 | // look at minor sort key |
duke@435 | 130 | bool mid_virt = _intrinsics->at(mid)->is_virtual(); |
duke@435 | 131 | if (is_virtual < mid_virt) { |
duke@435 | 132 | hi = mid-1; |
duke@435 | 133 | } else if (is_virtual > mid_virt) { |
duke@435 | 134 | lo = mid+1; |
duke@435 | 135 | } else { |
duke@435 | 136 | return mid; // exact match |
duke@435 | 137 | } |
duke@435 | 138 | } |
duke@435 | 139 | } |
duke@435 | 140 | return lo; // inexact match |
duke@435 | 141 | } |
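// The same two-key binary search in isolation -- a minimal sketch with a
// hypothetical Entry type standing in for CallGenerator (illustrative only,
// not compiled into the VM):
#if 0
struct Entry { const void* method; bool is_virtual; };

// Index where (m, is_virtual) sits, or should be inserted, in an array
// sorted by method address with is_virtual as the minor key.
static int insertion_index(const Entry* a, int len,
                           const void* m, bool is_virtual) {
  int lo = 0, hi = len - 1;
  while (lo <= hi) {
    int mid = (lo + hi) / 2;
    if      (m < a[mid].method)              { hi = mid - 1; }
    else if (m > a[mid].method)              { lo = mid + 1; }
    else if (is_virtual < a[mid].is_virtual) { hi = mid - 1; }  // minor key
    else if (is_virtual > a[mid].is_virtual) { lo = mid + 1; }
    else return mid;  // exact match
  }
  return lo;          // insertion point
}
#endif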
duke@435 | 142 | |
duke@435 | 143 | void Compile::register_intrinsic(CallGenerator* cg) { |
duke@435 | 144 | if (_intrinsics == NULL) { |
roland@4409 | 145 | _intrinsics = new (comp_arena()) GrowableArray<CallGenerator*>(comp_arena(), 60, 0, NULL); |
duke@435 | 146 | } |
duke@435 | 147 | // This code is stolen from ciObjectFactory::insert. |
duke@435 | 148 | // Really, GrowableArray should have methods for |
duke@435 | 149 | // insert_at, remove_at, and binary_search. |
duke@435 | 150 | int len = _intrinsics->length(); |
duke@435 | 151 | int index = intrinsic_insertion_index(cg->method(), cg->is_virtual()); |
duke@435 | 152 | if (index == len) { |
duke@435 | 153 | _intrinsics->append(cg); |
duke@435 | 154 | } else { |
duke@435 | 155 | #ifdef ASSERT |
duke@435 | 156 | CallGenerator* oldcg = _intrinsics->at(index); |
duke@435 | 157 | assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice"); |
duke@435 | 158 | #endif |
duke@435 | 159 | _intrinsics->append(_intrinsics->at(len-1)); |
duke@435 | 160 | int pos; |
duke@435 | 161 | for (pos = len-2; pos >= index; pos--) { |
duke@435 | 162 | _intrinsics->at_put(pos+1,_intrinsics->at(pos)); |
duke@435 | 163 | } |
duke@435 | 164 | _intrinsics->at_put(index, cg); |
duke@435 | 165 | } |
duke@435 | 166 | assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked"); |
duke@435 | 167 | } |
duke@435 | 168 | |
duke@435 | 169 | CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) { |
duke@435 | 170 | assert(m->is_loaded(), "don't try this on unloaded methods"); |
duke@435 | 171 | if (_intrinsics != NULL) { |
duke@435 | 172 | int index = intrinsic_insertion_index(m, is_virtual); |
duke@435 | 173 | if (index < _intrinsics->length() |
duke@435 | 174 | && _intrinsics->at(index)->method() == m |
duke@435 | 175 | && _intrinsics->at(index)->is_virtual() == is_virtual) { |
duke@435 | 176 | return _intrinsics->at(index); |
duke@435 | 177 | } |
duke@435 | 178 | } |
duke@435 | 179 | // Lazily create intrinsics for intrinsic IDs well-known in the runtime. |
jrose@1291 | 180 | if (m->intrinsic_id() != vmIntrinsics::_none && |
jrose@1291 | 181 | m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) { |
duke@435 | 182 | CallGenerator* cg = make_vm_intrinsic(m, is_virtual); |
duke@435 | 183 | if (cg != NULL) { |
duke@435 | 184 | // Save it for next time: |
duke@435 | 185 | register_intrinsic(cg); |
duke@435 | 186 | return cg; |
duke@435 | 187 | } else { |
duke@435 | 188 | gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled); |
duke@435 | 189 | } |
duke@435 | 190 | } |
duke@435 | 191 | return NULL; |
duke@435 | 192 | } |
duke@435 | 193 | |
duke@435 | 194 | // Compile::register_library_intrinsics and make_vm_intrinsic are defined |
duke@435 | 195 | // in library_call.cpp. |
duke@435 | 196 | |
duke@435 | 197 | |
duke@435 | 198 | #ifndef PRODUCT |
duke@435 | 199 | // statistics gathering... |
duke@435 | 200 | |
duke@435 | 201 | juint Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0}; |
duke@435 | 202 | jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0}; |
duke@435 | 203 | |
duke@435 | 204 | bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) { |
duke@435 | 205 | assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob"); |
duke@435 | 206 | int oflags = _intrinsic_hist_flags[id]; |
duke@435 | 207 | assert(flags != 0, "what happened?"); |
duke@435 | 208 | if (is_virtual) { |
duke@435 | 209 | flags |= _intrinsic_virtual; |
duke@435 | 210 | } |
duke@435 | 211 | bool changed = (flags != oflags); |
duke@435 | 212 | if ((flags & _intrinsic_worked) != 0) { |
duke@435 | 213 | juint count = (_intrinsic_hist_count[id] += 1); |
duke@435 | 214 | if (count == 1) { |
duke@435 | 215 | changed = true; // first time |
duke@435 | 216 | } |
duke@435 | 217 | // increment the overall count also: |
duke@435 | 218 | _intrinsic_hist_count[vmIntrinsics::_none] += 1; |
duke@435 | 219 | } |
duke@435 | 220 | if (changed) { |
duke@435 | 221 | if (((oflags ^ flags) & _intrinsic_virtual) != 0) { |
duke@435 | 222 | // Something changed about the intrinsic's virtuality. |
duke@435 | 223 | if ((flags & _intrinsic_virtual) != 0) { |
duke@435 | 224 | // This is the first use of this intrinsic as a virtual call. |
duke@435 | 225 | if (oflags != 0) { |
duke@435 | 226 | // We already saw it as a non-virtual, so note both cases. |
duke@435 | 227 | flags |= _intrinsic_both; |
duke@435 | 228 | } |
duke@435 | 229 | } else if ((oflags & _intrinsic_both) == 0) { |
duke@435 | 230 | // This is the first use of this intrinsic as a non-virtual call. |
duke@435 | 231 | flags |= _intrinsic_both; |
duke@435 | 232 | } |
duke@435 | 233 | } |
duke@435 | 234 | _intrinsic_hist_flags[id] = (jubyte) (oflags | flags); |
duke@435 | 235 | } |
duke@435 | 236 | // update the overall flags also: |
duke@435 | 237 | _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags; |
duke@435 | 238 | return changed; |
duke@435 | 239 | } |
duke@435 | 240 | |
duke@435 | 241 | static char* format_flags(int flags, char* buf) { |
duke@435 | 242 | buf[0] = 0; |
duke@435 | 243 | if ((flags & Compile::_intrinsic_worked) != 0) strcat(buf, ",worked"); |
duke@435 | 244 | if ((flags & Compile::_intrinsic_failed) != 0) strcat(buf, ",failed"); |
duke@435 | 245 | if ((flags & Compile::_intrinsic_disabled) != 0) strcat(buf, ",disabled"); |
duke@435 | 246 | if ((flags & Compile::_intrinsic_virtual) != 0) strcat(buf, ",virtual"); |
duke@435 | 247 | if ((flags & Compile::_intrinsic_both) != 0) strcat(buf, ",nonvirtual"); |
duke@435 | 248 | if (buf[0] == 0) strcat(buf, ","); |
duke@435 | 249 | assert(buf[0] == ',', "must be"); |
duke@435 | 250 | return &buf[1]; |
duke@435 | 251 | } |
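// Example use of the formatter above (sketch): each set flag appends
// ",name" to the buffer, and the returned pointer skips the leading comma.
#if 0
char buf[100];
const char* s = format_flags(Compile::_intrinsic_worked |
                             Compile::_intrinsic_virtual, buf);
// s now reads "worked,virtual"; with no flags set, buf holds "," and
// s is the empty string.
#endif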
duke@435 | 252 | |
duke@435 | 253 | void Compile::print_intrinsic_statistics() { |
duke@435 | 254 | char flagsbuf[100]; |
duke@435 | 255 | ttyLocker ttyl; |
duke@435 | 256 | if (xtty != NULL) xtty->head("statistics type='intrinsic'"); |
duke@435 | 257 | tty->print_cr("Compiler intrinsic usage:"); |
duke@435 | 258 | juint total = _intrinsic_hist_count[vmIntrinsics::_none]; |
duke@435 | 259 | if (total == 0) total = 1; // avoid div0 in case of no successes |
duke@435 | 260 | #define PRINT_STAT_LINE(name, c, f) \ |
duke@435 | 261 | tty->print_cr(" %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f); |
duke@435 | 262 | for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) { |
duke@435 | 263 | vmIntrinsics::ID id = (vmIntrinsics::ID) index; |
duke@435 | 264 | int flags = _intrinsic_hist_flags[id]; |
duke@435 | 265 | juint count = _intrinsic_hist_count[id]; |
duke@435 | 266 | if ((flags | count) != 0) { |
duke@435 | 267 | PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf)); |
duke@435 | 268 | } |
duke@435 | 269 | } |
duke@435 | 270 | PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf)); |
duke@435 | 271 | if (xtty != NULL) xtty->tail("statistics"); |
duke@435 | 272 | } |
duke@435 | 273 | |
duke@435 | 274 | void Compile::print_statistics() { |
duke@435 | 275 | { ttyLocker ttyl; |
duke@435 | 276 | if (xtty != NULL) xtty->head("statistics type='opto'"); |
duke@435 | 277 | Parse::print_statistics(); |
duke@435 | 278 | PhaseCCP::print_statistics(); |
duke@435 | 279 | PhaseRegAlloc::print_statistics(); |
duke@435 | 280 | Scheduling::print_statistics(); |
duke@435 | 281 | PhasePeephole::print_statistics(); |
duke@435 | 282 | PhaseIdealLoop::print_statistics(); |
duke@435 | 283 | if (xtty != NULL) xtty->tail("statistics"); |
duke@435 | 284 | } |
duke@435 | 285 | if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) { |
duke@435 | 286 | // put this under its own <statistics> element. |
duke@435 | 287 | print_intrinsic_statistics(); |
duke@435 | 288 | } |
duke@435 | 289 | } |
duke@435 | 290 | #endif //PRODUCT |
duke@435 | 291 | |
duke@435 | 292 | // Support for bundling info |
duke@435 | 293 | Bundle* Compile::node_bundling(const Node *n) { |
duke@435 | 294 | assert(valid_bundle_info(n), "oob"); |
duke@435 | 295 | return &_node_bundling_base[n->_idx]; |
duke@435 | 296 | } |
duke@435 | 297 | |
duke@435 | 298 | bool Compile::valid_bundle_info(const Node *n) { |
duke@435 | 299 | return (_node_bundling_limit > n->_idx); |
duke@435 | 300 | } |
duke@435 | 301 | |
duke@435 | 302 | |
never@1515 | 303 | void Compile::gvn_replace_by(Node* n, Node* nn) { |
never@1515 | 304 | for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) { |
never@1515 | 305 | Node* use = n->last_out(i); |
never@1515 | 306 | bool is_in_table = initial_gvn()->hash_delete(use); |
never@1515 | 307 | uint uses_found = 0; |
never@1515 | 308 | for (uint j = 0; j < use->len(); j++) { |
never@1515 | 309 | if (use->in(j) == n) { |
never@1515 | 310 | if (j < use->req()) |
never@1515 | 311 | use->set_req(j, nn); |
never@1515 | 312 | else |
never@1515 | 313 | use->set_prec(j, nn); |
never@1515 | 314 | uses_found++; |
never@1515 | 315 | } |
never@1515 | 316 | } |
never@1515 | 317 | if (is_in_table) { |
never@1515 | 318 | // reinsert into table |
never@1515 | 319 | initial_gvn()->hash_find_insert(use); |
never@1515 | 320 | } |
never@1515 | 321 | record_for_igvn(use); |
never@1515 | 322 | i -= uses_found; // we deleted 1 or more copies of this edge |
never@1515 | 323 | } |
never@1515 | 324 | } |
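// The unhash/mutate/rehash discipline above is required because a node's
// value-number hash depends on its inputs. In outline (sketch; the
// rewrite_edges helper is hypothetical):
#if 0
bool was_hashed = initial_gvn()->hash_delete(use);   // unhash before mutating
rewrite_edges(use, n, nn);                           // inputs (and hash) change
if (was_hashed) {
  initial_gvn()->hash_find_insert(use);              // rehash under new inputs
}
#endif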
never@1515 | 325 | |
never@1515 | 326 | |
bharadwaj@4315 | 327 | static inline bool not_a_node(const Node* n) { |
bharadwaj@4315 | 328 | if (n == NULL) return true; |
bharadwaj@4315 | 329 | if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc. |
bharadwaj@4315 | 330 | if (*(address*)n == badAddress) return true; // killed by Node::destruct |
bharadwaj@4315 | 331 | return false; |
bharadwaj@4315 | 332 | } |
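// The three tests above run cheapest-first: NULL, a pointer with its low
// bit set (never a valid node address, since nodes are at least
// word-aligned), and a first word of badAddress, the pattern a destructed
// node is left with.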
never@1515 | 333 | |
duke@435 | 334 | // Identify all nodes that are reachable from below, useful. |
duke@435 | 335 | // Use a breadth-first pass that records state in a Unique_Node_List; |
duke@435 | 336 | // a recursive traversal would be slower. |
duke@435 | 337 | void Compile::identify_useful_nodes(Unique_Node_List &useful) { |
duke@435 | 338 | int estimated_worklist_size = unique(); |
duke@435 | 339 | useful.map( estimated_worklist_size, NULL ); // preallocate space |
duke@435 | 340 | |
duke@435 | 341 | // Initialize worklist |
duke@435 | 342 | if (root() != NULL) { useful.push(root()); } |
duke@435 | 343 | // If 'top' is cached, declare it useful to preserve the cached node |
duke@435 | 344 | if( cached_top_node() ) { useful.push(cached_top_node()); } |
duke@435 | 345 | |
duke@435 | 346 | // Push all useful nodes onto the list, breadth-first |
duke@435 | 347 | for( uint next = 0; next < useful.size(); ++next ) { |
duke@435 | 348 | assert( next < unique(), "Unique useful nodes < total nodes"); |
duke@435 | 349 | Node *n = useful.at(next); |
duke@435 | 350 | uint max = n->len(); |
duke@435 | 351 | for( uint i = 0; i < max; ++i ) { |
duke@435 | 352 | Node *m = n->in(i); |
bharadwaj@4315 | 353 | if (not_a_node(m)) continue; |
duke@435 | 354 | useful.push(m); |
duke@435 | 355 | } |
duke@435 | 356 | } |
duke@435 | 357 | } |
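// The same worklist idiom over a generic graph: the Unique_Node_List
// doubles as the visited set and the BFS queue, so each node is pushed at
// most once. A standalone sketch (hypothetical N type, not compiled into
// the VM):
#if 0
#include <vector>

struct N { std::vector<N*> in; bool marked; };

static void mark_reachable(N* root, std::vector<N*>& work) {
  if (root != NULL && !root->marked) { root->marked = true; work.push_back(root); }
  for (size_t next = 0; next < work.size(); ++next) {   // breadth-first
    N* n = work[next];
    for (size_t i = 0; i < n->in.size(); ++i) {
      N* m = n->in[i];
      if (m != NULL && !m->marked) { m->marked = true; work.push_back(m); }
    }
  }
}
#endif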
duke@435 | 358 | |
bharadwaj@4315 | 359 | // Update dead_node_list with any missing dead nodes, using the useful |
bharadwaj@4315 | 360 | // list: every node that is not in the useful set is considered dead. |
bharadwaj@4315 | 361 | void Compile::update_dead_node_list(Unique_Node_List &useful) { |
bharadwaj@4315 | 362 | uint max_idx = unique(); |
bharadwaj@4315 | 363 | VectorSet& useful_node_set = useful.member_set(); |
bharadwaj@4315 | 364 | |
bharadwaj@4315 | 365 | for (uint node_idx = 0; node_idx < max_idx; node_idx++) { |
bharadwaj@4315 | 366 | // If node with index node_idx is not in useful set, |
bharadwaj@4315 | 367 | // mark it as dead in dead node list. |
bharadwaj@4315 | 368 | if (! useful_node_set.test(node_idx) ) { |
bharadwaj@4315 | 369 | record_dead_node(node_idx); |
bharadwaj@4315 | 370 | } |
bharadwaj@4315 | 371 | } |
bharadwaj@4315 | 372 | } |
bharadwaj@4315 | 373 | |
roland@4409 | 374 | void Compile::remove_useless_late_inlines(GrowableArray<CallGenerator*>* inlines, Unique_Node_List &useful) { |
roland@4409 | 375 | int shift = 0; |
roland@4409 | 376 | for (int i = 0; i < inlines->length(); i++) { |
roland@4409 | 377 | CallGenerator* cg = inlines->at(i); |
roland@4409 | 378 | CallNode* call = cg->call_node(); |
roland@4409 | 379 | if (shift > 0) { |
roland@4409 | 380 | inlines->at_put(i-shift, cg); |
roland@4409 | 381 | } |
roland@4409 | 382 | if (!useful.member(call)) { |
roland@4409 | 383 | shift++; |
roland@4409 | 384 | } |
roland@4409 | 385 | } |
roland@4409 | 386 | inlines->trunc_to(inlines->length()-shift); |
roland@4409 | 387 | } |
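// The loop above is a stable in-place compaction: survivors slide left by
// 'shift', dead entries are overwritten, and a single trunc_to() drops the
// tail -- the same effect as std::remove_if followed by erase. Generic
// sketch (not compiled into the VM):
#if 0
#include <vector>

template <class T, class IsDead>
static void compact(std::vector<T>& v, IsDead dead) {
  int shift = 0;
  for (int i = 0; i < (int)v.size(); i++) {
    T x = v[i];
    if (shift > 0) v[i - shift] = x;  // slide left over dead slots
    if (dead(x))   shift++;           // x's slot is reclaimed next round
  }
  v.resize(v.size() - shift);
}
#endif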
roland@4409 | 388 | |
duke@435 | 389 | // Disconnect all useless nodes by disconnecting those at the boundary. |
duke@435 | 390 | void Compile::remove_useless_nodes(Unique_Node_List &useful) { |
duke@435 | 391 | uint next = 0; |
kvn@3260 | 392 | while (next < useful.size()) { |
duke@435 | 393 | Node *n = useful.at(next++); |
roland@7041 | 394 | if (n->is_SafePoint()) { |
roland@7041 | 395 | // We're done with a parsing phase. Replaced nodes are not valid |
roland@7041 | 396 | // beyond that point. |
roland@7041 | 397 | n->as_SafePoint()->delete_replaced_nodes(); |
roland@7041 | 398 | } |
duke@435 | 399 | // Use raw traversal of out edges since this code removes out edges |
duke@435 | 400 | int max = n->outcnt(); |
kvn@3260 | 401 | for (int j = 0; j < max; ++j) { |
duke@435 | 402 | Node* child = n->raw_out(j); |
kvn@3260 | 403 | if (! useful.member(child)) { |
kvn@3260 | 404 | assert(!child->is_top() || child != top(), |
kvn@3260 | 405 | "If top is cached in Compile object it is in useful list"); |
duke@435 | 406 | // Only need to remove this out-edge to the useless node |
duke@435 | 407 | n->raw_del_out(j); |
duke@435 | 408 | --j; |
duke@435 | 409 | --max; |
duke@435 | 410 | } |
duke@435 | 411 | } |
duke@435 | 412 | if (n->outcnt() == 1 && n->has_special_unique_user()) { |
kvn@3260 | 413 | record_for_igvn(n->unique_out()); |
kvn@3260 | 414 | } |
kvn@3260 | 415 | } |
kvn@3260 | 416 | // Remove useless macro and predicate opaque nodes |
kvn@3260 | 417 | for (int i = C->macro_count()-1; i >= 0; i--) { |
kvn@3260 | 418 | Node* n = C->macro_node(i); |
kvn@3260 | 419 | if (!useful.member(n)) { |
kvn@3260 | 420 | remove_macro_node(n); |
duke@435 | 421 | } |
duke@435 | 422 | } |
roland@4589 | 423 | // Remove useless expensive nodes |
roland@4589 | 424 | for (int i = C->expensive_count()-1; i >= 0; i--) { |
roland@4589 | 425 | Node* n = C->expensive_node(i); |
roland@4589 | 426 | if (!useful.member(n)) { |
roland@4589 | 427 | remove_expensive_node(n); |
roland@4589 | 428 | } |
roland@4589 | 429 | } |
roland@4409 | 430 | // clean up the late inline lists |
roland@4409 | 431 | remove_useless_late_inlines(&_string_late_inlines, useful); |
kvn@5110 | 432 | remove_useless_late_inlines(&_boxing_late_inlines, useful); |
roland@4409 | 433 | remove_useless_late_inlines(&_late_inlines, useful); |
duke@435 | 434 | debug_only(verify_graph_edges(true/*check for no_dead_code*/);) |
duke@435 | 435 | } |
duke@435 | 436 | |
duke@435 | 437 | //------------------------------frame_size_in_words----------------------------- |
duke@435 | 438 | // frame_slots in units of words |
duke@435 | 439 | int Compile::frame_size_in_words() const { |
duke@435 | 440 | // shift is 0 in LP32 and 1 in LP64 |
duke@435 | 441 | const int shift = (LogBytesPerWord - LogBytesPerInt); |
duke@435 | 442 | int words = _frame_slots >> shift; |
duke@435 | 443 | assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" ); |
duke@435 | 444 | return words; |
duke@435 | 445 | } |
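// Worked example: on LP64, LogBytesPerWord == 3 and LogBytesPerInt == 2,
// so shift == 1 and a frame of 14 32-bit slots is 7 words; an odd slot
// count would trip the alignment assert above. On LP32 the shift is 0 and
// slots and words coincide.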
duke@435 | 446 | |
roland@6723 | 447 | // To bang the stack of this compiled method we use the stack size |
roland@6723 | 448 | // that the interpreter would need in case of a deoptimization. This |
roland@6723 | 449 | // removes the need to bang the stack in the deoptimization blob, which |
roland@6723 | 450 | // in turn simplifies stack overflow handling. |
roland@6723 | 451 | int Compile::bang_size_in_bytes() const { |
roland@6723 | 452 | return MAX2(_interpreter_frame_size, frame_size_in_bytes()); |
roland@6723 | 453 | } |
roland@6723 | 454 | |
duke@435 | 455 | // ============================================================================ |
duke@435 | 456 | //------------------------------CompileWrapper--------------------------------- |
duke@435 | 457 | class CompileWrapper : public StackObj { |
duke@435 | 458 | Compile *const _compile; |
duke@435 | 459 | public: |
duke@435 | 460 | CompileWrapper(Compile* compile); |
duke@435 | 461 | |
duke@435 | 462 | ~CompileWrapper(); |
duke@435 | 463 | }; |
duke@435 | 464 | |
duke@435 | 465 | CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) { |
duke@435 | 466 | // the Compile* pointer is stored in the current ciEnv: |
duke@435 | 467 | ciEnv* env = compile->env(); |
duke@435 | 468 | assert(env == ciEnv::current(), "must already be a ciEnv active"); |
duke@435 | 469 | assert(env->compiler_data() == NULL, "compile already active?"); |
duke@435 | 470 | env->set_compiler_data(compile); |
duke@435 | 471 | assert(compile == Compile::current(), "sanity"); |
duke@435 | 472 | |
duke@435 | 473 | compile->set_type_dict(NULL); |
duke@435 | 474 | compile->set_type_hwm(NULL); |
duke@435 | 475 | compile->set_type_last_size(0); |
duke@435 | 476 | compile->set_last_tf(NULL, NULL); |
duke@435 | 477 | compile->set_indexSet_arena(NULL); |
duke@435 | 478 | compile->set_indexSet_free_block_list(NULL); |
duke@435 | 479 | compile->init_type_arena(); |
duke@435 | 480 | Type::Initialize(compile); |
duke@435 | 481 | _compile->set_scratch_buffer_blob(NULL); |
duke@435 | 482 | _compile->begin_method(); |
duke@435 | 483 | } |
duke@435 | 484 | CompileWrapper::~CompileWrapper() { |
duke@435 | 485 | _compile->end_method(); |
duke@435 | 486 | if (_compile->scratch_buffer_blob() != NULL) |
duke@435 | 487 | BufferBlob::free(_compile->scratch_buffer_blob()); |
duke@435 | 488 | _compile->env()->set_compiler_data(NULL); |
duke@435 | 489 | } |
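// CompileWrapper is an RAII guard: constructing one on the stack registers
// the Compile with the thread's ciEnv, and the destructor runs on every
// exit path, unregistering it and freeing the scratch blob. The shape in
// miniature (sketch, hypothetical types):
#if 0
struct Env { void* compiler_data; };

class ScopedCompile {
  Env* _env;
 public:
  ScopedCompile(Env* env, void* compile) : _env(env) {
    _env->compiler_data = compile;   // install on entry
  }
  ~ScopedCompile() {
    _env->compiler_data = NULL;      // torn down on any return path
  }
};
#endif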
duke@435 | 490 | |
duke@435 | 491 | |
duke@435 | 492 | //----------------------------print_compile_messages--------------------------- |
duke@435 | 493 | void Compile::print_compile_messages() { |
duke@435 | 494 | #ifndef PRODUCT |
duke@435 | 495 | // Check if recompiling |
duke@435 | 496 | if (_subsume_loads == false && PrintOpto) { |
duke@435 | 497 | // Recompiling without allowing machine instructions to subsume loads |
duke@435 | 498 | tty->print_cr("*********************************************************"); |
duke@435 | 499 | tty->print_cr("** Bailout: Recompile without subsuming loads **"); |
duke@435 | 500 | tty->print_cr("*********************************************************"); |
duke@435 | 501 | } |
kvn@473 | 502 | if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) { |
kvn@473 | 503 | // Recompiling without escape analysis |
kvn@473 | 504 | tty->print_cr("*********************************************************"); |
kvn@473 | 505 | tty->print_cr("** Bailout: Recompile without escape analysis **"); |
kvn@473 | 506 | tty->print_cr("*********************************************************"); |
kvn@473 | 507 | } |
kvn@5110 | 508 | if (_eliminate_boxing != EliminateAutoBox && PrintOpto) { |
kvn@5110 | 509 | // Recompiling without boxing elimination |
kvn@5110 | 510 | tty->print_cr("*********************************************************"); |
kvn@5110 | 511 | tty->print_cr("** Bailout: Recompile without boxing elimination **"); |
kvn@5110 | 512 | tty->print_cr("*********************************************************"); |
kvn@5110 | 513 | } |
duke@435 | 514 | if (env()->break_at_compile()) { |
twisti@1040 | 515 | // Open the debugger when compiling this method. |
duke@435 | 516 | tty->print("### Breaking when compiling: "); |
duke@435 | 517 | method()->print_short_name(); |
duke@435 | 518 | tty->cr(); |
duke@435 | 519 | BREAKPOINT; |
duke@435 | 520 | } |
duke@435 | 521 | |
duke@435 | 522 | if( PrintOpto ) { |
duke@435 | 523 | if (is_osr_compilation()) { |
duke@435 | 524 | tty->print("[OSR]%3d", _compile_id); |
duke@435 | 525 | } else { |
duke@435 | 526 | tty->print("%3d", _compile_id); |
duke@435 | 527 | } |
duke@435 | 528 | } |
duke@435 | 529 | #endif |
duke@435 | 530 | } |
duke@435 | 531 | |
duke@435 | 532 | |
kvn@2414 | 533 | //-----------------------init_scratch_buffer_blob------------------------------ |
kvn@2414 | 534 | // Construct a temporary BufferBlob and cache it for this compile. |
twisti@2350 | 535 | void Compile::init_scratch_buffer_blob(int const_size) { |
kvn@2414 | 536 | // If there is already a scratch buffer blob allocated and the |
kvn@2414 | 537 | // constant section is big enough, use it. Otherwise free the |
kvn@2414 | 538 | // current one and allocate a new one. |
kvn@2414 | 539 | BufferBlob* blob = scratch_buffer_blob(); |
kvn@2414 | 540 | if ((blob != NULL) && (const_size <= _scratch_const_size)) { |
kvn@2414 | 541 | // Use the current blob. |
kvn@2414 | 542 | } else { |
kvn@2414 | 543 | if (blob != NULL) { |
kvn@2414 | 544 | BufferBlob::free(blob); |
kvn@2414 | 545 | } |
duke@435 | 546 | |
kvn@2414 | 547 | ResourceMark rm; |
kvn@2414 | 548 | _scratch_const_size = const_size; |
kvn@2414 | 549 | int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size); |
kvn@2414 | 550 | blob = BufferBlob::create("Compile::scratch_buffer", size); |
kvn@2414 | 551 | // Record the buffer blob for next time. |
kvn@2414 | 552 | set_scratch_buffer_blob(blob); |
kvn@2414 | 553 | // Have we run out of code space? |
kvn@2414 | 554 | if (scratch_buffer_blob() == NULL) { |
kvn@2414 | 555 | // Let CompileBroker disable further compilations. |
kvn@2414 | 556 | record_failure("Not enough space for scratch buffer in CodeCache"); |
kvn@2414 | 557 | return; |
kvn@2414 | 558 | } |
kvn@598 | 559 | } |
duke@435 | 560 | |
duke@435 | 561 | // Initialize the relocation buffers |
twisti@2103 | 562 | relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size; |
duke@435 | 563 | set_scratch_locs_memory(locs_buf); |
duke@435 | 564 | } |
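// The blob is a grow-only cache: it is reused across calls and only
// reallocated when a larger constant section is requested. The pattern in
// outline (sketch; alloc_buffer/free_buffer are hypothetical):
#if 0
if (cached != NULL && request <= cached_capacity) {
  // keep using the cached buffer
} else {
  if (cached != NULL) free_buffer(cached);
  cached_capacity = request;
  cached = alloc_buffer(fixed_overhead + request);
}
#endif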
duke@435 | 565 | |
duke@435 | 566 | |
duke@435 | 567 | //-----------------------scratch_emit_size------------------------------------- |
duke@435 | 568 | // Helper function that computes size by emitting code |
duke@435 | 569 | uint Compile::scratch_emit_size(const Node* n) { |
twisti@2350 | 570 | // Start scratch_emit_size section. |
twisti@2350 | 571 | set_in_scratch_emit_size(true); |
twisti@2350 | 572 | |
duke@435 | 573 | // Emit into a trash buffer and count bytes emitted. |
duke@435 | 574 | // This is a pretty expensive way to compute a size, |
duke@435 | 575 | // but it works well enough if seldom used. |
duke@435 | 576 | // All common fixed-size instructions are given a size |
duke@435 | 577 | // method by the AD file. |
duke@435 | 578 | // Note that the scratch buffer blob and locs memory are |
duke@435 | 579 | // allocated at the beginning of the compile task, and |
duke@435 | 580 | // may be shared by several calls to scratch_emit_size. |
duke@435 | 581 | // The allocation of the scratch buffer blob is particularly |
duke@435 | 582 | // expensive, since it has to grab the code cache lock. |
duke@435 | 583 | BufferBlob* blob = this->scratch_buffer_blob(); |
duke@435 | 584 | assert(blob != NULL, "Initialize BufferBlob at start"); |
duke@435 | 585 | assert(blob->size() > MAX_inst_size, "sanity"); |
duke@435 | 586 | relocInfo* locs_buf = scratch_locs_memory(); |
twisti@2103 | 587 | address blob_begin = blob->content_begin(); |
duke@435 | 588 | address blob_end = (address)locs_buf; |
twisti@2103 | 589 | assert(blob->content_contains(blob_end), "sanity"); |
duke@435 | 590 | CodeBuffer buf(blob_begin, blob_end - blob_begin); |
twisti@2350 | 591 | buf.initialize_consts_size(_scratch_const_size); |
duke@435 | 592 | buf.initialize_stubs_size(MAX_stubs_size); |
duke@435 | 593 | assert(locs_buf != NULL, "sanity"); |
twisti@2350 | 594 | int lsize = MAX_locs_size / 3; |
twisti@2350 | 595 | buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize); |
twisti@2350 | 596 | buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize); |
twisti@2350 | 597 | buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize); |
twisti@2350 | 598 | |
twisti@2350 | 599 | // Do the emission. |
kvn@3037 | 600 | |
kvn@3037 | 601 | Label fakeL; // Fake label for branch instructions. |
kvn@3051 | 602 | Label* saveL = NULL; |
kvn@3051 | 603 | uint save_bnum = 0; |
kvn@3051 | 604 | bool is_branch = n->is_MachBranch(); |
kvn@3037 | 605 | if (is_branch) { |
kvn@3037 | 606 | MacroAssembler masm(&buf); |
kvn@3037 | 607 | masm.bind(fakeL); |
kvn@3051 | 608 | n->as_MachBranch()->save_label(&saveL, &save_bnum); |
kvn@3051 | 609 | n->as_MachBranch()->label_set(&fakeL, 0); |
kvn@3037 | 610 | } |
duke@435 | 611 | n->emit(buf, this->regalloc()); |
kvn@3051 | 612 | if (is_branch) // Restore label. |
kvn@3051 | 613 | n->as_MachBranch()->label_set(saveL, save_bnum); |
twisti@2350 | 614 | |
twisti@2350 | 615 | // End scratch_emit_size section. |
twisti@2350 | 616 | set_in_scratch_emit_size(false); |
twisti@2350 | 617 | |
twisti@2103 | 618 | return buf.insts_size(); |
duke@435 | 619 | } |
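// Note the fake-label dance above: a MachBranch will not emit without a
// bound target, so its real label is saved, a label bound at the start of
// the scratch buffer is substituted for the measurement, and the original
// is restored afterwards.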
duke@435 | 620 | |
duke@435 | 621 | |
duke@435 | 622 | // ============================================================================ |
duke@435 | 623 | //------------------------------Compile standard------------------------------- |
duke@435 | 624 | debug_only( int Compile::_debug_idx = 100000; ) |
duke@435 | 625 | |
duke@435 | 626 | // Compile a method. entry_bci is -1 for normal compilations; otherwise it |
duke@435 | 627 | // is the continuation bci for on-stack replacement. |
duke@435 | 628 | |
duke@435 | 629 | |
kvn@5110 | 630 | Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, |
kvn@5110 | 631 | bool subsume_loads, bool do_escape_analysis, bool eliminate_boxing ) |
duke@435 | 632 | : Phase(Compiler), |
duke@435 | 633 | _env(ci_env), |
duke@435 | 634 | _log(ci_env->log()), |
duke@435 | 635 | _compile_id(ci_env->compile_id()), |
duke@435 | 636 | _save_argument_registers(false), |
duke@435 | 637 | _stub_name(NULL), |
duke@435 | 638 | _stub_function(NULL), |
duke@435 | 639 | _stub_entry_point(NULL), |
duke@435 | 640 | _method(target), |
duke@435 | 641 | _entry_bci(osr_bci), |
duke@435 | 642 | _initial_gvn(NULL), |
duke@435 | 643 | _for_igvn(NULL), |
duke@435 | 644 | _warm_calls(NULL), |
duke@435 | 645 | _subsume_loads(subsume_loads), |
kvn@473 | 646 | _do_escape_analysis(do_escape_analysis), |
kvn@5110 | 647 | _eliminate_boxing(eliminate_boxing), |
duke@435 | 648 | _failure_reason(NULL), |
duke@435 | 649 | _code_buffer("Compile::Fill_buffer"), |
duke@435 | 650 | _orig_pc_slot(0), |
duke@435 | 651 | _orig_pc_slot_offset_in_bytes(0), |
twisti@1700 | 652 | _has_method_handle_invokes(false), |
twisti@2350 | 653 | _mach_constant_base_node(NULL), |
duke@435 | 654 | _node_bundling_limit(0), |
duke@435 | 655 | _node_bundling_base(NULL), |
kvn@1294 | 656 | _java_calls(0), |
kvn@1294 | 657 | _inner_loops(0), |
twisti@2350 | 658 | _scratch_const_size(-1), |
twisti@2350 | 659 | _in_scratch_emit_size(false), |
bharadwaj@4315 | 660 | _dead_node_list(comp_arena()), |
bharadwaj@4315 | 661 | _dead_node_count(0), |
duke@435 | 662 | #ifndef PRODUCT |
duke@435 | 663 | _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")), |
goetz@6488 | 664 | _in_dump_cnt(0), |
duke@435 | 665 | _printer(IdealGraphPrinter::printer()), |
duke@435 | 666 | #endif |
roland@4357 | 667 | _congraph(NULL), |
zgu@7074 | 668 | _comp_arena(mtCompiler), |
zgu@7074 | 669 | _node_arena(mtCompiler), |
zgu@7074 | 670 | _old_arena(mtCompiler), |
zgu@7074 | 671 | _Compile_types(mtCompiler), |
kvn@6217 | 672 | _replay_inline_data(NULL), |
roland@4409 | 673 | _late_inlines(comp_arena(), 2, 0, NULL), |
roland@4409 | 674 | _string_late_inlines(comp_arena(), 2, 0, NULL), |
kvn@5110 | 675 | _boxing_late_inlines(comp_arena(), 2, 0, NULL), |
roland@4409 | 676 | _late_inlines_pos(0), |
roland@4409 | 677 | _number_of_mh_late_inlines(0), |
roland@4409 | 678 | _inlining_progress(false), |
roland@4409 | 679 | _inlining_incrementally(false), |
roland@4357 | 680 | _print_inlining_list(NULL), |
roland@5981 | 681 | _print_inlining_idx(0), |
roland@6723 | 682 | _interpreter_frame_size(0) { |
duke@435 | 683 | C = this; |
duke@435 | 684 | |
duke@435 | 685 | CompileWrapper cw(this); |
duke@435 | 686 | #ifndef PRODUCT |
duke@435 | 687 | if (TimeCompiler2) { |
duke@435 | 688 | tty->print(" "); |
duke@435 | 689 | target->holder()->name()->print(); |
duke@435 | 690 | tty->print("."); |
duke@435 | 691 | target->print_short_name(); |
duke@435 | 692 | tty->print(" "); |
duke@435 | 693 | } |
duke@435 | 694 | TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2); |
duke@435 | 695 | TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false); |
jrose@535 | 696 | bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly"); |
jrose@535 | 697 | if (!print_opto_assembly) { |
jrose@535 | 698 | bool print_assembly = (PrintAssembly || _method->should_print_assembly()); |
jrose@535 | 699 | if (print_assembly && !Disassembler::can_decode()) { |
jrose@535 | 700 | tty->print_cr("PrintAssembly request changed to PrintOptoAssembly"); |
jrose@535 | 701 | print_opto_assembly = true; |
jrose@535 | 702 | } |
jrose@535 | 703 | } |
jrose@535 | 704 | set_print_assembly(print_opto_assembly); |
never@802 | 705 | set_parsed_irreducible_loop(false); |
kvn@6217 | 706 | |
kvn@6217 | 707 | if (method()->has_option("ReplayInline")) { |
kvn@6217 | 708 | _replay_inline_data = ciReplay::load_inline_data(method(), entry_bci(), ci_env->comp_level()); |
kvn@6217 | 709 | } |
duke@435 | 710 | #endif |
kvn@5763 | 711 | set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining)); |
kvn@5763 | 712 | set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics")); |
kvn@6657 | 713 | set_has_irreducible_loop(true); // conservative until build_loop_tree() resets it |
duke@435 | 714 | |
kvn@6429 | 715 | if (ProfileTraps RTM_OPT_ONLY( || UseRTMLocking )) { |
duke@435 | 716 | // Make sure the method being compiled gets its own MDO, |
duke@435 | 717 | // so we can at least track the decompile_count(). |
kvn@6429 | 718 | // Need MDO to record RTM code generation state. |
iveresov@2349 | 719 | method()->ensure_method_data(); |
duke@435 | 720 | } |
duke@435 | 721 | |
duke@435 | 722 | Init(::AliasLevel); |
duke@435 | 723 | |
duke@435 | 724 | |
duke@435 | 725 | print_compile_messages(); |
duke@435 | 726 | |
shade@6314 | 727 | _ilt = InlineTree::build_inline_tree_root(); |
duke@435 | 728 | |
duke@435 | 729 | // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice |
duke@435 | 730 | assert(num_alias_types() >= AliasIdxRaw, ""); |
duke@435 | 731 | |
duke@435 | 732 | #define MINIMUM_NODE_HASH 1023 |
duke@435 | 733 | // Node list that Iterative GVN will start with |
duke@435 | 734 | Unique_Node_List for_igvn(comp_arena()); |
duke@435 | 735 | set_for_igvn(&for_igvn); |
duke@435 | 736 | |
duke@435 | 737 | // GVN that will be run immediately on new nodes |
duke@435 | 738 | uint estimated_size = method()->code_size()*4+64; |
duke@435 | 739 | estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size); |
duke@435 | 740 | PhaseGVN gvn(node_arena(), estimated_size); |
duke@435 | 741 | set_initial_gvn(&gvn); |
duke@435 | 742 | |
kvn@5763 | 743 | if (print_inlining() || print_intrinsics()) { |
roland@4357 | 744 | _print_inlining_list = new (comp_arena()) GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer()); |
roland@4357 | 745 | } |
duke@435 | 746 | { // Scope for timing the parser |
duke@435 | 747 | TracePhase t3("parse", &_t_parser, true); |
duke@435 | 748 | |
duke@435 | 749 | // Put top into the hash table ASAP. |
duke@435 | 750 | initial_gvn()->transform_no_reclaim(top()); |
duke@435 | 751 | |
duke@435 | 752 | // Set up tf(), start(), and find a CallGenerator. |
johnc@2781 | 753 | CallGenerator* cg = NULL; |
duke@435 | 754 | if (is_osr_compilation()) { |
duke@435 | 755 | const TypeTuple *domain = StartOSRNode::osr_domain(); |
duke@435 | 756 | const TypeTuple *range = TypeTuple::make_range(method()->signature()); |
duke@435 | 757 | init_tf(TypeFunc::make(domain, range)); |
kvn@4115 | 758 | StartNode* s = new (this) StartOSRNode(root(), domain); |
duke@435 | 759 | initial_gvn()->set_type_bottom(s); |
duke@435 | 760 | init_start(s); |
duke@435 | 761 | cg = CallGenerator::for_osr(method(), entry_bci()); |
duke@435 | 762 | } else { |
duke@435 | 763 | // Normal case. |
duke@435 | 764 | init_tf(TypeFunc::make(method())); |
kvn@4115 | 765 | StartNode* s = new (this) StartNode(root(), tf()->domain()); |
duke@435 | 766 | initial_gvn()->set_type_bottom(s); |
duke@435 | 767 | init_start(s); |
johnc@2781 | 768 | if (method()->intrinsic_id() == vmIntrinsics::_Reference_get && UseG1GC) { |
johnc@2781 | 769 | // With java.lang.ref.Reference.get() we must go through the |
johnc@2781 | 770 | // intrinsic when G1 is enabled - even when get() is the root |
johnc@2781 | 771 | // method of the compile - so that, if necessary, the value in |
johnc@2781 | 772 | // the referent field of the reference object gets recorded by |
johnc@2781 | 773 | // the pre-barrier code. |
johnc@2781 | 774 | // Specifically, if G1 is enabled, the value in the referent |
johnc@2781 | 775 | // field is recorded by the G1 SATB pre barrier. This will |
johnc@2781 | 776 | // result in the referent being marked live and the reference |
johnc@2781 | 777 | // object removed from the list of discovered references during |
johnc@2781 | 778 | // reference processing. |
johnc@2781 | 779 | cg = find_intrinsic(method(), false); |
johnc@2781 | 780 | } |
johnc@2781 | 781 | if (cg == NULL) { |
johnc@2781 | 782 | float past_uses = method()->interpreter_invocation_count(); |
johnc@2781 | 783 | float expected_uses = past_uses; |
johnc@2781 | 784 | cg = CallGenerator::for_inline(method(), expected_uses); |
johnc@2781 | 785 | } |
duke@435 | 786 | } |
duke@435 | 787 | if (failing()) return; |
duke@435 | 788 | if (cg == NULL) { |
duke@435 | 789 | record_method_not_compilable_all_tiers("cannot parse method"); |
duke@435 | 790 | return; |
duke@435 | 791 | } |
duke@435 | 792 | JVMState* jvms = build_start_state(start(), tf()); |
roland@7041 | 793 | if ((jvms = cg->generate(jvms)) == NULL) { |
duke@435 | 794 | record_method_not_compilable("method parse failed"); |
duke@435 | 795 | return; |
duke@435 | 796 | } |
duke@435 | 797 | GraphKit kit(jvms); |
duke@435 | 798 | |
duke@435 | 799 | if (!kit.stopped()) { |
duke@435 | 800 | // Accept return values, and transfer control we know not where. |
duke@435 | 801 | // This is done by a special, unique ReturnNode bound to root. |
duke@435 | 802 | return_values(kit.jvms()); |
duke@435 | 803 | } |
duke@435 | 804 | |
duke@435 | 805 | if (kit.has_exceptions()) { |
duke@435 | 806 | // Any exceptions that escape from this call must be rethrown |
duke@435 | 807 | // to whatever caller is dynamically above us on the stack. |
duke@435 | 808 | // This is done by a special, unique RethrowNode bound to root. |
duke@435 | 809 | rethrow_exceptions(kit.transfer_exceptions_into_jvms()); |
duke@435 | 810 | } |
duke@435 | 811 | |
roland@4409 | 812 | assert(IncrementalInline || (_late_inlines.length() == 0 && !has_mh_late_inlines()), "incremental inlining is off"); |
roland@4409 | 813 | |
roland@4409 | 814 | if (_late_inlines.length() == 0 && !has_mh_late_inlines() && !failing() && has_stringbuilder()) { |
roland@4409 | 815 | inline_string_calls(true); |
never@1515 | 816 | } |
roland@4409 | 817 | |
roland@4409 | 818 | if (failing()) return; |
never@1515 | 819 | |
sla@5237 | 820 | print_method(PHASE_BEFORE_REMOVEUSELESS, 3); |
never@802 | 821 | |
duke@435 | 822 | // Remove clutter produced by parsing. |
duke@435 | 823 | if (!failing()) { |
duke@435 | 824 | ResourceMark rm; |
duke@435 | 825 | PhaseRemoveUseless pru(initial_gvn(), &for_igvn); |
duke@435 | 826 | } |
duke@435 | 827 | } |
duke@435 | 828 | |
duke@435 | 829 | // Note: Large methods are capped off in do_one_bytecode(). |
duke@435 | 830 | if (failing()) return; |
duke@435 | 831 | |
duke@435 | 832 | // After parsing, node notes are no longer automagic. |
duke@435 | 833 | // They must be propagated by register_new_node_with_optimizer(), |
duke@435 | 834 | // clone(), or the like. |
duke@435 | 835 | set_default_node_notes(NULL); |
duke@435 | 836 | |
duke@435 | 837 | for (;;) { |
duke@435 | 838 | int successes = Inline_Warm(); |
duke@435 | 839 | if (failing()) return; |
duke@435 | 840 | if (successes == 0) break; |
duke@435 | 841 | } |
duke@435 | 842 | |
duke@435 | 843 | // Drain the list. |
duke@435 | 844 | Finish_Warm(); |
duke@435 | 845 | #ifndef PRODUCT |
duke@435 | 846 | if (_printer) { |
duke@435 | 847 | _printer->print_inlining(this); |
duke@435 | 848 | } |
duke@435 | 849 | #endif |
duke@435 | 850 | |
duke@435 | 851 | if (failing()) return; |
duke@435 | 852 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 853 | |
duke@435 | 854 | // Now optimize |
duke@435 | 855 | Optimize(); |
duke@435 | 856 | if (failing()) return; |
duke@435 | 857 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 858 | |
duke@435 | 859 | #ifndef PRODUCT |
duke@435 | 860 | if (PrintIdeal) { |
duke@435 | 861 | ttyLocker ttyl; // keep the following output all in one block |
duke@435 | 862 | // This output goes directly to the tty, not the compiler log. |
duke@435 | 863 | // To enable tools to match it up with the compilation activity, |
duke@435 | 864 | // be sure to tag this tty output with the compile ID. |
duke@435 | 865 | if (xtty != NULL) { |
duke@435 | 866 | xtty->head("ideal compile_id='%d'%s", compile_id(), |
duke@435 | 867 | is_osr_compilation() ? " compile_kind='osr'" : |
duke@435 | 868 | ""); |
duke@435 | 869 | } |
duke@435 | 870 | root()->dump(9999); |
duke@435 | 871 | if (xtty != NULL) { |
duke@435 | 872 | xtty->tail("ideal"); |
duke@435 | 873 | } |
duke@435 | 874 | } |
duke@435 | 875 | #endif |
duke@435 | 876 | |
iveresov@6070 | 877 | NOT_PRODUCT( verify_barriers(); ) |
kvn@6217 | 878 | |
kvn@6217 | 879 | // Dump compilation data to replay it. |
kvn@6217 | 880 | if (method()->has_option("DumpReplay")) { |
kvn@6217 | 881 | env()->dump_replay_data(_compile_id); |
kvn@6217 | 882 | } |
kvn@6217 | 883 | if (method()->has_option("DumpInline") && (ilt() != NULL)) { |
kvn@6217 | 884 | env()->dump_inline_data(_compile_id); |
kvn@6217 | 885 | } |
kvn@6217 | 886 | |
duke@435 | 887 | // Now that we know the size of all the monitors we can add a fixed slot |
duke@435 | 888 | // for the original deopt pc. |
duke@435 | 889 | |
duke@435 | 890 | _orig_pc_slot = fixed_slots(); |
duke@435 | 891 | int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size); |
duke@435 | 892 | set_fixed_slots(next_slot); |
duke@435 | 893 | |
goetz@6490 | 894 | // Compute when to use implicit null checks. Used by matching trap-based |
goetz@6490 | 895 | // nodes and NullCheck optimization. |
goetz@6490 | 896 | set_allowed_deopt_reasons(); |
goetz@6490 | 897 | |
duke@435 | 898 | // Now generate code |
duke@435 | 899 | Code_Gen(); |
duke@435 | 900 | if (failing()) return; |
duke@435 | 901 | |
duke@435 | 902 | // Check if we want to skip execution of all compiled code. |
duke@435 | 903 | { |
duke@435 | 904 | #ifndef PRODUCT |
duke@435 | 905 | if (OptoNoExecute) { |
duke@435 | 906 | record_method_not_compilable("+OptoNoExecute"); // Flag as failed |
duke@435 | 907 | return; |
duke@435 | 908 | } |
duke@435 | 909 | TracePhase t2("install_code", &_t_registerMethod, TimeCompiler); |
duke@435 | 910 | #endif |
duke@435 | 911 | |
duke@435 | 912 | if (is_osr_compilation()) { |
duke@435 | 913 | _code_offsets.set_value(CodeOffsets::Verified_Entry, 0); |
duke@435 | 914 | _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size); |
duke@435 | 915 | } else { |
duke@435 | 916 | _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size); |
duke@435 | 917 | _code_offsets.set_value(CodeOffsets::OSR_Entry, 0); |
duke@435 | 918 | } |
duke@435 | 919 | |
duke@435 | 920 | env()->register_method(_method, _entry_bci, |
duke@435 | 921 | &_code_offsets, |
duke@435 | 922 | _orig_pc_slot_offset_in_bytes, |
duke@435 | 923 | code_buffer(), |
duke@435 | 924 | frame_size_in_words(), _oop_map_set, |
duke@435 | 925 | &_handler_table, &_inc_table, |
duke@435 | 926 | compiler, |
duke@435 | 927 | env()->comp_level(), |
kvn@4103 | 928 | has_unsafe_access(), |
kvn@6429 | 929 | SharedRuntime::is_wide_vector(max_vector_size()), |
kvn@6429 | 930 | rtm_state() |
duke@435 | 931 | ); |
vlivanov@4154 | 932 | |
vlivanov@4154 | 933 | if (log() != NULL) // Print code cache state into compiler log |
vlivanov@4154 | 934 | log()->code_cache_state(); |
duke@435 | 935 | } |
duke@435 | 936 | } |
duke@435 | 937 | |
duke@435 | 938 | //------------------------------Compile---------------------------------------- |
duke@435 | 939 | // Compile a runtime stub |
duke@435 | 940 | Compile::Compile( ciEnv* ci_env, |
duke@435 | 941 | TypeFunc_generator generator, |
duke@435 | 942 | address stub_function, |
duke@435 | 943 | const char *stub_name, |
duke@435 | 944 | int is_fancy_jump, |
duke@435 | 945 | bool pass_tls, |
duke@435 | 946 | bool save_arg_registers, |
duke@435 | 947 | bool return_pc ) |
duke@435 | 948 | : Phase(Compiler), |
duke@435 | 949 | _env(ci_env), |
duke@435 | 950 | _log(ci_env->log()), |
neliasso@4730 | 951 | _compile_id(0), |
duke@435 | 952 | _save_argument_registers(save_arg_registers), |
duke@435 | 953 | _method(NULL), |
duke@435 | 954 | _stub_name(stub_name), |
duke@435 | 955 | _stub_function(stub_function), |
duke@435 | 956 | _stub_entry_point(NULL), |
duke@435 | 957 | _entry_bci(InvocationEntryBci), |
duke@435 | 958 | _initial_gvn(NULL), |
duke@435 | 959 | _for_igvn(NULL), |
duke@435 | 960 | _warm_calls(NULL), |
duke@435 | 961 | _orig_pc_slot(0), |
duke@435 | 962 | _orig_pc_slot_offset_in_bytes(0), |
duke@435 | 963 | _subsume_loads(true), |
kvn@473 | 964 | _do_escape_analysis(false), |
kvn@5110 | 965 | _eliminate_boxing(false), |
duke@435 | 966 | _failure_reason(NULL), |
duke@435 | 967 | _code_buffer("Compile::Fill_buffer"), |
twisti@1700 | 968 | _has_method_handle_invokes(false), |
twisti@2350 | 969 | _mach_constant_base_node(NULL), |
duke@435 | 970 | _node_bundling_limit(0), |
duke@435 | 971 | _node_bundling_base(NULL), |
kvn@1294 | 972 | _java_calls(0), |
kvn@1294 | 973 | _inner_loops(0), |
duke@435 | 974 | #ifndef PRODUCT |
duke@435 | 975 | _trace_opto_output(TraceOptoOutput), |
goetz@6488 | 976 | _in_dump_cnt(0), |
duke@435 | 977 | _printer(NULL), |
duke@435 | 978 | #endif |
zgu@7074 | 979 | _comp_arena(mtCompiler), |
zgu@7074 | 980 | _node_arena(mtCompiler), |
zgu@7074 | 981 | _old_arena(mtCompiler), |
zgu@7074 | 982 | _Compile_types(mtCompiler), |
bharadwaj@4315 | 983 | _dead_node_list(comp_arena()), |
bharadwaj@4315 | 984 | _dead_node_count(0), |
roland@4357 | 985 | _congraph(NULL), |
kvn@6217 | 986 | _replay_inline_data(NULL), |
roland@4409 | 987 | _number_of_mh_late_inlines(0), |
roland@4409 | 988 | _inlining_progress(false), |
roland@4409 | 989 | _inlining_incrementally(false), |
roland@4357 | 990 | _print_inlining_list(NULL), |
roland@5981 | 991 | _print_inlining_idx(0), |
roland@6723 | 992 | _allowed_reasons(0), |
roland@6723 | 993 | _interpreter_frame_size(0) { |
duke@435 | 994 | C = this; |
duke@435 | 995 | |
duke@435 | 996 | #ifndef PRODUCT |
duke@435 | 997 | TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false); |
duke@435 | 998 | TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false); |
duke@435 | 999 | set_print_assembly(PrintFrameConverterAssembly); |
never@802 | 1000 | set_parsed_irreducible_loop(false); |
duke@435 | 1001 | #endif |
kvn@6657 | 1002 | set_has_irreducible_loop(false); // no loops |
kvn@6657 | 1003 | |
duke@435 | 1004 | CompileWrapper cw(this); |
duke@435 | 1005 | Init(/*AliasLevel=*/ 0); |
duke@435 | 1006 | init_tf((*generator)()); |
duke@435 | 1007 | |
duke@435 | 1008 | { |
duke@435 | 1009 | PhaseGVN gvn(Thread::current()->resource_area(), 255); |
duke@435 | 1010 | Unique_Node_List for_igvn(comp_arena()); |
duke@435 | 1011 | set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this |
duke@435 | 1012 | PhaseGVN gvn(Thread::current()->resource_area(),255); |
duke@435 | 1013 | set_initial_gvn(&gvn); // not significant, but GraphKit guys use it pervasively |
duke@435 | 1014 | gvn.transform_no_reclaim(top()); |
duke@435 | 1015 | |
duke@435 | 1016 | GraphKit kit; |
duke@435 | 1017 | kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc); |
duke@435 | 1018 | } |
duke@435 | 1019 | |
duke@435 | 1020 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1021 | Code_Gen(); |
duke@435 | 1022 | if (failing()) return; |
duke@435 | 1023 | |
duke@435 | 1024 | |
duke@435 | 1025 | // Entry point will be accessed using compile->stub_entry_point(); |
duke@435 | 1026 | if (code_buffer() == NULL) { |
duke@435 | 1027 | Matcher::soft_match_failure(); |
duke@435 | 1028 | } else { |
duke@435 | 1029 | if (PrintAssembly && (WizardMode || Verbose)) |
duke@435 | 1030 | tty->print_cr("### Stub::%s", stub_name); |
duke@435 | 1031 | |
duke@435 | 1032 | if (!failing()) { |
duke@435 | 1033 | assert(_fixed_slots == 0, "no fixed slots used for runtime stubs"); |
duke@435 | 1034 | |
duke@435 | 1035 | // Make the NMethod |
duke@435 | 1036 | // For now we mark the frame as never safe for profile stackwalking |
duke@435 | 1037 | RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name, |
duke@435 | 1038 | code_buffer(), |
duke@435 | 1039 | CodeOffsets::frame_never_safe, |
duke@435 | 1040 | // _code_offsets.value(CodeOffsets::Frame_Complete), |
duke@435 | 1041 | frame_size_in_words(), |
duke@435 | 1042 | _oop_map_set, |
duke@435 | 1043 | save_arg_registers); |
duke@435 | 1044 | assert(rs != NULL && rs->is_runtime_stub(), "sanity check"); |
duke@435 | 1045 | |
duke@435 | 1046 | _stub_entry_point = rs->entry_point(); |
duke@435 | 1047 | } |
duke@435 | 1048 | } |
duke@435 | 1049 | } |
duke@435 | 1050 | |
duke@435 | 1051 | //------------------------------Init------------------------------------------- |
duke@435 | 1052 | // Prepare for a single compilation |
duke@435 | 1053 | void Compile::Init(int aliaslevel) { |
duke@435 | 1054 | _unique = 0; |
duke@435 | 1055 | _regalloc = NULL; |
duke@435 | 1056 | |
duke@435 | 1057 | _tf = NULL; // filled in later |
duke@435 | 1058 | _top = NULL; // cached later |
duke@435 | 1059 | _matcher = NULL; // filled in later |
duke@435 | 1060 | _cfg = NULL; // filled in later |
duke@435 | 1061 | |
duke@435 | 1062 | set_24_bit_selection_and_mode(Use24BitFP, false); |
duke@435 | 1063 | |
duke@435 | 1064 | _node_note_array = NULL; |
duke@435 | 1065 | _default_node_notes = NULL; |
duke@435 | 1066 | |
duke@435 | 1067 | _immutable_memory = NULL; // filled in at first inquiry |
duke@435 | 1068 | |
duke@435 | 1069 | // Globally visible Nodes |
duke@435 | 1070 | // First set TOP to NULL to give safe behavior during creation of RootNode |
duke@435 | 1071 | set_cached_top_node(NULL); |
kvn@4115 | 1072 | set_root(new (this) RootNode()); |
duke@435 | 1073 | // Now that you have a Root to point to, create the real TOP |
kvn@4115 | 1074 | set_cached_top_node( new (this) ConNode(Type::TOP) ); |
duke@435 | 1075 | set_recent_alloc(NULL, NULL); |
duke@435 | 1076 | |
duke@435 | 1077 | // Create Debug Information Recorder to record scopes, oopmaps, etc. |
coleenp@4037 | 1078 | env()->set_oop_recorder(new OopRecorder(env()->arena())); |
duke@435 | 1079 | env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder())); |
duke@435 | 1080 | env()->set_dependencies(new Dependencies(env())); |
duke@435 | 1081 | |
duke@435 | 1082 | _fixed_slots = 0; |
duke@435 | 1083 | set_has_split_ifs(false); |
duke@435 | 1084 | set_has_loops(has_method() && method()->has_loops()); // first approximation |
never@1515 | 1085 | set_has_stringbuilder(false); |
kvn@5110 | 1086 | set_has_boxed_value(false); |
duke@435 | 1087 | _trap_can_recompile = false; // no traps emitted yet |
duke@435 | 1088 | _major_progress = true; // start out assuming good things will happen |
duke@435 | 1089 | set_has_unsafe_access(false); |
kvn@4103 | 1090 | set_max_vector_size(0); |
duke@435 | 1091 | Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist)); |
duke@435 | 1092 | set_decompile_count(0); |
duke@435 | 1093 | |
rasbold@853 | 1094 | set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency")); |
iveresov@2138 | 1095 | set_num_loop_opts(LoopOptsCount); |
iveresov@2138 | 1096 | set_do_inlining(Inline); |
iveresov@2138 | 1097 | set_max_inline_size(MaxInlineSize); |
iveresov@2138 | 1098 | set_freq_inline_size(FreqInlineSize); |
iveresov@2138 | 1099 | set_do_scheduling(OptoScheduling); |
iveresov@2138 | 1100 | set_do_count_invocations(false); |
iveresov@2138 | 1101 | set_do_method_data_update(false); |
kvn@6429 | 1102 | set_rtm_state(NoRTM); // No RTM lock eliding by default |
kvn@6429 | 1103 | #if INCLUDE_RTM_OPT |
kvn@6429 | 1104 | if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) { |
kvn@6429 | 1105 | int rtm_state = method()->method_data()->rtm_state(); |
kvn@6429 | 1106 | if (method_has_option("NoRTMLockEliding") || ((rtm_state & NoRTM) != 0)) { |
kvn@6429 | 1107 | // Don't generate RTM lock eliding code. |
kvn@6429 | 1108 | set_rtm_state(NoRTM); |
kvn@6429 | 1109 | } else if (method_has_option("UseRTMLockEliding") || ((rtm_state & UseRTM) != 0) || !UseRTMDeopt) { |
kvn@6429 | 1110 | // Generate RTM lock eliding code without abort ratio calculation code. |
kvn@6429 | 1111 | set_rtm_state(UseRTM); |
kvn@6429 | 1112 | } else if (UseRTMDeopt) { |
kvn@6429 | 1113 | // Generate RTM lock eliding code and include abort ratio calculation |
kvn@6429 | 1114 | // code if UseRTMDeopt is on. |
kvn@6429 | 1115 | set_rtm_state(ProfileRTM); |
kvn@6429 | 1116 | } |
kvn@6429 | 1117 | } |
kvn@6429 | 1118 | #endif |
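
// Illustrative summary (not from the original sources): the policy ladder
// above resolves each method to one of three RTM states:
//
//   NoRTM      -- per-method option NoRTMLockEliding, or profile data says
//                 RTM aborts too often for this method
//   UseRTM     -- per-method option UseRTMLockEliding, profile data says RTM
//                 works here, or UseRTMDeopt is off (no abort-ratio profiling
//                 is possible, so commit to RTM directly)
//   ProfileRTM -- otherwise: emit abort-ratio profiling and let a later
//                 recompilation pick NoRTM or UseRTM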
duke@435 | 1119 | if (debug_info()->recording_non_safepoints()) { |
duke@435 | 1120 | set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*> |
duke@435 | 1121 | (comp_arena(), 8, 0, NULL)); |
duke@435 | 1122 | set_default_node_notes(Node_Notes::make(this)); |
duke@435 | 1123 | } |
duke@435 | 1124 | |
duke@435 | 1125 | // // -- Initialize types before each compile -- |
duke@435 | 1126 | // // Update cached type information |
duke@435 | 1127 | // if( _method && _method->constants() ) |
duke@435 | 1128 | // Type::update_loaded_types(_method, _method->constants()); |
duke@435 | 1129 | |
duke@435 | 1130 | // Init alias_type map. |
kvn@473 | 1131 | if (!_do_escape_analysis && aliaslevel == 3) |
duke@435 | 1132 | aliaslevel = 2; // No unique types without escape analysis |
duke@435 | 1133 | _AliasLevel = aliaslevel; |
duke@435 | 1134 | const int grow_ats = 16; |
duke@435 | 1135 | _max_alias_types = grow_ats; |
duke@435 | 1136 | _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats); |
duke@435 | 1137 | AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats); |
duke@435 | 1138 | Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats); |
duke@435 | 1139 | { |
duke@435 | 1140 | for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i]; |
duke@435 | 1141 | } |
duke@435 | 1142 | // Initialize the first few types. |
duke@435 | 1143 | _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL); |
duke@435 | 1144 | _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM); |
duke@435 | 1145 | _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM); |
duke@435 | 1146 | _num_alias_types = AliasIdxRaw+1; |
duke@435 | 1147 | // Zero out the alias type cache. |
duke@435 | 1148 | Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache)); |
duke@435 | 1149 | // A NULL adr_type hits in the cache right away. Preload the right answer. |
duke@435 | 1150 | probe_alias_cache(NULL)->_index = AliasIdxTop; |
duke@435 | 1151 | |
duke@435 | 1152 | _intrinsics = NULL; |
kvn@2040 | 1153 | _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL); |
kvn@2040 | 1154 | _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL); |
roland@4589 | 1155 | _expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL); |
duke@435 | 1156 | register_library_intrinsics(); |
duke@435 | 1157 | } |
duke@435 | 1158 | |
duke@435 | 1159 | //---------------------------init_start---------------------------------------- |
duke@435 | 1160 | // Install the StartNode on this compile object. |
duke@435 | 1161 | void Compile::init_start(StartNode* s) { |
duke@435 | 1162 | if (failing()) |
duke@435 | 1163 | return; // already failing |
duke@435 | 1164 | assert(s == start(), ""); |
duke@435 | 1165 | } |
duke@435 | 1166 | |
duke@435 | 1167 | StartNode* Compile::start() const { |
duke@435 | 1168 | assert(!failing(), ""); |
duke@435 | 1169 | for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) { |
duke@435 | 1170 | Node* start = root()->fast_out(i); |
duke@435 | 1171 | if( start->is_Start() ) |
duke@435 | 1172 | return start->as_Start(); |
duke@435 | 1173 | } |
kvn@6657 | 1174 | fatal("Did not find Start node!"); |
duke@435 | 1175 | return NULL; |
duke@435 | 1176 | } |
duke@435 | 1177 | |
duke@435 | 1178 | //-------------------------------immutable_memory------------------------------------- |
duke@435 | 1179 | // Access immutable memory |
duke@435 | 1180 | Node* Compile::immutable_memory() { |
duke@435 | 1181 | if (_immutable_memory != NULL) { |
duke@435 | 1182 | return _immutable_memory; |
duke@435 | 1183 | } |
duke@435 | 1184 | StartNode* s = start(); |
duke@435 | 1185 | for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) { |
duke@435 | 1186 | Node *p = s->fast_out(i); |
duke@435 | 1187 | if (p != s && p->as_Proj()->_con == TypeFunc::Memory) { |
duke@435 | 1188 | _immutable_memory = p; |
duke@435 | 1189 | return _immutable_memory; |
duke@435 | 1190 | } |
duke@435 | 1191 | } |
duke@435 | 1192 | ShouldNotReachHere(); |
duke@435 | 1193 | return NULL; |
duke@435 | 1194 | } |
duke@435 | 1195 | |
duke@435 | 1196 | //----------------------set_cached_top_node------------------------------------ |
duke@435 | 1197 | // Install the cached top node, and make sure Node::is_top works correctly. |
duke@435 | 1198 | void Compile::set_cached_top_node(Node* tn) { |
duke@435 | 1199 | if (tn != NULL) verify_top(tn); |
duke@435 | 1200 | Node* old_top = _top; |
duke@435 | 1201 | _top = tn; |
duke@435 | 1202 | // Calling Node::setup_is_top gives the nodes a chance to adjust |
duke@435 | 1203 | // their _out arrays. |
duke@435 | 1204 | if (_top != NULL) _top->setup_is_top(); |
duke@435 | 1205 | if (old_top != NULL) old_top->setup_is_top(); |
duke@435 | 1206 | assert(_top == NULL || top()->is_top(), ""); |
duke@435 | 1207 | } |
duke@435 | 1208 | |
bharadwaj@4315 | 1209 | #ifdef ASSERT |
bharadwaj@4315 | 1210 | uint Compile::count_live_nodes_by_graph_walk() { |
bharadwaj@4315 | 1211 | Unique_Node_List useful(comp_arena()); |
bharadwaj@4315 | 1212 | // Get useful node list by walking the graph. |
bharadwaj@4315 | 1213 | identify_useful_nodes(useful); |
bharadwaj@4315 | 1214 | return useful.size(); |
bharadwaj@4315 | 1215 | } |
bharadwaj@4315 | 1216 | |
bharadwaj@4315 | 1217 | void Compile::print_missing_nodes() { |
bharadwaj@4315 | 1218 | |
bharadwaj@4315 | 1219 | // Return if CompileLog is NULL and PrintIdealNodeCount is false. |
bharadwaj@4315 | 1220 | if ((_log == NULL) && (! PrintIdealNodeCount)) { |
bharadwaj@4315 | 1221 | return; |
bharadwaj@4315 | 1222 | } |
bharadwaj@4315 | 1223 | |
bharadwaj@4315 | 1224 | // This is an expensive function. It is executed only when the user |
bharadwaj@4315 | 1225 | // specifies the VerifyIdealNodeCount option or otherwise accepts the |
bharadwaj@4315 | 1226 | // additional work needed to identify reachable nodes: walking the |
bharadwaj@4315 | 1227 | // flow graph and finding the missing ones using |
bharadwaj@4315 | 1228 | // _dead_node_list. |
bharadwaj@4315 | 1229 | |
bharadwaj@4315 | 1230 | Unique_Node_List useful(comp_arena()); |
bharadwaj@4315 | 1231 | // Get useful node list by walking the graph. |
bharadwaj@4315 | 1232 | identify_useful_nodes(useful); |
bharadwaj@4315 | 1233 | |
bharadwaj@4315 | 1234 | uint l_nodes = C->live_nodes(); |
bharadwaj@4315 | 1235 | uint l_nodes_by_walk = useful.size(); |
bharadwaj@4315 | 1236 | |
bharadwaj@4315 | 1237 | if (l_nodes != l_nodes_by_walk) { |
bharadwaj@4315 | 1238 | if (_log != NULL) { |
bharadwaj@4315 | 1239 | _log->begin_head("mismatched_nodes count='%d'", abs((int) (l_nodes - l_nodes_by_walk))); |
bharadwaj@4315 | 1240 | _log->stamp(); |
bharadwaj@4315 | 1241 | _log->end_head(); |
bharadwaj@4315 | 1242 | } |
bharadwaj@4315 | 1243 | VectorSet& useful_member_set = useful.member_set(); |
bharadwaj@4315 | 1244 | int last_idx = l_nodes_by_walk; |
bharadwaj@4315 | 1245 | for (int i = 0; i < last_idx; i++) { |
bharadwaj@4315 | 1246 | if (useful_member_set.test(i)) { |
bharadwaj@4315 | 1247 | if (_dead_node_list.test(i)) { |
bharadwaj@4315 | 1248 | if (_log != NULL) { |
bharadwaj@4315 | 1249 | _log->elem("mismatched_node_info node_idx='%d' type='both live and dead'", i); |
bharadwaj@4315 | 1250 | } |
bharadwaj@4315 | 1251 | if (PrintIdealNodeCount) { |
bharadwaj@4315 | 1252 | // Print the log message to tty |
bharadwaj@4315 | 1253 | tty->print_cr("mismatched_node idx='%d' type='both live and dead'", i); |
bharadwaj@4315 | 1254 | useful.at(i)->dump(); |
bharadwaj@4315 | 1255 | } |
bharadwaj@4315 | 1256 | } |
bharadwaj@4315 | 1257 | } |
bharadwaj@4315 | 1258 | else if (! _dead_node_list.test(i)) { |
bharadwaj@4315 | 1259 | if (_log != NULL) { |
bharadwaj@4315 | 1260 | _log->elem("mismatched_node_info node_idx='%d' type='neither live nor dead'", i); |
bharadwaj@4315 | 1261 | } |
bharadwaj@4315 | 1262 | if (PrintIdealNodeCount) { |
bharadwaj@4315 | 1263 | // Print the log message to tty |
bharadwaj@4315 | 1264 | tty->print_cr("mismatched_node idx='%d' type='neither live nor dead'", i); |
bharadwaj@4315 | 1265 | } |
bharadwaj@4315 | 1266 | } |
bharadwaj@4315 | 1267 | } |
bharadwaj@4315 | 1268 | if (_log != NULL) { |
bharadwaj@4315 | 1269 | _log->tail("mismatched_nodes"); |
bharadwaj@4315 | 1270 | } |
bharadwaj@4315 | 1271 | } |
bharadwaj@4315 | 1272 | } |
bharadwaj@4315 | 1273 | #endif |
bharadwaj@4315 | 1274 | |
duke@435 | 1275 | #ifndef PRODUCT |
duke@435 | 1276 | void Compile::verify_top(Node* tn) const { |
duke@435 | 1277 | if (tn != NULL) { |
duke@435 | 1278 | assert(tn->is_Con(), "top node must be a constant"); |
duke@435 | 1279 | assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type"); |
duke@435 | 1280 | assert(tn->in(0) != NULL, "must have live top node"); |
duke@435 | 1281 | } |
duke@435 | 1282 | } |
duke@435 | 1283 | #endif |
duke@435 | 1284 | |
duke@435 | 1285 | |
duke@435 | 1286 | //-------------------Managing Per-Node Debug & Profile Info------------------- |
duke@435 | 1287 | |
duke@435 | 1288 | void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) { |
duke@435 | 1289 | guarantee(arr != NULL, ""); |
duke@435 | 1290 | int num_blocks = arr->length(); |
duke@435 | 1291 | if (grow_by < num_blocks) grow_by = num_blocks; |
duke@435 | 1292 | int num_notes = grow_by * _node_notes_block_size; |
duke@435 | 1293 | Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes); |
duke@435 | 1294 | Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes)); |
duke@435 | 1295 | while (num_notes > 0) { |
duke@435 | 1296 | arr->append(notes); |
duke@435 | 1297 | notes += _node_notes_block_size; |
duke@435 | 1298 | num_notes -= _node_notes_block_size; |
duke@435 | 1299 | } |
duke@435 | 1300 | assert(num_notes == 0, "exact multiple, please"); |
duke@435 | 1301 | } |
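
// Editor's sketch (not part of the original sources): notes are allocated in
// fixed-size blocks and the growable array holds one pointer per block, so a
// lookup is a two-level index. A minimal standalone version with a
// hypothetical block size and types (the real accessors live in compile.hpp):
#include <vector>

struct NodeNotes { /* per-node debug info */ };

static const int kNoteBlockSize = 256;        // hypothetical block size
static std::vector<NodeNotes*> note_blocks;   // one pointer per block

NodeNotes* notes_at(int idx) {
  int block  = idx / kNoteBlockSize;          // which block
  int offset = idx % kNoteBlockSize;          // slot within that block
  if (block >= (int)note_blocks.size()) return nullptr;  // not grown that far
  return &note_blocks[block][offset];
}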
duke@435 | 1302 | |
duke@435 | 1303 | bool Compile::copy_node_notes_to(Node* dest, Node* source) { |
duke@435 | 1304 | if (source == NULL || dest == NULL) return false; |
duke@435 | 1305 | |
duke@435 | 1306 | if (dest->is_Con()) |
duke@435 | 1307 | return false; // Do not push debug info onto constants. |
duke@435 | 1308 | |
duke@435 | 1309 | #ifdef ASSERT |
duke@435 | 1310 | // Leave a bread crumb trail pointing to the original node: |
duke@435 | 1311 | if (dest != NULL && dest != source && dest->debug_orig() == NULL) { |
duke@435 | 1312 | dest->set_debug_orig(source); |
duke@435 | 1313 | } |
duke@435 | 1314 | #endif |
duke@435 | 1315 | |
duke@435 | 1316 | if (node_note_array() == NULL) |
duke@435 | 1317 | return false; // Not collecting any notes now. |
duke@435 | 1318 | |
duke@435 | 1319 | // This is a copy onto a pre-existing node, which may already have notes. |
duke@435 | 1320 | // If both nodes have notes, do not overwrite any pre-existing notes. |
duke@435 | 1321 | Node_Notes* source_notes = node_notes_at(source->_idx); |
duke@435 | 1322 | if (source_notes == NULL || source_notes->is_clear()) return false; |
duke@435 | 1323 | Node_Notes* dest_notes = node_notes_at(dest->_idx); |
duke@435 | 1324 | if (dest_notes == NULL || dest_notes->is_clear()) { |
duke@435 | 1325 | return set_node_notes_at(dest->_idx, source_notes); |
duke@435 | 1326 | } |
duke@435 | 1327 | |
duke@435 | 1328 | Node_Notes merged_notes = (*source_notes); |
duke@435 | 1329 | // The order of operations here ensures that dest notes will win... |
duke@435 | 1330 | merged_notes.update_from(dest_notes); |
duke@435 | 1331 | return set_node_notes_at(dest->_idx, &merged_notes); |
duke@435 | 1332 | } |
duke@435 | 1333 | |
duke@435 | 1334 | |
duke@435 | 1335 | //--------------------------allow_range_check_smearing------------------------- |
duke@435 | 1336 | // Gating condition for coalescing similar range checks. |
duke@435 | 1337 | // Sometimes we try 'speculatively' replacing a series of range checks by a |
duke@435 | 1338 | // single covering check that is at least as strong as any of them. |
duke@435 | 1339 | // If the optimization succeeds, the simplified (strengthened) range check |
duke@435 | 1340 | // will always succeed. If it fails, we will deopt, and then give up |
duke@435 | 1341 | // on the optimization. |
duke@435 | 1342 | bool Compile::allow_range_check_smearing() const { |
duke@435 | 1343 | // If this method has already thrown a range-check exception, |
duke@435 | 1344 | // assume it was because we already tried range smearing |
duke@435 | 1345 | // and it failed. |
duke@435 | 1346 | uint already_trapped = trap_count(Deoptimization::Reason_range_check); |
duke@435 | 1347 | return !already_trapped; |
duke@435 | 1348 | } |
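
// Editor's sketch (not part of the original sources): what smearing buys on a
// straight-line run of accesses. The three per-access checks collapse into one
// covering check that is at least as strong; if the covering check ever fails,
// the compiled code deoptimizes and we recompile without the optimization.
#include <cstddef>
#include <stdexcept>

// Before smearing: one bounds check per access.
int sum3_checked(const int* a, size_t len, size_t i) {
  if (i >= len)      throw std::out_of_range("a[i]");
  if (i + 1 >= len)  throw std::out_of_range("a[i+1]");
  if (i + 2 >= len)  throw std::out_of_range("a[i+2]");
  return a[i] + a[i + 1] + a[i + 2];
}

// After smearing: a single covering check (the deopt point).
int sum3_smeared(const int* a, size_t len, size_t i) {
  if (len < 3 || i > len - 3) throw std::out_of_range("covering check");
  return a[i] + a[i + 1] + a[i + 2];
}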
duke@435 | 1349 | |
duke@435 | 1350 | |
duke@435 | 1351 | //------------------------------flatten_alias_type----------------------------- |
duke@435 | 1352 | const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { |
duke@435 | 1353 | int offset = tj->offset(); |
duke@435 | 1354 | TypePtr::PTR ptr = tj->ptr(); |
duke@435 | 1355 | |
kvn@682 | 1356 | // Known instance (scalarizable allocation) alias only with itself. |
kvn@682 | 1357 | bool is_known_inst = tj->isa_oopptr() != NULL && |
kvn@682 | 1358 | tj->is_oopptr()->is_known_instance(); |
kvn@682 | 1359 | |
duke@435 | 1360 | // Process weird unsafe references. |
duke@435 | 1361 | if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) { |
duke@435 | 1362 | assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops"); |
kvn@682 | 1363 | assert(!is_known_inst, "scalarizable allocation should not have unsafe references"); |
duke@435 | 1364 | tj = TypeOopPtr::BOTTOM; |
duke@435 | 1365 | ptr = tj->ptr(); |
duke@435 | 1366 | offset = tj->offset(); |
duke@435 | 1367 | } |
duke@435 | 1368 | |
duke@435 | 1369 | // Array pointers need some flattening |
duke@435 | 1370 | const TypeAryPtr *ta = tj->isa_aryptr(); |
vlivanov@5658 | 1371 | if (ta && ta->is_stable()) { |
vlivanov@5658 | 1372 | // Erase stability property for alias analysis. |
vlivanov@5658 | 1373 | tj = ta = ta->cast_to_stable(false); |
vlivanov@5658 | 1374 | } |
kvn@682 | 1375 | if( ta && is_known_inst ) { |
kvn@682 | 1376 | if ( offset != Type::OffsetBot && |
kvn@682 | 1377 | offset > arrayOopDesc::length_offset_in_bytes() ) { |
kvn@682 | 1378 | offset = Type::OffsetBot; // Flatten constant access into array body only |
kvn@682 | 1379 | tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id()); |
kvn@682 | 1380 | } |
kvn@682 | 1381 | } else if( ta && _AliasLevel >= 2 ) { |
duke@435 | 1382 | // For arrays indexed by constant indices, we flatten the alias |
duke@435 | 1383 | // space to include all of the array body. Only the header, klass |
duke@435 | 1384 | // and array length can be accessed un-aliased. |
duke@435 | 1385 | if( offset != Type::OffsetBot ) { |
coleenp@4037 | 1386 | if( ta->const_oop() ) { // MethodData* or Method* |
duke@435 | 1387 | offset = Type::OffsetBot; // Flatten constant access into array body |
kvn@682 | 1388 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset); |
duke@435 | 1389 | } else if( offset == arrayOopDesc::length_offset_in_bytes() ) { |
duke@435 | 1390 | // range is OK as-is. |
duke@435 | 1391 | tj = ta = TypeAryPtr::RANGE; |
duke@435 | 1392 | } else if( offset == oopDesc::klass_offset_in_bytes() ) { |
duke@435 | 1393 | tj = TypeInstPtr::KLASS; // all klass loads look alike |
duke@435 | 1394 | ta = TypeAryPtr::RANGE; // generic ignored junk |
duke@435 | 1395 | ptr = TypePtr::BotPTR; |
duke@435 | 1396 | } else if( offset == oopDesc::mark_offset_in_bytes() ) { |
duke@435 | 1397 | tj = TypeInstPtr::MARK; |
duke@435 | 1398 | ta = TypeAryPtr::RANGE; // generic ignored junk |
duke@435 | 1399 | ptr = TypePtr::BotPTR; |
duke@435 | 1400 | } else { // Random constant offset into array body |
duke@435 | 1401 | offset = Type::OffsetBot; // Flatten constant access into array body |
kvn@682 | 1402 | tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset); |
duke@435 | 1403 | } |
duke@435 | 1404 | } |
duke@435 | 1405 | // Arrays of fixed size alias with arrays of unknown size. |
duke@435 | 1406 | if (ta->size() != TypeInt::POS) { |
duke@435 | 1407 | const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS); |
kvn@682 | 1408 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset); |
duke@435 | 1409 | } |
duke@435 | 1410 | // Arrays of known objects become arrays of unknown objects. |
coleenp@548 | 1411 | if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) { |
coleenp@548 | 1412 | const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size()); |
kvn@682 | 1413 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); |
coleenp@548 | 1414 | } |
duke@435 | 1415 | if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) { |
duke@435 | 1416 | const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size()); |
kvn@682 | 1417 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); |
duke@435 | 1418 | } |
duke@435 | 1419 | // Arrays of bytes and of booleans both use 'bastore' and 'baload' so |
duke@435 | 1420 | // they cannot be distinguished by bytecode alone. |
duke@435 | 1421 | if (ta->elem() == TypeInt::BOOL) { |
duke@435 | 1422 | const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size()); |
duke@435 | 1423 | ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE); |
kvn@682 | 1424 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset); |
duke@435 | 1425 | } |
duke@435 | 1426 | // During the 2nd round of IterGVN, NotNull castings are removed. |
duke@435 | 1427 | // Make sure the Bottom and NotNull variants alias the same. |
duke@435 | 1428 | // Also, make sure exact and non-exact variants alias the same. |
roland@5991 | 1429 | if (ptr == TypePtr::NotNull || ta->klass_is_exact() || ta->speculative() != NULL) { |
kvn@2986 | 1430 | tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset); |
duke@435 | 1431 | } |
duke@435 | 1432 | } |
duke@435 | 1433 | |
duke@435 | 1434 | // Oop pointers need some flattening |
duke@435 | 1435 | const TypeInstPtr *to = tj->isa_instptr(); |
duke@435 | 1436 | if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) { |
never@2658 | 1437 | ciInstanceKlass *k = to->klass()->as_instance_klass(); |
duke@435 | 1438 | if( ptr == TypePtr::Constant ) { |
never@2658 | 1439 | if (to->klass() != ciEnv::current()->Class_klass() || |
never@2658 | 1440 | offset < k->size_helper() * wordSize) { |
never@2658 | 1441 | // No constant oop pointers (such as Strings); they alias with |
never@2658 | 1442 | // unknown strings. |
never@2658 | 1443 | assert(!is_known_inst, "not scalarizable allocation"); |
never@2658 | 1444 | tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); |
never@2658 | 1445 | } |
kvn@682 | 1446 | } else if( is_known_inst ) { |
kvn@598 | 1447 | tj = to; // Keep NotNull and klass_is_exact for instance type |
duke@435 | 1448 | } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { |
duke@435 | 1449 | // During the 2nd round of IterGVN, NotNull castings are removed. |
duke@435 | 1450 | // Make sure the Bottom and NotNull variants alias the same. |
duke@435 | 1451 | // Also, make sure exact and non-exact variants alias the same. |
kvn@682 | 1452 | tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); |
duke@435 | 1453 | } |
roland@5991 | 1454 | if (to->speculative() != NULL) { |
roland@5991 | 1455 | tj = to = TypeInstPtr::make(to->ptr(),to->klass(),to->klass_is_exact(),to->const_oop(),to->offset(), to->instance_id()); |
roland@5991 | 1456 | } |
duke@435 | 1457 | // Canonicalize the holder of this field |
coleenp@548 | 1458 | if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { |
duke@435 | 1459 | // First handle header references such as a LoadKlassNode, even if the |
duke@435 | 1460 | // object's klass is unloaded at compile time (4965979). |
kvn@682 | 1461 | if (!is_known_inst) { // Do it only for non-instance types |
kvn@682 | 1462 | tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); |
kvn@682 | 1463 | } |
duke@435 | 1464 | } else if (offset < 0 || offset >= k->size_helper() * wordSize) { |
never@2658 | 1465 | // Static fields are in the space above the normal instance |
never@2658 | 1466 | // fields in the java.lang.Class instance. |
never@2658 | 1467 | if (to->klass() != ciEnv::current()->Class_klass()) { |
never@2658 | 1468 | to = NULL; |
never@2658 | 1469 | tj = TypeOopPtr::BOTTOM; |
never@2658 | 1470 | offset = tj->offset(); |
never@2658 | 1471 | } |
duke@435 | 1472 | } else { |
duke@435 | 1473 | ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); |
duke@435 | 1474 | if (!k->equals(canonical_holder) || tj->offset() != offset) { |
kvn@682 | 1475 | if( is_known_inst ) { |
kvn@682 | 1476 | tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id()); |
kvn@682 | 1477 | } else { |
kvn@682 | 1478 | tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset); |
kvn@682 | 1479 | } |
duke@435 | 1480 | } |
duke@435 | 1481 | } |
duke@435 | 1482 | } |
duke@435 | 1483 | |
duke@435 | 1484 | // Klass pointers to object array klasses need some flattening |
duke@435 | 1485 | const TypeKlassPtr *tk = tj->isa_klassptr(); |
duke@435 | 1486 | if( tk ) { |
duke@435 | 1487 | // If we are referencing a field within a Klass, we need |
duke@435 | 1488 | // to assume the worst case of an Object. Both exact and |
never@3389 | 1489 | // inexact types must flatten to the same alias class so |
never@3389 | 1490 | // use NotNull as the PTR. |
duke@435 | 1491 | if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) { |
duke@435 | 1492 | |
never@3389 | 1493 | tj = tk = TypeKlassPtr::make(TypePtr::NotNull, |
duke@435 | 1494 | TypeKlassPtr::OBJECT->klass(), |
duke@435 | 1495 | offset); |
duke@435 | 1496 | } |
duke@435 | 1497 | |
duke@435 | 1498 | ciKlass* klass = tk->klass(); |
duke@435 | 1499 | if( klass->is_obj_array_klass() ) { |
duke@435 | 1500 | ciKlass* k = TypeAryPtr::OOPS->klass(); |
duke@435 | 1501 | if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs |
duke@435 | 1502 | k = TypeInstPtr::BOTTOM->klass(); |
duke@435 | 1503 | tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset ); |
duke@435 | 1504 | } |
duke@435 | 1505 | |
duke@435 | 1506 | // Check for precise loads from the primary supertype array and force them |
duke@435 | 1507 | // to the supertype cache alias index. Check for generic array loads from |
duke@435 | 1508 | // the primary supertype array and also force them to the supertype cache |
duke@435 | 1509 | // alias index. Since the same load can reach both, we need to merge |
duke@435 | 1510 | // these 2 disparate memories into the same alias class. Since the |
duke@435 | 1511 | // primary supertype array is read-only, there's no chance of confusion |
duke@435 | 1512 | // where we bypass an array load and an array store. |
stefank@3391 | 1513 | int primary_supers_offset = in_bytes(Klass::primary_supers_offset()); |
never@3389 | 1514 | if (offset == Type::OffsetBot || |
never@3389 | 1515 | (offset >= primary_supers_offset && |
never@3389 | 1516 | offset < (int)(primary_supers_offset + Klass::primary_super_limit() * wordSize)) || |
stefank@3391 | 1517 | offset == (int)in_bytes(Klass::secondary_super_cache_offset())) { |
stefank@3391 | 1518 | offset = in_bytes(Klass::secondary_super_cache_offset()); |
duke@435 | 1519 | tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset ); |
duke@435 | 1520 | } |
duke@435 | 1521 | } |
duke@435 | 1522 | |
duke@435 | 1523 | // Flatten all Raw pointers together. |
duke@435 | 1524 | if (tj->base() == Type::RawPtr) |
duke@435 | 1525 | tj = TypeRawPtr::BOTTOM; |
duke@435 | 1526 | |
duke@435 | 1527 | if (tj->base() == Type::AnyPtr) |
duke@435 | 1528 | tj = TypePtr::BOTTOM; // An error, which the caller must check for. |
duke@435 | 1529 | |
duke@435 | 1530 | // Flatten all to bottom for now |
duke@435 | 1531 | switch( _AliasLevel ) { |
duke@435 | 1532 | case 0: |
duke@435 | 1533 | tj = TypePtr::BOTTOM; |
duke@435 | 1534 | break; |
duke@435 | 1535 | case 1: // Flatten to: oop, static, field or array |
duke@435 | 1536 | switch (tj->base()) { |
duke@435 | 1537 | //case Type::AryPtr: tj = TypeAryPtr::RANGE; break; |
duke@435 | 1538 | case Type::RawPtr: tj = TypeRawPtr::BOTTOM; break; |
duke@435 | 1539 | case Type::AryPtr: // do not distinguish arrays at all |
duke@435 | 1540 | case Type::InstPtr: tj = TypeInstPtr::BOTTOM; break; |
duke@435 | 1541 | case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break; |
duke@435 | 1542 | case Type::AnyPtr: tj = TypePtr::BOTTOM; break; // caller checks it |
duke@435 | 1543 | default: ShouldNotReachHere(); |
duke@435 | 1544 | } |
duke@435 | 1545 | break; |
twisti@1040 | 1546 | case 2: // No collapsing at level 2; keep all splits |
twisti@1040 | 1547 | case 3: // No collapsing at level 3; keep all splits |
duke@435 | 1548 | break; |
duke@435 | 1549 | default: |
duke@435 | 1550 | Unimplemented(); |
duke@435 | 1551 | } |
duke@435 | 1552 | |
duke@435 | 1553 | offset = tj->offset(); |
duke@435 | 1554 | assert( offset != Type::OffsetTop, "Offset has fallen from constant" ); |
duke@435 | 1555 | |
duke@435 | 1556 | assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) || |
duke@435 | 1557 | (offset == Type::OffsetBot && tj->base() == Type::AryPtr) || |
duke@435 | 1558 | (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) || |
duke@435 | 1559 | (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) || |
duke@435 | 1560 | (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) || |
duke@435 | 1561 | (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) || |
duke@435 | 1562 | (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr) , |
duke@435 | 1563 | "For oops, klasses, raw offset must be constant; for arrays the offset is never known" ); |
duke@435 | 1564 | assert( tj->ptr() != TypePtr::TopPTR && |
duke@435 | 1565 | tj->ptr() != TypePtr::AnyNull && |
duke@435 | 1566 | tj->ptr() != TypePtr::Null, "No imprecise addresses" ); |
duke@435 | 1567 | // assert( tj->ptr() != TypePtr::Constant || |
duke@435 | 1568 | // tj->base() == Type::RawPtr || |
duke@435 | 1569 | // tj->base() == Type::KlassPtr, "No constant oop addresses" ); |
duke@435 | 1570 | |
duke@435 | 1571 | return tj; |
duke@435 | 1572 | } |
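
// Editor's example (not from the original sources): the effect of flattening
// on two array accesses. A store through a constant index and a load through a
// variable index must land in the same alias class, because the same memory
// location can reach both:
//
//   a[2] = v;     // address type: int[] + constant offset
//   x = a[j];     // address type: int[] + OffsetBot (unknown offset)
//
// Flattening widens the constant offset to OffsetBot ("somewhere in the array
// body"), so both sit in one alias category and the store is never bypassed.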
duke@435 | 1573 | |
duke@435 | 1574 | void Compile::AliasType::Init(int i, const TypePtr* at) { |
duke@435 | 1575 | _index = i; |
duke@435 | 1576 | _adr_type = at; |
duke@435 | 1577 | _field = NULL; |
vlivanov@5658 | 1578 | _element = NULL; |
duke@435 | 1579 | _is_rewritable = true; // default |
duke@435 | 1580 | const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL; |
kvn@658 | 1581 | if (atoop != NULL && atoop->is_known_instance()) { |
kvn@658 | 1582 | const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot); |
duke@435 | 1583 | _general_index = Compile::current()->get_alias_index(gt); |
duke@435 | 1584 | } else { |
duke@435 | 1585 | _general_index = 0; |
duke@435 | 1586 | } |
duke@435 | 1587 | } |
duke@435 | 1588 | |
duke@435 | 1589 | //---------------------------------print_on------------------------------------ |
duke@435 | 1590 | #ifndef PRODUCT |
duke@435 | 1591 | void Compile::AliasType::print_on(outputStream* st) { |
duke@435 | 1592 | if (index() < 10) |
duke@435 | 1593 | st->print("@ <%d> ", index()); |
duke@435 | 1594 | else st->print("@ <%d>", index()); |
duke@435 | 1595 | st->print(is_rewritable() ? " " : " RO"); |
duke@435 | 1596 | int offset = adr_type()->offset(); |
duke@435 | 1597 | if (offset == Type::OffsetBot) |
duke@435 | 1598 | st->print(" +any"); |
duke@435 | 1599 | else st->print(" +%-3d", offset); |
duke@435 | 1600 | st->print(" in "); |
duke@435 | 1601 | adr_type()->dump_on(st); |
duke@435 | 1602 | const TypeOopPtr* tjp = adr_type()->isa_oopptr(); |
duke@435 | 1603 | if (field() != NULL && tjp) { |
duke@435 | 1604 | if (tjp->klass() != field()->holder() || |
duke@435 | 1605 | tjp->offset() != field()->offset_in_bytes()) { |
duke@435 | 1606 | st->print(" != "); |
duke@435 | 1607 | field()->print(); |
duke@435 | 1608 | st->print(" ***"); |
duke@435 | 1609 | } |
duke@435 | 1610 | } |
duke@435 | 1611 | } |
duke@435 | 1612 | |
duke@435 | 1613 | void print_alias_types() { |
duke@435 | 1614 | Compile* C = Compile::current(); |
duke@435 | 1615 | tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1); |
duke@435 | 1616 | for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) { |
duke@435 | 1617 | C->alias_type(idx)->print_on(tty); |
duke@435 | 1618 | tty->cr(); |
duke@435 | 1619 | } |
duke@435 | 1620 | } |
duke@435 | 1621 | #endif |
duke@435 | 1622 | |
duke@435 | 1623 | |
duke@435 | 1624 | //----------------------------probe_alias_cache-------------------------------- |
duke@435 | 1625 | Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) { |
duke@435 | 1626 | intptr_t key = (intptr_t) adr_type; |
duke@435 | 1627 | key ^= key >> logAliasCacheSize; |
duke@435 | 1628 | return &_alias_cache[key & right_n_bits(logAliasCacheSize)]; |
duke@435 | 1629 | } |
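
// Editor's sketch: the probe above is a direct-mapped cache indexed by an
// XOR-folded pointer hash. A standalone version, assuming a small power-of-two
// table (the real size constant lives in compile.hpp):
#include <cstdint>

struct CacheEntry { const void* _adr_type; int _index; };

static const int  kLogCacheSize = 8;                // assumed table size: 2^8
static CacheEntry cache_table[1 << kLogCacheSize];

CacheEntry* probe(const void* adr_type) {
  std::intptr_t key = (std::intptr_t) adr_type;
  key ^= key >> kLogCacheSize;                      // fold upper bits down
  return &cache_table[key & ((1 << kLogCacheSize) - 1)];  // mask to an index
}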
duke@435 | 1630 | |
duke@435 | 1631 | |
duke@435 | 1632 | //-----------------------------grow_alias_types-------------------------------- |
duke@435 | 1633 | void Compile::grow_alias_types() { |
duke@435 | 1634 | const int old_ats = _max_alias_types; // how many before? |
duke@435 | 1635 | const int new_ats = old_ats; // how many more? |
duke@435 | 1636 | const int grow_ats = old_ats+new_ats; // how many now? |
duke@435 | 1637 | _max_alias_types = grow_ats; |
duke@435 | 1638 | _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats); |
duke@435 | 1639 | AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats); |
duke@435 | 1640 | Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats); |
duke@435 | 1641 | for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i]; |
duke@435 | 1642 | } |
duke@435 | 1643 | |
duke@435 | 1644 | |
duke@435 | 1645 | //--------------------------------find_alias_type------------------------------ |
never@2658 | 1646 | Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create, ciField* original_field) { |
duke@435 | 1647 | if (_AliasLevel == 0) |
duke@435 | 1648 | return alias_type(AliasIdxBot); |
duke@435 | 1649 | |
duke@435 | 1650 | AliasCacheEntry* ace = probe_alias_cache(adr_type); |
duke@435 | 1651 | if (ace->_adr_type == adr_type) { |
duke@435 | 1652 | return alias_type(ace->_index); |
duke@435 | 1653 | } |
duke@435 | 1654 | |
duke@435 | 1655 | // Handle special cases. |
duke@435 | 1656 | if (adr_type == NULL) return alias_type(AliasIdxTop); |
duke@435 | 1657 | if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot); |
duke@435 | 1658 | |
duke@435 | 1659 | // Do it the slow way. |
duke@435 | 1660 | const TypePtr* flat = flatten_alias_type(adr_type); |
duke@435 | 1661 | |
duke@435 | 1662 | #ifdef ASSERT |
duke@435 | 1663 | assert(flat == flatten_alias_type(flat), "idempotent"); |
duke@435 | 1664 | assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr"); |
duke@435 | 1665 | if (flat->isa_oopptr() && !flat->isa_klassptr()) { |
duke@435 | 1666 | const TypeOopPtr* foop = flat->is_oopptr(); |
kvn@682 | 1667 | // Scalarizable allocations have exact klass always. |
kvn@682 | 1668 | bool exact = !foop->klass_is_exact() || foop->is_known_instance(); |
kvn@682 | 1669 | const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr(); |
duke@435 | 1670 | assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type"); |
duke@435 | 1671 | } |
duke@435 | 1672 | assert(flat == flatten_alias_type(flat), "exact bit doesn't matter"); |
duke@435 | 1673 | #endif |
duke@435 | 1674 | |
duke@435 | 1675 | int idx = AliasIdxTop; |
duke@435 | 1676 | for (int i = 0; i < num_alias_types(); i++) { |
duke@435 | 1677 | if (alias_type(i)->adr_type() == flat) { |
duke@435 | 1678 | idx = i; |
duke@435 | 1679 | break; |
duke@435 | 1680 | } |
duke@435 | 1681 | } |
duke@435 | 1682 | |
duke@435 | 1683 | if (idx == AliasIdxTop) { |
duke@435 | 1684 | if (no_create) return NULL; |
duke@435 | 1685 | // Grow the array if necessary. |
duke@435 | 1686 | if (_num_alias_types == _max_alias_types) grow_alias_types(); |
duke@435 | 1687 | // Add a new alias type. |
duke@435 | 1688 | idx = _num_alias_types++; |
duke@435 | 1689 | _alias_types[idx]->Init(idx, flat); |
duke@435 | 1690 | if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false); |
duke@435 | 1691 | if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false); |
duke@435 | 1692 | if (flat->isa_instptr()) { |
duke@435 | 1693 | if (flat->offset() == java_lang_Class::klass_offset_in_bytes() |
duke@435 | 1694 | && flat->is_instptr()->klass() == env()->Class_klass()) |
duke@435 | 1695 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1696 | } |
vlivanov@5658 | 1697 | if (flat->isa_aryptr()) { |
vlivanov@5658 | 1698 | #ifdef ASSERT |
vlivanov@5658 | 1699 | const int header_size_min = arrayOopDesc::base_offset_in_bytes(T_BYTE); |
vlivanov@5658 | 1700 | // (T_BYTE has the weakest alignment and size restrictions...) |
vlivanov@5658 | 1701 | assert(flat->offset() < header_size_min, "array body reference must be OffsetBot"); |
vlivanov@5658 | 1702 | #endif |
vlivanov@5658 | 1703 | if (flat->offset() == TypePtr::OffsetBot) { |
vlivanov@5658 | 1704 | alias_type(idx)->set_element(flat->is_aryptr()->elem()); |
vlivanov@5658 | 1705 | } |
vlivanov@5658 | 1706 | } |
duke@435 | 1707 | if (flat->isa_klassptr()) { |
stefank@3391 | 1708 | if (flat->offset() == in_bytes(Klass::super_check_offset_offset())) |
duke@435 | 1709 | alias_type(idx)->set_rewritable(false); |
stefank@3391 | 1710 | if (flat->offset() == in_bytes(Klass::modifier_flags_offset())) |
duke@435 | 1711 | alias_type(idx)->set_rewritable(false); |
stefank@3391 | 1712 | if (flat->offset() == in_bytes(Klass::access_flags_offset())) |
duke@435 | 1713 | alias_type(idx)->set_rewritable(false); |
stefank@3391 | 1714 | if (flat->offset() == in_bytes(Klass::java_mirror_offset())) |
duke@435 | 1715 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1716 | } |
duke@435 | 1717 | // %%% (We would like to finalize JavaThread::threadObj_offset(), |
duke@435 | 1718 | // but the base pointer type is not distinctive enough to identify |
duke@435 | 1719 | // references into JavaThread.) |
duke@435 | 1720 | |
never@2658 | 1721 | // Check for final fields. |
duke@435 | 1722 | const TypeInstPtr* tinst = flat->isa_instptr(); |
coleenp@548 | 1723 | if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { |
never@2658 | 1724 | ciField* field; |
never@2658 | 1725 | if (tinst->const_oop() != NULL && |
never@2658 | 1726 | tinst->klass() == ciEnv::current()->Class_klass() && |
never@2658 | 1727 | tinst->offset() >= (tinst->klass()->as_instance_klass()->size_helper() * wordSize)) { |
never@2658 | 1728 | // static field |
never@2658 | 1729 | ciInstanceKlass* k = tinst->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); |
never@2658 | 1730 | field = k->get_field_by_offset(tinst->offset(), true); |
never@2658 | 1731 | } else { |
never@2658 | 1732 | ciInstanceKlass *k = tinst->klass()->as_instance_klass(); |
never@2658 | 1733 | field = k->get_field_by_offset(tinst->offset(), false); |
never@2658 | 1734 | } |
never@2658 | 1735 | assert(field == NULL || |
never@2658 | 1736 | original_field == NULL || |
never@2658 | 1737 | (field->holder() == original_field->holder() && |
never@2658 | 1738 | field->offset() == original_field->offset() && |
never@2658 | 1739 | field->is_static() == original_field->is_static()), "wrong field?"); |
duke@435 | 1740 | // Set field() and is_rewritable() attributes. |
duke@435 | 1741 | if (field != NULL) alias_type(idx)->set_field(field); |
duke@435 | 1742 | } |
duke@435 | 1743 | } |
duke@435 | 1744 | |
duke@435 | 1745 | // Fill the cache for next time. |
duke@435 | 1746 | ace->_adr_type = adr_type; |
duke@435 | 1747 | ace->_index = idx; |
duke@435 | 1748 | assert(alias_type(adr_type) == alias_type(idx), "type must be installed"); |
duke@435 | 1749 | |
duke@435 | 1750 | // Might as well try to fill the cache for the flattened version, too. |
duke@435 | 1751 | AliasCacheEntry* face = probe_alias_cache(flat); |
duke@435 | 1752 | if (face->_adr_type == NULL) { |
duke@435 | 1753 | face->_adr_type = flat; |
duke@435 | 1754 | face->_index = idx; |
duke@435 | 1755 | assert(alias_type(flat) == alias_type(idx), "flat type must work too"); |
duke@435 | 1756 | } |
duke@435 | 1757 | |
duke@435 | 1758 | return alias_type(idx); |
duke@435 | 1759 | } |
duke@435 | 1760 | |
duke@435 | 1761 | |
duke@435 | 1762 | Compile::AliasType* Compile::alias_type(ciField* field) { |
duke@435 | 1763 | const TypeOopPtr* t; |
duke@435 | 1764 | if (field->is_static()) |
never@2658 | 1765 | t = TypeInstPtr::make(field->holder()->java_mirror()); |
duke@435 | 1766 | else |
duke@435 | 1767 | t = TypeOopPtr::make_from_klass_raw(field->holder()); |
never@2658 | 1768 | AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes()), field); |
vlivanov@5658 | 1769 | assert((field->is_final() || field->is_stable()) == !atp->is_rewritable(), "must get the rewritable bits correct"); |
duke@435 | 1770 | return atp; |
duke@435 | 1771 | } |
duke@435 | 1772 | |
duke@435 | 1773 | |
duke@435 | 1774 | //------------------------------have_alias_type-------------------------------- |
duke@435 | 1775 | bool Compile::have_alias_type(const TypePtr* adr_type) { |
duke@435 | 1776 | AliasCacheEntry* ace = probe_alias_cache(adr_type); |
duke@435 | 1777 | if (ace->_adr_type == adr_type) { |
duke@435 | 1778 | return true; |
duke@435 | 1779 | } |
duke@435 | 1780 | |
duke@435 | 1781 | // Handle special cases. |
duke@435 | 1782 | if (adr_type == NULL) return true; |
duke@435 | 1783 | if (adr_type == TypePtr::BOTTOM) return true; |
duke@435 | 1784 | |
never@2658 | 1785 | return find_alias_type(adr_type, true, NULL) != NULL; |
duke@435 | 1786 | } |
duke@435 | 1787 | |
duke@435 | 1788 | //-----------------------------must_alias-------------------------------------- |
duke@435 | 1789 | // True if all values of the given address type are in the given alias category. |
duke@435 | 1790 | bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) { |
duke@435 | 1791 | if (alias_idx == AliasIdxBot) return true; // the universal category |
duke@435 | 1792 | if (adr_type == NULL) return true; // NULL serves as TypePtr::TOP |
duke@435 | 1793 | if (alias_idx == AliasIdxTop) return false; // the empty category |
duke@435 | 1794 | if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins |
duke@435 | 1795 | |
duke@435 | 1796 | // the only remaining possible overlap is identity |
duke@435 | 1797 | int adr_idx = get_alias_index(adr_type); |
duke@435 | 1798 | assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, ""); |
duke@435 | 1799 | assert(adr_idx == alias_idx || |
duke@435 | 1800 | (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM |
duke@435 | 1801 | && adr_type != TypeOopPtr::BOTTOM), |
duke@435 | 1802 | "should not be testing for overlap with an unsafe pointer"); |
duke@435 | 1803 | return adr_idx == alias_idx; |
duke@435 | 1804 | } |
duke@435 | 1805 | |
duke@435 | 1806 | //------------------------------can_alias-------------------------------------- |
duke@435 | 1807 | // True if any values of the given address type are in the given alias category. |
duke@435 | 1808 | bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) { |
duke@435 | 1809 | if (alias_idx == AliasIdxTop) return false; // the empty category |
duke@435 | 1810 | if (adr_type == NULL) return false; // NULL serves as TypePtr::TOP |
duke@435 | 1811 | if (alias_idx == AliasIdxBot) return true; // the universal category |
duke@435 | 1812 | if (adr_type->base() == Type::AnyPtr) return true; // TypePtr::BOTTOM or its twins |
duke@435 | 1813 | |
duke@435 | 1814 | // the only remaining possible overlap is identity |
duke@435 | 1815 | int adr_idx = get_alias_index(adr_type); |
duke@435 | 1816 | assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, ""); |
duke@435 | 1817 | return adr_idx == alias_idx; |
duke@435 | 1818 | } |
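
// Editor's summary: how the two queries relate for a precise address type.
// must_alias implies can_alias; the distinguished indices bound the lattice.
//
//   alias_idx          must_alias   can_alias
//   AliasIdxTop        false        false      (the empty category)
//   AliasIdxBot        true         true       (the universal category)
//   matching index     true         true
//   other index        false        false
//
// For an imprecise adr_type (base AnyPtr, e.g. TypePtr::BOTTOM), must_alias
// is false but can_alias is true: it may overlap any category.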
duke@435 | 1819 | |
duke@435 | 1820 | |
duke@435 | 1821 | |
duke@435 | 1822 | //---------------------------pop_warm_call------------------------------------- |
duke@435 | 1823 | WarmCallInfo* Compile::pop_warm_call() { |
duke@435 | 1824 | WarmCallInfo* wci = _warm_calls; |
duke@435 | 1825 | if (wci != NULL) _warm_calls = wci->remove_from(wci); |
duke@435 | 1826 | return wci; |
duke@435 | 1827 | } |
duke@435 | 1828 | |
duke@435 | 1829 | //----------------------------Inline_Warm-------------------------------------- |
duke@435 | 1830 | int Compile::Inline_Warm() { |
duke@435 | 1831 | // If there is room, try to inline some more warm call sites. |
duke@435 | 1832 | // %%% Do a graph index compaction pass when we think we're out of space? |
duke@435 | 1833 | if (!InlineWarmCalls) return 0; |
duke@435 | 1834 | |
duke@435 | 1835 | int calls_made_hot = 0; |
duke@435 | 1836 | int room_to_grow = NodeCountInliningCutoff - unique(); |
duke@435 | 1837 | int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep); |
duke@435 | 1838 | int amount_grown = 0; |
duke@435 | 1839 | WarmCallInfo* call; |
duke@435 | 1840 | while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) { |
duke@435 | 1841 | int est_size = (int)call->size(); |
duke@435 | 1842 | if (est_size > (room_to_grow - amount_grown)) { |
duke@435 | 1843 | // This one won't fit anyway. Get rid of it. |
duke@435 | 1844 | call->make_cold(); |
duke@435 | 1845 | continue; |
duke@435 | 1846 | } |
duke@435 | 1847 | call->make_hot(); |
duke@435 | 1848 | calls_made_hot++; |
duke@435 | 1849 | amount_grown += est_size; |
duke@435 | 1850 | amount_to_grow -= est_size; |
duke@435 | 1851 | } |
duke@435 | 1852 | |
duke@435 | 1853 | if (calls_made_hot > 0) set_major_progress(); |
duke@435 | 1854 | return calls_made_hot; |
duke@435 | 1855 | } |
duke@435 | 1856 | |
duke@435 | 1857 | |
duke@435 | 1858 | //----------------------------Finish_Warm-------------------------------------- |
duke@435 | 1859 | void Compile::Finish_Warm() { |
duke@435 | 1860 | if (!InlineWarmCalls) return; |
duke@435 | 1861 | if (failing()) return; |
duke@435 | 1862 | if (warm_calls() == NULL) return; |
duke@435 | 1863 | |
duke@435 | 1864 | // Clean up loose ends, if we are out of space for inlining. |
duke@435 | 1865 | WarmCallInfo* call; |
duke@435 | 1866 | while ((call = pop_warm_call()) != NULL) { |
duke@435 | 1867 | call->make_cold(); |
duke@435 | 1868 | } |
duke@435 | 1869 | } |
duke@435 | 1870 | |
cfang@1607 | 1871 | //---------------------cleanup_loop_predicates----------------------- |
cfang@1607 | 1872 | // Remove the opaque nodes that protect the predicates so that all unused |
cfang@1607 | 1873 | // checks and uncommon_traps will be eliminated from the ideal graph |
cfang@1607 | 1874 | void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) { |
cfang@1607 | 1875 | if (predicate_count()==0) return; |
cfang@1607 | 1876 | for (int i = predicate_count(); i > 0; i--) { |
cfang@1607 | 1877 | Node * n = predicate_opaque1_node(i-1); |
cfang@1607 | 1878 | assert(n->Opcode() == Op_Opaque1, "must be"); |
cfang@1607 | 1879 | igvn.replace_node(n, n->in(1)); |
cfang@1607 | 1880 | } |
cfang@1607 | 1881 | assert(predicate_count()==0, "should be clean!"); |
cfang@1607 | 1882 | } |
duke@435 | 1883 | |
roland@4409 | 1884 | // StringOpts and late inlining of string methods |
roland@4409 | 1885 | void Compile::inline_string_calls(bool parse_time) { |
roland@4409 | 1886 | { |
roland@4409 | 1887 | // remove useless nodes to make the usage analysis simpler |
roland@4409 | 1888 | ResourceMark rm; |
roland@4409 | 1889 | PhaseRemoveUseless pru(initial_gvn(), for_igvn()); |
roland@4409 | 1890 | } |
roland@4409 | 1891 | |
roland@4409 | 1892 | { |
roland@4409 | 1893 | ResourceMark rm; |
sla@5237 | 1894 | print_method(PHASE_BEFORE_STRINGOPTS, 3); |
roland@4409 | 1895 | PhaseStringOpts pso(initial_gvn(), for_igvn()); |
sla@5237 | 1896 | print_method(PHASE_AFTER_STRINGOPTS, 3); |
roland@4409 | 1897 | } |
roland@4409 | 1898 | |
roland@4409 | 1899 | // now inline anything that we skipped the first time around |
roland@4409 | 1900 | if (!parse_time) { |
roland@4409 | 1901 | _late_inlines_pos = _late_inlines.length(); |
roland@4409 | 1902 | } |
roland@4409 | 1903 | |
roland@4409 | 1904 | while (_string_late_inlines.length() > 0) { |
roland@4409 | 1905 | CallGenerator* cg = _string_late_inlines.pop(); |
roland@4409 | 1906 | cg->do_late_inline(); |
roland@4409 | 1907 | if (failing()) return; |
roland@4409 | 1908 | } |
roland@4409 | 1909 | _string_late_inlines.trunc_to(0); |
roland@4409 | 1910 | } |
roland@4409 | 1911 | |
kvn@5110 | 1912 | // Late inlining of boxing methods |
kvn@5110 | 1913 | void Compile::inline_boxing_calls(PhaseIterGVN& igvn) { |
kvn@5110 | 1914 | if (_boxing_late_inlines.length() > 0) { |
kvn@5110 | 1915 | assert(has_boxed_value(), "inconsistent"); |
kvn@5110 | 1916 | |
kvn@5110 | 1917 | PhaseGVN* gvn = initial_gvn(); |
kvn@5110 | 1918 | set_inlining_incrementally(true); |
kvn@5110 | 1919 | |
kvn@5110 | 1920 | assert( igvn._worklist.size() == 0, "should be done with igvn" ); |
kvn@5110 | 1921 | for_igvn()->clear(); |
kvn@5110 | 1922 | gvn->replace_with(&igvn); |
kvn@5110 | 1923 | |
roland@7041 | 1924 | _late_inlines_pos = _late_inlines.length(); |
roland@7041 | 1925 | |
kvn@5110 | 1926 | while (_boxing_late_inlines.length() > 0) { |
kvn@5110 | 1927 | CallGenerator* cg = _boxing_late_inlines.pop(); |
kvn@5110 | 1928 | cg->do_late_inline(); |
kvn@5110 | 1929 | if (failing()) return; |
kvn@5110 | 1930 | } |
kvn@5110 | 1931 | _boxing_late_inlines.trunc_to(0); |
kvn@5110 | 1932 | |
kvn@5110 | 1933 | { |
kvn@5110 | 1934 | ResourceMark rm; |
kvn@5110 | 1935 | PhaseRemoveUseless pru(gvn, for_igvn()); |
kvn@5110 | 1936 | } |
kvn@5110 | 1937 | |
kvn@5110 | 1938 | igvn = PhaseIterGVN(gvn); |
kvn@5110 | 1939 | igvn.optimize(); |
kvn@5110 | 1940 | |
kvn@5110 | 1941 | set_inlining_progress(false); |
kvn@5110 | 1942 | set_inlining_incrementally(false); |
kvn@5110 | 1943 | } |
kvn@5110 | 1944 | } |
kvn@5110 | 1945 | |
roland@4409 | 1946 | void Compile::inline_incrementally_one(PhaseIterGVN& igvn) { |
roland@4409 | 1947 | assert(IncrementalInline, "incremental inlining should be on"); |
roland@4409 | 1948 | PhaseGVN* gvn = initial_gvn(); |
roland@4409 | 1949 | |
roland@4409 | 1950 | set_inlining_progress(false); |
roland@4409 | 1951 | for_igvn()->clear(); |
roland@4409 | 1952 | gvn->replace_with(&igvn); |
roland@4409 | 1953 | |
roland@4409 | 1954 | int i = 0; |
roland@4409 | 1955 | |
roland@4409 | 1956 | for (; i <_late_inlines.length() && !inlining_progress(); i++) { |
roland@4409 | 1957 | CallGenerator* cg = _late_inlines.at(i); |
roland@4409 | 1958 | _late_inlines_pos = i+1; |
roland@4409 | 1959 | cg->do_late_inline(); |
roland@4409 | 1960 | if (failing()) return; |
roland@4409 | 1961 | } |
roland@4409 | 1962 | int j = 0; |
roland@4409 | 1963 | for (; i < _late_inlines.length(); i++, j++) { |
roland@4409 | 1964 | _late_inlines.at_put(j, _late_inlines.at(i)); |
roland@4409 | 1965 | } |
roland@4409 | 1966 | _late_inlines.trunc_to(j); |
roland@4409 | 1967 | |
roland@4409 | 1968 | { |
roland@4409 | 1969 | ResourceMark rm; |
kvn@5110 | 1970 | PhaseRemoveUseless pru(gvn, for_igvn()); |
roland@4409 | 1971 | } |
roland@4409 | 1972 | |
roland@4409 | 1973 | igvn = PhaseIterGVN(gvn); |
roland@4409 | 1974 | } |
roland@4409 | 1975 | |
roland@4409 | 1976 | // Perform incremental inlining until bound on number of live nodes is reached |
roland@4409 | 1977 | void Compile::inline_incrementally(PhaseIterGVN& igvn) { |
roland@4409 | 1978 | PhaseGVN* gvn = initial_gvn(); |
roland@4409 | 1979 | |
roland@4409 | 1980 | set_inlining_incrementally(true); |
roland@4409 | 1981 | set_inlining_progress(true); |
roland@4409 | 1982 | uint low_live_nodes = 0; |
roland@4409 | 1983 | |
roland@4409 | 1984 | while(inlining_progress() && _late_inlines.length() > 0) { |
roland@4409 | 1985 | |
roland@4409 | 1986 | if (live_nodes() > (uint)LiveNodeCountInliningCutoff) { |
roland@4409 | 1987 | if (low_live_nodes < (uint)LiveNodeCountInliningCutoff * 8 / 10) { |
roland@4409 | 1988 | // PhaseIdealLoop is expensive so we only try it once we are |
roland@7041 | 1989 | // out of live nodes and we only try it again if the previous |
roland@7041 | 1990 | // attempt helped get the number of nodes down significantly |
roland@4409 | 1991 | PhaseIdealLoop ideal_loop( igvn, false, true ); |
roland@4409 | 1992 | if (failing()) return; |
roland@4409 | 1993 | low_live_nodes = live_nodes(); |
roland@4409 | 1994 | _major_progress = true; |
roland@4409 | 1995 | } |
roland@4409 | 1996 | |
roland@4409 | 1997 | if (live_nodes() > (uint)LiveNodeCountInliningCutoff) { |
roland@4409 | 1998 | break; |
roland@4409 | 1999 | } |
roland@4409 | 2000 | } |
roland@4409 | 2001 | |
roland@4409 | 2002 | inline_incrementally_one(igvn); |
roland@4409 | 2003 | |
roland@4409 | 2004 | if (failing()) return; |
roland@4409 | 2005 | |
roland@4409 | 2006 | igvn.optimize(); |
roland@4409 | 2007 | |
roland@4409 | 2008 | if (failing()) return; |
roland@4409 | 2009 | } |
roland@4409 | 2010 | |
roland@4409 | 2011 | assert( igvn._worklist.size() == 0, "should be done with igvn" ); |
roland@4409 | 2012 | |
roland@4409 | 2013 | if (_string_late_inlines.length() > 0) { |
roland@4409 | 2014 | assert(has_stringbuilder(), "inconsistent"); |
roland@4409 | 2015 | for_igvn()->clear(); |
roland@4409 | 2016 | initial_gvn()->replace_with(&igvn); |
roland@4409 | 2017 | |
roland@4409 | 2018 | inline_string_calls(false); |
roland@4409 | 2019 | |
roland@4409 | 2020 | if (failing()) return; |
roland@4409 | 2021 | |
roland@4409 | 2022 | { |
roland@4409 | 2023 | ResourceMark rm; |
roland@4409 | 2024 | PhaseRemoveUseless pru(initial_gvn(), for_igvn()); |
roland@4409 | 2025 | } |
roland@4409 | 2026 | |
roland@4409 | 2027 | igvn = PhaseIterGVN(gvn); |
roland@4409 | 2028 | |
roland@4409 | 2029 | igvn.optimize(); |
roland@4409 | 2030 | } |
roland@4409 | 2031 | |
roland@4409 | 2032 | set_inlining_incrementally(false); |
roland@4409 | 2033 | } |
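
// Editor's sketch (not from the original sources): the driver above as a
// generic budgeted work-list loop -- keep expanding candidates while each
// round makes progress, and run one (expensive) cleanup pass when the
// live-node budget is exceeded, retrying it only if the last cleanup brought
// the count well below the cutoff. All names here are hypothetical.
#include <cstddef>
#include <vector>

struct Candidate { /* a deferred inlining site */ };

void drive(std::vector<Candidate>& worklist, size_t budget,
           size_t (*live)(), void (*cleanup)(), bool (*expand_one)(Candidate&)) {
  size_t low_water = 0;
  bool progress = true;
  while (progress && !worklist.empty()) {
    if (live() > budget) {
      if (low_water < budget * 8 / 10) {    // last cleanup left plenty of room
        cleanup();                          // e.g. a dead-code elimination pass
        low_water = live();
      }
      if (live() > budget) break;           // still over budget: stop expanding
    }
    Candidate c = worklist.back();
    worklist.pop_back();
    progress = expand_one(c);               // inline one site; report progress
  }
}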
roland@4409 | 2034 | |
roland@4409 | 2035 | |
duke@435 | 2036 | //------------------------------Optimize--------------------------------------- |
duke@435 | 2037 | // Given a graph, optimize it. |
duke@435 | 2038 | void Compile::Optimize() { |
duke@435 | 2039 | TracePhase t1("optimizer", &_t_optimizer, true); |
duke@435 | 2040 | |
duke@435 | 2041 | #ifndef PRODUCT |
duke@435 | 2042 | if (env()->break_at_compile()) { |
duke@435 | 2043 | BREAKPOINT; |
duke@435 | 2044 | } |
duke@435 | 2045 | |
duke@435 | 2046 | #endif |
duke@435 | 2047 | |
duke@435 | 2048 | ResourceMark rm; |
duke@435 | 2049 | int loop_opts_cnt; |
duke@435 | 2050 | |
duke@435 | 2051 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 2052 | |
sla@5237 | 2053 | print_method(PHASE_AFTER_PARSING); |
duke@435 | 2054 | |
duke@435 | 2055 | { |
duke@435 | 2056 | // Iterative Global Value Numbering, including ideal transforms |
duke@435 | 2057 | // Initialize IterGVN with types and values from parse-time GVN |
duke@435 | 2058 | PhaseIterGVN igvn(initial_gvn()); |
duke@435 | 2059 | { |
duke@435 | 2060 | NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); ) |
duke@435 | 2061 | igvn.optimize(); |
duke@435 | 2062 | } |
duke@435 | 2063 | |
sla@5237 | 2064 | print_method(PHASE_ITER_GVN1, 2); |
duke@435 | 2065 | |
duke@435 | 2066 | if (failing()) return; |
duke@435 | 2067 | |
kvn@5110 | 2068 | { |
kvn@5110 | 2069 | NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); ) |
kvn@5110 | 2070 | inline_incrementally(igvn); |
kvn@5110 | 2071 | } |
roland@4409 | 2072 | |
sla@5237 | 2073 | print_method(PHASE_INCREMENTAL_INLINE, 2); |
roland@4409 | 2074 | |
roland@4409 | 2075 | if (failing()) return; |
roland@4409 | 2076 | |
kvn@5110 | 2077 | if (eliminate_boxing()) { |
kvn@5110 | 2078 | NOT_PRODUCT( TracePhase t2("incrementalInline", &_t_incrInline, TimeCompiler); ) |
kvn@5110 | 2079 | // Inline valueOf() methods now. |
kvn@5110 | 2080 | inline_boxing_calls(igvn); |
kvn@5110 | 2081 | |
roland@7041 | 2082 | if (AlwaysIncrementalInline) { |
roland@7041 | 2083 | inline_incrementally(igvn); |
roland@7041 | 2084 | } |
roland@7041 | 2085 | |
sla@5237 | 2086 | print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2); |
kvn@5110 | 2087 | |
kvn@5110 | 2088 | if (failing()) return; |
kvn@5110 | 2089 | } |
kvn@5110 | 2090 | |
roland@5991 | 2091 | // Remove the speculative part of types and clean up the graph from |
roland@5991 | 2092 | // the extra CastPP nodes whose only purpose is to carry them. Do |
roland@5991 | 2093 | // that early so that optimizations are not disrupted by the extra |
roland@5991 | 2094 | // CastPP nodes. |
roland@5991 | 2095 | remove_speculative_types(igvn); |
roland@5991 | 2096 | |
roland@4589 | 2097 | // No more new expensive nodes will be added to the list from here |
roland@4589 | 2098 | // so keep only the actual candidates for optimizations. |
roland@4589 | 2099 | cleanup_expensive_nodes(igvn); |
roland@4589 | 2100 | |
kvn@1989 | 2101 | // Perform escape analysis |
kvn@1989 | 2102 | if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) { |
kvn@3260 | 2103 | if (has_loops()) { |
kvn@3260 | 2104 | // Cleanup graph (remove dead nodes). |
kvn@3260 | 2105 | TracePhase t2("idealLoop", &_t_idealLoop, true); |
kvn@3260 | 2106 | PhaseIdealLoop ideal_loop( igvn, false, true ); |
sla@5237 | 2107 | if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2); |
kvn@3260 | 2108 | if (failing()) return; |
kvn@3260 | 2109 | } |
kvn@1989 | 2110 | ConnectionGraph::do_analysis(this, &igvn); |
kvn@1989 | 2111 | |
kvn@1989 | 2112 | if (failing()) return; |
kvn@1989 | 2113 | |
kvn@3311 | 2114 | // Optimize out field loads from scalar replaceable allocations. |
kvn@1989 | 2115 | igvn.optimize(); |
sla@5237 | 2116 | print_method(PHASE_ITER_GVN_AFTER_EA, 2); |
kvn@1989 | 2117 | |
kvn@1989 | 2118 | if (failing()) return; |
kvn@1989 | 2119 | |
kvn@3311 | 2120 | if (congraph() != NULL && macro_count() > 0) { |
kvn@3651 | 2121 | NOT_PRODUCT( TracePhase t2("macroEliminate", &_t_macroEliminate, TimeCompiler); ) |
kvn@3311 | 2122 | PhaseMacroExpand mexp(igvn); |
kvn@3311 | 2123 | mexp.eliminate_macro_nodes(); |
kvn@3311 | 2124 | igvn.set_delay_transform(false); |
kvn@3311 | 2125 | |
kvn@3311 | 2126 | igvn.optimize(); |
sla@5237 | 2127 | print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2); |
kvn@3311 | 2128 | |
kvn@3311 | 2129 | if (failing()) return; |
kvn@3311 | 2130 | } |
kvn@1989 | 2131 | } |
kvn@1989 | 2132 | |
duke@435 | 2133 | // Loop transforms on the ideal graph. Range Check Elimination, |
duke@435 | 2134 | // peeling, unrolling, etc. |
duke@435 | 2135 | |
duke@435 | 2136 | // Set loop opts counter |
duke@435 | 2137 | loop_opts_cnt = num_loop_opts(); |
duke@435 | 2138 | if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) { |
duke@435 | 2139 | { |
duke@435 | 2140 | TracePhase t2("idealLoop", &_t_idealLoop, true); |
kvn@2727 | 2141 | PhaseIdealLoop ideal_loop( igvn, true ); |
duke@435 | 2142 | loop_opts_cnt--; |
sla@5237 | 2143 | if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2); |
duke@435 | 2144 | if (failing()) return; |
duke@435 | 2145 | } |
duke@435 | 2146 | // Loop opts pass if partial peeling occurred in previous pass |
duke@435 | 2147 | if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) { |
duke@435 | 2148 | TracePhase t3("idealLoop", &_t_idealLoop, true); |
kvn@2727 | 2149 | PhaseIdealLoop ideal_loop( igvn, false ); |
duke@435 | 2150 | loop_opts_cnt--; |
sla@5237 | 2151 | if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2); |
duke@435 | 2152 | if (failing()) return; |
duke@435 | 2153 | } |
duke@435 | 2154 | // Loop opts pass for loop-unrolling before CCP |
duke@435 | 2155 | if(major_progress() && (loop_opts_cnt > 0)) { |
duke@435 | 2156 | TracePhase t4("idealLoop", &_t_idealLoop, true); |
kvn@2727 | 2157 | PhaseIdealLoop ideal_loop( igvn, false ); |
duke@435 | 2158 | loop_opts_cnt--; |
sla@5237 | 2159 | if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2); |
duke@435 | 2160 | } |
never@1356 | 2161 | if (!failing()) { |
never@1356 | 2162 | // Verify that the last round of loop opts produced a valid graph |
never@1356 | 2163 | NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); ) |
never@1356 | 2164 | PhaseIdealLoop::verify(igvn); |
never@1356 | 2165 | } |
duke@435 | 2166 | } |
duke@435 | 2167 | if (failing()) return; |
duke@435 | 2168 | |
duke@435 | 2169 | // Conditional Constant Propagation |
duke@435 | 2170 | PhaseCCP ccp( &igvn ); |
duke@435 | 2171 | assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)"); |
duke@435 | 2172 | { |
duke@435 | 2173 | TracePhase t2("ccp", &_t_ccp, true); |
duke@435 | 2174 | ccp.do_transform(); |
duke@435 | 2175 | } |
sla@5237 | 2176 | print_method(PHASE_CPP1, 2); |
duke@435 | 2177 | |
duke@435 | 2178 | assert( true, "Break here to ccp.dump_old2new_map()"); |
duke@435 | 2179 | |
duke@435 | 2180 | // Iterative Global Value Numbering, including ideal transforms |
duke@435 | 2181 | { |
duke@435 | 2182 | NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); ) |
duke@435 | 2183 | igvn = ccp; |
duke@435 | 2184 | igvn.optimize(); |
duke@435 | 2185 | } |
duke@435 | 2186 | |
sla@5237 | 2187 | print_method(PHASE_ITER_GVN2, 2); |
duke@435 | 2188 | |
duke@435 | 2189 | if (failing()) return; |
duke@435 | 2190 | |
duke@435 | 2191 | // Loop transforms on the ideal graph. Range Check Elimination, |
duke@435 | 2192 | // peeling, unrolling, etc. |
duke@435 | 2193 | if(loop_opts_cnt > 0) { |
duke@435 | 2194 | debug_only( int cnt = 0; ); |
duke@435 | 2195 | while(major_progress() && (loop_opts_cnt > 0)) { |
duke@435 | 2196 | TracePhase t2("idealLoop", &_t_idealLoop, true); |
duke@435 | 2197 | assert( cnt++ < 40, "infinite cycle in loop optimization" ); |
kvn@2727 | 2198 | PhaseIdealLoop ideal_loop( igvn, true); |
duke@435 | 2199 | loop_opts_cnt--; |
sla@5237 | 2200 | if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2); |
duke@435 | 2201 | if (failing()) return; |
duke@435 | 2202 | } |
duke@435 | 2203 | } |
never@1356 | 2204 | |
never@1356 | 2205 | { |
never@1356 | 2206 | // Verify that all previous optimizations produced a valid graph |
never@1356 | 2207 | // at least to this point, even if no loop optimizations were done. |
never@1356 | 2208 | NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); ) |
never@1356 | 2209 | PhaseIdealLoop::verify(igvn); |
never@1356 | 2210 | } |
never@1356 | 2211 | |
duke@435 | 2212 | { |
duke@435 | 2213 | NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); ) |
duke@435 | 2214 | PhaseMacroExpand mex(igvn); |
duke@435 | 2215 | if (mex.expand_macro_nodes()) { |
duke@435 | 2216 | assert(failing(), "must bail out w/ explicit message"); |
duke@435 | 2217 | return; |
duke@435 | 2218 | } |
duke@435 | 2219 | } |
duke@435 | 2220 | |
duke@435 | 2221 | } // (End scope of igvn; run destructor if necessary for asserts.) |
duke@435 | 2222 | |
kvn@4448 | 2223 | dump_inlining(); |
duke@435 | 2224 | // A method with only infinite loops has no edges entering loops from root |
duke@435 | 2225 | { |
duke@435 | 2226 | NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); ) |
duke@435 | 2227 | if (final_graph_reshaping()) { |
duke@435 | 2228 | assert(failing(), "must bail out w/ explicit message"); |
duke@435 | 2229 | return; |
duke@435 | 2230 | } |
duke@435 | 2231 | } |
duke@435 | 2232 | |
sla@5237 | 2233 | print_method(PHASE_OPTIMIZE_FINISHED, 2); |
duke@435 | 2234 | } |
duke@435 | 2235 | |
duke@435 | 2236 | |
duke@435 | 2237 | //------------------------------Code_Gen--------------------------------------- |
duke@435 | 2238 | // Given a graph, generate code for it |
duke@435 | 2239 | void Compile::Code_Gen() { |
adlertz@5539 | 2240 | if (failing()) { |
adlertz@5539 | 2241 | return; |
adlertz@5539 | 2242 | } |
duke@435 | 2243 | |
duke@435 | 2244 | // Perform instruction selection. You might think we could reclaim Matcher |
duke@435 | 2245 | // memory PDQ, but actually the Matcher is used in generating spill code. |
duke@435 | 2246 | // Internals of the Matcher (including some VectorSets) must remain live |
duke@435 | 2247 | // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage |
duke@435 | 2248 | // set a bit in reclaimed memory. |
duke@435 | 2249 | |
duke@435 | 2250 | // In debug mode can dump m._nodes.dump() for mapping of ideal to machine |
duke@435 | 2251 | // nodes. Mapping is only valid at the root of each matched subtree. |
duke@435 | 2252 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 2253 | |
adlertz@5539 | 2254 | Matcher matcher; |
adlertz@5539 | 2255 | _matcher = &matcher; |
duke@435 | 2256 | { |
duke@435 | 2257 | TracePhase t2("matcher", &_t_matcher, true); |
adlertz@5539 | 2258 | matcher.match(); |
duke@435 | 2259 | } |
duke@435 | 2260 | // In debug mode can dump m._nodes.dump() for mapping of ideal to machine |
duke@435 | 2261 | // nodes. Mapping is only valid at the root of each matched subtree. |
duke@435 | 2262 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 2263 | |
duke@435 | 2264 | // If you have too many nodes, or if matching has failed, bail out |
duke@435 | 2265 | check_node_count(0, "out of nodes matching instructions"); |
adlertz@5539 | 2266 | if (failing()) { |
adlertz@5539 | 2267 | return; |
adlertz@5539 | 2268 | } |
duke@435 | 2269 | |
duke@435 | 2270 | // Build a proper-looking CFG |
adlertz@5539 | 2271 | PhaseCFG cfg(node_arena(), root(), matcher); |
duke@435 | 2272 | _cfg = &cfg; |
duke@435 | 2273 | { |
duke@435 | 2274 | NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); ) |
adlertz@5539 | 2275 | bool success = cfg.do_global_code_motion(); |
adlertz@5539 | 2276 | if (!success) { |
adlertz@5539 | 2277 | return; |
adlertz@5539 | 2278 | } |
adlertz@5539 | 2279 | |
adlertz@5539 | 2280 | print_method(PHASE_GLOBAL_CODE_MOTION, 2); |
duke@435 | 2281 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 2282 | debug_only( cfg.verify(); ) |
duke@435 | 2283 | } |
adlertz@5539 | 2284 | |
adlertz@5539 | 2285 | PhaseChaitin regalloc(unique(), cfg, matcher); |
duke@435 | 2286 | _regalloc = ®alloc; |
duke@435 | 2287 | { |
duke@435 | 2288 | TracePhase t2("regalloc", &_t_registerAllocation, true); |
duke@435 | 2289 | // Perform register allocation. After Chaitin, use-def chains are |
duke@435 | 2290 | // no longer accurate (at spill code) and so must be ignored. |
duke@435 | 2291 | // Node->LRG->reg mappings are still accurate. |
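duke@435 |      | // (Chaitin-style coloring, roughly: build an interference graph |
duke@435 |      | // over live ranges (LRGs), color it with the machine's registers, |
duke@435 |      | // and insert spill code and retry when coloring fails; the inserted |
duke@435 |      | // spill code is what leaves the use-def chains stale.) |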
duke@435 | 2292 | _regalloc->Register_Allocate(); |
duke@435 | 2293 | |
duke@435 | 2294 | // Bail out if the allocator builds too many nodes |
neliasso@4949 | 2295 | if (failing()) { |
neliasso@4949 | 2296 | return; |
neliasso@4949 | 2297 | } |
duke@435 | 2298 | } |
duke@435 | 2299 | |
duke@435 | 2300 | // Prior to register allocation we kept empty basic blocks in case the |
duke@435 | 2301 | // allocator needed a place to spill. After register allocation we |
duke@435 | 2302 | // are not adding any new instructions. If any basic block is empty, we |
duke@435 | 2303 | // can now safely remove it. |
duke@435 | 2304 | { |
rasbold@853 | 2305 | NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); ) |
adlertz@5539 | 2306 | cfg.remove_empty_blocks(); |
rasbold@853 | 2307 | if (do_freq_based_layout()) { |
rasbold@853 | 2308 | PhaseBlockLayout layout(cfg); |
rasbold@853 | 2309 | } else { |
rasbold@853 | 2310 | cfg.set_loop_alignment(); |
rasbold@853 | 2311 | } |
rasbold@853 | 2312 | cfg.fixup_flow(); |
duke@435 | 2313 | } |
duke@435 | 2314 | |
duke@435 | 2315 | // Apply peephole optimizations |
duke@435 | 2316 | if( OptoPeephole ) { |
duke@435 | 2317 | NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); ) |
duke@435 | 2318 | PhasePeephole peep( _regalloc, cfg); |
duke@435 | 2319 | peep.do_transform(); |
duke@435 | 2320 | } |
duke@435 | 2321 | |
goetz@6478 | 2322 | // Do late expansion if the CPU requires it. |
goetz@6478 | 2323 | if (Matcher::require_postalloc_expand) { |
goetz@6478 | 2324 | NOT_PRODUCT(TracePhase t2c("postalloc_expand", &_t_postalloc_expand, true)); |
goetz@6478 | 2325 | cfg.postalloc_expand(_regalloc); |
goetz@6478 | 2326 | } |
goetz@6478 | 2327 | |
duke@435 | 2328 | // Convert Nodes to instruction bits in a buffer |
duke@435 | 2329 | { |
duke@435 | 2330 | // %%%% workspace merge brought two timers together for one job |
duke@435 | 2331 | TracePhase t2a("output", &_t_output, true); |
duke@435 | 2332 | NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); ) |
duke@435 | 2333 | Output(); |
duke@435 | 2334 | } |
duke@435 | 2335 | |
sla@5237 | 2336 | print_method(PHASE_FINAL_CODE); |
duke@435 | 2337 | |
duke@435 | 2338 | // He's dead, Jim. |
duke@435 | 2339 | _cfg = (PhaseCFG*)0xdeadbeef; |
duke@435 | 2340 | _regalloc = (PhaseChaitin*)0xdeadbeef; |
duke@435 | 2341 | } |
duke@435 | 2342 | |
duke@435 | 2343 | |
duke@435 | 2344 | //------------------------------dump_asm--------------------------------------- |
duke@435 | 2345 | // Dump formatted assembly |
duke@435 | 2346 | #ifndef PRODUCT |
duke@435 | 2347 | void Compile::dump_asm(int *pcs, uint pc_limit) { |
duke@435 | 2348 | bool cut_short = false; |
duke@435 | 2349 | tty->print_cr("#"); |
duke@435 | 2350 | tty->print("# "); _tf->dump(); tty->cr(); |
duke@435 | 2351 | tty->print_cr("#"); |
duke@435 | 2352 | |
duke@435 | 2353 | // For all blocks |
duke@435 | 2354 | int pc = 0x0; // Program counter |
duke@435 | 2355 | char starts_bundle = ' '; |
duke@435 | 2356 | _regalloc->dump_frame(); |
duke@435 | 2357 | |
duke@435 | 2358 | Node *n = NULL; |
adlertz@5539 | 2359 | for (uint i = 0; i < _cfg->number_of_blocks(); i++) { |
adlertz@5539 | 2360 | if (VMThread::should_terminate()) { |
adlertz@5539 | 2361 | cut_short = true; |
adlertz@5539 | 2362 | break; |
adlertz@5539 | 2363 | } |
adlertz@5539 | 2364 | Block* block = _cfg->get_block(i); |
adlertz@5539 | 2365 | if (block->is_connector() && !Verbose) { |
adlertz@5539 | 2366 | continue; |
adlertz@5539 | 2367 | } |
adlertz@5635 | 2368 | n = block->head(); |
adlertz@5539 | 2369 | if (pcs && n->_idx < pc_limit) { |
duke@435 | 2370 | tty->print("%3.3x ", pcs[n->_idx]); |
adlertz@5539 | 2371 | } else { |
duke@435 | 2372 | tty->print(" "); |
adlertz@5539 | 2373 | } |
adlertz@5539 | 2374 | block->dump_head(_cfg); |
adlertz@5539 | 2375 | if (block->is_connector()) { |
duke@435 | 2376 | tty->print_cr(" # Empty connector block"); |
adlertz@5539 | 2377 | } else if (block->num_preds() == 2 && block->pred(1)->is_CatchProj() && block->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) { |
duke@435 | 2378 | tty->print_cr(" # Block is sole successor of call"); |
duke@435 | 2379 | } |
duke@435 | 2380 | |
duke@435 | 2381 | // For all instructions |
duke@435 | 2382 | Node *delay = NULL; |
adlertz@5635 | 2383 | for (uint j = 0; j < block->number_of_nodes(); j++) { |
adlertz@5539 | 2384 | if (VMThread::should_terminate()) { |
adlertz@5539 | 2385 | cut_short = true; |
adlertz@5539 | 2386 | break; |
adlertz@5539 | 2387 | } |
adlertz@5635 | 2388 | n = block->get_node(j); |
duke@435 | 2389 | if (valid_bundle_info(n)) { |
adlertz@5539 | 2390 | Bundle* bundle = node_bundling(n); |
duke@435 | 2391 | if (bundle->used_in_unconditional_delay()) { |
duke@435 | 2392 | delay = n; |
duke@435 | 2393 | continue; |
duke@435 | 2394 | } |
adlertz@5539 | 2395 | if (bundle->starts_bundle()) { |
duke@435 | 2396 | starts_bundle = '+'; |
adlertz@5539 | 2397 | } |
duke@435 | 2398 | } |
duke@435 | 2399 | |
adlertz@5539 | 2400 | if (WizardMode) { |
adlertz@5539 | 2401 | n->dump(); |
adlertz@5539 | 2402 | } |
coleenp@548 | 2403 | |
duke@435 | 2404 | if( !n->is_Region() && // Don't print in the assembly |
duke@435 | 2405 | !n->is_Phi() && // a few noisily useless nodes |
duke@435 | 2406 | !n->is_Proj() && |
duke@435 | 2407 | !n->is_MachTemp() && |
kvn@1535 | 2408 | !n->is_SafePointScalarObject() && |
duke@435 | 2409 | !n->is_Catch() && // Would be nice to print exception table targets |
duke@435 | 2410 | !n->is_MergeMem() && // Not very interesting |
duke@435 | 2411 | !n->is_top() && // Debug info table constants |
duke@435 | 2412 | !(n->is_Con() && !n->is_Mach())// Debug info table constants |
duke@435 | 2413 | ) { |
duke@435 | 2414 | if (pcs && n->_idx < pc_limit) |
duke@435 | 2415 | tty->print("%3.3x", pcs[n->_idx]); |
duke@435 | 2416 | else |
duke@435 | 2417 | tty->print(" "); |
duke@435 | 2418 | tty->print(" %c ", starts_bundle); |
duke@435 | 2419 | starts_bundle = ' '; |
duke@435 | 2420 | tty->print("\t"); |
duke@435 | 2421 | n->format(_regalloc, tty); |
duke@435 | 2422 | tty->cr(); |
duke@435 | 2423 | } |
duke@435 | 2424 | |
duke@435 | 2425 | // If we have an instruction with a delay slot, and have seen a delay, |
duke@435 | 2426 | // then back up and print it |
duke@435 | 2427 | if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) { |
duke@435 | 2428 | assert(delay != NULL, "no unconditional delay instruction"); |
coleenp@548 | 2429 | if (WizardMode) delay->dump(); |
coleenp@548 | 2430 | |
duke@435 | 2431 | if (node_bundling(delay)->starts_bundle()) |
duke@435 | 2432 | starts_bundle = '+'; |
duke@435 | 2433 | if (pcs && n->_idx < pc_limit) |
duke@435 | 2434 | tty->print("%3.3x", pcs[n->_idx]); |
duke@435 | 2435 | else |
duke@435 | 2436 | tty->print(" "); |
duke@435 | 2437 | tty->print(" %c ", starts_bundle); |
duke@435 | 2438 | starts_bundle = ' '; |
duke@435 | 2439 | tty->print("\t"); |
duke@435 | 2440 | delay->format(_regalloc, tty); |
drchase@6680 | 2441 | tty->cr(); |
duke@435 | 2442 | delay = NULL; |
duke@435 | 2443 | } |
duke@435 | 2444 | |
duke@435 | 2445 | // Dump the exception table as well |
duke@435 | 2446 | if( n->is_Catch() && (Verbose || WizardMode) ) { |
duke@435 | 2447 | // Print the exception table for this offset |
duke@435 | 2448 | _handler_table.print_subtable_for(pc); |
duke@435 | 2449 | } |
duke@435 | 2450 | } |
duke@435 | 2451 | |
duke@435 | 2452 | if (pcs && n->_idx < pc_limit) |
duke@435 | 2453 | tty->print_cr("%3.3x", pcs[n->_idx]); |
duke@435 | 2454 | else |
drchase@6680 | 2455 | tty->cr(); |
duke@435 | 2456 | |
duke@435 | 2457 | assert(cut_short || delay == NULL, "no unconditional delay branch"); |
duke@435 | 2458 | |
duke@435 | 2459 | } // End of per-block dump |
drchase@6680 | 2460 | tty->cr(); |
duke@435 | 2461 | |
duke@435 | 2462 | if (cut_short) tty->print_cr("*** disassembly is cut short ***"); |
duke@435 | 2463 | } |
duke@435 | 2464 | #endif |
duke@435 | 2465 | |
duke@435 | 2466 | //------------------------------Final_Reshape_Counts--------------------------- |
duke@435 | 2467 | // This class defines counters to help identify when a method |
duke@435 | 2468 | // may/must be executed using hardware with only 24-bit precision. |
duke@435 | 2469 | struct Final_Reshape_Counts : public StackObj { |
duke@435 | 2470 | int _call_count; // count non-inlined 'common' calls |
duke@435 | 2471 | int _float_count; // count float ops requiring 24-bit precision |
duke@435 | 2472 | int _double_count; // count double ops requiring more precision |
duke@435 | 2473 | int _java_call_count; // count non-inlined 'java' calls |
kvn@1294 | 2474 | int _inner_loop_count; // count loops which need alignment |
duke@435 | 2475 | VectorSet _visited; // Visitation flags |
duke@435 | 2476 | Node_List _tests; // Set of IfNodes & PCTableNodes |
duke@435 | 2477 | |
duke@435 | 2478 | Final_Reshape_Counts() : |
kvn@1294 | 2479 | _call_count(0), _float_count(0), _double_count(0), |
kvn@1294 | 2480 | _java_call_count(0), _inner_loop_count(0), |
duke@435 | 2481 | _visited( Thread::current()->resource_area() ) { } |
duke@435 | 2482 | |
duke@435 | 2483 | void inc_call_count () { _call_count ++; } |
duke@435 | 2484 | void inc_float_count () { _float_count ++; } |
duke@435 | 2485 | void inc_double_count() { _double_count++; } |
duke@435 | 2486 | void inc_java_call_count() { _java_call_count++; } |
kvn@1294 | 2487 | void inc_inner_loop_count() { _inner_loop_count++; } |
duke@435 | 2488 | |
duke@435 | 2489 | int get_call_count () const { return _call_count ; } |
duke@435 | 2490 | int get_float_count () const { return _float_count ; } |
duke@435 | 2491 | int get_double_count() const { return _double_count; } |
duke@435 | 2492 | int get_java_call_count() const { return _java_call_count; } |
kvn@1294 | 2493 | int get_inner_loop_count() const { return _inner_loop_count; } |
duke@435 | 2494 | }; |
duke@435 | 2495 | |
mikael@4889 | 2496 | #ifdef ASSERT |
duke@435 | 2497 | static bool oop_offset_is_sane(const TypeInstPtr* tp) { |
duke@435 | 2498 | ciInstanceKlass *k = tp->klass()->as_instance_klass(); |
duke@435 | 2499 | // Make sure the offset goes inside the instance layout. |
coleenp@548 | 2500 | return k->contains_field_offset(tp->offset()); |
duke@435 | 2501 | // Note that OffsetBot and OffsetTop are very negative. |
duke@435 | 2502 | } |
mikael@4889 | 2503 | #endif |
duke@435 | 2504 | |
never@2780 | 2505 | // Eliminate trivially redundant StoreCMs and accumulate their |
never@2780 | 2506 | // precedence edges. |
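never@2780 |      | // Sketch of the pattern being cleaned up (field names illustrative): |
never@2780 |      | //   obj.f1 = p;   // StoreP, then StoreCM dirtying obj's card |
never@2780 |      | //   obj.f2 = q;   // StoreP, then StoreCM for the same card address |
never@2780 |      | // The earlier StoreCM is redundant once its precedence edges are |
never@2780 |      | // folded into the surviving one. |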
bharadwaj@4315 | 2507 | void Compile::eliminate_redundant_card_marks(Node* n) { |
never@2780 | 2508 | assert(n->Opcode() == Op_StoreCM, "expected StoreCM"); |
never@2780 | 2509 | if (n->in(MemNode::Address)->outcnt() > 1) { |
never@2780 | 2510 | // There are multiple users of the same address so it might be |
never@2780 | 2511 | // possible to eliminate some of the StoreCMs |
never@2780 | 2512 | Node* mem = n->in(MemNode::Memory); |
never@2780 | 2513 | Node* adr = n->in(MemNode::Address); |
never@2780 | 2514 | Node* val = n->in(MemNode::ValueIn); |
never@2780 | 2515 | Node* prev = n; |
never@2780 | 2516 | bool done = false; |
never@2780 | 2517 | // Walk the chain of StoreCMs eliminating ones that match. As |
never@2780 | 2518 | // long as it's a chain of single users, the optimization is |
never@2780 | 2519 | // safe. Eliminating partially redundant StoreCMs would require |
never@2780 | 2520 | // cloning copies down the other paths. |
never@2780 | 2521 | while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) { |
never@2780 | 2522 | if (adr == mem->in(MemNode::Address) && |
never@2780 | 2523 | val == mem->in(MemNode::ValueIn)) { |
never@2780 | 2524 | // redundant StoreCM |
never@2780 | 2525 | if (mem->req() > MemNode::OopStore) { |
never@2780 | 2526 | // Hasn't been processed by this code yet. |
never@2780 | 2527 | n->add_prec(mem->in(MemNode::OopStore)); |
never@2780 | 2528 | } else { |
never@2780 | 2529 | // Already converted to precedence edge |
never@2780 | 2530 | for (uint i = mem->req(); i < mem->len(); i++) { |
never@2780 | 2531 | // Accumulate any precedence edges |
never@2780 | 2532 | if (mem->in(i) != NULL) { |
never@2780 | 2533 | n->add_prec(mem->in(i)); |
never@2780 | 2534 | } |
never@2780 | 2535 | } |
never@2780 | 2536 | // Everything above this point has been processed. |
never@2780 | 2537 | done = true; |
never@2780 | 2538 | } |
never@2780 | 2539 | // Eliminate the previous StoreCM |
never@2780 | 2540 | prev->set_req(MemNode::Memory, mem->in(MemNode::Memory)); |
never@2780 | 2541 | assert(mem->outcnt() == 0, "should be dead"); |
bharadwaj@4315 | 2542 | mem->disconnect_inputs(NULL, this); |
never@2780 | 2543 | } else { |
never@2780 | 2544 | prev = mem; |
never@2780 | 2545 | } |
never@2780 | 2546 | mem = prev->in(MemNode::Memory); |
never@2780 | 2547 | } |
never@2780 | 2548 | } |
never@2780 | 2549 | } |
never@2780 | 2550 | |
duke@435 | 2551 | //------------------------------final_graph_reshaping_impl---------------------- |
duke@435 | 2552 | // Implement items 1-5 from final_graph_reshaping below. |
bharadwaj@4315 | 2553 | void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) { |
duke@435 | 2554 | |
kvn@603 | 2555 | if ( n->outcnt() == 0 ) return; // dead node |
duke@435 | 2556 | uint nop = n->Opcode(); |
duke@435 | 2557 | |
duke@435 | 2558 | // Check for 2-input instruction with "last use" on right input. |
duke@435 | 2559 | // Swap to left input. Implements item (2). |
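duke@435 |      | // e.g. for  t = a + b  where b has no further uses but a does, |
duke@435 |      | // swapping to  t = b + a  lets a two-address machine overwrite b's |
duke@435 |      | // register (b += a) instead of tying up a fresh one. |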
duke@435 | 2560 | if( n->req() == 3 && // two-input instruction |
duke@435 | 2561 | n->in(1)->outcnt() > 1 && // left use is NOT a last use |
duke@435 | 2562 | (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop |
duke@435 | 2563 | n->in(2)->outcnt() == 1 &&// right use IS a last use |
duke@435 | 2564 | !n->in(2)->is_Con() ) { // right use is not a constant |
duke@435 | 2565 | // Check for commutative opcode |
duke@435 | 2566 | switch( nop ) { |
duke@435 | 2567 | case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL: |
duke@435 | 2568 | case Op_MaxI: case Op_MinI: |
duke@435 | 2569 | case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL: |
duke@435 | 2570 | case Op_AndL: case Op_XorL: case Op_OrL: |
duke@435 | 2571 | case Op_AndI: case Op_XorI: case Op_OrI: { |
duke@435 | 2572 | // Move "last use" input to left by swapping inputs |
duke@435 | 2573 | n->swap_edges(1, 2); |
duke@435 | 2574 | break; |
duke@435 | 2575 | } |
duke@435 | 2576 | default: |
duke@435 | 2577 | break; |
duke@435 | 2578 | } |
duke@435 | 2579 | } |
duke@435 | 2580 | |
kvn@1964 | 2581 | #ifdef ASSERT |
kvn@1964 | 2582 | if( n->is_Mem() ) { |
bharadwaj@4315 | 2583 | int alias_idx = get_alias_index(n->as_Mem()->adr_type()); |
kvn@1964 | 2584 | assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw || |
kvn@1964 | 2585 | // oop will be recorded in oop map if load crosses safepoint |
kvn@1964 | 2586 | (n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() || |
kvn@1964 | 2587 | LoadNode::is_immutable_value(n->in(MemNode::Address)))), |
kvn@1964 | 2588 | "raw memory operations should have control edge"); |
kvn@1964 | 2589 | } |
kvn@1964 | 2590 | #endif |
duke@435 | 2591 | // Count FPU ops and common calls, implements item (3) |
duke@435 | 2592 | switch( nop ) { |
duke@435 | 2593 | // Count all float operations that may use FPU |
duke@435 | 2594 | case Op_AddF: |
duke@435 | 2595 | case Op_SubF: |
duke@435 | 2596 | case Op_MulF: |
duke@435 | 2597 | case Op_DivF: |
duke@435 | 2598 | case Op_NegF: |
duke@435 | 2599 | case Op_ModF: |
duke@435 | 2600 | case Op_ConvI2F: |
duke@435 | 2601 | case Op_ConF: |
duke@435 | 2602 | case Op_CmpF: |
duke@435 | 2603 | case Op_CmpF3: |
duke@435 | 2604 | // case Op_ConvL2F: // longs are split into 32-bit halves |
kvn@1294 | 2605 | frc.inc_float_count(); |
duke@435 | 2606 | break; |
duke@435 | 2607 | |
duke@435 | 2608 | case Op_ConvF2D: |
duke@435 | 2609 | case Op_ConvD2F: |
kvn@1294 | 2610 | frc.inc_float_count(); |
kvn@1294 | 2611 | frc.inc_double_count(); |
duke@435 | 2612 | break; |
duke@435 | 2613 | |
duke@435 | 2614 | // Count all double operations that may use FPU |
duke@435 | 2615 | case Op_AddD: |
duke@435 | 2616 | case Op_SubD: |
duke@435 | 2617 | case Op_MulD: |
duke@435 | 2618 | case Op_DivD: |
duke@435 | 2619 | case Op_NegD: |
duke@435 | 2620 | case Op_ModD: |
duke@435 | 2621 | case Op_ConvI2D: |
duke@435 | 2622 | case Op_ConvD2I: |
duke@435 | 2623 | // case Op_ConvL2D: // handled by leaf call |
duke@435 | 2624 | // case Op_ConvD2L: // handled by leaf call |
duke@435 | 2625 | case Op_ConD: |
duke@435 | 2626 | case Op_CmpD: |
duke@435 | 2627 | case Op_CmpD3: |
kvn@1294 | 2628 | frc.inc_double_count(); |
duke@435 | 2629 | break; |
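duke@435 |      | // (Opaque nodes exist only to hide values from earlier phases; |
duke@435 |      | // e.g. Opaque1 keeps loop-limit math from being folded away during |
duke@435 |      | // loop opts. That protection is no longer needed, so each node is |
duke@435 |      | // replaced by its input.) |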
duke@435 | 2630 | case Op_Opaque1: // Remove Opaque Nodes before matching |
duke@435 | 2631 | case Op_Opaque2: // Remove Opaque Nodes before matching |
kvn@6429 | 2632 | case Op_Opaque3: |
bharadwaj@4315 | 2633 | n->subsume_by(n->in(1), this); |
duke@435 | 2634 | break; |
duke@435 | 2635 | case Op_CallStaticJava: |
duke@435 | 2636 | case Op_CallJava: |
duke@435 | 2637 | case Op_CallDynamicJava: |
kvn@1294 | 2638 | frc.inc_java_call_count(); // Count java call site; |
duke@435 | 2639 | case Op_CallRuntime: |
duke@435 | 2640 | case Op_CallLeaf: |
duke@435 | 2641 | case Op_CallLeafNoFP: { |
duke@435 | 2642 | assert( n->is_Call(), "" ); |
duke@435 | 2643 | CallNode *call = n->as_Call(); |
duke@435 | 2644 | // Count call sites where the FP mode bit would have to be flipped. |
duke@435 | 2645 | // Do not count uncommon runtime calls: |
duke@435 | 2646 | // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking, |
duke@435 | 2647 | // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ... |
duke@435 | 2648 | if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) { |
kvn@1294 | 2649 | frc.inc_call_count(); // Count the call site |
duke@435 | 2650 | } else { // See if uncommon argument is shared |
duke@435 | 2651 | Node *n = call->in(TypeFunc::Parms); |
duke@435 | 2652 | int nop = n->Opcode(); |
duke@435 | 2653 | // Clone shared simple arguments to uncommon calls, item (1). |
duke@435 | 2654 | if( n->outcnt() > 1 && |
duke@435 | 2655 | !n->is_Proj() && |
duke@435 | 2656 | nop != Op_CreateEx && |
duke@435 | 2657 | nop != Op_CheckCastPP && |
kvn@766 | 2658 | nop != Op_DecodeN && |
roland@4159 | 2659 | nop != Op_DecodeNKlass && |
duke@435 | 2660 | !n->is_Mem() ) { |
duke@435 | 2661 | Node *x = n->clone(); |
duke@435 | 2662 | call->set_req( TypeFunc::Parms, x ); |
duke@435 | 2663 | } |
duke@435 | 2664 | } |
duke@435 | 2665 | break; |
duke@435 | 2666 | } |
duke@435 | 2667 | |
duke@435 | 2668 | case Op_StoreD: |
duke@435 | 2669 | case Op_LoadD: |
duke@435 | 2670 | case Op_LoadD_unaligned: |
kvn@1294 | 2671 | frc.inc_double_count(); |
duke@435 | 2672 | goto handle_mem; |
duke@435 | 2673 | case Op_StoreF: |
duke@435 | 2674 | case Op_LoadF: |
kvn@1294 | 2675 | frc.inc_float_count(); |
duke@435 | 2676 | goto handle_mem; |
duke@435 | 2677 | |
never@2780 | 2678 | case Op_StoreCM: |
never@2780 | 2679 | { |
never@2780 | 2680 | // Convert OopStore dependence into precedence edge |
never@2780 | 2681 | Node* prec = n->in(MemNode::OopStore); |
never@2780 | 2682 | n->del_req(MemNode::OopStore); |
never@2780 | 2683 | n->add_prec(prec); |
never@2780 | 2684 | eliminate_redundant_card_marks(n); |
never@2780 | 2685 | } |
never@2780 | 2686 | |
never@2780 | 2687 | // fall through |
never@2780 | 2688 | |
duke@435 | 2689 | case Op_StoreB: |
duke@435 | 2690 | case Op_StoreC: |
duke@435 | 2691 | case Op_StorePConditional: |
duke@435 | 2692 | case Op_StoreI: |
duke@435 | 2693 | case Op_StoreL: |
kvn@855 | 2694 | case Op_StoreIConditional: |
duke@435 | 2695 | case Op_StoreLConditional: |
duke@435 | 2696 | case Op_CompareAndSwapI: |
duke@435 | 2697 | case Op_CompareAndSwapL: |
duke@435 | 2698 | case Op_CompareAndSwapP: |
coleenp@548 | 2699 | case Op_CompareAndSwapN: |
roland@4106 | 2700 | case Op_GetAndAddI: |
roland@4106 | 2701 | case Op_GetAndAddL: |
roland@4106 | 2702 | case Op_GetAndSetI: |
roland@4106 | 2703 | case Op_GetAndSetL: |
roland@4106 | 2704 | case Op_GetAndSetP: |
roland@4106 | 2705 | case Op_GetAndSetN: |
duke@435 | 2706 | case Op_StoreP: |
coleenp@548 | 2707 | case Op_StoreN: |
roland@4159 | 2708 | case Op_StoreNKlass: |
duke@435 | 2709 | case Op_LoadB: |
twisti@1059 | 2710 | case Op_LoadUB: |
twisti@993 | 2711 | case Op_LoadUS: |
duke@435 | 2712 | case Op_LoadI: |
duke@435 | 2713 | case Op_LoadKlass: |
kvn@599 | 2714 | case Op_LoadNKlass: |
duke@435 | 2715 | case Op_LoadL: |
duke@435 | 2716 | case Op_LoadL_unaligned: |
duke@435 | 2717 | case Op_LoadPLocked: |
duke@435 | 2718 | case Op_LoadP: |
coleenp@548 | 2719 | case Op_LoadN: |
duke@435 | 2720 | case Op_LoadRange: |
duke@435 | 2721 | case Op_LoadS: { |
duke@435 | 2722 | handle_mem: |
duke@435 | 2723 | #ifdef ASSERT |
duke@435 | 2724 | if( VerifyOptoOopOffsets ) { |
duke@435 | 2725 | assert( n->is_Mem(), "" ); |
duke@435 | 2726 | MemNode *mem = (MemNode*)n; |
duke@435 | 2727 | // Check to see if address types have grounded out somehow. |
duke@435 | 2728 | const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr(); |
duke@435 | 2729 | assert( !tp || oop_offset_is_sane(tp), "" ); |
duke@435 | 2730 | } |
duke@435 | 2731 | #endif |
duke@435 | 2732 | break; |
duke@435 | 2733 | } |
duke@435 | 2734 | |
duke@435 | 2735 | case Op_AddP: { // Assert sane base pointers |
kvn@617 | 2736 | Node *addp = n->in(AddPNode::Address); |
duke@435 | 2737 | assert( !addp->is_AddP() || |
duke@435 | 2738 | addp->in(AddPNode::Base)->is_top() || // Top OK for allocation |
duke@435 | 2739 | addp->in(AddPNode::Base) == n->in(AddPNode::Base), |
duke@435 | 2740 | "Base pointers must match" ); |
kvn@617 | 2741 | #ifdef _LP64 |
ehelin@5694 | 2742 | if ((UseCompressedOops || UseCompressedClassPointers) && |
kvn@617 | 2743 | addp->Opcode() == Op_ConP && |
kvn@617 | 2744 | addp == n->in(AddPNode::Base) && |
kvn@617 | 2745 | n->in(AddPNode::Offset)->is_Con()) { |
kvn@617 | 2746 | // Use addressing with narrow klass to load with offset on x86. |
kvn@617 | 2747 | // On SPARC, loading a 32-bit constant and decoding it takes fewer |
kvn@617 | 2748 | // instructions (4) than loading a 64-bit constant (7). |
kvn@617 | 2749 | // Do this transformation here since IGVN will convert ConN back to ConP. |
kvn@617 | 2750 | const Type* t = addp->bottom_type(); |
roland@4159 | 2751 | if (t->isa_oopptr() || t->isa_klassptr()) { |
kvn@617 | 2752 | Node* nn = NULL; |
kvn@617 | 2753 | |
roland@4159 | 2754 | int op = t->isa_oopptr() ? Op_ConN : Op_ConNKlass; |
roland@4159 | 2755 | |
kvn@617 | 2756 | // Look for existing ConN node of the same exact type. |
bharadwaj@4315 | 2757 | Node* r = root(); |
kvn@617 | 2758 | uint cnt = r->outcnt(); |
kvn@617 | 2759 | for (uint i = 0; i < cnt; i++) { |
kvn@617 | 2760 | Node* m = r->raw_out(i); |
roland@4159 | 2761 | if (m!= NULL && m->Opcode() == op && |
kvn@656 | 2762 | m->bottom_type()->make_ptr() == t) { |
kvn@617 | 2763 | nn = m; |
kvn@617 | 2764 | break; |
kvn@617 | 2765 | } |
kvn@617 | 2766 | } |
kvn@617 | 2767 | if (nn != NULL) { |
kvn@617 | 2768 | // Decode a narrow oop to match address |
kvn@617 | 2769 | // [R12 + narrow_oop_reg<<3 + offset] |
roland@4159 | 2770 | if (t->isa_oopptr()) { |
bharadwaj@4315 | 2771 | nn = new (this) DecodeNNode(nn, t); |
roland@4159 | 2772 | } else { |
bharadwaj@4315 | 2773 | nn = new (this) DecodeNKlassNode(nn, t); |
roland@4159 | 2774 | } |
kvn@617 | 2775 | n->set_req(AddPNode::Base, nn); |
kvn@617 | 2776 | n->set_req(AddPNode::Address, nn); |
kvn@617 | 2777 | if (addp->outcnt() == 0) { |
bharadwaj@4315 | 2778 | addp->disconnect_inputs(NULL, this); |
kvn@617 | 2779 | } |
kvn@617 | 2780 | } |
kvn@617 | 2781 | } |
kvn@617 | 2782 | } |
kvn@617 | 2783 | #endif |
duke@435 | 2784 | break; |
duke@435 | 2785 | } |
duke@435 | 2786 | |
kvn@599 | 2787 | #ifdef _LP64 |
kvn@803 | 2788 | case Op_CastPP: |
kvn@1930 | 2789 | if (n->in(1)->is_DecodeN() && Matcher::gen_narrow_oop_implicit_null_checks()) { |
kvn@803 | 2790 | Node* in1 = n->in(1); |
kvn@803 | 2791 | const Type* t = n->bottom_type(); |
kvn@803 | 2792 | Node* new_in1 = in1->clone(); |
kvn@803 | 2793 | new_in1->as_DecodeN()->set_type(t); |
kvn@803 | 2794 | |
kvn@1930 | 2795 | if (!Matcher::narrow_oop_use_complex_address()) { |
kvn@803 | 2796 | // |
kvn@803 | 2797 | // x86, ARM and friends can handle 2 adds in addressing mode |
kvn@803 | 2798 | // and Matcher can fold a DecodeN node into address by using |
kvn@803 | 2799 | // a narrow oop directly and do implicit NULL check in address: |
kvn@803 | 2800 | // |
kvn@803 | 2801 | // [R12 + narrow_oop_reg<<3 + offset] |
kvn@803 | 2802 | // NullCheck narrow_oop_reg |
kvn@803 | 2803 | // |
kvn@803 | 2804 | // On other platforms (Sparc) we have to keep new DecodeN node and |
kvn@803 | 2805 | // use it to do implicit NULL check in address: |
kvn@803 | 2806 | // |
kvn@803 | 2807 | // decode_not_null narrow_oop_reg, base_reg |
kvn@803 | 2808 | // [base_reg + offset] |
kvn@803 | 2809 | // NullCheck base_reg |
kvn@803 | 2810 | // |
twisti@1040 | 2811 | // Pin the new DecodeN node to the non-null path on these platforms (SPARC) |
kvn@803 | 2812 | // to record which NULL check the new DecodeN node corresponds to, |
kvn@803 | 2813 | // so that it can be used as the value in implicit_null_check(). |
kvn@803 | 2814 | // |
kvn@803 | 2815 | new_in1->set_req(0, n->in(0)); |
kvn@803 | 2816 | } |
kvn@803 | 2817 | |
bharadwaj@4315 | 2818 | n->subsume_by(new_in1, this); |
kvn@803 | 2819 | if (in1->outcnt() == 0) { |
bharadwaj@4315 | 2820 | in1->disconnect_inputs(NULL, this); |
kvn@803 | 2821 | } |
kvn@803 | 2822 | } |
kvn@803 | 2823 | break; |
kvn@803 | 2824 | |
kvn@599 | 2825 | case Op_CmpP: |
kvn@603 | 2826 | // Do this transformation here to preserve CmpPNode::sub() and |
kvn@603 | 2827 | // other TypePtr related Ideal optimizations (for example, ptr nullness). |
roland@4159 | 2828 | if (n->in(1)->is_DecodeNarrowPtr() || n->in(2)->is_DecodeNarrowPtr()) { |
kvn@766 | 2829 | Node* in1 = n->in(1); |
kvn@766 | 2830 | Node* in2 = n->in(2); |
roland@4159 | 2831 | if (!in1->is_DecodeNarrowPtr()) { |
kvn@766 | 2832 | in2 = in1; |
kvn@766 | 2833 | in1 = n->in(2); |
kvn@766 | 2834 | } |
roland@4159 | 2835 | assert(in1->is_DecodeNarrowPtr(), "sanity"); |
kvn@766 | 2836 | |
kvn@766 | 2837 | Node* new_in2 = NULL; |
roland@4159 | 2838 | if (in2->is_DecodeNarrowPtr()) { |
roland@4159 | 2839 | assert(in2->Opcode() == in1->Opcode(), "must be same node type"); |
kvn@766 | 2840 | new_in2 = in2->in(1); |
kvn@766 | 2841 | } else if (in2->Opcode() == Op_ConP) { |
kvn@766 | 2842 | const Type* t = in2->bottom_type(); |
kvn@1930 | 2843 | if (t == TypePtr::NULL_PTR) { |
roland@4159 | 2844 | assert(in1->is_DecodeN(), "compare klass to null?"); |
kvn@1930 | 2845 | // Don't convert CmpP null check into CmpN if compressed |
kvn@1930 | 2846 | // oops implicit null check is not generated. |
kvn@1930 | 2847 | // This will allow to generate normal oop implicit null check. |
kvn@1930 | 2848 | if (Matcher::gen_narrow_oop_implicit_null_checks()) |
bharadwaj@4315 | 2849 | new_in2 = ConNode::make(this, TypeNarrowOop::NULL_PTR); |
kvn@803 | 2850 | // |
kvn@803 | 2851 | // This transformation, together with the CastPP transformation above, |
kvn@803 | 2852 | // will generate code for implicit NULL checks for compressed oops. |
kvn@803 | 2853 | // |
kvn@803 | 2854 | // The original code after Optimize() |
kvn@803 | 2855 | // |
kvn@803 | 2856 | // LoadN memory, narrow_oop_reg |
kvn@803 | 2857 | // decode narrow_oop_reg, base_reg |
kvn@803 | 2858 | // CmpP base_reg, NULL |
kvn@803 | 2859 | // CastPP base_reg // NotNull |
kvn@803 | 2860 | // Load [base_reg + offset], val_reg |
kvn@803 | 2861 | // |
kvn@803 | 2862 | // after these transformations will be |
kvn@803 | 2863 | // |
kvn@803 | 2864 | // LoadN memory, narrow_oop_reg |
kvn@803 | 2865 | // CmpN narrow_oop_reg, NULL |
kvn@803 | 2866 | // decode_not_null narrow_oop_reg, base_reg |
kvn@803 | 2867 | // Load [base_reg + offset], val_reg |
kvn@803 | 2868 | // |
kvn@803 | 2869 | // and the uncommon path (== NULL) will use narrow_oop_reg directly |
kvn@803 | 2870 | // since narrow oops can be used in debug info now (see the code in |
kvn@803 | 2871 | // final_graph_reshaping_walk()). |
kvn@803 | 2872 | // |
kvn@803 | 2873 | // At the end the code will be matched to |
kvn@803 | 2874 | // on x86: |
kvn@803 | 2875 | // |
kvn@803 | 2876 | // Load_narrow_oop memory, narrow_oop_reg |
kvn@803 | 2877 | // Load [R12 + narrow_oop_reg<<3 + offset], val_reg |
kvn@803 | 2878 | // NullCheck narrow_oop_reg |
kvn@803 | 2879 | // |
kvn@803 | 2880 | // and on sparc: |
kvn@803 | 2881 | // |
kvn@803 | 2882 | // Load_narrow_oop memory, narrow_oop_reg |
kvn@803 | 2883 | // decode_not_null narrow_oop_reg, base_reg |
kvn@803 | 2884 | // Load [base_reg + offset], val_reg |
kvn@803 | 2885 | // NullCheck base_reg |
kvn@803 | 2886 | // |
kvn@599 | 2887 | } else if (t->isa_oopptr()) { |
bharadwaj@4315 | 2888 | new_in2 = ConNode::make(this, t->make_narrowoop()); |
roland@4159 | 2889 | } else if (t->isa_klassptr()) { |
bharadwaj@4315 | 2890 | new_in2 = ConNode::make(this, t->make_narrowklass()); |
kvn@599 | 2891 | } |
kvn@599 | 2892 | } |
kvn@766 | 2893 | if (new_in2 != NULL) { |
bharadwaj@4315 | 2894 | Node* cmpN = new (this) CmpNNode(in1->in(1), new_in2); |
bharadwaj@4315 | 2895 | n->subsume_by(cmpN, this); |
kvn@766 | 2896 | if (in1->outcnt() == 0) { |
bharadwaj@4315 | 2897 | in1->disconnect_inputs(NULL, this); |
kvn@766 | 2898 | } |
kvn@766 | 2899 | if (in2->outcnt() == 0) { |
bharadwaj@4315 | 2900 | in2->disconnect_inputs(NULL, this); |
kvn@766 | 2901 | } |
kvn@599 | 2902 | } |
kvn@599 | 2903 | } |
kvn@728 | 2904 | break; |
kvn@803 | 2905 | |
kvn@803 | 2906 | case Op_DecodeN: |
roland@4159 | 2907 | case Op_DecodeNKlass: |
roland@4159 | 2908 | assert(!n->in(1)->is_EncodeNarrowPtr(), "should be optimized out"); |
kvn@1930 | 2909 | // DecodeN could be pinned when it can't be folded into |
kvn@927 | 2910 | // an address expression; see the code for Op_CastPP above. |
roland@4159 | 2911 | assert(n->in(0) == NULL || (UseCompressedOops && !Matcher::narrow_oop_use_complex_address()), "no control"); |
kvn@803 | 2912 | break; |
kvn@803 | 2913 | |
roland@4159 | 2914 | case Op_EncodeP: |
roland@4159 | 2915 | case Op_EncodePKlass: { |
kvn@803 | 2916 | Node* in1 = n->in(1); |
roland@4159 | 2917 | if (in1->is_DecodeNarrowPtr()) { |
bharadwaj@4315 | 2918 | n->subsume_by(in1->in(1), this); |
kvn@803 | 2919 | } else if (in1->Opcode() == Op_ConP) { |
kvn@803 | 2920 | const Type* t = in1->bottom_type(); |
kvn@803 | 2921 | if (t == TypePtr::NULL_PTR) { |
roland@4159 | 2922 | assert(t->isa_oopptr(), "null klass?"); |
bharadwaj@4315 | 2923 | n->subsume_by(ConNode::make(this, TypeNarrowOop::NULL_PTR), this); |
kvn@803 | 2924 | } else if (t->isa_oopptr()) { |
bharadwaj@4315 | 2925 | n->subsume_by(ConNode::make(this, t->make_narrowoop()), this); |
roland@4159 | 2926 | } else if (t->isa_klassptr()) { |
bharadwaj@4315 | 2927 | n->subsume_by(ConNode::make(this, t->make_narrowklass()), this); |
kvn@803 | 2928 | } |
kvn@803 | 2929 | } |
kvn@803 | 2930 | if (in1->outcnt() == 0) { |
bharadwaj@4315 | 2931 | in1->disconnect_inputs(NULL, this); |
kvn@803 | 2932 | } |
kvn@803 | 2933 | break; |
kvn@803 | 2934 | } |
kvn@803 | 2935 | |
never@1515 | 2936 | case Op_Proj: { |
never@1515 | 2937 | if (OptimizeStringConcat) { |
never@1515 | 2938 | ProjNode* p = n->as_Proj(); |
never@1515 | 2939 | if (p->_is_io_use) { |
never@1515 | 2940 | // Separate projections were used for the exception path; they |
never@1515 | 2941 | // are normally removed by a late inline. If the call wasn't inlined |
never@1515 | 2942 | // they will hang around and should just be replaced with |
never@1515 | 2943 | // the original one. |
never@1515 | 2944 | Node* proj = NULL; |
never@1515 | 2945 | // Replace with just one |
never@1515 | 2946 | for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) { |
never@1515 | 2947 | Node *use = i.get(); |
never@1515 | 2948 | if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) { |
never@1515 | 2949 | proj = use; |
never@1515 | 2950 | break; |
never@1515 | 2951 | } |
never@1515 | 2952 | } |
kvn@3396 | 2953 | assert(proj != NULL, "must be found"); |
bharadwaj@4315 | 2954 | p->subsume_by(proj, this); |
never@1515 | 2955 | } |
never@1515 | 2956 | } |
never@1515 | 2957 | break; |
never@1515 | 2958 | } |
never@1515 | 2959 | |
kvn@803 | 2960 | case Op_Phi: |
roland@4159 | 2961 | if (n->as_Phi()->bottom_type()->isa_narrowoop() || n->as_Phi()->bottom_type()->isa_narrowklass()) { |
kvn@803 | 2962 | // The EncodeP optimization may create a Phi with the same edges |
kvn@803 | 2963 | // for all paths; such a Phi is not handled well by the register allocator. |
kvn@803 | 2964 | Node* unique_in = n->in(1); |
kvn@803 | 2965 | assert(unique_in != NULL, ""); |
kvn@803 | 2966 | uint cnt = n->req(); |
kvn@803 | 2967 | for (uint i = 2; i < cnt; i++) { |
kvn@803 | 2968 | Node* m = n->in(i); |
kvn@803 | 2969 | assert(m != NULL, ""); |
kvn@803 | 2970 | if (unique_in != m) |
kvn@803 | 2971 | unique_in = NULL; |
kvn@803 | 2972 | } |
kvn@803 | 2973 | if (unique_in != NULL) { |
bharadwaj@4315 | 2974 | n->subsume_by(unique_in, this); |
kvn@803 | 2975 | } |
kvn@803 | 2976 | } |
kvn@803 | 2977 | break; |
kvn@803 | 2978 | |
kvn@599 | 2979 | #endif |
kvn@599 | 2980 | |
duke@435 | 2981 | case Op_ModI: |
duke@435 | 2982 | if (UseDivMod) { |
duke@435 | 2983 | // Check if a%b and a/b both exist |
duke@435 | 2984 | Node* d = n->find_similar(Op_DivI); |
duke@435 | 2985 | if (d) { |
duke@435 | 2986 | // Replace them with a fused divmod if supported |
duke@435 | 2987 | if (Matcher::has_match_rule(Op_DivModI)) { |
bharadwaj@4315 | 2988 | DivModINode* divmod = DivModINode::make(this, n); |
bharadwaj@4315 | 2989 | d->subsume_by(divmod->div_proj(), this); |
bharadwaj@4315 | 2990 | n->subsume_by(divmod->mod_proj(), this); |
duke@435 | 2991 | } else { |
duke@435 | 2992 | // replace a%b with a-((a/b)*b) |
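duke@435 |      | // (the identity holds for Java's truncated division, e.g. |
duke@435 |      | //  a = -7, b = 3:  a/b = -2  and  -7 - (-2*3) = -1 == -7 % 3) |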
bharadwaj@4315 | 2993 | Node* mult = new (this) MulINode(d, d->in(2)); |
bharadwaj@4315 | 2994 | Node* sub = new (this) SubINode(d->in(1), mult); |
bharadwaj@4315 | 2995 | n->subsume_by(sub, this); |
duke@435 | 2996 | } |
duke@435 | 2997 | } |
duke@435 | 2998 | } |
duke@435 | 2999 | break; |
duke@435 | 3000 | |
duke@435 | 3001 | case Op_ModL: |
duke@435 | 3002 | if (UseDivMod) { |
duke@435 | 3003 | // Check if a%b and a/b both exist |
duke@435 | 3004 | Node* d = n->find_similar(Op_DivL); |
duke@435 | 3005 | if (d) { |
duke@435 | 3006 | // Replace them with a fused divmod if supported |
duke@435 | 3007 | if (Matcher::has_match_rule(Op_DivModL)) { |
bharadwaj@4315 | 3008 | DivModLNode* divmod = DivModLNode::make(this, n); |
bharadwaj@4315 | 3009 | d->subsume_by(divmod->div_proj(), this); |
bharadwaj@4315 | 3010 | n->subsume_by(divmod->mod_proj(), this); |
duke@435 | 3011 | } else { |
duke@435 | 3012 | // replace a%b with a-((a/b)*b) |
bharadwaj@4315 | 3013 | Node* mult = new (this) MulLNode(d, d->in(2)); |
bharadwaj@4315 | 3014 | Node* sub = new (this) SubLNode(d->in(1), mult); |
bharadwaj@4315 | 3015 | n->subsume_by(sub, this); |
duke@435 | 3016 | } |
duke@435 | 3017 | } |
duke@435 | 3018 | } |
duke@435 | 3019 | break; |
duke@435 | 3020 | |
kvn@3882 | 3021 | case Op_LoadVector: |
kvn@3882 | 3022 | case Op_StoreVector: |
duke@435 | 3023 | break; |
duke@435 | 3024 | |
duke@435 | 3025 | case Op_PackB: |
duke@435 | 3026 | case Op_PackS: |
duke@435 | 3027 | case Op_PackI: |
duke@435 | 3028 | case Op_PackF: |
duke@435 | 3029 | case Op_PackL: |
duke@435 | 3030 | case Op_PackD: |
duke@435 | 3031 | if (n->req()-1 > 2) { |
duke@435 | 3032 | // Replace many operand PackNodes with a binary tree for matching |
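duke@435 |      | // (roughly: Pack(a,b,c,d) becomes Pack(Pack(a,b), Pack(c,d)), so |
duke@435 |      | //  the matcher only ever sees two-operand pack nodes) |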
duke@435 | 3033 | PackNode* p = (PackNode*) n; |
bharadwaj@4315 | 3034 | Node* btp = p->binary_tree_pack(this, 1, n->req()); |
bharadwaj@4315 | 3035 | n->subsume_by(btp, this); |
duke@435 | 3036 | } |
duke@435 | 3037 | break; |
kvn@1294 | 3038 | case Op_Loop: |
kvn@1294 | 3039 | case Op_CountedLoop: |
kvn@1294 | 3040 | if (n->as_Loop()->is_inner_loop()) { |
kvn@1294 | 3041 | frc.inc_inner_loop_count(); |
kvn@1294 | 3042 | } |
kvn@1294 | 3043 | break; |
roland@2683 | 3044 | case Op_LShiftI: |
roland@2683 | 3045 | case Op_RShiftI: |
roland@2683 | 3046 | case Op_URShiftI: |
roland@2683 | 3047 | case Op_LShiftL: |
roland@2683 | 3048 | case Op_RShiftL: |
roland@2683 | 3049 | case Op_URShiftL: |
roland@2683 | 3050 | if (Matcher::need_masked_shift_count) { |
roland@2683 | 3051 | // The cpu's shift instructions don't restrict the count to the |
roland@2683 | 3052 | // lower 5/6 bits. We need to do the masking ourselves. |
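roland@2683 |      | // (Java semantics: int shifts use only the low 5 bits of the count |
roland@2683 |      | //  and long shifts the low 6, so e.g. x << 35 on an int must behave |
roland@2683 |      | //  like x << 3) |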
roland@2683 | 3053 | Node* in2 = n->in(2); |
roland@2683 | 3054 | juint mask = (n->bottom_type() == TypeInt::INT) ? (BitsPerInt - 1) : (BitsPerLong - 1); |
roland@2683 | 3055 | const TypeInt* t = in2->find_int_type(); |
roland@2683 | 3056 | if (t != NULL && t->is_con()) { |
roland@2683 | 3057 | juint shift = t->get_con(); |
roland@2683 | 3058 | if (shift > mask) { // Unsigned cmp |
bharadwaj@4315 | 3059 | n->set_req(2, ConNode::make(this, TypeInt::make(shift & mask))); |
roland@2683 | 3060 | } |
roland@2683 | 3061 | } else { |
roland@2683 | 3062 | if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) { |
bharadwaj@4315 | 3063 | Node* shift = new (this) AndINode(in2, ConNode::make(this, TypeInt::make(mask))); |
roland@2683 | 3064 | n->set_req(2, shift); |
roland@2683 | 3065 | } |
roland@2683 | 3066 | } |
roland@2683 | 3067 | if (in2->outcnt() == 0) { // Remove dead node |
bharadwaj@4315 | 3068 | in2->disconnect_inputs(NULL, this); |
roland@2683 | 3069 | } |
roland@2683 | 3070 | } |
roland@2683 | 3071 | break; |
roland@4694 | 3072 | case Op_MemBarStoreStore: |
kvn@5110 | 3073 | case Op_MemBarRelease: |
roland@4694 | 3074 | // Break the link with AllocateNode: it is no longer useful and |
roland@4694 | 3075 | // confuses register allocation. |
roland@4694 | 3076 | if (n->req() > MemBarNode::Precedent) { |
roland@4694 | 3077 | n->set_req(MemBarNode::Precedent, top()); |
roland@4694 | 3078 | } |
roland@4694 | 3079 | break; |
duke@435 | 3080 | default: |
duke@435 | 3081 | assert( !n->is_Call(), "" ); |
duke@435 | 3082 | assert( !n->is_Mem(), "" ); |
duke@435 | 3083 | break; |
duke@435 | 3084 | } |
never@562 | 3085 | |
never@562 | 3086 | // Collect CFG split points |
never@562 | 3087 | if (n->is_MultiBranch()) |
kvn@1294 | 3088 | frc._tests.push(n); |
duke@435 | 3089 | } |
duke@435 | 3090 | |
duke@435 | 3091 | //------------------------------final_graph_reshaping_walk--------------------- |
duke@435 | 3092 | // Replacing Opaque nodes with their input in final_graph_reshaping_impl(), |
duke@435 | 3093 | // requires that the walk visits a node's inputs before visiting the node. |
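duke@435 |      | // The walk below is an iterative post-order DFS: nstack records a |
duke@435 |      | // parent together with the index of its next input, so that |
duke@435 |      | // final_graph_reshaping_impl() runs for a node only after all of |
duke@435 |      | // the node's inputs have been visited. |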
bharadwaj@4315 | 3094 | void Compile::final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) { |
kvn@766 | 3095 | ResourceArea *area = Thread::current()->resource_area(); |
kvn@766 | 3096 | Unique_Node_List sfpt(area); |
kvn@766 | 3097 | |
kvn@1294 | 3098 | frc._visited.set(root->_idx); // first, mark node as visited |
duke@435 | 3099 | uint cnt = root->req(); |
duke@435 | 3100 | Node *n = root; |
duke@435 | 3101 | uint i = 0; |
duke@435 | 3102 | while (true) { |
duke@435 | 3103 | if (i < cnt) { |
duke@435 | 3104 | // Place all non-visited non-null inputs onto stack |
duke@435 | 3105 | Node* m = n->in(i); |
duke@435 | 3106 | ++i; |
kvn@1294 | 3107 | if (m != NULL && !frc._visited.test_set(m->_idx)) { |
roland@6723 | 3108 | if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) { |
roland@6723 | 3109 | // Compute the worst-case interpreter frame size in case of a deoptimization. |
roland@6723 | 3110 | update_interpreter_frame_size(m->as_SafePoint()->jvms()->interpreter_frame_size()); |
roland@6723 | 3111 | |
kvn@766 | 3112 | sfpt.push(m); |
roland@6723 | 3113 | } |
duke@435 | 3114 | cnt = m->req(); |
duke@435 | 3115 | nstack.push(n, i); // put on stack parent and next input's index |
duke@435 | 3116 | n = m; |
duke@435 | 3117 | i = 0; |
duke@435 | 3118 | } |
duke@435 | 3119 | } else { |
duke@435 | 3120 | // Now do post-visit work |
kvn@1294 | 3121 | final_graph_reshaping_impl( n, frc ); |
duke@435 | 3122 | if (nstack.is_empty()) |
duke@435 | 3123 | break; // finished |
duke@435 | 3124 | n = nstack.node(); // Get node from stack |
duke@435 | 3125 | cnt = n->req(); |
duke@435 | 3126 | i = nstack.index(); |
duke@435 | 3127 | nstack.pop(); // Shift to the next node on stack |
duke@435 | 3128 | } |
duke@435 | 3129 | } |
kvn@766 | 3130 | |
kvn@1930 | 3131 | // Skip next transformation if compressed oops are not used. |
roland@4159 | 3132 | if ((UseCompressedOops && !Matcher::gen_narrow_oop_implicit_null_checks()) || |
ehelin@5694 | 3133 | (!UseCompressedOops && !UseCompressedClassPointers)) |
kvn@1930 | 3134 | return; |
kvn@1930 | 3135 | |
roland@4159 | 3136 | // Go over safepoint nodes to skip DecodeN/DecodeNKlass nodes for debug edges. |
kvn@766 | 3137 | // This can be done for uncommon traps or any safepoints/calls |
roland@4159 | 3138 | // if the DecodeN/DecodeNKlass node is referenced only in debug info. |
kvn@766 | 3139 | while (sfpt.size() > 0) { |
kvn@766 | 3140 | n = sfpt.pop(); |
kvn@766 | 3141 | JVMState *jvms = n->as_SafePoint()->jvms(); |
kvn@766 | 3142 | assert(jvms != NULL, "sanity"); |
kvn@766 | 3143 | int start = jvms->debug_start(); |
kvn@766 | 3144 | int end = n->req(); |
kvn@766 | 3145 | bool is_uncommon = (n->is_CallStaticJava() && |
kvn@766 | 3146 | n->as_CallStaticJava()->uncommon_trap_request() != 0); |
kvn@766 | 3147 | for (int j = start; j < end; j++) { |
kvn@766 | 3148 | Node* in = n->in(j); |
roland@4159 | 3149 | if (in->is_DecodeNarrowPtr()) { |
kvn@766 | 3150 | bool safe_to_skip = true; |
kvn@766 | 3151 | if (!is_uncommon ) { |
kvn@766 | 3152 | // Is it safe to skip? |
kvn@766 | 3153 | for (uint i = 0; i < in->outcnt(); i++) { |
kvn@766 | 3154 | Node* u = in->raw_out(i); |
kvn@766 | 3155 | if (!u->is_SafePoint() || |
kvn@766 | 3156 | (u->is_Call() && u->as_Call()->has_non_debug_use(n))) { |
kvn@766 | 3157 | safe_to_skip = false; |
kvn@766 | 3158 | } |
kvn@766 | 3159 | } |
kvn@766 | 3160 | } |
kvn@766 | 3161 | if (safe_to_skip) { |
kvn@766 | 3162 | n->set_req(j, in->in(1)); |
kvn@766 | 3163 | } |
kvn@766 | 3164 | if (in->outcnt() == 0) { |
bharadwaj@4315 | 3165 | in->disconnect_inputs(NULL, this); |
kvn@766 | 3166 | } |
kvn@766 | 3167 | } |
kvn@766 | 3168 | } |
kvn@766 | 3169 | } |
duke@435 | 3170 | } |
duke@435 | 3171 | |
duke@435 | 3172 | //------------------------------final_graph_reshaping-------------------------- |
duke@435 | 3173 | // Final Graph Reshaping. |
duke@435 | 3174 | // |
duke@435 | 3175 | // (1) Clone simple inputs to uncommon calls, so they can be scheduled late |
duke@435 | 3176 | // and not commoned up and forced early. Must come after regular |
duke@435 | 3177 | // optimizations to avoid GVN undoing the cloning. Clone constant |
duke@435 | 3178 | // inputs to Loop Phis; these will be split by the allocator anyways. |
duke@435 | 3179 | // Remove Opaque nodes. |
duke@435 | 3180 | // (2) Move last-uses by commutative operations to the left input to encourage |
duke@435 | 3181 | // Intel update-in-place two-address operations and better register usage |
duke@435 | 3182 | // on RISCs. Must come after regular optimizations to avoid GVN Ideal |
duke@435 | 3183 | // calls canonicalizing them back. |
duke@435 | 3184 | // (3) Count the number of double-precision FP ops, single-precision FP ops |
duke@435 | 3185 | // and call sites. On Intel, we can get correct rounding either by |
duke@435 | 3186 | // forcing singles to memory (requires extra stores and loads after each |
duke@435 | 3187 | // FP bytecode) or we can set a rounding mode bit (requires setting and |
duke@435 | 3188 | // clearing the mode bit around call sites). The mode bit is only used |
duke@435 | 3189 | // if the relative frequency of single FP ops to calls is low enough. |
duke@435 | 3190 | // This is a key transform for SPEC mpeg_audio. |
duke@435 | 3191 | // (4) Detect infinite loops; blobs of code reachable from above but not |
duke@435 | 3192 | // below. Several of the Code_Gen algorithms fail on such code shapes, |
duke@435 | 3193 | // so we simply bail out. Happens a lot in ZKM.jar, but also happens |
duke@435 | 3194 | // from time to time in other code (such as -Xcomp finalizer loops, etc.). |
duke@435 | 3195 | // Detection is by looking for IfNodes where only 1 projection is |
duke@435 | 3196 | // reachable from below or CatchNodes missing some targets. |
duke@435 | 3197 | // (5) Assert for insane oop offsets in debug mode. |
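duke@435 |      | // |
duke@435 |      | // For (4), e.g. a branch guarding 'while (true) { }' leaves a |
duke@435 |      | // projection that can never reach the root from below; the check |
duke@435 |      | // that every MultiBranch kid was visited then bails out with |
duke@435 |      | // "infinite loop". |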
duke@435 | 3198 | |
duke@435 | 3199 | bool Compile::final_graph_reshaping() { |
duke@435 | 3200 | // an infinite loop may have been eliminated by the optimizer, |
duke@435 | 3201 | // in which case the graph will be empty. |
duke@435 | 3202 | if (root()->req() == 1) { |
duke@435 | 3203 | record_method_not_compilable("trivial infinite loop"); |
duke@435 | 3204 | return true; |
duke@435 | 3205 | } |
duke@435 | 3206 | |
roland@4589 | 3207 | // Expensive nodes have their control input set to prevent the GVN |
roland@4589 | 3208 | // from freely commoning them. There's no GVN beyond this point so |
roland@4589 | 3209 | // no need to keep the control input. We want the expensive nodes to |
roland@4589 | 3210 | // be freely moved to the least frequent code path by gcm. |
roland@4589 | 3211 | assert(OptimizeExpensiveOps || expensive_count() == 0, "optimization off but list non-empty?"); |
roland@4589 | 3212 | for (int i = 0; i < expensive_count(); i++) { |
roland@4589 | 3213 | _expensive_nodes->at(i)->set_req(0, NULL); |
roland@4589 | 3214 | } |
roland@4589 | 3215 | |
kvn@1294 | 3216 | Final_Reshape_Counts frc; |
duke@435 | 3217 | |
duke@435 | 3218 | // Visit everybody reachable! |
duke@435 | 3219 | // Allocate stack of size C->unique()/2 to avoid frequent realloc |
duke@435 | 3220 | Node_Stack nstack(unique() >> 1); |
kvn@1294 | 3221 | final_graph_reshaping_walk(nstack, root(), frc); |
duke@435 | 3222 | |
duke@435 | 3223 | // Check for unreachable (from below) code (i.e., infinite loops). |
kvn@1294 | 3224 | for( uint i = 0; i < frc._tests.size(); i++ ) { |
kvn@1294 | 3225 | MultiBranchNode *n = frc._tests[i]->as_MultiBranch(); |
never@562 | 3226 | // Get number of CFG targets. |
duke@435 | 3227 | // Note that PCTables include exception targets after calls. |
never@562 | 3228 | uint required_outcnt = n->required_outcnt(); |
never@562 | 3229 | if (n->outcnt() != required_outcnt) { |
duke@435 | 3230 | // Check for a few special cases. Rethrow Nodes never take the |
duke@435 | 3231 | // 'fall-thru' path, so expected kids is 1 less. |
duke@435 | 3232 | if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) { |
duke@435 | 3233 | if (n->in(0)->in(0)->is_Call()) { |
duke@435 | 3234 | CallNode *call = n->in(0)->in(0)->as_Call(); |
duke@435 | 3235 | if (call->entry_point() == OptoRuntime::rethrow_stub()) { |
never@562 | 3236 | required_outcnt--; // Rethrow always has 1 less kid |
duke@435 | 3237 | } else if (call->req() > TypeFunc::Parms && |
duke@435 | 3238 | call->is_CallDynamicJava()) { |
duke@435 | 3239 | // Check for null receiver. In such a case, the optimizer has |
duke@435 | 3240 | // detected that the virtual call will always result in a null |
duke@435 | 3241 | // pointer exception. The fall-through projection of this CatchNode |
duke@435 | 3242 | // will not be populated. |
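duke@435 |      | // (e.g. the receiver is known to be null after inlining, so the |
duke@435 |      | // call can only throw a NullPointerException) |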
duke@435 | 3243 | Node *arg0 = call->in(TypeFunc::Parms); |
duke@435 | 3244 | if (arg0->is_Type() && |
duke@435 | 3245 | arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) { |
never@562 | 3246 | required_outcnt--; |
duke@435 | 3247 | } |
duke@435 | 3248 | } else if (call->entry_point() == OptoRuntime::new_array_Java() && |
duke@435 | 3249 | call->req() > TypeFunc::Parms+1 && |
duke@435 | 3250 | call->is_CallStaticJava()) { |
duke@435 | 3251 | // Check for negative array length. In such a case, the optimizer has |
duke@435 | 3252 | // detected that the allocation attempt will always result in an |
duke@435 | 3253 | // exception. There is no fall-through projection of this CatchNode. |
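duke@435 |      | // (e.g. 'new int[len]' where len is provably negative) |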
duke@435 | 3254 | Node *arg1 = call->in(TypeFunc::Parms+1); |
duke@435 | 3255 | if (arg1->is_Type() && |
duke@435 | 3256 | arg1->as_Type()->type()->join(TypeInt::POS)->empty()) { |
never@562 | 3257 | required_outcnt--; |
duke@435 | 3258 | } |
duke@435 | 3259 | } |
duke@435 | 3260 | } |
duke@435 | 3261 | } |
never@562 | 3262 | // Recheck with a better notion of 'required_outcnt' |
never@562 | 3263 | if (n->outcnt() != required_outcnt) { |
duke@435 | 3264 | record_method_not_compilable("malformed control flow"); |
duke@435 | 3265 | return true; // Not all targets reachable! |
duke@435 | 3266 | } |
duke@435 | 3267 | } |
duke@435 | 3268 | // Check that I actually visited all kids. Unreached kids |
duke@435 | 3269 | // must be infinite loops. |
duke@435 | 3270 | for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) |
kvn@1294 | 3271 | if (!frc._visited.test(n->fast_out(j)->_idx)) { |
duke@435 | 3272 | record_method_not_compilable("infinite loop"); |
duke@435 | 3273 | return true; // Found unvisited kid; must be unreach |
duke@435 | 3274 | } |
duke@435 | 3275 | } |
duke@435 | 3276 | |
duke@435 | 3277 | // If the original bytecodes contained a mixture of floats and doubles, |
duke@435 | 3278 | // check if the optimizer has made it homogeneous, item (3). |
never@1364 | 3279 | if( Use24BitFPMode && Use24BitFP && UseSSE == 0 && |
kvn@1294 | 3280 | frc.get_float_count() > 32 && |
kvn@1294 | 3281 | frc.get_double_count() == 0 && |
kvn@1294 | 3282 | (10 * frc.get_call_count() < frc.get_float_count()) ) { |
duke@435 | 3283 | set_24_bit_selection_and_mode( false, true ); |
duke@435 | 3284 | } |
duke@435 | 3285 | |
kvn@1294 | 3286 | set_java_calls(frc.get_java_call_count()); |
kvn@1294 | 3287 | set_inner_loops(frc.get_inner_loop_count()); |
duke@435 | 3288 | |
duke@435 | 3289 | // No infinite loops, no reason to bail out. |
duke@435 | 3290 | return false; |
duke@435 | 3291 | } |
duke@435 | 3292 | |
duke@435 | 3293 | //-----------------------------too_many_traps---------------------------------- |
duke@435 | 3294 | // Report if there are too many traps at the current method and bci. |
duke@435 | 3295 | // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded. |
duke@435 | 3296 | bool Compile::too_many_traps(ciMethod* method, |
duke@435 | 3297 | int bci, |
duke@435 | 3298 | Deoptimization::DeoptReason reason) { |
duke@435 | 3299 | ciMethodData* md = method->method_data(); |
duke@435 | 3300 | if (md->is_empty()) { |
duke@435 | 3301 | // Assume the trap has not occurred, or that it occurred only |
duke@435 | 3302 | // because of a transient condition during start-up in the interpreter. |
duke@435 | 3303 | return false; |
duke@435 | 3304 | } |
roland@6377 | 3305 | ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL; |
roland@6377 | 3306 | if (md->has_trap_at(bci, m, reason) != 0) { |
duke@435 | 3307 | // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic. |
duke@435 | 3308 | // Also, if there are multiple reasons, or if there is no per-BCI record, |
duke@435 | 3309 | // assume the worst. |
duke@435 | 3310 | if (log()) |
duke@435 | 3311 | log()->elem("observe trap='%s' count='%d'", |
duke@435 | 3312 | Deoptimization::trap_reason_name(reason), |
duke@435 | 3313 | md->trap_count(reason)); |
duke@435 | 3314 | return true; |
duke@435 | 3315 | } else { |
duke@435 | 3316 | // Ignore method/bci and see if there have been too many globally. |
duke@435 | 3317 | return too_many_traps(reason, md); |
duke@435 | 3318 | } |
duke@435 | 3319 | } |
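// A minimal sketch (not from this file) of how a client typically consults
// this predicate before emitting an uncommon trap; emit_explicit_null_check()
// and emit_uncommon_trap() are hypothetical helpers:
//
//   if (C->too_many_traps(method, bci, Deoptimization::Reason_null_check)) {
//     emit_explicit_null_check();  // trap site is hot: test and branch inline
//   } else {
//     emit_uncommon_trap();        // deopt is still rare here and is cheaper
//   }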
duke@435 | 3320 | |
duke@435 | 3321 | // Less-accurate variant which does not require a method and bci. |
duke@435 | 3322 | bool Compile::too_many_traps(Deoptimization::DeoptReason reason, |
duke@435 | 3323 | ciMethodData* logmd) { |
roland@6377 | 3324 | if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) { |
duke@435 | 3325 | // Too many traps globally. |
duke@435 | 3326 | // Note that we use cumulative trap_count, not just md->trap_count. |
duke@435 | 3327 | if (log()) { |
duke@435 | 3328 | int mcount = (logmd == NULL)? -1: (int)logmd->trap_count(reason); |
duke@435 | 3329 | log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'", |
duke@435 | 3330 | Deoptimization::trap_reason_name(reason), |
duke@435 | 3331 | mcount, trap_count(reason)); |
duke@435 | 3332 | } |
duke@435 | 3333 | return true; |
duke@435 | 3334 | } else { |
duke@435 | 3335 | // The coast is clear. |
duke@435 | 3336 | return false; |
duke@435 | 3337 | } |
duke@435 | 3338 | } |
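// For illustration, if trap_reason_name(reason) is "null_check", the
// method-local count is 3 and the cumulative count is 12, the elem() call
// above would log a line of the form:
//
//   <observe trap='null_check' count='0' mcount='3' ccount='12'/>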
duke@435 | 3339 | |
duke@435 | 3340 | //--------------------------too_many_recompiles-------------------------------- |
duke@435 | 3341 | // Report if there are too many recompiles at the current method and bci. |
duke@435 | 3342 | // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff. |
duke@435 | 3343 | // Is not eager to return true, since this will cause the compiler to use |
duke@435 | 3344 | // Action_none for a trap point, to avoid too many recompilations. |
duke@435 | 3345 | bool Compile::too_many_recompiles(ciMethod* method, |
duke@435 | 3346 | int bci, |
duke@435 | 3347 | Deoptimization::DeoptReason reason) { |
duke@435 | 3348 | ciMethodData* md = method->method_data(); |
duke@435 | 3349 | if (md->is_empty()) { |
duke@435 | 3350 | // Assume the trap has not occurred, or that it occurred only |
duke@435 | 3351 | // because of a transient condition during start-up in the interpreter. |
duke@435 | 3352 | return false; |
duke@435 | 3353 | } |
duke@435 | 3354 | // Pick a cutoff point well within PerBytecodeRecompilationCutoff. |
duke@435 | 3355 | uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8; |
duke@435 | 3356 | uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero |
duke@435 | 3357 | Deoptimization::DeoptReason per_bc_reason |
duke@435 | 3358 | = Deoptimization::reason_recorded_per_bytecode_if_any(reason); |
roland@6377 | 3359 | ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL; |
duke@435 | 3360 | if ((per_bc_reason == Deoptimization::Reason_none |
roland@6377 | 3361 | || md->has_trap_at(bci, m, reason) != 0) |
duke@435 | 3362 | // The trap frequency measure we care about is the recompile count: |
roland@6377 | 3363 | && md->trap_recompiled_at(bci, m) |
duke@435 | 3364 | && md->overflow_recompile_count() >= bc_cutoff) { |
duke@435 | 3365 | // Do not emit a trap here if it has already caused recompilations. |
duke@435 | 3366 | // Also, if there are multiple reasons, or if there is no per-BCI record, |
duke@435 | 3367 | // assume the worst. |
duke@435 | 3368 | if (log()) |
duke@435 | 3369 | log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'", |
duke@435 | 3370 | Deoptimization::trap_reason_name(reason), |
duke@435 | 3371 | md->trap_count(reason), |
duke@435 | 3372 | md->overflow_recompile_count()); |
duke@435 | 3373 | return true; |
duke@435 | 3374 | } else if (trap_count(reason) != 0 |
duke@435 | 3375 | && decompile_count() >= m_cutoff) { |
duke@435 | 3376 | // Too many recompiles globally, and we have seen this sort of trap. |
duke@435 | 3377 | // Use cumulative decompile_count, not just md->decompile_count. |
duke@435 | 3378 | if (log()) |
duke@435 | 3379 | log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'", |
duke@435 | 3380 | Deoptimization::trap_reason_name(reason), |
duke@435 | 3381 | md->trap_count(reason), trap_count(reason), |
duke@435 | 3382 | md->decompile_count(), decompile_count()); |
duke@435 | 3383 | return true; |
duke@435 | 3384 | } else { |
duke@435 | 3385 | // The coast is clear. |
duke@435 | 3386 | return false; |
duke@435 | 3387 | } |
duke@435 | 3388 | } |
duke@435 | 3389 | |
goetz@6490 | 3390 | // Compute when not to trap. Used when matching trap-based nodes and |
goetz@6490 | 3391 | // by the NullCheck optimization. |
goetz@6490 | 3392 | void Compile::set_allowed_deopt_reasons() { |
goetz@6490 | 3393 | _allowed_reasons = 0; |
goetz@6490 | 3394 | if (is_method_compilation()) { |
goetz@6490 | 3395 | for (int rs = (int)Deoptimization::Reason_none+1; rs < Compile::trapHistLength; rs++) { |
goetz@6490 | 3396 | assert(rs < BitsPerInt, "recode bit map"); |
goetz@6490 | 3397 | if (!too_many_traps((Deoptimization::DeoptReason) rs)) { |
goetz@6490 | 3398 | _allowed_reasons |= nth_bit(rs); |
goetz@6490 | 3399 | } |
goetz@6490 | 3400 | } |
goetz@6490 | 3401 | } |
goetz@6490 | 3402 | } |
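// A minimal sketch of how the mask built above can be tested; the helper
// name reason_is_allowed() is illustrative, not part of this class:
//
//   static bool reason_is_allowed(int allowed_reasons,
//                                 Deoptimization::DeoptReason r) {
//     return (allowed_reasons & nth_bit((int) r)) != 0;
//   }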
duke@435 | 3403 | |
duke@435 | 3404 | #ifndef PRODUCT |
duke@435 | 3405 | //------------------------------verify_graph_edges--------------------------- |
duke@435 | 3406 | // Walk the Graph and verify that there is a one-to-one correspondence |
duke@435 | 3407 | // between Use-Def edges and Def-Use edges in the graph. |
duke@435 | 3408 | void Compile::verify_graph_edges(bool no_dead_code) { |
duke@435 | 3409 | if (VerifyGraphEdges) { |
duke@435 | 3410 | ResourceArea *area = Thread::current()->resource_area(); |
duke@435 | 3411 | Unique_Node_List visited(area); |
duke@435 | 3412 | // Call recursive graph walk to check edges |
duke@435 | 3413 | _root->verify_edges(visited); |
duke@435 | 3414 | if (no_dead_code) { |
duke@435 | 3415 | // Now make sure that no visited node is used by an unvisited node. |
duke@435 | 3416 | int dead_nodes = 0; |
duke@435 | 3417 | Unique_Node_List checked(area); |
duke@435 | 3418 | while (visited.size() > 0) { |
duke@435 | 3419 | Node* n = visited.pop(); |
duke@435 | 3420 | checked.push(n); |
duke@435 | 3421 | for (uint i = 0; i < n->outcnt(); i++) { |
duke@435 | 3422 | Node* use = n->raw_out(i); |
duke@435 | 3423 | if (checked.member(use)) continue; // already checked |
duke@435 | 3424 | if (visited.member(use)) continue; // already in the graph |
duke@435 | 3425 | if (use->is_Con()) continue; // a dead ConNode is OK |
duke@435 | 3426 | // At this point, we have found a dead node which is DU-reachable. |
duke@435 | 3427 | if (dead_nodes++ == 0) |
duke@435 | 3428 | tty->print_cr("*** Dead nodes reachable via DU edges:"); |
duke@435 | 3429 | use->dump(2); |
duke@435 | 3430 | tty->print_cr("---"); |
duke@435 | 3431 | checked.push(use); // No repeats; pretend it is now checked. |
duke@435 | 3432 | } |
duke@435 | 3433 | } |
duke@435 | 3434 | assert(dead_nodes == 0, "using nodes must be reachable from root"); |
duke@435 | 3435 | } |
duke@435 | 3436 | } |
duke@435 | 3437 | } |
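// The invariant verified above is symmetric: whenever m == n->in(i), node n
// must appear among m's out-edges, and vice versa. An illustrative check for
// a single use-def edge (edge_pair_consistent() is hypothetical):
//
//   static bool edge_pair_consistent(Node* n, uint i) {
//     Node* m = n->in(i);
//     if (m == NULL) return true;             // unused input slot
//     for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
//       if (m->fast_out(j) == n) return true; // matching def-use edge found
//     }
//     return false;                           // dangling use-def edge
//   }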
iveresov@6070 | 3438 | |
iveresov@6070 | 3439 | // Verify GC barrier consistency |
iveresov@6070 | 3440 | // Currently supported: |
iveresov@6070 | 3441 | // - G1 pre-barriers (see GraphKit::g1_write_barrier_pre()) |
iveresov@6070 | 3442 | void Compile::verify_barriers() { |
iveresov@6070 | 3443 | if (UseG1GC) { |
iveresov@6070 | 3444 | // Verify G1 pre-barriers |
iveresov@6070 | 3445 | const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + PtrQueue::byte_offset_of_active()); |
iveresov@6070 | 3446 | |
iveresov@6070 | 3447 | ResourceArea *area = Thread::current()->resource_area(); |
iveresov@6070 | 3448 | Unique_Node_List visited(area); |
iveresov@6070 | 3449 | Node_List worklist(area); |
iveresov@6070 | 3450 | // We're going to walk control flow backwards starting from the Root |
iveresov@6070 | 3451 | worklist.push(_root); |
iveresov@6070 | 3452 | while (worklist.size() > 0) { |
iveresov@6070 | 3453 | Node* x = worklist.pop(); |
iveresov@6070 | 3454 | if (x == NULL || x == top()) continue; |
iveresov@6070 | 3455 | if (visited.member(x)) { |
iveresov@6070 | 3456 | continue; |
iveresov@6070 | 3457 | } else { |
iveresov@6070 | 3458 | visited.push(x); |
iveresov@6070 | 3459 | } |
iveresov@6070 | 3460 | |
iveresov@6070 | 3461 | if (x->is_Region()) { |
iveresov@6070 | 3462 | for (uint i = 1; i < x->req(); i++) { |
iveresov@6070 | 3463 | worklist.push(x->in(i)); |
iveresov@6070 | 3464 | } |
iveresov@6070 | 3465 | } else { |
iveresov@6070 | 3466 | worklist.push(x->in(0)); |
iveresov@6070 | 3467 | // We are looking for the pattern: |
iveresov@6070 | 3468 | // /->ThreadLocal |
iveresov@6070 | 3469 | // If->Bool->CmpI->LoadB->AddP->ConL(marking_offset) |
iveresov@6070 | 3470 | // \->ConI(0) |
iveresov@6070 | 3471 | // We want to verify that the If and the LoadB have the same control |
iveresov@6070 | 3472 | // See GraphKit::g1_write_barrier_pre() |
iveresov@6070 | 3473 | if (x->is_If()) { |
iveresov@6070 | 3474 | IfNode *iff = x->as_If(); |
iveresov@6070 | 3475 | if (iff->in(1)->is_Bool() && iff->in(1)->in(1)->is_Cmp()) { |
iveresov@6070 | 3476 | CmpNode *cmp = iff->in(1)->in(1)->as_Cmp(); |
iveresov@6070 | 3477 | if (cmp->Opcode() == Op_CmpI && cmp->in(2)->is_Con() && cmp->in(2)->bottom_type()->is_int()->get_con() == 0 |
iveresov@6070 | 3478 | && cmp->in(1)->is_Load()) { |
iveresov@6070 | 3479 | LoadNode* load = cmp->in(1)->as_Load(); |
iveresov@6070 | 3480 | if (load->Opcode() == Op_LoadB && load->in(2)->is_AddP() && load->in(2)->in(2)->Opcode() == Op_ThreadLocal |
iveresov@6070 | 3481 | && load->in(2)->in(3)->is_Con() |
iveresov@6070 | 3482 | && load->in(2)->in(3)->bottom_type()->is_intptr_t()->get_con() == marking_offset) { |
iveresov@6070 | 3483 | |
iveresov@6070 | 3484 | Node* if_ctrl = iff->in(0); |
iveresov@6070 | 3485 | Node* load_ctrl = load->in(0); |
iveresov@6070 | 3486 | |
iveresov@6070 | 3487 | if (if_ctrl != load_ctrl) { |
iveresov@6070 | 3488 | // Skip possible CProj->NeverBranch in infinite loops |
iveresov@6070 | 3489 | if ((if_ctrl->is_Proj() && if_ctrl->Opcode() == Op_CProj) |
iveresov@6070 | 3490 | && (if_ctrl->in(0)->is_MultiBranch() && if_ctrl->in(0)->Opcode() == Op_NeverBranch)) { |
iveresov@6070 | 3491 | if_ctrl = if_ctrl->in(0)->in(0); |
iveresov@6070 | 3492 | } |
iveresov@6070 | 3493 | } |
iveresov@6070 | 3494 | assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match"); |
iveresov@6070 | 3495 | } |
iveresov@6070 | 3496 | } |
iveresov@6070 | 3497 | } |
iveresov@6070 | 3498 | } |
iveresov@6070 | 3499 | } |
iveresov@6070 | 3500 | } |
iveresov@6070 | 3501 | } |
iveresov@6070 | 3502 | } |
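// In source-level pseudocode, the shape matched above is the pre-barrier's
// guard (see GraphKit::g1_write_barrier_pre()), roughly:
//
//   if (thread->satb_mark_queue.active != 0) {
//     ... record the pre-value ...
//   }
//
// where the LoadB reads the queue's "active" byte at marking_offset off the
// current thread (the ThreadLocal node).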
iveresov@6070 | 3503 | |
duke@435 | 3504 | #endif |
duke@435 | 3505 | |
duke@435 | 3506 | // The Compile object keeps track of failure reasons separately from the ciEnv. |
duke@435 | 3507 | // This is required because there is not quite a 1-1 relation between the |
duke@435 | 3508 | // ciEnv and its compilation task and the Compile object. Note that one |
duke@435 | 3509 | // ciEnv might use two Compile objects, if C2Compiler::compile_method decides |
duke@435 | 3510 | // to backtrack and retry without subsuming loads. Other than this backtracking |
duke@435 | 3511 | // behavior, the Compile's failure reason is quietly copied up to the ciEnv |
duke@435 | 3512 | // by the logic in C2Compiler. |
duke@435 | 3513 | void Compile::record_failure(const char* reason) { |
duke@435 | 3514 | if (log() != NULL) { |
duke@435 | 3515 | log()->elem("failure reason='%s' phase='compile'", reason); |
duke@435 | 3516 | } |
duke@435 | 3517 | if (_failure_reason == NULL) { |
duke@435 | 3518 | // Record the first failure reason. |
duke@435 | 3519 | _failure_reason = reason; |
duke@435 | 3520 | } |
sla@5237 | 3521 | |
sla@5237 | 3522 | EventCompilerFailure event; |
sla@5237 | 3523 | if (event.should_commit()) { |
sla@5237 | 3524 | event.set_compileID(Compile::compile_id()); |
sla@5237 | 3525 | event.set_failure(reason); |
sla@5237 | 3526 | event.commit(); |
sla@5237 | 3527 | } |
sla@5237 | 3528 | |
never@657 | 3529 | if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { |
sla@5237 | 3530 | C->print_method(PHASE_FAILURE); |
never@657 | 3531 | } |
duke@435 | 3532 | _root = NULL; // flush the graph, too |
duke@435 | 3533 | } |
duke@435 | 3534 | |
duke@435 | 3535 | Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog) |
bharadwaj@4315 | 3536 | : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false), |
bharadwaj@4315 | 3537 | _phase_name(name), _dolog(dolog) |
duke@435 | 3538 | { |
duke@435 | 3539 | if (dolog) { |
duke@435 | 3540 | C = Compile::current(); |
duke@435 | 3541 | _log = C->log(); |
duke@435 | 3542 | } else { |
duke@435 | 3543 | C = NULL; |
duke@435 | 3544 | _log = NULL; |
duke@435 | 3545 | } |
duke@435 | 3546 | if (_log != NULL) { |
bharadwaj@4315 | 3547 | _log->begin_head("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes()); |
duke@435 | 3548 | _log->stamp(); |
duke@435 | 3549 | _log->end_head(); |
duke@435 | 3550 | } |
duke@435 | 3551 | } |
duke@435 | 3552 | |
duke@435 | 3553 | Compile::TracePhase::~TracePhase() { |
bharadwaj@4315 | 3554 | |
bharadwaj@4315 | 3555 | C = Compile::current(); |
bharadwaj@4315 | 3556 | if (_dolog) { |
bharadwaj@4315 | 3557 | _log = C->log(); |
bharadwaj@4315 | 3558 | } else { |
bharadwaj@4315 | 3559 | _log = NULL; |
bharadwaj@4315 | 3560 | } |
bharadwaj@4315 | 3561 | |
bharadwaj@4315 | 3562 | #ifdef ASSERT |
bharadwaj@4315 | 3563 | if (PrintIdealNodeCount) { |
bharadwaj@4315 | 3564 | tty->print_cr("phase name='%s' nodes='%d' live='%d' live_graph_walk='%d'", |
bharadwaj@4315 | 3565 | _phase_name, C->unique(), C->live_nodes(), C->count_live_nodes_by_graph_walk()); |
bharadwaj@4315 | 3566 | } |
bharadwaj@4315 | 3567 | |
bharadwaj@4315 | 3568 | if (VerifyIdealNodeCount) { |
bharadwaj@4315 | 3569 | Compile::current()->print_missing_nodes(); |
bharadwaj@4315 | 3570 | } |
bharadwaj@4315 | 3571 | #endif |
bharadwaj@4315 | 3572 | |
duke@435 | 3573 | if (_log != NULL) { |
bharadwaj@4315 | 3574 | _log->done("phase name='%s' nodes='%d' live='%d'", _phase_name, C->unique(), C->live_nodes()); |
duke@435 | 3575 | } |
duke@435 | 3576 | } |
twisti@2350 | 3577 | |
twisti@2350 | 3578 | //============================================================================= |
twisti@2350 | 3579 | // Two Constants are equal when the type and the value are equal. |
twisti@2350 | 3580 | bool Compile::Constant::operator==(const Constant& other) { |
twisti@2350 | 3581 | if (type() != other.type() ) return false; |
twisti@2350 | 3582 | if (can_be_reused() != other.can_be_reused()) return false; |
twisti@2350 | 3583 | // For floating point values we compare the bit pattern. |
twisti@2350 | 3584 | switch (type()) { |
coleenp@4037 | 3585 | case T_FLOAT: return (_v._value.i == other._v._value.i); |
twisti@2350 | 3586 | case T_LONG: |
coleenp@4037 | 3587 | case T_DOUBLE: return (_v._value.j == other._v._value.j); |
twisti@2350 | 3588 | case T_OBJECT: |
coleenp@4037 | 3589 | case T_ADDRESS: return (_v._value.l == other._v._value.l); |
coleenp@4037 | 3590 | case T_VOID: return (_v._value.l == other._v._value.l); // jump-table entries |
kvn@4199 | 3591 | case T_METADATA: return (_v._metadata == other._v._metadata); |
twisti@2350 | 3592 | default: ShouldNotReachHere(); |
twisti@2350 | 3593 | } |
twisti@2350 | 3594 | return false; |
twisti@2350 | 3595 | } |
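// Note that comparing bit patterns makes +0.0f and -0.0f distinct table
// entries (0x00000000 vs 0x80000000), while two NaNs with identical bit
// patterns compare equal and can share one entry -- exactly what a constant
// table wants, unlike the IEEE semantics of ==.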
twisti@2350 | 3596 | |
twisti@2350 | 3597 | static int type_to_size_in_bytes(BasicType t) { |
twisti@2350 | 3598 | switch (t) { |
twisti@2350 | 3599 | case T_LONG: return sizeof(jlong ); |
twisti@2350 | 3600 | case T_FLOAT: return sizeof(jfloat ); |
twisti@2350 | 3601 | case T_DOUBLE: return sizeof(jdouble); |
coleenp@4037 | 3602 | case T_METADATA: return sizeof(Metadata*); |
twisti@2350 | 3603 | // We use T_VOID as a marker for jump-table entries (labels) which |
twisti@3310 | 3604 | // need an internal word relocation. |
twisti@2350 | 3605 | case T_VOID: |
twisti@2350 | 3606 | case T_ADDRESS: |
twisti@2350 | 3607 | case T_OBJECT: return sizeof(jobject); |
twisti@2350 | 3608 | } |
twisti@2350 | 3609 | |
twisti@2350 | 3610 | ShouldNotReachHere(); |
twisti@2350 | 3611 | return -1; |
twisti@2350 | 3612 | } |
twisti@2350 | 3613 | |
twisti@3310 | 3614 | int Compile::ConstantTable::qsort_comparator(Constant* a, Constant* b) { |
twisti@3310 | 3615 | // sort descending |
twisti@3310 | 3616 | if (a->freq() > b->freq()) return -1; |
twisti@3310 | 3617 | if (a->freq() < b->freq()) return 1; |
twisti@3310 | 3618 | return 0; |
twisti@3310 | 3619 | } |
twisti@3310 | 3620 | |
twisti@2350 | 3621 | void Compile::ConstantTable::calculate_offsets_and_size() { |
twisti@3310 | 3622 | // First, sort the array by frequencies. |
twisti@3310 | 3623 | _constants.sort(qsort_comparator); |
twisti@3310 | 3624 | |
twisti@3310 | 3625 | #ifdef ASSERT |
twisti@3310 | 3626 | // Make sure all jump-table entries were sorted to the end of the |
twisti@3310 | 3627 | // array (they have a negative frequency). |
twisti@3310 | 3628 | bool found_void = false; |
twisti@3310 | 3629 | for (int i = 0; i < _constants.length(); i++) { |
twisti@3310 | 3630 | Constant con = _constants.at(i); |
twisti@3310 | 3631 | if (con.type() == T_VOID) |
twisti@3310 | 3632 | found_void = true; // jump-tables |
twisti@3310 | 3633 | else |
twisti@3310 | 3634 | assert(!found_void, "wrong sorting"); |
twisti@3310 | 3635 | } |
twisti@3310 | 3636 | #endif |
twisti@3310 | 3637 | |
twisti@3310 | 3638 | int offset = 0; |
twisti@3310 | 3639 | for (int i = 0; i < _constants.length(); i++) { |
twisti@3310 | 3640 | Constant* con = _constants.adr_at(i); |
twisti@3310 | 3641 | |
twisti@3310 | 3642 | // Align offset for type. |
twisti@3310 | 3643 | int typesize = type_to_size_in_bytes(con->type()); |
twisti@3310 | 3644 | offset = align_size_up(offset, typesize); |
twisti@3310 | 3645 | con->set_offset(offset); // set constant's offset |
twisti@3310 | 3646 | |
twisti@3310 | 3647 | if (con->type() == T_VOID) { |
twisti@3310 | 3648 | MachConstantNode* n = (MachConstantNode*) con->get_jobject(); |
twisti@3310 | 3649 | offset = offset + typesize * n->outcnt(); // expand jump-table |
twisti@3310 | 3650 | } else { |
twisti@3310 | 3651 | offset = offset + typesize; |
twisti@2350 | 3652 | } |
twisti@2350 | 3653 | } |
twisti@2350 | 3654 | |
twisti@2350 | 3655 | // Align size up to the next section start (which is insts; see |
twisti@2350 | 3656 | // CodeBuffer::align_at_start). |
twisti@2350 | 3657 | assert(_size == -1, "already set?"); |
twisti@3310 | 3658 | _size = align_size_up(offset, CodeEntryAlignment); |
twisti@2350 | 3659 | } |
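// A worked example of the offset computation above: after sorting, a table
// holding a jdouble, a jfloat and another jdouble lays them out at offsets
// 0, 8 and 16 -- the jfloat at offset 8 leaves a 4-byte gap because the
// second jdouble must be aligned up from 12 to 16. The total is then rounded
// up to CodeEntryAlignment.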
twisti@2350 | 3660 | |
twisti@2350 | 3661 | void Compile::ConstantTable::emit(CodeBuffer& cb) { |
twisti@2350 | 3662 | MacroAssembler _masm(&cb); |
twisti@3310 | 3663 | for (int i = 0; i < _constants.length(); i++) { |
twisti@3310 | 3664 | Constant con = _constants.at(i); |
twisti@3310 | 3665 | address constant_addr; |
twisti@3310 | 3666 | switch (con.type()) { |
twisti@3310 | 3667 | case T_LONG: constant_addr = _masm.long_constant( con.get_jlong() ); break; |
twisti@3310 | 3668 | case T_FLOAT: constant_addr = _masm.float_constant( con.get_jfloat() ); break; |
twisti@3310 | 3669 | case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break; |
twisti@3310 | 3670 | case T_OBJECT: { |
twisti@3310 | 3671 | jobject obj = con.get_jobject(); |
twisti@3310 | 3672 | int oop_index = _masm.oop_recorder()->find_index(obj); |
twisti@3310 | 3673 | constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index)); |
twisti@3310 | 3674 | break; |
twisti@3310 | 3675 | } |
twisti@3310 | 3676 | case T_ADDRESS: { |
twisti@3310 | 3677 | address addr = (address) con.get_jobject(); |
twisti@3310 | 3678 | constant_addr = _masm.address_constant(addr); |
twisti@3310 | 3679 | break; |
twisti@3310 | 3680 | } |
twisti@3310 | 3681 | // We use T_VOID as a marker for jump-table entries (labels) which |
twisti@3310 | 3682 | // need an internal word relocation. |
twisti@3310 | 3683 | case T_VOID: { |
twisti@3310 | 3684 | MachConstantNode* n = (MachConstantNode*) con.get_jobject(); |
twisti@3310 | 3685 | // Fill the jump-table with a dummy word. The real value is |
twisti@3310 | 3686 | // filled in later in fill_jump_table. |
twisti@3310 | 3687 | address dummy = (address) n; |
twisti@3310 | 3688 | constant_addr = _masm.address_constant(dummy); |
twisti@3310 | 3689 | // Expand jump-table |
twisti@3310 | 3690 | for (uint i = 1; i < n->outcnt(); i++) { |
twisti@3310 | 3691 | address temp_addr = _masm.address_constant(dummy + i); |
twisti@3310 | 3692 | assert(temp_addr, "consts section too small"); |
twisti@2350 | 3693 | } |
twisti@3310 | 3694 | break; |
twisti@2350 | 3695 | } |
coleenp@4037 | 3696 | case T_METADATA: { |
coleenp@4037 | 3697 | Metadata* obj = con.get_metadata(); |
coleenp@4037 | 3698 | int metadata_index = _masm.oop_recorder()->find_index(obj); |
coleenp@4037 | 3699 | constant_addr = _masm.address_constant((address) obj, metadata_Relocation::spec(metadata_index)); |
coleenp@4037 | 3700 | break; |
coleenp@4037 | 3701 | } |
twisti@3310 | 3702 | default: ShouldNotReachHere(); |
twisti@3310 | 3703 | } |
twisti@3310 | 3704 | assert(constant_addr, "consts section too small"); |
drchase@6680 | 3705 | assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), |
drchase@6680 | 3706 | err_msg_res("must be: %d == %d", (int) (constant_addr - _masm.code()->consts()->start()), (int)(con.offset()))); |
twisti@2350 | 3707 | } |
twisti@2350 | 3708 | } |
twisti@2350 | 3709 | |
twisti@2350 | 3710 | int Compile::ConstantTable::find_offset(Constant& con) const { |
twisti@2350 | 3711 | int idx = _constants.find(con); |
twisti@2350 | 3712 | assert(idx != -1, "constant must be in constant table"); |
twisti@2350 | 3713 | int offset = _constants.at(idx).offset(); |
twisti@2350 | 3714 | assert(offset != -1, "constant table not emitted yet?"); |
twisti@2350 | 3715 | return offset; |
twisti@2350 | 3716 | } |
twisti@2350 | 3717 | |
twisti@2350 | 3718 | void Compile::ConstantTable::add(Constant& con) { |
twisti@2350 | 3719 | if (con.can_be_reused()) { |
twisti@2350 | 3720 | int idx = _constants.find(con); |
twisti@2350 | 3721 | if (idx != -1 && _constants.at(idx).can_be_reused()) { |
twisti@3310 | 3722 | _constants.adr_at(idx)->inc_freq(con.freq()); // increase the frequency by the current value |
twisti@2350 | 3723 | return; |
twisti@2350 | 3724 | } |
twisti@2350 | 3725 | } |
twisti@2350 | 3726 | (void) _constants.append(con); |
twisti@2350 | 3727 | } |
twisti@2350 | 3728 | |
twisti@3310 | 3729 | Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) { |
adlertz@5509 | 3730 | Block* b = Compile::current()->cfg()->get_block_for_node(n); |
twisti@3310 | 3731 | Constant con(type, value, b->_freq); |
twisti@2350 | 3732 | add(con); |
twisti@2350 | 3733 | return con; |
twisti@2350 | 3734 | } |
twisti@2350 | 3735 | |
coleenp@4037 | 3736 | Compile::Constant Compile::ConstantTable::add(Metadata* metadata) { |
coleenp@4037 | 3737 | Constant con(metadata); |
coleenp@4037 | 3738 | add(con); |
coleenp@4037 | 3739 | return con; |
coleenp@4037 | 3740 | } |
coleenp@4037 | 3741 | |
twisti@3310 | 3742 | Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, MachOper* oper) { |
twisti@2350 | 3743 | jvalue value; |
twisti@2350 | 3744 | BasicType type = oper->type()->basic_type(); |
twisti@2350 | 3745 | switch (type) { |
twisti@2350 | 3746 | case T_LONG: value.j = oper->constantL(); break; |
twisti@2350 | 3747 | case T_FLOAT: value.f = oper->constantF(); break; |
twisti@2350 | 3748 | case T_DOUBLE: value.d = oper->constantD(); break; |
twisti@2350 | 3749 | case T_OBJECT: |
twisti@2350 | 3750 | case T_ADDRESS: value.l = (jobject) oper->constant(); break; |
coleenp@4037 | 3751 | case T_METADATA: return add((Metadata*)oper->constant()); break; |
coleenp@4037 | 3752 | default: guarantee(false, err_msg_res("unhandled type: %s", type2name(type))); |
twisti@2350 | 3753 | } |
twisti@3310 | 3754 | return add(n, type, value); |
twisti@2350 | 3755 | } |
twisti@2350 | 3756 | |
twisti@3310 | 3757 | Compile::Constant Compile::ConstantTable::add_jump_table(MachConstantNode* n) { |
twisti@2350 | 3758 | jvalue value; |
twisti@2350 | 3759 | // We can use the node pointer here to identify the right jump-table |
twisti@2350 | 3760 | // as this method is called from Compile::fill_buffer right before |
twisti@2350 | 3761 | // the MachNodes are emitted and the jump-table is filled (meaning the |
twisti@2350 | 3762 | // MachNode pointers do not change anymore). |
twisti@2350 | 3763 | value.l = (jobject) n; |
twisti@3310 | 3764 | Constant con(T_VOID, value, next_jump_table_freq(), false); // Labels of a jump-table cannot be reused. |
twisti@3310 | 3765 | add(con); |
twisti@2350 | 3766 | return con; |
twisti@2350 | 3767 | } |
twisti@2350 | 3768 | |
twisti@2350 | 3769 | void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const { |
twisti@2350 | 3770 | // If called from Compile::scratch_emit_size do nothing. |
twisti@2350 | 3771 | if (Compile::current()->in_scratch_emit_size()) return; |
twisti@2350 | 3772 | |
twisti@2350 | 3773 | assert(labels.is_nonempty(), "must be"); |
kvn@3971 | 3774 | assert((uint) labels.length() == n->outcnt(), err_msg_res("must be equal: %d == %d", labels.length(), n->outcnt())); |
twisti@2350 | 3775 | |
twisti@2350 | 3776 | // Since MachConstantNode::constant_offset() also includes |
twisti@2350 | 3777 | // table_base_offset(), we need to subtract the table_base_offset() |
twisti@2350 | 3778 | // to get the plain offset into the constant table. |
twisti@2350 | 3779 | int offset = n->constant_offset() - table_base_offset(); |
twisti@2350 | 3780 | |
twisti@2350 | 3781 | MacroAssembler _masm(&cb); |
twisti@2350 | 3782 | address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset); |
twisti@2350 | 3783 | |
twisti@3310 | 3784 | for (uint i = 0; i < n->outcnt(); i++) { |
twisti@2350 | 3785 | address* constant_addr = &jump_table_base[i]; |
drchase@6680 | 3786 | assert(*constant_addr == (((address) n) + i), err_msg_res("all jump-table entries must contain adjusted node pointer: " INTPTR_FORMAT " == " INTPTR_FORMAT, p2i(*constant_addr), p2i(((address) n) + i))); |
twisti@2350 | 3787 | *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr); |
twisti@2350 | 3788 | cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type); |
twisti@2350 | 3789 | } |
twisti@2350 | 3790 | } |
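// Example of the dummy-word handshake between emit() and this method: for a
// jump table with 3 targets attached to MachConstantNode n, emit() stored
// the placeholder addresses n, n+1 and n+2; the assert above recomputes
// n + i for each slot before the real label address is patched in.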
roland@4357 | 3791 | |
roland@4357 | 3792 | void Compile::dump_inlining() { |
kvn@5763 | 3793 | if (print_inlining() || print_intrinsics()) { |
roland@4409 | 3794 | // Print an inlining message for candidates that we couldn't inline |
roland@4409 | 3795 | // for lack of space or a non-constant receiver. |
roland@4409 | 3796 | for (int i = 0; i < _late_inlines.length(); i++) { |
roland@4409 | 3797 | CallGenerator* cg = _late_inlines.at(i); |
roland@4409 | 3798 | cg->print_inlining_late("live nodes > LiveNodeCountInliningCutoff"); |
roland@4409 | 3799 | } |
roland@4409 | 3800 | Unique_Node_List useful; |
roland@4409 | 3801 | useful.push(root()); |
roland@4409 | 3802 | for (uint next = 0; next < useful.size(); ++next) { |
roland@4409 | 3803 | Node* n = useful.at(next); |
roland@4409 | 3804 | if (n->is_Call() && n->as_Call()->generator() != NULL && n->as_Call()->generator()->call_node() == n) { |
roland@4409 | 3805 | CallNode* call = n->as_Call(); |
roland@4409 | 3806 | CallGenerator* cg = call->generator(); |
roland@4409 | 3807 | cg->print_inlining_late("receiver not constant"); |
roland@4409 | 3808 | } |
roland@4409 | 3809 | uint max = n->len(); |
roland@4409 | 3810 | for ( uint i = 0; i < max; ++i ) { |
roland@4409 | 3811 | Node *m = n->in(i); |
roland@4409 | 3812 | if ( m == NULL ) continue; |
roland@4409 | 3813 | useful.push(m); |
roland@4409 | 3814 | } |
roland@4409 | 3815 | } |
roland@4357 | 3816 | for (int i = 0; i < _print_inlining_list->length(); i++) { |
drchase@6680 | 3817 | tty->print("%s", _print_inlining_list->adr_at(i)->ss()->as_string()); |
roland@4357 | 3818 | } |
roland@4357 | 3819 | } |
roland@4357 | 3820 | } |
roland@4589 | 3821 | |
kvn@6217 | 3822 | // Dump inlining replay data to the stream. |
kvn@6217 | 3823 | // Don't change the thread state or acquire any locks. |
kvn@6217 | 3824 | void Compile::dump_inline_data(outputStream* out) { |
kvn@6217 | 3825 | InlineTree* inl_tree = ilt(); |
kvn@6217 | 3826 | if (inl_tree != NULL) { |
kvn@6217 | 3827 | out->print(" inline %d", inl_tree->count()); |
kvn@6217 | 3828 | inl_tree->dump_replay_data(out); |
kvn@6217 | 3829 | } |
kvn@6217 | 3830 | } |
kvn@6217 | 3831 | |
roland@4589 | 3832 | int Compile::cmp_expensive_nodes(Node* n1, Node* n2) { |
roland@4589 | 3833 | if (n1->Opcode() < n2->Opcode()) return -1; |
roland@4589 | 3834 | else if (n1->Opcode() > n2->Opcode()) return 1; |
roland@4589 | 3835 | |
roland@4589 | 3836 | assert(n1->req() == n2->req(), err_msg_res("can't compare %s nodes: n1->req() = %d, n2->req() = %d", NodeClassNames[n1->Opcode()], n1->req(), n2->req())); |
roland@4589 | 3837 | for (uint i = 1; i < n1->req(); i++) { |
roland@4589 | 3838 | if (n1->in(i) < n2->in(i)) return -1; |
roland@4589 | 3839 | else if (n1->in(i) > n2->in(i)) return 1; |
roland@4589 | 3840 | } |
roland@4589 | 3841 | |
roland@4589 | 3842 | return 0; |
roland@4589 | 3843 | } |
roland@4589 | 3844 | |
roland@4589 | 3845 | int Compile::cmp_expensive_nodes(Node** n1p, Node** n2p) { |
roland@4589 | 3846 | Node* n1 = *n1p; |
roland@4589 | 3847 | Node* n2 = *n2p; |
roland@4589 | 3848 | |
roland@4589 | 3849 | return cmp_expensive_nodes(n1, n2); |
roland@4589 | 3850 | } |
roland@4589 | 3851 | |
roland@4589 | 3852 | void Compile::sort_expensive_nodes() { |
roland@4589 | 3853 | if (!expensive_nodes_sorted()) { |
roland@4589 | 3854 | _expensive_nodes->sort(cmp_expensive_nodes); |
roland@4589 | 3855 | } |
roland@4589 | 3856 | } |
roland@4589 | 3857 | |
roland@4589 | 3858 | bool Compile::expensive_nodes_sorted() const { |
roland@4589 | 3859 | for (int i = 1; i < _expensive_nodes->length(); i++) { |
roland@4589 | 3860 | if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i-1)) < 0) { |
roland@4589 | 3861 | return false; |
roland@4589 | 3862 | } |
roland@4589 | 3863 | } |
roland@4589 | 3864 | return true; |
roland@4589 | 3865 | } |
roland@4589 | 3866 | |
roland@4589 | 3867 | bool Compile::should_optimize_expensive_nodes(PhaseIterGVN &igvn) { |
roland@4589 | 3868 | if (_expensive_nodes->length() == 0) { |
roland@4589 | 3869 | return false; |
roland@4589 | 3870 | } |
roland@4589 | 3871 | |
roland@4589 | 3872 | assert(OptimizeExpensiveOps, "optimization off?"); |
roland@4589 | 3873 | |
roland@4589 | 3874 | // Take this opportunity to remove dead nodes from the list |
roland@4589 | 3875 | int j = 0; |
roland@4589 | 3876 | for (int i = 0; i < _expensive_nodes->length(); i++) { |
roland@4589 | 3877 | Node* n = _expensive_nodes->at(i); |
roland@4589 | 3878 | if (!n->is_unreachable(igvn)) { |
roland@4589 | 3879 | assert(n->is_expensive(), "should be expensive"); |
roland@4589 | 3880 | _expensive_nodes->at_put(j, n); |
roland@4589 | 3881 | j++; |
roland@4589 | 3882 | } |
roland@4589 | 3883 | } |
roland@4589 | 3884 | _expensive_nodes->trunc_to(j); |
roland@4589 | 3885 | |
roland@4589 | 3886 | // Then sort the list so that similar nodes are next to each other |
roland@4589 | 3887 | // and check for at least two nodes of identical kind with the same data |
roland@4589 | 3888 | // inputs. |
roland@4589 | 3889 | sort_expensive_nodes(); |
roland@4589 | 3890 | |
roland@4589 | 3891 | for (int i = 0; i < _expensive_nodes->length()-1; i++) { |
roland@4589 | 3892 | if (cmp_expensive_nodes(_expensive_nodes->adr_at(i), _expensive_nodes->adr_at(i+1)) == 0) { |
roland@4589 | 3893 | return true; |
roland@4589 | 3894 | } |
roland@4589 | 3895 | } |
roland@4589 | 3896 | |
roland@4589 | 3897 | return false; |
roland@4589 | 3898 | } |
roland@4589 | 3899 | |
roland@4589 | 3900 | void Compile::cleanup_expensive_nodes(PhaseIterGVN &igvn) { |
roland@4589 | 3901 | if (_expensive_nodes->length() == 0) { |
roland@4589 | 3902 | return; |
roland@4589 | 3903 | } |
roland@4589 | 3904 | |
roland@4589 | 3905 | assert(OptimizeExpensiveOps, "optimization off?"); |
roland@4589 | 3906 | |
roland@4589 | 3907 | // Sort to bring similar nodes next to each other and clear the |
roland@4589 | 3908 | // control input of nodes for which there's only a single copy. |
roland@4589 | 3909 | sort_expensive_nodes(); |
roland@4589 | 3910 | |
roland@4589 | 3911 | int j = 0; |
roland@4589 | 3912 | int identical = 0; |
roland@4589 | 3913 | int i = 0; |
roland@4589 | 3914 | for (; i < _expensive_nodes->length()-1; i++) { |
roland@4589 | 3915 | assert(j <= i, "can't write beyond current index"); |
roland@4589 | 3916 | if (_expensive_nodes->at(i)->Opcode() == _expensive_nodes->at(i+1)->Opcode()) { |
roland@4589 | 3917 | identical++; |
roland@4589 | 3918 | _expensive_nodes->at_put(j++, _expensive_nodes->at(i)); |
roland@4589 | 3919 | continue; |
roland@4589 | 3920 | } |
roland@4589 | 3921 | if (identical > 0) { |
roland@4589 | 3922 | _expensive_nodes->at_put(j++, _expensive_nodes->at(i)); |
roland@4589 | 3923 | identical = 0; |
roland@4589 | 3924 | } else { |
roland@4589 | 3925 | Node* n = _expensive_nodes->at(i); |
roland@4589 | 3926 | igvn.hash_delete(n); |
roland@4589 | 3927 | n->set_req(0, NULL); |
roland@4589 | 3928 | igvn.hash_insert(n); |
roland@4589 | 3929 | } |
roland@4589 | 3930 | } |
roland@4589 | 3931 | if (identical > 0) { |
roland@4589 | 3932 | _expensive_nodes->at_put(j++, _expensive_nodes->at(i)); |
roland@4589 | 3933 | } else if (_expensive_nodes->length() >= 1) { |
roland@4589 | 3934 | Node* n = _expensive_nodes->at(i); |
roland@4589 | 3935 | igvn.hash_delete(n); |
roland@4589 | 3936 | n->set_req(0, NULL); |
roland@4589 | 3937 | igvn.hash_insert(n); |
roland@4589 | 3938 | } |
roland@4589 | 3939 | _expensive_nodes->trunc_to(j); |
roland@4589 | 3940 | } |
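// Example of the effect: if the sorted list holds two nodes of the same
// opcode followed by one of a different opcode, the matching pair stays on
// the list with control inputs intact (loop opts may later common them),
// while the unique node has its control cleared so IGVN can optimize it on
// its own, and it is dropped from the list.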
roland@4589 | 3941 | |
roland@4589 | 3942 | void Compile::add_expensive_node(Node * n) { |
roland@4589 | 3943 | assert(!_expensive_nodes->contains(n), "duplicate entry in expensive list"); |
roland@4589 | 3944 | assert(n->is_expensive(), "expensive nodes with non-null control here only"); |
roland@4589 | 3945 | assert(!n->is_CFG() && !n->is_Mem(), "no cfg or memory nodes here"); |
roland@4589 | 3946 | if (OptimizeExpensiveOps) { |
roland@4589 | 3947 | _expensive_nodes->append(n); |
roland@4589 | 3948 | } else { |
roland@4589 | 3949 | // Clear control input and let IGVN optimize expensive nodes if |
roland@4589 | 3950 | // OptimizeExpensiveOps is off. |
roland@4589 | 3951 | n->set_req(0, NULL); |
roland@4589 | 3952 | } |
roland@4589 | 3953 | } |
shade@4691 | 3954 | |
roland@5991 | 3955 | /** |
roland@5991 | 3956 | * Remove the speculative part of types and clean up the graph |
roland@5991 | 3957 | */ |
roland@5991 | 3958 | void Compile::remove_speculative_types(PhaseIterGVN &igvn) { |
roland@5991 | 3959 | if (UseTypeSpeculation) { |
roland@5991 | 3960 | Unique_Node_List worklist; |
roland@5991 | 3961 | worklist.push(root()); |
roland@5991 | 3962 | int modified = 0; |
roland@5991 | 3963 | // Go over all type nodes that carry a speculative type, drop the |
roland@5991 | 3964 | // speculative part of the type, and enqueue the node for an IGVN pass |
roland@5991 | 3965 | // which may optimize it out. |
roland@5991 | 3966 | for (uint next = 0; next < worklist.size(); ++next) { |
roland@5991 | 3967 | Node *n = worklist.at(next); |
roland@6313 | 3968 | if (n->is_Type()) { |
roland@5991 | 3969 | TypeNode* tn = n->as_Type(); |
roland@6313 | 3970 | const Type* t = tn->type(); |
roland@6313 | 3971 | const Type* t_no_spec = t->remove_speculative(); |
roland@6313 | 3972 | if (t_no_spec != t) { |
roland@6313 | 3973 | bool in_hash = igvn.hash_delete(n); |
roland@6313 | 3974 | assert(in_hash, "node should be in igvn hash table"); |
roland@6313 | 3975 | tn->set_type(t_no_spec); |
roland@6313 | 3976 | igvn.hash_insert(n); |
roland@6313 | 3977 | igvn._worklist.push(n); // give it a chance to go away |
roland@6313 | 3978 | modified++; |
roland@6313 | 3979 | } |
roland@5991 | 3980 | } |
roland@5991 | 3981 | uint max = n->len(); |
roland@5991 | 3982 | for( uint i = 0; i < max; ++i ) { |
roland@5991 | 3983 | Node *m = n->in(i); |
roland@5991 | 3984 | if (not_a_node(m)) continue; |
roland@5991 | 3985 | worklist.push(m); |
roland@5991 | 3986 | } |
roland@5991 | 3987 | } |
roland@5991 | 3988 | // Drop the speculative part of all types in the igvn's type table |
roland@5991 | 3989 | igvn.remove_speculative_types(); |
roland@5991 | 3990 | if (modified > 0) { |
roland@5991 | 3991 | igvn.optimize(); |
roland@5991 | 3992 | } |
roland@6313 | 3993 | #ifdef ASSERT |
roland@6313 | 3994 | // Verify that after the IGVN is over no speculative type has resurfaced |
roland@6313 | 3995 | worklist.clear(); |
roland@6313 | 3996 | worklist.push(root()); |
roland@6313 | 3997 | for (uint next = 0; next < worklist.size(); ++next) { |
roland@6313 | 3998 | Node *n = worklist.at(next); |
anoll@6638 | 3999 | const Type* t = igvn.type_or_null(n); |
anoll@6638 | 4000 | assert((t == NULL) || (t == t->remove_speculative()), "no more speculative types"); |
roland@6313 | 4001 | if (n->is_Type()) { |
roland@6313 | 4002 | t = n->as_Type()->type(); |
roland@6313 | 4003 | assert(t == t->remove_speculative(), "no more speculative types"); |
roland@6313 | 4004 | } |
roland@6313 | 4005 | uint max = n->len(); |
roland@6313 | 4006 | for( uint i = 0; i < max; ++i ) { |
roland@6313 | 4007 | Node *m = n->in(i); |
roland@6313 | 4008 | if (not_a_node(m)) continue; |
roland@6313 | 4009 | worklist.push(m); |
roland@6313 | 4010 | } |
roland@6313 | 4011 | } |
roland@6313 | 4012 | igvn.check_no_speculative_types(); |
roland@6313 | 4013 | #endif |
roland@5991 | 4014 | } |
roland@5991 | 4015 | } |
roland@5991 | 4016 | |
shade@4691 | 4017 | // Auxiliary method to support randomized stressing/fuzzing. |
shade@4691 | 4018 | // |
shade@4691 | 4019 | // This method can be called an arbitrary number of times, with the current count |
shade@4691 | 4020 | // as the argument. The logic allows selecting a single candidate from the |
shade@4691 | 4021 | // running list of candidates as follows: |
shade@4691 | 4022 | // int count = 0; |
shade@4691 | 4023 | // Cand* selected = null; |
shade@4691 | 4024 | // while(cand = cand->next()) { |
shade@4691 | 4025 | // if (randomized_select(++count)) { |
shade@4691 | 4026 | // selected = cand; |
shade@4691 | 4027 | // } |
shade@4691 | 4028 | // } |
shade@4691 | 4029 | // |
shade@4691 | 4030 | // Including count equalizes the chances any candidate is "selected". |
shade@4691 | 4031 | // This is useful when we don't have the complete list of candidates to choose |
shade@4691 | 4032 | // from uniformly. In this case, we need to adjust the randomness of the |
shade@4691 | 4033 | // selection, or else we will end up biasing the selection towards the later |
shade@4691 | 4034 | // candidates. |
shade@4691 | 4035 | // |
shade@4691 | 4036 | // A quick back-of-the-envelope calculation shows that for a list of n |
shade@4691 | 4037 | // candidates, equal probability for each candidate to persist as "best" |
shade@4691 | 4038 | // can be achieved by replacing the current best with the k-th incoming |
shade@4691 | 4039 | // candidate with probability 1/k. It is easy to show that by the end of |
shade@4691 | 4040 | // the run the probability for any candidate converges to 1/n, giving a |
shade@4691 | 4041 | // uniform distribution among all the candidates. |
shade@4691 | 4042 | // |
shade@4691 | 4043 | // We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large. |
shade@4691 | 4044 | #define RANDOMIZED_DOMAIN_POW 29 |
shade@4691 | 4045 | #define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW) |
shade@4691 | 4046 | #define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1) |
shade@4691 | 4047 | bool Compile::randomized_select(int count) { |
shade@4691 | 4048 | assert(count > 0, "only positive"); |
shade@4691 | 4049 | return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count); |
shade@4691 | 4050 | } |
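// A short induction for the 1/n claim in the comment above randomized_select:
// assume each of the first k-1 candidates is currently "best" with
// probability 1/(k-1). Candidate k replaces the current best with
// probability 1/k, so it is selected with probability 1/k, and every earlier
// candidate survives with probability (1/(k-1)) * (1 - 1/k) = 1/k as well.
// By induction, the final distribution over all n candidates is uniform.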