Tue, 09 Mar 2010 20:16:19 +0100
6919934: JSR 292 needs to support x86 C1
Summary: This implements JSR 292 support for C1 x86.
Reviewed-by: never, jrose, kvn
duke@435 | 1 | /* |
twisti@1700 | 2 | * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_compile.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | /// Support for intrinsics. |
duke@435 | 29 | |
duke@435 | 30 | // Return the index at which m must be inserted (or already exists). |
duke@435 | 31 | // The sort order is by the address of the ciMethod, with is_virtual as minor key. |
duke@435 | 32 | int Compile::intrinsic_insertion_index(ciMethod* m, bool is_virtual) { |
duke@435 | 33 | #ifdef ASSERT |
duke@435 | 34 | for (int i = 1; i < _intrinsics->length(); i++) { |
duke@435 | 35 | CallGenerator* cg1 = _intrinsics->at(i-1); |
duke@435 | 36 | CallGenerator* cg2 = _intrinsics->at(i); |
duke@435 | 37 | assert(cg1->method() != cg2->method() |
duke@435 | 38 | ? cg1->method() < cg2->method() |
duke@435 | 39 | : cg1->is_virtual() < cg2->is_virtual(), |
duke@435 | 40 | "compiler intrinsics list must stay sorted"); |
duke@435 | 41 | } |
duke@435 | 42 | #endif |
duke@435 | 43 | // Binary search sorted list, in decreasing intervals [lo, hi]. |
duke@435 | 44 | int lo = 0, hi = _intrinsics->length()-1; |
duke@435 | 45 | while (lo <= hi) { |
duke@435 | 46 | int mid = (uint)(hi + lo) / 2; |
duke@435 | 47 | ciMethod* mid_m = _intrinsics->at(mid)->method(); |
duke@435 | 48 | if (m < mid_m) { |
duke@435 | 49 | hi = mid-1; |
duke@435 | 50 | } else if (m > mid_m) { |
duke@435 | 51 | lo = mid+1; |
duke@435 | 52 | } else { |
duke@435 | 53 | // look at minor sort key |
duke@435 | 54 | bool mid_virt = _intrinsics->at(mid)->is_virtual(); |
duke@435 | 55 | if (is_virtual < mid_virt) { |
duke@435 | 56 | hi = mid-1; |
duke@435 | 57 | } else if (is_virtual > mid_virt) { |
duke@435 | 58 | lo = mid+1; |
duke@435 | 59 | } else { |
duke@435 | 60 | return mid; // exact match |
duke@435 | 61 | } |
duke@435 | 62 | } |
duke@435 | 63 | } |
duke@435 | 64 | return lo; // inexact match |
duke@435 | 65 | } |
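
A minimal standalone sketch of the same lower-bound search, assuming a plain std::vector<Entry> in place of GrowableArray<CallGenerator*>; Entry and insertion_index are illustrative names, not HotSpot API:

    #include <vector>

    struct Entry {
      const void* method;       // stands in for the ciMethod* address (major key)
      bool        is_virtual;   // minor key
    };

    // Index at which (m, is_virtual) must be inserted, or the index of an
    // exact match: the same [lo, hi] interval search as above.
    static int insertion_index(const std::vector<Entry>& v,
                               const void* m, bool is_virtual) {
      int lo = 0, hi = (int)v.size() - 1;
      while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;                  // overflow-safe midpoint
        if      (m < v[mid].method)              hi = mid - 1;
        else if (m > v[mid].method)              lo = mid + 1;
        else if (is_virtual < v[mid].is_virtual) hi = mid - 1;
        else if (is_virtual > v[mid].is_virtual) lo = mid + 1;
        else return mid;                               // exact match
      }
      return lo;                                       // insertion point
    }
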
duke@435 | 66 | |
duke@435 | 67 | void Compile::register_intrinsic(CallGenerator* cg) { |
duke@435 | 68 | if (_intrinsics == NULL) { |
duke@435 | 69 | _intrinsics = new GrowableArray<CallGenerator*>(60); |
duke@435 | 70 | } |
duke@435 | 71 | // This code is stolen from ciObjectFactory::insert. |
duke@435 | 72 | // Really, GrowableArray should have methods for |
duke@435 | 73 | // insert_at, remove_at, and binary_search. |
duke@435 | 74 | int len = _intrinsics->length(); |
duke@435 | 75 | int index = intrinsic_insertion_index(cg->method(), cg->is_virtual()); |
duke@435 | 76 | if (index == len) { |
duke@435 | 77 | _intrinsics->append(cg); |
duke@435 | 78 | } else { |
duke@435 | 79 | #ifdef ASSERT |
duke@435 | 80 | CallGenerator* oldcg = _intrinsics->at(index); |
duke@435 | 81 | assert(oldcg->method() != cg->method() || oldcg->is_virtual() != cg->is_virtual(), "don't register twice"); |
duke@435 | 82 | #endif |
duke@435 | 83 | _intrinsics->append(_intrinsics->at(len-1)); |
duke@435 | 84 | int pos; |
duke@435 | 85 | for (pos = len-2; pos >= index; pos--) { |
duke@435 | 86 | _intrinsics->at_put(pos+1,_intrinsics->at(pos)); |
duke@435 | 87 | } |
duke@435 | 88 | _intrinsics->at_put(index, cg); |
duke@435 | 89 | } |
duke@435 | 90 | assert(find_intrinsic(cg->method(), cg->is_virtual()) == cg, "registration worked"); |
duke@435 | 91 | } |
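
The append-and-shift insertion the comment wishes GrowableArray supported natively, restated on std::vector (insert_at is an illustrative name; the index == length case is a plain append and is handled separately above):

    #include <vector>

    // Precondition: 0 <= index < v.size(), so the vector is non-empty.
    static void insert_at(std::vector<int>& v, int index, int value) {
      v.push_back(v.back());                 // grow by duplicating the tail
      for (int pos = (int)v.size() - 3; pos >= index; pos--)
        v[pos + 1] = v[pos];                 // shift [index, len-2] right by one
      v[index] = value;                      // place the new element
    }
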
duke@435 | 92 | |
duke@435 | 93 | CallGenerator* Compile::find_intrinsic(ciMethod* m, bool is_virtual) { |
duke@435 | 94 | assert(m->is_loaded(), "don't try this on unloaded methods"); |
duke@435 | 95 | if (_intrinsics != NULL) { |
duke@435 | 96 | int index = intrinsic_insertion_index(m, is_virtual); |
duke@435 | 97 | if (index < _intrinsics->length() |
duke@435 | 98 | && _intrinsics->at(index)->method() == m |
duke@435 | 99 | && _intrinsics->at(index)->is_virtual() == is_virtual) { |
duke@435 | 100 | return _intrinsics->at(index); |
duke@435 | 101 | } |
duke@435 | 102 | } |
duke@435 | 103 | // Lazily create intrinsics for intrinsic IDs well-known in the runtime. |
jrose@1291 | 104 | if (m->intrinsic_id() != vmIntrinsics::_none && |
jrose@1291 | 105 | m->intrinsic_id() <= vmIntrinsics::LAST_COMPILER_INLINE) { |
duke@435 | 106 | CallGenerator* cg = make_vm_intrinsic(m, is_virtual); |
duke@435 | 107 | if (cg != NULL) { |
duke@435 | 108 | // Save it for next time: |
duke@435 | 109 | register_intrinsic(cg); |
duke@435 | 110 | return cg; |
duke@435 | 111 | } else { |
duke@435 | 112 | gather_intrinsic_statistics(m->intrinsic_id(), is_virtual, _intrinsic_disabled); |
duke@435 | 113 | } |
duke@435 | 114 | } |
duke@435 | 115 | return NULL; |
duke@435 | 116 | } |
duke@435 | 117 | |
duke@435 | 118 | // Compile:: register_library_intrinsics and make_vm_intrinsic are defined |
duke@435 | 119 | // in library_call.cpp. |
duke@435 | 120 | |
duke@435 | 121 | |
duke@435 | 122 | #ifndef PRODUCT |
duke@435 | 123 | // statistics gathering... |
duke@435 | 124 | |
duke@435 | 125 | juint Compile::_intrinsic_hist_count[vmIntrinsics::ID_LIMIT] = {0}; |
duke@435 | 126 | jubyte Compile::_intrinsic_hist_flags[vmIntrinsics::ID_LIMIT] = {0}; |
duke@435 | 127 | |
duke@435 | 128 | bool Compile::gather_intrinsic_statistics(vmIntrinsics::ID id, bool is_virtual, int flags) { |
duke@435 | 129 | assert(id > vmIntrinsics::_none && id < vmIntrinsics::ID_LIMIT, "oob"); |
duke@435 | 130 | int oflags = _intrinsic_hist_flags[id]; |
duke@435 | 131 | assert(flags != 0, "what happened?"); |
duke@435 | 132 | if (is_virtual) { |
duke@435 | 133 | flags |= _intrinsic_virtual; |
duke@435 | 134 | } |
duke@435 | 135 | bool changed = (flags != oflags); |
duke@435 | 136 | if ((flags & _intrinsic_worked) != 0) { |
duke@435 | 137 | juint count = (_intrinsic_hist_count[id] += 1); |
duke@435 | 138 | if (count == 1) { |
duke@435 | 139 | changed = true; // first time |
duke@435 | 140 | } |
duke@435 | 141 | // increment the overall count also: |
duke@435 | 142 | _intrinsic_hist_count[vmIntrinsics::_none] += 1; |
duke@435 | 143 | } |
duke@435 | 144 | if (changed) { |
duke@435 | 145 | if (((oflags ^ flags) & _intrinsic_virtual) != 0) { |
duke@435 | 146 | // Something changed about the intrinsic's virtuality. |
duke@435 | 147 | if ((flags & _intrinsic_virtual) != 0) { |
duke@435 | 148 | // This is the first use of this intrinsic as a virtual call. |
duke@435 | 149 | if (oflags != 0) { |
duke@435 | 150 | // We already saw it as a non-virtual, so note both cases. |
duke@435 | 151 | flags |= _intrinsic_both; |
duke@435 | 152 | } |
duke@435 | 153 | } else if ((oflags & _intrinsic_both) == 0) { |
duke@435 | 154 | // This is the first use of this intrinsic as a non-virtual. |
duke@435 | 155 | flags |= _intrinsic_both; |
duke@435 | 156 | } |
duke@435 | 157 | } |
duke@435 | 158 | _intrinsic_hist_flags[id] = (jubyte) (oflags | flags); |
duke@435 | 159 | } |
duke@435 | 160 | // update the overall flags also: |
duke@435 | 161 | _intrinsic_hist_flags[vmIntrinsics::_none] |= (jubyte) flags; |
duke@435 | 162 | return changed; |
duke@435 | 163 | } |
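
A sketch of the flag-merging rule above, with illustrative bit values: per-intrinsic histogram flags only accumulate, and BOTH records that an intrinsic has been seen both virtually and non-virtually:

    enum { WORKED = 1, VIRTUAL = 2, BOTH = 4 };   // illustrative bit values

    static unsigned merge_flags(unsigned oflags, unsigned flags, bool is_virtual) {
      if (is_virtual) flags |= VIRTUAL;
      if ((oflags ^ flags) & VIRTUAL) {       // virtuality differs from history
        if (flags & VIRTUAL) {
          if (oflags != 0) flags |= BOTH;     // was non-virtual, now also virtual
        } else if ((oflags & BOTH) == 0) {
          flags |= BOTH;                      // was virtual, now also non-virtual
        }
      }
      return oflags | flags;                  // histogram flags never reset
    }
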
duke@435 | 164 | |
duke@435 | 165 | static char* format_flags(int flags, char* buf) { |
duke@435 | 166 | buf[0] = 0; |
duke@435 | 167 | if ((flags & Compile::_intrinsic_worked) != 0) strcat(buf, ",worked"); |
duke@435 | 168 | if ((flags & Compile::_intrinsic_failed) != 0) strcat(buf, ",failed"); |
duke@435 | 169 | if ((flags & Compile::_intrinsic_disabled) != 0) strcat(buf, ",disabled"); |
duke@435 | 170 | if ((flags & Compile::_intrinsic_virtual) != 0) strcat(buf, ",virtual"); |
duke@435 | 171 | if ((flags & Compile::_intrinsic_both) != 0) strcat(buf, ",nonvirtual"); |
duke@435 | 172 | if (buf[0] == 0) strcat(buf, ","); |
duke@435 | 173 | assert(buf[0] == ',', "must be"); |
duke@435 | 174 | return &buf[1]; |
duke@435 | 175 | } |
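
A runnable restatement of the leading-comma trick in format_flags: every match appends ",name", and returning &buf[1] drops the first comma (flag bit values are illustrative):

    #include <cstdio>
    #include <cstring>

    static const char* format_flags_sketch(int flags, char* buf) {
      buf[0] = 0;
      if (flags & 1) strcat(buf, ",worked");
      if (flags & 2) strcat(buf, ",virtual");
      if (buf[0] == 0) strcat(buf, ",");   // no flags: result is ""
      return &buf[1];
    }

    int main() {
      char buf[64];
      printf("[%s]\n", format_flags_sketch(3, buf));  // prints [worked,virtual]
      printf("[%s]\n", format_flags_sketch(0, buf));  // prints []
      return 0;
    }
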
duke@435 | 176 | |
duke@435 | 177 | void Compile::print_intrinsic_statistics() { |
duke@435 | 178 | char flagsbuf[100]; |
duke@435 | 179 | ttyLocker ttyl; |
duke@435 | 180 | if (xtty != NULL) xtty->head("statistics type='intrinsic'"); |
duke@435 | 181 | tty->print_cr("Compiler intrinsic usage:"); |
duke@435 | 182 | juint total = _intrinsic_hist_count[vmIntrinsics::_none]; |
duke@435 | 183 | if (total == 0) total = 1; // avoid div0 in case of no successes |
duke@435 | 184 | #define PRINT_STAT_LINE(name, c, f) \ |
duke@435 | 185 | tty->print_cr(" %4d (%4.1f%%) %s (%s)", (int)(c), ((c) * 100.0) / total, name, f); |
duke@435 | 186 | for (int index = 1 + (int)vmIntrinsics::_none; index < (int)vmIntrinsics::ID_LIMIT; index++) { |
duke@435 | 187 | vmIntrinsics::ID id = (vmIntrinsics::ID) index; |
duke@435 | 188 | int flags = _intrinsic_hist_flags[id]; |
duke@435 | 189 | juint count = _intrinsic_hist_count[id]; |
duke@435 | 190 | if ((flags | count) != 0) { |
duke@435 | 191 | PRINT_STAT_LINE(vmIntrinsics::name_at(id), count, format_flags(flags, flagsbuf)); |
duke@435 | 192 | } |
duke@435 | 193 | } |
duke@435 | 194 | PRINT_STAT_LINE("total", total, format_flags(_intrinsic_hist_flags[vmIntrinsics::_none], flagsbuf)); |
duke@435 | 195 | if (xtty != NULL) xtty->tail("statistics"); |
duke@435 | 196 | } |
duke@435 | 197 | |
duke@435 | 198 | void Compile::print_statistics() { |
duke@435 | 199 | { ttyLocker ttyl; |
duke@435 | 200 | if (xtty != NULL) xtty->head("statistics type='opto'"); |
duke@435 | 201 | Parse::print_statistics(); |
duke@435 | 202 | PhaseCCP::print_statistics(); |
duke@435 | 203 | PhaseRegAlloc::print_statistics(); |
duke@435 | 204 | Scheduling::print_statistics(); |
duke@435 | 205 | PhasePeephole::print_statistics(); |
duke@435 | 206 | PhaseIdealLoop::print_statistics(); |
duke@435 | 207 | if (xtty != NULL) xtty->tail("statistics"); |
duke@435 | 208 | } |
duke@435 | 209 | if (_intrinsic_hist_flags[vmIntrinsics::_none] != 0) { |
duke@435 | 210 | // put this under its own <statistics> element. |
duke@435 | 211 | print_intrinsic_statistics(); |
duke@435 | 212 | } |
duke@435 | 213 | } |
duke@435 | 214 | #endif //PRODUCT |
duke@435 | 215 | |
duke@435 | 216 | // Support for bundling info |
duke@435 | 217 | Bundle* Compile::node_bundling(const Node *n) { |
duke@435 | 218 | assert(valid_bundle_info(n), "oob"); |
duke@435 | 219 | return &_node_bundling_base[n->_idx]; |
duke@435 | 220 | } |
duke@435 | 221 | |
duke@435 | 222 | bool Compile::valid_bundle_info(const Node *n) { |
duke@435 | 223 | return (_node_bundling_limit > n->_idx); |
duke@435 | 224 | } |
duke@435 | 225 | |
duke@435 | 226 | |
never@1515 | 227 | void Compile::gvn_replace_by(Node* n, Node* nn) { |
never@1515 | 228 | for (DUIterator_Last imin, i = n->last_outs(imin); i >= imin; ) { |
never@1515 | 229 | Node* use = n->last_out(i); |
never@1515 | 230 | bool is_in_table = initial_gvn()->hash_delete(use); |
never@1515 | 231 | uint uses_found = 0; |
never@1515 | 232 | for (uint j = 0; j < use->len(); j++) { |
never@1515 | 233 | if (use->in(j) == n) { |
never@1515 | 234 | if (j < use->req()) |
never@1515 | 235 | use->set_req(j, nn); |
never@1515 | 236 | else |
never@1515 | 237 | use->set_prec(j, nn); |
never@1515 | 238 | uses_found++; |
never@1515 | 239 | } |
never@1515 | 240 | } |
never@1515 | 241 | if (is_in_table) { |
never@1515 | 242 | // reinsert into table |
never@1515 | 243 | initial_gvn()->hash_find_insert(use); |
never@1515 | 244 | } |
never@1515 | 245 | record_for_igvn(use); |
never@1515 | 246 | i -= uses_found; // we deleted 1 or more copies of this edge |
never@1515 | 247 | } |
never@1515 | 248 | } |
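
A toy def-use graph showing why the outer loop steps by uses_found: every input slot of a user that names n corresponds to one out entry of n, and all of them disappear at once when that user is rewritten (TNode and replace_by are illustrative, not HotSpot types):

    #include <algorithm>
    #include <vector>

    struct TNode {
      std::vector<TNode*> in;    // input edges; may name the same node twice
      std::vector<TNode*> out;   // one entry per input slot that names us
    };

    static void replace_by(TNode* n, TNode* nn) {
      for (int i = (int)n->out.size() - 1; i >= 0; ) {
        TNode* use = n->out[i];
        int uses_found = 0;
        for (TNode*& in : use->in) {
          if (in == n) {                     // rewrite every slot naming n
            in = nn;
            nn->out.push_back(use);
            uses_found++;
          }
        }
        // All of use's out entries on n vanish together; back the index
        // up by the same count, as 'i -= uses_found' does above.
        n->out.erase(std::remove(n->out.begin(), n->out.end(), use),
                     n->out.end());
        i -= uses_found;
      }
    }
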
never@1515 | 249 | |
never@1515 | 250 | |
never@1515 | 251 | |
never@1515 | 252 | |
duke@435 | 253 | // Identify all nodes that are reachable from below, useful. |
duke@435 | 254 | // Use breadth-first pass that records state in a Unique_Node_List, |
duke@435 | 255 | // recursive traversal is slower. |
duke@435 | 256 | void Compile::identify_useful_nodes(Unique_Node_List &useful) { |
duke@435 | 257 | int estimated_worklist_size = unique(); |
duke@435 | 258 | useful.map( estimated_worklist_size, NULL ); // preallocate space |
duke@435 | 259 | |
duke@435 | 260 | // Initialize worklist |
duke@435 | 261 | if (root() != NULL) { useful.push(root()); } |
duke@435 | 262 | // If 'top' is cached, declare it useful to preserve cached node |
duke@435 | 263 | if( cached_top_node() ) { useful.push(cached_top_node()); } |
duke@435 | 264 | |
duke@435 | 265 | // Push all useful nodes onto the list, breadth-first |
duke@435 | 266 | for( uint next = 0; next < useful.size(); ++next ) { |
duke@435 | 267 | assert( next < unique(), "Unique useful nodes < total nodes"); |
duke@435 | 268 | Node *n = useful.at(next); |
duke@435 | 269 | uint max = n->len(); |
duke@435 | 270 | for( uint i = 0; i < max; ++i ) { |
duke@435 | 271 | Node *m = n->in(i); |
duke@435 | 272 | if( m == NULL ) continue; |
duke@435 | 273 | useful.push(m); |
duke@435 | 274 | } |
duke@435 | 275 | } |
duke@435 | 276 | } |
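
The same worklist pattern in miniature: a vector plus a visited set approximates Unique_Node_List, and chasing the growing tail of the list gives the breadth-first order (GNode is an illustrative stand-in for Node):

    #include <unordered_set>
    #include <vector>

    struct GNode { std::vector<GNode*> in; };

    static std::vector<GNode*> identify_useful(GNode* root) {
      std::vector<GNode*> useful;
      std::unordered_set<GNode*> seen;
      if (root != nullptr && seen.insert(root).second) useful.push_back(root);
      for (size_t next = 0; next < useful.size(); ++next) {  // list grows as we scan
        for (GNode* m : useful[next]->in) {
          if (m != nullptr && seen.insert(m).second)
            useful.push_back(m);             // each node pushed at most once
        }
      }
      return useful;
    }
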
duke@435 | 277 | |
duke@435 | 278 | // Disconnect all useless nodes by disconnecting those at the boundary. |
duke@435 | 279 | void Compile::remove_useless_nodes(Unique_Node_List &useful) { |
duke@435 | 280 | uint next = 0; |
duke@435 | 281 | while( next < useful.size() ) { |
duke@435 | 282 | Node *n = useful.at(next++); |
duke@435 | 283 | // Use raw traversal of out edges since this code removes out edges |
duke@435 | 284 | int max = n->outcnt(); |
duke@435 | 285 | for (int j = 0; j < max; ++j ) { |
duke@435 | 286 | Node* child = n->raw_out(j); |
duke@435 | 287 | if( ! useful.member(child) ) { |
duke@435 | 288 | assert( !child->is_top() || child != top(), |
duke@435 | 289 | "If top is cached in Compile object it is in useful list"); |
duke@435 | 290 | // Only need to remove this out-edge to the useless node |
duke@435 | 291 | n->raw_del_out(j); |
duke@435 | 292 | --j; |
duke@435 | 293 | --max; |
duke@435 | 294 | } |
duke@435 | 295 | } |
duke@435 | 296 | if (n->outcnt() == 1 && n->has_special_unique_user()) { |
duke@435 | 297 | record_for_igvn( n->unique_out() ); |
duke@435 | 298 | } |
duke@435 | 299 | } |
duke@435 | 300 | debug_only(verify_graph_edges(true/*check for no_dead_code*/);) |
duke@435 | 301 | } |
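
The delete-while-scanning idiom above in isolation: raw_del_out fills the deleted slot with the last edge, so both the index and the bound must back up to revisit that slot (keep is a caller-supplied predicate; names illustrative):

    #include <vector>

    static void remove_unless(std::vector<int>& edges, bool (*keep)(int)) {
      int max = (int)edges.size();
      for (int j = 0; j < max; ++j) {
        if (!keep(edges[j])) {
          edges[j] = edges.back();   // raw_del_out-style: last edge fills the hole
          edges.pop_back();
          --j; --max;                // re-examine slot j with its new occupant
        }
      }
    }
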
duke@435 | 302 | |
duke@435 | 303 | //------------------------------frame_size_in_words----------------------------- |
duke@435 | 304 | // frame_slots in units of words |
duke@435 | 305 | int Compile::frame_size_in_words() const { |
duke@435 | 306 | // shift is 0 in LP32 and 1 in LP64 |
duke@435 | 307 | const int shift = (LogBytesPerWord - LogBytesPerInt); |
duke@435 | 308 | int words = _frame_slots >> shift; |
duke@435 | 309 | assert( words << shift == _frame_slots, "frame size must be properly aligned in LP64" ); |
duke@435 | 310 | return words; |
duke@435 | 311 | } |
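
A worked restatement of the conversion, assuming LP64 (8-byte words, log 3; 4-byte slots, log 2; so shift = 1): 14 slots are 7 words, while an odd slot count trips the alignment assert.

    #include <cassert>

    static int frame_size_in_words_sketch(int frame_slots) {
      const int shift = 3 - 2;       // LogBytesPerWord - LogBytesPerInt on LP64
      int words = frame_slots >> shift;
      assert(words << shift == frame_slots && "frame must be whole words");
      return words;                  // e.g. 14 slots -> 7 words
    }
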
duke@435 | 312 | |
duke@435 | 313 | // ============================================================================ |
duke@435 | 314 | //------------------------------CompileWrapper--------------------------------- |
duke@435 | 315 | class CompileWrapper : public StackObj { |
duke@435 | 316 | Compile *const _compile; |
duke@435 | 317 | public: |
duke@435 | 318 | CompileWrapper(Compile* compile); |
duke@435 | 319 | |
duke@435 | 320 | ~CompileWrapper(); |
duke@435 | 321 | }; |
duke@435 | 322 | |
duke@435 | 323 | CompileWrapper::CompileWrapper(Compile* compile) : _compile(compile) { |
duke@435 | 324 | // the Compile* pointer is stored in the current ciEnv: |
duke@435 | 325 | ciEnv* env = compile->env(); |
duke@435 | 326 | assert(env == ciEnv::current(), "must already be a ciEnv active"); |
duke@435 | 327 | assert(env->compiler_data() == NULL, "compile already active?"); |
duke@435 | 328 | env->set_compiler_data(compile); |
duke@435 | 329 | assert(compile == Compile::current(), "sanity"); |
duke@435 | 330 | |
duke@435 | 331 | compile->set_type_dict(NULL); |
duke@435 | 332 | compile->set_type_hwm(NULL); |
duke@435 | 333 | compile->set_type_last_size(0); |
duke@435 | 334 | compile->set_last_tf(NULL, NULL); |
duke@435 | 335 | compile->set_indexSet_arena(NULL); |
duke@435 | 336 | compile->set_indexSet_free_block_list(NULL); |
duke@435 | 337 | compile->init_type_arena(); |
duke@435 | 338 | Type::Initialize(compile); |
duke@435 | 339 | _compile->set_scratch_buffer_blob(NULL); |
duke@435 | 340 | _compile->begin_method(); |
duke@435 | 341 | } |
duke@435 | 342 | CompileWrapper::~CompileWrapper() { |
duke@435 | 343 | _compile->end_method(); |
duke@435 | 344 | if (_compile->scratch_buffer_blob() != NULL) |
duke@435 | 345 | BufferBlob::free(_compile->scratch_buffer_blob()); |
duke@435 | 346 | _compile->env()->set_compiler_data(NULL); |
duke@435 | 347 | } |
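
CompileWrapper is classic RAII: publish the active compile on entry, tear it down on every exit path. The same shape in miniature, where current_compile stands in for the ciEnv's compiler_data slot (names illustrative):

    #include <cassert>

    static void* current_compile = nullptr;   // stands in for env->compiler_data()

    struct CompileGuard {
      explicit CompileGuard(void* compile) {
        assert(current_compile == nullptr && "compile already active?");
        current_compile = compile;            // publish on construction
      }
      ~CompileGuard() {
        current_compile = nullptr;            // cleared even on early return
      }
    };
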
duke@435 | 348 | |
duke@435 | 349 | |
duke@435 | 350 | //----------------------------print_compile_messages--------------------------- |
duke@435 | 351 | void Compile::print_compile_messages() { |
duke@435 | 352 | #ifndef PRODUCT |
duke@435 | 353 | // Check if recompiling |
duke@435 | 354 | if (_subsume_loads == false && PrintOpto) { |
duke@435 | 355 | // Recompiling without allowing machine instructions to subsume loads |
duke@435 | 356 | tty->print_cr("*********************************************************"); |
duke@435 | 357 | tty->print_cr("** Bailout: Recompile without subsuming loads **"); |
duke@435 | 358 | tty->print_cr("*********************************************************"); |
duke@435 | 359 | } |
kvn@473 | 360 | if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) { |
kvn@473 | 361 | // Recompiling without escape analysis |
kvn@473 | 362 | tty->print_cr("*********************************************************"); |
kvn@473 | 363 | tty->print_cr("** Bailout: Recompile without escape analysis **"); |
kvn@473 | 364 | tty->print_cr("*********************************************************"); |
kvn@473 | 365 | } |
duke@435 | 366 | if (env()->break_at_compile()) { |
twisti@1040 | 367 | // Open the debugger when compiling this method. |
duke@435 | 368 | tty->print("### Breaking when compiling: "); |
duke@435 | 369 | method()->print_short_name(); |
duke@435 | 370 | tty->cr(); |
duke@435 | 371 | BREAKPOINT; |
duke@435 | 372 | } |
duke@435 | 373 | |
duke@435 | 374 | if( PrintOpto ) { |
duke@435 | 375 | if (is_osr_compilation()) { |
duke@435 | 376 | tty->print("[OSR]%3d", _compile_id); |
duke@435 | 377 | } else { |
duke@435 | 378 | tty->print("%3d", _compile_id); |
duke@435 | 379 | } |
duke@435 | 380 | } |
duke@435 | 381 | #endif |
duke@435 | 382 | } |
duke@435 | 383 | |
duke@435 | 384 | |
duke@435 | 385 | void Compile::init_scratch_buffer_blob() { |
duke@435 | 386 | if( scratch_buffer_blob() != NULL ) return; |
duke@435 | 387 | |
duke@435 | 388 | // Construct a temporary CodeBuffer to have it construct a BufferBlob |
duke@435 | 389 | // Cache this BufferBlob for this compile. |
duke@435 | 390 | ResourceMark rm; |
duke@435 | 391 | int size = (MAX_inst_size + MAX_stubs_size + MAX_const_size); |
duke@435 | 392 | BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size); |
duke@435 | 393 | // Record the buffer blob for next time. |
duke@435 | 394 | set_scratch_buffer_blob(blob); |
kvn@598 | 395 | // Have we run out of code space? |
kvn@598 | 396 | if (scratch_buffer_blob() == NULL) { |
kvn@598 | 397 | // Let CompileBroker disable further compilations. |
kvn@598 | 398 | record_failure("Not enough space for scratch buffer in CodeCache"); |
kvn@598 | 399 | return; |
kvn@598 | 400 | } |
duke@435 | 401 | |
duke@435 | 402 | // Initialize the relocation buffers |
duke@435 | 403 | relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size; |
duke@435 | 404 | set_scratch_locs_memory(locs_buf); |
duke@435 | 405 | } |
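
Worth noting in the locs_buf line above: the cast binds before the subtraction, so the tail of the blob is measured in relocInfo records, not bytes. In miniature (Reloc is an illustrative stand-in for relocInfo):

    struct Reloc { unsigned short bits; };   // stands in for relocInfo

    // The last max_locs *records* of the blob become the relocation area.
    static Reloc* carve_locs(unsigned char* insts_end, int max_locs) {
      return (Reloc*)insts_end - max_locs;   // element-wise pointer arithmetic
    }
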
duke@435 | 406 | |
duke@435 | 407 | |
duke@435 | 408 | //-----------------------scratch_emit_size------------------------------------- |
duke@435 | 409 | // Helper function that computes size by emitting code |
duke@435 | 410 | uint Compile::scratch_emit_size(const Node* n) { |
duke@435 | 411 | // Emit into a trash buffer and count bytes emitted. |
duke@435 | 412 | // This is a pretty expensive way to compute a size, |
duke@435 | 413 | // but it works well enough if seldom used. |
duke@435 | 414 | // All common fixed-size instructions are given a size |
duke@435 | 415 | // method by the AD file. |
duke@435 | 416 | // Note that the scratch buffer blob and locs memory are |
duke@435 | 417 | // allocated at the beginning of the compile task, and |
duke@435 | 418 | // may be shared by several calls to scratch_emit_size. |
duke@435 | 419 | // The allocation of the scratch buffer blob is particularly |
duke@435 | 420 | // expensive, since it has to grab the code cache lock. |
duke@435 | 421 | BufferBlob* blob = this->scratch_buffer_blob(); |
duke@435 | 422 | assert(blob != NULL, "Initialize BufferBlob at start"); |
duke@435 | 423 | assert(blob->size() > MAX_inst_size, "sanity"); |
duke@435 | 424 | relocInfo* locs_buf = scratch_locs_memory(); |
duke@435 | 425 | address blob_begin = blob->instructions_begin(); |
duke@435 | 426 | address blob_end = (address)locs_buf; |
duke@435 | 427 | assert(blob->instructions_contains(blob_end), "sanity"); |
duke@435 | 428 | CodeBuffer buf(blob_begin, blob_end - blob_begin); |
duke@435 | 429 | buf.initialize_consts_size(MAX_const_size); |
duke@435 | 430 | buf.initialize_stubs_size(MAX_stubs_size); |
duke@435 | 431 | assert(locs_buf != NULL, "sanity"); |
duke@435 | 432 | int lsize = MAX_locs_size / 2; |
duke@435 | 433 | buf.insts()->initialize_shared_locs(&locs_buf[0], lsize); |
duke@435 | 434 | buf.stubs()->initialize_shared_locs(&locs_buf[lsize], lsize); |
duke@435 | 435 | n->emit(buf, this->regalloc()); |
duke@435 | 436 | return buf.code_size(); |
duke@435 | 437 | } |
duke@435 | 438 | |
duke@435 | 439 | |
duke@435 | 440 | // ============================================================================ |
duke@435 | 441 | //------------------------------Compile standard------------------------------- |
duke@435 | 442 | debug_only( int Compile::_debug_idx = 100000; ) |
duke@435 | 443 | |
duke@435 | 444 | // Compile a method. entry_bci is -1 for normal compilations and indicates |
duke@435 | 445 | // the continuation bci for on stack replacement. |
duke@435 | 446 | |
duke@435 | 447 | |
kvn@473 | 448 | Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis ) |
duke@435 | 449 | : Phase(Compiler), |
duke@435 | 450 | _env(ci_env), |
duke@435 | 451 | _log(ci_env->log()), |
duke@435 | 452 | _compile_id(ci_env->compile_id()), |
duke@435 | 453 | _save_argument_registers(false), |
duke@435 | 454 | _stub_name(NULL), |
duke@435 | 455 | _stub_function(NULL), |
duke@435 | 456 | _stub_entry_point(NULL), |
duke@435 | 457 | _method(target), |
duke@435 | 458 | _entry_bci(osr_bci), |
duke@435 | 459 | _initial_gvn(NULL), |
duke@435 | 460 | _for_igvn(NULL), |
duke@435 | 461 | _warm_calls(NULL), |
duke@435 | 462 | _subsume_loads(subsume_loads), |
kvn@473 | 463 | _do_escape_analysis(do_escape_analysis), |
duke@435 | 464 | _failure_reason(NULL), |
duke@435 | 465 | _code_buffer("Compile::Fill_buffer"), |
duke@435 | 466 | _orig_pc_slot(0), |
duke@435 | 467 | _orig_pc_slot_offset_in_bytes(0), |
twisti@1700 | 468 | _has_method_handle_invokes(false), |
duke@435 | 469 | _node_bundling_limit(0), |
duke@435 | 470 | _node_bundling_base(NULL), |
kvn@1294 | 471 | _java_calls(0), |
kvn@1294 | 472 | _inner_loops(0), |
duke@435 | 473 | #ifndef PRODUCT |
duke@435 | 474 | _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")), |
duke@435 | 475 | _printer(IdealGraphPrinter::printer()), |
duke@435 | 476 | #endif |
duke@435 | 477 | _congraph(NULL) { |
duke@435 | 478 | C = this; |
duke@435 | 479 | |
duke@435 | 480 | CompileWrapper cw(this); |
duke@435 | 481 | #ifndef PRODUCT |
duke@435 | 482 | if (TimeCompiler2) { |
duke@435 | 483 | tty->print(" "); |
duke@435 | 484 | target->holder()->name()->print(); |
duke@435 | 485 | tty->print("."); |
duke@435 | 486 | target->print_short_name(); |
duke@435 | 487 | tty->print(" "); |
duke@435 | 488 | } |
duke@435 | 489 | TraceTime t1("Total compilation time", &_t_totalCompilation, TimeCompiler, TimeCompiler2); |
duke@435 | 490 | TraceTime t2(NULL, &_t_methodCompilation, TimeCompiler, false); |
jrose@535 | 491 | bool print_opto_assembly = PrintOptoAssembly || _method->has_option("PrintOptoAssembly"); |
jrose@535 | 492 | if (!print_opto_assembly) { |
jrose@535 | 493 | bool print_assembly = (PrintAssembly || _method->should_print_assembly()); |
jrose@535 | 494 | if (print_assembly && !Disassembler::can_decode()) { |
jrose@535 | 495 | tty->print_cr("PrintAssembly request changed to PrintOptoAssembly"); |
jrose@535 | 496 | print_opto_assembly = true; |
jrose@535 | 497 | } |
jrose@535 | 498 | } |
jrose@535 | 499 | set_print_assembly(print_opto_assembly); |
never@802 | 500 | set_parsed_irreducible_loop(false); |
duke@435 | 501 | #endif |
duke@435 | 502 | |
duke@435 | 503 | if (ProfileTraps) { |
duke@435 | 504 | // Make sure the method being compiled gets its own MDO, |
duke@435 | 505 | // so we can at least track the decompile_count(). |
duke@435 | 506 | method()->build_method_data(); |
duke@435 | 507 | } |
duke@435 | 508 | |
duke@435 | 509 | Init(::AliasLevel); |
duke@435 | 510 | |
duke@435 | 511 | |
duke@435 | 512 | print_compile_messages(); |
duke@435 | 513 | |
duke@435 | 514 | if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) ) |
duke@435 | 515 | _ilt = InlineTree::build_inline_tree_root(); |
duke@435 | 516 | else |
duke@435 | 517 | _ilt = NULL; |
duke@435 | 518 | |
duke@435 | 519 | // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice |
duke@435 | 520 | assert(num_alias_types() >= AliasIdxRaw, ""); |
duke@435 | 521 | |
duke@435 | 522 | #define MINIMUM_NODE_HASH 1023 |
duke@435 | 523 | // Node list that Iterative GVN will start with |
duke@435 | 524 | Unique_Node_List for_igvn(comp_arena()); |
duke@435 | 525 | set_for_igvn(&for_igvn); |
duke@435 | 526 | |
duke@435 | 527 | // GVN that will be run immediately on new nodes |
duke@435 | 528 | uint estimated_size = method()->code_size()*4+64; |
duke@435 | 529 | estimated_size = (estimated_size < MINIMUM_NODE_HASH ? MINIMUM_NODE_HASH : estimated_size); |
duke@435 | 530 | PhaseGVN gvn(node_arena(), estimated_size); |
duke@435 | 531 | set_initial_gvn(&gvn); |
duke@435 | 532 | |
duke@435 | 533 | { // Scope for timing the parser |
duke@435 | 534 | TracePhase t3("parse", &_t_parser, true); |
duke@435 | 535 | |
duke@435 | 536 | // Put top into the hash table ASAP. |
duke@435 | 537 | initial_gvn()->transform_no_reclaim(top()); |
duke@435 | 538 | |
duke@435 | 539 | // Set up tf(), start(), and find a CallGenerator. |
duke@435 | 540 | CallGenerator* cg; |
duke@435 | 541 | if (is_osr_compilation()) { |
duke@435 | 542 | const TypeTuple *domain = StartOSRNode::osr_domain(); |
duke@435 | 543 | const TypeTuple *range = TypeTuple::make_range(method()->signature()); |
duke@435 | 544 | init_tf(TypeFunc::make(domain, range)); |
duke@435 | 545 | StartNode* s = new (this, 2) StartOSRNode(root(), domain); |
duke@435 | 546 | initial_gvn()->set_type_bottom(s); |
duke@435 | 547 | init_start(s); |
duke@435 | 548 | cg = CallGenerator::for_osr(method(), entry_bci()); |
duke@435 | 549 | } else { |
duke@435 | 550 | // Normal case. |
duke@435 | 551 | init_tf(TypeFunc::make(method())); |
duke@435 | 552 | StartNode* s = new (this, 2) StartNode(root(), tf()->domain()); |
duke@435 | 553 | initial_gvn()->set_type_bottom(s); |
duke@435 | 554 | init_start(s); |
duke@435 | 555 | float past_uses = method()->interpreter_invocation_count(); |
duke@435 | 556 | float expected_uses = past_uses; |
duke@435 | 557 | cg = CallGenerator::for_inline(method(), expected_uses); |
duke@435 | 558 | } |
duke@435 | 559 | if (failing()) return; |
duke@435 | 560 | if (cg == NULL) { |
duke@435 | 561 | record_method_not_compilable_all_tiers("cannot parse method"); |
duke@435 | 562 | return; |
duke@435 | 563 | } |
duke@435 | 564 | JVMState* jvms = build_start_state(start(), tf()); |
duke@435 | 565 | if ((jvms = cg->generate(jvms)) == NULL) { |
duke@435 | 566 | record_method_not_compilable("method parse failed"); |
duke@435 | 567 | return; |
duke@435 | 568 | } |
duke@435 | 569 | GraphKit kit(jvms); |
duke@435 | 570 | |
duke@435 | 571 | if (!kit.stopped()) { |
duke@435 | 572 | // Accept return values, and transfer control we know not where. |
duke@435 | 573 | // This is done by a special, unique ReturnNode bound to root. |
duke@435 | 574 | return_values(kit.jvms()); |
duke@435 | 575 | } |
duke@435 | 576 | |
duke@435 | 577 | if (kit.has_exceptions()) { |
duke@435 | 578 | // Any exceptions that escape from this call must be rethrown |
duke@435 | 579 | // to whatever caller is dynamically above us on the stack. |
duke@435 | 580 | // This is done by a special, unique RethrowNode bound to root. |
duke@435 | 581 | rethrow_exceptions(kit.transfer_exceptions_into_jvms()); |
duke@435 | 582 | } |
duke@435 | 583 | |
never@1515 | 584 | if (!failing() && has_stringbuilder()) { |
never@1515 | 585 | { |
never@1515 | 586 | // remove useless nodes to make the usage analysis simpler |
never@1515 | 587 | ResourceMark rm; |
never@1515 | 588 | PhaseRemoveUseless pru(initial_gvn(), &for_igvn); |
never@1515 | 589 | } |
never@1515 | 590 | |
never@1515 | 591 | { |
never@1515 | 592 | ResourceMark rm; |
never@1515 | 593 | print_method("Before StringOpts", 3); |
never@1515 | 594 | PhaseStringOpts pso(initial_gvn(), &for_igvn); |
never@1515 | 595 | print_method("After StringOpts", 3); |
never@1515 | 596 | } |
never@1515 | 597 | |
never@1515 | 598 | // now inline anything that we skipped the first time around |
never@1515 | 599 | while (_late_inlines.length() > 0) { |
never@1515 | 600 | CallGenerator* cg = _late_inlines.pop(); |
never@1515 | 601 | cg->do_late_inline(); |
never@1515 | 602 | } |
never@1515 | 603 | } |
never@1515 | 604 | assert(_late_inlines.length() == 0, "should have been processed"); |
never@1515 | 605 | |
never@852 | 606 | print_method("Before RemoveUseless", 3); |
never@802 | 607 | |
duke@435 | 608 | // Remove clutter produced by parsing. |
duke@435 | 609 | if (!failing()) { |
duke@435 | 610 | ResourceMark rm; |
duke@435 | 611 | PhaseRemoveUseless pru(initial_gvn(), &for_igvn); |
duke@435 | 612 | } |
duke@435 | 613 | } |
duke@435 | 614 | |
duke@435 | 615 | // Note: Large methods are capped off in do_one_bytecode(). |
duke@435 | 616 | if (failing()) return; |
duke@435 | 617 | |
duke@435 | 618 | // After parsing, node notes are no longer automagic. |
duke@435 | 619 | // They must be propagated by register_new_node_with_optimizer(), |
duke@435 | 620 | // clone(), or the like. |
duke@435 | 621 | set_default_node_notes(NULL); |
duke@435 | 622 | |
duke@435 | 623 | for (;;) { |
duke@435 | 624 | int successes = Inline_Warm(); |
duke@435 | 625 | if (failing()) return; |
duke@435 | 626 | if (successes == 0) break; |
duke@435 | 627 | } |
duke@435 | 628 | |
duke@435 | 629 | // Drain the list. |
duke@435 | 630 | Finish_Warm(); |
duke@435 | 631 | #ifndef PRODUCT |
duke@435 | 632 | if (_printer) { |
duke@435 | 633 | _printer->print_inlining(this); |
duke@435 | 634 | } |
duke@435 | 635 | #endif |
duke@435 | 636 | |
duke@435 | 637 | if (failing()) return; |
duke@435 | 638 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 639 | |
duke@435 | 640 | // Perform escape analysis |
kvn@679 | 641 | if (_do_escape_analysis && ConnectionGraph::has_candidates(this)) { |
kvn@679 | 642 | TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, true); |
kvn@688 | 643 | // Add ConP#NULL and ConN#NULL nodes before ConnectionGraph construction. |
kvn@688 | 644 | PhaseGVN* igvn = initial_gvn(); |
kvn@688 | 645 | Node* oop_null = igvn->zerocon(T_OBJECT); |
kvn@688 | 646 | Node* noop_null = igvn->zerocon(T_NARROWOOP); |
kvn@679 | 647 | |
kvn@679 | 648 | _congraph = new(comp_arena()) ConnectionGraph(this); |
kvn@679 | 649 | bool has_non_escaping_obj = _congraph->compute_escape(); |
kvn@473 | 650 | |
duke@435 | 651 | #ifndef PRODUCT |
duke@435 | 652 | if (PrintEscapeAnalysis) { |
duke@435 | 653 | _congraph->dump(); |
duke@435 | 654 | } |
duke@435 | 655 | #endif |
kvn@688 | 656 | // Cleanup. |
kvn@688 | 657 | if (oop_null->outcnt() == 0) |
kvn@688 | 658 | igvn->hash_delete(oop_null); |
kvn@688 | 659 | if (noop_null->outcnt() == 0) |
kvn@688 | 660 | igvn->hash_delete(noop_null); |
kvn@688 | 661 | |
kvn@679 | 662 | if (!has_non_escaping_obj) { |
kvn@679 | 663 | _congraph = NULL; |
kvn@679 | 664 | } |
kvn@679 | 665 | |
kvn@679 | 666 | if (failing()) return; |
duke@435 | 667 | } |
duke@435 | 668 | // Now optimize |
duke@435 | 669 | Optimize(); |
duke@435 | 670 | if (failing()) return; |
duke@435 | 671 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 672 | |
duke@435 | 673 | #ifndef PRODUCT |
duke@435 | 674 | if (PrintIdeal) { |
duke@435 | 675 | ttyLocker ttyl; // keep the following output all in one block |
duke@435 | 676 | // This output goes directly to the tty, not the compiler log. |
duke@435 | 677 | // To enable tools to match it up with the compilation activity, |
duke@435 | 678 | // be sure to tag this tty output with the compile ID. |
duke@435 | 679 | if (xtty != NULL) { |
duke@435 | 680 | xtty->head("ideal compile_id='%d'%s", compile_id(), |
duke@435 | 681 | is_osr_compilation() ? " compile_kind='osr'" : |
duke@435 | 682 | ""); |
duke@435 | 683 | } |
duke@435 | 684 | root()->dump(9999); |
duke@435 | 685 | if (xtty != NULL) { |
duke@435 | 686 | xtty->tail("ideal"); |
duke@435 | 687 | } |
duke@435 | 688 | } |
duke@435 | 689 | #endif |
duke@435 | 690 | |
duke@435 | 691 | // Now that we know the size of all the monitors we can add a fixed slot |
duke@435 | 692 | // for the original deopt pc. |
duke@435 | 693 | |
duke@435 | 694 | _orig_pc_slot = fixed_slots(); |
duke@435 | 695 | int next_slot = _orig_pc_slot + (sizeof(address) / VMRegImpl::stack_slot_size); |
duke@435 | 696 | set_fixed_slots(next_slot); |
duke@435 | 697 | |
duke@435 | 698 | // Now generate code |
duke@435 | 699 | Code_Gen(); |
duke@435 | 700 | if (failing()) return; |
duke@435 | 701 | |
duke@435 | 702 | // Check if we want to skip execution of all compiled code. |
duke@435 | 703 | { |
duke@435 | 704 | #ifndef PRODUCT |
duke@435 | 705 | if (OptoNoExecute) { |
duke@435 | 706 | record_method_not_compilable("+OptoNoExecute"); // Flag as failed |
duke@435 | 707 | return; |
duke@435 | 708 | } |
duke@435 | 709 | TracePhase t2("install_code", &_t_registerMethod, TimeCompiler); |
duke@435 | 710 | #endif |
duke@435 | 711 | |
duke@435 | 712 | if (is_osr_compilation()) { |
duke@435 | 713 | _code_offsets.set_value(CodeOffsets::Verified_Entry, 0); |
duke@435 | 714 | _code_offsets.set_value(CodeOffsets::OSR_Entry, _first_block_size); |
duke@435 | 715 | } else { |
duke@435 | 716 | _code_offsets.set_value(CodeOffsets::Verified_Entry, _first_block_size); |
duke@435 | 717 | _code_offsets.set_value(CodeOffsets::OSR_Entry, 0); |
duke@435 | 718 | } |
duke@435 | 719 | |
duke@435 | 720 | env()->register_method(_method, _entry_bci, |
duke@435 | 721 | &_code_offsets, |
duke@435 | 722 | _orig_pc_slot_offset_in_bytes, |
duke@435 | 723 | code_buffer(), |
duke@435 | 724 | frame_size_in_words(), _oop_map_set, |
duke@435 | 725 | &_handler_table, &_inc_table, |
duke@435 | 726 | compiler, |
duke@435 | 727 | env()->comp_level(), |
duke@435 | 728 | true, /*has_debug_info*/ |
duke@435 | 729 | has_unsafe_access() |
duke@435 | 730 | ); |
duke@435 | 731 | } |
duke@435 | 732 | } |
duke@435 | 733 | |
duke@435 | 734 | //------------------------------Compile---------------------------------------- |
duke@435 | 735 | // Compile a runtime stub |
duke@435 | 736 | Compile::Compile( ciEnv* ci_env, |
duke@435 | 737 | TypeFunc_generator generator, |
duke@435 | 738 | address stub_function, |
duke@435 | 739 | const char *stub_name, |
duke@435 | 740 | int is_fancy_jump, |
duke@435 | 741 | bool pass_tls, |
duke@435 | 742 | bool save_arg_registers, |
duke@435 | 743 | bool return_pc ) |
duke@435 | 744 | : Phase(Compiler), |
duke@435 | 745 | _env(ci_env), |
duke@435 | 746 | _log(ci_env->log()), |
duke@435 | 747 | _compile_id(-1), |
duke@435 | 748 | _save_argument_registers(save_arg_registers), |
duke@435 | 749 | _method(NULL), |
duke@435 | 750 | _stub_name(stub_name), |
duke@435 | 751 | _stub_function(stub_function), |
duke@435 | 752 | _stub_entry_point(NULL), |
duke@435 | 753 | _entry_bci(InvocationEntryBci), |
duke@435 | 754 | _initial_gvn(NULL), |
duke@435 | 755 | _for_igvn(NULL), |
duke@435 | 756 | _warm_calls(NULL), |
duke@435 | 757 | _orig_pc_slot(0), |
duke@435 | 758 | _orig_pc_slot_offset_in_bytes(0), |
duke@435 | 759 | _subsume_loads(true), |
kvn@473 | 760 | _do_escape_analysis(false), |
duke@435 | 761 | _failure_reason(NULL), |
duke@435 | 762 | _code_buffer("Compile::Fill_buffer"), |
twisti@1700 | 763 | _has_method_handle_invokes(false), |
duke@435 | 764 | _node_bundling_limit(0), |
duke@435 | 765 | _node_bundling_base(NULL), |
kvn@1294 | 766 | _java_calls(0), |
kvn@1294 | 767 | _inner_loops(0), |
duke@435 | 768 | #ifndef PRODUCT |
duke@435 | 769 | _trace_opto_output(TraceOptoOutput), |
duke@435 | 770 | _printer(NULL), |
duke@435 | 771 | #endif |
duke@435 | 772 | _congraph(NULL) { |
duke@435 | 773 | C = this; |
duke@435 | 774 | |
duke@435 | 775 | #ifndef PRODUCT |
duke@435 | 776 | TraceTime t1(NULL, &_t_totalCompilation, TimeCompiler, false); |
duke@435 | 777 | TraceTime t2(NULL, &_t_stubCompilation, TimeCompiler, false); |
duke@435 | 778 | set_print_assembly(PrintFrameConverterAssembly); |
never@802 | 779 | set_parsed_irreducible_loop(false); |
duke@435 | 780 | #endif |
duke@435 | 781 | CompileWrapper cw(this); |
duke@435 | 782 | Init(/*AliasLevel=*/ 0); |
duke@435 | 783 | init_tf((*generator)()); |
duke@435 | 784 | |
duke@435 | 785 | { |
duke@435 | 786 | // The following is a dummy for the sake of GraphKit::gen_stub |
duke@435 | 787 | Unique_Node_List for_igvn(comp_arena()); |
duke@435 | 788 | set_for_igvn(&for_igvn); // not used, but some GraphKit guys push on this |
duke@435 | 789 | PhaseGVN gvn(Thread::current()->resource_area(),255); |
duke@435 | 790 | set_initial_gvn(&gvn); // not significant, but GraphKit guys use it pervasively |
duke@435 | 791 | gvn.transform_no_reclaim(top()); |
duke@435 | 792 | |
duke@435 | 793 | GraphKit kit; |
duke@435 | 794 | kit.gen_stub(stub_function, stub_name, is_fancy_jump, pass_tls, return_pc); |
duke@435 | 795 | } |
duke@435 | 796 | |
duke@435 | 797 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 798 | Code_Gen(); |
duke@435 | 799 | if (failing()) return; |
duke@435 | 800 | |
duke@435 | 801 | |
duke@435 | 802 | // Entry point will be accessed using compile->stub_entry_point(); |
duke@435 | 803 | if (code_buffer() == NULL) { |
duke@435 | 804 | Matcher::soft_match_failure(); |
duke@435 | 805 | } else { |
duke@435 | 806 | if (PrintAssembly && (WizardMode || Verbose)) |
duke@435 | 807 | tty->print_cr("### Stub::%s", stub_name); |
duke@435 | 808 | |
duke@435 | 809 | if (!failing()) { |
duke@435 | 810 | assert(_fixed_slots == 0, "no fixed slots used for runtime stubs"); |
duke@435 | 811 | |
duke@435 | 812 | // Make the NMethod |
duke@435 | 813 | // For now we mark the frame as never safe for profile stackwalking |
duke@435 | 814 | RuntimeStub *rs = RuntimeStub::new_runtime_stub(stub_name, |
duke@435 | 815 | code_buffer(), |
duke@435 | 816 | CodeOffsets::frame_never_safe, |
duke@435 | 817 | // _code_offsets.value(CodeOffsets::Frame_Complete), |
duke@435 | 818 | frame_size_in_words(), |
duke@435 | 819 | _oop_map_set, |
duke@435 | 820 | save_arg_registers); |
duke@435 | 821 | assert(rs != NULL && rs->is_runtime_stub(), "sanity check"); |
duke@435 | 822 | |
duke@435 | 823 | _stub_entry_point = rs->entry_point(); |
duke@435 | 824 | } |
duke@435 | 825 | } |
duke@435 | 826 | } |
duke@435 | 827 | |
duke@435 | 828 | #ifndef PRODUCT |
duke@435 | 829 | void print_opto_verbose_signature( const TypeFunc *j_sig, const char *stub_name ) { |
duke@435 | 830 | if(PrintOpto && Verbose) { |
duke@435 | 831 | tty->print("%s ", stub_name); j_sig->print_flattened(); tty->cr(); |
duke@435 | 832 | } |
duke@435 | 833 | } |
duke@435 | 834 | #endif |
duke@435 | 835 | |
duke@435 | 836 | void Compile::print_codes() { |
duke@435 | 837 | } |
duke@435 | 838 | |
duke@435 | 839 | //------------------------------Init------------------------------------------- |
duke@435 | 840 | // Prepare for a single compilation |
duke@435 | 841 | void Compile::Init(int aliaslevel) { |
duke@435 | 842 | _unique = 0; |
duke@435 | 843 | _regalloc = NULL; |
duke@435 | 844 | |
duke@435 | 845 | _tf = NULL; // filled in later |
duke@435 | 846 | _top = NULL; // cached later |
duke@435 | 847 | _matcher = NULL; // filled in later |
duke@435 | 848 | _cfg = NULL; // filled in later |
duke@435 | 849 | |
duke@435 | 850 | set_24_bit_selection_and_mode(Use24BitFP, false); |
duke@435 | 851 | |
duke@435 | 852 | _node_note_array = NULL; |
duke@435 | 853 | _default_node_notes = NULL; |
duke@435 | 854 | |
duke@435 | 855 | _immutable_memory = NULL; // filled in at first inquiry |
duke@435 | 856 | |
duke@435 | 857 | // Globally visible Nodes |
duke@435 | 858 | // First set TOP to NULL to give safe behavior during creation of RootNode |
duke@435 | 859 | set_cached_top_node(NULL); |
duke@435 | 860 | set_root(new (this, 3) RootNode()); |
duke@435 | 861 | // Now that you have a Root to point to, create the real TOP |
duke@435 | 862 | set_cached_top_node( new (this, 1) ConNode(Type::TOP) ); |
duke@435 | 863 | set_recent_alloc(NULL, NULL); |
duke@435 | 864 | |
duke@435 | 865 | // Create Debug Information Recorder to record scopes, oopmaps, etc. |
duke@435 | 866 | env()->set_oop_recorder(new OopRecorder(comp_arena())); |
duke@435 | 867 | env()->set_debug_info(new DebugInformationRecorder(env()->oop_recorder())); |
duke@435 | 868 | env()->set_dependencies(new Dependencies(env())); |
duke@435 | 869 | |
duke@435 | 870 | _fixed_slots = 0; |
duke@435 | 871 | set_has_split_ifs(false); |
duke@435 | 872 | set_has_loops(has_method() && method()->has_loops()); // first approximation |
never@1515 | 873 | set_has_stringbuilder(false); |
duke@435 | 874 | _deopt_happens = true; // start out assuming the worst |
duke@435 | 875 | _trap_can_recompile = false; // no traps emitted yet |
duke@435 | 876 | _major_progress = true; // start out assuming good things will happen |
duke@435 | 877 | set_has_unsafe_access(false); |
duke@435 | 878 | Copy::zero_to_bytes(_trap_hist, sizeof(_trap_hist)); |
duke@435 | 879 | set_decompile_count(0); |
duke@435 | 880 | |
rasbold@853 | 881 | set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency")); |
duke@435 | 882 | // Compilation level related initialization |
duke@435 | 883 | if (env()->comp_level() == CompLevel_fast_compile) { |
duke@435 | 884 | set_num_loop_opts(Tier1LoopOptsCount); |
duke@435 | 885 | set_do_inlining(Tier1Inline != 0); |
duke@435 | 886 | set_max_inline_size(Tier1MaxInlineSize); |
duke@435 | 887 | set_freq_inline_size(Tier1FreqInlineSize); |
duke@435 | 888 | set_do_scheduling(false); |
duke@435 | 889 | set_do_count_invocations(Tier1CountInvocations); |
duke@435 | 890 | set_do_method_data_update(Tier1UpdateMethodData); |
duke@435 | 891 | } else { |
duke@435 | 892 | assert(env()->comp_level() == CompLevel_full_optimization, "unknown comp level"); |
duke@435 | 893 | set_num_loop_opts(LoopOptsCount); |
duke@435 | 894 | set_do_inlining(Inline); |
duke@435 | 895 | set_max_inline_size(MaxInlineSize); |
duke@435 | 896 | set_freq_inline_size(FreqInlineSize); |
duke@435 | 897 | set_do_scheduling(OptoScheduling); |
duke@435 | 898 | set_do_count_invocations(false); |
duke@435 | 899 | set_do_method_data_update(false); |
duke@435 | 900 | } |
duke@435 | 901 | |
duke@435 | 902 | if (debug_info()->recording_non_safepoints()) { |
duke@435 | 903 | set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*> |
duke@435 | 904 | (comp_arena(), 8, 0, NULL)); |
duke@435 | 905 | set_default_node_notes(Node_Notes::make(this)); |
duke@435 | 906 | } |
duke@435 | 907 | |
duke@435 | 908 | // // -- Initialize types before each compile -- |
duke@435 | 909 | // // Update cached type information |
duke@435 | 910 | // if( _method && _method->constants() ) |
duke@435 | 911 | // Type::update_loaded_types(_method, _method->constants()); |
duke@435 | 912 | |
duke@435 | 913 | // Init alias_type map. |
kvn@473 | 914 | if (!_do_escape_analysis && aliaslevel == 3) |
duke@435 | 915 | aliaslevel = 2; // No unique types without escape analysis |
duke@435 | 916 | _AliasLevel = aliaslevel; |
duke@435 | 917 | const int grow_ats = 16; |
duke@435 | 918 | _max_alias_types = grow_ats; |
duke@435 | 919 | _alias_types = NEW_ARENA_ARRAY(comp_arena(), AliasType*, grow_ats); |
duke@435 | 920 | AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, grow_ats); |
duke@435 | 921 | Copy::zero_to_bytes(ats, sizeof(AliasType)*grow_ats); |
duke@435 | 922 | { |
duke@435 | 923 | for (int i = 0; i < grow_ats; i++) _alias_types[i] = &ats[i]; |
duke@435 | 924 | } |
duke@435 | 925 | // Initialize the first few types. |
duke@435 | 926 | _alias_types[AliasIdxTop]->Init(AliasIdxTop, NULL); |
duke@435 | 927 | _alias_types[AliasIdxBot]->Init(AliasIdxBot, TypePtr::BOTTOM); |
duke@435 | 928 | _alias_types[AliasIdxRaw]->Init(AliasIdxRaw, TypeRawPtr::BOTTOM); |
duke@435 | 929 | _num_alias_types = AliasIdxRaw+1; |
duke@435 | 930 | // Zero out the alias type cache. |
duke@435 | 931 | Copy::zero_to_bytes(_alias_cache, sizeof(_alias_cache)); |
duke@435 | 932 | // A NULL adr_type hits in the cache right away. Preload the right answer. |
duke@435 | 933 | probe_alias_cache(NULL)->_index = AliasIdxTop; |
duke@435 | 934 | |
duke@435 | 935 | _intrinsics = NULL; |
duke@435 | 936 | _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL); |
cfang@1607 | 937 | _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL); |
duke@435 | 938 | register_library_intrinsics(); |
duke@435 | 939 | } |
duke@435 | 940 | |
duke@435 | 941 | //---------------------------init_start---------------------------------------- |
duke@435 | 942 | // Install the StartNode on this compile object. |
duke@435 | 943 | void Compile::init_start(StartNode* s) { |
duke@435 | 944 | if (failing()) |
duke@435 | 945 | return; // already failing |
duke@435 | 946 | assert(s == start(), ""); |
duke@435 | 947 | } |
duke@435 | 948 | |
duke@435 | 949 | StartNode* Compile::start() const { |
duke@435 | 950 | assert(!failing(), ""); |
duke@435 | 951 | for (DUIterator_Fast imax, i = root()->fast_outs(imax); i < imax; i++) { |
duke@435 | 952 | Node* start = root()->fast_out(i); |
duke@435 | 953 | if( start->is_Start() ) |
duke@435 | 954 | return start->as_Start(); |
duke@435 | 955 | } |
duke@435 | 956 | ShouldNotReachHere(); |
duke@435 | 957 | return NULL; |
duke@435 | 958 | } |
duke@435 | 959 | |
duke@435 | 960 | //-------------------------------immutable_memory------------------------------------- |
duke@435 | 961 | // Access immutable memory |
duke@435 | 962 | Node* Compile::immutable_memory() { |
duke@435 | 963 | if (_immutable_memory != NULL) { |
duke@435 | 964 | return _immutable_memory; |
duke@435 | 965 | } |
duke@435 | 966 | StartNode* s = start(); |
duke@435 | 967 | for (DUIterator_Fast imax, i = s->fast_outs(imax); true; i++) { |
duke@435 | 968 | Node *p = s->fast_out(i); |
duke@435 | 969 | if (p != s && p->as_Proj()->_con == TypeFunc::Memory) { |
duke@435 | 970 | _immutable_memory = p; |
duke@435 | 971 | return _immutable_memory; |
duke@435 | 972 | } |
duke@435 | 973 | } |
duke@435 | 974 | ShouldNotReachHere(); |
duke@435 | 975 | return NULL; |
duke@435 | 976 | } |
duke@435 | 977 | |
duke@435 | 978 | //----------------------set_cached_top_node------------------------------------ |
duke@435 | 979 | // Install the cached top node, and make sure Node::is_top works correctly. |
duke@435 | 980 | void Compile::set_cached_top_node(Node* tn) { |
duke@435 | 981 | if (tn != NULL) verify_top(tn); |
duke@435 | 982 | Node* old_top = _top; |
duke@435 | 983 | _top = tn; |
duke@435 | 984 | // Calling Node::setup_is_top allows the nodes the chance to adjust |
duke@435 | 985 | // their _out arrays. |
duke@435 | 986 | if (_top != NULL) _top->setup_is_top(); |
duke@435 | 987 | if (old_top != NULL) old_top->setup_is_top(); |
duke@435 | 988 | assert(_top == NULL || top()->is_top(), ""); |
duke@435 | 989 | } |
duke@435 | 990 | |
duke@435 | 991 | #ifndef PRODUCT |
duke@435 | 992 | void Compile::verify_top(Node* tn) const { |
duke@435 | 993 | if (tn != NULL) { |
duke@435 | 994 | assert(tn->is_Con(), "top node must be a constant"); |
duke@435 | 995 | assert(((ConNode*)tn)->type() == Type::TOP, "top node must have correct type"); |
duke@435 | 996 | assert(tn->in(0) != NULL, "must have live top node"); |
duke@435 | 997 | } |
duke@435 | 998 | } |
duke@435 | 999 | #endif |
duke@435 | 1000 | |
duke@435 | 1001 | |
duke@435 | 1002 | ///-------------------Managing Per-Node Debug & Profile Info------------------- |
duke@435 | 1003 | |
duke@435 | 1004 | void Compile::grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by) { |
duke@435 | 1005 | guarantee(arr != NULL, ""); |
duke@435 | 1006 | int num_blocks = arr->length(); |
duke@435 | 1007 | if (grow_by < num_blocks) grow_by = num_blocks; |
duke@435 | 1008 | int num_notes = grow_by * _node_notes_block_size; |
duke@435 | 1009 | Node_Notes* notes = NEW_ARENA_ARRAY(node_arena(), Node_Notes, num_notes); |
duke@435 | 1010 | Copy::zero_to_bytes(notes, num_notes * sizeof(Node_Notes)); |
duke@435 | 1011 | while (num_notes > 0) { |
duke@435 | 1012 | arr->append(notes); |
duke@435 | 1013 | notes += _node_notes_block_size; |
duke@435 | 1014 | num_notes -= _node_notes_block_size; |
duke@435 | 1015 | } |
duke@435 | 1016 | assert(num_notes == 0, "exact multiple, please"); |
duke@435 | 1017 | } |
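
The grow loop above in miniature: at least double the block count, make one allocation, and carve it into fixed-size blocks (BLOCK and the int payload are illustrative; HotSpot's arena owns the memory, hence no matching free):

    #include <vector>

    static const int BLOCK = 256;   // stands in for _node_notes_block_size

    static void grow_blocks(std::vector<int*>& arr, int grow_by) {
      int num_blocks = (int)arr.size();
      if (grow_by < num_blocks) grow_by = num_blocks;   // geometric growth
      int num_notes = grow_by * BLOCK;
      int* notes = new int[num_notes]();                // one zeroed allocation
      while (num_notes > 0) {                           // carve into blocks
        arr.push_back(notes);
        notes     += BLOCK;
        num_notes -= BLOCK;
      }
    }
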
duke@435 | 1018 | |
duke@435 | 1019 | bool Compile::copy_node_notes_to(Node* dest, Node* source) { |
duke@435 | 1020 | if (source == NULL || dest == NULL) return false; |
duke@435 | 1021 | |
duke@435 | 1022 | if (dest->is_Con()) |
duke@435 | 1023 | return false; // Do not push debug info onto constants. |
duke@435 | 1024 | |
duke@435 | 1025 | #ifdef ASSERT |
duke@435 | 1026 | // Leave a bread crumb trail pointing to the original node: |
duke@435 | 1027 | if (dest != NULL && dest != source && dest->debug_orig() == NULL) { |
duke@435 | 1028 | dest->set_debug_orig(source); |
duke@435 | 1029 | } |
duke@435 | 1030 | #endif |
duke@435 | 1031 | |
duke@435 | 1032 | if (node_note_array() == NULL) |
duke@435 | 1033 | return false; // Not collecting any notes now. |
duke@435 | 1034 | |
duke@435 | 1035 | // This is a copy onto a pre-existing node, which may already have notes. |
duke@435 | 1036 | // If both nodes have notes, do not overwrite any pre-existing notes. |
duke@435 | 1037 | Node_Notes* source_notes = node_notes_at(source->_idx); |
duke@435 | 1038 | if (source_notes == NULL || source_notes->is_clear()) return false; |
duke@435 | 1039 | Node_Notes* dest_notes = node_notes_at(dest->_idx); |
duke@435 | 1040 | if (dest_notes == NULL || dest_notes->is_clear()) { |
duke@435 | 1041 | return set_node_notes_at(dest->_idx, source_notes); |
duke@435 | 1042 | } |
duke@435 | 1043 | |
duke@435 | 1044 | Node_Notes merged_notes = (*source_notes); |
duke@435 | 1045 | // The order of operations here ensures that dest notes will win... |
duke@435 | 1046 | merged_notes.update_from(dest_notes); |
duke@435 | 1047 | return set_node_notes_at(dest->_idx, &merged_notes); |
duke@435 | 1048 | } |
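
The "dest wins" merge above, restated: copy the source notes, then overlay the destination's so any field dest already set survives (Notes and update_from are illustrative stand-ins for Node_Notes):

    struct Notes {
      const void* jvms = nullptr;           // a single illustrative field
      void update_from(const Notes* other) {
        if (other->jvms != nullptr) jvms = other->jvms;   // set fields win
      }
    };

    static Notes merge_notes(const Notes* source, const Notes* dest) {
      Notes merged = *source;     // baseline: source notes
      merged.update_from(dest);   // overlay: pre-existing dest notes prevail
      return merged;
    }
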
duke@435 | 1049 | |
duke@435 | 1050 | |
duke@435 | 1051 | //--------------------------allow_range_check_smearing------------------------- |
duke@435 | 1052 | // Gating condition for coalescing similar range checks. |
duke@435 | 1053 | // Sometimes we try 'speculatively' replacing a series of range checks by a |
duke@435 | 1054 | // single covering check that is at least as strong as any of them. |
duke@435 | 1055 | // If the optimization succeeds, the simplified (strengthened) range check |
duke@435 | 1056 | // will always succeed. If it fails, we will deopt, and then give up |
duke@435 | 1057 | // on the optimization. |
duke@435 | 1058 | bool Compile::allow_range_check_smearing() const { |
duke@435 | 1059 | // If this method has already thrown a range-check, |
duke@435 | 1060 | // assume it was because we already tried range smearing |
duke@435 | 1061 | // and it failed. |
duke@435 | 1062 | uint already_trapped = trap_count(Deoptimization::Reason_range_check); |
duke@435 | 1063 | return !already_trapped; |
duke@435 | 1064 | } |
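
A worked example of the optimization being gated: checks against several nearby indices collapse into one covering pair of unsigned tests. If the covering check ever fails where an individual check would have passed, the method deoptimizes, records a range-check trap, and this gate then refuses to smear on the recompile (sum3 is illustrative):

    #include <cstddef>

    static int sum3(const int* a, size_t len, size_t i) {
      // One strengthened check pair covers i-1, i, and i+1.
      // (i - 1 wraps for i == 0, so the unsigned compare rejects it.)
      if (i - 1 < len && i + 1 < len) {
        return a[i - 1] + a[i] + a[i + 1];   // individual checks elided
      }
      return -1;   // stand-in for the uncommon-trap (deoptimization) path
    }
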
duke@435 | 1065 | |
duke@435 | 1066 | |
duke@435 | 1067 | //------------------------------flatten_alias_type----------------------------- |
duke@435 | 1068 | const TypePtr *Compile::flatten_alias_type( const TypePtr *tj ) const { |
duke@435 | 1069 | int offset = tj->offset(); |
duke@435 | 1070 | TypePtr::PTR ptr = tj->ptr(); |
duke@435 | 1071 | |
kvn@682 | 1072 | // Known instance (scalarizable allocation) alias only with itself. |
kvn@682 | 1073 | bool is_known_inst = tj->isa_oopptr() != NULL && |
kvn@682 | 1074 | tj->is_oopptr()->is_known_instance(); |
kvn@682 | 1075 | |
duke@435 | 1076 | // Process weird unsafe references. |
duke@435 | 1077 | if (offset == Type::OffsetBot && (tj->isa_instptr() /*|| tj->isa_klassptr()*/)) { |
duke@435 | 1078 | assert(InlineUnsafeOps, "indeterminate pointers come only from unsafe ops"); |
kvn@682 | 1079 | assert(!is_known_inst, "scalarizable allocation should not have unsafe references"); |
duke@435 | 1080 | tj = TypeOopPtr::BOTTOM; |
duke@435 | 1081 | ptr = tj->ptr(); |
duke@435 | 1082 | offset = tj->offset(); |
duke@435 | 1083 | } |
duke@435 | 1084 | |
duke@435 | 1085 | // Array pointers need some flattening |
duke@435 | 1086 | const TypeAryPtr *ta = tj->isa_aryptr(); |
kvn@682 | 1087 | if( ta && is_known_inst ) { |
kvn@682 | 1088 | if ( offset != Type::OffsetBot && |
kvn@682 | 1089 | offset > arrayOopDesc::length_offset_in_bytes() ) { |
kvn@682 | 1090 | offset = Type::OffsetBot; // Flatten constant access into array body only |
kvn@682 | 1091 | tj = ta = TypeAryPtr::make(ptr, ta->ary(), ta->klass(), true, offset, ta->instance_id()); |
kvn@682 | 1092 | } |
kvn@682 | 1093 | } else if( ta && _AliasLevel >= 2 ) { |
duke@435 | 1094 | // For arrays indexed by constant indices, we flatten the alias |
duke@435 | 1095 | // space to include all of the array body. Only the header, klass |
duke@435 | 1096 | // and array length can be accessed un-aliased. |
duke@435 | 1097 | if( offset != Type::OffsetBot ) { |
duke@435 | 1098 | if( ta->const_oop() ) { // methodDataOop or methodOop |
duke@435 | 1099 | offset = Type::OffsetBot; // Flatten constant access into array body |
kvn@682 | 1100 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),ta->ary(),ta->klass(),false,offset); |
duke@435 | 1101 | } else if( offset == arrayOopDesc::length_offset_in_bytes() ) { |
duke@435 | 1102 | // range is OK as-is. |
duke@435 | 1103 | tj = ta = TypeAryPtr::RANGE; |
duke@435 | 1104 | } else if( offset == oopDesc::klass_offset_in_bytes() ) { |
duke@435 | 1105 | tj = TypeInstPtr::KLASS; // all klass loads look alike |
duke@435 | 1106 | ta = TypeAryPtr::RANGE; // generic ignored junk |
duke@435 | 1107 | ptr = TypePtr::BotPTR; |
duke@435 | 1108 | } else if( offset == oopDesc::mark_offset_in_bytes() ) { |
duke@435 | 1109 | tj = TypeInstPtr::MARK; |
duke@435 | 1110 | ta = TypeAryPtr::RANGE; // generic ignored junk |
duke@435 | 1111 | ptr = TypePtr::BotPTR; |
duke@435 | 1112 | } else { // Random constant offset into array body |
duke@435 | 1113 | offset = Type::OffsetBot; // Flatten constant access into array body |
kvn@682 | 1114 | tj = ta = TypeAryPtr::make(ptr,ta->ary(),ta->klass(),false,offset); |
duke@435 | 1115 | } |
duke@435 | 1116 | } |
duke@435 | 1117 | // Arrays of fixed size alias with arrays of unknown size. |
duke@435 | 1118 | if (ta->size() != TypeInt::POS) { |
duke@435 | 1119 | const TypeAry *tary = TypeAry::make(ta->elem(), TypeInt::POS); |
kvn@682 | 1120 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,ta->klass(),false,offset); |
duke@435 | 1121 | } |
duke@435 | 1122 | // Arrays of known objects become arrays of unknown objects. |
coleenp@548 | 1123 | if (ta->elem()->isa_narrowoop() && ta->elem() != TypeNarrowOop::BOTTOM) { |
coleenp@548 | 1124 | const TypeAry *tary = TypeAry::make(TypeNarrowOop::BOTTOM, ta->size()); |
kvn@682 | 1125 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); |
coleenp@548 | 1126 | } |
duke@435 | 1127 | if (ta->elem()->isa_oopptr() && ta->elem() != TypeInstPtr::BOTTOM) { |
duke@435 | 1128 | const TypeAry *tary = TypeAry::make(TypeInstPtr::BOTTOM, ta->size()); |
kvn@682 | 1129 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,NULL,false,offset); |
duke@435 | 1130 | } |
duke@435 | 1131 | // Arrays of bytes and of booleans both use 'bastore' and 'baload' so |
duke@435 | 1132 | // they cannot be distinguished by bytecode alone.
duke@435 | 1133 | if (ta->elem() == TypeInt::BOOL) { |
duke@435 | 1134 | const TypeAry *tary = TypeAry::make(TypeInt::BYTE, ta->size()); |
duke@435 | 1135 | ciKlass* aklass = ciTypeArrayKlass::make(T_BYTE); |
kvn@682 | 1136 | tj = ta = TypeAryPtr::make(ptr,ta->const_oop(),tary,aklass,false,offset); |
duke@435 | 1137 | } |
duke@435 | 1138 | // During the 2nd round of IterGVN, NotNull castings are removed. |
duke@435 | 1139 | // Make sure the Bottom and NotNull variants alias the same. |
duke@435 | 1140 | // Also, make sure exact and non-exact variants alias the same. |
duke@435 | 1141 | if( ptr == TypePtr::NotNull || ta->klass_is_exact() ) { |
duke@435 | 1142 | if (ta->const_oop()) { |
duke@435 | 1143 | tj = ta = TypeAryPtr::make(TypePtr::Constant,ta->const_oop(),ta->ary(),ta->klass(),false,offset); |
duke@435 | 1144 | } else { |
duke@435 | 1145 | tj = ta = TypeAryPtr::make(TypePtr::BotPTR,ta->ary(),ta->klass(),false,offset); |
duke@435 | 1146 | } |
duke@435 | 1147 | } |
duke@435 | 1148 | } |
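// Illustrative effect of the flattening above (a sketch; names hypothetical):
// with _AliasLevel >= 2, stores at distinct constant indices share the
// flattened array-body alias class, while header accesses keep their own:
//
//   a[2] = x;          // offset -> OffsetBot: array-body alias class
//   a[7] = y;          // same alias class as the a[2] store
//   int n = a.length;  // length_offset_in_bytes(): TypeAryPtr::RANGE class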
duke@435 | 1149 | |
duke@435 | 1150 | // Oop pointers need some flattening |
duke@435 | 1151 | const TypeInstPtr *to = tj->isa_instptr(); |
duke@435 | 1152 | if( to && _AliasLevel >= 2 && to != TypeOopPtr::BOTTOM ) { |
duke@435 | 1153 | if( ptr == TypePtr::Constant ) { |
duke@435 | 1154 | // No constant oop pointers (such as Strings); they alias with |
duke@435 | 1155 | // unknown strings. |
kvn@682 | 1156 | assert(!is_known_inst, "not scalarizable allocation"); |
duke@435 | 1157 | tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); |
kvn@682 | 1158 | } else if( is_known_inst ) { |
kvn@598 | 1159 | tj = to; // Keep NotNull and klass_is_exact for instance type |
duke@435 | 1160 | } else if( ptr == TypePtr::NotNull || to->klass_is_exact() ) { |
duke@435 | 1161 | // During the 2nd round of IterGVN, NotNull castings are removed. |
duke@435 | 1162 | // Make sure the Bottom and NotNull variants alias the same. |
duke@435 | 1163 | // Also, make sure exact and non-exact variants alias the same. |
kvn@682 | 1164 | tj = to = TypeInstPtr::make(TypePtr::BotPTR,to->klass(),false,0,offset); |
duke@435 | 1165 | } |
duke@435 | 1166 | // Canonicalize the holder of this field |
duke@435 | 1167 | ciInstanceKlass *k = to->klass()->as_instance_klass(); |
coleenp@548 | 1168 | if (offset >= 0 && offset < instanceOopDesc::base_offset_in_bytes()) { |
duke@435 | 1169 | // First handle header references such as a LoadKlassNode, even if the |
duke@435 | 1170 | // object's klass is unloaded at compile time (4965979). |
kvn@682 | 1171 | if (!is_known_inst) { // Do it only for non-instance types |
kvn@682 | 1172 | tj = to = TypeInstPtr::make(TypePtr::BotPTR, env()->Object_klass(), false, NULL, offset); |
kvn@682 | 1173 | } |
duke@435 | 1174 | } else if (offset < 0 || offset >= k->size_helper() * wordSize) { |
duke@435 | 1175 | to = NULL; |
duke@435 | 1176 | tj = TypeOopPtr::BOTTOM; |
duke@435 | 1177 | offset = tj->offset(); |
duke@435 | 1178 | } else { |
duke@435 | 1179 | ciInstanceKlass *canonical_holder = k->get_canonical_holder(offset); |
duke@435 | 1180 | if (!k->equals(canonical_holder) || tj->offset() != offset) { |
kvn@682 | 1181 | if( is_known_inst ) { |
kvn@682 | 1182 | tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, true, NULL, offset, to->instance_id()); |
kvn@682 | 1183 | } else { |
kvn@682 | 1184 | tj = to = TypeInstPtr::make(to->ptr(), canonical_holder, false, NULL, offset); |
kvn@682 | 1185 | } |
duke@435 | 1186 | } |
duke@435 | 1187 | } |
duke@435 | 1188 | } |
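// Sketch of the canonical-holder rewrite above, with hypothetical classes:
// if B extends A and field f is declared in A, an access typed as "B at f's
// offset" is retyped to the canonical holder A, so loads through A- and
// B-typed pointers land in a single alias class:
//
//   class A { int f; };
//   class B : public A { };
//   // TypeInstPtr(B, offset of f)  ==>  TypeInstPtr(A, offset of f)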
duke@435 | 1189 | |
duke@435 | 1190 | // Klass pointers to object array klasses need some flattening |
duke@435 | 1191 | const TypeKlassPtr *tk = tj->isa_klassptr(); |
duke@435 | 1192 | if( tk ) { |
duke@435 | 1193 | // If we are referencing a field within a Klass, we need |
duke@435 | 1194 | // to assume the worst case of an Object. Both exact and |
duke@435 | 1195 | // inexact types must flatten to the same alias class. |
duke@435 | 1196 | // Since the flattened result for a klass is defined to be |
duke@435 | 1197 | // precisely java.lang.Object, use a constant ptr. |
duke@435 | 1198 | if ( offset == Type::OffsetBot || (offset >= 0 && (size_t)offset < sizeof(Klass)) ) { |
duke@435 | 1199 | |
duke@435 | 1200 | tj = tk = TypeKlassPtr::make(TypePtr::Constant, |
duke@435 | 1201 | TypeKlassPtr::OBJECT->klass(), |
duke@435 | 1202 | offset); |
duke@435 | 1203 | } |
duke@435 | 1204 | |
duke@435 | 1205 | ciKlass* klass = tk->klass(); |
duke@435 | 1206 | if( klass->is_obj_array_klass() ) { |
duke@435 | 1207 | ciKlass* k = TypeAryPtr::OOPS->klass(); |
duke@435 | 1208 | if( !k || !k->is_loaded() ) // Only fails for some -Xcomp runs |
duke@435 | 1209 | k = TypeInstPtr::BOTTOM->klass(); |
duke@435 | 1210 | tj = tk = TypeKlassPtr::make( TypePtr::NotNull, k, offset ); |
duke@435 | 1211 | } |
duke@435 | 1212 | |
duke@435 | 1213 | // Check for precise loads from the primary supertype array and force them |
duke@435 | 1214 | // to the supertype cache alias index. Check for generic array loads from |
duke@435 | 1215 | // the primary supertype array and also force them to the supertype cache |
duke@435 | 1216 | // alias index. Since the same load can reach both, we need to merge |
duke@435 | 1217 | // these 2 disparate memories into the same alias class. Since the |
duke@435 | 1218 | // primary supertype array is read-only, there's no chance of confusion |
duke@435 | 1219 | // where we bypass an array load and an array store. |
duke@435 | 1220 | uint off2 = offset - Klass::primary_supers_offset_in_bytes(); |
duke@435 | 1221 | if( offset == Type::OffsetBot || |
duke@435 | 1222 | off2 < Klass::primary_super_limit()*wordSize ) { |
duke@435 | 1223 | offset = sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes();
duke@435 | 1224 | tj = tk = TypeKlassPtr::make( TypePtr::NotNull, tk->klass(), offset ); |
duke@435 | 1225 | } |
duke@435 | 1226 | } |
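// Worked sketch of the offset test above (values hypothetical): if the
// primary supers array starts at byte P inside Klass and
// primary_super_limit() is 8 on a 64-bit VM (wordSize == 8), every constant
// offset in [P, P+64) is redirected to the secondary_super_cache offset, so
// precise and generic supertype probes share one alias class. Since off2 is
// a uint, any offset below P wraps to a huge value and correctly fails the
// range comparison.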
duke@435 | 1227 | |
duke@435 | 1228 | // Flatten all Raw pointers together. |
duke@435 | 1229 | if (tj->base() == Type::RawPtr) |
duke@435 | 1230 | tj = TypeRawPtr::BOTTOM; |
duke@435 | 1231 | |
duke@435 | 1232 | if (tj->base() == Type::AnyPtr) |
duke@435 | 1233 | tj = TypePtr::BOTTOM; // An error, which the caller must check for. |
duke@435 | 1234 | |
duke@435 | 1235 | // Flatten all to bottom for now |
duke@435 | 1236 | switch( _AliasLevel ) { |
duke@435 | 1237 | case 0: |
duke@435 | 1238 | tj = TypePtr::BOTTOM; |
duke@435 | 1239 | break; |
duke@435 | 1240 | case 1: // Flatten to: oop, static, field or array |
duke@435 | 1241 | switch (tj->base()) { |
duke@435 | 1242 | //case Type::AryPtr: tj = TypeAryPtr::RANGE; break; |
duke@435 | 1243 | case Type::RawPtr: tj = TypeRawPtr::BOTTOM; break; |
duke@435 | 1244 | case Type::AryPtr: // do not distinguish arrays at all |
duke@435 | 1245 | case Type::InstPtr: tj = TypeInstPtr::BOTTOM; break; |
duke@435 | 1246 | case Type::KlassPtr: tj = TypeKlassPtr::OBJECT; break; |
duke@435 | 1247 | case Type::AnyPtr: tj = TypePtr::BOTTOM; break; // caller checks it |
duke@435 | 1248 | default: ShouldNotReachHere(); |
duke@435 | 1249 | } |
duke@435 | 1250 | break; |
twisti@1040 | 1251 | case 2: // No collapsing at level 2; keep all splits |
twisti@1040 | 1252 | case 3: // No collapsing at level 3; keep all splits |
duke@435 | 1253 | break; |
duke@435 | 1254 | default: |
duke@435 | 1255 | Unimplemented(); |
duke@435 | 1256 | } |
duke@435 | 1257 | |
duke@435 | 1258 | offset = tj->offset(); |
duke@435 | 1259 | assert( offset != Type::OffsetTop, "Offset has fallen from constant" ); |
duke@435 | 1260 | |
duke@435 | 1261 | assert( (offset != Type::OffsetBot && tj->base() != Type::AryPtr) || |
duke@435 | 1262 | (offset == Type::OffsetBot && tj->base() == Type::AryPtr) || |
duke@435 | 1263 | (offset == Type::OffsetBot && tj == TypeOopPtr::BOTTOM) || |
duke@435 | 1264 | (offset == Type::OffsetBot && tj == TypePtr::BOTTOM) || |
duke@435 | 1265 | (offset == oopDesc::mark_offset_in_bytes() && tj->base() == Type::AryPtr) || |
duke@435 | 1266 | (offset == oopDesc::klass_offset_in_bytes() && tj->base() == Type::AryPtr) || |
duke@435 | 1267 | (offset == arrayOopDesc::length_offset_in_bytes() && tj->base() == Type::AryPtr) , |
duke@435 | 1268 | "For oops, klasses, raw offset must be constant; for arrays the offset is never known" ); |
duke@435 | 1269 | assert( tj->ptr() != TypePtr::TopPTR && |
duke@435 | 1270 | tj->ptr() != TypePtr::AnyNull && |
duke@435 | 1271 | tj->ptr() != TypePtr::Null, "No imprecise addresses" ); |
duke@435 | 1272 | // assert( tj->ptr() != TypePtr::Constant || |
duke@435 | 1273 | // tj->base() == Type::RawPtr || |
duke@435 | 1274 | // tj->base() == Type::KlassPtr, "No constant oop addresses" ); |
duke@435 | 1275 | |
duke@435 | 1276 | return tj; |
duke@435 | 1277 | } |
duke@435 | 1278 | |
duke@435 | 1279 | void Compile::AliasType::Init(int i, const TypePtr* at) { |
duke@435 | 1280 | _index = i; |
duke@435 | 1281 | _adr_type = at; |
duke@435 | 1282 | _field = NULL; |
duke@435 | 1283 | _is_rewritable = true; // default |
duke@435 | 1284 | const TypeOopPtr *atoop = (at != NULL) ? at->isa_oopptr() : NULL; |
kvn@658 | 1285 | if (atoop != NULL && atoop->is_known_instance()) { |
kvn@658 | 1286 | const TypeOopPtr *gt = atoop->cast_to_instance_id(TypeOopPtr::InstanceBot); |
duke@435 | 1287 | _general_index = Compile::current()->get_alias_index(gt); |
duke@435 | 1288 | } else { |
duke@435 | 1289 | _general_index = 0; |
duke@435 | 1290 | } |
duke@435 | 1291 | } |
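// Note on _general_index (an inference from the code above): a known-instance
// address gets its own private alias class, and we also record the index of
// the matching generic class (the same address type cast to InstanceBot),
// presumably so the private memory slice can later be related back to the
// general one; for ordinary types _general_index stays 0.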
duke@435 | 1292 | |
duke@435 | 1293 | //---------------------------------print_on------------------------------------ |
duke@435 | 1294 | #ifndef PRODUCT |
duke@435 | 1295 | void Compile::AliasType::print_on(outputStream* st) { |
duke@435 | 1296 | if (index() < 10) |
duke@435 | 1297 | st->print("@ <%d> ", index()); |
duke@435 | 1298 | else st->print("@ <%d>", index()); |
duke@435 | 1299 | st->print(is_rewritable() ? " " : " RO"); |
duke@435 | 1300 | int offset = adr_type()->offset(); |
duke@435 | 1301 | if (offset == Type::OffsetBot) |
duke@435 | 1302 | st->print(" +any"); |
duke@435 | 1303 | else st->print(" +%-3d", offset); |
duke@435 | 1304 | st->print(" in "); |
duke@435 | 1305 | adr_type()->dump_on(st); |
duke@435 | 1306 | const TypeOopPtr* tjp = adr_type()->isa_oopptr(); |
duke@435 | 1307 | if (field() != NULL && tjp) { |
duke@435 | 1308 | if (tjp->klass() != field()->holder() || |
duke@435 | 1309 | tjp->offset() != field()->offset_in_bytes()) { |
duke@435 | 1310 | st->print(" != "); |
duke@435 | 1311 | field()->print(); |
duke@435 | 1312 | st->print(" ***"); |
duke@435 | 1313 | } |
duke@435 | 1314 | } |
duke@435 | 1315 | } |
duke@435 | 1316 | |
duke@435 | 1317 | void print_alias_types() { |
duke@435 | 1318 | Compile* C = Compile::current(); |
duke@435 | 1319 | tty->print_cr("--- Alias types, AliasIdxBot .. %d", C->num_alias_types()-1); |
duke@435 | 1320 | for (int idx = Compile::AliasIdxBot; idx < C->num_alias_types(); idx++) { |
duke@435 | 1321 | C->alias_type(idx)->print_on(tty); |
duke@435 | 1322 | tty->cr(); |
duke@435 | 1323 | } |
duke@435 | 1324 | } |
duke@435 | 1325 | #endif |
duke@435 | 1326 | |
duke@435 | 1327 | |
duke@435 | 1328 | //----------------------------probe_alias_cache-------------------------------- |
duke@435 | 1329 | Compile::AliasCacheEntry* Compile::probe_alias_cache(const TypePtr* adr_type) { |
duke@435 | 1330 | intptr_t key = (intptr_t) adr_type; |
duke@435 | 1331 | key ^= key >> logAliasCacheSize; |
duke@435 | 1332 | return &_alias_cache[key & right_n_bits(logAliasCacheSize)]; |
duke@435 | 1333 | } |
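// Minimal sketch of the probe above, assuming logAliasCacheSize == 6 (a
// 64-entry direct-mapped cache; the real constant is defined elsewhere):
//
//   intptr_t key = (intptr_t) adr_type;  // identity of the interned TypePtr
//   key ^= key >> 6;                     // fold high bits into the index
//   AliasCacheEntry* e = &_alias_cache[key & 63];
//
// A miss is harmless: find_alias_type() recomputes the answer and refills
// the entry, so the cache only short-circuits the flatten-and-search work.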
duke@435 | 1334 | |
duke@435 | 1335 | |
duke@435 | 1336 | //-----------------------------grow_alias_types-------------------------------- |
duke@435 | 1337 | void Compile::grow_alias_types() { |
duke@435 | 1338 | const int old_ats = _max_alias_types; // how many before? |
duke@435 | 1339 | const int new_ats = old_ats; // how many more? |
duke@435 | 1340 | const int grow_ats = old_ats+new_ats; // how many now? |
duke@435 | 1341 | _max_alias_types = grow_ats; |
duke@435 | 1342 | _alias_types = REALLOC_ARENA_ARRAY(comp_arena(), AliasType*, _alias_types, old_ats, grow_ats); |
duke@435 | 1343 | AliasType* ats = NEW_ARENA_ARRAY(comp_arena(), AliasType, new_ats); |
duke@435 | 1344 | Copy::zero_to_bytes(ats, sizeof(AliasType)*new_ats); |
duke@435 | 1345 | for (int i = 0; i < new_ats; i++) _alias_types[old_ats+i] = &ats[i]; |
duke@435 | 1346 | } |
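// Growth sketch (sizes hypothetical): the table doubles, e.g. 64 -> 128, and
// only the array of AliasType* is REALLOC'ed; the AliasType objects
// themselves are arena-allocated once and never move, so AliasType* handles
// cached by callers stay valid across growth.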
duke@435 | 1347 | |
duke@435 | 1348 | |
duke@435 | 1349 | //--------------------------------find_alias_type------------------------------ |
duke@435 | 1350 | Compile::AliasType* Compile::find_alias_type(const TypePtr* adr_type, bool no_create) { |
duke@435 | 1351 | if (_AliasLevel == 0) |
duke@435 | 1352 | return alias_type(AliasIdxBot); |
duke@435 | 1353 | |
duke@435 | 1354 | AliasCacheEntry* ace = probe_alias_cache(adr_type); |
duke@435 | 1355 | if (ace->_adr_type == adr_type) { |
duke@435 | 1356 | return alias_type(ace->_index); |
duke@435 | 1357 | } |
duke@435 | 1358 | |
duke@435 | 1359 | // Handle special cases. |
duke@435 | 1360 | if (adr_type == NULL) return alias_type(AliasIdxTop); |
duke@435 | 1361 | if (adr_type == TypePtr::BOTTOM) return alias_type(AliasIdxBot); |
duke@435 | 1362 | |
duke@435 | 1363 | // Do it the slow way. |
duke@435 | 1364 | const TypePtr* flat = flatten_alias_type(adr_type); |
duke@435 | 1365 | |
duke@435 | 1366 | #ifdef ASSERT |
duke@435 | 1367 | assert(flat == flatten_alias_type(flat), "idempotent"); |
duke@435 | 1368 | assert(flat != TypePtr::BOTTOM, "cannot alias-analyze an untyped ptr"); |
duke@435 | 1369 | if (flat->isa_oopptr() && !flat->isa_klassptr()) { |
duke@435 | 1370 | const TypeOopPtr* foop = flat->is_oopptr(); |
kvn@682 | 1371 | // Scalarizable allocations always have an exact klass.
kvn@682 | 1372 | bool exact = !foop->klass_is_exact() || foop->is_known_instance(); |
kvn@682 | 1373 | const TypePtr* xoop = foop->cast_to_exactness(exact)->is_ptr(); |
duke@435 | 1374 | assert(foop == flatten_alias_type(xoop), "exactness must not affect alias type"); |
duke@435 | 1375 | } |
duke@435 | 1376 | assert(flat == flatten_alias_type(flat), "exact bit doesn't matter"); |
duke@435 | 1377 | #endif |
duke@435 | 1378 | |
duke@435 | 1379 | int idx = AliasIdxTop; |
duke@435 | 1380 | for (int i = 0; i < num_alias_types(); i++) { |
duke@435 | 1381 | if (alias_type(i)->adr_type() == flat) { |
duke@435 | 1382 | idx = i; |
duke@435 | 1383 | break; |
duke@435 | 1384 | } |
duke@435 | 1385 | } |
duke@435 | 1386 | |
duke@435 | 1387 | if (idx == AliasIdxTop) { |
duke@435 | 1388 | if (no_create) return NULL; |
duke@435 | 1389 | // Grow the array if necessary. |
duke@435 | 1390 | if (_num_alias_types == _max_alias_types) grow_alias_types(); |
duke@435 | 1391 | // Add a new alias type. |
duke@435 | 1392 | idx = _num_alias_types++; |
duke@435 | 1393 | _alias_types[idx]->Init(idx, flat); |
duke@435 | 1394 | if (flat == TypeInstPtr::KLASS) alias_type(idx)->set_rewritable(false); |
duke@435 | 1395 | if (flat == TypeAryPtr::RANGE) alias_type(idx)->set_rewritable(false); |
duke@435 | 1396 | if (flat->isa_instptr()) { |
duke@435 | 1397 | if (flat->offset() == java_lang_Class::klass_offset_in_bytes() |
duke@435 | 1398 | && flat->is_instptr()->klass() == env()->Class_klass()) |
duke@435 | 1399 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1400 | } |
duke@435 | 1401 | if (flat->isa_klassptr()) { |
duke@435 | 1402 | if (flat->offset() == Klass::super_check_offset_offset_in_bytes() + (int)sizeof(oopDesc)) |
duke@435 | 1403 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1404 | if (flat->offset() == Klass::modifier_flags_offset_in_bytes() + (int)sizeof(oopDesc)) |
duke@435 | 1405 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1406 | if (flat->offset() == Klass::access_flags_offset_in_bytes() + (int)sizeof(oopDesc)) |
duke@435 | 1407 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1408 | if (flat->offset() == Klass::java_mirror_offset_in_bytes() + (int)sizeof(oopDesc)) |
duke@435 | 1409 | alias_type(idx)->set_rewritable(false); |
duke@435 | 1410 | } |
duke@435 | 1411 | // %%% (We would like to finalize JavaThread::threadObj_offset(), |
duke@435 | 1412 | // but the base pointer type is not distinctive enough to identify |
duke@435 | 1413 | // references into JavaThread.) |
duke@435 | 1414 | |
duke@435 | 1415 | // Check for final instance fields. |
duke@435 | 1416 | const TypeInstPtr* tinst = flat->isa_instptr(); |
coleenp@548 | 1417 | if (tinst && tinst->offset() >= instanceOopDesc::base_offset_in_bytes()) { |
duke@435 | 1418 | ciInstanceKlass *k = tinst->klass()->as_instance_klass(); |
duke@435 | 1419 | ciField* field = k->get_field_by_offset(tinst->offset(), false); |
duke@435 | 1420 | // Set field() and is_rewritable() attributes. |
duke@435 | 1421 | if (field != NULL) alias_type(idx)->set_field(field); |
duke@435 | 1422 | } |
duke@435 | 1423 | const TypeKlassPtr* tklass = flat->isa_klassptr(); |
duke@435 | 1424 | // Check for final static fields. |
duke@435 | 1425 | if (tklass && tklass->klass()->is_instance_klass()) { |
duke@435 | 1426 | ciInstanceKlass *k = tklass->klass()->as_instance_klass(); |
duke@435 | 1427 | ciField* field = k->get_field_by_offset(tklass->offset(), true); |
duke@435 | 1428 | // Set field() and is_rewritable() attributes. |
duke@435 | 1429 | if (field != NULL) alias_type(idx)->set_field(field); |
duke@435 | 1430 | } |
duke@435 | 1431 | } |
duke@435 | 1432 | |
duke@435 | 1433 | // Fill the cache for next time. |
duke@435 | 1434 | ace->_adr_type = adr_type; |
duke@435 | 1435 | ace->_index = idx; |
duke@435 | 1436 | assert(alias_type(adr_type) == alias_type(idx), "type must be installed"); |
duke@435 | 1437 | |
duke@435 | 1438 | // Might as well try to fill the cache for the flattened version, too. |
duke@435 | 1439 | AliasCacheEntry* face = probe_alias_cache(flat); |
duke@435 | 1440 | if (face->_adr_type == NULL) { |
duke@435 | 1441 | face->_adr_type = flat; |
duke@435 | 1442 | face->_index = idx; |
duke@435 | 1443 | assert(alias_type(flat) == alias_type(idx), "flat type must work too"); |
duke@435 | 1444 | } |
duke@435 | 1445 | |
duke@435 | 1446 | return alias_type(idx); |
duke@435 | 1447 | } |
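// Typical call shape, sketched (get_alias_index() is the wrapper used
// elsewhere in this file; the surrounding usage here is an assumption):
//
//   const TypePtr* at = n->adr_type();
//   int idx = C->get_alias_index(at);  // probes the cache, may create a class
//   // idx then selects the memory slice, e.g. when walking a MergeMem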
duke@435 | 1448 | |
duke@435 | 1449 | |
duke@435 | 1450 | Compile::AliasType* Compile::alias_type(ciField* field) { |
duke@435 | 1451 | const TypeOopPtr* t; |
duke@435 | 1452 | if (field->is_static()) |
duke@435 | 1453 | t = TypeKlassPtr::make(field->holder()); |
duke@435 | 1454 | else |
duke@435 | 1455 | t = TypeOopPtr::make_from_klass_raw(field->holder()); |
duke@435 | 1456 | AliasType* atp = alias_type(t->add_offset(field->offset_in_bytes())); |
duke@435 | 1457 | assert(field->is_final() == !atp->is_rewritable(), "must get the rewritable bits correct"); |
duke@435 | 1458 | return atp; |
duke@435 | 1459 | } |
duke@435 | 1460 | |
duke@435 | 1461 | |
duke@435 | 1462 | //------------------------------have_alias_type-------------------------------- |
duke@435 | 1463 | bool Compile::have_alias_type(const TypePtr* adr_type) { |
duke@435 | 1464 | AliasCacheEntry* ace = probe_alias_cache(adr_type); |
duke@435 | 1465 | if (ace->_adr_type == adr_type) { |
duke@435 | 1466 | return true; |
duke@435 | 1467 | } |
duke@435 | 1468 | |
duke@435 | 1469 | // Handle special cases. |
duke@435 | 1470 | if (adr_type == NULL) return true; |
duke@435 | 1471 | if (adr_type == TypePtr::BOTTOM) return true; |
duke@435 | 1472 | |
duke@435 | 1473 | return find_alias_type(adr_type, true) != NULL; |
duke@435 | 1474 | } |
duke@435 | 1475 | |
duke@435 | 1476 | //-----------------------------must_alias-------------------------------------- |
duke@435 | 1477 | // True if all values of the given address type are in the given alias category. |
duke@435 | 1478 | bool Compile::must_alias(const TypePtr* adr_type, int alias_idx) { |
duke@435 | 1479 | if (alias_idx == AliasIdxBot) return true; // the universal category |
duke@435 | 1480 | if (adr_type == NULL) return true; // NULL serves as TypePtr::TOP |
duke@435 | 1481 | if (alias_idx == AliasIdxTop) return false; // the empty category |
duke@435 | 1482 | if (adr_type->base() == Type::AnyPtr) return false; // TypePtr::BOTTOM or its twins |
duke@435 | 1483 | |
duke@435 | 1484 | // the only remaining possible overlap is identity |
duke@435 | 1485 | int adr_idx = get_alias_index(adr_type); |
duke@435 | 1486 | assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, ""); |
duke@435 | 1487 | assert(adr_idx == alias_idx || |
duke@435 | 1488 | (alias_type(alias_idx)->adr_type() != TypeOopPtr::BOTTOM |
duke@435 | 1489 | && adr_type != TypeOopPtr::BOTTOM), |
duke@435 | 1490 | "should not be testing for overlap with an unsafe pointer"); |
duke@435 | 1491 | return adr_idx == alias_idx; |
duke@435 | 1492 | } |
duke@435 | 1493 | |
duke@435 | 1494 | //------------------------------can_alias-------------------------------------- |
duke@435 | 1495 | // True if any values of the given address type are in the given alias category. |
duke@435 | 1496 | bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) { |
duke@435 | 1497 | if (alias_idx == AliasIdxTop) return false; // the empty category |
duke@435 | 1498 | if (adr_type == NULL) return false; // NULL serves as TypePtr::TOP |
duke@435 | 1499 | if (alias_idx == AliasIdxBot) return true; // the universal category |
duke@435 | 1500 | if (adr_type->base() == Type::AnyPtr) return true; // TypePtr::BOTTOM or its twins |
duke@435 | 1501 | |
duke@435 | 1502 | // the only remaining possible overlap is identity |
duke@435 | 1503 | int adr_idx = get_alias_index(adr_type); |
duke@435 | 1504 | assert(adr_idx != AliasIdxBot && adr_idx != AliasIdxTop, ""); |
duke@435 | 1505 | return adr_idx == alias_idx; |
duke@435 | 1506 | } |
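// The two queries above, summarized as a sketch (each function tests its
// special cases in its own order, so read rows as approximate):
//
//   adr_type / alias_idx        must_alias   can_alias
//   NULL (serves as TOP)        true         false
//   base() == AnyPtr (BOTTOM)   false        true
//   alias_idx == AliasIdxBot    true         true
//   alias_idx == AliasIdxTop    false        false
//   otherwise                   index match  index match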
duke@435 | 1507 | |
duke@435 | 1508 | |
duke@435 | 1509 | |
duke@435 | 1510 | //---------------------------pop_warm_call------------------------------------- |
duke@435 | 1511 | WarmCallInfo* Compile::pop_warm_call() { |
duke@435 | 1512 | WarmCallInfo* wci = _warm_calls; |
duke@435 | 1513 | if (wci != NULL) _warm_calls = wci->remove_from(wci); |
duke@435 | 1514 | return wci; |
duke@435 | 1515 | } |
duke@435 | 1516 | |
duke@435 | 1517 | //----------------------------Inline_Warm-------------------------------------- |
duke@435 | 1518 | int Compile::Inline_Warm() { |
duke@435 | 1519 | // If there is room, try to inline some more warm call sites. |
duke@435 | 1520 | // %%% Do a graph index compaction pass when we think we're out of space? |
duke@435 | 1521 | if (!InlineWarmCalls) return 0; |
duke@435 | 1522 | |
duke@435 | 1523 | int calls_made_hot = 0; |
duke@435 | 1524 | int room_to_grow = NodeCountInliningCutoff - unique(); |
duke@435 | 1525 | int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep); |
duke@435 | 1526 | int amount_grown = 0; |
duke@435 | 1527 | WarmCallInfo* call; |
duke@435 | 1528 | while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) { |
duke@435 | 1529 | int est_size = (int)call->size(); |
duke@435 | 1530 | if (est_size > (room_to_grow - amount_grown)) { |
duke@435 | 1531 | // This one won't fit anyway. Get rid of it. |
duke@435 | 1532 | call->make_cold(); |
duke@435 | 1533 | continue; |
duke@435 | 1534 | } |
duke@435 | 1535 | call->make_hot(); |
duke@435 | 1536 | calls_made_hot++; |
duke@435 | 1537 | amount_grown += est_size; |
duke@435 | 1538 | amount_to_grow -= est_size; |
duke@435 | 1539 | } |
duke@435 | 1540 | |
duke@435 | 1541 | if (calls_made_hot > 0) set_major_progress(); |
duke@435 | 1542 | return calls_made_hot; |
duke@435 | 1543 | } |
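// Budget arithmetic, sketched with hypothetical numbers: with
// NodeCountInliningCutoff = 18000, unique() = 17500 and NodeCountInliningStep
// = 1000, room_to_grow is 500 and amount_to_grow is min(500, 1000) = 500.
// A warm call estimated at 600 nodes is made cold on the spot; two 200-node
// calls both fit and are made hot, leaving amount_to_grow at 100.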
duke@435 | 1544 | |
duke@435 | 1545 | |
duke@435 | 1546 | //----------------------------Finish_Warm-------------------------------------- |
duke@435 | 1547 | void Compile::Finish_Warm() { |
duke@435 | 1548 | if (!InlineWarmCalls) return; |
duke@435 | 1549 | if (failing()) return; |
duke@435 | 1550 | if (warm_calls() == NULL) return; |
duke@435 | 1551 | |
duke@435 | 1552 | // Clean up loose ends, if we are out of space for inlining. |
duke@435 | 1553 | WarmCallInfo* call; |
duke@435 | 1554 | while ((call = pop_warm_call()) != NULL) { |
duke@435 | 1555 | call->make_cold(); |
duke@435 | 1556 | } |
duke@435 | 1557 | } |
duke@435 | 1558 | |
cfang@1607 | 1559 | //---------------------cleanup_loop_predicates----------------------- |
cfang@1607 | 1560 | // Remove the opaque nodes that protect the predicates so that all unused |
cfang@1607 | 1561 | // checks and uncommon_traps will be eliminated from the ideal graph |
cfang@1607 | 1562 | void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) { |
cfang@1607 | 1563 | if (predicate_count()==0) return; |
cfang@1607 | 1564 | for (int i = predicate_count(); i > 0; i--) { |
cfang@1607 | 1565 | Node * n = predicate_opaque1_node(i-1); |
cfang@1607 | 1566 | assert(n->Opcode() == Op_Opaque1, "must be"); |
cfang@1607 | 1567 | igvn.replace_node(n, n->in(1)); |
cfang@1607 | 1568 | } |
cfang@1607 | 1569 | assert(predicate_count()==0, "should be clean!"); |
cfang@1607 | 1570 | igvn.optimize(); |
cfang@1607 | 1571 | } |
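// Rough shape of what is removed (sketch): an unused predicate is an If
// whose condition is still the constant it was created with, hidden behind
// an Opaque1 node precisely so earlier IGVN rounds could not fold it away.
// Replacing each Opaque1 by its input re-exposes the constant, and the
// closing igvn.optimize() then folds the If and its dead uncommon_trap.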
duke@435 | 1572 | |
duke@435 | 1573 | //------------------------------Optimize--------------------------------------- |
duke@435 | 1574 | // Given a graph, optimize it. |
duke@435 | 1575 | void Compile::Optimize() { |
duke@435 | 1576 | TracePhase t1("optimizer", &_t_optimizer, true); |
duke@435 | 1577 | |
duke@435 | 1578 | #ifndef PRODUCT |
duke@435 | 1579 | if (env()->break_at_compile()) { |
duke@435 | 1580 | BREAKPOINT; |
duke@435 | 1581 | } |
duke@435 | 1582 | |
duke@435 | 1583 | #endif |
duke@435 | 1584 | |
duke@435 | 1585 | ResourceMark rm; |
duke@435 | 1586 | int loop_opts_cnt; |
duke@435 | 1587 | |
duke@435 | 1588 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1589 | |
never@657 | 1590 | print_method("After Parsing"); |
duke@435 | 1591 | |
duke@435 | 1592 | { |
duke@435 | 1593 | // Iterative Global Value Numbering, including ideal transforms |
duke@435 | 1594 | // Initialize IterGVN with types and values from parse-time GVN |
duke@435 | 1595 | PhaseIterGVN igvn(initial_gvn()); |
duke@435 | 1596 | { |
duke@435 | 1597 | NOT_PRODUCT( TracePhase t2("iterGVN", &_t_iterGVN, TimeCompiler); ) |
duke@435 | 1598 | igvn.optimize(); |
duke@435 | 1599 | } |
duke@435 | 1600 | |
duke@435 | 1601 | print_method("Iter GVN 1", 2); |
duke@435 | 1602 | |
duke@435 | 1603 | if (failing()) return; |
duke@435 | 1604 | |
duke@435 | 1605 | // Loop transforms on the ideal graph. Range Check Elimination, |
duke@435 | 1606 | // peeling, unrolling, etc. |
duke@435 | 1607 | |
duke@435 | 1608 | // Set loop opts counter |
duke@435 | 1609 | loop_opts_cnt = num_loop_opts(); |
duke@435 | 1610 | if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) { |
duke@435 | 1611 | { |
duke@435 | 1612 | TracePhase t2("idealLoop", &_t_idealLoop, true); |
cfang@1607 | 1613 | PhaseIdealLoop ideal_loop( igvn, true, UseLoopPredicate); |
duke@435 | 1614 | loop_opts_cnt--; |
duke@435 | 1615 | if (major_progress()) print_method("PhaseIdealLoop 1", 2); |
duke@435 | 1616 | if (failing()) return; |
duke@435 | 1617 | } |
duke@435 | 1618 | // Loop opts pass if partial peeling occurred in previous pass |
duke@435 | 1619 | if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) { |
duke@435 | 1620 | TracePhase t3("idealLoop", &_t_idealLoop, true); |
cfang@1607 | 1621 | PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate); |
duke@435 | 1622 | loop_opts_cnt--; |
duke@435 | 1623 | if (major_progress()) print_method("PhaseIdealLoop 2", 2); |
duke@435 | 1624 | if (failing()) return; |
duke@435 | 1625 | } |
duke@435 | 1626 | // Loop opts pass for loop-unrolling before CCP |
duke@435 | 1627 | if(major_progress() && (loop_opts_cnt > 0)) { |
duke@435 | 1628 | TracePhase t4("idealLoop", &_t_idealLoop, true); |
cfang@1607 | 1629 | PhaseIdealLoop ideal_loop( igvn, false, UseLoopPredicate); |
duke@435 | 1630 | loop_opts_cnt--; |
duke@435 | 1631 | if (major_progress()) print_method("PhaseIdealLoop 3", 2); |
duke@435 | 1632 | } |
never@1356 | 1633 | if (!failing()) { |
never@1356 | 1634 | // Verify that last round of loop opts produced a valid graph |
never@1356 | 1635 | NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); ) |
never@1356 | 1636 | PhaseIdealLoop::verify(igvn); |
never@1356 | 1637 | } |
duke@435 | 1638 | } |
duke@435 | 1639 | if (failing()) return; |
duke@435 | 1640 | |
duke@435 | 1641 | // Conditional Constant Propagation; |
duke@435 | 1642 | PhaseCCP ccp( &igvn ); |
duke@435 | 1643 | assert( true, "Break here to ccp.dump_nodes_and_types(_root,999,1)"); |
duke@435 | 1644 | { |
duke@435 | 1645 | TracePhase t2("ccp", &_t_ccp, true); |
duke@435 | 1646 | ccp.do_transform(); |
duke@435 | 1647 | } |
duke@435 | 1648 | print_method("PhaseCPP 1", 2); |
duke@435 | 1649 | |
duke@435 | 1650 | assert( true, "Break here to ccp.dump_old2new_map()"); |
duke@435 | 1651 | |
duke@435 | 1652 | // Iterative Global Value Numbering, including ideal transforms |
duke@435 | 1653 | { |
duke@435 | 1654 | NOT_PRODUCT( TracePhase t2("iterGVN2", &_t_iterGVN2, TimeCompiler); ) |
duke@435 | 1655 | igvn = ccp; |
duke@435 | 1656 | igvn.optimize(); |
duke@435 | 1657 | } |
duke@435 | 1658 | |
duke@435 | 1659 | print_method("Iter GVN 2", 2); |
duke@435 | 1660 | |
duke@435 | 1661 | if (failing()) return; |
duke@435 | 1662 | |
duke@435 | 1663 | // Loop transforms on the ideal graph. Range Check Elimination, |
duke@435 | 1664 | // peeling, unrolling, etc. |
duke@435 | 1665 | if(loop_opts_cnt > 0) { |
duke@435 | 1666 | debug_only( int cnt = 0; ); |
cfang@1607 | 1667 | bool loop_predication = UseLoopPredicate; |
duke@435 | 1668 | while(major_progress() && (loop_opts_cnt > 0)) { |
duke@435 | 1669 | TracePhase t2("idealLoop", &_t_idealLoop, true); |
duke@435 | 1670 | assert( cnt++ < 40, "infinite cycle in loop optimization" ); |
cfang@1607 | 1671 | PhaseIdealLoop ideal_loop( igvn, true, loop_predication); |
duke@435 | 1672 | loop_opts_cnt--; |
duke@435 | 1673 | if (major_progress()) print_method("PhaseIdealLoop iterations", 2); |
duke@435 | 1674 | if (failing()) return; |
cfang@1607 | 1675 | // Perform loop predication optimization during first iteration after CCP. |
cfang@1607 | 1676 | // After that switch it off and cleanup unused loop predicates. |
cfang@1607 | 1677 | if (loop_predication) { |
cfang@1607 | 1678 | loop_predication = false; |
cfang@1607 | 1679 | cleanup_loop_predicates(igvn); |
cfang@1607 | 1680 | if (failing()) return; |
cfang@1607 | 1681 | } |
duke@435 | 1682 | } |
duke@435 | 1683 | } |
never@1356 | 1684 | |
never@1356 | 1685 | { |
never@1356 | 1686 | // Verify that all previous optimizations produced a valid graph |
never@1356 | 1687 | // at least to this point, even if no loop optimizations were done. |
never@1356 | 1688 | NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); ) |
never@1356 | 1689 | PhaseIdealLoop::verify(igvn); |
never@1356 | 1690 | } |
never@1356 | 1691 | |
duke@435 | 1692 | { |
duke@435 | 1693 | NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); ) |
duke@435 | 1694 | PhaseMacroExpand mex(igvn); |
duke@435 | 1695 | if (mex.expand_macro_nodes()) { |
duke@435 | 1696 | assert(failing(), "must bail out w/ explicit message"); |
duke@435 | 1697 | return; |
duke@435 | 1698 | } |
duke@435 | 1699 | } |
duke@435 | 1700 | |
duke@435 | 1701 | } // (End scope of igvn; run destructor if necessary for asserts.) |
duke@435 | 1702 | |
duke@435 | 1703 | // A method with only infinite loops has no edges entering loops from root |
duke@435 | 1704 | { |
duke@435 | 1705 | NOT_PRODUCT( TracePhase t2("graphReshape", &_t_graphReshaping, TimeCompiler); ) |
duke@435 | 1706 | if (final_graph_reshaping()) { |
duke@435 | 1707 | assert(failing(), "must bail out w/ explicit message"); |
duke@435 | 1708 | return; |
duke@435 | 1709 | } |
duke@435 | 1710 | } |
duke@435 | 1711 | |
duke@435 | 1712 | print_method("Optimize finished", 2); |
duke@435 | 1713 | } |
duke@435 | 1714 | |
duke@435 | 1715 | |
duke@435 | 1716 | //------------------------------Code_Gen--------------------------------------- |
duke@435 | 1717 | // Given a graph, generate code for it |
duke@435 | 1718 | void Compile::Code_Gen() { |
duke@435 | 1719 | if (failing()) return; |
duke@435 | 1720 | |
duke@435 | 1721 | // Perform instruction selection. You might think we could reclaim Matcher |
duke@435 | 1722 | // memory PDQ, but actually the Matcher is used in generating spill code. |
duke@435 | 1723 | // Internals of the Matcher (including some VectorSets) must remain live |
duke@435 | 1724 | // for a while - thus I cannot reclaim Matcher memory lest a VectorSet usage
duke@435 | 1725 | // set a bit in reclaimed memory. |
duke@435 | 1726 | |
duke@435 | 1727 | // In debug mode can dump m._nodes.dump() for mapping of ideal to machine |
duke@435 | 1728 | // nodes. Mapping is only valid at the root of each matched subtree. |
duke@435 | 1729 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1730 | |
duke@435 | 1731 | Node_List proj_list; |
duke@435 | 1732 | Matcher m(proj_list); |
duke@435 | 1733 | _matcher = &m; |
duke@435 | 1734 | { |
duke@435 | 1735 | TracePhase t2("matcher", &_t_matcher, true); |
duke@435 | 1736 | m.match(); |
duke@435 | 1737 | } |
duke@435 | 1738 | // In debug mode can dump m._nodes.dump() for mapping of ideal to machine |
duke@435 | 1739 | // nodes. Mapping is only valid at the root of each matched subtree. |
duke@435 | 1740 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1741 | |
duke@435 | 1742 | // If you have too many nodes, or if matching has failed, bail out |
duke@435 | 1743 | check_node_count(0, "out of nodes matching instructions"); |
duke@435 | 1744 | if (failing()) return; |
duke@435 | 1745 | |
duke@435 | 1746 | // Build a proper-looking CFG |
duke@435 | 1747 | PhaseCFG cfg(node_arena(), root(), m); |
duke@435 | 1748 | _cfg = &cfg; |
duke@435 | 1749 | { |
duke@435 | 1750 | NOT_PRODUCT( TracePhase t2("scheduler", &_t_scheduler, TimeCompiler); ) |
duke@435 | 1751 | cfg.Dominators(); |
duke@435 | 1752 | if (failing()) return; |
duke@435 | 1753 | |
duke@435 | 1754 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1755 | |
duke@435 | 1756 | cfg.Estimate_Block_Frequency(); |
duke@435 | 1757 | cfg.GlobalCodeMotion(m,unique(),proj_list); |
duke@435 | 1758 | |
duke@435 | 1759 | print_method("Global code motion", 2); |
duke@435 | 1760 | |
duke@435 | 1761 | if (failing()) return; |
duke@435 | 1762 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1763 | |
duke@435 | 1764 | debug_only( cfg.verify(); ) |
duke@435 | 1765 | } |
duke@435 | 1766 | NOT_PRODUCT( verify_graph_edges(); ) |
duke@435 | 1767 | |
duke@435 | 1768 | PhaseChaitin regalloc(unique(),cfg,m); |
duke@435 | 1769 | _regalloc = ®alloc; |
duke@435 | 1770 | { |
duke@435 | 1771 | TracePhase t2("regalloc", &_t_registerAllocation, true); |
duke@435 | 1772 | // Perform any platform dependent preallocation actions. This is used, |
duke@435 | 1773 | // for example, to avoid taking an implicit null pointer exception |
duke@435 | 1774 | // using the frame pointer on win95. |
duke@435 | 1775 | _regalloc->pd_preallocate_hook(); |
duke@435 | 1776 | |
duke@435 | 1777 | // Perform register allocation. After Chaitin, use-def chains are |
duke@435 | 1778 | // no longer accurate (at spill code) and so must be ignored. |
duke@435 | 1779 | // Node->LRG->reg mappings are still accurate. |
duke@435 | 1780 | _regalloc->Register_Allocate(); |
duke@435 | 1781 | |
duke@435 | 1782 | // Bail out if the allocator builds too many nodes |
duke@435 | 1783 | if (failing()) return; |
duke@435 | 1784 | } |
duke@435 | 1785 | |
duke@435 | 1786 | // Prior to register allocation we kept empty basic blocks in case
duke@435 | 1787 | // the allocator needed a place to spill. After register allocation we
duke@435 | 1788 | // are not adding any new instructions. If any basic block is empty, we |
duke@435 | 1789 | // can now safely remove it. |
duke@435 | 1790 | { |
rasbold@853 | 1791 | NOT_PRODUCT( TracePhase t2("blockOrdering", &_t_blockOrdering, TimeCompiler); ) |
rasbold@853 | 1792 | cfg.remove_empty(); |
rasbold@853 | 1793 | if (do_freq_based_layout()) { |
rasbold@853 | 1794 | PhaseBlockLayout layout(cfg); |
rasbold@853 | 1795 | } else { |
rasbold@853 | 1796 | cfg.set_loop_alignment(); |
rasbold@853 | 1797 | } |
rasbold@853 | 1798 | cfg.fixup_flow(); |
duke@435 | 1799 | } |
duke@435 | 1800 | |
duke@435 | 1801 | // Perform any platform dependent postallocation verifications. |
duke@435 | 1802 | debug_only( _regalloc->pd_postallocate_verify_hook(); ) |
duke@435 | 1803 | |
duke@435 | 1804 | // Apply peephole optimizations |
duke@435 | 1805 | if( OptoPeephole ) { |
duke@435 | 1806 | NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); ) |
duke@435 | 1807 | PhasePeephole peep( _regalloc, cfg); |
duke@435 | 1808 | peep.do_transform(); |
duke@435 | 1809 | } |
duke@435 | 1810 | |
duke@435 | 1811 | // Convert Nodes to instruction bits in a buffer |
duke@435 | 1812 | { |
duke@435 | 1813 | // %%%% workspace merge brought two timers together for one job |
duke@435 | 1814 | TracePhase t2a("output", &_t_output, true); |
duke@435 | 1815 | NOT_PRODUCT( TraceTime t2b(NULL, &_t_codeGeneration, TimeCompiler, false); ) |
duke@435 | 1816 | Output(); |
duke@435 | 1817 | } |
duke@435 | 1818 | |
never@657 | 1819 | print_method("Final Code"); |
duke@435 | 1820 | |
duke@435 | 1821 | // He's dead, Jim. |
duke@435 | 1822 | _cfg = (PhaseCFG*)0xdeadbeef; |
duke@435 | 1823 | _regalloc = (PhaseChaitin*)0xdeadbeef; |
duke@435 | 1824 | } |
duke@435 | 1825 | |
duke@435 | 1826 | |
duke@435 | 1827 | //------------------------------dump_asm--------------------------------------- |
duke@435 | 1828 | // Dump formatted assembly |
duke@435 | 1829 | #ifndef PRODUCT |
duke@435 | 1830 | void Compile::dump_asm(int *pcs, uint pc_limit) { |
duke@435 | 1831 | bool cut_short = false; |
duke@435 | 1832 | tty->print_cr("#"); |
duke@435 | 1833 | tty->print("# "); _tf->dump(); tty->cr(); |
duke@435 | 1834 | tty->print_cr("#"); |
duke@435 | 1835 | |
duke@435 | 1836 | // For all blocks |
duke@435 | 1837 | int pc = 0x0; // Program counter |
duke@435 | 1838 | char starts_bundle = ' '; |
duke@435 | 1839 | _regalloc->dump_frame(); |
duke@435 | 1840 | |
duke@435 | 1841 | Node *n = NULL; |
duke@435 | 1842 | for( uint i=0; i<_cfg->_num_blocks; i++ ) { |
duke@435 | 1843 | if (VMThread::should_terminate()) { cut_short = true; break; } |
duke@435 | 1844 | Block *b = _cfg->_blocks[i]; |
duke@435 | 1845 | if (b->is_connector() && !Verbose) continue; |
duke@435 | 1846 | n = b->_nodes[0]; |
duke@435 | 1847 | if (pcs && n->_idx < pc_limit) |
duke@435 | 1848 | tty->print("%3.3x ", pcs[n->_idx]); |
duke@435 | 1849 | else |
duke@435 | 1850 | tty->print(" "); |
duke@435 | 1851 | b->dump_head( &_cfg->_bbs ); |
duke@435 | 1852 | if (b->is_connector()) { |
duke@435 | 1853 | tty->print_cr(" # Empty connector block"); |
duke@435 | 1854 | } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) { |
duke@435 | 1855 | tty->print_cr(" # Block is sole successor of call"); |
duke@435 | 1856 | } |
duke@435 | 1857 | |
duke@435 | 1858 | // For all instructions |
duke@435 | 1859 | Node *delay = NULL; |
duke@435 | 1860 | for( uint j = 0; j<b->_nodes.size(); j++ ) { |
duke@435 | 1861 | if (VMThread::should_terminate()) { cut_short = true; break; } |
duke@435 | 1862 | n = b->_nodes[j]; |
duke@435 | 1863 | if (valid_bundle_info(n)) { |
duke@435 | 1864 | Bundle *bundle = node_bundling(n); |
duke@435 | 1865 | if (bundle->used_in_unconditional_delay()) { |
duke@435 | 1866 | delay = n; |
duke@435 | 1867 | continue; |
duke@435 | 1868 | } |
duke@435 | 1869 | if (bundle->starts_bundle()) |
duke@435 | 1870 | starts_bundle = '+'; |
duke@435 | 1871 | } |
duke@435 | 1872 | |
coleenp@548 | 1873 | if (WizardMode) n->dump(); |
coleenp@548 | 1874 | |
duke@435 | 1875 | if( !n->is_Region() && // Don't print in the Assembly
duke@435 | 1876 | !n->is_Phi() && // a few noisily useless nodes
duke@435 | 1877 | !n->is_Proj() && |
duke@435 | 1878 | !n->is_MachTemp() && |
kvn@1535 | 1879 | !n->is_SafePointScalarObject() && |
duke@435 | 1880 | !n->is_Catch() && // Would be nice to print exception table targets |
duke@435 | 1881 | !n->is_MergeMem() && // Not very interesting |
duke@435 | 1882 | !n->is_top() && // Debug info table constants |
duke@435 | 1883 | !(n->is_Con() && !n->is_Mach())// Debug info table constants |
duke@435 | 1884 | ) { |
duke@435 | 1885 | if (pcs && n->_idx < pc_limit) |
duke@435 | 1886 | tty->print("%3.3x", pcs[n->_idx]); |
duke@435 | 1887 | else |
duke@435 | 1888 | tty->print(" "); |
duke@435 | 1889 | tty->print(" %c ", starts_bundle); |
duke@435 | 1890 | starts_bundle = ' '; |
duke@435 | 1891 | tty->print("\t"); |
duke@435 | 1892 | n->format(_regalloc, tty); |
duke@435 | 1893 | tty->cr(); |
duke@435 | 1894 | } |
duke@435 | 1895 | |
duke@435 | 1896 | // If we have an instruction with a delay slot, and have seen a delay, |
duke@435 | 1897 | // then back up and print it |
duke@435 | 1898 | if (valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) { |
duke@435 | 1899 | assert(delay != NULL, "no unconditional delay instruction"); |
coleenp@548 | 1900 | if (WizardMode) delay->dump(); |
coleenp@548 | 1901 | |
duke@435 | 1902 | if (node_bundling(delay)->starts_bundle()) |
duke@435 | 1903 | starts_bundle = '+'; |
duke@435 | 1904 | if (pcs && n->_idx < pc_limit) |
duke@435 | 1905 | tty->print("%3.3x", pcs[n->_idx]); |
duke@435 | 1906 | else |
duke@435 | 1907 | tty->print(" "); |
duke@435 | 1908 | tty->print(" %c ", starts_bundle); |
duke@435 | 1909 | starts_bundle = ' '; |
duke@435 | 1910 | tty->print("\t"); |
duke@435 | 1911 | delay->format(_regalloc, tty); |
duke@435 | 1912 | tty->print_cr(""); |
duke@435 | 1913 | delay = NULL; |
duke@435 | 1914 | } |
duke@435 | 1915 | |
duke@435 | 1916 | // Dump the exception table as well |
duke@435 | 1917 | if( n->is_Catch() && (Verbose || WizardMode) ) { |
duke@435 | 1918 | // Print the exception table for this offset |
duke@435 | 1919 | _handler_table.print_subtable_for(pc); |
duke@435 | 1920 | } |
duke@435 | 1921 | } |
duke@435 | 1922 | |
duke@435 | 1923 | if (pcs && n->_idx < pc_limit) |
duke@435 | 1924 | tty->print_cr("%3.3x", pcs[n->_idx]); |
duke@435 | 1925 | else |
duke@435 | 1926 | tty->print_cr(""); |
duke@435 | 1927 | |
duke@435 | 1928 | assert(cut_short || delay == NULL, "no unconditional delay branch"); |
duke@435 | 1929 | |
duke@435 | 1930 | } // End of per-block dump |
duke@435 | 1931 | tty->print_cr(""); |
duke@435 | 1932 | |
duke@435 | 1933 | if (cut_short) tty->print_cr("*** disassembly is cut short ***"); |
duke@435 | 1934 | } |
duke@435 | 1935 | #endif |
duke@435 | 1936 | |
duke@435 | 1937 | //------------------------------Final_Reshape_Counts--------------------------- |
duke@435 | 1938 | // This class defines counters to help identify when a method |
duke@435 | 1939 | // may/must be executed using hardware with only 24-bit precision. |
duke@435 | 1940 | struct Final_Reshape_Counts : public StackObj { |
duke@435 | 1941 | int _call_count; // count non-inlined 'common' calls |
duke@435 | 1942 | int _float_count; // count float ops requiring 24-bit precision |
duke@435 | 1943 | int _double_count; // count double ops requiring more precision |
duke@435 | 1944 | int _java_call_count; // count non-inlined 'java' calls |
kvn@1294 | 1945 | int _inner_loop_count; // count loops which need alignment |
duke@435 | 1946 | VectorSet _visited; // Visitation flags |
duke@435 | 1947 | Node_List _tests; // Set of IfNodes & PCTableNodes |
duke@435 | 1948 | |
duke@435 | 1949 | Final_Reshape_Counts() : |
kvn@1294 | 1950 | _call_count(0), _float_count(0), _double_count(0), |
kvn@1294 | 1951 | _java_call_count(0), _inner_loop_count(0), |
duke@435 | 1952 | _visited( Thread::current()->resource_area() ) { } |
duke@435 | 1953 | |
duke@435 | 1954 | void inc_call_count () { _call_count ++; } |
duke@435 | 1955 | void inc_float_count () { _float_count ++; } |
duke@435 | 1956 | void inc_double_count() { _double_count++; } |
duke@435 | 1957 | void inc_java_call_count() { _java_call_count++; } |
kvn@1294 | 1958 | void inc_inner_loop_count() { _inner_loop_count++; } |
duke@435 | 1959 | |
duke@435 | 1960 | int get_call_count () const { return _call_count ; } |
duke@435 | 1961 | int get_float_count () const { return _float_count ; } |
duke@435 | 1962 | int get_double_count() const { return _double_count; } |
duke@435 | 1963 | int get_java_call_count() const { return _java_call_count; } |
kvn@1294 | 1964 | int get_inner_loop_count() const { return _inner_loop_count; } |
duke@435 | 1965 | }; |
duke@435 | 1966 | |
duke@435 | 1967 | static bool oop_offset_is_sane(const TypeInstPtr* tp) { |
duke@435 | 1968 | ciInstanceKlass *k = tp->klass()->as_instance_klass(); |
duke@435 | 1969 | // Make sure the offset goes inside the instance layout. |
coleenp@548 | 1970 | return k->contains_field_offset(tp->offset()); |
duke@435 | 1971 | // Note that OffsetBot and OffsetTop are very negative. |
duke@435 | 1972 | } |
duke@435 | 1973 | |
duke@435 | 1974 | //------------------------------final_graph_reshaping_impl---------------------- |
duke@435 | 1975 | // Implement items 1-5 from final_graph_reshaping below. |
kvn@1294 | 1976 | static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) { |
duke@435 | 1977 | |
kvn@603 | 1978 | if ( n->outcnt() == 0 ) return; // dead node |
duke@435 | 1979 | uint nop = n->Opcode(); |
duke@435 | 1980 | |
duke@435 | 1981 | // Check for 2-input instruction with "last use" on right input. |
duke@435 | 1982 | // Swap to left input. Implements item (2). |
duke@435 | 1983 | if( n->req() == 3 && // two-input instruction |
duke@435 | 1984 | n->in(1)->outcnt() > 1 && // left use is NOT a last use |
duke@435 | 1985 | (!n->in(1)->is_Phi() || n->in(1)->in(2) != n) && // it is not data loop |
duke@435 | 1986 | n->in(2)->outcnt() == 1 &&// right use IS a last use |
duke@435 | 1987 | !n->in(2)->is_Con() ) { // right use is not a constant |
duke@435 | 1988 | // Check for commutative opcode |
duke@435 | 1989 | switch( nop ) { |
duke@435 | 1990 | case Op_AddI: case Op_AddF: case Op_AddD: case Op_AddL: |
duke@435 | 1991 | case Op_MaxI: case Op_MinI: |
duke@435 | 1992 | case Op_MulI: case Op_MulF: case Op_MulD: case Op_MulL: |
duke@435 | 1993 | case Op_AndL: case Op_XorL: case Op_OrL: |
duke@435 | 1994 | case Op_AndI: case Op_XorI: case Op_OrI: { |
duke@435 | 1995 | // Move "last use" input to left by swapping inputs |
duke@435 | 1996 | n->swap_edges(1, 2); |
duke@435 | 1997 | break; |
duke@435 | 1998 | } |
duke@435 | 1999 | default: |
duke@435 | 2000 | break; |
duke@435 | 2001 | } |
duke@435 | 2002 | } |
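// Why the swap helps, sketched for a two-address target (registers
// hypothetical): destructive instructions overwrite their left input, so
// putting the dying value on the left lets the allocator reuse its register
// for the result instead of preserving it with a copy:
//
//   t = a + b;   // a stays live afterwards, b dies here
//   // after swap_edges: t = b + a, which can match as
//   //   add r_b, r_a      // r_b now holds t; a's register is untouched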
duke@435 | 2003 | |
duke@435 | 2004 | // Count FPU ops and common calls, implements item (3) |
duke@435 | 2005 | switch( nop ) { |
duke@435 | 2006 | // Count all float operations that may use FPU |
duke@435 | 2007 | case Op_AddF: |
duke@435 | 2008 | case Op_SubF: |
duke@435 | 2009 | case Op_MulF: |
duke@435 | 2010 | case Op_DivF: |
duke@435 | 2011 | case Op_NegF: |
duke@435 | 2012 | case Op_ModF: |
duke@435 | 2013 | case Op_ConvI2F: |
duke@435 | 2014 | case Op_ConF: |
duke@435 | 2015 | case Op_CmpF: |
duke@435 | 2016 | case Op_CmpF3: |
duke@435 | 2017 | // case Op_ConvL2F: // longs are split into 32-bit halves |
kvn@1294 | 2018 | frc.inc_float_count(); |
duke@435 | 2019 | break; |
duke@435 | 2020 | |
duke@435 | 2021 | case Op_ConvF2D: |
duke@435 | 2022 | case Op_ConvD2F: |
kvn@1294 | 2023 | frc.inc_float_count(); |
kvn@1294 | 2024 | frc.inc_double_count(); |
duke@435 | 2025 | break; |
duke@435 | 2026 | |
duke@435 | 2027 | // Count all double operations that may use FPU |
duke@435 | 2028 | case Op_AddD: |
duke@435 | 2029 | case Op_SubD: |
duke@435 | 2030 | case Op_MulD: |
duke@435 | 2031 | case Op_DivD: |
duke@435 | 2032 | case Op_NegD: |
duke@435 | 2033 | case Op_ModD: |
duke@435 | 2034 | case Op_ConvI2D: |
duke@435 | 2035 | case Op_ConvD2I: |
duke@435 | 2036 | // case Op_ConvL2D: // handled by leaf call |
duke@435 | 2037 | // case Op_ConvD2L: // handled by leaf call |
duke@435 | 2038 | case Op_ConD: |
duke@435 | 2039 | case Op_CmpD: |
duke@435 | 2040 | case Op_CmpD3: |
kvn@1294 | 2041 | frc.inc_double_count(); |
duke@435 | 2042 | break; |
duke@435 | 2043 | case Op_Opaque1: // Remove Opaque Nodes before matching |
duke@435 | 2044 | case Op_Opaque2: // Remove Opaque Nodes before matching |
kvn@603 | 2045 | n->subsume_by(n->in(1)); |
duke@435 | 2046 | break; |
duke@435 | 2047 | case Op_CallStaticJava: |
duke@435 | 2048 | case Op_CallJava: |
duke@435 | 2049 | case Op_CallDynamicJava: |
kvn@1294 | 2050 | frc.inc_java_call_count(); // Count java call site, then fall through
duke@435 | 2051 | case Op_CallRuntime: |
duke@435 | 2052 | case Op_CallLeaf: |
duke@435 | 2053 | case Op_CallLeafNoFP: { |
duke@435 | 2054 | assert( n->is_Call(), "" ); |
duke@435 | 2055 | CallNode *call = n->as_Call(); |
duke@435 | 2056 | // Count call sites where the FP mode bit would have to be flipped. |
duke@435 | 2057 | // Do not count uncommon runtime calls: |
duke@435 | 2058 | // uncommon_trap, _complete_monitor_locking, _complete_monitor_unlocking, |
duke@435 | 2059 | // _new_Java, _new_typeArray, _new_objArray, _rethrow_Java, ... |
duke@435 | 2060 | if( !call->is_CallStaticJava() || !call->as_CallStaticJava()->_name ) { |
kvn@1294 | 2061 | frc.inc_call_count(); // Count the call site |
duke@435 | 2062 | } else { // See if uncommon argument is shared |
duke@435 | 2063 | Node *n = call->in(TypeFunc::Parms); |
duke@435 | 2064 | int nop = n->Opcode(); |
duke@435 | 2065 | // Clone shared simple arguments to uncommon calls, item (1). |
duke@435 | 2066 | if( n->outcnt() > 1 && |
duke@435 | 2067 | !n->is_Proj() && |
duke@435 | 2068 | nop != Op_CreateEx && |
duke@435 | 2069 | nop != Op_CheckCastPP && |
kvn@766 | 2070 | nop != Op_DecodeN && |
duke@435 | 2071 | !n->is_Mem() ) { |
duke@435 | 2072 | Node *x = n->clone(); |
duke@435 | 2073 | call->set_req( TypeFunc::Parms, x ); |
duke@435 | 2074 | } |
duke@435 | 2075 | } |
duke@435 | 2076 | break; |
duke@435 | 2077 | } |
duke@435 | 2078 | |
duke@435 | 2079 | case Op_StoreD: |
duke@435 | 2080 | case Op_LoadD: |
duke@435 | 2081 | case Op_LoadD_unaligned: |
kvn@1294 | 2082 | frc.inc_double_count(); |
duke@435 | 2083 | goto handle_mem; |
duke@435 | 2084 | case Op_StoreF: |
duke@435 | 2085 | case Op_LoadF: |
kvn@1294 | 2086 | frc.inc_float_count(); |
duke@435 | 2087 | goto handle_mem; |
duke@435 | 2088 | |
duke@435 | 2089 | case Op_StoreB: |
duke@435 | 2090 | case Op_StoreC: |
duke@435 | 2091 | case Op_StoreCM: |
duke@435 | 2092 | case Op_StorePConditional: |
duke@435 | 2093 | case Op_StoreI: |
duke@435 | 2094 | case Op_StoreL: |
kvn@855 | 2095 | case Op_StoreIConditional: |
duke@435 | 2096 | case Op_StoreLConditional: |
duke@435 | 2097 | case Op_CompareAndSwapI: |
duke@435 | 2098 | case Op_CompareAndSwapL: |
duke@435 | 2099 | case Op_CompareAndSwapP: |
coleenp@548 | 2100 | case Op_CompareAndSwapN: |
duke@435 | 2101 | case Op_StoreP: |
coleenp@548 | 2102 | case Op_StoreN: |
duke@435 | 2103 | case Op_LoadB: |
twisti@1059 | 2104 | case Op_LoadUB: |
twisti@993 | 2105 | case Op_LoadUS: |
duke@435 | 2106 | case Op_LoadI: |
twisti@1059 | 2107 | case Op_LoadUI2L: |
duke@435 | 2108 | case Op_LoadKlass: |
kvn@599 | 2109 | case Op_LoadNKlass: |
duke@435 | 2110 | case Op_LoadL: |
duke@435 | 2111 | case Op_LoadL_unaligned: |
duke@435 | 2112 | case Op_LoadPLocked: |
duke@435 | 2113 | case Op_LoadLLocked: |
duke@435 | 2114 | case Op_LoadP: |
coleenp@548 | 2115 | case Op_LoadN: |
duke@435 | 2116 | case Op_LoadRange: |
duke@435 | 2117 | case Op_LoadS: { |
duke@435 | 2118 | handle_mem: |
duke@435 | 2119 | #ifdef ASSERT |
duke@435 | 2120 | if( VerifyOptoOopOffsets ) { |
duke@435 | 2121 | assert( n->is_Mem(), "" ); |
duke@435 | 2122 | MemNode *mem = (MemNode*)n; |
duke@435 | 2123 | // Check to see if address types have grounded out somehow. |
duke@435 | 2124 | const TypeInstPtr *tp = mem->in(MemNode::Address)->bottom_type()->isa_instptr(); |
duke@435 | 2125 | assert( !tp || oop_offset_is_sane(tp), "" ); |
duke@435 | 2126 | } |
duke@435 | 2127 | #endif |
duke@435 | 2128 | break; |
duke@435 | 2129 | } |
duke@435 | 2130 | |
duke@435 | 2131 | case Op_AddP: { // Assert sane base pointers |
kvn@617 | 2132 | Node *addp = n->in(AddPNode::Address); |
duke@435 | 2133 | assert( !addp->is_AddP() || |
duke@435 | 2134 | addp->in(AddPNode::Base)->is_top() || // Top OK for allocation |
duke@435 | 2135 | addp->in(AddPNode::Base) == n->in(AddPNode::Base), |
duke@435 | 2136 | "Base pointers must match" ); |
kvn@617 | 2137 | #ifdef _LP64 |
kvn@617 | 2138 | if (UseCompressedOops && |
kvn@617 | 2139 | addp->Opcode() == Op_ConP && |
kvn@617 | 2140 | addp == n->in(AddPNode::Base) && |
kvn@617 | 2141 | n->in(AddPNode::Offset)->is_Con()) { |
kvn@617 | 2142 | // Use addressing with narrow klass to load with offset on x86. |
kvn@617 | 2143 | // On SPARC, loading a 32-bit constant and decoding it takes fewer
kvn@617 | 2144 | // instructions (4) than loading a 64-bit constant (7).
kvn@617 | 2145 | // Do this transformation here since IGVN will convert ConN back to ConP. |
kvn@617 | 2146 | const Type* t = addp->bottom_type(); |
kvn@617 | 2147 | if (t->isa_oopptr()) { |
kvn@617 | 2148 | Node* nn = NULL; |
kvn@617 | 2149 | |
kvn@617 | 2150 | // Look for existing ConN node of the same exact type. |
kvn@617 | 2151 | Compile* C = Compile::current(); |
kvn@617 | 2152 | Node* r = C->root(); |
kvn@617 | 2153 | uint cnt = r->outcnt(); |
kvn@617 | 2154 | for (uint i = 0; i < cnt; i++) { |
kvn@617 | 2155 | Node* m = r->raw_out(i); |
kvn@617 | 2156 | if (m != NULL && m->Opcode() == Op_ConN &&
kvn@656 | 2157 | m->bottom_type()->make_ptr() == t) { |
kvn@617 | 2158 | nn = m; |
kvn@617 | 2159 | break; |
kvn@617 | 2160 | } |
kvn@617 | 2161 | } |
kvn@617 | 2162 | if (nn != NULL) { |
kvn@617 | 2163 | // Decode a narrow oop to match address |
kvn@617 | 2164 | // [R12 + narrow_oop_reg<<3 + offset] |
kvn@617 | 2165 | nn = new (C, 2) DecodeNNode(nn, t); |
kvn@617 | 2166 | n->set_req(AddPNode::Base, nn); |
kvn@617 | 2167 | n->set_req(AddPNode::Address, nn); |
kvn@617 | 2168 | if (addp->outcnt() == 0) { |
kvn@617 | 2169 | addp->disconnect_inputs(NULL); |
kvn@617 | 2170 | } |
kvn@617 | 2171 | } |
kvn@617 | 2172 | } |
kvn@617 | 2173 | } |
kvn@617 | 2174 | #endif |
duke@435 | 2175 | break; |
duke@435 | 2176 | } |
duke@435 | 2177 | |
kvn@599 | 2178 | #ifdef _LP64 |
kvn@803 | 2179 | case Op_CastPP: |
kvn@1077 | 2180 | if (n->in(1)->is_DecodeN() && Universe::narrow_oop_use_implicit_null_checks()) { |
kvn@803 | 2181 | Compile* C = Compile::current(); |
kvn@803 | 2182 | Node* in1 = n->in(1); |
kvn@803 | 2183 | const Type* t = n->bottom_type(); |
kvn@803 | 2184 | Node* new_in1 = in1->clone(); |
kvn@803 | 2185 | new_in1->as_DecodeN()->set_type(t); |
kvn@803 | 2186 | |
kvn@803 | 2187 | if (!Matcher::clone_shift_expressions) { |
kvn@803 | 2188 | // |
kvn@803 | 2189 | // x86, ARM and friends can handle 2 adds in addressing mode |
kvn@803 | 2190 | // and Matcher can fold a DecodeN node into address by using |
kvn@803 | 2191 | // a narrow oop directly and do implicit NULL check in address: |
kvn@803 | 2192 | // |
kvn@803 | 2193 | // [R12 + narrow_oop_reg<<3 + offset] |
kvn@803 | 2194 | // NullCheck narrow_oop_reg |
kvn@803 | 2195 | // |
kvn@803 | 2196 | // On other platforms (Sparc) we have to keep new DecodeN node and |
kvn@803 | 2197 | // use it to do implicit NULL check in address: |
kvn@803 | 2198 | // |
kvn@803 | 2199 | // decode_not_null narrow_oop_reg, base_reg |
kvn@803 | 2200 | // [base_reg + offset] |
kvn@803 | 2201 | // NullCheck base_reg |
kvn@803 | 2202 | // |
twisti@1040 | 2203 | // Pin the new DecodeN node to the non-null path on such platforms (SPARC)
kvn@803 | 2204 | // so we keep track of which NULL check the new DecodeN node
kvn@803 | 2205 | // corresponds to, and can use it as the value in implicit_null_check().
kvn@803 | 2206 | // |
kvn@803 | 2207 | new_in1->set_req(0, n->in(0)); |
kvn@803 | 2208 | } |
kvn@803 | 2209 | |
kvn@803 | 2210 | n->subsume_by(new_in1); |
kvn@803 | 2211 | if (in1->outcnt() == 0) { |
kvn@803 | 2212 | in1->disconnect_inputs(NULL); |
kvn@803 | 2213 | } |
kvn@803 | 2214 | } |
kvn@803 | 2215 | break; |
kvn@803 | 2216 | |
kvn@599 | 2217 | case Op_CmpP: |
kvn@603 | 2218 | // Do this transformation here to preserve CmpPNode::sub() and |
kvn@603 | 2219 | // other TypePtr related Ideal optimizations (for example, ptr nullness). |
kvn@766 | 2220 | if (n->in(1)->is_DecodeN() || n->in(2)->is_DecodeN()) { |
kvn@766 | 2221 | Node* in1 = n->in(1); |
kvn@766 | 2222 | Node* in2 = n->in(2); |
kvn@766 | 2223 | if (!in1->is_DecodeN()) { |
kvn@766 | 2224 | in2 = in1; |
kvn@766 | 2225 | in1 = n->in(2); |
kvn@766 | 2226 | } |
kvn@766 | 2227 | assert(in1->is_DecodeN(), "sanity"); |
kvn@766 | 2228 | |
kvn@599 | 2229 | Compile* C = Compile::current(); |
kvn@766 | 2230 | Node* new_in2 = NULL; |
kvn@766 | 2231 | if (in2->is_DecodeN()) { |
kvn@766 | 2232 | new_in2 = in2->in(1); |
kvn@766 | 2233 | } else if (in2->Opcode() == Op_ConP) { |
kvn@766 | 2234 | const Type* t = in2->bottom_type(); |
kvn@1077 | 2235 | if (t == TypePtr::NULL_PTR && Universe::narrow_oop_use_implicit_null_checks()) { |
kvn@803 | 2236 | new_in2 = ConNode::make(C, TypeNarrowOop::NULL_PTR); |
kvn@803 | 2237 | // |
kvn@803 | 2238 | // This transformation together with CastPP transformation above |
kvn@803 | 2239 | // will generate code with implicit NULL checks for compressed oops. |
kvn@803 | 2240 | // |
kvn@803 | 2241 | // The original code after Optimize() |
kvn@803 | 2242 | // |
kvn@803 | 2243 | // LoadN memory, narrow_oop_reg |
kvn@803 | 2244 | // decode narrow_oop_reg, base_reg |
kvn@803 | 2245 | // CmpP base_reg, NULL |
kvn@803 | 2246 | // CastPP base_reg // NotNull |
kvn@803 | 2247 | // Load [base_reg + offset], val_reg |
kvn@803 | 2248 | // |
kvn@803 | 2249 | // after these transformations becomes |
kvn@803 | 2250 | // |
kvn@803 | 2251 | // LoadN memory, narrow_oop_reg |
kvn@803 | 2252 | // CmpN narrow_oop_reg, NULL |
kvn@803 | 2253 | // decode_not_null narrow_oop_reg, base_reg |
kvn@803 | 2254 | // Load [base_reg + offset], val_reg |
kvn@803 | 2255 | // |
kvn@803 | 2256 | // and the uncommon path (== NULL) will use narrow_oop_reg directly |
kvn@803 | 2257 | // since narrow oops can be used in debug info now (see the code in |
kvn@803 | 2258 | // final_graph_reshaping_walk()). |
kvn@803 | 2259 | // |
kvn@803 | 2260 | // At the end the code will be matched to |
kvn@803 | 2261 | // on x86: |
kvn@803 | 2262 | // |
kvn@803 | 2263 | // Load_narrow_oop memory, narrow_oop_reg |
kvn@803 | 2264 | // Load [R12 + narrow_oop_reg<<3 + offset], val_reg |
kvn@803 | 2265 | // NullCheck narrow_oop_reg |
kvn@803 | 2266 | // |
kvn@803 | 2267 | // and on sparc: |
kvn@803 | 2268 | // |
kvn@803 | 2269 | // Load_narrow_oop memory, narrow_oop_reg |
kvn@803 | 2270 | // decode_not_null narrow_oop_reg, base_reg |
kvn@803 | 2271 | // Load [base_reg + offset], val_reg |
kvn@803 | 2272 | // NullCheck base_reg |
kvn@803 | 2273 | // |
kvn@599 | 2274 | } else if (t->isa_oopptr()) { |
kvn@766 | 2275 | new_in2 = ConNode::make(C, t->make_narrowoop()); |
kvn@599 | 2276 | } |
kvn@599 | 2277 | } |
kvn@766 | 2278 | if (new_in2 != NULL) { |
kvn@766 | 2279 | Node* cmpN = new (C, 3) CmpNNode(in1->in(1), new_in2); |
kvn@603 | 2280 | n->subsume_by( cmpN ); |
kvn@766 | 2281 | if (in1->outcnt() == 0) { |
kvn@766 | 2282 | in1->disconnect_inputs(NULL); |
kvn@766 | 2283 | } |
kvn@766 | 2284 | if (in2->outcnt() == 0) { |
kvn@766 | 2285 | in2->disconnect_inputs(NULL); |
kvn@766 | 2286 | } |
kvn@599 | 2287 | } |
kvn@599 | 2288 | } |
kvn@728 | 2289 | break; |
kvn@803 | 2290 | |
kvn@803 | 2291 | case Op_DecodeN: |
kvn@803 | 2292 | assert(!n->in(1)->is_EncodeP(), "should be optimized out"); |
kvn@927 | 2293 | // DecodeN could be pinned on Sparc where it can't be folded into |
kvn@927 | 2294 | // an address expression; see the code for Op_CastPP above. |
kvn@927 | 2295 | assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc"); |
kvn@803 | 2296 | break; |
kvn@803 | 2297 | |
kvn@803 | 2298 | case Op_EncodeP: { |
kvn@803 | 2299 | Node* in1 = n->in(1); |
kvn@803 | 2300 | if (in1->is_DecodeN()) { |
kvn@803 | 2301 | n->subsume_by(in1->in(1)); |
kvn@803 | 2302 | } else if (in1->Opcode() == Op_ConP) { |
kvn@803 | 2303 | Compile* C = Compile::current(); |
kvn@803 | 2304 | const Type* t = in1->bottom_type(); |
kvn@803 | 2305 | if (t == TypePtr::NULL_PTR) { |
kvn@803 | 2306 | n->subsume_by(ConNode::make(C, TypeNarrowOop::NULL_PTR)); |
kvn@803 | 2307 | } else if (t->isa_oopptr()) { |
kvn@803 | 2308 | n->subsume_by(ConNode::make(C, t->make_narrowoop())); |
kvn@803 | 2309 | } |
kvn@803 | 2310 | } |
kvn@803 | 2311 | if (in1->outcnt() == 0) { |
kvn@803 | 2312 | in1->disconnect_inputs(NULL); |
kvn@803 | 2313 | } |
kvn@803 | 2314 | break; |
kvn@803 | 2315 | } |
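// Editor's note (illustrative): the Op_EncodeP case above relies on
// encode/decode being inverse operations, so
//
//   EncodeP (DecodeN x)     ==>  x
//   EncodeP (ConP NULL)     ==>  ConN TypeNarrowOop::NULL_PTR
//   EncodeP (ConP some_oop) ==>  ConN (narrow form of some_oop)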
kvn@803 | 2316 | |
never@1515 | 2317 | case Op_Proj: { |
never@1515 | 2318 | if (OptimizeStringConcat) { |
never@1515 | 2319 | ProjNode* p = n->as_Proj(); |
never@1515 | 2320 | if (p->_is_io_use) { |
never@1515 | 2321 | // Separate projections were used for the exception path; these |
never@1515 | 2322 | // are normally removed by the late inline.  If the call wasn't |
never@1515 | 2323 | // inlined, they will hang around and should just be replaced with |
never@1515 | 2324 | // the original projection. |
never@1515 | 2325 | Node* proj = NULL; |
never@1515 | 2326 | // Replace with just one |
never@1515 | 2327 | for (SimpleDUIterator i(p->in(0)); i.has_next(); i.next()) { |
never@1515 | 2328 | Node *use = i.get(); |
never@1515 | 2329 | if (use->is_Proj() && p != use && use->as_Proj()->_con == p->_con) { |
never@1515 | 2330 | proj = use; |
never@1515 | 2331 | break; |
never@1515 | 2332 | } |
never@1515 | 2333 | } |
never@1515 | 2334 | assert(proj != NULL, "must be found"); |
never@1515 | 2335 | p->subsume_by(proj); |
never@1515 | 2336 | } |
never@1515 | 2337 | } |
never@1515 | 2338 | break; |
never@1515 | 2339 | } |
never@1515 | 2340 | |
kvn@803 | 2341 | case Op_Phi: |
kvn@803 | 2342 | if (n->as_Phi()->bottom_type()->isa_narrowoop()) { |
kvn@803 | 2343 | // The EncodeP optimization may create a Phi with the same edges |
kvn@803 | 2344 | // for all paths. Such a Phi is not handled well by the Register Allocator. |
kvn@803 | 2345 | Node* unique_in = n->in(1); |
kvn@803 | 2346 | assert(unique_in != NULL, ""); |
kvn@803 | 2347 | uint cnt = n->req(); |
kvn@803 | 2348 | for (uint i = 2; i < cnt; i++) { |
kvn@803 | 2349 | Node* m = n->in(i); |
kvn@803 | 2350 | assert(m != NULL, ""); |
kvn@803 | 2351 | if (unique_in != m) |
kvn@803 | 2352 | unique_in = NULL; |
kvn@803 | 2353 | } |
kvn@803 | 2354 | if (unique_in != NULL) { |
kvn@803 | 2355 | n->subsume_by(unique_in); |
kvn@803 | 2356 | } |
kvn@803 | 2357 | } |
kvn@803 | 2358 | break; |
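// Editor's illustration of the Op_Phi case above: a degenerate narrow-oop
// Phi such as
//
//   Phi (Region) x x x
//
// carries the same value on every path and is replaced by its unique
// input x.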
kvn@803 | 2359 | |
kvn@599 | 2360 | #endif |
kvn@599 | 2361 | |
duke@435 | 2362 | case Op_ModI: |
duke@435 | 2363 | if (UseDivMod) { |
duke@435 | 2364 | // Check if a%b and a/b both exist |
duke@435 | 2365 | Node* d = n->find_similar(Op_DivI); |
duke@435 | 2366 | if (d) { |
duke@435 | 2367 | // Replace them with a fused divmod if supported |
duke@435 | 2368 | Compile* C = Compile::current(); |
duke@435 | 2369 | if (Matcher::has_match_rule(Op_DivModI)) { |
duke@435 | 2370 | DivModINode* divmod = DivModINode::make(C, n); |
kvn@603 | 2371 | d->subsume_by(divmod->div_proj()); |
kvn@603 | 2372 | n->subsume_by(divmod->mod_proj()); |
duke@435 | 2373 | } else { |
duke@435 | 2374 | // replace a%b with a-((a/b)*b) |
duke@435 | 2375 | Node* mult = new (C, 3) MulINode(d, d->in(2)); |
duke@435 | 2376 | Node* sub = new (C, 3) SubINode(d->in(1), mult); |
kvn@603 | 2377 | n->subsume_by( sub ); |
duke@435 | 2378 | } |
duke@435 | 2379 | } |
duke@435 | 2380 | } |
duke@435 | 2381 | break; |
duke@435 | 2382 | |
duke@435 | 2383 | case Op_ModL: |
duke@435 | 2384 | if (UseDivMod) { |
duke@435 | 2385 | // Check if a%b and a/b both exist |
duke@435 | 2386 | Node* d = n->find_similar(Op_DivL); |
duke@435 | 2387 | if (d) { |
duke@435 | 2388 | // Replace them with a fused divmod if supported |
duke@435 | 2389 | Compile* C = Compile::current(); |
duke@435 | 2390 | if (Matcher::has_match_rule(Op_DivModL)) { |
duke@435 | 2391 | DivModLNode* divmod = DivModLNode::make(C, n); |
kvn@603 | 2392 | d->subsume_by(divmod->div_proj()); |
kvn@603 | 2393 | n->subsume_by(divmod->mod_proj()); |
duke@435 | 2394 | } else { |
duke@435 | 2395 | // replace a%b with a-((a/b)*b) |
duke@435 | 2396 | Node* mult = new (C, 3) MulLNode(d, d->in(2)); |
duke@435 | 2397 | Node* sub = new (C, 3) SubLNode(d->in(1), mult); |
kvn@603 | 2398 | n->subsume_by( sub ); |
duke@435 | 2399 | } |
duke@435 | 2400 | } |
duke@435 | 2401 | } |
duke@435 | 2402 | break; |
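// Editor's sketch of the Op_ModI/Op_ModL rewrites above: when no fused
// DivMod match rule exists, the modulo is recomputed from the existing
// division using the identity a % b == a - (a / b) * b, i.e.
//
//   ModI a b   ==>   SubI a (MulI (DivI a b) b)
//
// For example, a = 7, b = 3:  7 - (7 / 3) * 3  ==  7 - 6  ==  1  ==  7 % 3.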
duke@435 | 2403 | |
duke@435 | 2404 | case Op_Load16B: |
duke@435 | 2405 | case Op_Load8B: |
duke@435 | 2406 | case Op_Load4B: |
duke@435 | 2407 | case Op_Load8S: |
duke@435 | 2408 | case Op_Load4S: |
duke@435 | 2409 | case Op_Load2S: |
duke@435 | 2410 | case Op_Load8C: |
duke@435 | 2411 | case Op_Load4C: |
duke@435 | 2412 | case Op_Load2C: |
duke@435 | 2413 | case Op_Load4I: |
duke@435 | 2414 | case Op_Load2I: |
duke@435 | 2415 | case Op_Load2L: |
duke@435 | 2416 | case Op_Load4F: |
duke@435 | 2417 | case Op_Load2F: |
duke@435 | 2418 | case Op_Load2D: |
duke@435 | 2419 | case Op_Store16B: |
duke@435 | 2420 | case Op_Store8B: |
duke@435 | 2421 | case Op_Store4B: |
duke@435 | 2422 | case Op_Store8C: |
duke@435 | 2423 | case Op_Store4C: |
duke@435 | 2424 | case Op_Store2C: |
duke@435 | 2425 | case Op_Store4I: |
duke@435 | 2426 | case Op_Store2I: |
duke@435 | 2427 | case Op_Store2L: |
duke@435 | 2428 | case Op_Store4F: |
duke@435 | 2429 | case Op_Store2F: |
duke@435 | 2430 | case Op_Store2D: |
duke@435 | 2431 | break; |
duke@435 | 2432 | |
duke@435 | 2433 | case Op_PackB: |
duke@435 | 2434 | case Op_PackS: |
duke@435 | 2435 | case Op_PackC: |
duke@435 | 2436 | case Op_PackI: |
duke@435 | 2437 | case Op_PackF: |
duke@435 | 2438 | case Op_PackL: |
duke@435 | 2439 | case Op_PackD: |
duke@435 | 2440 | if (n->req()-1 > 2) { |
duke@435 | 2441 | // Replace many-operand PackNodes with a binary tree for matching |
duke@435 | 2442 | PackNode* p = (PackNode*) n; |
duke@435 | 2443 | Node* btp = p->binaryTreePack(Compile::current(), 1, n->req()); |
kvn@603 | 2444 | n->subsume_by(btp); |
duke@435 | 2445 | } |
duke@435 | 2446 | break; |
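// Editor's note (illustrative): binaryTreePack reshapes a wide pack into
// nested two-operand packs the matcher can handle, roughly
//
//   Pack a b c d   ==>   Pack (Pack a b) (Pack c d)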
kvn@1294 | 2447 | case Op_Loop: |
kvn@1294 | 2448 | case Op_CountedLoop: |
kvn@1294 | 2449 | if (n->as_Loop()->is_inner_loop()) { |
kvn@1294 | 2450 | frc.inc_inner_loop_count(); |
kvn@1294 | 2451 | } |
kvn@1294 | 2452 | break; |
duke@435 | 2453 | default: |
duke@435 | 2454 | assert( !n->is_Call(), "" ); |
duke@435 | 2455 | assert( !n->is_Mem(), "" ); |
duke@435 | 2456 | break; |
duke@435 | 2457 | } |
never@562 | 2458 | |
never@562 | 2459 | // Collect CFG split points |
never@562 | 2460 | if (n->is_MultiBranch()) |
kvn@1294 | 2461 | frc._tests.push(n); |
duke@435 | 2462 | } |
duke@435 | 2463 | |
duke@435 | 2464 | //------------------------------final_graph_reshaping_walk--------------------- |
duke@435 | 2465 | // Replacing Opaque nodes with their input in final_graph_reshaping_impl() |
duke@435 | 2466 | // requires that the walk visits a node's inputs before visiting the node. |
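// Editor's note: this is an iterative post-order DFS using an explicit
// Node_Stack of (parent, next-input-index) pairs; a node's post-visit
// work runs only after all of its inputs have been visited. For a chain
// root -> A -> B, final_graph_reshaping_impl() is applied to B, then A,
// then root.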
kvn@1294 | 2467 | static void final_graph_reshaping_walk( Node_Stack &nstack, Node *root, Final_Reshape_Counts &frc ) { |
kvn@766 | 2468 | ResourceArea *area = Thread::current()->resource_area(); |
kvn@766 | 2469 | Unique_Node_List sfpt(area); |
kvn@766 | 2470 | |
kvn@1294 | 2471 | frc._visited.set(root->_idx); // first, mark node as visited |
duke@435 | 2472 | uint cnt = root->req(); |
duke@435 | 2473 | Node *n = root; |
duke@435 | 2474 | uint i = 0; |
duke@435 | 2475 | while (true) { |
duke@435 | 2476 | if (i < cnt) { |
duke@435 | 2477 | // Place all non-visited non-null inputs onto stack |
duke@435 | 2478 | Node* m = n->in(i); |
duke@435 | 2479 | ++i; |
kvn@1294 | 2480 | if (m != NULL && !frc._visited.test_set(m->_idx)) { |
kvn@766 | 2481 | if (m->is_SafePoint() && m->as_SafePoint()->jvms() != NULL) |
kvn@766 | 2482 | sfpt.push(m); |
duke@435 | 2483 | cnt = m->req(); |
duke@435 | 2484 | nstack.push(n, i); // put on stack parent and next input's index |
duke@435 | 2485 | n = m; |
duke@435 | 2486 | i = 0; |
duke@435 | 2487 | } |
duke@435 | 2488 | } else { |
duke@435 | 2489 | // Now do post-visit work |
kvn@1294 | 2490 | final_graph_reshaping_impl( n, frc ); |
duke@435 | 2491 | if (nstack.is_empty()) |
duke@435 | 2492 | break; // finished |
duke@435 | 2493 | n = nstack.node(); // Get node from stack |
duke@435 | 2494 | cnt = n->req(); |
duke@435 | 2495 | i = nstack.index(); |
duke@435 | 2496 | nstack.pop(); // Shift to the next node on stack |
duke@435 | 2497 | } |
duke@435 | 2498 | } |
kvn@766 | 2499 | |
kvn@766 | 2500 | // Go over safepoint nodes to skip DecodeN nodes for debug edges. |
kvn@766 | 2501 | // It can be done for uncommon traps or any safepoints/calls |
kvn@766 | 2502 | // if the DecodeN node is referenced only in debug info. |
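// For example (editor's illustration): a debug edge referencing
// DecodeN(narrow_x) can reference narrow_x directly, because narrow oops
// are allowed in debug info and the full oop can be rebuilt from the
// narrow form at deoptimization.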
kvn@766 | 2503 | while (sfpt.size() > 0) { |
kvn@766 | 2504 | n = sfpt.pop(); |
kvn@766 | 2505 | JVMState *jvms = n->as_SafePoint()->jvms(); |
kvn@766 | 2506 | assert(jvms != NULL, "sanity"); |
kvn@766 | 2507 | int start = jvms->debug_start(); |
kvn@766 | 2508 | int end = n->req(); |
kvn@766 | 2509 | bool is_uncommon = (n->is_CallStaticJava() && |
kvn@766 | 2510 | n->as_CallStaticJava()->uncommon_trap_request() != 0); |
kvn@766 | 2511 | for (int j = start; j < end; j++) { |
kvn@766 | 2512 | Node* in = n->in(j); |
kvn@766 | 2513 | if (in->is_DecodeN()) { |
kvn@766 | 2514 | bool safe_to_skip = true; |
kvn@766 | 2515 | if (!is_uncommon) { |
kvn@766 | 2516 | // Is it safe to skip? |
kvn@766 | 2517 | for (uint i = 0; i < in->outcnt(); i++) { |
kvn@766 | 2518 | Node* u = in->raw_out(i); |
kvn@766 | 2519 | if (!u->is_SafePoint() || |
kvn@766 | 2520 | (u->is_Call() && u->as_Call()->has_non_debug_use(n))) { |
kvn@766 | 2521 | safe_to_skip = false; |
kvn@766 | 2522 | } |
kvn@766 | 2523 | } |
kvn@766 | 2524 | } |
kvn@766 | 2525 | if (safe_to_skip) { |
kvn@766 | 2526 | n->set_req(j, in->in(1)); |
kvn@766 | 2527 | } |
kvn@766 | 2528 | if (in->outcnt() == 0) { |
kvn@766 | 2529 | in->disconnect_inputs(NULL); |
kvn@766 | 2530 | } |
kvn@766 | 2531 | } |
kvn@766 | 2532 | } |
kvn@766 | 2533 | } |
duke@435 | 2534 | } |
duke@435 | 2535 | |
duke@435 | 2536 | //------------------------------final_graph_reshaping-------------------------- |
duke@435 | 2537 | // Final Graph Reshaping. |
duke@435 | 2538 | // |
duke@435 | 2539 | // (1) Clone simple inputs to uncommon calls, so they can be scheduled late |
duke@435 | 2540 | // and not commoned up and forced early. Must come after regular |
duke@435 | 2541 | // optimizations to avoid GVN undoing the cloning. Clone constant |
duke@435 | 2542 | // inputs to Loop Phis; these will be split by the allocator anyway. |
duke@435 | 2543 | // Remove Opaque nodes. |
duke@435 | 2544 | // (2) Move last-uses by commutative operations to the left input to encourage |
duke@435 | 2545 | // Intel update-in-place two-address operations and better register usage |
duke@435 | 2546 | // on RISCs. Must come after regular optimizations to avoid GVN Ideal |
duke@435 | 2547 | // calls canonicalizing them back. |
duke@435 | 2548 | // (3) Count the number of double-precision FP ops, single-precision FP ops |
duke@435 | 2549 | // and call sites. On Intel, we can get correct rounding either by |
duke@435 | 2550 | // forcing singles to memory (requires extra stores and loads after each |
duke@435 | 2551 | // FP bytecode) or we can set a rounding mode bit (requires setting and |
duke@435 | 2552 | // clearing the mode bit around call sites). The mode bit is only used |
duke@435 | 2553 | // if the relative frequency of single FP ops to calls is low enough. |
duke@435 | 2554 | // This is a key transform for SPEC mpeg_audio. |
duke@435 | 2555 | // (4) Detect infinite loops; blobs of code reachable from above but not |
duke@435 | 2556 | // below. Several of the Code_Gen algorithms fail on such code shapes, |
duke@435 | 2557 | // so we simply bail out. Happens a lot in ZKM.jar, but also happens |
duke@435 | 2558 | // from time to time in other code (such as -Xcomp finalizer loops, etc). |
duke@435 | 2559 | // Detection is by looking for IfNodes where only 1 projection is |
duke@435 | 2560 | // reachable from below or CatchNodes missing some targets. |
duke@435 | 2561 | // (5) Assert for insane oop offsets in debug mode. |
duke@435 | 2562 | |
duke@435 | 2563 | bool Compile::final_graph_reshaping() { |
duke@435 | 2564 | // an infinite loop may have been eliminated by the optimizer, |
duke@435 | 2565 | // in which case the graph will be empty. |
duke@435 | 2566 | if (root()->req() == 1) { |
duke@435 | 2567 | record_method_not_compilable("trivial infinite loop"); |
duke@435 | 2568 | return true; |
duke@435 | 2569 | } |
duke@435 | 2570 | |
kvn@1294 | 2571 | Final_Reshape_Counts frc; |
duke@435 | 2572 | |
duke@435 | 2573 | // Visit everybody reachable! |
duke@435 | 2574 | // Allocate stack of size C->unique()/2 to avoid frequent realloc |
duke@435 | 2575 | Node_Stack nstack(unique() >> 1); |
kvn@1294 | 2576 | final_graph_reshaping_walk(nstack, root(), frc); |
duke@435 | 2577 | |
duke@435 | 2578 | // Check for unreachable (from below) code (i.e., infinite loops). |
kvn@1294 | 2579 | for( uint i = 0; i < frc._tests.size(); i++ ) { |
kvn@1294 | 2580 | MultiBranchNode *n = frc._tests[i]->as_MultiBranch(); |
never@562 | 2581 | // Get number of CFG targets. |
duke@435 | 2582 | // Note that PCTables include exception targets after calls. |
never@562 | 2583 | uint required_outcnt = n->required_outcnt(); |
never@562 | 2584 | if (n->outcnt() != required_outcnt) { |
duke@435 | 2585 | // Check for a few special cases. Rethrow Nodes never take the |
duke@435 | 2586 | // 'fall-thru' path, so expected kids is 1 less. |
duke@435 | 2587 | if (n->is_PCTable() && n->in(0) && n->in(0)->in(0)) { |
duke@435 | 2588 | if (n->in(0)->in(0)->is_Call()) { |
duke@435 | 2589 | CallNode *call = n->in(0)->in(0)->as_Call(); |
duke@435 | 2590 | if (call->entry_point() == OptoRuntime::rethrow_stub()) { |
never@562 | 2591 | required_outcnt--; // Rethrow always has 1 less kid |
duke@435 | 2592 | } else if (call->req() > TypeFunc::Parms && |
duke@435 | 2593 | call->is_CallDynamicJava()) { |
duke@435 | 2594 | // Check for null receiver. In such case, the optimizer has |
duke@435 | 2595 | // detected that the virtual call will always result in a null |
duke@435 | 2596 | // pointer exception. The fall-through projection of this CatchNode |
duke@435 | 2597 | // will not be populated. |
duke@435 | 2598 | Node *arg0 = call->in(TypeFunc::Parms); |
duke@435 | 2599 | if (arg0->is_Type() && |
duke@435 | 2600 | arg0->as_Type()->type()->higher_equal(TypePtr::NULL_PTR)) { |
never@562 | 2601 | required_outcnt--; |
duke@435 | 2602 | } |
duke@435 | 2603 | } else if (call->entry_point() == OptoRuntime::new_array_Java() && |
duke@435 | 2604 | call->req() > TypeFunc::Parms+1 && |
duke@435 | 2605 | call->is_CallStaticJava()) { |
duke@435 | 2606 | // Check for negative array length. In such case, the optimizer has |
duke@435 | 2607 | // detected that the allocation attempt will always result in an |
duke@435 | 2608 | // exception. There is no fall-through projection of this CatchNode. |
duke@435 | 2609 | Node *arg1 = call->in(TypeFunc::Parms+1); |
duke@435 | 2610 | if (arg1->is_Type() && |
duke@435 | 2611 | arg1->as_Type()->type()->join(TypeInt::POS)->empty()) { |
never@562 | 2612 | required_outcnt--; |
duke@435 | 2613 | } |
duke@435 | 2614 | } |
duke@435 | 2615 | } |
duke@435 | 2616 | } |
never@562 | 2617 | // Recheck with a better notion of 'required_outcnt' |
never@562 | 2618 | if (n->outcnt() != required_outcnt) { |
duke@435 | 2619 | record_method_not_compilable("malformed control flow"); |
duke@435 | 2620 | return true; // Not all targets reachable! |
duke@435 | 2621 | } |
duke@435 | 2622 | } |
duke@435 | 2623 | // Check that I actually visited all kids. Unreached kids |
duke@435 | 2624 | // must be infinite loops. |
duke@435 | 2625 | for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) |
kvn@1294 | 2626 | if (!frc._visited.test(n->fast_out(j)->_idx)) { |
duke@435 | 2627 | record_method_not_compilable("infinite loop"); |
duke@435 | 2628 | return true; // Found unvisited kid; must be unreachable |
duke@435 | 2629 | } |
duke@435 | 2630 | } |
duke@435 | 2631 | |
duke@435 | 2632 | // If the original bytecodes contained a mixture of floats and doubles, |
duke@435 | 2633 | // check if the optimizer has made it homogeneous, item (3). |
never@1364 | 2634 | if( Use24BitFPMode && Use24BitFP && UseSSE == 0 && |
kvn@1294 | 2635 | frc.get_float_count() > 32 && |
kvn@1294 | 2636 | frc.get_double_count() == 0 && |
kvn@1294 | 2637 | (10 * frc.get_call_count() < frc.get_float_count()) ) { |
duke@435 | 2638 | set_24_bit_selection_and_mode( false, true ); |
duke@435 | 2639 | } |
duke@435 | 2640 | |
kvn@1294 | 2641 | set_java_calls(frc.get_java_call_count()); |
kvn@1294 | 2642 | set_inner_loops(frc.get_inner_loop_count()); |
duke@435 | 2643 | |
duke@435 | 2644 | // No infinite loops, no reason to bail out. |
duke@435 | 2645 | return false; |
duke@435 | 2646 | } |
duke@435 | 2647 | |
duke@435 | 2648 | //-----------------------------too_many_traps---------------------------------- |
duke@435 | 2649 | // Report if there are too many traps at the current method and bci. |
duke@435 | 2650 | // Return true if there was a trap, and/or PerMethodTrapLimit is exceeded. |
duke@435 | 2651 | bool Compile::too_many_traps(ciMethod* method, |
duke@435 | 2652 | int bci, |
duke@435 | 2653 | Deoptimization::DeoptReason reason) { |
duke@435 | 2654 | ciMethodData* md = method->method_data(); |
duke@435 | 2655 | if (md->is_empty()) { |
duke@435 | 2656 | // Assume the trap has not occurred, or that it occurred only |
duke@435 | 2657 | // because of a transient condition during start-up in the interpreter. |
duke@435 | 2658 | return false; |
duke@435 | 2659 | } |
duke@435 | 2660 | if (md->has_trap_at(bci, reason) != 0) { |
duke@435 | 2661 | // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic. |
duke@435 | 2662 | // Also, if there are multiple reasons, or if there is no per-BCI record, |
duke@435 | 2663 | // assume the worst. |
duke@435 | 2664 | if (log()) |
duke@435 | 2665 | log()->elem("observe trap='%s' count='%d'", |
duke@435 | 2666 | Deoptimization::trap_reason_name(reason), |
duke@435 | 2667 | md->trap_count(reason)); |
duke@435 | 2668 | return true; |
duke@435 | 2669 | } else { |
duke@435 | 2670 | // Ignore method/bci and see if there have been too many globally. |
duke@435 | 2671 | return too_many_traps(reason, md); |
duke@435 | 2672 | } |
duke@435 | 2673 | } |
duke@435 | 2674 | |
duke@435 | 2675 | // Less-accurate variant which does not require a method and bci. |
duke@435 | 2676 | bool Compile::too_many_traps(Deoptimization::DeoptReason reason, |
duke@435 | 2677 | ciMethodData* logmd) { |
duke@435 | 2678 | if (trap_count(reason) >= (uint)PerMethodTrapLimit) { |
duke@435 | 2679 | // Too many traps globally. |
duke@435 | 2680 | // Note that we use cumulative trap_count, not just md->trap_count. |
duke@435 | 2681 | if (log()) { |
duke@435 | 2682 | int mcount = (logmd == NULL) ? -1 : (int)logmd->trap_count(reason); |
duke@435 | 2683 | log()->elem("observe trap='%s' count='0' mcount='%d' ccount='%d'", |
duke@435 | 2684 | Deoptimization::trap_reason_name(reason), |
duke@435 | 2685 | mcount, trap_count(reason)); |
duke@435 | 2686 | } |
duke@435 | 2687 | return true; |
duke@435 | 2688 | } else { |
duke@435 | 2689 | // The coast is clear. |
duke@435 | 2690 | return false; |
duke@435 | 2691 | } |
duke@435 | 2692 | } |
duke@435 | 2693 | |
duke@435 | 2694 | //--------------------------too_many_recompiles-------------------------------- |
duke@435 | 2695 | // Report if there are too many recompiles at the current method and bci. |
duke@435 | 2696 | // Consults PerBytecodeRecompilationCutoff and PerMethodRecompilationCutoff. |
duke@435 | 2697 | // Is not eager to return true, since this will cause the compiler to use |
duke@435 | 2698 | // Action_none for a trap point, to avoid too many recompilations. |
duke@435 | 2699 | bool Compile::too_many_recompiles(ciMethod* method, |
duke@435 | 2700 | int bci, |
duke@435 | 2701 | Deoptimization::DeoptReason reason) { |
duke@435 | 2702 | ciMethodData* md = method->method_data(); |
duke@435 | 2703 | if (md->is_empty()) { |
duke@435 | 2704 | // Assume the trap has not occurred, or that it occurred only |
duke@435 | 2705 | // because of a transient condition during start-up in the interpreter. |
duke@435 | 2706 | return false; |
duke@435 | 2707 | } |
duke@435 | 2708 | // Pick a cutoff point well within PerBytecodeRecompilationCutoff. |
duke@435 | 2709 | uint bc_cutoff = (uint) PerBytecodeRecompilationCutoff / 8; |
duke@435 | 2710 | uint m_cutoff = (uint) PerMethodRecompilationCutoff / 2 + 1; // not zero |
duke@435 | 2711 | Deoptimization::DeoptReason per_bc_reason |
duke@435 | 2712 | = Deoptimization::reason_recorded_per_bytecode_if_any(reason); |
duke@435 | 2713 | if ((per_bc_reason == Deoptimization::Reason_none |
duke@435 | 2714 | || md->has_trap_at(bci, reason) != 0) |
duke@435 | 2715 | // The trap frequency measure we care about is the recompile count: |
duke@435 | 2716 | && md->trap_recompiled_at(bci) |
duke@435 | 2717 | && md->overflow_recompile_count() >= bc_cutoff) { |
duke@435 | 2718 | // Do not emit a trap here if it has already caused recompilations. |
duke@435 | 2719 | // Also, if there are multiple reasons, or if there is no per-BCI record, |
duke@435 | 2720 | // assume the worst. |
duke@435 | 2721 | if (log()) |
duke@435 | 2722 | log()->elem("observe trap='%s recompiled' count='%d' recompiles2='%d'", |
duke@435 | 2723 | Deoptimization::trap_reason_name(reason), |
duke@435 | 2724 | md->trap_count(reason), |
duke@435 | 2725 | md->overflow_recompile_count()); |
duke@435 | 2726 | return true; |
duke@435 | 2727 | } else if (trap_count(reason) != 0 |
duke@435 | 2728 | && decompile_count() >= m_cutoff) { |
duke@435 | 2729 | // Too many recompiles globally, and we have seen this sort of trap. |
duke@435 | 2730 | // Use cumulative decompile_count, not just md->decompile_count. |
duke@435 | 2731 | if (log()) |
duke@435 | 2732 | log()->elem("observe trap='%s' count='%d' mcount='%d' decompiles='%d' mdecompiles='%d'", |
duke@435 | 2733 | Deoptimization::trap_reason_name(reason), |
duke@435 | 2734 | md->trap_count(reason), trap_count(reason), |
duke@435 | 2735 | md->decompile_count(), decompile_count()); |
duke@435 | 2736 | return true; |
duke@435 | 2737 | } else { |
duke@435 | 2738 | // The coast is clear. |
duke@435 | 2739 | return false; |
duke@435 | 2740 | } |
duke@435 | 2741 | } |
duke@435 | 2742 | |
duke@435 | 2743 | |
duke@435 | 2744 | #ifndef PRODUCT |
duke@435 | 2745 | //------------------------------verify_graph_edges--------------------------- |
duke@435 | 2746 | // Walk the Graph and verify that there is a one-to-one correspondence |
duke@435 | 2747 | // between Use-Def edges and Def-Use edges in the graph. |
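// In other words (editor's note): whenever n->in(k) == m, node m must
// list n among its def-use outputs, and every raw_out() of m must name m
// as one of its inputs; verify_edges checks both directions.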
duke@435 | 2748 | void Compile::verify_graph_edges(bool no_dead_code) { |
duke@435 | 2749 | if (VerifyGraphEdges) { |
duke@435 | 2750 | ResourceArea *area = Thread::current()->resource_area(); |
duke@435 | 2751 | Unique_Node_List visited(area); |
duke@435 | 2752 | // Call recursive graph walk to check edges |
duke@435 | 2753 | _root->verify_edges(visited); |
duke@435 | 2754 | if (no_dead_code) { |
duke@435 | 2755 | // Now make sure that no visited node is used by an unvisited node. |
duke@435 | 2756 | int dead_nodes = 0; |
duke@435 | 2757 | Unique_Node_List checked(area); |
duke@435 | 2758 | while (visited.size() > 0) { |
duke@435 | 2759 | Node* n = visited.pop(); |
duke@435 | 2760 | checked.push(n); |
duke@435 | 2761 | for (uint i = 0; i < n->outcnt(); i++) { |
duke@435 | 2762 | Node* use = n->raw_out(i); |
duke@435 | 2763 | if (checked.member(use)) continue; // already checked |
duke@435 | 2764 | if (visited.member(use)) continue; // already in the graph |
duke@435 | 2765 | if (use->is_Con()) continue; // a dead ConNode is OK |
duke@435 | 2766 | // At this point, we have found a dead node which is DU-reachable. |
duke@435 | 2767 | if (dead_nodes++ == 0) |
duke@435 | 2768 | tty->print_cr("*** Dead nodes reachable via DU edges:"); |
duke@435 | 2769 | use->dump(2); |
duke@435 | 2770 | tty->print_cr("---"); |
duke@435 | 2771 | checked.push(use); // No repeats; pretend it is now checked. |
duke@435 | 2772 | } |
duke@435 | 2773 | } |
duke@435 | 2774 | assert(dead_nodes == 0, "using nodes must be reachable from root"); |
duke@435 | 2775 | } |
duke@435 | 2776 | } |
duke@435 | 2777 | } |
duke@435 | 2778 | #endif |
duke@435 | 2779 | |
duke@435 | 2780 | // The Compile object keeps track of failure reasons separately from the ciEnv. |
duke@435 | 2781 | // This is required because there is not quite a 1-1 relation between the |
duke@435 | 2782 | // ciEnv and its compilation task and the Compile object. Note that one |
duke@435 | 2783 | // ciEnv might use two Compile objects, if C2Compiler::compile_method decides |
duke@435 | 2784 | // to backtrack and retry without subsuming loads. Other than this backtracking |
duke@435 | 2785 | // behavior, the Compile's failure reason is quietly copied up to the ciEnv |
duke@435 | 2786 | // by the logic in C2Compiler. |
duke@435 | 2787 | void Compile::record_failure(const char* reason) { |
duke@435 | 2788 | if (log() != NULL) { |
duke@435 | 2789 | log()->elem("failure reason='%s' phase='compile'", reason); |
duke@435 | 2790 | } |
duke@435 | 2791 | if (_failure_reason == NULL) { |
duke@435 | 2792 | // Record the first failure reason. |
duke@435 | 2793 | _failure_reason = reason; |
duke@435 | 2794 | } |
never@657 | 2795 | if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { |
never@657 | 2796 | C->print_method(_failure_reason); |
never@657 | 2797 | } |
duke@435 | 2798 | _root = NULL; // flush the graph, too |
duke@435 | 2799 | } |
duke@435 | 2800 | |
duke@435 | 2801 | Compile::TracePhase::TracePhase(const char* name, elapsedTimer* accumulator, bool dolog) |
duke@435 | 2802 | : TraceTime(NULL, accumulator, false NOT_PRODUCT( || TimeCompiler ), false) |
duke@435 | 2803 | { |
duke@435 | 2804 | if (dolog) { |
duke@435 | 2805 | C = Compile::current(); |
duke@435 | 2806 | _log = C->log(); |
duke@435 | 2807 | } else { |
duke@435 | 2808 | C = NULL; |
duke@435 | 2809 | _log = NULL; |
duke@435 | 2810 | } |
duke@435 | 2811 | if (_log != NULL) { |
duke@435 | 2812 | _log->begin_head("phase name='%s' nodes='%d'", name, C->unique()); |
duke@435 | 2813 | _log->stamp(); |
duke@435 | 2814 | _log->end_head(); |
duke@435 | 2815 | } |
duke@435 | 2816 | } |
duke@435 | 2817 | |
duke@435 | 2818 | Compile::TracePhase::~TracePhase() { |
duke@435 | 2819 | if (_log != NULL) { |
duke@435 | 2820 | _log->done("phase nodes='%d'", C->unique()); |
duke@435 | 2821 | } |
duke@435 | 2822 | } |
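// Editor's usage sketch (names are illustrative): a TracePhase is placed
// on the stack around a compiler phase so the constructor emits the
// <phase> log head and the destructor closes it with the final node count:
//
//   {
//     TracePhase t("optimizer", &_t_optimizer, true);
//     // ... run the phase ...
//   }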