Thu, 20 Sep 2012 16:49:17 +0200
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
Summary: use shorter instruction sequences for atomic add and atomic exchange when possible.
Reviewed-by: kvn, jrose
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "compiler/compileLog.hpp" |
stefank@2314 | 27 | #include "libadt/vectset.hpp" |
stefank@2314 | 28 | #include "opto/addnode.hpp" |
stefank@2314 | 29 | #include "opto/callnode.hpp" |
stefank@2314 | 30 | #include "opto/cfgnode.hpp" |
stefank@2314 | 31 | #include "opto/compile.hpp" |
stefank@2314 | 32 | #include "opto/connode.hpp" |
stefank@2314 | 33 | #include "opto/locknode.hpp" |
stefank@2314 | 34 | #include "opto/loopnode.hpp" |
stefank@2314 | 35 | #include "opto/macro.hpp" |
stefank@2314 | 36 | #include "opto/memnode.hpp" |
stefank@2314 | 37 | #include "opto/node.hpp" |
stefank@2314 | 38 | #include "opto/phaseX.hpp" |
stefank@2314 | 39 | #include "opto/rootnode.hpp" |
stefank@2314 | 40 | #include "opto/runtime.hpp" |
stefank@2314 | 41 | #include "opto/subnode.hpp" |
stefank@2314 | 42 | #include "opto/type.hpp" |
stefank@2314 | 43 | #include "runtime/sharedRuntime.hpp" |
duke@435 | 44 | |
duke@435 | 45 | |
duke@435 | 46 | // |
duke@435 | 47 | // Replace any references to "oldref" in inputs to "use" with "newref". |
duke@435 | 48 | // Returns the number of replacements made. |
duke@435 | 49 | // |
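// Note: slots below use->req() are required edges and are rewired with
// set_req(); slots at or above req() are precedence edges and are rewired
// with set_prec(). Precedence edges are packed, which is why the scan can
// stop at the first NULL precedence slot.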
duke@435 | 50 | int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) { |
duke@435 | 51 | int nreplacements = 0; |
duke@435 | 52 | uint req = use->req(); |
duke@435 | 53 | for (uint j = 0; j < use->len(); j++) { |
duke@435 | 54 | Node *uin = use->in(j); |
duke@435 | 55 | if (uin == oldref) { |
duke@435 | 56 | if (j < req) |
duke@435 | 57 | use->set_req(j, newref); |
duke@435 | 58 | else |
duke@435 | 59 | use->set_prec(j, newref); |
duke@435 | 60 | nreplacements++; |
duke@435 | 61 | } else if (j >= req && uin == NULL) { |
duke@435 | 62 | break; |
duke@435 | 63 | } |
duke@435 | 64 | } |
duke@435 | 65 | return nreplacements; |
duke@435 | 66 | } |
duke@435 | 67 | |
duke@435 | 68 | void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) { |
duke@435 | 69 | // Copy debug information and adjust JVMState information |
duke@435 | 70 | uint old_dbg_start = oldcall->tf()->domain()->cnt(); |
duke@435 | 71 | uint new_dbg_start = newcall->tf()->domain()->cnt(); |
duke@435 | 72 | int jvms_adj = new_dbg_start - old_dbg_start; |
duke@435 | 73 | assert (new_dbg_start == newcall->req(), "argument count mismatch"); |
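// For example (the numbers are illustrative only): if the old call's fixed
// arguments occupy slots [0, 5) and the new call's occupy [0, 7), then
// jvms_adj == 2 and every debug-info offset below is shifted up by two.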
kvn@498 | 74 | |
kvn@498 | 75 | Dict* sosn_map = new Dict(cmpkey,hashkey); |
duke@435 | 76 | for (uint i = old_dbg_start; i < oldcall->req(); i++) { |
kvn@498 | 77 | Node* old_in = oldcall->in(i); |
kvn@498 | 78 | // Clone old SafePointScalarObjectNodes, adjusting their field contents. |
kvn@895 | 79 | if (old_in != NULL && old_in->is_SafePointScalarObject()) { |
kvn@498 | 80 | SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject(); |
kvn@498 | 81 | uint old_unique = C->unique(); |
kvn@498 | 82 | Node* new_in = old_sosn->clone(jvms_adj, sosn_map); |
kvn@498 | 83 | if (old_unique != C->unique()) { |
kvn@3311 | 84 | new_in->set_req(0, C->root()); // reset control edge |
kvn@498 | 85 | new_in = transform_later(new_in); // Register new node. |
kvn@498 | 86 | } |
kvn@498 | 87 | old_in = new_in; |
kvn@498 | 88 | } |
kvn@498 | 89 | newcall->add_req(old_in); |
duke@435 | 90 | } |
kvn@498 | 91 | |
duke@435 | 92 | newcall->set_jvms(oldcall->jvms()); |
duke@435 | 93 | for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) { |
duke@435 | 94 | jvms->set_map(newcall); |
duke@435 | 95 | jvms->set_locoff(jvms->locoff()+jvms_adj); |
duke@435 | 96 | jvms->set_stkoff(jvms->stkoff()+jvms_adj); |
duke@435 | 97 | jvms->set_monoff(jvms->monoff()+jvms_adj); |
kvn@498 | 98 | jvms->set_scloff(jvms->scloff()+jvms_adj); |
duke@435 | 99 | jvms->set_endoff(jvms->endoff()+jvms_adj); |
duke@435 | 100 | } |
duke@435 | 101 | } |
duke@435 | 102 | |
kvn@855 | 103 | Node* PhaseMacroExpand::opt_bits_test(Node* ctrl, Node* region, int edge, Node* word, int mask, int bits, bool return_fast_path) { |
kvn@855 | 104 | Node* cmp; |
kvn@855 | 105 | if (mask != 0) { |
kvn@855 | 106 | Node* and_node = transform_later(new (C, 3) AndXNode(word, MakeConX(mask))); |
kvn@855 | 107 | cmp = transform_later(new (C, 3) CmpXNode(and_node, MakeConX(bits))); |
kvn@855 | 108 | } else { |
kvn@855 | 109 | cmp = word; |
kvn@855 | 110 | } |
kvn@855 | 111 | Node* bol = transform_later(new (C, 2) BoolNode(cmp, BoolTest::ne)); |
kvn@855 | 112 | IfNode* iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN ); |
kvn@855 | 113 | transform_later(iff); |
duke@435 | 114 | |
kvn@855 | 115 | // Fast path taken. |
kvn@855 | 116 | Node *fast_taken = transform_later( new (C, 1) IfFalseNode(iff) ); |
duke@435 | 117 | |
duke@435 | 118 | // Fast path not-taken, i.e. slow path |
kvn@855 | 119 | Node *slow_taken = transform_later( new (C, 1) IfTrueNode(iff) ); |
kvn@855 | 120 | |
kvn@855 | 121 | if (return_fast_path) { |
kvn@855 | 122 | region->init_req(edge, slow_taken); // Capture slow-control |
kvn@855 | 123 | return fast_taken; |
kvn@855 | 124 | } else { |
kvn@855 | 125 | region->init_req(edge, fast_taken); // Capture fast-control |
kvn@855 | 126 | return slow_taken; |
kvn@855 | 127 | } |
duke@435 | 128 | } |
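// Roughly, opt_bits_test materializes the following control flow (a sketch
// of the generated node graph, not literal code; with mask == 0, 'word' is
// used directly as the comparison input):
//
//   if ((word & mask) != bits) {
//     // slow path (IfTrue projection)
//   } else {
//     // fast path (IfFalse projection)
//   }
//
// One of the two branches is captured into 'region' at slot 'edge', and the
// other is returned, as selected by 'return_fast_path'.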
duke@435 | 129 | |
duke@435 | 130 | //--------------------copy_predefined_input_for_runtime_call-------------------- |
duke@435 | 131 | void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) { |
duke@435 | 132 | // Set fixed predefined input arguments |
duke@435 | 133 | call->init_req( TypeFunc::Control, ctrl ); |
duke@435 | 134 | call->init_req( TypeFunc::I_O , oldcall->in( TypeFunc::I_O) ); |
duke@435 | 135 | call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ????? |
duke@435 | 136 | call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) ); |
duke@435 | 137 | call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) ); |
duke@435 | 138 | } |
duke@435 | 139 | |
duke@435 | 140 | //------------------------------make_slow_call--------------------------------- |
duke@435 | 141 | CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) { |
duke@435 | 142 | |
duke@435 | 143 | // Slow-path call |
duke@435 | 144 | int size = slow_call_type->domain()->cnt(); |
duke@435 | 145 | CallNode *call = leaf_name |
duke@435 | 146 | ? (CallNode*)new (C, size) CallLeafNode ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM ) |
duke@435 | 147 | : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM ); |
duke@435 | 148 | |
duke@435 | 149 | // Slow path call has no side-effects, uses few values |
duke@435 | 150 | copy_predefined_input_for_runtime_call(slow_path, oldcall, call ); |
duke@435 | 151 | if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); |
duke@435 | 152 | if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); |
duke@435 | 153 | copy_call_debug_info(oldcall, call); |
duke@435 | 154 | call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. |
kvn@1976 | 155 | _igvn.replace_node(oldcall, call); |
duke@435 | 156 | transform_later(call); |
duke@435 | 157 | |
duke@435 | 158 | return call; |
duke@435 | 159 | } |
duke@435 | 160 | |
duke@435 | 161 | void PhaseMacroExpand::extract_call_projections(CallNode *call) { |
duke@435 | 162 | _fallthroughproj = NULL; |
duke@435 | 163 | _fallthroughcatchproj = NULL; |
duke@435 | 164 | _ioproj_fallthrough = NULL; |
duke@435 | 165 | _ioproj_catchall = NULL; |
duke@435 | 166 | _catchallcatchproj = NULL; |
duke@435 | 167 | _memproj_fallthrough = NULL; |
duke@435 | 168 | _memproj_catchall = NULL; |
duke@435 | 169 | _resproj = NULL; |
duke@435 | 170 | for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { |
duke@435 | 171 | ProjNode *pn = call->fast_out(i)->as_Proj(); |
duke@435 | 172 | switch (pn->_con) { |
duke@435 | 173 | case TypeFunc::Control: |
duke@435 | 174 | { |
duke@435 | 175 | // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj on the input chain |
duke@435 | 176 | _fallthroughproj = pn; |
duke@435 | 177 | DUIterator_Fast jmax, j = pn->fast_outs(jmax); |
duke@435 | 178 | const Node *cn = pn->fast_out(j); |
duke@435 | 179 | if (cn->is_Catch()) { |
duke@435 | 180 | ProjNode *cpn = NULL; |
duke@435 | 181 | for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { |
duke@435 | 182 | cpn = cn->fast_out(k)->as_Proj(); |
duke@435 | 183 | assert(cpn->is_CatchProj(), "must be a CatchProjNode"); |
duke@435 | 184 | if (cpn->_con == CatchProjNode::fall_through_index) |
duke@435 | 185 | _fallthroughcatchproj = cpn; |
duke@435 | 186 | else { |
duke@435 | 187 | assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index."); |
duke@435 | 188 | _catchallcatchproj = cpn; |
duke@435 | 189 | } |
duke@435 | 190 | } |
duke@435 | 191 | } |
duke@435 | 192 | break; |
duke@435 | 193 | } |
duke@435 | 194 | case TypeFunc::I_O: |
duke@435 | 195 | if (pn->_is_io_use) |
duke@435 | 196 | _ioproj_catchall = pn; |
duke@435 | 197 | else |
duke@435 | 198 | _ioproj_fallthrough = pn; |
duke@435 | 199 | break; |
duke@435 | 200 | case TypeFunc::Memory: |
duke@435 | 201 | if (pn->_is_io_use) |
duke@435 | 202 | _memproj_catchall = pn; |
duke@435 | 203 | else |
duke@435 | 204 | _memproj_fallthrough = pn; |
duke@435 | 205 | break; |
duke@435 | 206 | case TypeFunc::Parms: |
duke@435 | 207 | _resproj = pn; |
duke@435 | 208 | break; |
duke@435 | 209 | default: |
duke@435 | 210 | assert(false, "unexpected projection from allocation node."); |
duke@435 | 211 | } |
duke@435 | 212 | } |
duke@435 | 213 | |
duke@435 | 214 | } |
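// For reference, the projection shape walked above (a sketch):
//
//   call --Control--> Proj --> Catch --> CatchProj(fall_through_index)
//                                   \--> CatchProj(catch_all_index)
//   call --I_O------> Proj (fallthrough) / Proj with _is_io_use (catchall)
//   call --Memory---> Proj (fallthrough) / Proj with _is_io_use (catchall)
//   call --Parms----> Proj (result)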
duke@435 | 215 | |
kvn@508 | 216 | // Eliminate a card mark sequence. p2x is a ConvP2XNode |
kvn@1286 | 217 | void PhaseMacroExpand::eliminate_card_mark(Node* p2x) { |
kvn@508 | 218 | assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required"); |
kvn@1286 | 219 | if (!UseG1GC) { |
kvn@1286 | 220 | // vanilla/CMS post barrier |
kvn@1286 | 221 | Node *shift = p2x->unique_out(); |
kvn@1286 | 222 | Node *addp = shift->unique_out(); |
kvn@1286 | 223 | for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) { |
never@2814 | 224 | Node *mem = addp->last_out(j); |
never@2814 | 225 | if (UseCondCardMark && mem->is_Load()) { |
never@2814 | 226 | assert(mem->Opcode() == Op_LoadB, "unexpected code shape"); |
never@2814 | 227 | // The load is checking if the card has been written so |
never@2814 | 228 | // replace it with zero to fold the test. |
never@2814 | 229 | _igvn.replace_node(mem, intcon(0)); |
never@2814 | 230 | continue; |
never@2814 | 231 | } |
never@2814 | 232 | assert(mem->is_Store(), "store required"); |
never@2814 | 233 | _igvn.replace_node(mem, mem->in(MemNode::Memory)); |
kvn@1286 | 234 | } |
kvn@1286 | 235 | } else { |
kvn@1286 | 236 | // G1 pre/post barriers |
kvn@3521 | 237 | assert(p2x->outcnt() <= 2, "expects 1 or 2 users: Xor and URShift nodes"); |
kvn@1286 | 238 | // There could be only one user, the URShift node, in the Object.clone() |
kvn@1286 | 239 | // intrinsic, where the new allocation is passed to the arraycopy stub and |
kvn@1286 | 240 | // so cannot be scalar replaced. So we don't check that case. |
kvn@1286 | 241 | |
kvn@3521 | 242 | // Another case of only one user (Xor) is when the NULL check of the value |
kvn@3521 | 243 | // in the G1 post barrier is folded away after CCP, so the code which used |
kvn@3521 | 244 | // the URShift is removed. |
kvn@3521 | 245 | |
kvn@3521 | 246 | // Record the Region node before eliminating the post barrier, since that |
kvn@3521 | 247 | // also eliminates the CastP2X node when it has only one user. |
kvn@3521 | 248 | Node* this_region = p2x->in(0); |
kvn@3521 | 249 | assert(this_region != NULL, ""); |
kvn@3521 | 250 | |
kvn@1286 | 251 | // Remove G1 post barrier. |
kvn@1286 | 252 | |
kvn@1286 | 253 | // Search for the CastP2X->Xor->URShift->Cmp path which checks whether |
kvn@1286 | 254 | // the store was done to a region different from the value's region, and |
kvn@1286 | 255 | // replace the Cmp with #0 (false) to collapse the G1 post barrier. |
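// For orientation, the cross-region check emitted by the G1 post barrier
// has roughly this shape (a sketch; the shift amount is the log2 of the
// heap region size):
//
//   if (((CastP2X(store_adr) ^ CastP2X(new_val)) >> LogOfHRGrainBytes) != 0) {
//     ... card mark ...
//   }
//
// Replacing the Cmp with CC_EQ makes the BoolTest::ne guard constant-false,
// so igvn removes the card-mark code on the now-dead branch.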
kvn@1286 | 256 | Node* xorx = NULL; |
kvn@1286 | 257 | for (DUIterator_Fast imax, i = p2x->fast_outs(imax); i < imax; i++) { |
kvn@1286 | 258 | Node* u = p2x->fast_out(i); |
kvn@1286 | 259 | if (u->Opcode() == Op_XorX) { |
kvn@1286 | 260 | xorx = u; |
kvn@1286 | 261 | break; |
kvn@1286 | 262 | } |
kvn@1286 | 263 | } |
kvn@1286 | 264 | assert(xorx != NULL, "missing G1 post barrier"); |
kvn@1286 | 265 | Node* shift = xorx->unique_out(); |
kvn@1286 | 266 | Node* cmpx = shift->unique_out(); |
kvn@1286 | 267 | assert(cmpx->is_Cmp() && cmpx->unique_out()->is_Bool() && |
kvn@1286 | 268 | cmpx->unique_out()->as_Bool()->_test._test == BoolTest::ne, |
kvn@1286 | 269 | "missing region check in G1 post barrier"); |
kvn@1286 | 270 | _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ)); |
kvn@1286 | 271 | |
kvn@1286 | 272 | // Remove G1 pre barrier. |
kvn@1286 | 273 | |
kvn@1286 | 274 | // Search for the "if (marking != 0)" check and set it to "false". |
kvn@1286 | 275 | // There is no G1 pre barrier if the previously stored value is NULL |
kvn@1286 | 276 | // (for example, after initialization). |
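// The guard being matched has roughly this shape (a sketch):
//
//   if (thread->satb_mark_queue().active() != 0) { ... pre barrier ... }
//
// i.e. a Load from ThreadLocal + marking_offset compared against intcon(0),
// which is exactly what the address checks below verify.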
kvn@1286 | 277 | if (this_region->is_Region() && this_region->req() == 3) { |
kvn@1286 | 278 | int ind = 1; |
kvn@1286 | 279 | if (!this_region->in(ind)->is_IfFalse()) { |
kvn@1286 | 280 | ind = 2; |
kvn@1286 | 281 | } |
kvn@1286 | 282 | if (this_region->in(ind)->is_IfFalse()) { |
kvn@1286 | 283 | Node* bol = this_region->in(ind)->in(0)->in(1); |
kvn@1286 | 284 | assert(bol->is_Bool(), ""); |
kvn@1286 | 285 | cmpx = bol->in(1); |
kvn@1286 | 286 | if (bol->as_Bool()->_test._test == BoolTest::ne && |
kvn@1286 | 287 | cmpx->is_Cmp() && cmpx->in(2) == intcon(0) && |
kvn@1286 | 288 | cmpx->in(1)->is_Load()) { |
kvn@1286 | 289 | Node* adr = cmpx->in(1)->as_Load()->in(MemNode::Address); |
kvn@1286 | 290 | const int marking_offset = in_bytes(JavaThread::satb_mark_queue_offset() + |
kvn@1286 | 291 | PtrQueue::byte_offset_of_active()); |
kvn@1286 | 292 | if (adr->is_AddP() && adr->in(AddPNode::Base) == top() && |
kvn@1286 | 293 | adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal && |
kvn@1286 | 294 | adr->in(AddPNode::Offset) == MakeConX(marking_offset)) { |
kvn@1286 | 295 | _igvn.replace_node(cmpx, makecon(TypeInt::CC_EQ)); |
kvn@1286 | 296 | } |
kvn@1286 | 297 | } |
kvn@1286 | 298 | } |
kvn@1286 | 299 | } |
kvn@1286 | 300 | // Now CastP2X can be removed since it is used only on a dead path |
kvn@1286 | 301 | // which is still alive until igvn optimizes it away. |
kvn@3521 | 302 | assert(p2x->outcnt() == 0 || p2x->unique_out()->Opcode() == Op_URShiftX, ""); |
kvn@1286 | 303 | _igvn.replace_node(p2x, top()); |
kvn@508 | 304 | } |
kvn@508 | 305 | } |
kvn@508 | 306 | |
kvn@508 | 307 | // Search for a memory operation for the specified memory slice. |
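// Walking backwards from 'mem', the search stops at one of two sentinels:
// the initial memory state of the method (start_mem) or the memory edge
// feeding the allocation itself (alloc_mem). Reaching a sentinel means no
// store to this slice was found, so callers substitute the default zero
// value for the field.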
kvn@688 | 308 | static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc, PhaseGVN *phase) { |
kvn@508 | 309 | Node *orig_mem = mem; |
kvn@508 | 310 | Node *alloc_mem = alloc->in(TypeFunc::Memory); |
kvn@688 | 311 | const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr(); |
kvn@508 | 312 | while (true) { |
kvn@508 | 313 | if (mem == alloc_mem || mem == start_mem ) { |
twisti@1040 | 314 | return mem; // hit one of our sentinels |
kvn@508 | 315 | } else if (mem->is_MergeMem()) { |
kvn@508 | 316 | mem = mem->as_MergeMem()->memory_at(alias_idx); |
kvn@508 | 317 | } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) { |
kvn@508 | 318 | Node *in = mem->in(0); |
kvn@508 | 319 | // We can safely skip over safepoints, calls, locks and membars because we |
kvn@508 | 320 | // already know that the object is safe to eliminate. |
kvn@508 | 321 | if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) { |
kvn@508 | 322 | return in; |
kvn@688 | 323 | } else if (in->is_Call()) { |
kvn@688 | 324 | CallNode *call = in->as_Call(); |
kvn@688 | 325 | if (!call->may_modify(tinst, phase)) { |
kvn@688 | 326 | mem = call->in(TypeFunc::Memory); |
kvn@688 | 327 | } |
kvn@688 | 328 | mem = in->in(TypeFunc::Memory); |
kvn@688 | 329 | } else if (in->is_MemBar()) { |
kvn@508 | 330 | mem = in->in(TypeFunc::Memory); |
kvn@508 | 331 | } else { |
kvn@508 | 332 | assert(false, "unexpected projection"); |
kvn@508 | 333 | } |
kvn@508 | 334 | } else if (mem->is_Store()) { |
kvn@508 | 335 | const TypePtr* atype = mem->as_Store()->adr_type(); |
kvn@508 | 336 | int adr_idx = Compile::current()->get_alias_index(atype); |
kvn@508 | 337 | if (adr_idx == alias_idx) { |
kvn@508 | 338 | assert(atype->isa_oopptr(), "address type must be oopptr"); |
kvn@508 | 339 | int adr_offset = atype->offset(); |
kvn@508 | 340 | uint adr_iid = atype->is_oopptr()->instance_id(); |
kvn@508 | 341 | // Array element references have the same alias_idx |
kvn@508 | 342 | // but different offsets and different instance_ids. |
kvn@508 | 343 | if (adr_offset == offset && adr_iid == alloc->_idx) |
kvn@508 | 344 | return mem; |
kvn@508 | 345 | } else { |
kvn@508 | 346 | assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw"); |
kvn@508 | 347 | } |
kvn@508 | 348 | mem = mem->in(MemNode::Memory); |
kvn@1535 | 349 | } else if (mem->is_ClearArray()) { |
kvn@1535 | 350 | if (!ClearArrayNode::step_through(&mem, alloc->_idx, phase)) { |
kvn@1535 | 351 | // Cannot bypass initialization of the instance |
kvn@1535 | 352 | // we are looking for. |
kvn@1535 | 353 | debug_only(intptr_t offset;) |
kvn@1535 | 354 | assert(alloc == AllocateNode::Ideal_allocation(mem->in(3), phase, offset), "sanity"); |
kvn@1535 | 355 | InitializeNode* init = alloc->as_Allocate()->initialization(); |
kvn@1535 | 356 | // We are looking for the stored value; return the Initialize node |
kvn@1535 | 357 | // or the memory edge from the Allocate node. |
kvn@1535 | 358 | if (init != NULL) |
kvn@1535 | 359 | return init; |
kvn@1535 | 360 | else |
kvn@1535 | 361 | return alloc->in(TypeFunc::Memory); // It will produce zero value (see callers). |
kvn@1535 | 362 | } |
kvn@1535 | 363 | // Otherwise skip it (the call updated 'mem' value). |
kvn@1019 | 364 | } else if (mem->Opcode() == Op_SCMemProj) { |
kvn@1019 | 365 | assert(mem->in(0)->is_LoadStore(), "sanity"); |
kvn@1019 | 366 | const TypePtr* atype = mem->in(0)->in(MemNode::Address)->bottom_type()->is_ptr(); |
kvn@1019 | 367 | int adr_idx = Compile::current()->get_alias_index(atype); |
kvn@1019 | 368 | if (adr_idx == alias_idx) { |
kvn@1019 | 369 | assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); |
kvn@1019 | 370 | return NULL; |
kvn@1019 | 371 | } |
kvn@1019 | 372 | mem = mem->in(0)->in(MemNode::Memory); |
kvn@508 | 373 | } else { |
kvn@508 | 374 | return mem; |
kvn@508 | 375 | } |
kvn@682 | 376 | assert(mem != orig_mem, "dead memory loop"); |
kvn@508 | 377 | } |
kvn@508 | 378 | } |
kvn@508 | 379 | |
kvn@508 | 380 | // |
kvn@508 | 381 | // Given a Memory Phi, compute a value Phi containing the values from stores |
kvn@508 | 382 | // on the input paths. |
kvn@508 | 383 | // Note: this function is recursive; its depth is limited by the "level" argument. |
kvn@508 | 384 | // Returns the computed Phi, or NULL if it cannot compute it. |
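// Sketch of the transformation (names are illustrative): for a memory state
//
//   memphi = Phi(region, mem_a, mem_b)
//
// where mem_a is a store of v_a to the field and mem_b reaches back to the
// allocation's initial memory, the function produces
//
//   valphi = Phi(region, v_a, zerocon(ft))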
kvn@682 | 385 | Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, Node_Stack *value_phis, int level) { |
kvn@682 | 386 | assert(mem->is_Phi(), "sanity"); |
kvn@682 | 387 | int alias_idx = C->get_alias_index(adr_t); |
kvn@682 | 388 | int offset = adr_t->offset(); |
kvn@682 | 389 | int instance_id = adr_t->instance_id(); |
kvn@682 | 390 | |
kvn@682 | 391 | // Check if an appropriate value phi already exists. |
kvn@682 | 392 | Node* region = mem->in(0); |
kvn@682 | 393 | for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) { |
kvn@682 | 394 | Node* phi = region->fast_out(k); |
kvn@682 | 395 | if (phi->is_Phi() && phi != mem && |
kvn@682 | 396 | phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) { |
kvn@682 | 397 | return phi; |
kvn@682 | 398 | } |
kvn@682 | 399 | } |
kvn@682 | 400 | // Check if an appropriate new value phi already exists. |
kvn@2985 | 401 | Node* new_phi = value_phis->find(mem->_idx); |
kvn@2985 | 402 | if (new_phi != NULL) |
kvn@2985 | 403 | return new_phi; |
kvn@508 | 404 | |
kvn@508 | 405 | if (level <= 0) { |
kvn@688 | 406 | return NULL; // Give up: phi tree too deep |
kvn@508 | 407 | } |
kvn@508 | 408 | Node *start_mem = C->start()->proj_out(TypeFunc::Memory); |
kvn@508 | 409 | Node *alloc_mem = alloc->in(TypeFunc::Memory); |
kvn@508 | 410 | |
kvn@508 | 411 | uint length = mem->req(); |
zgu@3900 | 412 | GrowableArray <Node *> values(length, length, NULL, false); |
kvn@508 | 413 | |
kvn@682 | 414 | // create a new Phi for the value |
kvn@682 | 415 | PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset); |
kvn@682 | 416 | transform_later(phi); |
kvn@682 | 417 | value_phis->push(phi, mem->_idx); |
kvn@682 | 418 | |
kvn@508 | 419 | for (uint j = 1; j < length; j++) { |
kvn@508 | 420 | Node *in = mem->in(j); |
kvn@508 | 421 | if (in == NULL || in->is_top()) { |
kvn@508 | 422 | values.at_put(j, in); |
kvn@508 | 423 | } else { |
kvn@688 | 424 | Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc, &_igvn); |
kvn@508 | 425 | if (val == start_mem || val == alloc_mem) { |
kvn@508 | 426 | // hit a sentinel, return appropriate 0 value |
kvn@508 | 427 | values.at_put(j, _igvn.zerocon(ft)); |
kvn@508 | 428 | continue; |
kvn@508 | 429 | } |
kvn@508 | 430 | if (val->is_Initialize()) { |
kvn@508 | 431 | val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); |
kvn@508 | 432 | } |
kvn@508 | 433 | if (val == NULL) { |
kvn@508 | 434 | return NULL; // can't find a value on this path |
kvn@508 | 435 | } |
kvn@508 | 436 | if (val == mem) { |
kvn@508 | 437 | values.at_put(j, mem); |
kvn@508 | 438 | } else if (val->is_Store()) { |
kvn@508 | 439 | values.at_put(j, val->in(MemNode::ValueIn)); |
kvn@508 | 440 | } else if(val->is_Proj() && val->in(0) == alloc) { |
kvn@508 | 441 | values.at_put(j, _igvn.zerocon(ft)); |
kvn@508 | 442 | } else if (val->is_Phi()) { |
kvn@682 | 443 | val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, value_phis, level-1); |
kvn@682 | 444 | if (val == NULL) { |
kvn@682 | 445 | return NULL; |
kvn@508 | 446 | } |
kvn@682 | 447 | values.at_put(j, val); |
kvn@1019 | 448 | } else if (val->Opcode() == Op_SCMemProj) { |
kvn@1019 | 449 | assert(val->in(0)->is_LoadStore(), "sanity"); |
kvn@1019 | 450 | assert(false, "Object is not scalar replaceable if a LoadStore node accesses its field"); |
kvn@1019 | 451 | return NULL; |
kvn@508 | 452 | } else { |
kvn@1019 | 453 | #ifdef ASSERT |
kvn@1019 | 454 | val->dump(); |
kvn@688 | 455 | assert(false, "unknown node on this path"); |
kvn@1019 | 456 | #endif |
kvn@688 | 457 | return NULL; // unknown node on this path |
kvn@508 | 458 | } |
kvn@508 | 459 | } |
kvn@508 | 460 | } |
kvn@682 | 461 | // Set Phi's inputs |
kvn@508 | 462 | for (uint j = 1; j < length; j++) { |
kvn@508 | 463 | if (values.at(j) == mem) { |
kvn@508 | 464 | phi->init_req(j, phi); |
kvn@508 | 465 | } else { |
kvn@508 | 466 | phi->init_req(j, values.at(j)); |
kvn@508 | 467 | } |
kvn@508 | 468 | } |
kvn@508 | 469 | return phi; |
kvn@508 | 470 | } |
kvn@508 | 471 | |
kvn@508 | 472 | // Search for the last value stored into the object's field. |
kvn@508 | 473 | Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) { |
kvn@658 | 474 | assert(adr_t->is_known_instance_field(), "instance required"); |
kvn@658 | 475 | int instance_id = adr_t->instance_id(); |
kvn@658 | 476 | assert((uint)instance_id == alloc->_idx, "wrong allocation"); |
kvn@508 | 477 | |
kvn@508 | 478 | int alias_idx = C->get_alias_index(adr_t); |
kvn@508 | 479 | int offset = adr_t->offset(); |
kvn@508 | 480 | Node *start_mem = C->start()->proj_out(TypeFunc::Memory); |
kvn@508 | 481 | Node *alloc_ctrl = alloc->in(TypeFunc::Control); |
kvn@508 | 482 | Node *alloc_mem = alloc->in(TypeFunc::Memory); |
kvn@682 | 483 | Arena *a = Thread::current()->resource_area(); |
kvn@682 | 484 | VectorSet visited(a); |
kvn@508 | 485 | |
kvn@508 | 486 | |
kvn@508 | 487 | bool done = sfpt_mem == alloc_mem; |
kvn@508 | 488 | Node *mem = sfpt_mem; |
kvn@508 | 489 | while (!done) { |
kvn@508 | 490 | if (visited.test_set(mem->_idx)) { |
kvn@508 | 491 | return NULL; // found a loop, give up |
kvn@508 | 492 | } |
kvn@688 | 493 | mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc, &_igvn); |
kvn@508 | 494 | if (mem == start_mem || mem == alloc_mem) { |
kvn@508 | 495 | done = true; // hit a sentinel, return appropriate 0 value |
kvn@508 | 496 | } else if (mem->is_Initialize()) { |
kvn@508 | 497 | mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); |
kvn@508 | 498 | if (mem == NULL) { |
kvn@508 | 499 | done = true; // Something went wrong. |
kvn@508 | 500 | } else if (mem->is_Store()) { |
kvn@508 | 501 | const TypePtr* atype = mem->as_Store()->adr_type(); |
kvn@508 | 502 | assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice"); |
kvn@508 | 503 | done = true; |
kvn@508 | 504 | } |
kvn@508 | 505 | } else if (mem->is_Store()) { |
kvn@508 | 506 | const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr(); |
kvn@508 | 507 | assert(atype != NULL, "address type must be oopptr"); |
kvn@508 | 508 | assert(C->get_alias_index(atype) == alias_idx && |
kvn@658 | 509 | atype->is_known_instance_field() && atype->offset() == offset && |
kvn@508 | 510 | atype->instance_id() == instance_id, "store is correct memory slice"); |
kvn@508 | 511 | done = true; |
kvn@508 | 512 | } else if (mem->is_Phi()) { |
kvn@508 | 513 | // try to find a phi's unique input |
kvn@508 | 514 | Node *unique_input = NULL; |
kvn@508 | 515 | Node *top = C->top(); |
kvn@508 | 516 | for (uint i = 1; i < mem->req(); i++) { |
kvn@688 | 517 | Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc, &_igvn); |
kvn@508 | 518 | if (n == NULL || n == top || n == mem) { |
kvn@508 | 519 | continue; |
kvn@508 | 520 | } else if (unique_input == NULL) { |
kvn@508 | 521 | unique_input = n; |
kvn@508 | 522 | } else if (unique_input != n) { |
kvn@508 | 523 | unique_input = top; |
kvn@508 | 524 | break; |
kvn@508 | 525 | } |
kvn@508 | 526 | } |
kvn@508 | 527 | if (unique_input != NULL && unique_input != top) { |
kvn@508 | 528 | mem = unique_input; |
kvn@508 | 529 | } else { |
kvn@508 | 530 | done = true; |
kvn@508 | 531 | } |
kvn@508 | 532 | } else { |
kvn@508 | 533 | assert(false, "unexpected node"); |
kvn@508 | 534 | } |
kvn@508 | 535 | } |
kvn@508 | 536 | if (mem != NULL) { |
kvn@508 | 537 | if (mem == start_mem || mem == alloc_mem) { |
kvn@508 | 538 | // hit a sentinel, return appropriate 0 value |
kvn@508 | 539 | return _igvn.zerocon(ft); |
kvn@508 | 540 | } else if (mem->is_Store()) { |
kvn@508 | 541 | return mem->in(MemNode::ValueIn); |
kvn@508 | 542 | } else if (mem->is_Phi()) { |
kvn@508 | 543 | // attempt to produce a Phi reflecting the values on the input paths of the Phi |
kvn@682 | 544 | Node_Stack value_phis(a, 8); |
kvn@688 | 545 | Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, &value_phis, ValueSearchLimit); |
kvn@508 | 546 | if (phi != NULL) { |
kvn@508 | 547 | return phi; |
kvn@682 | 548 | } else { |
kvn@682 | 549 | // Kill all new Phis |
kvn@682 | 550 | while (value_phis.is_nonempty()) { |
kvn@682 | 551 | Node* n = value_phis.node(); |
kvn@1976 | 552 | _igvn.replace_node(n, C->top()); |
kvn@682 | 553 | value_phis.pop(); |
kvn@682 | 554 | } |
kvn@508 | 555 | } |
kvn@508 | 556 | } |
kvn@508 | 557 | } |
kvn@508 | 558 | // Something went wrong. |
kvn@508 | 559 | return NULL; |
kvn@508 | 560 | } |
kvn@508 | 561 | |
kvn@508 | 562 | // Check the possibility of scalar replacement. |
kvn@508 | 563 | bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) { |
kvn@508 | 564 | // Scan the uses of the allocation to check for anything that would |
kvn@508 | 565 | // prevent us from eliminating it. |
kvn@508 | 566 | NOT_PRODUCT( const char* fail_eliminate = NULL; ) |
kvn@508 | 567 | DEBUG_ONLY( Node* disq_node = NULL; ) |
kvn@508 | 568 | bool can_eliminate = true; |
kvn@508 | 569 | |
kvn@508 | 570 | Node* res = alloc->result_cast(); |
kvn@508 | 571 | const TypeOopPtr* res_type = NULL; |
kvn@508 | 572 | if (res == NULL) { |
kvn@508 | 573 | // All users were eliminated. |
kvn@508 | 574 | } else if (!res->is_CheckCastPP()) { |
kvn@508 | 575 | NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";) |
kvn@508 | 576 | can_eliminate = false; |
kvn@508 | 577 | } else { |
kvn@508 | 578 | res_type = _igvn.type(res)->isa_oopptr(); |
kvn@508 | 579 | if (res_type == NULL) { |
kvn@508 | 580 | NOT_PRODUCT(fail_eliminate = "Neither instance or array allocation";) |
kvn@508 | 581 | can_eliminate = false; |
kvn@508 | 582 | } else if (res_type->isa_aryptr()) { |
kvn@508 | 583 | int length = alloc->in(AllocateNode::ALength)->find_int_con(-1); |
kvn@508 | 584 | if (length < 0) { |
kvn@508 | 585 | NOT_PRODUCT(fail_eliminate = "Array's size is not constant";) |
kvn@508 | 586 | can_eliminate = false; |
kvn@508 | 587 | } |
kvn@508 | 588 | } |
kvn@508 | 589 | } |
kvn@508 | 590 | |
kvn@508 | 591 | if (can_eliminate && res != NULL) { |
kvn@508 | 592 | for (DUIterator_Fast jmax, j = res->fast_outs(jmax); |
kvn@508 | 593 | j < jmax && can_eliminate; j++) { |
kvn@508 | 594 | Node* use = res->fast_out(j); |
kvn@508 | 595 | |
kvn@508 | 596 | if (use->is_AddP()) { |
kvn@508 | 597 | const TypePtr* addp_type = _igvn.type(use)->is_ptr(); |
kvn@508 | 598 | int offset = addp_type->offset(); |
kvn@508 | 599 | |
kvn@508 | 600 | if (offset == Type::OffsetTop || offset == Type::OffsetBot) { |
kvn@508 | 601 | NOT_PRODUCT(fail_eliminate = "Undefined field referrence";) |
kvn@508 | 602 | can_eliminate = false; |
kvn@508 | 603 | break; |
kvn@508 | 604 | } |
kvn@508 | 605 | for (DUIterator_Fast kmax, k = use->fast_outs(kmax); |
kvn@508 | 606 | k < kmax && can_eliminate; k++) { |
kvn@508 | 607 | Node* n = use->fast_out(k); |
kvn@508 | 608 | if (!n->is_Store() && n->Opcode() != Op_CastP2X) { |
kvn@508 | 609 | DEBUG_ONLY(disq_node = n;) |
kvn@688 | 610 | if (n->is_Load() || n->is_LoadStore()) { |
kvn@508 | 611 | NOT_PRODUCT(fail_eliminate = "Field load";) |
kvn@508 | 612 | } else { |
kvn@508 | 613 | NOT_PRODUCT(fail_eliminate = "Not store field referrence";) |
kvn@508 | 614 | } |
kvn@508 | 615 | can_eliminate = false; |
kvn@508 | 616 | } |
kvn@508 | 617 | } |
kvn@508 | 618 | } else if (use->is_SafePoint()) { |
kvn@508 | 619 | SafePointNode* sfpt = use->as_SafePoint(); |
kvn@603 | 620 | if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) { |
kvn@508 | 621 | // Object is passed as argument. |
kvn@508 | 622 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 623 | NOT_PRODUCT(fail_eliminate = "Object is passed as argument";) |
kvn@508 | 624 | can_eliminate = false; |
kvn@508 | 625 | } |
kvn@508 | 626 | Node* sfptMem = sfpt->memory(); |
kvn@508 | 627 | if (sfptMem == NULL || sfptMem->is_top()) { |
kvn@508 | 628 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 629 | NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";) |
kvn@508 | 630 | can_eliminate = false; |
kvn@508 | 631 | } else { |
kvn@508 | 632 | safepoints.append_if_missing(sfpt); |
kvn@508 | 633 | } |
kvn@508 | 634 | } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark |
kvn@508 | 635 | if (use->is_Phi()) { |
kvn@508 | 636 | if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) { |
kvn@508 | 637 | NOT_PRODUCT(fail_eliminate = "Object is return value";) |
kvn@508 | 638 | } else { |
kvn@508 | 639 | NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";) |
kvn@508 | 640 | } |
kvn@508 | 641 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 642 | } else { |
kvn@508 | 643 | if (use->Opcode() == Op_Return) { |
kvn@508 | 644 | NOT_PRODUCT(fail_eliminate = "Object is return value";) |
kvn@508 | 645 | } else { |
kvn@508 | 646 | NOT_PRODUCT(fail_eliminate = "Object is referenced by node";) |
kvn@508 | 647 | } |
kvn@508 | 648 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 649 | } |
kvn@508 | 650 | can_eliminate = false; |
kvn@508 | 651 | } |
kvn@508 | 652 | } |
kvn@508 | 653 | } |
kvn@508 | 654 | |
kvn@508 | 655 | #ifndef PRODUCT |
kvn@508 | 656 | if (PrintEliminateAllocations) { |
kvn@508 | 657 | if (can_eliminate) { |
kvn@508 | 658 | tty->print("Scalar "); |
kvn@508 | 659 | if (res == NULL) |
kvn@508 | 660 | alloc->dump(); |
kvn@508 | 661 | else |
kvn@508 | 662 | res->dump(); |
kvn@508 | 663 | } else { |
kvn@508 | 664 | tty->print("NotScalar (%s)", fail_eliminate); |
kvn@508 | 665 | if (res == NULL) |
kvn@508 | 666 | alloc->dump(); |
kvn@508 | 667 | else |
kvn@508 | 668 | res->dump(); |
kvn@508 | 669 | #ifdef ASSERT |
kvn@508 | 670 | if (disq_node != NULL) { |
kvn@508 | 671 | tty->print(" >>>> "); |
kvn@508 | 672 | disq_node->dump(); |
kvn@508 | 673 | } |
kvn@508 | 674 | #endif /*ASSERT*/ |
kvn@508 | 675 | } |
kvn@508 | 676 | } |
kvn@508 | 677 | #endif |
kvn@508 | 678 | return can_eliminate; |
kvn@508 | 679 | } |
kvn@508 | 680 | |
kvn@508 | 681 | // Do scalar replacement. |
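// A sketch of the effect (illustrative): for a non-escaping
// `p = new Point(x, y)`, each safepoint that mentions p in its debug info
// gets a SafePointScalarObjectNode describing the object plus one extra
// debug input per field holding that field's current value; on
// deoptimization the object is rematerialized from those values instead of
// ever being allocated on the heap.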
kvn@508 | 682 | bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) { |
kvn@508 | 683 | GrowableArray <SafePointNode *> safepoints_done; |
kvn@508 | 684 | |
kvn@508 | 685 | ciKlass* klass = NULL; |
kvn@508 | 686 | ciInstanceKlass* iklass = NULL; |
kvn@508 | 687 | int nfields = 0; |
kvn@508 | 688 | int array_base; |
kvn@508 | 689 | int element_size; |
kvn@508 | 690 | BasicType basic_elem_type; |
kvn@508 | 691 | ciType* elem_type; |
kvn@508 | 692 | |
kvn@508 | 693 | Node* res = alloc->result_cast(); |
kvn@508 | 694 | const TypeOopPtr* res_type = NULL; |
kvn@508 | 695 | if (res != NULL) { // Could be NULL when there are no users |
kvn@508 | 696 | res_type = _igvn.type(res)->isa_oopptr(); |
kvn@508 | 697 | } |
kvn@508 | 698 | |
kvn@508 | 699 | if (res != NULL) { |
kvn@508 | 700 | klass = res_type->klass(); |
kvn@508 | 701 | if (res_type->isa_instptr()) { |
kvn@508 | 702 | // find the fields of the class which will be needed for safepoint debug information |
kvn@508 | 703 | assert(klass->is_instance_klass(), "must be an instance klass."); |
kvn@508 | 704 | iklass = klass->as_instance_klass(); |
kvn@508 | 705 | nfields = iklass->nof_nonstatic_fields(); |
kvn@508 | 706 | } else { |
kvn@508 | 707 | // find the array's elements which will be needed for safepoint debug information |
kvn@508 | 708 | nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1); |
kvn@508 | 709 | assert(klass->is_array_klass() && nfields >= 0, "must be an array klass."); |
kvn@508 | 710 | elem_type = klass->as_array_klass()->element_type(); |
kvn@508 | 711 | basic_elem_type = elem_type->basic_type(); |
kvn@508 | 712 | array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type); |
kvn@508 | 713 | element_size = type2aelembytes(basic_elem_type); |
kvn@508 | 714 | } |
kvn@508 | 715 | } |
kvn@508 | 716 | // |
kvn@508 | 717 | // Process the safepoint uses |
kvn@508 | 718 | // |
kvn@508 | 719 | while (safepoints.length() > 0) { |
kvn@508 | 720 | SafePointNode* sfpt = safepoints.pop(); |
kvn@508 | 721 | Node* mem = sfpt->memory(); |
kvn@508 | 722 | uint first_ind = sfpt->req(); |
kvn@508 | 723 | SafePointScalarObjectNode* sobj = new (C, 1) SafePointScalarObjectNode(res_type, |
kvn@508 | 724 | #ifdef ASSERT |
kvn@508 | 725 | alloc, |
kvn@508 | 726 | #endif |
kvn@508 | 727 | first_ind, nfields); |
kvn@3311 | 728 | sobj->init_req(0, C->root()); |
kvn@508 | 729 | transform_later(sobj); |
kvn@508 | 730 | |
kvn@508 | 731 | // Scan object's fields adding an input to the safepoint for each field. |
kvn@508 | 732 | for (int j = 0; j < nfields; j++) { |
kvn@741 | 733 | intptr_t offset; |
kvn@508 | 734 | ciField* field = NULL; |
kvn@508 | 735 | if (iklass != NULL) { |
kvn@508 | 736 | field = iklass->nonstatic_field_at(j); |
kvn@508 | 737 | offset = field->offset(); |
kvn@508 | 738 | elem_type = field->type(); |
kvn@508 | 739 | basic_elem_type = field->layout_type(); |
kvn@508 | 740 | } else { |
kvn@741 | 741 | offset = array_base + j * (intptr_t)element_size; |
kvn@508 | 742 | } |
kvn@508 | 743 | |
kvn@508 | 744 | const Type *field_type; |
kvn@508 | 745 | // The next code is taken from Parse::do_get_xxx(). |
kvn@559 | 746 | if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) { |
kvn@508 | 747 | if (!elem_type->is_loaded()) { |
kvn@508 | 748 | field_type = TypeInstPtr::BOTTOM; |
kvn@2037 | 749 | } else if (field != NULL && field->is_constant() && field->is_static()) { |
kvn@508 | 750 | // This can happen if the constant oop is non-perm. |
kvn@508 | 751 | ciObject* con = field->constant_value().as_object(); |
kvn@508 | 752 | // Do not "join" in the previous type; it doesn't add value, |
kvn@508 | 753 | // and may yield a vacuous result if the field is of interface type. |
kvn@508 | 754 | field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); |
kvn@508 | 755 | assert(field_type != NULL, "field singleton type must be consistent"); |
kvn@508 | 756 | } else { |
kvn@508 | 757 | field_type = TypeOopPtr::make_from_klass(elem_type->as_klass()); |
kvn@508 | 758 | } |
kvn@559 | 759 | if (UseCompressedOops) { |
kvn@656 | 760 | field_type = field_type->make_narrowoop(); |
kvn@559 | 761 | basic_elem_type = T_NARROWOOP; |
kvn@559 | 762 | } |
kvn@508 | 763 | } else { |
kvn@508 | 764 | field_type = Type::get_const_basic_type(basic_elem_type); |
kvn@508 | 765 | } |
kvn@508 | 766 | |
kvn@508 | 767 | const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr(); |
kvn@508 | 768 | |
kvn@508 | 769 | Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc); |
kvn@508 | 770 | if (field_val == NULL) { |
kvn@3311 | 771 | // We weren't able to find a value for this field, |
kvn@3311 | 772 | // give up on eliminating this allocation. |
kvn@3311 | 773 | |
kvn@3311 | 774 | // Remove any extra entries we added to the safepoint. |
kvn@508 | 775 | uint last = sfpt->req() - 1; |
kvn@508 | 776 | for (int k = 0; k < j; k++) { |
kvn@508 | 777 | sfpt->del_req(last--); |
kvn@508 | 778 | } |
kvn@508 | 779 | // rollback processed safepoints |
kvn@508 | 780 | while (safepoints_done.length() > 0) { |
kvn@508 | 781 | SafePointNode* sfpt_done = safepoints_done.pop(); |
kvn@508 | 782 | // remove any extra entries we added to the safepoint |
kvn@508 | 783 | last = sfpt_done->req() - 1; |
kvn@508 | 784 | for (int k = 0; k < nfields; k++) { |
kvn@508 | 785 | sfpt_done->del_req(last--); |
kvn@508 | 786 | } |
kvn@508 | 787 | JVMState *jvms = sfpt_done->jvms(); |
kvn@508 | 788 | jvms->set_endoff(sfpt_done->req()); |
kvn@508 | 789 | // Now make a pass over the debug information replacing any references |
kvn@508 | 790 | // to SafePointScalarObjectNode with the allocated object. |
kvn@508 | 791 | int start = jvms->debug_start(); |
kvn@508 | 792 | int end = jvms->debug_end(); |
kvn@508 | 793 | for (int i = start; i < end; i++) { |
kvn@508 | 794 | if (sfpt_done->in(i)->is_SafePointScalarObject()) { |
kvn@508 | 795 | SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject(); |
kvn@508 | 796 | if (scobj->first_index() == sfpt_done->req() && |
kvn@508 | 797 | scobj->n_fields() == (uint)nfields) { |
kvn@508 | 798 | assert(scobj->alloc() == alloc, "sanity"); |
kvn@508 | 799 | sfpt_done->set_req(i, res); |
kvn@508 | 800 | } |
kvn@508 | 801 | } |
kvn@508 | 802 | } |
kvn@508 | 803 | } |
kvn@508 | 804 | #ifndef PRODUCT |
kvn@508 | 805 | if (PrintEliminateAllocations) { |
kvn@508 | 806 | if (field != NULL) { |
kvn@508 | 807 | tty->print("=== At SafePoint node %d can't find value of Field: ", |
kvn@508 | 808 | sfpt->_idx); |
kvn@508 | 809 | field->print(); |
kvn@508 | 810 | int field_idx = C->get_alias_index(field_addr_type); |
kvn@508 | 811 | tty->print(" (alias_idx=%d)", field_idx); |
kvn@508 | 812 | } else { // Array's element |
kvn@508 | 813 | tty->print("=== At SafePoint node %d can't find value of array element [%d]", |
kvn@508 | 814 | sfpt->_idx, j); |
kvn@508 | 815 | } |
kvn@508 | 816 | tty->print(", which prevents elimination of: "); |
kvn@508 | 817 | if (res == NULL) |
kvn@508 | 818 | alloc->dump(); |
kvn@508 | 819 | else |
kvn@508 | 820 | res->dump(); |
kvn@508 | 821 | } |
kvn@508 | 822 | #endif |
kvn@508 | 823 | return false; |
kvn@508 | 824 | } |
kvn@559 | 825 | if (UseCompressedOops && field_type->isa_narrowoop()) { |
kvn@559 | 826 | // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation |
kvn@559 | 827 | // to be able to scalar replace the allocation. |
kvn@656 | 828 | if (field_val->is_EncodeP()) { |
kvn@656 | 829 | field_val = field_val->in(1); |
kvn@656 | 830 | } else { |
kvn@656 | 831 | field_val = transform_later(new (C, 2) DecodeNNode(field_val, field_val->bottom_type()->make_ptr())); |
kvn@656 | 832 | } |
kvn@559 | 833 | } |
kvn@508 | 834 | sfpt->add_req(field_val); |
kvn@508 | 835 | } |
kvn@508 | 836 | JVMState *jvms = sfpt->jvms(); |
kvn@508 | 837 | jvms->set_endoff(sfpt->req()); |
kvn@508 | 838 | // Now make a pass over the debug information replacing any references |
kvn@508 | 839 | // to the allocated object with "sobj" |
kvn@508 | 840 | int start = jvms->debug_start(); |
kvn@508 | 841 | int end = jvms->debug_end(); |
kvn@508 | 842 | for (int i = start; i < end; i++) { |
kvn@508 | 843 | if (sfpt->in(i) == res) { |
kvn@508 | 844 | sfpt->set_req(i, sobj); |
kvn@508 | 845 | } |
kvn@508 | 846 | } |
kvn@508 | 847 | safepoints_done.append_if_missing(sfpt); // keep it for rollback |
kvn@508 | 848 | } |
kvn@508 | 849 | return true; |
kvn@508 | 850 | } |
kvn@508 | 851 | |
kvn@508 | 852 | // Process users of eliminated allocation. |
kvn@508 | 853 | void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { |
kvn@508 | 854 | Node* res = alloc->result_cast(); |
kvn@508 | 855 | if (res != NULL) { |
kvn@508 | 856 | for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) { |
kvn@508 | 857 | Node *use = res->last_out(j); |
kvn@508 | 858 | uint oc1 = res->outcnt(); |
kvn@508 | 859 | |
kvn@508 | 860 | if (use->is_AddP()) { |
kvn@508 | 861 | for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) { |
kvn@508 | 862 | Node *n = use->last_out(k); |
kvn@508 | 863 | uint oc2 = use->outcnt(); |
kvn@508 | 864 | if (n->is_Store()) { |
kvn@1535 | 865 | #ifdef ASSERT |
kvn@1535 | 866 | // Verify that there are no dependent MemBarVolatile nodes; |
kvn@1535 | 867 | // they should be removed during IGVN, see MemBarNode::Ideal(). |
kvn@1535 | 868 | for (DUIterator_Fast pmax, p = n->fast_outs(pmax); |
kvn@1535 | 869 | p < pmax; p++) { |
kvn@1535 | 870 | Node* mb = n->fast_out(p); |
kvn@1535 | 871 | assert(mb->is_Initialize() || !mb->is_MemBar() || |
kvn@1535 | 872 | mb->req() <= MemBarNode::Precedent || |
kvn@1535 | 873 | mb->in(MemBarNode::Precedent) != n, |
kvn@1535 | 874 | "MemBarVolatile should be eliminated for non-escaping object"); |
kvn@1535 | 875 | } |
kvn@1535 | 876 | #endif |
kvn@508 | 877 | _igvn.replace_node(n, n->in(MemNode::Memory)); |
kvn@508 | 878 | } else { |
kvn@508 | 879 | eliminate_card_mark(n); |
kvn@508 | 880 | } |
kvn@508 | 881 | k -= (oc2 - use->outcnt()); |
kvn@508 | 882 | } |
kvn@508 | 883 | } else { |
kvn@508 | 884 | eliminate_card_mark(use); |
kvn@508 | 885 | } |
kvn@508 | 886 | j -= (oc1 - res->outcnt()); |
kvn@508 | 887 | } |
kvn@508 | 888 | assert(res->outcnt() == 0, "all uses of allocated objects must be deleted"); |
kvn@508 | 889 | _igvn.remove_dead_node(res); |
kvn@508 | 890 | } |
kvn@508 | 891 | |
kvn@508 | 892 | // |
kvn@508 | 893 | // Process other users of allocation's projections |
kvn@508 | 894 | // |
kvn@508 | 895 | if (_resproj != NULL && _resproj->outcnt() != 0) { |
kvn@508 | 896 | for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) { |
kvn@508 | 897 | Node *use = _resproj->last_out(j); |
kvn@508 | 898 | uint oc1 = _resproj->outcnt(); |
kvn@508 | 899 | if (use->is_Initialize()) { |
kvn@508 | 900 | // Eliminate Initialize node. |
kvn@508 | 901 | InitializeNode *init = use->as_Initialize(); |
kvn@508 | 902 | assert(init->outcnt() <= 2, "only a control and memory projection expected"); |
kvn@508 | 903 | Node *ctrl_proj = init->proj_out(TypeFunc::Control); |
kvn@508 | 904 | if (ctrl_proj != NULL) { |
kvn@508 | 905 | assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection"); |
kvn@508 | 906 | _igvn.replace_node(ctrl_proj, _fallthroughcatchproj); |
kvn@508 | 907 | } |
kvn@508 | 908 | Node *mem_proj = init->proj_out(TypeFunc::Memory); |
kvn@508 | 909 | if (mem_proj != NULL) { |
kvn@508 | 910 | Node *mem = init->in(TypeFunc::Memory); |
kvn@508 | 911 | #ifdef ASSERT |
kvn@508 | 912 | if (mem->is_MergeMem()) { |
kvn@508 | 913 | assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection"); |
kvn@508 | 914 | } else { |
kvn@508 | 915 | assert(mem == _memproj_fallthrough, "allocation memory projection"); |
kvn@508 | 916 | } |
kvn@508 | 917 | #endif |
kvn@508 | 918 | _igvn.replace_node(mem_proj, mem); |
kvn@508 | 919 | } |
kvn@508 | 920 | } else if (use->is_AddP()) { |
kvn@508 | 921 | // raw memory addresses used only by the initialization |
kvn@1143 | 922 | _igvn.replace_node(use, C->top()); |
kvn@508 | 923 | } else { |
kvn@508 | 924 | assert(false, "only Initialize or AddP expected"); |
kvn@508 | 925 | } |
kvn@508 | 926 | j -= (oc1 - _resproj->outcnt()); |
kvn@508 | 927 | } |
kvn@508 | 928 | } |
kvn@508 | 929 | if (_fallthroughcatchproj != NULL) { |
kvn@508 | 930 | _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control)); |
kvn@508 | 931 | } |
kvn@508 | 932 | if (_memproj_fallthrough != NULL) { |
kvn@508 | 933 | _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory)); |
kvn@508 | 934 | } |
kvn@508 | 935 | if (_memproj_catchall != NULL) { |
kvn@508 | 936 | _igvn.replace_node(_memproj_catchall, C->top()); |
kvn@508 | 937 | } |
kvn@508 | 938 | if (_ioproj_fallthrough != NULL) { |
kvn@508 | 939 | _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O)); |
kvn@508 | 940 | } |
kvn@508 | 941 | if (_ioproj_catchall != NULL) { |
kvn@508 | 942 | _igvn.replace_node(_ioproj_catchall, C->top()); |
kvn@508 | 943 | } |
kvn@508 | 944 | if (_catchallcatchproj != NULL) { |
kvn@508 | 945 | _igvn.replace_node(_catchallcatchproj, C->top()); |
kvn@508 | 946 | } |
kvn@508 | 947 | } |
kvn@508 | 948 | |
kvn@508 | 949 | bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { |
kvn@508 | 950 | |
kvn@508 | 951 | if (!EliminateAllocations || !alloc->_is_scalar_replaceable) { |
kvn@508 | 952 | return false; |
kvn@508 | 953 | } |
kvn@508 | 954 | |
kvn@508 | 955 | extract_call_projections(alloc); |
kvn@508 | 956 | |
kvn@508 | 957 | GrowableArray <SafePointNode *> safepoints; |
kvn@508 | 958 | if (!can_eliminate_allocation(alloc, safepoints)) { |
kvn@508 | 959 | return false; |
kvn@508 | 960 | } |
kvn@508 | 961 | |
kvn@508 | 962 | if (!scalar_replacement(alloc, safepoints)) { |
kvn@508 | 963 | return false; |
kvn@508 | 964 | } |
kvn@508 | 965 | |
never@1515 | 966 | CompileLog* log = C->log(); |
never@1515 | 967 | if (log != NULL) { |
never@1515 | 968 | Node* klass = alloc->in(AllocateNode::KlassNode); |
never@1515 | 969 | const TypeKlassPtr* tklass = _igvn.type(klass)->is_klassptr(); |
never@1515 | 970 | log->head("eliminate_allocation type='%d'", |
never@1515 | 971 | log->identify(tklass->klass())); |
never@1515 | 972 | JVMState* p = alloc->jvms(); |
never@1515 | 973 | while (p != NULL) { |
never@1515 | 974 | log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); |
never@1515 | 975 | p = p->caller(); |
never@1515 | 976 | } |
never@1515 | 977 | log->tail("eliminate_allocation"); |
never@1515 | 978 | } |
never@1515 | 979 | |
kvn@508 | 980 | process_users_of_allocation(alloc); |
kvn@508 | 981 | |
kvn@508 | 982 | #ifndef PRODUCT |
never@1515 | 983 | if (PrintEliminateAllocations) { |
never@1515 | 984 | if (alloc->is_AllocateArray()) |
never@1515 | 985 | tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx); |
never@1515 | 986 | else |
never@1515 | 987 | tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx); |
never@1515 | 988 | } |
kvn@508 | 989 | #endif |
kvn@508 | 990 | |
kvn@508 | 991 | return true; |
kvn@508 | 992 | } |
kvn@508 | 993 | |
duke@435 | 994 | |
duke@435 | 995 | //---------------------------set_eden_pointers------------------------- |
duke@435 | 996 | void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) { |
duke@435 | 997 | if (UseTLAB) { // Private allocation: load from TLS |
duke@435 | 998 | Node* thread = transform_later(new (C, 1) ThreadLocalNode()); |
duke@435 | 999 | int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset()); |
duke@435 | 1000 | int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset()); |
duke@435 | 1001 | eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset); |
duke@435 | 1002 | eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset); |
duke@435 | 1003 | } else { // Shared allocation: load from globals |
duke@435 | 1004 | CollectedHeap* ch = Universe::heap(); |
duke@435 | 1005 | address top_adr = (address)ch->top_addr(); |
duke@435 | 1006 | address end_adr = (address)ch->end_addr(); |
duke@435 | 1007 | eden_top_adr = makecon(TypeRawPtr::make(top_adr)); |
duke@435 | 1008 | eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr); |
duke@435 | 1009 | } |
duke@435 | 1010 | } |
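// Note: with TLABs the top/end words live at fixed offsets inside the
// current JavaThread, so the fast path touches only thread-local state;
// without TLABs the shared eden top/end globals are used and the allocation
// loop must synchronize via Load-Locked/Store-Conditional (see the comment
// below).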
duke@435 | 1011 | |
duke@435 | 1012 | |
duke@435 | 1013 | Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) { |
duke@435 | 1014 | Node* adr = basic_plus_adr(base, offset); |
kvn@855 | 1015 | const TypePtr* adr_type = adr->bottom_type()->is_ptr(); |
coleenp@548 | 1016 | Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt); |
duke@435 | 1017 | transform_later(value); |
duke@435 | 1018 | return value; |
duke@435 | 1019 | } |
duke@435 | 1020 | |
duke@435 | 1021 | |
duke@435 | 1022 | Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) { |
duke@435 | 1023 | Node* adr = basic_plus_adr(base, offset); |
coleenp@548 | 1024 | mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt); |
duke@435 | 1025 | transform_later(mem); |
duke@435 | 1026 | return mem; |
duke@435 | 1027 | } |
duke@435 | 1028 | |
duke@435 | 1029 | //============================================================================= |
duke@435 | 1030 | // |
duke@435 | 1031 | // A L L O C A T I O N |
duke@435 | 1032 | // |
duke@435 | 1033 | // Allocation attempts to be fast in the case of frequent small objects. |
duke@435 | 1034 | // It breaks down like this: |
duke@435 | 1035 | // |
duke@435 | 1036 | // 1) Size in doublewords is computed. This is a constant for objects and |
duke@435 | 1037 | // variable for most arrays. Doubleword units are used to avoid size |
duke@435 | 1038 | // overflow of huge doubleword arrays. We need doublewords in the end for |
duke@435 | 1039 | // rounding. |
duke@435 | 1040 | // |
duke@435 | 1041 | // 2) Size is checked for being 'too large'. Too-large allocations will go |
duke@435 | 1042 | // the slow path into the VM. The slow path can throw any required |
duke@435 | 1043 | // exceptions, and does all the special checks for very large arrays. The |
duke@435 | 1044 | // size test can constant-fold away for objects. For objects with |
duke@435 | 1045 | // finalizers it constant-folds the other way: you always go slow with |
duke@435 | 1046 | // finalizers. |
duke@435 | 1047 | // |
duke@435 | 1048 | // 3) If NOT using TLABs, this is the contended loop-back point. |
duke@435 | 1049 | // Load-Locked the heap top. If using TLABs normal-load the heap top. |
duke@435 | 1050 | // |
duke@435 | 1051 | // 4) Check that heap top + size*8 < max. If we fail, go the slow route. |
duke@435 | 1052 | // NOTE: "top+size*8" cannot wrap the 4Gig line! Here's why: for largish |
duke@435 | 1053 | // "size*8" we always enter the VM, where "largish" is a constant picked small |
duke@435 | 1054 | // enough that there's always space between the eden max and 4Gig (old space is |
duke@435 | 1055 | // there so it's quite large) and large enough that the cost of entering the VM |
duke@435 | 1056 | // is dwarfed by the cost to initialize the space. |
duke@435 | 1057 | // |
duke@435 | 1058 | // 5) If NOT using TLABs, Store-Conditional the adjusted heap top back |
duke@435 | 1059 | // down. If contended, repeat at step 3. If using TLABs normal-store |
duke@435 | 1060 | // adjusted heap top back down; there is no contention. |
duke@435 | 1061 | // |
duke@435 | 1062 | // 6) If !ZeroTLAB then Bulk-clear the object/array. Fill in klass & mark |
duke@435 | 1063 | // fields. |
duke@435 | 1064 | // |
duke@435 | 1065 | // 7) Merge with the slow-path; cast the raw memory pointer to the correct |
duke@435 | 1066 | // oop flavor. |
duke@435 | 1067 | // |
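// In pseudocode, the TLAB fast path sketched by steps 1-7 (illustration
// only; the helper names are hypothetical, not the literal node graph):
//
//   size_in_bytes = round_to_doublewords(layout_size);
//   if (size_in_bytes > FastAllocateSizeLimit || needs_finalizer) goto slow;
//   top     = thread->tlab_top();                    // step 3: normal load
//   new_top = top + size_in_bytes;
//   if (new_top > thread->tlab_end()) goto slow;     // step 4: limit check
//   thread->set_tlab_top(new_top);                   // step 5: normal store
//   if (!ZeroTLAB) zero_memory(top, size_in_bytes);  // step 6: bulk clear
//   install_mark_and_klass(top);                     // step 6: headers
//   result = (oop) top;                              // step 7: cast to oop
//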
duke@435 | 1068 | //============================================================================= |
duke@435 | 1069 | // FastAllocateSizeLimit value is in DOUBLEWORDS. |
duke@435 | 1070 | // Allocations bigger than this always go the slow route. |
duke@435 | 1071 | // This value must be small enough that allocation attempts that need to |
duke@435 | 1072 | // trigger exceptions go the slow route. Also, it must be small enough so |
duke@435 | 1073 | // that heap_top + size_in_bytes does not wrap around the 4Gig limit. |
duke@435 | 1074 | //============================================================================= |
duke@435 | 1075 | // %%% Here is an old comment from parseHelper.cpp; is it outdated? |
duke@435 | 1076 | // The allocator will coalesce int->oop copies away. See comment in |
duke@435 | 1077 | // coalesce.cpp about how this works. It depends critically on the exact |
duke@435 | 1078 | // code shape produced here, so if you are changing this code shape |
duke@435 | 1079 | // make sure the GC info for the heap-top is correct in and around the |
duke@435 | 1080 | // slow-path call. |
duke@435 | 1081 | // |
duke@435 | 1082 | |
duke@435 | 1083 | void PhaseMacroExpand::expand_allocate_common( |
duke@435 | 1084 | AllocateNode* alloc, // allocation node to be expanded |
duke@435 | 1085 | Node* length, // array length for an array allocation |
duke@435 | 1086 | const TypeFunc* slow_call_type, // Type of slow call |
duke@435 | 1087 | address slow_call_address // Address of slow call |
duke@435 | 1088 | ) |
duke@435 | 1089 | { |
duke@435 | 1090 | |
duke@435 | 1091 | Node* ctrl = alloc->in(TypeFunc::Control); |
duke@435 | 1092 | Node* mem = alloc->in(TypeFunc::Memory); |
duke@435 | 1093 | Node* i_o = alloc->in(TypeFunc::I_O); |
duke@435 | 1094 | Node* size_in_bytes = alloc->in(AllocateNode::AllocSize); |
duke@435 | 1095 | Node* klass_node = alloc->in(AllocateNode::KlassNode); |
duke@435 | 1096 | Node* initial_slow_test = alloc->in(AllocateNode::InitialTest); |
duke@435 | 1097 | |
roland@3392 | 1098 | Node* storestore = alloc->storestore(); |
roland@3392 | 1099 | if (storestore != NULL) { |
roland@3392 | 1100 | // Break this link that is no longer useful and confuses register allocation |
roland@3392 | 1101 | storestore->set_req(MemBarNode::Precedent, top()); |
roland@3392 | 1102 | } |
roland@3392 | 1103 | |
duke@435 | 1104 | assert(ctrl != NULL, "must have control"); |
duke@435 | 1105 | // We need a Region and corresponding Phi's to merge the slow-path and fast-path results. |
duke@435 | 1106 | // They will not be used if "always_slow" is set. |
duke@435 | 1107 | enum { slow_result_path = 1, fast_result_path = 2 }; |
duke@435 | 1108 | Node *result_region; |
duke@435 | 1109 | Node *result_phi_rawmem; |
duke@435 | 1110 | Node *result_phi_rawoop; |
duke@435 | 1111 | Node *result_phi_i_o; |
duke@435 | 1112 | |
duke@435 | 1113 | // The initial slow comparison is a size check; the comparison |
duke@435 | 1114 | // we want to do is a BoolTest::gt. |
duke@435 | 1115 | bool always_slow = false; |
duke@435 | 1116 | int tv = _igvn.find_int_con(initial_slow_test, -1); |
duke@435 | 1117 | if (tv >= 0) { |
duke@435 | 1118 | always_slow = (tv == 1); |
duke@435 | 1119 | initial_slow_test = NULL; |
duke@435 | 1120 | } else { |
duke@435 | 1121 | initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn); |
duke@435 | 1122 | } |
duke@435 | 1123 | |
kvn@1215 | 1124 | if (C->env()->dtrace_alloc_probes() || |
ysr@777 | 1125 | !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() || |
ysr@777 | 1126 | (UseConcMarkSweepGC && CMSIncrementalMode))) { |
duke@435 | 1127 | // Force slow-path allocation |
duke@435 | 1128 | always_slow = true; |
duke@435 | 1129 | initial_slow_test = NULL; |
duke@435 | 1130 | } |
duke@435 | 1131 | |
ysr@777 | 1132 | |
duke@435 | 1133 | enum { too_big_or_final_path = 1, need_gc_path = 2 }; |
duke@435 | 1134 | Node *slow_region = NULL; |
duke@435 | 1135 | Node *toobig_false = ctrl; |
duke@435 | 1136 | |
duke@435 | 1137 | assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent"); |
duke@435 | 1138 | // generate the initial test if necessary |
duke@435 | 1139 | if (initial_slow_test != NULL ) { |
duke@435 | 1140 | slow_region = new (C, 3) RegionNode(3); |
duke@435 | 1141 | |
duke@435 | 1142 | // Now make the initial failure test. Usually a too-big test but |
duke@435 | 1143 | // might be a TRUE for finalizers or a fancy class check for |
duke@435 | 1144 | // newInstance0. |
duke@435 | 1145 | IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN); |
duke@435 | 1146 | transform_later(toobig_iff); |
duke@435 | 1147 | // Plug the failing-too-big test into the slow-path region |
duke@435 | 1148 | Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff ); |
duke@435 | 1149 | transform_later(toobig_true); |
duke@435 | 1150 | slow_region ->init_req( too_big_or_final_path, toobig_true ); |
duke@435 | 1151 | toobig_false = new (C, 1) IfFalseNode( toobig_iff ); |
duke@435 | 1152 | transform_later(toobig_false); |
duke@435 | 1153 | } else { // No initial test, just fall into next case |
duke@435 | 1154 | toobig_false = ctrl; |
duke@435 | 1155 | debug_only(slow_region = NodeSentinel); |
duke@435 | 1156 | } |
duke@435 | 1157 | |
duke@435 | 1158 | Node *slow_mem = mem; // save the current memory state for slow path |
duke@435 | 1159 | // generate the fast allocation code unless we know that the initial test will always go slow |
duke@435 | 1160 | if (!always_slow) { |
kvn@1000 | 1161 | // Fast path modifies only raw memory. |
kvn@1000 | 1162 | if (mem->is_MergeMem()) { |
kvn@1000 | 1163 | mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw); |
kvn@1000 | 1164 | } |
kvn@1000 | 1165 | |
ysr@777 | 1166 | Node* eden_top_adr; |
ysr@777 | 1167 | Node* eden_end_adr; |
ysr@777 | 1168 | |
ysr@777 | 1169 | set_eden_pointers(eden_top_adr, eden_end_adr); |
ysr@777 | 1170 | |
ysr@777 | 1171 | // Load Eden::end. Loop invariant and hoisted. |
ysr@777 | 1172 | // |
ysr@777 | 1173 | // Note: We set the control input on "eden_end" and "old_eden_top" when using |
ysr@777 | 1174 | // a TLAB to work around a bug where these values were being moved across |
ysr@777 | 1175 | // a safepoint. These are not oops, so they cannot be included in the oop |
phh@2423 | 1176 | // map, but they can be changed by a GC. The proper way to fix this would |
ysr@777 | 1177 | // be to set the raw memory state when generating a SafepointNode. However |
ysr@777 | 1178 | // this will require extensive changes to the loop optimization in order to |
ysr@777 | 1179 | // prevent a degradation of the optimization. |
ysr@777 | 1180 | // See comment in memnode.hpp, around line 227 in class LoadPNode. |
ysr@777 | 1181 | Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS); |
ysr@777 | 1182 | |
duke@435 | 1183 | // allocate the Region and Phi nodes for the result |
duke@435 | 1184 | result_region = new (C, 3) RegionNode(3); |
phh@2423 | 1185 | result_phi_rawmem = new (C, 3) PhiNode(result_region, Type::MEMORY, TypeRawPtr::BOTTOM); |
phh@2423 | 1186 | result_phi_rawoop = new (C, 3) PhiNode(result_region, TypeRawPtr::BOTTOM); |
phh@2423 | 1187 | result_phi_i_o = new (C, 3) PhiNode(result_region, Type::ABIO); // I/O is used for Prefetch |
duke@435 | 1188 | |
duke@435 | 1189 | // We need a Region for the loop-back contended case. |
duke@435 | 1190 | enum { fall_in_path = 1, contended_loopback_path = 2 }; |
duke@435 | 1191 | Node *contended_region; |
duke@435 | 1192 | Node *contended_phi_rawmem; |
phh@2423 | 1193 | if (UseTLAB) { |
duke@435 | 1194 | contended_region = toobig_false; |
duke@435 | 1195 | contended_phi_rawmem = mem; |
duke@435 | 1196 | } else { |
duke@435 | 1197 | contended_region = new (C, 3) RegionNode(3); |
phh@2423 | 1198 | contended_phi_rawmem = new (C, 3) PhiNode(contended_region, Type::MEMORY, TypeRawPtr::BOTTOM); |
duke@435 | 1199 | // Now handle the passing-too-big test. We fall into the contended |
duke@435 | 1200 | // loop-back merge point. |
phh@2423 | 1201 | contended_region ->init_req(fall_in_path, toobig_false); |
phh@2423 | 1202 | contended_phi_rawmem->init_req(fall_in_path, mem); |
duke@435 | 1203 | transform_later(contended_region); |
duke@435 | 1204 | transform_later(contended_phi_rawmem); |
duke@435 | 1205 | } |
duke@435 | 1206 | |
duke@435 | 1207 | // Load(-locked) the heap top. |
duke@435 | 1208 | // See note above concerning the control input when using a TLAB |
duke@435 | 1209 | Node *old_eden_top = UseTLAB |
phh@2423 | 1210 | ? new (C, 3) LoadPNode (ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) |
phh@2423 | 1211 | : new (C, 3) LoadPLockedNode(contended_region, contended_phi_rawmem, eden_top_adr); |
duke@435 | 1212 | |
duke@435 | 1213 | transform_later(old_eden_top); |
duke@435 | 1214 | // Add to heap top to get a new heap top |
phh@2423 | 1215 | Node *new_eden_top = new (C, 4) AddPNode(top(), old_eden_top, size_in_bytes); |
duke@435 | 1216 | transform_later(new_eden_top); |
duke@435 | 1217 | // Check for needing a GC; compare against heap end |
phh@2423 | 1218 | Node *needgc_cmp = new (C, 3) CmpPNode(new_eden_top, eden_end); |
duke@435 | 1219 | transform_later(needgc_cmp); |
phh@2423 | 1220 | Node *needgc_bol = new (C, 2) BoolNode(needgc_cmp, BoolTest::ge); |
duke@435 | 1221 | transform_later(needgc_bol); |
phh@2423 | 1222 | IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN); |
duke@435 | 1223 | transform_later(needgc_iff); |
duke@435 | 1224 | |
duke@435 | 1225 | // Plug the failing-heap-space-need-gc test into the slow-path region |
phh@2423 | 1226 | Node *needgc_true = new (C, 1) IfTrueNode(needgc_iff); |
duke@435 | 1227 | transform_later(needgc_true); |
phh@2423 | 1228 | if (initial_slow_test) { |
phh@2423 | 1229 | slow_region->init_req(need_gc_path, needgc_true); |
duke@435 | 1230 | // This completes all paths into the slow merge point |
duke@435 | 1231 | transform_later(slow_region); |
duke@435 | 1232 | } else { // No initial slow path needed! |
duke@435 | 1233 | // Just fall from the need-GC path straight into the VM call. |
phh@2423 | 1234 | slow_region = needgc_true; |
duke@435 | 1235 | } |
duke@435 | 1236 | // No need for a GC. Setup for the Store-Conditional |
phh@2423 | 1237 | Node *needgc_false = new (C, 1) IfFalseNode(needgc_iff); |
duke@435 | 1238 | transform_later(needgc_false); |
duke@435 | 1239 | |
duke@435 | 1240 | // Grab regular I/O before optional prefetch may change it. |
duke@435 | 1241 | // Slow-path does no I/O so just set it to the original I/O. |
phh@2423 | 1242 | result_phi_i_o->init_req(slow_result_path, i_o); |
duke@435 | 1243 | |
duke@435 | 1244 | i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem, |
duke@435 | 1245 | old_eden_top, new_eden_top, length); |
duke@435 | 1246 | |
phh@2423 | 1247 | // Name successful fast-path variables |
phh@2423 | 1248 | Node* fast_oop = old_eden_top; |
phh@2423 | 1249 | Node* fast_oop_ctrl; |
phh@2423 | 1250 | Node* fast_oop_rawmem; |
phh@2423 | 1251 | |
duke@435 | 1252 | // Store (-conditional) the modified eden top back down. |
duke@435 | 1253 | // StorePConditional produces flags for a test PLUS a modified raw |
duke@435 | 1254 | // memory state. |
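// For the non-TLAB case this amounts roughly to the classic LL/SC retry loop
// (illustrative pseudo-code only):
//
//   do {
//     old_top = load_locked(&eden_top);                  // step 3
//     new_top = old_top + size_in_bytes;
//     if (new_top >= eden_end) goto slow;                // step 4
//   } while (!store_conditional(&eden_top, new_top));    // step 5: retry on contention
//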
phh@2423 | 1255 | if (UseTLAB) { |
phh@2423 | 1256 | Node* store_eden_top = |
phh@2423 | 1257 | new (C, 4) StorePNode(needgc_false, contended_phi_rawmem, eden_top_adr, |
phh@2423 | 1258 | TypeRawPtr::BOTTOM, new_eden_top); |
duke@435 | 1259 | transform_later(store_eden_top); |
duke@435 | 1260 | fast_oop_ctrl = needgc_false; // No contention, so this is the fast path |
phh@2423 | 1261 | fast_oop_rawmem = store_eden_top; |
duke@435 | 1262 | } else { |
phh@2423 | 1263 | Node* store_eden_top = |
phh@2423 | 1264 | new (C, 5) StorePConditionalNode(needgc_false, contended_phi_rawmem, eden_top_adr, |
phh@2423 | 1265 | new_eden_top, fast_oop/*old_eden_top*/); |
duke@435 | 1266 | transform_later(store_eden_top); |
phh@2423 | 1267 | Node *contention_check = new (C, 2) BoolNode(store_eden_top, BoolTest::ne); |
duke@435 | 1268 | transform_later(contention_check); |
duke@435 | 1269 | store_eden_top = new (C, 1) SCMemProjNode(store_eden_top); |
duke@435 | 1270 | transform_later(store_eden_top); |
duke@435 | 1271 | |
duke@435 | 1272 | // If not using TLABs, check to see if there was contention. |
phh@2423 | 1273 | IfNode *contention_iff = new (C, 2) IfNode (needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN); |
duke@435 | 1274 | transform_later(contention_iff); |
phh@2423 | 1275 | Node *contention_true = new (C, 1) IfTrueNode(contention_iff); |
duke@435 | 1276 | transform_later(contention_true); |
duke@435 | 1277 | // If contention, loopback and try again. |
phh@2423 | 1278 | contended_region->init_req(contended_loopback_path, contention_true); |
phh@2423 | 1279 | contended_phi_rawmem->init_req(contended_loopback_path, store_eden_top); |
duke@435 | 1280 | |
duke@435 | 1281 | // Fast-path succeeded with no contention! |
phh@2423 | 1282 | Node *contention_false = new (C, 1) IfFalseNode(contention_iff); |
duke@435 | 1283 | transform_later(contention_false); |
duke@435 | 1284 | fast_oop_ctrl = contention_false; |
phh@2423 | 1285 | |
phh@2423 | 1286 | // Bump total allocated bytes for this thread |
phh@2423 | 1287 | Node* thread = new (C, 1) ThreadLocalNode(); |
phh@2423 | 1288 | transform_later(thread); |
phh@2423 | 1289 | Node* alloc_bytes_adr = basic_plus_adr(top()/*not oop*/, thread, |
phh@2423 | 1290 | in_bytes(JavaThread::allocated_bytes_offset())); |
phh@2423 | 1291 | Node* alloc_bytes = make_load(fast_oop_ctrl, store_eden_top, alloc_bytes_adr, |
phh@2423 | 1292 | 0, TypeLong::LONG, T_LONG); |
phh@2423 | 1293 | #ifdef _LP64 |
phh@2423 | 1294 | Node* alloc_size = size_in_bytes; |
phh@2423 | 1295 | #else |
phh@2423 | 1296 | Node* alloc_size = new (C, 2) ConvI2LNode(size_in_bytes); |
phh@2423 | 1297 | transform_later(alloc_size); |
phh@2423 | 1298 | #endif |
phh@2423 | 1299 | Node* new_alloc_bytes = new (C, 3) AddLNode(alloc_bytes, alloc_size); |
phh@2423 | 1300 | transform_later(new_alloc_bytes); |
phh@2423 | 1301 | fast_oop_rawmem = make_store(fast_oop_ctrl, store_eden_top, alloc_bytes_adr, |
phh@2423 | 1302 | 0, new_alloc_bytes, T_LONG); |
duke@435 | 1303 | } |
duke@435 | 1304 | |
roland@3392 | 1305 | InitializeNode* init = alloc->initialization(); |
duke@435 | 1306 | fast_oop_rawmem = initialize_object(alloc, |
duke@435 | 1307 | fast_oop_ctrl, fast_oop_rawmem, fast_oop, |
duke@435 | 1308 | klass_node, length, size_in_bytes); |
duke@435 | 1309 | |
roland@3392 | 1310 | // If initialization is performed by an array copy, any required |
roland@3392 | 1311 | // MemBarStoreStore was already added. If the object does not |
roland@3392 | 1312 | // escape, no MemBarStoreStore is needed. Otherwise we need a |
roland@3392 | 1313 | // MemBarStoreStore so that stores that initialize this object |
roland@3392 | 1314 | // can't be reordered with a subsequent store that makes this |
roland@3392 | 1315 | // object accessible by other threads. |
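// The hazard being prevented is, roughly (illustrative):
//
//   obj->field = v;    // initializing store
//   published = obj;   // publishing store
//
// Without a MemBarStoreStore between them, another thread could observe
// 'published' while the initializing store is not yet visible.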
roland@3392 | 1316 | if (init == NULL || (!init->is_complete_with_arraycopy() && !init->does_not_escape())) { |
roland@3392 | 1317 | if (init == NULL || init->req() < InitializeNode::RawStores) { |
roland@3392 | 1318 | // No InitializeNode or no stores captured by zeroing |
roland@3392 | 1319 | // elimination. Simply add the MemBarStoreStore after object |
roland@3392 | 1320 | // initialization. |
roland@3392 | 1321 | MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot, fast_oop_rawmem); |
roland@3392 | 1322 | transform_later(mb); |
roland@3392 | 1323 | |
roland@3392 | 1324 | mb->init_req(TypeFunc::Memory, fast_oop_rawmem); |
roland@3392 | 1325 | mb->init_req(TypeFunc::Control, fast_oop_ctrl); |
roland@3392 | 1326 | fast_oop_ctrl = new (C, 1) ProjNode(mb,TypeFunc::Control); |
roland@3392 | 1327 | transform_later(fast_oop_ctrl); |
roland@3392 | 1328 | fast_oop_rawmem = new (C, 1) ProjNode(mb,TypeFunc::Memory); |
roland@3392 | 1329 | transform_later(fast_oop_rawmem); |
roland@3392 | 1330 | } else { |
roland@3392 | 1331 | // Add the MemBarStoreStore after the InitializeNode so that |
roland@3392 | 1332 | // all stores performing the initialization that were moved |
roland@3392 | 1333 | // before the InitializeNode happen before the storestore |
roland@3392 | 1334 | // barrier. |
roland@3392 | 1335 | |
roland@3392 | 1336 | Node* init_ctrl = init->proj_out(TypeFunc::Control); |
roland@3392 | 1337 | Node* init_mem = init->proj_out(TypeFunc::Memory); |
roland@3392 | 1338 | |
roland@3392 | 1339 | MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot); |
roland@3392 | 1340 | transform_later(mb); |
roland@3392 | 1341 | |
roland@3392 | 1342 | Node* ctrl = new (C, 1) ProjNode(init,TypeFunc::Control); |
roland@3392 | 1343 | transform_later(ctrl); |
roland@3392 | 1344 | Node* mem = new (C, 1) ProjNode(init,TypeFunc::Memory); |
roland@3392 | 1345 | transform_later(mem); |
roland@3392 | 1346 | |
roland@3392 | 1347 | // The MemBarStoreStore depends on control and memory coming |
roland@3392 | 1348 | // from the InitializeNode |
roland@3392 | 1349 | mb->init_req(TypeFunc::Memory, mem); |
roland@3392 | 1350 | mb->init_req(TypeFunc::Control, ctrl); |
roland@3392 | 1351 | |
roland@3392 | 1352 | ctrl = new (C, 1) ProjNode(mb,TypeFunc::Control); |
roland@3392 | 1353 | transform_later(ctrl); |
roland@3392 | 1354 | mem = new (C, 1) ProjNode(mb,TypeFunc::Memory); |
roland@3392 | 1355 | transform_later(mem); |
roland@3392 | 1356 | |
roland@3392 | 1357 | // All nodes that depended on the InitializeNode for control |
roland@3392 | 1358 | // and memory must now depend on the MemBarNode that itself |
roland@3392 | 1359 | // depends on the InitializeNode |
roland@3392 | 1360 | _igvn.replace_node(init_ctrl, ctrl); |
roland@3392 | 1361 | _igvn.replace_node(init_mem, mem); |
roland@3392 | 1362 | } |
roland@3392 | 1363 | } |
roland@3392 | 1364 | |
kvn@1215 | 1365 | if (C->env()->dtrace_extended_probes()) { |
duke@435 | 1366 | // Slow-path call |
duke@435 | 1367 | int size = TypeFunc::Parms + 2; |
duke@435 | 1368 | CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(), |
duke@435 | 1369 | CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base), |
duke@435 | 1370 | "dtrace_object_alloc", |
duke@435 | 1371 | TypeRawPtr::BOTTOM); |
duke@435 | 1372 | |
duke@435 | 1373 | // Get base of thread-local storage area |
duke@435 | 1374 | Node* thread = new (C, 1) ThreadLocalNode(); |
duke@435 | 1375 | transform_later(thread); |
duke@435 | 1376 | |
duke@435 | 1377 | call->init_req(TypeFunc::Parms+0, thread); |
duke@435 | 1378 | call->init_req(TypeFunc::Parms+1, fast_oop); |
phh@2423 | 1379 | call->init_req(TypeFunc::Control, fast_oop_ctrl); |
phh@2423 | 1380 | call->init_req(TypeFunc::I_O , top()); // does no i/o |
phh@2423 | 1381 | call->init_req(TypeFunc::Memory , fast_oop_rawmem); |
phh@2423 | 1382 | call->init_req(TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr)); |
phh@2423 | 1383 | call->init_req(TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr)); |
duke@435 | 1384 | transform_later(call); |
duke@435 | 1385 | fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control); |
duke@435 | 1386 | transform_later(fast_oop_ctrl); |
duke@435 | 1387 | fast_oop_rawmem = new (C, 1) ProjNode(call,TypeFunc::Memory); |
duke@435 | 1388 | transform_later(fast_oop_rawmem); |
duke@435 | 1389 | } |
duke@435 | 1390 | |
duke@435 | 1391 | // Plug in the successful fast-path into the result merge point |
phh@2423 | 1392 | result_region ->init_req(fast_result_path, fast_oop_ctrl); |
phh@2423 | 1393 | result_phi_rawoop->init_req(fast_result_path, fast_oop); |
phh@2423 | 1394 | result_phi_i_o ->init_req(fast_result_path, i_o); |
phh@2423 | 1395 | result_phi_rawmem->init_req(fast_result_path, fast_oop_rawmem); |
duke@435 | 1396 | } else { |
duke@435 | 1397 | slow_region = ctrl; |
kvn@3396 | 1398 | result_phi_i_o = i_o; // Rename it to use in the following code. |
duke@435 | 1399 | } |
duke@435 | 1400 | |
duke@435 | 1401 | // Generate slow-path call |
duke@435 | 1402 | CallNode *call = new (C, slow_call_type->domain()->cnt()) |
duke@435 | 1403 | CallStaticJavaNode(slow_call_type, slow_call_address, |
duke@435 | 1404 | OptoRuntime::stub_name(slow_call_address), |
duke@435 | 1405 | alloc->jvms()->bci(), |
duke@435 | 1406 | TypePtr::BOTTOM); |
duke@435 | 1407 | call->init_req( TypeFunc::Control, slow_region ); |
duke@435 | 1408 | call->init_req( TypeFunc::I_O , top() ) ; // does no i/o |
duke@435 | 1409 | call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs |
duke@435 | 1410 | call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) ); |
duke@435 | 1411 | call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) ); |
duke@435 | 1412 | |
duke@435 | 1413 | call->init_req(TypeFunc::Parms+0, klass_node); |
duke@435 | 1414 | if (length != NULL) { |
duke@435 | 1415 | call->init_req(TypeFunc::Parms+1, length); |
duke@435 | 1416 | } |
duke@435 | 1417 | |
duke@435 | 1418 | // Copy debug information and adjust JVMState information, then replace |
duke@435 | 1419 | // the allocate node with the call |
duke@435 | 1420 | copy_call_debug_info((CallNode *) alloc, call); |
duke@435 | 1421 | if (!always_slow) { |
duke@435 | 1422 | call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. |
kvn@3396 | 1423 | } else { |
kvn@3396 | 1424 | // Hook i_o projection to avoid its elimination during allocation |
kvn@3396 | 1425 | // replacement (when only a slow call is generated). |
kvn@3396 | 1426 | call->set_req(TypeFunc::I_O, result_phi_i_o); |
duke@435 | 1427 | } |
kvn@1976 | 1428 | _igvn.replace_node(alloc, call); |
duke@435 | 1429 | transform_later(call); |
duke@435 | 1430 | |
duke@435 | 1431 | // Identify the output projections from the allocate node and |
duke@435 | 1432 | // adjust any references to them. |
duke@435 | 1433 | // The control and io projections look like: |
duke@435 | 1434 | // |
duke@435 | 1435 | // v---Proj(ctrl) <-----+ v---CatchProj(ctrl) |
duke@435 | 1436 | // Allocate Catch |
duke@435 | 1437 | // ^---Proj(io) <-------+ ^---CatchProj(io) |
duke@435 | 1438 | // |
duke@435 | 1439 | // We are interested in the CatchProj nodes. |
duke@435 | 1440 | // |
duke@435 | 1441 | extract_call_projections(call); |
duke@435 | 1442 | |
kvn@3396 | 1443 | // An allocate node has separate memory projections for the uses on |
kvn@3396 | 1444 | // the control and i_o paths. Replace the control memory projection with |
kvn@3396 | 1445 | // result_phi_rawmem (unless we are only generating a slow call, in |
kvn@3396 | 1446 | // which case both memory projections are combined). |
duke@435 | 1447 | if (!always_slow && _memproj_fallthrough != NULL) { |
duke@435 | 1448 | for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) { |
duke@435 | 1449 | Node *use = _memproj_fallthrough->fast_out(i); |
kvn@3847 | 1450 | _igvn.rehash_node_delayed(use); |
duke@435 | 1451 | imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem); |
duke@435 | 1452 | // back up iterator |
duke@435 | 1453 | --i; |
duke@435 | 1454 | } |
duke@435 | 1455 | } |
kvn@3396 | 1456 | // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete |
kvn@3396 | 1457 | // _memproj_catchall so we end up with a call that has only 1 memory projection. |
duke@435 | 1458 | if (_memproj_catchall != NULL ) { |
duke@435 | 1459 | if (_memproj_fallthrough == NULL) { |
duke@435 | 1460 | _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory); |
duke@435 | 1461 | transform_later(_memproj_fallthrough); |
duke@435 | 1462 | } |
duke@435 | 1463 | for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) { |
duke@435 | 1464 | Node *use = _memproj_catchall->fast_out(i); |
kvn@3847 | 1465 | _igvn.rehash_node_delayed(use); |
duke@435 | 1466 | imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough); |
duke@435 | 1467 | // back up iterator |
duke@435 | 1468 | --i; |
duke@435 | 1469 | } |
kvn@3396 | 1470 | assert(_memproj_catchall->outcnt() == 0, "all uses must be deleted"); |
kvn@3396 | 1471 | _igvn.remove_dead_node(_memproj_catchall); |
duke@435 | 1472 | } |
duke@435 | 1473 | |
kvn@3396 | 1474 | // An allocate node has separate i_o projections for the uses on the control |
kvn@3396 | 1475 | // and i_o paths. Always replace the control i_o projection with result i_o |
kvn@3396 | 1476 | // otherwise the incoming i_o becomes dead when only a slow call is generated |
kvn@3396 | 1477 | // (this differs from the memory projections, where both projections are |
kvn@3396 | 1478 | // combined in that case). |
kvn@3396 | 1479 | if (_ioproj_fallthrough != NULL) { |
duke@435 | 1480 | for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) { |
duke@435 | 1481 | Node *use = _ioproj_fallthrough->fast_out(i); |
kvn@3847 | 1482 | _igvn.rehash_node_delayed(use); |
duke@435 | 1483 | imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o); |
duke@435 | 1484 | // back up iterator |
duke@435 | 1485 | --i; |
duke@435 | 1486 | } |
duke@435 | 1487 | } |
kvn@3396 | 1488 | // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete |
kvn@3396 | 1489 | // _ioproj_catchall so we end up with a call that has only 1 i_o projection. |
duke@435 | 1490 | if (_ioproj_catchall != NULL ) { |
kvn@3396 | 1491 | if (_ioproj_fallthrough == NULL) { |
kvn@3396 | 1492 | _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O); |
kvn@3396 | 1493 | transform_later(_ioproj_fallthrough); |
kvn@3396 | 1494 | } |
duke@435 | 1495 | for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) { |
duke@435 | 1496 | Node *use = _ioproj_catchall->fast_out(i); |
kvn@3847 | 1497 | _igvn.rehash_node_delayed(use); |
duke@435 | 1498 | imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough); |
duke@435 | 1499 | // back up iterator |
duke@435 | 1500 | --i; |
duke@435 | 1501 | } |
kvn@3396 | 1502 | assert(_ioproj_catchall->outcnt() == 0, "all uses must be deleted"); |
kvn@3396 | 1503 | _igvn.remove_dead_node(_ioproj_catchall); |
duke@435 | 1504 | } |
duke@435 | 1505 | |
duke@435 | 1506 | // if we generated only a slow call, we are done |
kvn@3396 | 1507 | if (always_slow) { |
kvn@3396 | 1508 | // Now we can unhook i_o. |
kvn@3398 | 1509 | if (result_phi_i_o->outcnt() > 1) { |
kvn@3398 | 1510 | call->set_req(TypeFunc::I_O, top()); |
kvn@3398 | 1511 | } else { |
kvn@3398 | 1512 | assert(result_phi_i_o->unique_ctrl_out() == call, ""); |
kvn@3398 | 1513 | // Case of new array with negative size known during compilation. |
kvn@3398 | 1514 | // The AllocateArrayNode::Ideal() optimization disconnects the unreachable |
kvn@3398 | 1515 | // code that follows, since the call into the runtime will throw an exception. |
kvn@3398 | 1516 | // As a result there will be no users of i_o after the call. |
kvn@3398 | 1517 | // Leave i_o attached to this call to avoid problems in preceding graph. |
kvn@3398 | 1518 | } |
duke@435 | 1519 | return; |
kvn@3396 | 1520 | } |
duke@435 | 1521 | |
duke@435 | 1522 | |
duke@435 | 1523 | if (_fallthroughcatchproj != NULL) { |
duke@435 | 1524 | ctrl = _fallthroughcatchproj->clone(); |
duke@435 | 1525 | transform_later(ctrl); |
kvn@1143 | 1526 | _igvn.replace_node(_fallthroughcatchproj, result_region); |
duke@435 | 1527 | } else { |
duke@435 | 1528 | ctrl = top(); |
duke@435 | 1529 | } |
duke@435 | 1530 | Node *slow_result; |
duke@435 | 1531 | if (_resproj == NULL) { |
duke@435 | 1532 | // no uses of the allocation result |
duke@435 | 1533 | slow_result = top(); |
duke@435 | 1534 | } else { |
duke@435 | 1535 | slow_result = _resproj->clone(); |
duke@435 | 1536 | transform_later(slow_result); |
kvn@1143 | 1537 | _igvn.replace_node(_resproj, result_phi_rawoop); |
duke@435 | 1538 | } |
duke@435 | 1539 | |
duke@435 | 1540 | // Plug slow-path into result merge point |
duke@435 | 1541 | result_region ->init_req( slow_result_path, ctrl ); |
duke@435 | 1542 | result_phi_rawoop->init_req( slow_result_path, slow_result); |
duke@435 | 1543 | result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough ); |
duke@435 | 1544 | transform_later(result_region); |
duke@435 | 1545 | transform_later(result_phi_rawoop); |
duke@435 | 1546 | transform_later(result_phi_rawmem); |
duke@435 | 1547 | transform_later(result_phi_i_o); |
duke@435 | 1548 | // This completes all paths into the result merge point |
duke@435 | 1549 | } |
duke@435 | 1550 | |
duke@435 | 1551 | |
duke@435 | 1552 | // Helper for PhaseMacroExpand::expand_allocate_common. |
duke@435 | 1553 | // Initializes the newly-allocated storage. |
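// The header laid out below is, roughly (illustrative):
//   oopDesc::mark_offset_in_bytes():  mark word (prototype header)
//   oopDesc::klass_offset_in_bytes(): klass
//   arrays also get the length at arrayOopDesc::length_offset_in_bytes()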
duke@435 | 1554 | Node* |
duke@435 | 1555 | PhaseMacroExpand::initialize_object(AllocateNode* alloc, |
duke@435 | 1556 | Node* control, Node* rawmem, Node* object, |
duke@435 | 1557 | Node* klass_node, Node* length, |
duke@435 | 1558 | Node* size_in_bytes) { |
duke@435 | 1559 | InitializeNode* init = alloc->initialization(); |
duke@435 | 1560 | // Store the klass & mark bits |
duke@435 | 1561 | Node* mark_node = NULL; |
duke@435 | 1562 | // For now only enable fast locking for non-array types |
duke@435 | 1563 | if (UseBiasedLocking && (length == NULL)) { |
stefank@3391 | 1564 | mark_node = make_load(control, rawmem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeRawPtr::BOTTOM, T_ADDRESS); |
duke@435 | 1565 | } else { |
duke@435 | 1566 | mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype())); |
duke@435 | 1567 | } |
duke@435 | 1568 | rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); |
coleenp@548 | 1569 | |
coleenp@4037 | 1570 | rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); |
duke@435 | 1571 | int header_size = alloc->minimum_header_size(); // conservatively small |
duke@435 | 1572 | |
duke@435 | 1573 | // Array length |
duke@435 | 1574 | if (length != NULL) { // Arrays need length field |
duke@435 | 1575 | rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT); |
duke@435 | 1576 | // conservatively small header size: |
coleenp@548 | 1577 | header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE); |
duke@435 | 1578 | ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass(); |
duke@435 | 1579 | if (k->is_array_klass()) // we know the exact header size in most cases: |
duke@435 | 1580 | header_size = Klass::layout_helper_header_size(k->layout_helper()); |
duke@435 | 1581 | } |
duke@435 | 1582 | |
duke@435 | 1583 | // Clear the object body, if necessary. |
duke@435 | 1584 | if (init == NULL) { |
duke@435 | 1585 | // The init has somehow disappeared; be cautious and clear everything. |
duke@435 | 1586 | // |
duke@435 | 1587 | // This can happen if a node is allocated but an uncommon trap occurs |
duke@435 | 1588 | // immediately. In this case, the Initialize gets associated with the |
duke@435 | 1589 | // trap, and may be placed in a different (outer) loop, if the Allocate |
duke@435 | 1590 | // is in a loop. If (this is rare) the inner loop gets unrolled, then |
duke@435 | 1591 | // there can be two Allocates to one Initialize. The answer in all these |
duke@435 | 1592 | // edge cases is safety first. It is always safe to clear immediately |
duke@435 | 1593 | // within an Allocate, and then (maybe or maybe not) clear some more later. |
duke@435 | 1594 | if (!ZeroTLAB) |
duke@435 | 1595 | rawmem = ClearArrayNode::clear_memory(control, rawmem, object, |
duke@435 | 1596 | header_size, size_in_bytes, |
duke@435 | 1597 | &_igvn); |
duke@435 | 1598 | } else { |
duke@435 | 1599 | if (!init->is_complete()) { |
duke@435 | 1600 | // Try to win by zeroing only what the init does not store. |
duke@435 | 1601 | // We can also try to do some peephole optimizations, |
duke@435 | 1602 | // such as combining some adjacent subword stores. |
duke@435 | 1603 | rawmem = init->complete_stores(control, rawmem, object, |
duke@435 | 1604 | header_size, size_in_bytes, &_igvn); |
duke@435 | 1605 | } |
duke@435 | 1606 | // We have no more use for this link, since the AllocateNode goes away: |
duke@435 | 1607 | init->set_req(InitializeNode::RawAddress, top()); |
duke@435 | 1608 | // (If we keep the link, it just confuses the register allocator, |
duke@435 | 1609 | // who thinks he sees a real use of the address by the membar.) |
duke@435 | 1610 | } |
duke@435 | 1611 | |
duke@435 | 1612 | return rawmem; |
duke@435 | 1613 | } |
duke@435 | 1614 | |
duke@435 | 1615 | // Generate prefetch instructions for next allocations. |
duke@435 | 1616 | Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, |
duke@435 | 1617 | Node*& contended_phi_rawmem, |
duke@435 | 1618 | Node* old_eden_top, Node* new_eden_top, |
duke@435 | 1619 | Node* length) { |
kvn@1802 | 1620 | enum { fall_in_path = 1, pf_path = 2 }; |
duke@435 | 1621 | if( UseTLAB && AllocatePrefetchStyle == 2 ) { |
duke@435 | 1622 | // Generate prefetch allocation with watermark check. |
duke@435 | 1623 | // As an allocation hits the watermark, we will prefetch starting |
duke@435 | 1624 | // at a "distance" away from watermark. |
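// Roughly (illustrative pseudo-code):
//
//   if (new_eden_top >= tlab_pf_top) {               // crossed the watermark?
//     tlab_pf_top += AllocatePrefetchDistance;       // move the watermark up
//     for (uint i = 0; i < lines; i++)
//       prefetch(tlab_pf_top + i * AllocatePrefetchStepSize);
//   }
//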
duke@435 | 1625 | |
duke@435 | 1626 | Node *pf_region = new (C, 3) RegionNode(3); |
duke@435 | 1627 | Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, |
duke@435 | 1628 | TypeRawPtr::BOTTOM ); |
duke@435 | 1629 | // I/O is used for Prefetch |
duke@435 | 1630 | Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO ); |
duke@435 | 1631 | |
duke@435 | 1632 | Node *thread = new (C, 1) ThreadLocalNode(); |
duke@435 | 1633 | transform_later(thread); |
duke@435 | 1634 | |
duke@435 | 1635 | Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread, |
duke@435 | 1636 | _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) ); |
duke@435 | 1637 | transform_later(eden_pf_adr); |
duke@435 | 1638 | |
duke@435 | 1639 | Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false, |
duke@435 | 1640 | contended_phi_rawmem, eden_pf_adr, |
duke@435 | 1641 | TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); |
duke@435 | 1642 | transform_later(old_pf_wm); |
duke@435 | 1643 | |
duke@435 | 1644 | // check against new_eden_top |
duke@435 | 1645 | Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm ); |
duke@435 | 1646 | transform_later(need_pf_cmp); |
duke@435 | 1647 | Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge ); |
duke@435 | 1648 | transform_later(need_pf_bol); |
duke@435 | 1649 | IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol, |
duke@435 | 1650 | PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN ); |
duke@435 | 1651 | transform_later(need_pf_iff); |
duke@435 | 1652 | |
duke@435 | 1653 | // true node, add prefetchdistance |
duke@435 | 1654 | Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff ); |
duke@435 | 1655 | transform_later(need_pf_true); |
duke@435 | 1656 | |
duke@435 | 1657 | Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff ); |
duke@435 | 1658 | transform_later(need_pf_false); |
duke@435 | 1659 | |
duke@435 | 1660 | Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm, |
duke@435 | 1661 | _igvn.MakeConX(AllocatePrefetchDistance) ); |
duke@435 | 1662 | transform_later(new_pf_wmt ); |
duke@435 | 1663 | new_pf_wmt->set_req(0, need_pf_true); |
duke@435 | 1664 | |
duke@435 | 1665 | Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true, |
duke@435 | 1666 | contended_phi_rawmem, eden_pf_adr, |
duke@435 | 1667 | TypeRawPtr::BOTTOM, new_pf_wmt ); |
duke@435 | 1668 | transform_later(store_new_wmt); |
duke@435 | 1669 | |
duke@435 | 1670 | // adding prefetches |
duke@435 | 1671 | pf_phi_abio->init_req( fall_in_path, i_o ); |
duke@435 | 1672 | |
duke@435 | 1673 | Node *prefetch_adr; |
duke@435 | 1674 | Node *prefetch; |
duke@435 | 1675 | uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize; |
duke@435 | 1676 | uint step_size = AllocatePrefetchStepSize; |
duke@435 | 1677 | uint distance = 0; |
duke@435 | 1678 | |
duke@435 | 1679 | for ( uint i = 0; i < lines; i++ ) { |
duke@435 | 1680 | prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt, |
duke@435 | 1681 | _igvn.MakeConX(distance) ); |
duke@435 | 1682 | transform_later(prefetch_adr); |
kvn@3052 | 1683 | prefetch = new (C, 3) PrefetchAllocationNode( i_o, prefetch_adr ); |
duke@435 | 1684 | transform_later(prefetch); |
duke@435 | 1685 | distance += step_size; |
duke@435 | 1686 | i_o = prefetch; |
duke@435 | 1687 | } |
duke@435 | 1688 | pf_phi_abio->set_req( pf_path, i_o ); |
duke@435 | 1689 | |
duke@435 | 1690 | pf_region->init_req( fall_in_path, need_pf_false ); |
duke@435 | 1691 | pf_region->init_req( pf_path, need_pf_true ); |
duke@435 | 1692 | |
duke@435 | 1693 | pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem ); |
duke@435 | 1694 | pf_phi_rawmem->init_req( pf_path, store_new_wmt ); |
duke@435 | 1695 | |
duke@435 | 1696 | transform_later(pf_region); |
duke@435 | 1697 | transform_later(pf_phi_rawmem); |
duke@435 | 1698 | transform_later(pf_phi_abio); |
duke@435 | 1699 | |
duke@435 | 1700 | needgc_false = pf_region; |
duke@435 | 1701 | contended_phi_rawmem = pf_phi_rawmem; |
duke@435 | 1702 | i_o = pf_phi_abio; |
kvn@1802 | 1703 | } else if( UseTLAB && AllocatePrefetchStyle == 3 ) { |
kvn@3052 | 1704 | // Insert a prefetch for each allocation. |
kvn@3052 | 1705 | // This code is used for Sparc with BIS. |
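// Roughly (illustrative pseudo-code): prefetch ahead of the new allocation,
// aligned down to the step size so BIS stores cover whole cache lines:
//
//   cache_adr = (old_eden_top + AllocatePrefetchDistance) & ~(step_size - 1);
//   for (uint i = 0; i < lines; i++)
//     prefetch(cache_adr + i * step_size);
//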
kvn@1802 | 1706 | Node *pf_region = new (C, 3) RegionNode(3); |
kvn@1802 | 1707 | Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, |
kvn@1802 | 1708 | TypeRawPtr::BOTTOM ); |
kvn@1802 | 1709 | |
kvn@3052 | 1710 | // Generate several prefetch instructions. |
kvn@3052 | 1711 | uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines; |
kvn@1802 | 1712 | uint step_size = AllocatePrefetchStepSize; |
kvn@1802 | 1713 | uint distance = AllocatePrefetchDistance; |
kvn@1802 | 1714 | |
kvn@1802 | 1715 | // Next cache address. |
kvn@1802 | 1716 | Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top, |
kvn@1802 | 1717 | _igvn.MakeConX(distance)); |
kvn@1802 | 1718 | transform_later(cache_adr); |
kvn@1802 | 1719 | cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr); |
kvn@1802 | 1720 | transform_later(cache_adr); |
kvn@1802 | 1721 | Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1)); |
kvn@1802 | 1722 | cache_adr = new (C, 3) AndXNode(cache_adr, mask); |
kvn@1802 | 1723 | transform_later(cache_adr); |
kvn@1802 | 1724 | cache_adr = new (C, 2) CastX2PNode(cache_adr); |
kvn@1802 | 1725 | transform_later(cache_adr); |
kvn@1802 | 1726 | |
kvn@1802 | 1727 | // Prefetch |
kvn@3052 | 1728 | Node *prefetch = new (C, 3) PrefetchAllocationNode( contended_phi_rawmem, cache_adr ); |
kvn@1802 | 1729 | prefetch->set_req(0, needgc_false); |
kvn@1802 | 1730 | transform_later(prefetch); |
kvn@1802 | 1731 | contended_phi_rawmem = prefetch; |
kvn@1802 | 1732 | Node *prefetch_adr; |
kvn@1802 | 1733 | distance = step_size; |
kvn@1802 | 1734 | for ( uint i = 1; i < lines; i++ ) { |
kvn@1802 | 1735 | prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr, |
kvn@1802 | 1736 | _igvn.MakeConX(distance) ); |
kvn@1802 | 1737 | transform_later(prefetch_adr); |
kvn@3052 | 1738 | prefetch = new (C, 3) PrefetchAllocationNode( contended_phi_rawmem, prefetch_adr ); |
kvn@1802 | 1739 | transform_later(prefetch); |
kvn@1802 | 1740 | distance += step_size; |
kvn@1802 | 1741 | contended_phi_rawmem = prefetch; |
kvn@1802 | 1742 | } |
duke@435 | 1743 | } else if( AllocatePrefetchStyle > 0 ) { |
duke@435 | 1744 | // Insert a prefetch for each allocation only on the fast-path |
duke@435 | 1745 | Node *prefetch_adr; |
duke@435 | 1746 | Node *prefetch; |
kvn@3052 | 1747 | // Generate several prefetch instructions. |
kvn@3052 | 1748 | uint lines = (length != NULL) ? AllocatePrefetchLines : AllocateInstancePrefetchLines; |
duke@435 | 1749 | uint step_size = AllocatePrefetchStepSize; |
duke@435 | 1750 | uint distance = AllocatePrefetchDistance; |
duke@435 | 1751 | for ( uint i = 0; i < lines; i++ ) { |
duke@435 | 1752 | prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top, |
duke@435 | 1753 | _igvn.MakeConX(distance) ); |
duke@435 | 1754 | transform_later(prefetch_adr); |
kvn@3052 | 1755 | prefetch = new (C, 3) PrefetchAllocationNode( i_o, prefetch_adr ); |
duke@435 | 1756 | // Do not let it float too high, since if eden_top == eden_end, |
duke@435 | 1757 | // both might be null. |
duke@435 | 1758 | if( i == 0 ) { // Set control for first prefetch, next follows it |
duke@435 | 1759 | prefetch->init_req(0, needgc_false); |
duke@435 | 1760 | } |
duke@435 | 1761 | transform_later(prefetch); |
duke@435 | 1762 | distance += step_size; |
duke@435 | 1763 | i_o = prefetch; |
duke@435 | 1764 | } |
duke@435 | 1765 | } |
duke@435 | 1766 | return i_o; |
duke@435 | 1767 | } |
duke@435 | 1768 | |
duke@435 | 1769 | |
duke@435 | 1770 | void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) { |
duke@435 | 1771 | expand_allocate_common(alloc, NULL, |
duke@435 | 1772 | OptoRuntime::new_instance_Type(), |
duke@435 | 1773 | OptoRuntime::new_instance_Java()); |
duke@435 | 1774 | } |
duke@435 | 1775 | |
duke@435 | 1776 | void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) { |
duke@435 | 1777 | Node* length = alloc->in(AllocateNode::ALength); |
kvn@3157 | 1778 | InitializeNode* init = alloc->initialization(); |
kvn@3157 | 1779 | Node* klass_node = alloc->in(AllocateNode::KlassNode); |
kvn@3157 | 1780 | ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass(); |
kvn@3157 | 1781 | address slow_call_address; // Address of slow call |
kvn@3157 | 1782 | if (init != NULL && init->is_complete_with_arraycopy() && |
kvn@3157 | 1783 | k->is_type_array_klass()) { |
kvn@3157 | 1784 | // Don't zero type array during slow allocation in VM since |
kvn@3157 | 1785 | // it will be initialized later by arraycopy in compiled code. |
kvn@3157 | 1786 | slow_call_address = OptoRuntime::new_array_nozero_Java(); |
kvn@3157 | 1787 | } else { |
kvn@3157 | 1788 | slow_call_address = OptoRuntime::new_array_Java(); |
kvn@3157 | 1789 | } |
duke@435 | 1790 | expand_allocate_common(alloc, length, |
duke@435 | 1791 | OptoRuntime::new_array_Type(), |
kvn@3157 | 1792 | slow_call_address); |
duke@435 | 1793 | } |
duke@435 | 1794 | |
kvn@3406 | 1795 | //-------------------mark_eliminated_box---------------------------------- |
kvn@3406 | 1796 | // |
kvn@2951 | 1797 | // During EA, obj may point to several objects, but after a few ideal graph |
kvn@2951 | 1798 | // transformations (CCP) it may point to only one non-escaping object |
kvn@2951 | 1799 | // (though still through a phi); the corresponding locks and unlocks will then |
kvn@2951 | 1800 | // be marked for elimination. Later obj could be replaced with a new node (a |
kvn@2951 | 1801 | // new phi) which does not have escape information. And later, after some |
kvn@2951 | 1802 | // graph reshaping, other locks and unlocks (which were not marked for |
kvn@2951 | 1803 | // elimination before) get connected to this new obj (phi), but they still |
kvn@2951 | 1804 | // will not be marked for elimination since the new obj has no escape |
kvn@2951 | 1805 | // information. So mark all associated (same box and obj) lock and unlock |
kvn@2951 | 1806 | // nodes for elimination if some of them were marked already. |
kvn@3406 | 1807 | void PhaseMacroExpand::mark_eliminated_box(Node* oldbox, Node* obj) { |
kvn@3421 | 1808 | if (oldbox->as_BoxLock()->is_eliminated()) |
kvn@3421 | 1809 | return; // This BoxLock node was processed already. |
kvn@3406 | 1810 | |
kvn@3421 | 1811 | // New implementation (EliminateNestedLocks) has separate BoxLock |
kvn@3421 | 1812 | // node for each locked region so mark all associated locks/unlocks as |
kvn@3421 | 1813 | // eliminated even if different objects are referenced in one locked region |
kvn@3421 | 1814 | // (for example, OSR compilation of nested loop inside locked scope). |
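// For example (illustrative Java shape):
//
//   synchronized (obj) {     // one BoxLock for this whole locked region
//     while (...) { ... }    // an OSR entry inside the loop still belongs
//   }                        // to the same region and box
//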
kvn@3421 | 1815 | if (EliminateNestedLocks || |
kvn@3406 | 1816 | oldbox->as_BoxLock()->is_simple_lock_region(NULL, obj)) { |
kvn@3406 | 1817 | // Box is used only in one lock region. Mark this box as eliminated. |
kvn@3406 | 1818 | _igvn.hash_delete(oldbox); |
kvn@3406 | 1819 | oldbox->as_BoxLock()->set_eliminated(); // This changes box's hash value |
kvn@3406 | 1820 | _igvn.hash_insert(oldbox); |
kvn@3406 | 1821 | |
kvn@3406 | 1822 | for (uint i = 0; i < oldbox->outcnt(); i++) { |
kvn@3406 | 1823 | Node* u = oldbox->raw_out(i); |
kvn@3406 | 1824 | if (u->is_AbstractLock() && !u->as_AbstractLock()->is_non_esc_obj()) { |
kvn@3406 | 1825 | AbstractLockNode* alock = u->as_AbstractLock(); |
kvn@3406 | 1826 | // Check lock's box since box could be referenced by Lock's debug info. |
kvn@3406 | 1827 | if (alock->box_node() == oldbox) { |
kvn@3406 | 1828 | // Mark eliminated all related locks and unlocks. |
kvn@3406 | 1829 | alock->set_non_esc_obj(); |
kvn@3406 | 1830 | } |
kvn@3406 | 1831 | } |
kvn@3406 | 1832 | } |
kvn@2951 | 1833 | return; |
kvn@501 | 1834 | } |
kvn@3406 | 1835 | |
kvn@3406 | 1836 | // Create new "eliminated" BoxLock node and use it in monitor debug info |
kvn@3406 | 1837 | // instead of oldbox for the same object. |
kvn@3419 | 1838 | BoxLockNode* newbox = oldbox->clone()->as_BoxLock(); |
kvn@3406 | 1839 | |
kvn@3406 | 1840 | // Note: BoxLock node is marked eliminated only here and it is used |
kvn@3406 | 1841 | // to indicate that all associated lock and unlock nodes are marked |
kvn@3406 | 1842 | // for elimination. |
kvn@3406 | 1843 | newbox->set_eliminated(); |
kvn@3406 | 1844 | transform_later(newbox); |
kvn@3406 | 1845 | |
kvn@3406 | 1846 | // Replace old box node with new box for all users of the same object. |
kvn@3406 | 1847 | for (uint i = 0; i < oldbox->outcnt();) { |
kvn@3406 | 1848 | bool next_edge = true; |
kvn@3406 | 1849 | |
kvn@3406 | 1850 | Node* u = oldbox->raw_out(i); |
kvn@3406 | 1851 | if (u->is_AbstractLock()) { |
kvn@3406 | 1852 | AbstractLockNode* alock = u->as_AbstractLock(); |
kvn@3407 | 1853 | if (alock->box_node() == oldbox && alock->obj_node()->eqv_uncast(obj)) { |
kvn@3406 | 1854 | // Replace Box and mark eliminated all related locks and unlocks. |
kvn@3406 | 1855 | alock->set_non_esc_obj(); |
kvn@3847 | 1856 | _igvn.rehash_node_delayed(alock); |
kvn@3406 | 1857 | alock->set_box_node(newbox); |
kvn@3406 | 1858 | next_edge = false; |
kvn@3406 | 1859 | } |
kvn@3406 | 1860 | } |
kvn@3407 | 1861 | if (u->is_FastLock() && u->as_FastLock()->obj_node()->eqv_uncast(obj)) { |
kvn@3406 | 1862 | FastLockNode* flock = u->as_FastLock(); |
kvn@3406 | 1863 | assert(flock->box_node() == oldbox, "sanity"); |
kvn@3847 | 1864 | _igvn.rehash_node_delayed(flock); |
kvn@3406 | 1865 | flock->set_box_node(newbox); |
kvn@3406 | 1866 | next_edge = false; |
kvn@3406 | 1867 | } |
kvn@3406 | 1868 | |
kvn@3406 | 1869 | // Replace old box in monitor debug info. |
kvn@3406 | 1870 | if (u->is_SafePoint() && u->as_SafePoint()->jvms()) { |
kvn@3406 | 1871 | SafePointNode* sfn = u->as_SafePoint(); |
kvn@3406 | 1872 | JVMState* youngest_jvms = sfn->jvms(); |
kvn@3406 | 1873 | int max_depth = youngest_jvms->depth(); |
kvn@3406 | 1874 | for (int depth = 1; depth <= max_depth; depth++) { |
kvn@3406 | 1875 | JVMState* jvms = youngest_jvms->of_depth(depth); |
kvn@3406 | 1876 | int num_mon = jvms->nof_monitors(); |
kvn@3406 | 1877 | // Loop over monitors |
kvn@3406 | 1878 | for (int idx = 0; idx < num_mon; idx++) { |
kvn@3406 | 1879 | Node* obj_node = sfn->monitor_obj(jvms, idx); |
kvn@3406 | 1880 | Node* box_node = sfn->monitor_box(jvms, idx); |
kvn@3407 | 1881 | if (box_node == oldbox && obj_node->eqv_uncast(obj)) { |
kvn@3406 | 1882 | int j = jvms->monitor_box_offset(idx); |
kvn@3847 | 1883 | _igvn.replace_input_of(u, j, newbox); |
kvn@3406 | 1884 | next_edge = false; |
kvn@3406 | 1885 | } |
kvn@3406 | 1886 | } |
kvn@3406 | 1887 | } |
kvn@3406 | 1888 | } |
kvn@3406 | 1889 | if (next_edge) i++; |
kvn@3406 | 1890 | } |
kvn@3406 | 1891 | } |
kvn@3406 | 1892 | |
kvn@3406 | 1893 | //-----------------------mark_eliminated_locking_nodes----------------------- |
kvn@3406 | 1894 | void PhaseMacroExpand::mark_eliminated_locking_nodes(AbstractLockNode *alock) { |
kvn@3406 | 1895 | if (EliminateNestedLocks) { |
kvn@3406 | 1896 | if (alock->is_nested()) { |
kvn@3406 | 1897 | assert(alock->box_node()->as_BoxLock()->is_eliminated(), "sanity"); |
kvn@3406 | 1898 | return; |
kvn@3406 | 1899 | } else if (!alock->is_non_esc_obj()) { // Not eliminated or coarsened |
kvn@3406 | 1900 | // Only Lock node has JVMState needed here. |
kvn@3406 | 1901 | if (alock->jvms() != NULL && alock->as_Lock()->is_nested_lock_region()) { |
kvn@3406 | 1902 | // Mark eliminated related nested locks and unlocks. |
kvn@3406 | 1903 | Node* obj = alock->obj_node(); |
kvn@3406 | 1904 | BoxLockNode* box_node = alock->box_node()->as_BoxLock(); |
kvn@3406 | 1905 | assert(!box_node->is_eliminated(), "should not be marked yet"); |
kvn@2951 | 1906 | // Note: BoxLock node is marked eliminated only here |
kvn@2951 | 1907 | // and it is used to indicate that all associated lock |
kvn@2951 | 1908 | // and unlock nodes are marked for elimination. |
kvn@3406 | 1909 | box_node->set_eliminated(); // Box's hash is always NO_HASH here |
kvn@3406 | 1910 | for (uint i = 0; i < box_node->outcnt(); i++) { |
kvn@3406 | 1911 | Node* u = box_node->raw_out(i); |
kvn@3406 | 1912 | if (u->is_AbstractLock()) { |
kvn@3406 | 1913 | alock = u->as_AbstractLock(); |
kvn@3406 | 1914 | if (alock->box_node() == box_node) { |
kvn@3406 | 1915 | // Verify that this Box is referenced only by related locks. |
kvn@3407 | 1916 | assert(alock->obj_node()->eqv_uncast(obj), ""); |
kvn@3406 | 1917 | // Mark all related locks and unlocks. |
kvn@3406 | 1918 | alock->set_nested(); |
kvn@3406 | 1919 | } |
kvn@3406 | 1920 | } |
kvn@3406 | 1921 | } |
kvn@3406 | 1922 | } |
kvn@3406 | 1923 | return; |
kvn@3406 | 1924 | } |
kvn@3406 | 1925 | // Process locks for non escaping object |
kvn@3406 | 1926 | assert(alock->is_non_esc_obj(), ""); |
kvn@3406 | 1927 | } // EliminateNestedLocks |
kvn@895 | 1928 | |
kvn@3406 | 1929 | if (alock->is_non_esc_obj()) { // Lock is used for non escaping object |
kvn@3406 | 1930 | // Look for all locks of this object and mark them and |
kvn@3406 | 1931 | // corresponding BoxLock nodes as eliminated. |
kvn@3406 | 1932 | Node* obj = alock->obj_node(); |
kvn@3406 | 1933 | for (uint j = 0; j < obj->outcnt(); j++) { |
kvn@3406 | 1934 | Node* o = obj->raw_out(j); |
kvn@3407 | 1935 | if (o->is_AbstractLock() && |
kvn@3407 | 1936 | o->as_AbstractLock()->obj_node()->eqv_uncast(obj)) { |
kvn@3406 | 1937 | alock = o->as_AbstractLock(); |
kvn@3406 | 1938 | Node* box = alock->box_node(); |
kvn@3406 | 1939 | // Replace old box node with new eliminated box for all users |
kvn@3406 | 1940 | // of the same object and mark related locks as eliminated. |
kvn@3406 | 1941 | mark_eliminated_box(box, obj); |
kvn@3406 | 1942 | } |
kvn@3406 | 1943 | } |
kvn@3406 | 1944 | } |
kvn@2951 | 1945 | } |
kvn@501 | 1946 | |
kvn@2951 | 1947 | // we have determined that this lock/unlock can be eliminated, we simply |
kvn@2951 | 1948 | // eliminate the node without expanding it. |
kvn@2951 | 1949 | // |
kvn@2951 | 1950 | // Note: The membar's associated with the lock/unlock are currently not |
kvn@2951 | 1951 | // eliminated. This should be investigated as a future enhancement. |
kvn@2951 | 1952 | // |
kvn@2951 | 1953 | bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) { |
kvn@2951 | 1954 | |
kvn@2951 | 1955 | if (!alock->is_eliminated()) { |
kvn@2951 | 1956 | return false; |
kvn@2951 | 1957 | } |
kvn@2951 | 1958 | #ifdef ASSERT |
kvn@3406 | 1959 | if (!alock->is_coarsened()) { |
kvn@2951 | 1960 | // Check that new "eliminated" BoxLock node is created. |
kvn@2951 | 1961 | BoxLockNode* oldbox = alock->box_node()->as_BoxLock(); |
kvn@2951 | 1962 | assert(oldbox->is_eliminated(), "should be done already"); |
kvn@2951 | 1963 | } |
kvn@2951 | 1964 | #endif |
never@1515 | 1965 | CompileLog* log = C->log(); |
never@1515 | 1966 | if (log != NULL) { |
never@1515 | 1967 | log->head("eliminate_lock lock='%d'", |
never@1515 | 1968 | alock->is_Lock()); |
never@1515 | 1969 | JVMState* p = alock->jvms(); |
never@1515 | 1970 | while (p != NULL) { |
never@1515 | 1971 | log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); |
never@1515 | 1972 | p = p->caller(); |
never@1515 | 1973 | } |
never@1515 | 1974 | log->tail("eliminate_lock"); |
never@1515 | 1975 | } |
never@1515 | 1976 | |
kvn@501 | 1977 | #ifndef PRODUCT |
kvn@501 | 1978 | if (PrintEliminateLocks) { |
kvn@501 | 1979 | if (alock->is_Lock()) { |
kvn@3311 | 1980 | tty->print_cr("++++ Eliminated: %d Lock", alock->_idx); |
kvn@501 | 1981 | } else { |
kvn@3311 | 1982 | tty->print_cr("++++ Eliminated: %d Unlock", alock->_idx); |
kvn@501 | 1983 | } |
kvn@501 | 1984 | } |
kvn@501 | 1985 | #endif |
kvn@501 | 1986 | |
kvn@501 | 1987 | Node* mem = alock->in(TypeFunc::Memory); |
kvn@501 | 1988 | Node* ctrl = alock->in(TypeFunc::Control); |
kvn@501 | 1989 | |
kvn@501 | 1990 | extract_call_projections(alock); |
kvn@501 | 1991 | // There are 2 projections from the lock. The lock node will |
kvn@501 | 1992 | // be deleted when its last use is subsumed below. |
kvn@501 | 1993 | assert(alock->outcnt() == 2 && |
kvn@501 | 1994 | _fallthroughproj != NULL && |
kvn@501 | 1995 | _memproj_fallthrough != NULL, |
kvn@501 | 1996 | "Unexpected projections from Lock/Unlock"); |
kvn@501 | 1997 | |
kvn@501 | 1998 | Node* fallthroughproj = _fallthroughproj; |
kvn@501 | 1999 | Node* memproj_fallthrough = _memproj_fallthrough; |
duke@435 | 2000 | |
duke@435 | 2001 | // The memory projection from a lock/unlock is RawMem |
duke@435 | 2002 | // The input to a Lock is merged memory, so extract its RawMem input |
duke@435 | 2003 | // (unless the MergeMem has been optimized away.) |
duke@435 | 2004 | if (alock->is_Lock()) { |
roland@3047 | 2005 | // Search for MemBarAcquireLock node and delete it also. |
kvn@501 | 2006 | MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar(); |
roland@3047 | 2007 | assert(membar != NULL && membar->Opcode() == Op_MemBarAcquireLock, ""); |
kvn@501 | 2008 | Node* ctrlproj = membar->proj_out(TypeFunc::Control); |
kvn@501 | 2009 | Node* memproj = membar->proj_out(TypeFunc::Memory); |
kvn@1143 | 2010 | _igvn.replace_node(ctrlproj, fallthroughproj); |
kvn@1143 | 2011 | _igvn.replace_node(memproj, memproj_fallthrough); |
kvn@895 | 2012 | |
kvn@895 | 2013 | // Delete FastLock node also if this Lock node is unique user |
kvn@895 | 2014 | // (a loop peeling may clone a Lock node). |
kvn@895 | 2015 | Node* flock = alock->as_Lock()->fastlock_node(); |
kvn@895 | 2016 | if (flock->outcnt() == 1) { |
kvn@895 | 2017 | assert(flock->unique_out() == alock, "sanity"); |
kvn@1143 | 2018 | _igvn.replace_node(flock, top()); |
kvn@895 | 2019 | } |
duke@435 | 2020 | } |
duke@435 | 2021 | |
roland@3047 | 2022 | // Search for MemBarReleaseLock node and delete it also. |
kvn@501 | 2023 | if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() && |
kvn@501 | 2024 | ctrl->in(0)->is_MemBar()) { |
kvn@501 | 2025 | MemBarNode* membar = ctrl->in(0)->as_MemBar(); |
roland@3047 | 2026 | assert(membar->Opcode() == Op_MemBarReleaseLock && |
kvn@501 | 2027 | mem->is_Proj() && membar == mem->in(0), ""); |
kvn@1143 | 2028 | _igvn.replace_node(fallthroughproj, ctrl); |
kvn@1143 | 2029 | _igvn.replace_node(memproj_fallthrough, mem); |
kvn@501 | 2030 | fallthroughproj = ctrl; |
kvn@501 | 2031 | memproj_fallthrough = mem; |
kvn@501 | 2032 | ctrl = membar->in(TypeFunc::Control); |
kvn@501 | 2033 | mem = membar->in(TypeFunc::Memory); |
kvn@501 | 2034 | } |
kvn@501 | 2035 | |
kvn@1143 | 2036 | _igvn.replace_node(fallthroughproj, ctrl); |
kvn@1143 | 2037 | _igvn.replace_node(memproj_fallthrough, mem); |
kvn@501 | 2038 | return true; |
duke@435 | 2039 | } |
duke@435 | 2040 | |
duke@435 | 2041 | |
duke@435 | 2042 | //------------------------------expand_lock_node---------------------- |
duke@435 | 2043 | void PhaseMacroExpand::expand_lock_node(LockNode *lock) { |
duke@435 | 2044 | |
duke@435 | 2045 | Node* ctrl = lock->in(TypeFunc::Control); |
duke@435 | 2046 | Node* mem = lock->in(TypeFunc::Memory); |
duke@435 | 2047 | Node* obj = lock->obj_node(); |
duke@435 | 2048 | Node* box = lock->box_node(); |
kvn@501 | 2049 | Node* flock = lock->fastlock_node(); |
duke@435 | 2050 | |
kvn@3419 | 2051 | assert(!box->as_BoxLock()->is_eliminated(), "sanity"); |
kvn@3406 | 2052 | |
duke@435 | 2053 | // Make the merge point |
kvn@855 | 2054 | Node *region; |
kvn@855 | 2055 | Node *mem_phi; |
kvn@855 | 2056 | Node *slow_path; |
duke@435 | 2057 | |
kvn@855 | 2058 | if (UseOptoBiasInlining) { |
kvn@855 | 2059 | /* |
twisti@1040 | 2060 | * See the full description in MacroAssembler::biased_locking_enter(). |
kvn@855 | 2061 | * |
kvn@855 | 2062 | * if( (mark_word & biased_lock_mask) == biased_lock_pattern ) { |
kvn@855 | 2063 | * // The object is biased. |
kvn@855 | 2064 | * proto_node = klass->prototype_header; |
kvn@855 | 2065 | * o_node = thread | proto_node; |
kvn@855 | 2066 | * x_node = o_node ^ mark_word; |
kvn@855 | 2067 | * if( (x_node & ~age_mask) == 0 ) { // Biased to the current thread ? |
kvn@855 | 2068 | * // Done. |
kvn@855 | 2069 | * } else { |
kvn@855 | 2070 | * if( (x_node & biased_lock_mask) != 0 ) { |
kvn@855 | 2071 | * // The klass's prototype header is no longer biased. |
kvn@855 | 2072 | * cas(&mark_word, mark_word, proto_node) |
kvn@855 | 2073 | * goto cas_lock; |
kvn@855 | 2074 | * } else { |
kvn@855 | 2075 | * // The klass's prototype header is still biased. |
kvn@855 | 2076 | * if( (x_node & epoch_mask) != 0 ) { // Expired epoch? |
kvn@855 | 2077 | * old = mark_word; |
kvn@855 | 2078 | * new = o_node; |
kvn@855 | 2079 | * } else { |
kvn@855 | 2080 | * // Different thread or anonymous biased. |
kvn@855 | 2081 | * old = mark_word & (epoch_mask | age_mask | biased_lock_mask); |
kvn@855 | 2082 | * new = thread | old; |
kvn@855 | 2083 | * } |
kvn@855 | 2084 | * // Try to rebias. |
kvn@855 | 2085 | * if( cas(&mark_word, old, new) == 0 ) { |
kvn@855 | 2086 | * // Done. |
kvn@855 | 2087 | * } else { |
kvn@855 | 2088 | * goto slow_path; // Failed. |
kvn@855 | 2089 | * } |
kvn@855 | 2090 | * } |
kvn@855 | 2091 | * } |
kvn@855 | 2092 | * } else { |
kvn@855 | 2093 | * // The object is not biased. |
kvn@855 | 2094 | * cas_lock: |
kvn@855 | 2095 | * if( FastLock(obj) == 0 ) { |
kvn@855 | 2096 | * // Done. |
kvn@855 | 2097 | * } else { |
kvn@855 | 2098 | * slow_path: |
kvn@855 | 2099 | * OptoRuntime::complete_monitor_locking_Java(obj); |
kvn@855 | 2100 | * } |
kvn@855 | 2101 | * } |
kvn@855 | 2102 | */ |
kvn@855 | 2103 | |
kvn@855 | 2104 | region = new (C, 5) RegionNode(5); |
kvn@855 | 2105 | // create a Phi for the memory state |
kvn@855 | 2106 | mem_phi = new (C, 5) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); |
kvn@855 | 2107 | |
kvn@855 | 2108 | Node* fast_lock_region = new (C, 3) RegionNode(3); |
kvn@855 | 2109 | Node* fast_lock_mem_phi = new (C, 3) PhiNode( fast_lock_region, Type::MEMORY, TypeRawPtr::BOTTOM); |
kvn@855 | 2110 | |
kvn@855 | 2111 | // First, check mark word for the biased lock pattern. |
kvn@855 | 2112 | Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type()); |
kvn@855 | 2113 | |
kvn@855 | 2114 | // Get fast path - mark word has the biased lock pattern. |
kvn@855 | 2115 | ctrl = opt_bits_test(ctrl, fast_lock_region, 1, mark_node, |
kvn@855 | 2116 | markOopDesc::biased_lock_mask_in_place, |
kvn@855 | 2117 | markOopDesc::biased_lock_pattern, true); |
kvn@855 | 2118 | // fast_lock_region->in(1) is set to slow path. |
kvn@855 | 2119 | fast_lock_mem_phi->init_req(1, mem); |
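    // A sketch of the control shape opt_bits_test builds here (assuming the
    // helper's usual AndX/CmpX/Bool/If construction; see its definition
    // earlier in this file):
    //
    //   if( (mark_node & biased_lock_mask) == biased_lock_pattern )
    //     // returned as the new ctrl: the object is biased, continue below
    //   else
    //     // wired into fast_lock_region->in(1): fall back to CAS locking
    //
    // With return_fast_path == true the matching arm is returned and the
    // non-matching arm goes into the region.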
kvn@855 | 2120 | |
kvn@855 | 2121 | // Now check that the lock is biased to the current thread and has |
kvn@855 | 2122 | // the same epoch and bias as Klass::_prototype_header. |
kvn@855 | 2123 | |
kvn@855 | 2124 | // Special-case a fresh allocation to avoid building nodes: |
kvn@855 | 2125 | Node* klass_node = AllocateNode::Ideal_klass(obj, &_igvn); |
kvn@855 | 2126 | if (klass_node == NULL) { |
kvn@855 | 2127 | Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes()); |
kvn@855 | 2128 | klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) ); |
kvn@925 | 2129 | #ifdef _LP64 |
kvn@925 | 2130 | if (UseCompressedOops && klass_node->is_DecodeN()) { |
kvn@925 | 2131 | assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity"); |
kvn@925 | 2132 | klass_node->in(1)->init_req(0, ctrl); |
kvn@925 | 2133 | } else |
kvn@925 | 2134 | #endif |
kvn@925 | 2135 | klass_node->init_req(0, ctrl); |
kvn@855 | 2136 | } |
stefank@3391 | 2137 | Node *proto_node = make_load(ctrl, mem, klass_node, in_bytes(Klass::prototype_header_offset()), TypeX_X, TypeX_X->basic_type()); |
kvn@855 | 2138 | |
kvn@855 | 2139 | Node* thread = transform_later(new (C, 1) ThreadLocalNode()); |
kvn@855 | 2140 | Node* cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread)); |
kvn@855 | 2141 | Node* o_node = transform_later(new (C, 3) OrXNode(cast_thread, proto_node)); |
kvn@855 | 2142 | Node* x_node = transform_later(new (C, 3) XorXNode(o_node, mark_node)); |
kvn@855 | 2143 | |
kvn@855 | 2144 | // Get slow path - mark word does NOT match the value. |
kvn@855 | 2145 | Node* not_biased_ctrl = opt_bits_test(ctrl, region, 3, x_node, |
kvn@855 | 2146 | (~markOopDesc::age_mask_in_place), 0); |
kvn@855 | 2147 | // region->in(3) is set to fast path - the object is biased to the current thread. |
kvn@855 | 2148 | mem_phi->init_req(3, mem); |
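    // The test above encodes, per the pseudocode at the top of this function:
    //
    //   x_node = (thread | proto_node) ^ mark_node;
    //   if( (x_node & ~age_mask) == 0 )  // everything but the age bits matches
    //     // region->in(3): already biased to the current thread - done
    //
    // i.e. the mark word already equals (thread | Klass::_prototype_header)
    // up to the age bits.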
kvn@855 | 2149 | |
kvn@855 | 2150 | |
kvn@855 | 2151 | // Mark word does NOT match the value (thread | Klass::_prototype_header). |
kvn@855 | 2152 | |
kvn@855 | 2153 | |
kvn@855 | 2154 | // First, check biased pattern. |
kvn@855 | 2155 | // Get fast path - _prototype_header has the same biased lock pattern. |
kvn@855 | 2156 | ctrl = opt_bits_test(not_biased_ctrl, fast_lock_region, 2, x_node, |
kvn@855 | 2157 | markOopDesc::biased_lock_mask_in_place, 0, true); |
kvn@855 | 2158 | |
kvn@855 | 2159 | not_biased_ctrl = fast_lock_region->in(2); // Slow path |
kvn@855 | 2160 | // fast_lock_region->in(2) - the prototype header is no longer biased |
kvn@855 | 2161 | // and we have to revoke the bias on this object. |
kvn@855 | 2162 | // We are going to try to reset the mark of this object to the prototype |
kvn@855 | 2163 | // value and fall through to the CAS-based locking scheme. |
kvn@855 | 2164 | Node* adr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); |
kvn@855 | 2165 | Node* cas = new (C, 5) StoreXConditionalNode(not_biased_ctrl, mem, adr, |
kvn@855 | 2166 | proto_node, mark_node); |
kvn@855 | 2167 | transform_later(cas); |
kvn@855 | 2168 | Node* proj = transform_later( new (C, 1) SCMemProjNode(cas)); |
kvn@855 | 2169 | fast_lock_mem_phi->init_req(2, proj); |
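    // StoreXConditionalNode(ctrl, mem, adr, newval, oldval) is C2's word-sized
    // compare-and-swap: it stores proto_node at adr only if the location still
    // holds mark_node, and the SCMemProjNode captures the memory state the
    // conditional store produces so it can feed the memory phi. The CAS result
    // is not tested here: if another thread won the race, the FastLock below
    // simply fails and we take the slow path.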
kvn@855 | 2170 | |
kvn@855 | 2171 | |
kvn@855 | 2172 | // Second, check epoch bits. |
kvn@855 | 2173 | Node* rebiased_region = new (C, 3) RegionNode(3); |
kvn@855 | 2174 | Node* old_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X); |
kvn@855 | 2175 | Node* new_phi = new (C, 3) PhiNode( rebiased_region, TypeX_X); |
kvn@855 | 2176 | |
kvn@855 | 2177 | // Get slow path - mark word does NOT match epoch bits. |
kvn@855 | 2178 | Node* epoch_ctrl = opt_bits_test(ctrl, rebiased_region, 1, x_node, |
kvn@855 | 2179 | markOopDesc::epoch_mask_in_place, 0); |
kvn@855 | 2180 | // The epoch of the current bias is not valid; attempt to rebias the object |
kvn@855 | 2181 | // toward the current thread. |
kvn@855 | 2182 | rebiased_region->init_req(2, epoch_ctrl); |
kvn@855 | 2183 | old_phi->init_req(2, mark_node); |
kvn@855 | 2184 | new_phi->init_req(2, o_node); |
kvn@855 | 2185 | |
kvn@855 | 2186 | // rebiased_region->in(1) is set to fast path. |
kvn@855 | 2187 | // The epoch of the current bias is still valid but we know |
kvn@855 | 2188 | // nothing about the owner; it might be set or it might be clear. |
kvn@855 | 2189 | Node* cmask = MakeConX(markOopDesc::biased_lock_mask_in_place | |
kvn@855 | 2190 | markOopDesc::age_mask_in_place | |
kvn@855 | 2191 | markOopDesc::epoch_mask_in_place); |
kvn@855 | 2192 | Node* old = transform_later(new (C, 3) AndXNode(mark_node, cmask)); |
kvn@855 | 2193 | cast_thread = transform_later(new (C, 2) CastP2XNode(ctrl, thread)); |
kvn@855 | 2194 | Node* new_mark = transform_later(new (C, 3) OrXNode(cast_thread, old)); |
kvn@855 | 2195 | old_phi->init_req(1, old); |
kvn@855 | 2196 | new_phi->init_req(1, new_mark); |
kvn@855 | 2197 | |
kvn@855 | 2198 | transform_later(rebiased_region); |
kvn@855 | 2199 | transform_later(old_phi); |
kvn@855 | 2200 | transform_later(new_phi); |
kvn@855 | 2201 | |
kvn@855 | 2202 | // Try to acquire the bias of the object using an atomic operation. |
kvn@855 | 2203 | // If this fails we will go into the runtime to revoke the object's bias. |
kvn@855 | 2204 | cas = new (C, 5) StoreXConditionalNode(rebiased_region, mem, adr, |
kvn@855 | 2205 | new_phi, old_phi); |
kvn@855 | 2206 | transform_later(cas); |
kvn@855 | 2207 | proj = transform_later( new (C, 1) SCMemProjNode(cas)); |
kvn@855 | 2208 | |
kvn@855 | 2209 | // Get slow path - Failed to CAS. |
kvn@855 | 2210 | not_biased_ctrl = opt_bits_test(rebiased_region, region, 4, cas, 0, 0); |
kvn@855 | 2211 | mem_phi->init_req(4, proj); |
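    // Unlike the revocation CAS above, this CAS result IS tested: per the
    // pseudocode, the conditional store yields 0 on success, so (assuming
    // opt_bits_test with mask == 0 compares the value itself against bits)
    // the equal arm becomes region->in(4) and the returned not-equal arm is
    // the CAS-failure control captured by the slow path below.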
kvn@855 | 2212 | // region->in(4) is set to fast path - the object is rebiased to the current thread. |
kvn@855 | 2213 | |
kvn@855 | 2214 | // Failed to CAS. |
kvn@855 | 2215 | slow_path = new (C, 3) RegionNode(3); |
kvn@855 | 2216 | Node *slow_mem = new (C, 3) PhiNode( slow_path, Type::MEMORY, TypeRawPtr::BOTTOM); |
kvn@855 | 2217 | |
kvn@855 | 2218 | slow_path->init_req(1, not_biased_ctrl); // Capture slow-control |
kvn@855 | 2219 | slow_mem->init_req(1, proj); |
kvn@855 | 2220 | |
kvn@855 | 2221 | // Call CAS-based locking scheme (FastLock node). |
kvn@855 | 2222 | |
kvn@855 | 2223 | transform_later(fast_lock_region); |
kvn@855 | 2224 | transform_later(fast_lock_mem_phi); |
kvn@855 | 2225 | |
kvn@855 | 2226 | // Get slow path - FastLock failed to lock the object. |
kvn@855 | 2227 | ctrl = opt_bits_test(fast_lock_region, region, 2, flock, 0, 0); |
kvn@855 | 2228 | mem_phi->init_req(2, fast_lock_mem_phi); |
kvn@855 | 2229 | // region->in(2) is set to fast path - the object is locked to the current thread. |
kvn@855 | 2230 | |
kvn@855 | 2231 | slow_path->init_req(2, ctrl); // Capture slow-control |
kvn@855 | 2232 | slow_mem->init_req(2, fast_lock_mem_phi); |
kvn@855 | 2233 | |
kvn@855 | 2234 | transform_later(slow_path); |
kvn@855 | 2235 | transform_later(slow_mem); |
kvn@855 | 2236 | // Reset lock's memory edge. |
kvn@855 | 2237 | lock->set_req(TypeFunc::Memory, slow_mem); |
kvn@855 | 2238 | |
kvn@855 | 2239 | } else { |
kvn@855 | 2240 | region = new (C, 3) RegionNode(3); |
kvn@855 | 2241 | // create a Phi for the memory state |
kvn@855 | 2242 | mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); |
kvn@855 | 2243 | |
kvn@855 | 2244 | // Optimize test; set region slot 2 |
kvn@855 | 2245 | slow_path = opt_bits_test(ctrl, region, 2, flock, 0, 0); |
kvn@855 | 2246 | mem_phi->init_req(2, mem); |
kvn@855 | 2247 | } |
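  // Either way the merge contract is the same: slot 1 of region/mem_phi is
  // reserved for the slow-path runtime call wired in below, while slot 2
  // (and slots 3/4 when biased locking is inlined) carries the fast paths.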
duke@435 | 2248 | |
duke@435 | 2249 | // Make slow path call |
duke@435 | 2250 | CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box ); |
duke@435 | 2251 | |
duke@435 | 2252 | extract_call_projections(call); |
duke@435 | 2253 | |
duke@435 | 2254 | // The slow path can only throw asynchronous exceptions, which are always |
duke@435 | 2255 | // de-opted, so the compiler assumes the slow call can never throw an |
duke@435 | 2256 | // exception. If it DID throw an exception we would need the debug |
duke@435 | 2257 | // info removed first (since if it throws there is no monitor). |
duke@435 | 2258 | assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL && |
duke@435 | 2259 | _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock"); |
duke@435 | 2260 | |
duke@435 | 2261 | // Capture slow path |
duke@435 | 2262 | // disconnect fall-through projection from call and create a new one |
duke@435 | 2263 | // hook up users of fall-through projection to region |
duke@435 | 2264 | Node *slow_ctrl = _fallthroughproj->clone(); |
duke@435 | 2265 | transform_later(slow_ctrl); |
duke@435 | 2266 | _igvn.hash_delete(_fallthroughproj); |
duke@435 | 2267 | _fallthroughproj->disconnect_inputs(NULL); |
duke@435 | 2268 | region->init_req(1, slow_ctrl); |
duke@435 | 2269 | // region inputs are now complete |
duke@435 | 2270 | transform_later(region); |
kvn@1143 | 2271 | _igvn.replace_node(_fallthroughproj, region); |
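  // The idiom above splits the call's fallthrough control projection: the
  // clone (slow_ctrl) keeps the call as its input and feeds region->in(1),
  // while replace_node redirects every former user of _fallthroughproj to
  // the region, so downstream nodes now see the merged fast/slow control.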
duke@435 | 2272 | |
kvn@855 | 2273 | Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); |
duke@435 | 2274 | mem_phi->init_req(1, memproj ); |
duke@435 | 2275 | transform_later(mem_phi); |
kvn@1143 | 2276 | _igvn.replace_node(_memproj_fallthrough, mem_phi); |
duke@435 | 2277 | } |
duke@435 | 2278 | |
duke@435 | 2279 | //------------------------------expand_unlock_node---------------------- |
duke@435 | 2280 | void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) { |
duke@435 | 2281 | |
kvn@501 | 2282 | Node* ctrl = unlock->in(TypeFunc::Control); |
duke@435 | 2283 | Node* mem = unlock->in(TypeFunc::Memory); |
duke@435 | 2284 | Node* obj = unlock->obj_node(); |
duke@435 | 2285 | Node* box = unlock->box_node(); |
duke@435 | 2286 | |
kvn@3419 | 2287 | assert(!box->as_BoxLock()->is_eliminated(), "sanity"); |
kvn@3406 | 2288 | |
duke@435 | 2289 | // No need for a null check on unlock |
duke@435 | 2290 | |
duke@435 | 2291 | // Make the merge point |
kvn@855 | 2292 | Node *region; |
kvn@855 | 2293 | Node *mem_phi; |
kvn@855 | 2294 | |
kvn@855 | 2295 | if (UseOptoBiasInlining) { |
kvn@855 | 2296 | // Check for biased locking unlock case, which is a no-op. |
twisti@1040 | 2297 | // See the full description in MacroAssembler::biased_locking_exit(). |
kvn@855 | 2298 | region = new (C, 4) RegionNode(4); |
kvn@855 | 2299 | // create a Phi for the memory state |
kvn@855 | 2300 | mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); |
kvn@855 | 2301 | mem_phi->init_req(3, mem); |
kvn@855 | 2302 | |
kvn@855 | 2303 | Node* mark_node = make_load(ctrl, mem, obj, oopDesc::mark_offset_in_bytes(), TypeX_X, TypeX_X->basic_type()); |
kvn@855 | 2304 | ctrl = opt_bits_test(ctrl, region, 3, mark_node, |
kvn@855 | 2305 | markOopDesc::biased_lock_mask_in_place, |
kvn@855 | 2306 | markOopDesc::biased_lock_pattern); |
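    // If the mark word still carries the biased pattern the unlock is a no-op:
    // that matching arm was wired into region->in(3) (with mem_phi->in(3) set
    // to mem above), and the returned non-matching arm falls through to the
    // FastUnlock test below.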
kvn@855 | 2307 | } else { |
kvn@855 | 2308 | region = new (C, 3) RegionNode(3); |
kvn@855 | 2309 | // create a Phi for the memory state |
kvn@855 | 2310 | mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); |
kvn@855 | 2311 | } |
duke@435 | 2312 | |
duke@435 | 2313 | FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box ); |
duke@435 | 2314 | funlock = transform_later( funlock )->as_FastUnlock(); |
duke@435 | 2315 | // Optimize test; set region slot 2 |
kvn@855 | 2316 | Node *slow_path = opt_bits_test(ctrl, region, 2, funlock, 0, 0); |
duke@435 | 2317 | |
duke@435 | 2318 | CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box ); |
duke@435 | 2319 | |
duke@435 | 2320 | extract_call_projections(call); |
duke@435 | 2321 | |
duke@435 | 2322 | assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL && |
duke@435 | 2323 | _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock"); |
duke@435 | 2324 | |
duke@435 | 2325 | // No exceptions for unlocking |
duke@435 | 2326 | // Capture slow path |
duke@435 | 2327 | // disconnect fall-through projection from call and create a new one |
duke@435 | 2328 | // hook up users of fall-through projection to region |
duke@435 | 2329 | Node *slow_ctrl = _fallthroughproj->clone(); |
duke@435 | 2330 | transform_later(slow_ctrl); |
duke@435 | 2331 | _igvn.hash_delete(_fallthroughproj); |
duke@435 | 2332 | _fallthroughproj->disconnect_inputs(NULL); |
duke@435 | 2333 | region->init_req(1, slow_ctrl); |
duke@435 | 2334 | // region inputs are now complete |
duke@435 | 2335 | transform_later(region); |
kvn@1143 | 2336 | _igvn.replace_node(_fallthroughproj, region); |
duke@435 | 2337 | |
duke@435 | 2338 | Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); |
duke@435 | 2339 | mem_phi->init_req(1, memproj ); |
duke@435 | 2340 | mem_phi->init_req(2, mem); |
duke@435 | 2341 | transform_later(mem_phi); |
kvn@1143 | 2342 | _igvn.replace_node(_memproj_fallthrough, mem_phi); |
duke@435 | 2343 | } |
duke@435 | 2344 | |
kvn@3311 | 2345 | //---------------------------eliminate_macro_nodes---------------------- |
kvn@3311 | 2346 | // Eliminate scalar replaced allocations and associated locks. |
kvn@3311 | 2347 | void PhaseMacroExpand::eliminate_macro_nodes() { |
duke@435 | 2348 | if (C->macro_count() == 0) |
kvn@3311 | 2349 | return; |
kvn@3311 | 2350 | |
kvn@895 | 2351 | // First, attempt to eliminate locks |
kvn@2951 | 2352 | int cnt = C->macro_count(); |
kvn@2951 | 2353 | for (int i=0; i < cnt; i++) { |
kvn@2951 | 2354 | Node *n = C->macro_node(i); |
kvn@2951 | 2355 | if (n->is_AbstractLock()) { // Lock and Unlock nodes |
kvn@2951 | 2356 | // Before elimination, mark all associated (same box and obj) |
kvn@2951 | 2357 | // lock and unlock nodes. |
kvn@2951 | 2358 | mark_eliminated_locking_nodes(n->as_AbstractLock()); |
kvn@2951 | 2359 | } |
kvn@2951 | 2360 | } |
kvn@508 | 2361 | bool progress = true; |
kvn@508 | 2362 | while (progress) { |
kvn@508 | 2363 | progress = false; |
kvn@508 | 2364 | for (int i = C->macro_count(); i > 0; i--) { |
kvn@508 | 2365 | Node * n = C->macro_node(i-1); |
kvn@508 | 2366 | bool success = false; |
kvn@508 | 2367 | debug_only(int old_macro_count = C->macro_count();); |
kvn@895 | 2368 | if (n->is_AbstractLock()) { |
kvn@895 | 2369 | success = eliminate_locking_node(n->as_AbstractLock()); |
kvn@895 | 2370 | } |
kvn@895 | 2371 | assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); |
kvn@895 | 2372 | progress = progress || success; |
kvn@895 | 2373 | } |
kvn@895 | 2374 | } |
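  // Iterating to a fixed point matters: eliminating one lock can expose
  // another (for example, a nested lock region on the same non-escaping
  // object), so keep sweeping the macro list until a pass makes no progress.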
kvn@895 | 2375 | // Next, attempt to eliminate allocations |
kvn@895 | 2376 | progress = true; |
kvn@895 | 2377 | while (progress) { |
kvn@895 | 2378 | progress = false; |
kvn@895 | 2379 | for (int i = C->macro_count(); i > 0; i--) { |
kvn@895 | 2380 | Node * n = C->macro_node(i-1); |
kvn@895 | 2381 | bool success = false; |
kvn@895 | 2382 | debug_only(int old_macro_count = C->macro_count();); |
kvn@508 | 2383 | switch (n->class_id()) { |
kvn@508 | 2384 | case Node::Class_Allocate: |
kvn@508 | 2385 | case Node::Class_AllocateArray: |
kvn@508 | 2386 | success = eliminate_allocate_node(n->as_Allocate()); |
kvn@508 | 2387 | break; |
kvn@508 | 2388 | case Node::Class_Lock: |
kvn@508 | 2389 | case Node::Class_Unlock: |
kvn@895 | 2390 | assert(!n->as_AbstractLock()->is_eliminated(), "sanity"); |
kvn@508 | 2391 | break; |
kvn@508 | 2392 | default: |
kvn@3311 | 2393 | assert(n->Opcode() == Op_LoopLimit || |
kvn@3311 | 2394 | n->Opcode() == Op_Opaque1 || |
kvn@3311 | 2395 | n->Opcode() == Op_Opaque2, "unknown node type in macro list"); |
kvn@508 | 2396 | } |
kvn@508 | 2397 | assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); |
kvn@508 | 2398 | progress = progress || success; |
kvn@508 | 2399 | } |
kvn@508 | 2400 | } |
kvn@3311 | 2401 | } |
kvn@3311 | 2402 | |
kvn@3311 | 2403 | //------------------------------expand_macro_nodes---------------------- |
kvn@3311 | 2404 | // Returns true if a failure occurred. |
kvn@3311 | 2405 | bool PhaseMacroExpand::expand_macro_nodes() { |
kvn@3311 | 2406 | // Last attempt to eliminate macro nodes. |
kvn@3311 | 2407 | eliminate_macro_nodes(); |
kvn@3311 | 2408 | |
kvn@508 | 2409 | // Make sure expansion will not cause node limit to be exceeded. |
kvn@508 | 2410 | // Worst case is a macro node gets expanded into about 50 nodes. |
kvn@508 | 2411 | // Allow 50% more for optimization (50 * 1.5 = 75 nodes per macro node). |
duke@435 | 2412 | if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) ) |
duke@435 | 2413 | return true; |
kvn@508 | 2414 | |
kvn@3311 | 2415 | // Eliminate Opaque and LoopLimit nodes. Do it after all loop optimizations. |
kvn@3311 | 2416 | bool progress = true; |
kvn@3311 | 2417 | while (progress) { |
kvn@3311 | 2418 | progress = false; |
kvn@3311 | 2419 | for (int i = C->macro_count(); i > 0; i--) { |
kvn@3311 | 2420 | Node * n = C->macro_node(i-1); |
kvn@3311 | 2421 | bool success = false; |
kvn@3311 | 2422 | debug_only(int old_macro_count = C->macro_count();); |
kvn@3311 | 2423 | if (n->Opcode() == Op_LoopLimit) { |
kvn@3311 | 2424 | // Remove it from macro list and put on IGVN worklist to optimize. |
kvn@3311 | 2425 | C->remove_macro_node(n); |
kvn@3311 | 2426 | _igvn._worklist.push(n); |
kvn@3311 | 2427 | success = true; |
kvn@3311 | 2428 | } else if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) { |
kvn@3311 | 2429 | _igvn.replace_node(n, n->in(1)); |
kvn@3311 | 2430 | success = true; |
kvn@3311 | 2431 | } |
kvn@3311 | 2432 | assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); |
kvn@3311 | 2433 | progress = progress || success; |
kvn@3311 | 2434 | } |
kvn@3311 | 2435 | } |
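  // Opaque1/Opaque2 are identity nodes whose only job is to hide their input
  // from earlier optimization phases (e.g. to keep loop-limit checks alive
  // through loop opts); replacing each with its in(1) lets IGVN finally see
  // through them.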
kvn@3311 | 2436 | |
duke@435 | 2437 | // expand "macro" nodes |
duke@435 | 2438 | // nodes are removed from the macro list as they are processed |
duke@435 | 2439 | while (C->macro_count() > 0) { |
kvn@508 | 2440 | int macro_count = C->macro_count(); |
kvn@508 | 2441 | Node * n = C->macro_node(macro_count-1); |
duke@435 | 2442 | assert(n->is_macro(), "only macro nodes expected here"); |
duke@435 | 2443 | if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) { |
duke@435 | 2444 | // node is unreachable, so don't try to expand it |
duke@435 | 2445 | C->remove_macro_node(n); |
duke@435 | 2446 | continue; |
duke@435 | 2447 | } |
duke@435 | 2448 | switch (n->class_id()) { |
duke@435 | 2449 | case Node::Class_Allocate: |
duke@435 | 2450 | expand_allocate(n->as_Allocate()); |
duke@435 | 2451 | break; |
duke@435 | 2452 | case Node::Class_AllocateArray: |
duke@435 | 2453 | expand_allocate_array(n->as_AllocateArray()); |
duke@435 | 2454 | break; |
duke@435 | 2455 | case Node::Class_Lock: |
duke@435 | 2456 | expand_lock_node(n->as_Lock()); |
duke@435 | 2457 | break; |
duke@435 | 2458 | case Node::Class_Unlock: |
duke@435 | 2459 | expand_unlock_node(n->as_Unlock()); |
duke@435 | 2460 | break; |
duke@435 | 2461 | default: |
duke@435 | 2462 | assert(false, "unknown node type in macro list"); |
duke@435 | 2463 | } |
kvn@508 | 2464 | assert(C->macro_count() < macro_count, "must have deleted a node from macro list"); |
duke@435 | 2465 | if (C->failing()) return true; |
duke@435 | 2466 | } |
coleenp@548 | 2467 | |
coleenp@548 | 2468 | _igvn.set_delay_transform(false); |
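  // Transforms were delayed while macro nodes were being expanded; re-enable
  // them and run a full IGVN pass to clean up the expanded subgraphs.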
duke@435 | 2469 | _igvn.optimize(); |
kvn@3311 | 2470 | if (C->failing()) return true; |
duke@435 | 2471 | return false; |
duke@435 | 2472 | } |