Thu, 06 Mar 2008 10:30:17 -0800
6667610: (Escape Analysis) retry compilation without EA if it fails
Summary: During split-unique-types, EA could exceed the node limit and fail the method compilation.
Reviewed-by: rasbold
/*
 * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_macro.cpp.incl"


//
// Replace any references to "oldref" in inputs to "use" with "newref".
// Returns the number of replacements made.
//
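// Note: slots below use->req() are required edges and are rewritten with
// set_req(); higher slots are precedence edges, rewritten with set_prec().
// The scan can stop at the first NULL precedence slot, since precedence
// edges are packed at the front of that range.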
int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) {
  int nreplacements = 0;
  uint req = use->req();
  for (uint j = 0; j < use->len(); j++) {
    Node *uin = use->in(j);
    if (uin == oldref) {
      if (j < req)
        use->set_req(j, newref);
      else
        use->set_prec(j, newref);
      nreplacements++;
    } else if (j >= req && uin == NULL) {
      break;
    }
  }
  return nreplacements;
}

void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) {
  // Copy debug information and adjust JVMState information
  uint old_dbg_start = oldcall->tf()->domain()->cnt();
  uint new_dbg_start = newcall->tf()->domain()->cnt();
  int jvms_adj  = new_dbg_start - old_dbg_start;
  assert (new_dbg_start == newcall->req(), "argument count mismatch");
  for (uint i = old_dbg_start; i < oldcall->req(); i++) {
    newcall->add_req(oldcall->in(i));
  }
  newcall->set_jvms(oldcall->jvms());
  for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) {
    jvms->set_map(newcall);
    jvms->set_locoff(jvms->locoff()+jvms_adj);
    jvms->set_stkoff(jvms->stkoff()+jvms_adj);
    jvms->set_monoff(jvms->monoff()+jvms_adj);
    jvms->set_endoff(jvms->endoff()+jvms_adj);
  }
}
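// For example (illustrative numbers only): if the old call's signature has
// 5 fixed inputs and the new call's has 7, then jvms_adj is +2 and every
// local/stack/monitor offset recorded in the JVMState chain shifts up by 2,
// because the debug info now starts two slots later.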

Node* PhaseMacroExpand::opt_iff(Node* region, Node* iff) {
  IfNode *opt_iff = transform_later(iff)->as_If();

  // Fast path taken; set region slot 2
  Node *fast_taken = transform_later( new (C, 1) IfFalseNode(opt_iff) );
  region->init_req(2,fast_taken); // Capture fast-control

  // Fast path not-taken, i.e. slow path
  Node *slow_taken = transform_later( new (C, 1) IfTrueNode(opt_iff) );
  return slow_taken;
}
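// Note: only region slot 2 (the fast path) is filled in here; the caller is
// expected to wire the returned slow-path control into region slot 1, as
// expand_lock_node() and expand_unlock_node() do below.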

//--------------------copy_predefined_input_for_runtime_call--------------------
void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) {
  // Set fixed predefined input arguments
  call->init_req( TypeFunc::Control, ctrl );
  call->init_req( TypeFunc::I_O    , oldcall->in( TypeFunc::I_O) );
  call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ?????
  call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) );
  call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) );
}

//------------------------------make_slow_call---------------------------------
CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) {

  // Slow-path call
  int size = slow_call_type->domain()->cnt();
  CallNode *call = leaf_name
    ? (CallNode*)new (C, size) CallLeafNode      ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM )
    : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM );

  // Slow path call has no side-effects, uses few values
  copy_predefined_input_for_runtime_call(slow_path, oldcall, call );
  if (parm0 != NULL)  call->init_req(TypeFunc::Parms+0, parm0);
  if (parm1 != NULL)  call->init_req(TypeFunc::Parms+1, parm1);
  copy_call_debug_info(oldcall, call);
  call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  _igvn.hash_delete(oldcall);
  _igvn.subsume_node(oldcall, call);
  transform_later(call);

  return call;
}

void PhaseMacroExpand::extract_call_projections(CallNode *call) {
  _fallthroughproj = NULL;
  _fallthroughcatchproj = NULL;
  _ioproj_fallthrough = NULL;
  _ioproj_catchall = NULL;
  _catchallcatchproj = NULL;
  _memproj_fallthrough = NULL;
  _memproj_catchall = NULL;
  _resproj = NULL;
  for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) {
    ProjNode *pn = call->fast_out(i)->as_Proj();
    switch (pn->_con) {
      case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        _fallthroughproj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              _fallthroughcatchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              _catchallcatchproj = cpn;
            }
          }
        }
        break;
      }
      case TypeFunc::I_O:
        if (pn->_is_io_use)
          _ioproj_catchall = pn;
        else
          _ioproj_fallthrough = pn;
        break;
      case TypeFunc::Memory:
        if (pn->_is_io_use)
          _memproj_catchall = pn;
        else
          _memproj_fallthrough = pn;
        break;
      case TypeFunc::Parms:
        _resproj = pn;
        break;
      default:
        assert(false, "unexpected projection from allocation node.");
    }
  }

}


//---------------------------set_eden_pointers-------------------------
void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) {
  if (UseTLAB) {                // Private allocation: load from TLS
    Node* thread = transform_later(new (C, 1) ThreadLocalNode());
    int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset());
    int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset());
    eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset);
    eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset);
  } else {                      // Shared allocation: load from globals
    CollectedHeap* ch = Universe::heap();
    address top_adr = (address)ch->top_addr();
    address end_adr = (address)ch->end_addr();
    eden_top_adr = makecon(TypeRawPtr::make(top_adr));
    eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr);
  }
}
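// Note: in the shared (non-TLAB) case eden_end_adr is expressed as
// eden_top_adr plus a byte offset rather than as a second constant,
// presumably so both addresses share a single embedded pointer constant
// in the generated code.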


Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  const TypePtr* adr_type = TypeRawPtr::BOTTOM;
  Node* value = LoadNode::make(C, ctl, mem, adr, adr_type, value_type, bt);
  transform_later(value);
  return value;
}


Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) {
  Node* adr = basic_plus_adr(base, offset);
  mem = StoreNode::make(C, ctl, mem, adr, NULL, value, bt);
  transform_later(mem);
  return mem;
}

//=============================================================================
//
//                              A L L O C A T I O N
//
// Allocation attempts to be fast in the case of frequent small objects.
// It breaks down like this:
//
// 1) Size in doublewords is computed.  This is a constant for objects and
// variable for most arrays.  Doubleword units are used to avoid size
// overflow of huge doubleword arrays.  We need doublewords in the end for
// rounding.
//
// 2) Size is checked for being 'too large'.  Too-large allocations will go
// the slow path into the VM.  The slow path can throw any required
// exceptions, and does all the special checks for very large arrays.  The
// size test can constant-fold away for objects.  For objects with
// finalizers it constant-folds the other way: you always go slow with
// finalizers.
//
// 3) If NOT using TLABs, this is the contended loop-back point.
// Load-Locked the heap top.  If using TLABs normal-load the heap top.
//
// 4) Check that heap top + size*8 < max.  If we fail go the slow route.
// NOTE: "top+size*8" cannot wrap the 4Gig line!  Here's why: for largish
// "size*8" we always enter the VM, where "largish" is a constant picked small
// enough that there's always space between the eden max and 4Gig (old space is
// there so it's quite large) and large enough that the cost of entering the VM
// is dwarfed by the cost to initialize the space.
//
// 5) If NOT using TLABs, Store-Conditional the adjusted heap top back
// down.  If contended, repeat at step 3.  If using TLABs normal-store
// adjusted heap top back down; there is no contention.
//
// 6) If !ZeroTLAB then Bulk-clear the object/array.  Fill in klass & mark
// fields.
//
// 7) Merge with the slow-path; cast the raw memory pointer to the correct
// oop flavor.
//
//=============================================================================
// FastAllocateSizeLimit value is in DOUBLEWORDS.
// Allocations bigger than this always go the slow route.
// This value must be small enough that allocation attempts that need to
// trigger exceptions go the slow route.  Also, it must be small enough so
// that heap_top + size_in_bytes does not wrap around the 4Gig limit.
//=============================================================================
//
// %%% Here is an old comment from parseHelper.cpp; is it outdated?
// The allocator will coalesce int->oop copies away.  See comment in
// coalesce.cpp about how this works.  It depends critically on the exact
// code shape produced here, so if you are changing this code shape
// make sure the GC info for the heap-top is correct in and around the
// slow-path call.
//
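// A rough sketch of the fast path expanded below (TLAB case, no initial
// slow test; names are illustrative, not actual node names):
//
//   top     = thread->tlab_top();             // plain load, TLAB is private
//   new_top = top + size_in_bytes;
//   if (new_top >= thread->tlab_end())  goto slow_path;  // needs GC / refill
//   thread->tlab_top() = new_top;             // plain store, no contention
//   // initialize_object(): store mark + klass, zero body unless ZeroTLAB
//   result = (oop) top;
//
// Without TLABs the load becomes Load-Locked, the store becomes
// Store-Conditional, and a failed store loops back to retry.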

void PhaseMacroExpand::expand_allocate_common(
            AllocateNode* alloc, // allocation node to be expanded
            Node* length,  // array length for an array allocation
            const TypeFunc* slow_call_type, // Type of slow call
            address slow_call_address  // Address of slow call
    )
{

  Node* ctrl = alloc->in(TypeFunc::Control);
  Node* mem  = alloc->in(TypeFunc::Memory);
  Node* i_o  = alloc->in(TypeFunc::I_O);
  Node* size_in_bytes     = alloc->in(AllocateNode::AllocSize);
  Node* klass_node        = alloc->in(AllocateNode::KlassNode);
  Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);

  Node* eden_top_adr;
  Node* eden_end_adr;
  set_eden_pointers(eden_top_adr, eden_end_adr);

  uint raw_idx = C->get_alias_index(TypeRawPtr::BOTTOM);
  assert(ctrl != NULL, "must have control");

  // Load Eden::end.  Loop invariant and hoisted.
  //
  // Note: We set the control input on "eden_end" and "old_eden_top" when using
  //       a TLAB to work around a bug where these values were being moved across
  //       a safepoint.  These are not oops, so they cannot be included in the oop
  //       map, but they can be changed by a GC.  The proper way to fix this would
  //       be to set the raw memory state when generating a SafepointNode.  However
  //       this will require extensive changes to the loop optimization in order to
  //       prevent a degradation of the optimization.
  //       See comment in memnode.hpp, around line 227 in class LoadPNode.
  Node* eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS);

  // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
  // They will not be used if "always_slow" is set.
  enum { slow_result_path = 1, fast_result_path = 2 };
  Node *result_region;
  Node *result_phi_rawmem;
  Node *result_phi_rawoop;
  Node *result_phi_i_o;

  // The initial slow comparison is a size check, the comparison
  // we want to do is a BoolTest::gt
  bool always_slow = false;
  int tv = _igvn.find_int_con(initial_slow_test, -1);
  if (tv >= 0) {
    always_slow = (tv == 1);
    initial_slow_test = NULL;
  } else {
    initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn);
  }
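  // At this point exactly one of these holds:
  //   initial_slow_test == NULL && !always_slow -- test folded away: never slow
  //   initial_slow_test == NULL &&  always_slow -- test folded away: always slow
  //   initial_slow_test != NULL && !always_slow -- a runtime check is still needed
  // (see the consistency assert on these two below)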

  if (DTraceAllocProbes) {
    // Force slow-path allocation
    always_slow = true;
    initial_slow_test = NULL;
  }

  enum { too_big_or_final_path = 1, need_gc_path = 2 };
  Node *slow_region = NULL;
  Node *toobig_false = ctrl;

  assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent");
  // generate the initial test if necessary
  if (initial_slow_test != NULL ) {
    slow_region = new (C, 3) RegionNode(3);

    // Now make the initial failure test.  Usually a too-big test but
    // might be a TRUE for finalizers or a fancy class check for
    // newInstance0.
    IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
    transform_later(toobig_iff);
    // Plug the failing-too-big test into the slow-path region
    Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff );
    transform_later(toobig_true);
    slow_region    ->init_req( too_big_or_final_path, toobig_true );
    toobig_false = new (C, 1) IfFalseNode( toobig_iff );
    transform_later(toobig_false);
  } else {         // No initial test, just fall into next case
    toobig_false = ctrl;
    debug_only(slow_region = NodeSentinel);
  }

  Node *slow_mem = mem;  // save the current memory state for slow path
  // generate the fast allocation code unless we know that the initial test will always go slow
  if (!always_slow) {
    // allocate the Region and Phi nodes for the result
    result_region = new (C, 3) RegionNode(3);
    result_phi_rawmem = new (C, 3) PhiNode( result_region, Type::MEMORY, TypeRawPtr::BOTTOM );
    result_phi_rawoop = new (C, 3) PhiNode( result_region, TypeRawPtr::BOTTOM );
    result_phi_i_o    = new (C, 3) PhiNode( result_region, Type::ABIO ); // I/O is used for Prefetch

    // We need a Region for the loop-back contended case.
    enum { fall_in_path = 1, contended_loopback_path = 2 };
    Node *contended_region;
    Node *contended_phi_rawmem;
    if( UseTLAB ) {
      contended_region = toobig_false;
      contended_phi_rawmem = mem;
    } else {
      contended_region = new (C, 3) RegionNode(3);
      contended_phi_rawmem = new (C, 3) PhiNode( contended_region, Type::MEMORY, TypeRawPtr::BOTTOM);
      // Now handle the passing-too-big test.  We fall into the contended
      // loop-back merge point.
      contended_region    ->init_req( fall_in_path, toobig_false );
      contended_phi_rawmem->init_req( fall_in_path, mem );
      transform_later(contended_region);
      transform_later(contended_phi_rawmem);
    }

    // Load(-locked) the heap top.
    // See note above concerning the control input when using a TLAB
    Node *old_eden_top = UseTLAB
      ? new (C, 3) LoadPNode      ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM )
      : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr );

    transform_later(old_eden_top);
    // Add to heap top to get a new heap top
    Node *new_eden_top = new (C, 4) AddPNode( top(), old_eden_top, size_in_bytes );
    transform_later(new_eden_top);
    // Check for needing a GC; compare against heap end
    Node *needgc_cmp = new (C, 3) CmpPNode( new_eden_top, eden_end );
    transform_later(needgc_cmp);
    Node *needgc_bol = new (C, 2) BoolNode( needgc_cmp, BoolTest::ge );
    transform_later(needgc_bol);
    IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
    transform_later(needgc_iff);

    // Plug the failing-heap-space-need-gc test into the slow-path region
    Node *needgc_true = new (C, 1) IfTrueNode( needgc_iff );
    transform_later(needgc_true);
    if( initial_slow_test ) {
      slow_region    ->init_req( need_gc_path, needgc_true );
      // This completes all paths into the slow merge point
      transform_later(slow_region);
    } else {                      // No initial slow path needed!
      // Just fall from the need-GC path straight into the VM call.
      slow_region    = needgc_true;
    }
    // No need for a GC.  Setup for the Store-Conditional
    Node *needgc_false = new (C, 1) IfFalseNode( needgc_iff );
    transform_later(needgc_false);

    // Grab regular I/O before optional prefetch may change it.
    // Slow-path does no I/O so just set it to the original I/O.
    result_phi_i_o->init_req( slow_result_path, i_o );

    i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem,
                              old_eden_top, new_eden_top, length);

    // Store (-conditional) the modified eden top back down.
    // StorePConditional produces flags for a test PLUS a modified raw
    // memory state.
    Node *store_eden_top;
    Node *fast_oop_ctrl;
    if( UseTLAB ) {
      store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top );
      transform_later(store_eden_top);
      fast_oop_ctrl = needgc_false; // No contention, so this is the fast path
    } else {
      store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top );
      transform_later(store_eden_top);
      Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne );
      transform_later(contention_check);
      store_eden_top = new (C, 1) SCMemProjNode(store_eden_top);
      transform_later(store_eden_top);

      // If not using TLABs, check to see if there was contention.
      IfNode *contention_iff = new (C, 2) IfNode ( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN );
      transform_later(contention_iff);
      Node *contention_true = new (C, 1) IfTrueNode( contention_iff );
      transform_later(contention_true);
      // If contention, loopback and try again.
      contended_region->init_req( contended_loopback_path, contention_true );
      contended_phi_rawmem->init_req( contended_loopback_path, store_eden_top );

      // Fast-path succeeded with no contention!
      Node *contention_false = new (C, 1) IfFalseNode( contention_iff );
      transform_later(contention_false);
      fast_oop_ctrl = contention_false;
    }
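    // Note: in the non-TLAB branch above the StorePConditionalNode plays two
    // roles: its flag result feeds the contention test, while its SCMemProj
    // projection carries the updated raw memory state into the loop-back phi.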

    // Rename successful fast-path variables to make meaning more obvious
    Node* fast_oop        = old_eden_top;
    Node* fast_oop_rawmem = store_eden_top;
    fast_oop_rawmem = initialize_object(alloc,
                                        fast_oop_ctrl, fast_oop_rawmem, fast_oop,
                                        klass_node, length, size_in_bytes);

    if (ExtendedDTraceProbes) {
      // Slow-path call
      int size = TypeFunc::Parms + 2;
      CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(),
                                                      CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base),
                                                      "dtrace_object_alloc",
                                                      TypeRawPtr::BOTTOM);

      // Get base of thread-local storage area
      Node* thread = new (C, 1) ThreadLocalNode();
      transform_later(thread);

      call->init_req(TypeFunc::Parms+0, thread);
      call->init_req(TypeFunc::Parms+1, fast_oop);
      call->init_req( TypeFunc::Control, fast_oop_ctrl );
      call->init_req( TypeFunc::I_O    , top() );   // does no i/o
      call->init_req( TypeFunc::Memory , fast_oop_rawmem );
      call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
      call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );
      transform_later(call);
      fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control);
      transform_later(fast_oop_ctrl);
      fast_oop_rawmem = new (C, 1) ProjNode(call,TypeFunc::Memory);
      transform_later(fast_oop_rawmem);
    }

    // Plug in the successful fast-path into the result merge point
    result_region    ->init_req( fast_result_path, fast_oop_ctrl );
    result_phi_rawoop->init_req( fast_result_path, fast_oop );
    result_phi_i_o   ->init_req( fast_result_path, i_o );
    result_phi_rawmem->init_req( fast_result_path, fast_oop_rawmem );
  } else {
    slow_region = ctrl;
  }

  // Generate slow-path call
  CallNode *call = new (C, slow_call_type->domain()->cnt())
    CallStaticJavaNode(slow_call_type, slow_call_address,
                       OptoRuntime::stub_name(slow_call_address),
                       alloc->jvms()->bci(),
                       TypePtr::BOTTOM);
  call->init_req( TypeFunc::Control, slow_region );
  call->init_req( TypeFunc::I_O    , top() );   // does no i/o
  call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs
  call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) );
  call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) );

  call->init_req(TypeFunc::Parms+0, klass_node);
  if (length != NULL) {
    call->init_req(TypeFunc::Parms+1, length);
  }

  // Copy debug information and adjust JVMState information, then replace the
  // allocate node with the call
  copy_call_debug_info((CallNode *) alloc, call);
  if (!always_slow) {
    call->set_cnt(PROB_UNLIKELY_MAG(4));  // Same effect as RC_UNCOMMON.
  }
  _igvn.hash_delete(alloc);
  _igvn.subsume_node(alloc, call);
  transform_later(call);

  // Identify the output projections from the allocate node and
  // adjust any references to them.
  // The control and io projections look like:
  //
  //        v---Proj(ctrl) <-----+   v---CatchProj(ctrl)
  //  Allocate                   Catch
  //        ^---Proj(io)    <----+   ^---CatchProj(io)
  //
  //  We are interested in the CatchProj nodes.
  //
  extract_call_projections(call);

  // An allocate node has separate memory projections for the uses on the control and i_o paths
  // Replace uses of the control memory projection with result_phi_rawmem (unless we are only generating a slow call)
  if (!always_slow && _memproj_fallthrough != NULL) {
    for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_fallthrough->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }
  // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete _memproj_catchall so
  // we end up with a call that has only 1 memory projection
  if (_memproj_catchall != NULL ) {
    if (_memproj_fallthrough == NULL) {
      _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory);
      transform_later(_memproj_fallthrough);
    }
    for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _memproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }

  mem = result_phi_rawmem;

  // An allocate node has separate i_o projections for the uses on the control and i_o paths
  // Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call)
  if (_ioproj_fallthrough == NULL) {
    _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O);
    transform_later(_ioproj_fallthrough);
  } else if (!always_slow) {
    for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_fallthrough->fast_out(i);

      _igvn.hash_delete(use);
      imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }
  // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete _ioproj_catchall so
  // we end up with a call that has only 1 i_o projection
  if (_ioproj_catchall != NULL ) {
    for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) {
      Node *use = _ioproj_catchall->fast_out(i);
      _igvn.hash_delete(use);
      imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough);
      _igvn._worklist.push(use);
      // back up iterator
      --i;
    }
  }

  // if we generated only a slow call, we are done
  if (always_slow)
    return;


  if (_fallthroughcatchproj != NULL) {
    ctrl = _fallthroughcatchproj->clone();
    transform_later(ctrl);
    _igvn.hash_delete(_fallthroughcatchproj);
    _igvn.subsume_node(_fallthroughcatchproj, result_region);
  } else {
    ctrl = top();
  }
  Node *slow_result;
  if (_resproj == NULL) {
    // no uses of the allocation result
    slow_result = top();
  } else {
    slow_result = _resproj->clone();
    transform_later(slow_result);
    _igvn.hash_delete(_resproj);
    _igvn.subsume_node(_resproj, result_phi_rawoop);
  }

  // Plug slow-path into result merge point
  result_region    ->init_req( slow_result_path, ctrl );
  result_phi_rawoop->init_req( slow_result_path, slow_result);
  result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough );
  transform_later(result_region);
  transform_later(result_phi_rawoop);
  transform_later(result_phi_rawmem);
  transform_later(result_phi_i_o);
  // This completes all paths into the result merge point
}


// Helper for PhaseMacroExpand::expand_allocate_common.
// Initializes the newly-allocated storage.
Node*
PhaseMacroExpand::initialize_object(AllocateNode* alloc,
                                    Node* control, Node* rawmem, Node* object,
                                    Node* klass_node, Node* length,
                                    Node* size_in_bytes) {
  InitializeNode* init = alloc->initialization();
  // Store the klass & mark bits
  Node* mark_node = NULL;
  // For now only enable fast locking for non-array types
  if (UseBiasedLocking && (length == NULL)) {
    mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS);
  } else {
    mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype()));
  }
  rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS);
  rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT);
  int header_size = alloc->minimum_header_size();  // conservatively small

  // Array length
  if (length != NULL) {         // Arrays need length field
    rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT);
    // conservatively small header size:
    header_size = sizeof(arrayOopDesc);
    ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass();
    if (k->is_array_klass())    // we know the exact header size in most cases:
      header_size = Klass::layout_helper_header_size(k->layout_helper());
  }

  // Clear the object body, if necessary.
  if (init == NULL) {
    // The init has somehow disappeared; be cautious and clear everything.
    //
    // This can happen if a node is allocated but an uncommon trap occurs
    // immediately.  In this case, the Initialize gets associated with the
    // trap, and may be placed in a different (outer) loop, if the Allocate
    // is in a loop.  If (this is rare) the inner loop gets unrolled, then
    // there can be two Allocates to one Initialize.  The answer in all these
    // edge cases is safety first.  It is always safe to clear immediately
    // within an Allocate, and then (maybe or maybe not) clear some more later.
    if (!ZeroTLAB)
      rawmem = ClearArrayNode::clear_memory(control, rawmem, object,
                                            header_size, size_in_bytes,
                                            &_igvn);
  } else {
    if (!init->is_complete()) {
      // Try to win by zeroing only what the init does not store.
      // We can also try to do some peephole optimizations,
      // such as combining some adjacent subword stores.
      rawmem = init->complete_stores(control, rawmem, object,
                                     header_size, size_in_bytes, &_igvn);
    }

    // We have no more use for this link, since the AllocateNode goes away:
    init->set_req(InitializeNode::RawAddress, top());
    // (If we keep the link, it just confuses the register allocator,
    //  who thinks he sees a real use of the address by the membar.)
  }

  return rawmem;
}
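// Sketch of the object layout produced above (illustrative; actual offsets
// come from oopDesc and arrayOopDesc):
//   [ mark word ][ klass ptr ][ length (arrays only) ][ body ... ]
// The body from header_size up to size_in_bytes is zeroed unless ZeroTLAB
// already guarantees freshly-zeroed TLAB memory or the Initialize node's
// own stores cover it.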

// Generate prefetch instructions for next allocations.
Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false,
                                            Node*& contended_phi_rawmem,
                                            Node* old_eden_top, Node* new_eden_top,
                                            Node* length) {
  if( UseTLAB && AllocatePrefetchStyle == 2 ) {
    // Generate prefetch allocation with watermark check.
    // As an allocation hits the watermark, we will prefetch starting
    // at a "distance" away from watermark.
    enum { fall_in_path = 1, pf_path = 2 };

    Node *pf_region = new (C, 3) RegionNode(3);
    Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY,
                                              TypeRawPtr::BOTTOM );
    // I/O is used for Prefetch
    Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO );

    Node *thread = new (C, 1) ThreadLocalNode();
    transform_later(thread);

    Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread,
                 _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) );
    transform_later(eden_pf_adr);

    Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false,
                                   contended_phi_rawmem, eden_pf_adr,
                                   TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM );
    transform_later(old_pf_wm);

    // check against new_eden_top
    Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm );
    transform_later(need_pf_cmp);
    Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge );
    transform_later(need_pf_bol);
    IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol,
                                       PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN );
    transform_later(need_pf_iff);

    // true node, add prefetchdistance
    Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff );
    transform_later(need_pf_true);

    Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff );
    transform_later(need_pf_false);

    Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm,
                                    _igvn.MakeConX(AllocatePrefetchDistance) );
    transform_later(new_pf_wmt );
    new_pf_wmt->set_req(0, need_pf_true);

    Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true,
                                       contended_phi_rawmem, eden_pf_adr,
                                       TypeRawPtr::BOTTOM, new_pf_wmt );
    transform_later(store_new_wmt);

    // adding prefetches
    pf_phi_abio->init_req( fall_in_path, i_o );

    Node *prefetch_adr;
    Node *prefetch;
    uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize;
    uint step_size = AllocatePrefetchStepSize;
    uint distance = 0;

    for ( uint i = 0; i < lines; i++ ) {
      prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt,
                                          _igvn.MakeConX(distance) );
      transform_later(prefetch_adr);
      prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
      transform_later(prefetch);
      distance += step_size;
      i_o = prefetch;
    }
    pf_phi_abio->set_req( pf_path, i_o );

    pf_region->init_req( fall_in_path, need_pf_false );
    pf_region->init_req( pf_path, need_pf_true );

    pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem );
    pf_phi_rawmem->init_req( pf_path, store_new_wmt );

    transform_later(pf_region);
    transform_later(pf_phi_rawmem);
    transform_later(pf_phi_abio);

    needgc_false = pf_region;
    contended_phi_rawmem = pf_phi_rawmem;
    i_o = pf_phi_abio;
  } else if( AllocatePrefetchStyle > 0 ) {
    // Insert a prefetch for each allocation only on the fast-path
    Node *prefetch_adr;
    Node *prefetch;
    // Generate several prefetch instructions only for arrays.
    uint lines = (length != NULL) ? AllocatePrefetchLines : 1;
    uint step_size = AllocatePrefetchStepSize;
    uint distance = AllocatePrefetchDistance;
    for ( uint i = 0; i < lines; i++ ) {
      prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top,
                                          _igvn.MakeConX(distance) );
      transform_later(prefetch_adr);
      prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr );
      // Do not let it float too high, since if eden_top == eden_end,
      // both might be null.
      if( i == 0 ) { // Set control for first prefetch, next follows it
        prefetch->init_req(0, needgc_false);
      }
      transform_later(prefetch);
      distance += step_size;
      i_o = prefetch;
    }
  }
  return i_o;
}
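// For example (illustrative values only): with AllocatePrefetchDistance = 256
// and AllocatePrefetchStepSize = 64, style 2 above issues 256/64 = 4
// PrefetchWrite nodes at 64-byte steps starting at the advanced watermark,
// threaded one after another through the i_o chain.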


void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) {
  expand_allocate_common(alloc, NULL,
                         OptoRuntime::new_instance_Type(),
                         OptoRuntime::new_instance_Java());
}

void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) {
  Node* length = alloc->in(AllocateNode::ALength);
  expand_allocate_common(alloc, length,
                         OptoRuntime::new_array_Type(),
                         OptoRuntime::new_array_Java());
}


// Once we have determined that a lock/unlock can be eliminated, we simply
// eliminate the node without expanding it.
//
// Note:  The membars associated with the lock/unlock are currently not
//        eliminated.  This should be investigated as a future enhancement.
//
void PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) {
  Node* mem = alock->in(TypeFunc::Memory);

  // The memory projection from a lock/unlock is RawMem
  // The input to a Lock is merged memory, so extract its RawMem input
  // (unless the MergeMem has been optimized away.)
  if (alock->is_Lock()) {
    if (mem->is_MergeMem())
      mem = mem->as_MergeMem()->in(Compile::AliasIdxRaw);
  }

  extract_call_projections(alock);
  // There are 2 projections from the lock.  The lock node will
  // be deleted when its last use is subsumed below.
  assert(alock->outcnt() == 2 && _fallthroughproj != NULL &&
         _memproj_fallthrough != NULL, "Unexpected projections from Lock/Unlock");
  _igvn.hash_delete(_fallthroughproj);
  _igvn.subsume_node(_fallthroughproj, alock->in(TypeFunc::Control));
  _igvn.hash_delete(_memproj_fallthrough);
  _igvn.subsume_node(_memproj_fallthrough, mem);
  return;
}


//------------------------------expand_lock_node----------------------
void PhaseMacroExpand::expand_lock_node(LockNode *lock) {

  Node* ctrl = lock->in(TypeFunc::Control);
  Node* mem = lock->in(TypeFunc::Memory);
  Node* obj = lock->obj_node();
  Node* box = lock->box_node();
  Node *flock = lock->fastlock_node();

  if (lock->is_eliminated()) {
    eliminate_locking_node(lock);
    return;
  }

  // Make the merge point
  Node *region = new (C, 3) RegionNode(3);

  Node *bol = transform_later(new (C, 2) BoolNode(flock,BoolTest::ne));
  Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  // Optimize test; set region slot 2
  Node *slow_path = opt_iff(region,iff);

  // Make slow path call
  CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box );

  extract_call_projections(call);

  // Slow path can only throw asynchronous exceptions, which are always
  // de-opted.  So the compiler thinks the slow-call can never throw an
  // exception.  If it DOES throw an exception we would need the debug
  // info removed first (since if it throws there is no monitor).
  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock");

  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(NULL);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.subsume_node(_fallthroughproj, region);

  // create a Phi for the memory state
  Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
  Node *memproj = transform_later( new (C, 1) ProjNode(call, TypeFunc::Memory) );
  mem_phi->init_req(1, memproj );
  mem_phi->init_req(2, mem);
  transform_later(mem_phi);
  _igvn.hash_delete(_memproj_fallthrough);
  _igvn.subsume_node(_memproj_fallthrough, mem_phi);


}

//------------------------------expand_unlock_node----------------------
void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) {

  Node *ctrl = unlock->in(TypeFunc::Control);
  Node* mem = unlock->in(TypeFunc::Memory);
  Node* obj = unlock->obj_node();
  Node* box = unlock->box_node();


  if (unlock->is_eliminated()) {
    eliminate_locking_node(unlock);
    return;
  }

  // No need for a null check on unlock

  // Make the merge point
  RegionNode *region = new (C, 3) RegionNode(3);

  FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box );
  funlock = transform_later( funlock )->as_FastUnlock();
  Node *bol = transform_later(new (C, 2) BoolNode(funlock,BoolTest::ne));
  Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN );
  // Optimize test; set region slot 2
  Node *slow_path = opt_iff(region,iff);

  CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box );

  extract_call_projections(call);

  assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL &&
           _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Unlock");

  // No exceptions for unlocking
  // Capture slow path
  // disconnect fall-through projection from call and create a new one
  // hook up users of fall-through projection to region
  Node *slow_ctrl = _fallthroughproj->clone();
  transform_later(slow_ctrl);
  _igvn.hash_delete(_fallthroughproj);
  _fallthroughproj->disconnect_inputs(NULL);
  region->init_req(1, slow_ctrl);
  // region inputs are now complete
  transform_later(region);
  _igvn.subsume_node(_fallthroughproj, region);

  // create a Phi for the memory state
  Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
  Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) );
  mem_phi->init_req(1, memproj );
  mem_phi->init_req(2, mem);
  transform_later(mem_phi);
  _igvn.hash_delete(_memproj_fallthrough);
  _igvn.subsume_node(_memproj_fallthrough, mem_phi);


}

//------------------------------expand_macro_nodes----------------------
//  Returns true if a failure occurred.
bool PhaseMacroExpand::expand_macro_nodes() {
  if (C->macro_count() == 0)
    return false;
  // Make sure expansion will not cause the node limit to be exceeded.  Worst case
  // is a macro node getting expanded into about 50 nodes.  Allow 50% more for
  // optimization.
  if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion"))
    return true;
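  // Example (illustrative numbers): with 40 macro nodes remaining this
  // requires headroom for 40 * 75 = 3000 additional nodes.  If the check
  // fails we bail out here and, per this changeset's summary, the
  // compilation is retried without escape analysis.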
  // expand "macro" nodes
  // nodes are removed from the macro list as they are processed
  while (C->macro_count() > 0) {
    Node * n = C->macro_node(0);
    assert(n->is_macro(), "only macro nodes expected here");
    if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) {
      // node is unreachable, so don't try to expand it
      C->remove_macro_node(n);
      continue;
    }
    switch (n->class_id()) {
    case Node::Class_Allocate:
      expand_allocate(n->as_Allocate());
      break;
    case Node::Class_AllocateArray:
      expand_allocate_array(n->as_AllocateArray());
      break;
    case Node::Class_Lock:
      expand_lock_node(n->as_Lock());
      break;
    case Node::Class_Unlock:
      expand_unlock_node(n->as_Unlock());
      break;
    default:
      assert(false, "unknown node type in macro list");
    }
    if (C->failing())  return true;
  }
  _igvn.optimize();
  return false;
}