|
1 /* |
|
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #include "precompiled.hpp" |
|
26 #include "classfile/systemDictionary.hpp" |
|
27 #include "compiler/compileLog.hpp" |
|
28 #include "memory/allocation.inline.hpp" |
|
29 #include "oops/objArrayKlass.hpp" |
|
30 #include "opto/addnode.hpp" |
|
31 #include "opto/cfgnode.hpp" |
|
32 #include "opto/compile.hpp" |
|
33 #include "opto/connode.hpp" |
|
34 #include "opto/loopnode.hpp" |
|
35 #include "opto/machnode.hpp" |
|
36 #include "opto/matcher.hpp" |
|
37 #include "opto/memnode.hpp" |
|
38 #include "opto/mulnode.hpp" |
|
39 #include "opto/phaseX.hpp" |
|
40 #include "opto/regmask.hpp" |
|
41 |
|
42 // Portions of code courtesy of Clifford Click |
|
43 |
|
44 // Optimization - Graph Style |
|
45 |
|
46 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st); |
|
47 |
|
48 //============================================================================= |
|
49 uint MemNode::size_of() const { return sizeof(*this); } |
|
50 |
|
51 const TypePtr *MemNode::adr_type() const { |
|
52 Node* adr = in(Address); |
|
53 const TypePtr* cross_check = NULL; |
|
54 DEBUG_ONLY(cross_check = _adr_type); |
|
55 return calculate_adr_type(adr->bottom_type(), cross_check); |
|
56 } |
|
57 |
|
58 #ifndef PRODUCT |
|
59 void MemNode::dump_spec(outputStream *st) const { |
|
60 if (in(Address) == NULL) return; // node is dead |
|
61 #ifndef ASSERT |
|
62 // fake the missing field |
|
63 const TypePtr* _adr_type = NULL; |
|
64 if (in(Address) != NULL) |
|
65 _adr_type = in(Address)->bottom_type()->isa_ptr(); |
|
66 #endif |
|
67 dump_adr_type(this, _adr_type, st); |
|
68 |
|
69 Compile* C = Compile::current(); |
|
70 if( C->alias_type(_adr_type)->is_volatile() ) |
|
71 st->print(" Volatile!"); |
|
72 } |
|
73 |
|
74 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) { |
|
75 st->print(" @"); |
|
76 if (adr_type == NULL) { |
|
77 st->print("NULL"); |
|
78 } else { |
|
79 adr_type->dump_on(st); |
|
80 Compile* C = Compile::current(); |
|
81 Compile::AliasType* atp = NULL; |
|
82 if (C->have_alias_type(adr_type)) atp = C->alias_type(adr_type); |
|
83 if (atp == NULL) |
|
84 st->print(", idx=?\?;"); |
|
85 else if (atp->index() == Compile::AliasIdxBot) |
|
86 st->print(", idx=Bot;"); |
|
87 else if (atp->index() == Compile::AliasIdxTop) |
|
88 st->print(", idx=Top;"); |
|
89 else if (atp->index() == Compile::AliasIdxRaw) |
|
90 st->print(", idx=Raw;"); |
|
91 else { |
|
92 ciField* field = atp->field(); |
|
93 if (field) { |
|
94 st->print(", name="); |
|
95 field->print_name_on(st); |
|
96 } |
|
97 st->print(", idx=%d;", atp->index()); |
|
98 } |
|
99 } |
|
100 } |
|
101 |
|
102 extern void print_alias_types(); |
|
103 |
|
104 #endif |
|
105 |
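// Walk backwards along a memory chain, skipping memory-producing nodes
// (calls, MemBars, initializations and allocations of unrelated objects)
// that provably do not affect the memory slice described by t_oop.
// Only applied to known-instance fields and boxed-value loads.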
|
106 Node *MemNode::optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase) { |
|
107 assert((t_oop != NULL), "sanity"); |
|
108 bool is_instance = t_oop->is_known_instance_field(); |
|
109 bool is_boxed_value_load = t_oop->is_ptr_to_boxed_value() && |
|
110 (load != NULL) && load->is_Load() && |
|
111 (phase->is_IterGVN() != NULL); |
|
112 if (!(is_instance || is_boxed_value_load)) |
|
113 return mchain; // don't try to optimize non-instance types |
|
114 uint instance_id = t_oop->instance_id(); |
|
115 Node *start_mem = phase->C->start()->proj_out(TypeFunc::Memory); |
|
116 Node *prev = NULL; |
|
117 Node *result = mchain; |
|
118 while (prev != result) { |
|
119 prev = result; |
|
120 if (result == start_mem) |
|
121 break; // hit one of our sentinels |
|
122 // skip over a call which does not affect this memory slice |
|
123 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { |
|
124 Node *proj_in = result->in(0); |
|
125 if (proj_in->is_Allocate() && proj_in->_idx == instance_id) { |
|
126 break; // hit one of our sentinels |
|
127 } else if (proj_in->is_Call()) { |
|
128 CallNode *call = proj_in->as_Call(); |
|
129 if (!call->may_modify(t_oop, phase)) { // returns false for instances |
|
130 result = call->in(TypeFunc::Memory); |
|
131 } |
|
132 } else if (proj_in->is_Initialize()) { |
|
133 AllocateNode* alloc = proj_in->as_Initialize()->allocation(); |
|
134 // Stop if this is the initialization for the object instance which |
|
135 // contains this memory slice, otherwise skip over it. |
|
136 if ((alloc == NULL) || (alloc->_idx == instance_id)) { |
|
137 break; |
|
138 } |
|
139 if (is_instance) { |
|
140 result = proj_in->in(TypeFunc::Memory); |
|
141 } else if (is_boxed_value_load) { |
|
142 Node* klass = alloc->in(AllocateNode::KlassNode); |
|
143 const TypeKlassPtr* tklass = phase->type(klass)->is_klassptr(); |
|
144 if (tklass->klass_is_exact() && !tklass->klass()->equals(t_oop->klass())) { |
|
145 result = proj_in->in(TypeFunc::Memory); // not related allocation |
|
146 } |
|
147 } |
|
148 } else if (proj_in->is_MemBar()) { |
|
149 result = proj_in->in(TypeFunc::Memory); |
|
150 } else { |
|
151 assert(false, "unexpected projection"); |
|
152 } |
|
153 } else if (result->is_ClearArray()) { |
|
154 if (!is_instance || !ClearArrayNode::step_through(&result, instance_id, phase)) { |
|
155 // Can not bypass initialization of the instance |
|
156 // we are looking for. |
|
157 break; |
|
158 } |
|
159 // Otherwise skip it (the call updated 'result' value). |
|
160 } else if (result->is_MergeMem()) { |
|
161 result = step_through_mergemem(phase, result->as_MergeMem(), t_oop, NULL, tty); |
|
162 } |
|
163 } |
|
164 return result; |
|
165 } |
|
166 |
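// Entry point for memory-chain optimization on oop addresses: simplifies the
// chain via optimize_simple_memory_chain() and, for known-instance fields,
// may split a wide memory Phi so this instance gets its own memory slice.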
|
167 Node *MemNode::optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase) { |
|
168 const TypeOopPtr* t_oop = t_adr->isa_oopptr(); |
|
169 if (t_oop == NULL) |
|
170 return mchain; // don't try to optimize non-oop types |
|
171 Node* result = optimize_simple_memory_chain(mchain, t_oop, load, phase); |
|
172 bool is_instance = t_oop->is_known_instance_field(); |
|
173 PhaseIterGVN *igvn = phase->is_IterGVN(); |
|
174 if (is_instance && igvn != NULL && result->is_Phi()) { |
|
175 PhiNode *mphi = result->as_Phi(); |
|
176 assert(mphi->bottom_type() == Type::MEMORY, "memory phi required"); |
|
177 const TypePtr *t = mphi->adr_type(); |
|
178 if (t == TypePtr::BOTTOM || t == TypeRawPtr::BOTTOM || |
|
179 t->isa_oopptr() && !t->is_oopptr()->is_known_instance() && |
|
180 t->is_oopptr()->cast_to_exactness(true) |
|
181 ->is_oopptr()->cast_to_ptr_type(t_oop->ptr()) |
|
182 ->is_oopptr()->cast_to_instance_id(t_oop->instance_id()) == t_oop) { |
|
183 // clone the Phi with our address type |
|
184 result = mphi->split_out_instance(t_adr, igvn); |
|
185 } else { |
|
186 assert(phase->C->get_alias_index(t) == phase->C->get_alias_index(t_adr), "correct memory chain"); |
|
187 } |
|
188 } |
|
189 return result; |
|
190 } |
|
191 |
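// Given a MergeMem and the address type of the memory operation, return the
// narrow memory slice for that alias index (asserting, in debug builds, that
// the type is consistent with the alias index recorded at graph construction).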
|
192 static Node *step_through_mergemem(PhaseGVN *phase, MergeMemNode *mmem, const TypePtr *tp, const TypePtr *adr_check, outputStream *st) { |
|
193 uint alias_idx = phase->C->get_alias_index(tp); |
|
194 Node *mem = mmem; |
|
195 #ifdef ASSERT |
|
196 { |
|
197 // Check that current type is consistent with the alias index used during graph construction |
|
198 assert(alias_idx >= Compile::AliasIdxRaw, "must not be a bad alias_idx"); |
|
199 bool consistent = adr_check == NULL || adr_check->empty() || |
|
200 phase->C->must_alias(adr_check, alias_idx ); |
|
201 // Sometimes dead array references collapse to a[-1], a[-2], or a[-3] |
|
202 if( !consistent && adr_check != NULL && !adr_check->empty() && |
|
203 tp->isa_aryptr() && tp->offset() == Type::OffsetBot && |
|
204 adr_check->isa_aryptr() && adr_check->offset() != Type::OffsetBot && |
|
205 ( adr_check->offset() == arrayOopDesc::length_offset_in_bytes() || |
|
206 adr_check->offset() == oopDesc::klass_offset_in_bytes() || |
|
207 adr_check->offset() == oopDesc::mark_offset_in_bytes() ) ) { |
|
208 // don't assert if it is dead code. |
|
209 consistent = true; |
|
210 } |
|
211 if( !consistent ) { |
|
212 st->print("alias_idx==%d, adr_check==", alias_idx); |
|
213 if( adr_check == NULL ) { |
|
214 st->print("NULL"); |
|
215 } else { |
|
216 adr_check->dump(); |
|
217 } |
|
218 st->cr(); |
|
219 print_alias_types(); |
|
220 assert(consistent, "adr_check must match alias idx"); |
|
221 } |
|
222 } |
|
223 #endif |
|
224 // TypeOopPtr::NOTNULL+any is an OOP with unknown offset - generally |
|
225 // means an array I have not precisely typed yet. Do not do any |
|
226 // alias stuff with it any time soon. |
|
227 const TypeOopPtr *toop = tp->isa_oopptr(); |
|
228 if( tp->base() != Type::AnyPtr && |
|
229 !(toop && |
|
230 toop->klass() != NULL && |
|
231 toop->klass()->is_java_lang_Object() && |
|
232 toop->offset() == Type::OffsetBot) ) { |
|
233 // compress paths and change unreachable cycles to TOP |
|
234 // If not, we can update the input infinitely along a MergeMem cycle |
|
235 // Equivalent code in PhiNode::Ideal |
|
236 Node* m = phase->transform(mmem); |
|
237 // If transformed to a MergeMem, get the desired slice |
|
238 // Otherwise the returned node represents memory for every slice |
|
239 mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m; |
|
240 // Update input if it is progress over what we have now |
|
241 } |
|
242 return mem; |
|
243 } |
|
244 |
|
245 //--------------------------Ideal_common--------------------------------------- |
|
246 // Look for degenerate control and memory inputs. Bypass MergeMem inputs. |
|
247 // Unhook non-raw memories from complete (macro-expanded) initializations. |
|
248 Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { |
|
249 // If our control input is a dead region, kill all below the region |
|
250 Node *ctl = in(MemNode::Control); |
|
251 if (ctl && remove_dead_region(phase, can_reshape)) |
|
252 return this; |
|
253 ctl = in(MemNode::Control); |
|
254 // Don't bother trying to transform a dead node |
|
255 if (ctl && ctl->is_top()) return NodeSentinel; |
|
256 |
|
257 PhaseIterGVN *igvn = phase->is_IterGVN(); |
|
258 // Wait if control is on the worklist. |
|
259 if (ctl && can_reshape && igvn != NULL) { |
|
260 Node* bol = NULL; |
|
261 Node* cmp = NULL; |
|
262 if (ctl->in(0)->is_If()) { |
|
263 assert(ctl->is_IfTrue() || ctl->is_IfFalse(), "sanity"); |
|
264 bol = ctl->in(0)->in(1); |
|
265 if (bol->is_Bool()) |
|
266 cmp = ctl->in(0)->in(1)->in(1); |
|
267 } |
|
268 if (igvn->_worklist.member(ctl) || |
|
269 (bol != NULL && igvn->_worklist.member(bol)) || |
|
270 (cmp != NULL && igvn->_worklist.member(cmp)) ) { |
|
271 // This control path may be dead. |
|
272 // Delay this memory node transformation until the control is processed. |
|
273 phase->is_IterGVN()->_worklist.push(this); |
|
274 return NodeSentinel; // caller will return NULL |
|
275 } |
|
276 } |
|
277 // Ignore if memory is dead, or self-loop |
|
278 Node *mem = in(MemNode::Memory); |
|
279 if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL |
|
280 assert(mem != this, "dead loop in MemNode::Ideal"); |
|
281 |
|
282 if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) { |
|
283 // This memory slice may be dead. |
|
284 // Delay this mem node transformation until the memory is processed. |
|
285 phase->is_IterGVN()->_worklist.push(this); |
|
286 return NodeSentinel; // caller will return NULL |
|
287 } |
|
288 |
|
289 Node *address = in(MemNode::Address); |
|
290 const Type *t_adr = phase->type(address); |
|
291 if (t_adr == Type::TOP) return NodeSentinel; // caller will return NULL |
|
292 |
|
293 if (can_reshape && igvn != NULL && |
|
294 (igvn->_worklist.member(address) || |
|
295 igvn->_worklist.size() > 0 && (t_adr != adr_type())) ) { |
|
296 // The address's base and type may change when the address is processed. |
|
297 // Delay this mem node transformation until the address is processed. |
|
298 phase->is_IterGVN()->_worklist.push(this); |
|
299 return NodeSentinel; // caller will return NULL |
|
300 } |
|
301 |
|
302 // Do NOT remove or optimize the next lines: ensure a new alias index |
|
303 // is allocated for an oop pointer type before Escape Analysis. |
|
304 // Note: C++ will not remove it since the call has a side effect. |
|
305 if (t_adr->isa_oopptr()) { |
|
306 int alias_idx = phase->C->get_alias_index(t_adr->is_ptr()); |
|
307 } |
|
308 |
|
309 Node* base = NULL; |
|
310 if (address->is_AddP()) { |
|
311 base = address->in(AddPNode::Base); |
|
312 } |
|
313 if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) && |
|
314 !t_adr->isa_rawptr()) { |
|
315 // Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true. |
|
316 // Skip this node optimization if its address has TOP base. |
|
317 return NodeSentinel; // caller will return NULL |
|
318 } |
|
319 |
|
320 // Avoid independent memory operations |
|
321 Node* old_mem = mem; |
|
322 |
|
323 // The code which unhooks non-raw memories from complete (macro-expanded) |
|
324 // initializations was removed. After macro-expansion all stores caught |
|
325 // by an Initialize node became raw stores and there is no information about |
|
326 // which memory slices they modify. So it is unsafe to move any memory |
|
327 // operation above these stores. Also in most cases hooked non-raw memories |
|
328 // were already unhooked by using information from detect_ptr_independence() |
|
329 // and find_previous_store(). |
|
330 |
|
331 if (mem->is_MergeMem()) { |
|
332 MergeMemNode* mmem = mem->as_MergeMem(); |
|
333 const TypePtr *tp = t_adr->is_ptr(); |
|
334 |
|
335 mem = step_through_mergemem(phase, mmem, tp, adr_type(), tty); |
|
336 } |
|
337 |
|
338 if (mem != old_mem) { |
|
339 set_req(MemNode::Memory, mem); |
|
340 if (can_reshape && old_mem->outcnt() == 0) { |
|
341 igvn->_worklist.push(old_mem); |
|
342 } |
|
343 if (phase->type( mem ) == Type::TOP) return NodeSentinel; |
|
344 return this; |
|
345 } |
|
346 |
|
347 // let the subclass continue analyzing... |
|
348 return NULL; |
|
349 } |
|
350 |
|
351 // Helper function for proving some simple control dominations. |
|
352 // Attempt to prove that all control inputs of 'dom' dominate 'sub'. |
|
353 // Already assumes that 'dom' is available at 'sub', and that 'sub' |
|
354 // is not a constant (dominated by the method's StartNode). |
|
355 // Used by MemNode::find_previous_store to prove that the |
|
356 // control input of a memory operation predates (dominates) |
|
357 // an allocation it wants to look past. |
|
358 bool MemNode::all_controls_dominate(Node* dom, Node* sub) { |
|
359 if (dom == NULL || dom->is_top() || sub == NULL || sub->is_top()) |
|
360 return false; // Conservative answer for dead code |
|
361 |
|
362 // Check 'dom'. Skip Proj and CatchProj nodes. |
|
363 dom = dom->find_exact_control(dom); |
|
364 if (dom == NULL || dom->is_top()) |
|
365 return false; // Conservative answer for dead code |
|
366 |
|
367 if (dom == sub) { |
|
368 // For the case when, for example, 'sub' is Initialize and the original |
|
369 // 'dom' is Proj node of the 'sub'. |
|
370 return false; |
|
371 } |
|
372 |
|
373 if (dom->is_Con() || dom->is_Start() || dom->is_Root() || dom == sub) |
|
374 return true; |
|
375 |
|
376 // 'dom' dominates 'sub' if its control edge and control edges |
|
377 // of all its inputs dominate or are equal to sub's control edge. |
|
378 |
|
379 // Currently 'sub' is either Allocate, Initialize or Start nodes. |
|
380 // Or Region for the check in LoadNode::Ideal(); |
|
381 // 'sub' should have sub->in(0) != NULL. |
|
382 assert(sub->is_Allocate() || sub->is_Initialize() || sub->is_Start() || |
|
383 sub->is_Region() || sub->is_Call(), "expecting only these nodes"); |
|
384 |
|
385 // Get control edge of 'sub'. |
|
386 Node* orig_sub = sub; |
|
387 sub = sub->find_exact_control(sub->in(0)); |
|
388 if (sub == NULL || sub->is_top()) |
|
389 return false; // Conservative answer for dead code |
|
390 |
|
391 assert(sub->is_CFG(), "expecting control"); |
|
392 |
|
393 if (sub == dom) |
|
394 return true; |
|
395 |
|
396 if (sub->is_Start() || sub->is_Root()) |
|
397 return false; |
|
398 |
|
399 { |
|
400 // Check all control edges of 'dom'. |
|
401 |
|
402 ResourceMark rm; |
|
403 Arena* arena = Thread::current()->resource_area(); |
|
404 Node_List nlist(arena); |
|
405 Unique_Node_List dom_list(arena); |
|
406 |
|
407 dom_list.push(dom); |
|
408 bool only_dominating_controls = false; |
|
409 |
|
410 for (uint next = 0; next < dom_list.size(); next++) { |
|
411 Node* n = dom_list.at(next); |
|
412 if (n == orig_sub) |
|
413 return false; // One of dom's inputs dominated by sub. |
|
414 if (!n->is_CFG() && n->pinned()) { |
|
415 // Check only own control edge for pinned non-control nodes. |
|
416 n = n->find_exact_control(n->in(0)); |
|
417 if (n == NULL || n->is_top()) |
|
418 return false; // Conservative answer for dead code |
|
419 assert(n->is_CFG(), "expecting control"); |
|
420 dom_list.push(n); |
|
421 } else if (n->is_Con() || n->is_Start() || n->is_Root()) { |
|
422 only_dominating_controls = true; |
|
423 } else if (n->is_CFG()) { |
|
424 if (n->dominates(sub, nlist)) |
|
425 only_dominating_controls = true; |
|
426 else |
|
427 return false; |
|
428 } else { |
|
429 // First, own control edge. |
|
430 Node* m = n->find_exact_control(n->in(0)); |
|
431 if (m != NULL) { |
|
432 if (m->is_top()) |
|
433 return false; // Conservative answer for dead code |
|
434 dom_list.push(m); |
|
435 } |
|
436 // Now, the rest of edges. |
|
437 uint cnt = n->req(); |
|
438 for (uint i = 1; i < cnt; i++) { |
|
439 m = n->find_exact_control(n->in(i)); |
|
440 if (m == NULL || m->is_top()) |
|
441 continue; |
|
442 dom_list.push(m); |
|
443 } |
|
444 } |
|
445 } |
|
446 return only_dominating_controls; |
|
447 } |
|
448 } |
|
449 |
|
450 //---------------------detect_ptr_independence--------------------------------- |
|
451 // Used by MemNode::find_previous_store to prove that two base |
|
452 // pointers are never equal. |
|
453 // The pointers are accompanied by their associated allocations, |
|
454 // if any, which have been previously discovered by the caller. |
|
455 bool MemNode::detect_ptr_independence(Node* p1, AllocateNode* a1, |
|
456 Node* p2, AllocateNode* a2, |
|
457 PhaseTransform* phase) { |
|
458 // Attempt to prove that these two pointers cannot be aliased. |
|
459 // They may both manifestly be allocations, and they should differ. |
|
460 // Or, if they are not both allocations, they can be distinct constants. |
|
461 // Otherwise, one is an allocation and the other a pre-existing value. |
|
462 if (a1 == NULL && a2 == NULL) { // neither an allocation |
|
463 return (p1 != p2) && p1->is_Con() && p2->is_Con(); |
|
464 } else if (a1 != NULL && a2 != NULL) { // both allocations |
|
465 return (a1 != a2); |
|
466 } else if (a1 != NULL) { // one allocation a1 |
|
467 // (Note: p2->is_Con implies p2->in(0)->is_Root, which dominates.) |
|
468 return all_controls_dominate(p2, a1); |
|
469 } else { //(a2 != NULL) // one allocation a2 |
|
470 return all_controls_dominate(p1, a2); |
|
471 } |
|
472 return false; |
|
473 } |
|
474 |
|
475 |
|
476 // The logic for reordering loads and stores uses four steps: |
|
477 // (a) Walk carefully past stores and initializations which we |
|
478 // can prove are independent of this load. |
|
479 // (b) Observe that the next memory state makes an exact match |
|
480 // with self (load or store), and locate the relevant store. |
|
481 // (c) Ensure that, if we were to wire self directly to the store, |
|
482 // the optimizer would fold it up somehow. |
|
483 // (d) Do the rewiring, and return, depending on some other part of |
|
484 // the optimizer to fold up the load. |
|
485 // This routine handles steps (a) and (b). Steps (c) and (d) are |
|
486 // specific to loads and stores, so they are handled by the callers. |
|
487 // (Currently, only LoadNode::Ideal has steps (c), (d). More later.) |
|
488 // |
|
489 Node* MemNode::find_previous_store(PhaseTransform* phase) { |
|
490 Node* ctrl = in(MemNode::Control); |
|
491 Node* adr = in(MemNode::Address); |
|
492 intptr_t offset = 0; |
|
493 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); |
|
494 AllocateNode* alloc = AllocateNode::Ideal_allocation(base, phase); |
|
495 |
|
496 if (offset == Type::OffsetBot) |
|
497 return NULL; // cannot unalias unless there are precise offsets |
|
498 |
|
499 const TypeOopPtr *addr_t = adr->bottom_type()->isa_oopptr(); |
|
500 |
|
501 intptr_t size_in_bytes = memory_size(); |
|
502 |
|
503 Node* mem = in(MemNode::Memory); // start searching here... |
|
504 |
|
505 int cnt = 50; // Cycle limiter |
|
506 for (;;) { // While we can dance past unrelated stores... |
|
507 if (--cnt < 0) break; // Caught in cycle or a complicated dance? |
|
508 |
|
509 if (mem->is_Store()) { |
|
510 Node* st_adr = mem->in(MemNode::Address); |
|
511 intptr_t st_offset = 0; |
|
512 Node* st_base = AddPNode::Ideal_base_and_offset(st_adr, phase, st_offset); |
|
513 if (st_base == NULL) |
|
514 break; // inscrutable pointer |
|
515 if (st_offset != offset && st_offset != Type::OffsetBot) { |
|
516 const int MAX_STORE = BytesPerLong; |
|
517 if (st_offset >= offset + size_in_bytes || |
|
518 st_offset <= offset - MAX_STORE || |
|
519 st_offset <= offset - mem->as_Store()->memory_size()) { |
|
520 // Success: The offsets are provably independent. |
|
521 // (You may ask, why not just test st_offset != offset and be done? |
|
522 // The answer is that stores of different sizes can co-exist |
|
523 // in the same sequence of RawMem effects. We sometimes initialize |
|
524 // a whole 'tile' of array elements with a single jint or jlong.) |
|
525 mem = mem->in(MemNode::Memory); |
|
526 continue; // (a) advance through independent store memory |
|
527 } |
|
528 } |
|
529 if (st_base != base && |
|
530 detect_ptr_independence(base, alloc, |
|
531 st_base, |
|
532 AllocateNode::Ideal_allocation(st_base, phase), |
|
533 phase)) { |
|
534 // Success: The bases are provably independent. |
|
535 mem = mem->in(MemNode::Memory); |
|
536 continue; // (a) advance through independent store memory |
|
537 } |
|
538 |
|
539 // (b) At this point, if the bases or offsets do not agree, we lose, |
|
540 // since we have not managed to prove 'this' and 'mem' independent. |
|
541 if (st_base == base && st_offset == offset) { |
|
542 return mem; // let caller handle steps (c), (d) |
|
543 } |
|
544 |
|
545 } else if (mem->is_Proj() && mem->in(0)->is_Initialize()) { |
|
546 InitializeNode* st_init = mem->in(0)->as_Initialize(); |
|
547 AllocateNode* st_alloc = st_init->allocation(); |
|
548 if (st_alloc == NULL) |
|
549 break; // something degenerated |
|
550 bool known_identical = false; |
|
551 bool known_independent = false; |
|
552 if (alloc == st_alloc) |
|
553 known_identical = true; |
|
554 else if (alloc != NULL) |
|
555 known_independent = true; |
|
556 else if (all_controls_dominate(this, st_alloc)) |
|
557 known_independent = true; |
|
558 |
|
559 if (known_independent) { |
|
560 // The bases are provably independent: Either they are |
|
561 // manifestly distinct allocations, or else the control |
|
562 // of this load dominates the store's allocation. |
|
563 int alias_idx = phase->C->get_alias_index(adr_type()); |
|
564 if (alias_idx == Compile::AliasIdxRaw) { |
|
565 mem = st_alloc->in(TypeFunc::Memory); |
|
566 } else { |
|
567 mem = st_init->memory(alias_idx); |
|
568 } |
|
569 continue; // (a) advance through independent store memory |
|
570 } |
|
571 |
|
572 // (b) at this point, if we are not looking at a store initializing |
|
573 // the same allocation we are loading from, we lose. |
|
574 if (known_identical) { |
|
575 // From caller, can_see_stored_value will consult find_captured_store. |
|
576 return mem; // let caller handle steps (c), (d) |
|
577 } |
|
578 |
|
579 } else if (addr_t != NULL && addr_t->is_known_instance_field()) { |
|
580 // Can't use optimize_simple_memory_chain() since it needs PhaseGVN. |
|
581 if (mem->is_Proj() && mem->in(0)->is_Call()) { |
|
582 CallNode *call = mem->in(0)->as_Call(); |
|
583 if (!call->may_modify(addr_t, phase)) { |
|
584 mem = call->in(TypeFunc::Memory); |
|
585 continue; // (a) advance through independent call memory |
|
586 } |
|
587 } else if (mem->is_Proj() && mem->in(0)->is_MemBar()) { |
|
588 mem = mem->in(0)->in(TypeFunc::Memory); |
|
589 continue; // (a) advance through independent MemBar memory |
|
590 } else if (mem->is_ClearArray()) { |
|
591 if (ClearArrayNode::step_through(&mem, (uint)addr_t->instance_id(), phase)) { |
|
592 // (the call updated 'mem' value) |
|
593 continue; // (a) advance through independent allocation memory |
|
594 } else { |
|
595 // Can not bypass initialization of the instance |
|
596 // we are looking for. |
|
597 return mem; |
|
598 } |
|
599 } else if (mem->is_MergeMem()) { |
|
600 int alias_idx = phase->C->get_alias_index(adr_type()); |
|
601 mem = mem->as_MergeMem()->memory_at(alias_idx); |
|
602 continue; // (a) advance through independent MergeMem memory |
|
603 } |
|
604 } |
|
605 |
|
606 // Unless there is an explicit 'continue', we must bail out here, |
|
607 // because 'mem' is an inscrutable memory state (e.g., a call). |
|
608 break; |
|
609 } |
|
610 |
|
611 return NULL; // bail out |
|
612 } |
|
613 |
|
614 //----------------------calculate_adr_type------------------------------------- |
|
615 // Helper function. Notices when the given type of address hits top or bottom. |
|
616 // Also, asserts a cross-check of the type against the expected address type. |
|
617 const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_check) { |
|
618 if (t == Type::TOP) return NULL; // does not touch memory any more? |
|
619 #ifdef PRODUCT |
|
620 cross_check = NULL; |
|
621 #else |
|
622 if (!VerifyAliases || is_error_reported() || Node::in_dump()) cross_check = NULL; |
|
623 #endif |
|
624 const TypePtr* tp = t->isa_ptr(); |
|
625 if (tp == NULL) { |
|
626 assert(cross_check == NULL || cross_check == TypePtr::BOTTOM, "expected memory type must be wide"); |
|
627 return TypePtr::BOTTOM; // touches lots of memory |
|
628 } else { |
|
629 #ifdef ASSERT |
|
630 // %%%% [phh] We don't check the alias index if cross_check is |
|
631 // TypeRawPtr::BOTTOM. Needs to be investigated. |
|
632 if (cross_check != NULL && |
|
633 cross_check != TypePtr::BOTTOM && |
|
634 cross_check != TypeRawPtr::BOTTOM) { |
|
635 // Recheck the alias index, to see if it has changed (due to a bug). |
|
636 Compile* C = Compile::current(); |
|
637 assert(C->get_alias_index(cross_check) == C->get_alias_index(tp), |
|
638 "must stay in the original alias category"); |
|
639 // The type of the address must be contained in the adr_type, |
|
640 // disregarding "null"-ness. |
|
641 // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.) |
|
642 const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr(); |
|
643 assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(), |
|
644 "real address must not escape from expected memory type"); |
|
645 } |
|
646 #endif |
|
647 return tp; |
|
648 } |
|
649 } |
|
650 |
|
651 //------------------------adr_phi_is_loop_invariant---------------------------- |
|
652 // A helper function for Ideal_DU_postCCP to check if a Phi in a counted |
|
653 // loop is loop invariant. Make a quick traversal of Phi and associated |
|
654 // CastPP nodes, looking to see if they are a closed group within the loop. |
|
655 bool MemNode::adr_phi_is_loop_invariant(Node* adr_phi, Node* cast) { |
|
656 // The idea is that the phi-nest must boil down to only CastPP nodes |
|
657 // with the same data. This implies that any path into the loop already |
|
658 // includes such a CastPP, and so the original cast, whatever its input, |
|
659 // must be covered by an equivalent cast, with an earlier control input. |
|
660 ResourceMark rm; |
|
661 |
|
662 // The loop entry input of the phi should be the unique dominating |
|
663 // node for every Phi/CastPP in the loop. |
|
664 Unique_Node_List closure; |
|
665 closure.push(adr_phi->in(LoopNode::EntryControl)); |
|
666 |
|
667 // Add the phi node and the cast to the worklist. |
|
668 Unique_Node_List worklist; |
|
669 worklist.push(adr_phi); |
|
670 if( cast != NULL ){ |
|
671 if( !cast->is_ConstraintCast() ) return false; |
|
672 worklist.push(cast); |
|
673 } |
|
674 |
|
675 // Begin recursive walk of phi nodes. |
|
676 while( worklist.size() ){ |
|
677 // Take a node off the worklist |
|
678 Node *n = worklist.pop(); |
|
679 if( !closure.member(n) ){ |
|
680 // Add it to the closure. |
|
681 closure.push(n); |
|
682 // Make a sanity check to ensure we don't waste too much time here. |
|
683 if( closure.size() > 20) return false; |
|
684 // This node is OK if: |
|
685 // - it is a cast of an identical value |
|
686 // - or it is a phi node (then we add its inputs to the worklist) |
|
687 // Otherwise, the node is not OK, and we presume the cast is not invariant |
|
688 if( n->is_ConstraintCast() ){ |
|
689 worklist.push(n->in(1)); |
|
690 } else if( n->is_Phi() ) { |
|
691 for( uint i = 1; i < n->req(); i++ ) { |
|
692 worklist.push(n->in(i)); |
|
693 } |
|
694 } else { |
|
695 return false; |
|
696 } |
|
697 } |
|
698 } |
|
699 |
|
700 // Quit when the worklist is empty, and we've found no offending nodes. |
|
701 return true; |
|
702 } |
|
703 |
|
704 //------------------------------Ideal_DU_postCCP------------------------------- |
|
705 // Find any cast-away of null-ness and keep its control. Null cast-aways are |
|
706 // going away in this pass and we need to make this memory op depend on the |
|
707 // gating null check. |
|
708 Node *MemNode::Ideal_DU_postCCP( PhaseCCP *ccp ) { |
|
709 return Ideal_common_DU_postCCP(ccp, this, in(MemNode::Address)); |
|
710 } |
|
711 |
|
712 // I tried to leave the CastPP's in. This makes the graph more accurate in |
|
713 // some sense; we get to keep around the knowledge that an oop is not-null |
|
714 // after some test. Alas, the CastPP's interfere with GVN (some values are |
|
715 // the regular oop, some are the CastPP of the oop, all merge at Phi's which |
|
716 // cannot collapse, etc). This cost us 10% on SpecJVM, even when I removed |
|
717 // some of the more trivial cases in the optimizer. Removing more useless |
|
718 // Phi's started allowing Loads to illegally float above null checks. I gave |
|
719 // up on this approach. CNC 10/20/2000 |
|
720 // This static method may be called from outside MemNode (EncodePNode calls it). |
|
721 // Only the control edge of the node 'n' might be updated. |
|
722 Node *MemNode::Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ) { |
|
723 Node *skipped_cast = NULL; |
|
724 // Need a null check? Regular static accesses do not because they are |
|
725 // from constant addresses. Array ops are gated by the range check (which |
|
726 // always includes a NULL check). Just check field ops. |
|
727 if( n->in(MemNode::Control) == NULL ) { |
|
728 // Scan upwards for the highest location we can place this memory op. |
|
729 while( true ) { |
|
730 switch( adr->Opcode() ) { |
|
731 |
|
732 case Op_AddP: // No change to NULL-ness, so peek thru AddP's |
|
733 adr = adr->in(AddPNode::Base); |
|
734 continue; |
|
735 |
|
736 case Op_DecodeN: // No change to NULL-ness, so peek thru |
|
737 case Op_DecodeNKlass: |
|
738 adr = adr->in(1); |
|
739 continue; |
|
740 |
|
741 case Op_EncodeP: |
|
742 case Op_EncodePKlass: |
|
743 // EncodeP node's control edge could be set by this method |
|
744 // when EncodeP node depends on CastPP node. |
|
745 // |
|
746 // Use its control edge for memory op because EncodeP may go away |
|
747 // later when it is folded with following or preceding DecodeN node. |
|
748 if (adr->in(0) == NULL) { |
|
749 // Keep looking for cast nodes. |
|
750 adr = adr->in(1); |
|
751 continue; |
|
752 } |
|
753 ccp->hash_delete(n); |
|
754 n->set_req(MemNode::Control, adr->in(0)); |
|
755 ccp->hash_insert(n); |
|
756 return n; |
|
757 |
|
758 case Op_CastPP: |
|
759 // If the CastPP is useless, just peek on through it. |
|
760 if( ccp->type(adr) == ccp->type(adr->in(1)) ) { |
|
761 // Remember the cast that we've peeked though. If we peek |
|
762 // through more than one, then we end up remembering the highest |
|
763 // one, that is, if in a loop, the one closest to the top. |
|
764 skipped_cast = adr; |
|
765 adr = adr->in(1); |
|
766 continue; |
|
767 } |
|
768 // CastPP is going away in this pass! We need this memory op to be |
|
769 // control-dependent on the test that is guarding the CastPP. |
|
770 ccp->hash_delete(n); |
|
771 n->set_req(MemNode::Control, adr->in(0)); |
|
772 ccp->hash_insert(n); |
|
773 return n; |
|
774 |
|
775 case Op_Phi: |
|
776 // Attempt to float above a Phi to some dominating point. |
|
777 if (adr->in(0) != NULL && adr->in(0)->is_CountedLoop()) { |
|
778 // If we've already peeked through a Cast (which could have set the |
|
779 // control), we can't float above a Phi, because the skipped Cast |
|
780 // may not be loop invariant. |
|
781 if (adr_phi_is_loop_invariant(adr, skipped_cast)) { |
|
782 adr = adr->in(1); |
|
783 continue; |
|
784 } |
|
785 } |
|
786 |
|
787 // Intentional fallthrough! |
|
788 |
|
789 // No obvious dominating point. The mem op is pinned below the Phi |
|
790 // by the Phi itself. If the Phi goes away (no true value is merged) |
|
791 // then the mem op can float, but not indefinitely. It must be pinned |
|
792 // behind the controls leading to the Phi. |
|
793 case Op_CheckCastPP: |
|
794 // These usually stick around to change address type, however a |
|
795 // useless one can be elided and we still need to pick up a control edge |
|
796 if (adr->in(0) == NULL) { |
|
797 // This CheckCastPP node has NO control and is likely useless. But we |
|
798 // need check further up the ancestor chain for a control input to keep |
|
799 // the node in place. 4959717. |
|
800 skipped_cast = adr; |
|
801 adr = adr->in(1); |
|
802 continue; |
|
803 } |
|
804 ccp->hash_delete(n); |
|
805 n->set_req(MemNode::Control, adr->in(0)); |
|
806 ccp->hash_insert(n); |
|
807 return n; |
|
808 |
|
809 // List of "safe" opcodes; those that implicitly block the memory |
|
810 // op below any null check. |
|
811 case Op_CastX2P: // no null checks on native pointers |
|
812 case Op_Parm: // 'this' pointer is not null |
|
813 case Op_LoadP: // Loading from within a klass |
|
814 case Op_LoadN: // Loading from within a klass |
|
815 case Op_LoadKlass: // Loading from within a klass |
|
816 case Op_LoadNKlass: // Loading from within a klass |
|
817 case Op_ConP: // Loading from a klass |
|
818 case Op_ConN: // Loading from a klass |
|
819 case Op_ConNKlass: // Loading from a klass |
|
820 case Op_CreateEx: // Sucking up the guts of an exception oop |
|
821 case Op_Con: // Reading from TLS |
|
822 case Op_CMoveP: // CMoveP is pinned |
|
823 case Op_CMoveN: // CMoveN is pinned |
|
824 break; // No progress |
|
825 |
|
826 case Op_Proj: // Direct call to an allocation routine |
|
827 case Op_SCMemProj: // Memory state from store conditional ops |
|
828 #ifdef ASSERT |
|
829 { |
|
830 assert(adr->as_Proj()->_con == TypeFunc::Parms, "must be return value"); |
|
831 const Node* call = adr->in(0); |
|
832 if (call->is_CallJava()) { |
|
833 const CallJavaNode* call_java = call->as_CallJava(); |
|
834 const TypeTuple *r = call_java->tf()->range(); |
|
835 assert(r->cnt() > TypeFunc::Parms, "must return value"); |
|
836 const Type* ret_type = r->field_at(TypeFunc::Parms); |
|
837 assert(ret_type && ret_type->isa_ptr(), "must return pointer"); |
|
838 // We further presume that this is one of |
|
839 // new_instance_Java, new_array_Java, or |
|
840 // the like, but do not assert for this. |
|
841 } else if (call->is_Allocate()) { |
|
842 // similar case to new_instance_Java, etc. |
|
843 } else if (!call->is_CallLeaf()) { |
|
844 // Projections from fetch_oop (OSR) are allowed as well. |
|
845 ShouldNotReachHere(); |
|
846 } |
|
847 } |
|
848 #endif |
|
849 break; |
|
850 default: |
|
851 ShouldNotReachHere(); |
|
852 } |
|
853 break; |
|
854 } |
|
855 } |
|
856 |
|
857 return NULL; // No progress |
|
858 } |
|
859 |
|
860 |
|
861 //============================================================================= |
|
862 uint LoadNode::size_of() const { return sizeof(*this); } |
|
863 uint LoadNode::cmp( const Node &n ) const |
|
864 { return !Type::cmp( _type, ((LoadNode&)n)._type ); } |
|
865 const Type *LoadNode::bottom_type() const { return _type; } |
|
866 uint LoadNode::ideal_reg() const { |
|
867 return _type->ideal_reg(); |
|
868 } |
|
869 |
|
870 #ifndef PRODUCT |
|
871 void LoadNode::dump_spec(outputStream *st) const { |
|
872 MemNode::dump_spec(st); |
|
873 if( !Verbose && !WizardMode ) { |
|
874 // standard dump does this in Verbose and WizardMode |
|
875 st->print(" #"); _type->dump_on(st); |
|
876 } |
|
877 } |
|
878 #endif |
|
879 |
|
880 #ifdef ASSERT |
|
881 //----------------------------is_immutable_value------------------------------- |
|
882 // Helper function to allow a raw load without control edge for some cases |
|
883 bool LoadNode::is_immutable_value(Node* adr) { |
|
884 return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() && |
|
885 adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal && |
|
886 (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) == |
|
887 in_bytes(JavaThread::osthread_offset()))); |
|
888 } |
|
889 #endif |
|
890 |
|
891 //----------------------------LoadNode::make----------------------------------- |
|
892 // Polymorphic factory method: |
|
893 Node *LoadNode::make(PhaseGVN& gvn, Node *ctl, Node *mem, Node *adr, const TypePtr* adr_type, const Type *rt, BasicType bt, MemOrd mo) { |
|
894 Compile* C = gvn.C; |
|
895 |
|
896 // sanity check the alias category against the created node type |
|
897 assert(!(adr_type->isa_oopptr() && |
|
898 adr_type->offset() == oopDesc::klass_offset_in_bytes()), |
|
899 "use LoadKlassNode instead"); |
|
900 assert(!(adr_type->isa_aryptr() && |
|
901 adr_type->offset() == arrayOopDesc::length_offset_in_bytes()), |
|
902 "use LoadRangeNode instead"); |
|
903 // Check control edge of raw loads |
|
904 assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw || |
|
905 // oop will be recorded in oop map if load crosses safepoint |
|
906 rt->isa_oopptr() || is_immutable_value(adr), |
|
907 "raw memory operations should have control edge"); |
|
908 switch (bt) { |
|
909 case T_BOOLEAN: return new (C) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int(), mo); |
|
910 case T_BYTE: return new (C) LoadBNode (ctl, mem, adr, adr_type, rt->is_int(), mo); |
|
911 case T_INT: return new (C) LoadINode (ctl, mem, adr, adr_type, rt->is_int(), mo); |
|
912 case T_CHAR: return new (C) LoadUSNode(ctl, mem, adr, adr_type, rt->is_int(), mo); |
|
913 case T_SHORT: return new (C) LoadSNode (ctl, mem, adr, adr_type, rt->is_int(), mo); |
|
914 case T_LONG: return new (C) LoadLNode (ctl, mem, adr, adr_type, rt->is_long(), mo); |
|
915 case T_FLOAT: return new (C) LoadFNode (ctl, mem, adr, adr_type, rt, mo); |
|
916 case T_DOUBLE: return new (C) LoadDNode (ctl, mem, adr, adr_type, rt, mo); |
|
917 case T_ADDRESS: return new (C) LoadPNode (ctl, mem, adr, adr_type, rt->is_ptr(), mo); |
|
918 case T_OBJECT: |
|
919 #ifdef _LP64 |
|
920 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
|
921 Node* load = gvn.transform(new (C) LoadNNode(ctl, mem, adr, adr_type, rt->make_narrowoop(), mo)); |
|
922 return new (C) DecodeNNode(load, load->bottom_type()->make_ptr()); |
|
923 } else |
|
924 #endif |
|
925 { |
|
926 assert(!adr->bottom_type()->is_ptr_to_narrowoop() && !adr->bottom_type()->is_ptr_to_narrowklass(), "should have got back a narrow oop"); |
|
927 return new (C) LoadPNode(ctl, mem, adr, adr_type, rt->is_oopptr(), mo); |
|
928 } |
|
929 } |
|
930 ShouldNotReachHere(); |
|
931 return (LoadNode*)NULL; |
|
932 } |
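// Illustrative use (a sketch, not code from this file): a caller typically
// builds and transforms the load in one step, e.g.
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, value_type, T_INT, MemNode::unordered);
//   ld = gvn.transform(ld);
// where value_type describes the value being read and MemNode::unordered
// requests no memory-ordering constraint.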
|
933 |
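// Factory for a 64-bit load that must be performed as a single atomic read
// even on 32-bit platforms (used, for example, for volatile long fields).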
|
934 LoadLNode* LoadLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt, MemOrd mo) { |
|
935 bool require_atomic = true; |
|
936 return new (C) LoadLNode(ctl, mem, adr, adr_type, rt->is_long(), mo, require_atomic); |
|
937 } |
|
938 |
|
939 |
|
940 |
|
941 |
|
942 //------------------------------hash------------------------------------------- |
|
943 uint LoadNode::hash() const { |
|
944 // unroll addition of interesting fields |
|
945 return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address); |
|
946 } |
|
947 |
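// Returns true when a load from this slice may look through memory barriers:
// either a non-volatile field load while boxing elimination is enabled, or a
// load from a stable array (FoldStableValues).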
|
948 static bool skip_through_membars(Compile::AliasType* atp, const TypeInstPtr* tp, bool eliminate_boxing) { |
|
949 if ((atp != NULL) && (atp->index() >= Compile::AliasIdxRaw)) { |
|
950 bool non_volatile = (atp->field() != NULL) && !atp->field()->is_volatile(); |
|
951 bool is_stable_ary = FoldStableValues && |
|
952 (tp != NULL) && (tp->isa_aryptr() != NULL) && |
|
953 tp->isa_aryptr()->is_stable(); |
|
954 |
|
955 return (eliminate_boxing && non_volatile) || is_stable_ary; |
|
956 } |
|
957 |
|
958 return false; |
|
959 } |
|
960 |
|
961 //---------------------------can_see_stored_value------------------------------ |
|
962 // This routine exists to make sure this set of tests is done the same |
|
963 // everywhere. We need to make a coordinated change: first LoadNode::Ideal |
|
964 // will change the graph shape in a way which makes memory alive twice at the |
|
965 // same time (uses the Oracle model of aliasing), then some |
|
966 // LoadXNode::Identity will fold things back to the equivalence-class model |
|
967 // of aliasing. |
|
968 Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const { |
|
969 Node* ld_adr = in(MemNode::Address); |
|
970 intptr_t ld_off = 0; |
|
971 AllocateNode* ld_alloc = AllocateNode::Ideal_allocation(ld_adr, phase, ld_off); |
|
972 const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr(); |
|
973 Compile::AliasType* atp = (tp != NULL) ? phase->C->alias_type(tp) : NULL; |
|
974 // This is more general than load from boxing objects. |
|
975 if (skip_through_membars(atp, tp, phase->C->eliminate_boxing())) { |
|
976 uint alias_idx = atp->index(); |
|
977 bool final = !atp->is_rewritable(); |
|
978 Node* result = NULL; |
|
979 Node* current = st; |
|
980 // Skip through chains of MemBarNodes checking the MergeMems for |
|
981 // new states for the slice of this load. Stop once any other |
|
982 // kind of node is encountered. Loads from final memory can skip |
|
983 // through any kind of MemBar but normal loads shouldn't skip |
|
984 // through MemBarAcquire since that could allow them to move out of |
|
985 // a synchronized region. |
|
986 while (current->is_Proj()) { |
|
987 int opc = current->in(0)->Opcode(); |
|
988 if ((final && (opc == Op_MemBarAcquire || |
|
989 opc == Op_MemBarAcquireLock || |
|
990 opc == Op_LoadFence)) || |
|
991 opc == Op_MemBarRelease || |
|
992 opc == Op_StoreFence || |
|
993 opc == Op_MemBarReleaseLock || |
|
994 opc == Op_MemBarCPUOrder) { |
|
995 Node* mem = current->in(0)->in(TypeFunc::Memory); |
|
996 if (mem->is_MergeMem()) { |
|
997 MergeMemNode* merge = mem->as_MergeMem(); |
|
998 Node* new_st = merge->memory_at(alias_idx); |
|
999 if (new_st == merge->base_memory()) { |
|
1000 // Keep searching |
|
1001 current = new_st; |
|
1002 continue; |
|
1003 } |
|
1004 // Save the new memory state for the slice and fall through |
|
1005 // to exit. |
|
1006 result = new_st; |
|
1007 } |
|
1008 } |
|
1009 break; |
|
1010 } |
|
1011 if (result != NULL) { |
|
1012 st = result; |
|
1013 } |
|
1014 } |
|
1015 |
|
1016 // Loop around twice in the case Load -> Initialize -> Store. |
|
1017 // (See PhaseIterGVN::add_users_to_worklist, which knows about this case.) |
|
1018 for (int trip = 0; trip <= 1; trip++) { |
|
1019 |
|
1020 if (st->is_Store()) { |
|
1021 Node* st_adr = st->in(MemNode::Address); |
|
1022 if (!phase->eqv(st_adr, ld_adr)) { |
|
1023 // Try harder before giving up... Match raw and non-raw pointers. |
|
1024 intptr_t st_off = 0; |
|
1025 AllocateNode* alloc = AllocateNode::Ideal_allocation(st_adr, phase, st_off); |
|
1026 if (alloc == NULL) return NULL; |
|
1027 if (alloc != ld_alloc) return NULL; |
|
1028 if (ld_off != st_off) return NULL; |
|
1029 // At this point we have proven something like this setup: |
|
1030 // A = Allocate(...) |
|
1031 // L = LoadQ(, AddP(CastPP(, A.Parm),, #Off)) |
|
1032 // S = StoreQ(, AddP(, A.Parm , #Off), V) |
|
1033 // (Actually, we haven't yet proven the Q's are the same.) |
|
1034 // In other words, we are loading from a casted version of |
|
1035 // the same pointer-and-offset that we stored to. |
|
1036 // Thus, we are able to replace L by V. |
|
1037 } |
|
1038 // Now prove that we have a LoadQ matched to a StoreQ, for some Q. |
|
1039 if (store_Opcode() != st->Opcode()) |
|
1040 return NULL; |
|
1041 return st->in(MemNode::ValueIn); |
|
1042 } |
|
1043 |
|
1044 // A load from a freshly-created object always returns zero. |
|
1045 // (This can happen after LoadNode::Ideal resets the load's memory input |
|
1046 // to find_captured_store, which returned InitializeNode::zero_memory.) |
|
1047 if (st->is_Proj() && st->in(0)->is_Allocate() && |
|
1048 (st->in(0) == ld_alloc) && |
|
1049 (ld_off >= st->in(0)->as_Allocate()->minimum_header_size())) { |
|
1050 // return a zero value for the load's basic type |
|
1051 // (This is one of the few places where a generic PhaseTransform |
|
1052 // can create new nodes. Think of it as lazily manifesting |
|
1053 // virtually pre-existing constants.) |
|
1054 return phase->zerocon(memory_type()); |
|
1055 } |
|
1056 |
|
1057 // A load from an initialization barrier can match a captured store. |
|
1058 if (st->is_Proj() && st->in(0)->is_Initialize()) { |
|
1059 InitializeNode* init = st->in(0)->as_Initialize(); |
|
1060 AllocateNode* alloc = init->allocation(); |
|
1061 if ((alloc != NULL) && (alloc == ld_alloc)) { |
|
1062 // examine a captured store value |
|
1063 st = init->find_captured_store(ld_off, memory_size(), phase); |
|
1064 if (st != NULL) |
|
1065 continue; // take one more trip around |
|
1066 } |
|
1067 } |
|
1068 |
|
1069 // A load of the boxed value from the result of a valueOf() call can be replaced by the call's input parameter. |
|
1070 if (this->is_Load() && ld_adr->is_AddP() && |
|
1071 (tp != NULL) && tp->is_ptr_to_boxed_value()) { |
|
1072 intptr_t ignore = 0; |
|
1073 Node* base = AddPNode::Ideal_base_and_offset(ld_adr, phase, ignore); |
|
1074 if (base != NULL && base->is_Proj() && |
|
1075 base->as_Proj()->_con == TypeFunc::Parms && |
|
1076 base->in(0)->is_CallStaticJava() && |
|
1077 base->in(0)->as_CallStaticJava()->is_boxing_method()) { |
|
1078 return base->in(0)->in(TypeFunc::Parms); |
|
1079 } |
|
1080 } |
|
1081 |
|
1082 break; |
|
1083 } |
|
1084 |
|
1085 return NULL; |
|
1086 } |
|
1087 |
|
1088 //----------------------is_instance_field_load_with_local_phi------------------ |
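// Returns true if this load reads a known-instance or boxed-value field at a
// fixed offset and its memory input is a Phi controlled by 'ctrl', i.e. a
// candidate for reusing an existing data Phi generated for the same field.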
|
1089 bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) { |
|
1090 if( in(Memory)->is_Phi() && in(Memory)->in(0) == ctrl && |
|
1091 in(Address)->is_AddP() ) { |
|
1092 const TypeOopPtr* t_oop = in(Address)->bottom_type()->isa_oopptr(); |
|
1093 // Only instances and boxed values. |
|
1094 if( t_oop != NULL && |
|
1095 (t_oop->is_ptr_to_boxed_value() || |
|
1096 t_oop->is_known_instance_field()) && |
|
1097 t_oop->offset() != Type::OffsetBot && |
|
1098 t_oop->offset() != Type::OffsetTop) { |
|
1099 return true; |
|
1100 } |
|
1101 } |
|
1102 return false; |
|
1103 } |
|
1104 |
|
1105 //------------------------------Identity--------------------------------------- |
|
1106 // Loads are identity if previous store is to same address |
|
1107 Node *LoadNode::Identity( PhaseTransform *phase ) { |
|
1108 // If the previous store-maker is the right kind of Store, and the store is |
|
1109 // to the same address, then we are equal to the value stored. |
|
1110 Node* mem = in(Memory); |
|
1111 Node* value = can_see_stored_value(mem, phase); |
|
1112 if( value ) { |
|
1113 // byte, short & char stores truncate naturally. |
|
1114 // A load has to load the truncated value which requires |
|
1115 // some sort of masking operation and that requires an |
|
1116 // Ideal call instead of an Identity call. |
|
1117 if (memory_size() < BytesPerInt) { |
|
1118 // If the input to the store does not fit with the load's result type, |
|
1119 // it must be truncated via an Ideal call. |
|
1120 if (!phase->type(value)->higher_equal(phase->type(this))) |
|
1121 return this; |
|
1122 } |
|
1123 // (This works even when value is a Con, but LoadNode::Value |
|
1124 // usually runs first, producing the singleton type of the Con.) |
|
1125 return value; |
|
1126 } |
|
1127 |
|
1128 // Search for an existing data phi which was generated before for the same |
|
1129 // instance's field to avoid infinite generation of phis in a loop. |
|
1130 Node *region = mem->in(0); |
|
1131 if (is_instance_field_load_with_local_phi(region)) { |
|
1132 const TypeOopPtr *addr_t = in(Address)->bottom_type()->isa_oopptr(); |
|
1133 int this_index = phase->C->get_alias_index(addr_t); |
|
1134 int this_offset = addr_t->offset(); |
|
1135 int this_iid = addr_t->instance_id(); |
|
1136 if (!addr_t->is_known_instance() && |
|
1137 addr_t->is_ptr_to_boxed_value()) { |
|
1138 // Use _idx of address base (could be Phi node) for boxed values. |
|
1139 intptr_t ignore = 0; |
|
1140 Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore); |
|
1141 this_iid = base->_idx; |
|
1142 } |
|
1143 const Type* this_type = bottom_type(); |
|
1144 for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) { |
|
1145 Node* phi = region->fast_out(i); |
|
1146 if (phi->is_Phi() && phi != mem && |
|
1147 phi->as_Phi()->is_same_inst_field(this_type, this_iid, this_index, this_offset)) { |
|
1148 return phi; |
|
1149 } |
|
1150 } |
|
1151 } |
|
1152 |
|
1153 return this; |
|
1154 } |
|
1155 |
|
1156 // We're loading from an object which has autobox behaviour. |
|
1157 // If this object is the result of a valueOf call we'll have a phi |
|
1158 // merging a newly allocated object and a load from the cache. |
|
1159 // We want to replace this load with the original incoming |
|
1160 // argument to the valueOf call. |
|
1161 Node* LoadNode::eliminate_autobox(PhaseGVN* phase) { |
|
1162 assert(phase->C->eliminate_boxing(), "sanity"); |
|
1163 intptr_t ignore = 0; |
|
1164 Node* base = AddPNode::Ideal_base_and_offset(in(Address), phase, ignore); |
|
1165 if ((base == NULL) || base->is_Phi()) { |
|
1166 // Push the loads from the phi that comes from valueOf up |
|
1167 // through it to allow elimination of the loads and the recovery |
|
1168 // of the original value. It is done in split_through_phi(). |
|
1169 return NULL; |
|
1170 } else if (base->is_Load() || |
|
1171 base->is_DecodeN() && base->in(1)->is_Load()) { |
|
1172 // Eliminate the load of boxed value for integer types from the cache |
|
1173 // array by deriving the value from the index into the array. |
|
1174 // Capture the offset of the load and then reverse the computation. |
|
1175 |
|
1176 // Get LoadN node which loads a boxing object from 'cache' array. |
|
1177 if (base->is_DecodeN()) { |
|
1178 base = base->in(1); |
|
1179 } |
|
1180 if (!base->in(Address)->is_AddP()) { |
|
1181 return NULL; // Complex address |
|
1182 } |
|
1183 AddPNode* address = base->in(Address)->as_AddP(); |
|
1184 Node* cache_base = address->in(AddPNode::Base); |
|
1185 if ((cache_base != NULL) && cache_base->is_DecodeN()) { |
|
1186 // Get ConP node which is static 'cache' field. |
|
1187 cache_base = cache_base->in(1); |
|
1188 } |
|
1189 if ((cache_base != NULL) && cache_base->is_Con()) { |
|
1190 const TypeAryPtr* base_type = cache_base->bottom_type()->isa_aryptr(); |
|
1191 if ((base_type != NULL) && base_type->is_autobox_cache()) { |
|
1192 Node* elements[4]; |
|
1193 int shift = exact_log2(type2aelembytes(T_OBJECT)); |
|
1194 int count = address->unpack_offsets(elements, ARRAY_SIZE(elements)); |
|
1195 if ((count > 0) && elements[0]->is_Con() && |
|
1196 ((count == 1) || |
|
1197 (count == 2) && elements[1]->Opcode() == Op_LShiftX && |
|
1198 elements[1]->in(2) == phase->intcon(shift))) { |
|
1199 ciObjArray* array = base_type->const_oop()->as_obj_array(); |
|
1200 // Fetch the box object cache[0] at the base of the array and get its value |
|
1201 ciInstance* box = array->obj_at(0)->as_instance(); |
|
1202 ciInstanceKlass* ik = box->klass()->as_instance_klass(); |
|
1203 assert(ik->is_box_klass(), "sanity"); |
|
1204 assert(ik->nof_nonstatic_fields() == 1, "change following code"); |
|
1205 if (ik->nof_nonstatic_fields() == 1) { |
|
1206 // This should be true; nonstatic_field_at requires calling |
|
1207 // nof_nonstatic_fields, so check it anyway. |
|
1208 ciConstant c = box->field_value(ik->nonstatic_field_at(0)); |
|
1209 BasicType bt = c.basic_type(); |
|
1210 // Only integer types have boxing cache. |
|
1211 assert(bt == T_BOOLEAN || bt == T_CHAR || |
|
1212 bt == T_BYTE || bt == T_SHORT || |
|
1213 bt == T_INT || bt == T_LONG, err_msg_res("wrong type = %s", type2name(bt))); |
|
1214 jlong cache_low = (bt == T_LONG) ? c.as_long() : c.as_int(); |
|
1215 if (cache_low != (int)cache_low) { |
|
1216 return NULL; // should not happen since cache is array indexed by value |
|
1217 } |
|
1218 jlong offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT) - (cache_low << shift); |
|
1219 if (offset != (int)offset) { |
|
1220 return NULL; // should not happen since cache is array indexed by value |
|
1221 } |
|
1222 // Add up all the offsets making up the address of the load |
|
1223 Node* result = elements[0]; |
|
1224 for (int i = 1; i < count; i++) { |
|
1225 result = phase->transform(new (phase->C) AddXNode(result, elements[i])); |
|
1226 } |
|
1227 // Remove the constant offset from the address and then |
|
1228 result = phase->transform(new (phase->C) AddXNode(result, phase->MakeConX(-(int)offset))); |
|
1229 // remove the scaling of the offset to recover the original index. |
|
1230 if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) { |
|
1231 // Peel the shift off directly but wrap it in a dummy node |
|
1232 // since Ideal can't return existing nodes |
|
1233 result = new (phase->C) RShiftXNode(result->in(1), phase->intcon(0)); |
|
1234 } else if (result->is_Add() && result->in(2)->is_Con() && |
|
1235 result->in(1)->Opcode() == Op_LShiftX && |
|
1236 result->in(1)->in(2) == phase->intcon(shift)) { |
|
1237 // We can't do general optimization: ((X<<Z) + Y) >> Z ==> X + (Y>>Z) |
|
1238 // but for boxing cache access we know that X<<Z will not overflow |
|
1239 // (there is a range check) so we do this optimization by hand here. |
|
1240 Node* add_con = new (phase->C) RShiftXNode(result->in(2), phase->intcon(shift)); |
|
1241 result = new (phase->C) AddXNode(result->in(1)->in(1), phase->transform(add_con)); |
|
1242 } else { |
|
1243 result = new (phase->C) RShiftXNode(result, phase->intcon(shift)); |
|
1244 } |
|
1245 #ifdef _LP64 |
|
1246 if (bt != T_LONG) { |
|
1247 result = new (phase->C) ConvL2INode(phase->transform(result)); |
|
1248 } |
|
1249 #else |
|
1250 if (bt == T_LONG) { |
|
1251 result = new (phase->C) ConvI2LNode(phase->transform(result)); |
|
1252 } |
|
1253 #endif |
|
1254 return result; |
|
1255 } |
|
1256 } |
|
1257 } |
|
1258 } |
|
1259 } |
|
1260 return NULL; |
|
1261 } |
|
1262 |
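// A memory or base Phi is considered stable when it has a region and none of
// its region or data inputs are NULL or TOP; otherwise the graph is still in
// flux and splitting the load through the Phi should be postponed.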
|
1263 static bool stable_phi(PhiNode* phi, PhaseGVN *phase) { |
|
1264 Node* region = phi->in(0); |
|
1265 if (region == NULL) { |
|
1266 return false; // Wait stable graph |
|
1267 } |
|
1268 uint cnt = phi->req(); |
|
1269 for (uint i = 1; i < cnt; i++) { |
|
1270 Node* rc = region->in(i); |
|
1271 if (rc == NULL || phase->type(rc) == Type::TOP) |
|
1272 return false; // Wait for a stable graph |
|
1273 Node* in = phi->in(i); |
|
1274 if (in == NULL || phase->type(in) == Type::TOP) |
|
1275 return false; // Wait for a stable graph |
|
1276 } |
|
1277 return true; |
|
1278 } |
|
1279 //------------------------------split_through_phi------------------------------ |
|
1280 // Split instance or boxed field load through Phi. |
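// Illustrative sketch (editorial comment, not in the original source): when the
// memory input (or the address base) is a Phi, e.g.
//   Load(mem = Phi(m1, m2), adr)
// the load is cloned per predecessor and replaced by
//   Phi(Load(m1, adr), Load(m2, adr))
// so that each clone can fold against the store reaching it on that path.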
|
1281 Node *LoadNode::split_through_phi(PhaseGVN *phase) { |
|
1282 Node* mem = in(Memory); |
|
1283 Node* address = in(Address); |
|
1284 const TypeOopPtr *t_oop = phase->type(address)->isa_oopptr(); |
|
1285 |
|
1286 assert((t_oop != NULL) && |
|
1287 (t_oop->is_known_instance_field() || |
|
1288 t_oop->is_ptr_to_boxed_value()), "invalide conditions"); |
|
1289 |
|
1290 Compile* C = phase->C; |
|
1291 intptr_t ignore = 0; |
|
1292 Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); |
|
1293 bool base_is_phi = (base != NULL) && base->is_Phi(); |
|
1294 bool load_boxed_values = t_oop->is_ptr_to_boxed_value() && C->aggressive_unboxing() && |
|
1295 (base != NULL) && (base == address->in(AddPNode::Base)) && |
|
1296 phase->type(base)->higher_equal(TypePtr::NOTNULL); |
|
1297 |
|
1298 if (!((mem->is_Phi() || base_is_phi) && |
|
1299 (load_boxed_values || t_oop->is_known_instance_field()))) { |
|
1300 return NULL; // memory is not Phi |
|
1301 } |
|
1302 |
|
1303 if (mem->is_Phi()) { |
|
1304 if (!stable_phi(mem->as_Phi(), phase)) { |
|
1305 return NULL; // Wait for a stable graph |
|
1306 } |
|
1307 uint cnt = mem->req(); |
|
1308 // Check for loop invariant memory. |
|
1309 if (cnt == 3) { |
|
1310 for (uint i = 1; i < cnt; i++) { |
|
1311 Node* in = mem->in(i); |
|
1312 Node* m = optimize_memory_chain(in, t_oop, this, phase); |
|
1313 if (m == mem) { |
|
1314 set_req(Memory, mem->in(cnt - i)); |
|
1315 return this; // made change |
|
1316 } |
|
1317 } |
|
1318 } |
|
1319 } |
|
1320 if (base_is_phi) { |
|
1321 if (!stable_phi(base->as_Phi(), phase)) { |
|
1322 return NULL; // Wait for a stable graph |
|
1323 } |
|
1324 uint cnt = base->req(); |
|
1325 // Check for loop invariant memory. |
|
1326 if (cnt == 3) { |
|
1327 for (uint i = 1; i < cnt; i++) { |
|
1328 if (base->in(i) == base) { |
|
1329 return NULL; // Wait for a stable graph |
|
1330 } |
|
1331 } |
|
1332 } |
|
1333 } |
|
1334 |
|
1335 bool load_boxed_phi = load_boxed_values && base_is_phi && (base->in(0) == mem->in(0)); |
|
1336 |
|
1337 // Split through Phi (see original code in loopopts.cpp). |
|
1338 assert(C->have_alias_type(t_oop), "instance should have alias type"); |
|
1339 |
|
1340 // Do nothing here if Identity will find a value |
|
1341 // (to avoid infinite chain of value phis generation). |
|
1342 if (!phase->eqv(this, this->Identity(phase))) |
|
1343 return NULL; |
|
1344 |
|
1345 // Select Region to split through. |
|
1346 Node* region; |
|
1347 if (!base_is_phi) { |
|
1348 assert(mem->is_Phi(), "sanity"); |
|
1349 region = mem->in(0); |
|
1350 // Skip if the region dominates some control edge of the address. |
|
1351 if (!MemNode::all_controls_dominate(address, region)) |
|
1352 return NULL; |
|
1353 } else if (!mem->is_Phi()) { |
|
1354 assert(base_is_phi, "sanity"); |
|
1355 region = base->in(0); |
|
1356 // Skip if the region dominates some control edge of the memory. |
|
1357 if (!MemNode::all_controls_dominate(mem, region)) |
|
1358 return NULL; |
|
1359 } else if (base->in(0) != mem->in(0)) { |
|
1360 assert(base_is_phi && mem->is_Phi(), "sanity"); |
|
1361 if (MemNode::all_controls_dominate(mem, base->in(0))) { |
|
1362 region = base->in(0); |
|
1363 } else if (MemNode::all_controls_dominate(address, mem->in(0))) { |
|
1364 region = mem->in(0); |
|
1365 } else { |
|
1366 return NULL; // complex graph |
|
1367 } |
|
1368 } else { |
|
1369 assert(base->in(0) == mem->in(0), "sanity"); |
|
1370 region = mem->in(0); |
|
1371 } |
|
1372 |
|
1373 const Type* this_type = this->bottom_type(); |
|
1374 int this_index = C->get_alias_index(t_oop); |
|
1375 int this_offset = t_oop->offset(); |
|
1376 int this_iid = t_oop->instance_id(); |
|
1377 if (!t_oop->is_known_instance() && load_boxed_values) { |
|
1378 // Use _idx of address base for boxed values. |
|
1379 this_iid = base->_idx; |
|
1380 } |
|
1381 PhaseIterGVN* igvn = phase->is_IterGVN(); |
|
1382 Node* phi = new (C) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); |
|
1383 for (uint i = 1; i < region->req(); i++) { |
|
1384 Node* x; |
|
1385 Node* the_clone = NULL; |
|
1386 if (region->in(i) == C->top()) { |
|
1387 x = C->top(); // Dead path? Use a dead data op |
|
1388 } else { |
|
1389 x = this->clone(); // Else clone up the data op |
|
1390 the_clone = x; // Remember for possible deletion. |
|
1391 // Alter data node to use pre-phi inputs |
|
1392 if (this->in(0) == region) { |
|
1393 x->set_req(0, region->in(i)); |
|
1394 } else { |
|
1395 x->set_req(0, NULL); |
|
1396 } |
|
1397 if (mem->is_Phi() && (mem->in(0) == region)) { |
|
1398 x->set_req(Memory, mem->in(i)); // Use pre-Phi input for the clone. |
|
1399 } |
|
1400 if (address->is_Phi() && address->in(0) == region) { |
|
1401 x->set_req(Address, address->in(i)); // Use pre-Phi input for the clone |
|
1402 } |
|
1403 if (base_is_phi && (base->in(0) == region)) { |
|
1404 Node* base_x = base->in(i); // Clone address for loads from boxed objects. |
|
1405 Node* adr_x = phase->transform(new (C) AddPNode(base_x,base_x,address->in(AddPNode::Offset))); |
|
1406 x->set_req(Address, adr_x); |
|
1407 } |
|
1408 } |
|
1409 // Check for a 'win' on some paths |
|
1410 const Type *t = x->Value(igvn); |
|
1411 |
|
1412 bool singleton = t->singleton(); |
|
1413 |
|
1414 // See comments in PhaseIdealLoop::split_thru_phi(). |
|
1415 if (singleton && t == Type::TOP) { |
|
1416 singleton &= region->is_Loop() && (i != LoopNode::EntryControl); |
|
1417 } |
|
1418 |
|
1419 if (singleton) { |
|
1420 x = igvn->makecon(t); |
|
1421 } else { |
|
1422 // We now call Identity to try to simplify the cloned node. |
|
1423 // Note that some Identity methods call phase->type(this). |
|
1424 // Make sure that the type array is big enough for |
|
1425 // our new node, even though we may throw the node away. |
|
1426 // (This tweaking with igvn only works because x is a new node.) |
|
1427 igvn->set_type(x, t); |
|
1428 // If x is a TypeNode, capture any more-precise type permanently into Node |
|
1429 // otherwise it will not be updated during igvn->transform since |
|
1430 // igvn->type(x) is set to x->Value() already. |
|
1431 x->raise_bottom_type(t); |
|
1432 Node *y = x->Identity(igvn); |
|
1433 if (y != x) { |
|
1434 x = y; |
|
1435 } else { |
|
1436 y = igvn->hash_find_insert(x); |
|
1437 if (y) { |
|
1438 x = y; |
|
1439 } else { |
|
1440 // Else x is a new node we are keeping |
|
1441 // We do not need register_new_node_with_optimizer |
|
1442 // because set_type has already been called. |
|
1443 igvn->_worklist.push(x); |
|
1444 } |
|
1445 } |
|
1446 } |
|
1447 if (x != the_clone && the_clone != NULL) { |
|
1448 igvn->remove_dead_node(the_clone); |
|
1449 } |
|
1450 phi->set_req(i, x); |
|
1451 } |
|
1452 // Record Phi |
|
1453 igvn->register_new_node_with_optimizer(phi); |
|
1454 return phi; |
|
1455 } |
|
1456 |
|
1457 //------------------------------Ideal------------------------------------------ |
|
1458 // If the load is from Field memory and the pointer is non-null, we can |
|
1459 // zero out the control input. |
|
1460 // If the offset is constant and the base is an object allocation, |
|
1461 // try to hook me up to the exact initializing store. |
|
1462 Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
1463 Node* p = MemNode::Ideal_common(phase, can_reshape); |
|
1464 if (p) return (p == NodeSentinel) ? NULL : p; |
|
1465 |
|
1466 Node* ctrl = in(MemNode::Control); |
|
1467 Node* address = in(MemNode::Address); |
|
1468 |
|
1469 // Skip up past a SafePoint control. Cannot do this for Stores because |
|
1470 // pointer stores & cardmarks must stay on the same side of a SafePoint. |
|
1471 if( ctrl != NULL && ctrl->Opcode() == Op_SafePoint && |
|
1472 phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw ) { |
|
1473 ctrl = ctrl->in(0); |
|
1474 set_req(MemNode::Control,ctrl); |
|
1475 } |
|
1476 |
|
1477 intptr_t ignore = 0; |
|
1478 Node* base = AddPNode::Ideal_base_and_offset(address, phase, ignore); |
|
1479 if (base != NULL |
|
1480 && phase->C->get_alias_index(phase->type(address)->is_ptr()) != Compile::AliasIdxRaw) { |
|
1481 // Check for useless control edge in some common special cases |
|
1482 if (in(MemNode::Control) != NULL |
|
1483 && phase->type(base)->higher_equal(TypePtr::NOTNULL) |
|
1484 && all_controls_dominate(base, phase->C->start())) { |
|
1485 // A method-invariant, non-null address (constant or 'this' argument). |
|
1486 set_req(MemNode::Control, NULL); |
|
1487 } |
|
1488 } |
|
1489 |
|
1490 Node* mem = in(MemNode::Memory); |
|
1491 const TypePtr *addr_t = phase->type(address)->isa_ptr(); |
|
1492 |
|
1493 if (can_reshape && (addr_t != NULL)) { |
|
1494 // try to optimize our memory input |
|
1495 Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, this, phase); |
|
1496 if (opt_mem != mem) { |
|
1497 set_req(MemNode::Memory, opt_mem); |
|
1498 if (phase->type( opt_mem ) == Type::TOP) return NULL; |
|
1499 return this; |
|
1500 } |
|
1501 const TypeOopPtr *t_oop = addr_t->isa_oopptr(); |
|
1502 if ((t_oop != NULL) && |
|
1503 (t_oop->is_known_instance_field() || |
|
1504 t_oop->is_ptr_to_boxed_value())) { |
|
1505 PhaseIterGVN *igvn = phase->is_IterGVN(); |
|
1506 if (igvn != NULL && igvn->_worklist.member(opt_mem)) { |
|
1507 // Delay this transformation until memory Phi is processed. |
|
1508 phase->is_IterGVN()->_worklist.push(this); |
|
1509 return NULL; |
|
1510 } |
|
1511 // Split instance field load through Phi. |
|
1512 Node* result = split_through_phi(phase); |
|
1513 if (result != NULL) return result; |
|
1514 |
|
1515 if (t_oop->is_ptr_to_boxed_value()) { |
|
1516 Node* result = eliminate_autobox(phase); |
|
1517 if (result != NULL) return result; |
|
1518 } |
|
1519 } |
|
1520 } |
|
1521 |
|
1522 // Check for prior store with a different base or offset; make Load |
|
1523 // independent. Skip through any number of them. Bail out if the stores |
|
1524 // are in an endless dead cycle and report no progress. This is a key |
|
1525 // transform for Reflection. However, if after skipping through the Stores |
|
1526 // we can't then fold up against a prior store, do NOT do the transform, as |
|
1527 // this amounts to using the 'Oracle' model of aliasing. It leaves the same |
|
1528 // array memory alive twice: once for the hoisted Load and again after the |
|
1529 // bypassed Store. This situation only works if EVERYBODY who does |
|
1530 // anti-dependence work knows how to bypass. I.e. we need all |
|
1531 // anti-dependence checks to ask the same Oracle. Right now, that Oracle is |
|
1532 // the alias index stuff. So instead, peek through Stores and IFF we can |
|
1533 // fold up, do so. |
|
1534 Node* prev_mem = find_previous_store(phase); |
|
1535 // Steps (a), (b): Walk past independent stores to find an exact match. |
|
1536 if (prev_mem != NULL && prev_mem != in(MemNode::Memory)) { |
|
1537 // (c) See if we can fold up on the spot, but don't fold up here. |
|
1538 // Fold-up might require truncation (for LoadB/LoadS/LoadUS) or |
|
1539 // just return a prior value, which is done by Identity calls. |
|
1540 if (can_see_stored_value(prev_mem, phase)) { |
|
1541 // Make ready for step (d): |
|
1542 set_req(MemNode::Memory, prev_mem); |
|
1543 return this; |
|
1544 } |
|
1545 } |
|
1546 |
|
1547 return NULL; // No further progress |
|
1548 } |
|
1549 |
|
1550 // Helper to recognize certain Klass fields which are invariant across |
|
1551 // some group of array types (e.g., int[] or all T[] where T < Object). |
|
1552 const Type* |
|
1553 LoadNode::load_array_final_field(const TypeKlassPtr *tkls, |
|
1554 ciKlass* klass) const { |
|
1555 if (tkls->offset() == in_bytes(Klass::modifier_flags_offset())) { |
|
1556 // The field is Klass::_modifier_flags. Return its (constant) value. |
|
1557 // (Folds up the 2nd indirection in aClassConstant.getModifiers().) |
|
1558 assert(this->Opcode() == Op_LoadI, "must load an int from _modifier_flags"); |
|
1559 return TypeInt::make(klass->modifier_flags()); |
|
1560 } |
|
1561 if (tkls->offset() == in_bytes(Klass::access_flags_offset())) { |
|
1562 // The field is Klass::_access_flags. Return its (constant) value. |
|
1563 // (Folds up the 2nd indirection in Reflection.getClassAccessFlags(aClassConstant).) |
|
1564 assert(this->Opcode() == Op_LoadI, "must load an int from _access_flags"); |
|
1565 return TypeInt::make(klass->access_flags()); |
|
1566 } |
|
1567 if (tkls->offset() == in_bytes(Klass::layout_helper_offset())) { |
|
1568 // The field is Klass::_layout_helper. Return its constant value if known. |
|
1569 assert(this->Opcode() == Op_LoadI, "must load an int from _layout_helper"); |
|
1570 return TypeInt::make(klass->layout_helper()); |
|
1571 } |
|
1572 |
|
1573 // No match. |
|
1574 return NULL; |
|
1575 } |
|
1576 |
|
1577 // Try to constant-fold a stable array element. |
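// Editorial note (illustrative, not from the original source): for a constant
// array marked stable, a load at a constant offset past the header can be
// replaced by the constant type of the element stored there; multi-dimensional
// stable arrays propagate stability to the nested array element type.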
|
1578 static const Type* fold_stable_ary_elem(const TypeAryPtr* ary, int off, BasicType loadbt) { |
|
1579 assert(ary->const_oop(), "array should be constant"); |
|
1580 assert(ary->is_stable(), "array should be stable"); |
|
1581 |
|
1582 // Decode the results of GraphKit::array_element_address. |
|
1583 ciArray* aobj = ary->const_oop()->as_array(); |
|
1584 ciConstant con = aobj->element_value_by_offset(off); |
|
1585 |
|
1586 if (con.basic_type() != T_ILLEGAL && !con.is_null_or_zero()) { |
|
1587 const Type* con_type = Type::make_from_constant(con); |
|
1588 if (con_type != NULL) { |
|
1589 if (con_type->isa_aryptr()) { |
|
1590 // Join with the array element type, in case it is also stable. |
|
1591 int dim = ary->stable_dimension(); |
|
1592 con_type = con_type->is_aryptr()->cast_to_stable(true, dim-1); |
|
1593 } |
|
1594 if (loadbt == T_NARROWOOP && con_type->isa_oopptr()) { |
|
1595 con_type = con_type->make_narrowoop(); |
|
1596 } |
|
1597 #ifndef PRODUCT |
|
1598 if (TraceIterativeGVN) { |
|
1599 tty->print("FoldStableValues: array element [off=%d]: con_type=", off); |
|
1600 con_type->dump(); tty->cr(); |
|
1601 } |
|
1602 #endif //PRODUCT |
|
1603 return con_type; |
|
1604 } |
|
1605 } |
|
1606 return NULL; |
|
1607 } |
|
1608 |
|
1609 //------------------------------Value----------------------------------------- |
|
1610 const Type *LoadNode::Value( PhaseTransform *phase ) const { |
|
1611 // Either input is TOP ==> the result is TOP |
|
1612 Node* mem = in(MemNode::Memory); |
|
1613 const Type *t1 = phase->type(mem); |
|
1614 if (t1 == Type::TOP) return Type::TOP; |
|
1615 Node* adr = in(MemNode::Address); |
|
1616 const TypePtr* tp = phase->type(adr)->isa_ptr(); |
|
1617 if (tp == NULL || tp->empty()) return Type::TOP; |
|
1618 int off = tp->offset(); |
|
1619 assert(off != Type::OffsetTop, "case covered by TypePtr::empty"); |
|
1620 Compile* C = phase->C; |
|
1621 |
|
1622 // Try to guess loaded type from pointer type |
|
1623 if (tp->isa_aryptr()) { |
|
1624 const TypeAryPtr* ary = tp->is_aryptr(); |
|
1625 const Type* t = ary->elem(); |
|
1626 |
|
1627 // Determine whether the reference is beyond the header or not, by comparing |
|
1628 // the offset against the offset of the start of the array's data. |
|
1629 // Different array types begin at slightly different offsets (12 vs. 16). |
|
1630 // We choose T_BYTE as an example base type that is least restrictive |
|
1631 // as to alignment, which will therefore produce the smallest |
|
1632 // possible base offset. |
|
1633 const int min_base_off = arrayOopDesc::base_offset_in_bytes(T_BYTE); |
|
1634 const bool off_beyond_header = ((uint)off >= (uint)min_base_off); |
|
1635 |
|
1636 // Try to constant-fold a stable array element. |
|
1637 if (FoldStableValues && ary->is_stable() && ary->const_oop() != NULL) { |
|
1638 // Make sure the reference is not into the header and the offset is constant |
|
1639 if (off_beyond_header && adr->is_AddP() && off != Type::OffsetBot) { |
|
1640 const Type* con_type = fold_stable_ary_elem(ary, off, memory_type()); |
|
1641 if (con_type != NULL) { |
|
1642 return con_type; |
|
1643 } |
|
1644 } |
|
1645 } |
|
1646 |
|
1647 // Don't do this for integer types. There is only potential profit if |
|
1648 // the element type t is lower than _type; that is, for int types, if _type is |
|
1649 // more restrictive than t. This only happens here if one is short and the other |
|
1650 // char (both 16 bits), and in those cases we've made an intentional decision |
|
1651 // to use one kind of load over the other. See AndINode::Ideal and 4965907. |
|
1652 // Also, do not try to narrow the type for a LoadKlass, regardless of offset. |
|
1653 // |
|
1654 // Yes, it is possible to encounter an expression like (LoadKlass p1:(AddP x x 8)) |
|
1655 // where the _gvn.type of the AddP is wider than 8. This occurs when an earlier |
|
1656 // copy p0 of (AddP x x 8) has been proven equal to p1, and the p0 has been |
|
1657 // subsumed by p1. If p1 is on the worklist but has not yet been re-transformed, |
|
1658 // it is possible that p1 will have a type like Foo*[int+]:NotNull*+any. |
|
1659 // In fact, that could have been the original type of p1, and p1 could have |
|
1660 // had an original form like p1:(AddP x x (LShiftL quux 3)), where the |
|
1661 // expression (LShiftL quux 3) independently optimized to the constant 8. |
|
1662 if ((t->isa_int() == NULL) && (t->isa_long() == NULL) |
|
1663 && (_type->isa_vect() == NULL) |
|
1664 && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) { |
|
1665 // t might actually be lower than _type, if _type is a unique |
|
1666 // concrete subclass of abstract class t. |
|
1667 if (off_beyond_header) { // is the offset beyond the header? |
|
1668 const Type* jt = t->join_speculative(_type); |
|
1669 // In any case, do not allow the join, per se, to empty out the type. |
|
1670 if (jt->empty() && !t->empty()) { |
|
1671 // This can happen if an interface-typed array narrows to a class type. |
|
1672 jt = _type; |
|
1673 } |
|
1674 #ifdef ASSERT |
|
1675 if (phase->C->eliminate_boxing() && adr->is_AddP()) { |
|
1676 // The pointers in the autobox arrays are always non-null |
|
1677 Node* base = adr->in(AddPNode::Base); |
|
1678 if ((base != NULL) && base->is_DecodeN()) { |
|
1679 // Get LoadN node which loads IntegerCache.cache field |
|
1680 base = base->in(1); |
|
1681 } |
|
1682 if ((base != NULL) && base->is_Con()) { |
|
1683 const TypeAryPtr* base_type = base->bottom_type()->isa_aryptr(); |
|
1684 if ((base_type != NULL) && base_type->is_autobox_cache()) { |
|
1685 // It could be narrow oop |
|
1686 assert(jt->make_ptr()->ptr() == TypePtr::NotNull,"sanity"); |
|
1687 } |
|
1688 } |
|
1689 } |
|
1690 #endif |
|
1691 return jt; |
|
1692 } |
|
1693 } |
|
1694 } else if (tp->base() == Type::InstPtr) { |
|
1695 ciEnv* env = C->env(); |
|
1696 const TypeInstPtr* tinst = tp->is_instptr(); |
|
1697 ciKlass* klass = tinst->klass(); |
|
1698 assert( off != Type::OffsetBot || |
|
1699 // arrays can be cast to Objects |
|
1700 tp->is_oopptr()->klass()->is_java_lang_Object() || |
|
1701 // unsafe field access may not have a constant offset |
|
1702 C->has_unsafe_access(), |
|
1703 "Field accesses must be precise" ); |
|
1704 // For oop loads, we expect the _type to be precise |
|
1705 if (klass == env->String_klass() && |
|
1706 adr->is_AddP() && off != Type::OffsetBot) { |
|
1707 // For constant Strings treat the final fields as compile time constants. |
|
1708 Node* base = adr->in(AddPNode::Base); |
|
1709 const TypeOopPtr* t = phase->type(base)->isa_oopptr(); |
|
1710 if (t != NULL && t->singleton()) { |
|
1711 ciField* field = env->String_klass()->get_field_by_offset(off, false); |
|
1712 if (field != NULL && field->is_final()) { |
|
1713 ciObject* string = t->const_oop(); |
|
1714 ciConstant constant = string->as_instance()->field_value(field); |
|
1715 if (constant.basic_type() == T_INT) { |
|
1716 return TypeInt::make(constant.as_int()); |
|
1717 } else if (constant.basic_type() == T_ARRAY) { |
|
1718 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
|
1719 return TypeNarrowOop::make_from_constant(constant.as_object(), true); |
|
1720 } else { |
|
1721 return TypeOopPtr::make_from_constant(constant.as_object(), true); |
|
1722 } |
|
1723 } |
|
1724 } |
|
1725 } |
|
1726 } |
|
1727 // Optimizations for constant objects |
|
1728 ciObject* const_oop = tinst->const_oop(); |
|
1729 if (const_oop != NULL) { |
|
1730 // For constant Boxed value treat the target field as a compile time constant. |
|
1731 if (tinst->is_ptr_to_boxed_value()) { |
|
1732 return tinst->get_const_boxed_value(); |
|
1733 } else |
|
1734 // For constant CallSites treat the target field as a compile time constant. |
|
1735 if (const_oop->is_call_site()) { |
|
1736 ciCallSite* call_site = const_oop->as_call_site(); |
|
1737 ciField* field = call_site->klass()->as_instance_klass()->get_field_by_offset(off, /*is_static=*/ false); |
|
1738 if (field != NULL && field->is_call_site_target()) { |
|
1739 ciMethodHandle* target = call_site->get_target(); |
|
1740 if (target != NULL) { // just in case |
|
1741 ciConstant constant(T_OBJECT, target); |
|
1742 const Type* t; |
|
1743 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
|
1744 t = TypeNarrowOop::make_from_constant(constant.as_object(), true); |
|
1745 } else { |
|
1746 t = TypeOopPtr::make_from_constant(constant.as_object(), true); |
|
1747 } |
|
1748 // Add a dependence for invalidation of the optimization. |
|
1749 if (!call_site->is_constant_call_site()) { |
|
1750 C->dependencies()->assert_call_site_target_value(call_site, target); |
|
1751 } |
|
1752 return t; |
|
1753 } |
|
1754 } |
|
1755 } |
|
1756 } |
|
1757 } else if (tp->base() == Type::KlassPtr) { |
|
1758 assert( off != Type::OffsetBot || |
|
1759 // arrays can be cast to Objects |
|
1760 tp->is_klassptr()->klass()->is_java_lang_Object() || |
|
1761 // also allow array-loading from the primary supertype |
|
1762 // array during subtype checks |
|
1763 Opcode() == Op_LoadKlass, |
|
1764 "Field accesses must be precise" ); |
|
1765 // For klass/static loads, we expect the _type to be precise |
|
1766 } |
|
1767 |
|
1768 const TypeKlassPtr *tkls = tp->isa_klassptr(); |
|
1769 if (tkls != NULL && !StressReflectiveCode) { |
|
1770 ciKlass* klass = tkls->klass(); |
|
1771 if (klass->is_loaded() && tkls->klass_is_exact()) { |
|
1772 // We are loading a field from a Klass metaobject whose identity |
|
1773 // is known at compile time (the type is "exact" or "precise"). |
|
1774 // Check for fields we know are maintained as constants by the VM. |
|
1775 if (tkls->offset() == in_bytes(Klass::super_check_offset_offset())) { |
|
1776 // The field is Klass::_super_check_offset. Return its (constant) value. |
|
1777 // (Folds up type checking code.) |
|
1778 assert(Opcode() == Op_LoadI, "must load an int from _super_check_offset"); |
|
1779 return TypeInt::make(klass->super_check_offset()); |
|
1780 } |
|
1781 // Compute index into primary_supers array |
|
1782 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*); |
|
1783 // Check for overflowing; use unsigned compare to handle the negative case. |
|
1784 if( depth < ciKlass::primary_super_limit() ) { |
|
1785 // The field is an element of Klass::_primary_supers. Return its (constant) value. |
|
1786 // (Folds up type checking code.) |
|
1787 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers"); |
|
1788 ciKlass *ss = klass->super_of_depth(depth); |
|
1789 return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR; |
|
1790 } |
|
1791 const Type* aift = load_array_final_field(tkls, klass); |
|
1792 if (aift != NULL) return aift; |
|
1793 if (tkls->offset() == in_bytes(ArrayKlass::component_mirror_offset()) |
|
1794 && klass->is_array_klass()) { |
|
1795 // The field is ArrayKlass::_component_mirror. Return its (constant) value. |
|
1796 // (Folds up aClassConstant.getComponentType, common in Arrays.copyOf.) |
|
1797 assert(Opcode() == Op_LoadP, "must load an oop from _component_mirror"); |
|
1798 return TypeInstPtr::make(klass->as_array_klass()->component_mirror()); |
|
1799 } |
|
1800 if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) { |
|
1801 // The field is Klass::_java_mirror. Return its (constant) value. |
|
1802 // (Folds up the 2nd indirection in anObjConstant.getClass().) |
|
1803 assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror"); |
|
1804 return TypeInstPtr::make(klass->java_mirror()); |
|
1805 } |
|
1806 } |
|
1807 |
|
1808 // We can still check if we are loading from the primary_supers array at a |
|
1809 // shallow enough depth. Even though the klass is not exact, entries less |
|
1810 // than or equal to its super depth are correct. |
|
1811 if (klass->is_loaded() ) { |
|
1812 ciType *inner = klass; |
|
1813 while( inner->is_obj_array_klass() ) |
|
1814 inner = inner->as_obj_array_klass()->base_element_type(); |
|
1815 if( inner->is_instance_klass() && |
|
1816 !inner->as_instance_klass()->flags().is_interface() ) { |
|
1817 // Compute index into primary_supers array |
|
1818 juint depth = (tkls->offset() - in_bytes(Klass::primary_supers_offset())) / sizeof(Klass*); |
|
1819 // Check for overflowing; use unsigned compare to handle the negative case. |
|
1820 if( depth < ciKlass::primary_super_limit() && |
|
1821 depth <= klass->super_depth() ) { // allow self-depth checks to handle self-check case |
|
1822 // The field is an element of Klass::_primary_supers. Return its (constant) value. |
|
1823 // (Folds up type checking code.) |
|
1824 assert(Opcode() == Op_LoadKlass, "must load a klass from _primary_supers"); |
|
1825 ciKlass *ss = klass->super_of_depth(depth); |
|
1826 return ss ? TypeKlassPtr::make(ss) : TypePtr::NULL_PTR; |
|
1827 } |
|
1828 } |
|
1829 } |
|
1830 |
|
1831 // If the type is enough to determine that the thing is not an array, |
|
1832 // we can give the layout_helper a positive interval type. |
|
1833 // This will help short-circuit some reflective code. |
|
1834 if (tkls->offset() == in_bytes(Klass::layout_helper_offset()) |
|
1835 && !klass->is_array_klass() // not directly typed as an array |
|
1836 && !klass->is_interface() // specifically not Serializable & Cloneable |
|
1837 && !klass->is_java_lang_Object() // not the supertype of all T[] |
|
1838 ) { |
|
1839 // Note: When interfaces are reliable, we can narrow the interface |
|
1840 // test to (klass != Serializable && klass != Cloneable). |
|
1841 assert(Opcode() == Op_LoadI, "must load an int from _layout_helper"); |
|
1842 jint min_size = Klass::instance_layout_helper(oopDesc::header_size(), false); |
|
1843 // The key property of this type is that it folds up tests |
|
1844 // for array-ness, since it proves that the layout_helper is positive. |
|
1845 // Thus, a generic value like the basic object layout helper works fine. |
|
1846 return TypeInt::make(min_size, max_jint, Type::WidenMin); |
|
1847 } |
|
1848 } |
|
1849 |
|
1850 // If we are loading from a freshly-allocated object, produce a zero, |
|
1851 // if the load is provably beyond the header of the object. |
|
1852 // (Also allow a variable load from a fresh array to produce zero.) |
|
1853 const TypeOopPtr *tinst = tp->isa_oopptr(); |
|
1854 bool is_instance = (tinst != NULL) && tinst->is_known_instance_field(); |
|
1855 bool is_boxed_value = (tinst != NULL) && tinst->is_ptr_to_boxed_value(); |
|
1856 if (ReduceFieldZeroing || is_instance || is_boxed_value) { |
|
1857 Node* value = can_see_stored_value(mem,phase); |
|
1858 if (value != NULL && value->is_Con()) { |
|
1859 assert(value->bottom_type()->higher_equal(_type),"sanity"); |
|
1860 return value->bottom_type(); |
|
1861 } |
|
1862 } |
|
1863 |
|
1864 if (is_instance) { |
|
1865 // If we have an instance type and our memory input is the |
|
1866 // program's initial memory state, there is no matching store, |
|
1867 // so just return a zero of the appropriate type |
|
1868 Node *mem = in(MemNode::Memory); |
|
1869 if (mem->is_Parm() && mem->in(0)->is_Start()) { |
|
1870 assert(mem->as_Parm()->_con == TypeFunc::Memory, "must be memory Parm"); |
|
1871 return Type::get_zero_type(_type->basic_type()); |
|
1872 } |
|
1873 } |
|
1874 return _type; |
|
1875 } |
|
1876 |
|
1877 //------------------------------match_edge------------------------------------- |
|
1878 // Do we Match on this edge index or not? Match only the address. |
|
1879 uint LoadNode::match_edge(uint idx) const { |
|
1880 return idx == MemNode::Address; |
|
1881 } |
|
1882 |
|
1883 //--------------------------LoadBNode::Ideal-------------------------------------- |
|
1884 // |
|
1885 // If the previous store is to the same address as this load, |
|
1886 // and the value stored was larger than a byte, replace this load |
|
1887 // with the value stored truncated to a byte. If no truncation is |
|
1888 // needed, the replacement is done in LoadNode::Identity(). |
|
1889 // |
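// Editorial example (illustrative only): if the int value 0x181 was stored
// through the same address, this LoadB must observe only the low byte,
// sign-extended:
//   (0x181 << 24) >> 24 == -127
// which is exactly what the LShiftI/RShiftI pair built below computes.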
|
1890 Node *LoadBNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
1891 Node* mem = in(MemNode::Memory); |
|
1892 Node* value = can_see_stored_value(mem,phase); |
|
1893 if( value && !phase->type(value)->higher_equal( _type ) ) { |
|
1894 Node *result = phase->transform( new (phase->C) LShiftINode(value, phase->intcon(24)) ); |
|
1895 return new (phase->C) RShiftINode(result, phase->intcon(24)); |
|
1896 } |
|
1897 // Identity call will handle the case where truncation is not needed. |
|
1898 return LoadNode::Ideal(phase, can_reshape); |
|
1899 } |
|
1900 |
|
1901 const Type* LoadBNode::Value(PhaseTransform *phase) const { |
|
1902 Node* mem = in(MemNode::Memory); |
|
1903 Node* value = can_see_stored_value(mem,phase); |
|
1904 if (value != NULL && value->is_Con() && |
|
1905 !value->bottom_type()->higher_equal(_type)) { |
|
1906 // If the input to the store does not fit with the load's result type, |
|
1907 // it must be truncated. We can't delay until Ideal call since |
|
1908 // a singleton Value is needed for split_thru_phi optimization. |
|
1909 int con = value->get_int(); |
|
1910 return TypeInt::make((con << 24) >> 24); |
|
1911 } |
|
1912 return LoadNode::Value(phase); |
|
1913 } |
|
1914 |
|
1915 //--------------------------LoadUBNode::Ideal------------------------------------- |
|
1916 // |
|
1917 // If the previous store is to the same address as this load, |
|
1918 // and the value stored was larger than a byte, replace this load |
|
1919 // with the value stored truncated to a byte. If no truncation is |
|
1920 // needed, the replacement is done in LoadNode::Identity(). |
|
1921 // |
|
1922 Node* LoadUBNode::Ideal(PhaseGVN* phase, bool can_reshape) { |
|
1923 Node* mem = in(MemNode::Memory); |
|
1924 Node* value = can_see_stored_value(mem, phase); |
|
1925 if (value && !phase->type(value)->higher_equal(_type)) |
|
1926 return new (phase->C) AndINode(value, phase->intcon(0xFF)); |
|
1927 // Identity call will handle the case where truncation is not needed. |
|
1928 return LoadNode::Ideal(phase, can_reshape); |
|
1929 } |
|
1930 |
|
1931 const Type* LoadUBNode::Value(PhaseTransform *phase) const { |
|
1932 Node* mem = in(MemNode::Memory); |
|
1933 Node* value = can_see_stored_value(mem,phase); |
|
1934 if (value != NULL && value->is_Con() && |
|
1935 !value->bottom_type()->higher_equal(_type)) { |
|
1936 // If the input to the store does not fit with the load's result type, |
|
1937 // it must be truncated. We can't delay until Ideal call since |
|
1938 // a singleton Value is needed for split_thru_phi optimization. |
|
1939 int con = value->get_int(); |
|
1940 return TypeInt::make(con & 0xFF); |
|
1941 } |
|
1942 return LoadNode::Value(phase); |
|
1943 } |
|
1944 |
|
1945 //--------------------------LoadUSNode::Ideal------------------------------------- |
|
1946 // |
|
1947 // If the previous store is to the same address as this load, |
|
1948 // and the value stored was larger than a char, replace this load |
|
1949 // with the value stored truncated to a char. If no truncation is |
|
1950 // needed, the replacement is done in LoadNode::Identity(). |
|
1951 // |
|
1952 Node *LoadUSNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
1953 Node* mem = in(MemNode::Memory); |
|
1954 Node* value = can_see_stored_value(mem,phase); |
|
1955 if( value && !phase->type(value)->higher_equal( _type ) ) |
|
1956 return new (phase->C) AndINode(value,phase->intcon(0xFFFF)); |
|
1957 // Identity call will handle the case where truncation is not needed. |
|
1958 return LoadNode::Ideal(phase, can_reshape); |
|
1959 } |
|
1960 |
|
1961 const Type* LoadUSNode::Value(PhaseTransform *phase) const { |
|
1962 Node* mem = in(MemNode::Memory); |
|
1963 Node* value = can_see_stored_value(mem,phase); |
|
1964 if (value != NULL && value->is_Con() && |
|
1965 !value->bottom_type()->higher_equal(_type)) { |
|
1966 // If the input to the store does not fit with the load's result type, |
|
1967 // it must be truncated. We can't delay until Ideal call since |
|
1968 // a singleton Value is needed for split_thru_phi optimization. |
|
1969 int con = value->get_int(); |
|
1970 return TypeInt::make(con & 0xFFFF); |
|
1971 } |
|
1972 return LoadNode::Value(phase); |
|
1973 } |
|
1974 |
|
1975 //--------------------------LoadSNode::Ideal-------------------------------------- |
|
1976 // |
|
1977 // If the previous store is to the same address as this load, |
|
1978 // and the value stored was larger than a short, replace this load |
|
1979 // with the value stored truncated to a short. If no truncation is |
|
1980 // needed, the replacement is done in LoadNode::Identity(). |
|
1981 // |
|
1982 Node *LoadSNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
1983 Node* mem = in(MemNode::Memory); |
|
1984 Node* value = can_see_stored_value(mem,phase); |
|
1985 if( value && !phase->type(value)->higher_equal( _type ) ) { |
|
1986 Node *result = phase->transform( new (phase->C) LShiftINode(value, phase->intcon(16)) ); |
|
1987 return new (phase->C) RShiftINode(result, phase->intcon(16)); |
|
1988 } |
|
1989 // Identity call will handle the case where truncation is not needed. |
|
1990 return LoadNode::Ideal(phase, can_reshape); |
|
1991 } |
|
1992 |
|
1993 const Type* LoadSNode::Value(PhaseTransform *phase) const { |
|
1994 Node* mem = in(MemNode::Memory); |
|
1995 Node* value = can_see_stored_value(mem,phase); |
|
1996 if (value != NULL && value->is_Con() && |
|
1997 !value->bottom_type()->higher_equal(_type)) { |
|
1998 // If the input to the store does not fit with the load's result type, |
|
1999 // it must be truncated. We can't delay until Ideal call since |
|
2000 // a singleton Value is needed for split_thru_phi optimization. |
|
2001 int con = value->get_int(); |
|
2002 return TypeInt::make((con << 16) >> 16); |
|
2003 } |
|
2004 return LoadNode::Value(phase); |
|
2005 } |
|
2006 |
|
2007 //============================================================================= |
|
2008 //----------------------------LoadKlassNode::make------------------------------ |
|
2009 // Polymorphic factory method: |
|
2010 Node *LoadKlassNode::make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at, const TypeKlassPtr *tk ) { |
|
2011 Compile* C = gvn.C; |
|
2012 Node *ctl = NULL; |
|
2013 // sanity check the alias category against the created node type |
|
2014 const TypePtr *adr_type = adr->bottom_type()->isa_ptr(); |
|
2015 assert(adr_type != NULL, "expecting TypeKlassPtr"); |
|
2016 #ifdef _LP64 |
|
2017 if (adr_type->is_ptr_to_narrowklass()) { |
|
2018 assert(UseCompressedClassPointers, "no compressed klasses"); |
|
2019 Node* load_klass = gvn.transform(new (C) LoadNKlassNode(ctl, mem, adr, at, tk->make_narrowklass(), MemNode::unordered)); |
|
2020 return new (C) DecodeNKlassNode(load_klass, load_klass->bottom_type()->make_ptr()); |
|
2021 } |
|
2022 #endif |
|
2023 assert(!adr_type->is_ptr_to_narrowklass() && !adr_type->is_ptr_to_narrowoop(), "should have got back a narrow oop"); |
|
2024 return new (C) LoadKlassNode(ctl, mem, adr, at, tk, MemNode::unordered); |
|
2025 } |
|
2026 |
|
2027 //------------------------------Value------------------------------------------ |
|
2028 const Type *LoadKlassNode::Value( PhaseTransform *phase ) const { |
|
2029 return klass_value_common(phase); |
|
2030 } |
|
2031 |
|
2032 const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const { |
|
2033 // Either input is TOP ==> the result is TOP |
|
2034 const Type *t1 = phase->type( in(MemNode::Memory) ); |
|
2035 if (t1 == Type::TOP) return Type::TOP; |
|
2036 Node *adr = in(MemNode::Address); |
|
2037 const Type *t2 = phase->type( adr ); |
|
2038 if (t2 == Type::TOP) return Type::TOP; |
|
2039 const TypePtr *tp = t2->is_ptr(); |
|
2040 if (TypePtr::above_centerline(tp->ptr()) || |
|
2041 tp->ptr() == TypePtr::Null) return Type::TOP; |
|
2042 |
|
2043 // Return a more precise klass, if possible |
|
2044 const TypeInstPtr *tinst = tp->isa_instptr(); |
|
2045 if (tinst != NULL) { |
|
2046 ciInstanceKlass* ik = tinst->klass()->as_instance_klass(); |
|
2047 int offset = tinst->offset(); |
|
2048 if (ik == phase->C->env()->Class_klass() |
|
2049 && (offset == java_lang_Class::klass_offset_in_bytes() || |
|
2050 offset == java_lang_Class::array_klass_offset_in_bytes())) { |
|
2051 // We are loading a special hidden field from a Class mirror object, |
|
2052 // the field which points to the VM's Klass metaobject. |
|
2053 ciType* t = tinst->java_mirror_type(); |
|
2054 // java_mirror_type returns non-null for compile-time Class constants. |
|
2055 if (t != NULL) { |
|
2056 // constant oop => constant klass |
|
2057 if (offset == java_lang_Class::array_klass_offset_in_bytes()) { |
|
2058 if (t->is_void()) { |
|
2059 // We cannot create a void array. Since void is a primitive type return null |
|
2060 // klass. Users of this result need to do a null check on the returned klass. |
|
2061 return TypePtr::NULL_PTR; |
|
2062 } |
|
2063 return TypeKlassPtr::make(ciArrayKlass::make(t)); |
|
2064 } |
|
2065 if (!t->is_klass()) { |
|
2066 // a primitive Class (e.g., int.class) has NULL for a klass field |
|
2067 return TypePtr::NULL_PTR; |
|
2068 } |
|
2069 // (Folds up the 1st indirection in aClassConstant.getModifiers().) |
|
2070 return TypeKlassPtr::make(t->as_klass()); |
|
2071 } |
|
2072 // non-constant mirror, so we can't tell what's going on |
|
2073 } |
|
2074 if( !ik->is_loaded() ) |
|
2075 return _type; // Bail out if not loaded |
|
2076 if (offset == oopDesc::klass_offset_in_bytes()) { |
|
2077 if (tinst->klass_is_exact()) { |
|
2078 return TypeKlassPtr::make(ik); |
|
2079 } |
|
2080 // See if we can become precise: no subklasses and no interface |
|
2081 // (Note: We need to support verified interfaces.) |
|
2082 if (!ik->is_interface() && !ik->has_subklass()) { |
|
2083 //assert(!UseExactTypes, "this code should be useless with exact types"); |
|
2084 // Add a dependence; if any subclass added we need to recompile |
|
2085 if (!ik->is_final()) { |
|
2086 // %%% should use stronger assert_unique_concrete_subtype instead |
|
2087 phase->C->dependencies()->assert_leaf_type(ik); |
|
2088 } |
|
2089 // Return precise klass |
|
2090 return TypeKlassPtr::make(ik); |
|
2091 } |
|
2092 |
|
2093 // Return root of possible klass |
|
2094 return TypeKlassPtr::make(TypePtr::NotNull, ik, 0/*offset*/); |
|
2095 } |
|
2096 } |
|
2097 |
|
2098 // Check for loading klass from an array |
|
2099 const TypeAryPtr *tary = tp->isa_aryptr(); |
|
2100 if( tary != NULL ) { |
|
2101 ciKlass *tary_klass = tary->klass(); |
|
2102 if (tary_klass != NULL // can be NULL when at BOTTOM or TOP |
|
2103 && tary->offset() == oopDesc::klass_offset_in_bytes()) { |
|
2104 if (tary->klass_is_exact()) { |
|
2105 return TypeKlassPtr::make(tary_klass); |
|
2106 } |
|
2107 ciArrayKlass *ak = tary->klass()->as_array_klass(); |
|
2108 // If the klass is an object array, we defer the question to the |
|
2109 // array component klass. |
|
2110 if( ak->is_obj_array_klass() ) { |
|
2111 assert( ak->is_loaded(), "" ); |
|
2112 ciKlass *base_k = ak->as_obj_array_klass()->base_element_klass(); |
|
2113 if( base_k->is_loaded() && base_k->is_instance_klass() ) { |
|
2114 ciInstanceKlass* ik = base_k->as_instance_klass(); |
|
2115 // See if we can become precise: no subklasses and no interface |
|
2116 if (!ik->is_interface() && !ik->has_subklass()) { |
|
2117 //assert(!UseExactTypes, "this code should be useless with exact types"); |
|
2118 // Add a dependence; if any subclass added we need to recompile |
|
2119 if (!ik->is_final()) { |
|
2120 phase->C->dependencies()->assert_leaf_type(ik); |
|
2121 } |
|
2122 // Return precise array klass |
|
2123 return TypeKlassPtr::make(ak); |
|
2124 } |
|
2125 } |
|
2126 return TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); |
|
2127 } else { // Found a type-array? |
|
2128 //assert(!UseExactTypes, "this code should be useless with exact types"); |
|
2129 assert( ak->is_type_array_klass(), "" ); |
|
2130 return TypeKlassPtr::make(ak); // These are always precise |
|
2131 } |
|
2132 } |
|
2133 } |
|
2134 |
|
2135 // Check for loading klass from an array klass |
|
2136 const TypeKlassPtr *tkls = tp->isa_klassptr(); |
|
2137 if (tkls != NULL && !StressReflectiveCode) { |
|
2138 ciKlass* klass = tkls->klass(); |
|
2139 if( !klass->is_loaded() ) |
|
2140 return _type; // Bail out if not loaded |
|
2141 if( klass->is_obj_array_klass() && |
|
2142 tkls->offset() == in_bytes(ObjArrayKlass::element_klass_offset())) { |
|
2143 ciKlass* elem = klass->as_obj_array_klass()->element_klass(); |
|
2144 // // Always returning precise element type is incorrect, |
|
2145 // // e.g., element type could be object and array may contain strings |
|
2146 // return TypeKlassPtr::make(TypePtr::Constant, elem, 0); |
|
2147 |
|
2148 // The array's TypeKlassPtr was declared 'precise' or 'not precise' |
|
2149 // according to the element type's subclassing. |
|
2150 return TypeKlassPtr::make(tkls->ptr(), elem, 0/*offset*/); |
|
2151 } |
|
2152 if( klass->is_instance_klass() && tkls->klass_is_exact() && |
|
2153 tkls->offset() == in_bytes(Klass::super_offset())) { |
|
2154 ciKlass* sup = klass->as_instance_klass()->super(); |
|
2155 // The field is Klass::_super. Return its (constant) value. |
|
2156 // (Folds up the 2nd indirection in aClassConstant.getSuperClass().) |
|
2157 return sup ? TypeKlassPtr::make(sup) : TypePtr::NULL_PTR; |
|
2158 } |
|
2159 } |
|
2160 |
|
2161 // Bailout case |
|
2162 return LoadNode::Value(phase); |
|
2163 } |
|
2164 |
|
2165 //------------------------------Identity--------------------------------------- |
|
2166 // To clean up reflective code, simplify k.java_mirror.as_klass to plain k. |
|
2167 // Also feed through the klass in Allocate(...klass...)._klass. |
|
2168 Node* LoadKlassNode::Identity( PhaseTransform *phase ) { |
|
2169 return klass_identity_common(phase); |
|
2170 } |
|
2171 |
|
2172 Node* LoadNode::klass_identity_common(PhaseTransform *phase ) { |
|
2173 Node* x = LoadNode::Identity(phase); |
|
2174 if (x != this) return x; |
|
2175 |
|
2176 // Take apart the address into an oop and an offset. |
|
2177 // Return 'this' if we cannot. |
|
2178 Node* adr = in(MemNode::Address); |
|
2179 intptr_t offset = 0; |
|
2180 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); |
|
2181 if (base == NULL) return this; |
|
2182 const TypeOopPtr* toop = phase->type(adr)->isa_oopptr(); |
|
2183 if (toop == NULL) return this; |
|
2184 |
|
2185 // We can fetch the klass directly through an AllocateNode. |
|
2186 // This works even if the klass is not constant (clone or newArray). |
|
2187 if (offset == oopDesc::klass_offset_in_bytes()) { |
|
2188 Node* allocated_klass = AllocateNode::Ideal_klass(base, phase); |
|
2189 if (allocated_klass != NULL) { |
|
2190 return allocated_klass; |
|
2191 } |
|
2192 } |
|
2193 |
|
2194 // Simplify k.java_mirror.as_klass to plain k, where k is a Klass*. |
|
2195 // Simplify ak.component_mirror.array_klass to plain ak, ak an ArrayKlass. |
|
2196 // See inline_native_Class_query for occurrences of these patterns. |
|
2197 // Java Example: x.getClass().isAssignableFrom(y) |
|
2198 // Java Example: Array.newInstance(x.getClass().getComponentType(), n) |
|
2199 // |
|
2200 // This improves reflective code, often making the Class |
|
2201 // mirror go completely dead. (Current exception: Class |
|
2202 // mirrors may appear in debug info, but we could clean them out by |
|
2203 // introducing a new debug info operator for Klass*.java_mirror). |
|
2204 if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass() |
|
2205 && (offset == java_lang_Class::klass_offset_in_bytes() || |
|
2206 offset == java_lang_Class::array_klass_offset_in_bytes())) { |
|
2207 // We are loading a special hidden field from a Class mirror, |
|
2208 // the field which points to its Klass or ArrayKlass metaobject. |
|
2209 if (base->is_Load()) { |
|
2210 Node* adr2 = base->in(MemNode::Address); |
|
2211 const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr(); |
|
2212 if (tkls != NULL && !tkls->empty() |
|
2213 && (tkls->klass()->is_instance_klass() || |
|
2214 tkls->klass()->is_array_klass()) |
|
2215 && adr2->is_AddP() |
|
2216 ) { |
|
2217 int mirror_field = in_bytes(Klass::java_mirror_offset()); |
|
2218 if (offset == java_lang_Class::array_klass_offset_in_bytes()) { |
|
2219 mirror_field = in_bytes(ArrayKlass::component_mirror_offset()); |
|
2220 } |
|
2221 if (tkls->offset() == mirror_field) { |
|
2222 return adr2->in(AddPNode::Base); |
|
2223 } |
|
2224 } |
|
2225 } |
|
2226 } |
|
2227 |
|
2228 return this; |
|
2229 } |
|
2230 |
|
2231 |
|
2232 //------------------------------Value------------------------------------------ |
|
2233 const Type *LoadNKlassNode::Value( PhaseTransform *phase ) const { |
|
2234 const Type *t = klass_value_common(phase); |
|
2235 if (t == Type::TOP) |
|
2236 return t; |
|
2237 |
|
2238 return t->make_narrowklass(); |
|
2239 } |
|
2240 |
|
2241 //------------------------------Identity--------------------------------------- |
|
2242 // To clean up reflective code, simplify k.java_mirror.as_klass to narrow k. |
|
2243 // Also feed through the klass in Allocate(...klass...)._klass. |
|
2244 Node* LoadNKlassNode::Identity( PhaseTransform *phase ) { |
|
2245 Node *x = klass_identity_common(phase); |
|
2246 |
|
2247 const Type *t = phase->type( x ); |
|
2248 if( t == Type::TOP ) return x; |
|
2249 if( t->isa_narrowklass()) return x; |
|
2250 assert (!t->isa_narrowoop(), "no narrow oop here"); |
|
2251 |
|
2252 return phase->transform(new (phase->C) EncodePKlassNode(x, t->make_narrowklass())); |
|
2253 } |
|
2254 |
|
2255 //------------------------------Value----------------------------------------- |
|
2256 const Type *LoadRangeNode::Value( PhaseTransform *phase ) const { |
|
2257 // Either input is TOP ==> the result is TOP |
|
2258 const Type *t1 = phase->type( in(MemNode::Memory) ); |
|
2259 if( t1 == Type::TOP ) return Type::TOP; |
|
2260 Node *adr = in(MemNode::Address); |
|
2261 const Type *t2 = phase->type( adr ); |
|
2262 if( t2 == Type::TOP ) return Type::TOP; |
|
2263 const TypePtr *tp = t2->is_ptr(); |
|
2264 if (TypePtr::above_centerline(tp->ptr())) return Type::TOP; |
|
2265 const TypeAryPtr *tap = tp->isa_aryptr(); |
|
2266 if( !tap ) return _type; |
|
2267 return tap->size(); |
|
2268 } |
|
2269 |
|
2270 //-------------------------------Ideal--------------------------------------- |
|
2271 // Feed through the length in AllocateArray(...length...)._length. |
|
2272 Node *LoadRangeNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
2273 Node* p = MemNode::Ideal_common(phase, can_reshape); |
|
2274 if (p) return (p == NodeSentinel) ? NULL : p; |
|
2275 |
|
2276 // Take apart the address into an oop and an offset. |
|
2277 // Return 'this' if we cannot. |
|
2278 Node* adr = in(MemNode::Address); |
|
2279 intptr_t offset = 0; |
|
2280 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); |
|
2281 if (base == NULL) return NULL; |
|
2282 const TypeAryPtr* tary = phase->type(adr)->isa_aryptr(); |
|
2283 if (tary == NULL) return NULL; |
|
2284 |
|
2285 // We can fetch the length directly through an AllocateArrayNode. |
|
2286 // This works even if the length is not constant (clone or newArray). |
|
2287 if (offset == arrayOopDesc::length_offset_in_bytes()) { |
|
2288 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase); |
|
2289 if (alloc != NULL) { |
|
2290 Node* allocated_length = alloc->Ideal_length(); |
|
2291 Node* len = alloc->make_ideal_length(tary, phase); |
|
2292 if (allocated_length != len) { |
|
2293 // New CastII improves on this. |
|
2294 return len; |
|
2295 } |
|
2296 } |
|
2297 } |
|
2298 |
|
2299 return NULL; |
|
2300 } |
|
2301 |
|
2302 //------------------------------Identity--------------------------------------- |
|
2303 // Feed through the length in AllocateArray(...length...)._length. |
|
2304 Node* LoadRangeNode::Identity( PhaseTransform *phase ) { |
|
2305 Node* x = LoadINode::Identity(phase); |
|
2306 if (x != this) return x; |
|
2307 |
|
2308 // Take apart the address into an oop and an offset. |
|
2309 // Return 'this' if we cannot. |
|
2310 Node* adr = in(MemNode::Address); |
|
2311 intptr_t offset = 0; |
|
2312 Node* base = AddPNode::Ideal_base_and_offset(adr, phase, offset); |
|
2313 if (base == NULL) return this; |
|
2314 const TypeAryPtr* tary = phase->type(adr)->isa_aryptr(); |
|
2315 if (tary == NULL) return this; |
|
2316 |
|
2317 // We can fetch the length directly through an AllocateArrayNode. |
|
2318 // This works even if the length is not constant (clone or newArray). |
|
2319 if (offset == arrayOopDesc::length_offset_in_bytes()) { |
|
2320 AllocateArrayNode* alloc = AllocateArrayNode::Ideal_array_allocation(base, phase); |
|
2321 if (alloc != NULL) { |
|
2322 Node* allocated_length = alloc->Ideal_length(); |
|
2323 // Do not allow make_ideal_length to allocate a CastII node. |
|
2324 Node* len = alloc->make_ideal_length(tary, phase, false); |
|
2325 if (allocated_length == len) { |
|
2326 // Return allocated_length only if it would not be improved by a CastII. |
|
2327 return allocated_length; |
|
2328 } |
|
2329 } |
|
2330 } |
|
2331 |
|
2332 return this; |
|
2333 |
|
2334 } |
|
2335 |
|
2336 //============================================================================= |
|
2337 //---------------------------StoreNode::make----------------------------------- |
|
2338 // Polymorphic factory method: |
|
2339 StoreNode* StoreNode::make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt, MemOrd mo) { |
|
2340 assert((mo == unordered || mo == release), "unexpected"); |
|
2341 Compile* C = gvn.C; |
|
2342 assert(C->get_alias_index(adr_type) != Compile::AliasIdxRaw || |
|
2343 ctl != NULL, "raw memory operations should have control edge"); |
|
2344 |
|
2345 switch (bt) { |
|
2346 case T_BOOLEAN: |
|
2347 case T_BYTE: return new (C) StoreBNode(ctl, mem, adr, adr_type, val, mo); |
|
2348 case T_INT: return new (C) StoreINode(ctl, mem, adr, adr_type, val, mo); |
|
2349 case T_CHAR: |
|
2350 case T_SHORT: return new (C) StoreCNode(ctl, mem, adr, adr_type, val, mo); |
|
2351 case T_LONG: return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo); |
|
2352 case T_FLOAT: return new (C) StoreFNode(ctl, mem, adr, adr_type, val, mo); |
|
2353 case T_DOUBLE: return new (C) StoreDNode(ctl, mem, adr, adr_type, val, mo); |
|
2354 case T_METADATA: |
|
2355 case T_ADDRESS: |
|
2356 case T_OBJECT: |
|
2357 #ifdef _LP64 |
|
2358 if (adr->bottom_type()->is_ptr_to_narrowoop()) { |
|
2359 val = gvn.transform(new (C) EncodePNode(val, val->bottom_type()->make_narrowoop())); |
|
2360 return new (C) StoreNNode(ctl, mem, adr, adr_type, val, mo); |
|
2361 } else if (adr->bottom_type()->is_ptr_to_narrowklass() || |
|
2362 (UseCompressedClassPointers && val->bottom_type()->isa_klassptr() && |
|
2363 adr->bottom_type()->isa_rawptr())) { |
|
2364 val = gvn.transform(new (C) EncodePKlassNode(val, val->bottom_type()->make_narrowklass())); |
|
2365 return new (C) StoreNKlassNode(ctl, mem, adr, adr_type, val, mo); |
|
2366 } |
|
2367 #endif |
|
2368 { |
|
2369 return new (C) StorePNode(ctl, mem, adr, adr_type, val, mo); |
|
2370 } |
|
2371 } |
|
2372 ShouldNotReachHere(); |
|
2373 return (StoreNode*)NULL; |
|
2374 } |
|
2375 |
|
2376 StoreLNode* StoreLNode::make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo) { |
|
2377 bool require_atomic = true; |
|
2378 return new (C) StoreLNode(ctl, mem, adr, adr_type, val, mo, require_atomic); |
|
2379 } |
|
2380 |
|
2381 |
|
2382 //--------------------------bottom_type---------------------------------------- |
|
2383 const Type *StoreNode::bottom_type() const { |
|
2384 return Type::MEMORY; |
|
2385 } |
|
2386 |
|
2387 //------------------------------hash------------------------------------------- |
|
2388 uint StoreNode::hash() const { |
|
2389 // unroll addition of interesting fields |
|
2390 //return (uintptr_t)in(Control) + (uintptr_t)in(Memory) + (uintptr_t)in(Address) + (uintptr_t)in(ValueIn); |
|
2391 |
|
2392 // Since they are not commoned, do not hash them: |
|
2393 return NO_HASH; |
|
2394 } |
|
2395 |
|
2396 //------------------------------Ideal------------------------------------------ |
|
2397 // Change back-to-back Store(, p, x) -> Store(m, p, y) to Store(m, p, x). |
|
2398 // When a store immediately follows a relevant allocation/initialization, |
|
2399 // try to capture it into the initialization, or hoist it above. |
|
2400 Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
2401 Node* p = MemNode::Ideal_common(phase, can_reshape); |
|
2402 if (p) return (p == NodeSentinel) ? NULL : p; |
|
2403 |
|
2404 Node* mem = in(MemNode::Memory); |
|
2405 Node* address = in(MemNode::Address); |
|
2406 |
|
2407 // Back-to-back stores to same address? Fold em up. Generally |
|
2408 // unsafe if I have intervening uses... Also disallowed for StoreCM |
|
2409 // since they must follow each StoreP operation. Redundant StoreCMs |
|
2410 // are eliminated just before matching in final_graph_reshape. |
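  // Editorial sketch (illustrative, not from the original source): with no other
  // uses of the earlier store, Store(Store(m, p, x), p, y) folds to Store(m, p, y)
  // by rewiring this store's memory input past the now-dead earlier store.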
|
2411 if (mem->is_Store() && mem->in(MemNode::Address)->eqv_uncast(address) && |
|
2412 mem->Opcode() != Op_StoreCM) { |
|
2413 // Looking at a dead closed cycle of memory? |
|
2414 assert(mem != mem->in(MemNode::Memory), "dead loop in StoreNode::Ideal"); |
|
2415 |
|
2416 assert(Opcode() == mem->Opcode() || |
|
2417 phase->C->get_alias_index(adr_type()) == Compile::AliasIdxRaw, |
|
2418 "no mismatched stores, except on raw memory"); |
|
2419 |
|
2420 if (mem->outcnt() == 1 && // check for intervening uses |
|
2421 mem->as_Store()->memory_size() <= this->memory_size()) { |
|
2422 // If anybody other than 'this' uses 'mem', we cannot fold 'mem' away. |
|
2423 // For example, 'mem' might be the final state at a conditional return. |
|
2424 // Or, 'mem' might be used by some node which is live at the same time |
|
2425 // 'this' is live, which might be unschedulable. So, require exactly |
|
2426 // ONE user, the 'this' store, until such time as we clone 'mem' for |
|
2427 // each of 'mem's uses (thus making the exactly-1-user-rule hold true). |
|
2428 if (can_reshape) { // (%%% is this an anachronism?) |
|
2429 set_req_X(MemNode::Memory, mem->in(MemNode::Memory), |
|
2430 phase->is_IterGVN()); |
|
2431 } else { |
|
2432 // It's OK to do this in the parser, since DU info is always accurate, |
|
2433 // and the parser always refers to nodes via SafePointNode maps. |
|
2434 set_req(MemNode::Memory, mem->in(MemNode::Memory)); |
|
2435 } |
|
2436 return this; |
|
2437 } |
|
2438 } |
|
2439 |
|
2440 // Capture an unaliased, unconditional, simple store into an initializer. |
|
2441 // Or, if it is independent of the allocation, hoist it above the allocation. |
|
2442 if (ReduceFieldZeroing && /*can_reshape &&*/ |
|
2443 mem->is_Proj() && mem->in(0)->is_Initialize()) { |
|
2444 InitializeNode* init = mem->in(0)->as_Initialize(); |
|
2445 intptr_t offset = init->can_capture_store(this, phase, can_reshape); |
|
2446 if (offset > 0) { |
|
2447 Node* moved = init->capture_store(this, offset, phase, can_reshape); |
|
2448 // If the InitializeNode captured me, it made a raw copy of me, |
|
2449 // and I need to disappear. |
|
2450 if (moved != NULL) { |
|
2451 // %%% hack to ensure that Ideal returns a new node: |
|
2452 mem = MergeMemNode::make(phase->C, mem); |
|
2453 return mem; // fold me away |
|
2454 } |
|
2455 } |
|
2456 } |
|
2457 |
|
2458 return NULL; // No further progress |
|
2459 } |
|
2460 |
|
2461 //------------------------------Value----------------------------------------- |
|
2462 const Type *StoreNode::Value( PhaseTransform *phase ) const { |
|
2463 // Either input is TOP ==> the result is TOP |
|
2464 const Type *t1 = phase->type( in(MemNode::Memory) ); |
|
2465 if( t1 == Type::TOP ) return Type::TOP; |
|
2466 const Type *t2 = phase->type( in(MemNode::Address) ); |
|
2467 if( t2 == Type::TOP ) return Type::TOP; |
|
2468 const Type *t3 = phase->type( in(MemNode::ValueIn) ); |
|
2469 if( t3 == Type::TOP ) return Type::TOP; |
|
2470 return Type::MEMORY; |
|
2471 } |
|
2472 |
|
2473 //------------------------------Identity--------------------------------------- |
|
2474 // Remove redundant stores: |
|
2475 // Store(m, p, Load(m, p)) changes to m. |
|
2476 // Store(Store(m, p, x), p, x) changes to Store(m, p, x) (same value stored twice in a row).
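// For instance, the Java sequences "a[i] = a[i];" and "o.f = v; o.f = v;"
// (a load stored straight back, and the same value stored twice in a row)
// both reduce to the prior memory state under these two rules.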
|
2477 Node *StoreNode::Identity( PhaseTransform *phase ) { |
|
2478 Node* mem = in(MemNode::Memory); |
|
2479 Node* adr = in(MemNode::Address); |
|
2480 Node* val = in(MemNode::ValueIn); |
|
2481 |
|
2482 // Load then Store? Then the Store is useless |
|
2483 if (val->is_Load() && |
|
2484 val->in(MemNode::Address)->eqv_uncast(adr) && |
|
2485 val->in(MemNode::Memory )->eqv_uncast(mem) && |
|
2486 val->as_Load()->store_Opcode() == Opcode()) { |
|
2487 return mem; |
|
2488 } |
|
2489 |
|
2490 // Two stores in a row of the same value? |
|
2491 if (mem->is_Store() && |
|
2492 mem->in(MemNode::Address)->eqv_uncast(adr) && |
|
2493 mem->in(MemNode::ValueIn)->eqv_uncast(val) && |
|
2494 mem->Opcode() == Opcode()) { |
|
2495 return mem; |
|
2496 } |
|
2497 |
|
2498 // Store of zero anywhere into a freshly-allocated object? |
|
2499 // Then the store is useless. |
|
2500 // (It must already have been captured by the InitializeNode.) |
|
2501 if (ReduceFieldZeroing && phase->type(val)->is_zero_type()) { |
|
2502 // a newly allocated object is already all-zeroes everywhere |
|
2503 if (mem->is_Proj() && mem->in(0)->is_Allocate()) { |
|
2504 return mem; |
|
2505 } |
|
2506 |
|
2507 // the store may also apply to zero-bits in an earlier object |
|
2508 Node* prev_mem = find_previous_store(phase); |
|
2509 // Steps (a), (b): Walk past independent stores to find an exact match. |
|
2510 if (prev_mem != NULL) { |
|
2511 Node* prev_val = can_see_stored_value(prev_mem, phase); |
|
2512 if (prev_val != NULL && phase->eqv(prev_val, val)) { |
|
2513 // prev_val and val might differ by a cast; it would be good |
|
2514 // to keep the more informative of the two. |
|
2515 return mem; |
|
2516 } |
|
2517 } |
|
2518 } |
|
2519 |
|
2520 return this; |
|
2521 } |
|
2522 |
|
2523 //------------------------------match_edge------------------------------------- |
|
2524 // Do we Match on this edge index or not? Match only address & value
|
2525 uint StoreNode::match_edge(uint idx) const { |
|
2526 return idx == MemNode::Address || idx == MemNode::ValueIn; |
|
2527 } |
|
2528 |
|
2529 //------------------------------cmp-------------------------------------------- |
|
2530 // Do not common stores up together. They generally have to be split |
|
2531 // back up anyways, so do not bother. |
|
2532 uint StoreNode::cmp( const Node &n ) const { |
|
2533 return (&n == this); // Always fail except on self |
|
2534 } |
|
2535 |
|
2536 //------------------------------Ideal_masked_input----------------------------- |
|
2537 // Check for a useless mask before a partial-word store |
|
2538 // (StoreB ... (AndI valIn conIa) ) |
|
2539 // If (conIa & mask == mask) this simplifies to |
|
2540 // (StoreB ... (valIn) ) |
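// For instance, once a value of the form (AndI x 0xFF) reaches a StoreB, the AndI
// is redundant: the StoreB writes only the low 8 bits of its value input anyway.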
|
2541 Node *StoreNode::Ideal_masked_input(PhaseGVN *phase, uint mask) { |
|
2542 Node *val = in(MemNode::ValueIn); |
|
2543 if( val->Opcode() == Op_AndI ) { |
|
2544 const TypeInt *t = phase->type( val->in(2) )->isa_int(); |
|
2545 if( t && t->is_con() && (t->get_con() & mask) == mask ) { |
|
2546 set_req(MemNode::ValueIn, val->in(1)); |
|
2547 return this; |
|
2548 } |
|
2549 } |
|
2550 return NULL; |
|
2551 } |
|
2552 |
|
2553 |
|
2554 //------------------------------Ideal_sign_extended_input---------------------- |
|
2555 // Check for useless sign-extension before a partial-word store |
|
2556 // (StoreB ... (RShiftI _ (LShiftI _ valIn conIL ) conIR) ) |
|
2557 // If (conIL == conIR && conIR <= num_bits) this simplifies to |
|
2558 // (StoreB ... (valIn) ) |
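// For instance, the i2b in "byteArr[i] = (byte) x" parses as (LShiftI x 24)
// followed by (RShiftI _ 24); since the StoreB keeps only the low 8 bits,
// both shifts can be dropped and x stored directly.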
|
2559 Node *StoreNode::Ideal_sign_extended_input(PhaseGVN *phase, int num_bits) { |
|
2560 Node *val = in(MemNode::ValueIn); |
|
2561 if( val->Opcode() == Op_RShiftI ) { |
|
2562 const TypeInt *t = phase->type( val->in(2) )->isa_int(); |
|
2563 if( t && t->is_con() && (t->get_con() <= num_bits) ) { |
|
2564 Node *shl = val->in(1); |
|
2565 if( shl->Opcode() == Op_LShiftI ) { |
|
2566 const TypeInt *t2 = phase->type( shl->in(2) )->isa_int(); |
|
2567 if( t2 && t2->is_con() && (t2->get_con() == t->get_con()) ) { |
|
2568 set_req(MemNode::ValueIn, shl->in(1)); |
|
2569 return this; |
|
2570 } |
|
2571 } |
|
2572 } |
|
2573 } |
|
2574 return NULL; |
|
2575 } |
|
2576 |
|
2577 //------------------------------value_never_loaded----------------------------------- |
|
2578 // Determine whether there are any possible loads of the value stored. |
|
2579 // For simplicity, we actually check if there are any loads from the |
|
2580 // address stored to, not just for loads of the value stored by this node. |
|
2581 // |
|
2582 bool StoreNode::value_never_loaded( PhaseTransform *phase) const { |
|
2583 Node *adr = in(Address); |
|
2584 const TypeOopPtr *adr_oop = phase->type(adr)->isa_oopptr(); |
|
2585 if (adr_oop == NULL) |
|
2586 return false; |
|
2587 if (!adr_oop->is_known_instance_field()) |
|
2588 return false; // if not a distinct instance, there may be aliases of the address |
|
2589 for (DUIterator_Fast imax, i = adr->fast_outs(imax); i < imax; i++) { |
|
2590 Node *use = adr->fast_out(i); |
|
2591 int opc = use->Opcode(); |
|
2592 if (use->is_Load() || use->is_LoadStore()) { |
|
2593 return false; |
|
2594 } |
|
2595 } |
|
2596 return true; |
|
2597 } |
|
2598 |
|
2599 //============================================================================= |
|
2600 //------------------------------Ideal------------------------------------------ |
|
2601 // If the store is from an AND mask that leaves the low bits untouched, then |
|
2602 // we can skip the AND operation. If the store is from a sign-extension |
|
2603 // (a left shift, then right shift) we can skip both. |
|
2604 Node *StoreBNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
|
2605 Node *progress = StoreNode::Ideal_masked_input(phase, 0xFF); |
|
2606 if( progress != NULL ) return progress; |
|
2607 |
|
2608 progress = StoreNode::Ideal_sign_extended_input(phase, 24); |
|
2609 if( progress != NULL ) return progress; |
|
2610 |
|
2611 // Finally check the default case |
|
2612 return StoreNode::Ideal(phase, can_reshape); |
|
2613 } |
|
2614 |
|
2615 //============================================================================= |
|
2616 //------------------------------Ideal------------------------------------------ |
|
2617 // If the store is from an AND mask that leaves the low bits untouched, then |
|
2618 // we can skip the AND operation |
|
2619 Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
|
2620 Node *progress = StoreNode::Ideal_masked_input(phase, 0xFFFF); |
|
2621 if( progress != NULL ) return progress; |
|
2622 |
|
2623 progress = StoreNode::Ideal_sign_extended_input(phase, 16); |
|
2624 if( progress != NULL ) return progress; |
|
2625 |
|
2626 // Finally check the default case |
|
2627 return StoreNode::Ideal(phase, can_reshape); |
|
2628 } |
|
2629 |
|
2630 //============================================================================= |
|
2631 //------------------------------Identity--------------------------------------- |
|
2632 Node *StoreCMNode::Identity( PhaseTransform *phase ) { |
|
2633 // No need to card mark when storing a null ptr |
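// (A null store creates no oop reference for the collector to track via the
// card table, so the card mark can be dropped.)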
|
2634 Node* my_store = in(MemNode::OopStore); |
|
2635 if (my_store->is_Store()) { |
|
2636 const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) ); |
|
2637 if( t1 == TypePtr::NULL_PTR ) { |
|
2638 return in(MemNode::Memory); |
|
2639 } |
|
2640 } |
|
2641 return this; |
|
2642 } |
|
2643 |
|
2644 //============================================================================= |
|
2645 //------------------------------Ideal--------------------------------------- |
|
2646 Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
|
2647 Node* progress = StoreNode::Ideal(phase, can_reshape); |
|
2648 if (progress != NULL) return progress; |
|
2649 |
|
2650 Node* my_store = in(MemNode::OopStore); |
|
2651 if (my_store->is_MergeMem()) { |
|
2652 Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx()); |
|
2653 set_req(MemNode::OopStore, mem); |
|
2654 return this; |
|
2655 } |
|
2656 |
|
2657 return NULL; |
|
2658 } |
|
2659 |
|
2660 //------------------------------Value----------------------------------------- |
|
2661 const Type *StoreCMNode::Value( PhaseTransform *phase ) const { |
|
2662 // Either input is TOP ==> the result is TOP |
|
2663 const Type *t = phase->type( in(MemNode::Memory) ); |
|
2664 if( t == Type::TOP ) return Type::TOP; |
|
2665 t = phase->type( in(MemNode::Address) ); |
|
2666 if( t == Type::TOP ) return Type::TOP; |
|
2667 t = phase->type( in(MemNode::ValueIn) ); |
|
2668 if( t == Type::TOP ) return Type::TOP; |
|
2669 // If extra input is TOP ==> the result is TOP |
|
2670 t = phase->type( in(MemNode::OopStore) ); |
|
2671 if( t == Type::TOP ) return Type::TOP; |
|
2672 |
|
2673 return StoreNode::Value( phase ); |
|
2674 } |
|
2675 |
|
2676 |
|
2677 //============================================================================= |
|
2678 //----------------------------------SCMemProjNode------------------------------ |
|
2679 const Type * SCMemProjNode::Value( PhaseTransform *phase ) const |
|
2680 { |
|
2681 return bottom_type(); |
|
2682 } |
|
2683 |
|
2684 //============================================================================= |
|
2685 //----------------------------------LoadStoreNode------------------------------ |
|
2686 LoadStoreNode::LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required ) |
|
2687 : Node(required), |
|
2688 _type(rt), |
|
2689 _adr_type(at) |
|
2690 { |
|
2691 init_req(MemNode::Control, c ); |
|
2692 init_req(MemNode::Memory , mem); |
|
2693 init_req(MemNode::Address, adr); |
|
2694 init_req(MemNode::ValueIn, val); |
|
2695 init_class_id(Class_LoadStore); |
|
2696 } |
|
2697 |
|
2698 uint LoadStoreNode::ideal_reg() const { |
|
2699 return _type->ideal_reg(); |
|
2700 } |
|
2701 |
|
2702 bool LoadStoreNode::result_not_used() const { |
|
2703 for( DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++ ) { |
|
2704 Node *x = fast_out(i); |
|
2705 if (x->Opcode() == Op_SCMemProj) continue; |
|
2706 return false; |
|
2707 } |
|
2708 return true; |
|
2709 } |
|
2710 |
|
2711 uint LoadStoreNode::size_of() const { return sizeof(*this); } |
|
2712 |
|
2713 //============================================================================= |
|
2714 //----------------------------------LoadStoreConditionalNode-------------------- |
|
2715 LoadStoreConditionalNode::LoadStoreConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex ) : LoadStoreNode(c, mem, adr, val, NULL, TypeInt::BOOL, 5) { |
|
2716 init_req(ExpectedIn, ex ); |
|
2717 } |
|
2718 |
|
2719 //============================================================================= |
|
2720 //-------------------------------adr_type-------------------------------------- |
|
2721 // Compute the address type from the destination address input.
|
2722 const TypePtr* ClearArrayNode::adr_type() const { |
|
2723 Node *adr = in(3); |
|
2724 return MemNode::calculate_adr_type(adr->bottom_type()); |
|
2725 } |
|
2726 |
|
2727 //------------------------------match_edge------------------------------------- |
|
2728 // Do we Match on this edge index or not? Do not match memory |
|
2729 uint ClearArrayNode::match_edge(uint idx) const { |
|
2730 return idx > 1; |
|
2731 } |
|
2732 |
|
2733 //------------------------------Identity--------------------------------------- |
|
2734 // Clearing a zero length array does nothing |
|
2735 Node *ClearArrayNode::Identity( PhaseTransform *phase ) { |
|
2736 return phase->type(in(2))->higher_equal(TypeX::ZERO) ? in(1) : this; |
|
2737 } |
|
2738 |
|
2739 //------------------------------Idealize--------------------------------------- |
|
2740 // Clearing a short array is faster with stores |
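// For instance, a constant-length clear of 16 bytes is rewritten below into two
// chained (StoreL ... 0) nodes on raw memory, rather than leaving the ClearArray
// to be matched as a bulk clear.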
|
2741 Node *ClearArrayNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
|
2742 const int unit = BytesPerLong; |
|
2743 const TypeX* t = phase->type(in(2))->isa_intptr_t(); |
|
2744 if (!t) return NULL; |
|
2745 if (!t->is_con()) return NULL; |
|
2746 intptr_t raw_count = t->get_con(); |
|
2747 intptr_t size = raw_count; |
|
2748 if (!Matcher::init_array_count_is_in_bytes) size *= unit; |
|
2749 // Clearing nothing uses the Identity call. |
|
2750 // Negative clears are possible on dead ClearArrays |
|
2751 // (see jck test stmt114.stmt11402.val). |
|
2752 if (size <= 0 || size % unit != 0) return NULL; |
|
2753 intptr_t count = size / unit; |
|
2754 // Length too long; use fast hardware clear |
|
2755 if (size > Matcher::init_array_short_size) return NULL; |
|
2756 Node *mem = in(1); |
|
2757 if( phase->type(mem)==Type::TOP ) return NULL; |
|
2758 Node *adr = in(3); |
|
2759 const Type* at = phase->type(adr); |
|
2760 if( at==Type::TOP ) return NULL; |
|
2761 const TypePtr* atp = at->isa_ptr(); |
|
2762 // adjust atp to be the correct array element address type |
|
2763 if (atp == NULL) atp = TypePtr::BOTTOM; |
|
2764 else atp = atp->add_offset(Type::OffsetBot); |
|
2765 // Get base for derived pointer purposes |
|
2766 if( adr->Opcode() != Op_AddP ) Unimplemented(); |
|
2767 Node *base = adr->in(1); |
|
2768 |
|
2769 Node *zero = phase->makecon(TypeLong::ZERO); |
|
2770 Node *off = phase->MakeConX(BytesPerLong); |
|
2771 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); |
|
2772 count--; |
|
2773 while( count-- ) { |
|
2774 mem = phase->transform(mem); |
|
2775 adr = phase->transform(new (phase->C) AddPNode(base,adr,off)); |
|
2776 mem = new (phase->C) StoreLNode(in(0),mem,adr,atp,zero,MemNode::unordered,false); |
|
2777 } |
|
2778 return mem; |
|
2779 } |
|
2780 |
|
2781 //----------------------------step_through---------------------------------- |
|
2782 // Advance *np past this ClearArray to its allocation's incoming memory if it

2783 // initializes a different instance; return false if it is the one we are looking for.
|
2784 bool ClearArrayNode::step_through(Node** np, uint instance_id, PhaseTransform* phase) { |
|
2785 Node* n = *np; |
|
2786 assert(n->is_ClearArray(), "sanity"); |
|
2787 intptr_t offset; |
|
2788 AllocateNode* alloc = AllocateNode::Ideal_allocation(n->in(3), phase, offset); |
|
2789 // This method is called only before Allocate nodes are expanded during |
|
2790 // macro node expansion. Before that, ClearArray nodes are only generated
|
2791 // in LibraryCallKit::generate_arraycopy() which follows allocations. |
|
2792 assert(alloc != NULL, "should have allocation"); |
|
2793 if (alloc->_idx == instance_id) { |
|
2794 // Can not bypass initialization of the instance we are looking for. |
|
2795 return false; |
|
2796 } |
|
2797 // Otherwise skip it. |
|
2798 InitializeNode* init = alloc->initialization(); |
|
2799 if (init != NULL) |
|
2800 *np = init->in(TypeFunc::Memory); |
|
2801 else |
|
2802 *np = alloc->in(TypeFunc::Memory); |
|
2803 return true; |
|
2804 } |
|
2805 |
|
2806 //----------------------------clear_memory------------------------------------- |
|
2807 // Generate code to initialize object storage to zero. |
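// The three overloads below cooperate: a leading 4-byte chunk at a non-8-byte-aligned
// start offset is zeroed with an int store, the aligned middle is handed to a
// ClearArrayNode, and a trailing 4-byte remainder at an odd end offset gets a
// final int store.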
|
2808 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, |
|
2809 intptr_t start_offset, |
|
2810 Node* end_offset, |
|
2811 PhaseGVN* phase) { |
|
2812 Compile* C = phase->C; |
|
2813 intptr_t offset = start_offset; |
|
2814 |
|
2815 int unit = BytesPerLong; |
|
2816 if ((offset % unit) != 0) { |
|
2817 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(offset)); |
|
2818 adr = phase->transform(adr); |
|
2819 const TypePtr* atp = TypeRawPtr::BOTTOM; |
|
2820 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); |
|
2821 mem = phase->transform(mem); |
|
2822 offset += BytesPerInt; |
|
2823 } |
|
2824 assert((offset % unit) == 0, ""); |
|
2825 |
|
2826 // Initialize the remaining stuff, if any, with a ClearArray. |
|
2827 return clear_memory(ctl, mem, dest, phase->MakeConX(offset), end_offset, phase); |
|
2828 } |
|
2829 |
|
2830 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, |
|
2831 Node* start_offset, |
|
2832 Node* end_offset, |
|
2833 PhaseGVN* phase) { |
|
2834 if (start_offset == end_offset) { |
|
2835 // nothing to do |
|
2836 return mem; |
|
2837 } |
|
2838 |
|
2839 Compile* C = phase->C; |
|
2840 int unit = BytesPerLong; |
|
2841 Node* zbase = start_offset; |
|
2842 Node* zend = end_offset; |
|
2843 |
|
2844 // Scale to the unit required by the CPU: |
|
2845 if (!Matcher::init_array_count_is_in_bytes) { |
|
2846 Node* shift = phase->intcon(exact_log2(unit)); |
|
2847 zbase = phase->transform( new(C) URShiftXNode(zbase, shift) ); |
|
2848 zend = phase->transform( new(C) URShiftXNode(zend, shift) ); |
|
2849 } |
|
2850 |
|
2851 // Bulk clear double-words |
|
2852 Node* zsize = phase->transform( new(C) SubXNode(zend, zbase) ); |
|
2853 Node* adr = phase->transform( new(C) AddPNode(dest, dest, start_offset) ); |
|
2854 mem = new (C) ClearArrayNode(ctl, mem, zsize, adr); |
|
2855 return phase->transform(mem); |
|
2856 } |
|
2857 |
|
2858 Node* ClearArrayNode::clear_memory(Node* ctl, Node* mem, Node* dest, |
|
2859 intptr_t start_offset, |
|
2860 intptr_t end_offset, |
|
2861 PhaseGVN* phase) { |
|
2862 if (start_offset == end_offset) { |
|
2863 // nothing to do |
|
2864 return mem; |
|
2865 } |
|
2866 |
|
2867 Compile* C = phase->C; |
|
2868 assert((end_offset % BytesPerInt) == 0, "odd end offset"); |
|
2869 intptr_t done_offset = end_offset; |
|
2870 if ((done_offset % BytesPerLong) != 0) { |
|
2871 done_offset -= BytesPerInt; |
|
2872 } |
|
2873 if (done_offset > start_offset) { |
|
2874 mem = clear_memory(ctl, mem, dest, |
|
2875 start_offset, phase->MakeConX(done_offset), phase); |
|
2876 } |
|
2877 if (done_offset < end_offset) { // emit the final 32-bit store |
|
2878 Node* adr = new (C) AddPNode(dest, dest, phase->MakeConX(done_offset)); |
|
2879 adr = phase->transform(adr); |
|
2880 const TypePtr* atp = TypeRawPtr::BOTTOM; |
|
2881 mem = StoreNode::make(*phase, ctl, mem, adr, atp, phase->zerocon(T_INT), T_INT, MemNode::unordered); |
|
2882 mem = phase->transform(mem); |
|
2883 done_offset += BytesPerInt; |
|
2884 } |
|
2885 assert(done_offset == end_offset, ""); |
|
2886 return mem; |
|
2887 } |
|
2888 |
|
2889 //============================================================================= |
|
2890 // Do not match memory edge. |
|
2891 uint StrIntrinsicNode::match_edge(uint idx) const { |
|
2892 return idx == 2 || idx == 3; |
|
2893 } |
|
2894 |
|
2895 //------------------------------Ideal------------------------------------------ |
|
2896 // Return a node which is more "ideal" than the current node. Strip out |
|
2897 // control copies |
|
2898 Node *StrIntrinsicNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
2899 if (remove_dead_region(phase, can_reshape)) return this; |
|
2900 // Don't bother trying to transform a dead node |
|
2901 if (in(0) && in(0)->is_top()) return NULL; |
|
2902 |
|
2903 if (can_reshape) { |
|
2904 Node* mem = phase->transform(in(MemNode::Memory)); |
|
2905 // If transformed to a MergeMem, get the desired slice |
|
2906 uint alias_idx = phase->C->get_alias_index(adr_type()); |
|
2907 mem = mem->is_MergeMem() ? mem->as_MergeMem()->memory_at(alias_idx) : mem; |
|
2908 if (mem != in(MemNode::Memory)) { |
|
2909 set_req(MemNode::Memory, mem); |
|
2910 return this; |
|
2911 } |
|
2912 } |
|
2913 return NULL; |
|
2914 } |
|
2915 |
|
2916 //------------------------------Value------------------------------------------ |
|
2917 const Type *StrIntrinsicNode::Value( PhaseTransform *phase ) const { |
|
2918 if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP; |
|
2919 return bottom_type(); |
|
2920 } |
|
2921 |
|
2922 //============================================================================= |
|
2923 //------------------------------match_edge------------------------------------- |
|
2924 // Do not match memory edge |
|
2925 uint EncodeISOArrayNode::match_edge(uint idx) const { |
|
2926 return idx == 2 || idx == 3; // EncodeISOArray src (Binary dst len) |
|
2927 } |
|
2928 |
|
2929 //------------------------------Ideal------------------------------------------ |
|
2930 // Return a node which is more "ideal" than the current node. Strip out |
|
2931 // control copies |
|
2932 Node *EncodeISOArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
2933 return remove_dead_region(phase, can_reshape) ? this : NULL; |
|
2934 } |
|
2935 |
|
2936 //------------------------------Value------------------------------------------ |
|
2937 const Type *EncodeISOArrayNode::Value(PhaseTransform *phase) const { |
|
2938 if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP; |
|
2939 return bottom_type(); |
|
2940 } |
|
2941 |
|
2942 //============================================================================= |
|
2943 MemBarNode::MemBarNode(Compile* C, int alias_idx, Node* precedent) |
|
2944 : MultiNode(TypeFunc::Parms + (precedent == NULL? 0: 1)), |
|
2945 _adr_type(C->get_adr_type(alias_idx)) |
|
2946 { |
|
2947 init_class_id(Class_MemBar); |
|
2948 Node* top = C->top(); |
|
2949 init_req(TypeFunc::I_O,top); |
|
2950 init_req(TypeFunc::FramePtr,top); |
|
2951 init_req(TypeFunc::ReturnAdr,top); |
|
2952 if (precedent != NULL) |
|
2953 init_req(TypeFunc::Parms, precedent); |
|
2954 } |
|
2955 |
|
2956 //------------------------------cmp-------------------------------------------- |
|
2957 uint MemBarNode::hash() const { return NO_HASH; } |
|
2958 uint MemBarNode::cmp( const Node &n ) const { |
|
2959 return (&n == this); // Always fail except on self |
|
2960 } |
|
2961 |
|
2962 //------------------------------make------------------------------------------- |
|
2963 MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) { |
|
2964 switch (opcode) { |
|
2965 case Op_MemBarAcquire: return new(C) MemBarAcquireNode(C, atp, pn); |
|
2966 case Op_LoadFence: return new(C) LoadFenceNode(C, atp, pn); |
|
2967 case Op_MemBarRelease: return new(C) MemBarReleaseNode(C, atp, pn); |
|
2968 case Op_StoreFence: return new(C) StoreFenceNode(C, atp, pn); |
|
2969 case Op_MemBarAcquireLock: return new(C) MemBarAcquireLockNode(C, atp, pn); |
|
2970 case Op_MemBarReleaseLock: return new(C) MemBarReleaseLockNode(C, atp, pn); |
|
2971 case Op_MemBarVolatile: return new(C) MemBarVolatileNode(C, atp, pn); |
|
2972 case Op_MemBarCPUOrder: return new(C) MemBarCPUOrderNode(C, atp, pn); |
|
2973 case Op_Initialize: return new(C) InitializeNode(C, atp, pn); |
|
2974 case Op_MemBarStoreStore: return new(C) MemBarStoreStoreNode(C, atp, pn); |
|
2975 default: ShouldNotReachHere(); return NULL; |
|
2976 } |
|
2977 } |
|
2978 |
|
2979 //------------------------------Ideal------------------------------------------ |
|
2980 // Return a node which is more "ideal" than the current node. Strip out |
|
2981 // control copies |
|
2982 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
2983 if (remove_dead_region(phase, can_reshape)) return this; |
|
2984 // Don't bother trying to transform a dead node |
|
2985 if (in(0) && in(0)->is_top()) { |
|
2986 return NULL; |
|
2987 } |
|
2988 |
|
2989 // Eliminate volatile MemBars for scalar replaced objects. |
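// If the preceding volatile load or store was to a field of an object that escape
// analysis proved non-escaping (a known, scalar-replaceable instance), no other
// thread can observe that object, so the barrier is unnecessary.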
|
2990 if (can_reshape && req() == (Precedent+1)) { |
|
2991 bool eliminate = false; |
|
2992 int opc = Opcode(); |
|
2993 if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) { |
|
2994 // Volatile field loads and stores. |
|
2995 Node* my_mem = in(MemBarNode::Precedent); |
|
2996 // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
|
2997 if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) { |
|
2998 // If the Precedent is a DecodeN and its input (a Load) is used in more than one place,

2999 // replace this Precedent (DecodeN) with the Load instead.
|
3000 if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) { |
|
3001 Node* load_node = my_mem->in(1); |
|
3002 set_req(MemBarNode::Precedent, load_node); |
|
3003 phase->is_IterGVN()->_worklist.push(my_mem); |
|
3004 my_mem = load_node; |
|
3005 } else { |
|
3006 assert(my_mem->unique_out() == this, "sanity"); |
|
3007 del_req(Precedent); |
|
3008 phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later |
|
3009 my_mem = NULL; |
|
3010 } |
|
3011 } |
|
3012 if (my_mem != NULL && my_mem->is_Mem()) { |
|
3013 const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr(); |
|
3014 // Check for scalar replaced object reference. |
|
3015 if( t_oop != NULL && t_oop->is_known_instance_field() && |
|
3016 t_oop->offset() != Type::OffsetBot && |
|
3017 t_oop->offset() != Type::OffsetTop) { |
|
3018 eliminate = true; |
|
3019 } |
|
3020 } |
|
3021 } else if (opc == Op_MemBarRelease) { |
|
3022 // Final field stores. |
|
3023 Node* alloc = AllocateNode::Ideal_allocation(in(MemBarNode::Precedent), phase); |
|
3024 if ((alloc != NULL) && alloc->is_Allocate() && |
|
3025 alloc->as_Allocate()->_is_non_escaping) { |
|
3026 // The allocated object does not escape. |
|
3027 eliminate = true; |
|
3028 } |
|
3029 } |
|
3030 if (eliminate) { |
|
3031 // Replace MemBar projections by its inputs. |
|
3032 PhaseIterGVN* igvn = phase->is_IterGVN(); |
|
3033 igvn->replace_node(proj_out(TypeFunc::Memory), in(TypeFunc::Memory)); |
|
3034 igvn->replace_node(proj_out(TypeFunc::Control), in(TypeFunc::Control)); |
|
3035 // Must return either the original node (now dead) or a new node |
|
3036 // (Do not return a top here, since that would break the uniqueness of top.) |
|
3037 return new (phase->C) ConINode(TypeInt::ZERO); |
|
3038 } |
|
3039 } |
|
3040 return NULL; |
|
3041 } |
|
3042 |
|
3043 //------------------------------Value------------------------------------------ |
|
3044 const Type *MemBarNode::Value( PhaseTransform *phase ) const { |
|
3045 if( !in(0) ) return Type::TOP; |
|
3046 if( phase->type(in(0)) == Type::TOP ) |
|
3047 return Type::TOP; |
|
3048 return TypeTuple::MEMBAR; |
|
3049 } |
|
3050 |
|
3051 //------------------------------match------------------------------------------ |
|
3052 // Construct projections for memory. |
|
3053 Node *MemBarNode::match( const ProjNode *proj, const Matcher *m ) { |
|
3054 switch (proj->_con) { |
|
3055 case TypeFunc::Control: |
|
3056 case TypeFunc::Memory: |
|
3057 return new (m->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); |
|
3058 } |
|
3059 ShouldNotReachHere(); |
|
3060 return NULL; |
|
3061 } |
|
3062 |
|
3063 //===========================InitializeNode==================================== |
|
3064 // SUMMARY: |
|
3065 // This node acts as a memory barrier on raw memory, after some raw stores. |
|
3066 // The 'cooked' oop value feeds from the Initialize, not the Allocation. |
|
3067 // The Initialize can 'capture' suitably constrained stores as raw inits. |
|
3068 // It can coalesce related raw stores into larger units (called 'tiles'). |
|
3069 // It can avoid zeroing new storage for memory units which have raw inits. |
|
3070 // At macro-expansion, it is marked 'complete', and does not optimize further. |
|
3071 // |
|
3072 // EXAMPLE: |
|
3073 // The object 'new short[2]' occupies 16 bytes on a 32-bit machine.
|
3074 // ctl = incoming control; mem* = incoming memory |
|
3075 // (Note: A star * on a memory edge denotes I/O and other standard edges.) |
|
3076 // First allocate uninitialized memory and fill in the header: |
|
3077 // alloc = (Allocate ctl mem* 16 #short[].klass ...) |
|
3078 // ctl := alloc.Control; mem* := alloc.Memory* |
|
3079 // rawmem = alloc.Memory; rawoop = alloc.RawAddress |
|
3080 // Then initialize to zero the non-header parts of the raw memory block: |
|
3081 // init = (Initialize alloc.Control alloc.Memory* alloc.RawAddress) |
|
3082 // ctl := init.Control; mem.SLICE(#short[*]) := init.Memory |
|
3083 // After the initialize node executes, the object is ready for service: |
|
3084 // oop := (CheckCastPP init.Control alloc.RawAddress #short[]) |
|
3085 // Suppose its body is immediately initialized as {1,2}: |
|
3086 // store1 = (StoreC init.Control init.Memory (+ oop 12) 1) |
|
3087 // store2 = (StoreC init.Control store1 (+ oop 14) 2) |
|
3088 // mem.SLICE(#short[*]) := store2 |
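// (In source terms, the example above is roughly "short[] a = new short[2];
// a[0] = 1; a[1] = 2;", with the element stores landing at offsets 12 and 14.)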
|
3089 // |
|
3090 // DETAILS: |
|
3091 // An InitializeNode collects and isolates object initialization after |
|
3092 // an AllocateNode and before the next possible safepoint. As a |
|
3093 // memory barrier (MemBarNode), it keeps critical stores from drifting |
|
3094 // down past any safepoint or any publication of the allocation. |
|
3095 // Before this barrier, a newly-allocated object may have uninitialized bits. |
|
3096 // After this barrier, it may be treated as a real oop, and GC is allowed. |
|
3097 // |
|
3098 // The semantics of the InitializeNode include an implicit zeroing of |
|
3099 // the new object from object header to the end of the object. |
|
3100 // (The object header and end are determined by the AllocateNode.) |
|
3101 // |
|
3102 // Certain stores may be added as direct inputs to the InitializeNode. |
|
3103 // These stores must update raw memory, and they must be to addresses |
|
3104 // derived from the raw address produced by AllocateNode, and with |
|
3105 // a constant offset. They must be ordered by increasing offset. |
|
3106 // The first one is at in(RawStores), the last at in(req()-1). |
|
3107 // Unlike most memory operations, they are not linked in a chain, |
|
3108 // but are displayed in parallel as users of the rawmem output of |
|
3109 // the allocation. |
|
3110 // |
|
3111 // (See comments in InitializeNode::capture_store, which continue |
|
3112 // the example given above.) |
|
3113 // |
|
3114 // When the associated Allocate is macro-expanded, the InitializeNode |
|
3115 // may be rewritten to optimize collected stores. A ClearArrayNode |
|
3116 // may also be created at that point to represent any required zeroing. |
|
3117 // The InitializeNode is then marked 'complete', prohibiting further |
|
3118 // capturing of nearby memory operations. |
|
3119 // |
|
3120 // During macro-expansion, all captured initializations which store |
|
3121 // constant values of 32 bits or smaller are coalesced (if advantageous) |
|
3122 // into larger 'tiles' of 32 or 64 bits. This allows an object to be
|
3123 // initialized in fewer memory operations. Memory words which are |
|
3124 // covered by neither tiles nor non-constant stores are pre-zeroed |
|
3125 // by explicit stores of zero. (The code shape happens to do all |
|
3126 // zeroing first, then all other stores, with both sequences occurring |
|
3127 // in order of ascending offsets.) |
|
3128 // |
|
3129 // Alternatively, code may be inserted between an AllocateNode and its |
|
3130 // InitializeNode, to perform arbitrary initialization of the new object. |
|
3131 // E.g., the object copying intrinsics insert complex data transfers here. |
|
3132 // The initialization must then be marked as 'complete' to disable the
|
3133 // built-in zeroing semantics and the collection of initializing stores. |
|
3134 // |
|
3135 // While an InitializeNode is incomplete, reads from the memory state |
|
3136 // produced by it are optimizable if they match the control edge and |
|
3137 // new oop address associated with the allocation/initialization. |
|
3138 // They return a stored value (if the offset matches) or else zero. |
|
3139 // A write to the memory state, if it matches control and address, |
|
3140 // and if it is to a constant offset, may be 'captured' by the |
|
3141 // InitializeNode. It is cloned as a raw memory operation and rewired |
|
3142 // inside the initialization, to the raw oop produced by the allocation. |
|
3143 // Operations on addresses which are provably distinct (e.g., to |
|
3144 // other AllocateNodes) are allowed to bypass the initialization. |
|
3145 // |
|
3146 // The effect of all this is to consolidate object initialization |
|
3147 // (both arrays and non-arrays, both piecewise and bulk) into a |
|
3148 // single location, where it can be optimized as a unit. |
|
3149 // |
|
3150 // Only stores with an offset less than TrackedInitializationLimit words |
|
3151 // will be considered for capture by an InitializeNode. This puts a |
|
3152 // reasonable limit on the complexity of optimized initializations. |
|
3153 |
|
3154 //---------------------------InitializeNode------------------------------------ |
|
3155 InitializeNode::InitializeNode(Compile* C, int adr_type, Node* rawoop) |
|
3156 : _is_complete(Incomplete), _does_not_escape(false), |
|
3157 MemBarNode(C, adr_type, rawoop) |
|
3158 { |
|
3159 init_class_id(Class_Initialize); |
|
3160 |
|
3161 assert(adr_type == Compile::AliasIdxRaw, "only valid atp"); |
|
3162 assert(in(RawAddress) == rawoop, "proper init"); |
|
3163 // Note: allocation() can be NULL, for secondary initialization barriers |
|
3164 } |
|
3165 |
|
3166 // Since this node is not matched, it will be processed by the |
|
3167 // register allocator. Declare that there are no constraints |
|
3168 // on the allocation of the RawAddress edge. |
|
3169 const RegMask &InitializeNode::in_RegMask(uint idx) const { |
|
3170 // This edge should be set to top by set_complete(). But be conservative.
|
3171 if (idx == InitializeNode::RawAddress) |
|
3172 return *(Compile::current()->matcher()->idealreg2spillmask[in(idx)->ideal_reg()]); |
|
3173 return RegMask::Empty; |
|
3174 } |
|
3175 |
|
3176 Node* InitializeNode::memory(uint alias_idx) { |
|
3177 Node* mem = in(Memory); |
|
3178 if (mem->is_MergeMem()) { |
|
3179 return mem->as_MergeMem()->memory_at(alias_idx); |
|
3180 } else { |
|
3181 // incoming raw memory is not split |
|
3182 return mem; |
|
3183 } |
|
3184 } |
|
3185 |
|
3186 bool InitializeNode::is_non_zero() { |
|
3187 if (is_complete()) return false; |
|
3188 remove_extra_zeroes(); |
|
3189 return (req() > RawStores); |
|
3190 } |
|
3191 |
|
3192 void InitializeNode::set_complete(PhaseGVN* phase) { |
|
3193 assert(!is_complete(), "caller responsibility"); |
|
3194 _is_complete = Complete; |
|
3195 |
|
3196 // After this node is complete, it contains a bunch of |
|
3197 // raw-memory initializations. There is no need for |
|
3198 // it to have anything to do with non-raw memory effects. |
|
3199 // Therefore, tell all non-raw users to re-optimize themselves, |
|
3200 // after skipping the memory effects of this initialization. |
|
3201 PhaseIterGVN* igvn = phase->is_IterGVN(); |
|
3202 if (igvn) igvn->add_users_to_worklist(this); |
|
3203 } |
|
3204 |
|
3205 // convenience function |
|
3206 // return false if the init contains any stores already |
|
3207 bool AllocateNode::maybe_set_complete(PhaseGVN* phase) { |
|
3208 InitializeNode* init = initialization(); |
|
3209 if (init == NULL || init->is_complete()) return false; |
|
3210 init->remove_extra_zeroes(); |
|
3211 // for now, if this allocation has already collected any inits, bail: |
|
3212 if (init->is_non_zero()) return false; |
|
3213 init->set_complete(phase); |
|
3214 return true; |
|
3215 } |
|
3216 |
|
3217 void InitializeNode::remove_extra_zeroes() { |
|
3218 if (req() == RawStores) return; |
|
3219 Node* zmem = zero_memory(); |
|
3220 uint fill = RawStores; |
|
3221 for (uint i = fill; i < req(); i++) { |
|
3222 Node* n = in(i); |
|
3223 if (n->is_top() || n == zmem) continue; // skip |
|
3224 if (fill < i) set_req(fill, n); // compact |
|
3225 ++fill; |
|
3226 } |
|
3227 // delete any empty spaces created: |
|
3228 while (fill < req()) { |
|
3229 del_req(fill); |
|
3230 } |
|
3231 } |
|
3232 |
|
3233 // Helper for remembering which stores go with which offsets. |
|
3234 intptr_t InitializeNode::get_store_offset(Node* st, PhaseTransform* phase) { |
|
3235 if (!st->is_Store()) return -1; // can happen to dead code via subsume_node |
|
3236 intptr_t offset = -1; |
|
3237 Node* base = AddPNode::Ideal_base_and_offset(st->in(MemNode::Address), |
|
3238 phase, offset); |
|
3239 if (base == NULL) return -1; // something is dead, |
|
3240 if (offset < 0) return -1; // dead, dead |
|
3241 return offset; |
|
3242 } |
|
3243 |
|
3244 // Helper for proving that an initialization expression is |
|
3245 // "simple enough" to be folded into an object initialization. |
|
3246 // Attempts to prove that a store's initial value 'n' can be captured |
|
3247 // within the initialization without creating a vicious cycle, such as: |
|
3248 // { Foo p = new Foo(); p.next = p; } |
|
3249 // True for constants and parameters and small combinations thereof. |
|
3250 bool InitializeNode::detect_init_independence(Node* n, int& count) { |
|
3251 if (n == NULL) return true; // (can this really happen?) |
|
3252 if (n->is_Proj()) n = n->in(0); |
|
3253 if (n == this) return false; // found a cycle |
|
3254 if (n->is_Con()) return true; |
|
3255 if (n->is_Start()) return true; // params, etc., are OK |
|
3256 if (n->is_Root()) return true; // even better |
|
3257 |
|
3258 Node* ctl = n->in(0); |
|
3259 if (ctl != NULL && !ctl->is_top()) { |
|
3260 if (ctl->is_Proj()) ctl = ctl->in(0); |
|
3261 if (ctl == this) return false; |
|
3262 |
|
3263 // If we already know that the enclosing memory op is pinned right after |
|
3264 // the init, then any control flow that the store has picked up |
|
3265 // must have preceded the init, or else be equal to the init. |
|
3266 // Even after loop optimizations (which might change control edges) |
|
3267 // a store is never pinned *before* the availability of its inputs. |
|
3268 if (!MemNode::all_controls_dominate(n, this)) |
|
3269 return false; // failed to prove a good control |
|
3270 } |
|
3271 |
|
3272 // Check data edges for possible dependencies on 'this'. |
|
3273 if ((count += 1) > 20) return false; // complexity limit |
|
3274 for (uint i = 1; i < n->req(); i++) { |
|
3275 Node* m = n->in(i); |
|
3276 if (m == NULL || m == n || m->is_top()) continue; |
|
3277 uint first_i = n->find_edge(m); |
|
3278 if (i != first_i) continue; // process duplicate edge just once |
|
3279 if (!detect_init_independence(m, count)) { |
|
3280 return false; |
|
3281 } |
|
3282 } |
|
3283 |
|
3284 return true; |
|
3285 } |
|
3286 |
|
3287 // Here are all the checks a Store must pass before it can be moved into |
|
3288 // an initialization. Returns zero if a check fails. |
|
3289 // On success, returns the (constant) offset to which the store applies, |
|
3290 // within the initialized memory. |
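// For instance, the store in "Foo p = new Foo(); p.x = 7;" passes these checks,
// while "p.next = p;" does not: its value depends on the freshly allocated object
// itself, which detect_init_independence() rejects.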
|
3291 intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) { |
|
3292 const int FAIL = 0; |
|
3293 if (st->req() != MemNode::ValueIn + 1) |
|
3294 return FAIL; // an inscrutable StoreNode (card mark?) |
|
3295 Node* ctl = st->in(MemNode::Control); |
|
3296 if (!(ctl != NULL && ctl->is_Proj() && ctl->in(0) == this)) |
|
3297 return FAIL; // must be unconditional after the initialization |
|
3298 Node* mem = st->in(MemNode::Memory); |
|
3299 if (!(mem->is_Proj() && mem->in(0) == this)) |
|
3300 return FAIL; // must not be preceded by other stores |
|
3301 Node* adr = st->in(MemNode::Address); |
|
3302 intptr_t offset; |
|
3303 AllocateNode* alloc = AllocateNode::Ideal_allocation(adr, phase, offset); |
|
3304 if (alloc == NULL) |
|
3305 return FAIL; // inscrutable address |
|
3306 if (alloc != allocation()) |
|
3307 return FAIL; // wrong allocation! (store needs to float up) |
|
3308 Node* val = st->in(MemNode::ValueIn); |
|
3309 int complexity_count = 0; |
|
3310 if (!detect_init_independence(val, complexity_count)) |
|
3311 return FAIL; // stored value must be 'simple enough' |
|
3312 |
|
3313 // The Store can be captured only if nothing after the allocation |
|
3314 // and before the Store is using the memory location that the store |
|
3315 // overwrites. |
|
3316 bool failed = false; |
|
3317 // If is_complete_with_arraycopy() is true the shape of the graph is |
|
3318 // well defined and is safe so no need for extra checks. |
|
3319 if (!is_complete_with_arraycopy()) { |
|
3320 // We are going to look at each use of the memory state following |
|
3321 // the allocation to make sure nothing reads the memory that the |
|
3322 // Store writes. |
|
3323 const TypePtr* t_adr = phase->type(adr)->isa_ptr(); |
|
3324 int alias_idx = phase->C->get_alias_index(t_adr); |
|
3325 ResourceMark rm; |
|
3326 Unique_Node_List mems; |
|
3327 mems.push(mem); |
|
3328 Node* unique_merge = NULL; |
|
3329 for (uint next = 0; next < mems.size(); ++next) { |
|
3330 Node *m = mems.at(next); |
|
3331 for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { |
|
3332 Node *n = m->fast_out(j); |
|
3333 if (n->outcnt() == 0) { |
|
3334 continue; |
|
3335 } |
|
3336 if (n == st) { |
|
3337 continue; |
|
3338 } else if (n->in(0) != NULL && n->in(0) != ctl) { |
|
3339 // If the control of this use is different from the control |
|
3340 // of the Store which is right after the InitializeNode then |
|
3341 // this node cannot be between the InitializeNode and the |
|
3342 // Store. |
|
3343 continue; |
|
3344 } else if (n->is_MergeMem()) { |
|
3345 if (n->as_MergeMem()->memory_at(alias_idx) == m) { |
|
3346 // We can hit a MergeMemNode (that will likely go away |
|
3347 // later) that is a direct use of the memory state |
|
3348 // following the InitializeNode on the same slice as the |
|
3349 // store node that we'd like to capture. We need to check |
|
3350 // the uses of the MergeMemNode. |
|
3351 mems.push(n); |
|
3352 } |
|
3353 } else if (n->is_Mem()) { |
|
3354 Node* other_adr = n->in(MemNode::Address); |
|
3355 if (other_adr == adr) { |
|
3356 failed = true; |
|
3357 break; |
|
3358 } else { |
|
3359 const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr(); |
|
3360 if (other_t_adr != NULL) { |
|
3361 int other_alias_idx = phase->C->get_alias_index(other_t_adr); |
|
3362 if (other_alias_idx == alias_idx) { |
|
3363 // A load from the same memory slice as the store right |
|
3364 // after the InitializeNode. We check the control of the |
|
3365 // object/array that is loaded from. If it's the same as |
|
3366 // the store control then we cannot capture the store. |
|
3367 assert(!n->is_Store(), "2 stores to same slice on same control?"); |
|
3368 Node* base = other_adr; |
|
3369 assert(base->is_AddP(), err_msg_res("should be addp but is %s", base->Name())); |
|
3370 base = base->in(AddPNode::Base); |
|
3371 if (base != NULL) { |
|
3372 base = base->uncast(); |
|
3373 if (base->is_Proj() && base->in(0) == alloc) { |
|
3374 failed = true; |
|
3375 break; |
|
3376 } |
|
3377 } |
|
3378 } |
|
3379 } |
|
3380 } |
|
3381 } else { |
|
3382 failed = true; |
|
3383 break; |
|
3384 } |
|
3385 } |
|
3386 } |
|
3387 } |
|
3388 if (failed) { |
|
3389 if (!can_reshape) { |
|
3390 // We decided we couldn't capture the store during parsing. We |
|
3391 // should try again during the next IGVN once the graph is |
|
3392 // cleaner. |
|
3393 phase->C->record_for_igvn(st); |
|
3394 } |
|
3395 return FAIL; |
|
3396 } |
|
3397 |
|
3398 return offset; // success |
|
3399 } |
|
3400 |
|
3401 // Find the captured store in(i) which corresponds to the range |
|
3402 // [start..start+size) in the initialized object. |
|
3403 // If there is one, return its index i. If there isn't, return the |
|
3404 // negative of the index where it should be inserted. |
|
3405 // Return 0 if the queried range overlaps an initialization boundary |
|
3406 // or if dead code is encountered. |
|
3407 // If size_in_bytes is zero, do not bother with overlap checks. |
|
3408 int InitializeNode::captured_store_insertion_point(intptr_t start, |
|
3409 int size_in_bytes, |
|
3410 PhaseTransform* phase) { |
|
3411 const int FAIL = 0, MAX_STORE = BytesPerLong; |
|
3412 |
|
3413 if (is_complete()) |
|
3414 return FAIL; // arraycopy got here first; punt |
|
3415 |
|
3416 assert(allocation() != NULL, "must be present"); |
|
3417 |
|
3418 // no negatives, no header fields: |
|
3419 if (start < (intptr_t) allocation()->minimum_header_size()) return FAIL; |
|
3420 |
|
3421 // after a certain size, we bail out on tracking all the stores: |
|
3422 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize); |
|
3423 if (start >= ti_limit) return FAIL; |
|
3424 |
|
3425 for (uint i = InitializeNode::RawStores, limit = req(); ; ) { |
|
3426 if (i >= limit) return -(int)i; // not found; here is where to put it |
|
3427 |
|
3428 Node* st = in(i); |
|
3429 intptr_t st_off = get_store_offset(st, phase); |
|
3430 if (st_off < 0) { |
|
3431 if (st != zero_memory()) { |
|
3432 return FAIL; // bail out if there is dead garbage |
|
3433 } |
|
3434 } else if (st_off > start) { |
|
3435 // ...we are done, since stores are ordered |
|
3436 if (st_off < start + size_in_bytes) { |
|
3437 return FAIL; // the next store overlaps |
|
3438 } |
|
3439 return -(int)i; // not found; here is where to put it |
|
3440 } else if (st_off < start) { |
|
3441 if (size_in_bytes != 0 && |
|
3442 start < st_off + MAX_STORE && |
|
3443 start < st_off + st->as_Store()->memory_size()) { |
|
3444 return FAIL; // the previous store overlaps |
|
3445 } |
|
3446 } else { |
|
3447 if (size_in_bytes != 0 && |
|
3448 st->as_Store()->memory_size() != size_in_bytes) { |
|
3449 return FAIL; // mismatched store size |
|
3450 } |
|
3451 return i; |
|
3452 } |
|
3453 |
|
3454 ++i; |
|
3455 } |
|
3456 } |
|
3457 |
|
3458 // Look for a captured store which initializes at the offset 'start' |
|
3459 // with the given size. If there is no such store, and no other |
|
3460 // initialization interferes, then return zero_memory (the memory |
|
3461 // projection of the AllocateNode). |
|
3462 Node* InitializeNode::find_captured_store(intptr_t start, int size_in_bytes, |
|
3463 PhaseTransform* phase) { |
|
3464 assert(stores_are_sane(phase), ""); |
|
3465 int i = captured_store_insertion_point(start, size_in_bytes, phase); |
|
3466 if (i == 0) { |
|
3467 return NULL; // something is dead |
|
3468 } else if (i < 0) { |
|
3469 return zero_memory(); // just primordial zero bits here |
|
3470 } else { |
|
3471 Node* st = in(i); // here is the store at this position |
|
3472 assert(get_store_offset(st->as_Store(), phase) == start, "sanity"); |
|
3473 return st; |
|
3474 } |
|
3475 } |
|
3476 |
|
3477 // Create, as a raw pointer, an address within my new object at 'offset'. |
|
3478 Node* InitializeNode::make_raw_address(intptr_t offset, |
|
3479 PhaseTransform* phase) { |
|
3480 Node* addr = in(RawAddress); |
|
3481 if (offset != 0) { |
|
3482 Compile* C = phase->C; |
|
3483 addr = phase->transform( new (C) AddPNode(C->top(), addr, |
|
3484 phase->MakeConX(offset)) ); |
|
3485 } |
|
3486 return addr; |
|
3487 } |
|
3488 |
|
3489 // Clone the given store, converting it into a raw store |
|
3490 // initializing a field or element of my new object. |
|
3491 // Caller is responsible for retiring the original store, |
|
3492 // with subsume_node or the like. |
|
3493 // |
|
3494 // From the example above InitializeNode::InitializeNode, |
|
3495 // here are the old stores to be captured: |
|
3496 // store1 = (StoreC init.Control init.Memory (+ oop 12) 1) |
|
3497 // store2 = (StoreC init.Control store1 (+ oop 14) 2) |
|
3498 // |
|
3499 // Here is the changed code; note the extra edges on init: |
|
3500 // alloc = (Allocate ...) |
|
3501 // rawoop = alloc.RawAddress |
|
3502 // rawstore1 = (StoreC alloc.Control alloc.Memory (+ rawoop 12) 1) |
|
3503 // rawstore2 = (StoreC alloc.Control alloc.Memory (+ rawoop 14) 2) |
|
3504 // init = (Initialize alloc.Control alloc.Memory rawoop |
|
3505 // rawstore1 rawstore2) |
|
3506 // |
|
3507 Node* InitializeNode::capture_store(StoreNode* st, intptr_t start, |
|
3508 PhaseTransform* phase, bool can_reshape) { |
|
3509 assert(stores_are_sane(phase), ""); |
|
3510 |
|
3511 if (start < 0) return NULL; |
|
3512 assert(can_capture_store(st, phase, can_reshape) == start, "sanity"); |
|
3513 |
|
3514 Compile* C = phase->C; |
|
3515 int size_in_bytes = st->memory_size(); |
|
3516 int i = captured_store_insertion_point(start, size_in_bytes, phase); |
|
3517 if (i == 0) return NULL; // bail out |
|
3518 Node* prev_mem = NULL; // raw memory for the captured store |
|
3519 if (i > 0) { |
|
3520 prev_mem = in(i); // there is a pre-existing store under this one |
|
3521 set_req(i, C->top()); // temporarily disconnect it |
|
3522 // See StoreNode::Ideal 'st->outcnt() == 1' for the reason to disconnect. |
|
3523 } else { |
|
3524 i = -i; // no pre-existing store |
|
3525 prev_mem = zero_memory(); // a slice of the newly allocated object |
|
3526 if (i > InitializeNode::RawStores && in(i-1) == prev_mem) |
|
3527 set_req(--i, C->top()); // reuse this edge; it has been folded away |
|
3528 else |
|
3529 ins_req(i, C->top()); // build a new edge |
|
3530 } |
|
3531 Node* new_st = st->clone(); |
|
3532 new_st->set_req(MemNode::Control, in(Control)); |
|
3533 new_st->set_req(MemNode::Memory, prev_mem); |
|
3534 new_st->set_req(MemNode::Address, make_raw_address(start, phase)); |
|
3535 new_st = phase->transform(new_st); |
|
3536 |
|
3537 // At this point, new_st might have swallowed a pre-existing store |
|
3538 // at the same offset, or perhaps new_st might have disappeared, |
|
3539 // if it redundantly stored the same value (or zero to fresh memory). |
|
3540 |
|
3541 // In any case, wire it in: |
|
3542 set_req(i, new_st); |
|
3543 |
|
3544 // The caller may now kill the old guy. |
|
3545 DEBUG_ONLY(Node* check_st = find_captured_store(start, size_in_bytes, phase)); |
|
3546 assert(check_st == new_st || check_st == NULL, "must be findable"); |
|
3547 assert(!is_complete(), ""); |
|
3548 return new_st; |
|
3549 } |
|
3550 |
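// Helper for coalesce_subword_stores below: write a constant of st_size bytes at
// byte offset st_off into the in-memory 'tiles' image, so adjacent subword
// constants accumulate into full 32- or 64-bit patterns. Returns false for
// misaligned offsets or store sizes other than 1, 2, 4, or 8 bytes.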
|
3551 static bool store_constant(jlong* tiles, int num_tiles, |
|
3552 intptr_t st_off, int st_size, |
|
3553 jlong con) { |
|
3554 if ((st_off & (st_size-1)) != 0) |
|
3555 return false; // strange store offset (assume size==2**N) |
|
3556 address addr = (address)tiles + st_off; |
|
3557 assert(st_off >= 0 && addr+st_size <= (address)&tiles[num_tiles], "oob"); |
|
3558 switch (st_size) { |
|
3559 case sizeof(jbyte): *(jbyte*) addr = (jbyte) con; break; |
|
3560 case sizeof(jchar): *(jchar*) addr = (jchar) con; break; |
|
3561 case sizeof(jint): *(jint*) addr = (jint) con; break; |
|
3562 case sizeof(jlong): *(jlong*) addr = (jlong) con; break; |
|
3563 default: return false; // strange store size (detect size!=2**N here) |
|
3564 } |
|
3565 return true; // return success to caller |
|
3566 } |
|
3567 |
|
3568 // Coalesce subword constants into int constants and possibly |
|
3569 // into long constants. The goal, if the CPU permits, |
|
3570 // is to initialize the object with a small number of 64-bit tiles. |
|
3571 // Also, convert floating-point constants to bit patterns. |
|
3572 // Non-constants are not relevant to this pass. |
|
3573 // |
|
3574 // In terms of the running example on InitializeNode::InitializeNode |
|
3575 // and InitializeNode::capture_store, here is the transformation |
|
3576 // of rawstore1 and rawstore2 into rawstore12: |
|
3577 // alloc = (Allocate ...) |
|
3578 // rawoop = alloc.RawAddress |
|
3579 // tile12 = 0x00010002 |
|
3580 // rawstore12 = (StoreI alloc.Control alloc.Memory (+ rawoop 12) tile12) |
|
3581 // init = (Initialize alloc.Control alloc.Memory rawoop rawstore12) |
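// (The tile value 0x00010002 shown above assumes big-endian byte order; on a
// little-endian target the coalesced constant would be 0x00020001.)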
|
3582 // |
|
3583 void |
|
3584 InitializeNode::coalesce_subword_stores(intptr_t header_size, |
|
3585 Node* size_in_bytes, |
|
3586 PhaseGVN* phase) { |
|
3587 Compile* C = phase->C; |
|
3588 |
|
3589 assert(stores_are_sane(phase), ""); |
|
3590 // Note: After this pass, they are not completely sane, |
|
3591 // since there may be some overlaps. |
|
3592 |
|
3593 int old_subword = 0, old_long = 0, new_int = 0, new_long = 0; |
|
3594 |
|
3595 intptr_t ti_limit = (TrackedInitializationLimit * HeapWordSize); |
|
3596 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, ti_limit); |
|
3597 size_limit = MIN2(size_limit, ti_limit); |
|
3598 size_limit = align_size_up(size_limit, BytesPerLong); |
|
3599 int num_tiles = size_limit / BytesPerLong; |
|
3600 |
|
3601 // allocate space for the tile map: |
|
3602 const int small_len = DEBUG_ONLY(true ? 3 :) 30; // keep stack frames small |
|
3603 jlong tiles_buf[small_len]; |
|
3604 Node* nodes_buf[small_len]; |
|
3605 jlong inits_buf[small_len]; |
|
3606 jlong* tiles = ((num_tiles <= small_len) ? &tiles_buf[0] |
|
3607 : NEW_RESOURCE_ARRAY(jlong, num_tiles)); |
|
3608 Node** nodes = ((num_tiles <= small_len) ? &nodes_buf[0] |
|
3609 : NEW_RESOURCE_ARRAY(Node*, num_tiles)); |
|
3610 jlong* inits = ((num_tiles <= small_len) ? &inits_buf[0] |
|
3611 : NEW_RESOURCE_ARRAY(jlong, num_tiles)); |
|
3612 // tiles: exact bitwise model of all primitive constants |
|
3613 // nodes: last constant-storing node subsumed into the tiles model |
|
3614 // inits: which bytes (in each tile) are touched by any initializations |
|
3615 |
|
3616 //// Pass A: Fill in the tile model with any relevant stores. |
|
3617 |
|
3618 Copy::zero_to_bytes(tiles, sizeof(tiles[0]) * num_tiles); |
|
3619 Copy::zero_to_bytes(nodes, sizeof(nodes[0]) * num_tiles); |
|
3620 Copy::zero_to_bytes(inits, sizeof(inits[0]) * num_tiles); |
|
3621 Node* zmem = zero_memory(); // initially zero memory state |
|
3622 for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) { |
|
3623 Node* st = in(i); |
|
3624 intptr_t st_off = get_store_offset(st, phase); |
|
3625 |
|
3626 // Figure out the store's offset and constant value: |
|
3627 if (st_off < header_size) continue; //skip (ignore header) |
|
3628 if (st->in(MemNode::Memory) != zmem) continue; //skip (odd store chain) |
|
3629 int st_size = st->as_Store()->memory_size(); |
|
3630 if (st_off + st_size > size_limit) break; |
|
3631 |
|
3632 // Record which bytes are touched, whether by constant or not. |
|
3633 if (!store_constant(inits, num_tiles, st_off, st_size, (jlong) -1)) |
|
3634 continue; // skip (strange store size) |
|
3635 |
|
3636 const Type* val = phase->type(st->in(MemNode::ValueIn)); |
|
3637 if (!val->singleton()) continue; //skip (non-con store) |
|
3638 BasicType type = val->basic_type(); |
|
3639 |
|
3640 jlong con = 0; |
|
3641 switch (type) { |
|
3642 case T_INT: con = val->is_int()->get_con(); break; |
|
3643 case T_LONG: con = val->is_long()->get_con(); break; |
|
3644 case T_FLOAT: con = jint_cast(val->getf()); break; |
|
3645 case T_DOUBLE: con = jlong_cast(val->getd()); break; |
|
3646 default: continue; //skip (odd store type) |
|
3647 } |
|
3648 |
|
3649 if (type == T_LONG && Matcher::isSimpleConstant64(con) && |
|
3650 st->Opcode() == Op_StoreL) { |
|
3651 continue; // This StoreL is already optimal. |
|
3652 } |
|
3653 |
|
3654 // Store down the constant. |
|
3655 store_constant(tiles, num_tiles, st_off, st_size, con); |
|
3656 |
|
3657 intptr_t j = st_off >> LogBytesPerLong; |
|
3658 |
|
3659 if (type == T_INT && st_size == BytesPerInt |
|
3660 && (st_off & BytesPerInt) == BytesPerInt) { |
|
3661 jlong lcon = tiles[j]; |
|
3662 if (!Matcher::isSimpleConstant64(lcon) && |
|
3663 st->Opcode() == Op_StoreI) { |
|
3664 // This StoreI is already optimal by itself. |
|
3665 jint* intcon = (jint*) &tiles[j]; |
|
3666 intcon[1] = 0; // undo the store_constant() |
|
3667 |
|
3668 // If the previous store is also optimal by itself, back up and |
|
3669 // undo the action of the previous loop iteration... if we can. |
|
3670 // But if we can't, just let the previous half take care of itself. |
|
3671 st = nodes[j]; |
|
3672 st_off -= BytesPerInt; |
|
3673 con = intcon[0]; |
|
3674 if (con != 0 && st != NULL && st->Opcode() == Op_StoreI) { |
|
3675 assert(st_off >= header_size, "still ignoring header"); |
|
3676 assert(get_store_offset(st, phase) == st_off, "must be"); |
|
3677 assert(in(i-1) == zmem, "must be"); |
|
3678 DEBUG_ONLY(const Type* tcon = phase->type(st->in(MemNode::ValueIn))); |
|
3679 assert(con == tcon->is_int()->get_con(), "must be"); |
|
3680 // Undo the effects of the previous loop trip, which swallowed st: |
|
3681 intcon[0] = 0; // undo store_constant() |
|
3682 set_req(i-1, st); // undo set_req(i, zmem) |
|
3683 nodes[j] = NULL; // undo nodes[j] = st |
|
3684 --old_subword; // undo ++old_subword |
|
3685 } |
|
3686 continue; // This StoreI is already optimal. |
|
3687 } |
|
3688 } |
|
3689 |
|
3690 // This store is not needed. |
|
3691 set_req(i, zmem); |
|
3692 nodes[j] = st; // record for the moment |
|
3693 if (st_size < BytesPerLong) // something has changed |
|
3694 ++old_subword; // includes int/float, but who's counting... |
|
3695 else ++old_long; |
|
3696 } |
|
3697 |
|
3698 if ((old_subword + old_long) == 0) |
|
3699 return; // nothing more to do |
|
3700 |
|
3701 //// Pass B: Convert any non-zero tiles into optimal constant stores. |
|
3702 // Be sure to insert them before overlapping non-constant stores. |
|
3703 // (E.g., byte[] x = { 1,2,y,4 } => x[int 0] = 0x01020004, x[2]=y.) |
|
3704 for (int j = 0; j < num_tiles; j++) { |
|
3705 jlong con = tiles[j]; |
|
3706 jlong init = inits[j]; |
|
3707 if (con == 0) continue; |
|
3708 jint con0, con1; // split the constant, address-wise |
|
3709 jint init0, init1; // split the init map, address-wise |
|
3710 { union { jlong con; jint intcon[2]; } u; |
|
3711 u.con = con; |
|
3712 con0 = u.intcon[0]; |
|
3713 con1 = u.intcon[1]; |
|
3714 u.con = init; |
|
3715 init0 = u.intcon[0]; |
|
3716 init1 = u.intcon[1]; |
|
3717 } |
|
3718 |
|
3719 Node* old = nodes[j]; |
|
3720 assert(old != NULL, "need the prior store"); |
|
3721 intptr_t offset = (j * BytesPerLong); |
|
3722 |
|
3723 bool split = !Matcher::isSimpleConstant64(con); |
|
3724 |
|
3725 if (offset < header_size) { |
|
3726 assert(offset + BytesPerInt >= header_size, "second int counts"); |
|
3727 assert(*(jint*)&tiles[j] == 0, "junk in header"); |
|
3728 split = true; // only the second word counts |
|
3729 // Example: int a[] = { 42 ... } |
|
3730 } else if (con0 == 0 && init0 == -1) { |
|
3731 split = true; // first word is covered by full inits |
|
3732 // Example: int a[] = { ... foo(), 42 ... } |
|
3733 } else if (con1 == 0 && init1 == -1) { |
|
3734 split = true; // second word is covered by full inits |
|
3735 // Example: int a[] = { ... 42, foo() ... } |
|
3736 } |
|
3737 |
|
3738 // Here's a case where init0 is neither 0 nor -1: |
|
3739 // byte a[] = { ... 0,0,foo(),0, 0,0,0,42 ... } |
|
3740 // Assuming big-endian memory, init0, init1 are 0x0000FF00, 0x000000FF. |
|
3741 // In this case the tile is not split; it is (jlong)42. |
|
3742 // The big tile is stored down, and then the foo() value is inserted. |
|
3743 // (If there were foo(),foo() instead of foo(),0, init0 would be -1.) |
|
3744 |
|
3745 Node* ctl = old->in(MemNode::Control); |
|
3746 Node* adr = make_raw_address(offset, phase); |
|
3747 const TypePtr* atp = TypeRawPtr::BOTTOM; |
|
3748 |
|
3749 // One or two coalesced stores to plop down. |
|
3750 Node* st[2]; |
|
3751 intptr_t off[2]; |
|
3752 int nst = 0; |
|
3753 if (!split) { |
|
3754 ++new_long; |
|
3755 off[nst] = offset; |
|
3756 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
|
3757 phase->longcon(con), T_LONG, MemNode::unordered); |
|
3758 } else { |
|
3759 // Omit either if it is a zero. |
|
3760 if (con0 != 0) { |
|
3761 ++new_int; |
|
3762 off[nst] = offset; |
|
3763 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
|
3764 phase->intcon(con0), T_INT, MemNode::unordered); |
|
3765 } |
|
3766 if (con1 != 0) { |
|
3767 ++new_int; |
|
3768 offset += BytesPerInt; |
|
3769 adr = make_raw_address(offset, phase); |
|
3770 off[nst] = offset; |
|
3771 st[nst++] = StoreNode::make(*phase, ctl, zmem, adr, atp, |
|
3772 phase->intcon(con1), T_INT, MemNode::unordered); |
|
3773 } |
|
3774 } |
|
3775 |
|
3776 // Insert second store first, then the first before the second. |
|
3777 // Insert each one just before any overlapping non-constant stores. |
|
3778 while (nst > 0) { |
|
3779 Node* st1 = st[--nst]; |
|
3780 C->copy_node_notes_to(st1, old); |
|
3781 st1 = phase->transform(st1); |
|
3782 offset = off[nst]; |
|
3783 assert(offset >= header_size, "do not smash header"); |
|
3784 int ins_idx = captured_store_insertion_point(offset, /*size:*/0, phase); |
|
3785 guarantee(ins_idx != 0, "must re-insert constant store"); |
|
3786 if (ins_idx < 0) ins_idx = -ins_idx; // never overlap |
|
3787 if (ins_idx > InitializeNode::RawStores && in(ins_idx-1) == zmem) |
|
3788 set_req(--ins_idx, st1); |
|
3789 else |
|
3790 ins_req(ins_idx, st1); |
|
3791 } |
|
3792 } |
|
3793 |
|
3794 if (PrintCompilation && WizardMode) |
|
3795 tty->print_cr("Changed %d/%d subword/long constants into %d/%d int/long", |
|
3796 old_subword, old_long, new_int, new_long); |
|
3797 if (C->log() != NULL) |
|
3798 C->log()->elem("comment that='%d/%d subword/long to %d/%d int/long'", |
|
3799 old_subword, old_long, new_int, new_long); |
|
3800 |
|
3801 // Clean up any remaining occurrences of zmem: |
|
3802 remove_extra_zeroes(); |
|
3803 } |
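
// The two passes above pack narrow constant stores into 64-bit "tiles" and then
// re-emit each non-zero tile as one long store or two int stores.  The sketch
// below is a standalone illustration of that packing and splitting, not VM code;
// sketch_store_constant and sketch_split_tile are hypothetical names, and the
// byte packing assumes a little-endian host purely for illustration.
#include <stdint.h>
#include <string.h>

// Merge 'size' bytes of constant 'con' into the tile array at byte offset 'off'.
static void sketch_store_constant(int64_t* tiles, intptr_t off, int size, int64_t con) {
  memcpy(reinterpret_cast<char*>(tiles) + off, &con, size);
}

// Split one 64-bit tile into its two 32-bit halves, address-wise, the same way
// Pass B does with a union before deciding whether to emit one store or two.
static void sketch_split_tile(int64_t tile, int32_t* lo, int32_t* hi) {
  union { int64_t con; int32_t intcon[2]; } u;
  u.con = tile;
  *lo = u.intcon[0];  // first word, address-wise
  *hi = u.intcon[1];  // second word, address-wise
}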
|
3804 |
|
3805 // Explore forward from in(start) to find the first fully initialized |
|
3806 // word, and return its offset. Skip groups of subword stores which |
|
3807 // together initialize full words. If in(start) is itself part of a |
|
3808 // fully initialized word, return the offset of in(start). If there |
|
3809 // are no following full-word stores, or if something is fishy, return |
|
3810 // a negative value. |
|
3811 intptr_t InitializeNode::find_next_fullword_store(uint start, PhaseGVN* phase) { |
|
3812 int int_map = 0; |
|
3813 intptr_t int_map_off = 0; |
|
3814 const int FULL_MAP = right_n_bits(BytesPerInt); // the int_map we hope for |
|
3815 |
|
3816 for (uint i = start, limit = req(); i < limit; i++) { |
|
3817 Node* st = in(i); |
|
3818 |
|
3819 intptr_t st_off = get_store_offset(st, phase); |
|
3820 if (st_off < 0) break; // return conservative answer |
|
3821 |
|
3822 int st_size = st->as_Store()->memory_size(); |
|
3823 if (st_size >= BytesPerInt && (st_off % BytesPerInt) == 0) { |
|
3824 return st_off; // we found a complete word init |
|
3825 } |
|
3826 |
|
3827 // update the map: |
|
3828 |
|
3829 intptr_t this_int_off = align_size_down(st_off, BytesPerInt); |
|
3830 if (this_int_off != int_map_off) { |
|
3831 // reset the map: |
|
3832 int_map = 0; |
|
3833 int_map_off = this_int_off; |
|
3834 } |
|
3835 |
|
3836 int subword_off = st_off - this_int_off; |
|
3837 int_map |= right_n_bits(st_size) << subword_off; |
|
3838 if ((int_map & FULL_MAP) == FULL_MAP) { |
|
3839 return this_int_off; // we found a complete word init |
|
3840 } |
|
3841 |
|
3842 // Did this store hit or cross the word boundary? |
|
3843 intptr_t next_int_off = align_size_down(st_off + st_size, BytesPerInt); |
|
3844 if (next_int_off == this_int_off + BytesPerInt) { |
|
3845 // We passed the current int, without fully initializing it. |
|
3846 int_map_off = next_int_off; |
|
3847 int_map >>= BytesPerInt; |
|
3848 } else if (next_int_off > this_int_off + BytesPerInt) { |
|
3849 // We passed the current and next int. |
|
3850 return this_int_off + BytesPerInt; |
|
3851 } |
|
3852 } |
|
3853 |
|
3854 return -1; |
|
3855 } |
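
// A standalone model of the byte-coverage bitmap used above, for illustration
// only (not VM code).  Each subword store of 'st_size' bytes sets that many bits
// in a per-word map; once all BytesPerInt bits are set, the word is fully
// initialized.  The VM's right_n_bits(n) (mask of the low n bits) is modeled
// directly; all names here are hypothetical.
#include <stdint.h>

static int sketch_right_n_bits(int n) { return (1 << n) - 1; }

// Fold one store at 'st_off' into the map for the word starting at 'word_off';
// returns true once that word is completely covered.
static bool sketch_mark_bytes(int* int_map, intptr_t word_off,
                              intptr_t st_off, int st_size) {
  const int kBytesPerInt = 4;
  const int kFullMap = sketch_right_n_bits(kBytesPerInt);
  int subword_off = (int)(st_off - word_off);              // 0..3 within the word
  *int_map |= sketch_right_n_bits(st_size) << subword_off;
  return (*int_map & kFullMap) == kFullMap;
}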
|
3856 |
|
3857 |
|
3858 // Called when the associated AllocateNode is expanded into CFG. |
|
3859 // At this point, we may perform additional optimizations. |
|
3860 // Linearize the stores by ascending offset, to make memory |
|
3861 // activity as coherent as possible. |
|
3862 Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, |
|
3863 intptr_t header_size, |
|
3864 Node* size_in_bytes, |
|
3865 PhaseGVN* phase) { |
|
3866 assert(!is_complete(), "not already complete"); |
|
3867 assert(stores_are_sane(phase), ""); |
|
3868 assert(allocation() != NULL, "must be present"); |
|
3869 |
|
3870 remove_extra_zeroes(); |
|
3871 |
|
3872 if (ReduceFieldZeroing || ReduceBulkZeroing) |
|
3873 // reduce instruction count for common initialization patterns |
|
3874 coalesce_subword_stores(header_size, size_in_bytes, phase); |
|
3875 |
|
3876 Node* zmem = zero_memory(); // initially zero memory state |
|
3877 Node* inits = zmem; // accumulating a linearized chain of inits |
|
3878 #ifdef ASSERT |
|
3879 intptr_t first_offset = allocation()->minimum_header_size(); |
|
3880 intptr_t last_init_off = first_offset; // previous init offset |
|
3881 intptr_t last_init_end = first_offset; // previous init offset+size |
|
3882 intptr_t last_tile_end = first_offset; // previous tile offset+size |
|
3883 #endif |
|
3884 intptr_t zeroes_done = header_size; |
|
3885 |
|
3886 bool do_zeroing = true; // we might give up if inits are very sparse |
|
3887 int big_init_gaps = 0; // how many large gaps have we seen? |
|
3888 |
|
3889 if (ZeroTLAB) do_zeroing = false; |
|
3890 if (!ReduceFieldZeroing && !ReduceBulkZeroing) do_zeroing = false; |
|
3891 |
|
3892 for (uint i = InitializeNode::RawStores, limit = req(); i < limit; i++) { |
|
3893 Node* st = in(i); |
|
3894 intptr_t st_off = get_store_offset(st, phase); |
|
3895 if (st_off < 0) |
|
3896 break; // unknown junk in the inits |
|
3897 if (st->in(MemNode::Memory) != zmem) |
|
3898 break; // complicated store chains somehow in list |
|
3899 |
|
3900 int st_size = st->as_Store()->memory_size(); |
|
3901 intptr_t next_init_off = st_off + st_size; |
|
3902 |
|
3903 if (do_zeroing && zeroes_done < next_init_off) { |
|
3904 // See if this store needs a zero before it or under it. |
|
3905 intptr_t zeroes_needed = st_off; |
|
3906 |
|
3907 if (st_size < BytesPerInt) { |
|
3908 // Look for subword stores which only partially initialize words. |
|
3909 // If we find some, we must lay down some word-level zeroes first, |
|
3910 // underneath the subword stores. |
|
3911 // |
|
3912 // Examples: |
|
3913 // byte[] a = { p,q,r,s } => a[0]=p,a[1]=q,a[2]=r,a[3]=s |
|
3914 // byte[] a = { x,y,0,0 } => a[0..3] = 0, a[0]=x,a[1]=y |
|
3915 // byte[] a = { 0,0,z,0 } => a[0..3] = 0, a[2]=z |
|
3916 // |
|
3917 // Note: coalesce_subword_stores may have already done this, |
|
3918 // if it was prompted by constant non-zero subword initializers. |
|
3919 // But this case can still arise with non-constant stores. |
|
3920 |
|
3921 intptr_t next_full_store = find_next_fullword_store(i, phase); |
|
3922 |
|
3923 // In the examples above: |
|
3925 //   in(i)          p   q   r   s     x   y     z

3926 //   st_off        12  13  14  15    12  13    14

3927 //   st_size        1   1   1   1     1   1     1

3928 //   next_full_s.  12  16  16  16    16  16    16

3929 //   z's_done      12  16  16  16    12  16    12

3930 //   z's_needed    12  16  16  16    16  16    16

3931 //   zsize          0   0   0   0     4   0     4
|
3931 if (next_full_store < 0) { |
|
3932 // Conservative tack: Zero to end of current word. |
|
3933 zeroes_needed = align_size_up(zeroes_needed, BytesPerInt); |
|
3934 } else { |
|
3935 // Zero to beginning of next fully initialized word. |
|
3936 // Or, don't zero at all, if we are already in that word. |
|
3937 assert(next_full_store >= zeroes_needed, "must go forward"); |
|
3938 assert((next_full_store & (BytesPerInt-1)) == 0, "even boundary"); |
|
3939 zeroes_needed = next_full_store; |
|
3940 } |
|
3941 } |
|
3942 |
|
3943 if (zeroes_needed > zeroes_done) { |
|
3944 intptr_t zsize = zeroes_needed - zeroes_done; |
|
3945 // Do some incremental zeroing on rawmem, in parallel with inits. |
|
3946 zeroes_done = align_size_down(zeroes_done, BytesPerInt); |
|
3947 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr, |
|
3948 zeroes_done, zeroes_needed, |
|
3949 phase); |
|
3950 zeroes_done = zeroes_needed; |
|
3951 if (zsize > Matcher::init_array_short_size && ++big_init_gaps > 2) |
|
3952 do_zeroing = false; // leave the hole, next time |
|
3953 } |
|
3954 } |
|
3955 |
|
3956 // Collect the store and move on: |
|
3957 st->set_req(MemNode::Memory, inits); |
|
3958 inits = st; // put it on the linearized chain |
|
3959 set_req(i, zmem); // unhook from previous position |
|
3960 |
|
3961 if (zeroes_done == st_off) |
|
3962 zeroes_done = next_init_off; |
|
3963 |
|
3964 assert(!do_zeroing || zeroes_done >= next_init_off, "don't miss any"); |
|
3965 |
|
3966 #ifdef ASSERT |
|
3967 // Various order invariants. Weaker than stores_are_sane because |
|
3968 // a large constant tile can be filled in by smaller non-constant stores. |
|
3969 assert(st_off >= last_init_off, "inits do not reverse"); |
|
3970 last_init_off = st_off; |
|
3971 const Type* val = NULL; |
|
3972 if (st_size >= BytesPerInt && |
|
3973 (val = phase->type(st->in(MemNode::ValueIn)))->singleton() && |
|
3974 (int)val->basic_type() < (int)T_OBJECT) { |
|
3975 assert(st_off >= last_tile_end, "tiles do not overlap"); |
|
3976 assert(st_off >= last_init_end, "tiles do not overwrite inits"); |
|
3977 last_tile_end = MAX2(last_tile_end, next_init_off); |
|
3978 } else { |
|
3979 intptr_t st_tile_end = align_size_up(next_init_off, BytesPerLong); |
|
3980 assert(st_tile_end >= last_tile_end, "inits stay with tiles"); |
|
3981 assert(st_off >= last_init_end, "inits do not overlap"); |
|
3982 last_init_end = next_init_off; // it's a non-tile |
|
3983 } |
|
3984 #endif //ASSERT |
|
3985 } |
|
3986 |
|
3987 remove_extra_zeroes(); // clear out all the zmems left over |
|
3988 add_req(inits); |
|
3989 |
|
3990 if (!ZeroTLAB) { |
|
3991 // If anything remains to be zeroed, zero it all now. |
|
3992 zeroes_done = align_size_down(zeroes_done, BytesPerInt); |
|
3993 // if it is the last unused 4 bytes of an instance, forget about it |
|
3994 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint); |
|
3995 if (zeroes_done + BytesPerLong >= size_limit) { |
|
3996 assert(allocation() != NULL, ""); |
|
3997 if (allocation()->Opcode() == Op_Allocate) { |
|
3998 Node* klass_node = allocation()->in(AllocateNode::KlassNode); |
|
3999 ciKlass* k = phase->type(klass_node)->is_klassptr()->klass(); |
|
4000 if (zeroes_done == k->layout_helper()) |
|
4001 zeroes_done = size_limit; |
|
4002 } |
|
4003 } |
|
4004 if (zeroes_done < size_limit) { |
|
4005 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr, |
|
4006 zeroes_done, size_in_bytes, phase); |
|
4007 } |
|
4008 } |
|
4009 |
|
4010 set_complete(phase); |
|
4011 return rawmem; |
|
4012 } |
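
// The zeroing bookkeeping in complete_stores, reduced to plain arithmetic as an
// illustration (not VM code).  This simplified sketch ignores
// find_next_fullword_store and the give-up heuristic for sparse inits: a subword
// store conservatively forces zeroing to the end of its word, and a store that
// begins exactly at the cursor advances the cursor by its own size.  The helper
// name stands in for the VM's align_size_up.
#include <stdint.h>

static intptr_t sketch_align_up(intptr_t x, intptr_t a) { return (x + a - 1) & -a; }

static intptr_t sketch_advance_zeroes(intptr_t zeroes_done, intptr_t st_off, int st_size) {
  const intptr_t kBytesPerInt = 4;
  intptr_t zeroes_needed = st_off;
  if (st_size < kBytesPerInt)                               // subword store:
    zeroes_needed = sketch_align_up(st_off, kBytesPerInt);  // zero to end of its word
  if (zeroes_needed > zeroes_done) {
    // The real code emits a ClearArray over [align_down(zeroes_done), zeroes_needed).
    zeroes_done = zeroes_needed;
  }
  if (zeroes_done == st_off)           // the store begins exactly at the cursor,
    zeroes_done = st_off + st_size;    // so its own bytes advance it
  return zeroes_done;
}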
|
4013 |
|
4014 |
|
4015 #ifdef ASSERT |
|
4016 bool InitializeNode::stores_are_sane(PhaseTransform* phase) { |
|
4017 if (is_complete()) |
|
4018 return true; // stores could be anything at this point |
|
4019 assert(allocation() != NULL, "must be present"); |
|
4020 intptr_t last_off = allocation()->minimum_header_size(); |
|
4021 for (uint i = InitializeNode::RawStores; i < req(); i++) { |
|
4022 Node* st = in(i); |
|
4023 intptr_t st_off = get_store_offset(st, phase); |
|
4024 if (st_off < 0) continue; // ignore dead garbage |
|
4025 if (last_off > st_off) { |
|
4026 tty->print_cr("*** bad store offset at %d: " INTX_FORMAT " > " INTX_FORMAT, i, last_off, st_off); |
|
4027 this->dump(2); |
|
4028 assert(false, "ascending store offsets"); |
|
4029 return false; |
|
4030 } |
|
4031 last_off = st_off + st->as_Store()->memory_size(); |
|
4032 } |
|
4033 return true; |
|
4034 } |
|
4035 #endif //ASSERT |
|
4036 |
|
4037 |
|
4038 |
|
4039 |
|
4040 //============================MergeMemNode===================================== |
|
4041 // |
|
4042 // SEMANTICS OF MEMORY MERGES: A MergeMem is a memory state assembled from several |
|
4043 // contributing store or call operations. Each contributor provides the memory |
|
4044 // state for a particular "alias type" (see Compile::alias_type). For example, |
|
4045 // if a MergeMem has an input X for alias category #6, then any memory reference |
|
4046 // to alias category #6 may use X as its memory state input, as an exact equivalent |
|
4047 // to using the MergeMem as a whole. |
|
4048 // Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p) |
|
4049 // |
|
4050 // (Here, the <N> notation gives the index of the relevant adr_type.) |
|
4051 // |
|
4052 // In one special case (and more cases in the future), alias categories overlap. |
|
4053 // The special alias category "Bot" (Compile::AliasIdxBot) includes all memory |
|
4054 // states. Therefore, if a MergeMem has only one contributing input W for Bot, |
|
4055 // it is exactly equivalent to that state W: |
|
4056 // MergeMem(<Bot>: W) <==> W |
|
4057 // |
|
4058 // Usually, the merge has more than one input. In that case, where inputs |
|
4059 // overlap (i.e., one is Bot), the narrower alias type determines the memory |
|
4060 // state for that type, and the wider alias type (Bot) fills in everywhere else: |
|
4061 // Load<5>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<5>(W,p) |
|
4062 // Load<6>( MergeMem(<Bot>: W, <6>: X), p ) <==> Load<6>(X,p) |
|
4063 // |
|
4064 // A merge can take a "wide" memory state as one of its narrow inputs. |
|
4065 // This simply means that the merge observes only the relevant parts of
|
4066 // the wide input. That is, wide memory states arriving at narrow merge inputs |
|
4067 // are implicitly "filtered" or "sliced" as necessary. (This is rare.) |
|
4068 // |
|
4069 // These rules imply that MergeMem nodes may cascade (via their <Bot> links), |
|
4070 // and that memory slices "leak through": |
|
4071 // MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y)) <==> MergeMem(<Bot>: W, <7>: Y) |
|
4072 // |
|
4073 // But, in such a cascade, repeated memory slices can "block the leak": |
|
4074 // MergeMem(<Bot>: MergeMem(<Bot>: W, <7>: Y), <7>: Y') <==> MergeMem(<Bot>: W, <7>: Y') |
|
4075 // |
|
4076 // In the last example, Y is not part of the combined memory state of the |
|
4077 // outermost MergeMem. The system must, of course, prevent unschedulable |
|
4078 // memory states from arising, so you can be sure that the state Y is somehow |
|
4079 // a precursor to state Y'. |
|
4080 // |
|
4081 // |
|
4082 // REPRESENTATION OF MEMORY MERGES: The indexes used to address the Node::in array |
|
4083 // of each MergeMemNode are exactly the numerical alias indexes, including
|
4084 // but not limited to AliasIdxTop, AliasIdxBot, and AliasIdxRaw. The functions |
|
4085 // Compile::alias_type (and kin) produce and manage these indexes. |
|
4086 // |
|
4087 // By convention, the value of in(AliasIdxTop) (i.e., in(1)) is always the top node. |
|
4088 // (Note that this provides quick access to the top node inside MergeMem methods, |
|
4089 // without the need to reach out via TLS to Compile::current.) |
|
4090 // |
|
4091 // As a consequence of what was just described, a MergeMem that represents a full |
|
4092 // memory state has an edge in(AliasIdxBot) which is a "wide" memory state, |
|
4093 // containing all alias categories. |
|
4094 // |
|
4095 // MergeMem nodes never (?) have control inputs, so in(0) is NULL. |
|
4096 // |
|
4097 // All other edges in(N) (including in(AliasIdxRaw), which is in(3)) are either |
|
4098 // a memory state for the alias type <N>, or else the top node, meaning that |
|
4099 // there is no particular input for that alias type. Note that the length of |
|
4100 // a MergeMem is variable, and may be extended at any time to accommodate new |
|
4101 // memory states at larger alias indexes. When merges grow, they are of course |
|
4102 // filled with "top" in the unused in() positions. |
|
4103 // |
|
4104 // This use of top is named "empty_memory()", or "empty_mem" (no-memory) as a variable. |
|
4105 // (Top was chosen because it works smoothly with passes like GCM.) |
|
4106 // |
|
4107 // For convenience, we hardwire the alias index for TypeRawPtr::BOTTOM. (It is |
|
4108 // the type of random VM bits like TLS references.) Since it is always the |
|
4109 // first non-Bot memory slice, some low-level loops use it to initialize an |
|
4110 // index variable: for (i = AliasIdxRaw; i < req(); i++). |
|
4111 // |
|
4112 // |
|
4113 // ACCESSORS: There is a special accessor MergeMemNode::base_memory which returns |
|
4114 // the distinguished "wide" state. The accessor MergeMemNode::memory_at(N) returns |
|
4115 // the memory state for alias type <N>, or, if there is no particular slice at <N>,

4116 // the base memory. To prevent bugs, memory_at does not accept <Top>
|
4117 // or <Bot> indexes. The iterator MergeMemStream provides robust iteration over |
|
4118 // MergeMem nodes or pairs of such nodes, ensuring that the non-top edges are visited. |
|
4119 // |
|
4120 // %%%% We may get rid of base_memory as a separate accessor at some point; it isn't |
|
4121 // really that different from the other memory inputs. An abbreviation called |
|
4122 // "bot_memory()" for "memory_at(AliasIdxBot)" would keep code tidy. |
|
4123 // |
|
4124 // |
|
4125 // PARTIAL MEMORY STATES: During optimization, MergeMem nodes may arise that represent |
|
4126 // partial memory states. When a Phi splits through a MergeMem, the copy of the Phi |
|
4127 // that "emerges through" the base memory will be marked as excluding the alias types
|
4128 // of the other (narrow-memory) copies which "emerged through" the narrow edges: |
|
4129 // |
|
4130 // Phi<Bot>(U, MergeMem(<Bot>: W, <8>: Y)) |
|
4131 // ==Ideal=> MergeMem(<Bot>: Phi<Bot-8>(U, W), Phi<8>(U, Y)) |
|
4132 // |
|
4133 // This strange "subtraction" effect is necessary to ensure IGVN convergence. |
|
4134 // (It is currently unimplemented.) As you can see, the resulting merge is |
|
4135 // actually a disjoint union of memory states, rather than an overlay. |
|
4136 // |
|
4137 |
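// The lookup rule described above, modeled with a plain sparse array as an
// illustration (not VM code).  Index i plays the role of an alias index; a NULL
// slot means "defer to the base memory", just as a top-valued edge does in a
// real MergeMem.  SketchMergeMem and its members are hypothetical names.
#include <vector>
#include <cstddef>

struct SketchMergeMem {
  std::vector<const void*> slices;   // sparse: NULL == empty, use the base
  const void* base;                  // the wide <Bot> state

  const void* memory_at(size_t alias_idx) const {
    if (alias_idx < slices.size() && slices[alias_idx] != NULL)
      return slices[alias_idx];      // a narrow slice overrides the base
    return base;                     // otherwise the wide state fills in
  }
};
// So a load on alias category 6 reading memory_at(6) sees the same state whether
// it is wired to the merge or directly to the slice, which is the equivalence
// Load<6>( MergeMem(<6>: X, ...), p ) <==> Load<6>(X,p) noted above.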
|
4138 //------------------------------MergeMemNode----------------------------------- |
|
4139 Node* MergeMemNode::make_empty_memory() { |
|
4140 Node* empty_memory = (Node*) Compile::current()->top(); |
|
4141 assert(empty_memory->is_top(), "correct sentinel identity"); |
|
4142 return empty_memory; |
|
4143 } |
|
4144 |
|
4145 MergeMemNode::MergeMemNode(Node *new_base) : Node(1+Compile::AliasIdxRaw) { |
|
4146 init_class_id(Class_MergeMem); |
|
4147 // all inputs are nullified in Node::Node(int) |
|
4148 // set_input(0, NULL); // no control input |
|
4149 |
|
4150 // Initialize the edges uniformly to top, for starters. |
|
4151 Node* empty_mem = make_empty_memory(); |
|
4152 for (uint i = Compile::AliasIdxTop; i < req(); i++) { |
|
4153 init_req(i,empty_mem); |
|
4154 } |
|
4155 assert(empty_memory() == empty_mem, ""); |
|
4156 |
|
4157 if( new_base != NULL && new_base->is_MergeMem() ) { |
|
4158 MergeMemNode* mdef = new_base->as_MergeMem(); |
|
4159 assert(mdef->empty_memory() == empty_mem, "consistent sentinels"); |
|
4160 for (MergeMemStream mms(this, mdef); mms.next_non_empty2(); ) { |
|
4161 mms.set_memory(mms.memory2()); |
|
4162 } |
|
4163 assert(base_memory() == mdef->base_memory(), ""); |
|
4164 } else { |
|
4165 set_base_memory(new_base); |
|
4166 } |
|
4167 } |
|
4168 |
|
4169 // Make a new, untransformed MergeMem with the same base as 'mem'. |
|
4170 // If mem is itself a MergeMem, populate the result with the same edges. |
|
4171 MergeMemNode* MergeMemNode::make(Compile* C, Node* mem) { |
|
4172 return new(C) MergeMemNode(mem); |
|
4173 } |
|
4174 |
|
4175 //------------------------------cmp-------------------------------------------- |
|
4176 uint MergeMemNode::hash() const { return NO_HASH; } |
|
4177 uint MergeMemNode::cmp( const Node &n ) const { |
|
4178 return (&n == this); // Always fail except on self |
|
4179 } |
|
4180 |
|
4181 //------------------------------Identity--------------------------------------- |
|
4182 Node* MergeMemNode::Identity(PhaseTransform *phase) { |
|
4183 // Identity if this merge point does not record any interesting memory |
|
4184 // disambiguations. |
|
4185 Node* base_mem = base_memory(); |
|
4186 Node* empty_mem = empty_memory(); |
|
4187 if (base_mem != empty_mem) { // Memory path is not dead? |
|
4188 for (uint i = Compile::AliasIdxRaw; i < req(); i++) { |
|
4189 Node* mem = in(i); |
|
4190 if (mem != empty_mem && mem != base_mem) { |
|
4191 return this; // Many memory splits; no change |
|
4192 } |
|
4193 } |
|
4194 } |
|
4195 return base_mem; // No memory splits; ID on the one true input |
|
4196 } |
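
// The rule above, restated as a standalone predicate for illustration (not VM
// code): if every narrow slot is either empty or identical to the base, the
// merge records no interesting splits and can be replaced by its base memory.
#include <vector>
#include <cstddef>

static bool sketch_collapses_to_base(const std::vector<const void*>& slices,
                                     const void* base, const void* empty) {
  for (size_t i = 0; i < slices.size(); i++) {
    if (slices[i] != empty && slices[i] != base)
      return false;                  // a genuine split; keep the MergeMem
  }
  return true;                       // no splits; identical to the base input
}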
|
4197 |
|
4198 //------------------------------Ideal------------------------------------------ |
|
4199 // This method is invoked recursively on chains of MergeMem nodes |
|
4200 Node *MergeMemNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
|
4201 // Remove chain'd MergeMems |
|
4202 // |
|
4203 // This is delicate, because each "in(i)" (i >= Raw) is interpreted
|
4204 // relative to the "in(Bot)". Since we are patching both at the same time, |
|
4205 // we have to be careful to read each "in(i)" relative to the old "in(Bot)", |
|
4206 // but rewrite each "in(i)" relative to the new "in(Bot)". |
|
4207 Node *progress = NULL; |
|
4208 |
|
4209 |
|
4210 Node* old_base = base_memory(); |
|
4211 Node* empty_mem = empty_memory(); |
|
4212 if (old_base == empty_mem) |
|
4213 return NULL; // Dead memory path. |
|
4214 |
|
4215 MergeMemNode* old_mbase; |
|
4216 if (old_base != NULL && old_base->is_MergeMem()) |
|
4217 old_mbase = old_base->as_MergeMem(); |
|
4218 else |
|
4219 old_mbase = NULL; |
|
4220 Node* new_base = old_base; |
|
4221 |
|
4222 // simplify stacked MergeMems in base memory |
|
4223 if (old_mbase) new_base = old_mbase->base_memory(); |
|
4224 |
|
4225 // the base memory might contribute new slices beyond my req() |
|
4226 if (old_mbase) grow_to_match(old_mbase); |
|
4227 |
|
4228 // Look carefully at the base node if it is a phi. |
|
4229 PhiNode* phi_base; |
|
4230 if (new_base != NULL && new_base->is_Phi()) |
|
4231 phi_base = new_base->as_Phi(); |
|
4232 else |
|
4233 phi_base = NULL; |
|
4234 |
|
4235 Node* phi_reg = NULL; |
|
4236 uint phi_len = (uint)-1; |
|
4237 if (phi_base != NULL && !phi_base->is_copy()) { |
|
4238 // do not examine phi if degraded to a copy |
|
4239 phi_reg = phi_base->region(); |
|
4240 phi_len = phi_base->req(); |
|
4241 // see if the phi is unfinished |
|
4242 for (uint i = 1; i < phi_len; i++) { |
|
4243 if (phi_base->in(i) == NULL) { |
|
4244 // incomplete phi; do not look at it yet! |
|
4245 phi_reg = NULL; |
|
4246 phi_len = (uint)-1; |
|
4247 break; |
|
4248 } |
|
4249 } |
|
4250 } |
|
4251 |
|
4252 // Note: We do not call verify_sparse on entry, because inputs |
|
4253 // can normalize to the base_memory via subsume_node or similar |
|
4254 // mechanisms. This method repairs that damage. |
|
4255 |
|
4256 assert(!old_mbase || old_mbase->is_empty_memory(empty_mem), "consistent sentinels"); |
|
4257 |
|
4258 // Look at each slice. |
|
4259 for (uint i = Compile::AliasIdxRaw; i < req(); i++) { |
|
4260 Node* old_in = in(i); |
|
4261 // calculate the old memory value |
|
4262 Node* old_mem = old_in; |
|
4263 if (old_mem == empty_mem) old_mem = old_base; |
|
4264 assert(old_mem == memory_at(i), ""); |
|
4265 |
|
4266 // maybe update (reslice) the old memory value |
|
4267 |
|
4268 // simplify stacked MergeMems |
|
4269 Node* new_mem = old_mem; |
|
4270 MergeMemNode* old_mmem; |
|
4271 if (old_mem != NULL && old_mem->is_MergeMem()) |
|
4272 old_mmem = old_mem->as_MergeMem(); |
|
4273 else |
|
4274 old_mmem = NULL; |
|
4275 if (old_mmem == this) { |
|
4276 // This can happen if loops break up and safepoints disappear. |
|
4277 // A merge of BotPtr (default) with a RawPtr memory derived from a |
|
4278 // safepoint can be rewritten to a merge of the same BotPtr with |
|
4279 // the BotPtr phi coming into the loop. If that phi disappears |
|
4280 // also, we can end up with a self-loop of the mergemem. |
|
4281 // In general, if loops degenerate and memory effects disappear, |
|
4282 // a mergemem can be left looking at itself. This simply means |
|
4283 // that the mergemem's default should be used, since there is |
|
4284 // no longer any apparent effect on this slice. |
|
4285 // Note: If a memory slice is a MergeMem cycle, it is unreachable |
|
4286 // from start. Update the input to TOP. |
|
4287 new_mem = (new_base == this || new_base == empty_mem)? empty_mem : new_base; |
|
4288 } |
|
4289 else if (old_mmem != NULL) { |
|
4290 new_mem = old_mmem->memory_at(i); |
|
4291 } |
|
4292 // else preceding memory was not a MergeMem |
|
4293 |
|
4294 // replace equivalent phis (unfortunately, they do not GVN together) |
|
4295 if (new_mem != NULL && new_mem != new_base && |
|
4296 new_mem->req() == phi_len && new_mem->in(0) == phi_reg) { |
|
4297 if (new_mem->is_Phi()) { |
|
4298 PhiNode* phi_mem = new_mem->as_Phi(); |
|
4299 for (uint i = 1; i < phi_len; i++) { |
|
4300 if (phi_base->in(i) != phi_mem->in(i)) { |
|
4301 phi_mem = NULL; |
|
4302 break; |
|
4303 } |
|
4304 } |
|
4305 if (phi_mem != NULL) { |
|
4306 // equivalent phi nodes; revert to the def |
|
4307 new_mem = new_base; |
|
4308 } |
|
4309 } |
|
4310 } |
|
4311 |
|
4312 // maybe store down a new value |
|
4313 Node* new_in = new_mem; |
|
4314 if (new_in == new_base) new_in = empty_mem; |
|
4315 |
|
4316 if (new_in != old_in) { |
|
4317 // Warning: Do not combine this "if" with the previous "if" |
|
4318 // A memory slice might have to be rewritten even if it is semantically
|
4319 // unchanged, if the base_memory value has changed. |
|
4320 set_req(i, new_in); |
|
4321 progress = this; // Report progress |
|
4322 } |
|
4323 } |
|
4324 |
|
4325 if (new_base != old_base) { |
|
4326 set_req(Compile::AliasIdxBot, new_base); |
|
4327 // Don't use set_base_memory(new_base), because we need to update du. |
|
4328 assert(base_memory() == new_base, ""); |
|
4329 progress = this; |
|
4330 } |
|
4331 |
|
4332 if( base_memory() == this ) { |
|
4333 // a self cycle indicates this memory path is dead |
|
4334 set_req(Compile::AliasIdxBot, empty_mem); |
|
4335 } |
|
4336 |
|
4337 // Resolve external cycles by calling Ideal on a MergeMem base_memory |
|
4338 // Recursion must occur after the self cycle check above |
|
4339 if( base_memory()->is_MergeMem() ) { |
|
4340 MergeMemNode *new_mbase = base_memory()->as_MergeMem(); |
|
4341 Node *m = phase->transform(new_mbase); // Rollup any cycles |
|
4342 if( m != NULL && (m->is_top() || |
|
4343 (m->is_MergeMem() && m->as_MergeMem()->base_memory() == empty_mem)) ) {
|
4344 // propagate rollup of dead cycle to self |
|
4345 set_req(Compile::AliasIdxBot, empty_mem); |
|
4346 } |
|
4347 } |
|
4348 |
|
4349 if( base_memory() == empty_mem ) { |
|
4350 progress = this; |
|
4351 // Cut inputs during Parse phase only. |
|
4352 // During Optimize phase a dead MergeMem node will be subsumed by Top. |
|
4353 if( !can_reshape ) { |
|
4354 for (uint i = Compile::AliasIdxRaw; i < req(); i++) { |
|
4355 if( in(i) != empty_mem ) { set_req(i, empty_mem); } |
|
4356 } |
|
4357 } |
|
4358 } |
|
4359 |
|
4360 if( !progress && base_memory()->is_Phi() && can_reshape ) { |
|
4361 // Check if PhiNode::Ideal's "Split phis through memory merges" |
|
4362 // transform should be attempted. Look for this->phi->this cycle. |
|
4363 uint merge_width = req(); |
|
4364 if (merge_width > Compile::AliasIdxRaw) { |
|
4365 PhiNode* phi = base_memory()->as_Phi(); |
|
4366 for( uint i = 1; i < phi->req(); ++i ) {// For all paths in |
|
4367 if (phi->in(i) == this) { |
|
4368 phase->is_IterGVN()->_worklist.push(phi); |
|
4369 break; |
|
4370 } |
|
4371 } |
|
4372 } |
|
4373 } |
|
4374 |
|
4375 assert(progress || verify_sparse(), "please, no dups of base"); |
|
4376 return progress; |
|
4377 } |
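
// The re-encoding step in Ideal's per-slice loop, shown standalone for
// illustration (not VM code).  A slot is decoded against the old base (empty
// means "use the old base") and re-encoded against the new base (a value equal
// to the new base becomes empty again), which is why a slice may need rewriting
// even when its memory value is semantically unchanged.
static int sketch_reencode_slot(int old_slot, int old_base, int new_base, int empty) {
  int mem = (old_slot == empty) ? old_base : old_slot;   // decode vs. the old base
  return (mem == new_base) ? empty : mem;                // re-encode vs. the new base
}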
|
4378 |
|
4379 //-------------------------set_base_memory------------------------------------- |
|
4380 void MergeMemNode::set_base_memory(Node *new_base) { |
|
4381 Node* empty_mem = empty_memory(); |
|
4382 set_req(Compile::AliasIdxBot, new_base); |
|
4383 assert(memory_at(req()) == new_base, "must set default memory"); |
|
4384 // Clear out other occurrences of new_base: |
|
4385 if (new_base != empty_mem) { |
|
4386 for (uint i = Compile::AliasIdxRaw; i < req(); i++) { |
|
4387 if (in(i) == new_base) set_req(i, empty_mem); |
|
4388 } |
|
4389 } |
|
4390 } |
|
4391 |
|
4392 //------------------------------out_RegMask------------------------------------ |
|
4393 const RegMask &MergeMemNode::out_RegMask() const { |
|
4394 return RegMask::Empty; |
|
4395 } |
|
4396 |
|
4397 //------------------------------dump_spec-------------------------------------- |
|
4398 #ifndef PRODUCT |
|
4399 void MergeMemNode::dump_spec(outputStream *st) const { |
|
4400 st->print(" {"); |
|
4401 Node* base_mem = base_memory(); |
|
4402 for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) { |
|
4403 Node* mem = memory_at(i); |
|
4404 if (mem == base_mem) { st->print(" -"); continue; } |
|
4405 st->print( " N%d:", mem->_idx ); |
|
4406 Compile::current()->get_adr_type(i)->dump_on(st); |
|
4407 } |
|
4408 st->print(" }"); |
|
4409 } |
|
4410 #endif // !PRODUCT |
|
4411 |
|
4412 |
|
4413 #ifdef ASSERT |
|
4414 static bool might_be_same(Node* a, Node* b) { |
|
4415 if (a == b) return true; |
|
4416 if (!(a->is_Phi() || b->is_Phi())) return false; |
|
4417 // phis shift around during optimization |
|
4418 return true; // pretty stupid... |
|
4419 } |
|
4420 |
|
4421 // verify a narrow slice (either incoming or outgoing) |
|
4422 static void verify_memory_slice(const MergeMemNode* m, int alias_idx, Node* n) { |
|
4423 if (!VerifyAliases) return; // don't bother to verify unless requested |
|
4424 if (is_error_reported()) return; // muzzle asserts when debugging an error |
|
4425 if (Node::in_dump()) return; // muzzle asserts when printing |
|
4426 assert(alias_idx >= Compile::AliasIdxRaw, "must not disturb base_memory or sentinel"); |
|
4427 assert(n != NULL, ""); |
|
4428 // Elide intervening MergeMem's |
|
4429 while (n->is_MergeMem()) { |
|
4430 n = n->as_MergeMem()->memory_at(alias_idx); |
|
4431 } |
|
4432 Compile* C = Compile::current(); |
|
4433 const TypePtr* n_adr_type = n->adr_type(); |
|
4434 if (n == m->empty_memory()) { |
|
4435 // Implicit copy of base_memory() |
|
4436 } else if (n_adr_type != TypePtr::BOTTOM) { |
|
4437 assert(n_adr_type != NULL, "new memory must have a well-defined adr_type"); |
|
4438 assert(C->must_alias(n_adr_type, alias_idx), "new memory must match selected slice"); |
|
4439 } else { |
|
4440 // A few places like make_runtime_call "know" that VM calls are narrow, |
|
4441 // and can be used to update only the VM bits stored as TypeRawPtr::BOTTOM. |
|
4442 bool expected_wide_mem = false; |
|
4443 if (n == m->base_memory()) { |
|
4444 expected_wide_mem = true; |
|
4445 } else if (alias_idx == Compile::AliasIdxRaw || |
|
4446 n == m->memory_at(Compile::AliasIdxRaw)) { |
|
4447 expected_wide_mem = true; |
|
4448 } else if (!C->alias_type(alias_idx)->is_rewritable()) { |
|
4449 // memory can "leak through" calls on channels that |
|
4450 // are write-once. Allow this also. |
|
4451 expected_wide_mem = true; |
|
4452 } |
|
4453 assert(expected_wide_mem, "expected narrow slice replacement"); |
|
4454 } |
|
4455 } |
|
4456 #else // !ASSERT |
|
4457 #define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op |
|
4458 #endif |
|
4459 |
|
4460 |
|
4461 //-----------------------------memory_at--------------------------------------- |
|
4462 Node* MergeMemNode::memory_at(uint alias_idx) const { |
|
4463 assert(alias_idx >= Compile::AliasIdxRaw || |
|
4464 (alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0),
|
4465 "must avoid base_memory and AliasIdxTop"); |
|
4466 |
|
4467 // Otherwise, it is a narrow slice. |
|
4468 Node* n = alias_idx < req() ? in(alias_idx) : empty_memory(); |
|
4469 Compile *C = Compile::current(); |
|
4470 if (is_empty_memory(n)) { |
|
4471 // the array is sparse; empty slots are the "top" node |
|
4472 n = base_memory(); |
|
4473 assert(Node::in_dump() |
|
4474 || n == NULL || n->bottom_type() == Type::TOP |
|
4475 || n->adr_type() == NULL // address is TOP |
|
4476 || n->adr_type() == TypePtr::BOTTOM |
|
4477 || n->adr_type() == TypeRawPtr::BOTTOM |
|
4478 || Compile::current()->AliasLevel() == 0, |
|
4479 "must be a wide memory"); |
|
4480 // AliasLevel == 0 if we are organizing the memory states manually. |
|
4481 // See verify_memory_slice for comments on TypeRawPtr::BOTTOM. |
|
4482 } else { |
|
4483 // make sure the stored slice is sane |
|
4484 #ifdef ASSERT |
|
4485 if (is_error_reported() || Node::in_dump()) { |
|
4486 } else if (might_be_same(n, base_memory())) { |
|
4487 // Give it a pass: It is a mostly harmless repetition of the base. |
|
4488 // This can arise normally from node subsumption during optimization. |
|
4489 } else { |
|
4490 verify_memory_slice(this, alias_idx, n); |
|
4491 } |
|
4492 #endif |
|
4493 } |
|
4494 return n; |
|
4495 } |
|
4496 |
|
4497 //---------------------------set_memory_at------------------------------------- |
|
4498 void MergeMemNode::set_memory_at(uint alias_idx, Node *n) { |
|
4499 verify_memory_slice(this, alias_idx, n); |
|
4500 Node* empty_mem = empty_memory(); |
|
4501 if (n == base_memory()) n = empty_mem; // collapse default |
|
4502 uint need_req = alias_idx+1; |
|
4503 if (req() < need_req) { |
|
4504 if (n == empty_mem) return; // already the default, so do not grow me |
|
4505 // grow the sparse array |
|
4506 do { |
|
4507 add_req(empty_mem); |
|
4508 } while (req() < need_req); |
|
4509 } |
|
4510 set_req( alias_idx, n ); |
|
4511 } |
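
// The sparse-growth step of set_memory_at, on a plain vector for illustration
// (not VM code).  Storing the default value never grows the array; storing
// anything else first pads with the empty sentinel, mirroring add_req(empty_mem).
#include <vector>
#include <cstddef>

static void sketch_set_at(std::vector<int>& v, size_t idx, int value,
                          int base_value, int empty_sentinel) {
  if (value == base_value) value = empty_sentinel;   // collapse the default
  if (idx >= v.size()) {
    if (value == empty_sentinel) return;             // already the default; don't grow
    v.resize(idx + 1, empty_sentinel);               // grow the sparse array
  }
  v[idx] = value;
}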
|
4512 |
|
4513 |
|
4514 |
|
4515 //--------------------------iteration_setup------------------------------------ |
|
4516 void MergeMemNode::iteration_setup(const MergeMemNode* other) { |
|
4517 if (other != NULL) { |
|
4518 grow_to_match(other); |
|
4519 // invariant: the finite support of mm2 is within mm->req() |
|
4520 #ifdef ASSERT |
|
4521 for (uint i = req(); i < other->req(); i++) { |
|
4522 assert(other->is_empty_memory(other->in(i)), "slice left uncovered"); |
|
4523 } |
|
4524 #endif |
|
4525 } |
|
4526 // Replace spurious copies of base_memory by top. |
|
4527 Node* base_mem = base_memory(); |
|
4528 if (base_mem != NULL && !base_mem->is_top()) { |
|
4529 for (uint i = Compile::AliasIdxBot+1, imax = req(); i < imax; i++) { |
|
4530 if (in(i) == base_mem) |
|
4531 set_req(i, empty_memory()); |
|
4532 } |
|
4533 } |
|
4534 } |
|
4535 |
|
4536 //---------------------------grow_to_match------------------------------------- |
|
4537 void MergeMemNode::grow_to_match(const MergeMemNode* other) { |
|
4538 Node* empty_mem = empty_memory(); |
|
4539 assert(other->is_empty_memory(empty_mem), "consistent sentinels"); |
|
4540 // look for the finite support of the other memory |
|
4541 for (uint i = other->req(); --i >= req(); ) { |
|
4542 if (other->in(i) != empty_mem) { |
|
4543 uint new_len = i+1; |
|
4544 while (req() < new_len) add_req(empty_mem); |
|
4545 break; |
|
4546 } |
|
4547 } |
|
4548 } |
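
// grow_to_match's scan for the other merge's finite support, shown standalone
// for illustration (not VM code).  Scanning downward from the top finds the
// highest non-empty slot; growing to just past it suffices, because every slot
// above it is empty and therefore defaults to the base anyway.
#include <vector>
#include <cstddef>

static void sketch_grow_to_match(std::vector<int>& self,
                                 const std::vector<int>& other, int empty) {
  for (size_t i = other.size(); i-- > self.size(); ) {
    if (other[i] != empty) {
      self.resize(i + 1, empty);     // pad new slots with the empty sentinel
      break;
    }
  }
}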
|
4549 |
|
4550 //---------------------------verify_sparse------------------------------------- |
|
4551 #ifndef PRODUCT |
|
4552 bool MergeMemNode::verify_sparse() const { |
|
4553 assert(is_empty_memory(make_empty_memory()), "sane sentinel"); |
|
4554 Node* base_mem = base_memory(); |
|
4555 // The following can happen in degenerate cases, since empty==top. |
|
4556 if (is_empty_memory(base_mem)) return true; |
|
4557 for (uint i = Compile::AliasIdxRaw; i < req(); i++) { |
|
4558 assert(in(i) != NULL, "sane slice"); |
|
4559 if (in(i) == base_mem) return false; // should have been the sentinel value! |
|
4560 } |
|
4561 return true; |
|
4562 } |
|
4563 |
|
4564 bool MergeMemStream::match_memory(Node* mem, const MergeMemNode* mm, int idx) { |
|
4565 Node* n; |
|
4566 n = mm->in(idx); |
|
4567 if (mem == n) return true; // might be empty_memory() |
|
4568 n = (idx == Compile::AliasIdxBot)? mm->base_memory(): mm->memory_at(idx); |
|
4569 if (mem == n) return true; |
|
4570 while (n->is_Phi() && (n = n->as_Phi()->is_copy()) != NULL) { |
|
4571 if (mem == n) return true; |
|
4572 if (n == NULL) break; |
|
4573 } |
|
4574 return false; |
|
4575 } |
|
4576 #endif // !PRODUCT |