Fri, 11 Mar 2011 07:50:51 -0800
7026631: field _klass is incorrectly set for dual type of TypeAryPtr::OOPS
Summary: add a missing check for this->dual() != TypeAryPtr::OOPS in TypeAryPtr::klass().
Reviewed-by: never
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif

OptoReg::Name OptoReg::c_frame_pointer;
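
// Map a Type::base() category to the ideal register class (Op_RegI, Op_RegL,
// etc.) that can hold a value of that type; categories that never live in a
// machine register (control, memory, tuples, arrays) map to
// Node::NotAMachineReg or 0.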
const int Matcher::base2reg[Type::lastype] = {
  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN,
  Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
  Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
  0, 0/*abio*/,
  Op_RegP /* Return address */, 0, /* the memories */
  Op_RegF, Op_RegF, Op_RegF, Op_RegD, Op_RegD, Op_RegD,
  0  /*bottom*/
};

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher( Node_List &proj_list ) :
  PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone), _proj_list(proj_list),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
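// Incoming stack-slot arguments are rebiased past the old SP and the
// out-preserve area so the register allocator can treat them as ordinary
// OptoReg numbers; true register arguments pass through unchanged.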
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {                // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}

#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    int ireg = base2reg[range->field_at(TypeFunc::Parms)->base()];
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves");
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );
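
  // At this point the frame layout, in increasing OptoReg numbers, is
  // roughly: stack0 .. _old_SP .. _in_arg_limit .. _new_SP .. _out_arg_limit,
  // with incoming arguments living between _old_SP and _in_arg_limit and
  // outgoing arguments between _new_SP and _out_arg_limit.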

  if (!RegMask::can_represent(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method("Before Matching");

  // Create new ideal node ConP #NULL even if it does exist in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int nodes = C->unique(); // save value
  C->set_unique(0);

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), nodes ));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * 3*6);
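  // The flat rms array is carved into three groups of six masks:
  // rms[0..5] back idealreg2spillmask, rms[6..11] back idealreg2debugmask,
  // and rms[12..17] back idealreg2mhdebugmask.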

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1))
    C->FIRST_STACK_mask().Insert(i);

  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    C->FIRST_STACK_mask().Insert(i);

  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());

  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On sparc for instance we may have to use 64 bit moves, which
    // will kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  reth_rms[TypeFunc::Parms] = mreg2regmask[find_receiver(false)];
#ifdef _LP64
  // Need two slots for ptrs in 64-bit land
  reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
#endif

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C, 1) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Op_RegI &&
               _register_save_type[i+1] == Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C, 1) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new (C, 1) MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
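// One-time initialization: match a dummy short-offset load of each basic
// type so the out_RegMask of each resulting MachNode reveals which machine
// registers the AD file allows for that ideal register class.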
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Compute generic short-offset Loads
#ifdef _LP64
  MachNode *spillCP = match_tree(new (C, 3) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
#endif
  MachNode *spillI  = match_tree(new (C, 3) LoadINode(NULL,mem,fp,atp));
  MachNode *spillL  = match_tree(new (C, 3) LoadLNode(NULL,mem,fp,atp));
  MachNode *spillF  = match_tree(new (C, 3) LoadFNode(NULL,mem,fp,atp));
  MachNode *spillD  = match_tree(new (C, 3) LoadDNode(NULL,mem,fp,atp));
  MachNode *spillP  = match_tree(new (C, 3) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");

  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();
}

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchRead:
    case Op_PrefetchWrite:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------MStack-----------------------------------------
// State and MStack class used in xform() and find_shared() iterative methods.
enum Node_State { Pre_Visit,       // node has to be pre-visited
                  Visit,           // visit node
                  Post_Visit,      // post-visit node
                  Alt_Post_Visit   // alternative post-visit path
                };

class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
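    // This push variant records two stack entries at once: the parent node
    // with its input index underneath, and the child node with its
    // Node_State on top.  parent() below pops the child entry and returns
    // the parent recorded beneath it.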
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
};

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
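  // Each node is pushed in the Visit state; when it is seen again in the
  // Post_Visit state its transformed value is wired into the parent
  // recorded beneath it (set_req for index >= 0, add_prec for -1).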
  MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root

  while (mstack.is_nonempty()) {
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {              // Nothing the matcher cares about
            if( n->is_Proj() && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);     // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
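  // Mirrors warp_incoming_stk_arg: stack slots are rebiased past
  // begin_out_arg_area so they name allocator registers, and the per-call
  // high-water mark is tracked in out_arg_limit_per_call.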
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions.  Calls match special.
// They match alone with no children.  Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode      *mcall = NULL;
  uint               cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod*        method = NULL;
  bool             is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks.  These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;

  // First place an outgoing argument can possibly be put.
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != NULL && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());

  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
    BasicType *sig_bt    = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
    call->calling_convention( sig_bt, parm_regs, argcnt );

#ifdef ASSERT
    // Sanity check users' calling convention.  Really handy during
    // the initial porting effort.  Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
        if( !parm_regs[i].first()->is_valid() &&
            !parm_regs[i].second()->is_valid() ) continue;
        VMReg reg1 = parm_regs[i].first();
        VMReg reg2 = parm_regs[i].second();
        for (int j = 0; j < i; j++) {
          if( !parm_regs[j].first()->is_valid() &&
              !parm_regs[j].second()->is_valid() ) continue;
          VMReg reg3 = parm_regs[j].first();
          VMReg reg4 = parm_regs[j].second();
          if( !reg1->is_valid() ) {
            assert( !reg2->is_valid(), "valid halvsies" );
          } else if( !reg3->is_valid() ) {
            assert( !reg4->is_valid(), "valid halvsies" );
          } else {
            assert( reg1 != reg2, "calling conv. must produce distinct regs");
            assert( reg1 != reg3, "calling conv. must produce distinct regs");
            assert( reg1 != reg4, "calling conv. must produce distinct regs");
            assert( reg2 != reg3, "calling conv. must produce distinct regs");
            assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
            assert( reg3 != reg4, "calling conv. must produce distinct regs");
          }
        }
      }
    }
#endif

    // Visit each argument.  Compute its outgoing register mask.
    // Return results can now have 2 bits returned.
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) {
        continue;               // Avoid Halves
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments

    // Compute number of stack slots needed to restore stack in case of
    // Pascal-style argument popping.
    mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
  }

  if (is_method_handle_invoke) {
    // Kill some extra stack space in case method handles want to do
    // a little in-place argument insertion.
    int regs_per_word  = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
    out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word;
    // Do not update mcall->_argsize because (a) the extra space is not
    // pushed as arguments and (b) _argsize is dead (not used anywhere).
  }

  // Compute the max stack slot killed by any call.  These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;

  if (mcall) {
    // Kill the outgoing argument area, including any non-argument holes and
    // any legacy C-killed slots.  Use Fat-Projections to do the killing.
    // Since the max-per-method covers the max-per-call-site and debug info
    // is excluded on the max-per-method basis, debug info cannot land in
    // this killed area.
    uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new (C, 1) MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
    if (!RegMask::can_represent(OptoReg::Name(out_arg_limit_per_call-1))) {
      C->record_method_not_compilable_all_tiers("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
        proj->_rout.Insert(OptoReg::Name(i));
    }
    if( proj->_rout.is_NotEmpty() )
      _proj_list.push(proj);
  }
  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }

  // Debug inputs begin just after the last incoming parameter
  assert( (mcall == NULL) || (mcall->jvms() == NULL) ||
          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "" );

  // Move the OopMap
  msfpt->_oop_map = sfpt->_oop_map;

  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
  return msfpt;
}

//---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the wholesale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, the _states_arena arena will be reset
  // freeing all State objects.
  ResourceMark rm( &_states_arena );

  LabelRootDepth = 0;

  // StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
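  // (Node*)1 is a sentinel meaning "no memory constraint": only Store
  // roots force their Load children to share the root's memory state.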
#ifdef ASSERT
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
  State *s = new (&_states_arena) State;
  s->_kids[0] = NULL;
  s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
  Label_Root( n, s, n->in(0), mem );
  if (C->failing()) return NULL;

  // The minimum cost match for the whole tree is found at the root State
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
  for( i = 0; i < NUM_OPERANDS; i++ ) {
    if( s->valid(i) &&                // valid entry and
        s->_cost[i] < cost &&         // low cost and
        s->_rule[i] >= NUM_OPERANDS ) // not an operand
      cost = s->_cost[mincost=i];
  }
  if (mincost == max_juint) {
#ifndef PRODUCT
    tty->print("No matching rule for:");
    s->dump();
#endif
    Matcher::soft_match_failure();
    return NULL;
  }
  // Reduce input tree based upon the state labels to machine Nodes
  MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
#ifdef ASSERT
  _old2new_map.map(n->_idx, m);
  _new2old_map.map(m->_idx, (Node*)n);
#endif

  // Add any Matcher-ignored edges
  uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
    assert( mem == (Node*)1, "" );
    start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
        m->add_req( n->in(i) );
    }
  }

  debug_only( _mem_node = save_mem_node; )
  return m;
}

//------------------------------match_into_reg---------------------------------
// Choose to either match this Node in a register or part of the current
// match tree.  Return true for requiring a register and false for matching
// as part of the current match tree.
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {

  const Type *t = m->bottom_type();

  if( t->singleton() ) {
    // Never force constants into registers.  Allow them to match as
    // constants or registers.  Copies of the same value will share
    // the same register.  See find_shared_node.
    return false;
  } else {                      // Not a constant
    // Stop recursion if they have different Controls.
    // Slot 0 of constants is not really a Control.
    if( control && m->in(0) && control != m->in(0) ) {

      // Actually, we can live with the most conservative control we
      // find, if it post-dominates the others.  This allows us to
      // pick up load/op/store trees where the load can float a little
      // above the store.
      Node *x = control;
      const uint max_scan = 6;  // Arbitrary scan cutoff
      uint j;
      for( j=0; j<max_scan; j++ ) {
        if( x->is_Region() )    // Bail out at merge points
          return true;
        x = x->in(0);
        if( x == m->in(0) )     // Does 'control' post-dominate
          break;                // m->in(0)?  If so, we can use it
      }
      if( j == max_scan )       // No post-domination before scan end?
        return true;            // Then break the match tree up
    }
    if (m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) {
      // These are commonly used in address expressions and can
      // efficiently fold into them on X64 in some cases.
      return false;
    }
  }

  // Not forceable cloning.  If shared, put it into a register.
  return shared;
}

//------------------------------Instruction Selection--------------------------
// Label method walks a "tree" of nodes, using the ADLC generated DFA to match
// ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
// things the Matcher does not match (e.g., Memory), and things with different
// Controls (hence forced into different blocks).  We pass in the Control
// selected for this entire State tree.

// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
// Store and the Load must have identical Memories (as well as identical
// pointers).  Since the Matcher does not have anything for Memory (and
// does not handle DAGs), I have to match the Memory input myself.  If the
// Tree root is a Store, I require all Loads to have the identical memory.
Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
  // Since Label_Root is a recursive function, it's possible that we might run
  // out of stack space.  See bugs 6272980 & 6227033 for more info.
  LabelRootDepth++;
  if (LabelRootDepth > MaxLabelRootDepth) {
    C->record_method_not_compilable_all_tiers("Out of stack space, increase MaxLabelRootDepth");
    return NULL;
  }
  uint care = 0;                // Edges matcher cares about
  uint cnt = n->req();
  uint i = 0;

  // Examine children for memory state
  // Can only subsume a child into your match-tree if that child's memory state
  // is not modified along the path to another input.
  // It is unsafe even if the other inputs are separate roots.
  Node *input_mem = NULL;
  for( i = 1; i < cnt; i++ ) {
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    assert( m, "expect non-null children" );
    if( m->is_Load() ) {
      if( input_mem == NULL ) {
        input_mem = m->in(MemNode::Memory);
      } else if( input_mem != m->in(MemNode::Memory) ) {
        input_mem = NodeSentinel;
      }
    }
  }
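
  // At this point input_mem is NULL if no Load children were seen, the
  // common memory state if all Loads agree, or NodeSentinel if two Loads
  // carry different memory states (in which case no Load may be subsumed).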
1433 for( i = 1; i < cnt; i++ ){// For my children
1434 if( !n->match_edge(i) ) continue;
1435 Node *m = n->in(i); // Get ith input
1436 // Allocate states out of a private arena
1437 State *s = new (&_states_arena) State;
1438 svec->_kids[care++] = s;
1439 assert( care <= 2, "binary only for now" );
1441 // Recursively label the State tree.
1442 s->_kids[0] = NULL;
1443 s->_kids[1] = NULL;
1444 s->_leaf = m;
1446 // Check for leaves of the State Tree; things that cannot be a part of
1447 // the current tree. If we find any, that value is matched as a
1448 // register operand; if not, normal matching is used.
1449 if( match_into_reg(n, m, control, i, is_shared(m)) ||
1450 //
1451 // Stop recursion if this is a LoadNode and the root of this tree is a
1452 // StoreNode and the load & store have different memories.
1453 ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1454 // Can NOT include the match of a subtree when its memory state
1455 // is used by any of the other subtrees
1456 (input_mem == NodeSentinel) ) {
1457 #ifndef PRODUCT
1458 // Print when we exclude matching due to different memory states at input-loads
1459 if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1460 && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
1461 tty->print_cr("invalid input_mem");
1462 }
1463 #endif
1464 // Switch to a register-only opcode; this value must be in a register
1465 // and cannot be subsumed as part of a larger instruction.
1466 s->DFA( m->ideal_reg(), m );
1468 } else {
1469 // If match tree has no control and we do, adopt it for entire tree
1470 if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1471 control = m->in(0); // Pick up control
1472 // Else match as a normal part of the match tree.
1473 control = Label_Root(m,s,control,mem);
1474 if (C->failing()) return NULL;
1475 }
1476 }
1479 // Call DFA to match this node, and return
1480 svec->DFA( n->Opcode(), n );
1482 #ifdef ASSERT
1483 uint x;
1484 for( x = 0; x < _LAST_MACH_OPER; x++ )
1485 if( svec->valid(x) )
1486 break;
1488 if (x >= _LAST_MACH_OPER) {
1489 n->dump();
1490 svec->dump();
1491 assert( false, "bad AD file" );
1492 }
1493 #endif
1494 return control;
1495 }
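// Illustration (a hedged sketch, not an actual dump): labeling
//   AddI(LoadI(mem, adr), ConI #4)
// allocates one State per matched input, with svec->_kids[0]->_leaf ==
// LoadI and svec->_kids[1]->_leaf == ConI, then runs the DFA bottom-up so
// that svec->_rule[] records the cheapest machine rule for each operand
// class able to produce the AddI's value.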
1498 // Con nodes reduced using the same rule can share their MachNode
1499 // which reduces the number of copies of a constant in the final
1500 // program. The register allocator is free to split uses later to
1501 // split live ranges.
1502 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1503 if (!leaf->is_Con() && !leaf->is_DecodeN()) return NULL;
1505 // See if this Con has already been reduced using this rule.
1506 if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1507 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1508 if (last != NULL && rule == last->rule()) {
1509 // Don't expect control change for DecodeN
1510 if (leaf->is_DecodeN())
1511 return last;
1512 // Get the new space root.
1513 Node* xroot = new_node(C->root());
1514 if (xroot == NULL) {
1515 // This shouldn't happen given the order of matching.
1516 return NULL;
1517 }
1519 // Shared constants need to have their control be root so they
1520 // can be scheduled properly.
1521 Node* control = last->in(0);
1522 if (control != xroot) {
1523 if (control == NULL || control == C->root()) {
1524 last->set_req(0, xroot);
1525 } else {
1526 assert(false, "unexpected control");
1527 return NULL;
1528 }
1529 }
1530 return last;
1531 }
1532 return NULL;
1533 }
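// Sketch of the effect: when a shared Con node is reached from two
// different match trees, the second reduction finds the MachNode cached
// under leaf->_idx and reuses it (after normalizing its control to the
// new-space root), so the constant is materialized once; the register
// allocator is still free to split the live range later.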
1536 //------------------------------ReduceInst-------------------------------------
1537 // Reduce a State tree (with given Control) into a tree of MachNodes.
1538 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1539 // complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes.
1540 // Each MachNode has a number of complicated MachOper operands; each
1541 // MachOper also covers a further tree of Ideal Nodes.
1543 // The root of the Ideal match tree is always an instruction, so we enter
1544 // the recursion here. After building the MachNode, we need to recurse
1545 // the tree checking for these cases:
1546 // (1) Child is an instruction -
1547 // Build the instruction (recursively), add it as an edge.
1548 // Build a simple operand (register) to hold the result of the instruction.
1549 // (2) Child is an interior part of an instruction -
1550 // Skip over it (do nothing)
1551 // (3) Child is the start of an operand -
1552 // Build the operand, place it inside the instruction
1553 // Call ReduceOper.
1554 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1555 assert( rule >= NUM_OPERANDS, "called with operand rule" );
1557 MachNode* shared_node = find_shared_node(s->_leaf, rule);
1558 if (shared_node != NULL) {
1559 return shared_node;
1560 }
1562 // Build the object to represent this state & prepare for recursive calls
1563 MachNode *mach = s->MachNodeGenerator( rule, C );
1564 mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
1565 assert( mach->_opnds[0] != NULL, "Missing result operand" );
1566 Node *leaf = s->_leaf;
1567 // Check for instruction or instruction chain rule
1568 if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1569 assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1570 "duplicating node that's already been matched");
1571 // Instruction
1572 mach->add_req( leaf->in(0) ); // Set initial control
1573 // Reduce interior of complex instruction
1574 ReduceInst_Interior( s, rule, mem, mach, 1 );
1575 } else {
1576 // Instruction chain rules are data-dependent on their inputs
1577 mach->add_req(0); // Set initial control to none
1578 ReduceInst_Chain_Rule( s, rule, mem, mach );
1579 }
1581 // If a Memory was used, insert a Memory edge
1582 if( mem != (Node*)1 ) {
1583 mach->ins_req(MemNode::Memory,mem);
1584 #ifdef ASSERT
1585 // Verify adr type after matching memory operation
1586 const MachOper* oper = mach->memory_operand();
1587 if (oper != NULL && oper != (MachOper*)-1) {
1588 // It has a unique memory operand. Find corresponding ideal mem node.
1589 Node* m = NULL;
1590 if (leaf->is_Mem()) {
1591 m = leaf;
1592 } else {
1593 m = _mem_node;
1594 assert(m != NULL && m->is_Mem(), "expecting memory node");
1595 }
1596 const Type* mach_at = mach->adr_type();
1597 // A DecodeN node consumed by an address may have a different type
1598 // than its input. Don't compare types in that case.
1599 if (m->adr_type() != mach_at &&
1600 (m->in(MemNode::Address)->is_DecodeN() ||
1601 (m->in(MemNode::Address)->is_AddP() &&
1602 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN()) ||
1603 (m->in(MemNode::Address)->is_AddP() &&
1604 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1605 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeN()))) {
1606 mach_at = m->adr_type();
1607 }
1608 if (m->adr_type() != mach_at) {
1609 m->dump();
1610 tty->print_cr("mach:");
1611 mach->dump(1);
1612 }
1613 assert(m->adr_type() == mach_at, "matcher should not change adr type");
1614 }
1615 #endif
1616 }
1618 // If the _leaf is an AddP, insert the base edge
1619 if( leaf->is_AddP() )
1620 mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1622 uint num_proj = _proj_list.size();
1624 // Perform any 1-to-many expansions required
1625 MachNode *ex = mach->Expand(s,_proj_list, mem);
1626 if( ex != mach ) {
1627 assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1628 if( ex->in(1)->is_Con() )
1629 ex->in(1)->set_req(0, C->root());
1630 // Remove old node from the graph
1631 for( uint i=0; i<mach->req(); i++ ) {
1632 mach->set_req(i,NULL);
1633 }
1634 #ifdef ASSERT
1635 _new2old_map.map(ex->_idx, s->_leaf);
1636 #endif
1637 }
1639 // PhaseChaitin::fixup_spills will sometimes generate spill code
1640 // via the matcher. By this time, nodes have been wired into the CFG,
1641 // and any further nodes generated by expand rules will be left hanging
1642 // in space, and will not get emitted as output code. Catch this.
1643 // Also, catch any new register allocation constraints ("projections")
1644 // generated belatedly during spill code generation.
1645 if (_allocation_started) {
1646 guarantee(ex == mach, "no expand rules during spill generation");
1647 guarantee(_proj_list.size() == num_proj, "no allocation during spill generation");
1648 }
1650 if (leaf->is_Con() || leaf->is_DecodeN()) {
1651 // Record the con for sharing
1652 _shared_nodes.map(leaf->_idx, ex);
1653 }
1655 return ex;
1656 }
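// Hedged sketch of the flow: for a Store leaf whose address matched as a
// complex operand, ReduceInst builds the MachNode, the interior recursion
// reports the subsumed Load/Store memory state back through 'mem', and
// the code above inserts that single edge at MemNode::Memory; for an AddP
// leaf the Base edge is re-inserted so the derived pointer's base stays
// available to later phases.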
1658 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1659 // 'op' is what I am expecting to receive
1660 int op = _leftOp[rule];
1661 // Operand type to catch the child's result
1662 // This is what my child will give me.
1663 int opnd_class_instance = s->_rule[op];
1664 // Choose between operand class or not.
1665 // This is what I will receive.
1666 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1667 // New rule for child. Chase operand classes to get the actual rule.
1668 int newrule = s->_rule[catch_op];
1670 if( newrule < NUM_OPERANDS ) {
1671 // Chain from operand or operand class, may be output of shared node
1672 assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1673 "Bad AD file: Instruction chain rule must chain from operand");
1674 // Insert operand into array of operands for this instruction
1675 mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1677 ReduceOper( s, newrule, mem, mach );
1678 } else {
1679 // Chain from the result of an instruction
1680 assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1681 mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1682 Node *mem1 = (Node*)1;
1683 debug_only(Node *save_mem_node = _mem_node;)
1684 mach->add_req( ReduceInst(s, newrule, mem1) );
1685 debug_only(_mem_node = save_mem_node;)
1686 }
1687 return;
1688 }
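// Illustrative reading of the two branches above: when the child rule
// resolves to an operand (e.g., an immediate), the operand generator
// fills _opnds[1] and ReduceOper recurses; when it resolves to another
// instruction, a result-catching operand is generated instead and the
// nested instruction is reduced via ReduceInst with its own fresh memory
// state, _mem_node being saved and restored around the call in debug
// builds.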
1691 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1692 if( s->_leaf->is_Load() ) {
1693 Node *mem2 = s->_leaf->in(MemNode::Memory);
1694 assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1695 debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1696 mem = mem2;
1697 }
1698 if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1699 if( mach->in(0) == NULL )
1700 mach->set_req(0, s->_leaf->in(0));
1701 }
1703 // Now recursively walk the state tree & add operand list.
1704 for( uint i=0; i<2; i++ ) { // binary tree
1705 State *newstate = s->_kids[i];
1706 if( newstate == NULL ) break; // Might only have 1 child
1707 // 'op' is what I am expecting to receive
1708 int op;
1709 if( i == 0 ) {
1710 op = _leftOp[rule];
1711 } else {
1712 op = _rightOp[rule];
1713 }
1714 // Operand type to catch the child's result
1715 // This is what my child will give me.
1716 int opnd_class_instance = newstate->_rule[op];
1717 // Choose between operand class or not.
1718 // This is what I will receive.
1719 int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1720 // New rule for child. Chase operand classes to get the actual rule.
1721 int newrule = newstate->_rule[catch_op];
1723 if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1724 // Operand/operandClass
1725 // Insert operand into array of operands for this instruction
1726 mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
1727 ReduceOper( newstate, newrule, mem, mach );
1729 } else { // Child is internal operand or new instruction
1730 if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1731 // internal operand --> call ReduceInst_Interior
1732 // Interior of complex instruction. Do nothing but recurse.
1733 num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1734 } else {
1735 // instruction --> call build operand( ) to catch result
1736 // --> ReduceInst( newrule )
1737 mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
1738 Node *mem1 = (Node*)1;
1739 debug_only(Node *save_mem_node = _mem_node;)
1740 mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1741 debug_only(_mem_node = save_mem_node;)
1742 }
1743 }
1744 assert( mach->_opnds[num_opnds-1], "" );
1745 }
1746 return num_opnds;
1747 }
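// Sketch of the three cases handled per kid above: an operand kid fills
// the next _opnds slot and recurses via ReduceOper; an internal-operand
// kid belongs to this same instruction, so only the recursion deepens;
// an instruction kid gets a register-class operand in _opnds to catch its
// result, and the freshly reduced MachNode is wired in as a new required
// edge.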
1749 // This routine walks the interior of possible complex operands.
1750 // At each point we check our children in the match tree:
1751 // (1) No children -
1752 // We are a leaf; add _leaf field as an input to the MachNode
1753 // (2) Child is an internal operand -
1754 // Skip over it ( do nothing )
1755 // (3) Child is an instruction -
1756 // Call ReduceInst recursively and add the resulting
1757 // instruction as an input to the MachNode
1758 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1759 assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1760 State *kid = s->_kids[0];
1761 assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1763 // Leaf? And not subsumed?
1764 if( kid == NULL && !_swallowed[rule] ) {
1765 mach->add_req( s->_leaf ); // Add leaf pointer
1766 return; // Bail out
1767 }
1769 if( s->_leaf->is_Load() ) {
1770 assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1771 mem = s->_leaf->in(MemNode::Memory);
1772 debug_only(_mem_node = s->_leaf;)
1773 }
1774 if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1775 if( !mach->in(0) )
1776 mach->set_req(0,s->_leaf->in(0));
1777 else {
1778 assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1779 }
1780 }
1782 for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree
1783 int newrule;
1784 if( i == 0 )
1785 newrule = kid->_rule[_leftOp[rule]];
1786 else
1787 newrule = kid->_rule[_rightOp[rule]];
1789 if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1790 // Internal operand; recurse but do nothing else
1791 ReduceOper( kid, newrule, mem, mach );
1793 } else { // Child is a new instruction
1794 // Reduce the instruction, and add a direct pointer from this
1795 // machine instruction to the newly reduced one.
1796 Node *mem1 = (Node*)1;
1797 debug_only(Node *save_mem_node = _mem_node;)
1798 mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1799 debug_only(_mem_node = save_mem_node;)
1800 }
1801 }
1802 }
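// Sketch: matching a complex operand such as an address expression walks
// kids[0] (and kids[1] for binary operands); leaves that were not
// swallowed by the operand's encoding become inputs of the MachNode
// (e.g., the base and index registers of an indexed address), while
// swallowed constants are emitted inside the operand and add no edge.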
1805 // -------------------------------------------------------------------------
1806 // Java-Java calling convention
1807 // (what you use when Java calls Java)
1809 //------------------------------find_receiver----------------------------------
1810 // For a given signature, return the OptoReg for parameter 0.
1811 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1812 VMRegPair regs;
1813 BasicType sig_bt = T_OBJECT;
1814 calling_convention(&sig_bt, &regs, 1, is_outgoing);
1815 // Return argument 0 register. In the LP64 build pointers
1816 // take 2 registers, but the VM wants only the 'main' name.
1817 return OptoReg::as_OptoReg(regs.first());
1818 }
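// Usage sketch (hedged): is_outgoing selects the caller-side versus
// callee-side view of the Java calling convention; the single T_OBJECT
// entry stands in for the receiver, and on LP64 only regs.first() of the
// two-slot pointer pair is reported, per the comment above.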
1820 // A method-klass-holder may be passed in the inline_cache_reg
1821 // and then expanded into the inline_cache_reg and a method_oop register
1822 // defined in ad_<arch>.cpp
1825 //------------------------------find_shared------------------------------------
1826 // Set bits if Node is shared or otherwise a root
1827 void Matcher::find_shared( Node *n ) {
1828 // Allocate stack of size C->unique() * 2 to avoid frequent realloc
1829 MStack mstack(C->unique() * 2);
1830 // Mark nodes as address_visited if they are inputs to an address expression
1831 VectorSet address_visited(Thread::current()->resource_area());
1832 mstack.push(n, Visit); // Don't need to pre-visit root node
1833 while (mstack.is_nonempty()) {
1834 n = mstack.node(); // Leave node on stack
1835 Node_State nstate = mstack.state();
1836 uint nop = n->Opcode();
1837 if (nstate == Pre_Visit) {
1838 if (address_visited.test(n->_idx)) { // Visited in address already?
1839 // Flag as visited and shared now.
1840 set_visited(n);
1841 }
1842 if (is_visited(n)) { // Visited already?
1843 // Node is shared and has no reason to clone. Flag it as shared.
1844 // This causes it to match into a register for the sharing.
1845 set_shared(n); // Flag as shared and
1846 mstack.pop(); // remove node from stack
1847 continue;
1848 }
1849 nstate = Visit; // Not already visited; so visit now
1850 }
1851 if (nstate == Visit) {
1852 mstack.set_state(Post_Visit);
1853 set_visited(n); // Flag as visited now
1854 bool mem_op = false;
1856 switch( nop ) { // Handle some opcodes special
1857 case Op_Phi: // Treat Phis as shared roots
1858 case Op_Parm:
1859 case Op_Proj: // All handled specially during matching
1860 case Op_SafePointScalarObject:
1861 set_shared(n);
1862 set_dontcare(n);
1863 break;
1864 case Op_If:
1865 case Op_CountedLoopEnd:
1866 mstack.set_state(Alt_Post_Visit); // Alternative way
1867 // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps
1868 // with matching cmp/branch in 1 instruction. The Matcher needs the
1869 // Bool and CmpX side-by-side, because it can only get at constants
1870 // that are at the leaves of Match trees, and the Bool's condition acts
1871 // as a constant here.
1872 mstack.push(n->in(1), Visit); // Clone the Bool
1873 mstack.push(n->in(0), Pre_Visit); // Visit control input
1874 continue; // while (mstack.is_nonempty())
1875 case Op_ConvI2D: // These forms efficiently match with a prior
1876 case Op_ConvI2F: // Load but not a following Store
1877 if( n->in(1)->is_Load() && // Prior load
1878 n->outcnt() == 1 && // Not already shared
1879 n->unique_out()->is_Store() ) // Following store
1880 set_shared(n); // Force it to be a root
1881 break;
1882 case Op_ReverseBytesI:
1883 case Op_ReverseBytesL:
1884 if( n->in(1)->is_Load() && // Prior load
1885 n->outcnt() == 1 ) // Not already shared
1886 set_shared(n); // Force it to be a root
1887 break;
1888 case Op_BoxLock: // Can't match until we get stack-regs in ADLC
1889 case Op_IfFalse:
1890 case Op_IfTrue:
1891 case Op_MachProj:
1892 case Op_MergeMem:
1893 case Op_Catch:
1894 case Op_CatchProj:
1895 case Op_CProj:
1896 case Op_JumpProj:
1897 case Op_JProj:
1898 case Op_NeverBranch:
1899 set_dontcare(n);
1900 break;
1901 case Op_Jump:
1902 mstack.push(n->in(1), Visit); // Switch Value
1903 mstack.push(n->in(0), Pre_Visit); // Visit Control input
1904 continue; // while (mstack.is_nonempty())
1905 case Op_StrComp:
1906 case Op_StrEquals:
1907 case Op_StrIndexOf:
1908 case Op_AryEq:
1909 set_shared(n); // Force result into register (it will be anyways)
1910 break;
1911 case Op_ConP: { // Convert pointers above the centerline to NULL
1912 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
1913 const TypePtr* tp = tn->type()->is_ptr();
1914 if (tp->_ptr == TypePtr::AnyNull) {
1915 tn->set_type(TypePtr::NULL_PTR);
1916 }
1917 break;
1918 }
1919 case Op_ConN: { // Convert narrow pointers above the centerline to NULL
1920 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
1921 const TypePtr* tp = tn->type()->make_ptr();
1922 if (tp && tp->_ptr == TypePtr::AnyNull) {
1923 tn->set_type(TypeNarrowOop::NULL_PTR);
1924 }
1925 break;
1926 }
1927 case Op_Binary: // These are introduced in the Post_Visit state.
1928 ShouldNotReachHere();
1929 break;
1930 case Op_ClearArray:
1931 case Op_SafePoint:
1932 mem_op = true;
1933 break;
1934 default:
1935 if( n->is_Store() ) {
1936 // Do match stores, despite no ideal reg
1937 mem_op = true;
1938 break;
1939 }
1940 if( n->is_Mem() ) { // Loads and LoadStores
1941 mem_op = true;
1942 // Loads must be root of match tree due to prior load conflict
1943 if( C->subsume_loads() == false )
1944 set_shared(n);
1945 }
1946 // Fall into default case
1947 if( !n->ideal_reg() )
1948 set_dontcare(n); // Unmatchable Nodes
1949 } // end_switch
1951 for(int i = n->req() - 1; i >= 0; --i) { // For my children
1952 Node *m = n->in(i); // Get ith input
1953 if (m == NULL) continue; // Ignore NULLs
1954 uint mop = m->Opcode();
1956 // Must clone all producers of flags, or we will not match correctly.
1957 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
1958 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
1959 // are also there, so we may match a float-branch to int-flags and
1960 // expect the allocator to haul the flags from the int-side to the
1961 // fp-side. No can do.
1962 if( _must_clone[mop] ) {
1963 mstack.push(m, Visit);
1964 continue; // for(int i = ...)
1965 }
1967 if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
1968 // Bases used in addresses must be shared but since
1969 // they are shared through a DecodeN they may appear
1970 // to have a single use so force sharing here.
1971 set_shared(m->in(AddPNode::Base)->in(1));
1972 }
1974 // Clone addressing expressions as they are "free" in memory access instructions
1975 if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
1976 // Some inputs for address expression are not put on stack
1977 // to avoid marking them as shared and forcing them into register
1978 // if they are used only in address expressions.
1979 // But they should be marked as shared if there are other uses
1980 // besides address expressions.
1982 Node *off = m->in(AddPNode::Offset);
1983 if( off->is_Con() &&
1984 // When there are other uses besides address expressions
1985 // put it on stack and mark as shared.
1986 !is_visited(m) ) {
1987 address_visited.test_set(m->_idx); // Flag as address_visited
1988 Node *adr = m->in(AddPNode::Address);
1990 // Intel, ARM and friends can handle 2 adds in addressing mode
1991 if( clone_shift_expressions && adr->is_AddP() &&
1992 // AtomicAdd is not an addressing expression.
1993 // Cheap to find it by looking for screwy base.
1994 !adr->in(AddPNode::Base)->is_top() &&
1995 // Are there other uses besides address expressions?
1996 !is_visited(adr) ) {
1997 address_visited.set(adr->_idx); // Flag as address_visited
1998 Node *shift = adr->in(AddPNode::Offset);
1999 // Check for shift by small constant as well
2000 if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
2001 shift->in(2)->get_int() <= 3 &&
2002 // Are there other uses besides address expressions?
2003 !is_visited(shift) ) {
2004 address_visited.set(shift->_idx); // Flag as address_visited
2005 mstack.push(shift->in(2), Visit);
2006 Node *conv = shift->in(1);
2007 #ifdef _LP64
2008 // Allow Matcher to match the rule which bypass
2009 // ConvI2L operation for an array index on LP64
2010 // if the index value is positive.
2011 if( conv->Opcode() == Op_ConvI2L &&
2012 conv->as_Type()->type()->is_long()->_lo >= 0 &&
2013 // Are there other uses besides address expressions?
2014 !is_visited(conv) ) {
2015 address_visited.set(conv->_idx); // Flag as address_visited
2016 mstack.push(conv->in(1), Pre_Visit);
2017 } else
2018 #endif
2019 mstack.push(conv, Pre_Visit);
2020 } else {
2021 mstack.push(shift, Pre_Visit);
2022 }
2023 mstack.push(adr->in(AddPNode::Address), Pre_Visit);
2024 mstack.push(adr->in(AddPNode::Base), Pre_Visit);
2025 } else { // Sparc, Alpha, PPC and friends
2026 mstack.push(adr, Pre_Visit);
2027 }
2029 // Clone X+offset as it also folds into most addressing expressions
2030 mstack.push(off, Visit);
2031 mstack.push(m->in(AddPNode::Base), Pre_Visit);
2032 continue; // for(int i = ...)
2033 } // if( off->is_Con() )
2034 } // if( mem_op &&
2035 mstack.push(m, Pre_Visit);
2036 } // for(int i = ...)
2037 }
2038 else if (nstate == Alt_Post_Visit) {
2039 mstack.pop(); // Remove node from stack
2040 // We cannot remove the Cmp input from the Bool here, as the Bool may be
2041 // shared and all users of the Bool need to move the Cmp in parallel.
2042 // This leaves both the Bool and the If pointing at the Cmp. To
2043 // prevent the Matcher from trying to Match the Cmp along both paths
2044 // BoolNode::match_edge always returns a zero.
2046 // We reorder the Op_If in a pre-order manner, so we can visit without
2047 // accidentally sharing the Cmp (the Bool and the If make 2 users).
2048 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2049 }
2050 else if (nstate == Post_Visit) {
2051 mstack.pop(); // Remove node from stack
2053 // Now hack a few special opcodes
2054 switch( n->Opcode() ) { // Handle some opcodes special
2055 case Op_StorePConditional:
2056 case Op_StoreIConditional:
2057 case Op_StoreLConditional:
2058 case Op_CompareAndSwapI:
2059 case Op_CompareAndSwapL:
2060 case Op_CompareAndSwapP:
2061 case Op_CompareAndSwapN: { // Convert trinary to binary-tree
2062 Node *newval = n->in(MemNode::ValueIn );
2063 Node *oldval = n->in(LoadStoreNode::ExpectedIn);
2064 Node *pair = new (C, 3) BinaryNode( oldval, newval );
2065 n->set_req(MemNode::ValueIn,pair);
2066 n->del_req(LoadStoreNode::ExpectedIn);
2067 break;
2068 }
2069 case Op_CMoveD: // Convert trinary to binary-tree
2070 case Op_CMoveF:
2071 case Op_CMoveI:
2072 case Op_CMoveL:
2073 case Op_CMoveN:
2074 case Op_CMoveP: {
2075 // Restructure into a binary tree for Matching. It's possible that
2076 // we could move this code up next to the graph reshaping for IfNodes
2077 // or vice-versa, but I do not want to debug this for Ladybird.
2078 // 10/2/2000 CNC.
2079 Node *pair1 = new (C, 3) BinaryNode(n->in(1),n->in(1)->in(1));
2080 n->set_req(1,pair1);
2081 Node *pair2 = new (C, 3) BinaryNode(n->in(2),n->in(3));
2082 n->set_req(2,pair2);
2083 n->del_req(3);
2084 break;
2085 }
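// E.g. (illustrative): CMoveI(ctrl, Bool, src_t, src_f) becomes
// CMoveI(ctrl, Binary(Bool, Cmp), Binary(src_t, src_f)): pair1 joins the
// Bool with its Cmp input and pair2 joins the two data inputs, so the
// Matcher sees a binary tree rather than a trinary node.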
2086 case Op_StrEquals: {
2087 Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3));
2088 n->set_req(2,pair1);
2089 n->set_req(3,n->in(4));
2090 n->del_req(4);
2091 break;
2092 }
2093 case Op_StrComp:
2094 case Op_StrIndexOf: {
2095 Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3));
2096 n->set_req(2,pair1);
2097 Node *pair2 = new (C, 3) BinaryNode(n->in(4),n->in(5));
2098 n->set_req(3,pair2);
2099 n->del_req(5);
2100 n->del_req(4);
2101 break;
2102 }
2103 default:
2104 break;
2105 }
2106 }
2107 else {
2108 ShouldNotReachHere();
2109 }
2110 } // end of while (mstack.is_nonempty())
2111 }
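// Hedged example of the address-cloning policy above: on a target with
// clone_shift_expressions, an expression such as
//   AddP(base, AddP(base, ptr, LShiftX(ConvI2L(idx), #2)), #con)
// is pushed piecewise with each piece flagged in address_visited, so
// every memory user re-matches the whole expression into its addressing
// mode (with the ConvI2L bypassed on LP64 when idx is known
// non-negative); targets without it clone only the final X+offset form.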
2113 #ifdef ASSERT
2114 // machine-independent root to machine-dependent root
2115 void Matcher::dump_old2new_map() {
2116 _old2new_map.dump();
2117 }
2118 #endif
2120 //---------------------------collect_null_checks-------------------------------
2121 // Find null checks in the ideal graph and record a machine-specific node
2122 // for each one. Used by later implicit-null-check handling. Actually collects
2123 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2124 // value being tested.
2125 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2126 Node *iff = proj->in(0);
2127 if( iff->Opcode() == Op_If ) {
2128 // During matching If's have Bool & Cmp side-by-side
2129 BoolNode *b = iff->in(1)->as_Bool();
2130 Node *cmp = iff->in(2);
2131 int opc = cmp->Opcode();
2132 if (opc != Op_CmpP && opc != Op_CmpN) return;
2134 const Type* ct = cmp->in(2)->bottom_type();
2135 if (ct == TypePtr::NULL_PTR ||
2136 (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2138 bool push_it = false;
2139 if( proj->Opcode() == Op_IfTrue ) {
2140 extern int all_null_checks_found;
2141 all_null_checks_found++;
2142 if( b->_test._test == BoolTest::ne ) {
2143 push_it = true;
2144 }
2145 } else {
2146 assert( proj->Opcode() == Op_IfFalse, "" );
2147 if( b->_test._test == BoolTest::eq ) {
2148 push_it = true;
2149 }
2150 }
2151 if( push_it ) {
2152 _null_check_tests.push(proj);
2153 Node* val = cmp->in(1);
2154 #ifdef _LP64
2155 if (val->bottom_type()->isa_narrowoop() &&
2156 !Matcher::narrow_oop_use_complex_address()) {
2157 //
2158 // Look for DecodeN node which should be pinned to orig_proj.
2159 // On platforms (e.g., Sparc) which cannot handle 2 adds
2160 // in addressing mode we have to keep a DecodeN node and
2161 // use it to do implicit NULL check in address.
2162 //
2163 // DecodeN node was pinned to non-null path (orig_proj) during
2164 // CastPP transformation in final_graph_reshaping_impl().
2165 //
2166 uint cnt = orig_proj->outcnt();
2167 for (uint i = 0; i < cnt; i++) {
2168 Node* d = orig_proj->raw_out(i);
2169 if (d->is_DecodeN() && d->in(1) == val) {
2170 val = d;
2171 val->set_req(0, NULL); // Unpin now.
2172 // Mark this as special case to distinguish from
2173 // a regular case: CmpP(DecodeN, NULL).
2174 val = (Node*)(((intptr_t)val) | 1);
2175 break;
2176 }
2177 }
2178 }
2179 #endif
2180 _null_check_tests.push(val);
2181 }
2182 }
2183 }
2184 }
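// Note on the tagging trick above: DecodeN candidates are stored as
// (Node*)(((intptr_t)val) | 1), which is safe because node pointers are
// at least word-aligned; validate_null_checks() below strips the bit with
// & ~1 and re-applies it to the matched machine node.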
2186 //---------------------------validate_null_checks------------------------------
2187 // It's possible that the value being NULL checked is not the root of a match
2188 // tree. If so, I cannot use the value in an implicit null check.
2189 void Matcher::validate_null_checks( ) {
2190 uint cnt = _null_check_tests.size();
2191 for( uint i=0; i < cnt; i+=2 ) {
2192 Node *test = _null_check_tests[i];
2193 Node *val = _null_check_tests[i+1];
2194 bool is_decoden = ((intptr_t)val) & 1;
2195 val = (Node*)(((intptr_t)val) & ~1);
2196 if (has_new_node(val)) {
2197 Node* new_val = new_node(val);
2198 if (is_decoden) {
2199 assert(val->is_DecodeN() && val->in(0) == NULL, "sanity");
2200 // Note: new_val may have a control edge if
2201 // the original ideal node DecodeN was matched before
2202 // it was unpinned in Matcher::collect_null_checks().
2203 // Unpin the mach node and mark it.
2204 new_val->set_req(0, NULL);
2205 new_val = (Node*)(((intptr_t)new_val) | 1);
2206 }
2207 // Is a match-tree root, so replace with the matched value
2208 _null_check_tests.map(i+1, new_val);
2209 } else {
2210 // Yank from candidate list
2211 _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2212 _null_check_tests.map(i,_null_check_tests[--cnt]);
2213 _null_check_tests.pop();
2214 _null_check_tests.pop();
2215 i-=2;
2216 }
2217 }
2218 }
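// Note: entries are processed pairwise (proj, val); a value whose match
// tree root lies elsewhere is yanked by swapping in the last pair and
// popping, and the i -= 2 above makes the loop re-examine the swapped-in
// pair on the next iteration.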
2221 // Used by the DFA in dfa_sparc.cpp. Check for a prior FastLock
2222 // that already acts as an Acquire, in which case we don't need an
2223 // Acquire here. We retain the Node to act as a compiler ordering barrier.
2224 bool Matcher::prior_fast_lock( const Node *acq ) {
2225 Node *r = acq->in(0);
2226 if( !r->is_Region() || r->req() <= 1 ) return false;
2227 Node *proj = r->in(1);
2228 if( !proj->is_Proj() ) return false;
2229 Node *call = proj->in(0);
2230 if( !call->is_Call() || call->as_Call()->entry_point() != OptoRuntime::complete_monitor_locking_Java() )
2231 return false;
2233 return true;
2234 }
2236 // Used by the DFA in dfa_sparc.cpp. Check for a following FastUnLock
2237 // that already acts as a Release, in which case we don't need a
2238 // Release here. We retain the Node to act as a compiler ordering barrier.
2239 bool Matcher::post_fast_unlock( const Node *rel ) {
2240 Compile *C = Compile::current();
2241 assert( rel->Opcode() == Op_MemBarRelease, "" );
2242 const MemBarReleaseNode *mem = (const MemBarReleaseNode*)rel;
2243 DUIterator_Fast imax, i = mem->fast_outs(imax);
2244 Node *ctrl = NULL;
2245 while( true ) {
2246 ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
2247 assert( ctrl->is_Proj(), "only projections here" );
2248 ProjNode *proj = (ProjNode*)ctrl;
2249 if( proj->_con == TypeFunc::Control &&
2250 !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
2251 break;
2252 i++;
2253 }
2254 Node *iff = NULL;
2255 for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
2256 Node *x = ctrl->fast_out(j);
2257 if( x->is_If() && x->req() > 1 &&
2258 !C->node_arena()->contains(x) ) { // Unmatched old-space only
2259 iff = x;
2260 break;
2261 }
2262 }
2263 if( !iff ) return false;
2264 Node *bol = iff->in(1);
2265 // The iff might be some random subclass of If or bol might be Con-Top
2266 if (!bol->is_Bool()) return false;
2267 assert( bol->req() > 1, "" );
2268 return (bol->in(1)->Opcode() == Op_FastUnlock);
2269 }
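// Sketch of the shape recognized above (illustrative): the Release
// membar's control projection feeds an If whose Bool tests a FastUnlock
// result; the node_arena checks restrict the walk to unmatched old-space
// nodes, i.e. the ideal graph rather than already-reduced machine nodes.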
2271 // Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
2272 // atomic instruction that acts as a store_load barrier with no
2273 // intervening volatile load, in which case we don't need a barrier here.
2274 // We retain the Node to act as a compiler ordering barrier.
2275 bool Matcher::post_store_load_barrier(const Node *vmb) {
2276 Compile *C = Compile::current();
2277 assert( vmb->is_MemBar(), "" );
2278 assert( vmb->Opcode() != Op_MemBarAcquire, "" );
2279 const MemBarNode *mem = (const MemBarNode*)vmb;
2281 // Get the Proj node, ctrl, that can be used to iterate forward
2282 Node *ctrl = NULL;
2283 DUIterator_Fast imax, i = mem->fast_outs(imax);
2284 while( true ) {
2285 ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
2286 assert( ctrl->is_Proj(), "only projections here" );
2287 ProjNode *proj = (ProjNode*)ctrl;
2288 if( proj->_con == TypeFunc::Control &&
2289 !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
2290 break;
2291 i++;
2292 }
2294 for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
2295 Node *x = ctrl->fast_out(j);
2296 int xop = x->Opcode();
2298 // We don't need current barrier if we see another or a lock
2299 // before seeing volatile load.
2300 //
2301 // Op_FastUnlock previously appeared in the Op_* list below.
2302 // With the advent of 1-0 lock operations we're no longer guaranteed
2303 // that a monitor exit operation contains a serializing instruction.
2305 if (xop == Op_MemBarVolatile ||
2306 xop == Op_FastLock ||
2307 xop == Op_CompareAndSwapL ||
2308 xop == Op_CompareAndSwapP ||
2309 xop == Op_CompareAndSwapN ||
2310 xop == Op_CompareAndSwapI)
2311 return true;
2313 if (x->is_MemBar()) {
2314 // We must retain this membar if there is an upcoming volatile
2315 // load, which will be preceded by an acquire membar.
2316 if (xop == Op_MemBarAcquire)
2317 return false;
2318 // For other kinds of barriers, check by pretending we
2319 // are them, and seeing if we can be removed.
2320 else
2321 return post_store_load_barrier((const MemBarNode*)x);
2322 }
2324 // Delicate code to detect the case of an upcoming fastlock block
2325 if( x->is_If() && x->req() > 1 &&
2326 !C->node_arena()->contains(x) ) { // Unmatched old-space only
2327 Node *iff = x;
2328 Node *bol = iff->in(1);
2329 // The iff might be some random subclass of If or bol might be Con-Top
2330 if (!bol->is_Bool()) return false;
2331 assert( bol->req() > 1, "" );
2332 return (bol->in(1)->Opcode() == Op_FastUnlock);
2333 }
2334 // probably not necessary to check for these
2335 if (x->is_Call() || x->is_SafePoint() || x->is_block_proj())
2336 return false;
2337 }
2338 return false;
2339 }
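// Hedged example: for "volatile store; monitorenter" the MemBarVolatile
// emitted after the store can be elided, because the following
// Op_FastLock (a CAS on most platforms) already serializes the store
// against later loads; a following Op_MemBarAcquire, by contrast, signals
// an upcoming volatile load and forces this barrier to stay.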
2341 //=============================================================================
2342 //---------------------------State---------------------------------------------
2343 State::State(void) {
2344 #ifdef ASSERT
2345 _id = 0;
2346 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2347 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2348 //memset(_cost, -1, sizeof(_cost));
2349 //memset(_rule, -1, sizeof(_rule));
2350 #endif
2351 memset(_valid, 0, sizeof(_valid));
2352 }
2354 #ifdef ASSERT
2355 State::~State() {
2356 _id = 99;
2357 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2358 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2359 memset(_cost, -3, sizeof(_cost));
2360 memset(_rule, -3, sizeof(_rule));
2361 }
2362 #endif
2364 #ifndef PRODUCT
2365 //---------------------------dump----------------------------------------------
2366 void State::dump() {
2367 tty->print("\n");
2368 dump(0);
2369 }
2371 void State::dump(int depth) {
2372 for( int j = 0; j < depth; j++ )
2373 tty->print(" ");
2374 tty->print("--N: ");
2375 _leaf->dump();
2376 uint i;
2377 for( i = 0; i < _LAST_MACH_OPER; i++ )
2378 // Check for valid entry
2379 if( valid(i) ) {
2380 for( int j = 0; j < depth; j++ )
2381 tty->print(" ");
2382 assert(_cost[i] != max_juint, "cost must be a valid value");
2383 assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
2384 tty->print_cr("%s %d %s",
2385 ruleName[i], _cost[i], ruleName[_rule[i]] );
2386 }
2387 tty->print_cr("");
2389 for( i=0; i<2; i++ )
2390 if( _kids[i] )
2391 _kids[i]->dump(depth+1);
2392 }
2393 #endif