/*
 * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_matcher.cpp.incl"

OptoReg::Name OptoReg::c_frame_pointer;

const int Matcher::base2reg[Type::lastype] = {
  Node::NotAMachineReg,0,0, Op_RegI, Op_RegL, 0, Op_RegN,
  Node::NotAMachineReg, Node::NotAMachineReg, /* tuple, array */
  Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, Op_RegP, /* the pointers */
  0, 0/*abio*/,
  Op_RegP /* Return address */, 0, /* the memories */
  Op_RegF, Op_RegF, Op_RegF, Op_RegD, Op_RegD, Op_RegD,
  0 /*bottom*/
};
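
// Reading the table: index by an ideal type's base to get the ideal register
// class used to hold it, e.g. base2reg[Type::Int] == Op_RegI, while the three
// float and three double type bases map to Op_RegF and Op_RegD respectively.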

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize   = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher( Node_List &proj_list ) :
  PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone), _proj_list(proj_list),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {       // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
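
// Worked example with made-up slot numbers: if _old_SP is slot 8,
// out_preserve_stack_slots() is 2, and an incoming VMReg sits at stack
// slot 3, the warped name is OptoReg 8 + 3 + 2 == 13, and _in_arg_limit is
// bumped to at least 14 so the allocator treats slot 13 as occupied.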

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}
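
// For instance (illustrative numbers only): with fixed_slots() == 5,
// in_preserve_stack_slots() == 2 and a 2-slot stack alignment,
// round_to(5 + 2, 2) == 8, so the old SP is biased to stack2reg(8).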

#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif

//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    int ireg = base2reg[range->field_at(TypeFunc::Parms)->base()];
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves");
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Save the biased stack-slot register numbers
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));
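
  // E.g. (illustrative): if the incoming args end at slot 7 and
  // RegMask::SlotsPerLong == 2, _new_SP rounds up to slot 8, keeping any
  // doubleword spill slot allocated above it naturally aligned.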

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method("Before Matching");

  // Create a new ideal ConP #NULL node even if one already exists in old
  // space, to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int nodes = C->unique(); // save value
  C->set_unique(0);

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), nodes ));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used the first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}

//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}
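
// Typical use, as in Fixup_Save_On_Entry below: allocate edge_cnt + soe_cnt
// masks, let this helper fill the five fixed TypeFunc slots, then fill slots
// TypeFunc::Parms and up with return-value, exception-oop or save-on-entry
// masks as appropriate for each exit flavor.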

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * 3*6);

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init; i < _in_arg_limit; i = OptoReg::add(i,1))
    C->FIRST_STACK_mask().Insert(i);

  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    C->FIRST_STACK_mask().Insert(i);

  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}
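
// The policy characters come from the AD file's register definitions: 'E'
// marks save-on-entry (callee-saved) registers and 'A' always-saved ones;
// 'C' (caller-saved) and 'N' (no-save) registers are never save-on-entry.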

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  reth_rms[TypeFunc::Parms] = mreg2regmask[find_receiver(false)];
#ifdef _LP64
  // Need two slots for ptrs in 64-bit land
  reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
#endif

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C, 1) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
               _register_save_type[i  ] == Op_RegI &&
               _register_save_type[i+1] == Op_RegI &&
               is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C, 1) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new (C, 1) MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Compute generic short-offset Loads
#ifdef _LP64
  MachNode *spillCP = match_tree(new (C, 3) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
#endif
  MachNode *spillI  = match_tree(new (C, 3) LoadINode(NULL,mem,fp,atp));
  MachNode *spillL  = match_tree(new (C, 3) LoadLNode(NULL,mem,fp,atp));
  MachNode *spillF  = match_tree(new (C, 3) LoadFNode(NULL,mem,fp,atp));
  MachNode *spillD  = match_tree(new (C, 3) LoadDNode(NULL,mem,fp,atp));
  MachNode *spillP  = match_tree(new (C, 3) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");

  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();
}
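
// Note the trick used above: instead of hard-coding per-platform register
// classes, we synthesize a dummy short-offset Load of each basic type off the
// frame pointer, match it, and memoize the matched node's out_RegMask() as
// the canonical register mask for that ideal register class.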

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchRead:
    case Op_PrefetchWrite:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif

//------------------------------MStack-----------------------------------------
// State and MStack classes used in xform() and find_shared() iterative methods.
enum Node_State { Pre_Visit,     // node has to be pre-visited
                  Visit,         // visit node
                  Post_Visit,    // post-visit node
                  Alt_Post_Visit // alternative post-visit path
};

class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
};
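
// Sketch of the driver protocol (this is how xform() below uses MStack): a
// child is pushed in the Visit state along with its (parent, input index)
// pair; once the child reaches Post_Visit, parent() pops back to that pair so
// the transformed child can be wired in with set_req() or add_prec().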

//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root

  while (mstack.is_nonempty()) {
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {                  // Nothing the matcher cares about
            if( n->is_Proj() && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);     // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}
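
// Worked example with made-up numbers: if begin_out_arg_area is slot 20, an
// outgoing VMReg at stack slot 2 warps to OptoReg 22 and raises
// out_arg_limit_per_call to 23, i.e. this call site kills outgoing stack
// slots up to but not including 23.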

//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions.  Calls match special.
// They match alone with no children.  Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode *mcall = NULL;
  uint cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod* method = NULL;
  bool is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks.  These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;

  // Place where the first outgoing argument can possibly be put.
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != NULL && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());

  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
    BasicType *sig_bt    = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
    call->calling_convention( sig_bt, parm_regs, argcnt );

#ifdef ASSERT
    // Sanity check users' calling convention.  Really handy during
    // the initial porting effort.  Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
        if( !parm_regs[i].first()->is_valid() &&
            !parm_regs[i].second()->is_valid() ) continue;
        VMReg reg1 = parm_regs[i].first();
        VMReg reg2 = parm_regs[i].second();
        for (int j = 0; j < i; j++) {
          if( !parm_regs[j].first()->is_valid() &&
              !parm_regs[j].second()->is_valid() ) continue;
          VMReg reg3 = parm_regs[j].first();
          VMReg reg4 = parm_regs[j].second();
          if( !reg1->is_valid() ) {
            assert( !reg2->is_valid(), "valid halvsies" );
          } else if( !reg3->is_valid() ) {
            assert( !reg4->is_valid(), "valid halvsies" );
          } else {
            assert( reg1 != reg2, "calling conv. must produce distinct regs");
            assert( reg1 != reg3, "calling conv. must produce distinct regs");
            assert( reg1 != reg4, "calling conv. must produce distinct regs");
            assert( reg2 != reg3, "calling conv. must produce distinct regs");
            assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
            assert( reg3 != reg4, "calling conv. must produce distinct regs");
          }
        }
      }
    }
#endif

    // Visit each argument.  Compute its outgoing register mask.
    // Return results can now have 2 bits returned (one per register half).
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) {
        continue;               // Avoid Halves
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments

    // Compute number of stack slots needed to restore stack in case of
    // Pascal-style argument popping.
    mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
  }

  if (is_method_handle_invoke) {
    // Kill some extra stack space in case method handles want to do
    // a little in-place argument insertion.
    int regs_per_word = NOT_LP64(1) LP64_ONLY(2); // %%% make a global const!
    out_arg_limit_per_call += MethodHandlePushLimit * regs_per_word;
    // Do not update mcall->_argsize because (a) the extra space is not
    // pushed as arguments and (b) _argsize is dead (not used anywhere).
  }

  // Compute the max stack slot killed by any call.  These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;

  if (mcall) {
    // Kill the outgoing argument area, including any non-argument holes and
    // any legacy C-killed slots.  Use Fat-Projections to do the killing.
    // Since the max-per-method covers the max-per-call-site and debug info
    // is excluded on the max-per-method basis, debug info cannot land in
    // this killed area.
    uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new (C, 1) MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
    if (!RegMask::can_represent(OptoReg::Name(out_arg_limit_per_call-1))) {
      C->record_method_not_compilable_all_tiers("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
        proj->_rout.Insert(OptoReg::Name(i));
    }
    if( proj->_rout.is_NotEmpty() )
      _proj_list.push(proj);
  }
  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }

  // Debug inputs begin just after the last incoming parameter
  assert( (mcall == NULL) || (mcall->jvms() == NULL) ||
          (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "" );

  // Move the OopMap
  msfpt->_oop_map = sfpt->_oop_map;

  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
  return msfpt;
}

//---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the whole-sale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, the _states_arena arena will be reset
  // freeing all State objects.
  ResourceMark rm( &_states_arena );

  LabelRootDepth = 0;

  // StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
#ifdef ASSERT
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
  State *s = new (&_states_arena) State;
  s->_kids[0] = NULL;
  s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
  Label_Root( n, s, n->in(0), mem );
  if (C->failing()) return NULL;

  // The minimum cost match for the whole tree is found at the root State
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
  for( i = 0; i < NUM_OPERANDS; i++ ) {
    if( s->valid(i) &&                // valid entry and
        s->_cost[i] < cost &&         // low cost and
        s->_rule[i] >= NUM_OPERANDS ) // not an operand
      cost = s->_cost[mincost=i];
  }
  if (mincost == max_juint) {
#ifndef PRODUCT
    tty->print("No matching rule for:");
    s->dump();
#endif
    Matcher::soft_match_failure();
    return NULL;
  }
  // Reduce input tree based upon the state labels to machine Nodes
  MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
#ifdef ASSERT
  _old2new_map.map(n->_idx, m);
  _new2old_map.map(m->_idx, (Node*)n);
#endif

  // Add any Matcher-ignored edges
  uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
    assert( mem == (Node*)1, "" );
    start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
        m->add_req( n->in(i) );
    }
  }

  debug_only( _mem_node = save_mem_node; )
  return m;
}
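
// Illustrative match (instruction name hypothetical): on a machine with an
// add-to-memory form, the tree (StoreI mem p (AddI (LoadI mem p) (ConI 1)))
// can label and reduce to a single incI_mem-style instruction, precisely
// because the Store root and the interior Load were required to share the
// same Memory input above.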

//------------------------------match_into_reg---------------------------------
// Choose to either match this Node in a register or part of the current
// match tree.  Return true for requiring a register and false for matching
// as part of the current match tree.
static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {

  const Type *t = m->bottom_type();

  if( t->singleton() ) {
    // Never force constants into registers.  Allow them to match as
    // constants or registers.  Copies of the same value will share
    // the same register.  See find_shared_node.
    return false;
  } else {                      // Not a constant
    // Stop recursion if they have different Controls.
    // Slot 0 of constants is not really a Control.
    if( control && m->in(0) && control != m->in(0) ) {

      // Actually, we can live with the most conservative control we
      // find, if it post-dominates the others.  This allows us to
      // pick up load/op/store trees where the load can float a little
      // above the store.
      Node *x = control;
      const uint max_scan = 6;  // Arbitrary scan cutoff
      uint j;
      for( j=0; j<max_scan; j++ ) {
        if( x->is_Region() )    // Bail out at merge points
          return true;
        x = x->in(0);
        if( x == m->in(0) )     // Does 'control' post-dominate
          break;                // m->in(0)?  If so, we can use it
      }
      if( j == max_scan )       // No post-domination before scan end?
        return true;            // Then break the match tree up
    }
    if (m->is_DecodeN() && Matcher::clone_shift_expressions) {
      // These are commonly used in address expressions and can
      // efficiently fold into them on X64 in some cases.
      return false;
    }
  }

  // Not forceable cloning.  If shared, put it into a register.
  return shared;
}
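
// Example of the post-dominance scan: in a load/op/store tree where the
// Load's control is an earlier test and 'control' is the Store's later block
// entry, walking up at most max_scan (6) control edges from 'control' can
// reach m->in(0), so the Load may still be folded into the larger match.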

//------------------------------Instruction Selection--------------------------
// Label method walks a "tree" of nodes, using the ADLC generated DFA to match
// ideal nodes to machine instructions.  Trees are delimited by shared Nodes,
// things the Matcher does not match (e.g., Memory), and things with different
// Controls (hence forced into different blocks).  We pass in the Control
// selected for this entire State tree.

// The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
// Store and the Load must have identical Memories (as well as identical
// pointers).  Since the Matcher does not have anything for Memory (and
// does not handle DAGs), I have to match the Memory input myself.  If the
// Tree root is a Store, I require all Loads to have the identical memory.
Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
  // Since Label_Root is a recursive function, it's possible that we might run
  // out of stack space.  See bugs 6272980 & 6227033 for more info.
  LabelRootDepth++;
  if (LabelRootDepth > MaxLabelRootDepth) {
    C->record_method_not_compilable_all_tiers("Out of stack space, increase MaxLabelRootDepth");
    return NULL;
  }
  uint care = 0;                // Edges matcher cares about
  uint cnt = n->req();
  uint i = 0;

  // Examine children for memory state
  // Can only subsume a child into your match-tree if that child's memory state
  // is not modified along the path to another input.
  // It is unsafe even if the other inputs are separate roots.
  Node *input_mem = NULL;
  for( i = 1; i < cnt; i++ ) {
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    assert( m, "expect non-null children" );
    if( m->is_Load() ) {
      if( input_mem == NULL ) {
        input_mem = m->in(MemNode::Memory);
      } else if( input_mem != m->in(MemNode::Memory) ) {
        input_mem = NodeSentinel;
      }
    }
  }

  for( i = 1; i < cnt; i++ ){// For my children
    if( !n->match_edge(i) ) continue;
    Node *m = n->in(i);         // Get ith input
    // Allocate states out of a private arena
    State *s = new (&_states_arena) State;
    svec->_kids[care++] = s;
    assert( care <= 2, "binary only for now" );

    // Recursively label the State tree.
    s->_kids[0] = NULL;
    s->_kids[1] = NULL;
    s->_leaf = m;

    // Check for leaves of the State Tree; things that cannot be a part of
    // the current tree.  If it finds any, that value is matched as a
    // register operand.  If not, then the normal matching is used.
    if( match_into_reg(n, m, control, i, is_shared(m)) ||
        //
        // Stop recursion if this is a LoadNode and the root of this tree is a
        // StoreNode and the load & store have different memories.
        ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
        // Can NOT include the match of a subtree when its memory state
        // is used by any of the other subtrees
        (input_mem == NodeSentinel) ) {
#ifndef PRODUCT
      // Print when we exclude matching due to different memory states at input-loads
      if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
        && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
        tty->print_cr("invalid input_mem");
      }
#endif
      // Switch to a register-only opcode; this value must be in a register
      // and cannot be subsumed as part of a larger instruction.
      s->DFA( m->ideal_reg(), m );

    } else {
      // If match tree has no control and we do, adopt it for entire tree
      if( control == NULL && m->in(0) != NULL && m->req() > 1 )
        control = m->in(0);         // Pick up control
      // Else match as a normal part of the match tree.
      control = Label_Root(m,s,control,mem);
      if (C->failing()) return NULL;
    }
  }

  // Call DFA to match this node, and return
  svec->DFA( n->Opcode(), n );

#ifdef ASSERT
  uint x;
  for( x = 0; x < _LAST_MACH_OPER; x++ )
    if( svec->valid(x) )
      break;

  if (x >= _LAST_MACH_OPER) {
    n->dump();
    svec->dump();
    assert( false, "bad AD file" );
  }
#endif
  return control;
}
1456 // Con nodes reduced using the same rule can share their MachNode
1457 // which reduces the number of copies of a constant in the final
1458 // program. The register allocator is free to split uses later to
1459 // split live ranges.
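// A small illustration (rule and operand names are hypothetical): if two
// separate uses of the same ConP node are both reduced by a loadConP-style
// rule, the second reduction can simply return the MachNode built for the
// first, so the constant is materialized once; the allocator may still
// split the resulting live range if the uses end up far apart.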
1460 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1461 if (!leaf->is_Con() && !leaf->is_DecodeN()) return NULL;
1463 // See if this Con has already been reduced using this rule.
1464 if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1465 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1466 if (last != NULL && rule == last->rule()) {
1467 // Don't expect control change for DecodeN
1468 if (leaf->is_DecodeN())
1469 return last;
1470 // Get the new space root.
1471 Node* xroot = new_node(C->root());
1472 if (xroot == NULL) {
1473 // This shouldn't happen given the order of matching.
1474 return NULL;
1475 }
1477 // Shared constants need to have their control be root so they
1478 // can be scheduled properly.
1479 Node* control = last->in(0);
1480 if (control != xroot) {
1481 if (control == NULL || control == C->root()) {
1482 last->set_req(0, xroot);
1483 } else {
1484 assert(false, "unexpected control");
1485 return NULL;
1486 }
1487 }
1488 return last;
1489 }
1490 return NULL;
1491 }
1494 //------------------------------ReduceInst-------------------------------------
1495 // Reduce a State tree (with given Control) into a tree of MachNodes.
1496 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1497 // complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes.
1498 // Each MachNode has a number of complicated MachOper operands; each
1499 // MachOper also covers a further tree of Ideal Nodes.
1501 // The root of the Ideal match tree is always an instruction, so we enter
1502 // the recursion here. After building the MachNode, we need to recurse
1503 // the tree checking for these cases:
1504 // (1) Child is an instruction -
1505 // Build the instruction (recursively), add it as an edge.
1506 // Build a simple operand (register) to hold the result of the instruction.
1507 // (2) Child is an interior part of an instruction -
1508 // Skip over it (do nothing)
1509 // (3) Child is the start of an operand -
1510 // Build the operand, place it inside the instruction
1511 // Call ReduceOper.
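// As an illustration (a hypothetical shape, assuming the AD file has an
// add-from-memory rule): reducing
//
//   AddI(LoadI(mem, adr), ConI(1))
//
// builds one MachNode; the LoadI subtree becomes a memory MachOper and the
// ConI becomes an immediate MachOper (case 3 twice), while interior nodes
// of those operands are simply skipped over (case 2).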
1512 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1513 assert( rule >= NUM_OPERANDS, "called with operand rule" );
1515 MachNode* shared_node = find_shared_node(s->_leaf, rule);
1516 if (shared_node != NULL) {
1517 return shared_node;
1518 }
1520 // Build the object to represent this state & prepare for recursive calls
1521 MachNode *mach = s->MachNodeGenerator( rule, C );
1522 mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
1523 assert( mach->_opnds[0] != NULL, "Missing result operand" );
1524 Node *leaf = s->_leaf;
1525 // Check for instruction or instruction chain rule
1526 if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1527 assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1528 "duplicating node that's already been matched");
1529 // Instruction
1530 mach->add_req( leaf->in(0) ); // Set initial control
1531 // Reduce interior of complex instruction
1532 ReduceInst_Interior( s, rule, mem, mach, 1 );
1533 } else {
1534 // Instruction chain rules are data-dependent on their inputs
1535 mach->add_req(0); // Set initial control to none
1536 ReduceInst_Chain_Rule( s, rule, mem, mach );
1537 }
1539 // If a Memory was used, insert a Memory edge
1540 if( mem != (Node*)1 ) {
1541 mach->ins_req(MemNode::Memory,mem);
1542 #ifdef ASSERT
1543 // Verify adr type after matching memory operation
1544 const MachOper* oper = mach->memory_operand();
1545 if (oper != NULL && oper != (MachOper*)-1) {
1546 // It has a unique memory operand. Find corresponding ideal mem node.
1547 Node* m = NULL;
1548 if (leaf->is_Mem()) {
1549 m = leaf;
1550 } else {
1551 m = _mem_node;
1552 assert(m != NULL && m->is_Mem(), "expecting memory node");
1553 }
1554 const Type* mach_at = mach->adr_type();
1555 // A DecodeN node consumed by an address may have a different type
1556 // than its input. Don't compare types in that case.
1557 if (m->adr_type() != mach_at &&
1558 (m->in(MemNode::Address)->is_DecodeN() ||
1559 (m->in(MemNode::Address)->is_AddP() &&
1560 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeN()) ||
1561 (m->in(MemNode::Address)->is_AddP() &&
1562 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1563 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeN()))) {
1564 mach_at = m->adr_type();
1565 }
1566 if (m->adr_type() != mach_at) {
1567 m->dump();
1568 tty->print_cr("mach:");
1569 mach->dump(1);
1570 }
1571 assert(m->adr_type() == mach_at, "matcher should not change adr type");
1572 }
1573 #endif
1574 }
1576 // If the _leaf is an AddP, insert the base edge
1577 if( leaf->is_AddP() )
1578 mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1580 uint num_proj = _proj_list.size();
1582 // Perform any 1-to-many expansions required
1583 MachNode *ex = mach->Expand(s,_proj_list, mem);
1584 if( ex != mach ) {
1585 assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1586 if( ex->in(1)->is_Con() )
1587 ex->in(1)->set_req(0, C->root());
1588 // Remove old node from the graph
1589 for( uint i=0; i<mach->req(); i++ ) {
1590 mach->set_req(i,NULL);
1591 }
1592 #ifdef ASSERT
1593 _new2old_map.map(ex->_idx, s->_leaf);
1594 #endif
1595 }
1597 // PhaseChaitin::fixup_spills will sometimes generate spill code
1598 // via the matcher. By that time, nodes have been wired into the CFG,
1599 // and any further nodes generated by expand rules will be left hanging
1600 // in space, and will not get emitted as output code. Catch this.
1601 // Also, catch any new register allocation constraints ("projections")
1602 // generated belatedly during spill code generation.
1603 if (_allocation_started) {
1604 guarantee(ex == mach, "no expand rules during spill generation");
1605 guarantee(_proj_list.size() == num_proj, "no allocation during spill generation");
1606 }
1608 if (leaf->is_Con() || leaf->is_DecodeN()) {
1609 // Record the con for sharing
1610 _shared_nodes.map(leaf->_idx, ex);
1611 }
1613 return ex;
1614 }
1616 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1617 // 'op' is what I am expecting to receive
1618 int op = _leftOp[rule];
1619 // Operand type to catch the child's result.
1620 // This is what my child will give me.
1621 int opnd_class_instance = s->_rule[op];
1622 // Choose the operand-class instance if 'op' names an operand class.
1623 // This is what I will receive.
1624 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1625 // New rule for child. Chase operand classes to get the actual rule.
1626 int newrule = s->_rule[catch_op];
1628 if( newrule < NUM_OPERANDS ) {
1629 // Chain from operand or operand class, may be output of shared node
1630 assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1631 "Bad AD file: Instruction chain rule must chain from operand");
1632 // Insert operand into array of operands for this instruction
1633 mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1635 ReduceOper( s, newrule, mem, mach );
1636 } else {
1637 // Chain from the result of an instruction
1638 assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1639 mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1640 Node *mem1 = (Node*)1;
1641 debug_only(Node *save_mem_node = _mem_node;)
1642 mach->add_req( ReduceInst(s, newrule, mem1) );
1643 debug_only(_mem_node = save_mem_node;)
1644 }
1645 return;
1646 }
1649 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1650 if( s->_leaf->is_Load() ) {
1651 Node *mem2 = s->_leaf->in(MemNode::Memory);
1652 assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1653 debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1654 mem = mem2;
1655 }
1656 if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1657 if( mach->in(0) == NULL )
1658 mach->set_req(0, s->_leaf->in(0));
1659 }
1661 // Now recursively walk the state tree & add operand list.
1662 for( uint i=0; i<2; i++ ) { // binary tree
1663 State *newstate = s->_kids[i];
1664 if( newstate == NULL ) break; // Might only have 1 child
1665 // 'op' is what I am expecting to receive
1666 int op;
1667 if( i == 0 ) {
1668 op = _leftOp[rule];
1669 } else {
1670 op = _rightOp[rule];
1671 }
1672 // Operand type to catch the child's result.
1673 // This is what my child will give me.
1674 int opnd_class_instance = newstate->_rule[op];
1675 // Choose the operand-class instance if 'op' names an operand class.
1676 // This is what I will receive.
1677 int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1678 // New rule for child. Chase operand classes to get the actual rule.
1679 int newrule = newstate->_rule[catch_op];
1681 if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1682 // Operand/operandClass
1683 // Insert operand into array of operands for this instruction
1684 mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
1685 ReduceOper( newstate, newrule, mem, mach );
1687 } else { // Child is internal operand or new instruction
1688 if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1689 // internal operand --> call ReduceInst_Interior
1690 // Interior of complex instruction. Do nothing but recurse.
1691 num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1692 } else {
1693 // instruction --> call build operand( ) to catch result
1694 // --> ReduceInst( newrule )
1695 mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
1696 Node *mem1 = (Node*)1;
1697 debug_only(Node *save_mem_node = _mem_node;)
1698 mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1699 debug_only(_mem_node = save_mem_node;)
1700 }
1701 }
1702 assert( mach->_opnds[num_opnds-1], "" );
1703 }
1704 return num_opnds;
1705 }
1707 // This routine walks the interior of possible complex operands.
1708 // At each point we check our children in the match tree:
1709 // (1) No children -
1710 // We are a leaf; add _leaf field as an input to the MachNode
1711 // (2) Child is an internal operand -
1712 // Skip over it ( do nothing )
1713 // (3) Child is an instruction -
1714 // Call ReduceInst recursively and add the resulting
1715 // instruction as an input to the MachNode
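// E.g. for a memory operand covering AddP(base, offset) -- names are
// illustrative; the real operand shapes come from the AD file -- the AddP
// itself is an internal operand (case 2, just recurse), while 'base' and
// 'offset' are leaves (case 1) that become direct inputs of the enclosing
// MachNode.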
1716 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1717 assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1718 State *kid = s->_kids[0];
1719 assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1721 // Leaf? And not subsumed?
1722 if( kid == NULL && !_swallowed[rule] ) {
1723 mach->add_req( s->_leaf ); // Add leaf pointer
1724 return; // Bail out
1725 }
1727 if( s->_leaf->is_Load() ) {
1728 assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1729 mem = s->_leaf->in(MemNode::Memory);
1730 debug_only(_mem_node = s->_leaf;)
1731 }
1732 if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1733 if( !mach->in(0) )
1734 mach->set_req(0,s->_leaf->in(0));
1735 else {
1736 assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1737 }
1738 }
1740 for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree
1741 int newrule;
1742 if( i == 0 )
1743 newrule = kid->_rule[_leftOp[rule]];
1744 else
1745 newrule = kid->_rule[_rightOp[rule]];
1747 if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1748 // Internal operand; recurse but do nothing else
1749 ReduceOper( kid, newrule, mem, mach );
1751 } else { // Child is a new instruction
1752 // Reduce the instruction, and add a direct pointer from this
1753 // machine instruction to the newly reduced one.
1754 Node *mem1 = (Node*)1;
1755 debug_only(Node *save_mem_node = _mem_node;)
1756 mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1757 debug_only(_mem_node = save_mem_node;)
1758 }
1759 }
1760 }
1763 // -------------------------------------------------------------------------
1764 // Java-Java calling convention
1765 // (what you use when Java calls Java)
1767 //------------------------------find_receiver----------------------------------
1768 // For a given signature, return the OptoReg for parameter 0.
1769 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1770 VMRegPair regs;
1771 BasicType sig_bt = T_OBJECT;
1772 calling_convention(&sig_bt, ®s, 1, is_outgoing);
1773 // Return argument 0 register. In the LP64 build pointers
1774 // take 2 registers, but the VM wants only the 'main' name.
1775 return OptoReg::as_OptoReg(regs.first());
1776 }
1778 // A method-klass-holder may be passed in the inline_cache_reg
1779 // and then expanded into the inline_cache_reg and a method_oop register
1780 // defined in ad_<arch>.cpp
1783 //------------------------------find_shared------------------------------------
1784 // Set bits if Node is shared or otherwise a root
1785 void Matcher::find_shared( Node *n ) {
1786 // Allocate stack of size C->unique() * 2 to avoid frequent realloc
1787 MStack mstack(C->unique() * 2);
1788 // Mark nodes as address_visited if they are inputs to an address expression
1789 VectorSet address_visited(Thread::current()->resource_area());
1790 mstack.push(n, Visit); // Don't need to pre-visit root node
1791 while (mstack.is_nonempty()) {
1792 n = mstack.node(); // Leave node on stack
1793 Node_State nstate = mstack.state();
1794 uint nop = n->Opcode();
1795 if (nstate == Pre_Visit) {
1796 if (address_visited.test(n->_idx)) { // Visited in address already?
1797 // Flag as visited and shared now.
1798 set_visited(n);
1799 }
1800 if (is_visited(n)) { // Visited already?
1801 // Node is shared and has no reason to clone. Flag it as shared.
1802 // This causes it to match into a register for the sharing.
1803 set_shared(n); // Flag as shared and
1804 mstack.pop(); // remove node from stack
1805 continue;
1806 }
1807 nstate = Visit; // Not already visited; so visit now
1808 }
1809 if (nstate == Visit) {
1810 mstack.set_state(Post_Visit);
1811 set_visited(n); // Flag as visited now
1812 bool mem_op = false;
1814 switch( nop ) { // Handle some opcodes special
1815 case Op_Phi: // Treat Phis as shared roots
1816 case Op_Parm:
1817 case Op_Proj: // All handled specially during matching
1818 case Op_SafePointScalarObject:
1819 set_shared(n);
1820 set_dontcare(n);
1821 break;
1822 case Op_If:
1823 case Op_CountedLoopEnd:
1824 mstack.set_state(Alt_Post_Visit); // Alternative way
1825 // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps
1826 // with matching cmp/branch in 1 instruction. The Matcher needs the
1827 // Bool and CmpX side-by-side, because it can only get at constants
1828 // that are at the leaves of Match trees, and the Bool's condition acts
1829 // as a constant here.
1830 mstack.push(n->in(1), Visit); // Clone the Bool
1831 mstack.push(n->in(0), Pre_Visit); // Visit control input
1832 continue; // while (mstack.is_nonempty())
1833 case Op_ConvI2D: // These forms efficiently match with a prior
1834 case Op_ConvI2F: // Load but not a following Store
1835 if( n->in(1)->is_Load() && // Prior load
1836 n->outcnt() == 1 && // Not already shared
1837 n->unique_out()->is_Store() ) // Following store
1838 set_shared(n); // Force it to be a root
1839 break;
1840 case Op_ReverseBytesI:
1841 case Op_ReverseBytesL:
1842 if( n->in(1)->is_Load() && // Prior load
1843 n->outcnt() == 1 ) // Not already shared
1844 set_shared(n); // Force it to be a root
1845 break;
1846 case Op_BoxLock: // Can't match until we get stack-regs in ADLC
1847 case Op_IfFalse:
1848 case Op_IfTrue:
1849 case Op_MachProj:
1850 case Op_MergeMem:
1851 case Op_Catch:
1852 case Op_CatchProj:
1853 case Op_CProj:
1854 case Op_JumpProj:
1855 case Op_JProj:
1856 case Op_NeverBranch:
1857 set_dontcare(n);
1858 break;
1859 case Op_Jump:
1860 mstack.push(n->in(1), Visit); // Switch Value
1861 mstack.push(n->in(0), Pre_Visit); // Visit Control input
1862 continue; // while (mstack.is_nonempty())
1863 case Op_StrComp:
1864 case Op_StrEquals:
1865 case Op_StrIndexOf:
1866 case Op_AryEq:
1867 set_shared(n); // Force result into register (it will be anyways)
1868 break;
1869 case Op_ConP: { // Convert pointers above the centerline to NULL
1870 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
1871 const TypePtr* tp = tn->type()->is_ptr();
1872 if (tp->_ptr == TypePtr::AnyNull) {
1873 tn->set_type(TypePtr::NULL_PTR);
1874 }
1875 break;
1876 }
1877 case Op_ConN: { // Convert narrow pointers above the centerline to NULL
1878 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
1879 const TypePtr* tp = tn->type()->make_ptr();
1880 if (tp && tp->_ptr == TypePtr::AnyNull) {
1881 tn->set_type(TypeNarrowOop::NULL_PTR);
1882 }
1883 break;
1884 }
1885 case Op_Binary: // These are introduced in the Post_Visit state.
1886 ShouldNotReachHere();
1887 break;
1888 case Op_ClearArray:
1889 case Op_SafePoint:
1890 mem_op = true;
1891 break;
1892 default:
1893 if( n->is_Store() ) {
1894 // Do match stores, despite no ideal reg
1895 mem_op = true;
1896 break;
1897 }
1898 if( n->is_Mem() ) { // Loads and LoadStores
1899 mem_op = true;
1900 // Loads must be root of match tree due to prior load conflict
1901 if( C->subsume_loads() == false )
1902 set_shared(n);
1903 }
1904 // Fall into default case
1905 if( !n->ideal_reg() )
1906 set_dontcare(n); // Unmatchable Nodes
1907 } // end_switch
1909 for(int i = n->req() - 1; i >= 0; --i) { // For my children
1910 Node *m = n->in(i); // Get ith input
1911 if (m == NULL) continue; // Ignore NULLs
1912 uint mop = m->Opcode();
1914 // Must clone all producers of flags, or we will not match correctly.
1915 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
1916 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
1917 // are also there, so we may match a float-branch to int-flags and
1918 // expect the allocator to haul the flags from the int-side to the
1919 // fp-side. No can do.
1920 if( _must_clone[mop] ) {
1921 mstack.push(m, Visit);
1922 continue; // for(int i = ...)
1923 }
1925 if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
1926 // Bases used in addresses must be shared but since
1927 // they are shared through a DecodeN they may appear
1928 // to have a single use so force sharing here.
1929 set_shared(m->in(AddPNode::Base)->in(1));
1930 }
1932 // Clone addressing expressions as they are "free" in memory access instructions
1933 if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
1934 // Some inputs of an address expression are not put on the stack
1935 // to avoid marking them as shared and forcing them into a register
1936 // if they are used only in address expressions.
1937 // But they should be marked as shared if there are other uses
1938 // besides address expressions.
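// A sketch of the kind of tree this is about (LP64 array addressing; the
// exact shape depends on the platform and on final graph reshaping):
//
//   AddP(base, AddP(base, adr, LShiftX(ConvI2L(index), ConI(3))), off)
//
// On a two-add machine the whole tree folds into one
// [adr + index*8 + off] operand, so none of these interior nodes should be
// forced into a register merely because the address tree reuses them.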
1940 Node *off = m->in(AddPNode::Offset);
1941 if( off->is_Con() &&
1942 // When there are other uses besides address expressions
1943 // put it on stack and mark as shared.
1944 !is_visited(m) ) {
1945 address_visited.test_set(m->_idx); // Flag as address_visited
1946 Node *adr = m->in(AddPNode::Address);
1948 // Intel, ARM and friends can handle 2 adds in addressing mode
1949 if( clone_shift_expressions && adr->is_AddP() &&
1950 // AtomicAdd is not an addressing expression.
1951 // Cheap to find it by looking for screwy base.
1952 !adr->in(AddPNode::Base)->is_top() &&
1953 // Are there other uses besides address expressions?
1954 !is_visited(adr) ) {
1955 address_visited.set(adr->_idx); // Flag as address_visited
1956 Node *shift = adr->in(AddPNode::Offset);
1957 // Check for shift by small constant as well
1958 if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
1959 shift->in(2)->get_int() <= 3 &&
1960 // Are there other uses besides address expressions?
1961 !is_visited(shift) ) {
1962 address_visited.set(shift->_idx); // Flag as address_visited
1963 mstack.push(shift->in(2), Visit);
1964 Node *conv = shift->in(1);
1965 #ifdef _LP64
1966 // Allow the Matcher to match the rule which bypasses the
1967 // ConvI2L operation for an array index on LP64
1968 // if the index value is positive.
1969 if( conv->Opcode() == Op_ConvI2L &&
1970 conv->as_Type()->type()->is_long()->_lo >= 0 &&
1971 // Are there other uses besides address expressions?
1972 !is_visited(conv) ) {
1973 address_visited.set(conv->_idx); // Flag as address_visited
1974 mstack.push(conv->in(1), Pre_Visit);
1975 } else
1976 #endif
1977 mstack.push(conv, Pre_Visit);
1978 } else {
1979 mstack.push(shift, Pre_Visit);
1980 }
1981 mstack.push(adr->in(AddPNode::Address), Pre_Visit);
1982 mstack.push(adr->in(AddPNode::Base), Pre_Visit);
1983 } else { // Sparc, Alpha, PPC and friends
1984 mstack.push(adr, Pre_Visit);
1985 }
1987 // Clone X+offset as it also folds into most addressing expressions
1988 mstack.push(off, Visit);
1989 mstack.push(m->in(AddPNode::Base), Pre_Visit);
1990 continue; // for(int i = ...)
1991 } // if( off->is_Con() )
1992 } // if( mem_op &&
1993 mstack.push(m, Pre_Visit);
1994 } // for(int i = ...)
1995 }
1996 else if (nstate == Alt_Post_Visit) {
1997 mstack.pop(); // Remove node from stack
1998 // We cannot remove the Cmp input from the Bool here, as the Bool may be
1999 // shared and all users of the Bool need to move the Cmp in parallel.
2000 // This leaves both the Bool and the If pointing at the Cmp. To
2001 // prevent the Matcher from trying to Match the Cmp along both paths
2002 // BoolNode::match_edge always returns a zero.
2004 // We reorder the Op_If in a pre-order manner, so we can visit without
2005 // accidentally sharing the Cmp (the Bool and the If make 2 users).
2006 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2007 }
2008 else if (nstate == Post_Visit) {
2009 mstack.pop(); // Remove node from stack
2011 // Now hack a few special opcodes
2012 switch( n->Opcode() ) { // Handle some opcodes special
2013 case Op_StorePConditional:
2014 case Op_StoreIConditional:
2015 case Op_StoreLConditional:
2016 case Op_CompareAndSwapI:
2017 case Op_CompareAndSwapL:
2018 case Op_CompareAndSwapP:
2019 case Op_CompareAndSwapN: { // Convert trinary to binary-tree
2020 Node *newval = n->in(MemNode::ValueIn );
2021 Node *oldval = n->in(LoadStoreNode::ExpectedIn);
2022 Node *pair = new (C, 3) BinaryNode( oldval, newval );
2023 n->set_req(MemNode::ValueIn,pair);
2024 n->del_req(LoadStoreNode::ExpectedIn);
2025 break;
2026 }
2027 case Op_CMoveD: // Convert trinary to binary-tree
2028 case Op_CMoveF:
2029 case Op_CMoveI:
2030 case Op_CMoveL:
2031 case Op_CMoveN:
2032 case Op_CMoveP: {
2033 // Restructure into a binary tree for Matching. It's possible that
2034 // we could move this code up next to the graph reshaping for IfNodes
2035 // or vice-versa, but I do not want to debug this for Ladybird.
2036 // 10/2/2000 CNC.
2037 Node *pair1 = new (C, 3) BinaryNode(n->in(1),n->in(1)->in(1));
2038 n->set_req(1,pair1);
2039 Node *pair2 = new (C, 3) BinaryNode(n->in(2),n->in(3));
2040 n->set_req(2,pair2);
2041 n->del_req(3);
2042 break;
2043 }
2044 case Op_StrEquals: {
2045 Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3));
2046 n->set_req(2,pair1);
2047 n->set_req(3,n->in(4));
2048 n->del_req(4);
2049 break;
2050 }
2051 case Op_StrComp:
2052 case Op_StrIndexOf: {
2053 Node *pair1 = new (C, 3) BinaryNode(n->in(2),n->in(3));
2054 n->set_req(2,pair1);
2055 Node *pair2 = new (C, 3) BinaryNode(n->in(4),n->in(5));
2056 n->set_req(3,pair2);
2057 n->del_req(5);
2058 n->del_req(4);
2059 break;
2060 }
2061 default:
2062 break;
2063 }
2064 }
2065 else {
2066 ShouldNotReachHere();
2067 }
2068 } // end of while (mstack.is_nonempty())
2069 }
2071 #ifdef ASSERT
2072 // machine-independent root to machine-dependent root
2073 void Matcher::dump_old2new_map() {
2074 _old2new_map.dump();
2075 }
2076 #endif
2078 //---------------------------collect_null_checks-------------------------------
2079 // Find null checks in the ideal graph; write a machine-specific node for
2080 // each one. Used by later implicit-null-check handling. Actually collects
2081 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2082 // value being tested.
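// For example (a hypothetical graph): for "if (p != NULL) use(p)" we push
// the IfTrue projection (the common not-null path) together with the
// tested value p, so the later implicit-null-check pass can try to replace
// the explicit test with a memory instruction that faults on a NULL p.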
2083 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2084 Node *iff = proj->in(0);
2085 if( iff->Opcode() == Op_If ) {
2086 // During matching If's have Bool & Cmp side-by-side
2087 BoolNode *b = iff->in(1)->as_Bool();
2088 Node *cmp = iff->in(2);
2089 int opc = cmp->Opcode();
2090 if (opc != Op_CmpP && opc != Op_CmpN) return;
2092 const Type* ct = cmp->in(2)->bottom_type();
2093 if (ct == TypePtr::NULL_PTR ||
2094 (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2096 bool push_it = false;
2097 if( proj->Opcode() == Op_IfTrue ) {
2098 extern int all_null_checks_found;
2099 all_null_checks_found++;
2100 if( b->_test._test == BoolTest::ne ) {
2101 push_it = true;
2102 }
2103 } else {
2104 assert( proj->Opcode() == Op_IfFalse, "" );
2105 if( b->_test._test == BoolTest::eq ) {
2106 push_it = true;
2107 }
2108 }
2109 if( push_it ) {
2110 _null_check_tests.push(proj);
2111 Node* val = cmp->in(1);
2112 #ifdef _LP64
2113 if (UseCompressedOops && !Matcher::clone_shift_expressions &&
2114 val->bottom_type()->isa_narrowoop()) {
2115 //
2116 // Look for a DecodeN node which should be pinned to orig_proj.
2117 // On platforms (Sparc) which cannot handle 2 adds
2118 // in an addressing mode, we have to keep a DecodeN node and
2119 // use it to do an implicit NULL check in the address.
2120 //
2121 // DecodeN node was pinned to non-null path (orig_proj) during
2122 // CastPP transformation in final_graph_reshaping_impl().
2123 //
2124 uint cnt = orig_proj->outcnt();
2125 for (uint i = 0; i < cnt; i++) {
2126 Node* d = orig_proj->raw_out(i);
2127 if (d->is_DecodeN() && d->in(1) == val) {
2128 val = d;
2129 val->set_req(0, NULL); // Unpin now.
2130 break;
2131 }
2132 }
2133 }
2134 #endif
2135 _null_check_tests.push(val);
2136 }
2137 }
2138 }
2139 }
2141 //---------------------------validate_null_checks------------------------------
2142 // It's possible that the value being NULL checked is not the root of a match
2143 // tree. If so, I cannot use the value in an implicit null check.
2144 void Matcher::validate_null_checks( ) {
2145 uint cnt = _null_check_tests.size();
2146 for( uint i=0; i < cnt; i+=2 ) {
2147 Node *test = _null_check_tests[i];
2148 Node *val = _null_check_tests[i+1];
2149 if (has_new_node(val)) {
2150 // Is a match-tree root, so replace with the matched value
2151 _null_check_tests.map(i+1, new_node(val));
2152 } else {
2153 // Yank from candidate list
2154 _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2155 _null_check_tests.map(i,_null_check_tests[--cnt]);
2156 _null_check_tests.pop();
2157 _null_check_tests.pop();
2158 i-=2;
2159 }
2160 }
2161 }
2164 // Used by the DFA in dfa_sparc.cpp. Check for a prior FastLock
2165 // acting as an Acquire, in which case we don't need an Acquire here.
2166 // We retain the Node to act as a compiler ordering barrier.
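// E.g. (informally, for the SPARC DFA): on the fast path of a monitor
// enter, the FastLock's CAS already has acquire semantics, so a
// MemBarAcquire placed right after the locking region can be matched to
// no code while still acting as an ordering barrier in the IR.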
2167 bool Matcher::prior_fast_lock( const Node *acq ) {
2168 Node *r = acq->in(0);
2169 if( !r->is_Region() || r->req() <= 1 ) return false;
2170 Node *proj = r->in(1);
2171 if( !proj->is_Proj() ) return false;
2172 Node *call = proj->in(0);
2173 if( !call->is_Call() || call->as_Call()->entry_point() != OptoRuntime::complete_monitor_locking_Java() )
2174 return false;
2176 return true;
2177 }
2179 // Used by the DFA in dfa_sparc.cpp. Check for a following FastUnlock
2180 // acting as a Release, in which case we don't need a Release here.
2181 // We retain the Node to act as a compiler ordering barrier.
2182 bool Matcher::post_fast_unlock( const Node *rel ) {
2183 Compile *C = Compile::current();
2184 assert( rel->Opcode() == Op_MemBarRelease, "" );
2185 const MemBarReleaseNode *mem = (const MemBarReleaseNode*)rel;
2186 DUIterator_Fast imax, i = mem->fast_outs(imax);
2187 Node *ctrl = NULL;
2188 while( true ) {
2189 ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
2190 assert( ctrl->is_Proj(), "only projections here" );
2191 ProjNode *proj = (ProjNode*)ctrl;
2192 if( proj->_con == TypeFunc::Control &&
2193 !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
2194 break;
2195 i++;
2196 }
2197 Node *iff = NULL;
2198 for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
2199 Node *x = ctrl->fast_out(j);
2200 if( x->is_If() && x->req() > 1 &&
2201 !C->node_arena()->contains(x) ) { // Unmatched old-space only
2202 iff = x;
2203 break;
2204 }
2205 }
2206 if( !iff ) return false;
2207 Node *bol = iff->in(1);
2208 // The iff might be some random subclass of If or bol might be Con-Top
2209 if (!bol->is_Bool()) return false;
2210 assert( bol->req() > 1, "" );
2211 return (bol->in(1)->Opcode() == Op_FastUnlock);
2212 }
2214 // Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
2215 // atomic instruction acting as a store_load barrier without any
2216 // intervening volatile load, in which case we don't need a barrier here.
2217 // We retain the Node to act as a compiler ordering barrier.
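// E.g. (informally): a volatile store is normally followed by a StoreLoad
// barrier, but if the next memory-ordering operation is a CompareAndSwap,
// a FastLock, or another MemBarVolatile, that instruction already
// serializes stores against later loads, so this barrier can match to no
// code. A following MemBarAcquire (an upcoming volatile load) is the case
// where the barrier must stay.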
2218 bool Matcher::post_store_load_barrier(const Node *vmb) {
2219 Compile *C = Compile::current();
2220 assert( vmb->is_MemBar(), "" );
2221 assert( vmb->Opcode() != Op_MemBarAcquire, "" );
2222 const MemBarNode *mem = (const MemBarNode*)vmb;
2224 // Get the Proj node, ctrl, that can be used to iterate forward
2225 Node *ctrl = NULL;
2226 DUIterator_Fast imax, i = mem->fast_outs(imax);
2227 while( true ) {
2228 ctrl = mem->fast_out(i); // Throw out-of-bounds if proj not found
2229 assert( ctrl->is_Proj(), "only projections here" );
2230 ProjNode *proj = (ProjNode*)ctrl;
2231 if( proj->_con == TypeFunc::Control &&
2232 !C->node_arena()->contains(ctrl) ) // Unmatched old-space only
2233 break;
2234 i++;
2235 }
2237 for( DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++ ) {
2238 Node *x = ctrl->fast_out(j);
2239 int xop = x->Opcode();
2241 // We don't need current barrier if we see another or a lock
2242 // before seeing volatile load.
2243 //
2244 // Op_FastUnlock previously appeared in the Op_* list below.
2245 // With the advent of 1-0 lock operations we're no longer guaranteed
2246 // that a monitor exit operation contains a serializing instruction.
2248 if (xop == Op_MemBarVolatile ||
2249 xop == Op_FastLock ||
2250 xop == Op_CompareAndSwapL ||
2251 xop == Op_CompareAndSwapP ||
2252 xop == Op_CompareAndSwapN ||
2253 xop == Op_CompareAndSwapI)
2254 return true;
2256 if (x->is_MemBar()) {
2257 // We must retain this membar if there is an upcoming volatile
2258 // load, which will be preceded by an acquire membar.
2259 if (xop == Op_MemBarAcquire)
2260 return false;
2261 // For other kinds of barriers, check by pretending we
2262 // are them, and seeing if we can be removed.
2263 else
2264 return post_store_load_barrier((const MemBarNode*)x);
2265 }
2267 // Delicate code to detect the case of an upcoming fastlock block
2268 if( x->is_If() && x->req() > 1 &&
2269 !C->node_arena()->contains(x) ) { // Unmatched old-space only
2270 Node *iff = x;
2271 Node *bol = iff->in(1);
2272 // The iff might be some random subclass of If or bol might be Con-Top
2273 if (!bol->is_Bool()) return false;
2274 assert( bol->req() > 1, "" );
2275 return (bol->in(1)->Opcode() == Op_FastUnlock);
2276 }
2277 // probably not necessary to check for these
2278 if (x->is_Call() || x->is_SafePoint() || x->is_block_proj())
2279 return false;
2280 }
2281 return false;
2282 }
2284 //=============================================================================
2285 //---------------------------State---------------------------------------------
2286 State::State(void) {
2287 #ifdef ASSERT
2288 _id = 0;
2289 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2290 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2291 //memset(_cost, -1, sizeof(_cost));
2292 //memset(_rule, -1, sizeof(_rule));
2293 #endif
2294 memset(_valid, 0, sizeof(_valid));
2295 }
2297 #ifdef ASSERT
2298 State::~State() {
2299 _id = 99;
2300 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2301 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2302 memset(_cost, -3, sizeof(_cost));
2303 memset(_rule, -3, sizeof(_rule));
2304 }
2305 #endif
2307 #ifndef PRODUCT
2308 //---------------------------dump----------------------------------------------
2309 void State::dump() {
2310 tty->print("\n");
2311 dump(0);
2312 }
2314 void State::dump(int depth) {
2315 for( int j = 0; j < depth; j++ )
2316 tty->print(" ");
2317 tty->print("--N: ");
2318 _leaf->dump();
2319 uint i;
2320 for( i = 0; i < _LAST_MACH_OPER; i++ )
2321 // Check for valid entry
2322 if( valid(i) ) {
2323 for( int j = 0; j < depth; j++ )
2324 tty->print(" ");
2325 assert(_cost[i] != max_juint, "cost must be a valid value");
2326 assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
2327 tty->print_cr("%s %d %s",
2328 ruleName[i], _cost[i], ruleName[_rule[i]] );
2329 }
2330 tty->print_cr("");
2332 for( i=0; i<2; i++ )
2333 if( _kids[i] )
2334 _kids[i]->dump(depth+1);
2335 }
2336 #endif