/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/connode.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/matcher.hpp"
#include "opto/memnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"
#include "opto/type.hpp"
#include "opto/vectornode.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
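// The adfiles/ad_<arch>.hpp headers below are generated by the ADLC from the
// architecture's .ad description; AD_MD_HPP, when defined by the build, names
// that generated header directly.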
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#elif defined TARGET_ARCH_MODEL_mips_64
# include "adfiles/ad_mips_64.hpp"
#endif

OptoReg::Name OptoReg::c_frame_pointer;

const RegMask *Matcher::idealreg2regmask[_last_machine_leaf];
RegMask Matcher::mreg2regmask[_last_Mach_Reg];
RegMask Matcher::STACK_ONLY_mask;
RegMask Matcher::c_frame_ptr_mask;
const uint Matcher::_begin_rematerialize = _BEGIN_REMATERIALIZE;
const uint Matcher::_end_rematerialize = _END_REMATERIALIZE;

//---------------------------Matcher-------------------------------------------
Matcher::Matcher()
  : PhaseTransform( Phase::Ins_Select ),
#ifdef ASSERT
  _old2new_map(C->comp_arena()),
  _new2old_map(C->comp_arena()),
#endif
  _shared_nodes(C->comp_arena()),
  _reduceOp(reduceOp), _leftOp(leftOp), _rightOp(rightOp),
  _swallowed(swallowed),
  _begin_inst_chain_rule(_BEGIN_INST_CHAIN_RULE),
  _end_inst_chain_rule(_END_INST_CHAIN_RULE),
  _must_clone(must_clone),
  _register_save_policy(register_save_policy),
  _c_reg_save_policy(c_reg_save_policy),
  _register_save_type(register_save_type),
  _ruleName(ruleName),
  _allocation_started(false),
  _states_arena(Chunk::medium_size, mtCompiler),
  _visited(&_states_arena),
  _shared(&_states_arena),
  _dontcare(&_states_arena) {
  C->set_matcher(this);

  idealreg2spillmask  [Op_RegI] = NULL;
  idealreg2spillmask  [Op_RegN] = NULL;
  idealreg2spillmask  [Op_RegL] = NULL;
  idealreg2spillmask  [Op_RegF] = NULL;
  idealreg2spillmask  [Op_RegD] = NULL;
  idealreg2spillmask  [Op_RegP] = NULL;
  idealreg2spillmask  [Op_VecS] = NULL;
  idealreg2spillmask  [Op_VecD] = NULL;
  idealreg2spillmask  [Op_VecX] = NULL;
  idealreg2spillmask  [Op_VecY] = NULL;
  idealreg2spillmask  [Op_RegFlags] = NULL;

  idealreg2debugmask  [Op_RegI] = NULL;
  idealreg2debugmask  [Op_RegN] = NULL;
  idealreg2debugmask  [Op_RegL] = NULL;
  idealreg2debugmask  [Op_RegF] = NULL;
  idealreg2debugmask  [Op_RegD] = NULL;
  idealreg2debugmask  [Op_RegP] = NULL;
  idealreg2debugmask  [Op_VecS] = NULL;
  idealreg2debugmask  [Op_VecD] = NULL;
  idealreg2debugmask  [Op_VecX] = NULL;
  idealreg2debugmask  [Op_VecY] = NULL;
  idealreg2debugmask  [Op_RegFlags] = NULL;

  idealreg2mhdebugmask[Op_RegI] = NULL;
  idealreg2mhdebugmask[Op_RegN] = NULL;
  idealreg2mhdebugmask[Op_RegL] = NULL;
  idealreg2mhdebugmask[Op_RegF] = NULL;
  idealreg2mhdebugmask[Op_RegD] = NULL;
  idealreg2mhdebugmask[Op_RegP] = NULL;
  idealreg2mhdebugmask[Op_VecS] = NULL;
  idealreg2mhdebugmask[Op_VecD] = NULL;
  idealreg2mhdebugmask[Op_VecX] = NULL;
  idealreg2mhdebugmask[Op_VecY] = NULL;
  idealreg2mhdebugmask[Op_RegFlags] = NULL;

  debug_only(_mem_node = NULL;)   // Ideal memory node consumed by mach node
}

//------------------------------warp_incoming_stk_arg------------------------
// This warps a VMReg into an OptoReg::Name
OptoReg::Name Matcher::warp_incoming_stk_arg( VMReg reg ) {
  OptoReg::Name warped;
  if( reg->is_stack() ) {       // Stack slot argument?
    warped = OptoReg::add(_old_SP, reg->reg2stack() );
    warped = OptoReg::add(warped, C->out_preserve_stack_slots());
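    // Illustrative example (values are hypothetical): with
    // out_preserve_stack_slots() == 2, the VMReg for incoming stack slot 3
    // warps to OptoReg _old_SP + 3 + 2, i.e. a slot biased past the
    // preserve area.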
    if( warped >= _in_arg_limit )
      _in_arg_limit = OptoReg::add(warped, 1); // Bump max stack slot seen
    if (!RegMask::can_represent_arg(warped)) {
      // the compiler cannot represent this method's calling sequence
      C->record_method_not_compilable_all_tiers("unsupported incoming calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}

//---------------------------compute_old_SP------------------------------------
OptoReg::Name Compile::compute_old_SP() {
  int fixed    = fixed_slots();
  int preserve = in_preserve_stack_slots();
  return OptoReg::stack2reg(round_to(fixed + preserve, Matcher::stack_alignment_in_slots()));
}



#ifdef ASSERT
void Matcher::verify_new_nodes_only(Node* xroot) {
  // Make sure that the new graph only references new nodes
  ResourceMark rm;
  Unique_Node_List worklist;
  VectorSet visited(Thread::current()->resource_area());
  worklist.push(xroot);
  while (worklist.size() > 0) {
    Node* n = worklist.pop();
    visited <<= n->_idx;
    assert(C->node_arena()->contains(n), "dead node");
    for (uint j = 0; j < n->req(); j++) {
      Node* in = n->in(j);
      if (in != NULL) {
        assert(C->node_arena()->contains(in), "dead node");
        if (!visited.test(in->_idx)) {
          worklist.push(in);
        }
      }
    }
  }
}
#endif


//---------------------------match---------------------------------------------
void Matcher::match( ) {
  if( MaxLabelRootDepth < 100 ) { // Too small?
    assert(false, "invalid MaxLabelRootDepth, increase it to 100 minimum");
    MaxLabelRootDepth = 100;
  }
  // One-time initialization of some register masks.
  init_spill_mask( C->root()->in(1) );
  _return_addr_mask = return_addr();
#ifdef _LP64
  // Pointers take 2 slots in 64-bit land
  _return_addr_mask.Insert(OptoReg::add(return_addr(),1));
#endif

  // Map a Java-signature return type into return register-value
  // machine registers for 0, 1 and 2 returned values.
  const TypeTuple *range = C->tf()->range();
  if( range->cnt() > TypeFunc::Parms ) { // If not a void function
    // Get ideal-register return type
    uint ireg = range->field_at(TypeFunc::Parms)->ideal_reg();
    // Get machine return register
    uint sop = C->start()->Opcode();
    OptoRegPair regs = return_value(ireg, false);
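    // Illustrative example (x86_64): a long return comes back in RAX, so
    // regs names RAX plus its second 32-bit half, and the mask built below
    // covers both slots.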

    // And mask for same
    _return_value_mask = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      _return_value_mask.Insert(regs.second());
  }

  // ---------------
  // Frame Layout

  // Need the method signature to determine the incoming argument types,
  // because the types determine which registers the incoming arguments are
  // in, and this affects the matched code.
  const TypeTuple *domain = C->tf()->domain();
  uint argcnt = domain->cnt() - TypeFunc::Parms;
  BasicType *sig_bt        = NEW_RESOURCE_ARRAY( BasicType, argcnt );
  VMRegPair *vm_parm_regs  = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
  _parm_regs               = NEW_RESOURCE_ARRAY( OptoRegPair, argcnt );
  _calling_convention_mask = NEW_RESOURCE_ARRAY( RegMask, argcnt );
  uint i;
  for( i = 0; i<argcnt; i++ ) {
    sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
  }

  // Pass array of ideal registers and length to USER code (from the AD file)
  // that will convert this to an array of register numbers.
  const StartNode *start = C->start();
  start->calling_convention( sig_bt, vm_parm_regs, argcnt );
#ifdef ASSERT
  // Sanity check users' calling convention.  Real handy while trying to
  // get the initial port correct.
  { for (uint i = 0; i<argcnt; i++) {
      if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
        assert(domain->field_at(i+TypeFunc::Parms)==Type::HALF, "only allowed on halves" );
        _parm_regs[i].set_bad();
        continue;
      }
      VMReg parm_reg = vm_parm_regs[i].first();
      assert(parm_reg->is_valid(), "invalid arg?");
      if (parm_reg->is_reg()) {
        OptoReg::Name opto_parm_reg = OptoReg::as_OptoReg(parm_reg);
        assert(can_be_java_arg(opto_parm_reg) ||
               C->stub_function() == CAST_FROM_FN_PTR(address, OptoRuntime::rethrow_C) ||
               opto_parm_reg == inline_cache_reg(),
               "parameters in register must be preserved by runtime stubs");
      }
      for (uint j = 0; j < i; j++) {
        assert(parm_reg != vm_parm_regs[j].first(),
               "calling conv. must produce distinct regs");
      }
    }
  }
#endif

  // Do some initial frame layout.

  // Compute the old incoming SP (may be called FP) as
  //   OptoReg::stack0() + locks + in_preserve_stack_slots + pad2.
  _old_SP = C->compute_old_SP();
  assert( is_even(_old_SP), "must be even" );

  // Compute highest incoming stack argument as
  //   _old_SP + out_preserve_stack_slots + incoming argument size.
  _in_arg_limit = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  assert( is_even(_in_arg_limit), "out_preserve must be even" );
  for( i = 0; i < argcnt; i++ ) {
    // Permit args to have no register
    _calling_convention_mask[i].Clear();
    if( !vm_parm_regs[i].first()->is_valid() && !vm_parm_regs[i].second()->is_valid() ) {
      continue;
    }
    // calling_convention returns stack arguments as a count of
    // slots beyond OptoReg::stack0()/VMRegImpl::stack0.  We need to convert this to
    // the allocator's point of view, taking into account all the
    // preserve area, locks & pad2.

    OptoReg::Name reg1 = warp_incoming_stk_arg(vm_parm_regs[i].first());
    if( OptoReg::is_valid(reg1))
      _calling_convention_mask[i].Insert(reg1);

    OptoReg::Name reg2 = warp_incoming_stk_arg(vm_parm_regs[i].second());
    if( OptoReg::is_valid(reg2))
      _calling_convention_mask[i].Insert(reg2);

    // Saved biased stack-slot register number
    _parm_regs[i].set_pair(reg2, reg1);
  }

  // Finally, make sure the incoming arguments take up an even number of
  // words, in case the arguments or locals need to contain doubleword stack
  // slots.  The rest of the system assumes that stack slot pairs (in
  // particular, in the spill area) which look aligned will in fact be
  // aligned relative to the stack pointer in the target machine.  Double
  // stack slots will always be allocated aligned.
  _new_SP = OptoReg::Name(round_to(_in_arg_limit, RegMask::SlotsPerLong));
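  // e.g., an _in_arg_limit of 7 slots rounds up to 8, so long/double spill
  // pairs stay aligned.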

  // Compute highest outgoing stack argument as
  //   _new_SP + out_preserve_stack_slots + max(outgoing argument size).
  _out_arg_limit = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(_out_arg_limit), "out_preserve must be even" );

  if (!RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1))) {
    // the compiler cannot represent this method's calling sequence
    C->record_method_not_compilable("must be able to represent all call arguments in reg mask");
  }

  if (C->failing())  return;  // bailed out on incoming arg failure

  // ---------------
  // Collect roots of matcher trees.  Every node for which
  // _shared[_idx] is cleared is guaranteed to not be shared, and thus
  // can be a valid interior of some tree.
  find_shared( C->root() );
  find_shared( C->top() );

  C->print_method(PHASE_BEFORE_MATCHING);

  // Create new ideal node ConP #NULL even if it does exist in old space
  // to avoid false sharing if the corresponding mach node is not used.
  // The corresponding mach node is only used in rare cases for derived
  // pointers.
  Node* new_ideal_null = ConNode::make(C, TypePtr::NULL_PTR);

  // Swap out to old-space; emptying new-space
  Arena *old = C->node_arena()->move_contents(C->old_arena());

  // Save debug and profile information for nodes in old space:
  _old_node_note_array = C->node_note_array();
  if (_old_node_note_array != NULL) {
    C->set_node_note_array(new(C->comp_arena()) GrowableArray<Node_Notes*>
                           (C->comp_arena(), _old_node_note_array->length(),
                            0, NULL));
  }

  // Pre-size the new_node table to avoid the need for range checks.
  grow_new_node_array(C->unique());

  // Reset node counter so MachNodes start with _idx at 0
  int live_nodes = C->live_nodes();
  C->set_unique(0);
  C->reset_dead_node_list();

  // Recursively match trees from old space into new space.
  // Correct leaves of new-space Nodes; they point to old-space.
  _visited.Clear();             // Clear visit bits for xform call
  C->set_cached_top_node(xform( C->top(), live_nodes));
  if (!C->failing()) {
    Node* xroot = xform( C->root(), 1 );
    if (xroot == NULL) {
      Matcher::soft_match_failure();  // recursive matching process failed
      C->record_method_not_compilable("instruction match failed");
    } else {
      // During matching shared constants were attached to C->root()
      // because xroot wasn't available yet, so transfer the uses to
      // the xroot.
      for( DUIterator_Fast jmax, j = C->root()->fast_outs(jmax); j < jmax; j++ ) {
        Node* n = C->root()->fast_out(j);
        if (C->node_arena()->contains(n)) {
          assert(n->in(0) == C->root(), "should be control user");
          n->set_req(0, xroot);
          --j;
          --jmax;
        }
      }

      // Generate new mach node for ConP #NULL
      assert(new_ideal_null != NULL, "sanity");
      _mach_null = match_tree(new_ideal_null);
      // Don't set control, it will confuse GCM since there are no uses.
      // The control will be set when this node is used first time
      // in find_base_for_derived().
      assert(_mach_null != NULL, "");

      C->set_root(xroot->is_Root() ? xroot->as_Root() : NULL);

#ifdef ASSERT
      verify_new_nodes_only(xroot);
#endif
    }
  }
  if (C->top() == NULL || C->root() == NULL) {
    C->record_method_not_compilable("graph lost"); // %%% cannot happen?
  }
  if (C->failing()) {
    // delete old;
    old->destruct_contents();
    return;
  }
  assert( C->top(), "" );
  assert( C->root(), "" );
  validate_null_checks();

  // Now smoke old-space
  NOT_DEBUG( old->destruct_contents() );

  // ------------------------
  // Set up save-on-entry registers
  Fixup_Save_On_Entry( );
}


//------------------------------Fixup_Save_On_Entry----------------------------
// The stated purpose of this routine is to take care of save-on-entry
// registers.  However, the overall goal of the Match phase is to convert into
// machine-specific instructions which have RegMasks to guide allocation.
// So what this procedure really does is put a valid RegMask on each input
// to the machine-specific variations of all Return, TailCall and Halt
// instructions.  It also adds edges to define the save-on-entry values (and of
// course gives them a mask).

static RegMask *init_input_masks( uint size, RegMask &ret_adr, RegMask &fp ) {
  RegMask *rms = NEW_RESOURCE_ARRAY( RegMask, size );
  // Do all the pre-defined register masks
  rms[TypeFunc::Control  ] = RegMask::Empty;
  rms[TypeFunc::I_O      ] = RegMask::Empty;
  rms[TypeFunc::Memory   ] = RegMask::Empty;
  rms[TypeFunc::ReturnAdr] = ret_adr;
  rms[TypeFunc::FramePtr ] = fp;
  return rms;
}

//---------------------------init_first_stack_mask-----------------------------
// Create the initial stack mask used by values spilling to the stack.
// Disallow any debug info in outgoing argument areas by setting the
// initial mask accordingly.
void Matcher::init_first_stack_mask() {

  // Allocate storage for spill masks as masks for the appropriate load type.
  RegMask *rms = (RegMask*)C->comp_arena()->Amalloc_D(sizeof(RegMask) * (3*6+4));
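  // 3*6+4 = three mask kinds (spill, debug, mhdebug) for the six scalar
  // ideal register types, plus four vector spill masks (VecS..VecY), as
  // laid out by the index assignments below.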

  idealreg2spillmask  [Op_RegN] = &rms[0];
  idealreg2spillmask  [Op_RegI] = &rms[1];
  idealreg2spillmask  [Op_RegL] = &rms[2];
  idealreg2spillmask  [Op_RegF] = &rms[3];
  idealreg2spillmask  [Op_RegD] = &rms[4];
  idealreg2spillmask  [Op_RegP] = &rms[5];

  idealreg2debugmask  [Op_RegN] = &rms[6];
  idealreg2debugmask  [Op_RegI] = &rms[7];
  idealreg2debugmask  [Op_RegL] = &rms[8];
  idealreg2debugmask  [Op_RegF] = &rms[9];
  idealreg2debugmask  [Op_RegD] = &rms[10];
  idealreg2debugmask  [Op_RegP] = &rms[11];

  idealreg2mhdebugmask[Op_RegN] = &rms[12];
  idealreg2mhdebugmask[Op_RegI] = &rms[13];
  idealreg2mhdebugmask[Op_RegL] = &rms[14];
  idealreg2mhdebugmask[Op_RegF] = &rms[15];
  idealreg2mhdebugmask[Op_RegD] = &rms[16];
  idealreg2mhdebugmask[Op_RegP] = &rms[17];

  idealreg2spillmask  [Op_VecS] = &rms[18];
  idealreg2spillmask  [Op_VecD] = &rms[19];
  idealreg2spillmask  [Op_VecX] = &rms[20];
  idealreg2spillmask  [Op_VecY] = &rms[21];

  OptoReg::Name i;

  // At first, start with the empty mask
  C->FIRST_STACK_mask().Clear();

  // Add in the incoming argument area
  OptoReg::Name init_in = OptoReg::add(_old_SP, C->out_preserve_stack_slots());
  for (i = init_in; i < _in_arg_limit; i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Add in all bits past the outgoing argument area
  guarantee(RegMask::can_represent_arg(OptoReg::add(_out_arg_limit,-1)),
            "must be able to represent all call arguments in reg mask");
  OptoReg::Name init = _out_arg_limit;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1)) {
    C->FIRST_STACK_mask().Insert(i);
  }
  // Finally, set the "infinite stack" bit.
  C->FIRST_STACK_mask().set_AllStack();
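  // The AllStack bit stands in for every stack slot beyond those the
  // fixed-width mask can name explicitly, so spilling is never limited by
  // the mask's size.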

  // Make spill masks.  Registers for their class, plus FIRST_STACK_mask.
  RegMask aligned_stack_mask = C->FIRST_STACK_mask();
  // Keep spill masks aligned.
  aligned_stack_mask.clear_to_pairs();
  assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");

  *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
#ifdef _LP64
  *idealreg2spillmask[Op_RegN] = *idealreg2regmask[Op_RegN];
   idealreg2spillmask[Op_RegN]->OR(C->FIRST_STACK_mask());
   idealreg2spillmask[Op_RegP]->OR(aligned_stack_mask);
#else
   idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());
#endif
  *idealreg2spillmask[Op_RegI] = *idealreg2regmask[Op_RegI];
   idealreg2spillmask[Op_RegI]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegL] = *idealreg2regmask[Op_RegL];
   idealreg2spillmask[Op_RegL]->OR(aligned_stack_mask);
  *idealreg2spillmask[Op_RegF] = *idealreg2regmask[Op_RegF];
   idealreg2spillmask[Op_RegF]->OR(C->FIRST_STACK_mask());
  *idealreg2spillmask[Op_RegD] = *idealreg2regmask[Op_RegD];
   idealreg2spillmask[Op_RegD]->OR(aligned_stack_mask);

  if (Matcher::vector_size_supported(T_BYTE,4)) {
    *idealreg2spillmask[Op_VecS] = *idealreg2regmask[Op_VecS];
     idealreg2spillmask[Op_VecS]->OR(C->FIRST_STACK_mask());
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    // For VecD we need dual alignment and 8 bytes (2 slots) for spills.
    // RA guarantees such alignment since it is needed for Double and Long values.
    *idealreg2spillmask[Op_VecD] = *idealreg2regmask[Op_VecD];
     idealreg2spillmask[Op_VecD]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    // For VecX we need quad alignment and 16 bytes (4 slots) for spills.
    //
    // RA can use input arguments stack slots for spills but until RA
    // we don't know frame size and offset of input arg stack slots.
    //
    // Exclude last input arg stack slots to avoid spilling vectors there
    // otherwise vector spills could stomp over stack slots in caller frame.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecX); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecX);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecX] = *idealreg2regmask[Op_VecX];
     idealreg2spillmask[Op_VecX]->OR(aligned_stack_mask);
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    // For VecY we need octo alignment and 32 bytes (8 slots) for spills.
    OptoReg::Name in = OptoReg::add(_in_arg_limit, -1);
    for (int k = 1; (in >= init_in) && (k < RegMask::SlotsPerVecY); k++) {
      aligned_stack_mask.Remove(in);
      in = OptoReg::add(in, -1);
    }
    aligned_stack_mask.clear_to_sets(RegMask::SlotsPerVecY);
    assert(aligned_stack_mask.is_AllStack(), "should be infinite stack");
    *idealreg2spillmask[Op_VecY] = *idealreg2regmask[Op_VecY];
     idealreg2spillmask[Op_VecY]->OR(aligned_stack_mask);
  }
  if (UseFPUForSpilling) {
    // This mask logic assumes that the spill operations are
    // symmetric and that the registers involved are the same size.
    // On sparc for instance we may have to use 64-bit moves that will
    // kill 2 registers when used with F0-F31.
    idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
#ifdef _LP64
    idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
#else
    idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
#ifdef ARM
    // ARM has support for moving 64bit values between a pair of
    // integer registers and a double register
    idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
    idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
#endif
#endif
  }

  // Make up debug masks.  Any spill slot plus callee-save registers.
  // Caller-save registers are assumed to be trashable by the various
  // inline-cache fixup routines.
  *idealreg2debugmask  [Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2debugmask  [Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2debugmask  [Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2debugmask  [Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2debugmask  [Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2debugmask  [Op_RegP]= *idealreg2spillmask[Op_RegP];

  *idealreg2mhdebugmask[Op_RegN]= *idealreg2spillmask[Op_RegN];
  *idealreg2mhdebugmask[Op_RegI]= *idealreg2spillmask[Op_RegI];
  *idealreg2mhdebugmask[Op_RegL]= *idealreg2spillmask[Op_RegL];
  *idealreg2mhdebugmask[Op_RegF]= *idealreg2spillmask[Op_RegF];
  *idealreg2mhdebugmask[Op_RegD]= *idealreg2spillmask[Op_RegD];
  *idealreg2mhdebugmask[Op_RegP]= *idealreg2spillmask[Op_RegP];

  // Prevent stub compilations from attempting to reference
  // callee-saved registers from debug info
  bool exclude_soe = !Compile::current()->is_method_compilation();

  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // registers the caller has to save do not work
    if( _register_save_policy[i] == 'C' ||
        _register_save_policy[i] == 'A' ||
        (_register_save_policy[i] == 'E' && exclude_soe) ) {
      idealreg2debugmask  [Op_RegN]->Remove(i);
      idealreg2debugmask  [Op_RegI]->Remove(i); // Exclude save-on-call
      idealreg2debugmask  [Op_RegL]->Remove(i); // registers from debug
      idealreg2debugmask  [Op_RegF]->Remove(i); // masks
      idealreg2debugmask  [Op_RegD]->Remove(i);
      idealreg2debugmask  [Op_RegP]->Remove(i);

      idealreg2mhdebugmask[Op_RegN]->Remove(i);
      idealreg2mhdebugmask[Op_RegI]->Remove(i);
      idealreg2mhdebugmask[Op_RegL]->Remove(i);
      idealreg2mhdebugmask[Op_RegF]->Remove(i);
      idealreg2mhdebugmask[Op_RegD]->Remove(i);
      idealreg2mhdebugmask[Op_RegP]->Remove(i);
    }
  }

  // Subtract the register we use to save the SP for MethodHandle
  // invokes from the debug mask.
  const RegMask save_mask = method_handle_invoke_SP_save_mask();
  idealreg2mhdebugmask[Op_RegN]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegI]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegL]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegF]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegD]->SUBTRACT(save_mask);
  idealreg2mhdebugmask[Op_RegP]->SUBTRACT(save_mask);
}

//---------------------------is_save_on_entry----------------------------------
bool Matcher::is_save_on_entry( int reg ) {
  return
    _register_save_policy[reg] == 'E' ||
    _register_save_policy[reg] == 'A' || // Save-on-entry register?
    // Also save argument registers in the trampolining stubs
    (C->save_argument_registers() && is_spillable_arg(reg));
}

//---------------------------Fixup_Save_On_Entry-------------------------------
void Matcher::Fixup_Save_On_Entry( ) {
  init_first_stack_mask();

  Node *root = C->root();       // Short name for root
  // Count number of save-on-entry registers.
  uint soe_cnt = number_of_saved_registers();
  uint i;

  // Find the procedure Start Node
  StartNode *start = C->start();
  assert( start, "Expect a start node" );

  // Save argument registers in the trampolining stubs
  if( C->save_argument_registers() )
    for( i = 0; i < _last_Mach_Reg; i++ )
      if( is_spillable_arg(i) )
        soe_cnt++;

  // Input RegMask array shared by all Returns.
  // The type for doubles and longs has a count of 2, but
  // there is only 1 returned value
  uint ret_edge_cnt = TypeFunc::Parms + ((C->tf()->range()->cnt() == TypeFunc::Parms) ? 0 : 1);
  RegMask *ret_rms  = init_input_masks( ret_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Returns have 0 or 1 returned values depending on call signature.
  // Return register is specified by return_value in the AD file.
  if (ret_edge_cnt > TypeFunc::Parms)
    ret_rms[TypeFunc::Parms+0] = _return_value_mask;

  // Input RegMask array shared by all Rethrows.
  uint reth_edge_cnt = TypeFunc::Parms+1;
  RegMask *reth_rms  = init_input_masks( reth_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );
  // Rethrow takes exception oop only, but in the argument 0 slot.
  reth_rms[TypeFunc::Parms] = mreg2regmask[find_receiver(false)];
#ifdef _LP64
  // Need two slots for ptrs in 64-bit land
  reth_rms[TypeFunc::Parms].Insert(OptoReg::add(OptoReg::Name(find_receiver(false)),1));
#endif

  // Input RegMask array shared by all TailCalls
  uint tail_call_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_call_rms = init_input_masks( tail_call_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Input RegMask array shared by all TailJumps
  uint tail_jump_edge_cnt = TypeFunc::Parms+2;
  RegMask *tail_jump_rms = init_input_masks( tail_jump_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // TailCalls have 2 returned values (target & moop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailCall to extract these masks and put the correct masks into
  // the tail_call_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailCall ) {
      tail_call_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_call_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // TailJumps have 2 returned values (target & ex_oop), whose masks come
  // from the usual MachNode/MachOper mechanism.  Find a sample
  // TailJump to extract these masks and put the correct masks into
  // the tail_jump_rms array.
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *m = root->in(i)->as_MachReturn();
    if( m->ideal_Opcode() == Op_TailJump ) {
      tail_jump_rms[TypeFunc::Parms+0] = m->MachNode::in_RegMask(TypeFunc::Parms+0);
      tail_jump_rms[TypeFunc::Parms+1] = m->MachNode::in_RegMask(TypeFunc::Parms+1);
      break;
    }
  }

  // Input RegMask array shared by all Halts
  uint halt_edge_cnt = TypeFunc::Parms;
  RegMask *halt_rms = init_input_masks( halt_edge_cnt + soe_cnt, _return_addr_mask, c_frame_ptr_mask );

  // Capture the return input masks into each exit flavor
  for( i=1; i < root->req(); i++ ) {
    MachReturnNode *exit = root->in(i)->as_MachReturn();
    switch( exit->ideal_Opcode() ) {
      case Op_Return   : exit->_in_rms = ret_rms;  break;
      case Op_Rethrow  : exit->_in_rms = reth_rms; break;
      case Op_TailCall : exit->_in_rms = tail_call_rms; break;
      case Op_TailJump : exit->_in_rms = tail_jump_rms; break;
      case Op_Halt     : exit->_in_rms = halt_rms; break;
      default          : ShouldNotReachHere();
    }
  }

  // Next unused projection number from Start.
  int proj_cnt = C->tf()->domain()->cnt();

  // Do all the save-on-entry registers.  Make projections from Start for
  // them, and give them a use at the exit points.  To the allocator, they
  // look like incoming register arguments.
  for( i = 0; i < _last_Mach_Reg; i++ ) {
    if( is_save_on_entry(i) ) {

      // Add the save-on-entry to the mask array
      ret_rms      [      ret_edge_cnt] = mreg2regmask[i];
      reth_rms     [     reth_edge_cnt] = mreg2regmask[i];
      tail_call_rms[tail_call_edge_cnt] = mreg2regmask[i];
      tail_jump_rms[tail_jump_edge_cnt] = mreg2regmask[i];
      // Halts need the SOE registers, but only in the stack as debug info.
      // A just-prior uncommon-trap or deoptimization will use the SOE regs.
      halt_rms     [     halt_edge_cnt] = *idealreg2spillmask[_register_save_type[i]];

      Node *mproj;

      // Is this a RegF low half of a RegD?  Double up 2 adjacent RegF's
      // into a single RegD.
      if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegF &&
          _register_save_type[i+1] == Op_RegF &&
          is_save_on_entry(i+1) ) {
        // Add other bit for double
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegD );
        proj_cnt += 2;          // Skip 2 for doubles
      }
      else if( (i&1) == 1 &&    // Else check for high half of double
               _register_save_type[i-1] == Op_RegF &&
               _register_save_type[i  ] == Op_RegF &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      }
      // Is this a RegI low half of a RegL?  Double up 2 adjacent RegI's
      // into a single RegL.
      else if( (i&1) == 0 &&
          _register_save_type[i  ] == Op_RegI &&
          _register_save_type[i+1] == Op_RegI &&
          is_save_on_entry(i+1) ) {
        // Add other bit for long
        ret_rms      [      ret_edge_cnt].Insert(OptoReg::Name(i+1));
        reth_rms     [     reth_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_call_rms[tail_call_edge_cnt].Insert(OptoReg::Name(i+1));
        tail_jump_rms[tail_jump_edge_cnt].Insert(OptoReg::Name(i+1));
        halt_rms     [     halt_edge_cnt].Insert(OptoReg::Name(i+1));
        mproj = new (C) MachProjNode( start, proj_cnt, ret_rms[ret_edge_cnt], Op_RegL );
        proj_cnt += 2;          // Skip 2 for longs
      }
      else if( (i&1) == 1 &&    // Else check for high half of long
               _register_save_type[i-1] == Op_RegI &&
               _register_save_type[i  ] == Op_RegI &&
               is_save_on_entry(i-1) ) {
        ret_rms      [      ret_edge_cnt] = RegMask::Empty;
        reth_rms     [     reth_edge_cnt] = RegMask::Empty;
        tail_call_rms[tail_call_edge_cnt] = RegMask::Empty;
        tail_jump_rms[tail_jump_edge_cnt] = RegMask::Empty;
        halt_rms     [     halt_edge_cnt] = RegMask::Empty;
        mproj = C->top();
      } else {
        // Make a projection for it off the Start
        mproj = new (C) MachProjNode( start, proj_cnt++, ret_rms[ret_edge_cnt], _register_save_type[i] );
      }

      ret_edge_cnt ++;
      reth_edge_cnt ++;
      tail_call_edge_cnt ++;
      tail_jump_edge_cnt ++;
      halt_edge_cnt ++;

      // Add a use of the SOE register to all exit paths
      for( uint j=1; j < root->req(); j++ )
        root->in(j)->add_req(mproj);
    } // End of if a save-on-entry register
  } // End of for all machine registers
}

//------------------------------init_spill_mask--------------------------------
void Matcher::init_spill_mask( Node *ret ) {
  if( idealreg2regmask[Op_RegI] ) return; // One time only init

  OptoReg::c_frame_pointer = c_frame_pointer();
  c_frame_ptr_mask = c_frame_pointer();
#ifdef _LP64
  // pointers are twice as big
  c_frame_ptr_mask.Insert(OptoReg::add(c_frame_pointer(),1));
#endif

  // Start at OptoReg::stack0()
  STACK_ONLY_mask.Clear();
  OptoReg::Name init = OptoReg::stack2reg(0);
  // STACK_ONLY_mask is all stack bits
  OptoReg::Name i;
  for (i = init; RegMask::can_represent(i); i = OptoReg::add(i,1))
    STACK_ONLY_mask.Insert(i);
  // Also set the "infinite stack" bit.
  STACK_ONLY_mask.set_AllStack();

  // Copy the register names over into the shared world
  for( i=OptoReg::Name(0); i<OptoReg::Name(_last_Mach_Reg); i = OptoReg::add(i,1) ) {
    // SharedInfo::regName[i] = regName[i];
    // Handy RegMasks per machine register
    mreg2regmask[i].Insert(i);
  }

  // Grab the Frame Pointer
  Node *fp  = ret->in(TypeFunc::FramePtr);
  Node *mem = ret->in(TypeFunc::Memory);
  const TypePtr* atp = TypePtr::BOTTOM;
  // Share frame pointer while making spill ops
  set_shared(fp);

  // Compute generic short-offset Loads
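  // Trick: match a dummy load of each ideal type; the out_RegMask() of the
  // resulting MachNode tells us which machine registers can hold values of
  // that type, which is memoized in idealreg2regmask[] below.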
#ifdef _LP64
  MachNode *spillCP = match_tree(new (C) LoadNNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
#endif
  MachNode *spillI  = match_tree(new (C) LoadINode(NULL,mem,fp,atp,TypeInt::INT,MemNode::unordered));
  MachNode *spillL  = match_tree(new (C) LoadLNode(NULL,mem,fp,atp,TypeLong::LONG,MemNode::unordered, LoadNode::DependsOnlyOnTest, false));
  MachNode *spillF  = match_tree(new (C) LoadFNode(NULL,mem,fp,atp,Type::FLOAT,MemNode::unordered));
  MachNode *spillD  = match_tree(new (C) LoadDNode(NULL,mem,fp,atp,Type::DOUBLE,MemNode::unordered));
  MachNode *spillP  = match_tree(new (C) LoadPNode(NULL,mem,fp,atp,TypeInstPtr::BOTTOM,MemNode::unordered));
  assert(spillI != NULL && spillL != NULL && spillF != NULL &&
         spillD != NULL && spillP != NULL, "");
  // Get the ADLC notion of the right regmask, for each basic type.
#ifdef _LP64
  idealreg2regmask[Op_RegN] = &spillCP->out_RegMask();
#endif
  idealreg2regmask[Op_RegI] = &spillI->out_RegMask();
  idealreg2regmask[Op_RegL] = &spillL->out_RegMask();
  idealreg2regmask[Op_RegF] = &spillF->out_RegMask();
  idealreg2regmask[Op_RegD] = &spillD->out_RegMask();
  idealreg2regmask[Op_RegP] = &spillP->out_RegMask();

  // Vector regmasks.
  if (Matcher::vector_size_supported(T_BYTE,4)) {
    TypeVect::VECTS = TypeVect::make(T_BYTE, 4);
    MachNode *spillVectS = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTS));
    idealreg2regmask[Op_VecS] = &spillVectS->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,2)) {
    MachNode *spillVectD = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTD));
    idealreg2regmask[Op_VecD] = &spillVectD->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,4)) {
    MachNode *spillVectX = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTX));
    idealreg2regmask[Op_VecX] = &spillVectX->out_RegMask();
  }
  if (Matcher::vector_size_supported(T_FLOAT,8)) {
    MachNode *spillVectY = match_tree(new (C) LoadVectorNode(NULL,mem,fp,atp,TypeVect::VECTY));
    idealreg2regmask[Op_VecY] = &spillVectY->out_RegMask();
  }
}

#ifdef ASSERT
static void match_alias_type(Compile* C, Node* n, Node* m) {
  if (!VerifyAliases)  return;  // do not go looking for trouble by default
  const TypePtr* nat = n->adr_type();
  const TypePtr* mat = m->adr_type();
  int nidx = C->get_alias_index(nat);
  int midx = C->get_alias_index(mat);
  // Detune the assert for cases like (AndI 0xFF (LoadB p)).
  if (nidx == Compile::AliasIdxTop && midx >= Compile::AliasIdxRaw) {
    for (uint i = 1; i < n->req(); i++) {
      Node* n1 = n->in(i);
      const TypePtr* n1at = n1->adr_type();
      if (n1at != NULL) {
        nat = n1at;
        nidx = C->get_alias_index(n1at);
      }
    }
  }
  // %%% Kludgery.  Instead, fix ideal adr_type methods for all these cases:
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxRaw) {
    switch (n->Opcode()) {
    case Op_PrefetchRead:
    case Op_PrefetchWrite:
    case Op_PrefetchAllocation:
      nidx = Compile::AliasIdxRaw;
      nat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxRaw && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_ClearArray:
      midx = Compile::AliasIdxRaw;
      mat = TypeRawPtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxTop && midx == Compile::AliasIdxBot) {
    switch (n->Opcode()) {
    case Op_Return:
    case Op_Rethrow:
    case Op_Halt:
    case Op_TailCall:
    case Op_TailJump:
      nidx = Compile::AliasIdxBot;
      nat = TypePtr::BOTTOM;
      break;
    }
  }
  if (nidx == Compile::AliasIdxBot && midx == Compile::AliasIdxTop) {
    switch (n->Opcode()) {
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_MemBarVolatile:
    case Op_MemBarCPUOrder: // %%% these ideals should have narrower adr_type?
    case Op_EncodeISOArray:
      nidx = Compile::AliasIdxTop;
      nat = NULL;
      break;
    }
  }
  if (nidx != midx) {
    if (PrintOpto || (PrintMiscellaneous && (WizardMode || Verbose))) {
      tty->print_cr("==== Matcher alias shift %d => %d", nidx, midx);
      n->dump();
      m->dump();
    }
    assert(C->subsume_loads() && C->must_alias(nat, midx),
           "must not lose alias info when matching");
  }
}
#endif


//------------------------------MStack-----------------------------------------
// State and MStack class used in xform() and find_shared() iterative methods.
enum Node_State { Pre_Visit,  // node has to be pre-visited
                  Visit,  // visit node
                  Post_Visit,  // post-visit node
                  Alt_Post_Visit   // alternative post-visit path
};

class MStack: public Node_Stack {
  public:
    MStack(int size) : Node_Stack(size) { }

    void push(Node *n, Node_State ns) {
      Node_Stack::push(n, (uint)ns);
    }
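    // The four-argument push records two stack entries: (parent, input
    // index) underneath (node, state), so parent() below can pop the top
    // entry and expose the parent record.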
    void push(Node *n, Node_State ns, Node *parent, int indx) {
      ++_inode_top;
      if ((_inode_top + 1) >= _inode_max) grow();
      _inode_top->node = parent;
      _inode_top->indx = (uint)indx;
      ++_inode_top;
      _inode_top->node = n;
      _inode_top->indx = (uint)ns;
    }
    Node *parent() {
      pop();
      return node();
    }
    Node_State state() const {
      return (Node_State)index();
    }
    void set_state(Node_State ns) {
      set_index((uint)ns);
    }
};


//------------------------------xform------------------------------------------
// Given a Node in old-space, Match him (Label/Reduce) to produce a machine
// Node in new-space.  Given a new-space Node, recursively walk his children.
Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
Node *Matcher::xform( Node *n, int max_stack ) {
  // Use one stack to keep both: child's node/state and parent's node/index
  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
  mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root

  while (mstack.is_nonempty()) {
    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    if (C->failing()) return NULL;
    n = mstack.node();          // Leave node on stack
    Node_State nstate = mstack.state();
    if (nstate == Visit) {
      mstack.set_state(Post_Visit);
      Node *oldn = n;
      // Old-space or new-space check
      if (!C->node_arena()->contains(n)) {
        // Old space!
        Node* m;
        if (has_new_node(n)) {  // Not yet Label/Reduced
          m = new_node(n);
        } else {
          if (!is_dontcare(n)) { // Matcher can match this guy
            // Calls match special.  They match alone with no children.
            // Their children, the incoming arguments, match normally.
            m = n->is_SafePoint() ? match_sfpt(n->as_SafePoint()):match_tree(n);
            if (C->failing())  return NULL;
            if (m == NULL) { Matcher::soft_match_failure(); return NULL; }
          } else {                  // Nothing the matcher cares about
            if (n->is_Proj() && n->in(0) != NULL && n->in(0)->is_Multi()) {       // Projections?
              // Convert to machine-dependent projection
              m = n->in(0)->as_Multi()->match( n->as_Proj(), this );
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              if (m->in(0) != NULL) // m might be top
                collect_null_checks(m, n);
            } else {                // Else just a regular 'ol guy
              m = n->clone();       // So just clone into new-space
#ifdef ASSERT
              _new2old_map.map(m->_idx, n);
#endif
              // Def-Use edges will be added incrementally as Uses
              // of this node are matched.
              assert(m->outcnt() == 0, "no Uses of this clone yet");
            }
          }

          set_new_node(n, m);       // Map old to new
          if (_old_node_note_array != NULL) {
            Node_Notes* nn = C->locate_node_notes(_old_node_note_array,
                                                  n->_idx);
            C->set_node_notes_at(m->_idx, nn);
          }
          debug_only(match_alias_type(C, n, m));
        }
        n = m;    // n is now a new-space node
        mstack.set_node(n);
      }

      // New space!
      if (_visited.test_set(n->_idx)) continue; // while(mstack.is_nonempty())

      int i;
      // Put precedence edges on stack first (match them last).
      for (i = oldn->req(); (uint)i < oldn->len(); i++) {
        Node *m = oldn->in(i);
        if (m == NULL) break;
        // set -1 to call add_prec() instead of set_req() during Step1
        mstack.push(m, Visit, n, -1);
      }

      // For constant debug info, I'd rather have unmatched constants.
      int cnt = n->req();
      JVMState* jvms = n->jvms();
      int debug_cnt = jvms ? jvms->debug_start() : cnt;

      // Now do only debug info.  Clone constants rather than matching.
      // Constants are represented directly in the debug info without
      // the need for executable machine instructions.
      // Monitor boxes are also represented directly.
      for (i = cnt - 1; i >= debug_cnt; --i) { // For all debug inputs do
        Node *m = n->in(i);          // Get input
        int op = m->Opcode();
        assert((op == Op_BoxLock) == jvms->is_monitor_use(i), "boxes only at monitor sites");
        if( op == Op_ConI || op == Op_ConP || op == Op_ConN || op == Op_ConNKlass ||
            op == Op_ConF || op == Op_ConD || op == Op_ConL
            // || op == Op_BoxLock  // %%%% enable this and remove (+++) in chaitin.cpp
            ) {
          m = m->clone();
#ifdef ASSERT
          _new2old_map.map(m->_idx, n);
#endif
          mstack.push(m, Post_Visit, n, i); // Don't need to visit
          mstack.push(m->in(0), Visit, m, 0);
        } else {
          mstack.push(m, Visit, n, i);
        }
      }

      // And now walk his children, and convert his inputs to new-space.
      for( ; i >= 0; --i ) { // For all normal inputs do
        Node *m = n->in(i);  // Get input
        if(m != NULL)
          mstack.push(m, Visit, n, i);
      }

    }
    else if (nstate == Post_Visit) {
      // Set xformed input
      Node *p = mstack.parent();
      if (p != NULL) { // root doesn't have parent
        int i = (int)mstack.index();
        if (i >= 0)
          p->set_req(i, n); // required input
        else if (i == -1)
          p->add_prec(n);   // precedence input
        else
          ShouldNotReachHere();
      }
      mstack.pop(); // remove processed node from stack
    }
    else {
      ShouldNotReachHere();
    }
  } // while (mstack.is_nonempty())
  return n; // Return new-space Node
}

//------------------------------warp_outgoing_stk_arg------------------------
OptoReg::Name Matcher::warp_outgoing_stk_arg( VMReg reg, OptoReg::Name begin_out_arg_area, OptoReg::Name &out_arg_limit_per_call ) {
  // Convert outgoing argument location to a pre-biased stack offset
  if (reg->is_stack()) {
    OptoReg::Name warped = reg->reg2stack();
    // Adjust the stack slot offset to be the register number used
    // by the allocator.
    warped = OptoReg::add(begin_out_arg_area, warped);
    // Keep track of the largest numbered stack slot used for an arg.
    // Largest used slot per call-site indicates the amount of stack
    // that is killed by the call.
    if( warped >= out_arg_limit_per_call )
      out_arg_limit_per_call = OptoReg::add(warped,1);
    if (!RegMask::can_represent_arg(warped)) {
      C->record_method_not_compilable_all_tiers("unsupported calling sequence");
      return OptoReg::Bad;
    }
    return warped;
  }
  return OptoReg::as_OptoReg(reg);
}


//------------------------------match_sfpt-------------------------------------
// Helper function to match call instructions.  Calls match special.
// They match alone with no children.  Their children, the incoming
// arguments, match normally.
MachNode *Matcher::match_sfpt( SafePointNode *sfpt ) {
  MachSafePointNode *msfpt = NULL;
  MachCallNode *mcall = NULL;
  uint cnt;
  // Split out case for SafePoint vs Call
  CallNode *call;
  const TypeTuple *domain;
  ciMethod* method = NULL;
  bool is_method_handle_invoke = false;  // for special kill effects
  if( sfpt->is_Call() ) {
    call = sfpt->as_Call();
    domain = call->tf()->domain();
    cnt = domain->cnt();

    // Match just the call, nothing else
    MachNode *m = match_tree(call);
    if (C->failing())  return NULL;
    if( m == NULL ) { Matcher::soft_match_failure(); return NULL; }

    // Copy data from the Ideal SafePoint to the machine version
    mcall = m->as_MachCall();

    mcall->set_tf(         call->tf());
    mcall->set_entry_point(call->entry_point());
    mcall->set_cnt(        call->cnt());

    if( mcall->is_MachCallJava() ) {
      MachCallJavaNode *mcall_java  = mcall->as_MachCallJava();
      const CallJavaNode *call_java =  call->as_CallJava();
      method = call_java->method();
      mcall_java->_method = method;
      mcall_java->_bci = call_java->_bci;
      mcall_java->_optimized_virtual = call_java->is_optimized_virtual();
      is_method_handle_invoke = call_java->is_method_handle_invoke();
      mcall_java->_method_handle_invoke = is_method_handle_invoke;
      if (is_method_handle_invoke) {
        C->set_has_method_handle_invokes(true);
      }
      if( mcall_java->is_MachCallStaticJava() )
        mcall_java->as_MachCallStaticJava()->_name =
         call_java->as_CallStaticJava()->_name;
      if( mcall_java->is_MachCallDynamicJava() )
        mcall_java->as_MachCallDynamicJava()->_vtable_index =
         call_java->as_CallDynamicJava()->_vtable_index;
    }
    else if( mcall->is_MachCallRuntime() ) {
      mcall->as_MachCallRuntime()->_name = call->as_CallRuntime()->_name;
    }
    msfpt = mcall;
  }
  // This is a non-call safepoint
  else {
    call = NULL;
    domain = NULL;
    MachNode *mn = match_tree(sfpt);
    if (C->failing())  return NULL;
    msfpt = mn->as_MachSafePoint();
    cnt = TypeFunc::Parms;
  }

  // Advertise the correct memory effects (for anti-dependence computation).
  msfpt->set_adr_type(sfpt->adr_type());

  // Allocate a private array of RegMasks.  These RegMasks are not shared.
  msfpt->_in_rms = NEW_RESOURCE_ARRAY( RegMask, cnt );
  // Empty them all.
  memset( msfpt->_in_rms, 0, sizeof(RegMask)*cnt );

  // Do all the pre-defined non-Empty register masks
  msfpt->_in_rms[TypeFunc::ReturnAdr] = _return_addr_mask;
  msfpt->_in_rms[TypeFunc::FramePtr ] = c_frame_ptr_mask;

  // Place where the first outgoing argument can possibly be put.
  OptoReg::Name begin_out_arg_area = OptoReg::add(_new_SP, C->out_preserve_stack_slots());
  assert( is_even(begin_out_arg_area), "" );
  // Compute max outgoing register number per call site.
  OptoReg::Name out_arg_limit_per_call = begin_out_arg_area;
  // Calls to C may hammer extra stack slots above and beyond any arguments.
  // These are usually backing store for register arguments for varargs.
  if( call != NULL && call->is_CallRuntime() )
    out_arg_limit_per_call = OptoReg::add(out_arg_limit_per_call,C->varargs_C_out_slots_killed());
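  // (Illustrative: on Windows x64, for example, a callee may scribble on the
  // four register-argument "home" slots in the caller's frame, which is the
  // kind of extra kill this accounts for.)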


  // Do the normal argument list (parameters) register masks
  int argcnt = cnt - TypeFunc::Parms;
  if( argcnt > 0 ) {          // Skip it all if we have no args
    BasicType *sig_bt  = NEW_RESOURCE_ARRAY( BasicType, argcnt );
    VMRegPair *parm_regs = NEW_RESOURCE_ARRAY( VMRegPair, argcnt );
    int i;
    for( i = 0; i < argcnt; i++ ) {
      sig_bt[i] = domain->field_at(i+TypeFunc::Parms)->basic_type();
    }
    // V-call to pick proper calling convention
    call->calling_convention( sig_bt, parm_regs, argcnt );

#ifdef ASSERT
    // Sanity check users' calling convention.  Really handy during
    // the initial porting effort.  Fairly expensive otherwise.
    { for (int i = 0; i<argcnt; i++) {
        if( !parm_regs[i].first()->is_valid() &&
            !parm_regs[i].second()->is_valid() ) continue;
        VMReg reg1 = parm_regs[i].first();
        VMReg reg2 = parm_regs[i].second();
        for (int j = 0; j < i; j++) {
          if( !parm_regs[j].first()->is_valid() &&
              !parm_regs[j].second()->is_valid() ) continue;
          VMReg reg3 = parm_regs[j].first();
          VMReg reg4 = parm_regs[j].second();
          if( !reg1->is_valid() ) {
            assert( !reg2->is_valid(), "valid halvsies" );
          } else if( !reg3->is_valid() ) {
            assert( !reg4->is_valid(), "valid halvsies" );
          } else {
            assert( reg1 != reg2, "calling conv. must produce distinct regs");
            assert( reg1 != reg3, "calling conv. must produce distinct regs");
            assert( reg1 != reg4, "calling conv. must produce distinct regs");
            assert( reg2 != reg3, "calling conv. must produce distinct regs");
            assert( reg2 != reg4 || !reg2->is_valid(), "calling conv. must produce distinct regs");
            assert( reg3 != reg4, "calling conv. must produce distinct regs");
          }
        }
      }
    }
#endif

    // Visit each argument.  Compute its outgoing register mask.
    // Return results now can have 2 bits returned.
    // Compute max over all outgoing arguments both per call-site
    // and over the entire method.
    for( i = 0; i < argcnt; i++ ) {
      // Address of incoming argument mask to fill in
      RegMask *rm = &mcall->_in_rms[i+TypeFunc::Parms];
      if( !parm_regs[i].first()->is_valid() &&
          !parm_regs[i].second()->is_valid() ) {
        continue;               // Avoid Halves
      }
      // Grab first register, adjust stack slots and insert in mask.
      OptoReg::Name reg1 = warp_outgoing_stk_arg(parm_regs[i].first(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg1))
        rm->Insert( reg1 );
      // Grab second register (if any), adjust stack slots and insert in mask.
      OptoReg::Name reg2 = warp_outgoing_stk_arg(parm_regs[i].second(), begin_out_arg_area, out_arg_limit_per_call );
      if (OptoReg::is_valid(reg2))
        rm->Insert( reg2 );
    } // End of for all arguments

    // Compute number of stack slots needed to restore stack in case of
    // Pascal-style argument popping.
    mcall->_argsize = out_arg_limit_per_call - begin_out_arg_area;
  }

  // Compute the max stack slot killed by any call.  These will not be
  // available for debug info, and will be used to adjust FIRST_STACK_mask
  // after all call sites have been visited.
  if( _out_arg_limit < out_arg_limit_per_call)
    _out_arg_limit = out_arg_limit_per_call;

  if (mcall) {
    // Kill the outgoing argument area, including any non-argument holes and
    // any legacy C-killed slots.  Use Fat-Projections to do the killing.
    // Since the max-per-method covers the max-per-call-site and debug info
    // is excluded on the max-per-method basis, debug info cannot land in
    // this killed area.
    uint r_cnt = mcall->tf()->range()->cnt();
    MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+10000, RegMask::Empty, MachProjNode::fat_proj );
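    // The +10000 just pushes this fat projection's number well past any real
    // result projection of the call, so the two cannot collide.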
    if (!RegMask::can_represent_arg(OptoReg::Name(out_arg_limit_per_call-1))) {
      C->record_method_not_compilable_all_tiers("unsupported outgoing calling sequence");
    } else {
      for (int i = begin_out_arg_area; i < out_arg_limit_per_call; i++)
        proj->_rout.Insert(OptoReg::Name(i));
    }
    if (proj->_rout.is_NotEmpty()) {
      push_projection(proj);
    }
  }
  // Transfer the safepoint information from the call to the mcall
  // Move the JVMState list
  msfpt->set_jvms(sfpt->jvms());
  for (JVMState* jvms = msfpt->jvms(); jvms; jvms = jvms->caller()) {
    jvms->set_map(sfpt);
  }

  // Debug inputs begin just after the last incoming parameter
  assert((mcall == NULL) || (mcall->jvms() == NULL) ||
         (mcall->jvms()->debug_start() + mcall->_jvmadj == mcall->tf()->domain()->cnt()), "");

  // Move the OopMap
  msfpt->_oop_map = sfpt->_oop_map;

  // Add additional edges.
  if (msfpt->mach_constant_base_node_input() != (uint)-1 && !msfpt->is_MachCallLeaf()) {
    // For these calls we can not add MachConstantBase in expand(), as the
    // ins are not complete then.
    msfpt->ins_req(msfpt->mach_constant_base_node_input(), C->mach_constant_base_node());
    if (msfpt->jvms() &&
        msfpt->mach_constant_base_node_input() <= msfpt->jvms()->debug_start() + msfpt->_jvmadj) {
      // We added an edge before jvms, so we must adapt the position of the ins.
      msfpt->jvms()->adapt_position(+1);
    }
  }

  // Registers killed by the call are set in the local scheduling pass
  // of Global Code Motion.
  return msfpt;
}

//---------------------------match_tree----------------------------------------
// Match an Ideal Node DAG - turn it into a tree; Label & Reduce.  Used as part
// of the whole-sale conversion from Ideal to Mach Nodes.  Also used for
// making GotoNodes while building the CFG and in init_spill_mask() to identify
// a Load's result RegMask for memoization in idealreg2regmask[]
MachNode *Matcher::match_tree( const Node *n ) {
  assert( n->Opcode() != Op_Phi, "cannot match" );
  assert( !n->is_block_start(), "cannot match" );
  // Set the mark for all locally allocated State objects.
  // When this call returns, the _states_arena arena will be reset
  // freeing all State objects.
  ResourceMark rm( &_states_arena );

  LabelRootDepth = 0;

  // StoreNodes require their Memory input to match any LoadNodes
  Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
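  // (Node*)1 is a sentinel value meaning "not a Store", so the match places
  // no constraint on Memory inputs (see the mem != (Node*)1 test below).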
#ifdef ASSERT
  Node* save_mem_node = _mem_node;
  _mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
  // State object for root node of match tree
  // Allocate it on _states_arena - stack allocation can cause stack overflow.
  State *s = new (&_states_arena) State;
  s->_kids[0] = NULL;
  s->_kids[1] = NULL;
  s->_leaf = (Node*)n;
  // Label the input tree, allocating labels from top-level arena
  Label_Root( n, s, n->in(0), mem );
  if (C->failing()) return NULL;

  // The minimum cost match for the whole tree is found at the root State
  uint mincost = max_juint;
  uint cost = max_juint;
  uint i;
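  // Scan the root State for the cheapest instruction rule (rule numbers
  // below NUM_OPERANDS denote operands, not instructions); the embedded
  // assignment mincost=i remembers which entry won.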
  for( i = 0; i < NUM_OPERANDS; i++ ) {
    if( s->valid(i) &&                // valid entry and
        s->_cost[i] < cost &&         // low cost and
        s->_rule[i] >= NUM_OPERANDS ) // not an operand
      cost = s->_cost[mincost=i];
  }
  if (mincost == max_juint) {
#ifndef PRODUCT
    tty->print("No matching rule for:");
    s->dump();
#endif
    Matcher::soft_match_failure();
    return NULL;
  }
  // Reduce input tree based upon the state labels to machine Nodes
  MachNode *m = ReduceInst( s, s->_rule[mincost], mem );
#ifdef ASSERT
  _old2new_map.map(n->_idx, m);
  _new2old_map.map(m->_idx, (Node*)n);
#endif

  // Add any Matcher-ignored edges
  uint cnt = n->req();
  uint start = 1;
  if( mem != (Node*)1 ) start = MemNode::Memory+1;
  if( n->is_AddP() ) {
    assert( mem == (Node*)1, "" );
    start = AddPNode::Base+1;
  }
  for( i = start; i < cnt; i++ ) {
    if( !n->match_edge(i) ) {
      if( i < m->req() )
        m->ins_req( i, n->in(i) );
      else
        m->add_req( n->in(i) );
    }
  }

  debug_only( _mem_node = save_mem_node; )
  return m;
}
1445 //------------------------------match_into_reg---------------------------------
1446 // Choose either to match this Node in a register or as part of the
1447 // current match tree. Return true to require a register and false to
1448 // match as part of the current match tree.
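//
// Hedged example: a (LoadI mem adr) shared by both an AddI and a SubI is
// forced to match into a register exactly once, rather than being cloned
// into both match trees; a ConI, by contrast, is never forced (see below).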
1449 static bool match_into_reg( const Node *n, Node *m, Node *control, int i, bool shared ) {
1451 const Type *t = m->bottom_type();
1453 if (t->singleton()) {
1454 // Never force constants into registers. Allow them to match as
1455 // constants or registers. Copies of the same value will share
1456 // the same register. See find_shared_node.
1457 return false;
1458 } else { // Not a constant
1459 // Stop recursion if they have different Controls.
1460 Node* m_control = m->in(0);
1461 // The control of a load's memory can post-dominate the load's control.
1462 // So use it, since the load can't float above its memory.
1463 Node* mem_control = (m->is_Load()) ? m->in(MemNode::Memory)->in(0) : NULL;
1464 if (control && m_control && control != m_control && control != mem_control) {
1466 // Actually, we can live with the most conservative control we
1467 // find, if it post-dominates the others. This allows us to
1468 // pick up load/op/store trees where the load can float a little
1469 // above the store.
1470 Node *x = control;
1471 const uint max_scan = 6; // Arbitrary scan cutoff
1472 uint j;
1473 for (j=0; j<max_scan; j++) {
1474 if (x->is_Region()) // Bail out at merge points
1475 return true;
1476 x = x->in(0);
1477 if (x == m_control) // Does 'control' post-dominate
1478 break; // m->in(0)? If so, we can use it
1479 if (x == mem_control) // Does 'control' post-dominate
1480 break; // mem_control? If so, we can use it
1481 }
1482 if (j == max_scan) // No post-domination before scan end?
1483 return true; // Then break the match tree up
1484 }
1485 if ((m->is_DecodeN() && Matcher::narrow_oop_use_complex_address()) ||
1486 (m->is_DecodeNKlass() && Matcher::narrow_klass_use_complex_address())) {
1487 // These are commonly used in address expressions and can
1488 // efficiently fold into them on X64 in some cases.
1489 return false;
1490 }
1491 }
1493 // Cloning cannot be forced. If the node is shared, put it into a register.
1494 return shared;
1495 }
1498 //------------------------------Instruction Selection--------------------------
1499 // Label method walks a "tree" of nodes, using the ADLC generated DFA to match
1500 // ideal nodes to machine instructions. Trees are delimited by shared Nodes,
1501 // things the Matcher does not match (e.g., Memory), and things with different
1502 // Controls (hence forced into different blocks). We pass in the Control
1503 // selected for this entire State tree.
1505 // The Matcher works on Trees, but an Intel add-to-memory requires a DAG: the
1506 // Store and the Load must have identical Memories (as well as identical
1507 // pointers). Since the Matcher does not have anything for Memory (and
1508 // does not handle DAGs), I have to match the Memory input myself. If the
1509 // Tree root is a Store, I require all Loads to have the identical memory.
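//
// Concrete instance (illustration): in StoreI(mem, adr, AddI(LoadI(mem2, adr),
// v)) with mem != mem2, the Load is switched to a register-only match
// instead of being subsumed into the Store's tree.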
1510 Node *Matcher::Label_Root( const Node *n, State *svec, Node *control, const Node *mem){
1511 // Since Label_Root is a recursive function, it's possible that we might run
1512 // out of stack space. See bugs 6272980 & 6227033 for more info.
1513 LabelRootDepth++;
1514 if (LabelRootDepth > MaxLabelRootDepth) {
1515 C->record_method_not_compilable_all_tiers("Out of stack space, increase MaxLabelRootDepth");
1516 return NULL;
1517 }
1518 uint care = 0; // Edges matcher cares about
1519 uint cnt = n->req();
1520 uint i = 0;
1522 // Examine children for memory state
1523 // Can only subsume a child into your match-tree if that child's memory state
1524 // is not modified along the path to another input.
1525 // It is unsafe even if the other inputs are separate roots.
1526 Node *input_mem = NULL;
1527 for( i = 1; i < cnt; i++ ) {
1528 if( !n->match_edge(i) ) continue;
1529 Node *m = n->in(i); // Get ith input
1530 assert( m, "expect non-null children" );
1531 if( m->is_Load() ) {
1532 if( input_mem == NULL ) {
1533 input_mem = m->in(MemNode::Memory);
1534 } else if( input_mem != m->in(MemNode::Memory) ) {
1535 input_mem = NodeSentinel;
1536 }
1537 }
1538 }
1540 for( i = 1; i < cnt; i++ ){// For my children
1541 if( !n->match_edge(i) ) continue;
1542 Node *m = n->in(i); // Get ith input
1543 // Allocate states out of a private arena
1544 State *s = new (&_states_arena) State;
1545 svec->_kids[care++] = s;
1546 assert( care <= 2, "binary only for now" );
1548 // Recursively label the State tree.
1549 s->_kids[0] = NULL;
1550 s->_kids[1] = NULL;
1551 s->_leaf = m;
1553 // Check for leaves of the State Tree; things that cannot be a part of
1554 // the current tree. If any are found, that value is matched as a
1555 // register operand; if not, the normal matching is used.
1556 if( match_into_reg(n, m, control, i, is_shared(m)) ||
1557 //
1558 // Stop recursion if this is LoadNode and the root of this tree is a
1559 // StoreNode and the load & store have different memories.
1560 ((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ||
1561 // Can NOT include the match of a subtree when its memory state
1562 // is used by any of the other subtrees
1563 (input_mem == NodeSentinel) ) {
1564 #ifndef PRODUCT
1565 // Print when we exclude matching due to different memory states at input-loads
1566 if( PrintOpto && (Verbose && WizardMode) && (input_mem == NodeSentinel)
1567 && !((mem!=(Node*)1) && m->is_Load() && m->in(MemNode::Memory) != mem) ) {
1568 tty->print_cr("invalid input_mem");
1569 }
1570 #endif
1571 // Switch to a register-only opcode; this value must be in a register
1572 // and cannot be subsumed as part of a larger instruction.
1573 s->DFA( m->ideal_reg(), m );
1575 } else {
1576 // If match tree has no control and we do, adopt it for entire tree
1577 if( control == NULL && m->in(0) != NULL && m->req() > 1 )
1578 control = m->in(0); // Pick up control
1579 // Else match as a normal part of the match tree.
1580 control = Label_Root(m,s,control,mem);
1581 if (C->failing()) return NULL;
1582 }
1583 }
1586 // Call DFA to match this node, and return
1587 svec->DFA( n->Opcode(), n );
1589 #ifdef ASSERT
1590 uint x;
1591 for( x = 0; x < _LAST_MACH_OPER; x++ )
1592 if( svec->valid(x) )
1593 break;
1595 if (x >= _LAST_MACH_OPER) {
1596 n->dump();
1597 svec->dump();
1598 assert( false, "bad AD file" );
1599 }
1600 #endif
1601 return control;
1602 }
1605 // Con nodes reduced using the same rule can share their MachNode
1606 // which reduces the number of copies of a constant in the final
1607 // program. The register allocator is free to split uses later to
1608 // split live ranges.
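//
// Sketch of the effect (assumed rule names): two uses of the same ConI that
// reduce via the same constant-load rule end up pointing at one shared
// MachNode; the allocator may later split that live range if profitable.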
1609 MachNode* Matcher::find_shared_node(Node* leaf, uint rule) {
1610 if (!leaf->is_Con() && !leaf->is_DecodeNarrowPtr()) return NULL;
1612 // See if this Con has already been reduced using this rule.
1613 if (_shared_nodes.Size() <= leaf->_idx) return NULL;
1614 MachNode* last = (MachNode*)_shared_nodes.at(leaf->_idx);
1615 if (last != NULL && rule == last->rule()) {
1616 // Don't expect control change for DecodeN
1617 if (leaf->is_DecodeNarrowPtr())
1618 return last;
1619 // Get the new space root.
1620 Node* xroot = new_node(C->root());
1621 if (xroot == NULL) {
1622 // This shouldn't happen given the order of matching.
1623 return NULL;
1624 }
1626 // Shared constants need to have their control be root so they
1627 // can be scheduled properly.
1628 Node* control = last->in(0);
1629 if (control != xroot) {
1630 if (control == NULL || control == C->root()) {
1631 last->set_req(0, xroot);
1632 } else {
1633 assert(false, "unexpected control");
1634 return NULL;
1635 }
1636 }
1637 return last;
1638 }
1639 return NULL;
1640 }
1643 //------------------------------ReduceInst-------------------------------------
1644 // Reduce a State tree (with given Control) into a tree of MachNodes.
1645 // This routine (and its cohort ReduceOper) converts Ideal Nodes into
1646 // complicated machine Nodes. Each MachNode covers some tree of Ideal Nodes.
1647 // Each MachNode has a number of complicated MachOper operands; each
1648 // MachOper also covers a further tree of Ideal Nodes.
1650 // The root of the Ideal match tree is always an instruction, so we enter
1651 // the recursion here. After building the MachNode, we need to recurse
1652 // the tree checking for these cases:
1653 // (1) Child is an instruction -
1654 // Build the instruction (recursively), add it as an edge.
1655 // Build a simple operand (register) to hold the result of the instruction.
1656 // (2) Child is an interior part of an instruction -
1657 // Skip over it (do nothing)
1658 // (3) Child is the start of an operand -
1659 // Build the operand, place it inside the instruction
1660 // Call ReduceOper.
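//
// Schematic of the recursion (illustrative only):
//   ReduceInst(root)
//     child is an instruction -> ReduceInst(child); add result as an edge
//     child is interior       -> ReduceInst_Interior(child); just recurse
//     child starts an operand -> build the MachOper; ReduceOper(child)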
1661 MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
1662 assert( rule >= NUM_OPERANDS, "called with operand rule" );
1664 MachNode* shared_node = find_shared_node(s->_leaf, rule);
1665 if (shared_node != NULL) {
1666 return shared_node;
1667 }
1669 // Build the object to represent this state & prepare for recursive calls
1670 MachNode *mach = s->MachNodeGenerator( rule, C );
1671 guarantee(mach != NULL, "Missing MachNode");
1672 mach->_opnds[0] = s->MachOperGenerator( _reduceOp[rule], C );
1673 assert( mach->_opnds[0] != NULL, "Missing result operand" );
1674 Node *leaf = s->_leaf;
1675 // Check for instruction or instruction chain rule
1676 if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
1677 assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
1678 "duplicating node that's already been matched");
1679 // Instruction
1680 mach->add_req( leaf->in(0) ); // Set initial control
1681 // Reduce interior of complex instruction
1682 ReduceInst_Interior( s, rule, mem, mach, 1 );
1683 } else {
1684 // Instruction chain rules are data-dependent on their inputs
1685 mach->add_req(0); // Set initial control to none
1686 ReduceInst_Chain_Rule( s, rule, mem, mach );
1687 }
1689 // If a Memory was used, insert a Memory edge
1690 if( mem != (Node*)1 ) {
1691 mach->ins_req(MemNode::Memory,mem);
1692 #ifdef ASSERT
1693 // Verify adr type after matching memory operation
1694 const MachOper* oper = mach->memory_operand();
1695 if (oper != NULL && oper != (MachOper*)-1) {
1696 // It has a unique memory operand. Find corresponding ideal mem node.
1697 Node* m = NULL;
1698 if (leaf->is_Mem()) {
1699 m = leaf;
1700 } else {
1701 m = _mem_node;
1702 assert(m != NULL && m->is_Mem(), "expecting memory node");
1703 }
1704 const Type* mach_at = mach->adr_type();
1705 // A DecodeN node consumed by an address may have a different type
1706 // than its input. Don't compare types for such a case.
1707 if (m->adr_type() != mach_at &&
1708 (m->in(MemNode::Address)->is_DecodeNarrowPtr() ||
1709 (m->in(MemNode::Address)->is_AddP() &&
1710 m->in(MemNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()) ||
1711 (m->in(MemNode::Address)->is_AddP() &&
1712 m->in(MemNode::Address)->in(AddPNode::Address)->is_AddP() &&
1713 m->in(MemNode::Address)->in(AddPNode::Address)->in(AddPNode::Address)->is_DecodeNarrowPtr()))) {
1714 mach_at = m->adr_type();
1715 }
1716 if (m->adr_type() != mach_at) {
1717 m->dump();
1718 tty->print_cr("mach:");
1719 mach->dump(1);
1720 }
1721 assert(m->adr_type() == mach_at, "matcher should not change adr type");
1722 }
1723 #endif
1724 }
1726 // If the _leaf is an AddP, insert the base edge
1727 if (leaf->is_AddP()) {
1728 mach->ins_req(AddPNode::Base,leaf->in(AddPNode::Base));
1729 }
1731 uint number_of_projections_prior = number_of_projections();
1733 // Perform any 1-to-many expansions required
1734 MachNode *ex = mach->Expand(s, _projection_list, mem);
1735 if (ex != mach) {
1736 assert(ex->ideal_reg() == mach->ideal_reg(), "ideal types should match");
1737 if( ex->in(1)->is_Con() )
1738 ex->in(1)->set_req(0, C->root());
1739 // Remove old node from the graph
1740 for( uint i=0; i<mach->req(); i++ ) {
1741 mach->set_req(i,NULL);
1742 }
1743 #ifdef ASSERT
1744 _new2old_map.map(ex->_idx, s->_leaf);
1745 #endif
1746 }
1748 // PhaseChaitin::fixup_spills will sometimes generate spill code
1749 // via the matcher. By that time, nodes have been wired into the CFG,
1750 // and any further nodes generated by expand rules will be left hanging
1751 // in space, and will not get emitted as output code. Catch this.
1752 // Also, catch any new register allocation constraints ("projections")
1753 // generated belatedly during spill code generation.
1754 if (_allocation_started) {
1755 guarantee(ex == mach, "no expand rules during spill generation");
1756 guarantee(number_of_projections_prior == number_of_projections(), "no allocation during spill generation");
1757 }
1759 if (leaf->is_Con() || leaf->is_DecodeNarrowPtr()) {
1760 // Record the con for sharing
1761 _shared_nodes.map(leaf->_idx, ex);
1762 }
1764 return ex;
1765 }
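//------------------------------ReduceInst_Chain_Rule--------------------------
// Hedged summary of the two paths below: a chain rule wraps a single child.
// Chaining from an operand (or operand class) fills _opnds[1] and recurses
// via ReduceOper; chaining from another instruction's result reduces that
// instruction first and adds it as a data input edge.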
1767 void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *mach ) {
1768 // 'op' is what I am expecting to receive
1769 int op = _leftOp[rule];
1770 // Operand type to catch child's result
1771 // This is what my child will give me.
1772 int opnd_class_instance = s->_rule[op];
1773 // Choose between operand class or not.
1774 // This is what I will receive.
1775 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op;
1776 // New rule for child. Chase operand classes to get the actual rule.
1777 int newrule = s->_rule[catch_op];
1779 if( newrule < NUM_OPERANDS ) {
1780 // Chain from operand or operand class, may be output of shared node
1781 assert( 0 <= opnd_class_instance && opnd_class_instance < NUM_OPERANDS,
1782 "Bad AD file: Instruction chain rule must chain from operand");
1783 // Insert operand into array of operands for this instruction
1784 mach->_opnds[1] = s->MachOperGenerator( opnd_class_instance, C );
1786 ReduceOper( s, newrule, mem, mach );
1787 } else {
1788 // Chain from the result of an instruction
1789 assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
1790 mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
1791 Node *mem1 = (Node*)1;
1792 debug_only(Node *save_mem_node = _mem_node;)
1793 mach->add_req( ReduceInst(s, newrule, mem1) );
1794 debug_only(_mem_node = save_mem_node;)
1795 }
1796 return;
1797 }
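//------------------------------ReduceInst_Interior----------------------------
// Walk the interior of a complex instruction's State tree, appending operands
// to 'mach' as they are discovered. Hedged sketch: for each kid, an operand
// rule becomes a MachOper slot, an internal-operand rule just recurses, and
// an instruction rule is reduced and added as a data edge.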
1800 uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mach, uint num_opnds ) {
1801 if( s->_leaf->is_Load() ) {
1802 Node *mem2 = s->_leaf->in(MemNode::Memory);
1803 assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
1804 debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
1805 mem = mem2;
1806 }
1807 if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
1808 if( mach->in(0) == NULL )
1809 mach->set_req(0, s->_leaf->in(0));
1810 }
1812 // Now recursively walk the state tree & add operand list.
1813 for( uint i=0; i<2; i++ ) { // binary tree
1814 State *newstate = s->_kids[i];
1815 if( newstate == NULL ) break; // Might only have 1 child
1816 // 'op' is what I am expecting to receive
1817 int op;
1818 if( i == 0 ) {
1819 op = _leftOp[rule];
1820 } else {
1821 op = _rightOp[rule];
1822 }
1823 // Operand type to catch child's result
1824 // This is what my child will give me.
1825 int opnd_class_instance = newstate->_rule[op];
1826 // Choose between operand class or not.
1827 // This is what I will receive.
1828 int catch_op = (op >= FIRST_OPERAND_CLASS && op < NUM_OPERANDS) ? opnd_class_instance : op;
1829 // New rule for child. Chase operand classes to get the actual rule.
1830 int newrule = newstate->_rule[catch_op];
1832 if( newrule < NUM_OPERANDS ) { // Operand/operandClass or internalOp/instruction?
1833 // Operand/operandClass
1834 // Insert operand into array of operands for this instruction
1835 mach->_opnds[num_opnds++] = newstate->MachOperGenerator( opnd_class_instance, C );
1836 ReduceOper( newstate, newrule, mem, mach );
1838 } else { // Child is internal operand or new instruction
1839 if( newrule < _LAST_MACH_OPER ) { // internal operand or instruction?
1840 // internal operand --> call ReduceInst_Interior
1841 // Interior of complex instruction. Do nothing but recurse.
1842 num_opnds = ReduceInst_Interior( newstate, newrule, mem, mach, num_opnds );
1843 } else {
1844 // instruction --> call build operand( ) to catch result
1845 // --> ReduceInst( newrule )
1846 mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
1847 Node *mem1 = (Node*)1;
1848 debug_only(Node *save_mem_node = _mem_node;)
1849 mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
1850 debug_only(_mem_node = save_mem_node;)
1851 }
1852 }
1853 assert( mach->_opnds[num_opnds-1], "" );
1854 }
1855 return num_opnds;
1856 }
1858 // This routine walks the interior of possible complex operands.
1859 // At each point we check our children in the match tree:
1860 // (1) No children -
1861 // We are a leaf; add _leaf field as an input to the MachNode
1862 // (2) Child is an internal operand -
1863 // Skip over it ( do nothing )
1864 // (3) Child is an instruction -
1865 // Call ReduceInst recursively and add the
1866 // instruction as an input to the MachNode
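//
// Illustrative case (assumed operand name): for an indOffset-style memory
// operand covering AddP(base, con), ReduceOper adds 'base' as a leaf input
// to the MachNode and swallows the constant offset into the operand itself.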
1867 void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
1868 assert( rule < _LAST_MACH_OPER, "called with operand rule" );
1869 State *kid = s->_kids[0];
1870 assert( kid == NULL || s->_leaf->in(0) == NULL, "internal operands have no control" );
1872 // Leaf? And not subsumed?
1873 if( kid == NULL && !_swallowed[rule] ) {
1874 mach->add_req( s->_leaf ); // Add leaf pointer
1875 return; // Bail out
1876 }
1878 if( s->_leaf->is_Load() ) {
1879 assert( mem == (Node*)1, "multiple Memories being matched at once?" );
1880 mem = s->_leaf->in(MemNode::Memory);
1881 debug_only(_mem_node = s->_leaf;)
1882 }
1883 if( s->_leaf->in(0) && s->_leaf->req() > 1) {
1884 if( !mach->in(0) )
1885 mach->set_req(0,s->_leaf->in(0));
1886 else {
1887 assert( s->_leaf->in(0) == mach->in(0), "same instruction, differing controls?" );
1888 }
1889 }
1891 for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) { // binary tree
1892 int newrule;
1893 if( i == 0)
1894 newrule = kid->_rule[_leftOp[rule]];
1895 else
1896 newrule = kid->_rule[_rightOp[rule]];
1898 if( newrule < _LAST_MACH_OPER ) { // Operand or instruction?
1899 // Internal operand; recurse but do nothing else
1900 ReduceOper( kid, newrule, mem, mach );
1902 } else { // Child is a new instruction
1903 // Reduce the instruction, and add a direct pointer from this
1904 // machine instruction to the newly reduced one.
1905 Node *mem1 = (Node*)1;
1906 debug_only(Node *save_mem_node = _mem_node;)
1907 mach->add_req( ReduceInst( kid, newrule, mem1 ) );
1908 debug_only(_mem_node = save_mem_node;)
1909 }
1910 }
1911 }
1914 // -------------------------------------------------------------------------
1915 // Java-Java calling convention
1916 // (what you use when Java calls Java)
1918 //------------------------------find_receiver----------------------------------
1919 // For a given signature, return the OptoReg for parameter 0.
1920 OptoReg::Name Matcher::find_receiver( bool is_outgoing ) {
1921 VMRegPair regs;
1922 BasicType sig_bt = T_OBJECT;
1923 calling_convention(&sig_bt, ®s, 1, is_outgoing);
1924 // Return argument 0 register. In the LP64 build pointers
1925 // take 2 registers, but the VM wants only the 'main' name.
1926 return OptoReg::as_OptoReg(regs.first());
1927 }
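// Hypothetical use (illustration only, not a call site in this file):
//   OptoReg::Name recv = find_receiver(false /* incoming */);
// yields the register (or stack slot) holding the receiver on entry.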
1929 // This function identifies sub-graphs in which a 'load' node is
1930 // input to two different nodes, and such that it can be matched
1931 // with BMI instructions like blsi, blsr, etc.
1932 // Example: b = -a[i] & a[i] can be matched to blsi r32, m32.
1933 // The graph is (AndL (SubL Con0 LoadL*) LoadL*), where LoadL*
1934 // refers to the same node.
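// The remaining patterns accepted below follow the same shape (sketch, using
// the usual BMI identities): blsr matches b = a[i] & (a[i] - 1) as
// (AndL (AddL LoadL* ConL(-1)) LoadL*), and blsmsk matches
// b = a[i] ^ (a[i] - 1) as (XorL (AddL LoadL* ConL(-1)) LoadL*).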
1935 #ifdef X86
1936 // Match the generic fused operations pattern (op1 (op2 Con{ConType} mop) mop)
1937 // This is a temporary solution until we make DAGs expressible in ADL.
1938 template<typename ConType>
1939 class FusedPatternMatcher {
1940 Node* _op1_node;
1941 Node* _mop_node;
1942 int _con_op;
1944 static int match_next(Node* n, int next_op, int next_op_idx) {
1945 if (n->in(1) == NULL || n->in(2) == NULL) {
1946 return -1;
1947 }
1949 if (next_op_idx == -1) { // n is commutative, try rotations
1950 if (n->in(1)->Opcode() == next_op) {
1951 return 1;
1952 } else if (n->in(2)->Opcode() == next_op) {
1953 return 2;
1954 }
1955 } else {
1956 assert(next_op_idx > 0 && next_op_idx <= 2, "Bad argument index");
1957 if (n->in(next_op_idx)->Opcode() == next_op) {
1958 return next_op_idx;
1959 }
1960 }
1961 return -1;
1962 }
1963 public:
1964 FusedPatternMatcher(Node* op1_node, Node *mop_node, int con_op) :
1965 _op1_node(op1_node), _mop_node(mop_node), _con_op(con_op) { }
1967 bool match(int op1, int op1_op2_idx, // op1 and the index of the op1->op2 edge, -1 if op1 is commutative
1968 int op2, int op2_con_idx, // op2 and the index of the op2->con edge, -1 if op2 is commutative
1969 typename ConType::NativeType con_value) {
1970 if (_op1_node->Opcode() != op1) {
1971 return false;
1972 }
1973 if (_mop_node->outcnt() > 2) {
1974 return false;
1975 }
1976 op1_op2_idx = match_next(_op1_node, op2, op1_op2_idx);
1977 if (op1_op2_idx == -1) {
1978 return false;
1979 }
1980 // Memory operation must be the other edge
1981 int op1_mop_idx = (op1_op2_idx & 1) + 1;
1983 // Check that the mop node is really what we want
1984 if (_op1_node->in(op1_mop_idx) == _mop_node) {
1985 Node *op2_node = _op1_node->in(op1_op2_idx);
1986 if (op2_node->outcnt() > 1) {
1987 return false;
1988 }
1989 assert(op2_node->Opcode() == op2, "Should be");
1990 op2_con_idx = match_next(op2_node, _con_op, op2_con_idx);
1991 if (op2_con_idx == -1) {
1992 return false;
1993 }
1994 // Memory operation must be the other edge
1995 int op2_mop_idx = (op2_con_idx & 1) + 1;
1996 // Check that the memory operation is the same node
1997 if (op2_node->in(op2_mop_idx) == _mop_node) {
1998 // Now check the constant
1999 const Type* con_type = op2_node->in(op2_con_idx)->bottom_type();
2000 if (con_type != Type::TOP && ConType::as_self(con_type)->get_con() == con_value) {
2001 return true;
2002 }
2003 }
2004 }
2005 return false;
2006 }
2007 };
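// Usage sketch (mirrors is_bmi_pattern below): recognize the blsi shape
// (AndI (SubI 0 load) load), where AndI is commutative (edge index -1) and
// SubI must have its constant 0 at input 1:
//   FusedPatternMatcher<TypeInt> fpm(and_node, load_node, Op_ConI);
//   bool is_blsi = fpm.match(Op_AndI, -1, Op_SubI, 1, 0);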
2010 bool Matcher::is_bmi_pattern(Node *n, Node *m) {
2011 if (n != NULL && m != NULL) {
2012 if (m->Opcode() == Op_LoadI) {
2013 FusedPatternMatcher<TypeInt> bmii(n, m, Op_ConI);
2014 return bmii.match(Op_AndI, -1, Op_SubI, 1, 0) ||
2015 bmii.match(Op_AndI, -1, Op_AddI, -1, -1) ||
2016 bmii.match(Op_XorI, -1, Op_AddI, -1, -1);
2017 } else if (m->Opcode() == Op_LoadL) {
2018 FusedPatternMatcher<TypeLong> bmil(n, m, Op_ConL);
2019 return bmil.match(Op_AndL, -1, Op_SubL, 1, 0) ||
2020 bmil.match(Op_AndL, -1, Op_AddL, -1, -1) ||
2021 bmil.match(Op_XorL, -1, Op_AddL, -1, -1);
2022 }
2023 }
2024 return false;
2025 }
2026 #endif // X86
2028 // A method-klass-holder may be passed in the inline_cache_reg
2029 // and then expanded into the inline_cache_reg and a method_oop register
2030 // defined in ad_<arch>.cpp
2033 //------------------------------find_shared------------------------------------
2034 // Set bits if Node is shared or otherwise a root
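//
// Hedged example: if node 'x' feeds both an AddI and an unrelated SubI, the
// second Pre_Visit finds it already visited, flags it shared, and 'x' later
// matches into its own register instead of being cloned into both trees.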
2035 void Matcher::find_shared( Node *n ) {
2036 // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
2037 MStack mstack(C->live_nodes() * 2);
2038 // Mark nodes as address_visited if they are inputs to an address expression
2039 VectorSet address_visited(Thread::current()->resource_area());
2040 mstack.push(n, Visit); // Don't need to pre-visit root node
2041 while (mstack.is_nonempty()) {
2042 n = mstack.node(); // Leave node on stack
2043 Node_State nstate = mstack.state();
2044 uint nop = n->Opcode();
2045 if (nstate == Pre_Visit) {
2046 if (address_visited.test(n->_idx)) { // Visited in address already?
2047 // Flag as visited and shared now.
2048 set_visited(n);
2049 }
2050 if (is_visited(n)) { // Visited already?
2051 // Node is shared and has no reason to clone. Flag it as shared.
2052 // This causes it to match into a register for the sharing.
2053 set_shared(n); // Flag as shared and
2054 if (n->is_DecodeNarrowPtr()) {
2055 // Oop field/array element loads must be shared but since
2056 // they are shared through a DecodeN they may appear to have
2057 // a single use so force sharing here.
2058 set_shared(n->in(1));
2059 }
2060 mstack.pop(); // remove node from stack
2061 continue;
2062 }
2063 nstate = Visit; // Not already visited; so visit now
2064 }
2065 if (nstate == Visit) {
2066 mstack.set_state(Post_Visit);
2067 set_visited(n); // Flag as visited now
2068 bool mem_op = false;
2070 switch( nop ) { // Handle some opcodes special
2071 case Op_Phi: // Treat Phis as shared roots
2072 case Op_Parm:
2073 case Op_Proj: // All handled specially during matching
2074 case Op_SafePointScalarObject:
2075 set_shared(n);
2076 set_dontcare(n);
2077 break;
2078 case Op_If:
2079 case Op_CountedLoopEnd:
2080 mstack.set_state(Alt_Post_Visit); // Alternative way
2081 // Convert (If (Bool (CmpX A B))) into (If (Bool) (CmpX A B)). Helps
2082 // with matching cmp/branch in 1 instruction. The Matcher needs the
2083 // Bool and CmpX side-by-side, because it can only get at constants
2084 // that are at the leaves of Match trees, and the Bool's condition acts
2085 // as a constant here.
2086 mstack.push(n->in(1), Visit); // Clone the Bool
2087 mstack.push(n->in(0), Pre_Visit); // Visit control input
2088 continue; // while (mstack.is_nonempty())
2089 case Op_ConvI2D: // These forms efficiently match with a prior
2090 case Op_ConvI2F: // Load but not a following Store
2091 if( n->in(1)->is_Load() && // Prior load
2092 n->outcnt() == 1 && // Not already shared
2093 n->unique_out()->is_Store() ) // Following store
2094 set_shared(n); // Force it to be a root
2095 break;
2096 case Op_ReverseBytesI:
2097 case Op_ReverseBytesL:
2098 if( n->in(1)->is_Load() && // Prior load
2099 n->outcnt() == 1 ) // Not already shared
2100 set_shared(n); // Force it to be a root
2101 break;
2102 case Op_BoxLock: // Can't match until we get stack-regs in ADLC
2103 case Op_IfFalse:
2104 case Op_IfTrue:
2105 case Op_MachProj:
2106 case Op_MergeMem:
2107 case Op_Catch:
2108 case Op_CatchProj:
2109 case Op_CProj:
2110 case Op_JumpProj:
2111 case Op_JProj:
2112 case Op_NeverBranch:
2113 set_dontcare(n);
2114 break;
2115 case Op_Jump:
2116 mstack.push(n->in(1), Pre_Visit); // Switch Value (could be shared)
2117 mstack.push(n->in(0), Pre_Visit); // Visit Control input
2118 continue; // while (mstack.is_nonempty())
2119 case Op_StrComp:
2120 case Op_StrEquals:
2121 case Op_StrIndexOf:
2122 case Op_AryEq:
2123 case Op_EncodeISOArray:
2124 set_shared(n); // Force result into register (it will be anyways)
2125 break;
2126 case Op_ConP: { // Convert pointers above the centerline to NULL
2127 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2128 const TypePtr* tp = tn->type()->is_ptr();
2129 if (tp->_ptr == TypePtr::AnyNull) {
2130 tn->set_type(TypePtr::NULL_PTR);
2131 }
2132 break;
2133 }
2134 case Op_ConN: { // Convert narrow pointers above the centerline to NULL
2135 TypeNode *tn = n->as_Type(); // Constants derive from type nodes
2136 const TypePtr* tp = tn->type()->make_ptr();
2137 if (tp && tp->_ptr == TypePtr::AnyNull) {
2138 tn->set_type(TypeNarrowOop::NULL_PTR);
2139 }
2140 break;
2141 }
2142 case Op_Binary: // These are introduced in the Post_Visit state.
2143 ShouldNotReachHere();
2144 break;
2145 case Op_ClearArray:
2146 case Op_SafePoint:
2147 mem_op = true;
2148 break;
2149 default:
2150 if( n->is_Store() ) {
2151 // Do match stores, despite no ideal reg
2152 mem_op = true;
2153 break;
2154 }
2155 if( n->is_Mem() ) { // Loads and LoadStores
2156 mem_op = true;
2157 // Loads must be root of match tree due to prior load conflict
2158 if( C->subsume_loads() == false )
2159 set_shared(n);
2160 }
2161 // Fall into default case
2162 if( !n->ideal_reg() )
2163 set_dontcare(n); // Unmatchable Nodes
2164 } // end_switch
2166 for(int i = n->req() - 1; i >= 0; --i) { // For my children
2167 Node *m = n->in(i); // Get ith input
2168 if (m == NULL) continue; // Ignore NULLs
2169 uint mop = m->Opcode();
2171 // Must clone all producers of flags, or we will not match correctly.
2172 // Suppose a compare setting int-flags is shared (e.g., a switch-tree)
2173 // then it will match into an ideal Op_RegFlags. Alas, the fp-flags
2174 // are also there, so we may match a float-branch to int-flags and
2175 // expect the allocator to haul the flags from the int-side to the
2176 // fp-side. No can do.
2177 if( _must_clone[mop] ) {
2178 mstack.push(m, Visit);
2179 continue; // for(int i = ...)
2180 }
2182 // if 'n' and 'm' are part of a graph for BMI instruction, clone this node.
2183 #ifdef X86
2184 if (UseBMI1Instructions && is_bmi_pattern(n, m)) {
2185 mstack.push(m, Visit);
2186 continue;
2187 }
2188 #endif
2190 // Clone addressing expressions as they are "free" in memory access instructions
2191 if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
2192 // Some inputs for address expression are not put on stack
2193 // to avoid marking them as shared and forcing them into register
2194 // if they are used only in address expressions.
2195 // But they should be marked as shared if there are other uses
2196 // besides address expressions.
2198 Node *off = m->in(AddPNode::Offset);
2199 if( off->is_Con() &&
2200 // When there are other uses besides address expressions
2201 // put it on stack and mark as shared.
2202 !is_visited(m) ) {
2203 address_visited.test_set(m->_idx); // Flag as address_visited
2204 Node *adr = m->in(AddPNode::Address);
2206 // Intel, ARM and friends can handle 2 adds in addressing mode
2207 if( clone_shift_expressions && adr->is_AddP() &&
2208 // AtomicAdd is not an addressing expression.
2209 // Cheap to find it by looking for screwy base.
2210 !adr->in(AddPNode::Base)->is_top() &&
2211 // Are there other uses besides address expressions?
2212 !is_visited(adr) ) {
2213 address_visited.set(adr->_idx); // Flag as address_visited
2214 Node *shift = adr->in(AddPNode::Offset);
2215 // Check for shift by small constant as well
2216 if( shift->Opcode() == Op_LShiftX && shift->in(2)->is_Con() &&
2217 shift->in(2)->get_int() <= 3 &&
2218 // Are there other uses besides address expressions?
2219 !is_visited(shift) ) {
2220 address_visited.set(shift->_idx); // Flag as address_visited
2221 mstack.push(shift->in(2), Visit);
2222 Node *conv = shift->in(1);
2223 #ifdef _LP64
2224 // Allow Matcher to match the rule which bypass
2225 // ConvI2L operation for an array index on LP64
2226 // if the index value is positive.
2227 if( conv->Opcode() == Op_ConvI2L &&
2228 conv->as_Type()->type()->is_long()->_lo >= 0 &&
2229 // Are there other uses besides address expressions?
2230 !is_visited(conv) ) {
2231 address_visited.set(conv->_idx); // Flag as address_visited
2232 mstack.push(conv->in(1), Pre_Visit);
2233 } else
2234 #endif
2235 mstack.push(conv, Pre_Visit);
2236 } else {
2237 mstack.push(shift, Pre_Visit);
2238 }
2239 mstack.push(adr->in(AddPNode::Address), Pre_Visit);
2240 mstack.push(adr->in(AddPNode::Base), Pre_Visit);
2241 } else { // Sparc, Alpha, PPC and friends
2242 mstack.push(adr, Pre_Visit);
2243 }
2245 // Clone X+offset as it also folds into most addressing expressions
2246 mstack.push(off, Visit);
2247 mstack.push(m->in(AddPNode::Base), Pre_Visit);
2248 continue; // for(int i = ...)
2249 } // if( off->is_Con() )
2250 } // if( mem_op &&
2251 mstack.push(m, Pre_Visit);
2252 } // for(int i = ...)
2253 }
2254 else if (nstate == Alt_Post_Visit) {
2255 mstack.pop(); // Remove node from stack
2256 // We cannot remove the Cmp input from the Bool here, as the Bool may be
2257 // shared and all users of the Bool need to move the Cmp in parallel.
2258 // This leaves both the Bool and the If pointing at the Cmp. To
2259 // prevent the Matcher from trying to Match the Cmp along both paths,
2260 // BoolNode::match_edge always returns zero.
2262 // We reorder the Op_If in a pre-order manner, so we can visit without
2263 // accidentally sharing the Cmp (the Bool and the If make 2 users).
2264 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool
2265 }
2266 else if (nstate == Post_Visit) {
2267 mstack.pop(); // Remove node from stack
2269 // Now hack a few special opcodes
2270 switch( n->Opcode() ) { // Handle some opcodes special
2271 case Op_StorePConditional:
2272 case Op_StoreIConditional:
2273 case Op_StoreLConditional:
2274 case Op_CompareAndSwapI:
2275 case Op_CompareAndSwapL:
2276 case Op_CompareAndSwapP:
2277 case Op_CompareAndSwapN: { // Convert trinary to binary-tree
2278 Node *newval = n->in(MemNode::ValueIn );
2279 Node *oldval = n->in(LoadStoreConditionalNode::ExpectedIn);
2280 Node *pair = new (C) BinaryNode( oldval, newval );
2281 n->set_req(MemNode::ValueIn,pair);
2282 n->del_req(LoadStoreConditionalNode::ExpectedIn);
2283 break;
2284 }
2285 case Op_CMoveD: // Convert trinary to binary-tree
2286 case Op_CMoveF:
2287 case Op_CMoveI:
2288 case Op_CMoveL:
2289 case Op_CMoveN:
2290 case Op_CMoveP: {
2291 // Restructure into a binary tree for Matching. It's possible that
2292 // we could move this code up next to the graph reshaping for IfNodes
2293 // or vice-versa, but I do not want to debug this for Ladybird.
2294 // 10/2/2000 CNC.
2295 Node *pair1 = new (C) BinaryNode(n->in(1),n->in(1)->in(1));
2296 n->set_req(1,pair1);
2297 Node *pair2 = new (C) BinaryNode(n->in(2),n->in(3));
2298 n->set_req(2,pair2);
2299 n->del_req(3);
2300 break;
2301 }
2302 case Op_LoopLimit: {
2303 Node *pair1 = new (C) BinaryNode(n->in(1),n->in(2));
2304 n->set_req(1,pair1);
2305 n->set_req(2,n->in(3));
2306 n->del_req(3);
2307 break;
2308 }
2309 case Op_StrEquals: {
2310 Node *pair1 = new (C) BinaryNode(n->in(2),n->in(3));
2311 n->set_req(2,pair1);
2312 n->set_req(3,n->in(4));
2313 n->del_req(4);
2314 break;
2315 }
2316 case Op_StrComp:
2317 case Op_StrIndexOf: {
2318 Node *pair1 = new (C) BinaryNode(n->in(2),n->in(3));
2319 n->set_req(2,pair1);
2320 Node *pair2 = new (C) BinaryNode(n->in(4),n->in(5));
2321 n->set_req(3,pair2);
2322 n->del_req(5);
2323 n->del_req(4);
2324 break;
2325 }
2326 case Op_EncodeISOArray: {
2327 // Restructure into a binary tree for Matching.
2328 Node* pair = new (C) BinaryNode(n->in(3), n->in(4));
2329 n->set_req(3, pair);
2330 n->del_req(4);
2331 break;
2332 }
2333 default:
2334 break;
2335 }
2336 }
2337 else {
2338 ShouldNotReachHere();
2339 }
2340 } // end of while (mstack.is_nonempty())
2341 }
2343 #ifdef ASSERT
2344 // machine-independent root to machine-dependent root
2345 void Matcher::dump_old2new_map() {
2346 _old2new_map.dump();
2347 }
2348 #endif
2350 //---------------------------collect_null_checks-------------------------------
2351 // Find null checks in the ideal graph; write a machine-specific node for
2352 // each one. Used by later implicit-null-check handling. Actually collects
2353 // either an IfTrue or IfFalse for the common NOT-null path, AND the ideal
2354 // value being tested.
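//
// Shape recognized (schematic): If -> Bool(ne/eq) over CmpP(val, NULL) or
// CmpN(val, NULL); the projection on the common not-null path and the tested
// value are pushed as a pair onto _null_check_tests.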
2355 void Matcher::collect_null_checks( Node *proj, Node *orig_proj ) {
2356 Node *iff = proj->in(0);
2357 if( iff->Opcode() == Op_If ) {
2358 // During matching If's have Bool & Cmp side-by-side
2359 BoolNode *b = iff->in(1)->as_Bool();
2360 Node *cmp = iff->in(2);
2361 int opc = cmp->Opcode();
2362 if (opc != Op_CmpP && opc != Op_CmpN) return;
2364 const Type* ct = cmp->in(2)->bottom_type();
2365 if (ct == TypePtr::NULL_PTR ||
2366 (opc == Op_CmpN && ct == TypeNarrowOop::NULL_PTR)) {
2368 bool push_it = false;
2369 if( proj->Opcode() == Op_IfTrue ) {
2370 extern int all_null_checks_found;
2371 all_null_checks_found++;
2372 if( b->_test._test == BoolTest::ne ) {
2373 push_it = true;
2374 }
2375 } else {
2376 assert( proj->Opcode() == Op_IfFalse, "" );
2377 if( b->_test._test == BoolTest::eq ) {
2378 push_it = true;
2379 }
2380 }
2381 if( push_it ) {
2382 _null_check_tests.push(proj);
2383 Node* val = cmp->in(1);
2384 #ifdef _LP64
2385 if (val->bottom_type()->isa_narrowoop() &&
2386 !Matcher::narrow_oop_use_complex_address()) {
2387 //
2388 // Look for DecodeN node which should be pinned to orig_proj.
2389 // On platforms (Sparc) which can not handle 2 adds
2390 // in addressing mode we have to keep a DecodeN node and
2391 // use it to do implicit NULL check in address.
2392 //
2393 // DecodeN node was pinned to non-null path (orig_proj) during
2394 // CastPP transformation in final_graph_reshaping_impl().
2395 //
2396 uint cnt = orig_proj->outcnt();
2397 for (uint i = 0; i < cnt; i++) {
2398 Node* d = orig_proj->raw_out(i);
2399 if (d->is_DecodeN() && d->in(1) == val) {
2400 val = d;
2401 val->set_req(0, NULL); // Unpin now.
2402 // Mark this as special case to distinguish from
2403 // a regular case: CmpP(DecodeN, NULL).
2404 val = (Node*)(((intptr_t)val) | 1);
2405 break;
2406 }
2407 }
2408 }
2409 #endif
2410 _null_check_tests.push(val);
2411 }
2412 }
2413 }
2414 }
2416 //---------------------------validate_null_checks------------------------------
2417 // It's possible that the value being NULL-checked is not the root of a match
2418 // tree. If so, I cannot use the value in an implicit null check.
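//
// Note on the layout below: _null_check_tests holds (proj, val) pairs, so
// the loop steps by 2; failed entries are overwritten with the tail pair
// and popped, after which the index is stepped back to re-examine the slot.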
2419 void Matcher::validate_null_checks( ) {
2420 uint cnt = _null_check_tests.size();
2421 for( uint i=0; i < cnt; i+=2 ) {
2422 Node *test = _null_check_tests[i];
2423 Node *val = _null_check_tests[i+1];
2424 bool is_decoden = ((intptr_t)val) & 1;
2425 val = (Node*)(((intptr_t)val) & ~1);
2426 if (has_new_node(val)) {
2427 Node* new_val = new_node(val);
2428 if (is_decoden) {
2429 assert(val->is_DecodeNarrowPtr() && val->in(0) == NULL, "sanity");
2430 // Note: new_val may have a control edge if
2431 // the original ideal node DecodeN was matched before
2432 // it was unpinned in Matcher::collect_null_checks().
2433 // Unpin the mach node and mark it.
2434 new_val->set_req(0, NULL);
2435 new_val = (Node*)(((intptr_t)new_val) | 1);
2436 }
2437 // Is a match-tree root, so replace with the matched value
2438 _null_check_tests.map(i+1, new_val);
2439 } else {
2440 // Yank from candidate list
2441 _null_check_tests.map(i+1,_null_check_tests[--cnt]);
2442 _null_check_tests.map(i,_null_check_tests[--cnt]);
2443 _null_check_tests.pop();
2444 _null_check_tests.pop();
2445 i-=2;
2446 }
2447 }
2448 }
2450 // Used by the DFA in dfa_xxx.cpp. Check for a following barrier or
2451 // atomic instruction acting as a store_load barrier without any
2452 // intervening volatile load; if one is found, we don't need a barrier here.
2453 // We retain the Node to act as a compiler ordering barrier.
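//
// Illustrative instance (assumed shapes): a volatile store followed on the
// same control path by a CompareAndSwapI lets the trailing MemBarVolatile
// answer true here, so no fence needs to be emitted for it.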
2454 bool Matcher::post_store_load_barrier(const Node* vmb) {
2455 Compile* C = Compile::current();
2456 assert(vmb->is_MemBar(), "");
2457 assert(vmb->Opcode() != Op_MemBarAcquire && vmb->Opcode() != Op_LoadFence, "");
2458 const MemBarNode* membar = vmb->as_MemBar();
2460 // Get the Ideal Proj node, ctrl, that can be used to iterate forward
2461 Node* ctrl = NULL;
2462 for (DUIterator_Fast imax, i = membar->fast_outs(imax); i < imax; i++) {
2463 Node* p = membar->fast_out(i);
2464 assert(p->is_Proj(), "only projections here");
2465 if ((p->as_Proj()->_con == TypeFunc::Control) &&
2466 !C->node_arena()->contains(p)) { // Unmatched old-space only
2467 ctrl = p;
2468 break;
2469 }
2470 }
2471 assert((ctrl != NULL), "missing control projection");
2473 for (DUIterator_Fast jmax, j = ctrl->fast_outs(jmax); j < jmax; j++) {
2474 Node *x = ctrl->fast_out(j);
2475 int xop = x->Opcode();
2477 // We don't need current barrier if we see another or a lock
2478 // before seeing volatile load.
2479 //
2480 // Op_Fastunlock previously appeared in the Op_* list below.
2481 // With the advent of 1-0 lock operations we're no longer guaranteed
2482 // that a monitor exit operation contains a serializing instruction.
2484 if (xop == Op_MemBarVolatile ||
2485 xop == Op_CompareAndSwapL ||
2486 xop == Op_CompareAndSwapP ||
2487 xop == Op_CompareAndSwapN ||
2488 xop == Op_CompareAndSwapI) {
2489 return true;
2490 }
2492 // Op_FastLock previously appeared in the Op_* list above.
2493 // With biased locking we're no longer guaranteed that a monitor
2494 // enter operation contains a serializing instruction.
2495 if ((xop == Op_FastLock) && !UseBiasedLocking) {
2496 return true;
2497 }
2499 if (x->is_MemBar()) {
2500 // We must retain this membar if there is an upcoming volatile
2501 // load, which will be followed by acquire membar.
2502 if (xop == Op_MemBarAcquire || xop == Op_LoadFence) {
2503 return false;
2504 } else {
2505 // For other kinds of barriers, check by pretending we
2506 // are them, and seeing if we can be removed.
2507 return post_store_load_barrier(x->as_MemBar());
2508 }
2509 }
2511 // probably not necessary to check for these
2512 if (x->is_Call() || x->is_SafePoint() || x->is_block_proj()) {
2513 return false;
2514 }
2515 }
2516 return false;
2517 }
2519 // Check whether node n is a branch to an uncommon trap that we could
2520 // optimize as a test with very high branch costs in case of going to
2521 // the uncommon trap. The code must be able to be recompiled to use
2522 // a cheaper test.
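//
// Shape walked below (schematic): If -> IfFalse -> (Region ->)* Call into
// the uncommon trap blob, limited to 4 Region hops; the trap request must
// carry an allowed DeoptReason and an action other than Action_none.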
2523 bool Matcher::branches_to_uncommon_trap(const Node *n) {
2524 // Don't do it for natives, adapters, or runtime stubs
2525 Compile *C = Compile::current();
2526 if (!C->is_method_compilation()) return false;
2528 assert(n->is_If(), "You should only call this on if nodes.");
2529 IfNode *ifn = n->as_If();
2531 Node *ifFalse = NULL;
2532 for (DUIterator_Fast imax, i = ifn->fast_outs(imax); i < imax; i++) {
2533 if (ifn->fast_out(i)->is_IfFalse()) {
2534 ifFalse = ifn->fast_out(i);
2535 break;
2536 }
2537 }
2538 assert(ifFalse, "An If should have an ifFalse. Graph is broken.");
2540 Node *reg = ifFalse;
2541 int cnt = 4; // We must protect against cycles. Limit to 4 iterations.
2542 // Alternatively use visited set? Seems too expensive.
2543 while (reg != NULL && cnt > 0) {
2544 CallNode *call = NULL;
2545 RegionNode *nxt_reg = NULL;
2546 for (DUIterator_Fast imax, i = reg->fast_outs(imax); i < imax; i++) {
2547 Node *o = reg->fast_out(i);
2548 if (o->is_Call()) {
2549 call = o->as_Call();
2550 }
2551 if (o->is_Region()) {
2552 nxt_reg = o->as_Region();
2553 }
2554 }
2556 if (call &&
2557 call->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
2558 const Type* trtype = call->in(TypeFunc::Parms)->bottom_type();
2559 if (trtype->isa_int() && trtype->is_int()->is_con()) {
2560 jint tr_con = trtype->is_int()->get_con();
2561 Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
2562 Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
2563 assert((int)reason < (int)BitsPerInt, "recode bit map");
2565 if (is_set_nth_bit(C->allowed_deopt_reasons(), (int)reason)
2566 && action != Deoptimization::Action_none) {
2567 // This uncommon trap is sure to recompile, eventually.
2568 // When that happens, C->too_many_traps will prevent
2569 // this transformation from happening again.
2570 return true;
2571 }
2572 }
2573 }
2575 reg = nxt_reg;
2576 cnt--;
2577 }
2579 return false;
2580 }
2582 //=============================================================================
2583 //---------------------------State---------------------------------------------
2584 State::State(void) {
2585 #ifdef ASSERT
2586 _id = 0;
2587 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2588 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2589 //memset(_cost, -1, sizeof(_cost));
2590 //memset(_rule, -1, sizeof(_rule));
2591 #endif
2592 memset(_valid, 0, sizeof(_valid));
2593 }
2595 #ifdef ASSERT
2596 State::~State() {
2597 _id = 99;
2598 _kids[0] = _kids[1] = (State*)(intptr_t) CONST64(0xcafebabecafebabe);
2599 _leaf = (Node*)(intptr_t) CONST64(0xbaadf00dbaadf00d);
2600 memset(_cost, -3, sizeof(_cost));
2601 memset(_rule, -3, sizeof(_rule));
2602 }
2603 #endif
2605 #ifndef PRODUCT
2606 //---------------------------dump----------------------------------------------
2607 void State::dump() {
2608 tty->print("\n");
2609 dump(0);
2610 }
2612 void State::dump(int depth) {
2613 for( int j = 0; j < depth; j++ )
2614 tty->print(" ");
2615 tty->print("--N: ");
2616 _leaf->dump();
2617 uint i;
2618 for( i = 0; i < _LAST_MACH_OPER; i++ )
2619 // Check for valid entry
2620 if( valid(i) ) {
2621 for( int j = 0; j < depth; j++ )
2622 tty->print(" ");
2623 assert(_cost[i] != max_juint, "cost must be a valid value");
2624 assert(_rule[i] < _last_Mach_Node, "rule[i] must be valid rule");
2625 tty->print_cr("%s %d %s",
2626 ruleName[i], _cost[i], ruleName[_rule[i]] );
2627 }
2628 tty->cr();
2630 for( i=0; i<2; i++ )
2631 if( _kids[i] )
2632 _kids[i]->dump(depth+1);
2633 }
2634 #endif