Fri, 27 Feb 2009 13:27:09 -0800
6810672: Comment typos
Summary: Assorted comment typos collected while reading the code.
Reviewed-by: kvn, never
1 /*
2 * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_output.cpp.incl"
28 extern uint size_java_to_interp();
29 extern uint reloc_java_to_interp();
30 extern uint size_exception_handler();
31 extern uint size_deopt_handler();
33 #ifndef PRODUCT
34 #define DEBUG_ARG(x) , x
35 #else
36 #define DEBUG_ARG(x)
37 #endif
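// DEBUG_ARG lets a declaration or call carry an extra trailing argument
// only in debug builds; in PRODUCT the argument vanishes along with its
// comma. A hypothetical use (names assumed for illustration):
//   void emit_nops(int count DEBUG_ARG(const char* reason));
//   emit_nops(4 DEBUG_ARG("loop alignment"));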
39 extern int emit_exception_handler(CodeBuffer &cbuf);
40 extern int emit_deopt_handler(CodeBuffer &cbuf);
42 //------------------------------Output-----------------------------------------
43 // Convert Nodes to instruction bits and pass off to the VM
44 void Compile::Output() {
45 // The RootNode's block must still be empty at this point
46 assert( _cfg->_broot->_nodes.size() == 0, "" );
48 // Initialize the space for the BufferBlob used to find and verify
49 // instruction size in MachNode::emit_size()
50 init_scratch_buffer_blob();
51 if (failing()) return; // Out of memory
53 // Make sure I can find the Start Node
54 Block_Array& bbs = _cfg->_bbs;
55 Block *entry = _cfg->_blocks[1];
56 Block *broot = _cfg->_broot;
58 const StartNode *start = entry->_nodes[0]->as_Start();
60 // Replace StartNode with prolog
61 MachPrologNode *prolog = new (this) MachPrologNode();
62 entry->_nodes.map( 0, prolog );
63 bbs.map( prolog->_idx, entry );
64 bbs.map( start->_idx, NULL ); // start is no longer in any block
66 // Virtual methods need an unverified entry point
68 if( is_osr_compilation() ) {
69 if( PoisonOSREntry ) {
70 // TODO: Should use a ShouldNotReachHereNode...
71 _cfg->insert( broot, 0, new (this) MachBreakpointNode() );
72 }
73 } else {
74 if( _method && !_method->flags().is_static() ) {
75 // Insert unvalidated entry point
76 _cfg->insert( broot, 0, new (this) MachUEPNode() );
77 }
79 }
82 // Break before main entry point
83 if( (_method && _method->break_at_execute())
84 #ifndef PRODUCT
85 ||(OptoBreakpoint && is_method_compilation())
86 ||(OptoBreakpointOSR && is_osr_compilation())
87 ||(OptoBreakpointC2R && !_method)
88 #endif
89 ) {
90 // Checking for _method means that OptoBreakpoint does not apply to
91 // runtime stubs or frame converters
92 _cfg->insert( entry, 1, new (this) MachBreakpointNode() );
93 }
95 // Insert epilogs before every return
96 for( uint i=0; i<_cfg->_num_blocks; i++ ) {
97 Block *b = _cfg->_blocks[i];
98 if( !b->is_connector() && b->non_connector_successor(0) == _cfg->_broot ) { // Found a program exit point?
99 Node *m = b->end();
100 if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
101 MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
102 b->add_inst( epilog );
103 bbs.map(epilog->_idx, b);
104 //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
105 }
106 }
107 }
109 # ifdef ENABLE_ZAP_DEAD_LOCALS
110 if ( ZapDeadCompiledLocals ) Insert_zap_nodes();
111 # endif
113 ScheduleAndBundle();
115 #ifndef PRODUCT
116 if (trace_opto_output()) {
117 tty->print("\n---- After ScheduleAndBundle ----\n");
118 for (uint i = 0; i < _cfg->_num_blocks; i++) {
119 tty->print("\nBB#%03d:\n", i);
120 Block *bb = _cfg->_blocks[i];
121 for (uint j = 0; j < bb->_nodes.size(); j++) {
122 Node *n = bb->_nodes[j];
123 OptoReg::Name reg = _regalloc->get_reg_first(n);
124 tty->print(" %-6s ", reg >= 0 && reg < REG_COUNT ? Matcher::regName[reg] : "");
125 n->dump();
126 }
127 }
128 }
129 #endif
131 if (failing()) return;
133 BuildOopMaps();
135 if (failing()) return;
137 Fill_buffer();
138 }
140 bool Compile::need_stack_bang(int frame_size_in_bytes) const {
141 // Determine if we need to generate a stack overflow check.
142 // Do it if the method is not a stub function and
143 // has java calls or has frame size > vm_page_size/8.
144 return (stub_function() == NULL &&
145 (has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3));
146 }
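// A standalone sketch (not HotSpot's emitted code; names assumed) of what
// the "stack bang" probes do: touch every page the new frame will span,
// below the stack pointer, so an overflow traps at a well-defined point
// in the prolog rather than at a random store inside the frame.
#include <cstddef>

static void stack_bang_sketch(volatile char* sp, size_t frame_size, size_t page_size) {
  // Probe each page the new frame will span; a guard-page hit traps here.
  for (size_t off = page_size; off <= frame_size; off += page_size) {
    sp[-(ptrdiff_t)off] = 0;
  }
}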
148 bool Compile::need_register_stack_bang() const {
149 // Determine if we need to generate a register stack overflow check.
150 // This is only used on architectures which have split register
151 // and memory stacks (i.e., IA64).
152 // Bang if the method is not a stub function and has java calls
153 return (stub_function() == NULL && has_java_calls());
154 }
156 # ifdef ENABLE_ZAP_DEAD_LOCALS
159 // In order to catch compiler oop-map bugs, we have implemented
160 // a debugging mode called ZapDeadCompiledLocals.
161 // This mode causes the compiler to insert a call to a runtime routine,
162 // "zap_dead_locals", right before each place in compiled code
163 // that could potentially be a gc-point (i.e., a safepoint or oop map point).
164 // The runtime routine checks that locations mapped as oops are really
165 // oops, that locations mapped as values do not look like oops,
166 // and that locations mapped as dead are not used later
167 // (by zapping them to an invalid address).
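// A standalone sketch (separate program; names and the sentinel value are
// assumed) of the zapping idea described above: slots the oop map calls
// dead are overwritten with an invalid address so that any later use of
// them fails loudly instead of silently reading stale data.
#include <cstdio>
#include <stdint.h>

enum SlotKind { kOop, kValue, kDead };
static const intptr_t kZapped = (intptr_t)0xdeadbeef;  // assumed poison value

static void zap_dead_slots(intptr_t* slots, const SlotKind* kinds, int n) {
  for (int i = 0; i < n; i++)
    if (kinds[i] == kDead) slots[i] = kZapped;  // poison dead locations
}

int main() {
  intptr_t frame[3] = { 100, 200, 300 };
  SlotKind kinds[3] = { kValue, kDead, kValue };
  zap_dead_slots(frame, kinds, 3);
  printf("%ld %ld %ld\n", (long)frame[0], (long)frame[1], (long)frame[2]);
  return 0;
}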
169 int Compile::_CompiledZap_count = 0;
171 void Compile::Insert_zap_nodes() {
172 bool skip = false;
175 // Dink with static counts because code without the extra
176 // runtime calls is MUCH faster for debugging purposes
178 if ( CompileZapFirst == 0 ) ; // nothing special
179 else if ( CompileZapFirst > CompiledZap_count() ) skip = true;
180 else if ( CompileZapFirst == CompiledZap_count() )
181 warning("starting zap compilation after skipping");
183 if ( CompileZapLast == -1 ) ; // nothing special
184 else if ( CompileZapLast < CompiledZap_count() ) skip = true;
185 else if ( CompileZapLast == CompiledZap_count() )
186 warning("about to compile last zap");
188 ++_CompiledZap_count; // counts skipped zaps, too
190 if ( skip ) return;
193 if ( _method == NULL )
194 return; // no safepoints/oopmaps emitted for calls in stubs, so we don't care
196 // Insert call to zap runtime stub before every node with an oop map
197 for( uint i=0; i<_cfg->_num_blocks; i++ ) {
198 Block *b = _cfg->_blocks[i];
199 for ( uint j = 0; j < b->_nodes.size(); ++j ) {
200 Node *n = b->_nodes[j];
202 // Determine whether we should insert a zap-a-lot node in the output.
203 // We do that for all nodes that have oopmap info, except for calls
204 // to allocation. Calls to allocation pass in the old top-of-eden pointer
205 // and expect the C code to reset it. Hence, there can be no safepoints between
206 // the inlined allocation and the call to new_Java, etc.
207 // We also cannot zap monitor calls, as they must hold the microlock
208 // during the call to Zap, which also wants to grab the microlock.
209 bool insert = n->is_MachSafePoint() && (n->as_MachSafePoint()->oop_map() != NULL);
210 if ( insert ) { // it is MachSafePoint
211 if ( !n->is_MachCall() ) {
212 insert = false;
213 } else if ( n->is_MachCall() ) {
214 MachCallNode* call = n->as_MachCall();
215 if (call->entry_point() == OptoRuntime::new_instance_Java() ||
216 call->entry_point() == OptoRuntime::new_array_Java() ||
217 call->entry_point() == OptoRuntime::multianewarray2_Java() ||
218 call->entry_point() == OptoRuntime::multianewarray3_Java() ||
219 call->entry_point() == OptoRuntime::multianewarray4_Java() ||
220 call->entry_point() == OptoRuntime::multianewarray5_Java() ||
221 call->entry_point() == OptoRuntime::slow_arraycopy_Java() ||
222 call->entry_point() == OptoRuntime::complete_monitor_locking_Java()
223 ) {
224 insert = false;
225 }
226 }
227 if (insert) {
228 Node *zap = call_zap_node(n->as_MachSafePoint(), i);
229 b->_nodes.insert( j, zap );
230 _cfg->_bbs.map( zap->_idx, b );
231 ++j;
232 }
233 }
234 }
235 }
236 }
239 Node* Compile::call_zap_node(MachSafePointNode* node_to_check, int block_no) {
240 const TypeFunc *tf = OptoRuntime::zap_dead_locals_Type();
241 CallStaticJavaNode* ideal_node =
242 new (this, tf->domain()->cnt()) CallStaticJavaNode( tf,
243 OptoRuntime::zap_dead_locals_stub(_method->flags().is_native()),
244 "call zap dead locals stub", 0, TypePtr::BOTTOM);
245 // We need to copy the OopMap from the site we're zapping at.
246 // We have to make a copy, because the zap site might not be
247 // a call site, and zap_dead is a call site.
248 OopMap* clone = node_to_check->oop_map()->deep_copy();
250 // Add the cloned OopMap to the zap node
251 ideal_node->set_oop_map(clone);
252 return _matcher->match_sfpt(ideal_node);
253 }
255 //------------------------------is_node_getting_a_safepoint--------------------
256 bool Compile::is_node_getting_a_safepoint( Node* n) {
257 // This code duplicates the logic prior to the call of add_safepoint
258 // below in this file.
259 if( n->is_MachSafePoint() ) return true;
260 return false;
261 }
263 # endif // ENABLE_ZAP_DEAD_LOCALS
265 //------------------------------compute_loop_first_inst_sizes------------------
266 // Compute the size of the first NumberOfLoopInstrToAlign instructions at the top
267 // of a loop. When aligning a loop we need to provide enough instructions
268 // in the cpu's fetch buffer to feed the decoders. The loop alignment could be
269 // avoided if we have enough instructions in the fetch buffer at the head of a loop.
270 // By default, the size is set to 999999 by Block's constructor so that
271 // a loop will be aligned if the size is not reset here.
272 //
273 // Note: Mach instructions could contain several HW instructions
274 // so the size is estimated only.
275 //
276 void Compile::compute_loop_first_inst_sizes() {
277 // The next condition is used to gate the loop alignment optimization.
278 // Don't align a loop if there are enough instructions at the head of the loop
279 // or if the alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
280 // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
281 // equal to 11 bytes, the size of the largest address NOP instruction.
282 if( MaxLoopPad < OptoLoopAlignment-1 ) {
283 uint last_block = _cfg->_num_blocks-1;
284 for( uint i=1; i <= last_block; i++ ) {
285 Block *b = _cfg->_blocks[i];
286 // Check the first loop's block which requires an alignment.
287 if( b->loop_alignment() > (uint)relocInfo::addr_unit() ) {
288 uint sum_size = 0;
289 uint inst_cnt = NumberOfLoopInstrToAlign;
290 inst_cnt = b->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
292 // Check subsequent fallthrough blocks if the loop's first
293 // block(s) do not have enough instructions.
294 Block *nb = b;
295 while( inst_cnt > 0 &&
296 i < last_block &&
297 !_cfg->_blocks[i+1]->has_loop_alignment() &&
298 !nb->has_successor(b) ) {
299 i++;
300 nb = _cfg->_blocks[i];
301 inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt, _regalloc);
302 } // while( inst_cnt > 0 && i < last_block )
304 b->set_first_inst_size(sum_size);
305 } // if( b->loop_alignment() > (uint)relocInfo::addr_unit() )
306 } // for( i <= last_block )
307 } // if( MaxLoopPad < OptoLoopAlignment-1 )
308 }
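// A standalone sketch (separate program; sizes assumed) of the sizing
// decision above: sum the sizes of the first few instructions at a loop
// head and align only when they do not already fill the fetch buffer.
#include <cstdio>

static const unsigned kFetchWindow  = 16;  // assumed fetch-buffer bytes
static const unsigned kInstrToAlign = 4;   // cf. NumberOfLoopInstrToAlign

static bool loop_needs_alignment(const unsigned* inst_sizes, unsigned n) {
  unsigned sum = 0;
  for (unsigned i = 0; i < n && i < kInstrToAlign; i++)
    sum += inst_sizes[i];
  return sum < kFetchWindow;  // small head: padding can still help decode
}

int main() {
  unsigned tight[] = { 2, 3, 2, 3 };  // 10 bytes at the head: align
  unsigned fat[]   = { 7, 7, 7, 7 };  // 28 bytes at the head: skip it
  printf("%d %d\n", loop_needs_alignment(tight, 4), loop_needs_alignment(fat, 4));
  return 0;
}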
310 //----------------------Shorten_branches---------------------------------------
311 // The architecture description provides short branch variants for some long
312 // branch instructions. Replace eligible long branches with short branches.
313 void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size) {
315 // fill in the nop array for bundling computations
316 MachNode *_nop_list[Bundle::_nop_count];
317 Bundle::initialize_nops(_nop_list, this);
319 // ------------------
320 // Compute size of each block, method size, and relocation information size
321 uint *jmp_end = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks);
322 uint *blk_starts = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks+1);
323 DEBUG_ONLY( uint *jmp_target = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); )
324 DEBUG_ONLY( uint *jmp_rule = NEW_RESOURCE_ARRAY(uint,_cfg->_num_blocks); )
325 blk_starts[0] = 0;
327 // Initialize the sizes to 0
328 code_size = 0; // Size in bytes of generated code
329 stub_size = 0; // Size in bytes of all stub entries
330 // Size in bytes of all relocation entries, including those in local stubs.
331 // Start with 2 bytes of reloc info for the unvalidated entry point
332 reloc_size = 1; // Number of relocation entries
333 const_size = 0; // size of fp constants in words
335 // Make three passes. The first computes pessimistic blk_starts,
336 // relative jmp_end, reloc_size and const_size information.
337 // The second performs short branch substitution using the pessimistic
338 // sizing. The third inserts nops where needed.
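// A worked example of the pass structure, with assumed x86-style sizes:
// suppose blocks B0..B2 start pessimistically at 0, 40, and 170, and B0
// ends in a 5-byte long jump to B2. Pass one records jmp_end[0] = 40 and
// blk_starts = {0, 40, 170}. Pass two computes a displacement of
// 170 - (0 + 40) = 130, too far for a signed-byte short branch; had it
// fallen within [-128, 127], the jump would shrink to 2 bytes and
// jmp_end[0] would drop by 3. Pass three only inserts alignment nops.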
340 Node *nj; // tmp
342 // Step one, perform a pessimistic sizing pass.
343 uint i;
344 uint min_offset_from_last_call = 1; // init to a positive value
345 uint nop_size = (new (this) MachNopNode())->size(_regalloc);
346 for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
347 Block *b = _cfg->_blocks[i];
349 // Sum all instruction sizes to compute block size
350 uint last_inst = b->_nodes.size();
351 uint blk_size = 0;
352 for( uint j = 0; j<last_inst; j++ ) {
353 nj = b->_nodes[j];
354 uint inst_size = nj->size(_regalloc);
355 blk_size += inst_size;
356 // Handle machine instruction nodes
357 if( nj->is_Mach() ) {
358 MachNode *mach = nj->as_Mach();
359 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
360 reloc_size += mach->reloc();
361 const_size += mach->const_size();
362 if( mach->is_MachCall() ) {
363 MachCallNode *mcall = mach->as_MachCall();
364 // This destination address is NOT PC-relative
366 mcall->method_set((intptr_t)mcall->entry_point());
368 if( mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method ) {
369 stub_size += size_java_to_interp();
370 reloc_size += reloc_java_to_interp();
371 }
372 } else if (mach->is_MachSafePoint()) {
373 // If call/safepoint are adjacent, account for possible
374 // nop to disambiguate the two safepoints.
375 if (min_offset_from_last_call == 0) {
376 blk_size += nop_size;
377 }
378 }
379 }
380 min_offset_from_last_call += inst_size;
381 // Remember end of call offset
382 if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
383 min_offset_from_last_call = 0;
384 }
385 }
387 // During short branch replacement, we store the relative (to blk_starts)
388 // end of jump in jmp_end, rather than the absolute end of jump. This
389 // is so that we do not need to recompute sizes of all nodes when we compute
390 // correct blk_starts in our next sizing pass.
391 jmp_end[i] = blk_size;
392 DEBUG_ONLY( jmp_target[i] = 0; )
394 // When the next block starts a loop, we may insert pad NOP
395 // instructions. Since we cannot know our future alignment,
396 // assume the worst.
397 if( i<_cfg->_num_blocks-1 ) {
398 Block *nb = _cfg->_blocks[i+1];
399 int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
400 if( max_loop_pad > 0 ) {
401 assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
402 blk_size += max_loop_pad;
403 }
404 }
406 // Save block size; update total method size
407 blk_starts[i+1] = blk_starts[i]+blk_size;
408 }
410 // Step two, replace eligible long jumps.
412 // Note: this will only get the long branches within short branch
413 // range. Another pass might detect more branches that became
414 // candidates because the shortening in the first pass exposed
415 // more opportunities. Unfortunately, this would require
416 // recomputing the starting and ending positions for the blocks.
417 for( i=0; i<_cfg->_num_blocks; i++ ) {
418 Block *b = _cfg->_blocks[i];
420 int j;
421 // Find the branch; ignore trailing NOPs.
422 for( j = b->_nodes.size()-1; j>=0; j-- ) {
423 nj = b->_nodes[j];
424 if( !nj->is_Mach() || nj->as_Mach()->ideal_Opcode() != Op_Con )
425 break;
426 }
428 if (j >= 0) {
429 if( nj->is_Mach() && nj->as_Mach()->may_be_short_branch() ) {
430 MachNode *mach = nj->as_Mach();
431 // This requires the TRUE branch target be in succs[0]
432 uint bnum = b->non_connector_successor(0)->_pre_order;
433 uintptr_t target = blk_starts[bnum];
434 if( mach->is_pc_relative() ) {
435 int offset = target-(blk_starts[i] + jmp_end[i]);
436 if (_matcher->is_short_branch_offset(mach->rule(), offset)) {
437 // We've got a winner. Replace this branch.
438 MachNode* replacement = mach->short_branch_version(this);
439 b->_nodes.map(j, replacement);
440 mach->subsume_by(replacement);
442 // Update the jmp_end size to save time in our
443 // next pass.
444 jmp_end[i] -= (mach->size(_regalloc) - replacement->size(_regalloc));
445 DEBUG_ONLY( jmp_target[i] = bnum; );
446 DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
447 }
448 } else {
449 #ifndef PRODUCT
450 mach->dump(3);
451 #endif
452 Unimplemented();
453 }
454 }
455 }
456 }
458 // Compute the size of the first NumberOfLoopInstrToAlign instructions at the head
459 // of a loop. It is used to determine the padding for loop alignment.
460 compute_loop_first_inst_sizes();
462 // Step 3, compute the offsets of all the labels
463 uint last_call_adr = max_uint;
464 for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
465 // copy the offset of the beginning to the corresponding label
466 assert(labels[i].is_unused(), "cannot patch at this point");
467 labels[i].bind_loc(blk_starts[i], CodeBuffer::SECT_INSTS);
469 // insert padding for any instructions that need it
470 Block *b = _cfg->_blocks[i];
471 uint last_inst = b->_nodes.size();
472 uint adr = blk_starts[i];
473 for( uint j = 0; j<last_inst; j++ ) {
474 nj = b->_nodes[j];
475 if( nj->is_Mach() ) {
476 int padding = nj->as_Mach()->compute_padding(adr);
477 // If call/safepoint are adjacent insert a nop (5010568)
478 if (padding == 0 && nj->is_MachSafePoint() && !nj->is_MachCall() &&
479 adr == last_call_adr ) {
480 padding = nop_size;
481 }
482 if(padding > 0) {
483 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
484 int nops_cnt = padding / nop_size;
485 MachNode *nop = new (this) MachNopNode(nops_cnt);
486 b->_nodes.insert(j++, nop);
487 _cfg->_bbs.map( nop->_idx, b );
488 adr += padding;
489 last_inst++;
490 }
491 }
492 adr += nj->size(_regalloc);
494 // Remember end of call offset
495 if (nj->is_MachCall() && nj->as_MachCall()->is_safepoint_node()) {
496 last_call_adr = adr;
497 }
498 }
500 if ( i != _cfg->_num_blocks-1) {
501 // Get the size of the block
502 uint blk_size = adr - blk_starts[i];
504 // When the next block is the top of a loop, we may insert pad NOP
505 // instructions.
506 Block *nb = _cfg->_blocks[i+1];
507 int current_offset = blk_starts[i] + blk_size;
508 current_offset += nb->alignment_padding(current_offset);
509 // Save block size; update total method size
510 blk_starts[i+1] = current_offset;
511 }
512 }
514 #ifdef ASSERT
515 for( i=0; i<_cfg->_num_blocks; i++ ) { // For all blocks
516 if( jmp_target[i] != 0 ) {
517 int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_end[i]);
518 if (!_matcher->is_short_branch_offset(jmp_rule[i], offset)) {
519 tty->print_cr("target (%d) - jmp_end(%d) = offset (%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_end[i], offset, i, jmp_target[i]);
520 }
521 assert(_matcher->is_short_branch_offset(jmp_rule[i], offset), "Displacement too large for short jmp");
522 }
523 }
524 #endif
526 // ------------------
527 // Compute size for code buffer
528 code_size = blk_starts[i-1] + jmp_end[i-1];
530 // Relocation records
531 reloc_size += 1; // Relo entry for exception handler
533 // Adjust reloc_size to number of record of relocation info
534 // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
535 // a relocation index.
536 // The CodeBuffer will expand the locs array if this estimate is too low.
537 reloc_size *= 10 / sizeof(relocInfo);
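// Worked example of the estimate: with sizeof(relocInfo) == 2, each
// counted record is budgeted 10/2 == 5 relocInfo units, i.e. 10 bytes,
// comfortably covering the 2-byte minimum, the 6-8 byte worst case,
// and the ~25% index tax mentioned above.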
539 // Adjust const_size to number of bytes
540 const_size *= 2*jintSize; // both float and double take two words per entry
542 }
544 //------------------------------FillLocArray-----------------------------------
545 // Create a bit of debug info and append it to the array. The mapping is from
546 // Java local or expression stack to constant, register or stack-slot. For
547 // doubles, insert 2 mappings and return 1 (to tell the caller that the next
548 // entry has been taken care of and the caller should skip it).
549 static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
550 // This should never have accepted Bad before
551 assert(OptoReg::is_valid(regnum), "location must be valid");
552 return (OptoReg::is_reg(regnum))
553 ? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
554 : new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum)));
555 }
558 ObjectValue*
559 Compile::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
560 for (int i = 0; i < objs->length(); i++) {
561 assert(objs->at(i)->is_object(), "corrupt object cache");
562 ObjectValue* sv = (ObjectValue*) objs->at(i);
563 if (sv->id() == id) {
564 return sv;
565 }
566 }
567 // Otherwise..
568 return NULL;
569 }
571 void Compile::set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
572 ObjectValue* sv ) {
573 assert(sv_for_node_id(objs, sv->id()) == NULL, "Precondition");
574 objs->append(sv);
575 }
578 void Compile::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
579 GrowableArray<ScopeValue*> *array,
580 GrowableArray<ScopeValue*> *objs ) {
581 assert( local, "use _top instead of null" );
582 if (array->length() != idx) {
583 assert(array->length() == idx + 1, "Unexpected array count");
584 // Old functionality:
585 // return
586 // New functionality:
587 // Assert if the local is not top. In product mode let the new node
588 // override the old entry.
589 assert(local == top(), "LocArray collision");
590 if (local == top()) {
591 return;
592 }
593 array->pop();
594 }
595 const Type *t = local->bottom_type();
597 // Is it a safepoint scalar object node?
598 if (local->is_SafePointScalarObject()) {
599 SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
601 ObjectValue* sv = Compile::sv_for_node_id(objs, spobj->_idx);
602 if (sv == NULL) {
603 ciKlass* cik = t->is_oopptr()->klass();
604 assert(cik->is_instance_klass() ||
605 cik->is_array_klass(), "Not supported allocation.");
606 sv = new ObjectValue(spobj->_idx,
607 new ConstantOopWriteValue(cik->encoding()));
608 Compile::set_sv_for_object_node(objs, sv);
610 uint first_ind = spobj->first_index();
611 for (uint i = 0; i < spobj->n_fields(); i++) {
612 Node* fld_node = sfpt->in(first_ind+i);
613 (void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
614 }
615 }
616 array->append(sv);
617 return;
618 }
620 // Grab the register number for the local
621 OptoReg::Name regnum = _regalloc->get_reg_first(local);
622 if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
623 // Record the double as two float registers.
624 // The register mask for such a value always specifies two adjacent
625 // float registers, with the lower register number even.
626 // Normally, the allocation of high and low words to these registers
627 // is irrelevant, because nearly all operations on register pairs
628 // (e.g., StoreD) treat them as a single unit.
629 // Here, we assume in addition that the words in these two registers are
630 // stored "naturally" (by operations like StoreD and double stores
631 // within the interpreter) such that the lower-numbered register
632 // is written to the lower memory address. This may seem like
633 // a machine dependency, but it is not--it is a requirement on
634 // the author of the <arch>.ad file to ensure that, for every
635 // even/odd double-register pair to which a double may be allocated,
636 // the word in the even single-register is stored to the first
637 // memory word. (Note that register numbers are completely
638 // arbitrary, and are not tied to any machine-level encodings.)
639 #ifdef _LP64
640 if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
641 array->append(new ConstantIntValue(0));
642 array->append(new_loc_value( _regalloc, regnum, Location::dbl ));
643 } else if ( t->base() == Type::Long ) {
644 array->append(new ConstantIntValue(0));
645 array->append(new_loc_value( _regalloc, regnum, Location::lng ));
646 } else if ( t->base() == Type::RawPtr ) {
647 // jsr/ret return address which must be restored into the full-width
648 // 64-bit stack slot.
649 array->append(new_loc_value( _regalloc, regnum, Location::lng ));
650 }
651 #else //_LP64
652 #ifdef SPARC
653 if (t->base() == Type::Long && OptoReg::is_reg(regnum)) {
654 // For SPARC we have to swap high and low words for
655 // long values stored in a single-register (g0-g7).
656 array->append(new_loc_value( _regalloc, regnum , Location::normal ));
657 array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
658 } else
659 #endif //SPARC
660 if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
661 // Repack the double/long as two jints.
662 // The convention the interpreter uses is that the second local
663 // holds the first raw word of the native double representation.
664 // This is actually reasonable, since locals and stack arrays
665 // grow downwards in all implementations.
666 // (If, on some machine, the interpreter's Java locals or stack
667 // were to grow upwards, the embedded doubles would be word-swapped.)
668 array->append(new_loc_value( _regalloc, OptoReg::add(regnum,1), Location::normal ));
669 array->append(new_loc_value( _regalloc, regnum , Location::normal ));
670 }
671 #endif //_LP64
672 else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
673 OptoReg::is_reg(regnum) ) {
674 array->append(new_loc_value( _regalloc, regnum, Matcher::float_in_double
675 ? Location::float_in_dbl : Location::normal ));
676 } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
677 array->append(new_loc_value( _regalloc, regnum, Matcher::int_in_long
678 ? Location::int_in_long : Location::normal ));
679 } else if( t->base() == Type::NarrowOop ) {
680 array->append(new_loc_value( _regalloc, regnum, Location::narrowoop ));
681 } else {
682 array->append(new_loc_value( _regalloc, regnum, _regalloc->is_oop(local) ? Location::oop : Location::normal ));
683 }
684 return;
685 }
687 // No register. It must be constant data.
688 switch (t->base()) {
689 case Type::Half: // Second half of a double
690 ShouldNotReachHere(); // Caller should skip 2nd halves
691 break;
692 case Type::AnyPtr:
693 array->append(new ConstantOopWriteValue(NULL));
694 break;
695 case Type::AryPtr:
696 case Type::InstPtr:
697 case Type::KlassPtr: // fall through
698 array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->encoding()));
699 break;
700 case Type::NarrowOop:
701 if (t == TypeNarrowOop::NULL_PTR) {
702 array->append(new ConstantOopWriteValue(NULL));
703 } else {
704 array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->encoding()));
705 }
706 break;
707 case Type::Int:
708 array->append(new ConstantIntValue(t->is_int()->get_con()));
709 break;
710 case Type::RawPtr:
711 // A return address (T_ADDRESS).
712 assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
713 #ifdef _LP64
714 // Must be restored to the full-width 64-bit stack slot.
715 array->append(new ConstantLongValue(t->is_ptr()->get_con()));
716 #else
717 array->append(new ConstantIntValue(t->is_ptr()->get_con()));
718 #endif
719 break;
720 case Type::FloatCon: {
721 float f = t->is_float_constant()->getf();
722 array->append(new ConstantIntValue(jint_cast(f)));
723 break;
724 }
725 case Type::DoubleCon: {
726 jdouble d = t->is_double_constant()->getd();
727 #ifdef _LP64
728 array->append(new ConstantIntValue(0));
729 array->append(new ConstantDoubleValue(d));
730 #else
731 // Repack the double as two jints.
732 // The convention the interpreter uses is that the second local
733 // holds the first raw word of the native double representation.
734 // This is actually reasonable, since locals and stack arrays
735 // grow downwards in all implementations.
736 // (If, on some machine, the interpreter's Java locals or stack
737 // were to grow upwards, the embedded doubles would be word-swapped.)
738 jint *dp = (jint*)&d;
739 array->append(new ConstantIntValue(dp[1]));
740 array->append(new ConstantIntValue(dp[0]));
741 #endif
742 break;
743 }
744 case Type::Long: {
745 jlong d = t->is_long()->get_con();
746 #ifdef _LP64
747 array->append(new ConstantIntValue(0));
748 array->append(new ConstantLongValue(d));
749 #else
750 // Repack the long as two jints.
751 // The convention the interpreter uses is that the second local
752 // holds the first raw word of the native long representation.
753 // This is actually reasonable, since locals and stack arrays
754 // grow downwards in all implementations.
755 // (If, on some machine, the interpreter's Java locals or stack
756 // were to grow upwards, the embedded doubles would be word-swapped.)
757 jint *dp = (jint*)&d;
758 array->append(new ConstantIntValue(dp[1]));
759 array->append(new ConstantIntValue(dp[0]));
760 #endif
761 break;
762 }
763 case Type::Top: // Add an illegal value here
764 array->append(new LocationValue(Location()));
765 break;
766 default:
767 ShouldNotReachHere();
768 break;
769 }
770 }
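// A standalone sketch (separate program) of the 32-bit repacking
// convention used above: a double is recorded as two jints, and the
// *second* local holds the first raw word of the native representation.
#include <cstdio>
#include <cstring>
#include <stdint.h>

int main() {
  double d = 2.5;
  uint32_t dp[2];
  memcpy(dp, &d, sizeof d);  // dp[0] is the first raw word of d
  // Mirror of "append dp[1] then dp[0]": the later slot gets dp[0].
  uint32_t slot_n = dp[1], slot_n1 = dp[0];
  printf("slot[n]=0x%08x slot[n+1]=0x%08x\n", slot_n, slot_n1);
  return 0;
}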
772 // Determine if this node starts a bundle
773 bool Compile::starts_bundle(const Node *n) const {
774 return (_node_bundling_limit > n->_idx &&
775 _node_bundling_base[n->_idx].starts_bundle());
776 }
778 //--------------------------Process_OopMap_Node--------------------------------
779 void Compile::Process_OopMap_Node(MachNode *mach, int current_offset) {
781 // Handle special safepoint nodes for synchronization
782 MachSafePointNode *sfn = mach->as_MachSafePoint();
783 MachCallNode *mcall;
785 #ifdef ENABLE_ZAP_DEAD_LOCALS
786 assert( is_node_getting_a_safepoint(mach), "logic does not match; false negative");
787 #endif
789 int safepoint_pc_offset = current_offset;
791 // Add the safepoint in the DebugInfoRecorder
792 if( !mach->is_MachCall() ) {
793 mcall = NULL;
794 debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
795 } else {
796 mcall = mach->as_MachCall();
797 safepoint_pc_offset += mcall->ret_addr_offset();
798 debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
799 }
801 // Loop over the JVMState list to add scope information
802 // Do not skip safepoints with a NULL method, they need monitor info
803 JVMState* youngest_jvms = sfn->jvms();
804 int max_depth = youngest_jvms->depth();
806 // Allocate the object pool for scalar-replaced objects -- the map from
807 // small-integer keys (which can be recorded in the local and ostack
808 // arrays) to descriptions of the object state.
809 GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
811 // Visit scopes from oldest to youngest.
812 for (int depth = 1; depth <= max_depth; depth++) {
813 JVMState* jvms = youngest_jvms->of_depth(depth);
814 int idx;
815 ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
816 // Safepoints that do not have method() set only provide oop-map and monitor info
817 // to support GC; these do not support deoptimization.
818 int num_locs = (method == NULL) ? 0 : jvms->loc_size();
819 int num_exps = (method == NULL) ? 0 : jvms->stk_size();
820 int num_mon = jvms->nof_monitors();
821 assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(),
822 "JVMS local count must match that of the method");
824 // Add Local and Expression Stack Information
826 // Insert locals into the locarray
827 GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
828 for( idx = 0; idx < num_locs; idx++ ) {
829 FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs );
830 }
832 // Insert expression stack entries into the exparray
833 GrowableArray<ScopeValue*> *exparray = new GrowableArray<ScopeValue*>(num_exps);
834 for( idx = 0; idx < num_exps; idx++ ) {
835 FillLocArray( idx, sfn, sfn->stack(jvms, idx), exparray, objs );
836 }
838 // Add in mappings of the monitors
839 assert( !method ||
840 !method->is_synchronized() ||
841 method->is_native() ||
842 num_mon > 0 ||
843 !GenerateSynchronizationCode,
844 "monitors must always exist for synchronized methods");
846 // Build the growable array of MonitorValues for the monitors
847 GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
849 // Loop over monitors and insert into array
850 for(idx = 0; idx < num_mon; idx++) {
851 // Grab the node that defines this monitor
852 Node* box_node = sfn->monitor_box(jvms, idx);
853 Node* obj_node = sfn->monitor_obj(jvms, idx);
855 // Create ScopeValue for object
856 ScopeValue *scval = NULL;
858 if( obj_node->is_SafePointScalarObject() ) {
859 SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
860 scval = Compile::sv_for_node_id(objs, spobj->_idx);
861 if (scval == NULL) {
862 const Type *t = obj_node->bottom_type();
863 ciKlass* cik = t->is_oopptr()->klass();
864 assert(cik->is_instance_klass() ||
865 cik->is_array_klass(), "Not supported allocation.");
866 ObjectValue* sv = new ObjectValue(spobj->_idx,
867 new ConstantOopWriteValue(cik->encoding()));
868 Compile::set_sv_for_object_node(objs, sv);
870 uint first_ind = spobj->first_index();
871 for (uint i = 0; i < spobj->n_fields(); i++) {
872 Node* fld_node = sfn->in(first_ind+i);
873 (void)FillLocArray(sv->field_values()->length(), sfn, fld_node, sv->field_values(), objs);
874 }
875 scval = sv;
876 }
877 } else if( !obj_node->is_Con() ) {
878 OptoReg::Name obj_reg = _regalloc->get_reg_first(obj_node);
879 if( obj_node->bottom_type()->base() == Type::NarrowOop ) {
880 scval = new_loc_value( _regalloc, obj_reg, Location::narrowoop );
881 } else {
882 scval = new_loc_value( _regalloc, obj_reg, Location::oop );
883 }
884 } else {
885 const TypePtr *tp = obj_node->bottom_type()->make_ptr();
886 scval = new ConstantOopWriteValue(tp->is_instptr()->const_oop()->encoding());
887 }
889 OptoReg::Name box_reg = BoxLockNode::stack_slot(box_node);
890 Location basic_lock = Location::new_stk_loc(Location::normal,_regalloc->reg2offset(box_reg));
891 while( !box_node->is_BoxLock() ) box_node = box_node->in(1);
892 monarray->append(new MonitorValue(scval, basic_lock, box_node->as_BoxLock()->is_eliminated()));
893 }
895 // We dump the object pool first, since deoptimization reads it in first.
896 debug_info()->dump_object_pool(objs);
898 // Build first class objects to pass to scope
899 DebugToken *locvals = debug_info()->create_scope_values(locarray);
900 DebugToken *expvals = debug_info()->create_scope_values(exparray);
901 DebugToken *monvals = debug_info()->create_monitor_values(monarray);
903 // Make method available for all Safepoints
904 ciMethod* scope_method = method ? method : _method;
905 // Describe the scope here
906 assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
907 // Now we can describe the scope.
908 debug_info()->describe_scope(safepoint_pc_offset,scope_method,jvms->bci(),locvals,expvals,monvals);
909 } // End jvms loop
911 // Mark the end of the scope set.
912 debug_info()->end_safepoint(safepoint_pc_offset);
913 }
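// Scopes are visited oldest (outermost) to youngest (innermost) so the
// recorder sees caller frames before their inlinees. A generic standalone
// sketch (separate program; names assumed) of walking such a chain:
#include <cstddef>
#include <cstdio>

struct JVMStateLite {
  int bci;
  JVMStateLite* caller;  // NULL for the outermost frame
  int depth() const { return caller ? caller->depth() + 1 : 1; }
  JVMStateLite* of_depth(int d) {
    return depth() == d ? this : caller->of_depth(d);
  }
};

int main() {
  JVMStateLite outer = { 10, NULL };
  JVMStateLite inner = { 42, &outer };
  for (int d = 1; d <= inner.depth(); d++)  // oldest to youngest
    printf("depth %d bci %d\n", d, inner.of_depth(d)->bci);
  return 0;
}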
917 // A simplified version of Process_OopMap_Node, to handle non-safepoints.
918 class NonSafepointEmitter {
919 Compile* C;
920 JVMState* _pending_jvms;
921 int _pending_offset;
923 void emit_non_safepoint();
925 public:
926 NonSafepointEmitter(Compile* compile) {
927 this->C = compile;
928 _pending_jvms = NULL;
929 _pending_offset = 0;
930 }
932 void observe_instruction(Node* n, int pc_offset) {
933 if (!C->debug_info()->recording_non_safepoints()) return;
935 Node_Notes* nn = C->node_notes_at(n->_idx);
936 if (nn == NULL || nn->jvms() == NULL) return;
937 if (_pending_jvms != NULL &&
938 _pending_jvms->same_calls_as(nn->jvms())) {
939 // Repeated JVMS? Stretch it up here.
940 _pending_offset = pc_offset;
941 } else {
942 if (_pending_jvms != NULL &&
943 _pending_offset < pc_offset) {
944 emit_non_safepoint();
945 }
946 _pending_jvms = NULL;
947 if (pc_offset > C->debug_info()->last_pc_offset()) {
948 // This is the only way _pending_jvms can become non-NULL:
949 _pending_jvms = nn->jvms();
950 _pending_offset = pc_offset;
951 }
952 }
953 }
955 // Stay out of the way of real safepoints:
956 void observe_safepoint(JVMState* jvms, int pc_offset) {
957 if (_pending_jvms != NULL &&
958 !_pending_jvms->same_calls_as(jvms) &&
959 _pending_offset < pc_offset) {
960 emit_non_safepoint();
961 }
962 _pending_jvms = NULL;
963 }
965 void flush_at_end() {
966 if (_pending_jvms != NULL) {
967 emit_non_safepoint();
968 }
969 _pending_jvms = NULL;
970 }
971 };
973 void NonSafepointEmitter::emit_non_safepoint() {
974 JVMState* youngest_jvms = _pending_jvms;
975 int pc_offset = _pending_offset;
977 // Clear it now:
978 _pending_jvms = NULL;
980 DebugInformationRecorder* debug_info = C->debug_info();
981 assert(debug_info->recording_non_safepoints(), "sanity");
983 debug_info->add_non_safepoint(pc_offset);
984 int max_depth = youngest_jvms->depth();
986 // Visit scopes from oldest to youngest.
987 for (int depth = 1; depth <= max_depth; depth++) {
988 JVMState* jvms = youngest_jvms->of_depth(depth);
989 ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
990 debug_info->describe_scope(pc_offset, method, jvms->bci());
991 }
993 // Mark the end of the scope set.
994 debug_info->end_non_safepoint(pc_offset);
995 }
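// The emitter above is a debouncing pattern: remember the most recent
// JVMState seen, stretch its range while the state repeats, and flush
// once it changes or the stream ends. A generic standalone sketch of
// the same pattern (separate program; names assumed):
#include <cstdio>

struct Debouncer {
  int _pending_key, _pending_offset;
  Debouncer() : _pending_key(-1), _pending_offset(0) {}
  void observe(int key, int offset) {
    if (_pending_key == key) { _pending_offset = offset; return; }  // stretch
    flush();
    _pending_key = key;
    _pending_offset = offset;
  }
  void flush() {
    if (_pending_key >= 0)
      printf("emit key=%d at offset %d\n", _pending_key, _pending_offset);
    _pending_key = -1;
  }
};

int main() {
  Debouncer d;
  d.observe(7, 4);   // new state
  d.observe(7, 8);   // same state: stretched to offset 8
  d.observe(9, 12);  // change: emits (7, 8), starts (9, 12)
  d.flush();         // end: emits (9, 12)
  return 0;
}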
999 // helper for Fill_buffer bailout logic
1000 static void turn_off_compiler(Compile* C) {
1001 if (CodeCache::unallocated_capacity() >= CodeCacheMinimumFreeSpace*10) {
1002 // Do not turn off compilation if a single giant method has
1003 // blown the code cache size.
1004 C->record_failure("excessive request to CodeCache");
1005 } else {
1006 // Let CompilerBroker disable further compilations.
1007 C->record_failure("CodeCache is full");
1008 }
1009 }
1012 //------------------------------Fill_buffer------------------------------------
1013 void Compile::Fill_buffer() {
1015 // Set the initially allocated size
1016 int code_req = initial_code_capacity;
1017 int locs_req = initial_locs_capacity;
1018 int stub_req = TraceJumps ? initial_stub_capacity * 10 : initial_stub_capacity;
1019 int const_req = initial_const_capacity;
1020 bool labels_not_set = true;
1022 int pad_req = NativeCall::instruction_size;
1023 // The extra spacing after the code is necessary on some platforms.
1024 // Sometimes we need to patch in a jump after the last instruction,
1025 // if the nmethod has been deoptimized. (See 4932387, 4894843.)
1027 uint i;
1028 // Compute the byte offset where we can store the deopt pc.
1029 if (fixed_slots() != 0) {
1030 _orig_pc_slot_offset_in_bytes = _regalloc->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
1031 }
1033 // Compute prolog code size
1034 _method_size = 0;
1035 _frame_slots = OptoReg::reg2stack(_matcher->_old_SP)+_regalloc->_framesize;
1036 #ifdef IA64
1037 if (save_argument_registers()) {
1038 // 4815101: this is a stub with implicit and unknown precision fp args.
1039 // The usual spill mechanism can only generate stfd's in this case, which
1040 // doesn't work if the fp reg to spill contains a single-precision denorm.
1041 // Instead, we hack around the normal spill mechanism using stfspill's and
1042 // ldffill's in the MachProlog and MachEpilog emit methods. We allocate
1043 // space here for the fp arg regs (f8-f15) we're going to spill this way.
1044 //
1045 // If we ever implement 16-byte 'registers' == stack slots, we can
1046 // get rid of this hack and have SpillCopy generate stfspill/ldffill
1047 // instead of stfd/stfs/ldfd/ldfs.
1048 _frame_slots += 8*(16/BytesPerInt);
1049 }
1050 #endif
1051 assert( _frame_slots >= 0 && _frame_slots < 1000000, "sanity check" );
1053 // Create an array of unused labels, one for each basic block
1054 Label *blk_labels = NEW_RESOURCE_ARRAY(Label, _cfg->_num_blocks+1);
1056 for( i=0; i <= _cfg->_num_blocks; i++ ) {
1057 blk_labels[i].init();
1058 }
1060 // If this machine supports different size branch offsets, then pre-compute
1061 // the length of the blocks
1062 if( _matcher->is_short_branch_offset(-1, 0) ) {
1063 Shorten_branches(blk_labels, code_req, locs_req, stub_req, const_req);
1064 labels_not_set = false;
1065 }
1067 // nmethod and CodeBuffer count stubs & constants as part of method's code.
1068 int exception_handler_req = size_exception_handler();
1069 int deopt_handler_req = size_deopt_handler();
1070 exception_handler_req += MAX_stubs_size; // add marginal slop for handler
1071 deopt_handler_req += MAX_stubs_size; // add marginal slop for handler
1072 stub_req += MAX_stubs_size; // ensure per-stub margin
1073 code_req += MAX_inst_size; // ensure per-instruction margin
1074 if (StressCodeBuffers)
1075 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
1076 int total_req = code_req + pad_req + stub_req + exception_handler_req + deopt_handler_req + const_req;
1077 CodeBuffer* cb = code_buffer();
1078 cb->initialize(total_req, locs_req);
1080 // Have we run out of code space?
1081 if (cb->blob() == NULL) {
1082 turn_off_compiler(this);
1083 return;
1084 }
1085 // Configure the code buffer.
1086 cb->initialize_consts_size(const_req);
1087 cb->initialize_stubs_size(stub_req);
1088 cb->initialize_oop_recorder(env()->oop_recorder());
1090 // fill in the nop array for bundling computations
1091 MachNode *_nop_list[Bundle::_nop_count];
1092 Bundle::initialize_nops(_nop_list, this);
1094 // Create oopmap set.
1095 _oop_map_set = new OopMapSet();
1097 // !!!!! This preserves old handling of oopmaps for now
1098 debug_info()->set_oopmaps(_oop_map_set);
1100 // Count and start of implicit null check instructions
1101 uint inct_cnt = 0;
1102 uint *inct_starts = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
1104 // Count and start of calls
1105 uint *call_returns = NEW_RESOURCE_ARRAY(uint, _cfg->_num_blocks+1);
1107 uint return_offset = 0;
1108 MachNode *nop = new (this) MachNopNode();
1110 int previous_offset = 0;
1111 int current_offset = 0;
1112 int last_call_offset = -1;
1114 // Create an array of node offsets, one for each node, if printing is enabled
1115 #ifndef PRODUCT
1116 int *node_offsets = NULL;
1117 uint node_offset_limit = unique();
1120 if ( print_assembly() )
1121 node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
1122 #endif
1124 NonSafepointEmitter non_safepoints(this); // emit non-safepoints lazily
1126 // ------------------
1127 // Now fill in the code buffer
1128 Node *delay_slot = NULL;
1130 for( i=0; i < _cfg->_num_blocks; i++ ) {
1131 Block *b = _cfg->_blocks[i];
1133 Node *head = b->head();
1135 // If this block needs to start aligned (i.e., can be reached other
1136 // than by falling-thru from the previous block), then force the
1137 // start of a new bundle.
1138 if( Pipeline::requires_bundling() && starts_bundle(head) )
1139 cb->flush_bundle(true);
1141 // Define the label at the beginning of the basic block
1142 if( labels_not_set )
1143 MacroAssembler(cb).bind( blk_labels[b->_pre_order] );
1145 else
1146 assert( blk_labels[b->_pre_order].loc_pos() == cb->code_size(),
1147 "label position does not match code offset" );
1149 uint last_inst = b->_nodes.size();
1151 // Emit block normally, except for last instruction.
1152 // Emit means "dump code bits into code buffer".
1153 for( uint j = 0; j<last_inst; j++ ) {
1155 // Get the node
1156 Node* n = b->_nodes[j];
1158 // See if delay slots are supported
1159 if (valid_bundle_info(n) &&
1160 node_bundling(n)->used_in_unconditional_delay()) {
1161 assert(delay_slot == NULL, "no use of delay slot node");
1162 assert(n->size(_regalloc) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
1164 delay_slot = n;
1165 continue;
1166 }
1168 // If this starts a new instruction group, then flush the current one
1169 // (but allow split bundles)
1170 if( Pipeline::requires_bundling() && starts_bundle(n) )
1171 cb->flush_bundle(false);
1173 // The following logic is duplicated in the code ifdeffed for
1174 // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It
1175 // should be factored out. Or maybe dispersed to the nodes?
1177 // Special handling for SafePoint/Call Nodes
1178 bool is_mcall = false;
1179 if( n->is_Mach() ) {
1180 MachNode *mach = n->as_Mach();
1181 is_mcall = n->is_MachCall();
1182 bool is_sfn = n->is_MachSafePoint();
1184 // If this requires all previous instructions be flushed, then do so
1185 if( is_sfn || is_mcall || mach->alignment_required() != 1) {
1186 cb->flush_bundle(true);
1187 current_offset = cb->code_size();
1188 }
1190 // align the instruction if necessary
1191 int nop_size = nop->size(_regalloc);
1192 int padding = mach->compute_padding(current_offset);
1193 // Make sure safepoint node for polling is distinct from a call's
1194 // return by adding a nop if needed.
1195 if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset ) {
1196 padding = nop_size;
1197 }
1198 assert( labels_not_set || padding == 0, "instruction should already be aligned");
1200 if(padding > 0) {
1201 assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
1202 int nops_cnt = padding / nop_size;
1203 MachNode *nop = new (this) MachNopNode(nops_cnt);
1204 b->_nodes.insert(j++, nop);
1205 last_inst++;
1206 _cfg->_bbs.map( nop->_idx, b );
1207 nop->emit(*cb, _regalloc);
1208 cb->flush_bundle(true);
1209 current_offset = cb->code_size();
1210 }
1212 // Remember the start of the last call in a basic block
1213 if (is_mcall) {
1214 MachCallNode *mcall = mach->as_MachCall();
1216 // This destination address is NOT PC-relative
1217 mcall->method_set((intptr_t)mcall->entry_point());
1219 // Save the return address
1220 call_returns[b->_pre_order] = current_offset + mcall->ret_addr_offset();
1222 if (!mcall->is_safepoint_node()) {
1223 is_mcall = false;
1224 is_sfn = false;
1225 }
1226 }
1228 // sfn will be valid whenever mcall is valid now because of inheritance
1229 if( is_sfn || is_mcall ) {
1231 // Handle special safepoint nodes for synchronization
1232 if( !is_mcall ) {
1233 MachSafePointNode *sfn = mach->as_MachSafePoint();
1234 // !!!!! Stubs only need an oopmap right now, so bail out
1235 if( sfn->jvms()->method() == NULL) {
1236 // Write the oopmap directly to the code blob??!!
1237 # ifdef ENABLE_ZAP_DEAD_LOCALS
1238 assert( !is_node_getting_a_safepoint(sfn), "logic does not match; false positive");
1239 # endif
1240 continue;
1241 }
1242 } // End synchronization
1244 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1245 current_offset);
1246 Process_OopMap_Node(mach, current_offset);
1247 } // End if safepoint
1249 // If this is a null check, then add the start of the previous instruction to the list
1250 else if( mach->is_MachNullCheck() ) {
1251 inct_starts[inct_cnt++] = previous_offset;
1252 }
1254 // If this is a branch, then fill in the label with the target BB's label
1255 else if ( mach->is_Branch() ) {
1257 if ( mach->ideal_Opcode() == Op_Jump ) {
1258 for (uint h = 0; h < b->_num_succs; h++ ) {
1259 Block* succs_block = b->_succs[h];
1260 for (uint j = 1; j < succs_block->num_preds(); j++) {
1261 Node* jpn = succs_block->pred(j);
1262 if ( jpn->is_JumpProj() && jpn->in(0) == mach ) {
1263 uint block_num = succs_block->non_connector()->_pre_order;
1264 Label *blkLabel = &blk_labels[block_num];
1265 mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
1266 }
1267 }
1268 }
1269 } else {
1270 // For Branches
1271 // This requires the TRUE branch target be in succs[0]
1272 uint block_num = b->non_connector_successor(0)->_pre_order;
1273 mach->label_set( blk_labels[block_num], block_num );
1274 }
1275 }
1277 #ifdef ASSERT
1278 // Check that oop-store precedes the card-mark
1279 else if( mach->ideal_Opcode() == Op_StoreCM ) {
1280 uint storeCM_idx = j;
1281 Node *oop_store = mach->in(mach->_cnt); // First precedence edge
1282 assert( oop_store != NULL, "storeCM expects a precedence edge");
1283 uint i4;
1284 for( i4 = 0; i4 < last_inst; ++i4 ) {
1285 if( b->_nodes[i4] == oop_store ) break;
1286 }
1287 // Note: This test can provide a false failure if other precedence
1288 // edges have been added to the storeCMNode.
1289 assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
1290 }
1291 #endif
1293 else if( !n->is_Proj() ) {
1294 // Remember the beginning of the previous instruction, in case
1295 // it's followed by a flag-kill and a null-check. Happens on
1296 // Intel all the time, with add-to-memory kind of opcodes.
1297 previous_offset = current_offset;
1298 }
1299 }
1301 // Verify that there is sufficient space remaining
1302 cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
1303 if (cb->blob() == NULL) {
1304 turn_off_compiler(this);
1305 return;
1306 }
1308 // Save the offset for the listing
1309 #ifndef PRODUCT
1310 if( node_offsets && n->_idx < node_offset_limit )
1311 node_offsets[n->_idx] = cb->code_size();
1312 #endif
1314 // "Normal" instruction case
1315 n->emit(*cb, _regalloc);
1316 current_offset = cb->code_size();
1317 non_safepoints.observe_instruction(n, current_offset);
1319 // mcall is the last "call" that can be a safepoint.
1320 // Record it so we can see if a poll will directly follow it,
1321 // in which case we'll need a pad to make the PcDesc sites unique;
1322 // see 5010568. This can be slightly inaccurate but conservative
1323 // in the case that the return address is not actually at current_offset.
1324 // This is a small price to pay.
1326 if (is_mcall) {
1327 last_call_offset = current_offset;
1328 }
1330 // See if this instruction has a delay slot
1331 if ( valid_bundle_info(n) && node_bundling(n)->use_unconditional_delay()) {
1332 assert(delay_slot != NULL, "expecting delay slot node");
1334 // Back up 1 instruction
1335 cb->set_code_end(
1336 cb->code_end()-Pipeline::instr_unit_size());
1338 // Save the offset for the listing
1339 #ifndef PRODUCT
1340 if( node_offsets && delay_slot->_idx < node_offset_limit )
1341 node_offsets[delay_slot->_idx] = cb->code_size();
1342 #endif
1344 // Support a SafePoint in the delay slot
1345 if( delay_slot->is_MachSafePoint() ) {
1346 MachNode *mach = delay_slot->as_Mach();
1347 // !!!!! Stubs only need an oopmap right now, so bail out
1348 if( !mach->is_MachCall() && mach->as_MachSafePoint()->jvms()->method() == NULL ) {
1349 // Write the oopmap directly to the code blob??!!
1350 # ifdef ENABLE_ZAP_DEAD_LOCALS
1351 assert( !is_node_getting_a_safepoint(mach), "logic does not match; false positive");
1352 # endif
1353 delay_slot = NULL;
1354 continue;
1355 }
1357 int adjusted_offset = current_offset - Pipeline::instr_unit_size();
1358 non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
1359 adjusted_offset);
1360 // Generate an OopMap entry
1361 Process_OopMap_Node(mach, adjusted_offset);
1362 }
1364 // Insert the delay slot instruction
1365 delay_slot->emit(*cb, _regalloc);
1367 // Don't reuse it
1368 delay_slot = NULL;
1369 }
1371 } // End for all instructions in block
1373 // If the next block is the top of a loop, pad this block out to align
1374 // the loop top a little. Helps prevent pipe stalls at loop back branches.
1375 int nop_size = (new (this) MachNopNode())->size(_regalloc);
1376 if( i<_cfg->_num_blocks-1 ) {
1377 Block *nb = _cfg->_blocks[i+1];
1378 uint padding = nb->alignment_padding(current_offset);
1379 if( padding > 0 ) {
1380 MachNode *nop = new (this) MachNopNode(padding / nop_size);
1381 b->_nodes.insert( b->_nodes.size(), nop );
1382 _cfg->_bbs.map( nop->_idx, b );
1383 nop->emit(*cb, _regalloc);
1384 current_offset = cb->code_size();
1385 }
1386 }
1388 } // End of for all blocks
1390 non_safepoints.flush_at_end();
1392 // Offset too large?
1393 if (failing()) return;
1395 // Define a pseudo-label at the end of the code
1396 MacroAssembler(cb).bind( blk_labels[_cfg->_num_blocks] );
1398 // Compute the size of the first block
1399 _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos();
1401 assert(cb->code_size() < 500000, "method is unreasonably large");
1403 // ------------------
1405 #ifndef PRODUCT
1406 // Information on the size of the method, without the extraneous code
1407 Scheduling::increment_method_size(cb->code_size());
1408 #endif
1410 // ------------------
1411 // Fill in exception table entries.
1412 FillExceptionTables(inct_cnt, call_returns, inct_starts, blk_labels);
1414 // Only java methods have exception handlers and deopt handlers
1415 if (_method) {
1416 // Emit the exception handler code.
1417 _code_offsets.set_value(CodeOffsets::Exceptions, emit_exception_handler(*cb));
1418 // Emit the deopt handler code.
1419 _code_offsets.set_value(CodeOffsets::Deopt, emit_deopt_handler(*cb));
1420 }
1422 // One last check for failed CodeBuffer::expand:
1423 if (cb->blob() == NULL) {
1424 turn_off_compiler(this);
1425 return;
1426 }
1428 #ifndef PRODUCT
1429 // Dump the assembly code, including basic-block numbers
1430 if (print_assembly()) {
1431 ttyLocker ttyl; // keep the following output all in one block
1432 if (!VMThread::should_terminate()) { // test this under the tty lock
1433 // This output goes directly to the tty, not the compiler log.
1434 // To enable tools to match it up with the compilation activity,
1435 // be sure to tag this tty output with the compile ID.
1436 if (xtty != NULL) {
1437 xtty->head("opto_assembly compile_id='%d'%s", compile_id(),
1438 is_osr_compilation() ? " compile_kind='osr'" :
1439 "");
1440 }
1441 if (method() != NULL) {
1442 method()->print_oop();
1443 print_codes();
1444 }
1445 dump_asm(node_offsets, node_offset_limit);
1446 if (xtty != NULL) {
1447 xtty->tail("opto_assembly");
1448 }
1449 }
1450 }
1451 #endif
1453 }
1455 void Compile::FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels) {
1456 _inc_table.set_size(cnt);
1458 uint inct_cnt = 0;
1459 for( uint i=0; i<_cfg->_num_blocks; i++ ) {
1460 Block *b = _cfg->_blocks[i];
1461 Node *n = NULL;
1462 int j;
1464 // Find the branch; ignore trailing NOPs.
1465 for( j = b->_nodes.size()-1; j>=0; j-- ) {
1466 n = b->_nodes[j];
1467 if( !n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con )
1468 break;
1469 }
1471 // If we didn't find anything, continue
1472 if( j < 0 ) continue;
1474 // Compute ExceptionHandlerTable subtable entry and add it
1475 // (skip empty blocks)
1476 if( n->is_Catch() ) {
1478 // Get the offset of the return from the call
1479 uint call_return = call_returns[b->_pre_order];
1480 #ifdef ASSERT
1481 assert( call_return > 0, "no call seen for this basic block" );
1482 while( b->_nodes[--j]->Opcode() == Op_MachProj ) ;
1483 assert( b->_nodes[j]->is_Call(), "CatchProj must follow call" );
1484 #endif
1485 // last instruction is a CatchNode; find its CatchProjNodes
1486 int nof_succs = b->_num_succs;
1487 // allocate space
1488 GrowableArray<intptr_t> handler_bcis(nof_succs);
1489 GrowableArray<intptr_t> handler_pcos(nof_succs);
1490 // iterate through all successors
1491 for (int j = 0; j < nof_succs; j++) {
1492 Block* s = b->_succs[j];
1493 bool found_p = false;
1494 for( uint k = 1; k < s->num_preds(); k++ ) {
1495 Node *pk = s->pred(k);
1496 if( pk->is_CatchProj() && pk->in(0) == n ) {
1497 const CatchProjNode* p = pk->as_CatchProj();
1498 found_p = true;
1499 // add the corresponding handler bci & pco information
1500 if( p->_con != CatchProjNode::fall_through_index ) {
1501 // p leads to an exception handler (and is not fall through)
1502 assert(s == _cfg->_blocks[s->_pre_order],"bad numbering");
1503 // no duplicates, please
1504 if( !handler_bcis.contains(p->handler_bci()) ) {
1505 uint block_num = s->non_connector()->_pre_order;
1506 handler_bcis.append(p->handler_bci());
1507 handler_pcos.append(blk_labels[block_num].loc_pos());
1508 }
1509 }
1510 }
1511 }
1512 assert(found_p, "no matching predecessor found");
1513 // Note: Due to empty block removal, one block may have
1514 // several CatchProj inputs, from the same Catch.
1515 }
1517 // Set the offset of the return from the call
1518 _handler_table.add_subtable(call_return, &handler_bcis, NULL, &handler_pcos);
1519 continue;
1520 }
1522 // Handle implicit null exception table updates
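// A MachNullCheck replaces an explicit null test with the memory access's
// own fault; record the pair (faulting offset, continuation-block offset)
// in the implicit-null-check table.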
1523 if( n->is_MachNullCheck() ) {
1524 uint block_num = b->non_connector_successor(0)->_pre_order;
1525 _inc_table.append( inct_starts[inct_cnt++], blk_labels[block_num].loc_pos() );
1526 continue;
1527 }
1528 } // End of for all blocks fill in exception table entries
1529 }
1531 // Static Variables
1532 #ifndef PRODUCT
1533 uint Scheduling::_total_nop_size = 0;
1534 uint Scheduling::_total_method_size = 0;
1535 uint Scheduling::_total_branches = 0;
1536 uint Scheduling::_total_unconditional_delays = 0;
1537 uint Scheduling::_total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
1538 #endif
1540 // Initializer for class Scheduling
1542 Scheduling::Scheduling(Arena *arena, Compile &compile)
1543 : _arena(arena),
1544 _cfg(compile.cfg()),
1545 _bbs(compile.cfg()->_bbs),
1546 _regalloc(compile.regalloc()),
1547 _reg_node(arena),
1548 _bundle_instr_count(0),
1549 _bundle_cycle_number(0),
1550 _scheduled(arena),
1551 _available(arena),
1552 _next_node(NULL),
1553 _bundle_use(0, 0, resource_count, &_bundle_use_elements[0]),
1554 _pinch_free_list(arena)
1555 #ifndef PRODUCT
1556 , _branches(0)
1557 , _unconditional_delays(0)
1558 #endif
1559 {
1560 // Create a MachNopNode
1561 _nop = new (&compile) MachNopNode();
1563 // Now that the nops are in the array, save the count
1564 // (but allow entries for the nops)
1565 _node_bundling_limit = compile.unique();
1566 uint node_max = _regalloc->node_regs_max_index();
1568 compile.set_node_bundling_limit(_node_bundling_limit);
1570 // This one is persistent within the Compile class
1571 _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max);
1573 // Allocate space for fixed-size arrays
1574 _node_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1575 _uses = NEW_ARENA_ARRAY(arena, short, node_max);
1576 _current_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max);
1578 // Clear the arrays
1579 memset(_node_bundling_base, 0, node_max * sizeof(Bundle));
1580 memset(_node_latency, 0, node_max * sizeof(unsigned short));
1581 memset(_uses, 0, node_max * sizeof(short));
1582 memset(_current_latency, 0, node_max * sizeof(unsigned short));
1584 // Clear the bundling information
1585 memcpy(_bundle_use_elements,
1586 Pipeline_Use::elaborated_elements,
1587 sizeof(Pipeline_Use::elaborated_elements));
1589 // Get the last node
1590 Block *bb = _cfg->_blocks[_cfg->_blocks.size()-1];
1592 _next_node = bb->_nodes[bb->_nodes.size()-1];
1593 }
1595 #ifndef PRODUCT
1596 // Scheduling destructor
1597 Scheduling::~Scheduling() {
1598 _total_branches += _branches;
1599 _total_unconditional_delays += _unconditional_delays;
1600 }
1601 #endif
1603 // Step ahead "i" cycles
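// Stepping closes out the current bundle: the instruction count and
// resource usage accumulated so far are recorded on the bundle info of
// _next_node (the most recently bundled node), then the per-cycle state
// is reset and the cycle number advanced.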
1604 void Scheduling::step(uint i) {
1606 Bundle *bundle = node_bundling(_next_node);
1607 bundle->set_starts_bundle();
1609 // Update the bundle record, but leave the flags information alone
1610 if (_bundle_instr_count > 0) {
1611 bundle->set_instr_count(_bundle_instr_count);
1612 bundle->set_resources_used(_bundle_use.resourcesUsed());
1613 }
1615 // Update the state information
1616 _bundle_instr_count = 0;
1617 _bundle_cycle_number += i;
1618 _bundle_use.step(i);
1619 }
1621 void Scheduling::step_and_clear() {
1622 Bundle *bundle = node_bundling(_next_node);
1623 bundle->set_starts_bundle();
1625 // Update the bundle record
1626 if (_bundle_instr_count > 0) {
1627 bundle->set_instr_count(_bundle_instr_count);
1628 bundle->set_resources_used(_bundle_use.resourcesUsed());
1630 _bundle_cycle_number += 1;
1631 }
1633 // Clear the bundling information
1634 _bundle_instr_count = 0;
1635 _bundle_use.reset();
1637 memcpy(_bundle_use_elements,
1638 Pipeline_Use::elaborated_elements,
1639 sizeof(Pipeline_Use::elaborated_elements));
1640 }
1642 //------------------------------ScheduleAndBundle------------------------------
1643 // Perform instruction scheduling and bundling over the sequence of
1644 // instructions in backwards order.
1645 void Compile::ScheduleAndBundle() {
1647 // Don't optimize this if it isn't a method
1648 if (!_method)
1649 return;
1651 // Don't optimize this if scheduling is disabled
1652 if (!do_scheduling())
1653 return;
1655 NOT_PRODUCT( TracePhase t2("isched", &_t_instrSched, TimeCompiler); )
1657 // Create a data structure for all the scheduling information
1658 Scheduling scheduling(Thread::current()->resource_area(), *this);
1660 // Walk backwards over all the basic blocks, scheduling and bundling the instructions
1662 scheduling.DoScheduling();
1663 }
1665 //------------------------------ComputeLocalLatenciesForward-------------------
1666 // Compute the latency of all the instructions. This is fairly simple,
1667 // because we already have a legal ordering. Walk over the instructions
1668 // from first to last, and compute the latency of the instruction based
1669 // on the latency of the preceding instruction(s).
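// For example, an instruction whose two inputs complete at latencies 3
// and 4, over edges of latency 2 and 1 respectively, starts no earlier
// than max(3+2, 4+1) = 5.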
1670 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) {
1671 #ifndef PRODUCT
1672 if (_cfg->C->trace_opto_output())
1673 tty->print("# -> ComputeLocalLatenciesForward\n");
1674 #endif
1676 // Walk over all the schedulable instructions
1677 for( uint j=_bb_start; j < _bb_end; j++ ) {
1679 // This is a kludge, forcing all latency calculations to start at 1.
1680 // Used to allow latency 0 to force an instruction to the beginning
1681 // of the bb
1682 uint latency = 1;
1683 Node *use = bb->_nodes[j];
1684 uint nlen = use->len();
1686 // Walk over all the inputs
1687 for ( uint k=0; k < nlen; k++ ) {
1688 Node *def = use->in(k);
1689 if (!def)
1690 continue;
1692 uint l = _node_latency[def->_idx] + use->latency(k);
1693 if (latency < l)
1694 latency = l;
1695 }
1697 _node_latency[use->_idx] = latency;
1699 #ifndef PRODUCT
1700 if (_cfg->C->trace_opto_output()) {
1701 tty->print("# latency %4d: ", latency);
1702 use->dump();
1703 }
1704 #endif
1705 }
1707 #ifndef PRODUCT
1708 if (_cfg->C->trace_opto_output())
1709 tty->print("# <- ComputeLocalLatenciesForward\n");
1710 #endif
1712 } // end ComputeLocalLatenciesForward
1714 // See if this node fits into the present instruction bundle
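// A node fits if its operands are ready (its latency has been reached),
// the bundle has instruction slots left this cycle, and its
// functional-unit usage does not conflict with resources already claimed
// in the bundle.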
1715 bool Scheduling::NodeFitsInBundle(Node *n) {
1716 uint n_idx = n->_idx;
1718 // If this is the unconditional delay instruction, then it fits
1719 if (n == _unconditional_delay_slot) {
1720 #ifndef PRODUCT
1721 if (_cfg->C->trace_opto_output())
1722 tty->print("# NodeFitsInBundle [%4d]: TRUE; is in unconditional delay slot\n", n->_idx);
1723 #endif
1724 return (true);
1725 }
1727 // If the node cannot be scheduled this cycle, skip it
1728 if (_current_latency[n_idx] > _bundle_cycle_number) {
1729 #ifndef PRODUCT
1730 if (_cfg->C->trace_opto_output())
1731 tty->print("# NodeFitsInBundle [%4d]: FALSE; latency %4d > %d\n",
1732 n->_idx, _current_latency[n_idx], _bundle_cycle_number);
1733 #endif
1734 return (false);
1735 }
1737 const Pipeline *node_pipeline = n->pipeline();
1739 uint instruction_count = node_pipeline->instructionCount();
1740 if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
1741 instruction_count = 0;
1742 else if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
1743 instruction_count++;
1745 if (_bundle_instr_count + instruction_count > Pipeline::_max_instrs_per_cycle) {
1746 #ifndef PRODUCT
1747 if (_cfg->C->trace_opto_output())
1748 tty->print("# NodeFitsInBundle [%4d]: FALSE; too many instructions: %d > %d\n",
1749 n->_idx, _bundle_instr_count + instruction_count, Pipeline::_max_instrs_per_cycle);
1750 #endif
1751 return (false);
1752 }
1754 // Don't allow non-machine nodes to be handled this way
1755 if (!n->is_Mach() && instruction_count == 0)
1756 return (false);
1758 // See if there is any overlap
1759 uint delay = _bundle_use.full_latency(0, node_pipeline->resourceUse());
1761 if (delay > 0) {
1762 #ifndef PRODUCT
1763 if (_cfg->C->trace_opto_output())
1764 tty->print("# NodeFitsInBundle [%4d]: FALSE; functional units overlap\n", n_idx);
1765 #endif
1766 return false;
1767 }
1769 #ifndef PRODUCT
1770 if (_cfg->C->trace_opto_output())
1771 tty->print("# NodeFitsInBundle [%4d]: TRUE\n", n_idx);
1772 #endif
1774 return true;
1775 }
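// Select the next node to schedule: prefer the first available node that
// fits in the current bundle; if nothing fits, fall back to the node at
// the head of the list (lowest latency, highest priority).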
1777 Node * Scheduling::ChooseNodeToBundle() {
1778 uint siz = _available.size();
1780 if (siz == 0) {
1782 #ifndef PRODUCT
1783 if (_cfg->C->trace_opto_output())
1784 tty->print("# ChooseNodeToBundle: NULL\n");
1785 #endif
1786 return (NULL);
1787 }
1789 // Fast path, if only 1 instruction is on the available list
1790 if (siz == 1) {
1791 #ifndef PRODUCT
1792 if (_cfg->C->trace_opto_output()) {
1793 tty->print("# ChooseNodeToBundle (only 1): ");
1794 _available[0]->dump();
1795 }
1796 #endif
1797 return (_available[0]);
1798 }
1800 // Don't bother if the bundle is already full
1801 if (_bundle_instr_count < Pipeline::_max_instrs_per_cycle) {
1802 for ( uint i = 0; i < siz; i++ ) {
1803 Node *n = _available[i];
1805 // Skip projections, we'll handle them another way
1806 if (n->is_Proj())
1807 continue;
1809 // This presupposes that instructions are inserted into the
1810 // available list in legality order; i.e., instructions that
1811 // must be inserted first are at the head of the list
1812 if (NodeFitsInBundle(n)) {
1813 #ifndef PRODUCT
1814 if (_cfg->C->trace_opto_output()) {
1815 tty->print("# ChooseNodeToBundle: ");
1816 n->dump();
1817 }
1818 #endif
1819 return (n);
1820 }
1821 }
1822 }
1824 // Nothing fits in this bundle; choose the highest-priority instruction
1825 #ifndef PRODUCT
1826 if (_cfg->C->trace_opto_output()) {
1827 tty->print("# ChooseNodeToBundle: ");
1828 _available[0]->dump();
1829 }
1830 #endif
1832 return _available[0];
1833 }
1835 //------------------------------AddNodeToAvailableList-------------------------
1836 void Scheduling::AddNodeToAvailableList(Node *n) {
1837 assert( !n->is_Proj(), "projections never directly made available" );
1838 #ifndef PRODUCT
1839 if (_cfg->C->trace_opto_output()) {
1840 tty->print("# AddNodeToAvailableList: ");
1841 n->dump();
1842 }
1843 #endif
1845 int latency = _current_latency[n->_idx];
1847 // Insert in latency order (insertion sort)
1848 uint i;
1849 for ( i=0; i < _available.size(); i++ )
1850 if (_current_latency[_available[i]->_idx] > latency)
1851 break;
1853 // Special check for compares following branches
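// Scheduling runs in reverse, so _scheduled[0] is the first node
// scheduled, i.e. the branch at the end of the block. If that branch
// tests n, move n to the front of its latency class so the compare is
// placed right next to its branch.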
1854 if( n->is_Mach() && _scheduled.size() > 0 ) {
1855 int op = n->as_Mach()->ideal_Opcode();
1856 Node *last = _scheduled[0];
1857 if( last->is_MachIf() && last->in(1) == n &&
1858 ( op == Op_CmpI ||
1859 op == Op_CmpU ||
1860 op == Op_CmpP ||
1861 op == Op_CmpF ||
1862 op == Op_CmpD ||
1863 op == Op_CmpL ) ) {
1865 // Recalculate position, moving to front of same latency
1866 for ( i=0 ; i < _available.size(); i++ )
1867 if (_current_latency[_available[i]->_idx] >= latency)
1868 break;
1869 }
1870 }
1872 // Insert the node in the available list
1873 _available.insert(i, n);
1875 #ifndef PRODUCT
1876 if (_cfg->C->trace_opto_output())
1877 dump_available();
1878 #endif
1879 }
1881 //------------------------------DecrementUseCounts-----------------------------
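// Called after a node is scheduled: each of its inputs loses one
// block-local use, and an input whose count reaches zero has no
// unscheduled uses left in the block, so it becomes available for
// scheduling itself.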
1882 void Scheduling::DecrementUseCounts(Node *n, const Block *bb) {
1883 for ( uint i=0; i < n->len(); i++ ) {
1884 Node *def = n->in(i);
1885 if (!def) continue;
1886 if( def->is_Proj() ) // If this is a machine projection, then
1887 def = def->in(0); // propagate usage through to the base instruction
1889 if( _bbs[def->_idx] != bb ) // Ignore if not block-local
1890 continue;
1892 // Compute the latency
1893 uint l = _bundle_cycle_number + n->latency(i);
1894 if (_current_latency[def->_idx] < l)
1895 _current_latency[def->_idx] = l;
1897 // If the def has no remaining uses, it becomes available for scheduling
1898 if ((--_uses[def->_idx]) == 0)
1899 AddNodeToAvailableList(def);
1900 }
1901 }
1903 //------------------------------AddNodeToBundle--------------------------------
1904 void Scheduling::AddNodeToBundle(Node *n, const Block *bb) {
1905 #ifndef PRODUCT
1906 if (_cfg->C->trace_opto_output()) {
1907 tty->print("# AddNodeToBundle: ");
1908 n->dump();
1909 }
1910 #endif
1912 // Remove this from the available list
1913 uint i;
1914 for (i = 0; i < _available.size(); i++)
1915 if (_available[i] == n)
1916 break;
1917 assert(i < _available.size(), "entry in _available list not found");
1918 _available.remove(i);
1920 // See if this fits in the current bundle
1921 const Pipeline *node_pipeline = n->pipeline();
1922 const Pipeline_Use& node_usage = node_pipeline->resourceUse();
1924 // Check for instructions to be placed in the delay slot. We
1925 // do this before we actually schedule the current instruction,
1926 // because the delay slot follows the current instruction.
1927 if (Pipeline::_branch_has_delay_slot &&
1928 node_pipeline->hasBranchDelay() &&
1929 !_unconditional_delay_slot) {
1931 uint siz = _available.size();
1933 // Conditional branches can support an instruction that
1934 // is unconditionally executed and not dependent on the
1935 // branch, OR a conditionally executed instruction if
1936 // the branch is taken. In practice, this means that
1937 // the first instruction at the branch target is
1938 // copied to the delay slot, and the branch goes to
1939 // the instruction after that at the branch target
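// Because scheduling proceeds in reverse program order, the candidate
// for the delay slot is drawn from instructions already on the
// available list, i.e. those whose block-local uses are all scheduled.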
1940 if ( n->is_Mach() && n->is_Branch() ) {
1942 assert( !n->is_MachNullCheck(), "should not look for delay slot for Null Check" );
1943 assert( !n->is_Catch(), "should not look for delay slot for Catch" );
1945 #ifndef PRODUCT
1946 _branches++;
1947 #endif
1949 // Look for at least 1 instruction on the available list
1950 // that is not dependent on the branch
1951 for (uint i = 0; i < siz; i++) {
1952 Node *d = _available[i];
1953 const Pipeline *avail_pipeline = d->pipeline();
1955 // Don't allow safepoints in the branch shadow, that will
1956 // cause a number of difficulties
1957 if ( avail_pipeline->instructionCount() == 1 &&
1958 !avail_pipeline->hasMultipleBundles() &&
1959 !avail_pipeline->hasBranchDelay() &&
1960 Pipeline::instr_has_unit_size() &&
1961 d->size(_regalloc) == Pipeline::instr_unit_size() &&
1962 NodeFitsInBundle(d) &&
1963 !node_bundling(d)->used_in_delay()) {
1965 if (d->is_Mach() && !d->is_MachSafePoint()) {
1966 // A node that fits in the delay slot was found, so we need to
1967 // set the appropriate bits in the bundle pipeline information so
1968 // that it correctly indicates resource usage. Later, when we
1969 // attempt to add this instruction to the bundle, we will skip
1970 // setting the resource usage.
1971 _unconditional_delay_slot = d;
1972 node_bundling(n)->set_use_unconditional_delay();
1973 node_bundling(d)->set_used_in_unconditional_delay();
1974 _bundle_use.add_usage(avail_pipeline->resourceUse());
1975 _current_latency[d->_idx] = _bundle_cycle_number;
1976 _next_node = d;
1977 ++_bundle_instr_count;
1978 #ifndef PRODUCT
1979 _unconditional_delays++;
1980 #endif
1981 break;
1982 }
1983 }
1984 }
1985 }
1987 // No instruction for the delay slot; account for a nop in its place
1988 if (!_unconditional_delay_slot) {
1989 // See if adding an instruction in the delay slot will overflow
1990 // the bundle.
1991 if (!NodeFitsInBundle(_nop)) {
1992 #ifndef PRODUCT
1993 if (_cfg->C->trace_opto_output())
1994 tty->print("# *** STEP(1 instruction for delay slot) ***\n");
1995 #endif
1996 step(1);
1997 }
1999 _bundle_use.add_usage(_nop->pipeline()->resourceUse());
2000 _next_node = _nop;
2001 ++_bundle_instr_count;
2002 }
2004 // See if the branch, together with its delay slot, still fits in
2005 // this bundle; if not, step to a new bundle
2006 if (!NodeFitsInBundle(n)) {
2007 #ifndef PRODUCT
2008 if (_cfg->C->trace_opto_output())
2009 tty->print("# *** STEP(branch won't fit) ***\n");
2010 #endif
2011 // Update the state information
2012 _bundle_instr_count = 0;
2013 _bundle_cycle_number += 1;
2014 _bundle_use.step(1);
2015 }
2016 }
2018 // Get the number of instructions
2019 uint instruction_count = node_pipeline->instructionCount();
2020 if (node_pipeline->mayHaveNoCode() && n->size(_regalloc) == 0)
2021 instruction_count = 0;
2023 // Compute the latency information
2024 uint delay = 0;
2026 if (instruction_count > 0 || !node_pipeline->mayHaveNoCode()) {
2027 int relative_latency = _current_latency[n->_idx] - _bundle_cycle_number;
2028 if (relative_latency < 0)
2029 relative_latency = 0;
2031 delay = _bundle_use.full_latency(relative_latency, node_usage);
2033 // Does not fit in this bundle, start a new one
2034 if (delay > 0) {
2035 step(delay);
2037 #ifndef PRODUCT
2038 if (_cfg->C->trace_opto_output())
2039 tty->print("# *** STEP(%d) ***\n", delay);
2040 #endif
2041 }
2042 }
2044 // If this was placed in the delay slot, ignore it
2045 if (n != _unconditional_delay_slot) {
2047 if (delay == 0) {
2048 if (node_pipeline->hasMultipleBundles()) {
2049 #ifndef PRODUCT
2050 if (_cfg->C->trace_opto_output())
2051 tty->print("# *** STEP(multiple instructions) ***\n");
2052 #endif
2053 step(1);
2054 }
2056 else if (instruction_count + _bundle_instr_count > Pipeline::_max_instrs_per_cycle) {
2057 #ifndef PRODUCT
2058 if (_cfg->C->trace_opto_output())
2059 tty->print("# *** STEP(%d >= %d instructions) ***\n",
2060 instruction_count + _bundle_instr_count,
2061 Pipeline::_max_instrs_per_cycle);
2062 #endif
2063 step(1);
2064 }
2065 }
2067 if (node_pipeline->hasBranchDelay() && !_unconditional_delay_slot)
2068 _bundle_instr_count++;
2070 // Set the node's latency
2071 _current_latency[n->_idx] = _bundle_cycle_number;
2073 // Now merge the functional unit information
2074 if (instruction_count > 0 || !node_pipeline->mayHaveNoCode())
2075 _bundle_use.add_usage(node_usage);
2077 // Increment the number of instructions in this bundle
2078 _bundle_instr_count += instruction_count;
2080 // Remember this node for later
2081 if (n->is_Mach())
2082 _next_node = n;
2083 }
2085 // It's possible to have a BoxLock in the graph and in the _bbs mapping but
2086 // not in the bb->_nodes array. This happens for debug-info-only BoxLocks.
2087 // 'Schedule' them (basically ignore in the schedule) but do not insert them
2088 // into the block. All other scheduled nodes get put in the schedule here.
2089 int op = n->Opcode();
2090 if( (op == Op_Node && n->req() == 0) || // anti-dependence node OR
2091 (op != Op_Node && // Not an unused antidependence node and
2092 // not an unallocated boxlock
2093 (OptoReg::is_valid(_regalloc->get_reg_first(n)) || op != Op_BoxLock)) ) {
2095 // Push any trailing projections
2096 if( bb->_nodes[bb->_nodes.size()-1] != n ) {
2097 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
2098 Node *foi = n->fast_out(i);
2099 if( foi->is_Proj() )
2100 _scheduled.push(foi);
2101 }
2102 }
2104 // Put the instruction in the schedule list
2105 _scheduled.push(n);
2106 }
2108 #ifndef PRODUCT
2109 if (_cfg->C->trace_opto_output())
2110 dump_available();
2111 #endif
2113 // Walk all the definitions, decrementing use counts, and
2114 // if a definition has a 0 use count, place it in the available list.
2115 DecrementUseCounts(n,bb);
2116 }
2118 //------------------------------ComputeUseCount--------------------------------
2119 // This method sets the use count within a basic block. We will ignore all
2120 // uses outside the current basic block. As we are doing a backwards walk,
2121 // any node we reach that has a use count of 0 may be scheduled. This also
2122 // avoids the problem of cyclic references from phi nodes, as long as phi
2123 // nodes are at the front of the basic block. This method also initializes
2124 // the available list to the set of instructions that have no uses within this
2125 // basic block.
2126 void Scheduling::ComputeUseCount(const Block *bb) {
2127 #ifndef PRODUCT
2128 if (_cfg->C->trace_opto_output())
2129 tty->print("# -> ComputeUseCount\n");
2130 #endif
2132 // Clear the list of available and scheduled instructions, just in case
2133 _available.clear();
2134 _scheduled.clear();
2136 // No delay slot specified
2137 _unconditional_delay_slot = NULL;
2139 #ifdef ASSERT
2140 for( uint i=0; i < bb->_nodes.size(); i++ )
2141 assert( _uses[bb->_nodes[i]->_idx] == 0, "_use array not clean" );
2142 #endif
2144 // Force the _uses count to never go to zero for unschedulable pieces
2145 // of the block
2146 for( uint k = 0; k < _bb_start; k++ )
2147 _uses[bb->_nodes[k]->_idx] = 1;
2148 for( uint l = _bb_end; l < bb->_nodes.size(); l++ )
2149 _uses[bb->_nodes[l]->_idx] = 1;
2151 // Iterate backwards over the instructions in the block. Don't count the
2152 // branch projections at end or the block header instructions.
2153 for( uint j = _bb_end-1; j >= _bb_start; j-- ) {
2154 Node *n = bb->_nodes[j];
2155 if( n->is_Proj() ) continue; // Projections handled another way
2157 // Account for all uses
2158 for ( uint k = 0; k < n->len(); k++ ) {
2159 Node *inp = n->in(k);
2160 if (!inp) continue;
2161 assert(inp != n, "no cycles allowed" );
2162 if( _bbs[inp->_idx] == bb ) { // Block-local use?
2163 if( inp->is_Proj() ) // Skip through Proj's
2164 inp = inp->in(0);
2165 ++_uses[inp->_idx]; // Count 1 block-local use
2166 }
2167 }
2169 // If this instruction has a 0 use count, then it is available
2170 if (!_uses[n->_idx]) {
2171 _current_latency[n->_idx] = _bundle_cycle_number;
2172 AddNodeToAvailableList(n);
2173 }
2175 #ifndef PRODUCT
2176 if (_cfg->C->trace_opto_output()) {
2177 tty->print("# uses: %3d: ", _uses[n->_idx]);
2178 n->dump();
2179 }
2180 #endif
2181 }
2183 #ifndef PRODUCT
2184 if (_cfg->C->trace_opto_output())
2185 tty->print("# <- ComputeUseCount\n");
2186 #endif
2187 }
2189 // This routine performs scheduling on each basic block in reverse order,
2190 // using instruction latencies and taking into account functional unit
2191 // availability.
2192 void Scheduling::DoScheduling() {
2193 #ifndef PRODUCT
2194 if (_cfg->C->trace_opto_output())
2195 tty->print("# -> DoScheduling\n");
2196 #endif
2198 Block *succ_bb = NULL;
2199 Block *bb;
2201 // Walk over all the basic blocks in reverse order
2202 for( int i=_cfg->_num_blocks-1; i >= 0; succ_bb = bb, i-- ) {
2203 bb = _cfg->_blocks[i];
2205 #ifndef PRODUCT
2206 if (_cfg->C->trace_opto_output()) {
2207 tty->print("# Schedule BB#%03d (initial)\n", i);
2208 for (uint j = 0; j < bb->_nodes.size(); j++)
2209 bb->_nodes[j]->dump();
2210 }
2211 #endif
2213 // On the head node, skip processing
2214 if( bb == _cfg->_broot )
2215 continue;
2217 // Skip empty, connector blocks
2218 if (bb->is_connector())
2219 continue;
2221 // If the following block is not the sole successor of
2222 // this one, then reset the pipeline information
2223 if (bb->_num_succs != 1 || bb->non_connector_successor(0) != succ_bb) {
2224 #ifndef PRODUCT
2225 if (_cfg->C->trace_opto_output()) {
2226 tty->print("*** bundle start of next BB, node %d, for %d instructions\n",
2227 _next_node->_idx, _bundle_instr_count);
2228 }
2229 #endif
2230 step_and_clear();
2231 }
2233 // Leave untouched the starting instruction, any Phis, a CreateEx node
2234 // or Top. bb->_nodes[_bb_start] is the first schedulable instruction.
2235 _bb_end = bb->_nodes.size()-1;
2236 for( _bb_start=1; _bb_start <= _bb_end; _bb_start++ ) {
2237 Node *n = bb->_nodes[_bb_start];
2238 // Things not matched, like PhiNodes and ProjNodes, don't get scheduled.
2239 // Also, MachIdealNodes do not get scheduled
2240 if( !n->is_Mach() ) continue; // Skip non-machine nodes
2241 MachNode *mach = n->as_Mach();
2242 int iop = mach->ideal_Opcode();
2243 if( iop == Op_CreateEx ) continue; // CreateEx is pinned
2244 if( iop == Op_Con ) continue; // Do not schedule Top
2245 if( iop == Op_Node && // Do not schedule PhiNodes, ProjNodes
2246 mach->pipeline() == MachNode::pipeline_class() &&
2247 !n->is_SpillCopy() ) // Breakpoints, Prolog, etc
2248 continue;
2249 break; // Funny loop structure to be sure...
2250 }
2251 // Compute last "interesting" instruction in block - last instruction we
2252 // might schedule. _bb_end points just after last schedulable inst. We
2253 // normally schedule conditional branches (despite them being forced last
2254 // in the block), because they have delay slots we can fill. Calls all
2255 // have their delay slots filled in the template expansions, so we don't
2256 // bother scheduling them.
2257 Node *last = bb->_nodes[_bb_end];
2258 if( last->is_Catch() ||
2259 (last->is_Mach() && last->as_Mach()->ideal_Opcode() == Op_Halt) ) {
2260 // There must be a prior call. Skip it.
2261 while( !bb->_nodes[--_bb_end]->is_Call() ) {
2262 assert( bb->_nodes[_bb_end]->is_Proj(), "skipping projections after expected call" );
2263 }
2264 } else if( last->is_MachNullCheck() ) {
2265 // Back up so the last null-checked memory instruction is
2266 // outside the schedulable range. Skip over the nullcheck,
2267 // projection, and the memory nodes.
2268 Node *mem = last->in(1);
2269 do {
2270 _bb_end--;
2271 } while (mem != bb->_nodes[_bb_end]);
2272 } else {
2273 // Set _bb_end to point after last schedulable inst.
2274 _bb_end++;
2275 }
2277 assert( _bb_start <= _bb_end, "inverted block ends" );
2279 // Compute the register antidependencies for the basic block
2280 ComputeRegisterAntidependencies(bb);
2281 if (_cfg->C->failing()) return; // too many D-U pinch points
2283 // Compute intra-bb latencies for the nodes
2284 ComputeLocalLatenciesForward(bb);
2286 // Compute the usage within the block, and set the list of all nodes
2287 // in the block that have no uses within the block.
2288 ComputeUseCount(bb);
2290 // Schedule the remaining instructions in the block
2291 while ( _available.size() > 0 ) {
2292 Node *n = ChooseNodeToBundle();
2293 AddNodeToBundle(n,bb);
2294 }
2296 assert( _scheduled.size() == _bb_end - _bb_start, "wrong number of instructions" );
2297 #ifdef ASSERT
2298 for( uint l = _bb_start; l < _bb_end; l++ ) {
2299 Node *n = bb->_nodes[l];
2300 uint m;
2301 for( m = 0; m < _bb_end-_bb_start; m++ )
2302 if( _scheduled[m] == n )
2303 break;
2304 assert( m < _bb_end-_bb_start, "instruction missing in schedule" );
2305 }
2306 #endif
2308 // Now copy the instructions (in reverse order) back to the block
2309 for ( uint k = _bb_start; k < _bb_end; k++ )
2310 bb->_nodes.map(k, _scheduled[_bb_end-k-1]);
2312 #ifndef PRODUCT
2313 if (_cfg->C->trace_opto_output()) {
2314 tty->print("# Schedule BB#%03d (final)\n", i);
2315 uint current = 0;
2316 for (uint j = 0; j < bb->_nodes.size(); j++) {
2317 Node *n = bb->_nodes[j];
2318 if( valid_bundle_info(n) ) {
2319 Bundle *bundle = node_bundling(n);
2320 if (bundle->instr_count() > 0 || bundle->flags() > 0) {
2321 tty->print("*** Bundle: ");
2322 bundle->dump();
2323 }
2324 n->dump();
2325 }
2326 }
2327 }
2328 #endif
2329 #ifdef ASSERT
2330 verify_good_schedule(bb,"after block local scheduling");
2331 #endif
2332 }
2334 #ifndef PRODUCT
2335 if (_cfg->C->trace_opto_output())
2336 tty->print("# <- DoScheduling\n");
2337 #endif
2339 // Record final node-bundling array location
2340 _regalloc->C->set_node_bundling_base(_node_bundling_base);
2342 } // end DoScheduling
2344 //------------------------------verify_good_schedule---------------------------
2345 // Verify that no live-range used in the block is killed in the block by a
2346 // wrong DEF. This doesn't verify live-ranges that span blocks.
2348 // Check for edge existence. Used to avoid adding redundant precedence edges.
2349 static bool edge_from_to( Node *from, Node *to ) {
2350 for( uint i=0; i<from->len(); i++ )
2351 if( from->in(i) == to )
2352 return true;
2353 return false;
2354 }
2356 #ifdef ASSERT
2357 //------------------------------verify_do_def----------------------------------
2358 void Scheduling::verify_do_def( Node *n, OptoReg::Name def, const char *msg ) {
2359 // Check for bad kills
2360 if( OptoReg::is_valid(def) ) { // Ignore stores & control flow
2361 Node *prior_use = _reg_node[def];
2362 if( prior_use && !edge_from_to(prior_use,n) ) {
2363 tty->print("%s = ",OptoReg::as_VMReg(def)->name());
2364 n->dump();
2365 tty->print_cr("...");
2366 prior_use->dump();
2367 assert_msg(edge_from_to(prior_use,n),msg);
2368 }
2369 _reg_node.map(def,NULL); // Kill live USEs
2370 }
2371 }
2373 //------------------------------verify_good_schedule---------------------------
2374 void Scheduling::verify_good_schedule( Block *b, const char *msg ) {
2376 // Zap to something reasonable for the verify code
2377 _reg_node.clear();
2379 // Walk over the block backwards. Check to make sure each DEF doesn't
2380 // kill a live value (other than the one it's supposed to). Add each
2381 // USE to the live set.
2382 for( uint i = b->_nodes.size()-1; i >= _bb_start; i-- ) {
2383 Node *n = b->_nodes[i];
2384 int n_op = n->Opcode();
2385 if( n_op == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2386 // Fat-proj kills a slew of registers
2387 RegMask rm = n->out_RegMask();// Make local copy
2388 while( rm.is_NotEmpty() ) {
2389 OptoReg::Name kill = rm.find_first_elem();
2390 rm.Remove(kill);
2391 verify_do_def( n, kill, msg );
2392 }
2393 } else if( n_op != Op_Node ) { // Avoid brand new antidependence nodes
2394 // Get DEF'd registers the normal way
2395 verify_do_def( n, _regalloc->get_reg_first(n), msg );
2396 verify_do_def( n, _regalloc->get_reg_second(n), msg );
2397 }
2399 // Now make all USEs live
2400 for( uint i=1; i<n->req(); i++ ) {
2401 Node *def = n->in(i);
2402 assert(def != 0, "input edge required");
2403 OptoReg::Name reg_lo = _regalloc->get_reg_first(def);
2404 OptoReg::Name reg_hi = _regalloc->get_reg_second(def);
2405 if( OptoReg::is_valid(reg_lo) ) {
2406 assert_msg(!_reg_node[reg_lo] || edge_from_to(_reg_node[reg_lo],def), msg );
2407 _reg_node.map(reg_lo,n);
2408 }
2409 if( OptoReg::is_valid(reg_hi) ) {
2410 assert_msg(!_reg_node[reg_hi] || edge_from_to(_reg_node[reg_hi],def), msg );
2411 _reg_node.map(reg_hi,n);
2412 }
2413 }
2415 }
2417 // Zap to something reasonable for the Antidependence code
2418 _reg_node.clear();
2419 }
2420 #endif
2422 // Conditionally add precedence edges. Avoid putting edges on Projs.
2423 static void add_prec_edge_from_to( Node *from, Node *to ) {
2424 if( from->is_Proj() ) { // Put precedence edge on Proj's input
2425 assert( from->req() == 1 && (from->len() == 1 || from->in(1)==0), "no precedence edges on projections" );
2426 from = from->in(0);
2427 }
2428 if( from != to && // No cycles (for things like LD L0,[L0+4] )
2429 !edge_from_to( from, to ) ) // Avoid duplicate edge
2430 from->add_prec(to);
2431 }
2433 //------------------------------anti_do_def------------------------------------
2434 void Scheduling::anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def ) {
2435 if( !OptoReg::is_valid(def_reg) ) // Ignore stores & control flow
2436 return;
2438 Node *pinch = _reg_node[def_reg]; // Get pinch point
2439 if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
2440 is_def ) { // Check for a true def (not a kill)
2441 _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
2442 return;
2443 }
2445 Node *kill = def; // Rename 'def' to more descriptive 'kill'
2446 debug_only( def = (Node*)0xdeadbeef; )
2448 // After some number of kills there _may_ be a later def
2449 Node *later_def = NULL;
2451 // Finding a kill requires a real pinch-point.
2452 // Check for not already having a pinch-point.
2453 // Pinch points are Op_Node's.
2454 if( pinch->Opcode() != Op_Node ) { // Or later-def/kill as pinch-point?
2455 later_def = pinch; // Must be def/kill as optimistic pinch-point
2456 if ( _pinch_free_list.size() > 0) {
2457 pinch = _pinch_free_list.pop();
2458 } else {
2459 pinch = new (_cfg->C, 1) Node(1); // Pinch point to-be
2460 }
2461 if (pinch->_idx >= _regalloc->node_regs_max_index()) {
2462 _cfg->C->record_method_not_compilable("too many D-U pinch points");
2463 return;
2464 }
2465 _bbs.map(pinch->_idx,b); // Pretend it's valid in this block (lazy init)
2466 _reg_node.map(def_reg,pinch); // Record pinch-point
2467 //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
2468 if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
2469 pinch->init_req(0, _cfg->C->top()); // set not NULL for the next call
2470 add_prec_edge_from_to(later_def,pinch); // Add edge from kill to pinch
2471 later_def = NULL; // and no later def
2472 }
2473 pinch->set_req(0,later_def); // Hook later def so we can find it
2474 } else { // Else have valid pinch point
2475 if( pinch->in(0) ) // If there is a later-def
2476 later_def = pinch->in(0); // Get it
2477 }
2479 // Add output-dependence edge from later def to kill
2480 if( later_def ) // If there is some original def
2481 add_prec_edge_from_to(later_def,kill); // Add edge from def to kill
2483 // See if current kill is also a use, and so is forced to be the pinch-point.
2484 if( pinch->Opcode() == Op_Node ) {
2485 Node *uses = kill->is_Proj() ? kill->in(0) : kill;
2486 for( uint i=1; i<uses->req(); i++ ) {
2487 if( _regalloc->get_reg_first(uses->in(i)) == def_reg ||
2488 _regalloc->get_reg_second(uses->in(i)) == def_reg ) {
2489 // Yes, found a use/kill pinch-point
2490 pinch->set_req(0,NULL); // clear the later-def entry
2491 pinch->replace_by(kill); // Move anti-dep edges up
2492 pinch = kill;
2493 _reg_node.map(def_reg,pinch);
2494 return;
2495 }
2496 }
2497 }
2499 // Add edge from kill to pinch-point
2500 add_prec_edge_from_to(kill,pinch);
2501 }
2503 //------------------------------anti_do_use------------------------------------
2504 void Scheduling::anti_do_use( Block *b, Node *use, OptoReg::Name use_reg ) {
2505 if( !OptoReg::is_valid(use_reg) ) // Ignore stores & control flow
2506 return;
2507 Node *pinch = _reg_node[use_reg]; // Get pinch point
2508 // Check for no later def_reg/kill in block
2509 if( pinch && _bbs[pinch->_idx] == b &&
2510 // Use has to be block-local as well
2511 _bbs[use->_idx] == b ) {
2512 if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
2513 pinch->req() == 1 ) { // pinch not yet in block?
2514 pinch->del_req(0); // yank pointer to later-def, also set flag
2515 // Insert the pinch-point in the block just after the last use
2516 b->_nodes.insert(b->find_node(use)+1,pinch);
2517 _bb_end++; // Increase size scheduled region in block
2518 }
2520 add_prec_edge_from_to(pinch,use);
2521 }
2522 }
2524 //------------------------------ComputeRegisterAntidependences-----------------
2525 // We insert antidependences between the reads and the following write of
2526 // allocated registers to prevent illegal code motion. Hopefully, the
2527 // number of added references should be fairly small, especially as we
2528 // are only adding references within the current basic block.
2529 void Scheduling::ComputeRegisterAntidependencies(Block *b) {
2531 #ifdef ASSERT
2532 verify_good_schedule(b,"before block local scheduling");
2533 #endif
2535 // A valid schedule, for each register independently, is an endless cycle
2536 // of: a def, then some uses (connected to the def by true dependencies),
2537 // then some kills (defs with no uses), finally the cycle repeats with a new
2538 // def. The uses are allowed to float relative to each other, as are the
2539 // kills. No use is allowed to slide past a kill (or def). This requires
2540 // antidependencies between all uses of a single def and all kills that
2541 // follow, up to the next def. More edges are redundant, because later defs
2542 // & kills are already serialized with true or antidependencies. To keep
2543 // the edge count down, we add a 'pinch point' node if there's more than
2544 // one use or more than one kill/def.
2546 // We add dependencies in one bottom-up pass.
2548 // For each instruction we handle its DEFs/KILLs, then its USEs.
2550 // For each DEF/KILL, we check to see if there's a prior DEF/KILL for this
2551 // register. If not, we record the DEF/KILL in _reg_node, the
2552 // register-to-def mapping. If there is a prior DEF/KILL, we insert a
2553 // "pinch point", a new Node that's in the graph but not in the block.
2554 // We put edges from the prior and current DEF/KILLs to the pinch point.
2555 // We put the pinch point in _reg_node. If there's already a pinch point
2556 // we merely add an edge from the current DEF/KILL to the pinch point.
2558 // After doing the DEF/KILLs, we handle USEs. For each used register, we
2559 // put an edge from the pinch point to the USE.
2561 // To be expedient, the _reg_node array is pre-allocated for the whole
2562 // compilation. _reg_node is lazily initialized; it either contains a NULL,
2563 // or a valid def/kill/pinch-point, or a leftover node from some prior
2564 // block. A leftover node from a prior block is treated like a NULL (no
2565 // prior def, so no anti-dependence needed). A valid def is distinguished
2566 // by its being in the current block.
2567 bool fat_proj_seen = false;
2568 uint last_safept = _bb_end-1;
2569 Node* end_node = (_bb_end-1 >= _bb_start) ? b->_nodes[last_safept] : NULL;
2570 Node* last_safept_node = end_node;
2571 for( uint i = _bb_end-1; i >= _bb_start; i-- ) {
2572 Node *n = b->_nodes[i];
2573 int is_def = n->outcnt(); // treated as a def if it had uses before precedence edges were added
2574 if( n->Opcode() == Op_MachProj && n->ideal_reg() == MachProjNode::fat_proj ) {
2575 // Fat-proj kills a slew of registers
2576 // This can add edges to 'n' and obscure whether or not it was a def,
2577 // hence the is_def flag.
2578 fat_proj_seen = true;
2579 RegMask rm = n->out_RegMask();// Make local copy
2580 while( rm.is_NotEmpty() ) {
2581 OptoReg::Name kill = rm.find_first_elem();
2582 rm.Remove(kill);
2583 anti_do_def( b, n, kill, is_def );
2584 }
2585 } else {
2586 // Get DEF'd registers the normal way
2587 anti_do_def( b, n, _regalloc->get_reg_first(n), is_def );
2588 anti_do_def( b, n, _regalloc->get_reg_second(n), is_def );
2589 }
2591 // Check each register used by this instruction for a following DEF/KILL
2592 // that must occur afterward and requires an anti-dependence edge.
2593 for( uint j=0; j<n->req(); j++ ) {
2594 Node *def = n->in(j);
2595 if( def ) {
2596 assert( def->Opcode() != Op_MachProj || def->ideal_reg() != MachProjNode::fat_proj, "" );
2597 anti_do_use( b, n, _regalloc->get_reg_first(def) );
2598 anti_do_use( b, n, _regalloc->get_reg_second(def) );
2599 }
2600 }
2601 // Do not allow defs of new derived values to float above GC
2602 // points unless the base is definitely available at the GC point.
2604 Node *m = b->_nodes[i];
2606 // Add precedence edge from following safepoint to use of derived pointer
2607 if( last_safept_node != end_node &&
2608 m != last_safept_node) {
2609 for (uint k = 1; k < m->req(); k++) {
2610 const Type *t = m->in(k)->bottom_type();
2611 if( t->isa_oop_ptr() &&
2612 t->is_ptr()->offset() != 0 ) {
2613 last_safept_node->add_prec( m );
2614 break;
2615 }
2616 }
2617 }
2619 if( n->jvms() ) { // Precedence edge from derived to safept
2620 // Check if last_safept_node was moved by pinch-point insertion in anti_do_use()
2621 if( b->_nodes[last_safept] != last_safept_node ) {
2622 last_safept = b->find_node(last_safept_node);
2623 }
2624 for( uint j=last_safept; j > i; j-- ) {
2625 Node *mach = b->_nodes[j];
2626 if( mach->is_Mach() && mach->as_Mach()->ideal_Opcode() == Op_AddP )
2627 mach->add_prec( n );
2628 }
2629 last_safept = i;
2630 last_safept_node = m;
2631 }
2632 }
2634 if (fat_proj_seen) {
2635 // Garbage collect pinch nodes that were not consumed.
2636 // They are usually created by a fat kill MachProj for a call.
2637 garbage_collect_pinch_nodes();
2638 }
2639 }
2641 //------------------------------garbage_collect_pinch_nodes-------------------------------
2643 // Garbage collect pinch nodes for reuse by other blocks.
2644 //
2645 // The block scheduler's insertion of anti-dependence
2646 // edges creates many pinch nodes when the block contains
2647 // 2 or more Calls. A pinch node is used to prevent a
2648 // combinatorial explosion of edges. If a set of kills for a
2649 // register is anti-dependent on a set of uses (or defs), rather
2650 // than adding an edge in the graph between each pair of kill
2651 // and use (or def), a pinch is inserted between them:
2652 //
2653 // use1 use2 use3
2654 // \ | /
2655 // \ | /
2656 // pinch
2657 // / | \
2658 // / | \
2659 // kill1 kill2 kill3
2660 //
2661 // One pinch node is created per register killed when
2662 // the second call is encountered during a backwards pass
2663 // over the block. Most of these pinch nodes are never
2664 // wired into the graph because the register is never
2665 // used or def'ed in the block.
2666 //
2667 void Scheduling::garbage_collect_pinch_nodes() {
2668 #ifndef PRODUCT
2669 if (_cfg->C->trace_opto_output()) tty->print("Reclaimed pinch nodes:");
2670 #endif
2671 int trace_cnt = 0;
2672 for (uint k = 0; k < _reg_node.Size(); k++) {
2673 Node* pinch = _reg_node[k];
2674 if (pinch != NULL && pinch->Opcode() == Op_Node &&
2675 // no precedence input edges
2676 (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
2677 cleanup_pinch(pinch);
2678 _pinch_free_list.push(pinch);
2679 _reg_node.map(k, NULL);
2680 #ifndef PRODUCT
2681 if (_cfg->C->trace_opto_output()) {
2682 trace_cnt++;
2683 if (trace_cnt > 40) {
2684 tty->print("\n");
2685 trace_cnt = 0;
2686 }
2687 tty->print(" %d", pinch->_idx);
2688 }
2689 #endif
2690 }
2691 }
2692 #ifndef PRODUCT
2693 if (_cfg->C->trace_opto_output()) tty->print("\n");
2694 #endif
2695 }
2697 // Clean up a pinch node for reuse.
2698 void Scheduling::cleanup_pinch( Node *pinch ) {
2699 assert (pinch && pinch->Opcode() == Op_Node && pinch->req() == 1, "just checking");
2701 for (DUIterator_Last imin, i = pinch->last_outs(imin); i >= imin; ) {
2702 Node* use = pinch->last_out(i);
2703 uint uses_found = 0;
2704 for (uint j = use->req(); j < use->len(); j++) {
2705 if (use->in(j) == pinch) {
2706 use->rm_prec(j);
2707 uses_found++;
2708 }
2709 }
2710 assert(uses_found > 0, "must be a precedence edge");
2711 i -= uses_found; // we deleted 1 or more copies of this edge
2712 }
2713 // May have a later_def entry
2714 pinch->set_req(0, NULL);
2715 }
2717 //------------------------------print_statistics-------------------------------
2718 #ifndef PRODUCT
2720 void Scheduling::dump_available() const {
2721 tty->print("#Availist ");
2722 for (uint i = 0; i < _available.size(); i++)
2723 tty->print(" N%d/l%d", _available[i]->_idx,_current_latency[_available[i]->_idx]);
2724 tty->cr();
2725 }
2727 // Print Scheduling Statistics
2728 void Scheduling::print_statistics() {
2729 // Print the size added by nops for bundling
2730 tty->print("Nops added %d bytes to total of %d bytes",
2731 _total_nop_size, _total_method_size);
2732 if (_total_method_size > 0)
2733 tty->print(", for %.2f%%",
2734 ((double)_total_nop_size) / ((double) _total_method_size) * 100.0);
2735 tty->print("\n");
2737 // Print the number of branch shadows filled
2738 if (Pipeline::_branch_has_delay_slot) {
2739 tty->print("Of %d branches, %d had unconditional delay slots filled",
2740 _total_branches, _total_unconditional_delays);
2741 if (_total_branches > 0)
2742 tty->print(", for %.2f%%",
2743 ((double)_total_unconditional_delays) / ((double)_total_branches) * 100.0);
2744 tty->print("\n");
2745 }
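// Average ILP is total instructions divided by total bundles; e.g. six
// instructions packed into three bundles give an ILP of 2.00.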
2747 uint total_instructions = 0, total_bundles = 0;
2749 for (uint i = 1; i <= Pipeline::_max_instrs_per_cycle; i++) {
2750 uint bundle_count = _total_instructions_per_bundle[i];
2751 total_instructions += bundle_count * i;
2752 total_bundles += bundle_count;
2753 }
2755 if (total_bundles > 0)
2756 tty->print("Average ILP (excluding nops) is %.2f\n",
2757 ((double)total_instructions) / ((double)total_bundles));
2758 }
2759 #endif