1169 // (but allow split bundles) |
1169 // (but allow split bundles) |
1170 if( Pipeline::requires_bundling() && starts_bundle(n) ) |
1170 if( Pipeline::requires_bundling() && starts_bundle(n) ) |
1171 cb->flush_bundle(false); |
1171 cb->flush_bundle(false); |
1172 |
1172 |
1173 // The following logic is duplicated in the code ifdeffed for |
1173 // The following logic is duplicated in the code ifdeffed for |
1174 // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It |
1174 // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It |
1175 // should be factored out. Or maybe dispersed to the nodes? |
1175 // should be factored out. Or maybe dispersed to the nodes? |
1176 |
1176 |
1177 // Special handling for SafePoint/Call Nodes |
1177 // Special handling for SafePoint/Call Nodes |
1178 bool is_mcall = false; |
1178 bool is_mcall = false; |
1179 if( n->is_Mach() ) { |
1179 if( n->is_Mach() ) { |
1273 mach->label_set( blk_labels[block_num], block_num ); |
1273 mach->label_set( blk_labels[block_num], block_num ); |
1274 } |
1274 } |
1275 } |
1275 } |
1276 |
1276 |
1277 #ifdef ASSERT |
1277 #ifdef ASSERT |
1278 // Check that oop-store precedes the card-mark |
1278 // Check that oop-store precedes the card-mark |
1279 else if( mach->ideal_Opcode() == Op_StoreCM ) { |
1279 else if( mach->ideal_Opcode() == Op_StoreCM ) { |
1280 uint storeCM_idx = j; |
1280 uint storeCM_idx = j; |
1281 Node *oop_store = mach->in(mach->_cnt); // First precedence edge |
1281 Node *oop_store = mach->in(mach->_cnt); // First precedence edge |
1282 assert( oop_store != NULL, "storeCM expects a precedence edge"); |
1282 assert( oop_store != NULL, "storeCM expects a precedence edge"); |
1283 uint i4; |
1283 uint i4; |
1289 assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store"); |
1289 assert( i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store"); |
1290 } |
1290 } |
1291 #endif |
1291 #endif |
1292 |
1292 |
1293 else if( !n->is_Proj() ) { |
1293 else if( !n->is_Proj() ) { |
1294 // Remember the beginning of the previous instruction, in case |
1294 // Remember the beginning of the previous instruction, in case |
1295 // it's followed by a flag-kill and a null-check. Happens on |
1295 // it's followed by a flag-kill and a null-check. Happens on |
1296 // Intel all the time, with add-to-memory kind of opcodes. |
1296 // Intel all the time, with add-to-memory kind of opcodes. |
1297 previous_offset = current_offset; |
1297 previous_offset = current_offset; |
1298 } |
1298 } |
1299 } |
1299 } |
1565 _node_bundling_limit = compile.unique(); |
1565 _node_bundling_limit = compile.unique(); |
1566 uint node_max = _regalloc->node_regs_max_index(); |
1566 uint node_max = _regalloc->node_regs_max_index(); |
1567 |
1567 |
1568 compile.set_node_bundling_limit(_node_bundling_limit); |
1568 compile.set_node_bundling_limit(_node_bundling_limit); |
1569 |
1569 |
1570 // This one is persistent within the Compile class |
1570 // This one is persistent within the Compile class |
1571 _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max); |
1571 _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max); |
1572 |
1572 |
1573 // Allocate space for fixed-size arrays |
1573 // Allocate space for fixed-size arrays |
1574 _node_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max); |
1574 _node_latency = NEW_ARENA_ARRAY(arena, unsigned short, node_max); |
1575 _uses = NEW_ARENA_ARRAY(arena, short, node_max); |
1575 _uses = NEW_ARENA_ARRAY(arena, short, node_max); |
1664 |
1664 |
1665 //------------------------------ComputeLocalLatenciesForward------------------- |
1665 //------------------------------ComputeLocalLatenciesForward------------------- |
1666 // Compute the latency of all the instructions. This is fairly simple, |
1666 // Compute the latency of all the instructions. This is fairly simple, |
1667 // because we already have a legal ordering. Walk over the instructions |
1667 // because we already have a legal ordering. Walk over the instructions |
1668 // from first to last, and compute the latency of the instruction based |
1668 // from first to last, and compute the latency of the instruction based |
1669 // on the latency of the preceding instruction(s). |
1669 // on the latency of the preceding instruction(s). |
1670 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) { |
1670 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) { |
1671 #ifndef PRODUCT |
1671 #ifndef PRODUCT |
1672 if (_cfg->C->trace_opto_output()) |
1672 if (_cfg->C->trace_opto_output()) |
1673 tty->print("# -> ComputeLocalLatenciesForward\n"); |
1673 tty->print("# -> ComputeLocalLatenciesForward\n"); |
1674 #endif |
1674 #endif |
1929 !_unconditional_delay_slot) { |
1929 !_unconditional_delay_slot) { |
1930 |
1930 |
1931 uint siz = _available.size(); |
1931 uint siz = _available.size(); |
1932 |
1932 |
1933 // Conditional branches can support an instruction that |
1933 // Conditional branches can support an instruction that |
1934 // is unconditionally executed and not dependent by the |
1934 // is unconditionally executed and not dependent by the |
1935 // branch, OR a conditionally executed instruction if |
1935 // branch, OR a conditionally executed instruction if |
1936 // the branch is taken. In practice, this means that |
1936 // the branch is taken. In practice, this means that |
1937 // the first instruction at the branch target is |
1937 // the first instruction at the branch target is |
1938 // copied to the delay slot, and the branch goes to |
1938 // copied to the delay slot, and the branch goes to |
1939 // the instruction after that at the branch target |
1939 // the instruction after that at the branch target |
1945 #ifndef PRODUCT |
1945 #ifndef PRODUCT |
1946 _branches++; |
1946 _branches++; |
1947 #endif |
1947 #endif |
1948 |
1948 |
1949 // At least 1 instruction is on the available list |
1949 // At least 1 instruction is on the available list |
1950 // that is not dependent on the branch |
1950 // that is not dependent on the branch |
1951 for (uint i = 0; i < siz; i++) { |
1951 for (uint i = 0; i < siz; i++) { |
1952 Node *d = _available[i]; |
1952 Node *d = _available[i]; |
1953 const Pipeline *avail_pipeline = d->pipeline(); |
1953 const Pipeline *avail_pipeline = d->pipeline(); |
1954 |
1954 |
1955 // Don't allow safepoints in the branch shadow, that will |
1955 // Don't allow safepoints in the branch shadow, that will |