2048 // Make sure the offset goes inside the instance layout. |
2048 // Make sure the offset goes inside the instance layout. |
2049 return k->contains_field_offset(tp->offset()); |
2049 return k->contains_field_offset(tp->offset()); |
2050 // Note that OffsetBot and OffsetTop are very negative. |
2050 // Note that OffsetBot and OffsetTop are very negative. |
2051 } |
2051 } |
2052 |
2052 |
|
// Eliminate trivially redundant StoreCMs and accumulate their
// precedence edges.
//
// Given a StoreCM node n, walk backwards along its Memory input chain.
// Any earlier StoreCM on that chain with the same Address and ValueIn
// inputs is redundant (it marks the same card with the same value), so
// it is spliced out of the memory chain and destroyed; any OopStore
// dependence or precedence edges it carried are re-accumulated onto n
// so the ordering constraints are not lost.
//
// NOTE(review): precedence edges appear to be stored in the slots
// [req(), len()) of a node (the loop below reads exactly that range),
// and a StoreCM whose OopStore input has already been converted to a
// precedence edge has req() <= MemNode::OopStore — confirm against the
// Node/MemNode declarations.
static void eliminate_redundant_card_marks(Node* n) {
  assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
  if (n->in(MemNode::Address)->outcnt() > 1) {
    // There are multiple users of the same address so it might be
    // possible to eliminate some of the StoreCMs
    Node* mem = n->in(MemNode::Memory);
    Node* adr = n->in(MemNode::Address);
    Node* val = n->in(MemNode::ValueIn);
    Node* prev = n;           // last StoreCM kept on the chain (starts at n)
    bool done = false;
    // Walk the chain of StoreCMs eliminating ones that match. As
    // long as it's a chain of single users then the optimization is
    // safe. Eliminating partially redundant StoreCMs would require
    // cloning copies down the other paths.
    //
    // The mem->outcnt() == 1 test guarantees no other node observes
    // this memory state, so splicing mem out cannot break a third
    // party's memory edge.
    while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
      if (adr == mem->in(MemNode::Address) &&
          val == mem->in(MemNode::ValueIn)) {
        // redundant StoreCM
        if (mem->req() > MemNode::OopStore) {
          // Hasn't been processed by this code yet: its OopStore
          // dependence is still a regular input, carry it over to n
          // as a precedence edge.
          n->add_prec(mem->in(MemNode::OopStore));
        } else {
          // Already converted to precedence edge
          for (uint i = mem->req(); i < mem->len(); i++) {
            // Accumulate any precedence edges
            if (mem->in(i) != NULL) {
              n->add_prec(mem->in(i));
            }
          }
          // Everything above this point has been processed.
          // (An already-converted StoreCM implies the rest of the
          // chain was handled by an earlier call, so stop after
          // removing this one.)
          done = true;
        }
        // Eliminate the previous StoreCM: bypass mem on the memory
        // chain, then detach its inputs so it becomes dead.
        prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
        assert(mem->outcnt() == 0, "should be dead");
        mem->disconnect_inputs(NULL);
      } else {
        // Different card/value: keep this StoreCM and continue the
        // walk from it.
        prev = mem;
      }
      // Advance: prev's Memory input is either mem's old Memory input
      // (if mem was just spliced out) or mem's own Memory input.
      mem = prev->in(MemNode::Memory);
    }
  }
}
|
2098 |
2053 //------------------------------final_graph_reshaping_impl---------------------- |
2099 //------------------------------final_graph_reshaping_impl---------------------- |
2054 // Implement items 1-5 from final_graph_reshaping below. |
2100 // Implement items 1-5 from final_graph_reshaping below. |
2055 static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) { |
2101 static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc ) { |
2056 |
2102 |
2057 if ( n->outcnt() == 0 ) return; // dead node |
2103 if ( n->outcnt() == 0 ) return; // dead node |
2174 case Op_StoreF: |
2220 case Op_StoreF: |
2175 case Op_LoadF: |
2221 case Op_LoadF: |
2176 frc.inc_float_count(); |
2222 frc.inc_float_count(); |
2177 goto handle_mem; |
2223 goto handle_mem; |
2178 |
2224 |
|
2225 case Op_StoreCM: |
|
2226 { |
|
2227 // Convert OopStore dependence into precedence edge |
|
2228 Node* prec = n->in(MemNode::OopStore); |
|
2229 n->del_req(MemNode::OopStore); |
|
2230 n->add_prec(prec); |
|
2231 eliminate_redundant_card_marks(n); |
|
2232 } |
|
2233 |
|
2234 // fall through |
|
2235 |
2179 case Op_StoreB: |
2236 case Op_StoreB: |
2180 case Op_StoreC: |
2237 case Op_StoreC: |
2181 case Op_StoreCM: |
|
2182 case Op_StorePConditional: |
2238 case Op_StorePConditional: |
2183 case Op_StoreI: |
2239 case Op_StoreI: |
2184 case Op_StoreL: |
2240 case Op_StoreL: |
2185 case Op_StoreIConditional: |
2241 case Op_StoreIConditional: |
2186 case Op_StoreLConditional: |
2242 case Op_StoreLConditional: |