src/share/vm/opto/live.cpp

author:      adlertz
date:        Fri, 16 Aug 2013 10:23:55 +0200
changeset:   5539:adb9a7d94cb5
parent:      5509:d1034bd8cefc
child:       5635:650868c062a9
permissions: -rw-r--r--

8023003: Cleanup the public interface to PhaseCFG
Summary: public methods that don't need to be public should be private.
Reviewed-by: kvn, twisti

duke@435 1 /*
stefank@2314 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "memory/allocation.inline.hpp"
stefank@2314 27 #include "opto/callnode.hpp"
stefank@2314 28 #include "opto/chaitin.hpp"
stefank@2314 29 #include "opto/live.hpp"
stefank@2314 30 #include "opto/machnode.hpp"
duke@435 31
duke@435 32
duke@435 33 // Compute live-in/live-out. We use a totally incremental algorithm. The LIVE
duke@435 34 // problem is monotonic. The steady-state solution looks like this: pull a
duke@435 35 // block from the worklist. It has a set of delta's - values which are newly
duke@435 36 // live-in from the block. Push these to the live-out sets of all predecessor
duke@435 37 // blocks. At each predecessor, the new live-out values are ANDed with what is
duke@435 38 // already live-out (extra stuff is added to the live-out sets). Then the
duke@435 39 // remaining new live-out values are ANDed with what is locally defined.
duke@435 40 // Leftover bits become the new live-in for the predecessor block, and the pred
duke@435 41 // block is put on the worklist.
duke@435 42 // The locally live-in stuff is computed once and added to predecessor
twisti@1040 43 // live-out sets. This separate computation is done in the outer loop below.
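
As a reading aid only (not part of live.cpp), the worklist fixed point described in the comment above can be sketched in a few dozen lines of standalone C++. The SimpleBlock struct, the kMaxLRG bound, and the std::bitset-based LrgSet below are simplified stand-ins for HotSpot's Block and IndexSet types, so this is an illustration of the dataflow scheme, not the real implementation:

#include <bitset>
#include <cstddef>
#include <vector>

static const std::size_t kMaxLRG = 256;              // assumed bound on live-range numbers
typedef std::bitset<kMaxLRG> LrgSet;

struct SimpleBlock {                                  // hypothetical stand-in for Block
  std::vector<int> preds;                             // indices of predecessor blocks
  LrgSet def;                                         // live ranges defined in this block
  LrgSet use;                                         // live ranges read before any local def
};

// Iterate to a fixed point: live-in(b) = use(b) | (live-out(b) & ~def(b)),
// and live-out(p) accumulates the live-in sets of p's successors.
static std::vector<LrgSet> compute_liveout(const std::vector<SimpleBlock>& blocks) {
  std::vector<LrgSet> liveout(blocks.size());
  std::vector<LrgSet> livein(blocks.size());
  std::vector<int> worklist;
  for (int i = 0; i < (int)blocks.size(); i++) {
    worklist.push_back(i);                            // seed every block once
  }
  while (!worklist.empty()) {
    int b = worklist.back();
    worklist.pop_back();
    LrgSet in = blocks[b].use | (liveout[b] & ~blocks[b].def);
    if (in == livein[b]) {
      continue;                                       // nothing new to propagate
    }
    livein[b] = in;
    // Push the new live-in bits into each predecessor's live-out set; requeue
    // any predecessor whose live-out actually grew (the problem is monotonic).
    for (std::size_t p = 0; p < blocks[b].preds.size(); p++) {
      int pred = blocks[b].preds[p];
      LrgSet grown = liveout[pred] | in;
      if (grown != liveout[pred]) {
        liveout[pred] = grown;
        worklist.push_back(pred);
      }
    }
  }
  return liveout;
}
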
neliasso@4949 44 PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
duke@435 45 }
duke@435 46
duke@435 47 void PhaseLive::compute(uint maxlrg) {
duke@435 48 _maxlrg = maxlrg;
duke@435 49 _worklist = new (_arena) Block_List();
duke@435 50
duke@435 51 // Init the sparse live arrays. This data is live on exit from here!
duke@435 52 // The _live info is the live-out info.
adlertz@5539 53 _live = (IndexSet*)_arena->Amalloc(sizeof(IndexSet) * _cfg.number_of_blocks());
duke@435 54 uint i;
adlertz@5539 55 for (i = 0; i < _cfg.number_of_blocks(); i++) {
duke@435 56 _live[i].initialize(_maxlrg);
duke@435 57 }
duke@435 58
duke@435 59 // Init the sparse arrays for delta-sets.
duke@435 60 ResourceMark rm; // Nuke temp storage on exit
duke@435 61
duke@435 62 // Does the memory used by _defs and _deltas get reclaimed? Does it matter? TT
duke@435 63
duke@435 64 // Array of values defined locally in blocks
adlertz@5539 65 _defs = NEW_RESOURCE_ARRAY(IndexSet,_cfg.number_of_blocks());
adlertz@5539 66 for (i = 0; i < _cfg.number_of_blocks(); i++) {
duke@435 67 _defs[i].initialize(_maxlrg);
duke@435 68 }
duke@435 69
duke@435 70 // Array of delta-set pointers, indexed by block pre_order-1.
adlertz@5539 71 _deltas = NEW_RESOURCE_ARRAY(IndexSet*,_cfg.number_of_blocks());
adlertz@5539 72 memset( _deltas, 0, sizeof(IndexSet*)* _cfg.number_of_blocks());
duke@435 73
duke@435 74 _free_IndexSet = NULL;
duke@435 75
duke@435 76 // Blocks having done pass-1
duke@435 77 VectorSet first_pass(Thread::current()->resource_area());
duke@435 78
duke@435 79 // Outer loop: must compute local live-in sets and push into predecessors.
adlertz@5539 80 for (uint j = _cfg.number_of_blocks(); j > 0; j--) {
adlertz@5539 81 Block* block = _cfg.get_block(j - 1);
duke@435 82
duke@435 83 // Compute the local live-in set. Start with any new live-out bits.
adlertz@5539 84 IndexSet* use = getset(block);
adlertz@5539 85 IndexSet* def = &_defs[block->_pre_order-1];
duke@435 86 DEBUG_ONLY(IndexSet *def_outside = getfreeset();)
duke@435 87 uint i;
adlertz@5539 88 for (i = block->_nodes.size(); i > 1; i--) {
adlertz@5539 89 Node* n = block->_nodes[i-1];
adlertz@5539 90 if (n->is_Phi()) {
adlertz@5539 91 break;
adlertz@5539 92 }
duke@435 93
duke@435 94 uint r = _names[n->_idx];
duke@435 95 assert(!def_outside->member(r), "Use of external LRG overlaps the same LRG defined in this block");
duke@435 96 def->insert( r );
duke@435 97 use->remove( r );
duke@435 98 uint cnt = n->req();
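// Scan this node's inputs: any value defined in a different block is used
// here before any local definition, so it joins this block's live-in (use) set.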
adlertz@5539 99 for (uint k = 1; k < cnt; k++) {
duke@435 100 Node *nk = n->in(k);
duke@435 101 uint nkidx = nk->_idx;
adlertz@5539 102 if (_cfg.get_block_for_node(nk) != block) {
duke@435 103 uint u = _names[nkidx];
adlertz@5539 104 use->insert(u);
adlertz@5539 105 DEBUG_ONLY(def_outside->insert(u);)
duke@435 106 }
duke@435 107 }
duke@435 108 }
duke@435 109 #ifdef ASSERT
duke@435 110 def_outside->set_next(_free_IndexSet);
duke@435 111 _free_IndexSet = def_outside; // Drop onto free list
duke@435 112 #endif
duke@435 113 // Remove anything defined by Phis and the block start instruction
adlertz@5539 114 for (uint k = i; k > 0; k--) {
adlertz@5539 115 uint r = _names[block->_nodes[k - 1]->_idx];
adlertz@5539 116 def->insert(r);
adlertz@5539 117 use->remove(r);
duke@435 118 }
duke@435 119
duke@435 120 // Push these live-in things to predecessors
adlertz@5539 121 for (uint l = 1; l < block->num_preds(); l++) {
adlertz@5539 122 Block* p = _cfg.get_block_for_node(block->pred(l));
adlertz@5539 123 add_liveout(p, use, first_pass);
duke@435 124
duke@435 125 // PhiNode uses go in the live-out set of prior blocks.
adlertz@5539 126 for (uint k = i; k > 0; k--) {
adlertz@5539 127 add_liveout(p, _names[block->_nodes[k-1]->in(l)->_idx], first_pass);
adlertz@5539 128 }
duke@435 129 }
adlertz@5539 130 freeset(block);
adlertz@5539 131 first_pass.set(block->_pre_order);
duke@435 132
duke@435 133 // Inner loop: blocks that picked up new live-out values to be propagated
adlertz@5539 134 while (_worklist->size()) {
adlertz@5539 135 Block* block = _worklist->pop();
adlertz@5539 136 IndexSet *delta = getset(block);
duke@435 137 assert( delta->count(), "missing delta set" );
duke@435 138
duke@435 139 // Add new-live-in to predecessors live-out sets
adlertz@5539 140 for (uint l = 1; l < block->num_preds(); l++) {
adlertz@5539 141 Block* predecessor = _cfg.get_block_for_node(block->pred(l));
adlertz@5539 142 add_liveout(predecessor, delta, first_pass);
adlertz@5509 143 }
duke@435 144
adlertz@5539 145 freeset(block);
duke@435 146 } // End of while-worklist-not-empty
duke@435 147
duke@435 148 } // End of for-all-blocks-outer-loop
duke@435 149
duke@435 150 // We explicitly clear all of the IndexSets which we are about to release.
duke@435 151 // This allows us to recycle their internal memory into IndexSet's free list.
duke@435 152
adlertz@5539 153 for (i = 0; i < _cfg.number_of_blocks(); i++) {
duke@435 154 _defs[i].clear();
duke@435 155 if (_deltas[i]) {
duke@435 156 // Is this always true?
duke@435 157 _deltas[i]->clear();
duke@435 158 }
duke@435 159 }
duke@435 160 IndexSet *free = _free_IndexSet;
duke@435 161 while (free != NULL) {
duke@435 162 IndexSet *temp = free;
duke@435 163 free = free->next();
duke@435 164 temp->clear();
duke@435 165 }
duke@435 166
duke@435 167 }
duke@435 168
duke@435 169 #ifndef PRODUCT
duke@435 170 void PhaseLive::stats(uint iters) const {
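// Currently a no-op: no liveness statistics are gathered in non-PRODUCT builds.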
duke@435 171 }
duke@435 172 #endif
duke@435 173
duke@435 174 // Get an IndexSet for a block. Return existing one, if any. Make a new
duke@435 175 // empty one if a prior one does not exist.
duke@435 176 IndexSet *PhaseLive::getset( Block *p ) {
duke@435 177 IndexSet *delta = _deltas[p->_pre_order-1];
duke@435 178 if( !delta ) // Not on worklist?
duke@435 179 // Get a free set; flag as being on worklist
duke@435 180 delta = _deltas[p->_pre_order-1] = getfreeset();
duke@435 181 return delta; // Return set of new live-out items
duke@435 182 }
duke@435 183
duke@435 184 // Pull from free list, or allocate. Internal allocation on the returned set
duke@435 185 // is always from thread local storage.
duke@435 186 IndexSet *PhaseLive::getfreeset( ) {
duke@435 187 IndexSet *f = _free_IndexSet;
duke@435 188 if( !f ) {
duke@435 189 f = new IndexSet;
duke@435 190 // f->set_arena(Thread::current()->resource_area());
duke@435 191 f->initialize(_maxlrg, Thread::current()->resource_area());
duke@435 192 } else {
duke@435 193 // Pull from free list
duke@435 194 _free_IndexSet = f->next();
duke@435 195 //f->_cnt = 0; // Reset to empty
duke@435 196 // f->set_arena(Thread::current()->resource_area());
duke@435 197 f->initialize(_maxlrg, Thread::current()->resource_area());
duke@435 198 }
duke@435 199 return f;
duke@435 200 }
duke@435 201
duke@435 202 // Free an IndexSet from a block.
duke@435 203 void PhaseLive::freeset( const Block *p ) {
duke@435 204 IndexSet *f = _deltas[p->_pre_order-1];
duke@435 205 f->set_next(_free_IndexSet);
duke@435 206 _free_IndexSet = f; // Drop onto free list
duke@435 207 _deltas[p->_pre_order-1] = NULL;
duke@435 208 }
duke@435 209
duke@435 210 // Add a live-out value to a given block's live-out set. If it is new, then
duke@435 211 // also add it to the delta set and stick the block on the worklist.
duke@435 212 void PhaseLive::add_liveout( Block *p, uint r, VectorSet &first_pass ) {
duke@435 213 IndexSet *live = &_live[p->_pre_order-1];
duke@435 214 if( live->insert(r) ) { // If actually inserted...
duke@435 215 // We extended the live-out set. See if the value is generated locally.
duke@435 216 // If it is not, then we must extend the live-in set.
duke@435 217 if( !_defs[p->_pre_order-1].member( r ) ) {
duke@435 218 if( !_deltas[p->_pre_order-1] && // Not on worklist?
duke@435 219 first_pass.test(p->_pre_order) )
duke@435 220 _worklist->push(p); // Actually go on worklist if already 1st pass
duke@435 221 getset(p)->insert(r);
duke@435 222 }
duke@435 223 }
duke@435 224 }
duke@435 225
duke@435 226 // Add a vector of live-out values to a given block's live-out set.
duke@435 227 void PhaseLive::add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass ) {
duke@435 228 IndexSet *live = &_live[p->_pre_order-1];
duke@435 229 IndexSet *defs = &_defs[p->_pre_order-1];
duke@435 230 IndexSet *on_worklist = _deltas[p->_pre_order-1];
duke@435 231 IndexSet *delta = on_worklist ? on_worklist : getfreeset();
duke@435 232
duke@435 233 IndexSetIterator elements(lo);
duke@435 234 uint r;
duke@435 235 while ((r = elements.next()) != 0) {
duke@435 236 if( live->insert(r) && // If actually inserted...
duke@435 237 !defs->member( r ) ) // and not defined locally
duke@435 238 delta->insert(r); // Then add to live-in set
duke@435 239 }
duke@435 240
duke@435 241 if( delta->count() ) { // If actually added things
duke@435 242 _deltas[p->_pre_order-1] = delta; // Flag as on worklist now
duke@435 243 if( !on_worklist && // Not on worklist?
duke@435 244 first_pass.test(p->_pre_order) )
duke@435 245 _worklist->push(p); // Actually go on worklist if already 1st pass
duke@435 246 } else { // Nothing there; just free it
duke@435 247 delta->set_next(_free_IndexSet);
duke@435 248 _free_IndexSet = delta; // Drop onto free list
duke@435 249 }
duke@435 250 }
duke@435 251
duke@435 252 #ifndef PRODUCT
duke@435 253 // Dump the live-out set for a block
duke@435 254 void PhaseLive::dump( const Block *b ) const {
duke@435 255 tty->print("Block %d: ",b->_pre_order);
duke@435 256 tty->print("LiveOut: "); _live[b->_pre_order-1].dump();
duke@435 257 uint cnt = b->_nodes.size();
duke@435 258 for( uint i=0; i<cnt; i++ ) {
duke@435 259 tty->print("L%d/", _names[b->_nodes[i]->_idx] );
duke@435 260 b->_nodes[i]->dump();
duke@435 261 }
duke@435 262 tty->print("\n");
duke@435 263 }
duke@435 264
duke@435 265 // Verify that base pointers and derived pointers are still sane.
duke@435 266 void PhaseChaitin::verify_base_ptrs( ResourceArea *a ) const {
kvn@985 267 #ifdef ASSERT
kvn@985 268 Unique_Node_List worklist(a);
adlertz@5539 269 for (uint i = 0; i < _cfg.number_of_blocks(); i++) {
adlertz@5539 270 Block* block = _cfg.get_block(i);
adlertz@5539 271 for (uint j = block->end_idx() + 1; j > 1; j--) {
adlertz@5539 272 Node* n = block->_nodes[j-1];
adlertz@5539 273 if (n->is_Phi()) {
adlertz@5539 274 break;
adlertz@5539 275 }
duke@435 276 // Found a safepoint?
adlertz@5539 277 if (n->is_MachSafePoint()) {
duke@435 278 MachSafePointNode *sfpt = n->as_MachSafePoint();
duke@435 279 JVMState* jvms = sfpt->jvms();
duke@435 280 if (jvms != NULL) {
duke@435 281 // Now scan for a live derived pointer
duke@435 282 if (jvms->oopoff() < sfpt->req()) {
duke@435 283 // Check each derived/base pair
kvn@985 284 for (uint idx = jvms->oopoff(); idx < sfpt->req(); idx++) {
duke@435 285 Node *check = sfpt->in(idx);
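// Inputs from oopoff() onward come in derived/base pairs: the even slot of
// each pair holds the derived pointer, the odd slot holds its base.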
kvn@985 286 bool is_derived = ((idx - jvms->oopoff()) & 1) == 0;
duke@435 287 // search upwards through spills and spill phis for AddP
kvn@985 288 worklist.clear();
kvn@985 289 worklist.push(check);
kvn@985 290 uint k = 0;
kvn@985 291 while( k < worklist.size() ) {
kvn@985 292 check = worklist.at(k);
kvn@985 293 assert(check,"Bad base or derived pointer");
kvn@985 294 // See PhaseChaitin::find_base_for_derived() for all cases.
kvn@985 295 int isc = check->is_Copy();
kvn@985 296 if( isc ) {
kvn@985 297 worklist.push(check->in(isc));
kvn@985 298 } else if( check->is_Phi() ) {
kvn@985 299 for (uint m = 1; m < check->req(); m++)
kvn@985 300 worklist.push(check->in(m));
kvn@985 301 } else if( check->is_Con() ) {
kvn@985 302 if (is_derived) {
kvn@985 303 // Derived is NULL+offset
kvn@985 304 assert(!is_derived || check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad derived pointer");
kvn@985 305 } else {
kvn@985 306 assert(check->bottom_type()->is_ptr()->_offset == 0,"Bad base pointer");
kvn@985 307 // Base either ConP(NULL) or loadConP
kvn@985 308 if (check->is_Mach()) {
kvn@985 309 assert(check->as_Mach()->ideal_Opcode() == Op_ConP,"Bad base pointer");
kvn@985 310 } else {
kvn@985 311 assert(check->Opcode() == Op_ConP &&
kvn@985 312 check->bottom_type()->is_ptr()->ptr() == TypePtr::Null,"Bad base pointer");
kvn@985 313 }
kvn@985 314 }
kvn@985 315 } else if( check->bottom_type()->is_ptr()->_offset == 0 ) {
kvn@985 316 if(check->is_Proj() || check->is_Mach() &&
kvn@985 317 (check->as_Mach()->ideal_Opcode() == Op_CreateEx ||
kvn@985 318 check->as_Mach()->ideal_Opcode() == Op_ThreadLocal ||
kvn@985 319 check->as_Mach()->ideal_Opcode() == Op_CMoveP ||
kvn@985 320 check->as_Mach()->ideal_Opcode() == Op_CheckCastPP ||
kvn@985 321 #ifdef _LP64
kvn@985 322 UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_CastPP ||
kvn@985 323 UseCompressedOops && check->as_Mach()->ideal_Opcode() == Op_DecodeN ||
roland@4159 324 UseCompressedKlassPointers && check->as_Mach()->ideal_Opcode() == Op_DecodeNKlass ||
kvn@985 325 #endif
kvn@985 326 check->as_Mach()->ideal_Opcode() == Op_LoadP ||
kvn@1001 327 check->as_Mach()->ideal_Opcode() == Op_LoadKlass)) {
kvn@1001 328 // Valid nodes
kvn@1001 329 } else {
kvn@1001 330 check->dump();
kvn@985 331 assert(false,"Bad base or derived pointer");
kvn@1001 332 }
kvn@985 333 } else {
kvn@985 334 assert(is_derived,"Bad base pointer");
kvn@985 335 assert(check->is_Mach() && check->as_Mach()->ideal_Opcode() == Op_AddP,"Bad derived pointer");
kvn@985 336 }
kvn@985 337 k++;
kvn@985 338 assert(k < 100000,"Derived pointer checking in infinite loop");
duke@435 339 } // End while
duke@435 340 }
duke@435 341 } // End of check for derived pointers
duke@435 342 } // End of check for debug info
duke@435 343 } // End of if found a safepoint
duke@435 344 } // End of forall instructions in block
duke@435 345 } // End of forall blocks
kvn@985 346 #endif
duke@435 347 }
kvn@1001 348
kvn@1001 349 // Verify that graphs and base pointers are still sane.
kvn@1001 350 void PhaseChaitin::verify( ResourceArea *a, bool verify_ifg ) const {
kvn@1001 351 #ifdef ASSERT
kvn@1001 352 if( VerifyOpto || VerifyRegisterAllocator ) {
kvn@1001 353 _cfg.verify();
kvn@1001 354 verify_base_ptrs(a);
kvn@1001 355 if(verify_ifg)
kvn@1001 356 _ifg->verify(this);
kvn@1001 357 }
duke@435 358 #endif
kvn@1001 359 }
kvn@1001 360
kvn@1001 361 #endif
