--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/opto/block.hpp	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,510 @@
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses the classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
protected:
  Block **_blocks;
  void grow( uint i );          // Grow mapping array to fit index i

public:
  Arena *_arena;                // Arena to allocate in

  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
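
// Illustrative sketch (not part of the original header): map() accepts any
// index, growing the doubling array on demand, and unmapped slots read back
// as NULL through lookup().  Hypothetical usage, assuming an Arena* 'arena'
// and a Block* 'blk' are in scope:
//
//   Block_Array bbs(arena);
//   bbs.map(100, blk);              // grow() doubles _size until 100 fits
//   Block* hit  = bbs.lookup(100);  // == blk
//   Block* miss = bbs.lookup(7);    // NULL: within capacity, never mapped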

class Block_List : public Block_Array {
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
};


class CFGElement : public ResourceObj {
 public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
 public:
  // Nodes in this block, in order
  Node_List _nodes;

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return _nodes[0]; }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif
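
  // Illustrative sketch (not in the original source): dominates() relies on
  // the invariant that an immediate dominator is strictly shallower than the
  // blocks it dominates, so the _dom_depth difference bounds the walk.  With
  // C->_idom == B, B->_idom == A, and _dom_depth 4, 3, 2 respectively,
  // A->dominates(C) lifts 'that' twice (C to B, then B to A) and compares
  // pointers; the same depth-matching idea underlies dom_lca().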

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to scale such frequencies by
  // FreqCountInvocations relative to the old default value of 1500.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit (MaxLoopPad), or if the size of the loop's first instructions
  // exceeds the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( !head()->is_Loop() ||
            padding <= (uint)MaxLoopPad ||
            first_inst_size() > padding ) {
          return padding;
        }
      }
    }
    return 0;
  }

  // Connector blocks.  Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems.  Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }
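
  // Worked example (added for illustration, not in the original source),
  // assuming code_alignment() == 8 and relocInfo::addr_unit() == 1:
  // max_pad == 7, and at current_offset == 13 the low bits give
  // current_alignment == 13 & 7 == 5, so padding == (8 - 5) & 7 == 3 bytes
  // of nops.  The padding is reported unless this is a loop head whose
  // padding exceeds MaxLoopPad while the loop's first instructions would
  // already fit inside that padding.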

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(NULL),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node.  The ender is either the last Node in the block
  // (a Goto, which is its own block projection) or sits _num_succs slots
  // before the end, with its control Projections after it.
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block_Array &bbs);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }
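
  // Illustrative sketch (not in the original source): since connector blocks
  // are elided during Output, branch targets must be resolved through them.
  // For a chain  B1 -> C1 -> C2 -> B2  where C1 and C2 are connectors,
  // B1->non_connector_successor(0) follows _succs[0] from C1 to C2 to B2,
  // the first successor that will actually emit instructions.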

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon( Block_Array &bbs ) const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig) const;
  void dump_pred(const Block_Array *bbs, Block* orig) const;
  void dump_head( const Block_Array *bbs ) const;
  void dump( ) const;
  void dump( const Block_Array *bbs ) const;
#endif
};


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
 private:
  // Build a proper-looking CFG.  Return count of basic blocks
  uint build_cfg();

  // Perform DFS search.
  // Set up 'vertex' as the DFS-number-to-vertex mapping.
  // Set up 'semi' as the vertex-to-DFS-number mapping.
  // Set 'parent' to DFS parent.
  uint DFS( Tarjan *tarjan );

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  MachNode *_goto;
  void insert_goto_at(uint block_no, uint succ_no);

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == _bbs[load->_idx], "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

 public:
  PhaseCFG( Arena *a, RootNode *r, Matcher &m );

  uint _num_blocks;             // Count of basic blocks
  Block_List _blocks;           // List of basic blocks
  RootNode *_root;              // Root of whole program
  Block_Array _bbs;             // Map Nodes to owning Basic Block
  Block *_broot;                // Basic block of root
  uint _rpo_ctr;
  CFGLoop* _root_loop;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint> _node_latency;

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

  // Build dominators
  void Dominators();

  // Estimate block frequencies based on IfNode probabilities
  void Estimate_Block_Frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);
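
  // Illustrative sketch (not in the original source) of how the two schedules
  // combine: schedule_early (above) pins each node at the shallowest block
  // dominated by all of its inputs, while schedule_late (below) computes the
  // dominator-tree LCA of all uses.  Every block on the path between the two
  // is a legal home for the node, so hoist_to_cheaper_block can trade the
  // late position for a lower-frequency one, e.g. lifting a loop-invariant
  // computation out of a loop body (_freq near the trip count) into a block
  // before the loop (_freq near 1).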

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  // Compute the instruction global latency with a backwards walk
  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);

  // Remove empty basic blocks
  void RemoveEmpty();
  bool MoveToNext(Block* bx, uint b_index);
  void MoveToEnd(Block* bx, uint b_index);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  NeverBranch is treated as a conditional branch that always
  // goes the same direction for most of the optimizer, and is used to give a
  // fake exit path to infinite loops.  At this late stage it needs to turn
  // into a Goto so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

  // Insert a node into a block, and update the _bbs
  void insert( Block *b, uint idx, Node *n ) {
    b->_nodes.insert( idx, n );
    _bbs.map( n->_idx, b );
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited  ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};


//------------------------------UnionFindInfo----------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into uint16");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
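
// Illustrative sketch (not part of the original source) of the interface
// above; the exact linking done by Union() lives in block.cpp, so treat this
// as assumed behavior.  After reset(), every index is its own representative;
// Find() chases _indices to a fixed point, and Find_compress() additionally
// re-points the entries it visited at the root so later Finds cost a single
// array read:
//
//   UnionFind uf(4);
//   uf.reset(4);                           // identity map: 0 1 2 3
//   uf.Union(2, 3);                        // 2 and 3 share a representative
//   uf.Union(1, 2);                        // 1 joins the same set
//   bool same = uf.Find(1) == uf.Find(3);  // true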

//----------------------------BlockProbPair---------------------------
// Ordered pair of a successor Block* and the probability of the edge to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;      // block target
  float  _prob;        // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of loop tree is the method-level "pseudo" loop; its parent is null
  CFGLoop *_sibling;     // null-terminated list
  CFGLoop *_child;       // first child; use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;      // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

 public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
  virtual bool is_loop() { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};
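
// Worked example (added for illustration, not in the original source): if a
// loop leaves through its exits with combined probability 0.1 on each
// iteration, _exit_prob == 0.1 and trip_count() == 1.0f / 0.1f == 10.
// compute_freq() first propagates frequencies within the loop assuming the
// head executes once; scale_freq() then multiplies by the trip counts of
// this loop and all enclosing loops to produce the final block frequencies.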