--- a/src/share/vm/opto/callnode.cpp	Mon May 06 19:49:23 2013 -0700
+++ b/src/share/vm/opto/callnode.cpp	Wed May 08 15:08:01 2013 -0700
@@ -523,7 +523,9 @@
 
 
 void JVMState::dump_on(outputStream* st) const {
-  if (_map && !((uintptr_t)_map & 1)) {
+  bool print_map = _map && !((uintptr_t)_map & 1) &&
+                   ((caller() == NULL) || (caller()->map() != _map));
+  if (print_map) {
     if (_map->len() > _map->req()) {  // _map->has_exceptions()
       Node* ex = _map->in(_map->req());  // _map->next_exception()
       // skip the first one; it's already being printed
@@ -532,7 +534,10 @@
         ex->dump(1);
       }
     }
-    _map->dump(2);
+    _map->dump(Verbose ? 2 : 1);
   }
+  if (caller() != NULL) {
+    caller()->dump_on(st);
+  }
   st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
@@ -546,9 +551,6 @@
       _method->print_codes_on(bci(), bci()+1, st);
     }
   }
-  if (caller() != NULL) {
-    caller()->dump_on(st);
-  }
 }
 
 // Extra way to dump a jvms from the debugger,
@@ -584,6 +586,15 @@
   return n;
 }
 
+/**
+ * Reset map for all callers
+ */
+void JVMState::set_map_deep(SafePointNode* map) {
+  for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
+    p->set_map(map);
+  }
+}
+
 //=============================================================================
 uint CallNode::cmp( const Node &n ) const
 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
@@ -663,17 +674,49 @@
 // Determine whether the call could modify the field of the specified
 // instance at the specified offset.
 //
-bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
-  const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();
-
-  // If not an OopPtr or not an instance type, assume the worst.
-  // Note: currently this method is called only for instance types.
-  if (adrInst_t == NULL || !adrInst_t->is_known_instance()) {
-    return true;
+bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
+  assert((t_oop != NULL), "sanity");
+  if (t_oop->is_known_instance()) {
+    // The instance_id is set only for scalar-replaceable allocations which
+    // are not passed as arguments according to Escape Analysis.
+    return false;
   }
-  // The instance_id is set only for scalar-replaceable allocations which
-  // are not passed as arguments according to Escape Analysis.
-  return false;
+  if (t_oop->is_ptr_to_boxed_value()) {
+    ciKlass* boxing_klass = t_oop->klass();
+    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
+      // Skip unrelated boxing methods.
+      Node* proj = proj_out(TypeFunc::Parms);
+      if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
+        return false;
+      }
+    }
+    if (is_CallJava() && as_CallJava()->method() != NULL) {
+      ciMethod* meth = as_CallJava()->method();
+      if (meth->is_accessor()) {
+        return false;
+      }
+      // May modify (by reflection) if an boxing object is passed
+      // as argument or returned.
+      if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) {
+        Node* proj = proj_out(TypeFunc::Parms);
+        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
+        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
+                                 (inst_t->klass() == boxing_klass))) {
+          return true;
+        }
+      }
+      const TypeTuple* d = tf()->domain();
+      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
+        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
+        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
+                                 (inst_t->klass() == boxing_klass))) {
+          return true;
+        }
+      }
+      return false;
+    }
+  }
+  return true;
 }
 
 // Does this call have a direct reference to n other than debug information?
@@ -1020,6 +1063,7 @@
   int scloff = jvms->scloff();
   int endoff = jvms->endoff();
   assert(endoff == (int)req(), "no other states or debug info after me");
+  assert(jvms->scl_size() == 0, "parsed code should not have scalar objects");
   Node* top = Compile::current()->top();
   for (uint i = 0; i < grow_by; i++) {
     ins_req(monoff, top);
@@ -1035,6 +1079,7 @@
   const int MonitorEdges = 2;
   assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
   assert(req() == jvms()->endoff(), "correct sizing");
+  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   int nextmon = jvms()->scloff();
   if (GenerateSynchronizationCode) {
     add_req(lock->box_node());
@@ -1050,6 +1095,7 @@
 
 void SafePointNode::pop_monitor() {
   // Delete last monitor from debug info
+  assert((jvms()->scl_size() == 0), "parsed code should not have scalar objects");
   debug_only(int num_before_pop = jvms()->nof_monitors());
   const int MonitorEdges = (1<<JVMState::logMonitorEdges);
   int scloff = jvms()->scloff();
@@ -1154,6 +1200,7 @@
   init_class_id(Class_Allocate);
   init_flags(Flag_is_macro);
   _is_scalar_replaceable = false;
+  _is_non_escaping = false;
   Node *topnode = C->top();
 
   init_req( TypeFunc::Control , ctrl );
@@ -1169,8 +1216,6 @@
 }
 
 //=============================================================================
-uint AllocateArrayNode::size_of() const { return sizeof(*this); }
-
 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (remove_dead_region(phase, can_reshape)) return this;
   // Don't bother trying to transform a dead node
@@ -1235,6 +1280,8 @@
   // - the narrow_length is 0
   // - the narrow_length is not wider than length
   assert(narrow_length_type == TypeInt::ZERO ||
+         length_type->is_con() && narrow_length_type->is_con() &&
+         (narrow_length_type->_hi <= length_type->_lo) ||
          (narrow_length_type->_hi <= length_type->_hi &&
          narrow_length_type->_lo >= length_type->_lo),
          "narrow type must be narrower than length type");