Fri, 23 Jan 2009 10:41:28 -0500
Merge
src/share/vm/gc_implementation/includeDB_gc_g1
--- a/src/share/vm/classfile/classFileParser.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/classfile/classFileParser.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -232,7 +232,9 @@
     length >= 1, "Illegal constant pool size %u in class file %s",
     length, CHECK_(nullHandle));
   constantPoolOop constant_pool =
-    oopFactory::new_constantPool(length, CHECK_(nullHandle));
+    oopFactory::new_constantPool(length,
+                                 methodOopDesc::IsSafeConc,
+                                 CHECK_(nullHandle));
   constantPoolHandle cp (THREAD, constant_pool);
 
   cp->set_partially_loaded();  // Enables heap verify to work on partial constantPoolOops
@@ -1675,7 +1677,8 @@
   // All sizing information for a methodOop is finally available, now create it
   methodOop m_oop = oopFactory::new_method(
     code_length, access_flags, linenumber_table_length,
-    total_lvt_length, checked_exceptions_length, CHECK_(nullHandle));
+    total_lvt_length, checked_exceptions_length,
+    methodOopDesc::IsSafeConc, CHECK_(nullHandle));
   methodHandle m (THREAD, m_oop);
 
   ClassLoadingService::add_class_method_size(m_oop->size()*HeapWordSize);
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -706,6 +706,30 @@
   }
 }
 
+// Apply the given closure to each live object in the space.
+// The usage of CompactibleFreeListSpace
+// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
+// objects in the space with references to objects that are no longer
+// valid.  For example, an object may reference another object
+// that has already been swept up (collected).  This method uses
+// obj_is_alive() to determine whether it is safe to apply the closure to
+// an object.  See obj_is_alive() for details on how liveness of an
+// object is decided.
+
+void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
+  assert_lock_strong(freelistLock());
+  NOT_PRODUCT(verify_objects_initialized());
+  HeapWord *cur, *limit;
+  size_t curSize;
+  for (cur = bottom(), limit = end(); cur < limit;
+       cur += curSize) {
+    curSize = block_size(cur);
+    if (block_is_obj(cur) && obj_is_alive(cur)) {
+      blk->do_object(oop(cur));
+    }
+  }
+}
+
 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                   UpwardsObjectClosure* cl) {
   assert_locked();
@@ -861,7 +885,9 @@
   } else {
     // must read from what 'p' points to in each loop.
     klassOop k = ((volatile oopDesc*)p)->klass_or_null();
-    if (k != NULL && ((oopDesc*)p)->is_parsable()) {
+    if (k != NULL &&
+        ((oopDesc*)p)->is_parsable() &&
+        ((oopDesc*)p)->is_conc_safe()) {
       assert(k->is_oop(), "Should really be klass oop.");
       oop o = (oop)p;
       assert(o->is_oop(), "Should be an oop");
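The comment above explains why the walk must consult obj_is_alive() before handing a block to the closure. As a rough, self-contained sketch of the same block-by-block pattern — ToySpace and its Header are hypothetical stand-ins, not HotSpot types:

    #include <cstddef>
    #include <functional>

    // Toy model of a free-list space: header[i] describes the block starting
    // at word i. is_obj and live stand in for block_is_obj()/obj_is_alive().
    struct ToySpace {
      struct Header { size_t size_words; bool is_obj; bool live; };
      static const size_t kWords = 64;
      Header header[kWords];
      size_t used_words = 0;

      void safe_object_iterate(const std::function<void(size_t)>& do_object) {
        // Walk block by block (size_words must be >= 1); free chunks and
        // dead objects are stepped over but never handed to the closure.
        for (size_t cur = 0; cur < used_words; cur += header[cur].size_words) {
          if (header[cur].is_obj && header[cur].live) {
            do_object(cur);  // pass the block's start (a word index here)
          }
        }
      }
    };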
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -481,6 +481,15 @@
   void oop_iterate(OopClosure* cl);
 
   void object_iterate(ObjectClosure* blk);
+  // Apply the closure to each object in the space whose references
+  // point to objects in the heap.  The usage of CompactibleFreeListSpace
+  // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
+  // objects in the space with references to objects that are no longer
+  // valid.  For example, an object may reference another object
+  // that has already been swept up (collected).  This method uses
+  // obj_is_alive() to determine whether it is safe to iterate over
+  // an object.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
 
   // Requires that "mr" be entirely within the space.
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -3018,6 +3018,16 @@
 }
 
 void
+ConcurrentMarkSweepGeneration::safe_object_iterate(ObjectClosure* cl) {
+  if (freelistLock()->owned_by_self()) {
+    Generation::safe_object_iterate(cl);
+  } else {
+    MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
+    Generation::safe_object_iterate(cl);
+  }
+}
+
+void
 ConcurrentMarkSweepGeneration::pre_adjust_pointers() {
 }
 
@@ -6623,7 +6633,11 @@
   if (_bitMap->isMarked(addr)) {
     // it's marked; is it potentially uninitialized?
     if (p->klass_or_null() != NULL) {
-      if (CMSPermGenPrecleaningEnabled && !p->is_parsable()) {
+      // If is_conc_safe is false, the object may be undergoing
+      // change by the VM outside a safepoint.  Don't try to
+      // scan it, but rather leave it for the remark phase.
+      if (CMSPermGenPrecleaningEnabled &&
+          (!p->is_conc_safe() || !p->is_parsable())) {
         // Signal precleaning to redirty the card since
         // the klass pointer is already installed.
         assert(size == 0, "Initial value");
@@ -7001,7 +7015,6 @@
     _mut->clear_range(mr);
   }
   DEBUG_ONLY(})
-
   // Note: the finger doesn't advance while we drain
   // the stack below.
   PushOrMarkClosure pushOrMarkClosure(_collector,
@@ -8062,9 +8075,13 @@
 #ifdef DEBUG
       if (oop(addr)->klass_or_null() != NULL &&
           (   !_collector->should_unload_classes()
-           || oop(addr)->is_parsable())) {
+           || (oop(addr)->is_parsable() &&
+               oop(addr)->is_conc_safe()))) {
         // Ignore mark word because we are running concurrent with mutators
         assert(oop(addr)->is_oop(true), "live block should be an oop");
+        // is_conc_safe is checked before performing this assertion
+        // because an object that is not is_conc_safe may yet have
+        // the return from size() correct.
         assert(size ==
                CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size()),
                "P-mark and computed size do not agree");
@@ -8077,6 +8094,13 @@
          (!_collector->should_unload_classes()
           || oop(addr)->is_parsable()),
          "Should be an initialized object");
+  // Note that there are objects used during class redefinition
+  // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite())
+  // which are discarded with their is_conc_safe state still
+  // false.  These objects may be floating garbage, so they may be
+  // seen here.  If they are floating garbage, their size
+  // should be attainable from their klass.  Note that
+  // is_conc_safe() is true for oop(addr).
   // Ignore mark word because we are running concurrent with mutators
   assert(oop(addr)->is_oop(true), "live block should be an oop");
   // Verify that the bit map has no bits marked between
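The precleaning change above adds a second reason to postpone an object to the remark phase. A minimal sketch of that decision, with hypothetical stand-ins (ToyObj, preclean_decision) for the oop queries:

    // Toy stand-ins for the oop queries used in the precleaning test.
    struct ToyObj {
      bool has_klass;   // models p->klass_or_null() != NULL
      bool parsable;    // models p->is_parsable()
      bool conc_safe;   // models p->is_conc_safe()
    };

    enum ScanDecision { SKIP_UNINITIALIZED, REDIRTY_FOR_REMARK, SCAN_NOW };

    // An object with an installed klass may still be unsafe to scan
    // concurrently; its card is left dirty and it is revisited at remark.
    ScanDecision preclean_decision(const ToyObj& o, bool perm_gen_precleaning) {
      if (!o.has_klass) {
        return SKIP_UNINITIALIZED;  // klass pointer not yet installed
      }
      if (perm_gen_precleaning && (!o.conc_safe || !o.parsable)) {
        return REDIRTY_FOR_REMARK;  // may be mutated outside a safepoint
      }
      return SCAN_NOW;
    }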
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -1212,6 +1212,7 @@
   // More iteration support
   virtual void oop_iterate(MemRegion mr, OopClosure* cl);
   virtual void oop_iterate(OopClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl);
   virtual void object_iterate(ObjectClosure* cl);
 
   // Need to declare the full complement of closures, whether we'll
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -1285,7 +1285,9 @@
   _unclean_regions_coming(false),
   _young_list(new YoungList(this)),
   _gc_time_stamp(0),
-  _surviving_young_words(NULL)
+  _surviving_young_words(NULL),
+  _in_cset_fast_test(NULL),
+  _in_cset_fast_test_base(NULL)
 {
   _g1h = this; // To catch bugs.
   if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
@@ -2485,6 +2487,19 @@
     g1_policy()->record_collection_pause_start(start_time_sec,
                                                start_used_bytes);
 
+    guarantee(_in_cset_fast_test == NULL, "invariant");
+    guarantee(_in_cset_fast_test_base == NULL, "invariant");
+    _in_cset_fast_test_length = n_regions();
+    _in_cset_fast_test_base =
+                     NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
+    memset(_in_cset_fast_test_base, false,
+           _in_cset_fast_test_length * sizeof(bool));
+    // We're biasing _in_cset_fast_test to avoid subtracting the
+    // beginning of the heap every time we want to index; basically
+    // it's the same as what we do with the card table.
+    _in_cset_fast_test = _in_cset_fast_test_base -
+             ((size_t) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);
+
 #if SCAN_ONLY_VERBOSE
     _young_list->print();
 #endif // SCAN_ONLY_VERBOSE
@@ -2553,6 +2568,12 @@
     free_collection_set(g1_policy()->collection_set());
     g1_policy()->clear_collection_set();
 
+    FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+    // this is more for peace of mind; we're nulling them here and
+    // we're expecting them to be null at the beginning of the next GC
+    _in_cset_fast_test = NULL;
+    _in_cset_fast_test_base = NULL;
+
     if (popular_region != NULL) {
       // We have to wait until now, because we don't want the region to
       // be rescheduled for pop-evac during RS update.
@@ -3560,6 +3581,9 @@
   size_t undo_waste()                            { return _undo_waste; }
 
   void push_on_queue(oop* ref) {
+    assert(ref != NULL, "invariant");
+    assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref), "invariant");
+
     if (!refs()->push(ref)) {
       overflowed_refs()->push(ref);
       IF_G1_DETAILED_STATS(note_overflow_push());
@@ -3572,6 +3596,10 @@
     if (!refs()->pop_local(ref)) {
       ref = NULL;
     } else {
+      assert(ref != NULL, "invariant");
+      assert(has_partial_array_mask(ref) || _g1h->obj_in_cs(*ref),
+             "invariant");
+
       IF_G1_DETAILED_STATS(note_pop());
     }
   }
@@ -3601,8 +3629,7 @@
 
       obj = alloc_buf->allocate(word_sz);
       assert(obj != NULL, "buffer was definitely big enough...");
-    }
-    else {
+    } else {
       obj = _g1h->par_allocate_during_gc(purpose, word_sz);
     }
     return obj;
@@ -3695,24 +3722,57 @@
     }
   }
 
+private:
+  void deal_with_reference(oop* ref_to_scan) {
+    if (has_partial_array_mask(ref_to_scan)) {
+      _partial_scan_cl->do_oop_nv(ref_to_scan);
+    } else {
+      // Note: we can use "raw" versions of "region_containing" because
+      // "obj_to_scan" is definitely in the heap, and is not in a
+      // humongous region.
+      HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
+      _evac_cl->set_region(r);
+      _evac_cl->do_oop_nv(ref_to_scan);
+    }
+  }
+
+public:
   void trim_queue() {
+    // I've replicated the loop twice, first to drain the overflow
+    // queue, second to drain the task queue. This is better than
+    // having a single loop, which checks both conditions and, inside
+    // it, either pops the overflow queue or the task queue, as each
+    // loop is tighter. Also, the decision to drain the overflow queue
+    // first is not arbitrary, as the overflow queue is not visible
+    // to the other workers, whereas the task queue is. So, we want to
+    // drain the "invisible" entries first, while allowing the other
+    // workers to potentially steal the "visible" entries.
+
     while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
-      oop *ref_to_scan = NULL;
-      if (overflowed_refs_to_scan() == 0) {
+      while (overflowed_refs_to_scan() > 0) {
+        oop *ref_to_scan = NULL;
+        pop_from_overflow_queue(ref_to_scan);
+        assert(ref_to_scan != NULL, "invariant");
+        // We shouldn't have pushed it on the queue if it was not
+        // pointing into the CSet.
+        assert(has_partial_array_mask(ref_to_scan) ||
+               _g1h->obj_in_cs(*ref_to_scan), "sanity");
+
+        deal_with_reference(ref_to_scan);
+      }
+
+      while (refs_to_scan() > 0) {
+        oop *ref_to_scan = NULL;
         pop_from_queue(ref_to_scan);
-      } else {
-        pop_from_overflow_queue(ref_to_scan);
-      }
-      if (ref_to_scan != NULL) {
-        if ((intptr_t)ref_to_scan & G1_PARTIAL_ARRAY_MASK) {
-          _partial_scan_cl->do_oop_nv(ref_to_scan);
-        } else {
-          // Note: we can use "raw" versions of "region_containing" because
-          // "obj_to_scan" is definitely in the heap, and is not in a
-          // humongous region.
-          HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
-          _evac_cl->set_region(r);
-          _evac_cl->do_oop_nv(ref_to_scan);
+
+        if (ref_to_scan != NULL) {
+          // We shouldn't have pushed it on the queue if it was not
+          // pointing into the CSet.
+          assert(has_partial_array_mask(ref_to_scan) ||
+                 _g1h->obj_in_cs(*ref_to_scan), "sanity");
+
+          deal_with_reference(ref_to_scan);
        }
      }
    }
@@ -3728,16 +3788,25 @@
 // Should probably be made inline and moved in g1OopClosures.inline.hpp.
 void G1ParScanClosure::do_oop_nv(oop* p) {
   oop obj = *p;
+
   if (obj != NULL) {
-    if (_g1->obj_in_cs(obj)) {
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        _par_scan_state->push_on_queue(p);
-        return;
-      }
+    if (_g1->in_cset_fast_test(obj)) {
+      // We're not going to even bother checking whether the object is
+      // already forwarded or not, as this usually causes an immediate
+      // stall. We'll try to prefetch the object (for write, given that
+      // we might need to install the forwarding reference) and we'll
+      // get back to it when we pop it from the queue
+      Prefetch::write(obj->mark_addr(), 0);
+      Prefetch::read(obj->mark_addr(), (HeapWordSize*2));
+
+      // slightly paranoid test; I'm trying to catch potential
+      // problems before we go into push_on_queue to know where the
+      // problem is coming from
+      assert(obj == *p, "the value of *p should not have changed");
+      _par_scan_state->push_on_queue(p);
+    } else {
+      _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
     }
-    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
   }
 }
 
@@ -3777,13 +3846,36 @@
     return _g1->handle_evacuation_failure_par(cl, old);
   }
 
+  // We're going to allocate linearly, so might as well prefetch ahead.
+  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
+
   oop forward_ptr = old->forward_to_atomic(obj);
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
+    if (g1p->track_object_age(alloc_purpose)) {
+      // We could simply do obj->incr_age(). However, this causes a
+      // performance issue. obj->incr_age() will first check whether
+      // the object has a displaced mark by checking its mark word;
+      // getting the mark word from the new location of the object
+      // stalls. So, given that we already have the mark word and we
+      // are about to install it anyway, it's better to increase the
+      // age on the mark word, when the object does not have a
+      // displaced mark word. We're not expecting many objects to have
+      // a displaced mark word, so that case is not optimized
+      // further (it could be...) and we simply call obj->incr_age().
+
+      if (m->has_displaced_mark_helper()) {
+        // in this case, we have to install the mark word first,
+        // otherwise obj looks to be forwarded (the old mark word,
+        // which contains the forward pointer, was copied)
+        obj->set_mark(m);
+        obj->incr_age();
+      } else {
+        m = m->incr_age();
+      }
+    }
     obj->set_mark(m);
-    if (g1p->track_object_age(alloc_purpose)) {
-      obj->incr_age();
-    }
+
     // preserve "next" mark bit
     if (_g1->mark_in_progress() && !_g1->is_obj_ill(old)) {
       if (!use_local_bitmaps ||
@@ -3805,9 +3897,11 @@
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
       arrayOop(old)->set_length(0);
-      _par_scan_state->push_on_queue((oop*) ((intptr_t)old | G1_PARTIAL_ARRAY_MASK));
+      _par_scan_state->push_on_queue(set_partial_array_mask(old));
     } else {
-      _scanner->set_region(_g1->heap_region_containing(obj));
+      // No point in using the slower heap_region_containing() method,
+      // given that we know obj is in the heap.
+      _scanner->set_region(_g1->heap_region_containing_raw(obj));
       obj->oop_iterate_backwards(_scanner);
     }
   } else {
@@ -3817,47 +3911,55 @@
   return obj;
 }
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
-void G1ParCopyClosure<do_gen_barrier, barrier, do_mark_forwardee>::do_oop_work(oop* p) {
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
+void G1ParCopyClosure<do_gen_barrier, barrier,
+                      do_mark_forwardee, skip_cset_test>::do_oop_work(oop* p) {
   oop obj = *p;
   assert(barrier != G1BarrierRS || obj != NULL,
          "Precondition: G1BarrierRS implies obj is nonNull");
 
-  if (obj != NULL) {
-    if (_g1->obj_in_cs(obj)) {
+  // The only time we skip the cset test is when we're scanning
+  // references popped from the queue. And we only push on the queue
+  // references that we know point into the cset, so no point in
+  // checking again. But we'll leave an assert here for peace of mind.
+  assert(!skip_cset_test || _g1->obj_in_cs(obj), "invariant");
+
+  // here the null check is implicit in the cset_fast_test() test
+  if (skip_cset_test || _g1->in_cset_fast_test(obj)) {
 #if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" into CS.",
-                             p, (void*) obj);
+    gclog_or_tty->print_cr("Loc "PTR_FORMAT" contains pointer "PTR_FORMAT" "
+                           "into CS.", p, (void*) obj);
 #endif
-      if (obj->is_forwarded()) {
-        *p = obj->forwardee();
-      } else {
-        *p = copy_to_survivor_space(obj);
-      }
-      // When scanning the RS, we only care about objs in CS.
-      if (barrier == G1BarrierRS) {
-        _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
-      }
+    if (obj->is_forwarded()) {
+      *p = obj->forwardee();
+    } else {
+      *p = copy_to_survivor_space(obj);
     }
-    // When scanning moved objs, must look at all oops.
-    if (barrier == G1BarrierEvac) {
+    // When scanning the RS, we only care about objs in CS.
+    if (barrier == G1BarrierRS) {
       _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
     }
-
-    if (do_gen_barrier) {
-      par_do_barrier(p);
-    }
-  }
-}
-
-template void G1ParCopyClosure<false, G1BarrierEvac, false>::do_oop_work(oop* p);
-
-template <class T> void G1ParScanPartialArrayClosure::process_array_chunk(
+  }
+
+  // When scanning moved objs, must look at all oops.
+  if (barrier == G1BarrierEvac && obj != NULL) {
+    _g1_rem->par_write_ref(_from, p, _par_scan_state->queue_num());
+  }
+
+  if (do_gen_barrier && obj != NULL) {
+    par_do_barrier(p);
+  }
+}
+
+template void G1ParCopyClosure<false, G1BarrierEvac, false, true>::do_oop_work(oop* p);
+
+template<class T> void G1ParScanPartialArrayClosure::process_array_chunk(
   oop obj, int start, int end) {
   // process our set of indices (include header in first chunk)
   assert(start < end, "invariant");
   T* const base      = (T*)objArrayOop(obj)->base();
-  T* const start_addr = base + start;
+  T* const start_addr = (start == 0) ? (T*) obj : base + start;
   T* const end_addr   = base + end;
   MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
   _scanner.set_region(_g1->heap_region_containing(obj));
@@ -3866,7 +3968,8 @@
 
 void G1ParScanPartialArrayClosure::do_oop_nv(oop* p) {
   assert(!UseCompressedOops, "Needs to be fixed to work with compressed oops");
-  oop old = oop((intptr_t)p & ~G1_PARTIAL_ARRAY_MASK);
+  assert(has_partial_array_mask(p), "invariant");
+  oop old = clear_partial_array_mask(p);
   assert(old->is_objArray(), "must be obj array");
   assert(old->is_forwarded(), "must be forwarded");
   assert(Universe::heap()->is_in_reserved(old), "must be in heap.");
@@ -3884,7 +3987,7 @@
     end = start + ParGCArrayScanChunk;
     arrayOop(old)->set_length(end);
     // Push remainder.
-    _par_scan_state->push_on_queue((oop*) ((intptr_t) old | G1_PARTIAL_ARRAY_MASK));
+    _par_scan_state->push_on_queue(set_partial_array_mask(old));
   } else {
     // Restore length so that the heap remains parsable in
     // case of evacuation failure.
@@ -3893,11 +3996,6 @@
 
   // process our set of indices (include header in first chunk)
   process_array_chunk<oop>(obj, start, end);
-  oop* start_addr = start == 0 ? (oop*)obj : obj->obj_at_addr<oop>(start);
-  oop* end_addr = (oop*)(obj->base()) + end; // obj_at_addr(end) asserts end < length
-  MemRegion mr((HeapWord*)start_addr, (HeapWord*)end_addr);
-  _scanner.set_region(_g1->heap_region_containing(obj));
-  obj->oop_iterate(&_scanner, mr);
 }
 
 int G1ScanAndBalanceClosure::_nq = 0;
@@ -3931,6 +4029,13 @@
                                      pss->hash_seed(),
                                      ref_to_scan)) {
       IF_G1_DETAILED_STATS(pss->note_steal());
+
+      // slightly paranoid tests; I'm trying to catch potential
+      // problems before we go into push_on_queue to know where the
+      // problem is coming from
+      assert(ref_to_scan != NULL, "invariant");
+      assert(has_partial_array_mask(ref_to_scan) ||
+             _g1h->obj_in_cs(*ref_to_scan), "invariant");
       pss->push_on_queue(ref_to_scan);
       continue;
     }
@@ -3976,10 +4081,10 @@
     ResourceMark rm;
     HandleMark   hm;
 
-    G1ParScanThreadState pss(_g1h, i);
-    G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
-    G1ParScanHeapEvacClosure evac_failure_cl(_g1h, &pss);
-    G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
+    G1ParScanThreadState            pss(_g1h, i);
+    G1ParScanHeapEvacClosure        scan_evac_cl(_g1h, &pss);
+    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
+    G1ParScanPartialArrayClosure    partial_scan_cl(_g1h, &pss);
 
     pss.set_evac_closure(&scan_evac_cl);
     pss.set_evac_failure_closure(&evac_failure_cl);
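The two-loop drain in trim_queue() is worth isolating. A self-contained sketch of the same drain order, using plain std::deque stand-ins — ToyWorker and the int "references" are hypothetical; the real queues are lock-free work-stealing structures:

    #include <deque>

    struct ToyWorker {
      std::deque<int> overflow;    // private: other workers cannot see it
      std::deque<int> task_queue;  // shared: other workers may steal from it

      void deal_with_reference(int ref) { (void) ref; /* scan/copy elided */ }

      void trim_queue() {
        while (!overflow.empty() || !task_queue.empty()) {
          // Drain the "invisible" overflow entries first...
          while (!overflow.empty()) {
            int ref = overflow.front();
            overflow.pop_front();
            deal_with_reference(ref);
          }
          // ...then the "visible" ones, which stay stealable meanwhile.
          while (!task_queue.empty()) {
            int ref = task_queue.back();
            task_queue.pop_back();
            deal_with_reference(ref);
          }
        }
      }
    };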
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -247,6 +247,27 @@
   NumberSeq _pop_obj_rc_at_copy;
   void print_popularity_summary_info() const;
 
+  // This is used for a quick test on whether a reference points into
+  // the collection set or not. Basically, we have an array, with one
+  // byte per region, and that byte denotes whether the corresponding
+  // region is in the collection set or not. The entry corresponding
+  // to the bottom of the heap, i.e., region 0, is pointed to by
+  // _in_cset_fast_test_base. The _in_cset_fast_test field has been
+  // biased so that it actually points to address 0 of the address
+  // space, to make the test as fast as possible (we can simply shift
+  // the address to index into it, instead of having to subtract the
+  // bottom of the heap from the address before shifting it; basically
+  // it works in the same way the card table works).
+  bool* _in_cset_fast_test;
+
+  // The allocated array used for the fast test on whether a reference
+  // points into the collection set or not. This field is also used to
+  // free the array.
+  bool* _in_cset_fast_test_base;
+
+  // The length of the _in_cset_fast_test_base array.
+  size_t _in_cset_fast_test_length;
+
   volatile unsigned _gc_time_stamp;
 
   size_t* _surviving_young_words;
@@ -368,6 +389,38 @@
   virtual void gc_prologue(bool full);
   virtual void gc_epilogue(bool full);
 
+  // We register a region with the fast "in collection set" test. We
+  // simply set to true the array slot corresponding to this region.
+  void register_region_with_in_cset_fast_test(HeapRegion* r) {
+    assert(_in_cset_fast_test_base != NULL, "sanity");
+    assert(r->in_collection_set(), "invariant");
+    int index = r->hrs_index();
+    assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
+           "invariant");
+    assert(!_in_cset_fast_test_base[index], "invariant");
+    _in_cset_fast_test_base[index] = true;
+  }
+
+  // This is a fast test on whether a reference points into the
+  // collection set or not. It does not assume that the reference
+  // points into the heap; if it doesn't, it will return false.
+  bool in_cset_fast_test(oop obj) {
+    assert(_in_cset_fast_test != NULL, "sanity");
+    if (_g1_committed.contains((HeapWord*) obj)) {
+      // no need to subtract the bottom of the heap from obj,
+      // _in_cset_fast_test is biased
+      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+      bool ret = _in_cset_fast_test[index];
+      // let's make sure the result is consistent with what the slower
+      // test returns
+      assert( ret || !obj_in_cs(obj), "sanity");
+      assert(!ret ||  obj_in_cs(obj), "sanity");
+      return ret;
+    } else {
+      return false;
+    }
+  }
+
 protected:
 
   // Shrink the garbage-first heap by at most the given size (in bytes!).
@@ -850,6 +903,7 @@
 
   // Iterate over all objects, calling "cl.do_object" on each.
   virtual void object_iterate(ObjectClosure* cl);
+  virtual void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
 
   // Iterate over all objects allocated since the last collection, calling
   // "cl.do_object" on each.  The heap must have been initialized properly
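The biasing described in the comment fits in a few lines. A sketch under assumed constants (kHeapStart, kLogRegionBytes, and kNumRegions are made up; the out-of-range pointer arithmetic is formally undefined in ISO C++, but it is the same trick HotSpot's card table relies on):

    #include <cstddef>
    #include <cstdint>

    const int       kLogRegionBytes = 20;           // assumed 1 MB regions
    const uintptr_t kHeapStart      = 0x40000000u;  // assumed heap bottom
    const size_t    kNumRegions     = 256;

    static bool table_base[kNumRegions];  // one entry per region

    // Bias the pointer once, at setup time, so lookups need no subtraction.
    static bool* biased_table = table_base - (kHeapStart >> kLogRegionBytes);

    // Hot-path test: one shift and one load. Only valid for addresses
    // already known to be inside the heap.
    inline bool in_cset(uintptr_t addr) {
      return biased_table[addr >> kLogRegionBytes];
    }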
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -36,8 +36,11 @@
 
 inline HeapRegion*
 G1CollectedHeap::heap_region_containing_raw(const void* addr) const {
-  HeapRegion* res = _hrs->addr_to_region(addr);
-  assert(res != NULL, "addr outside of heap?");
+  assert(_g1_reserved.contains(addr), "invariant");
+  size_t index = ((intptr_t) addr - (intptr_t) _g1_reserved.start())
+                                            >> HeapRegion::LogOfHRGrainBytes;
+  HeapRegion* res = _hrs->at(index);
+  assert(res == _hrs->addr_to_region(addr), "sanity");
   return res;
 }
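For comparison with the biased table above, the "raw" lookup subtracts the reserved base before shifting and indexes the region table directly. A sketch with hypothetical constants and a ToyRegion table:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    const int       kLogRegionBytes = 20;           // assumed 1 MB regions
    const uintptr_t kReservedStart  = 0x40000000u;  // assumed reserved base
    const size_t    kNumRegions     = 256;

    struct ToyRegion { /* region metadata elided */ };
    static ToyRegion regions[kNumRegions];

    // Constant-time lookup: subtract the base, shift, index. No search.
    inline ToyRegion* region_containing_raw(uintptr_t addr) {
      assert(addr >= kReservedStart);  // caller must pass a heap address
      size_t index = (addr - kReservedStart) >> kLogRegionBytes;
      assert(index < kNumRegions);
      return &regions[index];
    }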
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -2985,6 +2985,7 @@
   _collection_set = hr;
   _collection_set_size++;
   _collection_set_bytes_used_before += hr->used();
+  _g1->register_region_with_in_cset_fast_test(hr);
 }
 
 void
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -77,6 +77,18 @@
 
 #define G1_PARTIAL_ARRAY_MASK 1
 
+inline bool has_partial_array_mask(oop* ref) {
+  return (intptr_t) ref & G1_PARTIAL_ARRAY_MASK;
+}
+
+inline oop* set_partial_array_mask(oop obj) {
+  return (oop*) ((intptr_t) obj | G1_PARTIAL_ARRAY_MASK);
+}
+
+inline oop clear_partial_array_mask(oop* ref) {
+  return oop((intptr_t) ref & ~G1_PARTIAL_ARRAY_MASK);
+}
+
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
   G1ParScanClosure _scanner;
   template <class T> void process_array_chunk(oop obj, int start, int end);
@@ -101,7 +113,8 @@
     G1ParClosureSuper(g1, par_scan_state), _scanner(scanner) { }
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
 class G1ParCopyClosure : public G1ParCopyHelper {
   G1ParScanClosure _scanner;
   void do_oop_work(oop* p);
@@ -119,14 +132,22 @@
   virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
 };
 
-typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, false> G1ParScanPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierNone, true>  G1ParScanAndMarkExtRootClosure;
-typedef G1ParCopyClosure<true,  G1BarrierNone, true>  G1ParScanAndMarkPermClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   false> G1ParScanHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierRS,   true>  G1ParScanAndMarkHeapRSClosure;
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
-
+typedef G1ParCopyClosure<false, G1BarrierNone, false, false> G1ParScanExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, false, false> G1ParScanPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierNone, true,  false> G1ParScanAndMarkExtRootClosure;
+typedef G1ParCopyClosure<true,  G1BarrierNone, true,  false> G1ParScanAndMarkPermClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   false, false> G1ParScanHeapRSClosure;
+typedef G1ParCopyClosure<false, G1BarrierRS,   true,  false> G1ParScanAndMarkHeapRSClosure;
+// This is the only case when we set skip_cset_test. Basically, this
+// closure is (should?) only be called directly while we're draining
+// the overflow and task queues. In that case we know that the
+// reference in question points into the collection set, otherwise we
+// would not have pushed it on the queue.
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
+// We need a separate closure to handle references during evacuation
+// failure processing, as it cannot assume that the reference already
+// points into the collection set (like G1ParScanHeapEvacClosure does).
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, false> G1ParScanHeapEvacFailureClosure;
 
 class FilterIntoCSClosure: public OopClosure {
   G1CollectedHeap* _g1;
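These helpers work because object pointers are at least word-aligned, leaving the low bit free to mark queue entries that denote partial-array chunks rather than plain references. A runnable sketch of the same tagging scheme (has_mask/set_mask/clear_mask are illustrative names, not the HotSpot ones):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPartialArrayMask = 1;

    inline bool has_mask(void* ref) {
      return ((uintptr_t) ref & kPartialArrayMask) != 0;
    }
    inline void* set_mask(void* obj) {
      return (void*) ((uintptr_t) obj | kPartialArrayMask);
    }
    inline void* clear_mask(void* ref) {
      return (void*) ((uintptr_t) ref & ~kPartialArrayMask);
    }

    int main() {
      alignas(8) static int slot;          // aligned, so the low bit is free
      void* tagged = set_mask(&slot);
      assert(has_mask(tagged));            // entry is a partial-array chunk
      assert(clear_mask(tagged) == &slot); // original pointer is recovered
      assert(!has_mask(&slot));            // untagged entries test clean
      return 0;
    }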
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -28,7 +28,7 @@
 
 #define G1_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw) \
                                                                             \
-  product(intx, ParallelGCG1AllocBufferSize, 4*K,                           \
+  product(intx, ParallelGCG1AllocBufferSize, 8*K,                           \
           "Size of parallel G1 allocation buffers in to-space.")            \
                                                                             \
   product(intx, G1TimeSliceMS, 500,                                         \
--- a/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/g1/g1_specialized_oop_closures.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -32,11 +32,13 @@
   G1BarrierNone, G1BarrierRS, G1BarrierEvac
 };
 
-template<bool do_gen_barrier, G1Barrier barrier, bool do_mark_forwardee>
+template<bool do_gen_barrier, G1Barrier barrier,
+         bool do_mark_forwardee, bool skip_cset_test>
 class G1ParCopyClosure;
 class G1ParScanClosure;
 
-typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
+typedef G1ParCopyClosure<false, G1BarrierEvac, false, true>
+                                                      G1ParScanHeapEvacClosure;
 
 class FilterIntoCSClosure;
 class FilterOutOfRegionClosure;
--- a/src/share/vm/gc_implementation/includeDB_gc_g1	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/includeDB_gc_g1	Fri Jan 23 10:41:28 2009 -0500
@@ -31,7 +31,7 @@
 cardTableRS.cpp                         concurrentMark.hpp
 cardTableRS.cpp                         g1SATBCardTableModRefBS.hpp
 
-collectionSetChooser.cpp                g1CollectedHeap.hpp
+collectionSetChooser.cpp                g1CollectedHeap.inline.hpp
 collectionSetChooser.cpp                g1CollectorPolicy.hpp
 collectionSetChooser.cpp                collectionSetChooser.hpp
 collectionSetChooser.cpp                space.inline.hpp
@@ -43,7 +43,7 @@
 concurrentG1Refine.cpp                  concurrentG1Refine.hpp
 concurrentG1Refine.cpp                  concurrentG1RefineThread.hpp
 concurrentG1Refine.cpp                  copy.hpp
-concurrentG1Refine.cpp                  g1CollectedHeap.hpp
+concurrentG1Refine.cpp                  g1CollectedHeap.inline.hpp
 concurrentG1Refine.cpp                  g1RemSet.hpp
 concurrentG1Refine.cpp                  space.inline.hpp
 
@@ -51,7 +51,7 @@
 
 concurrentG1RefineThread.cpp            concurrentG1Refine.hpp
 concurrentG1RefineThread.cpp            concurrentG1RefineThread.hpp
-concurrentG1RefineThread.cpp            g1CollectedHeap.hpp
+concurrentG1RefineThread.cpp            g1CollectedHeap.inline.hpp
 concurrentG1RefineThread.cpp            g1CollectorPolicy.hpp
 concurrentG1RefineThread.cpp            handles.inline.hpp
 concurrentG1RefineThread.cpp            mutexLocker.hpp
@@ -168,7 +168,7 @@
 g1CollectorPolicy.cpp                   concurrentMarkThread.inline.hpp
 g1CollectorPolicy.cpp                   debug.hpp
 g1CollectorPolicy.cpp                   java.hpp
-g1CollectorPolicy.cpp                   g1CollectedHeap.hpp
+g1CollectorPolicy.cpp                   g1CollectedHeap.inline.hpp
 g1CollectorPolicy.cpp                   g1CollectorPolicy.hpp
 g1CollectorPolicy.cpp                   heapRegionRemSet.hpp
 g1CollectorPolicy.cpp                   mutexLocker.hpp
@@ -189,7 +189,7 @@
 g1MarkSweep.cpp                         codeCache.hpp
 g1MarkSweep.cpp                         events.hpp
 g1MarkSweep.cpp                         fprofiler.hpp
-g1MarkSweep.hpp                         g1CollectedHeap.hpp
+g1MarkSweep.hpp                         g1CollectedHeap.inline.hpp
 g1MarkSweep.cpp                         g1MarkSweep.hpp
 g1MarkSweep.cpp                         gcLocker.hpp
 g1MarkSweep.cpp                         genCollectedHeap.hpp
@@ -285,7 +285,7 @@
 heapRegionRemSet.cpp                    space.inline.hpp
 
 heapRegionSeq.cpp                       allocation.hpp
-heapRegionSeq.cpp                       g1CollectedHeap.hpp
+heapRegionSeq.cpp                       g1CollectedHeap.inline.hpp
 heapRegionSeq.cpp                       heapRegionSeq.hpp
 
 heapRegionSeq.hpp                       growableArray.hpp
@@ -336,18 +336,18 @@
 survRateGroup.hpp                       numberSeq.hpp
 
 survRateGroup.cpp                       allocation.hpp
-survRateGroup.cpp                       g1CollectedHeap.hpp
+survRateGroup.cpp                       g1CollectedHeap.inline.hpp
 survRateGroup.cpp                       g1CollectorPolicy.hpp
 survRateGroup.cpp                       heapRegion.hpp
 survRateGroup.cpp                       survRateGroup.hpp
 
 thread.cpp                              concurrentMarkThread.inline.hpp
 
-universe.cpp                            g1CollectedHeap.hpp
+universe.cpp                            g1CollectedHeap.inline.hpp
 universe.cpp                            g1CollectorPolicy.hpp
 
 vm_operations_g1.hpp                    vmGCOperations.hpp
 
 vm_operations_g1.cpp                    vm_operations_g1.hpp
-vm_operations_g1.cpp                    g1CollectedHeap.hpp
+vm_operations_g1.cpp                    g1CollectedHeap.inline.hpp
 vm_operations_g1.cpp                    isGCActiveMark.hpp
--- a/src/share/vm/gc_implementation/includeDB_gc_shared	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/includeDB_gc_shared	Fri Jan 23 10:41:28 2009 -0500
@@ -100,4 +100,4 @@
 spaceCounters.hpp                       perfData.hpp
 spaceCounters.hpp                       generationCounters.hpp
 
-vmGCOperations.cpp                      g1CollectedHeap.hpp
+vmGCOperations.cpp                      g1CollectedHeap.inline.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -200,6 +200,7 @@
 
   void oop_iterate(OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl) { object_iterate(cl); }
   void permanent_oop_iterate(OopClosure* cl);
   void permanent_object_iterate(ObjectClosure* cl);
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -466,6 +466,10 @@
   // This includes objects in permanent memory.
   virtual void object_iterate(ObjectClosure* cl) = 0;
 
+  // Similar to object_iterate() except iterates only
+  // over live objects.
+  virtual void safe_object_iterate(ObjectClosure* cl) = 0;
+
   // Behaves the same as oop_iterate, except only traverses
   // interior pointers contained in permanent memory. If there
   // is no permanent memory, does nothing.
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -910,6 +910,13 @@
   perm_gen()->object_iterate(cl);
 }
 
+void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
+  for (int i = 0; i < _n_gens; i++) {
+    _gens[i]->safe_object_iterate(cl);
+  }
+  perm_gen()->safe_object_iterate(cl);
+}
+
 void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->object_iterate_since_last_GC(cl);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -215,6 +215,7 @@
   void oop_iterate(OopClosure* cl);
   void oop_iterate(MemRegion mr, OopClosure* cl);
   void object_iterate(ObjectClosure* cl);
+  void safe_object_iterate(ObjectClosure* cl);
   void object_iterate_since_last_GC(ObjectClosure* cl);
   Space* space_containing(const void* addr) const;
--- a/src/share/vm/memory/generation.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/generation.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -319,6 +319,21 @@
   space_iterate(&blk);
 }
 
+class GenerationSafeObjIterateClosure : public SpaceClosure {
+ private:
+  ObjectClosure* _cl;
+ public:
+  virtual void do_space(Space* s) {
+    s->safe_object_iterate(_cl);
+  }
+  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
+};
+
+void Generation::safe_object_iterate(ObjectClosure* cl) {
+  GenerationSafeObjIterateClosure blk(cl);
+  space_iterate(&blk);
+}
+
 void Generation::prepare_for_compaction(CompactPoint* cp) {
   // Generic implementation, can be specialized
   CompactibleSpace* space = first_compaction_space();
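The closure-adapter shape above is reusable: wrap an object-level closure in a space-level one so the existing per-space iteration can drive it unchanged. A toy sketch (ToySpace2/ToyGeneration are hypothetical stand-ins for Space/Generation):

    #include <functional>
    #include <vector>

    struct ToySpace2 {
      std::vector<int> objects;
      void safe_object_iterate(const std::function<void(int)>& cl) {
        for (int obj : objects) cl(obj);
      }
    };

    struct ToyGeneration {
      std::vector<ToySpace2> spaces;

      // The adapter: the per-object closure is forwarded to every space,
      // mirroring GenerationSafeObjIterateClosure + space_iterate().
      void safe_object_iterate(const std::function<void(int)>& cl) {
        for (ToySpace2& s : spaces) {
          s.safe_object_iterate(cl);
        }
      }
    };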
--- a/src/share/vm/memory/generation.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/generation.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -518,6 +518,11 @@
   // each.
   virtual void object_iterate(ObjectClosure* cl);
 
+  // Iterate over all safe objects in the generation, calling "cl.do_object" on
+  // each.  An object is safe if its references point to other objects in
+  // the heap.  This defaults to object_iterate() unless overridden.
+  virtual void safe_object_iterate(ObjectClosure* cl);
+
   // Iterate over all objects allocated in the generation since the last
   // collection, calling "cl.do_object" on each.  The generation must have
   // been initialized properly to support this function, or else this call
--- a/src/share/vm/memory/heapInspection.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/heapInspection.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -263,6 +263,9 @@
   if (!cit.allocation_failed()) {
     // Iterate over objects in the heap
     RecordInstanceClosure ric(&cit);
+    // If this operation encounters a bad object when using CMS,
+    // consider using safe_object_iterate() which avoids perm gen
+    // objects that may contain bad references.
     Universe::heap()->object_iterate(&ric);
 
     // Report if certain classes are not counted because of
@@ -317,5 +320,8 @@
 
   // Iterate over objects in the heap
   FindInstanceClosure fic(k, result);
+  // If this operation encounters a bad object when using CMS,
+  // consider using safe_object_iterate() which avoids perm gen
+  // objects that may contain bad references.
   Universe::heap()->object_iterate(&fic);
 }
--- a/src/share/vm/memory/oopFactory.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/oopFactory.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -82,9 +82,11 @@
 }
 
 
-constantPoolOop oopFactory::new_constantPool(int length, TRAPS) {
+constantPoolOop oopFactory::new_constantPool(int length,
+                                             bool is_conc_safe,
+                                             TRAPS) {
   constantPoolKlass* ck = constantPoolKlass::cast(Universe::constantPoolKlassObj());
-  return ck->allocate(length, CHECK_NULL);
+  return ck->allocate(length, is_conc_safe, CHECK_NULL);
 }
 
 
@@ -105,11 +107,13 @@
                                        int compressed_line_number_size,
                                        int localvariable_table_length,
                                        int checked_exceptions_length,
+                                       bool is_conc_safe,
                                        TRAPS) {
   klassOop cmkObj = Universe::constMethodKlassObj();
   constMethodKlass* cmk = constMethodKlass::cast(cmkObj);
   return cmk->allocate(byte_code_size, compressed_line_number_size,
                        localvariable_table_length, checked_exceptions_length,
+                       is_conc_safe,
                        CHECK_NULL);
 }
 
@@ -117,14 +121,17 @@
 methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags,
                                  int compressed_line_number_size,
                                  int localvariable_table_length,
-                                 int checked_exceptions_length, TRAPS) {
+                                 int checked_exceptions_length,
+                                 bool is_conc_safe,
+                                 TRAPS) {
   methodKlass* mk = methodKlass::cast(Universe::methodKlassObj());
   assert(!access_flags.is_native() || byte_code_size == 0,
          "native methods should not contain byte codes");
   constMethodOop cm = new_constMethod(byte_code_size,
                                       compressed_line_number_size,
                                       localvariable_table_length,
-                                      checked_exceptions_length, CHECK_NULL);
+                                      checked_exceptions_length,
+                                      is_conc_safe, CHECK_NULL);
   constMethodHandle rw(THREAD, cm);
   return mk->allocate(rw, access_flags, CHECK_NULL);
 }
--- a/src/share/vm/memory/oopFactory.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/oopFactory.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -81,7 +81,9 @@
   static symbolHandle new_symbol_handle(const char* name, TRAPS) { return new_symbol_handle(name, (int)strlen(name), CHECK_(symbolHandle())); }
 
   // Constant pools
-  static constantPoolOop      new_constantPool   (int length, TRAPS);
+  static constantPoolOop      new_constantPool   (int length,
+                                                  bool is_conc_safe,
+                                                  TRAPS);
   static constantPoolCacheOop new_constantPoolCache(int length, TRAPS);
 
   // Instance classes
@@ -93,9 +95,20 @@
   static constMethodOop  new_constMethod(int byte_code_size,
                                          int compressed_line_number_size,
                                          int localvariable_table_length,
-                                         int checked_exceptions_length, TRAPS);
+                                         int checked_exceptions_length,
+                                         bool is_conc_safe,
+                                         TRAPS);
 public:
-  static methodOop       new_method(int byte_code_size, AccessFlags access_flags, int compressed_line_number_size, int localvariable_table_length, int checked_exceptions_length, TRAPS);
+  // Pass is_conc_safe == false for methods which cannot safely be
+  // processed by a concurrent GC even after this method returns.
+  static methodOop       new_method(int byte_code_size,
+                                    AccessFlags access_flags,
+                                    int compressed_line_number_size,
+                                    int localvariable_table_length,
+                                    int checked_exceptions_length,
+                                    bool is_conc_safe,
+                                    TRAPS);
 
   // Method Data containers
   static methodDataOop   new_methodData(methodHandle method, TRAPS);
--- a/src/share/vm/memory/space.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/space.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -569,7 +569,15 @@
   if (prev > mr.start()) {
     region_start_addr = prev;
     blk_start_addr    = prev;
-    assert(blk_start_addr == block_start(region_start_addr), "invariant");
+    // The previous invocation may have pushed "prev" beyond the
+    // last allocated block yet there may still be blocks
+    // in this region due to a particular coalescing policy.
+    // Relax the assertion so that the case where the unallocated
+    // block is maintained and "prev" is beyond the unallocated
+    // block does not cause the assertion to fire.
+    assert((BlockOffsetArrayUseUnallocatedBlock &&
+            (!is_in(prev))) ||
+           (blk_start_addr == block_start(region_start_addr)), "invariant");
   } else {
     region_start_addr = mr.start();
     blk_start_addr    = block_start(region_start_addr);
@@ -705,6 +713,12 @@
   object_iterate_from(bm, blk);
 }
 
+// For a contiguous space object_iterate() and safe_object_iterate()
+// are the same.
+void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
+  object_iterate(blk);
+}
+
 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
   assert(mark.space() == this, "Mark does not match space");
   HeapWord* p = mark.point();
--- a/src/share/vm/memory/space.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/memory/space.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -193,6 +193,9 @@
   // each.  Objects allocated by applications of the closure are not
   // included in the iteration.
   virtual void object_iterate(ObjectClosure* blk) = 0;
+  // Similar to object_iterate() except only iterates over
+  // objects whose internal references point to objects in the space.
+  virtual void safe_object_iterate(ObjectClosure* blk) = 0;
 
   // Iterate over all objects that intersect with mr, calling "cl->do_object"
   // on each.  There is an exception to this: if this closure has already
@@ -843,6 +846,9 @@
   void oop_iterate(OopClosure* cl);
   void oop_iterate(MemRegion mr, OopClosure* cl);
   void object_iterate(ObjectClosure* blk);
+  // For contiguous spaces this method will iterate safely over objects
+  // in the space (i.e., between bottom and top) when at a safepoint.
+  void safe_object_iterate(ObjectClosure* blk);
   void object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl);
   // iterates on objects up to the safe limit
   HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
--- a/src/share/vm/oops/constMethodKlass.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/constMethodKlass.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -49,10 +49,16 @@
   return constMethodOop(obj)->object_is_parsable();
 }
 
+bool constMethodKlass::oop_is_conc_safe(oop obj) const {
+  assert(obj->is_constMethod(), "must be constMethod oop");
+  return constMethodOop(obj)->is_conc_safe();
+}
+
 constMethodOop constMethodKlass::allocate(int byte_code_size,
                                           int compressed_line_number_size,
                                           int localvariable_table_length,
                                           int checked_exceptions_length,
+                                          bool is_conc_safe,
                                           TRAPS) {
 
   int size = constMethodOopDesc::object_size(byte_code_size,
@@ -75,6 +81,7 @@
                                 compressed_line_number_size,
                                 localvariable_table_length);
   assert(cm->size() == size, "wrong size for object");
+  cm->set_is_conc_safe(is_conc_safe);
   cm->set_partially_loaded();
   assert(cm->is_parsable(), "Is safely parsable by gc");
   return cm;
--- a/src/share/vm/oops/constMethodKlass.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/constMethodKlass.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -32,12 +32,16 @@
   // Testing
   bool oop_is_constMethod() const { return true; }
   virtual bool oop_is_parsable(oop obj) const;
+  virtual bool oop_is_conc_safe(oop obj) const;
+
 
   // Allocation
   DEFINE_ALLOCATE_PERMANENT(constMethodKlass);
   constMethodOop allocate(int byte_code_size, int compressed_line_number_size,
                           int localvariable_table_length,
-                          int checked_exceptions_length, TRAPS);
+                          int checked_exceptions_length,
+                          bool is_conc_safe,
+                          TRAPS);
   static klassOop create_klass(TRAPS);
 
   // Sizing
--- a/src/share/vm/oops/constMethodOop.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/constMethodOop.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -104,6 +104,7 @@
   // loads and stores.  This value may be updated and read without a lock by
   // multiple threads, so is volatile.
   volatile uint64_t _fingerprint;
+  volatile bool     _is_conc_safe; // if true, safe for concurrent GC processing
 
 public:
   oop* oop_block_beg() const { return adr_method(); }
@@ -273,6 +274,8 @@
   oop* adr_method() const          { return (oop*)&_method;          }
   oop* adr_stackmap_data() const   { return (oop*)&_stackmap_data;   }
   oop* adr_exception_table() const { return (oop*)&_exception_table; }
+  bool is_conc_safe() { return _is_conc_safe; }
+  void set_is_conc_safe(bool v) { _is_conc_safe = v; }
 
   // Unique id for the method
   static const u2 MAX_IDNUM;
--- a/src/share/vm/oops/constantPoolKlass.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/constantPoolKlass.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -25,7 +25,7 @@
 # include "incls/_precompiled.incl"
 # include "incls/_constantPoolKlass.cpp.incl"
 
-constantPoolOop constantPoolKlass::allocate(int length, TRAPS) {
+constantPoolOop constantPoolKlass::allocate(int length, bool is_conc_safe, TRAPS) {
   int size = constantPoolOopDesc::object_size(length);
   KlassHandle klass (THREAD, as_klassOop());
   constantPoolOop c =
@@ -38,6 +38,9 @@
   c->set_flags(0);
   // only set to non-zero if constant pool is merged by RedefineClasses
   c->set_orig_length(0);
+  // if the constant pool may change during RedefineClasses, it is created
+  // unsafe for concurrent GC processing
+  c->set_is_conc_safe(is_conc_safe);
   // all fields are initialized; needed for GC
 
   // initialize tag array
@@ -207,6 +210,11 @@
   return size;
 }
 
+bool constantPoolKlass::oop_is_conc_safe(oop obj) const {
+  assert(obj->is_constantPool(), "must be constantPool");
+  return constantPoolOop(obj)->is_conc_safe();
+}
+
 #ifndef SERIALGC
 int constantPoolKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
   assert (obj->is_constantPool(), "obj must be constant pool");
--- a/src/share/vm/oops/constantPoolKlass.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/constantPoolKlass.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -34,7 +34,7 @@
 
   // Allocation
   DEFINE_ALLOCATE_PERMANENT(constantPoolKlass);
-  constantPoolOop allocate(int length, TRAPS);
+  constantPoolOop allocate(int length, bool is_conc_safe, TRAPS);
   static klassOop create_klass(TRAPS);
 
   // Casting from klassOop
@@ -48,6 +48,8 @@
   int object_size() const        { return align_object_size(header_size()); }
 
   // Garbage collection
+  // Returns true if the object is safe for concurrent GC processing.
+  virtual bool oop_is_conc_safe(oop obj) const;
   void oop_follow_contents(oop obj);
   int  oop_adjust_pointers(oop obj);
--- a/src/share/vm/oops/constantPoolOop.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/constantPoolOop.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -43,6 +43,8 @@
   klassOop             _pool_holder;   // the corresponding class
   int                  _flags;         // a few header bits to describe contents for GC
   int                  _length;        // number of elements in the array
+  volatile bool        _is_conc_safe;  // if true, safe for concurrent
+                                       // GC processing
   // only set to non-zero if constant pool is merged by RedefineClasses
   int                  _orig_length;
 
@@ -379,6 +381,9 @@
   static int object_size(int length)   { return align_object_size(header_size() + length); }
   int object_size()                    { return object_size(length()); }
 
+  bool is_conc_safe()                  { return _is_conc_safe; }
+  void set_is_conc_safe(bool v)        { _is_conc_safe = v; }
+
   friend class constantPoolKlass;
   friend class ClassFileParser;
   friend class SystemDictionary;
--- a/src/share/vm/oops/klass.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/klass.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -606,8 +606,19 @@
 #undef assert_same_query
 
   // Unless overridden, oop is parsable if it has a klass pointer.
+  // Parsability of an object is object specific.
   virtual bool oop_is_parsable(oop obj) const { return true; }
 
+  // Unless overridden, an oop is safe for concurrent GC processing
+  // after its allocation is complete.  The exception to
+  // this is the case where objects are changed after allocation.
+  // Class redefinition is one of the known exceptions.  During
+  // class redefinition, an allocated class can be changed in order
+  // to create a merged class (the combination of the
+  // old class definition that has to be preserved and the new class
+  // definition which is being created).
+  virtual bool oop_is_conc_safe(oop obj) const { return true; }
+
   // Access flags
   AccessFlags access_flags() const { return _access_flags; }
   void set_access_flags(AccessFlags flags)     { _access_flags = flags; }
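The mechanism is just a default-true virtual plus a per-object override in the few klasses whose instances can be mutated outside a safepoint. A compact sketch (ToyKlass/ToyConstantPoolKlass/ToyOop are illustrative, not the HotSpot classes):

    struct ToyOop { bool conc_safe_flag; };

    // Default answer lives in the base class; only klasses whose instances
    // can change outside a safepoint override it with a per-object flag.
    struct ToyKlass {
      virtual ~ToyKlass() {}
      virtual bool oop_is_conc_safe(const ToyOop*) const { return true; }
    };

    struct ToyConstantPoolKlass : ToyKlass {
      virtual bool oop_is_conc_safe(const ToyOop* obj) const {
        // cleared while RedefineClasses mutates the pool, restored after
        return obj->conc_safe_flag;
      }
    };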
--- a/src/share/vm/oops/methodOop.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/methodOop.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -792,15 +792,34 @@
   AccessFlags flags = m->access_flags();
   int checked_exceptions_len = m->checked_exceptions_length();
   int localvariable_len = m->localvariable_table_length();
-  methodOop newm_oop = oopFactory::new_method(new_code_length, flags, new_compressed_linenumber_size, localvariable_len, checked_exceptions_len, CHECK_(methodHandle()));
+  // Allocate newm_oop with the is_conc_safe parameter set
+  // to IsUnsafeConc to indicate that newm_oop is not yet
+  // safe for concurrent processing by a GC.
+  methodOop newm_oop = oopFactory::new_method(new_code_length,
+                                              flags,
+                                              new_compressed_linenumber_size,
+                                              localvariable_len,
+                                              checked_exceptions_len,
+                                              IsUnsafeConc,
+                                              CHECK_(methodHandle()));
   methodHandle newm (THREAD, newm_oop);
   int new_method_size = newm->method_size();
   // Create a shallow copy of methodOopDesc part, but be careful to preserve the new constMethodOop
   constMethodOop newcm = newm->constMethod();
   int new_const_method_size = newm->constMethod()->object_size();
+
   memcpy(newm(), m(), sizeof(methodOopDesc));
   // Create shallow copy of constMethodOopDesc, but be careful to preserve the methodOop
+  // is_conc_safe is set to false because that is the value of
+  // is_conc_safe initialized into newcm and the copy should
+  // not overwrite that value.  During the window during which it is
+  // tagged as unsafe, some extra work could be needed during precleaning
+  // or concurrent marking but those phases will be correct.  Setting and
+  // resetting is done in preference to a careful copying into newcm to
+  // avoid having to know the precise layout of a constMethodOop.
+  m->constMethod()->set_is_conc_safe(false);
   memcpy(newcm, m->constMethod(), sizeof(constMethodOopDesc));
+  m->constMethod()->set_is_conc_safe(true);
   // Reset correct method/const method, method size, and parameter info
   newcm->set_method(newm());
   newm->set_constMethod(newcm);
@@ -831,6 +850,10 @@
            m->localvariable_table_start(),
            localvariable_len * sizeof(LocalVariableTableElement));
   }
+
+  // Only set is_conc_safe to true when changes to newcm are
+  // complete.
+  newcm->set_is_conc_safe(true);
   return newm;
 }
--- a/src/share/vm/oops/methodOop.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/methodOop.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -129,6 +129,10 @@
   volatile address           _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
 
  public:
+
+  static const bool IsUnsafeConc = false;
+  static const bool IsSafeConc   = true;
+
   // accessors for instance variables
   constMethodOop constMethod() const             { return _constMethod; }
   void set_constMethod(constMethodOop xconst)    { oop_store_without_check((oop*)&_constMethod, (oop)xconst); }
--- a/src/share/vm/oops/oop.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/oop.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -108,6 +108,13 @@
   // installation of their klass pointer.
   bool is_parsable();
 
+  // Some perm gen objects that have been allocated and initialized
+  // can be changed by the VM when not at a safepoint (class redefinition
+  // is an example).  Such objects should not be examined by the
+  // concurrent processing of a garbage collector if is_conc_safe()
+  // returns false.
+  bool is_conc_safe();
+
   // type test operations (inlined in oop.inline.h)
   bool is_instance()           const;
   bool is_instanceRef()        const;
--- a/src/share/vm/oops/oop.inline.hpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/oops/oop.inline.hpp	Fri Jan 23 10:41:28 2009 -0500
@@ -435,6 +435,10 @@
   return blueprint()->oop_is_parsable(this);
 }
 
+inline bool oopDesc::is_conc_safe() {
+  return blueprint()->oop_is_conc_safe(this);
+}
+
 inline void update_barrier_set(void* p, oop v) {
   assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
   oopDesc::bs()->write_ref_field(p, v);
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -1230,8 +1230,14 @@
 
   // Constant pools are not easily reused so we allocate a new one
   // each time.
+  // merge_cp is created unsafe for concurrent GC processing.  It
+  // should be marked safe before discarding it; even though it is
+  // garbage, if it crosses a card boundary, it may be scanned
+  // in order to find the start of the first complete object on the card.
   constantPoolHandle merge_cp(THREAD,
-    oopFactory::new_constantPool(merge_cp_length, THREAD));
+    oopFactory::new_constantPool(merge_cp_length,
+                                 methodOopDesc::IsUnsafeConc,
+                                 THREAD));
   int orig_length = old_cp->orig_length();
   if (orig_length == 0) {
     // This old_cp is an actual original constant pool. We save
@@ -1274,6 +1280,7 @@
     // rewriting so we can't use the old constant pool with the new
     // class.
 
+    merge_cp()->set_is_conc_safe(true);
     merge_cp = constantPoolHandle();  // toss the merged constant pool
   } else if (old_cp->length() < scratch_cp->length()) {
     // The old constant pool has fewer entries than the new constant
@@ -1283,6 +1290,7 @@
     // rewriting so we can't use the new constant pool with the old
     // class.
 
+    merge_cp()->set_is_conc_safe(true);
     merge_cp = constantPoolHandle();  // toss the merged constant pool
   } else {
     // The old constant pool has more entries than the new constant
@@ -1296,6 +1304,7 @@
       set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
         THREAD);
       // drop local ref to the merged constant pool
+      merge_cp()->set_is_conc_safe(true);
       merge_cp = constantPoolHandle();
     }
   } else {
@@ -1325,7 +1334,10 @@
     // GCed.
     set_new_constant_pool(scratch_class, merge_cp, merge_cp_length, true,
       THREAD);
+    merge_cp()->set_is_conc_safe(true);
   }
+  assert(old_cp()->is_conc_safe(), "Just checking");
+  assert(scratch_cp()->is_conc_safe(), "Just checking");
 
   return JVMTI_ERROR_NONE;
 } // end merge_cp_and_rewrite()
@@ -2314,13 +2326,16 @@
     // worst case merge situation. We want to associate the minimum
     // sized constant pool with the klass to save space.
     constantPoolHandle smaller_cp(THREAD,
-      oopFactory::new_constantPool(scratch_cp_length, THREAD));
+      oopFactory::new_constantPool(scratch_cp_length,
+                                   methodOopDesc::IsUnsafeConc,
+                                   THREAD));
     // preserve orig_length() value in the smaller copy
     int orig_length = scratch_cp->orig_length();
    assert(orig_length != 0, "sanity check");
     smaller_cp->set_orig_length(orig_length);
     scratch_cp->copy_cp_to(1, scratch_cp_length - 1, smaller_cp, 1, THREAD);
     scratch_cp = smaller_cp;
+    smaller_cp()->set_is_conc_safe(true);
   }
 
   // attach new constant pool to klass
@@ -2516,6 +2531,7 @@
 
     rewrite_cp_refs_in_stack_map_table(method, THREAD);
   } // end for each method
+  assert(scratch_cp()->is_conc_safe(), "Just checking");
 } // end set_new_constant_pool()
--- a/src/share/vm/prims/jvmtiTagMap.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/prims/jvmtiTagMap.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -1320,6 +1320,9 @@
   }
 
   // do the iteration
+  // If this operation encounters a bad object when using CMS,
+  // consider using safe_object_iterate() which avoids perm gen
+  // objects that may contain bad references.
   Universe::heap()->object_iterate(_blk);
 
   // when sharing is enabled we must iterate over the shared spaces
--- a/src/share/vm/services/heapDumper.cpp	Wed Jan 21 11:14:19 2009 -0500
+++ b/src/share/vm/services/heapDumper.cpp	Fri Jan 23 10:41:28 2009 -0500
@@ -1700,7 +1700,7 @@
   // The HPROF_GC_CLASS_DUMP and HPROF_GC_INSTANCE_DUMP are the vast bulk
   // of the heap dump.
   HeapObjectDumper obj_dumper(this, writer());
-  Universe::heap()->object_iterate(&obj_dumper);
+  Universe::heap()->safe_object_iterate(&obj_dumper);
 
   // HPROF_GC_ROOT_THREAD_OBJ + frames + jni locals
   do_threads();