--- a/src/share/vm/gc_interface/collectedHeap.cpp	Tue Jan 12 14:56:46 2010 -0800
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Wed Jan 13 15:26:39 2010 -0800
@@ -59,8 +59,18 @@
     PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                              80, GCCause::to_string(_gc_lastcause), CHECK);
   }
+  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
 }
 
+void CollectedHeap::pre_initialize() {
+  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
+  // otherwise remains unused.
+#ifdef COMPILER2
+  _defer_initial_card_mark = ReduceInitialCardMarks && (DeferInitialCardMark || card_mark_must_follow_store());
+#else
+  assert(_defer_initial_card_mark == false, "Who would set it?");
+#endif
+}
 
 #ifndef PRODUCT
 void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
@@ -140,12 +150,13 @@
 void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
   MemRegion deferred = thread->deferred_card_mark();
   if (!deferred.is_empty()) {
+    assert(_defer_initial_card_mark, "Otherwise should be empty");
     {
       // Verify that the storage points to a parsable object in heap
       DEBUG_ONLY(oop old_obj = oop(deferred.start());)
       assert(is_in(old_obj), "Not in allocated heap");
       assert(!can_elide_initializing_store_barrier(old_obj),
-             "Else should have been filtered in defer_store_barrier()");
+             "Else should have been filtered in new_store_pre_barrier()");
       assert(!is_in_permanent(old_obj), "Sanity: not expected");
       assert(old_obj->is_oop(true), "Not an oop");
       assert(old_obj->is_parsable(), "Will not be concurrently parsable");
@@ -174,9 +185,7 @@
 // so long as the card-mark is completed before the next
 // scavenge. For all these cases, we can do a card mark
 // at the point at which we do a slow path allocation
-// in the old gen. For uniformity, however, we end
-// up using the same scheme (see below) for all three
-// cases (deferring the card-mark appropriately).
+// in the old gen, i.e. in this call.
 // (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
 //     in addition that the card-mark for an old gen allocated
 //     object strictly follow any associated initializing stores.
@@ -199,12 +208,13 @@
 // but, like in CMS, because of the presence of concurrent refinement
 // (much like CMS' precleaning), must strictly follow the oop-store.
 // Thus, using the same protocol for maintaining the intended
-// invariants turns out, serendepitously, to be the same for all
-// three collectors/heap types above.
+// invariants turns out, serendipitously, to be the same for both
+// G1 and CMS.
 //
-// For each future collector, this should be reexamined with
-// that specific collector in mind.
-oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
+// For any future collector, this code should be reexamined with
+// that specific collector in mind, and the documentation above suitably
+// extended and updated.
+oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
   // If a previous card-mark was deferred, flush it now.
   flush_deferred_store_barrier(thread);
   if (can_elide_initializing_store_barrier(new_obj)) {
@@ -212,10 +222,17 @@
     // following the flush above.
     assert(thread->deferred_card_mark().is_empty(), "Error");
   } else {
-    // Remember info for the newly deferred store barrier
-    MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size());
-    assert(!deferred.is_empty(), "Error");
-    thread->set_deferred_card_mark(deferred);
+    MemRegion mr((HeapWord*)new_obj, new_obj->size());
+    assert(!mr.is_empty(), "Error");
+    if (_defer_initial_card_mark) {
+      // Defer the card mark
+      thread->set_deferred_card_mark(mr);
+    } else {
+      // Do the card mark
+      BarrierSet* bs = barrier_set();
+      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
+      bs->write_region(mr);
+    }
   }
   return new_obj;
 }
@@ -313,22 +330,6 @@
   return NULL;
 }
 
-void CollectedHeap::fill_all_tlabs(bool retire) {
-  assert(UseTLAB, "should not reach here");
-  // See note in ensure_parsability() below.
-  assert(SafepointSynchronize::is_at_safepoint() ||
-         !is_init_completed(),
-         "should only fill tlabs at safepoint");
-  // The main thread starts allocating via a TLAB even before it
-  // has added itself to the threads list at vm boot-up.
-  assert(Threads::first() != NULL,
-         "Attempt to fill tlabs before main thread has been added"
-         " to threads list is doomed to failure!");
-  for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
-     thread->tlab().make_parsable(retire);
-  }
-}
-
 void CollectedHeap::ensure_parsability(bool retire_tlabs) {
   // The second disjunct in the assertion below makes a concession
   // for the start-up verification done while the VM is being
@@ -343,8 +344,24 @@
          "Should only be called at a safepoint or at start-up"
          " otherwise concurrent mutator activity may make heap "
          " unparsable again");
-  if (UseTLAB) {
-    fill_all_tlabs(retire_tlabs);
+  const bool use_tlab = UseTLAB;
+  const bool deferred = _defer_initial_card_mark;
+  // The main thread starts allocating via a TLAB even before it
+  // has added itself to the threads list at vm boot-up.
+  assert(!use_tlab || Threads::first() != NULL,
+         "Attempt to fill tlabs before main thread has been added"
+         " to threads list is doomed to failure!");
+  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
+    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
+#ifdef COMPILER2
+    // The deferred store barriers must all have been flushed to the
+    // card-table (or other remembered set structure) before GC starts
+    // processing the card-table (or other remembered set).
+    if (deferred) flush_deferred_store_barrier(thread);
+#else
+    assert(!deferred, "Should be false");
+    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
+#endif
   }
 }
 
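For readers less familiar with the card-marking machinery this change touches, below is a small standalone sketch (plain C++, not HotSpot code) of the three-way decision the reworked new_store_pre_barrier() makes for a newly allocated old-gen object: the initializing-store card mark is either elided outright, deferred on the allocating thread until flush_deferred_store_barrier() runs before the next collection, or performed immediately by dirtying every card the object's region intersects (roughly what write_region() amounts to for a card-table barrier set). The ToyCardTable/ToyThread types and the flat boolean parameters are invented for illustration; only the 512-byte card size mirrors HotSpot's card table.

// Standalone illustrative sketch -- not HotSpot code.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

typedef uintptr_t HeapWord;          // toy stand-in: an address in the heap

struct MemRegion {                   // toy stand-in for HotSpot's MemRegion
  HeapWord start;                    // first address covered by the new object
  size_t   word_size;                // object size in heap words
  HeapWord end() const  { return start + word_size * sizeof(void*); }
  bool is_empty() const { return word_size == 0; }
};

struct ToyCardTable {                // toy stand-in for a card-table barrier set
  static const int card_shift = 9;   // 512-byte cards, as in HotSpot's card table
  HeapWord heap_base;
  std::vector<uint8_t> cards;        // one byte per card; 1 = dirty, 0 = clean

  // Analogue of write_region(mr): dirty every card the region touches.
  void write_region(const MemRegion& mr) {
    size_t first = (mr.start     - heap_base) >> card_shift;
    size_t last  = (mr.end() - 1 - heap_base) >> card_shift;
    for (size_t i = first; i <= last; i++) cards[i] = 1;
  }
};

struct ToyThread {                   // toy stand-in for the thread's deferred mark
  MemRegion deferred_card_mark;
};

// The three-way choice the patched new_store_pre_barrier() makes.
void new_store_pre_barrier(ToyThread& thread, ToyCardTable& ct, MemRegion new_obj,
                           bool can_elide_initializing_store_barrier,
                           bool defer_initial_card_mark) {
  if (can_elide_initializing_store_barrier) {
    return;                                   // e.g. the object sits in the young gen
  }
  assert(!new_obj.is_empty());
  if (defer_initial_card_mark) {
    thread.deferred_card_mark = new_obj;      // flushed before the next GC
  } else {
    ct.write_region(new_obj);                 // eager card mark, done right here
  }
}

int main() {
  ToyCardTable ct = { 0x100000, std::vector<uint8_t>(1024, 0) };
  ToyThread t = { {0, 0} };
  // A ~768-byte object starting 40 bytes into the fourth 512-byte card.
  MemRegion obj = { 0x100000 + 3 * 512 + 40, 96 };
  new_store_pre_barrier(t, ct, obj, /*can_elide=*/false, /*defer=*/false);
  assert(ct.cards[3] == 1);                   // the first card covering obj is dirty
  return 0;
}

Compiled with any C++11 compiler (e.g. g++ -std=c++11 sketch.cpp), the example takes the eager branch and dirties the cards covering the object; flipping defer_initial_card_mark records the region on the toy thread instead, which is the path the patch enables only when COMPILER2's ReduceInitialCardMarks optimization actually needs it.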