Tue, 24 Sep 2013 14:46:29 +0200
8025305: Cleanup CardTableModRefBS usage in G1
Summary: Move some G1 specific code from CardTableModRefBS to G1SATBCardTableModRefBS.
Reviewed-by: brutisso, tschatzl, ehelin
1.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Fri Sep 27 13:41:07 2013 +0200 1.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Tue Sep 24 14:46:29 2013 +0200 1.3 @@ -81,7 +81,7 @@ 1.4 size_t* marked_bytes_array, 1.5 BitMap* task_card_bm) { 1.6 G1CollectedHeap* g1h = _g1h; 1.7 - CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set()); 1.8 + CardTableModRefBS* ct_bs = g1h->g1_barrier_set(); 1.9 1.10 HeapWord* start = mr.start(); 1.11 HeapWord* end = mr.end();
2.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Fri Sep 27 13:41:07 2013 +0200 2.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Tue Sep 24 14:46:29 2013 +0200 2.3 @@ -65,9 +65,7 @@ 2.4 // threshold limit is no more than this. 2.5 guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity"); 2.6 2.7 - ModRefBarrierSet* bs = _g1h->mr_bs(); 2.8 - guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); 2.9 - _ct_bs = (CardTableModRefBS*)bs; 2.10 + _ct_bs = _g1h->g1_barrier_set(); 2.11 _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start()); 2.12 2.13 // Allocate/Reserve the counts table
3.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Sep 27 13:41:07 2013 +0200 3.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Sep 24 14:46:29 2013 +0200 3.3 @@ -125,10 +125,8 @@ 3.4 int _histo[256]; 3.5 public: 3.6 ClearLoggedCardTableEntryClosure() : 3.7 - _calls(0) 3.8 + _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) 3.9 { 3.10 - _g1h = G1CollectedHeap::heap(); 3.11 - _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); 3.12 for (int i = 0; i < 256; i++) _histo[i] = 0; 3.13 } 3.14 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 3.15 @@ -158,11 +156,8 @@ 3.16 CardTableModRefBS* _ctbs; 3.17 public: 3.18 RedirtyLoggedCardTableEntryClosure() : 3.19 - _calls(0) 3.20 - { 3.21 - _g1h = G1CollectedHeap::heap(); 3.22 - _ctbs = (CardTableModRefBS*)_g1h->barrier_set(); 3.23 - } 3.24 + _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {} 3.25 + 3.26 bool do_card_ptr(jbyte* card_ptr, int worker_i) { 3.27 if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) { 3.28 _calls++; 3.29 @@ -478,7 +473,7 @@ 3.30 3.31 void G1CollectedHeap::check_ct_logs_at_safepoint() { 3.32 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); 3.33 - CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); 3.34 + CardTableModRefBS* ct_bs = g1_barrier_set(); 3.35 3.36 // Count the dirty cards at the start. 3.37 CountNonCleanMemRegionClosure count1(this); 3.38 @@ -1205,7 +1200,7 @@ 3.39 }; 3.40 3.41 void G1CollectedHeap::clear_rsets_post_compaction() { 3.42 - PostMCRemSetClearClosure rs_clear(this, mr_bs()); 3.43 + PostMCRemSetClearClosure rs_clear(this, g1_barrier_set()); 3.44 heap_region_iterate(&rs_clear); 3.45 } 3.46 3.47 @@ -2045,20 +2040,13 @@ 3.48 // Create the gen rem set (and barrier set) for the entire reserved region. 
3.49 _rem_set = collector_policy()->create_rem_set(_reserved, 2); 3.50 set_barrier_set(rem_set()->bs()); 3.51 - if (barrier_set()->is_a(BarrierSet::ModRef)) { 3.52 - _mr_bs = (ModRefBarrierSet*)_barrier_set; 3.53 - } else { 3.54 - vm_exit_during_initialization("G1 requires a mod ref bs."); 3.55 + if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) { 3.56 + vm_exit_during_initialization("G1 requires a G1SATBCardTableLoggingModRefBS"); 3.57 return JNI_ENOMEM; 3.58 } 3.59 3.60 // Also create a G1 rem set. 3.61 - if (mr_bs()->is_a(BarrierSet::CardTableModRef)) { 3.62 - _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs()); 3.63 - } else { 3.64 - vm_exit_during_initialization("G1 requires a cardtable mod ref bs."); 3.65 - return JNI_ENOMEM; 3.66 - } 3.67 + _g1_rem_set = new G1RemSet(this, g1_barrier_set()); 3.68 3.69 // Carve out the G1 part of the heap. 3.70 3.71 @@ -4555,7 +4543,7 @@ 3.72 : _g1h(g1h), 3.73 _refs(g1h->task_queue(queue_num)), 3.74 _dcq(&g1h->dirty_card_queue_set()), 3.75 - _ct_bs((CardTableModRefBS*)_g1h->barrier_set()), 3.76 + _ct_bs(g1h->g1_barrier_set()), 3.77 _g1_rem(g1h->g1_rem_set()), 3.78 _hash_seed(17), _queue_num(queue_num), 3.79 _term_attempts(0), 3.80 @@ -5984,11 +5972,11 @@ 3.81 } 3.82 3.83 class G1ParCleanupCTTask : public AbstractGangTask { 3.84 - CardTableModRefBS* _ct_bs; 3.85 + G1SATBCardTableModRefBS* _ct_bs; 3.86 G1CollectedHeap* _g1h; 3.87 HeapRegion* volatile _su_head; 3.88 public: 3.89 - G1ParCleanupCTTask(CardTableModRefBS* ct_bs, 3.90 + G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs, 3.91 G1CollectedHeap* g1h) : 3.92 AbstractGangTask("G1 Par Cleanup CT Task"), 3.93 _ct_bs(ct_bs), _g1h(g1h) { } 3.94 @@ -6011,9 +5999,9 @@ 3.95 #ifndef PRODUCT 3.96 class G1VerifyCardTableCleanup: public HeapRegionClosure { 3.97 G1CollectedHeap* _g1h; 3.98 - CardTableModRefBS* _ct_bs; 3.99 + G1SATBCardTableModRefBS* _ct_bs; 3.100 public: 3.101 - G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs) 3.102 + 
G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs) 3.103 : _g1h(g1h), _ct_bs(ct_bs) { } 3.104 virtual bool doHeapRegion(HeapRegion* r) { 3.105 if (r->is_survivor()) { 3.106 @@ -6027,7 +6015,7 @@ 3.107 3.108 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) { 3.109 // All of the region should be clean. 3.110 - CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set(); 3.111 + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 3.112 MemRegion mr(hr->bottom(), hr->end()); 3.113 ct_bs->verify_not_dirty_region(mr); 3.114 } 3.115 @@ -6040,13 +6028,13 @@ 3.116 // not dirty that area (one less thing to have to do while holding 3.117 // a lock). So we can only verify that [bottom(),pre_dummy_top()] 3.118 // is dirty. 3.119 - CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); 3.120 + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 3.121 MemRegion mr(hr->bottom(), hr->pre_dummy_top()); 3.122 ct_bs->verify_dirty_region(mr); 3.123 } 3.124 3.125 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) { 3.126 - CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set(); 3.127 + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 3.128 for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) { 3.129 verify_dirty_region(hr); 3.130 } 3.131 @@ -6058,7 +6046,7 @@ 3.132 #endif 3.133 3.134 void G1CollectedHeap::cleanUpCardTable() { 3.135 - CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set()); 3.136 + G1SATBCardTableModRefBS* ct_bs = g1_barrier_set(); 3.137 double start = os::elapsedTime(); 3.138 3.139 {
4.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Sep 27 13:41:07 2013 +0200 4.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Sep 24 14:46:29 2013 +0200 4.3 @@ -31,6 +31,7 @@ 4.4 #include "gc_implementation/g1/g1HRPrinter.hpp" 4.5 #include "gc_implementation/g1/g1MonitoringSupport.hpp" 4.6 #include "gc_implementation/g1/g1RemSet.hpp" 4.7 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 4.8 #include "gc_implementation/g1/g1YCTypes.hpp" 4.9 #include "gc_implementation/g1/heapRegionSeq.hpp" 4.10 #include "gc_implementation/g1/heapRegionSets.hpp" 4.11 @@ -791,8 +792,6 @@ 4.12 4.13 // The g1 remembered set of the heap. 4.14 G1RemSet* _g1_rem_set; 4.15 - // And it's mod ref barrier set, used to track updates for the above. 4.16 - ModRefBarrierSet* _mr_bs; 4.17 4.18 // A set of cards that cover the objects for which the Rsets should be updated 4.19 // concurrently after the collection. 4.20 @@ -1127,7 +1126,6 @@ 4.21 4.22 // The rem set and barrier set. 4.23 G1RemSet* g1_rem_set() const { return _g1_rem_set; } 4.24 - ModRefBarrierSet* mr_bs() const { return _mr_bs; } 4.25 4.26 unsigned get_gc_time_stamp() { 4.27 return _gc_time_stamp; 4.28 @@ -1346,6 +1344,10 @@ 4.29 4.30 virtual bool is_in_closed_subset(const void* p) const; 4.31 4.32 + G1SATBCardTableModRefBS* g1_barrier_set() { 4.33 + return (G1SATBCardTableModRefBS*) barrier_set(); 4.34 + } 4.35 + 4.36 // This resets the card table to all zeros. It is used after 4.37 // a collection pause which used the card table to claim cards. 
4.38 void cleanUpCardTable(); 4.39 @@ -1875,7 +1877,7 @@ 4.40 G1CollectedHeap* _g1h; 4.41 RefToScanQueue* _refs; 4.42 DirtyCardQueue _dcq; 4.43 - CardTableModRefBS* _ct_bs; 4.44 + G1SATBCardTableModRefBS* _ct_bs; 4.45 G1RemSet* _g1_rem; 4.46 4.47 G1ParGCAllocBufferContainer _surviving_alloc_buffer; 4.48 @@ -1914,7 +1916,7 @@ 4.49 void add_to_undo_waste(size_t waste) { _undo_waste += waste; } 4.50 4.51 DirtyCardQueue& dirty_card_queue() { return _dcq; } 4.52 - CardTableModRefBS* ctbs() { return _ct_bs; } 4.53 + G1SATBCardTableModRefBS* ctbs() { return _ct_bs; } 4.54 4.55 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) { 4.56 if (!from->is_survivor()) {
5.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Fri Sep 27 13:41:07 2013 +0200 5.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Tue Sep 24 14:46:29 2013 +0200 5.3 @@ -134,7 +134,7 @@ 5.4 assert(containing_hr->is_in(end - 1), "it should also contain end - 1"); 5.5 5.6 MemRegion mr(start, end); 5.7 - ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr); 5.8 + g1_barrier_set()->dirty(mr); 5.9 } 5.10 5.11 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
6.1 --- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Fri Sep 27 13:41:07 2013 +0200 6.2 +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp Tue Sep 24 14:46:29 2013 +0200 6.3 @@ -41,11 +41,11 @@ 6.4 private: 6.5 G1CollectedHeap* _g1; 6.6 DirtyCardQueue *_dcq; 6.7 - CardTableModRefBS* _ct_bs; 6.8 + G1SATBCardTableModRefBS* _ct_bs; 6.9 6.10 public: 6.11 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) : 6.12 - _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {} 6.13 + _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {} 6.14 6.15 virtual void do_oop(narrowOop* p) { do_oop_work(p); } 6.16 virtual void do_oop( oop* p) { do_oop_work(p); }
7.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Fri Sep 27 13:41:07 2013 +0200 7.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp Tue Sep 24 14:46:29 2013 +0200 7.3 @@ -220,7 +220,7 @@ 7.4 public: 7.5 G1PrepareCompactClosure(CompactibleSpace* cs) 7.6 : _g1h(G1CollectedHeap::heap()), 7.7 - _mrbs(G1CollectedHeap::heap()->mr_bs()), 7.8 + _mrbs(_g1h->g1_barrier_set()), 7.9 _cp(NULL, cs, cs->initialize_threshold()), 7.10 _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { } 7.11
8.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Sep 27 13:41:07 2013 +0200 8.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Sep 24 14:46:29 2013 +0200 8.3 @@ -109,7 +109,7 @@ 8.4 CodeBlobToOopClosure* _code_root_cl; 8.5 8.6 G1BlockOffsetSharedArray* _bot_shared; 8.7 - CardTableModRefBS *_ct_bs; 8.8 + G1SATBCardTableModRefBS *_ct_bs; 8.9 8.10 double _strong_code_root_scan_time_sec; 8.11 int _worker_i; 8.12 @@ -130,7 +130,7 @@ 8.13 { 8.14 _g1h = G1CollectedHeap::heap(); 8.15 _bot_shared = _g1h->bot_shared(); 8.16 - _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set()); 8.17 + _ct_bs = _g1h->g1_barrier_set(); 8.18 _block_size = MAX2<int>(G1RSetScanBlockSize, 1); 8.19 } 8.20 8.21 @@ -505,12 +505,7 @@ 8.22 ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) : 8.23 _g1h(G1CollectedHeap::heap()), 8.24 _region_bm(region_bm), _card_bm(card_bm), 8.25 - _ctbs(NULL) 8.26 - { 8.27 - ModRefBarrierSet* bs = _g1h->mr_bs(); 8.28 - guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition"); 8.29 - _ctbs = (CardTableModRefBS*)bs; 8.30 - } 8.31 + _ctbs(_g1h->g1_barrier_set()) {} 8.32 8.33 bool doHeapRegion(HeapRegion* r) { 8.34 if (!r->continuesHumongous()) {
9.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Fri Sep 27 13:41:07 2013 +0200 9.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Tue Sep 24 14:46:29 2013 +0200 9.3 @@ -64,6 +64,27 @@ 9.4 } 9.5 } 9.6 9.7 +bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) { 9.8 + jbyte val = _byte_map[card_index]; 9.9 + // It's already processed 9.10 + if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { 9.11 + return false; 9.12 + } 9.13 + // Cached bit can be installed either on a clean card or on a claimed card. 9.14 + jbyte new_val = val; 9.15 + if (val == clean_card_val()) { 9.16 + new_val = (jbyte)deferred_card_val(); 9.17 + } else { 9.18 + if (val & claimed_card_val()) { 9.19 + new_val = val | (jbyte)deferred_card_val(); 9.20 + } 9.21 + } 9.22 + if (new_val != val) { 9.23 + Atomic::cmpxchg(new_val, &_byte_map[card_index], val); 9.24 + } 9.25 + return true; 9.26 +} 9.27 + 9.28 G1SATBCardTableLoggingModRefBS:: 9.29 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap, 9.30 int max_covered_regions) :
10.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Sep 27 13:41:07 2013 +0200 10.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Tue Sep 24 14:46:29 2013 +0200 10.3 @@ -89,6 +89,42 @@ 10.4 write_ref_array_pre_work(dst, count); 10.5 } 10.6 } 10.7 + 10.8 +/* 10.9 + Claimed and deferred bits are used together in G1 during the evacuation 10.10 + pause. These bits can have the following state transitions: 10.11 + 1. The claimed bit can be put over any other card state. Except that 10.12 + the "dirty -> dirty and claimed" transition is checked for in 10.13 + G1 code and is not used. 10.14 + 2. Deferred bit can be set only if the previous state of the card 10.15 + was either clean or claimed. mark_card_deferred() is wait-free. 10.16 + We do not care if the operation is be successful because if 10.17 + it does not it will only result in duplicate entry in the update 10.18 + buffer because of the "cache-miss". So it's not worth spinning. 10.19 + */ 10.20 + 10.21 + bool is_card_claimed(size_t card_index) { 10.22 + jbyte val = _byte_map[card_index]; 10.23 + return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val(); 10.24 + } 10.25 + 10.26 + void set_card_claimed(size_t card_index) { 10.27 + jbyte val = _byte_map[card_index]; 10.28 + if (val == clean_card_val()) { 10.29 + val = (jbyte)claimed_card_val(); 10.30 + } else { 10.31 + val |= (jbyte)claimed_card_val(); 10.32 + } 10.33 + _byte_map[card_index] = val; 10.34 + } 10.35 + 10.36 + bool mark_card_deferred(size_t card_index); 10.37 + 10.38 + bool is_card_deferred(size_t card_index) { 10.39 + jbyte val = _byte_map[card_index]; 10.40 + return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); 10.41 + } 10.42 + 10.43 }; 10.44 10.45 // Adds card-table logging to the post-barrier.
11.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp Fri Sep 27 13:41:07 2013 +0200 11.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Tue Sep 24 14:46:29 2013 +0200 11.3 @@ -423,60 +423,6 @@ 11.4 inline_write_ref_field(field, newVal); 11.5 } 11.6 11.7 -/* 11.8 - Claimed and deferred bits are used together in G1 during the evacuation 11.9 - pause. These bits can have the following state transitions: 11.10 - 1. The claimed bit can be put over any other card state. Except that 11.11 - the "dirty -> dirty and claimed" transition is checked for in 11.12 - G1 code and is not used. 11.13 - 2. Deferred bit can be set only if the previous state of the card 11.14 - was either clean or claimed. mark_card_deferred() is wait-free. 11.15 - We do not care if the operation is be successful because if 11.16 - it does not it will only result in duplicate entry in the update 11.17 - buffer because of the "cache-miss". So it's not worth spinning. 11.18 - */ 11.19 - 11.20 - 11.21 -bool CardTableModRefBS::claim_card(size_t card_index) { 11.22 - jbyte val = _byte_map[card_index]; 11.23 - assert(val != dirty_card_val(), "Shouldn't claim a dirty card"); 11.24 - while (val == clean_card_val() || 11.25 - (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) { 11.26 - jbyte new_val = val; 11.27 - if (val == clean_card_val()) { 11.28 - new_val = (jbyte)claimed_card_val(); 11.29 - } else { 11.30 - new_val = val | (jbyte)claimed_card_val(); 11.31 - } 11.32 - jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val); 11.33 - if (res == val) { 11.34 - return true; 11.35 - } 11.36 - val = res; 11.37 - } 11.38 - return false; 11.39 -} 11.40 - 11.41 -bool CardTableModRefBS::mark_card_deferred(size_t card_index) { 11.42 - jbyte val = _byte_map[card_index]; 11.43 - // It's already processed 11.44 - if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) { 11.45 - return false; 11.46 - } 11.47 - // Cached bit can be installed either on a clean 
card or on a claimed card. 11.48 - jbyte new_val = val; 11.49 - if (val == clean_card_val()) { 11.50 - new_val = (jbyte)deferred_card_val(); 11.51 - } else { 11.52 - if (val & claimed_card_val()) { 11.53 - new_val = val | (jbyte)deferred_card_val(); 11.54 - } 11.55 - } 11.56 - if (new_val != val) { 11.57 - Atomic::cmpxchg(new_val, &_byte_map[card_index], val); 11.58 - } 11.59 - return true; 11.60 -} 11.61 11.62 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp, 11.63 MemRegion mr,
12.1 --- a/src/share/vm/memory/cardTableModRefBS.hpp Fri Sep 27 13:41:07 2013 +0200 12.2 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Tue Sep 24 14:46:29 2013 +0200 12.3 @@ -339,34 +339,10 @@ 12.4 _byte_map[card_index] = dirty_card_val(); 12.5 } 12.6 12.7 - bool is_card_claimed(size_t card_index) { 12.8 - jbyte val = _byte_map[card_index]; 12.9 - return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val(); 12.10 - } 12.11 - 12.12 - void set_card_claimed(size_t card_index) { 12.13 - jbyte val = _byte_map[card_index]; 12.14 - if (val == clean_card_val()) { 12.15 - val = (jbyte)claimed_card_val(); 12.16 - } else { 12.17 - val |= (jbyte)claimed_card_val(); 12.18 - } 12.19 - _byte_map[card_index] = val; 12.20 - } 12.21 - 12.22 - bool claim_card(size_t card_index); 12.23 - 12.24 bool is_card_clean(size_t card_index) { 12.25 return _byte_map[card_index] == clean_card_val(); 12.26 } 12.27 12.28 - bool is_card_deferred(size_t card_index) { 12.29 - jbyte val = _byte_map[card_index]; 12.30 - return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val(); 12.31 - } 12.32 - 12.33 - bool mark_card_deferred(size_t card_index); 12.34 - 12.35 // Card marking array base (adjusted for heap low boundary) 12.36 // This would be the 0th element of _byte_map, if the heap started at 0x0. 12.37 // But since the heap starts at some higher address, this points to somewhere