src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

Fri, 10 Oct 2014 15:51:58 +0200

author
tschatzl
date
Fri, 10 Oct 2014 15:51:58 +0200
changeset 7257
e7d0505c8a30
parent 7208
7baf47cb97cb
child 7535
7ae4e26cb1e0
child 7828
cbc7c4c9e11c
permissions
-rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: Changes in JDK-8038423 always initialize (zero out) virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks. This is because they do not touch that memory at all, so the operating system does not actually commit these pages. The fix is to, if the initialization value of the data structures matches the default value of just committed memory (=0), do not do anything.
Reviewed-by: jwilhelm, brutisso

ysr@777 1 /*
tschatzl@6402 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
stefank@2314 27
tschatzl@6402 28 #include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
stefank@2314 29 #include "gc_implementation/g1/sparsePRT.hpp"
stefank@2314 30
ysr@777 31 // Remembered set for a heap region. Represent a set of "cards" that
ysr@777 32 // contain pointers into the owner heap region. Cards are defined somewhat
ysr@777 33 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
ysr@777 34
ysr@777 35 class G1CollectedHeap;
ysr@777 36 class G1BlockOffsetSharedArray;
ysr@777 37 class HeapRegion;
ysr@777 38 class HeapRegionRemSetIterator;
johnc@3891 39 class PerRegionTable;
ysr@777 40 class SparsePRT;
johnc@5548 41 class nmethod;
ysr@777 42
tonyp@2493 43 // Essentially a wrapper around SparsePRTCleanupTask; the class adds no
tonyp@2493 44 // members of its own and exists to give clients an HRRS-specific type.
// See sparsePRT.hpp for details of the underlying cleanup task.
tonyp@2493 45 class HRRSCleanupTask : public SparsePRTCleanupTask {
tonyp@2493 46 };
ysr@777 47
tschatzl@6407 48 // The FromCardCache remembers the most recently processed card on the heap on
tschatzl@6407 49 // a per-region and per-thread basis.
tschatzl@6407 50 class FromCardCache : public AllStatic {
tschatzl@6407 51 private:
tschatzl@6407 52 // Array of card indices. Indexed by thread X and heap region to minimize
tschatzl@6407 53 // thread contention.
tschatzl@6407 54 static int** _cache;
// Number of regions the cache has been sized for.
tschatzl@6407 55 static uint _max_regions;
// Total memory, in bytes, occupied by the cache arrays.
tschatzl@6407 56 static size_t _static_mem_size;
tschatzl@6407 57
tschatzl@6407 58 public:
tschatzl@6407 59 enum {
tschatzl@6407 60 InvalidCard = -1 // Card value of an invalid card, i.e. a card index not otherwise used.
tschatzl@6407 61 };
tschatzl@6407 62
// Clears the cached entries for the given region index.
tschatzl@6407 63 static void clear(uint region_idx);
tschatzl@6407 64
tschatzl@6407 65 // Returns true if the given card is in the cache at the given location, or
tschatzl@6407 66 // replaces the card at that location and returns false.
tschatzl@6407 67 static bool contains_or_replace(uint worker_id, uint region_idx, int card) {
tschatzl@6407 68 int card_in_cache = at(worker_id, region_idx);
tschatzl@6407 69 if (card_in_cache == card) {
tschatzl@6407 70 return true;
tschatzl@6407 71 } else {
tschatzl@6407 72 set(worker_id, region_idx, card);
tschatzl@6407 73 return false;
tschatzl@6407 74 }
tschatzl@6407 75 }
tschatzl@6407 76
// Returns the cached card index for the given worker and region.
// Note: performs no bounds checking on either index.
tschatzl@6407 77 static int at(uint worker_id, uint region_idx) {
tschatzl@6407 78 return _cache[worker_id][region_idx];
tschatzl@6407 79 }
tschatzl@6407 80
// Stores the given card index for the given worker and region.
tschatzl@6407 81 static void set(uint worker_id, uint region_idx, int val) {
tschatzl@6407 82 _cache[worker_id][region_idx] = val;
tschatzl@6407 83 }
tschatzl@6407 84
// Sets up the cache for the given number of parallel remembered set
// threads and the given maximum number of regions.
tschatzl@6407 85 static void initialize(uint n_par_rs, uint max_num_regions);
tschatzl@6407 86
// Invalidates any cached entries for regions in
// [start_idx, start_idx + num_regions).
tschatzl@7051 87 static void invalidate(uint start_idx, size_t num_regions);
tschatzl@6407 88
// Prints the contents of the cache. Non-product builds only.
tschatzl@6407 89 static void print(outputStream* out = gclog_or_tty) PRODUCT_RETURN;
tschatzl@6407 90
// Memory, in bytes, used by the cache.
tschatzl@6407 91 static size_t static_mem_size() {
tschatzl@6407 92 return _static_mem_size;
tschatzl@6407 93 }
tschatzl@6407 94 };
tschatzl@6407 95
ysr@777 96 // The "_coarse_map" is a bitmap with one bit for each region, where set
ysr@777 97 // bits indicate that the corresponding region may contain some pointer
ysr@777 98 // into the owning region.
ysr@777 99
ysr@777 100 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
ysr@777 101 // (PRTs), indicating regions for which we're keeping the RS as a set of
ysr@777 102 // cards. The strategy is to cap the size of the fine-grain table,
ysr@777 103 // deleting an entry and setting the corresponding coarse-grained bit when
ysr@777 104 // we would overflow this cap.
ysr@777 105
ysr@777 106 // We use a mixture of locking and lock-free techniques here. We allow
ysr@777 107 // threads to locate PRTs without locking, but threads attempting to alter
ysr@777 108 // a bucket list obtain a lock. This means that any failing attempt to
ysr@777 109 // find a PRT must be retried with the lock. It might seem dangerous that
ysr@777 110 // a read can find a PRT that is concurrently deleted. This is all right,
ysr@777 111 // because:
ysr@777 112 //
ysr@777 113 // 1) We only actually free PRTs at safe points (though we reuse them at
ysr@777 114 // other times).
ysr@777 115 // 2) We find PRTs in an attempt to add entries. If a PRT is deleted,
ysr@777 116 // its _coarse_map bit is set, so that the card we were attempting to add
ysr@777 117 // is represented. If a deleted PRT is re-used, a thread adding a bit,
ysr@777 118 // thinking the PRT is for a different region, does no harm.
ysr@777 119
apetrusenko@984 120 class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
ysr@777 121 friend class HeapRegionRemSetIterator;
ysr@777 122
ysr@777 123 G1CollectedHeap* _g1h;
tschatzl@6402 124 Mutex* _m;
// The region that owns this remembered set.
ysr@777 125 HeapRegion* _hr;
ysr@777 126
ysr@777 127 // These are protected by "_m".
ysr@777 128 BitMap _coarse_map;
ysr@777 129 size_t _n_coarse_entries;
// Total number of fine-to-coarse evictions, across all tables.
ysr@777 130 static jint _n_coarsenings;
ysr@777 131
// Open hash table of PerRegionTables; see the block comment above.
johnc@3891 132 PerRegionTable** _fine_grain_regions;
johnc@3891 133 size_t _n_fine_entries;
ysr@777 134
johnc@3956 135 // The fine grain remembered sets are doubly linked together using
johnc@3956 136 // their 'next' and 'prev' fields.
johnc@3956 137 // This allows fast bulk freeing of all the fine grain remembered
johnc@3956 138 // set entries, and fast finding of all of them without iterating
johnc@3956 139 // over the _fine_grain_regions table.
johnc@3956 140 PerRegionTable * _first_all_fine_prts;
johnc@3956 141 PerRegionTable * _last_all_fine_prts;
johnc@3956 142
johnc@3891 143 // Used to sample a subset of the fine grain PRTs to determine which
johnc@3891 144 // PRT to evict and coarsen.
ysr@777 145 size_t _fine_eviction_start;
ysr@777 146 static size_t _fine_eviction_stride;
ysr@777 147 static size_t _fine_eviction_sample_size;
ysr@777 148
ysr@777 149 SparsePRT _sparse_table;
ysr@777 150
ysr@777 151 // These are static after init.
ysr@777 152 static size_t _max_fine_entries;
ysr@777 153 static size_t _mod_max_fine_entries_mask;
ysr@777 154
ysr@777 155 // Requires "prt" to be the first element of the bucket list appropriate
ysr@777 156 // for "hr". If this list contains an entry for "hr", return it,
ysr@777 157 // otherwise return "NULL".
johnc@3891 158 PerRegionTable* find_region_table(size_t ind, HeapRegion* hr) const;
ysr@777 159
johnc@3891 160 // Find, delete, and return a candidate PerRegionTable, if any exists,
ysr@777 161 // adding the deleted region to the coarse bitmap. Requires the caller
ysr@777 162 // to hold _m, and the fine-grain table to be full.
johnc@3891 163 PerRegionTable* delete_region_table();
ysr@777 164
ysr@777 165 // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
ysr@777 166 // be the correct index for "hr"), delete it and return true; else return
ysr@777 167 // false.
ysr@777 168 bool del_single_region_table(size_t ind, HeapRegion* hr);
ysr@777 169
johnc@3956 170 // link/add the given fine grain remembered set into the "all" list
johnc@3956 171 void link_to_all(PerRegionTable * prt);
johnc@3956 172 // unlink/remove the given fine grain remembered set from the "all" list
johnc@3956 173 void unlink_from_all(PerRegionTable * prt);
johnc@3956 174
ysr@777 175 public:
tschatzl@6402 176 OtherRegionsTable(HeapRegion* hr, Mutex* m);
ysr@777 177
ysr@777 178 HeapRegion* hr() const { return _hr; }
ysr@777 179
ysr@777 180 // For now. Could "expand" some tables in the future, so that this made
ysr@777 181 // sense.
ysr@1280 182 void add_reference(OopOrNarrowOopStar from, int tid);
ysr@777 183
ysr@777 184 // Removes any entries shown by the given bitmaps to contain only dead
ysr@777 185 // objects.
ysr@777 186 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
ysr@777 187
tschatzl@7010 188 // Returns whether this remembered set (and all sub-sets) contain no entries.
tschatzl@7010 189 bool is_empty() const;
tschatzl@7010 190
// Occupancy (number of cards): total, and for the fine, coarse and
// sparse components respectively.
ysr@777 191 size_t occupied() const;
ysr@777 192 size_t occ_fine() const;
ysr@777 193 size_t occ_coarse() const;
ysr@777 194 size_t occ_sparse() const;
ysr@777 195
ysr@777 196 static jint n_coarsenings() { return _n_coarsenings; }
ysr@777 197
ysr@777 198 // Returns size in bytes.
ysr@777 199 // NOTE(review): declared const; the old "not const because it takes a lock" note was stale -- confirm locking in the .cpp.
ysr@777 200 size_t mem_size() const;
ysr@777 201 static size_t static_mem_size();
ysr@777 202 static size_t fl_mem_size();
ysr@777 203
// Returns whether this table records a reference from "from". The
// "_locked" variant is presumably for callers already holding "_m" --
// confirm against the .cpp.
ysr@1280 204 bool contains_reference(OopOrNarrowOopStar from) const;
ysr@1280 205 bool contains_reference_locked(OopOrNarrowOopStar from) const;
ysr@777 206
// Removes all entries from this table.
ysr@777 207 void clear();
ysr@777 208
ysr@777 209 // Specifically clear the from_card_cache.
ysr@777 210 void clear_fcc();
ysr@777 211
tonyp@2493 212 void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
tonyp@2493 213
ysr@777 214 // Declare the heap size (in # of regions) to the OtherRegionsTable.
ysr@777 215 // (Uses it to initialize from_card_cache).
tschatzl@7051 216 static void initialize(uint max_regions);
ysr@777 217
tschatzl@7051 218 // Declares that regions between start_idx <= i < start_idx + num_regions are
tschatzl@7051 219 // not in use. Make sure that any entries for these regions are invalid.
tschatzl@7051 220 static void invalidate(uint start_idx, size_t num_regions);
ysr@777 221
// Debugging aid: prints the from-card-cache contents.
ysr@777 222 static void print_from_card_cache();
ysr@777 223 };
ysr@777 224
zgu@3900 225 class HeapRegionRemSet : public CHeapObj<mtGC> {
ysr@777 226 friend class VMStructs;
ysr@777 227 friend class HeapRegionRemSetIterator;
ysr@777 228
ysr@777 229 public:
// Event types that can be recorded via record_event() (see below).
ysr@777 230 enum Event {
ysr@777 231 Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
ysr@777 232 };
ysr@777 233
ysr@777 234 private:
ysr@777 235 G1BlockOffsetSharedArray* _bosa;
ysr@777 236 G1BlockOffsetSharedArray* bosa() const { return _bosa; }
ysr@777 237
tschatzl@6402 238 // A set of code blobs (nmethods) whose code contains pointers into
johnc@5548 239 // the region that owns this RSet.
tschatzl@6402 240 G1CodeRootSet _code_roots;
tschatzl@6402 241
// Lock taken by e.g. occupied() and mem_size() below; the *_locked
// variants assume it is already held.
tschatzl@6402 242 Mutex _m;
johnc@5548 243
ysr@777 244 OtherRegionsTable _other_regions;
ysr@777 245
// State of the parallel-iteration claim protocol (see claim_iter()).
ysr@777 246 enum ParIterState { Unclaimed, Claimed, Complete };
iveresov@1696 247 volatile ParIterState _iter_state;
iveresov@1696 248 volatile jlong _iter_claimed;
ysr@777 249
ysr@777 250 // Unused unless G1RecordHRRSOops is true.
ysr@777 251
ysr@777 252 static const int MaxRecorded = 1000000;
ysr@1280 253 static OopOrNarrowOopStar* _recorded_oops;
ysr@1280 254 static HeapWord** _recorded_cards;
ysr@1280 255 static HeapRegion** _recorded_regions;
ysr@1280 256 static int _n_recorded;
ysr@777 257
ysr@777 258 static const int MaxRecordedEvents = 1000;
ysr@777 259 static Event* _recorded_events;
ysr@777 260 static int* _recorded_event_index;
ysr@777 261 static int _n_recorded_events;
ysr@777 262
ysr@777 263 static void print_event(outputStream* str, Event evnt);
ysr@777 264
ysr@777 265 public:
tschatzl@6402 266 HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, HeapRegion* hr);
ysr@777 267
tschatzl@6403 268 static uint num_par_rem_sets();
iveresov@1696 269 static void setup_remset_size();
ysr@777 270
// The region that owns this remembered set.
ysr@777 271 HeapRegion* hr() const {
ysr@777 272 return _other_regions.hr();
ysr@777 273 }
ysr@777 274
// True iff neither the code root set nor any card table has entries.
tschatzl@7010 275 bool is_empty() const {
tschatzl@7010 276 return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
tschatzl@7010 277 }
tschatzl@7010 278
// Number of cards in this remembered set. The "_locked" variant must
// only be called with "_m" already held.
tschatzl@6402 279 size_t occupied() {
tschatzl@6402 280 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
tschatzl@6402 281 return occupied_locked();
tschatzl@6402 282 }
tschatzl@6402 283 size_t occupied_locked() {
ysr@777 284 return _other_regions.occupied();
ysr@777 285 }
ysr@777 286 size_t occ_fine() const {
ysr@777 287 return _other_regions.occ_fine();
ysr@777 288 }
ysr@777 289 size_t occ_coarse() const {
ysr@777 290 return _other_regions.occ_coarse();
ysr@777 291 }
ysr@777 292 size_t occ_sparse() const {
ysr@777 293 return _other_regions.occ_sparse();
ysr@777 294 }
ysr@777 295
ysr@777 296 static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
ysr@777 297
johnc@3891 298 // Used in the sequential case.
ysr@1280 299 void add_reference(OopOrNarrowOopStar from) {
johnc@3891 300 _other_regions.add_reference(from, 0);
ysr@777 301 }
ysr@777 302
johnc@3891 303 // Used in the parallel case.
ysr@1280 304 void add_reference(OopOrNarrowOopStar from, int tid) {
ysr@777 305 _other_regions.add_reference(from, tid);
ysr@777 306 }
ysr@777 307
ysr@777 308 // Removes any entries shown by the given bitmaps to contain only dead
ysr@777 309 // objects.
ysr@777 310 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
ysr@777 311
ysr@777 312 // The region is being reclaimed; clear its remset, and any mention of
ysr@777 313 // entries for this region in other remsets.
ysr@777 314 void clear();
tschatzl@6402 315 void clear_locked();
ysr@777 316
ysr@777 317 // Attempt to claim the region. Returns true iff this call caused an
ysr@777 318 // atomic transition from Unclaimed to Claimed.
ysr@777 319 bool claim_iter();
ysr@777 320 // Sets the iteration state to "complete".
ysr@777 321 void set_iter_complete();
ysr@777 322 // Returns "true" iff the region's iteration is complete.
ysr@777 323 bool iter_is_complete();
ysr@777 324
iveresov@1696 325 // Support for claiming blocks of cards during iteration
iveresov@1696 326 size_t iter_claimed() const { return (size_t)_iter_claimed; }
iveresov@1696 327 // Claim the next block of cards
// Atomically (via CAS-loop) advances the claim counter by "step" and
// returns the start of the claimed block.
iveresov@1696 328 size_t iter_claimed_next(size_t step) {
iveresov@1696 329 size_t current, next;
iveresov@1696 330 do {
iveresov@1696 331 current = iter_claimed();
iveresov@1696 332 next = current + step;
iveresov@1696 333 } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
iveresov@1696 334 return current;
iveresov@1696 335 }
tonyp@2974 336 void reset_for_par_iteration();
tonyp@2974 337
tonyp@2974 338 bool verify_ready_for_par_iteration() {
tonyp@2974 339 return (_iter_state == Unclaimed) && (_iter_claimed == 0);
tonyp@2974 340 }
iveresov@1696 341
ysr@777 342 // The actual # of bytes this hr_remset takes up.
johnc@5548 343 // Note also includes the strong code root set.
ysr@777 344 size_t mem_size() {
tschatzl@6402 345 MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
ysr@777 346 return _other_regions.mem_size()
ysr@777 347 // This correction is necessary because the above includes the second
ysr@777 348 // part.
tschatzl@6932 349 + (sizeof(HeapRegionRemSet) - sizeof(OtherRegionsTable))
johnc@5548 350 + strong_code_roots_mem_size();
ysr@777 351 }
ysr@777 352
ysr@777 353 // Returns the memory occupancy of all static data structures associated
ysr@777 354 // with remembered sets.
ysr@777 355 static size_t static_mem_size() {
mgerdin@7208 356 return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
ysr@777 357 }
ysr@777 358
ysr@777 359 // Returns the memory occupancy of all free_list data structures associated
ysr@777 360 // with remembered sets.
ysr@777 361 static size_t fl_mem_size() {
mgerdin@7208 362 return OtherRegionsTable::fl_mem_size();
ysr@777 363 }
ysr@777 364
ysr@1280 365 bool contains_reference(OopOrNarrowOopStar from) const {
ysr@777 366 return _other_regions.contains_reference(from);
ysr@777 367 }
johnc@5548 368
johnc@5548 369 // Routines for managing the list of code roots that point into
johnc@5548 370 // the heap region that owns this RSet.
johnc@5548 371 void add_strong_code_root(nmethod* nm);
mgerdin@7208 372 void add_strong_code_root_locked(nmethod* nm);
johnc@5548 373 void remove_strong_code_root(nmethod* nm);
johnc@5548 374
johnc@5548 375 // Applies blk->do_code_blob() to each of the entries in
johnc@5548 376 // the strong code roots list
johnc@5548 377 void strong_code_roots_do(CodeBlobClosure* blk) const;
johnc@5548 378
mgerdin@7208 379 void clean_strong_code_roots(HeapRegion* hr);
mgerdin@7208 380
johnc@5548 381 // Returns the number of elements in the strong code roots list
tschatzl@7010 382 size_t strong_code_roots_list_length() const {
tschatzl@6402 383 return _code_roots.length();
johnc@5548 384 }
johnc@5548 385
johnc@5548 386 // Returns true if the strong code roots contains the given
johnc@5548 387 // nmethod.
johnc@5548 388 bool strong_code_roots_list_contains(nmethod* nm) {
tschatzl@6402 389 return _code_roots.contains(nm);
johnc@5548 390 }
johnc@5548 391
johnc@5548 392 // Returns the amount of memory, in bytes, currently
johnc@5548 393 // consumed by the strong code roots.
johnc@5548 394 size_t strong_code_roots_mem_size();
johnc@5548 395
tschatzl@6402 396 void print() PRODUCT_RETURN;
ysr@777 397
ysr@777 398 // Called during a stop-world phase to perform any deferred cleanups.
ysr@777 399 static void cleanup();
ysr@777 400
ysr@777 401 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
ysr@777 402 // (Uses it to initialize from_card_cache).
tonyp@3713 403 static void init_heap(uint max_regions) {
tschatzl@7051 404 OtherRegionsTable::initialize(max_regions);
ysr@777 405 }
ysr@777 406
// Invalidates entries for regions [start_idx, start_idx + num_regions).
// Note: num_regions is implicitly widened uint -> size_t when forwarding.
tschatzl@7051 407 static void invalidate(uint start_idx, uint num_regions) {
tschatzl@7051 408 OtherRegionsTable::invalidate(start_idx, num_regions);
ysr@777 409 }
ysr@777 410
ysr@777 411 #ifndef PRODUCT
ysr@777 412 static void print_from_card_cache() {
ysr@777 413 OtherRegionsTable::print_from_card_cache();
ysr@777 414 }
ysr@777 415 #endif
ysr@777 416
// Debugging aids; see the G1RecordHRRSOops note above.
ysr@1280 417 static void record(HeapRegion* hr, OopOrNarrowOopStar f);
ysr@777 418 static void print_recorded();
ysr@777 419 static void record_event(Event evnt);
ysr@777 420
tonyp@2493 421 // These are wrappers for the similarly-named methods on
tonyp@2493 422 // SparsePRT. Look at sparsePRT.hpp for more details.
tonyp@2493 423 static void reset_for_cleanup_tasks();
tonyp@2493 424 void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
tonyp@2493 425 static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
tonyp@2493 426
ysr@777 427 // Run unit tests.
ysr@777 428 #ifndef PRODUCT
tschatzl@5165 429 static void test_prt();
ysr@777 430 static void test();
ysr@777 431 #endif
ysr@777 432 };
ysr@777 433
johnc@5014 434 class HeapRegionRemSetIterator : public StackObj {
tschatzl@6927 435 private:
tschatzl@6927 436 // The region RSet over which we are iterating.
tschatzl@6402 437 HeapRegionRemSet* _hrrs;
ysr@777 438
ysr@777 439 // Local caching of HRRS fields.
ysr@777 440 const BitMap* _coarse_map;
ysr@777 441
ysr@777 442 G1BlockOffsetSharedArray* _bosa;
ysr@777 443 G1CollectedHeap* _g1h;
ysr@777 444
tschatzl@6927 445 // The number of cards yielded since initialization, per component.
ysr@777 446 size_t _n_yielded_fine;
ysr@777 447 size_t _n_yielded_coarse;
ysr@777 448 size_t _n_yielded_sparse;
ysr@777 449
tschatzl@6927 450 // Indicates what granularity of table that we are currently iterating over.
johnc@5014 451 // We start iterating over the sparse table, progress to the fine grain
johnc@5014 452 // table, and then finish with the coarse table.
ysr@777 453 enum IterState {
ysr@777 454 Sparse,
ysr@777 455 Fine,
ysr@777 456 Coarse
ysr@777 457 };
ysr@777 458 IterState _is;
ysr@777 459
tschatzl@6927 460 // For both Coarse and Fine remembered set iteration this contains the
tschatzl@6927 461 // first card number of the heap region we currently iterate over.
ysr@777 462 size_t _cur_region_card_offset;
ysr@777 463
tschatzl@6927 464 // Current region index for the Coarse remembered set iteration.
johnc@3182 465 int _coarse_cur_region_index;
johnc@3182 466 size_t _coarse_cur_region_cur_card;
ysr@777 467
// Returns true and sets "card_index" if the coarse table has another
// card to yield; presumably advances the coarse cursors above -- see
// the .cpp for details.
ysr@777 468 bool coarse_has_next(size_t& card_index);
ysr@777 469
tschatzl@6927 470 // The PRT we are currently iterating over.
tschatzl@6927 471 PerRegionTable* _fine_cur_prt;
tschatzl@6927 472 // Card offset within the current PRT.
tschatzl@6927 473 size_t _cur_card_in_prt;
ysr@777 474
tschatzl@6927 475 // Update internal variables when switching to the given PRT.
tschatzl@6927 476 void switch_to_prt(PerRegionTable* prt);
ysr@777 477 bool fine_has_next();
ysr@777 478 bool fine_has_next(size_t& card_index);
ysr@777 479
tschatzl@6927 480 // The Sparse remembered set iterator.
tschatzl@6927 481 SparsePRTIter _sparse_iter;
tschatzl@6927 482
tschatzl@6927 483 public:
// Creates an iterator over the given remembered set. Iteration proceeds
// over the sparse, then fine, then coarse tables (see IterState above).
tschatzl@6402 484 HeapRegionRemSetIterator(HeapRegionRemSet* hrrs);
ysr@777 485
ysr@777 486 // If there remains one or more cards to be yielded, returns true and
ysr@777 487 // sets "card_index" to one of those cards (which is then considered
ysr@777 488 // yielded.) Otherwise, returns false (and leaves "card_index"
ysr@777 489 // undefined.)
ysr@777 490 bool has_next(size_t& card_index);
ysr@777 491
// Per-component and total counts of cards yielded so far.
ysr@777 492 size_t n_yielded_fine() { return _n_yielded_fine; }
ysr@777 493 size_t n_yielded_coarse() { return _n_yielded_coarse; }
ysr@777 494 size_t n_yielded_sparse() { return _n_yielded_sparse; }
ysr@777 495 size_t n_yielded() {
ysr@777 496 return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
ysr@777 497 }
ysr@777 498 };
ysr@777 499
stefank@2314 500 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP

mercurial