src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2493
97ba643ea3ed
child 2974
e8b0b0392037
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

ysr@777 1 /*
tonyp@2493 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/sparsePRT.hpp"
stefank@2314 29
ysr@777 30 // Remembered set for a heap region. Represent a set of "cards" that
ysr@777 31 // contain pointers into the owner heap region. Cards are defined somewhat
ysr@777 32 // abstractly, in terms of what the "BlockOffsetTable" in use can parse.
ysr@777 33
ysr@777 34 class G1CollectedHeap;
ysr@777 35 class G1BlockOffsetSharedArray;
ysr@777 36 class HeapRegion;
ysr@777 37 class HeapRegionRemSetIterator;
ysr@777 38 class PosParPRT;
ysr@777 39 class SparsePRT;
ysr@777 40
// Essentially a wrapper around SparsePRTCleanupTask (the per-worker
// accumulator used when expanding/freeing sparse PRT tables during the
// cleanup pause). See sparsePRT.hpp for more details.
class HRRSCleanupTask : public SparsePRTCleanupTask {
};
ysr@777 45
ysr@777 46 // The "_coarse_map" is a bitmap with one bit for each region, where set
ysr@777 47 // bits indicate that the corresponding region may contain some pointer
ysr@777 48 // into the owning region.
ysr@777 49
ysr@777 50 // The "_fine_grain_entries" array is an open hash table of PerRegionTables
ysr@777 51 // (PRTs), indicating regions for which we're keeping the RS as a set of
ysr@777 52 // cards. The strategy is to cap the size of the fine-grain table,
ysr@777 53 // deleting an entry and setting the corresponding coarse-grained bit when
ysr@777 54 // we would overflow this cap.
ysr@777 55
ysr@777 56 // We use a mixture of locking and lock-free techniques here. We allow
ysr@777 57 // threads to locate PRTs without locking, but threads attempting to alter
ysr@777 58 // a bucket list obtain a lock. This means that any failing attempt to
ysr@777 59 // find a PRT must be retried with the lock. It might seem dangerous that
ysr@777 60 // a read can find a PRT that is concurrently deleted. This is all right,
ysr@777 61 // because:
ysr@777 62 //
ysr@777 63 // 1) We only actually free PRT's at safe points (though we reuse them at
ysr@777 64 // other times).
ysr@777 65 // 2) We find PRT's in an attempt to add entries. If a PRT is deleted,
ysr@777 66 // its _coarse_map bit is set, so the card that we were attempting to add
ysr@777 67 // is represented. If a deleted PRT is re-used, a thread adding a bit,
ysr@777 68 // thinking the PRT is for a different region, does no harm.
ysr@777 69
// Records, for one heap region, the set of cards in *other* regions that
// may contain pointers into it, at three granularities: a sparse table,
// fine-grain per-region tables (PRTs), and a coarse per-region bitmap.
// See the file-header comments above for the locking / lock-free strategy.
class OtherRegionsTable VALUE_OBJ_CLASS_SPEC {
  friend class HeapRegionRemSetIterator;

  G1CollectedHeap* _g1h;
  Mutex _m;              // Guards the fields noted below.
  HeapRegion* _hr;       // The region whose remembered set this is.

  // These are protected by "_m".
  BitMap _coarse_map;            // One bit per region (coarse granularity).
  size_t _n_coarse_entries;      // Number of bits set in _coarse_map.
  static jint _n_coarsenings;    // Global count of fine->coarse transitions.

  PosParPRT** _fine_grain_regions;  // Open hash table of PRTs (see above).
  size_t _n_fine_entries;

#define SAMPLE_FOR_EVICTION 1
#if SAMPLE_FOR_EVICTION
  // Sampling state used to choose a fine-grain PRT to evict (coarsen)
  // when the fine-grain table is full.
  size_t _fine_eviction_start;
  static size_t _fine_eviction_stride;
  static size_t _fine_eviction_sample_size;
#endif

  SparsePRT _sparse_table;

  // These are static after init.
  static size_t _max_fine_entries;
  static size_t _mod_max_fine_entries_mask;

  // Requires "ind" to be the index of the bucket list appropriate for
  // "hr". If this list contains an entry for "hr", return it, otherwise
  // return "NULL".
  // (NOTE(review): the old comment mentioned a "prt" argument that this
  // declaration does not have; presumably it referred to an earlier
  // signature.)
  PosParPRT* find_region_table(size_t ind, HeapRegion* hr) const;

  // Find, delete, and return a candidate PosParPRT, if any exists,
  // adding the deleted region to the coarse bitmap. Requires the caller
  // to hold _m, and the fine-grain table to be full.
  PosParPRT* delete_region_table();

  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
  // be the correct index for "hr"), delete it and return true; else return
  // false.
  bool del_single_region_table(size_t ind, HeapRegion* hr);

  // Statistics for the from-card cache below.
  static jint _cache_probes;
  static jint _cache_hits;

  // Indexed by thread X heap region, to minimize thread contention.
  static int** _from_card_cache;
  static size_t _from_card_cache_max_regions;
  static size_t _from_card_cache_mem_size;

public:
  OtherRegionsTable(HeapRegion* hr);

  HeapRegion* hr() const { return _hr; }

  // Record that "from" (a location elsewhere in the heap) may contain a
  // pointer into the owning region; "tid" identifies the calling thread
  // (used to index the from-card cache).
  // For now. Could "expand" some tables in the future, so that this made
  // sense.
  void add_reference(OopOrNarrowOopStar from, int tid);

  // Single-threaded variant; uses thread id 0.
  void add_reference(OopOrNarrowOopStar from) {
    return add_reference(from, 0);
  }

  // Removes any entries shown by the given bitmaps to contain only dead
  // objects.
  void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);

  // Occupancy (number of recorded cards), total and per granularity.
  // NOTE(review): declared const, but the old comment claimed these take
  // the lock "_m" internally — confirm against the implementation.
  size_t occupied() const;
  size_t occ_fine() const;
  size_t occ_coarse() const;
  size_t occ_sparse() const;

  static jint n_coarsenings() { return _n_coarsenings; }

  // Returns size in bytes.
  // NOTE(review): declared const despite the old "not const because it
  // takes a lock" comment — confirm against the implementation.
  size_t mem_size() const;
  static size_t static_mem_size();
  static size_t fl_mem_size();

  // Whether "from" has been recorded in this table (optionally assuming
  // the caller already holds "_m" for the "_locked" variant).
  bool contains_reference(OopOrNarrowOopStar from) const;
  bool contains_reference_locked(OopOrNarrowOopStar from) const;

  void clear();

  // Specifically clear the from_card_cache.
  void clear_fcc();

  // "from_hr" is being cleared; remove any entries from it.
  void clear_incoming_entry(HeapRegion* from_hr);

  void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);

  // Declare the heap size (in # of regions) to the OtherRegionsTable.
  // (Uses it to initialize from_card_cache).
  static void init_from_card_cache(size_t max_regions);

  // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
  // Make sure any entries for higher regions are invalid.
  static void shrink_from_card_cache(size_t new_n_regs);

  static void print_from_card_cache();
};
ysr@777 175
ysr@777 176 class HeapRegionRemSet : public CHeapObj {
ysr@777 177 friend class VMStructs;
ysr@777 178 friend class HeapRegionRemSetIterator;
ysr@777 179
ysr@777 180 public:
ysr@777 181 enum Event {
ysr@777 182 Event_EvacStart, Event_EvacEnd, Event_RSUpdateEnd
ysr@777 183 };
ysr@777 184
ysr@777 185 private:
ysr@777 186 G1BlockOffsetSharedArray* _bosa;
ysr@777 187 G1BlockOffsetSharedArray* bosa() const { return _bosa; }
ysr@777 188
ysr@777 189 OtherRegionsTable _other_regions;
ysr@777 190
ysr@777 191 enum ParIterState { Unclaimed, Claimed, Complete };
iveresov@1696 192 volatile ParIterState _iter_state;
iveresov@1696 193 volatile jlong _iter_claimed;
ysr@777 194
ysr@777 195 // Unused unless G1RecordHRRSOops is true.
ysr@777 196
ysr@777 197 static const int MaxRecorded = 1000000;
ysr@1280 198 static OopOrNarrowOopStar* _recorded_oops;
ysr@1280 199 static HeapWord** _recorded_cards;
ysr@1280 200 static HeapRegion** _recorded_regions;
ysr@1280 201 static int _n_recorded;
ysr@777 202
ysr@777 203 static const int MaxRecordedEvents = 1000;
ysr@777 204 static Event* _recorded_events;
ysr@777 205 static int* _recorded_event_index;
ysr@777 206 static int _n_recorded_events;
ysr@777 207
ysr@777 208 static void print_event(outputStream* str, Event evnt);
ysr@777 209
ysr@777 210 public:
ysr@777 211 HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
ysr@777 212 HeapRegion* hr);
ysr@777 213
ysr@777 214 static int num_par_rem_sets();
iveresov@1696 215 static void setup_remset_size();
ysr@777 216
ysr@777 217 HeapRegion* hr() const {
ysr@777 218 return _other_regions.hr();
ysr@777 219 }
ysr@777 220
ysr@777 221 size_t occupied() const {
ysr@777 222 return _other_regions.occupied();
ysr@777 223 }
ysr@777 224 size_t occ_fine() const {
ysr@777 225 return _other_regions.occ_fine();
ysr@777 226 }
ysr@777 227 size_t occ_coarse() const {
ysr@777 228 return _other_regions.occ_coarse();
ysr@777 229 }
ysr@777 230 size_t occ_sparse() const {
ysr@777 231 return _other_regions.occ_sparse();
ysr@777 232 }
ysr@777 233
ysr@777 234 static jint n_coarsenings() { return OtherRegionsTable::n_coarsenings(); }
ysr@777 235
ysr@777 236 /* Used in the sequential case. Returns "true" iff this addition causes
ysr@777 237 the size limit to be reached. */
ysr@1280 238 void add_reference(OopOrNarrowOopStar from) {
ysr@777 239 _other_regions.add_reference(from);
ysr@777 240 }
ysr@777 241
ysr@777 242 /* Used in the parallel case. Returns "true" iff this addition causes
ysr@777 243 the size limit to be reached. */
ysr@1280 244 void add_reference(OopOrNarrowOopStar from, int tid) {
ysr@777 245 _other_regions.add_reference(from, tid);
ysr@777 246 }
ysr@777 247
ysr@777 248 // Removes any entries shown by the given bitmaps to contain only dead
ysr@777 249 // objects.
ysr@777 250 void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
ysr@777 251
ysr@777 252 // The region is being reclaimed; clear its remset, and any mention of
ysr@777 253 // entries for this region in other remsets.
ysr@777 254 void clear();
ysr@777 255
ysr@777 256 // Forget any entries due to pointers from "from_hr".
ysr@777 257 void clear_incoming_entry(HeapRegion* from_hr) {
ysr@777 258 _other_regions.clear_incoming_entry(from_hr);
ysr@777 259 }
ysr@777 260
ysr@777 261 #if 0
ysr@777 262 virtual void cleanup() = 0;
ysr@777 263 #endif
ysr@777 264
ysr@777 265 // Should be called from single-threaded code.
ysr@777 266 void init_for_par_iteration();
ysr@777 267 // Attempt to claim the region. Returns true iff this call caused an
ysr@777 268 // atomic transition from Unclaimed to Claimed.
ysr@777 269 bool claim_iter();
ysr@777 270 // Sets the iteration state to "complete".
ysr@777 271 void set_iter_complete();
ysr@777 272 // Returns "true" iff the region's iteration is complete.
ysr@777 273 bool iter_is_complete();
ysr@777 274
iveresov@1696 275 // Support for claiming blocks of cards during iteration
iveresov@1696 276 void set_iter_claimed(size_t x) { _iter_claimed = (jlong)x; }
iveresov@1696 277 size_t iter_claimed() const { return (size_t)_iter_claimed; }
iveresov@1696 278 // Claim the next block of cards
iveresov@1696 279 size_t iter_claimed_next(size_t step) {
iveresov@1696 280 size_t current, next;
iveresov@1696 281 do {
iveresov@1696 282 current = iter_claimed();
iveresov@1696 283 next = current + step;
iveresov@1696 284 } while (Atomic::cmpxchg((jlong)next, &_iter_claimed, (jlong)current) != (jlong)current);
iveresov@1696 285 return current;
iveresov@1696 286 }
iveresov@1696 287
ysr@777 288 // Initialize the given iterator to iterate over this rem set.
ysr@777 289 void init_iterator(HeapRegionRemSetIterator* iter) const;
ysr@777 290
ysr@777 291 #if 0
ysr@777 292 // Apply the "do_card" method to the start address of every card in the
ysr@777 293 // rem set. Returns false if some application of the closure aborted.
ysr@777 294 virtual bool card_iterate(CardClosure* iter) = 0;
ysr@777 295 #endif
ysr@777 296
ysr@777 297 // The actual # of bytes this hr_remset takes up.
ysr@777 298 size_t mem_size() {
ysr@777 299 return _other_regions.mem_size()
ysr@777 300 // This correction is necessary because the above includes the second
ysr@777 301 // part.
ysr@777 302 + sizeof(this) - sizeof(OtherRegionsTable);
ysr@777 303 }
ysr@777 304
ysr@777 305 // Returns the memory occupancy of all static data structures associated
ysr@777 306 // with remembered sets.
ysr@777 307 static size_t static_mem_size() {
ysr@777 308 return OtherRegionsTable::static_mem_size();
ysr@777 309 }
ysr@777 310
ysr@777 311 // Returns the memory occupancy of all free_list data structures associated
ysr@777 312 // with remembered sets.
ysr@777 313 static size_t fl_mem_size() {
ysr@777 314 return OtherRegionsTable::fl_mem_size();
ysr@777 315 }
ysr@777 316
ysr@1280 317 bool contains_reference(OopOrNarrowOopStar from) const {
ysr@777 318 return _other_regions.contains_reference(from);
ysr@777 319 }
ysr@777 320 void print() const;
ysr@777 321
ysr@777 322 // Called during a stop-world phase to perform any deferred cleanups.
ysr@777 323 // The second version may be called by parallel threads after then finish
ysr@777 324 // collection work.
ysr@777 325 static void cleanup();
ysr@777 326 static void par_cleanup();
ysr@777 327
ysr@777 328 // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
ysr@777 329 // (Uses it to initialize from_card_cache).
ysr@777 330 static void init_heap(size_t max_regions) {
ysr@777 331 OtherRegionsTable::init_from_card_cache(max_regions);
ysr@777 332 }
ysr@777 333
ysr@777 334 // Declares that only regions i s.t. 0 <= i < new_n_regs are in use.
ysr@777 335 static void shrink_heap(size_t new_n_regs) {
ysr@777 336 OtherRegionsTable::shrink_from_card_cache(new_n_regs);
ysr@777 337 }
ysr@777 338
ysr@777 339 #ifndef PRODUCT
ysr@777 340 static void print_from_card_cache() {
ysr@777 341 OtherRegionsTable::print_from_card_cache();
ysr@777 342 }
ysr@777 343 #endif
ysr@777 344
ysr@1280 345 static void record(HeapRegion* hr, OopOrNarrowOopStar f);
ysr@777 346 static void print_recorded();
ysr@777 347 static void record_event(Event evnt);
ysr@777 348
tonyp@2493 349 // These are wrappers for the similarly-named methods on
tonyp@2493 350 // SparsePRT. Look at sparsePRT.hpp for more details.
tonyp@2493 351 static void reset_for_cleanup_tasks();
tonyp@2493 352 void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
tonyp@2493 353 static void finish_cleanup_task(HRRSCleanupTask* hrrs_cleanup_task);
tonyp@2493 354
ysr@777 355 // Run unit tests.
ysr@777 356 #ifndef PRODUCT
ysr@777 357 static void test();
ysr@777 358 #endif
ysr@777 359 };
ysr@777 360
// Iterates over all cards in a HeapRegionRemSet, visiting the sparse
// table, then the fine-grain PRTs, then the coarse bitmap (see IterState
// below). Yields heap-global card indices via has_next().
class HeapRegionRemSetIterator : public CHeapObj {

  // The region over which we're iterating.
  const HeapRegionRemSet* _hrrs;

  // Local caching of HRRS fields.
  const BitMap* _coarse_map;
  PosParPRT** _fine_grain_regions;

  G1BlockOffsetSharedArray* _bosa;
  G1CollectedHeap* _g1h;

  // The number yielded since initialization.
  size_t _n_yielded_fine;
  size_t _n_yielded_coarse;
  size_t _n_yielded_sparse;

  // Which of the three granularities we are currently iterating over;
  // advances Sparse -> Fine -> Coarse.
  enum IterState {
    Sparse,
    Fine,
    Coarse
  };
  IterState _is;

  // In both kinds of iteration, heap offset of first card of current
  // region.
  size_t _cur_region_card_offset;
  // Card offset within cur region.
  size_t _cur_region_cur_card;

  // Coarse table iteration fields:

  // Current region index;
  int _coarse_cur_region_index;
  int _coarse_cur_region_cur_card;

  // Advances coarse iteration; returns true and sets "card_index" if
  // another coarse card remains.
  bool coarse_has_next(size_t& card_index);

  // Fine table iteration fields:

  // Index of bucket-list we're working on.
  int _fine_array_index;
  // Per Region Table we're doing within current bucket list.
  PosParPRT* _fine_cur_prt;

  /* SparsePRT:: */ SparsePRTIter _sparse_iter;

  // Advance _fine_array_index/_fine_cur_prt to the next non-empty PRT.
  void fine_find_next_non_null_prt();

  bool fine_has_next();
  bool fine_has_next(size_t& card_index);

public:
  // We require an iterator to be initialized before use, so the
  // constructor does little.
  HeapRegionRemSetIterator();

  // Bind this iterator to "hrrs" and reset all iteration state.
  void initialize(const HeapRegionRemSet* hrrs);

  // If there remains one or more cards to be yielded, returns true and
  // sets "card_index" to one of those cards (which is then considered
  // yielded.) Otherwise, returns false (and leaves "card_index"
  // undefined.)
  bool has_next(size_t& card_index);

  // Counts of cards yielded so far, per granularity and in total.
  size_t n_yielded_fine() { return _n_yielded_fine; }
  size_t n_yielded_coarse() { return _n_yielded_coarse; }
  size_t n_yielded_sparse() { return _n_yielded_sparse; }
  size_t n_yielded() {
    return n_yielded_fine() + n_yielded_coarse() + n_yielded_sparse();
  }
};
ysr@777 435
#if 0
// Closure applied to the start address of each card in a rem set.
// Currently compiled out, together with the card_iterate() declaration
// that would have consumed it.
class CardClosure: public Closure {
public:
  virtual void do_card(HeapWord* card_start) = 0;
};

#endif
stefank@2314 443
stefank@2314 444 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONREMSET_HPP

mercurial