src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      tonyp
date:        Sat, 16 Oct 2010 17:12:19 -0400
changeset:   2241:72a161e62cc4
parent:      2021:5cbac8938c4c
child:       2314:f95d63e2154a
permissions: -rw-r--r--

6991377: G1: race between concurrent refinement and humongous object allocation
Summary: There is a race between the concurrent refinement threads and humongous object allocation that can cause the refinement threads to corrupt the part of the BOT that is being initialized by the humongous object allocation operation. The solution is to perform the humongous object allocation in careful steps, so that the concurrent refinement threads always see a consistent view of the BOT, the region contents, and top. The fix also includes some very minor tidying up in sparsePRT.
Reviewed-by: jcoomes, johnc, ysr

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #ifndef SERIALGC
ysr@777 26
ysr@777 27 // A HeapRegion is the smallest piece of a G1CollectedHeap that
ysr@777 28 // can be collected independently.
ysr@777 29
ysr@777 30 // NOTE: Although a HeapRegion is a Space, its
ysr@777 31 // Space::initDirtyCardClosure method must not be called.
ysr@777 32 // The problem is that the existence of this method breaks
ysr@777 33 // the independence of barrier sets from remembered sets.
ysr@777 34 // The solution is to remove this method from the definition
ysr@777 35 // of a Space.
ysr@777 36
ysr@777 37 class CompactibleSpace;
ysr@777 38 class ContiguousSpace;
ysr@777 39 class HeapRegionRemSet;
ysr@777 40 class HeapRegionRemSetIterator;
ysr@777 41 class HeapRegion;
ysr@777 42
ysr@777 43 // A dirty card to oop closure for heap regions. It
ysr@777 44 // knows how to get the G1 heap and how to use the bitmap
ysr@777 45 // in the concurrent marker used by G1 to filter remembered
ysr@777 46 // sets.
ysr@777 47
ysr@777 48 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
ysr@777 49 public:
ysr@777 50 // Specification of possible DirtyCardToOopClosure filtering.
ysr@777 51 enum FilterKind {
ysr@777 52 NoFilterKind,
ysr@777 53 IntoCSFilterKind,
ysr@777 54 OutOfRegionFilterKind
ysr@777 55 };
ysr@777 56
ysr@777 57 protected:
ysr@777 58 HeapRegion* _hr;
ysr@777 59 FilterKind _fk;
ysr@777 60 G1CollectedHeap* _g1;
ysr@777 61
ysr@777 62 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 63 HeapWord* bottom, HeapWord* top,
ysr@777 64 OopClosure* cl);
ysr@777 65
ysr@777 66 // We don't specialize this for FilteringClosure; filtering is handled by
ysr@777 67 // the "FilterKind" mechanism. But we provide this to avoid a compiler
ysr@777 68 // warning.
ysr@777 69 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 70 HeapWord* bottom, HeapWord* top,
ysr@777 71 FilteringClosure* cl) {
ysr@777 72 HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
ysr@777 73 (OopClosure*)cl);
ysr@777 74 }
ysr@777 75
ysr@777 76 // Get the actual top of the area on which the closure will
ysr@777 77 // operate, given where the top is assumed to be (the end of the
ysr@777 78 // memory region passed to do_MemRegion) and where the object
ysr@777 79 // at the top is assumed to start. For example, an object may
ysr@777 80 // start at the top but actually extend past the assumed top,
ysr@777 81 // in which case the top becomes the end of the object.
ysr@777 82 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
ysr@777 83 return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
ysr@777 84 }
ysr@777 85
ysr@777 86 // Walk the given memory region from bottom to (actual) top
ysr@777 87 // looking for objects and applying the oop closure (_cl) to
ysr@777 88 // them. The base implementation of this treats the area as
ysr@777 89 // blocks, where a block may or may not be an object. Sub-
ysr@777 90 // classes should override this to provide more accurate
ysr@777 91 // or possibly more efficient walking.
ysr@777 92 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
ysr@777 93 Filtering_DCTOC::walk_mem_region(mr, bottom, top);
ysr@777 94 }
ysr@777 95
ysr@777 96 public:
ysr@777 97 HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@777 98 HeapRegion* hr, OopClosure* cl,
ysr@777 99 CardTableModRefBS::PrecisionStyle precision,
ysr@777 100 FilterKind fk);
ysr@777 101 };
ysr@777 102
ysr@777 103
ysr@777 104 // The complicating factor is that BlockOffsetTable diverged
ysr@777 105 // significantly, and we need functionality that is only in the G1 version.
ysr@777 106 // So I copied that code, which led to an alternate G1 version of
ysr@777 107 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
ysr@777 108 // be reconciled, then G1OffsetTableContigSpace could go away.
ysr@777 109
ysr@777 110 // The idea behind time stamps is the following. Doing a save_marks on
ysr@777 111 // all regions at every GC pause is time consuming (if I remember
ysr@777 112 // well, 10ms or so). So, we would like to do that only for regions
ysr@777 113 // that are GC alloc regions. To achieve this, we use time
ysr@777 114 // stamps. For every evacuation pause, G1CollectedHeap generates a
ysr@777 115 // unique time stamp (essentially a counter that gets
ysr@777 116 // incremented). Every time we want to call save_marks on a region,
ysr@777 117 // we set the saved_mark_word to top and also copy the current GC
ysr@777 118 // time stamp to the time stamp field of the space. Reading the
ysr@777 119 // saved_mark_word involves checking the time stamp of the
ysr@777 120 // region. If it is the same as the current GC time stamp, then we
ysr@777 121 // can safely read the saved_mark_word field, as it is valid. If the
ysr@777 122 // time stamp of the region is not the same as the current GC time
ysr@777 123 // stamp, then we instead read top, as the saved_mark_word field is
ysr@777 124 // invalid. Time stamps (on the regions and also on the
ysr@777 125 // G1CollectedHeap) are reset at every cleanup (we iterate over
ysr@777 126 // the regions anyway) and at the end of a Full GC. The current scheme
ysr@777 127 // that uses sequential unsigned ints will fail only if we have 4b
ysr@777 128 // evacuation pauses between two cleanups, which is _highly_ unlikely.
ysr@777 129
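
// Illustration (not part of this header): the time-stamp protocol described
// above, roughly as the companion .cpp could implement it. This is a hedged
// sketch of the idea, not the actual HotSpot code.
HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(_gc_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
    // Stale stamp: set_saved_mark() has not run during this pause, so the
    // up-to-date saved mark is simply the current top.
    return top();
  }
  // Stamps match: the saved_mark_word field was set during this pause and
  // can be read safely.
  return ContiguousSpace::saved_mark_word();
}

void G1OffsetTableContigSpace::set_saved_mark() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
  if (_gc_time_stamp < curr_gc_time_stamp) {
    // First save_marks() in this pause: record top and catch up the stamp.
    ContiguousSpace::set_saved_mark();
    _gc_time_stamp = curr_gc_time_stamp;
  }
}
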
ysr@777 130 class G1OffsetTableContigSpace: public ContiguousSpace {
ysr@777 131 friend class VMStructs;
ysr@777 132 protected:
ysr@777 133 G1BlockOffsetArrayContigSpace _offsets;
ysr@777 134 Mutex _par_alloc_lock;
ysr@777 135 volatile unsigned _gc_time_stamp;
ysr@777 136
ysr@777 137 public:
ysr@777 138 // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
ysr@777 139 // assumed to contain zeros.
ysr@777 140 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 141 MemRegion mr, bool is_zeroed = false);
ysr@777 142
ysr@777 143 void set_bottom(HeapWord* value);
ysr@777 144 void set_end(HeapWord* value);
ysr@777 145
ysr@777 146 virtual HeapWord* saved_mark_word() const;
ysr@777 147 virtual void set_saved_mark();
ysr@777 148 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
ysr@777 149
tonyp@791 150 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tonyp@791 151 virtual void clear(bool mangle_space);
ysr@777 152
ysr@777 153 HeapWord* block_start(const void* p);
ysr@777 154 HeapWord* block_start_const(const void* p) const;
ysr@777 155
ysr@777 156 // Add offset table update.
ysr@777 157 virtual HeapWord* allocate(size_t word_size);
ysr@777 158 HeapWord* par_allocate(size_t word_size);
ysr@777 159
ysr@777 160 // MarkSweep support phase3
ysr@777 161 virtual HeapWord* initialize_threshold();
ysr@777 162 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
ysr@777 163
ysr@777 164 virtual void print() const;
ysr@777 165 };
ysr@777 166
ysr@777 167 class HeapRegion: public G1OffsetTableContigSpace {
ysr@777 168 friend class VMStructs;
ysr@777 169 private:
ysr@777 170
tonyp@790 171 enum HumongousType {
tonyp@790 172 NotHumongous = 0,
tonyp@790 173 StartsHumongous,
tonyp@790 174 ContinuesHumongous
tonyp@790 175 };
tonyp@790 176
ysr@777 177 // The next filter kind that should be used for a "new_dcto_cl" call with
ysr@777 178 // the "traditional" signature.
ysr@777 179 HeapRegionDCTOC::FilterKind _next_fk;
ysr@777 180
ysr@777 181 // Requires that the region "mr" be dense with objects, and begin and end
ysr@777 182 // with an object.
ysr@777 183 void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
ysr@777 184
ysr@777 185 // The remembered set for this region.
ysr@777 186 // (Might want to make this "inline" later, to avoid some alloc failure
ysr@777 187 // issues.)
ysr@777 188 HeapRegionRemSet* _rem_set;
ysr@777 189
ysr@777 190 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
ysr@777 191
ysr@777 192 protected:
ysr@777 193 // If this region is a member of a HeapRegionSeq, the index in that
ysr@777 194 // sequence, otherwise -1.
ysr@777 195 int _hrs_index;
ysr@777 196
tonyp@790 197 HumongousType _humongous_type;
ysr@777 198 // For a humongous region, the region in which it starts.
ysr@777 199 HeapRegion* _humongous_start_region;
ysr@777 200 // For the start region of a humongous sequence, its original end().
ysr@777 201 HeapWord* _orig_end;
ysr@777 202
ysr@777 203 // True iff the region is in current collection_set.
ysr@777 204 bool _in_collection_set;
ysr@777 205
ysr@777 206 // True iff the region is on the unclean list, waiting to be zero filled.
ysr@777 207 bool _is_on_unclean_list;
ysr@777 208
ysr@777 209 // True iff the region is on the free list, ready for allocation.
ysr@777 210 bool _is_on_free_list;
ysr@777 211
ysr@777 212 // True iff this is or has been an allocation region in the current
ysr@777 213 // collection pause.
ysr@777 214 bool _is_gc_alloc_region;
ysr@777 215
ysr@777 216 // True iff an attempt to evacuate an object in the region failed.
ysr@777 217 bool _evacuation_failed;
ysr@777 218
ysr@777 219 // A heap region may be a member of one of a number of special subsets, each
ysr@777 220 // represented as linked lists through the field below. Currently, these
ysr@777 221 // sets include:
ysr@777 222 // The collection set.
ysr@777 223 // The set of allocation regions used in a collection pause.
ysr@777 224 // Spaces that may contain gray objects.
ysr@777 225 HeapRegion* _next_in_special_set;
ysr@777 226
ysr@777 227 // next region in the young "generation" region set
ysr@777 228 HeapRegion* _next_young_region;
ysr@777 229
apetrusenko@1231 230 // Next region whose cards need cleaning
apetrusenko@1231 231 HeapRegion* _next_dirty_cards_region;
apetrusenko@1231 232
ysr@777 233 // For parallel heapRegion traversal.
ysr@777 234 jint _claimed;
ysr@777 235
ysr@777 236 // We use concurrent marking to determine the amount of live data
ysr@777 237 // in each heap region.
ysr@777 238 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
ysr@777 239 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
ysr@777 240
ysr@777 241 // See the "sort_index" method. -1 means it is not in the array.
ysr@777 242 int _sort_index;
ysr@777 243
ysr@777 244 // <PREDICTION>
ysr@777 245 double _gc_efficiency;
ysr@777 246 // </PREDICTION>
ysr@777 247
ysr@777 248 enum YoungType {
ysr@777 249 NotYoung, // a region is not young
ysr@777 250 Young, // a region is young
ysr@777 251 Survivor // a region is young and it contains
ysr@777 252 // survivors
ysr@777 253 };
ysr@777 254
johnc@2021 255 volatile YoungType _young_type;
ysr@777 256 int _young_index_in_cset;
ysr@777 257 SurvRateGroup* _surv_rate_group;
ysr@777 258 int _age_index;
ysr@777 259
ysr@777 260 // The start of the unmarked area. The unmarked area extends from this
ysr@777 261 // word until the top and/or end of the region, and is the part
ysr@777 262 // of the region for which no marking was done, i.e. objects may
ysr@777 263 // have been allocated in this part since the last mark phase.
ysr@777 264 // "prev" is the top at the start of the last completed marking.
ysr@777 265 // "next" is the top at the start of the in-progress marking (if any.)
ysr@777 266 HeapWord* _prev_top_at_mark_start;
ysr@777 267 HeapWord* _next_top_at_mark_start;
ysr@777 268 // If a collection pause is in progress, this is the top at the start
ysr@777 269 // of that pause.
ysr@777 270
ysr@777 271 // We've counted the marked bytes of objects below here.
ysr@777 272 HeapWord* _top_at_conc_mark_count;
ysr@777 273
ysr@777 274 void init_top_at_mark_start() {
ysr@777 275 assert(_prev_marked_bytes == 0 &&
ysr@777 276 _next_marked_bytes == 0,
ysr@777 277 "Must be called after zero_marked_bytes.");
ysr@777 278 HeapWord* bot = bottom();
ysr@777 279 _prev_top_at_mark_start = bot;
ysr@777 280 _next_top_at_mark_start = bot;
ysr@777 281 _top_at_conc_mark_count = bot;
ysr@777 282 }
ysr@777 283
ysr@777 284 jint _zfs; // A member of ZeroFillState. Protected by ZF_lock.
ysr@777 285 Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
ysr@777 286 // made it so.
ysr@777 287
ysr@777 288 void set_young_type(YoungType new_type) {
ysr@777 289 //assert(_young_type != new_type, "setting the same type" );
ysr@777 290 // TODO: add more assertions here
ysr@777 291 _young_type = new_type;
ysr@777 292 }
ysr@777 293
johnc@1829 294 // Cached attributes used in the collection set policy information
johnc@1829 295
johnc@1829 296 // The RSet length that was added to the total value
johnc@1829 297 // for the collection set.
johnc@1829 298 size_t _recorded_rs_length;
johnc@1829 299
johnc@1829 300 // The predicted elapsed time that was added to total value
johnc@1829 301 // for the collection set.
johnc@1829 302 double _predicted_elapsed_time_ms;
johnc@1829 303
johnc@1829 304 // The predicted number of bytes to copy that was added to
johnc@1829 305 // the total value for the collection set.
johnc@1829 306 size_t _predicted_bytes_to_copy;
johnc@1829 307
ysr@777 308 public:
ysr@777 309 // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
ysr@777 310 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 311 MemRegion mr, bool is_zeroed);
ysr@777 312
tonyp@1377 313 static int LogOfHRGrainBytes;
tonyp@1377 314 static int LogOfHRGrainWords;
tonyp@1377 315 // The normal type of these should be size_t. However, they used to
tonyp@1377 316 // be members of an enum and they are assumed by the
tonyp@1377 317 // compilers to be ints. To avoid going and fixing all their uses,
tonyp@1377 318 // I'm declaring them as ints. I'm not anticipating heap region
tonyp@1377 319 // sizes to reach anywhere near 2g, so using an int here is safe.
tonyp@1377 320 static int GrainBytes;
tonyp@1377 321 static int GrainWords;
tonyp@1377 322 static int CardsPerRegion;
tonyp@1377 323
tonyp@1377 324 // It sets up the heap region size (GrainBytes / GrainWords), as
tonyp@1377 325 // well as other related fields that are based on the heap region
tonyp@1377 326 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
tonyp@1377 327 // CardsPerRegion). All those fields are considered constant
tonyp@1377 328 // throughout the JVM's execution, therefore they should only be set
tonyp@1377 329 // up once during initialization time.
tonyp@1377 330 static void setup_heap_region_size(uintx min_heap_size);
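
  // Illustration (not part of this changeset): a hedged sketch of what
  // setup_heap_region_size() could amount to. TARGET_REGION_NUMBER and
  // MIN_REGION_SIZE are assumed placeholder constants; the real policy
  // lives in the companion .cpp file.
  static void setup_heap_region_size_sketch(uintx min_heap_size) {
    // Aim for roughly TARGET_REGION_NUMBER regions, but never go below the
    // minimum region size.
    uintx region_size = MAX2(min_heap_size / TARGET_REGION_NUMBER,
                             (uintx) MIN_REGION_SIZE);
    int region_size_log = log2_long((jlong) region_size);
    // Round down to a power of two so card/BOT arithmetic stays cheap.
    region_size = ((uintx) 1 << region_size_log);

    LogOfHRGrainBytes = region_size_log;
    LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
    GrainBytes        = (int) region_size;
    GrainWords        = GrainBytes >> LogHeapWordSize;
    CardsPerRegion    = GrainBytes >> CardTableModRefBS::card_shift;
  }
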
ysr@777 331
tonyp@790 332 enum ClaimValues {
tonyp@790 333 InitialClaimValue = 0,
tonyp@790 334 FinalCountClaimValue = 1,
tonyp@790 335 NoteEndClaimValue = 2,
tonyp@825 336 ScrubRemSetClaimValue = 3,
apetrusenko@1061 337 ParVerifyClaimValue = 4,
apetrusenko@1061 338 RebuildRSClaimValue = 5
tonyp@790 339 };
tonyp@790 340
ysr@777 341 // Concurrent refinement requires contiguous heap regions (in which TLABs
ysr@777 342 // might be allocated) to be zero-filled. Each region therefore has a
ysr@777 343 // zero-fill-state.
ysr@777 344 enum ZeroFillState {
ysr@777 345 NotZeroFilled,
ysr@777 346 ZeroFilling,
ysr@777 347 ZeroFilled,
ysr@777 348 Allocated
ysr@777 349 };
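
  // Illustration (not from this changeset): the intended life cycle of these
  // states, as implied by the setters declared further down. ZF_mon locking
  // is omitted and the fill itself is only sketched.
  static void zero_fill_lifecycle_sketch(HeapRegion* hr) {
    hr->set_zero_fill_needed();                         // NotZeroFilled
    hr->set_zero_fill_in_progress(Thread::current());   // ZeroFilling
    Copy::fill_to_words(hr->bottom(), hr->capacity() / HeapWordSize, 0);
    hr->set_zero_fill_complete();                       // ZeroFilled
    hr->set_zero_fill_allocated();                      // Allocated (handed out)
  }
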
ysr@777 350
ysr@777 351 // If this region is a member of a HeapRegionSeq, the index in that
ysr@777 352 // sequence, otherwise -1.
ysr@777 353 int hrs_index() const { return _hrs_index; }
ysr@777 354 void set_hrs_index(int index) { _hrs_index = index; }
ysr@777 355
ysr@777 356 // The number of bytes marked live in the region in the last marking phase.
ysr@777 357 size_t marked_bytes() { return _prev_marked_bytes; }
ysr@777 358 // The number of bytes counted in the next marking.
ysr@777 359 size_t next_marked_bytes() { return _next_marked_bytes; }
ysr@777 360 // The number of bytes live wrt the next marking.
ysr@777 361 size_t next_live_bytes() {
ysr@777 362 return (top() - next_top_at_mark_start())
ysr@777 363 * HeapWordSize
ysr@777 364 + next_marked_bytes();
ysr@777 365 }
ysr@777 366
ysr@777 367 // A lower bound on the amount of garbage bytes in the region.
ysr@777 368 size_t garbage_bytes() {
ysr@777 369 size_t used_at_mark_start_bytes =
ysr@777 370 (prev_top_at_mark_start() - bottom()) * HeapWordSize;
ysr@777 371 assert(used_at_mark_start_bytes >= marked_bytes(),
ysr@777 372 "Can't mark more than we have.");
ysr@777 373 return used_at_mark_start_bytes - marked_bytes();
ysr@777 374 }
ysr@777 375
ysr@777 376 // An upper bound on the number of live bytes in the region.
ysr@777 377 size_t max_live_bytes() { return used() - garbage_bytes(); }
ysr@777 378
ysr@777 379 void add_to_marked_bytes(size_t incr_bytes) {
ysr@777 380 _next_marked_bytes = _next_marked_bytes + incr_bytes;
ysr@777 381 guarantee( _next_marked_bytes <= used(), "invariant" );
ysr@777 382 }
ysr@777 383
ysr@777 384 void zero_marked_bytes() {
ysr@777 385 _prev_marked_bytes = _next_marked_bytes = 0;
ysr@777 386 }
ysr@777 387
tonyp@790 388 bool isHumongous() const { return _humongous_type != NotHumongous; }
tonyp@790 389 bool startsHumongous() const { return _humongous_type == StartsHumongous; }
tonyp@790 390 bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
ysr@777 391 // For a humongous region, the region in which it starts.
ysr@777 392 HeapRegion* humongous_start_region() const {
ysr@777 393 return _humongous_start_region;
ysr@777 394 }
ysr@777 395
ysr@777 396 // Causes the current region to represent a humongous object spanning several
ysr@777 397 // regions; "new_end" is the end of the last region in the sequence.
tonyp@2241 398 void set_startsHumongous(HeapWord* new_end);
ysr@777 399
ysr@777 400 // The regions that continue a humongous sequence should be added using
ysr@777 401 // this method, in increasing address order.
ysr@777 402 void set_continuesHumongous(HeapRegion* start);
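
  // Illustration (not part of this changeset): the ordering the race fix
  // relies on when a humongous sequence is set up. "regions" and its
  // indexing are hypothetical stand-ins for the real allocation code in
  // g1CollectedHeap.cpp.
  static void setup_humongous_sequence_sketch(HeapRegion** regions,
                                              int num_regions,
                                              HeapWord* new_end) {
    HeapRegion* first = regions[0];
    first->set_startsHumongous(new_end);
    for (int i = 1; i < num_regions; i += 1) {
      // Continuation regions are added in increasing address order.
      regions[i]->set_continuesHumongous(first);
    }
    // Only once the BOT and the region metadata are consistent does the
    // caller initialize the object and publish top, so that concurrent
    // refinement never sees a half-initialized BOT (see the Summary above).
  }
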
ysr@777 403
ysr@777 404 // If the region has a remembered set, return a pointer to it.
ysr@777 405 HeapRegionRemSet* rem_set() const {
ysr@777 406 return _rem_set;
ysr@777 407 }
ysr@777 408
ysr@777 409 // True iff the region is in current collection_set.
ysr@777 410 bool in_collection_set() const {
ysr@777 411 return _in_collection_set;
ysr@777 412 }
ysr@777 413 void set_in_collection_set(bool b) {
ysr@777 414 _in_collection_set = b;
ysr@777 415 }
ysr@777 416 HeapRegion* next_in_collection_set() {
ysr@777 417 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 418 assert(_next_in_special_set == NULL ||
ysr@777 419 _next_in_special_set->in_collection_set(),
ysr@777 420 "Malformed CS.");
ysr@777 421 return _next_in_special_set;
ysr@777 422 }
ysr@777 423 void set_next_in_collection_set(HeapRegion* r) {
ysr@777 424 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 425 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
ysr@777 426 _next_in_special_set = r;
ysr@777 427 }
ysr@777 428
ysr@777 429 // True iff it is or has been an allocation region in the current
ysr@777 430 // collection pause.
ysr@777 431 bool is_gc_alloc_region() const {
ysr@777 432 return _is_gc_alloc_region;
ysr@777 433 }
ysr@777 434 void set_is_gc_alloc_region(bool b) {
ysr@777 435 _is_gc_alloc_region = b;
ysr@777 436 }
ysr@777 437 HeapRegion* next_gc_alloc_region() {
ysr@777 438 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777 439 assert(_next_in_special_set == NULL ||
ysr@777 440 _next_in_special_set->is_gc_alloc_region(),
ysr@777 441 "Malformed CS.");
ysr@777 442 return _next_in_special_set;
ysr@777 443 }
ysr@777 444 void set_next_gc_alloc_region(HeapRegion* r) {
ysr@777 445 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777 446 assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
ysr@777 447 _next_in_special_set = r;
ysr@777 448 }
ysr@777 449
ysr@777 450 bool is_on_free_list() {
ysr@777 451 return _is_on_free_list;
ysr@777 452 }
ysr@777 453
ysr@777 454 void set_on_free_list(bool b) {
ysr@777 455 _is_on_free_list = b;
ysr@777 456 }
ysr@777 457
ysr@777 458 HeapRegion* next_from_free_list() {
ysr@777 459 assert(is_on_free_list(),
ysr@777 460 "Should only invoke on free space.");
ysr@777 461 assert(_next_in_special_set == NULL ||
ysr@777 462 _next_in_special_set->is_on_free_list(),
ysr@777 463 "Malformed Free List.");
ysr@777 464 return _next_in_special_set;
ysr@777 465 }
ysr@777 466
ysr@777 467 void set_next_on_free_list(HeapRegion* r) {
ysr@777 468 assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
ysr@777 469 _next_in_special_set = r;
ysr@777 470 }
ysr@777 471
ysr@777 472 bool is_on_unclean_list() {
ysr@777 473 return _is_on_unclean_list;
ysr@777 474 }
ysr@777 475
ysr@777 476 void set_on_unclean_list(bool b);
ysr@777 477
ysr@777 478 HeapRegion* next_from_unclean_list() {
ysr@777 479 assert(is_on_unclean_list(),
ysr@777 480 "Should only invoke on unclean space.");
ysr@777 481 assert(_next_in_special_set == NULL ||
ysr@777 482 _next_in_special_set->is_on_unclean_list(),
ysr@777 483 "Malformed unclean List.");
ysr@777 484 return _next_in_special_set;
ysr@777 485 }
ysr@777 486
ysr@777 487 void set_next_on_unclean_list(HeapRegion* r);
ysr@777 488
ysr@777 489 HeapRegion* get_next_young_region() { return _next_young_region; }
ysr@777 490 void set_next_young_region(HeapRegion* hr) {
ysr@777 491 _next_young_region = hr;
ysr@777 492 }
ysr@777 493
apetrusenko@1231 494 HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
apetrusenko@1231 495 HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
apetrusenko@1231 496 void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
apetrusenko@1231 497 bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
apetrusenko@1231 498
ysr@777 499 // Allows logical separation between objects allocated before and after this call.
ysr@777 500 void save_marks();
ysr@777 501
ysr@777 502 // Reset HR stuff to default values.
ysr@777 503 void hr_clear(bool par, bool clear_space);
ysr@777 504
tonyp@791 505 void initialize(MemRegion mr, bool clear_space, bool mangle_space);
ysr@777 506
ysr@777 507 // Ensure that "this" is zero-filled.
ysr@777 508 void ensure_zero_filled();
ysr@777 509 // This one requires that the calling thread holds ZF_mon.
ysr@777 510 void ensure_zero_filled_locked();
ysr@777 511
ysr@777 512 // Get the start of the unmarked area in this region.
ysr@777 513 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
ysr@777 514 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
ysr@777 515
ysr@777 516 // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
ysr@777 517 // allocated in the current region before the last call to "save_marks".
ysr@777 518 void oop_before_save_marks_iterate(OopClosure* cl);
ysr@777 519
ysr@777 520 // This call determines the "filter kind" argument that will be used for
ysr@777 521 // the next call to "new_dcto_cl" on this region with the "traditional"
ysr@777 522 // signature (i.e., the call below.) The default, in the absence of a
ysr@777 523 // preceding call to this method, is "NoFilterKind", and a call to this
ysr@777 524 // method is necessary for each such call, or else it reverts to the
ysr@777 525 // default.
ysr@777 526 // (This is really ugly, but all other methods I could think of changed a
ysr@777 527 // lot of main-line code for G1.)
ysr@777 528 void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
ysr@777 529 _next_fk = nfk;
ysr@777 530 }
ysr@777 531
ysr@777 532 DirtyCardToOopClosure*
ysr@777 533 new_dcto_closure(OopClosure* cl,
ysr@777 534 CardTableModRefBS::PrecisionStyle precision,
ysr@777 535 HeapRegionDCTOC::FilterKind fk);
ysr@777 536
ysr@777 537 #if WHASSUP
ysr@777 538 DirtyCardToOopClosure*
ysr@777 539 new_dcto_closure(OopClosure* cl,
ysr@777 540 CardTableModRefBS::PrecisionStyle precision,
ysr@777 541 HeapWord* boundary) {
ysr@777 542 assert(boundary == NULL, "This arg doesn't make sense here.");
ysr@777 543 DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
ysr@777 544 _next_fk = HeapRegionDCTOC::NoFilterKind;
ysr@777 545 return res;
ysr@777 546 }
ysr@777 547 #endif
ysr@777 548
ysr@777 549 //
ysr@777 550 // Note the start or end of marking. This tells the heap region
ysr@777 551 // that the collector is about to start or has finished (concurrently)
ysr@777 552 // marking the heap.
ysr@777 553 //
ysr@777 554
ysr@777 555 // Note the start of a marking phase. Record the
ysr@777 556 // start of the unmarked area of the region here.
ysr@777 557 void note_start_of_marking(bool during_initial_mark) {
ysr@777 558 init_top_at_conc_mark_count();
ysr@777 559 _next_marked_bytes = 0;
ysr@777 560 if (during_initial_mark && is_young() && !is_survivor())
ysr@777 561 _next_top_at_mark_start = bottom();
ysr@777 562 else
ysr@777 563 _next_top_at_mark_start = top();
ysr@777 564 }
ysr@777 565
ysr@777 566 // Note the end of a marking phase. Install the start of
ysr@777 567 // the unmarked area that was captured at start of marking.
ysr@777 568 void note_end_of_marking() {
ysr@777 569 _prev_top_at_mark_start = _next_top_at_mark_start;
ysr@777 570 _prev_marked_bytes = _next_marked_bytes;
ysr@777 571 _next_marked_bytes = 0;
ysr@777 572
ysr@777 573 guarantee(_prev_marked_bytes <=
ysr@777 574 (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
ysr@777 575 "invariant");
ysr@777 576 }
ysr@777 577
ysr@777 578 // After an evacuation, we need to update _next_top_at_mark_start
ysr@777 579 // to be the current top. Note this is only valid if we have only
ysr@777 580 // ever evacuated into this region. If we evacuate, allocate, and
ysr@777 581 // then evacuate we are in deep doodoo.
ysr@777 582 void note_end_of_copying() {
tonyp@1456 583 assert(top() >= _next_top_at_mark_start, "Increase only");
tonyp@1456 584 _next_top_at_mark_start = top();
ysr@777 585 }
ysr@777 586
ysr@777 587 // Returns "false" iff no object in the region was allocated when the
ysr@777 588 // last mark phase ended.
ysr@777 589 bool is_marked() { return _prev_top_at_mark_start != bottom(); }
ysr@777 590
ysr@777 591 // If "is_marked()" is true, then this is the index of the region in
ysr@777 592 // an array constructed at the end of marking of the regions in a
ysr@777 593 // "desirability" order.
ysr@777 594 int sort_index() {
ysr@777 595 return _sort_index;
ysr@777 596 }
ysr@777 597 void set_sort_index(int i) {
ysr@777 598 _sort_index = i;
ysr@777 599 }
ysr@777 600
ysr@777 601 void init_top_at_conc_mark_count() {
ysr@777 602 _top_at_conc_mark_count = bottom();
ysr@777 603 }
ysr@777 604
ysr@777 605 void set_top_at_conc_mark_count(HeapWord *cur) {
ysr@777 606 assert(bottom() <= cur && cur <= end(), "Sanity.");
ysr@777 607 _top_at_conc_mark_count = cur;
ysr@777 608 }
ysr@777 609
ysr@777 610 HeapWord* top_at_conc_mark_count() {
ysr@777 611 return _top_at_conc_mark_count;
ysr@777 612 }
ysr@777 613
ysr@777 614 void reset_during_compaction() {
ysr@777 615 guarantee( isHumongous() && startsHumongous(),
ysr@777 616 "should only be called for humongous regions");
ysr@777 617
ysr@777 618 zero_marked_bytes();
ysr@777 619 init_top_at_mark_start();
ysr@777 620 }
ysr@777 621
ysr@777 622 // <PREDICTION>
ysr@777 623 void calc_gc_efficiency(void);
ysr@777 624 double gc_efficiency() { return _gc_efficiency;}
ysr@777 625 // </PREDICTION>
ysr@777 626
ysr@777 627 bool is_young() const { return _young_type != NotYoung; }
ysr@777 628 bool is_survivor() const { return _young_type == Survivor; }
ysr@777 629
ysr@777 630 int young_index_in_cset() const { return _young_index_in_cset; }
ysr@777 631 void set_young_index_in_cset(int index) {
ysr@777 632 assert( (index == -1) || is_young(), "pre-condition" );
ysr@777 633 _young_index_in_cset = index;
ysr@777 634 }
ysr@777 635
ysr@777 636 int age_in_surv_rate_group() {
ysr@777 637 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 638 assert( _age_index > -1, "pre-condition" );
ysr@777 639 return _surv_rate_group->age_in_group(_age_index);
ysr@777 640 }
ysr@777 641
ysr@777 642 void record_surv_words_in_group(size_t words_survived) {
ysr@777 643 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 644 assert( _age_index > -1, "pre-condition" );
ysr@777 645 int age_in_group = age_in_surv_rate_group();
ysr@777 646 _surv_rate_group->record_surviving_words(age_in_group, words_survived);
ysr@777 647 }
ysr@777 648
ysr@777 649 int age_in_surv_rate_group_cond() {
ysr@777 650 if (_surv_rate_group != NULL)
ysr@777 651 return age_in_surv_rate_group();
ysr@777 652 else
ysr@777 653 return -1;
ysr@777 654 }
ysr@777 655
ysr@777 656 SurvRateGroup* surv_rate_group() {
ysr@777 657 return _surv_rate_group;
ysr@777 658 }
ysr@777 659
ysr@777 660 void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
ysr@777 661 assert( surv_rate_group != NULL, "pre-condition" );
ysr@777 662 assert( _surv_rate_group == NULL, "pre-condition" );
ysr@777 663 assert( is_young(), "pre-condition" );
ysr@777 664
ysr@777 665 _surv_rate_group = surv_rate_group;
ysr@777 666 _age_index = surv_rate_group->next_age_index();
ysr@777 667 }
ysr@777 668
ysr@777 669 void uninstall_surv_rate_group() {
ysr@777 670 if (_surv_rate_group != NULL) {
ysr@777 671 assert( _age_index > -1, "pre-condition" );
ysr@777 672 assert( is_young(), "pre-condition" );
ysr@777 673
ysr@777 674 _surv_rate_group = NULL;
ysr@777 675 _age_index = -1;
ysr@777 676 } else {
ysr@777 677 assert( _age_index == -1, "pre-condition" );
ysr@777 678 }
ysr@777 679 }
ysr@777 680
ysr@777 681 void set_young() { set_young_type(Young); }
ysr@777 682
ysr@777 683 void set_survivor() { set_young_type(Survivor); }
ysr@777 684
ysr@777 685 void set_not_young() { set_young_type(NotYoung); }
ysr@777 686
ysr@777 687 // Determine if an object has been allocated since the last
ysr@777 688 // mark performed by the collector. This returns true iff the object
ysr@777 689 // is within the unmarked area of the region.
ysr@777 690 bool obj_allocated_since_prev_marking(oop obj) const {
ysr@777 691 return (HeapWord *) obj >= prev_top_at_mark_start();
ysr@777 692 }
ysr@777 693 bool obj_allocated_since_next_marking(oop obj) const {
ysr@777 694 return (HeapWord *) obj >= next_top_at_mark_start();
ysr@777 695 }
ysr@777 696
ysr@777 697 // For parallel heapRegion traversal.
ysr@777 698 bool claimHeapRegion(int claimValue);
ysr@777 699 jint claim_value() { return _claimed; }
ysr@777 700 // Use this carefully: only when you're sure no one is claiming...
ysr@777 701 void set_claim_value(int claimValue) { _claimed = claimValue; }
ysr@777 702
ysr@777 703 // Returns the "evacuation_failed" property of the region.
ysr@777 704 bool evacuation_failed() { return _evacuation_failed; }
ysr@777 705
ysr@777 706 // Sets the "evacuation_failed" property of the region.
ysr@777 707 void set_evacuation_failed(bool b) {
ysr@777 708 _evacuation_failed = b;
ysr@777 709
ysr@777 710 if (b) {
ysr@777 711 init_top_at_conc_mark_count();
ysr@777 712 _next_marked_bytes = 0;
ysr@777 713 }
ysr@777 714 }
ysr@777 715
ysr@777 716 // Requires that "mr" be entirely within the region.
ysr@777 717 // Apply "cl->do_object" to all objects that intersect with "mr".
ysr@777 718 // If the iteration encounters an unparseable portion of the region,
ysr@777 719 // or if "cl->abort()" is true after a closure application,
ysr@777 720 // terminate the iteration and return the address of the start of the
ysr@777 721 // subregion that isn't done. (The two can be distinguished by querying
ysr@777 722 // "cl->abort()".) Return of "NULL" indicates that the iteration
ysr@777 723 // completed.
ysr@777 724 HeapWord*
ysr@777 725 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
ysr@777 726
johnc@2021 727 // In this version, if filter_young is true and the region
johnc@2021 728 // is a young region then we skip the iteration.
ysr@777 729 HeapWord*
ysr@777 730 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@2021 731 FilterOutOfRegionClosure* cl,
johnc@2021 732 bool filter_young);
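
  // Illustration (not from this file): how a caller, e.g. the refinement /
  // remembered-set scanning code, might use the careful iterator. The card
  // geometry and the closure wiring here are assumptions, not quotes from
  // the real caller.
  static bool scan_card_sketch(HeapRegion* hr, HeapWord* card_start,
                               FilterOutOfRegionClosure* filter_cl) {
    MemRegion card_mr(card_start,
                      card_start + CardTableModRefBS::card_size_in_words);
    HeapWord* stop = hr->oops_on_card_seq_iterate_careful(card_mr, filter_cl,
                                                          true /* filter_young */);
    // NULL means the card was fully processed; a non-NULL result means the
    // region was unparseable (or young and filtered) and the card would
    // typically be deferred and retried later.
    return stop == NULL;
  }
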
ysr@777 733
ysr@777 734 // A version of block start that is guaranteed to find *some* block
ysr@777 735 // boundary at or before "p", but does not attempt object iteration, and may
ysr@777 736 // therefore be used safely when the heap is unparseable.
ysr@777 737 HeapWord* block_start_careful(const void* p) const {
ysr@777 738 return _offsets.block_start_careful(p);
ysr@777 739 }
ysr@777 740
ysr@777 741 // Requires that "addr" is within the region. Returns the start of the
ysr@777 742 // first ("careful") block that starts at or after "addr", or else the
ysr@777 743 // "end" of the region if there is no such block.
ysr@777 744 HeapWord* next_block_start_careful(HeapWord* addr);
ysr@777 745
ysr@777 746 // Returns the zero-fill-state of the current region.
ysr@777 747 ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
ysr@777 748 bool zero_fill_is_allocated() { return _zfs == Allocated; }
ysr@777 749 Thread* zero_filler() { return _zero_filler; }
ysr@777 750
ysr@777 751 // Indicate that the contents of the region are unknown, and therefore
ysr@777 752 // might require zero-filling.
ysr@777 753 void set_zero_fill_needed() {
ysr@777 754 set_zero_fill_state_work(NotZeroFilled);
ysr@777 755 }
ysr@777 756 void set_zero_fill_in_progress(Thread* t) {
ysr@777 757 set_zero_fill_state_work(ZeroFilling);
ysr@777 758 _zero_filler = t;
ysr@777 759 }
ysr@777 760 void set_zero_fill_complete();
ysr@777 761 void set_zero_fill_allocated() {
ysr@777 762 set_zero_fill_state_work(Allocated);
ysr@777 763 }
ysr@777 764
ysr@777 765 void set_zero_fill_state_work(ZeroFillState zfs);
ysr@777 766
ysr@777 767 // This is called when a full collection shrinks the heap.
ysr@777 768 // We want to set the heap region to a value which says
ysr@777 769 // it is no longer part of the heap. For now, we'll let "NotZF" fill
ysr@777 770 // that role.
ysr@777 771 void reset_zero_fill() {
ysr@777 772 set_zero_fill_state_work(NotZeroFilled);
ysr@777 773 _zero_filler = NULL;
ysr@777 774 }
ysr@777 775
johnc@1829 776 size_t recorded_rs_length() const { return _recorded_rs_length; }
johnc@1829 777 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
johnc@1829 778 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
johnc@1829 779
johnc@1829 780 void set_recorded_rs_length(size_t rs_length) {
johnc@1829 781 _recorded_rs_length = rs_length;
johnc@1829 782 }
johnc@1829 783
johnc@1829 784 void set_predicted_elapsed_time_ms(double ms) {
johnc@1829 785 _predicted_elapsed_time_ms = ms;
johnc@1829 786 }
johnc@1829 787
johnc@1829 788 void set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 789 _predicted_bytes_to_copy = bytes;
johnc@1829 790 }
johnc@1829 791
ysr@777 792 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
ysr@777 793 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
ysr@777 794 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
ysr@777 795
ysr@777 796 CompactibleSpace* next_compaction_space() const;
ysr@777 797
ysr@777 798 virtual void reset_after_compaction();
ysr@777 799
ysr@777 800 void print() const;
ysr@777 801 void print_on(outputStream* st) const;
ysr@777 802
tonyp@1246 803 // use_prev_marking == true -> use "prev" marking information,
tonyp@1246 804 // use_prev_marking == false -> use "next" marking information
tonyp@1246 805 // NOTE: Only the "prev" marking information is guaranteed to be
tonyp@1246 806 // consistent most of the time, so most calls to this should use
tonyp@1246 807 // use_prev_marking == true. Currently, there is only one case where
tonyp@1246 808 // this is called with use_prev_marking == false, which is to verify
tonyp@1246 809 // the "next" marking information at the end of remark.
tonyp@1455 810 void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
tonyp@1246 811
tonyp@1246 812 // Override; it uses the "prev" marking information
ysr@777 813 virtual void verify(bool allow_dirty) const;
ysr@777 814
ysr@777 815 #ifdef DEBUG
ysr@777 816 HeapWord* allocate(size_t size);
ysr@777 817 #endif
ysr@777 818 };
ysr@777 819
ysr@777 820 // HeapRegionClosure is used for iterating over regions.
ysr@777 821 // Terminates the iteration when the "doHeapRegion" method returns "true".
ysr@777 822 class HeapRegionClosure : public StackObj {
ysr@777 823 friend class HeapRegionSeq;
ysr@777 824 friend class G1CollectedHeap;
ysr@777 825
ysr@777 826 bool _complete;
ysr@777 827 void incomplete() { _complete = false; }
ysr@777 828
ysr@777 829 public:
ysr@777 830 HeapRegionClosure(): _complete(true) {}
ysr@777 831
ysr@777 832 // Typically called on each region until it returns true.
ysr@777 833 virtual bool doHeapRegion(HeapRegion* r) = 0;
ysr@777 834
ysr@777 835 // True after iteration if the closure was applied to all heap regions
ysr@777 836 // and returned "false" in all cases.
ysr@777 837 bool complete() { return _complete; }
ysr@777 838 };
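
// Illustration (not part of this header): a minimal HeapRegionClosure that
// counts humongous regions. Applying it via G1CollectedHeap's region
// iteration (e.g. heap_region_iterate) is an assumption about the driver,
// not something declared in this file.
class CountHumongousRegionsClosure : public HeapRegionClosure {
  size_t _count;
public:
  CountHumongousRegionsClosure() : _count(0) { }
  bool doHeapRegion(HeapRegion* r) {
    if (r->isHumongous()) {
      _count += 1;
    }
    return false;   // returning false keeps the iteration going over all regions
  }
  size_t count() const { return _count; }
};
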
ysr@777 839
ysr@777 840 // A linked list of heap regions. It leaves the "next" field
ysr@777 841 // unspecified; that's up to subtypes.
apetrusenko@984 842 class RegionList VALUE_OBJ_CLASS_SPEC {
ysr@777 843 protected:
ysr@777 844 virtual HeapRegion* get_next(HeapRegion* chr) = 0;
ysr@777 845 virtual void set_next(HeapRegion* chr,
ysr@777 846 HeapRegion* new_next) = 0;
ysr@777 847
ysr@777 848 HeapRegion* _hd;
ysr@777 849 HeapRegion* _tl;
ysr@777 850 size_t _sz;
ysr@777 851
ysr@777 852 // Protected constructor because this type is only meaningful
ysr@777 853 // when the _get/_set next functions are defined.
ysr@777 854 RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
ysr@777 855 public:
ysr@777 856 void reset() {
ysr@777 857 _hd = NULL;
ysr@777 858 _tl = NULL;
ysr@777 859 _sz = 0;
ysr@777 860 }
ysr@777 861 HeapRegion* hd() { return _hd; }
ysr@777 862 HeapRegion* tl() { return _tl; }
ysr@777 863 size_t sz() { return _sz; }
ysr@777 864 size_t length();
ysr@777 865
ysr@777 866 bool well_formed() {
ysr@777 867 return
ysr@777 868 ((hd() == NULL && tl() == NULL && sz() == 0)
ysr@777 869 || (hd() != NULL && tl() != NULL && sz() > 0))
ysr@777 870 && (sz() == length());
ysr@777 871 }
ysr@777 872 virtual void insert_before_head(HeapRegion* r);
ysr@777 873 void prepend_list(RegionList* new_list);
ysr@777 874 virtual HeapRegion* pop();
ysr@777 875 void dec_sz() { _sz--; }
ysr@777 876 // Requires that "r" is an element of the list, and is not the tail.
ysr@777 877 void delete_after(HeapRegion* r);
ysr@777 878 };
ysr@777 879
ysr@777 880 class EmptyNonHRegionList: public RegionList {
ysr@777 881 protected:
ysr@777 882 // Protected constructor because this type is only meaningful
ysr@777 883 // when the _get/_set next functions are defined.
ysr@777 884 EmptyNonHRegionList() : RegionList() {}
ysr@777 885
ysr@777 886 public:
ysr@777 887 void insert_before_head(HeapRegion* r) {
ysr@777 888 // assert(r->is_empty(), "Better be empty");
ysr@777 889 assert(!r->isHumongous(), "Better not be humongous.");
ysr@777 890 RegionList::insert_before_head(r);
ysr@777 891 }
ysr@777 892 void prepend_list(EmptyNonHRegionList* new_list) {
ysr@777 893 // assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
ysr@777 894 // "Better be empty");
ysr@777 895 assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
ysr@777 896 "Better not be humongous.");
ysr@777 897 // assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
ysr@777 898 // "Better be empty");
ysr@777 899 assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
ysr@777 900 "Better not be humongous.");
ysr@777 901 RegionList::prepend_list(new_list);
ysr@777 902 }
ysr@777 903 };
ysr@777 904
ysr@777 905 class UncleanRegionList: public EmptyNonHRegionList {
ysr@777 906 public:
ysr@777 907 HeapRegion* get_next(HeapRegion* hr) {
ysr@777 908 return hr->next_from_unclean_list();
ysr@777 909 }
ysr@777 910 void set_next(HeapRegion* hr, HeapRegion* new_next) {
ysr@777 911 hr->set_next_on_unclean_list(new_next);
ysr@777 912 }
ysr@777 913
ysr@777 914 UncleanRegionList() : EmptyNonHRegionList() {}
ysr@777 915
ysr@777 916 void insert_before_head(HeapRegion* r) {
ysr@777 917 assert(!r->is_on_free_list(),
ysr@777 918 "Better not already be on free list");
ysr@777 919 assert(!r->is_on_unclean_list(),
ysr@777 920 "Better not already be on unclean list");
ysr@777 921 r->set_zero_fill_needed();
ysr@777 922 r->set_on_unclean_list(true);
ysr@777 923 EmptyNonHRegionList::insert_before_head(r);
ysr@777 924 }
ysr@777 925 void prepend_list(UncleanRegionList* new_list) {
ysr@777 926 assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
ysr@777 927 "Better not already be on free list");
ysr@777 928 assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
ysr@777 929 "Better already be marked as on unclean list");
ysr@777 930 assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
ysr@777 931 "Better not already be on free list");
ysr@777 932 assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
ysr@777 933 "Better already be marked as on unclean list");
ysr@777 934 EmptyNonHRegionList::prepend_list(new_list);
ysr@777 935 }
ysr@777 936 HeapRegion* pop() {
ysr@777 937 HeapRegion* res = RegionList::pop();
ysr@777 938 if (res != NULL) res->set_on_unclean_list(false);
ysr@777 939 return res;
ysr@777 940 }
ysr@777 941 };
ysr@777 942
ysr@777 943 // Local Variables: ***
ysr@777 944 // c-indentation-style: gnu ***
ysr@777 945 // End: ***
ysr@777 946
ysr@777 947 #endif // SERIALGC
