src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      johnc
date:        Mon, 02 Aug 2010 12:51:43 -0700
changeset:   2060:2d160770d2e5
parent:      2021:5cbac8938c4c
child:       2241:72a161e62cc4
permissions: -rw-r--r--

6814437: G1: remove the _new_refs array
Summary: The per-worker _new_refs array is used to hold references that point into the collection set. It is populated during RSet updating and subsequently processed. In the event of an evacuation failure it is processed again to recreate the RSets of regions in the collection set. Remove the per-worker _new_refs array by processing the references directly. Use a DirtyCardQueue to hold the cards containing the references so that the RSets of regions in the collection set can be recreated when handling an evacuation failure.
Reviewed-by: iveresov, jmasa, tonyp

ysr@777 1 /*
trims@1907 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 #ifndef SERIALGC
ysr@777 26
ysr@777 27 // A HeapRegion is the smallest piece of a G1CollectedHeap that
ysr@777 28 // can be collected independently.
ysr@777 29
ysr@777 30 // NOTE: Although a HeapRegion is a Space, its
ysr@777 31 // Space::initDirtyCardClosure method must not be called.
ysr@777 32 // The problem is that the existence of this method breaks
ysr@777 33 // the independence of barrier sets from remembered sets.
ysr@777 34 // The solution is to remove this method from the definition
ysr@777 35 // of a Space.
ysr@777 36
ysr@777 37 class CompactibleSpace;
ysr@777 38 class ContiguousSpace;
ysr@777 39 class HeapRegionRemSet;
ysr@777 40 class HeapRegionRemSetIterator;
ysr@777 41 class HeapRegion;
ysr@777 42
ysr@777 43 // A dirty card to oop closure for heap regions. It
ysr@777 44 // knows how to get the G1 heap and how to use the bitmap
ysr@777 45 // in the concurrent marker used by G1 to filter remembered
ysr@777 46 // sets.
ysr@777 47
ysr@777 48 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
ysr@777 49 public:
ysr@777 50 // Specification of possible DirtyCardToOopClosure filtering.
ysr@777 51 enum FilterKind {
ysr@777 52 NoFilterKind,
ysr@777 53 IntoCSFilterKind,
ysr@777 54 OutOfRegionFilterKind
ysr@777 55 };
ysr@777 56
ysr@777 57 protected:
ysr@777 58 HeapRegion* _hr;
ysr@777 59 FilterKind _fk;
ysr@777 60 G1CollectedHeap* _g1;
ysr@777 61
ysr@777 62 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 63 HeapWord* bottom, HeapWord* top,
ysr@777 64 OopClosure* cl);
ysr@777 65
ysr@777 66 // We don't specialize this for FilteringClosure; filtering is handled by
ysr@777 67 // the "FilterKind" mechanism. But we provide this to avoid a compiler
ysr@777 68 // warning.
ysr@777 69 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 70 HeapWord* bottom, HeapWord* top,
ysr@777 71 FilteringClosure* cl) {
ysr@777 72 HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
ysr@777 73 (OopClosure*)cl);
ysr@777 74 }
ysr@777 75
ysr@777 76 // Get the actual top of the area on which the closure will
ysr@777 77 // operate, given where the top is assumed to be (the end of the
ysr@777 78 // memory region passed to do_MemRegion) and where the object
ysr@777 79 // at the top is assumed to start. For example, an object may
ysr@777 80 // start at the top but actually extend past the assumed top,
ysr@777 81 // in which case the top becomes the end of the object.
ysr@777 82 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
ysr@777 83 return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
ysr@777 84 }
ysr@777 85
ysr@777 86 // Walk the given memory region from bottom to (actual) top
ysr@777 87 // looking for objects and applying the oop closure (_cl) to
ysr@777 88 // them. The base implementation of this treats the area as
ysr@777 89 // blocks, where a block may or may not be an object. Sub-
ysr@777 90 // classes should override this to provide more accurate
ysr@777 91 // or possibly more efficient walking.
ysr@777 92 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
ysr@777 93 Filtering_DCTOC::walk_mem_region(mr, bottom, top);
ysr@777 94 }
ysr@777 95
ysr@777 96 public:
ysr@777 97 HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@777 98 HeapRegion* hr, OopClosure* cl,
ysr@777 99 CardTableModRefBS::PrecisionStyle precision,
ysr@777 100 FilterKind fk);
ysr@777 101 };
ysr@777 102
ysr@777 103
ysr@777 104 // The complicating factor is that BlockOffsetTable diverged
ysr@777 105 // significantly, and we need functionality that is only in the G1 version.
ysr@777 106 // So I copied that code, which led to an alternate G1 version of
ysr@777 107 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
ysr@777 108 // be reconciled, then G1OffsetTableContigSpace could go away.
ysr@777 109
ysr@777 110 // The idea behind time stamps is the following. Doing a save_marks on
ysr@777 111 // all regions at every GC pause is time consuming (if I remember
ysr@777 112 // correctly, 10ms or so). So, we would like to do that only for regions
ysr@777 113 // that are GC alloc regions. To achieve this, we use time
ysr@777 114 // stamps. For every evacuation pause, G1CollectedHeap generates a
ysr@777 115 // unique time stamp (essentially a counter that gets
ysr@777 116 // incremented). Every time we want to call save_marks on a region,
ysr@777 117 // we set the saved_mark_word to top and also copy the current GC
ysr@777 118 // time stamp to the time stamp field of the space. Reading the
ysr@777 119 // saved_mark_word involves checking the time stamp of the
ysr@777 120 // region. If it is the same as the current GC time stamp, then we
ysr@777 121 // can safely read the saved_mark_word field, as it is valid. If the
ysr@777 122 // time stamp of the region is not the same as the current GC time
ysr@777 123 // stamp, then we instead read top, as the saved_mark_word field is
ysr@777 124 // invalid. Time stamps (on the regions and also on the
ysr@777 125 // G1CollectedHeap) are reset at every cleanup (we iterate over
ysr@777 126 // the regions anyway) and at the end of a Full GC. The current scheme
ysr@777 127 // that uses sequential unsigned ints will fail only if we have 4b
ysr@777 128 // evacuation pauses between two cleanups, which is _highly_ unlikely.
ysr@777 129
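// Illustrative sketch (not part of this file): the read side of the
// time-stamp scheme described above.  It assumes the heap exposes its
// current stamp via an accessor (called get_gc_time_stamp() here); the
// real method belongs to the G1OffsetTableContigSpace class declared
// below.
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     unsigned curr = G1CollectedHeap::heap()->get_gc_time_stamp();
//     if (_gc_time_stamp == curr) {
//       // save_marks() ran during this pause, so the field is valid.
//       return ContiguousSpace::saved_mark_word();
//     } else {
//       // Stale stamp: save_marks() was not called on this region during
//       // this pause, so top() is the correct answer.
//       return top();
//     }
//   }
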
ysr@777 130 class G1OffsetTableContigSpace: public ContiguousSpace {
ysr@777 131 friend class VMStructs;
ysr@777 132 protected:
ysr@777 133 G1BlockOffsetArrayContigSpace _offsets;
ysr@777 134 Mutex _par_alloc_lock;
ysr@777 135 volatile unsigned _gc_time_stamp;
ysr@777 136
ysr@777 137 public:
ysr@777 138 // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
ysr@777 139 // assumed to contain zeros.
ysr@777 140 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 141 MemRegion mr, bool is_zeroed = false);
ysr@777 142
ysr@777 143 void set_bottom(HeapWord* value);
ysr@777 144 void set_end(HeapWord* value);
ysr@777 145
ysr@777 146 virtual HeapWord* saved_mark_word() const;
ysr@777 147 virtual void set_saved_mark();
ysr@777 148 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
ysr@777 149
tonyp@791 150 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tonyp@791 151 virtual void clear(bool mangle_space);
ysr@777 152
ysr@777 153 HeapWord* block_start(const void* p);
ysr@777 154 HeapWord* block_start_const(const void* p) const;
ysr@777 155
ysr@777 156 // Add offset table update.
ysr@777 157 virtual HeapWord* allocate(size_t word_size);
ysr@777 158 HeapWord* par_allocate(size_t word_size);
ysr@777 159
ysr@777 160 // MarkSweep support phase3
ysr@777 161 virtual HeapWord* initialize_threshold();
ysr@777 162 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
ysr@777 163
ysr@777 164 virtual void print() const;
ysr@777 165 };
ysr@777 166
ysr@777 167 class HeapRegion: public G1OffsetTableContigSpace {
ysr@777 168 friend class VMStructs;
ysr@777 169 private:
ysr@777 170
tonyp@790 171 enum HumongousType {
tonyp@790 172 NotHumongous = 0,
tonyp@790 173 StartsHumongous,
tonyp@790 174 ContinuesHumongous
tonyp@790 175 };
tonyp@790 176
ysr@777 177 // The next filter kind that should be used for a "new_dcto_cl" call with
ysr@777 178 // the "traditional" signature.
ysr@777 179 HeapRegionDCTOC::FilterKind _next_fk;
ysr@777 180
ysr@777 181 // Requires that the region "mr" be dense with objects, and begin and end
ysr@777 182 // with an object.
ysr@777 183 void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
ysr@777 184
ysr@777 185 // The remembered set for this region.
ysr@777 186 // (Might want to make this "inline" later, to avoid some alloc failure
ysr@777 187 // issues.)
ysr@777 188 HeapRegionRemSet* _rem_set;
ysr@777 189
ysr@777 190 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
ysr@777 191
ysr@777 192 protected:
ysr@777 193 // If this region is a member of a HeapRegionSeq, the index in that
ysr@777 194 // sequence, otherwise -1.
ysr@777 195 int _hrs_index;
ysr@777 196
tonyp@790 197 HumongousType _humongous_type;
ysr@777 198 // For a humongous region, the region in which it starts.
ysr@777 199 HeapRegion* _humongous_start_region;
ysr@777 200 // For the start region of a humongous sequence, its original end().
ysr@777 201 HeapWord* _orig_end;
ysr@777 202
ysr@777 203 // True iff the region is in current collection_set.
ysr@777 204 bool _in_collection_set;
ysr@777 205
ysr@777 206 // True iff the region is on the unclean list, waiting to be zero filled.
ysr@777 207 bool _is_on_unclean_list;
ysr@777 208
ysr@777 209 // True iff the region is on the free list, ready for allocation.
ysr@777 210 bool _is_on_free_list;
ysr@777 211
ysr@777 212 // Is this or has it been an allocation region in the current collection
ysr@777 213 // pause.
ysr@777 214 bool _is_gc_alloc_region;
ysr@777 215
ysr@777 216 // True iff an attempt to evacuate an object in the region failed.
ysr@777 217 bool _evacuation_failed;
ysr@777 218
ysr@777 219 // A heap region may be a member of one of a number of special subsets, each
ysr@777 220 // represented as a linked list through the field below. Currently, these
ysr@777 221 // sets include:
ysr@777 222 // The collection set.
ysr@777 223 // The set of allocation regions used in a collection pause.
ysr@777 224 // Spaces that may contain gray objects.
ysr@777 225 HeapRegion* _next_in_special_set;
ysr@777 226
ysr@777 227 // next region in the young "generation" region set
ysr@777 228 HeapRegion* _next_young_region;
ysr@777 229
apetrusenko@1231 230 // Next region whose cards need cleaning
apetrusenko@1231 231 HeapRegion* _next_dirty_cards_region;
apetrusenko@1231 232
ysr@777 233 // For parallel heapRegion traversal.
ysr@777 234 jint _claimed;
ysr@777 235
ysr@777 236 // We use concurrent marking to determine the amount of live data
ysr@777 237 // in each heap region.
ysr@777 238 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
ysr@777 239 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
ysr@777 240
ysr@777 241 // See "sort_index" method. -1 means it is not in the array.
ysr@777 242 int _sort_index;
ysr@777 243
ysr@777 244 // <PREDICTION>
ysr@777 245 double _gc_efficiency;
ysr@777 246 // </PREDICTION>
ysr@777 247
ysr@777 248 enum YoungType {
ysr@777 249 NotYoung, // a region is not young
ysr@777 250 Young, // a region is young
ysr@777 251 Survivor // a region is young and it contains
ysr@777 252 // survivors
ysr@777 253 };
ysr@777 254
johnc@2021 255 volatile YoungType _young_type;
ysr@777 256 int _young_index_in_cset;
ysr@777 257 SurvRateGroup* _surv_rate_group;
ysr@777 258 int _age_index;
ysr@777 259
ysr@777 260 // The start of the unmarked area. The unmarked area extends from this
ysr@777 261 // word until the top and/or end of the region, and is the part
ysr@777 262 // of the region for which no marking was done, i.e. objects may
ysr@777 263 // have been allocated in this part since the last mark phase.
ysr@777 264 // "prev" is the top at the start of the last completed marking.
ysr@777 265 // "next" is the top at the start of the in-progress marking (if any.)
ysr@777 266 HeapWord* _prev_top_at_mark_start;
ysr@777 267 HeapWord* _next_top_at_mark_start;
ysr@777 268 // If a collection pause is in progress, this is the top at the start
ysr@777 269 // of that pause.
ysr@777 270
ysr@777 271 // We've counted the marked bytes of objects below here.
ysr@777 272 HeapWord* _top_at_conc_mark_count;
ysr@777 273
ysr@777 274 void init_top_at_mark_start() {
ysr@777 275 assert(_prev_marked_bytes == 0 &&
ysr@777 276 _next_marked_bytes == 0,
ysr@777 277 "Must be called after zero_marked_bytes.");
ysr@777 278 HeapWord* bot = bottom();
ysr@777 279 _prev_top_at_mark_start = bot;
ysr@777 280 _next_top_at_mark_start = bot;
ysr@777 281 _top_at_conc_mark_count = bot;
ysr@777 282 }
ysr@777 283
ysr@777 284 jint _zfs; // A member of ZeroFillState. Protected by ZF_lock.
ysr@777 285 Thread* _zero_filler; // If _zfs is ZeroFilling, the thread that (last)
ysr@777 286 // made it so.
ysr@777 287
ysr@777 288 void set_young_type(YoungType new_type) {
ysr@777 289 //assert(_young_type != new_type, "setting the same type" );
ysr@777 290 // TODO: add more assertions here
ysr@777 291 _young_type = new_type;
ysr@777 292 }
ysr@777 293
johnc@1829 294 // Cached attributes used in the collection set policy information
johnc@1829 295
johnc@1829 296 // The RSet length that was added to the total value
johnc@1829 297 // for the collection set.
johnc@1829 298 size_t _recorded_rs_length;
johnc@1829 299
johnc@1829 300 // The predicted elapsed time that was added to the total value
johnc@1829 301 // for the collection set.
johnc@1829 302 double _predicted_elapsed_time_ms;
johnc@1829 303
johnc@1829 304 // The predicted number of bytes to copy that was added to
johnc@1829 305 // the total value for the collection set.
johnc@1829 306 size_t _predicted_bytes_to_copy;
johnc@1829 307
ysr@777 308 public:
ysr@777 309 // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
ysr@777 310 HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 311 MemRegion mr, bool is_zeroed);
ysr@777 312
tonyp@1377 313 static int LogOfHRGrainBytes;
tonyp@1377 314 static int LogOfHRGrainWords;
tonyp@1377 315 // The normal type of these should be size_t. However, they used to
tonyp@1377 316 // be members of an enum before and they are assumed by the
tonyp@1377 317 // compilers to be ints. To avoid going and fixing all their uses,
tonyp@1377 318 // I'm declaring them as ints. I'm not anticipating heap region
tonyp@1377 319 // sizes to reach anywhere near 2g, so using an int here is safe.
tonyp@1377 320 static int GrainBytes;
tonyp@1377 321 static int GrainWords;
tonyp@1377 322 static int CardsPerRegion;
tonyp@1377 323
tonyp@1377 324 // It sets up the heap region size (GrainBytes / GrainWords), as
tonyp@1377 325 // well as other related fields that are based on the heap region
tonyp@1377 326 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
tonyp@1377 327 // CardsPerRegion). All those fields are considered constant
tonyp@1377 328 // throughout the JVM's execution, therefore they should only be set
tonyp@1377 329 // up once during initialization time.
tonyp@1377 330 static void setup_heap_region_size(uintx min_heap_size);
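
// Illustrative sketch (not part of this file): once the region size has
// been chosen (a power of two, 2^region_size_log bytes), the fields
// above follow from it directly.  The real setup_heap_region_size()
// also derives the size itself from min_heap_size and the
// G1HeapRegionSize flag; only the derived fields are shown here.
//
//   LogOfHRGrainBytes = region_size_log;
//   LogOfHRGrainWords = LogOfHRGrainBytes - LogHeapWordSize;
//   GrainBytes        = (int) region_size;
//   GrainWords        = GrainBytes >> LogHeapWordSize;
//   CardsPerRegion    = GrainBytes >> CardTableModRefBS::card_shift;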
ysr@777 331
tonyp@790 332 enum ClaimValues {
tonyp@790 333 InitialClaimValue = 0,
tonyp@790 334 FinalCountClaimValue = 1,
tonyp@790 335 NoteEndClaimValue = 2,
tonyp@825 336 ScrubRemSetClaimValue = 3,
apetrusenko@1061 337 ParVerifyClaimValue = 4,
apetrusenko@1061 338 RebuildRSClaimValue = 5
tonyp@790 339 };
tonyp@790 340
ysr@777 341 // Concurrent refinement requires contiguous heap regions (in which TLABs
ysr@777 342 // might be allocated) to be zero-filled. Each region therefore has a
ysr@777 343 // zero-fill-state.
ysr@777 344 enum ZeroFillState {
ysr@777 345 NotZeroFilled,
ysr@777 346 ZeroFilling,
ysr@777 347 ZeroFilled,
ysr@777 348 Allocated
ysr@777 349 };
ysr@777 350
ysr@777 351 // If this region is a member of a HeapRegionSeq, the index in that
ysr@777 352 // sequence, otherwise -1.
ysr@777 353 int hrs_index() const { return _hrs_index; }
ysr@777 354 void set_hrs_index(int index) { _hrs_index = index; }
ysr@777 355
ysr@777 356 // The number of bytes marked live in the region in the last marking phase.
ysr@777 357 size_t marked_bytes() { return _prev_marked_bytes; }
ysr@777 358 // The number of bytes counted in the next marking.
ysr@777 359 size_t next_marked_bytes() { return _next_marked_bytes; }
ysr@777 360 // The number of bytes live wrt the next marking.
ysr@777 361 size_t next_live_bytes() {
ysr@777 362 return (top() - next_top_at_mark_start())
ysr@777 363 * HeapWordSize
ysr@777 364 + next_marked_bytes();
ysr@777 365 }
ysr@777 366
ysr@777 367 // A lower bound on the amount of garbage bytes in the region.
ysr@777 368 size_t garbage_bytes() {
ysr@777 369 size_t used_at_mark_start_bytes =
ysr@777 370 (prev_top_at_mark_start() - bottom()) * HeapWordSize;
ysr@777 371 assert(used_at_mark_start_bytes >= marked_bytes(),
ysr@777 372 "Can't mark more than we have.");
ysr@777 373 return used_at_mark_start_bytes - marked_bytes();
ysr@777 374 }
ysr@777 375
ysr@777 376 // An upper bound on the number of live bytes in the region.
ysr@777 377 size_t max_live_bytes() { return used() - garbage_bytes(); }
ysr@777 378
ysr@777 379 void add_to_marked_bytes(size_t incr_bytes) {
ysr@777 380 _next_marked_bytes = _next_marked_bytes + incr_bytes;
ysr@777 381 guarantee( _next_marked_bytes <= used(), "invariant" );
ysr@777 382 }
ysr@777 383
ysr@777 384 void zero_marked_bytes() {
ysr@777 385 _prev_marked_bytes = _next_marked_bytes = 0;
ysr@777 386 }
ysr@777 387
tonyp@790 388 bool isHumongous() const { return _humongous_type != NotHumongous; }
tonyp@790 389 bool startsHumongous() const { return _humongous_type == StartsHumongous; }
tonyp@790 390 bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
ysr@777 391 // For a humongous region, the region in which it starts.
ysr@777 392 HeapRegion* humongous_start_region() const {
ysr@777 393 return _humongous_start_region;
ysr@777 394 }
ysr@777 395
ysr@777 396 // Causes the current region to represent a humongous object spanning "n"
ysr@777 397 // regions.
ysr@777 398 virtual void set_startsHumongous();
ysr@777 399
ysr@777 400 // The regions that continue a humongous sequence should be added using
ysr@777 401 // this method, in increasing address order.
ysr@777 402 void set_continuesHumongous(HeapRegion* start);
ysr@777 403
ysr@777 404 void add_continuingHumongousRegion(HeapRegion* cont);
ysr@777 405
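// Illustrative sketch (not part of this file): wiring up a humongous
// sequence of "n" contiguous regions with the two calls above;
// "regions" is a hypothetical array ordered by increasing address.
//
//   void make_humongous_sketch(HeapRegion** regions, int n) {
//     regions[0]->set_startsHumongous();
//     for (int i = 1; i < n; i++) {
//       // Continuation regions are added in increasing address order,
//       // each pointing back at the start region.
//       regions[i]->set_continuesHumongous(regions[0]);
//     }
//   }
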
ysr@777 406 // If the region has a remembered set, return a pointer to it.
ysr@777 407 HeapRegionRemSet* rem_set() const {
ysr@777 408 return _rem_set;
ysr@777 409 }
ysr@777 410
ysr@777 411 // True iff the region is in current collection_set.
ysr@777 412 bool in_collection_set() const {
ysr@777 413 return _in_collection_set;
ysr@777 414 }
ysr@777 415 void set_in_collection_set(bool b) {
ysr@777 416 _in_collection_set = b;
ysr@777 417 }
ysr@777 418 HeapRegion* next_in_collection_set() {
ysr@777 419 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 420 assert(_next_in_special_set == NULL ||
ysr@777 421 _next_in_special_set->in_collection_set(),
ysr@777 422 "Malformed CS.");
ysr@777 423 return _next_in_special_set;
ysr@777 424 }
ysr@777 425 void set_next_in_collection_set(HeapRegion* r) {
ysr@777 426 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 427 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
ysr@777 428 _next_in_special_set = r;
ysr@777 429 }
ysr@777 430
ysr@777 431 // True iff it is or has been an allocation region in the current
ysr@777 432 // collection pause.
ysr@777 433 bool is_gc_alloc_region() const {
ysr@777 434 return _is_gc_alloc_region;
ysr@777 435 }
ysr@777 436 void set_is_gc_alloc_region(bool b) {
ysr@777 437 _is_gc_alloc_region = b;
ysr@777 438 }
ysr@777 439 HeapRegion* next_gc_alloc_region() {
ysr@777 440 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777 441 assert(_next_in_special_set == NULL ||
ysr@777 442 _next_in_special_set->is_gc_alloc_region(),
ysr@777 443 "Malformed CS.");
ysr@777 444 return _next_in_special_set;
ysr@777 445 }
ysr@777 446 void set_next_gc_alloc_region(HeapRegion* r) {
ysr@777 447 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777 448 assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
ysr@777 449 _next_in_special_set = r;
ysr@777 450 }
ysr@777 451
ysr@777 452 bool is_on_free_list() {
ysr@777 453 return _is_on_free_list;
ysr@777 454 }
ysr@777 455
ysr@777 456 void set_on_free_list(bool b) {
ysr@777 457 _is_on_free_list = b;
ysr@777 458 }
ysr@777 459
ysr@777 460 HeapRegion* next_from_free_list() {
ysr@777 461 assert(is_on_free_list(),
ysr@777 462 "Should only invoke on free space.");
ysr@777 463 assert(_next_in_special_set == NULL ||
ysr@777 464 _next_in_special_set->is_on_free_list(),
ysr@777 465 "Malformed Free List.");
ysr@777 466 return _next_in_special_set;
ysr@777 467 }
ysr@777 468
ysr@777 469 void set_next_on_free_list(HeapRegion* r) {
ysr@777 470 assert(r == NULL || r->is_on_free_list(), "Malformed free list.");
ysr@777 471 _next_in_special_set = r;
ysr@777 472 }
ysr@777 473
ysr@777 474 bool is_on_unclean_list() {
ysr@777 475 return _is_on_unclean_list;
ysr@777 476 }
ysr@777 477
ysr@777 478 void set_on_unclean_list(bool b);
ysr@777 479
ysr@777 480 HeapRegion* next_from_unclean_list() {
ysr@777 481 assert(is_on_unclean_list(),
ysr@777 482 "Should only invoke on unclean space.");
ysr@777 483 assert(_next_in_special_set == NULL ||
ysr@777 484 _next_in_special_set->is_on_unclean_list(),
ysr@777 485 "Malformed unclean List.");
ysr@777 486 return _next_in_special_set;
ysr@777 487 }
ysr@777 488
ysr@777 489 void set_next_on_unclean_list(HeapRegion* r);
ysr@777 490
ysr@777 491 HeapRegion* get_next_young_region() { return _next_young_region; }
ysr@777 492 void set_next_young_region(HeapRegion* hr) {
ysr@777 493 _next_young_region = hr;
ysr@777 494 }
ysr@777 495
apetrusenko@1231 496 HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
apetrusenko@1231 497 HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
apetrusenko@1231 498 void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
apetrusenko@1231 499 bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
apetrusenko@1231 500
ysr@777 501 // Allows logical separation between objects allocated before and after a call to save_marks().
ysr@777 502 void save_marks();
ysr@777 503
ysr@777 504 // Reset HR stuff to default values.
ysr@777 505 void hr_clear(bool par, bool clear_space);
ysr@777 506
tonyp@791 507 void initialize(MemRegion mr, bool clear_space, bool mangle_space);
ysr@777 508
ysr@777 509 // Ensure that "this" is zero-filled.
ysr@777 510 void ensure_zero_filled();
ysr@777 511 // This one requires that the calling thread holds ZF_mon.
ysr@777 512 void ensure_zero_filled_locked();
ysr@777 513
ysr@777 514 // Get the start of the unmarked area in this region.
ysr@777 515 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
ysr@777 516 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
ysr@777 517
ysr@777 518 // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
ysr@777 519 // allocated in the current region before the last call to "save_mark".
ysr@777 520 void oop_before_save_marks_iterate(OopClosure* cl);
ysr@777 521
ysr@777 522 // This call determines the "filter kind" argument that will be used for
ysr@777 523 // the next call to "new_dcto_cl" on this region with the "traditional"
ysr@777 524 // signature (i.e., the call below.) The default, in the absence of a
ysr@777 525 // preceding call to this method, is "NoFilterKind", and a call to this
ysr@777 526 // method is necessary for each such call, or else it reverts to the
ysr@777 527 // default.
ysr@777 528 // (This is really ugly, but all other methods I could think of changed a
ysr@777 529 // lot of main-line code for G1.)
ysr@777 530 void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
ysr@777 531 _next_fk = nfk;
ysr@777 532 }
ysr@777 533
ysr@777 534 DirtyCardToOopClosure*
ysr@777 535 new_dcto_closure(OopClosure* cl,
ysr@777 536 CardTableModRefBS::PrecisionStyle precision,
ysr@777 537 HeapRegionDCTOC::FilterKind fk);
ysr@777 538
ysr@777 539 #if WHASSUP
ysr@777 540 DirtyCardToOopClosure*
ysr@777 541 new_dcto_closure(OopClosure* cl,
ysr@777 542 CardTableModRefBS::PrecisionStyle precision,
ysr@777 543 HeapWord* boundary) {
ysr@777 544 assert(boundary == NULL, "This arg doesn't make sense here.");
ysr@777 545 DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
ysr@777 546 _next_fk = HeapRegionDCTOC::NoFilterKind;
ysr@777 547 return res;
ysr@777 548 }
ysr@777 549 #endif
ysr@777 550
ysr@777 551 //
ysr@777 552 // Note the start or end of marking. This tells the heap region
ysr@777 553 // that the collector is about to start or has finished (concurrently)
ysr@777 554 // marking the heap.
ysr@777 555 //
ysr@777 556
ysr@777 557 // Note the start of a marking phase. Record the
ysr@777 558 // start of the unmarked area of the region here.
ysr@777 559 void note_start_of_marking(bool during_initial_mark) {
ysr@777 560 init_top_at_conc_mark_count();
ysr@777 561 _next_marked_bytes = 0;
ysr@777 562 if (during_initial_mark && is_young() && !is_survivor())
ysr@777 563 _next_top_at_mark_start = bottom();
ysr@777 564 else
ysr@777 565 _next_top_at_mark_start = top();
ysr@777 566 }
ysr@777 567
ysr@777 568 // Note the end of a marking phase. Install the start of
ysr@777 569 // the unmarked area that was captured at start of marking.
ysr@777 570 void note_end_of_marking() {
ysr@777 571 _prev_top_at_mark_start = _next_top_at_mark_start;
ysr@777 572 _prev_marked_bytes = _next_marked_bytes;
ysr@777 573 _next_marked_bytes = 0;
ysr@777 574
ysr@777 575 guarantee(_prev_marked_bytes <=
ysr@777 576 (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
ysr@777 577 "invariant");
ysr@777 578 }
ysr@777 579
ysr@777 580 // After an evacuation, we need to update _next_top_at_mark_start
ysr@777 581 // to be the current top. Note this is only valid if we have only
ysr@777 582 // ever evacuated into this region. If we evacuate, allocate, and
ysr@777 583 // then evacuate we are in deep doodoo.
ysr@777 584 void note_end_of_copying() {
tonyp@1456 585 assert(top() >= _next_top_at_mark_start, "Increase only");
tonyp@1456 586 _next_top_at_mark_start = top();
ysr@777 587 }
ysr@777 588
ysr@777 589 // Returns "false" iff no object in the region was allocated when the
ysr@777 590 // last mark phase ended.
ysr@777 591 bool is_marked() { return _prev_top_at_mark_start != bottom(); }
ysr@777 592
ysr@777 593 // If "is_marked()" is true, then this is the index of the region in
ysr@777 594 // an array constructed at the end of marking of the regions in a
ysr@777 595 // "desirability" order.
ysr@777 596 int sort_index() {
ysr@777 597 return _sort_index;
ysr@777 598 }
ysr@777 599 void set_sort_index(int i) {
ysr@777 600 _sort_index = i;
ysr@777 601 }
ysr@777 602
ysr@777 603 void init_top_at_conc_mark_count() {
ysr@777 604 _top_at_conc_mark_count = bottom();
ysr@777 605 }
ysr@777 606
ysr@777 607 void set_top_at_conc_mark_count(HeapWord *cur) {
ysr@777 608 assert(bottom() <= cur && cur <= end(), "Sanity.");
ysr@777 609 _top_at_conc_mark_count = cur;
ysr@777 610 }
ysr@777 611
ysr@777 612 HeapWord* top_at_conc_mark_count() {
ysr@777 613 return _top_at_conc_mark_count;
ysr@777 614 }
ysr@777 615
ysr@777 616 void reset_during_compaction() {
ysr@777 617 guarantee( isHumongous() && startsHumongous(),
ysr@777 618 "should only be called for humongous regions");
ysr@777 619
ysr@777 620 zero_marked_bytes();
ysr@777 621 init_top_at_mark_start();
ysr@777 622 }
ysr@777 623
ysr@777 624 // <PREDICTION>
ysr@777 625 void calc_gc_efficiency(void);
ysr@777 626 double gc_efficiency() { return _gc_efficiency;}
ysr@777 627 // </PREDICTION>
ysr@777 628
ysr@777 629 bool is_young() const { return _young_type != NotYoung; }
ysr@777 630 bool is_survivor() const { return _young_type == Survivor; }
ysr@777 631
ysr@777 632 int young_index_in_cset() const { return _young_index_in_cset; }
ysr@777 633 void set_young_index_in_cset(int index) {
ysr@777 634 assert( (index == -1) || is_young(), "pre-condition" );
ysr@777 635 _young_index_in_cset = index;
ysr@777 636 }
ysr@777 637
ysr@777 638 int age_in_surv_rate_group() {
ysr@777 639 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 640 assert( _age_index > -1, "pre-condition" );
ysr@777 641 return _surv_rate_group->age_in_group(_age_index);
ysr@777 642 }
ysr@777 643
ysr@777 644 void record_surv_words_in_group(size_t words_survived) {
ysr@777 645 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 646 assert( _age_index > -1, "pre-condition" );
ysr@777 647 int age_in_group = age_in_surv_rate_group();
ysr@777 648 _surv_rate_group->record_surviving_words(age_in_group, words_survived);
ysr@777 649 }
ysr@777 650
ysr@777 651 int age_in_surv_rate_group_cond() {
ysr@777 652 if (_surv_rate_group != NULL)
ysr@777 653 return age_in_surv_rate_group();
ysr@777 654 else
ysr@777 655 return -1;
ysr@777 656 }
ysr@777 657
ysr@777 658 SurvRateGroup* surv_rate_group() {
ysr@777 659 return _surv_rate_group;
ysr@777 660 }
ysr@777 661
ysr@777 662 void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
ysr@777 663 assert( surv_rate_group != NULL, "pre-condition" );
ysr@777 664 assert( _surv_rate_group == NULL, "pre-condition" );
ysr@777 665 assert( is_young(), "pre-condition" );
ysr@777 666
ysr@777 667 _surv_rate_group = surv_rate_group;
ysr@777 668 _age_index = surv_rate_group->next_age_index();
ysr@777 669 }
ysr@777 670
ysr@777 671 void uninstall_surv_rate_group() {
ysr@777 672 if (_surv_rate_group != NULL) {
ysr@777 673 assert( _age_index > -1, "pre-condition" );
ysr@777 674 assert( is_young(), "pre-condition" );
ysr@777 675
ysr@777 676 _surv_rate_group = NULL;
ysr@777 677 _age_index = -1;
ysr@777 678 } else {
ysr@777 679 assert( _age_index == -1, "pre-condition" );
ysr@777 680 }
ysr@777 681 }
ysr@777 682
ysr@777 683 void set_young() { set_young_type(Young); }
ysr@777 684
ysr@777 685 void set_survivor() { set_young_type(Survivor); }
ysr@777 686
ysr@777 687 void set_not_young() { set_young_type(NotYoung); }
ysr@777 688
ysr@777 689 // Determine if an object has been allocated since the last
ysr@777 690 // mark performed by the collector. This returns true iff the object
ysr@777 691 // is within the unmarked area of the region.
ysr@777 692 bool obj_allocated_since_prev_marking(oop obj) const {
ysr@777 693 return (HeapWord *) obj >= prev_top_at_mark_start();
ysr@777 694 }
ysr@777 695 bool obj_allocated_since_next_marking(oop obj) const {
ysr@777 696 return (HeapWord *) obj >= next_top_at_mark_start();
ysr@777 697 }
ysr@777 698
ysr@777 699 // For parallel heapRegion traversal.
ysr@777 700 bool claimHeapRegion(int claimValue);
ysr@777 701 jint claim_value() { return _claimed; }
ysr@777 702 // Use this carefully: only when you're sure no one is claiming...
ysr@777 703 void set_claim_value(int claimValue) { _claimed = claimValue; }
ysr@777 704
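// Illustrative sketch (not part of this file): how a parallel phase can
// use the claim protocol above.  Each worker visits every region in
// "regions" (a hypothetical array) but only processes the regions it
// successfully claims, so each region is processed exactly once.
//
//   void claim_and_apply_sketch(HeapRegion** regions, size_t n,
//                               HeapRegionClosure* cl, int claim_value) {
//     for (size_t i = 0; i < n; i++) {
//       HeapRegion* r = regions[i];
//       if (r->claimHeapRegion(claim_value)) {
//         // This worker won the race for r; other workers will fail the
//         // claim and skip it.
//         cl->doHeapRegion(r);
//       }
//     }
//   }
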
ysr@777 705 // Returns the "evacuation_failed" property of the region.
ysr@777 706 bool evacuation_failed() { return _evacuation_failed; }
ysr@777 707
ysr@777 708 // Sets the "evacuation_failed" property of the region.
ysr@777 709 void set_evacuation_failed(bool b) {
ysr@777 710 _evacuation_failed = b;
ysr@777 711
ysr@777 712 if (b) {
ysr@777 713 init_top_at_conc_mark_count();
ysr@777 714 _next_marked_bytes = 0;
ysr@777 715 }
ysr@777 716 }
ysr@777 717
ysr@777 718 // Requires that "mr" be entirely within the region.
ysr@777 719 // Apply "cl->do_object" to all objects that intersect with "mr".
ysr@777 720 // If the iteration encounters an unparseable portion of the region,
ysr@777 721 // or if "cl->abort()" is true after a closure application,
ysr@777 722 // terminate the iteration and return the address of the start of the
ysr@777 723 // subregion that isn't done. (The two can be distinguished by querying
ysr@777 724 // "cl->abort()".) Return of "NULL" indicates that the iteration
ysr@777 725 // completed.
ysr@777 726 HeapWord*
ysr@777 727 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
ysr@777 728
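// Illustrative sketch (not part of this file): interpreting the return
// value of object_iterate_mem_careful() as described above.
//
//   HeapWord* iterate_carefully_sketch(HeapRegion* hr, MemRegion mr,
//                                      ObjectClosure* cl) {
//     HeapWord* rest = hr->object_iterate_mem_careful(mr, cl);
//     if (rest != NULL) {
//       // Iteration stopped early; "rest" is the start of the subregion
//       // that was not processed.  Per the comment above, querying the
//       // closure (cl->abort()) tells whether it aborted or whether an
//       // unparseable part of the region was hit.
//     }
//     return rest;   // NULL means all of "mr" was processed
//   }
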
johnc@2021 729 // In this version - if filter_young is true and the region
johnc@2021 730 // is a young region then we skip the iteration.
ysr@777 731 HeapWord*
ysr@777 732 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@2021 733 FilterOutOfRegionClosure* cl,
johnc@2021 734 bool filter_young);
ysr@777 735
ysr@777 736 // The region "mr" is entirely in "this", and starts and ends at block
ysr@777 737 // boundaries. The caller declares that all the contained blocks are
ysr@777 738 // coalesced into one.
ysr@777 739 void declare_filled_region_to_BOT(MemRegion mr) {
ysr@777 740 _offsets.single_block(mr.start(), mr.end());
ysr@777 741 }
ysr@777 742
ysr@777 743 // A version of block start that is guaranteed to find *some* block
ysr@777 744 // boundary at or before "p", but does not do object iteration, and may
ysr@777 745 // therefore be used safely when the heap is unparseable.
ysr@777 746 HeapWord* block_start_careful(const void* p) const {
ysr@777 747 return _offsets.block_start_careful(p);
ysr@777 748 }
ysr@777 749
ysr@777 750 // Requires that "addr" is within the region. Returns the start of the
ysr@777 751 // first ("careful") block that starts at or after "addr", or else the
ysr@777 752 // "end" of the region if there is no such block.
ysr@777 753 HeapWord* next_block_start_careful(HeapWord* addr);
ysr@777 754
ysr@777 755 // Returns the zero-fill-state of the current region.
ysr@777 756 ZeroFillState zero_fill_state() { return (ZeroFillState)_zfs; }
ysr@777 757 bool zero_fill_is_allocated() { return _zfs == Allocated; }
ysr@777 758 Thread* zero_filler() { return _zero_filler; }
ysr@777 759
ysr@777 760 // Indicate that the contents of the region are unknown, and therefore
ysr@777 761 // might require zero-filling.
ysr@777 762 void set_zero_fill_needed() {
ysr@777 763 set_zero_fill_state_work(NotZeroFilled);
ysr@777 764 }
ysr@777 765 void set_zero_fill_in_progress(Thread* t) {
ysr@777 766 set_zero_fill_state_work(ZeroFilling);
ysr@777 767 _zero_filler = t;
ysr@777 768 }
ysr@777 769 void set_zero_fill_complete();
ysr@777 770 void set_zero_fill_allocated() {
ysr@777 771 set_zero_fill_state_work(Allocated);
ysr@777 772 }
ysr@777 773
ysr@777 774 void set_zero_fill_state_work(ZeroFillState zfs);
ysr@777 775
ysr@777 776 // This is called when a full collection shrinks the heap.
ysr@777 777 // We want to set the heap region to a value which says
ysr@777 778 // it is no longer part of the heap. For now, we'll let "NotZF" fill
ysr@777 779 // that role.
ysr@777 780 void reset_zero_fill() {
ysr@777 781 set_zero_fill_state_work(NotZeroFilled);
ysr@777 782 _zero_filler = NULL;
ysr@777 783 }
ysr@777 784
johnc@1829 785 size_t recorded_rs_length() const { return _recorded_rs_length; }
johnc@1829 786 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
johnc@1829 787 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
johnc@1829 788
johnc@1829 789 void set_recorded_rs_length(size_t rs_length) {
johnc@1829 790 _recorded_rs_length = rs_length;
johnc@1829 791 }
johnc@1829 792
johnc@1829 793 void set_predicted_elapsed_time_ms(double ms) {
johnc@1829 794 _predicted_elapsed_time_ms = ms;
johnc@1829 795 }
johnc@1829 796
johnc@1829 797 void set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 798 _predicted_bytes_to_copy = bytes;
johnc@1829 799 }
johnc@1829 800
ysr@777 801 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
ysr@777 802 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
ysr@777 803 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
ysr@777 804
ysr@777 805 CompactibleSpace* next_compaction_space() const;
ysr@777 806
ysr@777 807 virtual void reset_after_compaction();
ysr@777 808
ysr@777 809 void print() const;
ysr@777 810 void print_on(outputStream* st) const;
ysr@777 811
tonyp@1246 812 // use_prev_marking == true -> use "prev" marking information,
tonyp@1246 813 // use_prev_marking == false -> use "next" marking information
tonyp@1246 814 // NOTE: Only the "prev" marking information is guaranteed to be
tonyp@1246 815 // consistent most of the time, so most calls to this should use
tonyp@1246 816 // use_prev_marking == true. Currently, there is only one case where
tonyp@1246 817 // this is called with use_prev_marking == false, which is to verify
tonyp@1246 818 // the "next" marking information at the end of remark.
tonyp@1455 819 void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;
tonyp@1246 820
tonyp@1246 821 // Override; it uses the "prev" marking information
ysr@777 822 virtual void verify(bool allow_dirty) const;
ysr@777 823
ysr@777 824 #ifdef DEBUG
ysr@777 825 HeapWord* allocate(size_t size);
ysr@777 826 #endif
ysr@777 827 };
ysr@777 828
ysr@777 829 // HeapRegionClosure is used for iterating over regions.
ysr@777 830 // Terminates the iteration when the "doHeapRegion" method returns "true".
ysr@777 831 class HeapRegionClosure : public StackObj {
ysr@777 832 friend class HeapRegionSeq;
ysr@777 833 friend class G1CollectedHeap;
ysr@777 834
ysr@777 835 bool _complete;
ysr@777 836 void incomplete() { _complete = false; }
ysr@777 837
ysr@777 838 public:
ysr@777 839 HeapRegionClosure(): _complete(true) {}
ysr@777 840
ysr@777 841 // Typically called on each region until it returns true.
ysr@777 842 virtual bool doHeapRegion(HeapRegion* r) = 0;
ysr@777 843
ysr@777 844 // True after iteration if the closure was applied to all heap regions
ysr@777 845 // and returned "false" in all cases.
ysr@777 846 bool complete() { return _complete; }
ysr@777 847 };
ysr@777 848
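// Illustrative sketch (not part of this file): a minimal closure that
// counts regions, showing the protocol above.  Returning false from
// doHeapRegion() keeps the iteration going; returning true terminates
// it early and leaves complete() false.
//
//   class CountRegionsClosureSketch : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountRegionsClosureSketch() : _count(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       _count++;
//       return false;   // never terminate the iteration early
//     }
//     size_t count() const { return _count; }
//   };
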
ysr@777 849 // A linked list of heap regions. It leaves the "next" field
ysr@777 850 // unspecified; that's up to subtypes.
apetrusenko@984 851 class RegionList VALUE_OBJ_CLASS_SPEC {
ysr@777 852 protected:
ysr@777 853 virtual HeapRegion* get_next(HeapRegion* chr) = 0;
ysr@777 854 virtual void set_next(HeapRegion* chr,
ysr@777 855 HeapRegion* new_next) = 0;
ysr@777 856
ysr@777 857 HeapRegion* _hd;
ysr@777 858 HeapRegion* _tl;
ysr@777 859 size_t _sz;
ysr@777 860
ysr@777 861 // Protected constructor because this type is only meaningful
ysr@777 862 // when the _get/_set next functions are defined.
ysr@777 863 RegionList() : _hd(NULL), _tl(NULL), _sz(0) {}
ysr@777 864 public:
ysr@777 865 void reset() {
ysr@777 866 _hd = NULL;
ysr@777 867 _tl = NULL;
ysr@777 868 _sz = 0;
ysr@777 869 }
ysr@777 870 HeapRegion* hd() { return _hd; }
ysr@777 871 HeapRegion* tl() { return _tl; }
ysr@777 872 size_t sz() { return _sz; }
ysr@777 873 size_t length();
ysr@777 874
ysr@777 875 bool well_formed() {
ysr@777 876 return
ysr@777 877 ((hd() == NULL && tl() == NULL && sz() == 0)
ysr@777 878 || (hd() != NULL && tl() != NULL && sz() > 0))
ysr@777 879 && (sz() == length());
ysr@777 880 }
ysr@777 881 virtual void insert_before_head(HeapRegion* r);
ysr@777 882 void prepend_list(RegionList* new_list);
ysr@777 883 virtual HeapRegion* pop();
ysr@777 884 void dec_sz() { _sz--; }
ysr@777 885 // Requires that "r" is an element of the list, and is not the tail.
ysr@777 886 void delete_after(HeapRegion* r);
ysr@777 887 };
ysr@777 888
ysr@777 889 class EmptyNonHRegionList: public RegionList {
ysr@777 890 protected:
ysr@777 891 // Protected constructor because this type is only meaningful
ysr@777 892 // when the _get/_set next functions are defined.
ysr@777 893 EmptyNonHRegionList() : RegionList() {}
ysr@777 894
ysr@777 895 public:
ysr@777 896 void insert_before_head(HeapRegion* r) {
ysr@777 897 // assert(r->is_empty(), "Better be empty");
ysr@777 898 assert(!r->isHumongous(), "Better not be humongous.");
ysr@777 899 RegionList::insert_before_head(r);
ysr@777 900 }
ysr@777 901 void prepend_list(EmptyNonHRegionList* new_list) {
ysr@777 902 // assert(new_list->hd() == NULL || new_list->hd()->is_empty(),
ysr@777 903 // "Better be empty");
ysr@777 904 assert(new_list->hd() == NULL || !new_list->hd()->isHumongous(),
ysr@777 905 "Better not be humongous.");
ysr@777 906 // assert(new_list->tl() == NULL || new_list->tl()->is_empty(),
ysr@777 907 // "Better be empty");
ysr@777 908 assert(new_list->tl() == NULL || !new_list->tl()->isHumongous(),
ysr@777 909 "Better not be humongous.");
ysr@777 910 RegionList::prepend_list(new_list);
ysr@777 911 }
ysr@777 912 };
ysr@777 913
ysr@777 914 class UncleanRegionList: public EmptyNonHRegionList {
ysr@777 915 public:
ysr@777 916 HeapRegion* get_next(HeapRegion* hr) {
ysr@777 917 return hr->next_from_unclean_list();
ysr@777 918 }
ysr@777 919 void set_next(HeapRegion* hr, HeapRegion* new_next) {
ysr@777 920 hr->set_next_on_unclean_list(new_next);
ysr@777 921 }
ysr@777 922
ysr@777 923 UncleanRegionList() : EmptyNonHRegionList() {}
ysr@777 924
ysr@777 925 void insert_before_head(HeapRegion* r) {
ysr@777 926 assert(!r->is_on_free_list(),
ysr@777 927 "Better not already be on free list");
ysr@777 928 assert(!r->is_on_unclean_list(),
ysr@777 929 "Better not already be on unclean list");
ysr@777 930 r->set_zero_fill_needed();
ysr@777 931 r->set_on_unclean_list(true);
ysr@777 932 EmptyNonHRegionList::insert_before_head(r);
ysr@777 933 }
ysr@777 934 void prepend_list(UncleanRegionList* new_list) {
ysr@777 935 assert(new_list->tl() == NULL || !new_list->tl()->is_on_free_list(),
ysr@777 936 "Better not already be on free list");
ysr@777 937 assert(new_list->tl() == NULL || new_list->tl()->is_on_unclean_list(),
ysr@777 938 "Better already be marked as on unclean list");
ysr@777 939 assert(new_list->hd() == NULL || !new_list->hd()->is_on_free_list(),
ysr@777 940 "Better not already be on free list");
ysr@777 941 assert(new_list->hd() == NULL || new_list->hd()->is_on_unclean_list(),
ysr@777 942 "Better already be marked as on unclean list");
ysr@777 943 EmptyNonHRegionList::prepend_list(new_list);
ysr@777 944 }
ysr@777 945 HeapRegion* pop() {
ysr@777 946 HeapRegion* res = RegionList::pop();
ysr@777 947 if (res != NULL) res->set_on_unclean_list(false);
ysr@777 948 return res;
ysr@777 949 }
ysr@777 950 };
ysr@777 951
ysr@777 952 // Local Variables: ***
ysr@777 953 // c-indentation-style: gnu ***
ysr@777 954 // End: ***
ysr@777 955
ysr@777 956 #endif // SERIALGC
