src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      johnc
date:        Tue, 14 Jun 2011 11:01:10 -0700
changeset:   2969 6747fd0512e0
parent:      2963 c3f1170908be
child:       3028 f44782f04dd4
permissions: -rw-r--r--

7004681: G1: Extend marking verification to Full GCs
Summary: Perform a heap verification after the first phase of G1's full GC, using objects' mark words to determine liveness. The third parameter of the heap verification routines, which was used in G1 to determine which marking bitmap to use in liveness calculations, has been changed from a boolean to an enum with values defined for using the mark word and the 'prev' and 'next' bitmaps.
Reviewed-by: tonyp, ysr

ysr@777 1 /*
tonyp@2453 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
stefank@2314 30 #include "gc_implementation/g1/survRateGroup.hpp"
stefank@2314 31 #include "gc_implementation/shared/ageTable.hpp"
stefank@2314 32 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 33 #include "memory/space.inline.hpp"
stefank@2314 34 #include "memory/watermark.hpp"
stefank@2314 35
ysr@777 36 #ifndef SERIALGC
ysr@777 37
ysr@777 38 // A HeapRegion is the smallest piece of a G1CollectedHeap that
ysr@777 39 // can be collected independently.
ysr@777 40
ysr@777 41 // NOTE: Although a HeapRegion is a Space, its
ysr@777 42 // Space::initDirtyCardClosure method must not be called.
ysr@777 43 // The problem is that the existence of this method breaks
ysr@777 44 // the independence of barrier sets from remembered sets.
ysr@777 45 // The solution is to remove this method from the definition
ysr@777 46 // of a Space.
ysr@777 47
ysr@777 48 class CompactibleSpace;
ysr@777 49 class ContiguousSpace;
ysr@777 50 class HeapRegionRemSet;
ysr@777 51 class HeapRegionRemSetIterator;
ysr@777 52 class HeapRegion;
tonyp@2472 53 class HeapRegionSetBase;
tonyp@2472 54
tonyp@2963 55 #define HR_FORMAT SIZE_FORMAT":(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
tonyp@2963 56 #define HR_FORMAT_PARAMS(_hr_) \
tonyp@2963 57 (_hr_)->hrs_index(), \
tonyp@2963 58 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
tonyp@2963 59 (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
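
// An illustrative sketch (not code from this file) of how the two macros
// above are meant to be consumed together: HR_FORMAT supplies the format
// string and HR_FORMAT_PARAMS expands to the matching argument list:
//
//   gclog_or_tty->print_cr("retiring region "HR_FORMAT,
//                          HR_FORMAT_PARAMS(hr));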
ysr@777 60
ysr@777 61 // A dirty card to oop closure for heap regions. It
ysr@777 62 // knows how to get the G1 heap and how to use the bitmap
ysr@777 63 // in the concurrent marker used by G1 to filter remembered
ysr@777 64 // sets.
ysr@777 65
ysr@777 66 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
ysr@777 67 public:
ysr@777 68 // Specification of possible DirtyCardToOopClosure filtering.
ysr@777 69 enum FilterKind {
ysr@777 70 NoFilterKind,
ysr@777 71 IntoCSFilterKind,
ysr@777 72 OutOfRegionFilterKind
ysr@777 73 };
ysr@777 74
ysr@777 75 protected:
ysr@777 76 HeapRegion* _hr;
ysr@777 77 FilterKind _fk;
ysr@777 78 G1CollectedHeap* _g1;
ysr@777 79
ysr@777 80 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 81 HeapWord* bottom, HeapWord* top,
ysr@777 82 OopClosure* cl);
ysr@777 83
ysr@777 84 // We don't specialize this for FilteringClosure; filtering is handled by
ysr@777 85 // the "FilterKind" mechanism. But we provide this to avoid a compiler
ysr@777 86 // warning.
ysr@777 87 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 88 HeapWord* bottom, HeapWord* top,
ysr@777 89 FilteringClosure* cl) {
ysr@777 90 HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
ysr@777 91 (OopClosure*)cl);
ysr@777 92 }
ysr@777 93
ysr@777 94 // Get the actual top of the area on which the closure will
ysr@777 95 // operate, given where the top is assumed to be (the end of the
ysr@777 96 // memory region passed to do_MemRegion) and where the object
ysr@777 97 // at the top is assumed to start. For example, an object may
ysr@777 98 // start at the top but actually extend past the assumed top,
ysr@777 99 // in which case the top becomes the end of the object.
ysr@777 100 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
ysr@777 101 return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
ysr@777 102 }
ysr@777 103
ysr@777 104 // Walk the given memory region from bottom to (actual) top
ysr@777 105 // looking for objects and applying the oop closure (_cl) to
ysr@777 106 // them. The base implementation of this treats the area as
ysr@777 107 // blocks, where a block may or may not be an object. Sub-
ysr@777 108 // classes should override this to provide more accurate
ysr@777 109 // or possibly more efficient walking.
ysr@777 110 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
ysr@777 111 Filtering_DCTOC::walk_mem_region(mr, bottom, top);
ysr@777 112 }
ysr@777 113
ysr@777 114 public:
ysr@777 115 HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@777 116 HeapRegion* hr, OopClosure* cl,
ysr@777 117 CardTableModRefBS::PrecisionStyle precision,
ysr@777 118 FilterKind fk);
ysr@777 119 };
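
// Constructing one of these (an illustrative sketch; "hr" and "oop_cl"
// are assumed to be a region and an oop closure in hand): the filter
// kind selects how oops found on dirty cards are screened, e.g. keeping
// only those that point into the collection set:
//
//   HeapRegionDCTOC dcto_cl(G1CollectedHeap::heap(), hr, oop_cl,
//                           CardTableModRefBS::Precise,
//                           HeapRegionDCTOC::IntoCSFilterKind);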
ysr@777 120
ysr@777 121
ysr@777 122 // The complicating factor is that BlockOffsetTable diverged
ysr@777 123 // significantly, and we need functionality that is only in the G1 version.
ysr@777 124 // So I copied that code, which led to an alternate G1 version of
ysr@777 125 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
ysr@777 126 // be reconciled, then G1OffsetTableContigSpace could go away.
ysr@777 127
ysr@777 128 // The idea behind time stamps is the following. Doing a save_marks on
ysr@777 129 // all regions at every GC pause is time consuming (if I remember
ysr@777 130 // correctly, 10ms or so). So, we would like to do that only for regions
ysr@777 131 // that are GC alloc regions. To achieve this, we use time
ysr@777 132 // stamps. For every evacuation pause, G1CollectedHeap generates a
ysr@777 133 // unique time stamp (essentially a counter that gets
ysr@777 134 // incremented). Every time we want to call save_marks on a region,
ysr@777 135 // we set the saved_mark_word to top and also copy the current GC
ysr@777 136 // time stamp to the time stamp field of the space. Reading the
ysr@777 137 // saved_mark_word involves checking the time stamp of the
ysr@777 138 // region. If it is the same as the current GC time stamp, then we
ysr@777 139 // can safely read the saved_mark_word field, as it is valid. If the
ysr@777 140 // time stamp of the region is not the same as the current GC time
ysr@777 141 // stamp, then we instead read top, as the saved_mark_word field is
ysr@777 142 // invalid. Time stamps (on the regions and also on the
ysr@777 143 // G1CollectedHeap) are reset at every cleanup (we iterate over
ysr@777 144 // the regions anyway) and at the end of a Full GC. The current scheme
ysr@777 145 // that uses sequential unsigned ints will fail only if we have 4b
ysr@777 146 // evacuation pauses between two cleanups, which is _highly_ unlikely.
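
// A sketch of the read protocol described above (the actual
// implementation lives in heapRegion.cpp; this is illustrative only):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp())
//       return top();                              // stale stamp: invalid
//     else
//       return ContiguousSpace::saved_mark_word(); // stamps match: valid
//   }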
ysr@777 147
ysr@777 148 class G1OffsetTableContigSpace: public ContiguousSpace {
ysr@777 149 friend class VMStructs;
ysr@777 150 protected:
ysr@777 151 G1BlockOffsetArrayContigSpace _offsets;
ysr@777 152 Mutex _par_alloc_lock;
ysr@777 153 volatile unsigned _gc_time_stamp;
tonyp@2715 154 // When we need to retire an allocation region, while other threads
tonyp@2715 155 // are also concurrently trying to allocate into it, we typically
tonyp@2715 156 // allocate a dummy object at the end of the region to ensure that
tonyp@2715 157 // no more allocations can take place in it. However, sometimes we
tonyp@2715 158 // want to know where the end of the last "real" object we allocated
tonyp@2715 159 // into the region was, and this is what this field keeps track of.
tonyp@2715 160 HeapWord* _pre_dummy_top;
ysr@777 161
ysr@777 162 public:
ysr@777 163 // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
ysr@777 164 // assumed to contain zeros.
ysr@777 165 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 166 MemRegion mr, bool is_zeroed = false);
ysr@777 167
ysr@777 168 void set_bottom(HeapWord* value);
ysr@777 169 void set_end(HeapWord* value);
ysr@777 170
ysr@777 171 virtual HeapWord* saved_mark_word() const;
ysr@777 172 virtual void set_saved_mark();
ysr@777 173 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
ysr@777 174
tonyp@2715 175 // See the comment above in the declaration of _pre_dummy_top for an
tonyp@2715 176 // explanation of what it is.
tonyp@2715 177 void set_pre_dummy_top(HeapWord* pre_dummy_top) {
tonyp@2715 178 assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
tonyp@2715 179 _pre_dummy_top = pre_dummy_top;
tonyp@2715 180 }
tonyp@2715 181 HeapWord* pre_dummy_top() {
tonyp@2715 182 return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
tonyp@2715 183 }
tonyp@2715 184 void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
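
// An illustrative sketch (assumed usage, not code from this file) of how
// a retiring thread combines the calls above: record where the last real
// object ended, then plug the remaining space with a dummy object so
// that no further allocations can succeed:
//
//   HeapWord* pre_top = sp->top();
//   sp->set_pre_dummy_top(pre_top);
//   size_t remaining = pointer_delta(sp->end(), pre_top);
//   if (remaining > 0) {
//     HeapWord* dummy = sp->allocate(remaining);
//     CollectedHeap::fill_with_object(dummy, remaining);
//   }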
tonyp@2715 185
tonyp@791 186 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tonyp@791 187 virtual void clear(bool mangle_space);
ysr@777 188
ysr@777 189 HeapWord* block_start(const void* p);
ysr@777 190 HeapWord* block_start_const(const void* p) const;
ysr@777 191
ysr@777 192 // Add offset table update.
ysr@777 193 virtual HeapWord* allocate(size_t word_size);
ysr@777 194 HeapWord* par_allocate(size_t word_size);
ysr@777 195
ysr@777 196 // MarkSweep support phase3
ysr@777 197 virtual HeapWord* initialize_threshold();
ysr@777 198 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
ysr@777 199
ysr@777 200 virtual void print() const;
tonyp@2453 201
tonyp@2453 202 void reset_bot() {
tonyp@2453 203 _offsets.zero_bottom_entry();
tonyp@2453 204 _offsets.initialize_threshold();
tonyp@2453 205 }
tonyp@2453 206
tonyp@2453 207 void update_bot_for_object(HeapWord* start, size_t word_size) {
tonyp@2453 208 _offsets.alloc_block(start, word_size);
tonyp@2453 209 }
tonyp@2453 210
tonyp@2453 211 void print_bot_on(outputStream* out) {
tonyp@2453 212 _offsets.print_on(out);
tonyp@2453 213 }
ysr@777 214 };
ysr@777 215
ysr@777 216 class HeapRegion: public G1OffsetTableContigSpace {
ysr@777 217 friend class VMStructs;
ysr@777 218 private:
ysr@777 219
tonyp@790 220 enum HumongousType {
tonyp@790 221 NotHumongous = 0,
tonyp@790 222 StartsHumongous,
tonyp@790 223 ContinuesHumongous
tonyp@790 224 };
tonyp@790 225
ysr@777 226 // The next filter kind that should be used for a "new_dcto_closure" call with
ysr@777 227 // the "traditional" signature.
ysr@777 228 HeapRegionDCTOC::FilterKind _next_fk;
ysr@777 229
ysr@777 230 // Requires that the region "mr" be dense with objects, and begin and end
ysr@777 231 // with an object.
ysr@777 232 void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
ysr@777 233
ysr@777 234 // The remembered set for this region.
ysr@777 235 // (Might want to make this "inline" later, to avoid some alloc failure
ysr@777 236 // issues.)
ysr@777 237 HeapRegionRemSet* _rem_set;
ysr@777 238
ysr@777 239 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
ysr@777 240
ysr@777 241 protected:
tonyp@2963 242 // The index of this region in the heap region sequence.
tonyp@2963 243 size_t _hrs_index;
ysr@777 244
tonyp@790 245 HumongousType _humongous_type;
ysr@777 246 // For a humongous region, region in which it starts.
ysr@777 247 HeapRegion* _humongous_start_region;
ysr@777 248 // For the start region of a humongous sequence, it's original end().
ysr@777 249 HeapWord* _orig_end;
ysr@777 250
ysr@777 251 // True iff the region is in the current collection_set.
ysr@777 252 bool _in_collection_set;
ysr@777 253
ysr@777 254 // True iff this is or has been an allocation region in the current
ysr@777 255 // collection pause.
ysr@777 256 bool _is_gc_alloc_region;
ysr@777 257
ysr@777 258 // True iff an attempt to evacuate an object in the region failed.
ysr@777 259 bool _evacuation_failed;
ysr@777 260
ysr@777 261 // A heap region may be a member of one of a number of special subsets, each
ysr@777 262 // represented as a linked list through the field below. Currently, these
ysr@777 263 // sets include:
ysr@777 264 // The collection set.
ysr@777 265 // The set of allocation regions used in a collection pause.
ysr@777 266 // Spaces that may contain gray objects.
ysr@777 267 HeapRegion* _next_in_special_set;
ysr@777 268
ysr@777 269 // next region in the young "generation" region set
ysr@777 270 HeapRegion* _next_young_region;
ysr@777 271
apetrusenko@1231 272 // Next region whose cards need cleaning
apetrusenko@1231 273 HeapRegion* _next_dirty_cards_region;
apetrusenko@1231 274
tonyp@2472 275 // Fields used by the HeapRegionSetBase class and subclasses.
tonyp@2472 276 HeapRegion* _next;
tonyp@2472 277 #ifdef ASSERT
tonyp@2472 278 HeapRegionSetBase* _containing_set;
tonyp@2472 279 #endif // ASSERT
tonyp@2472 280 bool _pending_removal;
tonyp@2472 281
ysr@777 282 // For parallel heapRegion traversal.
ysr@777 283 jint _claimed;
ysr@777 284
ysr@777 285 // We use concurrent marking to determine the amount of live data
ysr@777 286 // in each heap region.
ysr@777 287 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
ysr@777 288 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
ysr@777 289
ysr@777 290 // See "sort_index" method. -1 means it is not in the array.
ysr@777 291 int _sort_index;
ysr@777 292
ysr@777 293 // <PREDICTION>
ysr@777 294 double _gc_efficiency;
ysr@777 295 // </PREDICTION>
ysr@777 296
ysr@777 297 enum YoungType {
ysr@777 298 NotYoung, // a region is not young
ysr@777 299 Young, // a region is young
tonyp@2963 300 Survivor // a region is young and it contains survivors
ysr@777 301 };
ysr@777 302
johnc@2021 303 volatile YoungType _young_type;
ysr@777 304 int _young_index_in_cset;
ysr@777 305 SurvRateGroup* _surv_rate_group;
ysr@777 306 int _age_index;
ysr@777 307
ysr@777 308 // The start of the unmarked area. The unmarked area extends from this
ysr@777 309 // word until the top and/or end of the region, and is the part
ysr@777 310 // of the region for which no marking was done, i.e. objects may
ysr@777 311 // have been allocated in this part since the last mark phase.
ysr@777 312 // "prev" is the top at the start of the last completed marking.
ysr@777 313 // "next" is the top at the start of the in-progress marking (if any).
ysr@777 314 HeapWord* _prev_top_at_mark_start;
ysr@777 315 HeapWord* _next_top_at_mark_start;
ysr@777 316 // If a collection pause is in progress, this is the top at the start
ysr@777 317 // of that pause.
ysr@777 318
ysr@777 319 // We've counted the marked bytes of objects below here.
ysr@777 320 HeapWord* _top_at_conc_mark_count;
ysr@777 321
ysr@777 322 void init_top_at_mark_start() {
ysr@777 323 assert(_prev_marked_bytes == 0 &&
ysr@777 324 _next_marked_bytes == 0,
ysr@777 325 "Must be called after zero_marked_bytes.");
ysr@777 326 HeapWord* bot = bottom();
ysr@777 327 _prev_top_at_mark_start = bot;
ysr@777 328 _next_top_at_mark_start = bot;
ysr@777 329 _top_at_conc_mark_count = bot;
ysr@777 330 }
ysr@777 331
ysr@777 332 void set_young_type(YoungType new_type) {
ysr@777 333 //assert(_young_type != new_type, "setting the same type" );
ysr@777 334 // TODO: add more assertions here
ysr@777 335 _young_type = new_type;
ysr@777 336 }
ysr@777 337
johnc@1829 338 // Cached attributes used in the collection set policy information
johnc@1829 339
johnc@1829 340 // The RSet length that was added to the total value
johnc@1829 341 // for the collection set.
johnc@1829 342 size_t _recorded_rs_length;
johnc@1829 343
johnc@1829 344 // The predicted elapsed time that was added to the total value
johnc@1829 345 // for the collection set.
johnc@1829 346 double _predicted_elapsed_time_ms;
johnc@1829 347
johnc@1829 348 // The predicted number of bytes to copy that was added to
johnc@1829 349 // the total value for the collection set.
johnc@1829 350 size_t _predicted_bytes_to_copy;
johnc@1829 351
ysr@777 352 public:
ysr@777 353 // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
tonyp@2963 354 HeapRegion(size_t hrs_index,
tonyp@2963 355 G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 356 MemRegion mr, bool is_zeroed);
ysr@777 357
tonyp@1377 358 static int LogOfHRGrainBytes;
tonyp@1377 359 static int LogOfHRGrainWords;
tonyp@1377 360 // The normal type of these should be size_t. However, they used to
tonyp@1377 361 // be members of an enum and they are assumed by the
tonyp@1377 362 // compilers to be ints. To avoid going and fixing all their uses,
tonyp@1377 363 // I'm declaring them as ints. I'm not anticipating heap region
tonyp@1377 364 // sizes to reach anywhere near 2g, so using an int here is safe.
tonyp@1377 365 static int GrainBytes;
tonyp@1377 366 static int GrainWords;
tonyp@1377 367 static int CardsPerRegion;
tonyp@1377 368
tonyp@1377 369 // It sets up the heap region size (GrainBytes / GrainWords), as
tonyp@1377 370 // well as other related fields that are based on the heap region
tonyp@1377 371 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
tonyp@1377 372 // CardsPerRegion). All those fields are considered constant
tonyp@1377 373 // throughout the JVM's execution, therefore they should only be set
tonyp@1377 374 // up once during initialization time.
tonyp@1377 375 static void setup_heap_region_size(uintx min_heap_size);
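
// The relationships among the sizing fields above, as established by
// setup_heap_region_size() (an illustrative summary, assuming the usual
// definitions in heapRegion.cpp):
//
//   GrainBytes     == 1 << LogOfHRGrainBytes
//   GrainWords     == 1 << LogOfHRGrainWords == GrainBytes / HeapWordSize
//   CardsPerRegion == GrainBytes >> CardTableModRefBS::card_shift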
ysr@777 376
tonyp@790 377 enum ClaimValues {
tonyp@790 378 InitialClaimValue = 0,
tonyp@790 379 FinalCountClaimValue = 1,
tonyp@790 380 NoteEndClaimValue = 2,
tonyp@825 381 ScrubRemSetClaimValue = 3,
apetrusenko@1061 382 ParVerifyClaimValue = 4,
apetrusenko@1061 383 RebuildRSClaimValue = 5
tonyp@790 384 };
tonyp@790 385
tonyp@2454 386 inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
tonyp@2454 387 assert(is_young(), "we can only skip BOT updates on young regions");
tonyp@2454 388 return ContiguousSpace::par_allocate(word_size);
tonyp@2454 389 }
tonyp@2454 390 inline HeapWord* allocate_no_bot_updates(size_t word_size) {
tonyp@2454 391 assert(is_young(), "we can only skip BOT updates on young regions");
tonyp@2454 392 return ContiguousSpace::allocate(word_size);
tonyp@2454 393 }
tonyp@2454 394
ysr@777 395 // If this region is a member of a HeapRegionSeq, the index of this
ysr@777 396 // region in that sequence.
tonyp@2963 397 size_t hrs_index() const { return _hrs_index; }
ysr@777 398
ysr@777 399 // The number of bytes marked live in the region in the last marking phase.
ysr@777 400 size_t marked_bytes() { return _prev_marked_bytes; }
tonyp@2717 401 size_t live_bytes() {
tonyp@2717 402 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
tonyp@2717 403 }
tonyp@2717 404
ysr@777 405 // The number of bytes counted in the next marking.
ysr@777 406 size_t next_marked_bytes() { return _next_marked_bytes; }
ysr@777 407 // The number of bytes live wrt the next marking.
ysr@777 408 size_t next_live_bytes() {
tonyp@2717 409 return
tonyp@2717 410 (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
ysr@777 411 }
ysr@777 412
ysr@777 413 // A lower bound on the amount of garbage bytes in the region.
ysr@777 414 size_t garbage_bytes() {
ysr@777 415 size_t used_at_mark_start_bytes =
ysr@777 416 (prev_top_at_mark_start() - bottom()) * HeapWordSize;
ysr@777 417 assert(used_at_mark_start_bytes >= marked_bytes(),
ysr@777 418 "Can't mark more than we have.");
ysr@777 419 return used_at_mark_start_bytes - marked_bytes();
ysr@777 420 }
ysr@777 421
ysr@777 422 // An upper bound on the number of live bytes in the region.
ysr@777 423 size_t max_live_bytes() { return used() - garbage_bytes(); }
ysr@777 424
ysr@777 425 void add_to_marked_bytes(size_t incr_bytes) {
ysr@777 426 _next_marked_bytes = _next_marked_bytes + incr_bytes;
ysr@777 427 guarantee( _next_marked_bytes <= used(), "invariant" );
ysr@777 428 }
ysr@777 429
ysr@777 430 void zero_marked_bytes() {
ysr@777 431 _prev_marked_bytes = _next_marked_bytes = 0;
ysr@777 432 }
ysr@777 433
tonyp@790 434 bool isHumongous() const { return _humongous_type != NotHumongous; }
tonyp@790 435 bool startsHumongous() const { return _humongous_type == StartsHumongous; }
tonyp@790 436 bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
ysr@777 437 // For a humongous region, region in which it starts.
ysr@777 438 HeapRegion* humongous_start_region() const {
ysr@777 439 return _humongous_start_region;
ysr@777 440 }
ysr@777 441
tonyp@2453 442 // Makes the current region be a "starts humongous" region, i.e.,
tonyp@2453 443 // the first region in a series of one or more contiguous regions
tonyp@2453 444 // that will contain a single "humongous" object. The two parameters
tonyp@2453 445 // are as follows:
tonyp@2453 446 //
tonyp@2453 447 // new_top : The new value of the top field of this region which
tonyp@2453 448 // points to the end of the humongous object that's being
tonyp@2453 449 // allocated. If there is more than one region in the series, top
tonyp@2453 450 // will lie beyond this region's original end field and within the last
tonyp@2453 451 // region in the series.
tonyp@2453 452 //
tonyp@2453 453 // new_end : The new value of the end field of this region which
tonyp@2453 454 // points to the end of the last region in the series. If there is
tonyp@2453 455 // one region in the series (namely: this one) end will be the same
tonyp@2453 456 // as the original end of this region.
tonyp@2453 457 //
tonyp@2453 458 // Updating top and end as described above makes this region look as
tonyp@2453 459 // if it spans the entire space taken up by all the regions in the
tonyp@2453 460 // series and a single allocation moved its top to new_top. This
tonyp@2453 461 // ensures that the space (capacity / allocated) taken up by all
tonyp@2453 462 // humongous regions can be calculated by just looking at the
tonyp@2453 463 // "starts humongous" regions and by ignoring the "continues
tonyp@2453 464 // humongous" regions.
tonyp@2453 465 void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
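
// A consequence of the scheme above (illustrative sketch): the space
// consumed by an entire humongous series can be read off its "starts
// humongous" region alone, because top and end have been stretched to
// cover the whole series:
//
//   assert(hr->startsHumongous(), "pre-condition");
//   size_t used_bytes     = pointer_delta(hr->top(), hr->bottom()) * HeapWordSize;
//   size_t capacity_bytes = pointer_delta(hr->end(), hr->bottom()) * HeapWordSize;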
ysr@777 466
tonyp@2453 467 // Makes the current region be a "continues humongous"
tonyp@2453 468 // region. first_hr is the "starts humongous" region of the series
tonyp@2453 469 // which this region will be part of.
tonyp@2453 470 void set_continuesHumongous(HeapRegion* first_hr);
ysr@777 471
tonyp@2472 472 // Unsets the humongous-related fields on the region.
tonyp@2472 473 void set_notHumongous();
tonyp@2472 474
ysr@777 475 // If the region has a remembered set, return a pointer to it.
ysr@777 476 HeapRegionRemSet* rem_set() const {
ysr@777 477 return _rem_set;
ysr@777 478 }
ysr@777 479
ysr@777 480 // True iff the region is in the current collection_set.
ysr@777 481 bool in_collection_set() const {
ysr@777 482 return _in_collection_set;
ysr@777 483 }
ysr@777 484 void set_in_collection_set(bool b) {
ysr@777 485 _in_collection_set = b;
ysr@777 486 }
ysr@777 487 HeapRegion* next_in_collection_set() {
ysr@777 488 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 489 assert(_next_in_special_set == NULL ||
ysr@777 490 _next_in_special_set->in_collection_set(),
ysr@777 491 "Malformed CS.");
ysr@777 492 return _next_in_special_set;
ysr@777 493 }
ysr@777 494 void set_next_in_collection_set(HeapRegion* r) {
ysr@777 495 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 496 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
ysr@777 497 _next_in_special_set = r;
ysr@777 498 }
ysr@777 499
ysr@777 500 // True iff it is or has been an allocation region in the current
ysr@777 501 // collection pause.
ysr@777 502 bool is_gc_alloc_region() const {
ysr@777 503 return _is_gc_alloc_region;
ysr@777 504 }
ysr@777 505 void set_is_gc_alloc_region(bool b) {
ysr@777 506 _is_gc_alloc_region = b;
ysr@777 507 }
ysr@777 508 HeapRegion* next_gc_alloc_region() {
ysr@777 509 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777 510 assert(_next_in_special_set == NULL ||
ysr@777 511 _next_in_special_set->is_gc_alloc_region(),
ysr@777 512 "Malformed CS.");
ysr@777 513 return _next_in_special_set;
ysr@777 514 }
ysr@777 515 void set_next_gc_alloc_region(HeapRegion* r) {
ysr@777 516 assert(is_gc_alloc_region(), "should only invoke on member of CS.");
ysr@777 517 assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
ysr@777 518 _next_in_special_set = r;
ysr@777 519 }
ysr@777 520
tonyp@2472 521 // Methods used by the HeapRegionSetBase class and subclasses.
tonyp@2472 522
tonyp@2472 523 // Getter and setter for the next field used to link regions into
tonyp@2472 524 // linked lists.
tonyp@2472 525 HeapRegion* next() { return _next; }
tonyp@2472 526
tonyp@2472 527 void set_next(HeapRegion* next) { _next = next; }
tonyp@2472 528
tonyp@2472 529 // Every region added to a set is tagged with a reference to that
tonyp@2472 530 // set. This is used for doing consistency checking to make sure that
tonyp@2472 531 // the contents of a set are as they should be and it's only
tonyp@2472 532 // available in non-product builds.
tonyp@2472 533 #ifdef ASSERT
tonyp@2472 534 void set_containing_set(HeapRegionSetBase* containing_set) {
tonyp@2472 535 assert((containing_set == NULL && _containing_set != NULL) ||
tonyp@2472 536 (containing_set != NULL && _containing_set == NULL),
tonyp@2472 537 err_msg("containing_set: "PTR_FORMAT" "
tonyp@2472 538 "_containing_set: "PTR_FORMAT,
tonyp@2472 539 containing_set, _containing_set));
tonyp@2472 540
tonyp@2472 541 _containing_set = containing_set;
tonyp@2643 542 }
tonyp@2472 543
tonyp@2472 544 HeapRegionSetBase* containing_set() { return _containing_set; }
tonyp@2472 545 #else // ASSERT
tonyp@2472 546 void set_containing_set(HeapRegionSetBase* containing_set) { }
tonyp@2472 547
tonyp@2643 548 // containing_set() is only used in asserts so there's no reason
tonyp@2472 549 // to provide a dummy version of it.
tonyp@2472 550 #endif // ASSERT
tonyp@2472 551
tonyp@2472 552 // If we want to remove regions from a list in bulk we can simply tag
tonyp@2472 553 // them with the pending_removal tag and call the
tonyp@2472 554 // remove_all_pending() method on the list.
tonyp@2472 555
tonyp@2472 556 bool pending_removal() { return _pending_removal; }
tonyp@2472 557
tonyp@2472 558 void set_pending_removal(bool pending_removal) {
tonyp@2643 559 if (pending_removal) {
tonyp@2643 560 assert(!_pending_removal && containing_set() != NULL,
tonyp@2643 561 "can only set pending removal to true if it's false and "
tonyp@2643 562 "the region belongs to a region set");
tonyp@2643 563 } else {
tonyp@2643 564 assert( _pending_removal && containing_set() == NULL,
tonyp@2643 565 "can only set pending removal to false if it's true and "
tonyp@2643 566 "the region does not belong to a region set");
tonyp@2643 567 }
tonyp@2472 568
tonyp@2472 569 _pending_removal = pending_removal;
ysr@777 570 }
ysr@777 571
ysr@777 572 HeapRegion* get_next_young_region() { return _next_young_region; }
ysr@777 573 void set_next_young_region(HeapRegion* hr) {
ysr@777 574 _next_young_region = hr;
ysr@777 575 }
ysr@777 576
apetrusenko@1231 577 HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
apetrusenko@1231 578 HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
apetrusenko@1231 579 void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
apetrusenko@1231 580 bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
apetrusenko@1231 581
tonyp@2963 582 HeapWord* orig_end() { return _orig_end; }
tonyp@2963 583
ysr@777 584 // Allows logical separation between objects allocated before and after save_marks is called.
ysr@777 585 void save_marks();
ysr@777 586
ysr@777 587 // Reset HR stuff to default values.
ysr@777 588 void hr_clear(bool par, bool clear_space);
tonyp@2849 589 void par_clear();
ysr@777 590
tonyp@791 591 void initialize(MemRegion mr, bool clear_space, bool mangle_space);
ysr@777 592
ysr@777 593 // Get the start of the unmarked area in this region.
ysr@777 594 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
ysr@777 595 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
ysr@777 596
ysr@777 597 // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
ysr@777 598 // allocated in the current region before the last call to "save_marks".
ysr@777 599 void oop_before_save_marks_iterate(OopClosure* cl);
ysr@777 600
ysr@777 601 // This call determines the "filter kind" argument that will be used for
ysr@777 602 // the next call to "new_dcto_closure" on this region with the "traditional"
ysr@777 603 // signature (i.e., the call below.) The default, in the absence of a
ysr@777 604 // preceding call to this method, is "NoFilterKind", and a call to this
ysr@777 605 // method is necessary for each such call, or else it reverts to the
ysr@777 606 // default.
ysr@777 607 // (This is really ugly, but all other methods I could think of changed a
ysr@777 608 // lot of main-line code for G1.)
ysr@777 609 void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
ysr@777 610 _next_fk = nfk;
ysr@777 611 }
ysr@777 612
ysr@777 613 DirtyCardToOopClosure*
ysr@777 614 new_dcto_closure(OopClosure* cl,
ysr@777 615 CardTableModRefBS::PrecisionStyle precision,
ysr@777 616 HeapRegionDCTOC::FilterKind fk);
ysr@777 617
ysr@777 618 #if WHASSUP
ysr@777 619 DirtyCardToOopClosure*
ysr@777 620 new_dcto_closure(OopClosure* cl,
ysr@777 621 CardTableModRefBS::PrecisionStyle precision,
ysr@777 622 HeapWord* boundary) {
ysr@777 623 assert(boundary == NULL, "This arg doesn't make sense here.");
ysr@777 624 DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
ysr@777 625 _next_fk = HeapRegionDCTOC::NoFilterKind;
ysr@777 626 return res;
ysr@777 627 }
ysr@777 628 #endif
ysr@777 629
ysr@777 630 //
ysr@777 631 // Note the start or end of marking. This tells the heap region
ysr@777 632 // that the collector is about to start or has finished (concurrently)
ysr@777 633 // marking the heap.
ysr@777 634 //
ysr@777 635
ysr@777 636 // Note the start of a marking phase. Record the
ysr@777 637 // start of the unmarked area of the region here.
ysr@777 638 void note_start_of_marking(bool during_initial_mark) {
ysr@777 639 init_top_at_conc_mark_count();
ysr@777 640 _next_marked_bytes = 0;
ysr@777 641 if (during_initial_mark && is_young() && !is_survivor())
ysr@777 642 _next_top_at_mark_start = bottom();
ysr@777 643 else
ysr@777 644 _next_top_at_mark_start = top();
ysr@777 645 }
ysr@777 646
ysr@777 647 // Note the end of a marking phase. Install the start of
ysr@777 648 // the unmarked area that was captured at start of marking.
ysr@777 649 void note_end_of_marking() {
ysr@777 650 _prev_top_at_mark_start = _next_top_at_mark_start;
ysr@777 651 _prev_marked_bytes = _next_marked_bytes;
ysr@777 652 _next_marked_bytes = 0;
ysr@777 653
ysr@777 654 guarantee(_prev_marked_bytes <=
ysr@777 655 (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
ysr@777 656 "invariant");
ysr@777 657 }
ysr@777 658
ysr@777 659 // After an evacuation, we need to update _next_top_at_mark_start
ysr@777 660 // to be the current top. Note this is only valid if we have only
ysr@777 661 // ever evacuated into this region. If we evacuate, allocate, and
ysr@777 662 // then evacuate we are in deep doodoo.
ysr@777 663 void note_end_of_copying() {
tonyp@1456 664 assert(top() >= _next_top_at_mark_start, "Increase only");
tonyp@1456 665 _next_top_at_mark_start = top();
ysr@777 666 }
ysr@777 667
ysr@777 668 // Returns "false" iff no object in the region was allocated when the
ysr@777 669 // last mark phase ended.
ysr@777 670 bool is_marked() { return _prev_top_at_mark_start != bottom(); }
ysr@777 671
ysr@777 672 // If "is_marked()" is true, then this is the index of the region in
ysr@777 673 // an array constructed at the end of marking of the regions in a
ysr@777 674 // "desirability" order.
ysr@777 675 int sort_index() {
ysr@777 676 return _sort_index;
ysr@777 677 }
ysr@777 678 void set_sort_index(int i) {
ysr@777 679 _sort_index = i;
ysr@777 680 }
ysr@777 681
ysr@777 682 void init_top_at_conc_mark_count() {
ysr@777 683 _top_at_conc_mark_count = bottom();
ysr@777 684 }
ysr@777 685
ysr@777 686 void set_top_at_conc_mark_count(HeapWord *cur) {
ysr@777 687 assert(bottom() <= cur && cur <= end(), "Sanity.");
ysr@777 688 _top_at_conc_mark_count = cur;
ysr@777 689 }
ysr@777 690
ysr@777 691 HeapWord* top_at_conc_mark_count() {
ysr@777 692 return _top_at_conc_mark_count;
ysr@777 693 }
ysr@777 694
ysr@777 695 void reset_during_compaction() {
ysr@777 696 guarantee( isHumongous() && startsHumongous(),
ysr@777 697 "should only be called for humongous regions");
ysr@777 698
ysr@777 699 zero_marked_bytes();
ysr@777 700 init_top_at_mark_start();
ysr@777 701 }
ysr@777 702
ysr@777 703 // <PREDICTION>
ysr@777 704 void calc_gc_efficiency(void);
ysr@777 705 double gc_efficiency() { return _gc_efficiency;}
ysr@777 706 // </PREDICTION>
ysr@777 707
ysr@777 708 bool is_young() const { return _young_type != NotYoung; }
ysr@777 709 bool is_survivor() const { return _young_type == Survivor; }
ysr@777 710
ysr@777 711 int young_index_in_cset() const { return _young_index_in_cset; }
ysr@777 712 void set_young_index_in_cset(int index) {
ysr@777 713 assert( (index == -1) || is_young(), "pre-condition" );
ysr@777 714 _young_index_in_cset = index;
ysr@777 715 }
ysr@777 716
ysr@777 717 int age_in_surv_rate_group() {
ysr@777 718 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 719 assert( _age_index > -1, "pre-condition" );
ysr@777 720 return _surv_rate_group->age_in_group(_age_index);
ysr@777 721 }
ysr@777 722
ysr@777 723 void record_surv_words_in_group(size_t words_survived) {
ysr@777 724 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 725 assert( _age_index > -1, "pre-condition" );
ysr@777 726 int age_in_group = age_in_surv_rate_group();
ysr@777 727 _surv_rate_group->record_surviving_words(age_in_group, words_survived);
ysr@777 728 }
ysr@777 729
ysr@777 730 int age_in_surv_rate_group_cond() {
ysr@777 731 if (_surv_rate_group != NULL)
ysr@777 732 return age_in_surv_rate_group();
ysr@777 733 else
ysr@777 734 return -1;
ysr@777 735 }
ysr@777 736
ysr@777 737 SurvRateGroup* surv_rate_group() {
ysr@777 738 return _surv_rate_group;
ysr@777 739 }
ysr@777 740
ysr@777 741 void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
ysr@777 742 assert( surv_rate_group != NULL, "pre-condition" );
ysr@777 743 assert( _surv_rate_group == NULL, "pre-condition" );
ysr@777 744 assert( is_young(), "pre-condition" );
ysr@777 745
ysr@777 746 _surv_rate_group = surv_rate_group;
ysr@777 747 _age_index = surv_rate_group->next_age_index();
ysr@777 748 }
ysr@777 749
ysr@777 750 void uninstall_surv_rate_group() {
ysr@777 751 if (_surv_rate_group != NULL) {
ysr@777 752 assert( _age_index > -1, "pre-condition" );
ysr@777 753 assert( is_young(), "pre-condition" );
ysr@777 754
ysr@777 755 _surv_rate_group = NULL;
ysr@777 756 _age_index = -1;
ysr@777 757 } else {
ysr@777 758 assert( _age_index == -1, "pre-condition" );
ysr@777 759 }
ysr@777 760 }
ysr@777 761
ysr@777 762 void set_young() { set_young_type(Young); }
ysr@777 763
ysr@777 764 void set_survivor() { set_young_type(Survivor); }
ysr@777 765
ysr@777 766 void set_not_young() { set_young_type(NotYoung); }
ysr@777 767
ysr@777 768 // Determine if an object has been allocated since the last
ysr@777 769 // mark performed by the collector. This returns true iff the object
ysr@777 770 // is within the unmarked area of the region.
ysr@777 771 bool obj_allocated_since_prev_marking(oop obj) const {
ysr@777 772 return (HeapWord *) obj >= prev_top_at_mark_start();
ysr@777 773 }
ysr@777 774 bool obj_allocated_since_next_marking(oop obj) const {
ysr@777 775 return (HeapWord *) obj >= next_top_at_mark_start();
ysr@777 776 }
ysr@777 777
ysr@777 778 // For parallel heapRegion traversal.
ysr@777 779 bool claimHeapRegion(int claimValue);
ysr@777 780 jint claim_value() { return _claimed; }
ysr@777 781 // Use this carefully: only when you're sure no one is claiming...
ysr@777 782 void set_claim_value(int claimValue) { _claimed = claimValue; }
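
// A sketch of the claiming protocol (the real claimHeapRegion() lives in
// heapRegion.cpp; this is illustrative): a worker claims the region by
// atomically swinging _claimed to its claim value, and only the winner
// of the race processes the region:
//
//   jint current = _claimed;
//   if (current != claimValue &&
//       Atomic::cmpxchg(claimValue, &_claimed, current) == current) {
//     // this worker now owns the region for the current parallel phase
//   }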
ysr@777 783
ysr@777 784 // Returns the "evacuation_failed" property of the region.
ysr@777 785 bool evacuation_failed() { return _evacuation_failed; }
ysr@777 786
ysr@777 787 // Sets the "evacuation_failed" property of the region.
ysr@777 788 void set_evacuation_failed(bool b) {
ysr@777 789 _evacuation_failed = b;
ysr@777 790
ysr@777 791 if (b) {
ysr@777 792 init_top_at_conc_mark_count();
ysr@777 793 _next_marked_bytes = 0;
ysr@777 794 }
ysr@777 795 }
ysr@777 796
ysr@777 797 // Requires that "mr" be entirely within the region.
ysr@777 798 // Apply "cl->do_object" to all objects that intersect with "mr".
ysr@777 799 // If the iteration encounters an unparseable portion of the region,
ysr@777 800 // or if "cl->abort()" is true after a closure application,
ysr@777 801 // terminate the iteration and return the address of the start of the
ysr@777 802 // subregion that isn't done. (The two can be distinguished by querying
ysr@777 803 // "cl->abort()".) Return of "NULL" indicates that the iteration
ysr@777 804 // completed.
ysr@777 805 HeapWord*
ysr@777 806 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
ysr@777 807
tonyp@2849 808 // filter_young: if true and the region is a young region then we
tonyp@2849 809 // skip the iteration.
tonyp@2849 810 // card_ptr: if not NULL, and we decide that the card is not young
tonyp@2849 811 // and we iterate over it, we'll clean the card before we start the
tonyp@2849 812 // iteration.
ysr@777 813 HeapWord*
ysr@777 814 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@2021 815 FilterOutOfRegionClosure* cl,
tonyp@2849 816 bool filter_young,
tonyp@2849 817 jbyte* card_ptr);
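
// Typical careful-iteration pattern (an illustrative sketch; the exact
// caller-side handling is an assumption here):
//
//   HeapWord* stop_point =
//     hr->oops_on_card_seq_iterate_careful(dirty_region, &filter_cl,
//                                          true /* filter_young */,
//                                          card_ptr);
//   if (stop_point != NULL) {
//     // an unparseable portion was encountered: defer the card
//   }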
ysr@777 818
ysr@777 819 // A version of block start that is guaranteed to find *some* block
ysr@777 820 // boundary at or before "p", but does not do object iteration, and may
ysr@777 821 // therefore be used safely when the heap is unparseable.
ysr@777 822 HeapWord* block_start_careful(const void* p) const {
ysr@777 823 return _offsets.block_start_careful(p);
ysr@777 824 }
ysr@777 825
ysr@777 826 // Requires that "addr" is within the region. Returns the start of the
ysr@777 827 // first ("careful") block that starts at or after "addr", or else the
ysr@777 828 // "end" of the region if there is no such block.
ysr@777 829 HeapWord* next_block_start_careful(HeapWord* addr);
ysr@777 830
johnc@1829 831 size_t recorded_rs_length() const { return _recorded_rs_length; }
johnc@1829 832 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
johnc@1829 833 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
johnc@1829 834
johnc@1829 835 void set_recorded_rs_length(size_t rs_length) {
johnc@1829 836 _recorded_rs_length = rs_length;
johnc@1829 837 }
johnc@1829 838
johnc@1829 839 void set_predicted_elapsed_time_ms(double ms) {
johnc@1829 840 _predicted_elapsed_time_ms = ms;
johnc@1829 841 }
johnc@1829 842
johnc@1829 843 void set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 844 _predicted_bytes_to_copy = bytes;
johnc@1829 845 }
johnc@1829 846
ysr@777 847 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
ysr@777 848 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
ysr@777 849 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
ysr@777 850
ysr@777 851 CompactibleSpace* next_compaction_space() const;
ysr@777 852
ysr@777 853 virtual void reset_after_compaction();
ysr@777 854
ysr@777 855 void print() const;
ysr@777 856 void print_on(outputStream* st) const;
ysr@777 857
johnc@2969 858 // vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 859 // vo == UseNextMarking -> use "next" marking information,
johnc@2969 860 // vo == UseMarkWord -> use the mark word in the object header.
johnc@2969 861 //
tonyp@1246 862 // NOTE: Only the "prev" marking information is guaranteed to be
tonyp@1246 863 // consistent most of the time, so most calls to this should use
johnc@2969 864 // vo == UsePrevMarking.
johnc@2969 865 // Currently, there is only one case where this is called with
johnc@2969 866 // vo == UseNextMarking, which is to verify the "next" marking
johnc@2969 867 // information at the end of remark.
johnc@2969 868 // Currently there is only one place where this is called with
johnc@2969 869 // vo == UseMarkWord, which is to verify the marking during a
johnc@2969 870 // full GC.
johnc@2969 871 void verify(bool allow_dirty, VerifyOption vo, bool *failures) const;
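
// A sketch of how verification code can dispatch on vo (illustrative;
// the two *_wrt_* helper names are hypothetical):
//
//   bool is_obj_dead(const oop obj, VerifyOption vo) {
//     switch (vo) {
//     case UsePrevMarking: return obj_dead_wrt_prev_marking(obj);
//     case UseNextMarking: return obj_dead_wrt_next_marking(obj);
//     case UseMarkWord:    return !obj->is_gc_marked();
//     }
//     ShouldNotReachHere();
//     return false;
//   }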
tonyp@1246 872
tonyp@1246 873 // Override; it uses the "prev" marking information
ysr@777 874 virtual void verify(bool allow_dirty) const;
ysr@777 875 };
ysr@777 876
ysr@777 877 // HeapRegionClosure is used for iterating over regions.
ysr@777 878 // Terminates the iteration when the "doHeapRegion" method returns "true".
ysr@777 879 class HeapRegionClosure : public StackObj {
ysr@777 880 friend class HeapRegionSeq;
ysr@777 881 friend class G1CollectedHeap;
ysr@777 882
ysr@777 883 bool _complete;
ysr@777 884 void incomplete() { _complete = false; }
ysr@777 885
ysr@777 886 public:
ysr@777 887 HeapRegionClosure(): _complete(true) {}
ysr@777 888
ysr@777 889 // Typically called on each region until it returns true.
ysr@777 890 virtual bool doHeapRegion(HeapRegion* r) = 0;
ysr@777 891
ysr@777 892 // True after iteration if the closure was applied to all heap regions
ysr@777 893 // and returned "false" in all cases.
ysr@777 894 bool complete() { return _complete; }
ysr@777 895 };
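
// Example use of the closure protocol above (an illustrative sketch):
//
//   class SumLiveBytesClosure : public HeapRegionClosure {
//     size_t _live_bytes;
//   public:
//     SumLiveBytesClosure() : _live_bytes(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       _live_bytes += r->live_bytes();
//       return false;  // "false" means: keep iterating over all regions
//     }
//     size_t live_bytes() const { return _live_bytes; }
//   };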
ysr@777 896
ysr@777 897 #endif // SERIALGC
stefank@2314 898
stefank@2314 899 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
