src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      tonyp
date:        Wed, 19 Jan 2011 19:30:42 -0500
changeset:   2472:0fa27f37d4d4
parent:      2454:b158bed62ef5
child:       2643:1216415d8e35
permissions: -rw-r--r--

6977804: G1: remove the zero-filling thread
Summary: This changeset removes the zero-filling thread from G1 and collapses the two free region lists we had before (the "free" and "unclean" lists) into one. The new free list uses the new heap region sets / lists abstractions that we'll ultimately use to keep track of all regions in the heap. A heap region set was also introduced for the humongous regions. Finally, this change increases the concurrency between the thread that completes freeing regions (after a cleanup pause) and the rest of the system (previously we had to wait for that thread to complete before allocating a new region). The changeset also includes a lot of refactoring and code simplification.
Reviewed-by: jcoomes, johnc

/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
#include "gc_implementation/g1/survRateGroup.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/space.inline.hpp"
#include "memory/watermark.hpp"

#ifndef SERIALGC

// A HeapRegion is the smallest piece of a G1CollectedHeap that
// can be collected independently.

// NOTE: Although a HeapRegion is a Space, its
// Space::initDirtyCardClosure method must not be called.
// The problem is that the existence of this method breaks
// the independence of barrier sets from remembered sets.
// The solution is to remove this method from the definition
// of a Space.

class CompactibleSpace;
class ContiguousSpace;
class HeapRegionRemSet;
class HeapRegionRemSetIterator;
class HeapRegion;
class HeapRegionSetBase;

#define HR_FORMAT "%d:["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
#define HR_FORMAT_PARAMS(__hr) (__hr)->hrs_index(), (__hr)->bottom(), \
                               (__hr)->top(), (__hr)->end()
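
// A usage sketch for the two macros above (hedged: the stream and the
// message text are illustrative, not taken from this file):
//
//   HeapRegion* hr = ...;
//   gclog_or_tty->print_cr("region "HR_FORMAT" is being freed",
//                          HR_FORMAT_PARAMS(hr));
//
// which prints the region as an "index:[bottom,top,end]" tuple.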

// A dirty card to oop closure for heap regions. It
// knows how to get the G1 heap and how to use the bitmap
// in the concurrent marker used by G1 to filter remembered
// sets.

class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
public:
  // Specification of possible DirtyCardToOopClosure filtering.
  enum FilterKind {
    NoFilterKind,
    IntoCSFilterKind,
    OutOfRegionFilterKind
  };

protected:
  HeapRegion* _hr;
  FilterKind _fk;
  G1CollectedHeap* _g1;

  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               OopClosure* cl);

  // We don't specialize this for FilteringClosure; filtering is handled by
  // the "FilterKind" mechanism. But we provide this to avoid a compiler
  // warning.
  void walk_mem_region_with_cl(MemRegion mr,
                               HeapWord* bottom, HeapWord* top,
                               FilteringClosure* cl) {
    HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
                                             (OopClosure*)cl);
  }

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
    return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
  }

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
    Filtering_DCTOC::walk_mem_region(mr, bottom, top);
  }

public:
  HeapRegionDCTOC(G1CollectedHeap* g1,
                  HeapRegion* hr, OopClosure* cl,
                  CardTableModRefBS::PrecisionStyle precision,
                  FilterKind fk);
};
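
// A construction sketch (hedged: the variable names are illustrative;
// "fk" selects which references the closure filters out, and
// do_MemRegion() is inherited from DirtyCardToOopClosure):
//
//   HeapRegionDCTOC dcto_cl(G1CollectedHeap::heap(), hr, oop_cl,
//                           CardTableModRefBS::Precise,
//                           HeapRegionDCTOC::IntoCSFilterKind);
//   dcto_cl.do_MemRegion(dirty_card_mr);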

// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
// OffsetTableContigSpace. If the two versions of BlockOffsetTable could
// be reconciled, then G1OffsetTableContigSpace could go away.

// The idea behind time stamps is the following. Doing a save_marks on
// all regions at every GC pause is time consuming (if I remember
// correctly, 10ms or so). So, we would like to do that only for regions
// that are GC alloc regions. To achieve this, we use time
// stamps. For every evacuation pause, G1CollectedHeap generates a
// unique time stamp (essentially a counter that gets
// incremented). Every time we want to call save_marks on a region,
// we set the saved_mark_word to top and also copy the current GC
// time stamp to the time stamp field of the space. Reading the
// saved_mark_word involves checking the time stamp of the
// region. If it is the same as the current GC time stamp, then we
// can safely read the saved_mark_word field, as it is valid. If the
// time stamp of the region is not the same as the current GC time
// stamp, then we instead read top, as the saved_mark_word field is
// invalid. Time stamps (on the regions and also on the
// G1CollectedHeap) are reset at every cleanup (we iterate over
// the regions anyway) and at the end of a Full GC. The current scheme
// that uses sequential unsigned ints will fail only if we have 4b
// evacuation pauses between two cleanups, which is _highly_ unlikely.
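
// In code, the reading side of the protocol looks roughly like this
// (a sketch of the idea above, not necessarily the verbatim
// implementation in heapRegion.cpp):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
//       return top();                          // stamp stale: field invalid
//     } else {
//       return ContiguousSpace::saved_mark_word(); // stamp matches: valid
//     }
//   }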

class G1OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  G1BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;
  volatile unsigned _gc_time_stamp;

 public:
  // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
  // assumed to contain zeros.
  G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
                           MemRegion mr, bool is_zeroed = false);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  virtual HeapWord* saved_mark_word() const;
  virtual void set_saved_mark();
  void reset_gc_time_stamp() { _gc_time_stamp = 0; }

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  HeapWord* block_start(const void* p);
  HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual HeapWord* allocate(size_t word_size);
  HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print() const;

  void reset_bot() {
    _offsets.zero_bottom_entry();
    _offsets.initialize_threshold();
  }

  void update_bot_for_object(HeapWord* start, size_t word_size) {
    _offsets.alloc_block(start, word_size);
  }

  void print_bot_on(outputStream* out) {
    _offsets.print_on(out);
  }
};

class HeapRegion: public G1OffsetTableContigSpace {
  friend class VMStructs;
 private:

  enum HumongousType {
    NotHumongous = 0,
    StartsHumongous,
    ContinuesHumongous
  };

  // The next filter kind that should be used for a "new_dcto_cl" call with
  // the "traditional" signature.
  HeapRegionDCTOC::FilterKind _next_fk;

  // Requires that the region "mr" be dense with objects, and begin and end
  // with an object.
  void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);

  // The remembered set for this region.
  // (Might want to make this "inline" later, to avoid some alloc failure
  // issues.)
  HeapRegionRemSet* _rem_set;

  G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }

 protected:
  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int _hrs_index;

  HumongousType _humongous_type;
  // For a humongous region, region in which it starts.
  HeapRegion* _humongous_start_region;
  // For the start region of a humongous sequence, its original end().
  HeapWord* _orig_end;

  // True iff the region is in current collection_set.
  bool _in_collection_set;

  // Is this or has it been an allocation region in the current collection
  // pause.
  bool _is_gc_alloc_region;

  // True iff an attempt to evacuate an object in the region failed.
  bool _evacuation_failed;

  // A heap region may be a member of one of a number of special subsets,
  // each represented as linked lists through the field below. Currently,
  // these sets include:
  //   The collection set.
  //   The set of allocation regions used in a collection pause.
  //   Spaces that may contain gray objects.
  HeapRegion* _next_in_special_set;

  // next region in the young "generation" region set
  HeapRegion* _next_young_region;

  // Next region whose cards need cleaning
  HeapRegion* _next_dirty_cards_region;

  // Fields used by the HeapRegionSetBase class and subclasses.
  HeapRegion* _next;
#ifdef ASSERT
  HeapRegionSetBase* _containing_set;
#endif // ASSERT
  bool _pending_removal;

  // For parallel heapRegion traversal.
  jint _claimed;

  // We use concurrent marking to determine the amount of live data
  // in each heap region.
  size_t _prev_marked_bytes;    // Bytes known to be live via last completed marking.
  size_t _next_marked_bytes;    // Bytes known to be live via in-progress marking.

  // See "sort_index" method. -1 means is not in the array.
  int _sort_index;

  // <PREDICTION>
  double _gc_efficiency;
  // </PREDICTION>

  enum YoungType {
    NotYoung,                   // a region is not young
    Young,                      // a region is young
    Survivor                    // a region is young and it contains
                                // survivors
  };

  volatile YoungType _young_type;
  int  _young_index_in_cset;
  SurvRateGroup* _surv_rate_group;
  int  _age_index;

  // The start of the unmarked area. The unmarked area extends from this
  // word until the top and/or end of the region, and is the part
  // of the region for which no marking was done, i.e. objects may
  // have been allocated in this part since the last mark phase.
  // "prev" is the top at the start of the last completed marking.
  // "next" is the top at the start of the in-progress marking (if any).
  HeapWord* _prev_top_at_mark_start;
  HeapWord* _next_top_at_mark_start;
  // If a collection pause is in progress, this is the top at the start
  // of that pause.

  // We've counted the marked bytes of objects below here.
  HeapWord* _top_at_conc_mark_count;

  void init_top_at_mark_start() {
    assert(_prev_marked_bytes == 0 &&
           _next_marked_bytes == 0,
           "Must be called after zero_marked_bytes.");
    HeapWord* bot = bottom();
    _prev_top_at_mark_start = bot;
    _next_top_at_mark_start = bot;
    _top_at_conc_mark_count = bot;
  }

  void set_young_type(YoungType new_type) {
    //assert(_young_type != new_type, "setting the same type" );
    // TODO: add more assertions here
    _young_type = new_type;
  }

  // Cached attributes used in the collection set policy information

  // The RSet length that was added to the total value
  // for the collection set.
  size_t _recorded_rs_length;

  // The predicted elapsed time that was added to total value
  // for the collection set.
  double _predicted_elapsed_time_ms;

  // The predicted number of bytes to copy that was added to
  // the total value for the collection set.
  size_t _predicted_bytes_to_copy;

 public:
  // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
  HeapRegion(G1BlockOffsetSharedArray* sharedOffsetArray,
             MemRegion mr, bool is_zeroed);

  static int LogOfHRGrainBytes;
  static int LogOfHRGrainWords;
  // The normal type of these should be size_t. However, they used to
  // be members of an enum before and they are assumed by the
  // compilers to be ints. To avoid going and fixing all their uses,
  // I'm declaring them as ints. I'm not anticipating heap region
  // sizes to reach anywhere near 2g, so using an int here is safe.
  static int GrainBytes;
  static int GrainWords;
  static int CardsPerRegion;

  // Sets up the heap region size (GrainBytes / GrainWords), as
  // well as other related fields that are based on the heap region
  // size (LogOfHRGrainBytes / LogOfHRGrainWords /
  // CardsPerRegion). All those fields are considered constant
  // throughout the JVM's execution, therefore they should only be set
  // up once during initialization time.
  static void setup_heap_region_size(uintx min_heap_size);
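
  // For illustration, the fields above relate as follows (a sketch
  // assuming a 1 MB region size; the 512-byte card size is the current
  // default, not something this file guarantees):
  //
  //   LogOfHRGrainBytes = 20;
  //   GrainBytes        = 1 << LogOfHRGrainBytes;     // 1048576
  //   GrainWords        = GrainBytes / HeapWordSize;  // 131072 on 64 bit
  //   CardsPerRegion    = GrainBytes >> CardTableModRefBS::card_shift;
  //                                                   // 2048 cards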

  enum ClaimValues {
    InitialClaimValue     = 0,
    FinalCountClaimValue  = 1,
    NoteEndClaimValue     = 2,
    ScrubRemSetClaimValue = 3,
    ParVerifyClaimValue   = 4,
    RebuildRSClaimValue   = 5
  };

  inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::par_allocate(word_size);
  }
  inline HeapWord* allocate_no_bot_updates(size_t word_size) {
    assert(is_young(), "we can only skip BOT updates on young regions");
    return ContiguousSpace::allocate(word_size);
  }

  // If this region is a member of a HeapRegionSeq, the index in that
  // sequence, otherwise -1.
  int hrs_index() const { return _hrs_index; }
  void set_hrs_index(int index) { _hrs_index = index; }

  // The number of bytes marked live in the region in the last marking phase.
  size_t marked_bytes()    { return _prev_marked_bytes; }
  // The number of bytes counted in the next marking.
  size_t next_marked_bytes() { return _next_marked_bytes; }
  // The number of bytes live wrt the next marking.
  size_t next_live_bytes() {
    return (top() - next_top_at_mark_start())
           * HeapWordSize
           + next_marked_bytes();
  }

  // A lower bound on the amount of garbage bytes in the region.
  size_t garbage_bytes() {
    size_t used_at_mark_start_bytes =
      (prev_top_at_mark_start() - bottom()) * HeapWordSize;
    assert(used_at_mark_start_bytes >= marked_bytes(),
           "Can't mark more than we have.");
    return used_at_mark_start_bytes - marked_bytes();
  }
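
  // Example (illustrative numbers): if 8 MB of the region was in use when
  // the last marking started and 3 MB of that was marked live, at least
  // 5 MB of the region is garbage. Objects allocated since that marking
  // started are not counted either way, hence "a lower bound".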

  // An upper bound on the number of live bytes in the region.
  size_t max_live_bytes() { return used() - garbage_bytes(); }

  void add_to_marked_bytes(size_t incr_bytes) {
    _next_marked_bytes = _next_marked_bytes + incr_bytes;
    guarantee( _next_marked_bytes <= used(), "invariant" );
  }

  void zero_marked_bytes() {
    _prev_marked_bytes = _next_marked_bytes = 0;
  }

  bool isHumongous() const { return _humongous_type != NotHumongous; }
  bool startsHumongous() const { return _humongous_type == StartsHumongous; }
  bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
  // For a humongous region, region in which it starts.
  HeapRegion* humongous_start_region() const {
    return _humongous_start_region;
  }

  // Makes the current region be a "starts humongous" region, i.e.,
  // the first region in a series of one or more contiguous regions
  // that will contain a single "humongous" object. The two parameters
  // are as follows:
  //
  // new_top : The new value of the top field of this region which
  // points to the end of the humongous object that's being
  // allocated. If there is more than one region in the series, top
  // will lie beyond this region's original end field and on the last
  // region in the series.
  //
  // new_end : The new value of the end field of this region which
  // points to the end of the last region in the series. If there is
  // one region in the series (namely: this one) end will be the same
  // as the original end of this region.
  //
  // Updating top and end as described above makes this region look as
  // if it spans the entire space taken up by all the regions in the
  // series and a single allocation moved its top to new_top. This
  // ensures that the space (capacity / allocated) taken up by all
  // humongous regions can be calculated by just looking at the
  // "starts humongous" regions and by ignoring the "continues
  // humongous" regions.
  void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
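
  // For example (a sketch, assuming a 1 MB region size): a 2.5 MB
  // humongous object allocated at the bottom of region R0 spans regions
  // R0, R1 and half of R2. set_startsHumongous() is called on R0 with
  //   new_top = R0->bottom() + 2.5 MB   (an address inside R2)
  //   new_end = R2->end()
  // while R1 and R2 become "continues humongous" regions, so R0 alone
  // accounts for the space taken up by the whole object.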

  // Makes the current region be a "continues humongous"
  // region. first_hr is the "start humongous" region of the series
  // which this region will be part of.
  void set_continuesHumongous(HeapRegion* first_hr);

  // Unsets the humongous-related fields on the region.
  void set_notHumongous();

  // If the region has a remembered set, return a pointer to it.
  HeapRegionRemSet* rem_set() const {
    return _rem_set;
  }

  // True iff the region is in current collection_set.
  bool in_collection_set() const {
    return _in_collection_set;
  }
  void set_in_collection_set(bool b) {
    _in_collection_set = b;
  }
  HeapRegion* next_in_collection_set() {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->in_collection_set(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_in_collection_set(HeapRegion* r) {
    assert(in_collection_set(), "should only invoke on member of CS.");
    assert(r == NULL || r->in_collection_set(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // True iff it is or has been an allocation region in the current
  // collection pause.
  bool is_gc_alloc_region() const {
    return _is_gc_alloc_region;
  }
  void set_is_gc_alloc_region(bool b) {
    _is_gc_alloc_region = b;
  }
  HeapRegion* next_gc_alloc_region() {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(_next_in_special_set == NULL ||
           _next_in_special_set->is_gc_alloc_region(),
           "Malformed CS.");
    return _next_in_special_set;
  }
  void set_next_gc_alloc_region(HeapRegion* r) {
    assert(is_gc_alloc_region(), "should only invoke on member of CS.");
    assert(r == NULL || r->is_gc_alloc_region(), "Malformed CS.");
    _next_in_special_set = r;
  }

  // Methods used by the HeapRegionSetBase class and subclasses.

  // Getter and setter for the next field used to link regions into
  // linked lists.
  HeapRegion* next()              { return _next; }

  void set_next(HeapRegion* next) { _next = next; }

  // Every region added to a set is tagged with a reference to that
  // set. This is used for doing consistency checking to make sure that
  // the contents of a set are as they should be and it's only
  // available in non-product builds.
#ifdef ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) {
    assert((containing_set == NULL && _containing_set != NULL) ||
           (containing_set != NULL && _containing_set == NULL),
           err_msg("containing_set: "PTR_FORMAT" "
                   "_containing_set: "PTR_FORMAT,
                   containing_set, _containing_set));

    _containing_set = containing_set;
  }

  HeapRegionSetBase* containing_set() { return _containing_set; }
#else // ASSERT
  void set_containing_set(HeapRegionSetBase* containing_set) { }

  // containing_set() is only used in asserts so there's no reason
  // to provide a dummy version of it.
#endif // ASSERT

  // If we want to remove regions from a list in bulk we can simply tag
  // them with the pending_removal tag and call the
  // remove_all_pending() method on the list.

  bool pending_removal() { return _pending_removal; }

  void set_pending_removal(bool pending_removal) {
    // We can only set pending_removal to true, if it's false and the
    // region belongs to a set.
    assert(!pending_removal ||
           (!_pending_removal && containing_set() != NULL), "pre-condition");
    // We can only set pending_removal to false, if it's true and the
    // region does not belong to a set.
    assert( pending_removal ||
           ( _pending_removal && containing_set() == NULL), "pre-condition");

    _pending_removal = pending_removal;
  }
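
  // A bulk-removal usage sketch (hedged: the loop, variable names, and
  // the count argument are illustrative; only the tag-then-call-
  // remove_all_pending() protocol comes from the comment above):
  //
  //   size_t num_tagged = 0;
  //   for (HeapRegion* hr = ...; hr != NULL; hr = ...) {
  //     hr->set_pending_removal(true);
  //     num_tagged += 1;
  //   }
  //   the_list->remove_all_pending(num_tagged);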

  HeapRegion* get_next_young_region() { return _next_young_region; }
  void set_next_young_region(HeapRegion* hr) {
    _next_young_region = hr;
  }

  HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
  HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
  void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
  bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }

  // Allows logical separation between objects allocated before and after.
  void save_marks();

  // Reset HR stuff to default values.
  void hr_clear(bool par, bool clear_space);

  void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // Get the start of the unmarked area in this region.
  HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
  HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }

  // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
  // allocated in the current region before the last call to "save_mark".
  void oop_before_save_marks_iterate(OopClosure* cl);

  // This call determines the "filter kind" argument that will be used for
  // the next call to "new_dcto_cl" on this region with the "traditional"
  // signature (i.e., the call below.) The default, in the absence of a
  // preceding call to this method, is "NoFilterKind", and a call to this
  // method is necessary for each such call, or else it reverts to the
  // default.
  // (This is really ugly, but all other methods I could think of changed a
  // lot of main-line code for G1.)
  void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
    _next_fk = nfk;
  }

  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapRegionDCTOC::FilterKind fk);

#if WHASSUP
  DirtyCardToOopClosure*
  new_dcto_closure(OopClosure* cl,
                   CardTableModRefBS::PrecisionStyle precision,
                   HeapWord* boundary) {
    assert(boundary == NULL, "This arg doesn't make sense here.");
    DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
    _next_fk = HeapRegionDCTOC::NoFilterKind;
    return res;
  }
#endif

  //
  // Note the start or end of marking. This tells the heap region
  // that the collector is about to start or has finished (concurrently)
  // marking the heap.
  //

  // Note the start of a marking phase. Record the
  // start of the unmarked area of the region here.
  void note_start_of_marking(bool during_initial_mark) {
    init_top_at_conc_mark_count();
    _next_marked_bytes = 0;
    if (during_initial_mark && is_young() && !is_survivor())
      _next_top_at_mark_start = bottom();
    else
      _next_top_at_mark_start = top();
  }

  // Note the end of a marking phase. Install the start of
  // the unmarked area that was captured at start of marking.
  void note_end_of_marking() {
    _prev_top_at_mark_start = _next_top_at_mark_start;
    _prev_marked_bytes = _next_marked_bytes;
    _next_marked_bytes = 0;

    guarantee(_prev_marked_bytes <=
              (size_t) (prev_top_at_mark_start() - bottom()) * HeapWordSize,
              "invariant");
  }

  // After an evacuation, we need to update _next_top_at_mark_start
  // to be the current top. Note this is only valid if we have only
  // ever evacuated into this region. If we evacuate, allocate, and
  // then evacuate we are in deep doodoo.
  void note_end_of_copying() {
    assert(top() >= _next_top_at_mark_start, "Increase only");
    _next_top_at_mark_start = top();
  }

  // Returns "false" iff no object in the region was allocated when the
  // last mark phase ended.
  bool is_marked() { return _prev_top_at_mark_start != bottom(); }

  // If "is_marked()" is true, then this is the index of the region in
  // an array constructed at the end of marking of the regions in a
  // "desirability" order.
  int sort_index() {
    return _sort_index;
  }
  void set_sort_index(int i) {
    _sort_index = i;
  }

  void init_top_at_conc_mark_count() {
    _top_at_conc_mark_count = bottom();
  }

  void set_top_at_conc_mark_count(HeapWord *cur) {
    assert(bottom() <= cur && cur <= end(), "Sanity.");
    _top_at_conc_mark_count = cur;
  }

  HeapWord* top_at_conc_mark_count() {
    return _top_at_conc_mark_count;
  }

  void reset_during_compaction() {
    guarantee( isHumongous() && startsHumongous(),
               "should only be called for humongous regions");

    zero_marked_bytes();
    init_top_at_mark_start();
  }

  // <PREDICTION>
  void calc_gc_efficiency(void);
  double gc_efficiency() { return _gc_efficiency;}
  // </PREDICTION>

  bool is_young() const     { return _young_type != NotYoung; }
  bool is_survivor() const  { return _young_type == Survivor; }

  int  young_index_in_cset() const { return _young_index_in_cset; }
  void set_young_index_in_cset(int index) {
    assert( (index == -1) || is_young(), "pre-condition" );
    _young_index_in_cset = index;
  }

  int age_in_surv_rate_group() {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    return _surv_rate_group->age_in_group(_age_index);
  }

  void record_surv_words_in_group(size_t words_survived) {
    assert( _surv_rate_group != NULL, "pre-condition" );
    assert( _age_index > -1, "pre-condition" );
    int age_in_group = age_in_surv_rate_group();
    _surv_rate_group->record_surviving_words(age_in_group, words_survived);
  }

  int age_in_surv_rate_group_cond() {
    if (_surv_rate_group != NULL)
      return age_in_surv_rate_group();
    else
      return -1;
  }

  SurvRateGroup* surv_rate_group() {
    return _surv_rate_group;
  }

  void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
    assert( surv_rate_group != NULL, "pre-condition" );
    assert( _surv_rate_group == NULL, "pre-condition" );
    assert( is_young(), "pre-condition" );

    _surv_rate_group = surv_rate_group;
    _age_index = surv_rate_group->next_age_index();
  }

  void uninstall_surv_rate_group() {
    if (_surv_rate_group != NULL) {
      assert( _age_index > -1, "pre-condition" );
      assert( is_young(), "pre-condition" );

      _surv_rate_group = NULL;
      _age_index = -1;
    } else {
      assert( _age_index == -1, "pre-condition" );
    }
  }

  void set_young() { set_young_type(Young); }

  void set_survivor() { set_young_type(Survivor); }

  void set_not_young() { set_young_type(NotYoung); }

  // Determine if an object has been allocated since the last
  // mark performed by the collector. This returns true iff the object
  // is within the unmarked area of the region.
  bool obj_allocated_since_prev_marking(oop obj) const {
    return (HeapWord *) obj >= prev_top_at_mark_start();
  }
  bool obj_allocated_since_next_marking(oop obj) const {
    return (HeapWord *) obj >= next_top_at_mark_start();
  }

  // For parallel heapRegion traversal.
  bool claimHeapRegion(int claimValue);
  jint claim_value() { return _claimed; }
  // Use this carefully: only when you're sure no one is claiming...
  void set_claim_value(int claimValue) { _claimed = claimValue; }

  // Returns the "evacuation_failed" property of the region.
  bool evacuation_failed() { return _evacuation_failed; }

  // Sets the "evacuation_failed" property of the region.
  void set_evacuation_failed(bool b) {
    _evacuation_failed = b;

    if (b) {
      init_top_at_conc_mark_count();
      _next_marked_bytes = 0;
    }
  }

  // Requires that "mr" be entirely within the region.
  // Apply "cl->do_object" to all objects that intersect with "mr".
  // If the iteration encounters an unparseable portion of the region,
  // or if "cl->abort()" is true after a closure application,
  // terminate the iteration and return the address of the start of the
  // subregion that isn't done. (The two can be distinguished by querying
  // "cl->abort()".) Return of "NULL" indicates that the iteration
  // completed.
  HeapWord*
  object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);

  // In this version - if filter_young is true and the region
  // is a young region then we skip the iteration.
  HeapWord*
  oops_on_card_seq_iterate_careful(MemRegion mr,
                                   FilterOutOfRegionClosure* cl,
                                   bool filter_young);

  // A version of block start that is guaranteed to find *some* block
  // boundary at or before "p", but does no object iteration, and may
  // therefore be used safely when the heap is unparseable.
  HeapWord* block_start_careful(const void* p) const {
    return _offsets.block_start_careful(p);
  }

  // Requires that "addr" is within the region. Returns the start of the
  // first ("careful") block that starts at or after "addr", or else the
  // "end" of the region if there is no such block.
  HeapWord* next_block_start_careful(HeapWord* addr);

  size_t recorded_rs_length() const        { return _recorded_rs_length; }
  double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
  size_t predicted_bytes_to_copy() const   { return _predicted_bytes_to_copy; }

  void set_recorded_rs_length(size_t rs_length) {
    _recorded_rs_length = rs_length;
  }

  void set_predicted_elapsed_time_ms(double ms) {
    _predicted_elapsed_time_ms = ms;
  }

  void set_predicted_bytes_to_copy(size_t bytes) {
    _predicted_bytes_to_copy = bytes;
  }

#define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)

  CompactibleSpace* next_compaction_space() const;

  virtual void reset_after_compaction();

  void print() const;
  void print_on(outputStream* st) const;

  // use_prev_marking == true  -> use "prev" marking information,
  // use_prev_marking == false -> use "next" marking information
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // use_prev_marking == true. Currently, there is only one case where
  // this is called with use_prev_marking == false, which is to verify
  // the "next" marking information at the end of remark.
  void verify(bool allow_dirty, bool use_prev_marking, bool *failures) const;

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty) const;
};

// HeapRegionClosure is used for iterating over regions.
// Terminates the iteration when the "doHeapRegion" method returns "true".
class HeapRegionClosure : public StackObj {
  friend class HeapRegionSeq;
  friend class G1CollectedHeap;

  bool _complete;
  void incomplete() { _complete = false; }

 public:
  HeapRegionClosure(): _complete(true) {}

  // Typically called on each region until it returns true.
  virtual bool doHeapRegion(HeapRegion* r) = 0;

  // True after iteration if the closure was applied to all heap regions
  // and returned "false" in all cases.
  bool complete() { return _complete; }
};
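
// A usage sketch (hedged: this closure is illustrative, not taken from
// the G1 sources; closures like it are typically driven via
// G1CollectedHeap::heap_region_iterate()):
//
//   class CountHumongousClosure : public HeapRegionClosure {
//     size_t _count;
//   public:
//     CountHumongousClosure() : _count(0) {}
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->isHumongous()) _count += 1;
//       return false;  // "false" means: keep iterating
//     }
//     size_t count() { return _count; }
//   };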

#endif // SERIALGC

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
