src/share/vm/gc_implementation/g1/heapRegion.hpp

author      johnc
date        Tue, 21 Aug 2012 14:10:39 -0700
changeset   3998:7383557659bd
parent      3957:a2f7274eb6ef
child       4037:da91efe96a93
permissions -rw-r--r--

7185699: G1: Prediction model discrepancies
Summary: Correct the result value of G1CollectedHeap::pending_card_num(). Change the code that calculates the GC efficiency of a non-young heap region to use historical data from mixed GCs and the actual number of live bytes when predicting how long it would take to collect the region. Changes were also reviewed by Thomas Schatzl.
Reviewed-by: azeemj, brutisso

ysr@777 1 /*
johnc@3412 2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
stefank@2314 29 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
stefank@2314 30 #include "gc_implementation/g1/survRateGroup.hpp"
stefank@2314 31 #include "gc_implementation/shared/ageTable.hpp"
stefank@2314 32 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 33 #include "memory/space.inline.hpp"
stefank@2314 34 #include "memory/watermark.hpp"
stefank@2314 35
ysr@777 36 #ifndef SERIALGC
ysr@777 37
ysr@777 38 // A HeapRegion is the smallest piece of a G1CollectedHeap that
ysr@777 39 // can be collected independently.
ysr@777 40
ysr@777 41 // NOTE: Although a HeapRegion is a Space, its
ysr@777 42 // Space::initDirtyCardClosure method must not be called.
ysr@777 43 // The problem is that the existence of this method breaks
ysr@777 44 // the independence of barrier sets from remembered sets.
ysr@777 45 // The solution is to remove this method from the definition
ysr@777 46 // of a Space.
ysr@777 47
ysr@777 48 class CompactibleSpace;
ysr@777 49 class ContiguousSpace;
ysr@777 50 class HeapRegionRemSet;
ysr@777 51 class HeapRegionRemSetIterator;
ysr@777 52 class HeapRegion;
tonyp@2472 53 class HeapRegionSetBase;
tonyp@2472 54
tonyp@3713 55 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
tonyp@2963 56 #define HR_FORMAT_PARAMS(_hr_) \
tonyp@2963 57 (_hr_)->hrs_index(), \
tonyp@3957 58 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
tonyp@3957 59 (_hr_)->startsHumongous() ? "HS" : \
tonyp@3957 60 (_hr_)->continuesHumongous() ? "HC" : \
tonyp@3957 61 !(_hr_)->is_empty() ? "O" : "F", \
tonyp@2963 62 (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
ysr@777 63
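// Illustrative usage sketch (not part of the original header): the two
// macros above are meant to be used together in a format string, e.g.
// with a HeapRegion* hr ("hr" is an assumed local here):
//
//   gclog_or_tty->print_cr("region "HR_FORMAT, HR_FORMAT_PARAMS(hr));
//
// which prints the region's index, a type tag (S, E, HS, HC, O or F)
// and its bottom/top/end addresses.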
tonyp@3713 64 // sentinel value for hrs_index
tonyp@3713 65 #define G1_NULL_HRS_INDEX ((uint) -1)
tonyp@3713 66
ysr@777 67 // A dirty card to oop closure for heap regions. It
ysr@777 68 // knows how to get the G1 heap and how to use the bitmap
ysr@777 69 // in the concurrent marker used by G1 to filter remembered
ysr@777 70 // sets.
ysr@777 71
ysr@777 72 class HeapRegionDCTOC : public ContiguousSpaceDCTOC {
ysr@777 73 public:
ysr@777 74 // Specification of possible DirtyCardToOopClosure filtering.
ysr@777 75 enum FilterKind {
ysr@777 76 NoFilterKind,
ysr@777 77 IntoCSFilterKind,
ysr@777 78 OutOfRegionFilterKind
ysr@777 79 };
ysr@777 80
ysr@777 81 protected:
ysr@777 82 HeapRegion* _hr;
ysr@777 83 FilterKind _fk;
ysr@777 84 G1CollectedHeap* _g1;
ysr@777 85
ysr@777 86 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 87 HeapWord* bottom, HeapWord* top,
ysr@777 88 OopClosure* cl);
ysr@777 89
ysr@777 90 // We don't specialize this for FilteringClosure; filtering is handled by
ysr@777 91 // the "FilterKind" mechanism. But we provide this to avoid a compiler
ysr@777 92 // warning.
ysr@777 93 void walk_mem_region_with_cl(MemRegion mr,
ysr@777 94 HeapWord* bottom, HeapWord* top,
ysr@777 95 FilteringClosure* cl) {
ysr@777 96 HeapRegionDCTOC::walk_mem_region_with_cl(mr, bottom, top,
ysr@777 97 (OopClosure*)cl);
ysr@777 98 }
ysr@777 99
ysr@777 100 // Get the actual top of the area on which the closure will
ysr@777 101 // operate, given where the top is assumed to be (the end of the
ysr@777 102 // memory region passed to do_MemRegion) and where the object
ysr@777 103 // at the top is assumed to start. For example, an object may
ysr@777 104 // start at the top but actually extend past the assumed top,
ysr@777 105 // in which case the top becomes the end of the object.
ysr@777 106 HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj) {
ysr@777 107 return ContiguousSpaceDCTOC::get_actual_top(top, top_obj);
ysr@777 108 }
ysr@777 109
ysr@777 110 // Walk the given memory region from bottom to (actual) top
ysr@777 111 // looking for objects and applying the oop closure (_cl) to
ysr@777 112 // them. The base implementation of this treats the area as
ysr@777 113 // blocks, where a block may or may not be an object. Sub-
ysr@777 114 // classes should override this to provide more accurate
ysr@777 115 // or possibly more efficient walking.
ysr@777 116 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top) {
ysr@777 117 Filtering_DCTOC::walk_mem_region(mr, bottom, top);
ysr@777 118 }
ysr@777 119
ysr@777 120 public:
ysr@777 121 HeapRegionDCTOC(G1CollectedHeap* g1,
ysr@777 122 HeapRegion* hr, OopClosure* cl,
ysr@777 123 CardTableModRefBS::PrecisionStyle precision,
ysr@777 124 FilterKind fk);
ysr@777 125 };
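// Illustrative construction sketch (an assumption about typical use, not
// code from this file; g1h, hr, oop_cl and dirty_mr are assumed locals):
//
//   HeapRegionDCTOC dctoc(g1h, hr, &oop_cl,
//                         CardTableModRefBS::Precise,
//                         HeapRegionDCTOC::OutOfRegionFilterKind);
//   dctoc.do_MemRegion(dirty_mr);
//
// With OutOfRegionFilterKind, the wrapped closure is applied only to
// references whose target lies outside the region being scanned.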
ysr@777 126
ysr@777 127 // The complicating factor is that BlockOffsetTable diverged
ysr@777 128 // significantly, and we need functionality that is only in the G1 version.
ysr@777 129 // So I copied that code, which led to an alternate G1 version of
ysr@777 130 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
ysr@777 131 // be reconciled, then G1OffsetTableContigSpace could go away.
ysr@777 132
ysr@777 133 // The idea behind time stamps is the following. Doing a save_marks on
ysr@777 134 // all regions at every GC pause is time consuming (if I remember
ysr@777 135 // correctly, 10ms or so). So, we would like to do that only for regions
ysr@777 136 // that are GC alloc regions. To achieve this, we use time
ysr@777 137 // stamps. For every evacuation pause, G1CollectedHeap generates a
ysr@777 138 // unique time stamp (essentially a counter that gets
ysr@777 139 // incremented). Every time we want to call save_marks on a region,
ysr@777 140 // we set the saved_mark_word to top and also copy the current GC
ysr@777 141 // time stamp to the time stamp field of the space. Reading the
ysr@777 142 // saved_mark_word involves checking the time stamp of the
ysr@777 143 // region. If it is the same as the current GC time stamp, then we
ysr@777 144 // can safely read the saved_mark_word field, as it is valid. If the
ysr@777 145 // time stamp of the region is not the same as the current GC time
ysr@777 146 // stamp, then we instead read top, as the saved_mark_word field is
ysr@777 147 // invalid. Time stamps (on the regions and also on the
ysr@777 148 // G1CollectedHeap) are reset at every cleanup (we iterate over
ysr@777 149 // the regions anyway) and at the end of a Full GC. The current scheme
ysr@777 150 // that uses sequential unsigned ints will fail only if we have 4b
ysr@777 151 // evacuation pauses between two cleanups, which is _highly_ unlikely.
ysr@777 152
ysr@777 153 class G1OffsetTableContigSpace: public ContiguousSpace {
ysr@777 154 friend class VMStructs;
ysr@777 155 protected:
ysr@777 156 G1BlockOffsetArrayContigSpace _offsets;
ysr@777 157 Mutex _par_alloc_lock;
ysr@777 158 volatile unsigned _gc_time_stamp;
tonyp@2715 159 // When we need to retire an allocation region, while other threads
tonyp@2715 160 // are also concurrently trying to allocate into it, we typically
tonyp@2715 161 // allocate a dummy object at the end of the region to ensure that
tonyp@2715 162 // no more allocations can take place in it. However, sometimes we
tonyp@2715 163 // want to know where the end of the last "real" object we allocated
tonyp@2715 164 // into the region was, and that is what this field keeps track of.
tonyp@2715 165 HeapWord* _pre_dummy_top;
ysr@777 166
ysr@777 167 public:
ysr@777 168 // Constructor. If "is_zeroed" is true, the MemRegion "mr" may be
ysr@777 169 // assumed to contain zeros.
ysr@777 170 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 171 MemRegion mr, bool is_zeroed = false);
ysr@777 172
ysr@777 173 void set_bottom(HeapWord* value);
ysr@777 174 void set_end(HeapWord* value);
ysr@777 175
ysr@777 176 virtual HeapWord* saved_mark_word() const;
ysr@777 177 virtual void set_saved_mark();
ysr@777 178 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
tonyp@3957 179 unsigned get_gc_time_stamp() { return _gc_time_stamp; }
ysr@777 180
tonyp@2715 181 // See the comment above in the declaration of _pre_dummy_top for an
tonyp@2715 182 // explanation of what it is.
tonyp@2715 183 void set_pre_dummy_top(HeapWord* pre_dummy_top) {
tonyp@2715 184 assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
tonyp@2715 185 _pre_dummy_top = pre_dummy_top;
tonyp@2715 186 }
tonyp@2715 187 HeapWord* pre_dummy_top() {
tonyp@2715 188 return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
tonyp@2715 189 }
tonyp@2715 190 void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
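// Illustrative sketch of the intended use (an assumption about the
// retirement path, which lives in the allocation region code, not in
// this file; "cur" is an assumed local): before the unused tail of a
// region being retired is filled with a dummy object, the allocator
// records where the real allocations ended:
//
//   cur->set_pre_dummy_top(cur->top()); // end of last "real" object
//   // ... then allocate a filler object over the remaining space ...
//
// so that pre_dummy_top(), rather than top(), gives the boundary of
// application-allocated data.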
tonyp@2715 191
tonyp@791 192 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tonyp@791 193 virtual void clear(bool mangle_space);
ysr@777 194
ysr@777 195 HeapWord* block_start(const void* p);
ysr@777 196 HeapWord* block_start_const(const void* p) const;
ysr@777 197
ysr@777 198 // Add offset table update.
ysr@777 199 virtual HeapWord* allocate(size_t word_size);
ysr@777 200 HeapWord* par_allocate(size_t word_size);
ysr@777 201
ysr@777 202 // MarkSweep support phase3
ysr@777 203 virtual HeapWord* initialize_threshold();
ysr@777 204 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
ysr@777 205
ysr@777 206 virtual void print() const;
tonyp@2453 207
tonyp@2453 208 void reset_bot() {
tonyp@2453 209 _offsets.zero_bottom_entry();
tonyp@2453 210 _offsets.initialize_threshold();
tonyp@2453 211 }
tonyp@2453 212
tonyp@2453 213 void update_bot_for_object(HeapWord* start, size_t word_size) {
tonyp@2453 214 _offsets.alloc_block(start, word_size);
tonyp@2453 215 }
tonyp@2453 216
tonyp@2453 217 void print_bot_on(outputStream* out) {
tonyp@2453 218 _offsets.print_on(out);
tonyp@2453 219 }
ysr@777 220 };
ysr@777 221
ysr@777 222 class HeapRegion: public G1OffsetTableContigSpace {
ysr@777 223 friend class VMStructs;
ysr@777 224 private:
ysr@777 225
tonyp@790 226 enum HumongousType {
tonyp@790 227 NotHumongous = 0,
tonyp@790 228 StartsHumongous,
tonyp@790 229 ContinuesHumongous
tonyp@790 230 };
tonyp@790 231
ysr@777 232 // Requires that the region "mr" be dense with objects, and begin and end
ysr@777 233 // with an object.
ysr@777 234 void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
ysr@777 235
ysr@777 236 // The remembered set for this region.
ysr@777 237 // (Might want to make this "inline" later, to avoid some alloc failure
ysr@777 238 // issues.)
ysr@777 239 HeapRegionRemSet* _rem_set;
ysr@777 240
ysr@777 241 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
ysr@777 242
ysr@777 243 protected:
tonyp@2963 244 // The index of this region in the heap region sequence.
tonyp@3713 245 uint _hrs_index;
ysr@777 246
tonyp@790 247 HumongousType _humongous_type;
ysr@777 248 // For a humongous region, the region in which it starts.
ysr@777 249 HeapRegion* _humongous_start_region;
ysr@777 250 // For the start region of a humongous sequence, its original end().
ysr@777 251 HeapWord* _orig_end;
ysr@777 252
ysr@777 253 // True iff the region is in current collection_set.
ysr@777 254 bool _in_collection_set;
ysr@777 255
ysr@777 256 // True iff an attempt to evacuate an object in the region failed.
ysr@777 257 bool _evacuation_failed;
ysr@777 258
ysr@777 259 // A heap region may be a member of one of a number of special subsets, each
ysr@777 260 // represented as linked lists through the field below. Currently, these
ysr@777 261 // sets include:
ysr@777 262 // The collection set.
ysr@777 263 // The set of allocation regions used in a collection pause.
ysr@777 264 // Spaces that may contain gray objects.
ysr@777 265 HeapRegion* _next_in_special_set;
ysr@777 266
ysr@777 267 // next region in the young "generation" region set
ysr@777 268 HeapRegion* _next_young_region;
ysr@777 269
apetrusenko@1231 270 // Next region whose cards need cleaning
apetrusenko@1231 271 HeapRegion* _next_dirty_cards_region;
apetrusenko@1231 272
tonyp@2472 273 // Fields used by the HeapRegionSetBase class and subclasses.
tonyp@2472 274 HeapRegion* _next;
tonyp@2472 275 #ifdef ASSERT
tonyp@2472 276 HeapRegionSetBase* _containing_set;
tonyp@2472 277 #endif // ASSERT
tonyp@2472 278 bool _pending_removal;
tonyp@2472 279
ysr@777 280 // For parallel heapRegion traversal.
ysr@777 281 jint _claimed;
ysr@777 282
ysr@777 283 // We use concurrent marking to determine the amount of live data
ysr@777 284 // in each heap region.
ysr@777 285 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
ysr@777 286 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
ysr@777 287
tonyp@3714 288 // The calculated GC efficiency of the region.
ysr@777 289 double _gc_efficiency;
ysr@777 290
ysr@777 291 enum YoungType {
ysr@777 292 NotYoung, // a region is not young
ysr@777 293 Young, // a region is young
tonyp@2963 294 Survivor // a region is young and it contains survivors
ysr@777 295 };
ysr@777 296
johnc@2021 297 volatile YoungType _young_type;
ysr@777 298 int _young_index_in_cset;
ysr@777 299 SurvRateGroup* _surv_rate_group;
ysr@777 300 int _age_index;
ysr@777 301
ysr@777 302 // The start of the unmarked area. The unmarked area extends from this
ysr@777 303 // word until the top and/or end of the region, and is the part
ysr@777 304 // of the region for which no marking was done, i.e. objects may
ysr@777 305 // have been allocated in this part since the last mark phase.
ysr@777 306 // "prev" is the top at the start of the last completed marking.
ysr@777 307 // "next" is the top at the start of the in-progress marking (if any).
ysr@777 308 HeapWord* _prev_top_at_mark_start;
ysr@777 309 HeapWord* _next_top_at_mark_start;
ysr@777 310 // If a collection pause is in progress, this is the top at the start
ysr@777 311 // of that pause.
ysr@777 312
ysr@777 313 void init_top_at_mark_start() {
ysr@777 314 assert(_prev_marked_bytes == 0 &&
ysr@777 315 _next_marked_bytes == 0,
ysr@777 316 "Must be called after zero_marked_bytes.");
ysr@777 317 HeapWord* bot = bottom();
ysr@777 318 _prev_top_at_mark_start = bot;
ysr@777 319 _next_top_at_mark_start = bot;
ysr@777 320 }
ysr@777 321
ysr@777 322 void set_young_type(YoungType new_type) {
ysr@777 323 //assert(_young_type != new_type, "setting the same type" );
ysr@777 324 // TODO: add more assertions here
ysr@777 325 _young_type = new_type;
ysr@777 326 }
ysr@777 327
johnc@1829 328 // Cached attributes used in the collection set policy information
johnc@1829 329
johnc@1829 330 // The RSet length that was added to the total value
johnc@1829 331 // for the collection set.
johnc@1829 332 size_t _recorded_rs_length;
johnc@1829 333
johnc@1829 334 // The predicted elapsed time that was added to total value
johnc@1829 335 // for the collection set.
johnc@1829 336 double _predicted_elapsed_time_ms;
johnc@1829 337
johnc@1829 338 // The predicted number of bytes to copy that was added to
johnc@1829 339 // the total value for the collection set.
johnc@1829 340 size_t _predicted_bytes_to_copy;
johnc@1829 341
ysr@777 342 public:
ysr@777 343 // If "is_zeroed" is "true", the region "mr" can be assumed to contain zeros.
tonyp@3713 344 HeapRegion(uint hrs_index,
tonyp@2963 345 G1BlockOffsetSharedArray* sharedOffsetArray,
ysr@777 346 MemRegion mr, bool is_zeroed);
ysr@777 347
johnc@3182 348 static int LogOfHRGrainBytes;
johnc@3182 349 static int LogOfHRGrainWords;
johnc@3182 350
johnc@3182 351 static size_t GrainBytes;
johnc@3182 352 static size_t GrainWords;
johnc@3182 353 static size_t CardsPerRegion;
tonyp@1377 354
tonyp@3176 355 static size_t align_up_to_region_byte_size(size_t sz) {
tonyp@3176 356 return (sz + (size_t) GrainBytes - 1) &
tonyp@3176 357 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
tonyp@3176 358 }
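// Worked example (illustrative): with 1 MB regions, GrainBytes is
// 0x100000 and LogOfHRGrainBytes is 20, so
//   align_up_to_region_byte_size(1)        == 0x100000
//   align_up_to_region_byte_size(0x100000) == 0x100000
//   align_up_to_region_byte_size(0x100001) == 0x200000
// i.e. a size is rounded up to the next region-size boundary.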
tonyp@3176 359
tonyp@1377 360 // It sets up the heap region size (GrainBytes / GrainWords), as
tonyp@1377 361 // well as other related fields that are based on the heap region
tonyp@1377 362 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
tonyp@1377 363 // CardsPerRegion). All those fields are considered constant
tonyp@1377 364 // throughout the JVM's execution, therefore they should only be set
tonyp@1377 365 // up once during initialization time.
tonyp@1377 366 static void setup_heap_region_size(uintx min_heap_size);
ysr@777 367
tonyp@790 368 enum ClaimValues {
johnc@3296 369 InitialClaimValue = 0,
johnc@3296 370 FinalCountClaimValue = 1,
johnc@3296 371 NoteEndClaimValue = 2,
johnc@3296 372 ScrubRemSetClaimValue = 3,
johnc@3296 373 ParVerifyClaimValue = 4,
johnc@3296 374 RebuildRSClaimValue = 5,
tonyp@3691 375 ParEvacFailureClaimValue = 6,
tonyp@3691 376 AggregateCountClaimValue = 7,
tonyp@3691 377 VerifyCountClaimValue = 8
tonyp@790 378 };
tonyp@790 379
tonyp@2454 380 inline HeapWord* par_allocate_no_bot_updates(size_t word_size) {
tonyp@2454 381 assert(is_young(), "we can only skip BOT updates on young regions");
tonyp@2454 382 return ContiguousSpace::par_allocate(word_size);
tonyp@2454 383 }
tonyp@2454 384 inline HeapWord* allocate_no_bot_updates(size_t word_size) {
tonyp@2454 385 assert(is_young(), "we can only skip BOT updates on young regions");
tonyp@2454 386 return ContiguousSpace::allocate(word_size);
tonyp@2454 387 }
tonyp@2454 388
ysr@777 389 // If this region is a member of a HeapRegionSeq, the index in that
ysr@777 390 // sequence, otherwise G1_NULL_HRS_INDEX.
tonyp@3713 391 uint hrs_index() const { return _hrs_index; }
ysr@777 392
ysr@777 393 // The number of bytes marked live in the region in the last marking phase.
ysr@777 394 size_t marked_bytes() { return _prev_marked_bytes; }
tonyp@2717 395 size_t live_bytes() {
tonyp@2717 396 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
tonyp@2717 397 }
tonyp@2717 398
ysr@777 399 // The number of bytes counted in the next marking.
ysr@777 400 size_t next_marked_bytes() { return _next_marked_bytes; }
ysr@777 401 // The number of bytes live with respect to the next marking.
ysr@777 402 size_t next_live_bytes() {
tonyp@2717 403 return
tonyp@2717 404 (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
ysr@777 405 }
ysr@777 406
ysr@777 407 // A lower bound on the amount of garbage bytes in the region.
ysr@777 408 size_t garbage_bytes() {
ysr@777 409 size_t used_at_mark_start_bytes =
ysr@777 410 (prev_top_at_mark_start() - bottom()) * HeapWordSize;
ysr@777 411 assert(used_at_mark_start_bytes >= marked_bytes(),
ysr@777 412 "Can't mark more than we have.");
ysr@777 413 return used_at_mark_start_bytes - marked_bytes();
ysr@777 414 }
ysr@777 415
tonyp@3539 416 // Return the number of bytes we'll reclaim if we collect this
tonyp@3539 417 // region. This includes not only the known garbage bytes in the
tonyp@3539 418 // region but also any unallocated space in it, i.e., [top, end),
tonyp@3539 419 // since it will also be reclaimed if we collect the region.
tonyp@3539 420 size_t reclaimable_bytes() {
tonyp@3539 421 size_t known_live_bytes = live_bytes();
tonyp@3539 422 assert(known_live_bytes <= capacity(), "sanity");
tonyp@3539 423 return capacity() - known_live_bytes;
tonyp@3539 424 }
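// Worked example (illustrative numbers): in a 1 MB region with
// live_bytes() == 300 KB, reclaimable_bytes() == 1024 KB - 300 KB
// == 724 KB; the unallocated tail [top, end) is counted as
// reclaimable along with the garbage.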
tonyp@3539 425
ysr@777 426 // An upper bound on the number of live bytes in the region.
ysr@777 427 size_t max_live_bytes() { return used() - garbage_bytes(); }
ysr@777 428
ysr@777 429 void add_to_marked_bytes(size_t incr_bytes) {
ysr@777 430 _next_marked_bytes = _next_marked_bytes + incr_bytes;
johnc@3292 431 assert(_next_marked_bytes <= used(), "invariant" );
ysr@777 432 }
ysr@777 433
ysr@777 434 void zero_marked_bytes() {
ysr@777 435 _prev_marked_bytes = _next_marked_bytes = 0;
ysr@777 436 }
ysr@777 437
tonyp@790 438 bool isHumongous() const { return _humongous_type != NotHumongous; }
tonyp@790 439 bool startsHumongous() const { return _humongous_type == StartsHumongous; }
tonyp@790 440 bool continuesHumongous() const { return _humongous_type == ContinuesHumongous; }
ysr@777 441 // For a humongous region, the region in which it starts.
ysr@777 442 HeapRegion* humongous_start_region() const {
ysr@777 443 return _humongous_start_region;
ysr@777 444 }
ysr@777 445
tonyp@3957 446 // Return the number of distinct regions that are covered by this region:
tonyp@3957 447 // 1 if the region is not humongous, >= 1 if the region is humongous.
tonyp@3957 448 uint region_num() const {
tonyp@3957 449 if (!isHumongous()) {
tonyp@3957 450 return 1U;
tonyp@3957 451 } else {
tonyp@3957 452 assert(startsHumongous(), "doesn't make sense on HC regions");
tonyp@3957 453 assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
tonyp@3957 454 return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
tonyp@3957 455 }
tonyp@3957 456 }
tonyp@3957 457
tonyp@3957 458 // Return the index + 1 of the last HC region that's associated
tonyp@3957 459 // with this HS region.
tonyp@3957 460 uint last_hc_index() const {
tonyp@3957 461 assert(startsHumongous(), "don't call this otherwise");
tonyp@3957 462 return hrs_index() + region_num();
tonyp@3957 463 }
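// Worked example (illustrative): with 1 MB regions, a 5 MB humongous
// object occupies one HS region followed by four HC regions. On the HS
// region region_num() returns 5, and if hrs_index() is 10 then
// last_hc_index() returns 15.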
tonyp@3957 464
brutisso@3216 465 // Same as Space::is_in_reserved, but will use the original size of the region.
brutisso@3216 466 // The original size is different only for starts humongous regions. They get
brutisso@3216 467 // their _end set up to be the end of the last continues humongous region of the
brutisso@3216 468 // corresponding humongous object.
brutisso@3216 469 bool is_in_reserved_raw(const void* p) const {
brutisso@3216 470 return _bottom <= p && p < _orig_end;
brutisso@3216 471 }
brutisso@3216 472
tonyp@2453 473 // Makes the current region be a "starts humongous" region, i.e.,
tonyp@2453 474 // the first region in a series of one or more contiguous regions
tonyp@2453 475 // that will contain a single "humongous" object. The two parameters
tonyp@2453 476 // are as follows:
tonyp@2453 477 //
tonyp@2453 478 // new_top : The new value of the top field of this region which
tonyp@2453 479 // points to the end of the humongous object that's being
tonyp@2453 480 // allocated. If there is more than one region in the series, top
tonyp@2453 481 // will lie beyond this region's original end field and on the last
tonyp@2453 482 // region in the series.
tonyp@2453 483 //
tonyp@2453 484 // new_end : The new value of the end field of this region which
tonyp@2453 485 // points to the end of the last region in the series. If there is
tonyp@2453 486 // one region in the series (namely: this one) end will be the same
tonyp@2453 487 // as the original end of this region.
tonyp@2453 488 //
tonyp@2453 489 // Updating top and end as described above makes this region look as
tonyp@2453 490 // if it spans the entire space taken up by all the regions in the
tonyp@2453 491 // series and a single allocation moved its top to new_top. This
tonyp@2453 492 // ensures that the space (capacity / allocated) taken up by all
tonyp@2453 493 // humongous regions can be calculated by just looking at the
tonyp@2453 494 // "starts humongous" regions and by ignoring the "continues
tonyp@2453 495 // humongous" regions.
tonyp@2453 496 void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
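// Worked example (illustrative, with 1 MB regions): a 2.5 MB humongous
// object allocated at the bottom of region i spans regions i, i+1 and
// i+2. set_startsHumongous() is called on region i with new_end equal
// to the end of region i+2 and new_top equal to bottom() + 2.5 MB (a
// point inside region i+2). Region i then reports capacity() == 3 MB
// and used() == 2.5 MB for the entire series.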
ysr@777 497
tonyp@2453 498 // Makes the current region be a "continues humongous"
tonyp@2453 499 // region. first_hr is the "start humongous" region of the series
tonyp@2453 500 // which this region will be part of.
tonyp@2453 501 void set_continuesHumongous(HeapRegion* first_hr);
ysr@777 502
tonyp@2472 503 // Unsets the humongous-related fields on the region.
tonyp@2472 504 void set_notHumongous();
tonyp@2472 505
ysr@777 506 // If the region has a remembered set, return a pointer to it.
ysr@777 507 HeapRegionRemSet* rem_set() const {
ysr@777 508 return _rem_set;
ysr@777 509 }
ysr@777 510
ysr@777 511 // True iff the region is in current collection_set.
ysr@777 512 bool in_collection_set() const {
ysr@777 513 return _in_collection_set;
ysr@777 514 }
ysr@777 515 void set_in_collection_set(bool b) {
ysr@777 516 _in_collection_set = b;
ysr@777 517 }
ysr@777 518 HeapRegion* next_in_collection_set() {
ysr@777 519 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 520 assert(_next_in_special_set == NULL ||
ysr@777 521 _next_in_special_set->in_collection_set(),
ysr@777 522 "Malformed CS.");
ysr@777 523 return _next_in_special_set;
ysr@777 524 }
ysr@777 525 void set_next_in_collection_set(HeapRegion* r) {
ysr@777 526 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 527 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
ysr@777 528 _next_in_special_set = r;
ysr@777 529 }
ysr@777 530
tonyp@2472 531 // Methods used by the HeapRegionSetBase class and subclasses.
tonyp@2472 532
tonyp@2472 533 // Getter and setter for the next field used to link regions into
tonyp@2472 534 // linked lists.
tonyp@2472 535 HeapRegion* next() { return _next; }
tonyp@2472 536
tonyp@2472 537 void set_next(HeapRegion* next) { _next = next; }
tonyp@2472 538
tonyp@2472 539 // Every region added to a set is tagged with a reference to that
tonyp@2472 540 // set. This is used for doing consistency checking to make sure that
tonyp@2472 541 // the contents of a set are as they should be and it's only
tonyp@2472 542 // available in non-product builds.
tonyp@2472 543 #ifdef ASSERT
tonyp@2472 544 void set_containing_set(HeapRegionSetBase* containing_set) {
tonyp@2472 545 assert((containing_set == NULL && _containing_set != NULL) ||
tonyp@2472 546 (containing_set != NULL && _containing_set == NULL),
tonyp@2472 547 err_msg("containing_set: "PTR_FORMAT" "
tonyp@2472 548 "_containing_set: "PTR_FORMAT,
tonyp@2472 549 containing_set, _containing_set));
tonyp@2472 550
tonyp@2472 551 _containing_set = containing_set;
tonyp@2643 552 }
tonyp@2472 553
tonyp@2472 554 HeapRegionSetBase* containing_set() { return _containing_set; }
tonyp@2472 555 #else // ASSERT
tonyp@2472 556 void set_containing_set(HeapRegionSetBase* containing_set) { }
tonyp@2472 557
tonyp@2643 558 // containing_set() is only used in asserts so there's no reason
tonyp@2472 559 // to provide a dummy version of it.
tonyp@2472 560 #endif // ASSERT
tonyp@2472 561
tonyp@2472 562 // If we want to remove regions from a list in bulk we can simply tag
tonyp@2472 563 // them with the pending_removal tag and call the
tonyp@2472 564 // remove_all_pending() method on the list.
tonyp@2472 565
tonyp@2472 566 bool pending_removal() { return _pending_removal; }
tonyp@2472 567
tonyp@2472 568 void set_pending_removal(bool pending_removal) {
tonyp@2643 569 if (pending_removal) {
tonyp@2643 570 assert(!_pending_removal && containing_set() != NULL,
tonyp@2643 571 "can only set pending removal to true if it's false and "
tonyp@2643 572 "the region belongs to a region set");
tonyp@2643 573 } else {
tonyp@2643 574 assert( _pending_removal && containing_set() == NULL,
tonyp@2643 575 "can only set pending removal to false if it's true and "
tonyp@2643 576 "the region does not belong to a region set");
tonyp@2643 577 }
tonyp@2472 578
tonyp@2472 579 _pending_removal = pending_removal;
ysr@777 580 }
ysr@777 581
ysr@777 582 HeapRegion* get_next_young_region() { return _next_young_region; }
ysr@777 583 void set_next_young_region(HeapRegion* hr) {
ysr@777 584 _next_young_region = hr;
ysr@777 585 }
ysr@777 586
apetrusenko@1231 587 HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
apetrusenko@1231 588 HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
apetrusenko@1231 589 void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
apetrusenko@1231 590 bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
apetrusenko@1231 591
tonyp@2963 592 HeapWord* orig_end() { return _orig_end; }
tonyp@2963 593
ysr@777 594 // Allows logical separation between objects allocated before and after save_marks() was last called.
ysr@777 595 void save_marks();
ysr@777 596
ysr@777 597 // Reset HR stuff to default values.
ysr@777 598 void hr_clear(bool par, bool clear_space);
tonyp@2849 599 void par_clear();
ysr@777 600
tonyp@791 601 void initialize(MemRegion mr, bool clear_space, bool mangle_space);
ysr@777 602
ysr@777 603 // Get the start of the unmarked area in this region.
ysr@777 604 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
ysr@777 605 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
ysr@777 606
ysr@777 607 // Apply "cl->do_oop" to (the addresses of) all reference fields in objects
ysr@777 608 // allocated in the current region before the last call to "save_mark".
ysr@777 609 void oop_before_save_marks_iterate(OopClosure* cl);
ysr@777 610
ysr@777 611 // Note the start or end of marking. This tells the heap region
ysr@777 612 // that the collector is about to start or has finished (concurrently)
ysr@777 613 // marking the heap.
ysr@777 614
tonyp@3416 615 // Notify the region that concurrent marking is starting. Initialize
tonyp@3416 616 // all fields related to the next marking info.
tonyp@3416 617 inline void note_start_of_marking();
ysr@777 618
tonyp@3416 619 // Notify the region that concurrent marking has finished. Copy the
tonyp@3416 620 // (now finalized) next marking info fields into the prev marking
tonyp@3416 621 // info fields.
tonyp@3416 622 inline void note_end_of_marking();
ysr@777 623
tonyp@3416 624 // Notify the region that it will be used as to-space during a GC
tonyp@3416 625 // and we are about to start copying objects into it.
tonyp@3416 626 inline void note_start_of_copying(bool during_initial_mark);
ysr@777 627
tonyp@3416 628 // Notify the region that it ceases being to-space during a GC and
tonyp@3416 629 // we will not copy objects into it any more.
tonyp@3416 630 inline void note_end_of_copying(bool during_initial_mark);
tonyp@3416 631
tonyp@3416 632 // Notify the region that we are about to start processing
tonyp@3416 633 // self-forwarded objects during evac failure handling.
tonyp@3416 634 void note_self_forwarding_removal_start(bool during_initial_mark,
tonyp@3416 635 bool during_conc_mark);
tonyp@3416 636
tonyp@3416 637 // Notify the region that we have finished processing self-forwarded
tonyp@3416 638 // objects during evac failure handling.
tonyp@3416 639 void note_self_forwarding_removal_end(bool during_initial_mark,
tonyp@3416 640 bool during_conc_mark,
tonyp@3416 641 size_t marked_bytes);
ysr@777 642
ysr@777 643 // Returns "false" iff no object in the region was allocated when the
ysr@777 644 // last mark phase ended.
ysr@777 645 bool is_marked() { return _prev_top_at_mark_start != bottom(); }
ysr@777 646
ysr@777 647 void reset_during_compaction() {
tonyp@3957 648 assert(isHumongous() && startsHumongous(),
tonyp@3957 649 "should only be called for starts humongous regions");
ysr@777 650
ysr@777 651 zero_marked_bytes();
ysr@777 652 init_top_at_mark_start();
ysr@777 653 }
ysr@777 654
ysr@777 655 void calc_gc_efficiency(void);
ysr@777 656 double gc_efficiency() { return _gc_efficiency;}
ysr@777 657
ysr@777 658 bool is_young() const { return _young_type != NotYoung; }
ysr@777 659 bool is_survivor() const { return _young_type == Survivor; }
ysr@777 660
ysr@777 661 int young_index_in_cset() const { return _young_index_in_cset; }
ysr@777 662 void set_young_index_in_cset(int index) {
ysr@777 663 assert( (index == -1) || is_young(), "pre-condition" );
ysr@777 664 _young_index_in_cset = index;
ysr@777 665 }
ysr@777 666
ysr@777 667 int age_in_surv_rate_group() {
ysr@777 668 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 669 assert( _age_index > -1, "pre-condition" );
ysr@777 670 return _surv_rate_group->age_in_group(_age_index);
ysr@777 671 }
ysr@777 672
ysr@777 673 void record_surv_words_in_group(size_t words_survived) {
ysr@777 674 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 675 assert( _age_index > -1, "pre-condition" );
ysr@777 676 int age_in_group = age_in_surv_rate_group();
ysr@777 677 _surv_rate_group->record_surviving_words(age_in_group, words_survived);
ysr@777 678 }
ysr@777 679
ysr@777 680 int age_in_surv_rate_group_cond() {
ysr@777 681 if (_surv_rate_group != NULL)
ysr@777 682 return age_in_surv_rate_group();
ysr@777 683 else
ysr@777 684 return -1;
ysr@777 685 }
ysr@777 686
ysr@777 687 SurvRateGroup* surv_rate_group() {
ysr@777 688 return _surv_rate_group;
ysr@777 689 }
ysr@777 690
ysr@777 691 void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
ysr@777 692 assert( surv_rate_group != NULL, "pre-condition" );
ysr@777 693 assert( _surv_rate_group == NULL, "pre-condition" );
ysr@777 694 assert( is_young(), "pre-condition" );
ysr@777 695
ysr@777 696 _surv_rate_group = surv_rate_group;
ysr@777 697 _age_index = surv_rate_group->next_age_index();
ysr@777 698 }
ysr@777 699
ysr@777 700 void uninstall_surv_rate_group() {
ysr@777 701 if (_surv_rate_group != NULL) {
ysr@777 702 assert( _age_index > -1, "pre-condition" );
ysr@777 703 assert( is_young(), "pre-condition" );
ysr@777 704
ysr@777 705 _surv_rate_group = NULL;
ysr@777 706 _age_index = -1;
ysr@777 707 } else {
ysr@777 708 assert( _age_index == -1, "pre-condition" );
ysr@777 709 }
ysr@777 710 }
ysr@777 711
ysr@777 712 void set_young() { set_young_type(Young); }
ysr@777 713
ysr@777 714 void set_survivor() { set_young_type(Survivor); }
ysr@777 715
ysr@777 716 void set_not_young() { set_young_type(NotYoung); }
ysr@777 717
ysr@777 718 // Determine if an object has been allocated since the last
ysr@777 719 // mark performed by the collector. This returns true iff the object
ysr@777 720 // is within the unmarked area of the region.
ysr@777 721 bool obj_allocated_since_prev_marking(oop obj) const {
ysr@777 722 return (HeapWord *) obj >= prev_top_at_mark_start();
ysr@777 723 }
ysr@777 724 bool obj_allocated_since_next_marking(oop obj) const {
ysr@777 725 return (HeapWord *) obj >= next_top_at_mark_start();
ysr@777 726 }
ysr@777 727
ysr@777 728 // For parallel heapRegion traversal.
ysr@777 729 bool claimHeapRegion(int claimValue);
ysr@777 730 jint claim_value() { return _claimed; }
ysr@777 731 // Use this carefully: only when you're sure no one is claiming...
ysr@777 732 void set_claim_value(int claimValue) { _claimed = claimValue; }
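// Illustrative sketch of the claim protocol (an assumption about the
// shape of the parallel iteration code, which is not in this file):
// each worker walks the region sequence and only processes regions it
// successfully claims:
//
//   if (r->claim_value() != claim_value && r->claimHeapRegion(claim_value)) {
//     blk->doHeapRegion(r);  // this worker now owns r for this phase
//   }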
ysr@777 733
ysr@777 734 // Returns the "evacuation_failed" property of the region.
ysr@777 735 bool evacuation_failed() { return _evacuation_failed; }
ysr@777 736
ysr@777 737 // Sets the "evacuation_failed" property of the region.
ysr@777 738 void set_evacuation_failed(bool b) {
ysr@777 739 _evacuation_failed = b;
ysr@777 740
ysr@777 741 if (b) {
ysr@777 742 _next_marked_bytes = 0;
ysr@777 743 }
ysr@777 744 }
ysr@777 745
ysr@777 746 // Requires that "mr" be entirely within the region.
ysr@777 747 // Apply "cl->do_object" to all objects that intersect with "mr".
ysr@777 748 // If the iteration encounters an unparseable portion of the region,
ysr@777 749 // or if "cl->abort()" is true after a closure application,
ysr@777 750 // terminate the iteration and return the address of the start of the
ysr@777 751 // subregion that isn't done. (The two can be distinguished by querying
ysr@777 752 // "cl->abort()".) Return of "NULL" indicates that the iteration
ysr@777 753 // completed.
ysr@777 754 HeapWord*
ysr@777 755 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
ysr@777 756
tonyp@2849 757 // filter_young: if true and the region is a young region then we
tonyp@2849 758 // skip the iteration.
tonyp@2849 759 // card_ptr: if not NULL, and we decide that the card is not young
tonyp@2849 760 // and we iterate over it, we'll clean the card before we start the
tonyp@2849 761 // iteration.
ysr@777 762 HeapWord*
ysr@777 763 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@2021 764 FilterOutOfRegionClosure* cl,
tonyp@2849 765 bool filter_young,
tonyp@2849 766 jbyte* card_ptr);
ysr@777 767
ysr@777 768 // A version of block start that is guaranteed to find *some* block
ysr@777 769 // boundary at or before "p", but does no object iteration, and may
ysr@777 770 // therefore be used safely when the heap is unparseable.
ysr@777 771 HeapWord* block_start_careful(const void* p) const {
ysr@777 772 return _offsets.block_start_careful(p);
ysr@777 773 }
ysr@777 774
ysr@777 775 // Requires that "addr" is within the region. Returns the start of the
ysr@777 776 // first ("careful") block that starts at or after "addr", or else the
ysr@777 777 // "end" of the region if there is no such block.
ysr@777 778 HeapWord* next_block_start_careful(HeapWord* addr);
ysr@777 779
johnc@1829 780 size_t recorded_rs_length() const { return _recorded_rs_length; }
johnc@1829 781 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
johnc@1829 782 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
johnc@1829 783
johnc@1829 784 void set_recorded_rs_length(size_t rs_length) {
johnc@1829 785 _recorded_rs_length = rs_length;
johnc@1829 786 }
johnc@1829 787
johnc@1829 788 void set_predicted_elapsed_time_ms(double ms) {
johnc@1829 789 _predicted_elapsed_time_ms = ms;
johnc@1829 790 }
johnc@1829 791
johnc@1829 792 void set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 793 _predicted_bytes_to_copy = bytes;
johnc@1829 794 }
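// Illustrative sketch of how these cached values are recorded when a
// region is added to the incremental collection set (an assumption
// about the policy code, which is not in this file; rs_length,
// predicted_time_ms and bytes_to_copy are assumed locals):
//
//   hr->set_recorded_rs_length(rs_length);
//   hr->set_predicted_elapsed_time_ms(predicted_time_ms);
//   hr->set_predicted_bytes_to_copy(bytes_to_copy);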
johnc@1829 795
ysr@777 796 #define HeapRegion_OOP_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
ysr@777 797 virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
ysr@777 798 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
ysr@777 799
tonyp@3957 800 virtual CompactibleSpace* next_compaction_space() const;
ysr@777 801
ysr@777 802 virtual void reset_after_compaction();
ysr@777 803
ysr@777 804 void print() const;
ysr@777 805 void print_on(outputStream* st) const;
ysr@777 806
johnc@2969 807 // vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 808 // vo == UseNextMarking -> use "next" marking information
johnc@2969 809 // vo == UseMarkWord -> use the mark word in the object header
johnc@2969 810 //
tonyp@1246 811 // NOTE: Only the "prev" marking information is guaranteed to be
tonyp@1246 812 // consistent most of the time, so most calls to this should use
johnc@2969 813 // vo == UsePrevMarking.
johnc@2969 814 // Currently, there is only one case where this is called with
johnc@2969 815 // vo == UseNextMarking, which is to verify the "next" marking
johnc@2969 816 // information at the end of remark.
johnc@2969 817 // Currently there is only one place where this is called with
johnc@2969 818 // vo == UseMarkWord, which is to verify the marking during a
johnc@2969 819 // full GC.
brutisso@3711 820 void verify(VerifyOption vo, bool *failures) const;
tonyp@1246 821
tonyp@1246 822 // Override; it uses the "prev" marking information
brutisso@3711 823 virtual void verify() const;
ysr@777 824 };
ysr@777 825
ysr@777 826 // HeapRegionClosure is used for iterating over regions.
ysr@777 827 // Terminates the iteration when the "doHeapRegion" method returns "true".
ysr@777 828 class HeapRegionClosure : public StackObj {
ysr@777 829 friend class HeapRegionSeq;
ysr@777 830 friend class G1CollectedHeap;
ysr@777 831
ysr@777 832 bool _complete;
ysr@777 833 void incomplete() { _complete = false; }
ysr@777 834
ysr@777 835 public:
ysr@777 836 HeapRegionClosure(): _complete(true) {}
ysr@777 837
ysr@777 838 // Typically called on each region until it returns true.
ysr@777 839 virtual bool doHeapRegion(HeapRegion* r) = 0;
ysr@777 840
ysr@777 841 // True after iteration if the closure was applied to all heap regions
ysr@777 842 // and returned "false" in all cases.
ysr@777 843 bool complete() { return _complete; }
ysr@777 844 };
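// Illustrative example of a HeapRegionClosure subclass (an assumption,
// not code from this file): counting regions on which evacuation
// failed, visiting every region:
//
//   class CountEvacFailureClosure : public HeapRegionClosure {
//     uint _count;
//   public:
//     CountEvacFailureClosure() : _count(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       if (r->evacuation_failed()) _count += 1;
//       return false;  // false == keep iterating over all regions
//     }
//     uint count() const { return _count; }
//   };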
ysr@777 845
ysr@777 846 #endif // SERIALGC
stefank@2314 847
stefank@2314 848 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
