src/share/vm/gc_implementation/g1/heapRegion.hpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257 (e7d0505c8a30)
parent:      7256 (0fcaab91d485)
children:    7535 (7ae4e26cb1e0), 7647 (80ac3ee51955), 7971 (b554c7fa9478)
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks, because those benchmarks never touch that memory, so the operating system never actually commits the pages. The fix is to skip the explicit initialization whenever the initialization value of a data structure matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso

ysr@777 1 /*
drchase@6680 2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
stefank@2314 27
sjohanss@7118 28 #include "gc_implementation/g1/g1AllocationContext.hpp"
mgerdin@6987 29 #include "gc_implementation/g1/g1BlockOffsetTable.hpp"
stefank@2314 30 #include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
brutisso@7195 31 #include "gc_implementation/g1/heapRegionType.hpp"
stefank@2314 32 #include "gc_implementation/g1/survRateGroup.hpp"
stefank@2314 33 #include "gc_implementation/shared/ageTable.hpp"
stefank@2314 34 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 35 #include "memory/space.inline.hpp"
stefank@2314 36 #include "memory/watermark.hpp"
jprovino@4542 37 #include "utilities/macros.hpp"
stefank@2314 38
ysr@777 39 // A HeapRegion is the smallest piece of a G1CollectedHeap that
ysr@777 40 // can be collected independently.
ysr@777 41
ysr@777 42 // NOTE: Although a HeapRegion is a Space, its
ysr@777 43 // Space::initDirtyCardClosure method must not be called.
ysr@777 44 // The problem is that the existence of this method breaks
ysr@777 45 // the independence of barrier sets from remembered sets.
ysr@777 46 // The solution is to remove this method from the definition
ysr@777 47 // of a Space.
ysr@777 48
ysr@777 49 class HeapRegionRemSet;
ysr@777 50 class HeapRegionRemSetIterator;
ysr@777 51 class HeapRegion;
tonyp@2472 52 class HeapRegionSetBase;
johnc@5548 53 class nmethod;
tonyp@2472 54
tonyp@3713 55 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
tonyp@2963 56 #define HR_FORMAT_PARAMS(_hr_) \
tschatzl@7091 57 (_hr_)->hrm_index(), \
brutisso@7195 58 (_hr_)->get_short_type_str(), \
drchase@6680 59 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end())
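
// Illustrative use of the format macros above (hypothetical call site;
// any outputStream would do the same):
//
//   gclog_or_tty->print_cr("retiring region "HR_FORMAT,
//                          HR_FORMAT_PARAMS(hr));
//
// This prints the region's index, its short type string, and its
// bottom, top and end addresses.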
ysr@777 60
tschatzl@7091 61 // Sentinel value for hrm_index.
tschatzl@7091 62 #define G1_NO_HRM_INDEX ((uint) -1)
tonyp@3713 63
ysr@777 64 // A dirty card to oop closure for heap regions. It
ysr@777 65 // knows how to get the G1 heap and how to use the bitmap
ysr@777 66 // in the concurrent marker used by G1 to filter remembered
ysr@777 67 // sets.
ysr@777 68
mgerdin@6986 69 class HeapRegionDCTOC : public DirtyCardToOopClosure {
ysr@777 70 public:
ysr@777 71 // Specification of possible DirtyCardToOopClosure filtering.
ysr@777 72 enum FilterKind {
ysr@777 73 NoFilterKind,
ysr@777 74 IntoCSFilterKind,
ysr@777 75 OutOfRegionFilterKind
ysr@777 76 };
ysr@777 77
ysr@777 78 protected:
ysr@777 79 HeapRegion* _hr;
ysr@777 80 FilterKind _fk;
ysr@777 81 G1CollectedHeap* _g1;
ysr@777 82
ysr@777 83 // Walk the given memory region from bottom to (actual) top
ysr@777 84 // looking for objects and applying the oop closure (_cl) to
ysr@777 85 // them. The base implementation of this treats the area as
ysr@777 86 // blocks, where a block may or may not be an object. Sub-
ysr@777 87 // classes should override this to provide more accurate
ysr@777 88 // or possibly more efficient walking.
mgerdin@6986 89 void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);
ysr@777 90
ysr@777 91 public:
ysr@777 92 HeapRegionDCTOC(G1CollectedHeap* g1,
coleenp@4037 93 HeapRegion* hr, ExtendedOopClosure* cl,
ysr@777 94 CardTableModRefBS::PrecisionStyle precision,
ysr@777 95 FilterKind fk);
ysr@777 96 };
ysr@777 97
ysr@777 98 // The complicating factor is that BlockOffsetTable diverged
ysr@777 99 // significantly, and we need functionality that is only in the G1 version.
ysr@777 100 // So I copied that code, which led to an alternate G1 version of
ysr@777 101 // OffsetTableContigSpace. If the two versions of BlockOffsetTable could
ysr@777 102 // be reconciled, then G1OffsetTableContigSpace could go away.
ysr@777 103
ysr@777 104 // The idea behind time stamps is the following. Doing a save_marks on
ysr@777 105 // all regions at every GC pause is time consuming (if I remember
ysr@777 106 // correctly, 10ms or so). So, we would like to do that only for regions
ysr@777 107 // that are GC alloc regions. To achieve this, we use time
ysr@777 108 // stamps. For every evacuation pause, G1CollectedHeap generates a
ysr@777 109 // unique time stamp (essentially a counter that gets
ysr@777 110 // incremented). Every time we want to call save_marks on a region,
ysr@777 111 // we set the saved_mark_word to top and also copy the current GC
ysr@777 112 // time stamp to the time stamp field of the space. Reading the
ysr@777 113 // saved_mark_word involves checking the time stamp of the
ysr@777 114 // region. If it is the same as the current GC time stamp, then we
ysr@777 115 // can safely read the saved_mark_word field, as it is valid. If the
ysr@777 116 // time stamp of the region is not the same as the current GC time
ysr@777 117 // stamp, then we instead read top, as the saved_mark_word field is
ysr@777 118 // invalid. Time stamps (on the regions and also on the
ysr@777 119 // G1CollectedHeap) are reset at every cleanup (we iterate over
ysr@777 120 // the regions anyway) and at the end of a Full GC. The current scheme
ysr@777 121 // that uses sequential unsigned ints will fail only if we have 4 billion
ysr@777 122 // evacuation pauses between two cleanups, which is _highly_ unlikely.
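//
// A minimal sketch of the time stamp check described above (illustrative
// only; the actual logic lives in the saved_mark_word() implementation):
//
//   HeapWord* G1OffsetTableContigSpace::saved_mark_word() const {
//     G1CollectedHeap* g1h = G1CollectedHeap::heap();
//     if (_gc_time_stamp < g1h->get_gc_time_stamp()) {
//       return top();                    // stamp is stale: saved mark invalid
//     } else {
//       return Space::saved_mark_word(); // stamp matches: saved mark valid
//     }
//   }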
mgerdin@6990 123 class G1OffsetTableContigSpace: public CompactibleSpace {
ysr@777 124 friend class VMStructs;
mgerdin@6990 125 HeapWord* _top;
ysr@777 126 protected:
ysr@777 127 G1BlockOffsetArrayContigSpace _offsets;
ysr@777 128 Mutex _par_alloc_lock;
ysr@777 129 volatile unsigned _gc_time_stamp;
tonyp@2715 130 // When we need to retire an allocation region, while other threads
tonyp@2715 131 // are also concurrently trying to allocate into it, we typically
tonyp@2715 132 // allocate a dummy object at the end of the region to ensure that
tonyp@2715 133 // no more allocations can take place in it. However, sometimes we
tonyp@2715 134 // want to know where the end of the last "real" object we allocated
tonyp@2715 135 // into the region was, and that is what this field keeps track of.
tonyp@2715 136 HeapWord* _pre_dummy_top;
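// Illustrative retirement sequence (hypothetical; the real code also
// formats the dummy as a valid filler object):
//
//   HeapWord* real_top = region->top();  // end of the last real object
//   region->allocate(region->free() / HeapWordSize); // dummy plugs region
//   region->set_pre_dummy_top(real_top);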
ysr@777 137
ysr@777 138 public:
ysr@777 139 G1OffsetTableContigSpace(G1BlockOffsetSharedArray* sharedOffsetArray,
johnc@4065 140 MemRegion mr);
ysr@777 141
mgerdin@6990 142 void set_top(HeapWord* value) { _top = value; }
mgerdin@6990 143 HeapWord* top() const { return _top; }
mgerdin@6990 144
mgerdin@6990 145 protected:
tschatzl@7050 146 // Reset the G1OffsetTableContigSpace.
tschatzl@7050 147 virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
tschatzl@7050 148
mgerdin@6990 149 HeapWord** top_addr() { return &_top; }
mgerdin@6990 150 // Allocation helpers (return NULL if full).
mgerdin@6990 151 inline HeapWord* allocate_impl(size_t word_size, HeapWord* end_value);
mgerdin@6990 152 inline HeapWord* par_allocate_impl(size_t word_size, HeapWord* end_value);
mgerdin@6990 153
mgerdin@6990 154 public:
mgerdin@6990 155 void reset_after_compaction() { set_top(compaction_top()); }
mgerdin@6990 156
mgerdin@6990 157 size_t used() const { return byte_size(bottom(), top()); }
mgerdin@6990 158 size_t free() const { return byte_size(top(), end()); }
mgerdin@6990 159 bool is_free_block(const HeapWord* p) const { return p >= top(); }
mgerdin@6990 160
mgerdin@6990 161 MemRegion used_region() const { return MemRegion(bottom(), top()); }
mgerdin@6990 162
mgerdin@6990 163 void object_iterate(ObjectClosure* blk);
mgerdin@6990 164 void safe_object_iterate(ObjectClosure* blk);
mgerdin@6990 165
ysr@777 166 void set_bottom(HeapWord* value);
ysr@777 167 void set_end(HeapWord* value);
ysr@777 168
ysr@777 169 virtual HeapWord* saved_mark_word() const;
mgerdin@6988 170 void record_top_and_timestamp();
ysr@777 171 void reset_gc_time_stamp() { _gc_time_stamp = 0; }
tonyp@3957 172 unsigned get_gc_time_stamp() { return _gc_time_stamp; }
ysr@777 173
tonyp@2715 174 // See the comment above the declaration of _pre_dummy_top for an
tonyp@2715 175 // explanation of what it is.
tonyp@2715 176 void set_pre_dummy_top(HeapWord* pre_dummy_top) {
tonyp@2715 177 assert(is_in(pre_dummy_top) && pre_dummy_top <= top(), "pre-condition");
tonyp@2715 178 _pre_dummy_top = pre_dummy_top;
tonyp@2715 179 }
tonyp@2715 180 HeapWord* pre_dummy_top() {
tonyp@2715 181 return (_pre_dummy_top == NULL) ? top() : _pre_dummy_top;
tonyp@2715 182 }
tonyp@2715 183 void reset_pre_dummy_top() { _pre_dummy_top = NULL; }
tonyp@2715 184
tonyp@791 185 virtual void clear(bool mangle_space);
ysr@777 186
ysr@777 187 HeapWord* block_start(const void* p);
ysr@777 188 HeapWord* block_start_const(const void* p) const;
ysr@777 189
mgerdin@6990 190 void prepare_for_compaction(CompactPoint* cp);
mgerdin@6990 191
ysr@777 192 // Add offset table update.
ysr@777 193 virtual HeapWord* allocate(size_t word_size);
ysr@777 194 HeapWord* par_allocate(size_t word_size);
ysr@777 195
ysr@777 196 // MarkSweep support phase3
ysr@777 197 virtual HeapWord* initialize_threshold();
ysr@777 198 virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);
ysr@777 199
ysr@777 200 virtual void print() const;
tonyp@2453 201
tonyp@2453 202 void reset_bot() {
tschatzl@7050 203 _offsets.reset_bot();
tonyp@2453 204 }
tonyp@2453 205
tonyp@2453 206 void print_bot_on(outputStream* out) {
tonyp@2453 207 _offsets.print_on(out);
tonyp@2453 208 }
ysr@777 209 };
ysr@777 210
ysr@777 211 class HeapRegion: public G1OffsetTableContigSpace {
ysr@777 212 friend class VMStructs;
ysr@777 213 private:
ysr@777 214
ysr@777 215 // The remembered set for this region.
ysr@777 216 // (Might want to make this "inline" later, to avoid some alloc failure
ysr@777 217 // issues.)
ysr@777 218 HeapRegionRemSet* _rem_set;
ysr@777 219
ysr@777 220 G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
ysr@777 221
ysr@777 222 protected:
tonyp@2963 223 // The index of this region in the heap region sequence.
tschatzl@7091 224 uint _hrm_index;
ysr@777 225
sjohanss@7118 226 AllocationContext_t _allocation_context;
sjohanss@7118 227
brutisso@7195 228 HeapRegionType _type;
brutisso@7195 229
ysr@777 230 // For a humongous region, the region in which it starts.
ysr@777 231 HeapRegion* _humongous_start_region;
ysr@777 232 // For the start region of a humongous sequence, its original end().
ysr@777 233 HeapWord* _orig_end;
ysr@777 234
ysr@777 235 // True iff the region is in current collection_set.
ysr@777 236 bool _in_collection_set;
ysr@777 237
ysr@777 238 // True iff an attempt to evacuate an object in the region failed.
ysr@777 239 bool _evacuation_failed;
ysr@777 240
ysr@777 241 // A heap region may be a member of one of a number of special subsets, each
stefank@6992 242 // represented as a linked list through the field below. Currently, there
stefank@6992 243 // is only one set:
ysr@777 244 // The collection set.
ysr@777 245 HeapRegion* _next_in_special_set;
ysr@777 246
ysr@777 247 // next region in the young "generation" region set
ysr@777 248 HeapRegion* _next_young_region;
ysr@777 249
apetrusenko@1231 250 // Next region whose cards need cleaning
apetrusenko@1231 251 HeapRegion* _next_dirty_cards_region;
apetrusenko@1231 252
tonyp@2472 253 // Fields used by the HeapRegionSetBase class and subclasses.
tonyp@2472 254 HeapRegion* _next;
jwilhelm@6422 255 HeapRegion* _prev;
tonyp@2472 256 #ifdef ASSERT
tonyp@2472 257 HeapRegionSetBase* _containing_set;
tonyp@2472 258 #endif // ASSERT
tonyp@2472 259
ysr@777 260 // For parallel heapRegion traversal.
ysr@777 261 jint _claimed;
ysr@777 262
ysr@777 263 // We use concurrent marking to determine the amount of live data
ysr@777 264 // in each heap region.
ysr@777 265 size_t _prev_marked_bytes; // Bytes known to be live via last completed marking.
ysr@777 266 size_t _next_marked_bytes; // Bytes known to be live via in-progress marking.
ysr@777 267
tonyp@3714 268 // The calculated GC efficiency of the region.
ysr@777 269 double _gc_efficiency;
ysr@777 270
ysr@777 271 int _young_index_in_cset;
ysr@777 272 SurvRateGroup* _surv_rate_group;
ysr@777 273 int _age_index;
ysr@777 274
ysr@777 275 // The start of the unmarked area. The unmarked area extends from this
ysr@777 276 // word until the top and/or end of the region, and is the part
ysr@777 277 // of the region for which no marking was done, i.e. objects may
ysr@777 278 // have been allocated in this part since the last mark phase.
ysr@777 279 // "prev" is the top at the start of the last completed marking.
ysr@777 280 // "next" is the top at the start of the in-progress marking (if any).
ysr@777 281 HeapWord* _prev_top_at_mark_start;
ysr@777 282 HeapWord* _next_top_at_mark_start;
ysr@777 283 // If a collection pause is in progress, this is the top at the start
ysr@777 284 // of that pause.
ysr@777 285
ysr@777 286 void init_top_at_mark_start() {
ysr@777 287 assert(_prev_marked_bytes == 0 &&
ysr@777 288 _next_marked_bytes == 0,
ysr@777 289 "Must be called after zero_marked_bytes.");
ysr@777 290 HeapWord* bot = bottom();
ysr@777 291 _prev_top_at_mark_start = bot;
ysr@777 292 _next_top_at_mark_start = bot;
ysr@777 293 }
ysr@777 294
johnc@1829 295 // Cached attributes used in the collection set policy information
johnc@1829 296
johnc@1829 297 // The RSet length that was added to the total value
johnc@1829 298 // for the collection set.
johnc@1829 299 size_t _recorded_rs_length;
johnc@1829 300
johnc@1829 301 // The predicted elapsed time that was added to total value
johnc@1829 302 // for the collection set.
johnc@1829 303 double _predicted_elapsed_time_ms;
johnc@1829 304
johnc@1829 305 // The predicted number of bytes to copy that was added to
johnc@1829 306 // the total value for the collection set.
johnc@1829 307 size_t _predicted_bytes_to_copy;
johnc@1829 308
ysr@777 309 public:
tschatzl@7091 310 HeapRegion(uint hrm_index,
tonyp@2963 311 G1BlockOffsetSharedArray* sharedOffsetArray,
sjohanss@7131 312 MemRegion mr);
ysr@777 313
tschatzl@7050 314 // Initializing the HeapRegion not only resets the data structure, but also
tschatzl@7050 315 // resets the BOT for that heap region.
tschatzl@7050 316 // The default value for clear_space means that we will do any clearing
tschatzl@7050 317 // that needs to be done ourselves. We also always mangle the space.
tschatzl@7050 318 virtual void initialize(MemRegion mr, bool clear_space = false, bool mangle_space = SpaceDecorator::Mangle);
tschatzl@7050 319
johnc@3182 320 static int LogOfHRGrainBytes;
johnc@3182 321 static int LogOfHRGrainWords;
johnc@3182 322
johnc@3182 323 static size_t GrainBytes;
johnc@3182 324 static size_t GrainWords;
johnc@3182 325 static size_t CardsPerRegion;
tonyp@1377 326
tonyp@3176 327 static size_t align_up_to_region_byte_size(size_t sz) {
tonyp@3176 328 return (sz + (size_t) GrainBytes - 1) &
tonyp@3176 329 ~((1 << (size_t) LogOfHRGrainBytes) - 1);
tonyp@3176 330 }
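
// Worked example, assuming 1 MB regions (GrainBytes = 1M,
// LogOfHRGrainBytes = 20):
//   align_up_to_region_byte_size(1)      == 1M
//   align_up_to_region_byte_size(1M)     == 1M
//   align_up_to_region_byte_size(1M + 1) == 2M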
tonyp@3176 331
tschatzl@5701 332 static size_t max_region_size();
tschatzl@5701 333
tonyp@1377 334 // It sets up the heap region size (GrainBytes / GrainWords), as
tonyp@1377 335 // well as other related fields that are based on the heap region
tonyp@1377 336 // size (LogOfHRGrainBytes / LogOfHRGrainWords /
tonyp@1377 337 // CardsPerRegion). All those fields are considered constant
tonyp@1377 338 // throughout the JVM's execution, therefore they should only be set
tonyp@1377 339 // up once during initialization time.
brutisso@5646 340 static void setup_heap_region_size(size_t initial_heap_size, size_t max_heap_size);
ysr@777 341
tonyp@790 342 enum ClaimValues {
johnc@3296 343 InitialClaimValue = 0,
johnc@3296 344 FinalCountClaimValue = 1,
johnc@3296 345 NoteEndClaimValue = 2,
johnc@3296 346 ScrubRemSetClaimValue = 3,
johnc@3296 347 ParVerifyClaimValue = 4,
johnc@3296 348 RebuildRSClaimValue = 5,
tonyp@3691 349 ParEvacFailureClaimValue = 6,
tonyp@3691 350 AggregateCountClaimValue = 7,
johnc@5548 351 VerifyCountClaimValue = 8,
johnc@5548 352 ParMarkRootClaimValue = 9
tonyp@790 353 };
tonyp@790 354
mgerdin@6990 355 // All allocated blocks are occupied by objects in a HeapRegion
mgerdin@6990 356 bool block_is_obj(const HeapWord* p) const;
mgerdin@6990 357
mgerdin@6990 358 // Returns the object size for all valid block starts
mgerdin@6990 359 // and the amount of unallocated words if called on top()
mgerdin@6990 360 size_t block_size(const HeapWord* p) const;
mgerdin@6990 361
mgerdin@6990 362 inline HeapWord* par_allocate_no_bot_updates(size_t word_size);
mgerdin@6990 363 inline HeapWord* allocate_no_bot_updates(size_t word_size);
tonyp@2454 364
tschatzl@7091 365 // If this region is a member of a HeapRegionManager, the index of this
ysr@777 366 // region in that manager, otherwise G1_NO_HRM_INDEX.
tschatzl@7091 367 uint hrm_index() const { return _hrm_index; }
ysr@777 368
ysr@777 369 // The number of bytes marked live in the region in the last marking phase.
ysr@777 370 size_t marked_bytes() { return _prev_marked_bytes; }
tonyp@2717 371 size_t live_bytes() {
tonyp@2717 372 return (top() - prev_top_at_mark_start()) * HeapWordSize + marked_bytes();
tonyp@2717 373 }
tonyp@2717 374
ysr@777 375 // The number of bytes counted in the next marking.
ysr@777 376 size_t next_marked_bytes() { return _next_marked_bytes; }
ysr@777 377 // The number of bytes live wrt the next marking.
ysr@777 378 size_t next_live_bytes() {
tonyp@2717 379 return
tonyp@2717 380 (top() - next_top_at_mark_start()) * HeapWordSize + next_marked_bytes();
ysr@777 381 }
ysr@777 382
ysr@777 383 // A lower bound on the amount of garbage bytes in the region.
ysr@777 384 size_t garbage_bytes() {
ysr@777 385 size_t used_at_mark_start_bytes =
ysr@777 386 (prev_top_at_mark_start() - bottom()) * HeapWordSize;
ysr@777 387 assert(used_at_mark_start_bytes >= marked_bytes(),
ysr@777 388 "Can't mark more than we have.");
ysr@777 389 return used_at_mark_start_bytes - marked_bytes();
ysr@777 390 }
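
// Worked example with illustrative numbers: if prev_top_at_mark_start()
// is 800K above bottom() and marked_bytes() is 300K, garbage_bytes()
// returns 500K. Anything allocated above the prev TAMS is conservatively
// treated as live, which is why this is only a lower bound.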
ysr@777 391
tonyp@3539 392 // Return the amount of bytes we'll reclaim if we collect this
tonyp@3539 393 // region. This includes not only the known garbage bytes in the
tonyp@3539 394 // region but also any unallocated space in it, i.e., [top, end),
tonyp@3539 395 // since it will also be reclaimed if we collect the region.
tonyp@3539 396 size_t reclaimable_bytes() {
tonyp@3539 397 size_t known_live_bytes = live_bytes();
tonyp@3539 398 assert(known_live_bytes <= capacity(), "sanity");
tonyp@3539 399 return capacity() - known_live_bytes;
tonyp@3539 400 }
tonyp@3539 401
ysr@777 402 // An upper bound on the number of live bytes in the region.
ysr@777 403 size_t max_live_bytes() { return used() - garbage_bytes(); }
ysr@777 404
ysr@777 405 void add_to_marked_bytes(size_t incr_bytes) {
ysr@777 406 _next_marked_bytes = _next_marked_bytes + incr_bytes;
johnc@3292 407 assert(_next_marked_bytes <= used(), "invariant" );
ysr@777 408 }
ysr@777 409
ysr@777 410 void zero_marked_bytes() {
ysr@777 411 _prev_marked_bytes = _next_marked_bytes = 0;
ysr@777 412 }
ysr@777 413
brutisso@7195 414 const char* get_type_str() const { return _type.get_str(); }
brutisso@7195 415 const char* get_short_type_str() const { return _type.get_short_str(); }
brutisso@7195 416
brutisso@7195 417 bool is_free() const { return _type.is_free(); }
brutisso@7195 418
brutisso@7195 419 bool is_young() const { return _type.is_young(); }
brutisso@7195 420 bool is_eden() const { return _type.is_eden(); }
brutisso@7195 421 bool is_survivor() const { return _type.is_survivor(); }
brutisso@7195 422
brutisso@7195 423 bool isHumongous() const { return _type.is_humongous(); }
brutisso@7195 424 bool startsHumongous() const { return _type.is_starts_humongous(); }
brutisso@7195 425 bool continuesHumongous() const { return _type.is_continues_humongous(); }
brutisso@7195 426
brutisso@7195 427 bool is_old() const { return _type.is_old(); }
brutisso@7195 428
ysr@777 429 // For a humongous region, region in which it starts.
ysr@777 430 HeapRegion* humongous_start_region() const {
ysr@777 431 return _humongous_start_region;
ysr@777 432 }
ysr@777 433
tonyp@3957 434 // Return the number of distinct regions that are covered by this region:
tonyp@3957 435 // 1 if the region is not humongous, otherwise the number of regions in the series.
tonyp@3957 436 uint region_num() const {
tonyp@3957 437 if (!isHumongous()) {
tonyp@3957 438 return 1U;
tonyp@3957 439 } else {
tonyp@3957 440 assert(startsHumongous(), "doesn't make sense on HC regions");
tonyp@3957 441 assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
tonyp@3957 442 return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
tonyp@3957 443 }
tonyp@3957 444 }
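
// Example (illustrative): a "starts humongous" region whose capacity()
// is 3 * GrainBytes covers three regions, so region_num() returns 3
// and last_hc_index() below returns hrm_index() + 3.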
tonyp@3957 445
tonyp@3957 446 // Return the index + 1 of the last HC region that's associated
tonyp@3957 447 // with this HS region.
tonyp@3957 448 uint last_hc_index() const {
tonyp@3957 449 assert(startsHumongous(), "don't call this otherwise");
tschatzl@7091 450 return hrm_index() + region_num();
tonyp@3957 451 }
tonyp@3957 452
brutisso@3216 453 // Same as Space::is_in_reserved, but uses the original size of the region.
brutisso@3216 454 // The original size differs only for "starts humongous" regions: they get
brutisso@3216 455 // their _end set up to be the end of the last "continues humongous" region
brutisso@3216 456 // of the corresponding humongous object.
brutisso@3216 457 bool is_in_reserved_raw(const void* p) const {
brutisso@3216 458 return _bottom <= p && p < _orig_end;
brutisso@3216 459 }
brutisso@3216 460
tonyp@2453 461 // Makes the current region be a "starts humongous" region, i.e.,
tonyp@2453 462 // the first region in a series of one or more contiguous regions
tonyp@2453 463 // that will contain a single "humongous" object. The two parameters
tonyp@2453 464 // are as follows:
tonyp@2453 465 //
tonyp@2453 466 // new_top : The new value of the top field of this region which
tonyp@2453 467 // points to the end of the humongous object that's being
tonyp@2453 468 // allocated. If there is more than one region in the series, top
tonyp@2453 469 // will lie beyond this region's original end field and on the last
tonyp@2453 470 // region in the series.
tonyp@2453 471 //
tonyp@2453 472 // new_end : The new value of the end field of this region which
tonyp@2453 473 // points to the end of the last region in the series. If there is
tonyp@2453 474 // one region in the series (namely: this one) end will be the same
tonyp@2453 475 // as the original end of this region.
tonyp@2453 476 //
tonyp@2453 477 // Updating top and end as described above makes this region look as
tonyp@2453 478 // if it spans the entire space taken up by all the regions in the
tonyp@2453 479 // series and a single allocation moved its top to new_top. This
tonyp@2453 480 // ensures that the space (capacity / allocated) taken up by all
tonyp@2453 481 // humongous regions can be calculated by just looking at the
tonyp@2453 482 // "starts humongous" regions and by ignoring the "continues
tonyp@2453 483 // humongous" regions.
tonyp@2453 484 void set_startsHumongous(HeapWord* new_top, HeapWord* new_end);
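
// Worked example (illustrative): a 2.5 MB humongous object in 1 MB
// regions spans three regions. On the first region this method is
// called with new_end = bottom() + 3M and new_top = bottom() + 2.5M,
// so the capacity and used accounting for the whole series can be read
// off the "starts humongous" region alone.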
ysr@777 485
tonyp@2453 486 // Makes the current region be a "continues humongous"
tonyp@2453 487 // region. first_hr is the "start humongous" region of the series
tonyp@2453 488 // which this region will be part of.
tonyp@2453 489 void set_continuesHumongous(HeapRegion* first_hr);
ysr@777 490
tonyp@2472 491 // Unsets the humongous-related fields on the region.
brutisso@7195 492 void clear_humongous();
tonyp@2472 493
ysr@777 494 // If the region has a remembered set, return a pointer to it.
ysr@777 495 HeapRegionRemSet* rem_set() const {
ysr@777 496 return _rem_set;
ysr@777 497 }
ysr@777 498
ysr@777 499 // True iff the region is in current collection_set.
ysr@777 500 bool in_collection_set() const {
ysr@777 501 return _in_collection_set;
ysr@777 502 }
ysr@777 503 void set_in_collection_set(bool b) {
ysr@777 504 _in_collection_set = b;
ysr@777 505 }
ysr@777 506 HeapRegion* next_in_collection_set() {
ysr@777 507 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 508 assert(_next_in_special_set == NULL ||
ysr@777 509 _next_in_special_set->in_collection_set(),
ysr@777 510 "Malformed CS.");
ysr@777 511 return _next_in_special_set;
ysr@777 512 }
ysr@777 513 void set_next_in_collection_set(HeapRegion* r) {
ysr@777 514 assert(in_collection_set(), "should only invoke on member of CS.");
ysr@777 515 assert(r == NULL || r->in_collection_set(), "Malformed CS.");
ysr@777 516 _next_in_special_set = r;
ysr@777 517 }
ysr@777 518
sjohanss@7118 519 void set_allocation_context(AllocationContext_t context) {
sjohanss@7118 520 _allocation_context = context;
sjohanss@7118 521 }
sjohanss@7118 522
sjohanss@7118 523 AllocationContext_t allocation_context() const {
sjohanss@7118 524 return _allocation_context;
sjohanss@7118 525 }
sjohanss@7118 526
tonyp@2472 527 // Methods used by the HeapRegionSetBase class and subclasses.
tonyp@2472 528
jwilhelm@6422 529 // Getters and setters for the next and prev fields used to link regions into
tonyp@2472 530 // linked lists.
tonyp@2472 531 HeapRegion* next() { return _next; }
jwilhelm@6422 532 HeapRegion* prev() { return _prev; }
tonyp@2472 533
tonyp@2472 534 void set_next(HeapRegion* next) { _next = next; }
jwilhelm@6422 535 void set_prev(HeapRegion* prev) { _prev = prev; }
tonyp@2472 536
tonyp@2472 537 // Every region added to a set is tagged with a reference to that
tonyp@2472 538 // set. This is used for consistency checking, to make sure that the
tonyp@2472 539 // contents of a set are as they should be. The tag is only
tonyp@2472 540 // available in non-product builds.
tonyp@2472 541 #ifdef ASSERT
tonyp@2472 542 void set_containing_set(HeapRegionSetBase* containing_set) {
tonyp@2472 543 assert((containing_set == NULL && _containing_set != NULL) ||
tonyp@2472 544 (containing_set != NULL && _containing_set == NULL),
tonyp@2472 545 err_msg("containing_set: "PTR_FORMAT" "
tonyp@2472 546 "_containing_set: "PTR_FORMAT,
drchase@6680 547 p2i(containing_set), p2i(_containing_set)));
tonyp@2472 548
tonyp@2472 549 _containing_set = containing_set;
tonyp@2643 550 }
tonyp@2472 551
tonyp@2472 552 HeapRegionSetBase* containing_set() { return _containing_set; }
tonyp@2472 553 #else // ASSERT
tonyp@2472 554 void set_containing_set(HeapRegionSetBase* containing_set) { }
tonyp@2472 555
tonyp@2643 556 // containing_set() is only used in asserts so there's no reason
tonyp@2472 557 // to provide a dummy version of it.
tonyp@2472 558 #endif // ASSERT
tonyp@2472 559
ysr@777 560 HeapRegion* get_next_young_region() { return _next_young_region; }
ysr@777 561 void set_next_young_region(HeapRegion* hr) {
ysr@777 562 _next_young_region = hr;
ysr@777 563 }
ysr@777 564
apetrusenko@1231 565 HeapRegion* get_next_dirty_cards_region() const { return _next_dirty_cards_region; }
apetrusenko@1231 566 HeapRegion** next_dirty_cards_region_addr() { return &_next_dirty_cards_region; }
apetrusenko@1231 567 void set_next_dirty_cards_region(HeapRegion* hr) { _next_dirty_cards_region = hr; }
apetrusenko@1231 568 bool is_on_dirty_cards_region_list() const { return get_next_dirty_cards_region() != NULL; }
apetrusenko@1231 569
tschatzl@7100 570 HeapWord* orig_end() const { return _orig_end; }
tonyp@2963 571
ysr@777 572 // Reset the HeapRegion to default values.
tschatzl@6404 573 void hr_clear(bool par, bool clear_space, bool locked = false);
tonyp@2849 574 void par_clear();
ysr@777 575
ysr@777 576 // Get the start of the unmarked area in this region.
ysr@777 577 HeapWord* prev_top_at_mark_start() const { return _prev_top_at_mark_start; }
ysr@777 578 HeapWord* next_top_at_mark_start() const { return _next_top_at_mark_start; }
ysr@777 579
ysr@777 580 // Note the start or end of marking. This tells the heap region
ysr@777 581 // that the collector is about to start or has finished (concurrently)
ysr@777 582 // marking the heap.
ysr@777 583
tonyp@3416 584 // Notify the region that concurrent marking is starting. Initialize
tonyp@3416 585 // all fields related to the next marking info.
tonyp@3416 586 inline void note_start_of_marking();
ysr@777 587
tonyp@3416 588 // Notify the region that concurrent marking has finished. Copy the
tonyp@3416 589 // (now finalized) next marking info fields into the prev marking
tonyp@3416 590 // info fields.
tonyp@3416 591 inline void note_end_of_marking();
ysr@777 592
tonyp@3416 593 // Notify the region that it will be used as to-space during a GC
tonyp@3416 594 // and we are about to start copying objects into it.
tonyp@3416 595 inline void note_start_of_copying(bool during_initial_mark);
ysr@777 596
tonyp@3416 597 // Notify the region that it ceases being to-space during a GC and
tonyp@3416 598 // we will not copy objects into it any more.
tonyp@3416 599 inline void note_end_of_copying(bool during_initial_mark);
tonyp@3416 600
tonyp@3416 601 // Notify the region that we are about to start processing
tonyp@3416 602 // self-forwarded objects during evac failure handling.
tonyp@3416 603 void note_self_forwarding_removal_start(bool during_initial_mark,
tonyp@3416 604 bool during_conc_mark);
tonyp@3416 605
tonyp@3416 606 // Notify the region that we have finished processing self-forwarded
tonyp@3416 607 // objects during evac failure handling.
tonyp@3416 608 void note_self_forwarding_removal_end(bool during_initial_mark,
tonyp@3416 609 bool during_conc_mark,
tonyp@3416 610 size_t marked_bytes);
ysr@777 611
ysr@777 612 // Returns "false" iff no object in the region was allocated when the
ysr@777 613 // last mark phase ended.
ysr@777 614 bool is_marked() { return _prev_top_at_mark_start != bottom(); }
ysr@777 615
ysr@777 616 void reset_during_compaction() {
tonyp@3957 617 assert(isHumongous() && startsHumongous(),
tonyp@3957 618 "should only be called for starts humongous regions");
ysr@777 619
ysr@777 620 zero_marked_bytes();
ysr@777 621 init_top_at_mark_start();
ysr@777 622 }
ysr@777 623
ysr@777 624 void calc_gc_efficiency(void);
ysr@777 625 double gc_efficiency() { return _gc_efficiency;}
ysr@777 626
ysr@777 627 int young_index_in_cset() const { return _young_index_in_cset; }
ysr@777 628 void set_young_index_in_cset(int index) {
ysr@777 629 assert( (index == -1) || is_young(), "pre-condition" );
ysr@777 630 _young_index_in_cset = index;
ysr@777 631 }
ysr@777 632
ysr@777 633 int age_in_surv_rate_group() {
ysr@777 634 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 635 assert( _age_index > -1, "pre-condition" );
ysr@777 636 return _surv_rate_group->age_in_group(_age_index);
ysr@777 637 }
ysr@777 638
ysr@777 639 void record_surv_words_in_group(size_t words_survived) {
ysr@777 640 assert( _surv_rate_group != NULL, "pre-condition" );
ysr@777 641 assert( _age_index > -1, "pre-condition" );
ysr@777 642 int age_in_group = age_in_surv_rate_group();
ysr@777 643 _surv_rate_group->record_surviving_words(age_in_group, words_survived);
ysr@777 644 }
ysr@777 645
ysr@777 646 int age_in_surv_rate_group_cond() {
ysr@777 647 if (_surv_rate_group != NULL)
ysr@777 648 return age_in_surv_rate_group();
ysr@777 649 else
ysr@777 650 return -1;
ysr@777 651 }
ysr@777 652
ysr@777 653 SurvRateGroup* surv_rate_group() {
ysr@777 654 return _surv_rate_group;
ysr@777 655 }
ysr@777 656
ysr@777 657 void install_surv_rate_group(SurvRateGroup* surv_rate_group) {
ysr@777 658 assert( surv_rate_group != NULL, "pre-condition" );
ysr@777 659 assert( _surv_rate_group == NULL, "pre-condition" );
ysr@777 660 assert( is_young(), "pre-condition" );
ysr@777 661
ysr@777 662 _surv_rate_group = surv_rate_group;
ysr@777 663 _age_index = surv_rate_group->next_age_index();
ysr@777 664 }
ysr@777 665
ysr@777 666 void uninstall_surv_rate_group() {
ysr@777 667 if (_surv_rate_group != NULL) {
ysr@777 668 assert( _age_index > -1, "pre-condition" );
ysr@777 669 assert( is_young(), "pre-condition" );
ysr@777 670
ysr@777 671 _surv_rate_group = NULL;
ysr@777 672 _age_index = -1;
ysr@777 673 } else {
ysr@777 674 assert( _age_index == -1, "pre-condition" );
ysr@777 675 }
ysr@777 676 }
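
// Typical lifecycle sketch (illustrative): a region is tagged young and
// installed into a group when it starts being allocated into, records
// its survivors at each GC, and is uninstalled when it leaves the
// young generation:
//
//   hr->set_eden();
//   hr->install_surv_rate_group(group);
//   ...
//   hr->record_surv_words_in_group(words_survived);
//   ...
//   hr->uninstall_surv_rate_group();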
ysr@777 677
brutisso@7195 678 void set_free() { _type.set_free(); }
ysr@777 679
brutisso@7195 680 void set_eden() { _type.set_eden(); }
brutisso@7195 681 void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
brutisso@7195 682 void set_survivor() { _type.set_survivor(); }
ysr@777 683
brutisso@7195 684 void set_old() { _type.set_old(); }
ysr@777 685
ysr@777 686 // Determine if an object has been allocated since the last
ysr@777 687 // mark performed by the collector. This returns true iff the object
ysr@777 688 // is within the unmarked area of the region.
ysr@777 689 bool obj_allocated_since_prev_marking(oop obj) const {
ysr@777 690 return (HeapWord *) obj >= prev_top_at_mark_start();
ysr@777 691 }
ysr@777 692 bool obj_allocated_since_next_marking(oop obj) const {
ysr@777 693 return (HeapWord *) obj >= next_top_at_mark_start();
ysr@777 694 }
ysr@777 695
ysr@777 696 // For parallel heapRegion traversal.
ysr@777 697 bool claimHeapRegion(int claimValue);
ysr@777 698 jint claim_value() { return _claimed; }
ysr@777 699 // Use this carefully: only when you're sure no one is claiming...
ysr@777 700 void set_claim_value(int claimValue) { _claimed = claimValue; }
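
// A plausible sketch of the claiming protocol (the real implementation,
// in heapRegion.cpp, uses an atomic compare-and-swap on _claimed):
//
//   bool HeapRegion::claimHeapRegion(jint claimValue) {
//     jint current = _claimed;
//     if (current != claimValue) {
//       jint res = Atomic::cmpxchg(claimValue, &_claimed, current);
//       if (res == current) return true;
//     }
//     return false;
//   }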
ysr@777 701
ysr@777 702 // Returns the "evacuation_failed" property of the region.
ysr@777 703 bool evacuation_failed() { return _evacuation_failed; }
ysr@777 704
ysr@777 705 // Sets the "evacuation_failed" property of the region.
ysr@777 706 void set_evacuation_failed(bool b) {
ysr@777 707 _evacuation_failed = b;
ysr@777 708
ysr@777 709 if (b) {
ysr@777 710 _next_marked_bytes = 0;
ysr@777 711 }
ysr@777 712 }
ysr@777 713
ysr@777 714 // Requires that "mr" be entirely within the region.
ysr@777 715 // Apply "cl->do_object" to all objects that intersect with "mr".
ysr@777 716 // If the iteration encounters an unparseable portion of the region,
ysr@777 717 // or if "cl->abort()" is true after a closure application,
ysr@777 718 // terminate the iteration and return the address of the start of the
ysr@777 719 // subregion that isn't done. (The two can be distinguished by querying
ysr@777 720 // "cl->abort()".) Return of "NULL" indicates that the iteration
ysr@777 721 // completed.
ysr@777 722 HeapWord*
ysr@777 723 object_iterate_mem_careful(MemRegion mr, ObjectClosure* cl);
ysr@777 724
tonyp@2849 725 // filter_young: if true and the region is a young region then we
tonyp@2849 726 // skip the iteration.
tonyp@2849 727 // card_ptr: if not NULL, and we decide that the card is not young
tonyp@2849 728 // and we iterate over it, we'll clean the card before we start the
tonyp@2849 729 // iteration.
ysr@777 730 HeapWord*
ysr@777 731 oops_on_card_seq_iterate_careful(MemRegion mr,
johnc@2021 732 FilterOutOfRegionClosure* cl,
tonyp@2849 733 bool filter_young,
tonyp@2849 734 jbyte* card_ptr);
ysr@777 735
johnc@1829 736 size_t recorded_rs_length() const { return _recorded_rs_length; }
johnc@1829 737 double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
johnc@1829 738 size_t predicted_bytes_to_copy() const { return _predicted_bytes_to_copy; }
johnc@1829 739
johnc@1829 740 void set_recorded_rs_length(size_t rs_length) {
johnc@1829 741 _recorded_rs_length = rs_length;
johnc@1829 742 }
johnc@1829 743
johnc@1829 744 void set_predicted_elapsed_time_ms(double ms) {
johnc@1829 745 _predicted_elapsed_time_ms = ms;
johnc@1829 746 }
johnc@1829 747
johnc@1829 748 void set_predicted_bytes_to_copy(size_t bytes) {
johnc@1829 749 _predicted_bytes_to_copy = bytes;
johnc@1829 750 }
johnc@1829 751
tonyp@3957 752 virtual CompactibleSpace* next_compaction_space() const;
ysr@777 753
ysr@777 754 virtual void reset_after_compaction();
ysr@777 755
johnc@5548 756 // Routines for managing a list of code roots (attached to
johnc@5548 757 // this region's RSet) that point into this heap region.
johnc@5548 758 void add_strong_code_root(nmethod* nm);
mgerdin@7208 759 void add_strong_code_root_locked(nmethod* nm);
johnc@5548 760 void remove_strong_code_root(nmethod* nm);
johnc@5548 761
johnc@5548 762 // Applies blk->do_code_blob() to each of the entries in
johnc@5548 763 // the strong code roots list for this region.
johnc@5548 764 void strong_code_roots_do(CodeBlobClosure* blk) const;
johnc@5548 765
johnc@5548 766 // Verify that the entries on the strong code root list for this
johnc@5548 767 // region are live and include at least one pointer into this region.
johnc@5548 768 void verify_strong_code_roots(VerifyOption vo, bool* failures) const;
johnc@5548 769
ysr@777 770 void print() const;
ysr@777 771 void print_on(outputStream* st) const;
ysr@777 772
johnc@2969 773 // vo == UsePrevMarking -> use "prev" marking information,
johnc@2969 774 // vo == UseNextMarking -> use "next" marking information
johnc@2969 775 // vo == UseMarkWord -> use the mark word in the object header
johnc@2969 776 //
tonyp@1246 777 // NOTE: Only the "prev" marking information is guaranteed to be
tonyp@1246 778 // consistent most of the time, so most calls to this should use
johnc@2969 779 // vo == UsePrevMarking.
johnc@2969 780 // Currently, there is only one case where this is called with
johnc@2969 781 // vo == UseNextMarking, which is to verify the "next" marking
johnc@2969 782 // information at the end of remark.
johnc@2969 783 // Currently there is only one place where this is called with
johnc@2969 784 // vo == UseMarkWord, which is to verify the marking during a
johnc@2969 785 // full GC.
brutisso@3711 786 void verify(VerifyOption vo, bool *failures) const;
tonyp@1246 787
tonyp@1246 788 // Override; it uses the "prev" marking information
brutisso@3711 789 virtual void verify() const;
ysr@777 790 };
ysr@777 791
ysr@777 792 // HeapRegionClosure is used for iterating over regions.
ysr@777 793 // Terminates the iteration when the "doHeapRegion" method returns "true".
ysr@777 794 class HeapRegionClosure : public StackObj {
tschatzl@7091 795 friend class HeapRegionManager;
ysr@777 796 friend class G1CollectedHeap;
ysr@777 797
ysr@777 798 bool _complete;
ysr@777 799 void incomplete() { _complete = false; }
ysr@777 800
ysr@777 801 public:
ysr@777 802 HeapRegionClosure(): _complete(true) {}
ysr@777 803
ysr@777 804 // Typically called on each region until it returns true.
ysr@777 805 virtual bool doHeapRegion(HeapRegion* r) = 0;
ysr@777 806
ysr@777 807 // True after iteration if the closure was applied to all heap regions
ysr@777 808 // and returned "false" in all cases.
ysr@777 809 bool complete() { return _complete; }
ysr@777 810 };
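
// An illustrative (hypothetical) closure: count the regions visited by
// an iteration.
//
//   class CountRegionsClosure : public HeapRegionClosure {
//     uint _count;
//   public:
//     CountRegionsClosure() : _count(0) { }
//     virtual bool doHeapRegion(HeapRegion* r) {
//       _count += 1;
//       return false; // false means "continue iterating"
//     }
//     uint count() const { return _count; }
//   };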
ysr@777 811
stefank@2314 812 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_HPP
