src/share/vm/gc_implementation/g1/concurrentMark.hpp

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP

#include "gc_implementation/g1/heapRegionSet.hpp"
#include "utilities/taskqueue.hpp"

class G1CollectedHeap;
class CMTask;
typedef GenericTaskQueue<oop, mtGC>            CMTaskQueue;
typedef GenericTaskQueueSet<CMTaskQueue, mtGC> CMTaskQueueSet;

// Closure used by CM during concurrent reference discovery
// and reference processing (during remarking) to determine
// if a particular object is alive. It is primarily used
// to determine if referents of discovered reference objects
// are alive. An instance is also embedded into the
// reference processor as the _is_alive_non_header field
class G1CMIsAliveClosure: public BoolObjectClosure {
  G1CollectedHeap* _g1;
 public:
  G1CMIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) { }

  bool do_object_b(oop obj);
};

// A generic CM bit map. This is essentially a wrapper around the BitMap
// class, with one bit per (1<<_shifter) HeapWords.

class CMBitMapRO VALUE_OBJ_CLASS_SPEC {
 protected:
  HeapWord*    _bmStartWord;   // base address of range covered by map
  size_t       _bmWordSize;    // map size (in #HeapWords covered)
  const int    _shifter;       // map to char or bit
  VirtualSpace _virtual_space; // underlying the bit map
  BitMap       _bm;            // the bit map itself

 public:
  // constructor
  CMBitMapRO(int shifter);

  enum { do_yield = true };

  // inquiries
  HeapWord* startWord()   const { return _bmStartWord; }
  size_t    sizeInWords() const { return _bmWordSize;  }
  // the following is one past the last word in space
  HeapWord* endWord()     const { return _bmStartWord + _bmWordSize; }

  // read marks

  bool isMarked(HeapWord* addr) const {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.at(heapWordToOffset(addr));
  }

  // iteration
  inline bool iterate(BitMapClosure* cl, MemRegion mr);
  inline bool iterate(BitMapClosure* cl);

  // Return the address corresponding to the next marked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextMarkedWordAddress(HeapWord* addr,
                                     HeapWord* limit = NULL) const;
  // Return the address corresponding to the next unmarked bit at or after
  // "addr", and before "limit", if "limit" is non-NULL. If there is no
  // such bit, returns "limit" if that is non-NULL, or else "endWord()".
  HeapWord* getNextUnmarkedWordAddress(HeapWord* addr,
                                       HeapWord* limit = NULL) const;

  // conversion utilities
  HeapWord* offsetToHeapWord(size_t offset) const {
    return _bmStartWord + (offset << _shifter);
  }
  size_t heapWordToOffset(HeapWord* addr) const {
    return pointer_delta(addr, _bmStartWord) >> _shifter;
  }
  int heapWordDiffToOffsetDiff(size_t diff) const;

  // The argument addr should be the start address of a valid object
  HeapWord* nextObject(HeapWord* addr) {
    oop obj = (oop) addr;
    HeapWord* res = addr + obj->size();
    assert(offsetToHeapWord(heapWordToOffset(res)) == res, "sanity");
    return res;
  }

  void print_on_error(outputStream* st, const char* prefix) const;

  // debugging
  NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
};
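
// Illustrative sketch (not part of the original header): a typical walk over
// the marked objects in a range combines getNextMarkedWordAddress() with
// nextObject(). The variable names below are hypothetical.
//
//   HeapWord* cur = bitmap->getNextMarkedWordAddress(bottom, top);
//   while (cur < top) {
//     oop obj = (oop) cur;              // a set bit marks an object start
//     // ... process obj ...
//     cur = bitmap->getNextMarkedWordAddress(bitmap->nextObject(cur), top);
//   }
//
// Because one bit covers (1 << _shifter) HeapWords, heapWordToOffset() and
// offsetToHeapWord() round-trip exactly only for suitably aligned addresses,
// which is what the "sanity" assert in nextObject() checks.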

class CMBitMap : public CMBitMapRO {

 public:
  // constructor
  CMBitMap(int shifter) :
    CMBitMapRO(shifter) {}

  // Allocates the back store for the marking bitmap
  bool allocate(ReservedSpace heap_rs);

  // write marks
  void mark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.set_bit(heapWordToOffset(addr));
  }
  void clear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    _bm.clear_bit(heapWordToOffset(addr));
  }
  bool parMark(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_set_bit(heapWordToOffset(addr));
  }
  bool parClear(HeapWord* addr) {
    assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
           "outside underlying space?");
    return _bm.par_clear_bit(heapWordToOffset(addr));
  }
  void markRange(MemRegion mr);
  void clearAll();
  void clearRange(MemRegion mr);

  // Starting at the bit corresponding to "addr" (inclusive), find the next
  // "1" bit, if any. This bit starts some run of consecutive "1"'s; find
  // the end of this run (stopping at "end_addr"). Return the MemRegion
  // covering from the start of the region corresponding to the first bit
  // of the run to the end of the region corresponding to the last bit of
  // the run. If there is no "1" bit at or after "addr", return an empty
  // MemRegion.
  MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
};
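
// Illustrative sketch (not part of the original header): parMark() wraps
// BitMap::par_set_bit(), which returns true only for the thread that
// actually flipped the bit. That makes it usable as a claim operation when
// several marking threads race to mark the same object:
//
//   if (_nextMarkBitMap->parMark(addr)) {
//     // this thread won the race and is now responsible for pushing the
//     // object on a mark stack / local queue
//   }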

// Represents a marking stack used by ConcurrentMarking in the G1 collector.
class CMMarkStack VALUE_OBJ_CLASS_SPEC {
  VirtualSpace _virtual_space;   // Underlying backing store for actual stack
  ConcurrentMark* _cm;
  oop* _base;        // bottom of stack
  jint _index;       // one more than last occupied index
  jint _capacity;    // max #elements
  jint _saved_index; // value of _index saved at start of GC
  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run

  bool  _overflow;
  bool  _should_expand;
  DEBUG_ONLY(bool _drain_in_progress;)
  DEBUG_ONLY(bool _drain_in_progress_yields;)

 public:
  CMMarkStack(ConcurrentMark* cm);
  ~CMMarkStack();

#ifndef PRODUCT
  jint max_depth() const {
    return _max_depth;
  }
#endif

  bool allocate(size_t capacity);

  oop pop() {
    if (!isEmpty()) {
      return _base[--_index];
    }
    return NULL;
  }

  // If overflow happens, don't do the push, and record the overflow.
  // *Requires* that "ptr" is already marked.
  void push(oop ptr) {
    if (isFull()) {
      // Record overflow.
      _overflow = true;
      return;
    } else {
      _base[_index++] = ptr;
      NOT_PRODUCT(_max_depth = MAX2(_max_depth, _index));
    }
  }
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_push" operations, not with "pop" or "drain". We would need
  // parallel versions of them if such concurrency was desired.
  void par_push(oop ptr);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Non-block impl. Note: concurrency is allowed only with other
  // "par_adjoin_arr" or "push" operations, not with "pop" or "drain".
  void par_adjoin_arr(oop* ptr_arr, int n);

  // Pushes the first "n" elements of "ptr_arr" on the stack.
  // Locking impl: concurrency is allowed only with
  // "par_push_arr" and/or "par_pop_arr" operations, which use the same
  // locking strategy.
  void par_push_arr(oop* ptr_arr, int n);

  // If returns false, the array was empty. Otherwise, removes up to "max"
  // elements from the stack, and transfers them to "ptr_arr" in an
  // unspecified order. The actual number transferred is given in "n" ("n
  // == 0" is deliberately redundant with the return value.) Locking impl:
  // concurrency is allowed only with "par_push_arr" and/or "par_pop_arr"
  // operations, which use the same locking strategy.
  bool par_pop_arr(oop* ptr_arr, int max, int* n);

  // Drain the mark stack, applying the given closure to all fields of
  // objects on the stack. (That is, continue until the stack is empty,
  // even if closure applications add entries to the stack.) The "bm"
  // argument, if non-null, may be used to verify that only marked objects
  // are on the mark stack. If "yield_after" is "true", then the
  // concurrent marker performing the drain offers to yield after
  // processing each object. If a yield occurs, stops the drain operation
  // and returns false. Otherwise, returns true.
  template<class OopClosureClass>
  bool drain(OopClosureClass* cl, CMBitMap* bm, bool yield_after = false);

  bool isEmpty()  { return _index == 0; }
  bool isFull()   { return _index == _capacity; }
  int  maxElems() { return _capacity; }

  bool overflow() { return _overflow; }
  void clear_overflow() { _overflow = false; }

  bool should_expand() const { return _should_expand; }
  void set_should_expand();

  // Expand the stack, typically in response to an overflow condition
  void expand();

  int size() { return _index; }

  void setEmpty() { _index = 0; clear_overflow(); }

  // Record the current index.
  void note_start_of_gc();

  // Make sure that we have not added any entries to the stack during GC.
  void note_end_of_gc();

  // iterate over the oops in the mark stack, up to the bound recorded via
  // the call above.
  void oops_do(OopClosure* f);
};
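
// Illustrative sketch (not part of the original header): a typical
// single-threaded drain loop over the stack. "cl" is a hypothetical oop
// closure; pop() returns NULL once the stack is empty.
//
//   oop obj;
//   while ((obj = _markStack.pop()) != NULL) {
//     obj->oop_iterate(&cl);   // may push more entries via push()
//   }
//
// The template drain() method above packages this pattern, optionally
// verifying marks against a CMBitMap and offering to yield between objects.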

class ForceOverflowSettings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  uintx _num_remaining;
  bool  _force;
#endif // !defined(PRODUCT)

 public:
  void init() PRODUCT_RETURN;
  void update() PRODUCT_RETURN;
  bool should_force() PRODUCT_RETURN_( return false; );
};

// this will enable a variety of different statistics per GC task
#define _MARKING_STATS_ 0
// this will enable the higher verbose levels
#define _MARKING_VERBOSE_ 0

#if _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
  statement ;                 \
} while (0)
#else // _MARKING_STATS_
#define statsOnly(statement)  \
do {                          \
} while (0)
#endif // _MARKING_STATS_
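
// Illustrative usage (not part of the original header): statements that only
// maintain marking statistics are wrapped in statsOnly() so they compile
// away entirely when _MARKING_STATS_ is 0, e.g.
//
//   statsOnly( _local_pushes += 1 );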

typedef enum {
  no_verbose = 0,  // verbose turned off
  stats_verbose,   // only prints stats at the end of marking
  low_verbose,     // low verbose, mostly per region and per major event
  medium_verbose,  // a bit more detailed than low
  high_verbose     // per object verbose
} CMVerboseLevel;

class YoungList;

// Root Regions are regions that are not empty at the beginning of a
// marking cycle and which we might collect during an evacuation pause
// while the cycle is active. Given that, during evacuation pauses, we
// do not copy objects that are explicitly marked, what we have to do
// for the root regions is to scan them and mark all objects reachable
// from them. According to the SATB assumptions, we only need to visit
// each object once during marking. So, as long as we finish this scan
// before the next evacuation pause, we can copy the objects from the
// root regions without having to mark them or do anything else to them.
//
// Currently, we only support root region scanning once (at the start
// of the marking cycle) and the root regions are all the survivor
// regions populated during the initial-mark pause.
class CMRootRegions VALUE_OBJ_CLASS_SPEC {
 private:
  YoungList*           _young_list;
  ConcurrentMark*      _cm;

  volatile bool        _scan_in_progress;
  volatile bool        _should_abort;
  HeapRegion* volatile _next_survivor;

 public:
  CMRootRegions();
  // We actually do most of the initialization in this method.
  void init(G1CollectedHeap* g1h, ConcurrentMark* cm);

  // Reset the claiming / scanning of the root regions.
  void prepare_for_scan();

  // Forces get_next() to return NULL so that the iteration aborts early.
  void abort() { _should_abort = true; }

  // Return true if the CM thread is actively scanning root regions,
  // false otherwise.
  bool scan_in_progress() { return _scan_in_progress; }

  // Claim the next root region to scan atomically, or return NULL if
  // all have been claimed.
  HeapRegion* claim_next();

  // Flag that we're done with root region scanning and notify anyone
  // who's waiting on it. If aborted is false, assume that all regions
  // have been claimed.
  void scan_finished();

  // If CM threads are still scanning root regions, wait until they
  // are done. Return true if we had to wait, false otherwise.
  bool wait_until_scan_finished();
};
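
// Illustrative sketch (not part of the original header): each concurrent
// marking worker claims root regions until claim_next() returns NULL:
//
//   HeapRegion* hr = root_regions()->claim_next();
//   while (hr != NULL) {
//     scanRootRegion(hr, worker_id);  // mark everything reachable from hr
//     hr = root_regions()->claim_next();
//   }
//
// scan_finished() is then called once so that threads blocked in
// wait_until_scan_finished() can proceed.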

class ConcurrentMarkThread;

class ConcurrentMark: public CHeapObj<mtGC> {
  friend class CMMarkStack;
  friend class ConcurrentMarkThread;
  friend class CMTask;
  friend class CMBitMapClosure;
  friend class CMGlobalObjectClosure;
  friend class CMRemarkTask;
  friend class CMConcurrentMarkingTask;
  friend class G1ParNoteEndTask;
  friend class CalcLiveObjectsClosure;
  friend class G1CMRefProcTaskProxy;
  friend class G1CMRefProcTaskExecutor;
  friend class G1CMKeepAliveAndDrainClosure;
  friend class G1CMDrainMarkingStackClosure;

 protected:
  ConcurrentMarkThread* _cmThread; // the thread doing the work
  G1CollectedHeap*      _g1h;      // the heap.
  uint                  _parallel_marking_threads; // the number of marking
                                                   // threads we're using
  uint                  _max_parallel_marking_threads; // max number of marking
                                                   // threads we'll ever use
  double                _sleep_factor; // how much we have to sleep, with
                                       // respect to the work we just did, to
                                       // meet the marking overhead goal
  double                _marking_task_overhead; // marking target overhead for
                                                // a single task

  // same as the two above, but for the cleanup task
  double                _cleanup_sleep_factor;
  double                _cleanup_task_overhead;

  FreeRegionList        _cleanup_list;

  // Concurrent marking support structures
  CMBitMap              _markBitMap1;
  CMBitMap              _markBitMap2;
  CMBitMapRO*           _prevMarkBitMap; // completed mark bitmap
  CMBitMap*             _nextMarkBitMap; // under-construction mark bitmap

  BitMap                _region_bm;
  BitMap                _card_bm;

  // Heap bounds
  HeapWord*             _heap_start;
  HeapWord*             _heap_end;

  // Root region tracking and claiming.
  CMRootRegions         _root_regions;

  // For gray objects
  CMMarkStack           _markStack; // Grey objects behind global finger.
  HeapWord* volatile    _finger;    // the global finger, region aligned,
                                    // always points to the end of the
                                    // last claimed region

  // marking tasks
  uint                   _max_worker_id; // maximum worker id
  uint                   _active_tasks;  // task num currently active
  CMTask**               _tasks;         // task queue array (max_worker_id len)
  CMTaskQueueSet*        _task_queues;   // task queue set
  ParallelTaskTerminator _terminator;    // for termination

  // Two sync barriers that are used to synchronise tasks when an
  // overflow occurs. The algorithm is the following. All tasks enter
  // the first one to ensure that they have all stopped manipulating
  // the global data structures. After they exit it, they re-initialise
  // their data structures and task 0 re-initialises the global data
  // structures. Then, they enter the second sync barrier. This
  // ensures that no task starts doing work before all data
  // structures (local and global) have been re-initialised. When they
  // exit it, they are free to start working again.
  WorkGangBarrierSync    _first_overflow_barrier_sync;
  WorkGangBarrierSync    _second_overflow_barrier_sync;
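
  // Illustrative sketch (not part of the original header) of the overflow
  // protocol described above, as seen by one task (see
  // enter_first_sync_barrier() / enter_second_sync_barrier() below):
  //
  //   if (has_overflown()) {
  //     enter_first_sync_barrier(worker_id);   // everyone stops touching
  //                                            // the global structures
  //     // task 0 resets the global state; every task resets its own
  //     enter_second_sync_barrier(worker_id);  // nobody restarts early
  //   }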

  // this is set by any task, when an overflow on the global data
  // structures is detected.
  volatile bool          _has_overflown;
  // true: marking is concurrent, false: we're in remark
  volatile bool          _concurrent;
  // set at the end of a Full GC so that marking aborts
  volatile bool          _has_aborted;

  // used when remark aborts due to an overflow to indicate that
  // another concurrent marking phase should start
  volatile bool          _restart_for_overflow;

  // This is true from the very start of concurrent marking until the
  // point when all the tasks complete their work. It is really used
  // to determine the points between the end of concurrent marking and
  // time of remark.
  volatile bool          _concurrent_marking_in_progress;

  // verbose level
  CMVerboseLevel         _verbose_level;

  // All of these times are in ms.
  NumberSeq _init_times;
  NumberSeq _remark_times;
  NumberSeq _remark_mark_times;
  NumberSeq _remark_weak_ref_times;
  NumberSeq _cleanup_times;
  double    _total_counting_time;
  double    _total_rs_scrub_time;

  double*   _accum_task_vtime; // accumulated task vtime

  FlexibleWorkGang* _parallel_workers;

  ForceOverflowSettings _force_overflow_conc;
  ForceOverflowSettings _force_overflow_stw;

  void weakRefsWork(bool clear_all_soft_refs);

  void swapMarkBitMaps();

  // It resets the global marking data structures, as well as the
  // task local ones; should be called during initial mark.
  void reset();

  // Resets all the marking data structures. Called when we have to restart
  // marking or when marking completes (via set_non_marking_state below).
  void reset_marking_state(bool clear_overflow = true);

  // We do this after we're done with marking so that the marking data
  // structures are initialised to a sensible and predictable state.
  void set_non_marking_state();

  // Called to indicate how many threads are currently active.
  void set_concurrency(uint active_tasks);

  // It should be called to indicate which phase we're in (concurrent
  // mark or remark) and how many threads are currently active.
  void set_concurrency_and_phase(uint active_tasks, bool concurrent);

  // prints all gathered CM-related statistics
  void print_stats();

  bool cleanup_list_is_empty() {
    return _cleanup_list.is_empty();
  }

  // accessor methods
  uint parallel_marking_threads() const     { return _parallel_marking_threads; }
  uint max_parallel_marking_threads() const { return _max_parallel_marking_threads;}
  double sleep_factor()                     { return _sleep_factor; }
  double marking_task_overhead()            { return _marking_task_overhead;}
  double cleanup_sleep_factor()             { return _cleanup_sleep_factor; }
  double cleanup_task_overhead()            { return _cleanup_task_overhead;}

  bool use_parallel_marking_threads() const {
    assert(parallel_marking_threads() <=
           max_parallel_marking_threads(), "sanity");
    assert((_parallel_workers == NULL && parallel_marking_threads() == 0) ||
           parallel_marking_threads() > 0,
           "parallel workers not set up correctly");
    return _parallel_workers != NULL;
  }

  HeapWord*               finger()       { return _finger; }
  bool                    concurrent()   { return _concurrent; }
  uint                    active_tasks() { return _active_tasks; }
  ParallelTaskTerminator* terminator()   { return &_terminator; }

  // It claims the next available region to be scanned by a marking
  // task/thread. It might return NULL if the next region is empty or
  // we have run out of regions. In the latter case, out_of_regions()
  // determines whether we've really run out of regions or the task
  // should call claim_region() again. This might seem a bit
  // awkward. Originally, the code was written so that claim_region()
  // either successfully returned with a non-empty region or there
  // were no more regions to be claimed. The problem with this was
  // that, in certain circumstances, it iterated over large chunks of
  // the heap finding only empty regions and, while it was working, it
  // was preventing the calling task from calling its regular clock
  // method. So, this way, each task will spend very little time in
  // claim_region() and is allowed to call the regular clock method
  // frequently.
  HeapRegion* claim_region(uint worker_id);

  // It determines whether we've run out of regions to scan. Note that
  // the finger can point past the heap end in case the heap was expanded
  // to satisfy an allocation without doing a GC. This is fine, because all
  // objects in those regions will be considered live anyway because of
  // SATB guarantees (i.e. their TAMS will be equal to bottom).
  bool out_of_regions() { return _finger >= _heap_end; }

  // Returns the task with the given id
  CMTask* task(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task id not within active bounds");
    return _tasks[id];
  }

  // Returns the task queue with the given id
  CMTaskQueue* task_queue(int id) {
    assert(0 <= id && id < (int) _active_tasks,
           "task queue id not within active bounds");
    return (CMTaskQueue*) _task_queues->queue(id);
  }

  // Returns the task queue set
  CMTaskQueueSet* task_queues() { return _task_queues; }

  // Access / manipulation of the overflow flag which is set to
  // indicate that the global stack has overflown
  bool has_overflown()        { return _has_overflown; }
  void set_has_overflown()    { _has_overflown = true; }
  void clear_has_overflown()  { _has_overflown = false; }
  bool restart_for_overflow() { return _restart_for_overflow; }

  // Methods to enter the two overflow sync barriers
  void enter_first_sync_barrier(uint worker_id);
  void enter_second_sync_barrier(uint worker_id);

  ForceOverflowSettings* force_overflow_conc() {
    return &_force_overflow_conc;
  }

  ForceOverflowSettings* force_overflow_stw() {
    return &_force_overflow_stw;
  }

  ForceOverflowSettings* force_overflow() {
    if (concurrent()) {
      return force_overflow_conc();
    } else {
      return force_overflow_stw();
    }
  }

  // Live Data Counting data structures...
  // These data structures are initialized at the start of
  // marking. They are written to while marking is active.
  // They are aggregated during remark; the aggregated values
  // are then used to populate the _region_bm, _card_bm, and
  // the total live bytes, which are then subsequently updated
  // during cleanup.

  // An array of bitmaps (one bit map per task). Each bitmap
  // is used to record the cards spanned by the live objects
  // marked by that task/worker.
  BitMap*  _count_card_bitmaps;

  // Used to record the number of marked live bytes
  // (for each region, by worker thread).
  size_t** _count_marked_bytes;

  // Card index of the bottom of the G1 heap. Used for biasing indices into
  // the card bitmaps.
  intptr_t _heap_bottom_card_num;

  // Set to true when initialization is complete
  bool _completed_initialization;

 public:
  // Manipulation of the global mark stack.
  // Notice that the first mark_stack_push is CAS-based, whereas the
  // two below are Mutex-based. This is OK since the first one is only
  // called during evacuation pauses and doesn't compete with the
  // other two (which are called by the marking tasks during
  // concurrent marking or remark).
  bool mark_stack_push(oop p) {
    _markStack.par_push(p);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  bool mark_stack_push(oop* arr, int n) {
    _markStack.par_push_arr(arr, n);
    if (_markStack.overflow()) {
      set_has_overflown();
      return false;
    }
    return true;
  }
  void mark_stack_pop(oop* arr, int max, int* n) {
    _markStack.par_pop_arr(arr, max, n);
  }
  size_t mark_stack_size()                { return _markStack.size(); }
  size_t partial_mark_stack_size_target() { return _markStack.maxElems()/3; }
  bool mark_stack_overflow()              { return _markStack.overflow(); }
  bool mark_stack_empty()                 { return _markStack.isEmpty(); }
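
  // Illustrative sketch (not part of the original header): callers treat a
  // false return from mark_stack_push() as a global overflow and back out,
  // e.g.
  //
  //   if (!mark_stack_push(obj)) {
  //     // has_overflown() is now true; the tasks will abort and the
  //     // overflow protocol (sync barriers above) will restart marking
  //     return;
  //   }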

  CMRootRegions* root_regions() { return &_root_regions; }

  bool concurrent_marking_in_progress() {
    return _concurrent_marking_in_progress;
  }
  void set_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = true;
  }
  void clear_concurrent_marking_in_progress() {
    _concurrent_marking_in_progress = false;
  }

  void update_accum_task_vtime(int i, double vtime) {
    _accum_task_vtime[i] += vtime;
  }

  double all_task_accum_vtime() {
    double ret = 0.0;
    for (uint i = 0; i < _max_worker_id; ++i)
      ret += _accum_task_vtime[i];
    return ret;
  }

  // Attempts to steal an object from the task queues of other tasks
  bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
    return _task_queues->steal(worker_id, hash_seed, obj);
  }

  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
  ~ConcurrentMark();

  ConcurrentMarkThread* cmThread() { return _cmThread; }

  CMBitMapRO* prevMarkBitMap() const { return _prevMarkBitMap; }
  CMBitMap*   nextMarkBitMap() const { return _nextMarkBitMap; }

  // Returns the number of GC threads to be used in a concurrent
  // phase based on the number of GC threads being used in a STW
  // phase.
  uint scale_parallel_threads(uint n_par_threads);

  // Calculates the number of GC threads to be used in a concurrent phase.
  uint calc_parallel_marking_threads();

  // The following three are interaction between CM and
  // G1CollectedHeap

  // This notifies CM that a root during initial-mark needs to be
  // grayed. It is MT-safe. word_size is the size of the object in
  // words. It is passed explicitly as sometimes we cannot calculate
  // it from the given object because it might be in an inconsistent
  // state (e.g., in to-space and being copied). So the caller is
  // responsible for dealing with this issue (e.g., get the size from
  // the from-space image when the to-space image might be
  // inconsistent) and always passing the size. hr is the region that
  // contains the object and it's passed optionally from callers who
  // might already have it (no point in recalculating it).
  inline void grayRoot(oop obj, size_t word_size,
                       uint worker_id, HeapRegion* hr = NULL);

  // It iterates over the heap and for each object it comes across it
  // will dump the contents of its reference fields, as well as
  // liveness information for the object and its referents. The dump
  // will be written to a file with the following name:
  // G1PrintReachableBaseFile + "." + str.
  // vo decides whether the prev (vo == UsePrevMarking), the next
  // (vo == UseNextMarking) marking information, or the mark word
  // (vo == UseMarkWord) will be used to determine the liveness of
  // each object / referent.
  // If all is true, all objects in the heap will be dumped, otherwise
  // only the live ones. In the dump the following symbols / abbreviations
  // are used:
  //   M : an explicitly live object (its bitmap bit is set)
  //   > : an implicitly live object (over tams)
  //   O : an object outside the G1 heap (typically: in the perm gen)
  // NOT : a reference field whose referent is not live
  // AND MARKED : indicates that an object is both explicitly and
  // implicitly live (it should be one or the other, not both)
  void print_reachable(const char* str,
                       VerifyOption vo, bool all) PRODUCT_RETURN;

  // Clear the next marking bitmap (will be called concurrently).
  void clearNextBitmap();

  // These two do the work that needs to be done before and after the
  // initial root checkpoint. Since this checkpoint can be done at two
  // different points (i.e. an explicit pause or piggy-backed on a
  // young collection), then it's nice to be able to easily share the
  // pre/post code. It might be the case that we can put everything in
  // the post method. TP
  void checkpointRootsInitialPre();
  void checkpointRootsInitialPost();

  // Scan all the root regions and mark everything reachable from
  // them.
  void scanRootRegions();

  // Scan a single root region and mark everything reachable from it.
  void scanRootRegion(HeapRegion* hr, uint worker_id);

  // Do concurrent phase of marking, to a tentative transitive closure.
  void markFromRoots();

  void checkpointRootsFinal(bool clear_all_soft_refs);
  void checkpointRootsFinalWork();
  void cleanup();
  void completeCleanup();

  // Mark in the previous bitmap. NB: this is usually read-only, so use
  // this carefully!
  inline void markPrev(oop p);

  // Clears marks for all objects in the given range, for the prev,
  // next, or both bitmaps. NB: the previous bitmap is usually
  // read-only, so use this carefully!
  void clearRangePrevBitmap(MemRegion mr);
  void clearRangeNextBitmap(MemRegion mr);
  void clearRangeBothBitmaps(MemRegion mr);

  // Notify data structures that a GC has started.
  void note_start_of_gc() {
    _markStack.note_start_of_gc();
  }

  // Notify data structures that a GC is finished.
  void note_end_of_gc() {
    _markStack.note_end_of_gc();
  }

  // Verify that there are no CSet oops on the stacks (taskqueues /
  // global mark stack), enqueued SATB buffers, per-thread SATB
  // buffers, and fingers (global / per-task). The boolean parameters
  // decide which of the above data structures to verify. If marking
  // is not in progress, it's a no-op.
  void verify_no_cset_oops(bool verify_stacks,
                           bool verify_enqueued_buffers,
                           bool verify_thread_buffers,
                           bool verify_fingers) PRODUCT_RETURN;

  // It is called at the end of an evacuation pause during marking so
  // that CM is notified of where the new end of the heap is. It
  // doesn't do anything if concurrent_marking_in_progress() is false,
  // unless the force parameter is true.
  void update_g1_committed(bool force = false);

  bool isMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _nextMarkBitMap->startWord() &&
           addr < _nextMarkBitMap->endWord(), "in a region");

    return _nextMarkBitMap->isMarked(addr);
  }

  inline bool not_yet_marked(oop p) const;

  // XXX Debug code
  bool containing_card_is_marked(void* p);
  bool containing_cards_are_marked(void* start, void* last);

  bool isPrevMarked(oop p) const {
    assert(p != NULL && p->is_oop(), "expected an oop");
    HeapWord* addr = (HeapWord*)p;
    assert(addr >= _prevMarkBitMap->startWord() &&
           addr < _prevMarkBitMap->endWord(), "in a region");

    return _prevMarkBitMap->isMarked(addr);
  }

  inline bool do_yield_check(uint worker_i = 0);
  inline bool should_yield();

  // Called to abort the marking cycle after a Full GC takes place.
  void abort();

  bool has_aborted() { return _has_aborted; }

  // This prints the global/local fingers. It is used for debugging.
  NOT_PRODUCT(void print_finger();)

  void print_summary_info();

  void print_worker_threads_on(outputStream* st) const;

  void print_on_error(outputStream* st) const;

  // The following indicate whether a given verbose level has been
  // set. Notice that anything above stats is conditional to
  // _MARKING_VERBOSE_ having been set to 1
  bool verbose_stats() {
    return _verbose_level >= stats_verbose;
  }
  bool verbose_low() {
    return _MARKING_VERBOSE_ && _verbose_level >= low_verbose;
  }
  bool verbose_medium() {
    return _MARKING_VERBOSE_ && _verbose_level >= medium_verbose;
  }
  bool verbose_high() {
    return _MARKING_VERBOSE_ && _verbose_level >= high_verbose;
  }

  // Liveness counting

  // Utility routine to set an exclusive range of cards on the given
  // card liveness bitmap
  inline void set_card_bitmap_range(BitMap* card_bm,
                                    BitMap::idx_t start_idx,
                                    BitMap::idx_t end_idx,
                                    bool is_par);

  // Returns the card number of the bottom of the G1 heap.
  // Used in biasing indices into accounting card bitmaps.
  intptr_t heap_bottom_card_num() const {
    return _heap_bottom_card_num;
  }

  // Returns the card bitmap for a given task or worker id.
  BitMap* count_card_bitmap_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
    assert(_count_card_bitmaps != NULL, "uninitialized");
    BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
    assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
    return task_card_bm;
  }

  // Returns the array containing the marked bytes for each region,
  // for the given worker or task id.
  size_t* count_marked_bytes_array_for(uint worker_id) {
    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
    assert(_count_marked_bytes != NULL, "uninitialized");
    size_t* marked_bytes_array = _count_marked_bytes[worker_id];
    assert(marked_bytes_array != NULL, "uninitialized");
    return marked_bytes_array;
  }

  // Returns the index in the liveness accounting card table bitmap
  // for the given address
  inline BitMap::idx_t card_bitmap_index_for(HeapWord* addr);
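
  // Illustrative sketch (not part of the original header): the liveness
  // card bitmaps are biased by heap_bottom_card_num() so that the heap's
  // first card maps to bit 0. Assuming the usual card-table shift, the
  // index for an address is conceptually:
  //
  //   intptr_t card_num = intptr_t(addr) >> CardTableModRefBS::card_shift;
  //   BitMap::idx_t idx = card_num - heap_bottom_card_num();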

  // Counts the size of the given memory region in the given
  // marked_bytes array slot for the given HeapRegion.
  // Sets the bits in the given card bitmap that are associated with the
  // cards that are spanned by the memory region.
  inline void count_region(MemRegion mr, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);

  // Counts the given memory region in the task/worker counting
  // data structures for the given worker id.
  inline void count_region(MemRegion mr, uint worker_id);

  // Counts the given object in the given task/worker counting
  // data structures.
  inline void count_object(oop obj, HeapRegion* hr,
                           size_t* marked_bytes_array,
                           BitMap* task_card_bm);

  // Counts the given object in the task/worker counting data
  // structures for the given worker id.
  inline void count_object(oop obj, HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the given task/worker counting structures.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr,
                                 size_t* marked_bytes_array,
                                 BitMap* task_card_bm);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, size_t word_size,
                                 HeapRegion* hr, uint worker_id);

  // Attempts to mark the given object and, if successful, counts
  // the object in the task/worker counting structures for the
  // given worker id.
  inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  inline bool par_mark_and_count(oop obj, uint worker_id);

  // Similar to the above routine but there are times when we cannot
  // safely calculate the size of obj due to races and we, therefore,
  // pass the size in as a parameter. It is the caller's responsibility
  // to ensure that the size passed in for obj is valid.
  inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);

  // Unconditionally mark the given object, and unconditionally count
  // the object in the counting structures for worker id 0.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj, HeapRegion* hr);

  // Similar to the above routine but we don't know the heap region that
  // contains the object to be marked/counted, which this routine looks up.
  // Should *not* be called from parallel code.
  inline bool mark_and_count(oop obj);

  // Returns true if initialization was successfully completed.
  bool completed_initialization() const {
    return _completed_initialization;
  }

 protected:
  // Clear all the per-task bitmaps and arrays used to store the
  // counting data.
  void clear_all_count_data();

  // Aggregates the counting data for each worker/task
  // that was constructed while marking. Also sets
  // the amount of marked bytes for each region and
  // the top at concurrent mark count.
  void aggregate_count_data();

  // Verification routine
  void verify_count_data();
};

// A class representing a marking task.
class CMTask : public TerminatorTerminator {
 private:
  enum PrivateConstants {
    // the regular clock call is called once the scanned words reaches
    // this limit
    words_scanned_period       = 12*1024,
    // the regular clock call is called once the number of visited
    // references reaches this limit
    refs_reached_period        = 384,
    // initial value for the hash seed, used in the work stealing code
    init_hash_seed             = 17,
    // how many entries will be transferred between global stack and
    // local queues
    global_stack_transfer_size = 16
  };

  uint             _worker_id;
  G1CollectedHeap* _g1h;
  ConcurrentMark*  _cm;
  CMBitMap*        _nextMarkBitMap;
  // the task queue of this task
  CMTaskQueue*     _task_queue;
 private:
  // the task queue set---needed for stealing
  CMTaskQueueSet*  _task_queues;
  // indicates whether the task has been claimed---this is only for
  // debugging purposes
  bool             _claimed;

  // number of calls to this task
  int              _calls;

  // when the virtual timer reaches this time, the marking step should
  // exit
  double           _time_target_ms;
  // the start time of the current marking step
  double           _start_time_ms;

  // the oop closure used for iterations over oops
  G1CMOopClosure*  _cm_oop_closure;

  // the region this task is scanning, NULL if we're not scanning any
  HeapRegion*      _curr_region;
  // the local finger of this task, NULL if we're not scanning a region
  HeapWord*        _finger;
  // limit of the region this task is scanning, NULL if we're not scanning one
  HeapWord*        _region_limit;

  // the number of words this task has scanned
  size_t           _words_scanned;
  // When _words_scanned reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t           _words_scanned_limit;
  // the initial value of _words_scanned_limit (i.e. what it was
  // before it was decreased).
  size_t           _real_words_scanned_limit;

  // the number of references this task has visited
  size_t           _refs_reached;
  // When _refs_reached reaches this limit, the regular clock is
  // called. Notice that this might be decreased under certain
  // circumstances (i.e. when we believe that we did an expensive
  // operation).
  size_t           _refs_reached_limit;
  // the initial value of _refs_reached_limit (i.e. what it was before
  // it was decreased).
  size_t           _real_refs_reached_limit;

  // used by the work stealing stuff
  int              _hash_seed;
  // if this is true, then the task has aborted for some reason
  bool             _has_aborted;
  // set when the task aborts because it has met its time quota
  bool             _has_timed_out;
  // true when we're draining SATB buffers; this avoids the task
  // aborting due to SATB buffers being available (as we're already
  // dealing with them)
  bool             _draining_satb_buffers;

  // number sequence of past step times
  NumberSeq        _step_times_ms;
  // elapsed time of this task
  double           _elapsed_time_ms;
  // termination time of this task
  double           _termination_time_ms;
  // when this task got into the termination protocol
  double           _termination_start_time_ms;

  // true when the task is during a concurrent phase, false when it is
  // in the remark phase (so, in the latter case, we do not have to
  // check all the things that we have to check during the concurrent
  // phase, i.e. SATB buffer availability...)
  bool             _concurrent;

  TruncatedSeq     _marking_step_diffs_ms;

  // Counting data structures. Embedding the task's marked_bytes_array
  // and card bitmap into the actual task saves having to go through
  // the ConcurrentMark object.
  size_t*          _marked_bytes_array;
  BitMap*          _card_bm;

  // LOTS of statistics related with this task
#if _MARKING_STATS_
  NumberSeq _all_clock_intervals_ms;
  double    _interval_start_time_ms;

  int       _aborted;
  int       _aborted_overflow;
  int       _aborted_cm_aborted;
  int       _aborted_yield;
  int       _aborted_timed_out;
  int       _aborted_satb;
  int       _aborted_termination;

  int       _steal_attempts;
  int       _steals;

  int       _clock_due_to_marking;
  int       _clock_due_to_scanning;

  int       _local_pushes;
  int       _local_pops;
  int       _local_max_size;
  int       _objs_scanned;

  int       _global_pushes;
  int       _global_pops;
  int       _global_max_size;

  int       _global_transfers_to;
  int       _global_transfers_from;

  int       _regions_claimed;
  int       _objs_found_on_bitmap;

  int       _satb_buffers_processed;
#endif // _MARKING_STATS_

  // it updates the local fields after this task has claimed
  // a new region to scan
  void setup_for_region(HeapRegion* hr);
  // it brings up-to-date the limit of the region
  void update_region_limit();

  // called when either the words scanned or the refs visited limit
  // has been reached
  void reached_limit();
  // recalculates the words scanned and refs visited limits
  void recalculate_limits();
  // decreases the words scanned and refs visited limits when we reach
  // an expensive operation
  void decrease_limits();
  // it checks whether the words scanned or refs visited reached their
  // respective limit and calls reached_limit() if they have
  void check_limits() {
    if (_words_scanned >= _words_scanned_limit ||
        _refs_reached >= _refs_reached_limit) {
      reached_limit();
    }
  }
  // this is supposed to be called regularly during a marking step as
  // it checks a bunch of conditions that might cause the marking step
  // to abort
  void regular_clock_call();
  bool concurrent() { return _concurrent; }

 public:
  // It resets the task; it should be called right at the beginning of
  // a marking phase.
  void reset(CMBitMap* _nextMarkBitMap);
  // it clears all the fields that correspond to a claimed region.
  void clear_region_fields();

  void set_concurrent(bool concurrent) { _concurrent = concurrent; }

  // The main method of this class which performs a marking step
  // trying not to exceed the given duration. However, it might exit
  // prematurely, according to some conditions (i.e. SATB buffers are
  // available for processing).
  void do_marking_step(double target_ms,
                       bool do_termination,
                       bool is_serial);
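
  // Illustrative sketch (not part of the original header): a parallel
  // remark worker typically drives a task like this:
  //
  //   CMTask* task = _cm->task(worker_id);
  //   task->record_start_time();
  //   do {
  //     task->do_marking_step(1000000000.0 /* effectively unbounded */,
  //                           true  /* do_termination */,
  //                           false /* is_serial */);
  //   } while (task->has_aborted() && !_cm->has_overflown());
  //   task->record_end_time();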

  // These two calls start and stop the timer
  void record_start_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0;
  }
  void record_end_time() {
    _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
  }

  // returns the worker ID associated with this task.
  uint worker_id() { return _worker_id; }

  // From TerminatorTerminator. It determines whether this task should
  // exit the termination protocol after it's entered it.
  virtual bool should_exit_termination();

  // Resets the local region fields after a task has finished scanning a
  // region; or when they have become stale as a result of the region
  // being evacuated.
  void giveup_current_region();

  HeapWord* finger() { return _finger; }

  bool has_aborted()       { return _has_aborted; }
  void set_has_aborted()   { _has_aborted = true; }
  void clear_has_aborted() { _has_aborted = false; }
  bool has_timed_out()     { return _has_timed_out; }
  bool claimed()           { return _claimed; }

  void set_cm_oop_closure(G1CMOopClosure* cm_oop_closure);

  // It grays the object by marking it and, if necessary, pushing it
  // on the local queue
  inline void deal_with_reference(oop obj);

  // It scans an object and visits its children.
  void scan_object(oop obj);

  // It pushes an object on the local queue.
  inline void push(oop obj);

  // These two move entries to/from the global stack.
  void move_entries_to_global_stack();
  void get_entries_from_global_stack();

  // It pops and scans objects from the local queue. If partially is
  // true, then it stops when the queue size is of a given limit. If
  // partially is false, then it stops when the queue is empty.
  void drain_local_queue(bool partially);
  // It moves entries from the global stack to the local queue and
  // drains the local queue. If partially is true, then it stops when
  // both the global stack and the local queue reach a given size. If
  // partially is false, it tries to empty them totally.
  void drain_global_stack(bool partially);
  // It keeps picking SATB buffers and processing them until no SATB
  // buffers are available.
  void drain_satb_buffers();

  // moves the local finger to a new location
  inline void move_finger_to(HeapWord* new_finger) {
    assert(new_finger >= _finger && new_finger < _region_limit, "invariant");
    _finger = new_finger;
  }

  CMTask(uint worker_id, ConcurrentMark *cm,
         size_t* marked_bytes, BitMap* card_bm,
         CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);

  // it prints statistics associated with this task
  void print_stats();

#if _MARKING_STATS_
  void increase_objs_found_on_bitmap() { ++_objs_found_on_bitmap; }
#endif // _MARKING_STATS_
};

// Class that's used to print out per-region liveness
// information. It's currently used at the end of marking and also
// after we sort the old regions at the end of the cleanup operation.
class G1PrintRegionLivenessInfoClosure: public HeapRegionClosure {
 private:
  outputStream* _out;

  // Accumulators for these values.
  size_t _total_used_bytes;
  size_t _total_capacity_bytes;
  size_t _total_prev_live_bytes;
  size_t _total_next_live_bytes;

  // These are set up when we come across a "starts humongous" region
  // (as this is where most of this information is stored, not in the
  // subsequent "continues humongous" regions). After that, for every
  // region in a given humongous region series we deduce the right
  // values for it by simply subtracting the appropriate amount from
  // these fields. All these values should reach 0 after we've visited
  // the last region in the series.
  size_t _hum_used_bytes;
  size_t _hum_capacity_bytes;
  size_t _hum_prev_live_bytes;
  size_t _hum_next_live_bytes;

  // Accumulator for the remembered set size
  size_t _total_remset_bytes;

  // Accumulator for strong code roots memory size
  size_t _total_strong_code_roots_bytes;

  static double perc(size_t val, size_t total) {
    if (total == 0) {
      return 0.0;
    } else {
      return 100.0 * ((double) val / (double) total);
    }
  }

  static double bytes_to_mb(size_t val) {
    return (double) val / (double) M;
  }

  // See the .cpp file.
  size_t get_hum_bytes(size_t* hum_bytes);
  void get_hum_bytes(size_t* used_bytes, size_t* capacity_bytes,
                     size_t* prev_live_bytes, size_t* next_live_bytes);

 public:
  // The header and footer are printed in the constructor and
  // destructor respectively.
  G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name);
  virtual bool doHeapRegion(HeapRegion* r);
  ~G1PrintRegionLivenessInfoClosure();
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_HPP
