Fri, 12 Aug 2011 11:31:06 -0400
7039627: G1: avoid BOT updates for survivor allocations and dirty survivor regions incrementally
Summary: Refactor the allocation code during GC to use the G1AllocRegion abstraction. Use separate subclasses of G1AllocRegion for survivor and old regions. Avoid BOT updates and dirty survivor cards incrementally for the former.
Reviewed-by: brutisso, johnc, ysr
/*
 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1HRPrinter.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSets.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/parNew/parGCAllocBuffer.hpp"
#include "memory/barrierSet.hpp"
#include "memory/memRegion.hpp"
#include "memory/sharedHeap.hpp"
// A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
// It uses the "Garbage First" heap organization and algorithm, which
// may combine concurrent marking with parallel, incremental compaction of
// heap subsets that will yield large amounts of garbage.

class HeapRegion;
class HRRSCleanupTask;
class PermanentGenerationSpec;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1ScanHeapEvacClosure;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectorPolicy;
class GenRemSet;
class G1RemSet;
class HeapRegionRemSetIterator;
class ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class GenerationCounters;

typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

enum GCAllocPurpose {
  GCAllocForTenured,
  GCAllocForSurvived,
  GCAllocPurposeCount
};
class YoungList : public CHeapObj {
private:
  G1CollectedHeap* _g1h;

  HeapRegion* _head;

  HeapRegion* _survivor_head;
  HeapRegion* _survivor_tail;

  HeapRegion* _curr;

  size_t      _length;
  size_t      _survivor_length;

  size_t      _last_sampled_rs_lengths;
  size_t      _sampled_rs_lengths;

  void        empty_list(HeapRegion* list);

public:
  YoungList(G1CollectedHeap* g1h);

  void        push_region(HeapRegion* hr);
  void        add_survivor_region(HeapRegion* hr);

  void        empty_list();
  bool        is_empty() { return _length == 0; }
  size_t      length() { return _length; }
  size_t      survivor_length() { return _survivor_length; }
  // Currently we do not keep track of the used byte sum for the
  // young list and the survivors and it'd be quite a lot of work to
  // do so. When we eventually replace the young list with
  // instances of HeapRegionLinkedList we'll get that for free. So,
  // we'll report the more accurate information then.
  size_t      eden_used_bytes() {
    assert(length() >= survivor_length(), "invariant");
    return (length() - survivor_length()) * HeapRegion::GrainBytes;
  }
  size_t      survivor_used_bytes() {
    return survivor_length() * HeapRegion::GrainBytes;
  }

  void rs_length_sampling_init();
  bool rs_length_sampling_more();
  void rs_length_sampling_next();

  void reset_sampled_info() {
    _last_sampled_rs_lengths = 0;
  }
  size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }

  // for development purposes
  void reset_auxilary_lists();
  void clear() { _head = NULL; _length = 0; }

  void clear_survivors() {
    _survivor_head   = NULL;
    _survivor_tail   = NULL;
    _survivor_length = 0;
  }

  HeapRegion* first_region() { return _head; }
  HeapRegion* first_survivor_region() { return _survivor_head; }
  HeapRegion* last_survivor_region() { return _survivor_tail; }

  // debugging
  bool check_list_well_formed();
  bool check_list_empty(bool check_sample = true);
  void print();
};
class MutatorAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};

class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  SurvivorGCAllocRegion()
    : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};

class OldGCAllocRegion : public G1AllocRegion {
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
  OldGCAllocRegion()
    : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
};
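
// The three subclasses above differ only in where new regions come from
// and in whether the block offset table (BOT) is maintained (the
// bot_updates argument). Survivor regions pass bot_updates == false:
// they are walked object-by-object during the next evacuation pause, so
// maintaining the BOT for them is wasted work; old regions keep it on.
// A sketch of how the GC callbacks are expected to delegate to the
// new_gc_alloc_region() / retire_gc_alloc_region() helpers declared
// later in G1CollectedHeap (the real definitions live in the .cpp file;
// treat this as illustrative only):
//
//   HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
//                                                          bool force) {
//     assert(!force, "not supported for GC alloc regions");
//     return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
//   }
//
//   void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
//                                             size_t allocated_bytes) {
//     _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
//                                  GCAllocForSurvived);
//   }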
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
  friend class VM_G1CollectForAllocation;
  friend class VM_GenCollectForPermanentAllocation;
  friend class VM_G1CollectFull;
  friend class VM_G1IncCollectionPause;
  friend class VMStructs;
  friend class MutatorAllocRegion;
  friend class SurvivorGCAllocRegion;
  friend class OldGCAllocRegion;

  // Closures used in implementation.
  friend class G1ParCopyHelper;
  friend class G1IsAliveClosure;
  friend class G1EvacuateFollowersClosure;
  friend class G1ParScanThreadState;
  friend class G1ParScanClosureSuper;
  friend class G1ParEvacuateFollowersClosure;
  friend class G1ParTask;
  friend class G1FreeGarbageRegionClosure;
  friend class RefineCardTableEntryClosure;
  friend class G1PrepareCompactClosure;
  friend class RegionSorter;
  friend class RegionResetter;
  friend class CountRCClosure;
  friend class EvacPopObjClosure;
  friend class G1ParCleanupCTTask;

  // Other related classes.
  friend class G1MarkSweep;

private:
  // The one and only G1CollectedHeap, so static functions can find it.
  static G1CollectedHeap* _g1h;

  static size_t _humongous_object_threshold_in_words;

  // Storage for the G1 heap (excludes the permanent generation).
  VirtualSpace _g1_storage;
  MemRegion    _g1_reserved;

  // The part of _g1_storage that is currently committed.
  MemRegion _g1_committed;

  // The master free list. It will satisfy all new region allocations.
  MasterFreeRegionList _free_list;

  // The secondary free list which contains regions that have been
  // freed up during the cleanup process. This will be appended to the
  // master free list when appropriate.
  SecondaryFreeRegionList _secondary_free_list;

  // It keeps track of the humongous regions.
  MasterHumongousRegionSet _humongous_set;

  // The number of regions we could create by expansion.
  size_t _expansion_regions;

  // The block offset table for the G1 heap.
  G1BlockOffsetSharedArray* _bot_shared;

  // Move all of the regions off the free lists, then rebuild those free
  // lists, before and after full GC.
  void tear_down_region_lists();
  void rebuild_region_lists();

  // The sequence of all heap regions in the heap.
  HeapRegionSeq _hrs;

  // Alloc region used to satisfy mutator allocation requests.
  MutatorAllocRegion _mutator_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // survivor objects.
  SurvivorGCAllocRegion _survivor_gc_alloc_region;

  // Alloc region used to satisfy allocation requests by the GC for
  // old objects.
  OldGCAllocRegion _old_gc_alloc_region;

  // The last old region we allocated to during the last GC.
  // Typically, it is not full so we should re-use it during the next GC.
  HeapRegion* _retained_old_gc_alloc_region;

  // It resets the mutator alloc region before new allocations can take place.
  void init_mutator_alloc_region();

  // It releases the mutator alloc region.
  void release_mutator_alloc_region();

  // It initializes the GC alloc regions at the start of a GC.
  void init_gc_alloc_regions();

  // It releases the GC alloc regions at the end of a GC.
  void release_gc_alloc_regions();

  // It does any cleanup that needs to be done on the GC alloc regions
  // before a Full GC.
  void abandon_gc_alloc_regions();

  // Helper for monitoring and management support.
  G1MonitoringSupport* _g1mm;

  // Determines PLAB size for a particular allocation purpose.
  static size_t desired_plab_sz(GCAllocPurpose purpose);
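
  // A sketch of the purpose-to-size mapping (the real definition lives
  // in the .cpp file; YoungPLABSize and OldPLABSize are the standard
  // HotSpot PLAB sizing flags):
  //
  //   static size_t desired_plab_sz(GCAllocPurpose purpose) {
  //     switch (purpose) {
  //       case GCAllocForSurvived: return YoungPLABSize;
  //       case GCAllocForTenured:  return OldPLABSize;
  //       default:
  //         ShouldNotReachHere();
  //         return 0;
  //     }
  //   }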
  // Outside of GC pauses, the number of bytes used in all regions other
  // than the current allocation region.
  size_t _summary_bytes_used;

  // This is used for a quick test on whether a reference points into
  // the collection set or not. Basically, we have an array, with one
  // byte per region, and that byte denotes whether the corresponding
  // region is in the collection set or not. The entry corresponding to
  // the bottom of the heap, i.e., region 0, is pointed to by
  // _in_cset_fast_test_base. The _in_cset_fast_test field has been
  // biased so that it actually points to address 0 of the address
  // space, to make the test as fast as possible (we can simply shift
  // the address to index into it, instead of having to subtract the
  // bottom of the heap from the address before shifting it; basically
  // it works in the same way the card table works).
  bool* _in_cset_fast_test;

  // The allocated array used for the fast test on whether a reference
  // points into the collection set or not. This field is also used to
  // free the array.
  bool* _in_cset_fast_test_base;

  // The length of the _in_cset_fast_test_base array.
  size_t _in_cset_fast_test_length;
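
  // The biasing trick in a nutshell: if heap_bottom is the bottom of
  // the reserved heap, then setting (illustrative sketch; the actual
  // set-up happens at heap initialization in the .cpp file)
  //
  //   _in_cset_fast_test = _in_cset_fast_test_base -
  //       ((size_t) heap_bottom >> HeapRegion::LogOfHRGrainBytes);
  //
  // lets in_cset_fast_test() below index the array directly with
  // (obj_address >> LogOfHRGrainBytes), with no subtraction on the
  // hot path.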
  volatile unsigned _gc_time_stamp;

  size_t* _surviving_young_words;

  G1HRPrinter _hr_printer;

  void setup_surviving_young_words();
  void update_surviving_young_words(size_t* surv_young_words);
  void cleanup_surviving_young_words();

  // It decides whether an explicit GC should start a concurrent cycle
  // instead of doing a STW GC. Currently, a concurrent cycle is
  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent, or
  // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
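
  // In other words, the decision is roughly (illustrative sketch; see
  // the .cpp file for the definition):
  //
  //   switch (cause) {
  //     case GCCause::_gc_locker:           return GCLockerInvokesConcurrent;
  //     case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
  //     default:                            return false;
  //   }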
  // Keeps track of how many "full collections" (i.e., Full GCs or
  // concurrent cycles) we have completed. The number of them we have
  // started is maintained in _total_full_collections in CollectedHeap.
  volatile unsigned int _full_collections_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions that will be allocated by
  // this method will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // These are macros so that, if the assert fires, we get the correct
  // line number, file, etc.

#define heap_locking_asserts_err_msg(_extra_message_)                        \
  err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s",   \
          (_extra_message_),                                                 \
          BOOL_TO_STR(Heap_lock->owned_by_self()),                           \
          BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()),              \
          BOOL_TO_STR(Thread::current()->is_VM_thread()))

#define assert_heap_locked()                                                 \
  do {                                                                       \
    assert(Heap_lock->owned_by_self(),                                       \
           heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
  } while (0)

#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_)             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() ||                                      \
           (SafepointSynchronize::is_at_safepoint() &&                        \
             ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
           heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                        "should be at a safepoint"));         \
  } while (0)

#define assert_heap_locked_and_not_at_safepoint()                             \
  do {                                                                        \
    assert(Heap_lock->owned_by_self() &&                                      \
           !SafepointSynchronize::is_at_safepoint(),                          \
          heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
                                       "should not be at a safepoint"));      \
  } while (0)

#define assert_heap_not_locked()                                               \
  do {                                                                         \
    assert(!Heap_lock->owned_by_self(),                                        \
        heap_locking_asserts_err_msg("should not be holding the Heap_lock"));  \
  } while (0)

#define assert_heap_not_locked_and_not_at_safepoint()                          \
  do {                                                                         \
    assert(!Heap_lock->owned_by_self() &&                                      \
           !SafepointSynchronize::is_at_safepoint(),                           \
      heap_locking_asserts_err_msg("should not be holding the Heap_lock and "  \
                                   "should not be at a safepoint"));           \
  } while (0)

#define assert_at_safepoint(_should_be_vm_thread_)                             \
  do {                                                                         \
    assert(SafepointSynchronize::is_at_safepoint() &&                          \
              ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()),  \
           heap_locking_asserts_err_msg("should be at a safepoint"));          \
  } while (0)

#define assert_not_at_safepoint()                                              \
  do {                                                                         \
    assert(!SafepointSynchronize::is_at_safepoint(),                           \
           heap_locking_asserts_err_msg("should not be at a safepoint"));      \
  } while (0)
protected:

  // The young region list.
  YoungList*  _young_list;

  // The current policy object for the collector.
  G1CollectorPolicy* _g1_policy;

  // This is the second level of trying to allocate a new region. If
  // new_region() didn't find a region on the free_list, this call will
  // check whether there's anything available on the
  // secondary_free_list and/or wait for more regions to appear on
  // that list, if _free_regions_coming is set.
  HeapRegion* new_region_try_secondary_free_list();

  // Try to allocate a single non-humongous HeapRegion sufficient for
  // an allocation of the given word_size. If do_expand is true,
  // attempt to expand the heap if necessary to satisfy the allocation
  // request.
  HeapRegion* new_region(size_t word_size, bool do_expand);

  // Attempt to satisfy a humongous allocation request of the given
  // size by finding a contiguous set of free regions of num_regions
  // length and remove them from the master free list. Return the
  // index of the first region or G1_NULL_HRS_INDEX if the search
  // was unsuccessful.
  size_t humongous_obj_allocate_find_first(size_t num_regions,
                                           size_t word_size);

  // Initialize a contiguous set of free regions of length num_regions
  // and starting at index first so that they appear as a single
  // humongous region.
  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
                                                      size_t num_regions,
                                                      size_t word_size);

  // Attempt to allocate a humongous object of the given size. Return
  // NULL if unsuccessful.
  HeapWord* humongous_obj_allocate(size_t word_size);
  // The following two methods, allocate_new_tlab() and
  // mem_allocate(), are the two main entry points from the runtime
  // into the G1's allocation routines. They have the following
  // assumptions:
  //
  // * They should both be called outside safepoints.
  //
  // * They should both be called without holding the Heap_lock.
  //
  // * All allocation requests for new TLABs should go to
  //   allocate_new_tlab().
  //
  // * All non-TLAB allocation requests should go to mem_allocate().
  //
  // * If either call cannot satisfy the allocation request using the
  //   current allocating region, they will try to get a new one. If
  //   this fails, they will attempt to do an evacuation pause and
  //   retry the allocation.
  //
  // * If all allocation attempts fail, even after trying to schedule
  //   an evacuation pause, allocate_new_tlab() will return NULL,
  //   whereas mem_allocate() will attempt a heap expansion and/or
  //   schedule a Full GC.
  //
  // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
  //   should never be called with word_size being humongous. All
  //   humongous allocation requests should go to mem_allocate() which
  //   will satisfy them with a special path.

  virtual HeapWord* allocate_new_tlab(size_t word_size);

  virtual HeapWord* mem_allocate(size_t word_size,
                                 bool*  gc_overhead_limit_was_exceeded);

  // The following three methods take a gc_count_before_ret
  // parameter which is used to return the GC count if the method
  // returns NULL. Given that we are required to read the GC count
  // while holding the Heap_lock, and these paths will take the
  // Heap_lock at some point, it's easier to get them to read the GC
  // count while holding the Heap_lock before they return NULL instead
  // of the caller (namely: mem_allocate()) having to also take the
  // Heap_lock just to read the GC count.

  // First-level mutator allocation attempt: try to allocate out of
  // the mutator alloc region without taking the Heap_lock. This
  // should only be used for non-humongous allocations.
  inline HeapWord* attempt_allocation(size_t word_size,
                                      unsigned int* gc_count_before_ret);

  // Second-level mutator allocation attempt: take the Heap_lock and
  // retry the allocation attempt, potentially scheduling a GC
  // pause. This should only be used for non-humongous allocations.
  HeapWord* attempt_allocation_slow(size_t word_size,
                                    unsigned int* gc_count_before_ret);

  // Takes the Heap_lock and attempts a humongous allocation. It can
  // potentially schedule a GC pause.
  HeapWord* attempt_allocation_humongous(size_t word_size,
                                         unsigned int* gc_count_before_ret);
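
  // A sketch of how the two mutator levels fit together (illustrative
  // only; the real attempt_allocation() is defined inline elsewhere):
  //
  //   inline HeapWord*
  //   G1CollectedHeap::attempt_allocation(size_t word_size,
  //                                       unsigned int* gc_count_before_ret) {
  //     assert_heap_not_locked_and_not_at_safepoint();
  //     assert(!isHumongous(word_size), "use attempt_allocation_humongous()");
  //
  //     // First level: lock-free allocation out of the current mutator
  //     // alloc region (a CAS on the region's top pointer).
  //     HeapWord* result =
  //       _mutator_alloc_region.attempt_allocation(word_size,
  //                                                false /* bot_updates */);
  //     if (result == NULL) {
  //       // Second level: take the Heap_lock, retry, and potentially
  //       // schedule an evacuation pause.
  //       result = attempt_allocation_slow(word_size, gc_count_before_ret);
  //     }
  //     return result;
  //   }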
  // Allocation attempt that should be called during safepoints (e.g.,
  // at the end of a successful GC). expect_null_mutator_alloc_region
  // specifies whether the mutator alloc region is expected to be NULL
  // or not.
  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                       bool expect_null_mutator_alloc_region);

  // It dirties the cards that cover the block so that the post
  // write barrier never queues anything when updating objects on this
  // block. It is assumed (and in fact we assert) that the block
  // belongs to a young region.
  inline void dirty_young_block(HeapWord* start, size_t word_size);
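
  // A sketch of what the dirtying amounts to (illustrative only; the
  // real inline definition lives elsewhere, and CardTableModRefBS::dirty()
  // is the standard card table bulk-dirtying operation):
  //
  //   inline void G1CollectedHeap::dirty_young_block(HeapWord* start,
  //                                                  size_t word_size) {
  //     assert(heap_region_containing(start)->is_young(), "young only");
  //     MemRegion mr(start, word_size);
  //     // Mark every card spanned by [start, start + word_size) dirty,
  //     // so the post-write barrier never enqueues them.
  //     ((CardTableModRefBS*) barrier_set())->dirty(mr);
  //   }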
  // Allocate blocks during garbage collection. Will ensure an
  // allocation region, either by picking one or expanding the
  // heap, and then allocate a block of the given size. The block
  // may not be humongous - it must fit into a single heap region.
  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
                                    HeapRegion*    alloc_region,
                                    bool           par,
                                    size_t         word_size);

  // Ensure that no further allocations can happen in "r", bearing in mind
  // that parallel threads might be attempting allocations.
  void par_allocate_remaining_space(HeapRegion* r);

  // Allocation attempt during GC for a survivor object / PLAB.
  inline HeapWord* survivor_attempt_allocation(size_t word_size);

  // Allocation attempt during GC for an old object / PLAB.
  inline HeapWord* old_attempt_allocation(size_t word_size);
  // These methods are the "callbacks" from the G1AllocRegion class.

  // For mutator alloc regions.
  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  void retire_mutator_alloc_region(HeapRegion* alloc_region,
                                   size_t allocated_bytes);

  // For GC alloc regions.
  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
                                  GCAllocPurpose ap);
  void retire_gc_alloc_region(HeapRegion* alloc_region,
                              size_t allocated_bytes, GCAllocPurpose ap);

  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
  // - if clear_all_soft_refs is true, all soft references should be
  //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
  // - it returns false if it is unable to do the collection due to the
  //   GC locker being active, true otherwise
  bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);

  // Callback from VM_G1CollectFull operation.
  // Perform a full collection.
  void do_full_collection(bool clear_all_soft_refs);

  // Resize the heap if necessary after a full collection. If this is
  // after a collect-for-allocation, "word_size" is the allocation size,
  // and will be considered part of the used portion of the heap.
  void resize_if_necessary_after_full_collection(size_t word_size);

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
  HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

  // Attempt to expand the heap sufficiently
  // to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the address of the
  // allocated block, or else "NULL".
  HeapWord* expand_and_allocate(size_t word_size);
public:

  G1MonitoringSupport* g1mm() { return _g1mm; }

  // Expand the garbage-first heap by at least the given size (in bytes!).
  // Returns true if the heap was expanded by the requested amount;
  // false otherwise.
  // (Rounds up to a HeapRegion boundary.)
  bool expand(size_t expand_bytes);

  // Do anything common to GC's.
  virtual void gc_prologue(bool full);
  virtual void gc_epilogue(bool full);

  // We register a region with the fast "in collection set" test. We
  // simply set to true the array slot corresponding to this region.
  void register_region_with_in_cset_fast_test(HeapRegion* r) {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    assert(r->in_collection_set(), "invariant");
    size_t index = r->hrs_index();
    assert(index < _in_cset_fast_test_length, "invariant");
    assert(!_in_cset_fast_test_base[index], "invariant");
    _in_cset_fast_test_base[index] = true;
  }

  // This is a fast test on whether a reference points into the
  // collection set or not. It does not assume that the reference
  // points into the heap; if it doesn't, it will return false.
  bool in_cset_fast_test(oop obj) {
    assert(_in_cset_fast_test != NULL, "sanity");
    if (_g1_committed.contains((HeapWord*) obj)) {
      // no need to subtract the bottom of the heap from obj,
      // _in_cset_fast_test is biased
      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
      bool ret = _in_cset_fast_test[index];
      // let's make sure the result is consistent with what the slower
      // test returns
      assert( ret || !obj_in_cs(obj), "sanity");
      assert(!ret ||  obj_in_cs(obj), "sanity");
      return ret;
    } else {
      return false;
    }
  }

  void clear_cset_fast_test() {
    assert(_in_cset_fast_test_base != NULL, "sanity");
    memset(_in_cset_fast_test_base, false,
           _in_cset_fast_test_length * sizeof(bool));
  }
  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of full collections completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_full_collections_completed(bool concurrent);

  unsigned int full_collections_completed() {
    return _full_collections_completed;
  }

  G1HRPrinter* hr_printer() { return &_hr_printer; }
protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

  // Schedule the VM operation that will do an evacuation pause to
  // satisfy an allocation request of word_size. *succeeded will
  // return whether the VM operation was successful (it did do an
  // evacuation pause) or not (another thread beat us to it or the GC
  // locker was active). Given that we should not be holding the
  // Heap_lock when we enter this method, we will pass the
  // gc_count_before (i.e., total_collections()) as a parameter since
  // it has to be read while holding the Heap_lock. Currently, both
  // methods that call do_collection_pause() release the Heap_lock
  // before the call, so it's easy to read gc_count_before just before.
  HeapWord* do_collection_pause(size_t       word_size,
                                unsigned int gc_count_before,
                                bool*        succeeded);

  // The guts of the incremental collection pause, executed by the vm
  // thread. It returns false if it is unable to do the collection due
  // to the GC locker being active, true otherwise
  bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
  void evacuate_collection_set();

  // The g1 remembered set of the heap.
  G1RemSet* _g1_rem_set;
  // And its mod ref barrier set, used to track updates for the above.
  ModRefBarrierSet* _mr_bs;
  // A set of cards that cover the objects for which the Rsets should be updated
  // concurrently after the collection.
  DirtyCardQueueSet _dirty_card_queue_set;

  // The Heap Region Rem Set Iterator.
  HeapRegionRemSetIterator** _rem_set_iterator;

  // The closure used to refine a single card.
  RefineCardTableEntryClosure* _refine_cte_cl;

  // A function to check the consistency of dirty card logs.
  void check_ct_logs_at_safepoint();

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet _into_cset_dirty_card_queue_set;

  // After a collection pause, make the regions in the CS into free
  // regions.
  void free_collection_set(HeapRegion* cs_head);

  // Abandon the current collection set without recording policy
  // statistics or updating free lists.
  void abandon_collection_set(HeapRegion* cs_head);

  // Applies "scan_non_heap_roots" to roots outside the heap,
  // "scan_rs" to roots inside the heap (having done "set_region" to
  // indicate the region in which the root resides), and does "scan_perm"
  // (setting the generation to the perm generation.) If "scan_rs" is
  // NULL, then this step is skipped. The "worker_i"
  // param is for use with parallel roots processing, and should be
  // the "i" of the calling parallel worker thread's work(i) function.
  // In the sequential case this param will be ignored.
  void g1_process_strong_roots(bool collecting_perm_gen,
                               SharedHeap::ScanningOption so,
                               OopClosure* scan_non_heap_roots,
                               OopsInHeapRegionClosure* scan_rs,
                               OopsInGenClosure* scan_perm,
                               int worker_i);

  // Apply "blk" to all the weak roots of the system. These include
  // JNI weak roots, the code cache, system dictionary, symbol table,
  // string table, and referents of reachable weak refs.
  void g1_process_weak_roots(OopClosure* root_closure,
                             OopClosure* non_root_closure);

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  void free_region(HeapRegion* hr,
                   size_t* pre_used,
                   FreeRegionList* free_list,
                   bool par);

  // Frees a humongous region by collapsing it into individual regions
  // and calling free_region() for each of them. The freed regions
  // will be added to the free list that's passed as a parameter (this
  // is usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up. The assumption is that this will be done later.
  void free_humongous_region(HeapRegion* hr,
                             size_t* pre_used,
                             FreeRegionList* free_list,
                             HumongousRegionSet* humongous_proxy_set,
                             bool par);

  // Notifies all the necessary spaces that the committed space has
  // been updated (either expanded or shrunk). It should be called
  // after _g1_storage is updated.
  void update_committed_space(HeapWord* old_end, HeapWord* new_end);

  // The concurrent marker (and the thread it runs in.)
  ConcurrentMark* _cm;
  ConcurrentMarkThread* _cmThread;
  bool _mark_in_progress;

  // The concurrent refiner.
  ConcurrentG1Refine* _cg1r;

  // The parallel task queues
  RefToScanQueueSet *_task_queues;
  // True iff an evacuation has failed in the current collection.
  bool _evacuation_failed;

  // Set the attribute indicating whether evacuation has failed in the
  // current collection.
  void set_evacuation_failed(bool b) { _evacuation_failed = b; }

  // Failed evacuations cause some logical from-space objects to have
  // forwarding pointers to themselves. Reset them.
  void remove_self_forwarding_pointers();

  // When one is non-null, so is the other. Together, each pair is
  // an object with a preserved mark, and its mark value.
  GrowableArray<oop>*     _objs_with_preserved_marks;
  GrowableArray<markOop>* _preserved_marks_of_objs;
  // Preserve the mark of "obj", if necessary, in preparation for its mark
  // word being overwritten with a self-forwarding-pointer.
  void preserve_mark_if_necessary(oop obj, markOop m);

  // The stack of evac-failure objects left to be scanned.
  GrowableArray<oop>* _evac_failure_scan_stack;
  // The closure to apply to evac-failure objects.

  OopsInHeapRegionClosure* _evac_failure_closure;
  // Set the field above.
  void
  set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
    _evac_failure_closure = evac_failure_closure;
  }

  // Push "obj" on the scan stack.
  void push_on_evac_failure_scan_stack(oop obj);
  // Process scan stack entries until the stack is empty.
  void drain_evac_failure_scan_stack();
  // True iff an invocation of "drain_scan_stack" is in progress; to
  // prevent unnecessary recursion.
  bool _drain_in_progress;

  // Do any necessary initialization for evacuation-failure handling.
  // "cl" is the closure that will be used to process evac-failure
  // objects.
  void init_for_evac_failure(OopsInHeapRegionClosure* cl);
  // Do any necessary cleanup for evacuation-failure handling data
  // structures.
  void finalize_for_evac_failure();

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
  void handle_evacuation_failure_common(oop obj, markOop m);

  // Instance of the concurrent mark is_alive closure for embedding
  // into the reference processor as the is_alive_non_header. This
  // prevents unnecessary additions to the discovered lists during
  // concurrent discovery.
  G1CMIsAliveClosure _is_alive_closure;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  enum G1H_process_strong_roots_tasks {
    G1H_PS_mark_stack_oops_do,
    G1H_PS_refProcessor_oops_do,
    // Leave this one last.
    G1H_PS_NumElements
  };

  SubTasksDone* _process_strong_tasks;

  volatile bool _free_regions_coming;
public:

  SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }

  void set_refine_cte_cl_concurrency(bool concurrent);

  RefToScanQueue *task_queue(int i) const;

  // A set of cards where updates happened during the GC
  DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }

  // A DirtyCardQueueSet that is used to hold cards that contain
  // references into the current collection set. This is used to
  // update the remembered sets of the regions in the collection
  // set in the event of an evacuation failure.
  DirtyCardQueueSet& into_cset_dirty_card_queue_set()
        { return _into_cset_dirty_card_queue_set; }

  // Create a G1CollectedHeap with the specified policy.
  // Must call the initialize method afterwards.
  // May not return if something goes wrong.
  G1CollectedHeap(G1CollectorPolicy* policy);

  // Initialize the G1CollectedHeap to have the initial and
  // maximum sizes, permanent generation, and remembered and barrier sets
  // specified by the policy object.
  jint initialize();

  virtual void ref_processing_init();

  void set_par_threads(int t) {
    SharedHeap::set_par_threads(t);
    _process_strong_tasks->set_n_threads(t);
  }

  virtual CollectedHeap::Name kind() const {
    return CollectedHeap::G1CollectedHeap;
  }

  // The current policy object for the collector.
  G1CollectorPolicy* g1_policy() const { return _g1_policy; }

  // Adaptive size policy. No such thing for g1.
  virtual AdaptiveSizePolicy* size_policy() { return NULL; }

  // The rem set and barrier set.
  G1RemSet* g1_rem_set() const { return _g1_rem_set; }
  ModRefBarrierSet* mr_bs() const { return _mr_bs; }

  // The rem set iterator.
  HeapRegionRemSetIterator* rem_set_iterator(int i) {
    return _rem_set_iterator[i];
  }

  HeapRegionRemSetIterator* rem_set_iterator() {
    return _rem_set_iterator[0];
  }

  unsigned get_gc_time_stamp() {
    return _gc_time_stamp;
  }

  void reset_gc_time_stamp() {
    _gc_time_stamp = 0;
    OrderAccess::fence();
  }

  void increment_gc_time_stamp() {
    ++_gc_time_stamp;
    OrderAccess::fence();
  }

  void iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                  DirtyCardQueue* into_cset_dcq,
                                  bool concurrent, int worker_i);

  // The shared block offset table array.
  G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }

  // Reference Processing accessor
  ReferenceProcessor* ref_processor() { return _ref_processor; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;
  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
  // But G1CollectedHeap doesn't yet support this.

  // Return an estimate of the maximum allocation that could be performed
  // without triggering any collection or expansion activity. In a
  // generational collector, for example, this is probably the largest
  // allocation that could be supported (without expansion) in the youngest
  // generation. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc();

  virtual bool is_maximal_no_gc() const {
    return _g1_storage.uncommitted_size() == 0;
  }

  // The total number of regions in the heap.
  size_t n_regions() { return _hrs.length(); }

  // The max number of regions in the heap.
  size_t max_regions() { return _hrs.max_length(); }

  // The number of regions that are completely free.
  size_t free_regions() { return _free_list.length(); }

  // The number of regions that are not completely free.
  size_t used_regions() { return n_regions() - free_regions(); }

  // The number of regions available for "regular" expansion.
  size_t expansion_regions() { return _expansion_regions; }

  // Factory method for HeapRegion instances. It will return NULL if
  // the allocation fails.
  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);

  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
  void verify_dirty_young_regions() PRODUCT_RETURN;

  // verify_region_sets() performs verification over the region
  // lists. It will be compiled in the product code to be used when
  // necessary (i.e., during heap verification).
  void verify_region_sets();

  // verify_region_sets_optional() is planted in the code for
  // list verification in non-product builds (and it can be enabled in
  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
#if HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() {
    verify_region_sets();
  }
#else // HEAP_REGION_SET_FORCE_VERIFY
  void verify_region_sets_optional() { }
#endif // HEAP_REGION_SET_FORCE_VERIFY
#ifdef ASSERT
  bool is_on_master_free_list(HeapRegion* hr) {
    return hr->containing_set() == &_free_list;
  }

  bool is_in_humongous_set(HeapRegion* hr) {
    return hr->containing_set() == &_humongous_set;
  }
#endif // ASSERT

  // Wrapper for the region list operations that can be called from
  // methods outside this class.

  void secondary_free_list_add_as_tail(FreeRegionList* list) {
    _secondary_free_list.add_as_tail(list);
  }

  void append_secondary_free_list() {
    _free_list.add_as_head(&_secondary_free_list);
  }

  void append_secondary_free_list_if_not_empty_with_lock() {
    // If the secondary free list looks empty there's no reason to
    // take the lock and then try to append it.
    if (!_secondary_free_list.is_empty()) {
      MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
      append_secondary_free_list();
    }
  }

  void set_free_regions_coming();
  void reset_free_regions_coming();
  bool free_regions_coming() { return _free_regions_coming; }
  void wait_while_free_regions_coming();

  // Perform a collection of the heap; intended for use in implementing
  // "System.gc". This probably implies as full a collection as the
  // "CollectedHeap" supports.
  virtual void collect(GCCause::Cause cause);

  // The same as above but assume that the caller holds the Heap_lock.
  void collect_locked(GCCause::Cause cause);

  // This interface assumes that it's being called by the
  // vm thread. It collects the heap assuming that the
  // heap lock is already held and that we are executing in
  // the context of the vm thread.
  virtual void collect_as_vm_thread(GCCause::Cause cause);
  // True iff an evacuation has failed in the most-recent collection.
  bool evacuation_failed() { return _evacuation_failed; }

  // It will free a region if it has allocated objects in it that are
  // all dead. It calls either free_region() or
  // free_humongous_region() depending on the type of the region that
  // is passed to it.
  void free_region_if_empty(HeapRegion* hr,
                            size_t* pre_used,
                            FreeRegionList* free_list,
                            HumongousRegionSet* humongous_proxy_set,
                            HRRSCleanupTask* hrrs_cleanup_task,
                            bool par);

  // It appends the free list to the master free list and updates the
  // master humongous list according to the contents of the proxy
  // list. It also adjusts the total used bytes according to pre_used
  // (if par is true, it will do so by taking the ParGCRareEvent_lock).
  void update_sets_after_freeing_regions(size_t pre_used,
                                       FreeRegionList* free_list,
                                       HumongousRegionSet* humongous_proxy_set,
                                       bool par);
  // Returns "TRUE" iff "p" points into the allocated area of the heap.
  virtual bool is_in(const void* p) const;

  // Return "TRUE" iff the given object address is within the collection
  // set.
  inline bool obj_in_cs(oop obj);

  // Return "TRUE" iff the given object address is in the reserved
  // region of g1 (excluding the permanent generation).
  bool is_in_g1_reserved(const void* p) const {
    return _g1_reserved.contains(p);
  }

  // Returns a MemRegion that corresponds to the space that has been
  // reserved for the heap
  MemRegion g1_reserved() {
    return _g1_reserved;
  }

  // Returns a MemRegion that corresponds to the space that has been
  // committed in the heap
  MemRegion g1_committed() {
    return _g1_committed;
  }

  virtual bool is_in_closed_subset(const void* p) const;

  // This resets the card table to all zeros. It is used after
  // a collection pause which used the card table to claim cards.
  void cleanUpCardTable();

  // Iteration functions.

  // Iterate over all the ref-containing fields of all objects, calling
  // "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl) {
    oop_iterate(cl, true);
  }
  void oop_iterate(OopClosure* cl, bool do_perm);

  // Same as above, restricted to a memory region.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
    oop_iterate(mr, cl, true);
  }
  void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);

  // Iterate over all objects, calling "cl.do_object" on each.
  virtual void object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  virtual void safe_object_iterate(ObjectClosure* cl) {
    object_iterate(cl, true);
  }
  void object_iterate(ObjectClosure* cl, bool do_perm);

  // Iterate over all objects allocated since the last collection, calling
  // "cl.do_object" on each. The heap must have been initialized properly
  // to support this function, or else this call will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl);

  // Iterate over all spaces in use in the heap, in ascending address order.
  virtual void space_iterate(SpaceClosure* cl);

  // Iterate over heap regions, in address order, terminating the
  // iteration early if the "doHeapRegion" method returns "true".
  void heap_region_iterate(HeapRegionClosure* blk) const;

  // Iterate over heap regions starting with r (or the first region if "r"
  // is NULL), in address order, terminating early if the "doHeapRegion"
  // method returns "true".
  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;

  // Return the region with the given index. It assumes the index is valid.
  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
  // Divide the heap region sequence into "chunks" of some size (the number
  // of regions divided by the number of parallel threads times some
  // overpartition factor, currently 4). Assumes that this will be called
  // in parallel by ParallelGCThreads worker threads with distinct worker
  // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
  // calls will use the same "claim_value", and that that claim value is
  // different from the claim_value of any heap region before the start of
  // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
  // attempting to claim the first region in each chunk, and, if
  // successful, applying the closure to each region in the chunk (and
  // setting the claim value of the second and subsequent regions of the
  // chunk.) For now requires that "doHeapRegion" always returns "false",
  // i.e., that a closure never attempt to abort a traversal.
  void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
                                       int worker,
                                       jint claim_value);
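
  // Conceptually, each worker claims chunks like this (illustrative
  // pseudo-code; HeapRegion::claimHeapRegion() does an atomic
  // compare-and-swap on the region's claim value):
  //
  //   for each chunk assigned by (worker id, overpartition factor):
  //     HeapRegion* first = region_at(chunk_start);
  //     if (first->claimHeapRegion(claim_value)) {
  //       // We won the race for this chunk: apply the closure to every
  //       // region in it, setting the claim value on the rest as we go.
  //       for each region r in the chunk:
  //         blk->doHeapRegion(r);
  //     }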
  // It resets all the region claim values to the default.
  void reset_heap_region_claim_values();

#ifdef ASSERT
  bool check_heap_region_claim_values(jint claim_value);
#endif // ASSERT

  // Iterate over the regions (if any) in the current collection set.
  void collection_set_iterate(HeapRegionClosure* blk);

  // As above but starting from region r
  void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);

  // Returns the first (lowest address) compactible space in the heap.
  virtual CompactibleSpace* first_compactible_space();

  // A CollectedHeap will contain some number of spaces. This finds the
  // space containing a given address, or else returns NULL.
  virtual Space* space_containing(const void* addr) const;

  // A G1CollectedHeap will contain some number of heap regions. This
  // finds the region containing a given address, or else returns NULL.
  template <class T>
  inline HeapRegion* heap_region_containing(const T addr) const;

  // Like the above, but requires "addr" to be in the heap (to avoid a
  // null-check), and unlike the above, may return a continuing humongous
  // region.
  template <class T>
  inline HeapRegion* heap_region_containing_raw(const T addr) const;
  // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
  // each address in the (reserved) heap is a member of exactly
  // one block. The defining characteristic of a block is that it is
  // possible to find its size, and thus to progress forward to the next
  // block. (Blocks may be of different sizes.) Thus, blocks may
  // represent Java objects, or they might be free blocks in a
  // free-list-based heap (or subheap), as long as the two kinds are
  // distinguishable and the size of each is determinable.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;

  // Does this heap support heap inspection? (+PrintClassHistogram)
  virtual bool supports_heap_inspection() const { return true; }

  // Section on thread-local allocation buffers (TLABs)
  // See CollectedHeap for semantics.

  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
  // via a TLAB up to the first subsequent safepoint. If such permission
  // is granted for this heap type, the compiler promises to call
  // defer_store_barrier() below on any slow path allocation of
  // a new object for which such initializing store barriers will
  // have been elided. G1, like CMS, allows this, but should be
  // ready to provide a compensating write barrier as necessary
  // if that storage came out of a non-young region. The efficiency
  // of this implementation depends crucially on being able to
  // answer very efficiently in constant time whether a piece of
  // storage in the heap comes from a young region or not.
  // See ReduceInitialCardMarks.
  virtual bool can_elide_tlab_store_barriers() const {
    // 6920090: Temporarily disabled, because of lingering
    // instabilities related to RICM with G1. In the
    // interim, the option ReduceInitialCardMarksForG1
    // below is left solely as a debugging device at least
    // until 6920109 fixes the instabilities.
    return ReduceInitialCardMarksForG1;
  }

  virtual bool card_mark_must_follow_store() const {
    return true;
  }

  bool is_in_young(const oop obj) {
    HeapRegion* hr = heap_region_containing(obj);
    return hr != NULL && hr->is_young();
  }

#ifdef ASSERT
  virtual bool is_in_partial_collection(const void* p);
#endif

  virtual bool is_scavengable(const void* addr);
  // We don't need barriers for initializing stores to objects
  // in the young gen: for the SATB pre-barrier, there is no
  // pre-value that needs to be remembered; for the remembered-set
  // update logging post-barrier, we don't maintain remembered set
  // information for young gen objects. Note that non-generational
  // G1 does not have any "young" objects, so it should not elide
  // the rs logging barrier and should always answer false below.
  // However, non-generational G1 (-XX:-G1Gen) appears to have
  // bit-rotted so was not tested below.
  virtual bool can_elide_initializing_store_barrier(oop new_obj) {
    // Re 6920090, 6920109 above.
    assert(ReduceInitialCardMarksForG1, "Else cannot be here");
    assert(G1Gen || !is_in_young(new_obj),
           "Non-generational G1 should never return true below");
    return is_in_young(new_obj);
  }
  // Can a compiler elide a store barrier when it writes
  // a permanent oop into the heap? Applies when the compiler
  // is storing x to the heap, where x->is_perm() is true.
  virtual bool can_elide_permanent_oop_store_barriers() const {
    // At least until perm gen collection is also G1-ified, at
    // which point this should return false.
    return true;
  }
  // Returns "true" iff the given word_size is "very large".
  static bool isHumongous(size_t word_size) {
    // Note this has to be strictly greater-than as the TLABs
    // are capped at the humongous threshold and we want to
    // ensure that we don't try to allocate a TLAB as
    // humongous and that we don't allocate a humongous
    // object in a TLAB.
    return word_size > _humongous_object_threshold_in_words;
  }
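
  // The threshold is set up at heap initialization time; an object
  // counts as humongous when it is larger than half a heap region,
  // i.e., roughly:
  //
  //   _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
  //
  // Anything bigger would waste more than half of any region it shared
  // with other objects, so it gets its own (sequence of) region(s).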
  // Update mod union table with the set of dirty cards.
  void updateModUnion();

  // Set the mod union bits corresponding to the given memRegion. Note
  // that this is always a safe operation, since it doesn't clear any
  // bits.
  void markModUnionRange(MemRegion mr);

  // Records the fact that a marking phase is no longer in progress.
  void set_marking_complete() {
    _mark_in_progress = false;
  }
  void set_marking_started() {
    _mark_in_progress = true;
  }
  bool mark_in_progress() {
    return _mark_in_progress;
  }

  // Print the maximum heap capacity.
  virtual size_t max_capacity() const;

  virtual jlong millis_since_last_gc();

  // Perform any cleanup actions necessary before allowing a verification.
  virtual void prepare_for_verify();

  // Perform verification.

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information
  // vo == UseMarkWord -> use the mark word in the object header
  //
  // NOTE: Only the "prev" marking information is guaranteed to be
  // consistent most of the time, so most calls to this should use
  // vo == UsePrevMarking.
  // Currently, there is only one case where this is called with
  // vo == UseNextMarking, which is to verify the "next" marking
  // information at the end of remark.
  // Currently there is only one place where this is called with
  // vo == UseMarkWord, which is to verify the marking during a
  // full GC.
  void verify(bool allow_dirty, bool silent, VerifyOption vo);

  // Override; it uses the "prev" marking information
  virtual void verify(bool allow_dirty, bool silent);
  // Default behavior by calling print(tty);
  virtual void print() const;
  // This calls print_on(st, PrintHeapAtGCExtended).
  virtual void print_on(outputStream* st) const;
  // If extended is true, it will print out information for all
  // regions in the heap by calling print_on_extended(st).
  virtual void print_on(outputStream* st, bool extended) const;
  virtual void print_on_extended(outputStream* st) const;

  virtual void print_gc_threads_on(outputStream* st) const;
  virtual void gc_threads_do(ThreadClosure* tc) const;

  // Override
  void print_tracing_info() const;

  // The following two methods are helpful for debugging RSet issues.
  void print_cset_rsets() PRODUCT_RETURN;
  void print_all_rsets() PRODUCT_RETURN;

  // Convenience function to be used in situations where the heap type can be
  // asserted to be this type.
  static G1CollectedHeap* heap();

  void empty_young_list();

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

  YoungList* young_list() { return _young_list; }

  // debugging
  bool check_young_list_well_formed() {
    return _young_list->check_list_well_formed();
  }

  bool check_young_list_empty(bool check_heap,
                              bool check_sample = true);
  // *** Stuff related to concurrent marking. It's not clear to me that so
  // many of these need to be public.

  // The functions below are helper functions that a subclass of
  // "CollectedHeap" can use in the implementation of its virtual
  // functions.
  // This performs a concurrent marking of the live objects in a
  // bitmap off to the side.
  void doConcurrentMark();

  // Do a full concurrent marking, synchronously.
  void do_sync_mark();

  bool isMarkedPrev(oop obj) const;
  bool isMarkedNext(oop obj) const;

  // vo == UsePrevMarking -> use "prev" marking information,
  // vo == UseNextMarking -> use "next" marking information,
  // vo == UseMarkWord -> use mark word from object header
  bool is_obj_dead_cond(const oop obj,
                        const HeapRegion* hr,
                        const VerifyOption vo) const {

    switch (vo) {
      case VerifyOption_G1UsePrevMarking:
        return is_obj_dead(obj, hr);
      case VerifyOption_G1UseNextMarking:
        return is_obj_ill(obj, hr);
      default:
        assert(vo == VerifyOption_G1UseMarkWord, "must be");
        return !obj->is_gc_marked();
    }
  }

  // Determine if an object is dead, given the object and also
  // the region to which the object belongs. An object is dead
  // iff a) it was not allocated since the last mark and b) it
  // is not marked.

  bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_prev_marking(obj) &&
      !isMarkedPrev(obj);
  }

  // This is used when copying an object to survivor space.
  // If the object is marked live, then we mark the copy live.
  // If the object is allocated since the start of this mark
  // cycle, then we mark the copy live.
  // If the object has been around since the previous mark
  // phase, and hasn't been marked yet during this phase,
  // then we don't mark it, we just wait for the
  // current marking cycle to get to it.

  // This function returns true when an object has been
  // around since the previous marking and hasn't yet
  // been marked during this marking.

  bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
    return
      !hr->obj_allocated_since_next_marking(obj) &&
      !isMarkedNext(obj);
  }
1446 // Determine if an object is dead, given only the object itself.
1447 // This will find the region to which the object belongs and
1448 // then call the region version of the same function.
1450 // If the object is in the permanent generation it is not dead.
1451 // If the object is NULL it is not dead.
1453 // vo == UsePrevMarking -> use "prev" marking information,
1454 // vo == UseNextMarking -> use "next" marking information,
1455 // vo == UseMarkWord -> use mark word from object header
1456 bool is_obj_dead_cond(const oop obj,
1457 const VerifyOption vo) const {
1459 switch (vo) {
1460 case VerifyOption_G1UsePrevMarking:
1461 return is_obj_dead(obj);
1462 case VerifyOption_G1UseNextMarking:
1463 return is_obj_ill(obj);
1464 default:
1465 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1466 return !obj->is_gc_marked();
1467 }
1468 }
1470 bool is_obj_dead(const oop obj) const {
1471 const HeapRegion* hr = heap_region_containing(obj);
1472 if (hr == NULL) {
1473 if (Universe::heap()->is_in_permanent(obj))
1474 return false;
1475 else if (obj == NULL) return false;
1476 else return true;
1477 }
1478 else return is_obj_dead(obj, hr);
1479 }
1481 bool is_obj_ill(const oop obj) const {
1482 const HeapRegion* hr = heap_region_containing(obj);
1483 if (hr == NULL) {
1484 if (Universe::heap()->is_in_permanent(obj))
1485 return false;
1486 else if (obj == NULL) return false;
1487 else return true;
1488 }
1489 else return is_obj_ill(obj, hr);
1490 }
1492 // The following is just to alert the verification code
1493 // that a full collection has occurred and that the
1494 // remembered sets are no longer up to date.
1495 bool _full_collection;
1496 void set_full_collection()   { _full_collection = true; }
1497 void clear_full_collection() { _full_collection = false; }
1498 bool full_collection()       { return _full_collection; }
1500 ConcurrentMark* concurrent_mark() const { return _cm; }
1501 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1503 // The dirty cards region list is used to record a subset of regions
1504 // whose cards need clearing. The list is populated during the
1505 // remembered set scanning and drained during the card table
1506 // cleanup. Although the methods are reentrant, population/draining
1507 // phases must not overlap. For synchronization purposes the last
1508 // element on the list points to itself.
1509 HeapRegion* _dirty_cards_region_list;
1510 void push_dirty_cards_region(HeapRegion* hr);
1511 HeapRegion* pop_dirty_cards_region();
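// A sketch of the draining side (illustrative; the real work happens in
// the card table cleanup code):
//
//   HeapRegion* hr;
//   while ((hr = pop_dirty_cards_region()) != NULL) {
//     // ... clear the card table entries covering hr ...
//   }
//
// Having the last element point to itself (rather than NULL) means a
// region that is on the list always has a non-NULL "next" field, which
// presumably is how push_dirty_cards_region() avoids enqueuing a region
// twice.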
1513 public:
1514 void stop_conc_gc_threads();
1516 // <NEW PREDICTION>
1518 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
1519 void check_if_region_is_too_expensive(double predicted_time_ms);
1520 size_t pending_card_num();
1521 size_t max_pending_card_num();
1522 size_t cards_scanned();
1524 // </NEW PREDICTION>
1526 protected:
1527 size_t _max_heap_capacity;
1528 };
1530 #define use_local_bitmaps 1
1531 #define verify_local_bitmaps 0
1532 #define oop_buffer_length 256
1534 #ifndef PRODUCT
1535 class GCLabBitMap;
1536 class GCLabBitMapClosure: public BitMapClosure {
1537 private:
1538 ConcurrentMark* _cm;
1539 GCLabBitMap* _bitmap;
1541 public:
1542 GCLabBitMapClosure(ConcurrentMark* cm,
1543 GCLabBitMap* bitmap) {
1544 _cm = cm;
1545 _bitmap = bitmap;
1546 }
1548 virtual bool do_bit(size_t offset);
1549 };
1550 #endif // !PRODUCT
1552 class GCLabBitMap: public BitMap {
1553 private:
1554 ConcurrentMark* _cm;
1556 int _shifter;
1557 size_t _bitmap_word_covers_words;
1559 // beginning of the heap
1560 HeapWord* _heap_start;
1562 // this is the actual start of the GCLab
1563 HeapWord* _real_start_word;
1565 // this is the actual end of the GCLab
1566 HeapWord* _real_end_word;
1568 // this is the first word, possibly located before the actual start
1569 // of the GCLab, that corresponds to the first bit of the bitmap
1570 HeapWord* _start_word;
1572 // size of a GCLab in words
1573 size_t _gclab_word_size;
1575 static int shifter() {
1576 return MinObjAlignment - 1;
1577 }
1579 // How many heap words does a single bitmap word correspond to?
1580 static size_t bitmap_word_covers_words() {
1581 return BitsPerWord << shifter();
1582 }
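// Worked example (assuming an LP64 VM where MinObjAlignment is 1 word,
// so shifter() == 0 and BitsPerWord == 64): each bit covers
// 2^shifter() == 1 heap word, so one bitmap word covers
// 64 << 0 == 64 heap words. On a 32-bit VM with MinObjAlignment == 2,
// 32 << 1 == 64 heap words as well.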
1584 size_t gclab_word_size() const {
1585 return _gclab_word_size;
1586 }
1588 // Calculates the size, in bitmap words, of the bitmap covering the GCLab
1589 size_t gclab_real_word_size() const {
1590 return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
1591 / BitsPerWord;
1592 }
1594 static size_t bitmap_size_in_bits(size_t gclab_word_size) {
1595 size_t bits_in_bitmap = gclab_word_size >> shifter();
1596 // We are going to ensure that the beginning of a word in this
1597 // bitmap also corresponds to the beginning of a word in the
1598 // global marking bitmap. To handle the case where a GCLab
1599 // starts in the middle of a bitmap word, we need to add enough
1600 // space (i.e. up to a bitmap word) to ensure that we have
1601 // enough bits in the bitmap.
1602 return bits_in_bitmap + BitsPerWord - 1;
1603 }
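// For example, under the LP64 assumptions above, a 4096-word GCLab needs
// 4096 >> 0 == 4096 bits plus BitsPerWord - 1 == 63 bits of slack, i.e.
// 4159 bits, so that a GCLab starting mid-way through a bitmap word is
// still fully covered.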
1604 public:
1605 GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
1606 : BitMap(bitmap_size_in_bits(gclab_word_size)),
1607 _cm(G1CollectedHeap::heap()->concurrent_mark()),
1608 _shifter(shifter()),
1609 _bitmap_word_covers_words(bitmap_word_covers_words()),
1610 _heap_start(heap_start),
1611 _gclab_word_size(gclab_word_size),
1612 _real_start_word(NULL),
1613 _real_end_word(NULL),
1614 _start_word(NULL)
1615 {
1616 guarantee( size_in_words() >= bitmap_size_in_words(),
1617 "just making sure");
1618 }
1620 inline unsigned heapWordToOffset(HeapWord* addr) {
1621 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
1622 assert(offset < size(), "offset should be within bounds");
1623 return offset;
1624 }
1626 inline HeapWord* offsetToHeapWord(size_t offset) {
1627 HeapWord* addr = _start_word + (offset << _shifter);
1628 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
1629 return addr;
1630 }
1632 bool fields_well_formed() {
1633 bool ret1 = (_real_start_word == NULL) &&
1634 (_real_end_word == NULL) &&
1635 (_start_word == NULL);
1636 if (ret1)
1637 return true;
1639 bool ret2 = _real_start_word >= _start_word &&
1640 _start_word < _real_end_word &&
1641 (_real_start_word + _gclab_word_size) == _real_end_word &&
1642 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
1643 > _real_end_word;
1644 return ret2;
1645 }
1647 inline bool mark(HeapWord* addr) {
1648 guarantee(use_local_bitmaps, "invariant");
1649 assert(fields_well_formed(), "invariant");
1651 if (addr >= _real_start_word && addr < _real_end_word) {
1652 assert(!isMarked(addr), "should not have already been marked");
1654 // first mark it on the bitmap
1655 at_put(heapWordToOffset(addr), true);
1657 return true;
1658 } else {
1659 return false;
1660 }
1661 }
1663 inline bool isMarked(HeapWord* addr) {
1664 guarantee(use_local_bitmaps, "invariant");
1665 assert(fields_well_formed(), "invariant");
1667 return at(heapWordToOffset(addr));
1668 }
1670 void set_buffer(HeapWord* start) {
1671 guarantee(use_local_bitmaps, "invariant");
1672 clear();
1674 assert(start != NULL, "invariant");
1675 _real_start_word = start;
1676 _real_end_word = start + _gclab_word_size;
1678 size_t diff =
1679 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
1680 _start_word = start - diff;
1682 assert(fields_well_formed(), "invariant");
1683 }
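// Worked example for set_buffer() (illustrative numbers): with
// _bitmap_word_covers_words == 64 and a buffer starting 100 words past
// _heap_start, diff == 100 % 64 == 36 and _start_word is aligned down to
// _heap_start + 64. Bit 0 then corresponds to a word 36 words before the
// GCLab, which is the slack that bitmap_size_in_bits() reserves.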
1685 #ifndef PRODUCT
1686 void verify() {
1687 // verify that the marks have been propagated
1688 GCLabBitMapClosure cl(_cm, this);
1689 iterate(&cl);
1690 }
1691 #endif // PRODUCT
1693 void retire() {
1694 guarantee(use_local_bitmaps, "invariant");
1695 assert(fields_well_formed(), "invariant");
1697 if (_start_word != NULL) {
1698 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
1700 // this means that the bitmap was set up for the GCLab
1701 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
1703 mark_bitmap->mostly_disjoint_range_union(this,
1704 0, // always start from the start of the bitmap
1705 _start_word,
1706 gclab_real_word_size());
1707 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
1709 #ifndef PRODUCT
1710 if (use_local_bitmaps && verify_local_bitmaps)
1711 verify();
1712 #endif // PRODUCT
1713 } else {
1714 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
1715 }
1716 }
1718 size_t bitmap_size_in_words() const {
1719 return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
1720 }
1722 };
1724 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1725 private:
1726 bool _retired;
1727 bool _during_marking;
1728 GCLabBitMap _bitmap;
1730 public:
1731 G1ParGCAllocBuffer(size_t gclab_word_size) :
1732 ParGCAllocBuffer(gclab_word_size),
1733 _retired(false),
1734 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
1735 _bitmap(G1CollectedHeap::heap()->reserved_region().start(), gclab_word_size)
1736 { }
1738 inline bool mark(HeapWord* addr) {
1739 guarantee(use_local_bitmaps, "invariant");
1740 assert(_during_marking, "invariant");
1741 return _bitmap.mark(addr);
1742 }
1744 inline void set_buf(HeapWord* buf) {
1745 if (use_local_bitmaps && _during_marking)
1746 _bitmap.set_buffer(buf);
1747 ParGCAllocBuffer::set_buf(buf);
1748 _retired = false;
1749 }
1751 inline void retire(bool end_of_gc, bool retain) {
1752 if (_retired)
1753 return;
1754 if (use_local_bitmaps && _during_marking) {
1755 _bitmap.retire();
1756 }
1757 ParGCAllocBuffer::retire(end_of_gc, retain);
1758 _retired = true;
1759 }
1760 };
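// A sketch of the per-thread lifecycle of a G1ParGCAllocBuffer, mirroring
// G1ParScanThreadState::allocate_slow() below ("chunk" is a hypothetical
// block of to-space; error handling elided):
//
//   G1ParGCAllocBuffer buf(gclab_word_size);
//   buf.set_buf(chunk);                       // install fresh to-space
//   HeapWord* obj = buf.allocate(word_sz);    // bump-pointer allocation
//   ...
//   buf.retire(true /* end_of_gc */, false /* retain */);
//
// During concurrent marking, retire() also flushes the local mark bitmap
// into the global "next" marking bitmap via GCLabBitMap::retire().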
1762 class G1ParScanThreadState : public StackObj {
1763 protected:
1764 G1CollectedHeap* _g1h;
1765 RefToScanQueue* _refs;
1766 DirtyCardQueue _dcq;
1767 CardTableModRefBS* _ct_bs;
1768 G1RemSet* _g1_rem;
1770 G1ParGCAllocBuffer _surviving_alloc_buffer;
1771 G1ParGCAllocBuffer _tenured_alloc_buffer;
1772 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1773 ageTable _age_table;
1775 size_t _alloc_buffer_waste;
1776 size_t _undo_waste;
1778 OopsInHeapRegionClosure* _evac_failure_cl;
1779 G1ParScanHeapEvacClosure* _evac_cl;
1780 G1ParScanPartialArrayClosure* _partial_scan_cl;
1782 int _hash_seed;
1783 int _queue_num;
1785 size_t _term_attempts;
1787 double _start;
1788 double _start_strong_roots;
1789 double _strong_roots_time;
1790 double _start_term;
1791 double _term_time;
1793 // Map from young-age-index (0 == not young, 1 is youngest) to
1794 // surviving words. The base is the pointer we get back from the malloc call.
1795 size_t* _surviving_young_words_base;
1796 // this points into the array, as we use the first few entries for padding
1797 size_t* _surviving_young_words;
1799 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
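// With a 64-byte cache line and an 8-byte size_t (typical LP64 values),
// PADDING_ELEM_NUM == 8: the first eight array slots are sacrificed so
// that the counts one thread updates do not share a cache line with
// another thread's data (avoiding false sharing).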
1801 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1803 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1805 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1806 CardTableModRefBS* ctbs() { return _ct_bs; }
1808 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1809 if (!from->is_survivor()) {
1810 _g1_rem->par_write_ref(from, p, tid);
1811 }
1812 }
1814 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1815 // If the new value of the field points into the same region, or if the
1816 // field is in a to-space (survivor) region, no RSet update is needed.
1817 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1818 size_t card_index = ctbs()->index_for(p);
1819 // If the card hasn't been added to the buffer, do it.
1820 if (ctbs()->mark_card_deferred(card_index)) {
1821 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1822 }
1823 }
1824 }
1826 public:
1827 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
1829 ~G1ParScanThreadState() {
1830 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
1831 }
1833 RefToScanQueue* refs() { return _refs; }
1834 ageTable* age_table() { return &_age_table; }
1836 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1837 return _alloc_buffers[purpose];
1838 }
1840 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1841 size_t undo_waste() const { return _undo_waste; }
1843 #ifdef ASSERT
1844 bool verify_ref(narrowOop* ref) const;
1845 bool verify_ref(oop* ref) const;
1846 bool verify_task(StarTask ref) const;
1847 #endif // ASSERT
1849 template <class T> void push_on_queue(T* ref) {
1850 assert(verify_ref(ref), "sanity");
1851 refs()->push(ref);
1852 }
1854 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1855 if (G1DeferredRSUpdate) {
1856 deferred_rs_update(from, p, tid);
1857 } else {
1858 immediate_rs_update(from, p, tid);
1859 }
1860 }
1862 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1864 HeapWord* obj = NULL;
1865 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1866 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1867 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1868 assert(gclab_word_size == alloc_buf->word_sz(),
1869 "dynamic resizing is not supported");
1870 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1871 alloc_buf->retire(false, false);
1873 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1874 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1875 // Otherwise.
1876 alloc_buf->set_buf(buf);
1878 obj = alloc_buf->allocate(word_sz);
1879 assert(obj != NULL, "buffer was definitely big enough...");
1880 } else {
1881 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1882 }
1883 return obj;
1884 }
1886 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1887 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1888 if (obj != NULL) return obj;
1889 return allocate_slow(purpose, word_sz);
1890 }
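// Worked example of the refill policy in allocate_slow(): the test
//   word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct
// only refills the buffer for "small" objects. Assuming a 4096-word
// GCLab and ParallelGCBufferWastePct == 10 (believed to be the default),
// a 400-word object (40000 < 40960) retires and refills the buffer,
// while a 410-word object (41000 >= 40960) is allocated directly via
// par_allocate_during_gc().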
1892 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1893 if (alloc_buffer(purpose)->contains(obj)) {
1894 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1895 "should contain whole object");
1896 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1897 } else {
1898 CollectedHeap::fill_with_object(obj, word_sz);
1899 add_to_undo_waste(word_sz);
1900 }
1901 }
1903 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1904 _evac_failure_cl = evac_failure_cl;
1905 }
1906 OopsInHeapRegionClosure* evac_failure_closure() {
1907 return _evac_failure_cl;
1908 }
1910 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
1911 _evac_cl = evac_cl;
1912 }
1914 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
1915 _partial_scan_cl = partial_scan_cl;
1916 }
1918 int* hash_seed() { return &_hash_seed; }
1919 int queue_num() { return _queue_num; }
1921 size_t term_attempts() const { return _term_attempts; }
1922 void note_term_attempt() { _term_attempts++; }
1924 void start_strong_roots() {
1925 _start_strong_roots = os::elapsedTime();
1926 }
1927 void end_strong_roots() {
1928 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
1929 }
1930 double strong_roots_time() const { return _strong_roots_time; }
1932 void start_term_time() {
1933 note_term_attempt();
1934 _start_term = os::elapsedTime();
1935 }
1936 void end_term_time() {
1937 _term_time += (os::elapsedTime() - _start_term);
1938 }
1939 double term_time() const { return _term_time; }
1941 double elapsed_time() const {
1942 return os::elapsedTime() - _start;
1943 }
1945 static void
1946 print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1947 void
1948 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1950 size_t* surviving_young_words() {
1951 // We add one to hide entry 0, which accumulates surviving words for
1952 // age -1 regions (i.e. non-young ones).
1953 return _surviving_young_words;
1954 }
1956 void retire_alloc_buffers() {
1957 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1958 size_t waste = _alloc_buffers[ap]->words_remaining();
1959 add_to_alloc_buffer_waste(waste);
1960 _alloc_buffers[ap]->retire(true, false);
1961 }
1962 }
1964 template <class T> void deal_with_reference(T* ref_to_scan) {
1965 if (has_partial_array_mask(ref_to_scan)) {
1966 _partial_scan_cl->do_oop_nv(ref_to_scan);
1967 } else {
1968 // Note: we can use "raw" versions of "region_containing" because
1969 // "obj_to_scan" is definitely in the heap, and is not in a
1970 // humongous region.
1971 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1972 _evac_cl->set_region(r);
1973 _evac_cl->do_oop_nv(ref_to_scan);
1974 }
1975 }
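// has_partial_array_mask() (defined elsewhere) presumably tests a
// low-order tag bit encoded on the pointer when a partially-scanned
// object array is re-pushed, so this dispatch needs no extra per-entry
// state.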
1977 void deal_with_reference(StarTask ref) {
1978 assert(verify_task(ref), "sanity");
1979 if (ref.is_narrow()) {
1980 deal_with_reference((narrowOop*)ref);
1981 } else {
1982 deal_with_reference((oop*)ref);
1983 }
1984 }
1986 public:
1987 void trim_queue();
1988 };
1990 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP