Wed, 15 Feb 2012 13:06:53 -0500
7132029: G1: mixed GC phase lasts for longer than it should
Summary: Revamp of the mechanism that chooses old regions for inclusion in the CSet. It simplifies the code and introduces min and max bounds on the number of old regions added to the CSet at each mixed GC to avoid pathological cases. It also ensures that when we do a mixed GC we'll always find old regions to add to the CSet (i.e., it eliminates the case where a mixed GC will collect no old regions, which can happen today).
Reviewed-by: johnc, brutisso
1 /*
2 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
28 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/g1AllocRegion.hpp"
30 #include "gc_implementation/g1/g1HRPrinter.hpp"
31 #include "gc_implementation/g1/g1RemSet.hpp"
32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
33 #include "gc_implementation/g1/heapRegionSeq.hpp"
34 #include "gc_implementation/g1/heapRegionSets.hpp"
35 #include "gc_implementation/shared/hSpaceCounters.hpp"
36 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
37 #include "memory/barrierSet.hpp"
38 #include "memory/memRegion.hpp"
39 #include "memory/sharedHeap.hpp"
41 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
42 // It uses the "Garbage First" heap organization and algorithm, which
43 // may combine concurrent marking with parallel, incremental compaction of
44 // heap subsets that will yield large amounts of garbage.
46 class HeapRegion;
47 class HRRSCleanupTask;
48 class PermanentGenerationSpec;
49 class GenerationSpec;
50 class OopsInHeapRegionClosure;
51 class G1ScanHeapEvacClosure;
52 class ObjectClosure;
53 class SpaceClosure;
54 class CompactibleSpaceClosure;
55 class Space;
56 class G1CollectorPolicy;
57 class GenRemSet;
58 class G1RemSet;
59 class HeapRegionRemSetIterator;
60 class ConcurrentMark;
61 class ConcurrentMarkThread;
62 class ConcurrentG1Refine;
63 class GenerationCounters;
65 typedef OverflowTaskQueue<StarTask> RefToScanQueue;
66 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
68 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
69 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
71 enum GCAllocPurpose {
72 GCAllocForTenured,
73 GCAllocForSurvived,
74 GCAllocPurposeCount
75 };
77 class YoungList : public CHeapObj {
78 private:
79 G1CollectedHeap* _g1h;
81 HeapRegion* _head;
83 HeapRegion* _survivor_head;
84 HeapRegion* _survivor_tail;
86 HeapRegion* _curr;
88 size_t _length;
89 size_t _survivor_length;
91 size_t _last_sampled_rs_lengths;
92 size_t _sampled_rs_lengths;
94 void empty_list(HeapRegion* list);
96 public:
97 YoungList(G1CollectedHeap* g1h);
99 void push_region(HeapRegion* hr);
100 void add_survivor_region(HeapRegion* hr);
102 void empty_list();
103 bool is_empty() { return _length == 0; }
104 size_t length() { return _length; }
105 size_t survivor_length() { return _survivor_length; }
107 // Currently we do not keep track of the used byte sum for the
108 // young list and the survivors, and it'd be quite a lot of work to
109 // do so. When we eventually replace the young list with
110 // instances of HeapRegionLinkedList we'll get that for free. So,
111 // we'll report the more accurate information then.
112 size_t eden_used_bytes() {
113 assert(length() >= survivor_length(), "invariant");
114 return (length() - survivor_length()) * HeapRegion::GrainBytes;
115 }
116 size_t survivor_used_bytes() {
117 return survivor_length() * HeapRegion::GrainBytes;
118 }
120 void rs_length_sampling_init();
121 bool rs_length_sampling_more();
122 void rs_length_sampling_next();
124 void reset_sampled_info() {
125 _last_sampled_rs_lengths = 0;
126 }
127 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
129 // for development purposes
130 void reset_auxilary_lists();
131 void clear() { _head = NULL; _length = 0; }
133 void clear_survivors() {
134 _survivor_head = NULL;
135 _survivor_tail = NULL;
136 _survivor_length = 0;
137 }
139 HeapRegion* first_region() { return _head; }
140 HeapRegion* first_survivor_region() { return _survivor_head; }
141 HeapRegion* last_survivor_region() { return _survivor_tail; }
143 // debugging
144 bool check_list_well_formed();
145 bool check_list_empty(bool check_sample = true);
146 void print();
147 };
149 class MutatorAllocRegion : public G1AllocRegion {
150 protected:
151 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
152 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
153 public:
154 MutatorAllocRegion()
155 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
156 };
158 // The G1 STW is_alive closure.
159 // An instance is embedded into the G1CH and used as the
160 // (optional) _is_alive_non_header closure in the STW
161 // reference processor. It is also extensively used during
162 // reference processing during STW evacuation pauses.
163 class G1STWIsAliveClosure: public BoolObjectClosure {
164 G1CollectedHeap* _g1;
165 public:
166 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
167 void do_object(oop p) { assert(false, "Do not call."); }
168 bool do_object_b(oop p);
169 };
171 class SurvivorGCAllocRegion : public G1AllocRegion {
172 protected:
173 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
174 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
175 public:
176 SurvivorGCAllocRegion()
177 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
178 };
180 class OldGCAllocRegion : public G1AllocRegion {
181 protected:
182 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
183 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
184 public:
185 OldGCAllocRegion()
186 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
187 };
189 class RefineCardTableEntryClosure;
191 class G1CollectedHeap : public SharedHeap {
192 friend class VM_G1CollectForAllocation;
193 friend class VM_GenCollectForPermanentAllocation;
194 friend class VM_G1CollectFull;
195 friend class VM_G1IncCollectionPause;
196 friend class VMStructs;
197 friend class MutatorAllocRegion;
198 friend class SurvivorGCAllocRegion;
199 friend class OldGCAllocRegion;
201 // Closures used in implementation.
202 friend class G1ParCopyHelper;
203 friend class G1IsAliveClosure;
204 friend class G1EvacuateFollowersClosure;
205 friend class G1ParScanThreadState;
206 friend class G1ParScanClosureSuper;
207 friend class G1ParEvacuateFollowersClosure;
208 friend class G1ParTask;
209 friend class G1FreeGarbageRegionClosure;
210 friend class RefineCardTableEntryClosure;
211 friend class G1PrepareCompactClosure;
212 friend class RegionSorter;
213 friend class RegionResetter;
214 friend class CountRCClosure;
215 friend class EvacPopObjClosure;
216 friend class G1ParCleanupCTTask;
218 // Other related classes.
219 friend class G1MarkSweep;
221 private:
222 // The one and only G1CollectedHeap, so static functions can find it.
223 static G1CollectedHeap* _g1h;
225 static size_t _humongous_object_threshold_in_words;
227 // Storage for the G1 heap (excludes the permanent generation).
228 VirtualSpace _g1_storage;
229 MemRegion _g1_reserved;
231 // The part of _g1_storage that is currently committed.
232 MemRegion _g1_committed;
234 // The master free list. It will satisfy all new region allocations.
235 MasterFreeRegionList _free_list;
237 // The secondary free list which contains regions that have been
238 // freed up during the cleanup process. This will be appended to the
239 // master free list when appropriate.
240 SecondaryFreeRegionList _secondary_free_list;
242 // It keeps track of the old regions.
243 MasterOldRegionSet _old_set;
245 // It keeps track of the humongous regions.
246 MasterHumongousRegionSet _humongous_set;
248 // The number of regions we could create by expansion.
249 size_t _expansion_regions;
251 // The block offset table for the G1 heap.
252 G1BlockOffsetSharedArray* _bot_shared;
254 // Tears down the region sets / lists so that they are empty and the
255 // regions on the heap do not belong to a region set / list. The
256 // only exception is the humongous set which we leave unaltered. If
257 // free_list_only is true, it will only tear down the master free
258 // list. It is called before a Full GC (free_list_only == false) or
259 // before heap shrinking (free_list_only == true).
260 void tear_down_region_sets(bool free_list_only);
262 // Rebuilds the region sets / lists so that they are repopulated to
263 // reflect the contents of the heap. The only exception is the
264 // humongous set which was not torn down in the first place. If
265 // free_list_only is true, it will only rebuild the master free
266 // list. It is called after a Full GC (free_list_only == false) or
267 // after heap shrinking (free_list_only == true).
268 void rebuild_region_sets(bool free_list_only);
270 // The sequence of all heap regions in the heap.
271 HeapRegionSeq _hrs;
273 // Alloc region used to satisfy mutator allocation requests.
274 MutatorAllocRegion _mutator_alloc_region;
276 // Alloc region used to satisfy allocation requests by the GC for
277 // survivor objects.
278 SurvivorGCAllocRegion _survivor_gc_alloc_region;
280 // Alloc region used to satisfy allocation requests by the GC for
281 // old objects.
282 OldGCAllocRegion _old_gc_alloc_region;
284 // The last old region we allocated to during the last GC.
285 // Typically, it is not full so we should re-use it during the next GC.
286 HeapRegion* _retained_old_gc_alloc_region;
288 // It specifies whether we should attempt to expand the heap after a
289 // region allocation failure. If heap expansion fails we set this to
290 // false so that we don't re-attempt the heap expansion (it's likely
291 // that subsequent expansion attempts will also fail if one fails).
292 // Currently, it is only consulted during GC and it's reset at the
293 // start of each GC.
294 bool _expand_heap_after_alloc_failure;
296 // It resets the mutator alloc region before new allocations can take place.
297 void init_mutator_alloc_region();
299 // It releases the mutator alloc region.
300 void release_mutator_alloc_region();
302 // It initializes the GC alloc regions at the start of a GC.
303 void init_gc_alloc_regions();
305 // It releases the GC alloc regions at the end of a GC.
306 void release_gc_alloc_regions();
308 // It does any cleanup that needs to be done on the GC alloc regions
309 // before a Full GC.
310 void abandon_gc_alloc_regions();
312 // Helper for monitoring and management support.
313 G1MonitoringSupport* _g1mm;
315 // Determines PLAB size for a particular allocation purpose.
316 static size_t desired_plab_sz(GCAllocPurpose purpose);
318 // Outside of GC pauses, the number of bytes used in all regions other
319 // than the current allocation region.
320 size_t _summary_bytes_used;
322 // This is used for a quick test on whether a reference points into
323 // the collection set or not. Basically, we have an array, with one
324 // byte per region, and that byte denotes whether the corresponding
325 // region is in the collection set or not. The entry corresponding
326 // the bottom of the heap, i.e., region 0, is pointed to by
327 // _in_cset_fast_test_base. The _in_cset_fast_test field has been
328 // biased so that it actually points to address 0 of the address
329 // space, to make the test as fast as possible (we can simply shift
330 // the address to index into it, instead of having to subtract the
331 // bottom of the heap from the address before shifting it; basically
332 // it works in the same way the card table works).
333 bool* _in_cset_fast_test;
335 // The allocated array used for the fast test on whether a reference
336 // points into the collection set or not. This field is also used to
337 // free the array.
338 bool* _in_cset_fast_test_base;
340 // The length of the _in_cset_fast_test_base array.
341 size_t _in_cset_fast_test_length;
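// Editorial sketch (not part of this change): how the biased pointer above is
// typically derived from the freshly allocated base array. The allocation
// macro and the exact expression are assumptions made for illustration only:
//
//   _in_cset_fast_test_length = max_regions();
//   _in_cset_fast_test_base = NEW_C_HEAP_ARRAY(bool, _in_cset_fast_test_length);
//   // Bias the pointer by the shifted heap start so that
//   // (obj_address >> LogOfHRGrainBytes) indexes the array directly,
//   // with no per-query subtraction of the heap bottom:
//   _in_cset_fast_test = _in_cset_fast_test_base -
//       ((uintx) _g1_reserved.start() >> HeapRegion::LogOfHRGrainBytes);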
343 volatile unsigned _gc_time_stamp;
345 size_t* _surviving_young_words;
347 G1HRPrinter _hr_printer;
349 void setup_surviving_young_words();
350 void update_surviving_young_words(size_t* surv_young_words);
351 void cleanup_surviving_young_words();
353 // It decides whether an explicit GC should start a concurrent cycle
354 // instead of doing a STW GC. Currently, a concurrent cycle is
355 // explicitly started if:
356 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
357 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
358 // (c) cause == _g1_humongous_allocation.
359 bool should_do_concurrent_full_gc(GCCause::Cause cause);
361 // Keeps track of how many "full collections" (i.e., Full GCs or
362 // concurrent cycles) we have completed. The number of them we have
363 // started is maintained in _total_full_collections in CollectedHeap.
364 volatile unsigned int _full_collections_completed;
366 // This is a non-product method that is helpful for testing. It is
367 // called at the end of a GC and artificially expands the heap by
368 // allocating a number of dead regions. This way we can induce very
369 // frequent marking cycles and stress the cleanup / concurrent
370 // cleanup code more (as all the regions that will be allocated by
371 // this method will be found dead by the marking cycle).
372 void allocate_dummy_regions() PRODUCT_RETURN;
374 // These are macros so that, if the assert fires, we get the correct
375 // line number, file, etc.
377 #define heap_locking_asserts_err_msg(_extra_message_) \
378 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
379 (_extra_message_), \
380 BOOL_TO_STR(Heap_lock->owned_by_self()), \
381 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
382 BOOL_TO_STR(Thread::current()->is_VM_thread()))
384 #define assert_heap_locked() \
385 do { \
386 assert(Heap_lock->owned_by_self(), \
387 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
388 } while (0)
390 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
391 do { \
392 assert(Heap_lock->owned_by_self() || \
393 (SafepointSynchronize::is_at_safepoint() && \
394 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
395 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
396 "should be at a safepoint")); \
397 } while (0)
399 #define assert_heap_locked_and_not_at_safepoint() \
400 do { \
401 assert(Heap_lock->owned_by_self() && \
402 !SafepointSynchronize::is_at_safepoint(), \
403 heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
404 "should not be at a safepoint")); \
405 } while (0)
407 #define assert_heap_not_locked() \
408 do { \
409 assert(!Heap_lock->owned_by_self(), \
410 heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
411 } while (0)
413 #define assert_heap_not_locked_and_not_at_safepoint() \
414 do { \
415 assert(!Heap_lock->owned_by_self() && \
416 !SafepointSynchronize::is_at_safepoint(), \
417 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
418 "should not be at a safepoint")); \
419 } while (0)
421 #define assert_at_safepoint(_should_be_vm_thread_) \
422 do { \
423 assert(SafepointSynchronize::is_at_safepoint() && \
424 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
425 heap_locking_asserts_err_msg("should be at a safepoint")); \
426 } while (0)
428 #define assert_not_at_safepoint() \
429 do { \
430 assert(!SafepointSynchronize::is_at_safepoint(), \
431 heap_locking_asserts_err_msg("should not be at a safepoint")); \
432 } while (0)
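// Editorial sketch (not part of this change): these macros are typically used
// at the top of allocation entry points to document and enforce the locking
// contract; the method body below is a hypothetical illustration only:
//
//   HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
//                                                 unsigned int* gc_count_before_ret) {
//     assert_heap_not_locked_and_not_at_safepoint();
//     assert(!isHumongous(word_size), "attempt_allocation() should not "
//            "be called for humongous allocation requests");
//     ...
//   }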
434 protected:
436 // The young region list.
437 YoungList* _young_list;
439 // The current policy object for the collector.
440 G1CollectorPolicy* _g1_policy;
442 // This is the second level of trying to allocate a new region. If
443 // new_region() didn't find a region on the free_list, this call will
444 // check whether there's anything available on the
445 // secondary_free_list and/or wait for more regions to appear on
446 // that list, if _free_regions_coming is set.
447 HeapRegion* new_region_try_secondary_free_list();
449 // Try to allocate a single non-humongous HeapRegion sufficient for
450 // an allocation of the given word_size. If do_expand is true,
451 // attempt to expand the heap if necessary to satisfy the allocation
452 // request.
453 HeapRegion* new_region(size_t word_size, bool do_expand);
455 // Attempt to satisfy a humongous allocation request of the given
456 // size by finding a contiguous set of free regions of num_regions
457 // length and remove them from the master free list. Return the
458 // index of the first region or G1_NULL_HRS_INDEX if the search
459 // was unsuccessful.
460 size_t humongous_obj_allocate_find_first(size_t num_regions,
461 size_t word_size);
463 // Initialize a contiguous set of free regions of length num_regions
464 // and starting at index first so that they appear as a single
465 // humongous region.
466 HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
467 size_t num_regions,
468 size_t word_size);
470 // Attempt to allocate a humongous object of the given size. Return
471 // NULL if unsuccessful.
472 HeapWord* humongous_obj_allocate(size_t word_size);
474 // The following two methods, allocate_new_tlab() and
475 // mem_allocate(), are the two main entry points from the runtime
476 // into the G1's allocation routines. They have the following
477 // assumptions:
478 //
479 // * They should both be called outside safepoints.
480 //
481 // * They should both be called without holding the Heap_lock.
482 //
483 // * All allocation requests for new TLABs should go to
484 // allocate_new_tlab().
485 //
486 // * All non-TLAB allocation requests should go to mem_allocate().
487 //
488 // * If either call cannot satisfy the allocation request using the
489 // current allocating region, they will try to get a new one. If
490 // this fails, they will attempt to do an evacuation pause and
491 // retry the allocation.
492 //
493 // * If all allocation attempts fail, even after trying to schedule
494 // an evacuation pause, allocate_new_tlab() will return NULL,
495 // whereas mem_allocate() will attempt a heap expansion and/or
496 // schedule a Full GC.
497 //
498 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
499 // should never be called with word_size being humongous. All
500 // humongous allocation requests should go to mem_allocate() which
501 // will satisfy them with a special path.
503 virtual HeapWord* allocate_new_tlab(size_t word_size);
505 virtual HeapWord* mem_allocate(size_t word_size,
506 bool* gc_overhead_limit_was_exceeded);
508 // The following three methods take a gc_count_before_ret
509 // parameter which is used to return the GC count if the method
510 // returns NULL. Given that we are required to read the GC count
511 // while holding the Heap_lock, and these paths will take the
512 // Heap_lock at some point, it's easier to get them to read the GC
513 // count while holding the Heap_lock before they return NULL instead
514 // of the caller (namely: mem_allocate()) having to also take the
515 // Heap_lock just to read the GC count.
517 // First-level mutator allocation attempt: try to allocate out of
518 // the mutator alloc region without taking the Heap_lock. This
519 // should only be used for non-humongous allocations.
520 inline HeapWord* attempt_allocation(size_t word_size,
521 unsigned int* gc_count_before_ret);
523 // Second-level mutator allocation attempt: take the Heap_lock and
524 // retry the allocation attempt, potentially scheduling a GC
525 // pause. This should only be used for non-humongous allocations.
526 HeapWord* attempt_allocation_slow(size_t word_size,
527 unsigned int* gc_count_before_ret);
529 // Takes the Heap_lock and attempts a humongous allocation. It can
530 // potentially schedule a GC pause.
531 HeapWord* attempt_allocation_humongous(size_t word_size,
532 unsigned int* gc_count_before_ret);
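// Editorial sketch (not part of this change): how mem_allocate() typically
// dispatches between the paths above and uses the GC count recorded under
// the Heap_lock; names and control flow here are illustrative assumptions,
// not the actual implementation:
//
//   unsigned int gc_count_before;
//   HeapWord* result = isHumongous(word_size)
//     ? attempt_allocation_humongous(word_size, &gc_count_before)
//     : attempt_allocation(word_size, &gc_count_before);
//   if (result == NULL) {
//     // The slow paths recorded the GC count while holding the Heap_lock,
//     // so an evacuation pause can now be scheduled and the allocation retried.
//     bool succeeded;
//     result = do_collection_pause(word_size, gc_count_before, &succeeded);
//   }
//   return result;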
534 // Allocation attempt that should be called during safepoints (e.g.,
535 // at the end of a successful GC). expect_null_mutator_alloc_region
536 // specifies whether the mutator alloc region is expected to be NULL
537 // or not.
538 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
539 bool expect_null_mutator_alloc_region);
541 // It dirties the cards that cover the block so that the post
542 // write barrier never queues anything when updating objects on this
543 // block. It is assumed (and in fact we assert) that the block
544 // belongs to a young region.
545 inline void dirty_young_block(HeapWord* start, size_t word_size);
547 // Allocate blocks during garbage collection. Will ensure an
548 // allocation region, either by picking one or expanding the
549 // heap, and then allocate a block of the given size. The block
550 // may not be humongous - it must fit into a single heap region.
551 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
553 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
554 HeapRegion* alloc_region,
555 bool par,
556 size_t word_size);
558 // Ensure that no further allocations can happen in "r", bearing in mind
559 // that parallel threads might be attempting allocations.
560 void par_allocate_remaining_space(HeapRegion* r);
562 // Allocation attempt during GC for a survivor object / PLAB.
563 inline HeapWord* survivor_attempt_allocation(size_t word_size);
565 // Allocation attempt during GC for an old object / PLAB.
566 inline HeapWord* old_attempt_allocation(size_t word_size);
568 // These methods are the "callbacks" from the G1AllocRegion class.
570 // For mutator alloc regions.
571 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
572 void retire_mutator_alloc_region(HeapRegion* alloc_region,
573 size_t allocated_bytes);
575 // For GC alloc regions.
576 HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
577 GCAllocPurpose ap);
578 void retire_gc_alloc_region(HeapRegion* alloc_region,
579 size_t allocated_bytes, GCAllocPurpose ap);
581 // - if explicit_gc is true, the GC is for a System.gc() or a heap
582 // inspection request and should collect the entire heap
583 // - if clear_all_soft_refs is true, all soft references should be
584 // cleared during the GC
585 // - if explicit_gc is false, word_size describes the allocation that
586 // the GC should attempt (at least) to satisfy
587 // - it returns false if it is unable to do the collection due to the
588 // GC locker being active, true otherwise
589 bool do_collection(bool explicit_gc,
590 bool clear_all_soft_refs,
591 size_t word_size);
593 // Callback from VM_G1CollectFull operation.
594 // Perform a full collection.
595 void do_full_collection(bool clear_all_soft_refs);
597 // Resize the heap if necessary after a full collection. If this is
598 // after a collect-for-allocation, "word_size" is the allocation size,
599 // and will be considered part of the used portion of the heap.
600 void resize_if_necessary_after_full_collection(size_t word_size);
602 // Callback from VM_G1CollectForAllocation operation.
603 // This function does everything necessary/possible to satisfy a
604 // failed allocation request (including collection, expansion, etc.)
605 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
607 // Attempt to expand the heap sufficiently
608 // to support an allocation of the given "word_size". If
609 // successful, perform the allocation and return the address of the
610 // allocated block, or else "NULL".
611 HeapWord* expand_and_allocate(size_t word_size);
613 // Process any reference objects discovered during
614 // an incremental evacuation pause.
615 void process_discovered_references();
617 // Enqueue any remaining discovered references
618 // after processing.
619 void enqueue_discovered_references();
621 public:
623 G1MonitoringSupport* g1mm() {
624 assert(_g1mm != NULL, "should have been initialized");
625 return _g1mm;
626 }
628 // Expand the garbage-first heap by at least the given size (in bytes!).
629 // Returns true if the heap was expanded by the requested amount;
630 // false otherwise.
631 // (Rounds up to a HeapRegion boundary.)
632 bool expand(size_t expand_bytes);
634 // Do anything common to GCs.
635 virtual void gc_prologue(bool full);
636 virtual void gc_epilogue(bool full);
638 // We register a region with the fast "in collection set" test. We
639 // simply set to true the array slot corresponding to this region.
640 void register_region_with_in_cset_fast_test(HeapRegion* r) {
641 assert(_in_cset_fast_test_base != NULL, "sanity");
642 assert(r->in_collection_set(), "invariant");
643 size_t index = r->hrs_index();
644 assert(index < _in_cset_fast_test_length, "invariant");
645 assert(!_in_cset_fast_test_base[index], "invariant");
646 _in_cset_fast_test_base[index] = true;
647 }
649 // This is a fast test on whether a reference points into the
650 // collection set or not. It does not assume that the reference
651 // points into the heap; if it doesn't, it will return false.
652 bool in_cset_fast_test(oop obj) {
653 assert(_in_cset_fast_test != NULL, "sanity");
654 if (_g1_committed.contains((HeapWord*) obj)) {
655 // no need to subtract the bottom of the heap from obj,
656 // _in_cset_fast_test is biased
657 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
658 bool ret = _in_cset_fast_test[index];
659 // let's make sure the result is consistent with what the slower
660 // test returns
661 assert( ret || !obj_in_cs(obj), "sanity");
662 assert(!ret || obj_in_cs(obj), "sanity");
663 return ret;
664 } else {
665 return false;
666 }
667 }
669 void clear_cset_fast_test() {
670 assert(_in_cset_fast_test_base != NULL, "sanity");
671 memset(_in_cset_fast_test_base, false,
672 _in_cset_fast_test_length * sizeof(bool));
673 }
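// Editorial sketch (not part of this change): the fast test above is meant to
// be used from hot evacuation code when deciding whether a reference needs to
// be copied; the closure shape below is a hypothetical example:
//
//   template <class T>
//   void ExampleScanClosure::do_oop_nv(T* p) {
//     T heap_oop = oopDesc::load_heap_oop(p);
//     if (!oopDesc::is_null(heap_oop)) {
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       if (_g1->in_cset_fast_test(obj)) {
//         // The referent is in the collection set: copy it (or use the
//         // already-installed forwardee) and update the field.
//       }
//     }
//   }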
675 // This is called at the end of either a concurrent cycle or a Full
676 // GC to update the number of full collections completed. Those two
677 // can happen in a nested fashion, i.e., we start a concurrent
678 // cycle, a Full GC happens half-way through it which ends first,
679 // and then the cycle notices that a Full GC happened and ends
680 // too. The concurrent parameter is a boolean to help us do a bit
681 // tighter consistency checking in the method. If concurrent is
682 // false, the caller is the inner caller in the nesting (i.e., the
683 // Full GC). If concurrent is true, the caller is the outer caller
684 // in this nesting (i.e., the concurrent cycle). Further nesting is
685 // not currently supported. The end of this call also notifies
686 // the FullGCCount_lock in case a Java thread is waiting for a full
687 // GC to happen (e.g., it called System.gc() with
688 // +ExplicitGCInvokesConcurrent).
689 void increment_full_collections_completed(bool concurrent);
691 unsigned int full_collections_completed() {
692 return _full_collections_completed;
693 }
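// Editorial sketch (not part of this change): how a Java thread can use the
// counter above together with FullGCCount_lock to wait for an in-progress
// cycle to complete, as described in the comment before
// increment_full_collections_completed(); the exact code is an assumption:
//
//   unsigned int full_gc_count_before;
//   {
//     MutexLocker ml(FullGCCount_lock);
//     full_gc_count_before = full_collections_completed();
//   }
//   // ... trigger the concurrent cycle ...
//   {
//     MutexLocker ml(FullGCCount_lock);
//     while (full_collections_completed() <= full_gc_count_before) {
//       // Woken by increment_full_collections_completed().
//       FullGCCount_lock->wait();
//     }
//   }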
695 G1HRPrinter* hr_printer() { return &_hr_printer; }
697 protected:
699 // Shrink the garbage-first heap by at most the given size (in bytes!).
700 // (Rounds down to a HeapRegion boundary.)
701 virtual void shrink(size_t expand_bytes);
702 void shrink_helper(size_t expand_bytes);
704 #if TASKQUEUE_STATS
705 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
706 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
707 void reset_taskqueue_stats();
708 #endif // TASKQUEUE_STATS
710 // Schedule the VM operation that will do an evacuation pause to
711 // satisfy an allocation request of word_size. *succeeded will
712 // return whether the VM operation was successful (it did do an
713 // evacuation pause) or not (another thread beat us to it or the GC
714 // locker was active). Given that we should not be holding the
715 // Heap_lock when we enter this method, we will pass the
716 // gc_count_before (i.e., total_collections()) as a parameter since
717 // it has to be read while holding the Heap_lock. Currently, both
718 // methods that call do_collection_pause() release the Heap_lock
719 // before the call, so it's easy to read gc_count_before just before.
720 HeapWord* do_collection_pause(size_t word_size,
721 unsigned int gc_count_before,
722 bool* succeeded);
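// Editorial sketch (not part of this change): the pause is typically scheduled
// by handing a VM_G1IncCollectionPause operation to the VM thread; the exact
// constructor arguments and accessors below are assumptions for illustration:
//
//   VM_G1IncCollectionPause op(gc_count_before,
//                              word_size,
//                              false /* should_initiate_conc_mark */,
//                              g1_policy()->max_pause_time_ms(),
//                              GCCause::_g1_inc_collection_pause);
//   VMThread::execute(&op);
//   *succeeded = op.pause_succeeded();
//   return op.result();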
724 // The guts of the incremental collection pause, executed by the vm
725 // thread. It returns false if it is unable to do the collection due
726 // to the GC locker being active, true otherwise
727 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
729 // Actually do the work of evacuating the collection set.
730 void evacuate_collection_set();
732 // The g1 remembered set of the heap.
733 G1RemSet* _g1_rem_set;
734 // And its mod ref barrier set, used to track updates for the above.
735 ModRefBarrierSet* _mr_bs;
737 // A set of cards that cover the objects for which the RSets should be updated
738 // concurrently after the collection.
739 DirtyCardQueueSet _dirty_card_queue_set;
741 // The Heap Region Rem Set Iterator.
742 HeapRegionRemSetIterator** _rem_set_iterator;
744 // The closure used to refine a single card.
745 RefineCardTableEntryClosure* _refine_cte_cl;
747 // A function to check the consistency of dirty card logs.
748 void check_ct_logs_at_safepoint();
750 // A DirtyCardQueueSet that is used to hold cards that contain
751 // references into the current collection set. This is used to
752 // update the remembered sets of the regions in the collection
753 // set in the event of an evacuation failure.
754 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
756 // After a collection pause, make the regions in the CS into free
757 // regions.
758 void free_collection_set(HeapRegion* cs_head);
760 // Abandon the current collection set without recording policy
761 // statistics or updating free lists.
762 void abandon_collection_set(HeapRegion* cs_head);
764 // Applies "scan_non_heap_roots" to roots outside the heap,
765 // "scan_rs" to roots inside the heap (having done "set_region" to
766 // indicate the region in which the root resides), and does "scan_perm"
767 // (setting the generation to the perm generation.) If "scan_rs" is
768 // NULL, then this step is skipped. The "worker_i"
769 // param is for use with parallel roots processing, and should be
770 // the "i" of the calling parallel worker thread's work(i) function.
771 // In the sequential case this param will be ignored.
772 void g1_process_strong_roots(bool collecting_perm_gen,
773 ScanningOption so,
774 OopClosure* scan_non_heap_roots,
775 OopsInHeapRegionClosure* scan_rs,
776 OopsInGenClosure* scan_perm,
777 int worker_i);
779 // Apply "blk" to all the weak roots of the system. These include
780 // JNI weak roots, the code cache, system dictionary, symbol table,
781 // string table, and referents of reachable weak refs.
782 void g1_process_weak_roots(OopClosure* root_closure,
783 OopClosure* non_root_closure);
785 // Frees a non-humongous region by initializing its contents and
786 // adding it to the free list that's passed as a parameter (this is
787 // usually a local list which will be appended to the master free
788 // list later). The used bytes of freed regions are accumulated in
789 // pre_used. If par is true, the region's RSet will not be freed
790 // up. The assumption is that this will be done later.
791 void free_region(HeapRegion* hr,
792 size_t* pre_used,
793 FreeRegionList* free_list,
794 bool par);
796 // Frees a humongous region by collapsing it into individual regions
797 // and calling free_region() for each of them. The freed regions
798 // will be added to the free list that's passed as a parameter (this
799 // is usually a local list which will be appended to the master free
800 // list later). The used bytes of freed regions are accumulated in
801 // pre_used. If par is true, the region's RSet will not be freed
802 // up. The assumption is that this will be done later.
803 void free_humongous_region(HeapRegion* hr,
804 size_t* pre_used,
805 FreeRegionList* free_list,
806 HumongousRegionSet* humongous_proxy_set,
807 bool par);
809 // Notifies all the necessary spaces that the committed space has
810 // been updated (either expanded or shrunk). It should be called
811 // after _g1_storage is updated.
812 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
814 // The concurrent marker (and the thread it runs in.)
815 ConcurrentMark* _cm;
816 ConcurrentMarkThread* _cmThread;
817 bool _mark_in_progress;
819 // The concurrent refiner.
820 ConcurrentG1Refine* _cg1r;
822 // The parallel task queues
823 RefToScanQueueSet *_task_queues;
825 // True iff an evacuation has failed in the current collection.
826 bool _evacuation_failed;
828 // Set the attribute indicating whether evacuation has failed in the
829 // current collection.
830 void set_evacuation_failed(bool b) { _evacuation_failed = b; }
832 // Failed evacuations cause some logical from-space objects to have
833 // forwarding pointers to themselves. Reset them.
834 void remove_self_forwarding_pointers();
836 // When one is non-null, so is the other. Together, each entry pair is
837 // an object with a preserved mark and that object's mark value.
838 GrowableArray<oop>* _objs_with_preserved_marks;
839 GrowableArray<markOop>* _preserved_marks_of_objs;
841 // Preserve the mark of "obj", if necessary, in preparation for its mark
842 // word being overwritten with a self-forwarding-pointer.
843 void preserve_mark_if_necessary(oop obj, markOop m);
845 // The stack of evac-failure objects left to be scanned.
846 GrowableArray<oop>* _evac_failure_scan_stack;
847 // The closure to apply to evac-failure objects.
849 OopsInHeapRegionClosure* _evac_failure_closure;
850 // Set the field above.
851 void
852 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
853 _evac_failure_closure = evac_failure_closure;
854 }
856 // Push "obj" on the scan stack.
857 void push_on_evac_failure_scan_stack(oop obj);
858 // Process scan stack entries until the stack is empty.
859 void drain_evac_failure_scan_stack();
860 // True iff an invocation of "drain_scan_stack" is in progress; to
861 // prevent unnecessary recursion.
862 bool _drain_in_progress;
864 // Do any necessary initialization for evacuation-failure handling.
865 // "cl" is the closure that will be used to process evac-failure
866 // objects.
867 void init_for_evac_failure(OopsInHeapRegionClosure* cl);
868 // Do any necessary cleanup for evacuation-failure handling data
869 // structures.
870 void finalize_for_evac_failure();
872 // An attempt to evacuate "obj" has failed; take necessary steps.
873 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
874 void handle_evacuation_failure_common(oop obj, markOop m);
876 // ("Weak") Reference processing support.
877 //
878 // G1 has two instances of the reference processor class. One
879 // (_ref_processor_cm) handles reference object discovery
880 // and subsequent processing during concurrent marking cycles.
881 //
882 // The other (_ref_processor_stw) handles reference object
883 // discovery and processing during full GCs and incremental
884 // evacuation pauses.
885 //
886 // During an incremental pause, reference discovery will be
887 // temporarily disabled for _ref_processor_cm and will be
888 // enabled for _ref_processor_stw. At the end of the evacuation
889 // pause references discovered by _ref_processor_stw will be
890 // processed and discovery will be disabled. The previous
891 // setting for reference object discovery for _ref_processor_cm
892 // will be re-instated.
893 //
894 // At the start of marking:
895 // * Discovery by the CM ref processor is verified to be inactive
896 // and its discovered lists are empty.
897 // * Discovery by the CM ref processor is then enabled.
898 //
899 // At the end of marking:
900 // * Any references on the CM ref processor's discovered
901 // lists are processed (possibly MT).
902 //
903 // At the start of full GC we:
904 // * Disable discovery by the CM ref processor and
905 // empty CM ref processor's discovered lists
906 // (without processing any entries).
907 // * Verify that the STW ref processor is inactive and its
908 // discovered lists are empty.
909 // * Temporarily set STW ref processor discovery as single threaded.
910 // * Temporarily clear the STW ref processor's _is_alive_non_header
911 // field.
912 // * Finally enable discovery by the STW ref processor.
913 //
914 // The STW ref processor is used to record any discovered
915 // references during the full GC.
916 //
917 // At the end of a full GC we:
918 // * Enqueue any reference objects discovered by the STW ref processor
919 // that have non-live referents. This has the side-effect of
920 // making the STW ref processor inactive by disabling discovery.
921 // * Verify that the CM ref processor is still inactive
922 // and no references have been placed on its discovered
923 // lists (also checked as a precondition during initial marking).
925 // The (stw) reference processor...
926 ReferenceProcessor* _ref_processor_stw;
928 // During reference object discovery, the _is_alive_non_header
929 // closure (if non-null) is applied to the referent object to
930 // determine whether the referent is live. If so then the
931 // reference object does not need to be 'discovered' and can
932 // be treated as a regular oop. This has the benefit of reducing
933 // the number of 'discovered' reference objects that need to
934 // be processed.
935 //
936 // Instance of the is_alive closure for embedding into the
937 // STW reference processor as the _is_alive_non_header field.
938 // Supplying a value for the _is_alive_non_header field is
939 // optional but doing so prevents unnecessary additions to
940 // the discovered lists during reference discovery.
941 G1STWIsAliveClosure _is_alive_closure_stw;
943 // The (concurrent marking) reference processor...
944 ReferenceProcessor* _ref_processor_cm;
946 // Instance of the concurrent mark is_alive closure for embedding
947 // into the Concurrent Marking reference processor as the
948 // _is_alive_non_header field. Supplying a value for the
949 // _is_alive_non_header field is optional but doing so prevents
950 // unnecessary additions to the discovered lists during reference
951 // discovery.
952 G1CMIsAliveClosure _is_alive_closure_cm;
954 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
955 HeapRegion** _worker_cset_start_region;
957 // Time stamp to validate the regions recorded in the cache
958 // used by G1CollectedHeap::start_cset_region_for_worker().
959 // The heap region entry for a given worker is valid iff
960 // the associated time stamp value matches the current value
961 // of G1CollectedHeap::_gc_time_stamp.
962 unsigned int* _worker_cset_start_region_time_stamp;
964 enum G1H_process_strong_roots_tasks {
965 G1H_PS_filter_satb_buffers,
966 G1H_PS_refProcessor_oops_do,
967 // Leave this one last.
968 G1H_PS_NumElements
969 };
971 SubTasksDone* _process_strong_tasks;
973 volatile bool _free_regions_coming;
975 public:
977 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
979 void set_refine_cte_cl_concurrency(bool concurrent);
981 RefToScanQueue *task_queue(int i) const;
983 // A set of cards where updates happened during the GC
984 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
986 // A DirtyCardQueueSet that is used to hold cards that contain
987 // references into the current collection set. This is used to
988 // update the remembered sets of the regions in the collection
989 // set in the event of an evacuation failure.
990 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
991 { return _into_cset_dirty_card_queue_set; }
993 // Create a G1CollectedHeap with the specified policy.
994 // Must call the initialize method afterwards.
995 // May not return if something goes wrong.
996 G1CollectedHeap(G1CollectorPolicy* policy);
998 // Initialize the G1CollectedHeap to have the initial and
999 // maximum sizes, permanent generation, and remembered and barrier sets
1000 // specified by the policy object.
1001 jint initialize();
1003 // Initialize weak reference processing.
1004 virtual void ref_processing_init();
1006 void set_par_threads(uint t) {
1007 SharedHeap::set_par_threads(t);
1008 // Done in SharedHeap but oddly there are
1009 // two _process_strong_tasks instances in a G1CollectedHeap
1010 // so do it here too.
1011 _process_strong_tasks->set_n_threads(t);
1012 }
1014 // Set _n_par_threads according to a policy TBD.
1015 void set_par_threads();
1017 void set_n_termination(int t) {
1018 _process_strong_tasks->set_n_threads(t);
1019 }
1021 virtual CollectedHeap::Name kind() const {
1022 return CollectedHeap::G1CollectedHeap;
1023 }
1025 // The current policy object for the collector.
1026 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1028 // Adaptive size policy. No such thing for g1.
1029 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1031 // The rem set and barrier set.
1032 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1033 ModRefBarrierSet* mr_bs() const { return _mr_bs; }
1035 // The rem set iterator.
1036 HeapRegionRemSetIterator* rem_set_iterator(int i) {
1037 return _rem_set_iterator[i];
1038 }
1040 HeapRegionRemSetIterator* rem_set_iterator() {
1041 return _rem_set_iterator[0];
1042 }
1044 unsigned get_gc_time_stamp() {
1045 return _gc_time_stamp;
1046 }
1048 void reset_gc_time_stamp() {
1049 _gc_time_stamp = 0;
1050 OrderAccess::fence();
1051 // Clear the cached CSet starting regions and time stamps.
1052 // Their validity is dependent on the GC timestamp.
1053 clear_cset_start_regions();
1054 }
1056 void increment_gc_time_stamp() {
1057 ++_gc_time_stamp;
1058 OrderAccess::fence();
1059 }
1061 void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1062 DirtyCardQueue* into_cset_dcq,
1063 bool concurrent, int worker_i);
1065 // The shared block offset table array.
1066 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1068 // Reference Processing accessors
1070 // The STW reference processor....
1071 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1073 // The Concurrent Marking reference processor...
1074 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1076 virtual size_t capacity() const;
1077 virtual size_t used() const;
1078 // This should be called when we're not holding the heap lock. The
1079 // result might be a bit inaccurate.
1080 size_t used_unlocked() const;
1081 size_t recalculate_used() const;
1083 // These virtual functions do the actual allocation.
1084 // Some heaps may offer a contiguous region for shared non-blocking
1085 // allocation, via inlined code (by exporting the address of the top and
1086 // end fields defining the extent of the contiguous allocation region.)
1087 // But G1CollectedHeap doesn't yet support this.
1089 // Return an estimate of the maximum allocation that could be performed
1090 // without triggering any collection or expansion activity. In a
1091 // generational collector, for example, this is probably the largest
1092 // allocation that could be supported (without expansion) in the youngest
1093 // generation. It is "unsafe" because no locks are taken; the result
1094 // should be treated as an approximation, not a guarantee, for use in
1095 // heuristic resizing decisions.
1096 virtual size_t unsafe_max_alloc();
1098 virtual bool is_maximal_no_gc() const {
1099 return _g1_storage.uncommitted_size() == 0;
1100 }
1102 // The total number of regions in the heap.
1103 size_t n_regions() { return _hrs.length(); }
1105 // The max number of regions in the heap.
1106 size_t max_regions() { return _hrs.max_length(); }
1108 // The number of regions that are completely free.
1109 size_t free_regions() { return _free_list.length(); }
1111 // The number of regions that are not completely free.
1112 size_t used_regions() { return n_regions() - free_regions(); }
1114 // The number of regions available for "regular" expansion.
1115 size_t expansion_regions() { return _expansion_regions; }
1117 // Factory method for HeapRegion instances. It will return NULL if
1118 // the allocation fails.
1119 HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
1121 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1122 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1123 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1124 void verify_dirty_young_regions() PRODUCT_RETURN;
1126 // verify_region_sets() performs verification over the region
1127 // lists. It will be compiled in the product code to be used when
1128 // necessary (i.e., during heap verification).
1129 void verify_region_sets();
1131 // verify_region_sets_optional() is planted in the code for
1132 // list verification in non-product builds (and it can be enabled in
1133 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1134 #if HEAP_REGION_SET_FORCE_VERIFY
1135 void verify_region_sets_optional() {
1136 verify_region_sets();
1137 }
1138 #else // HEAP_REGION_SET_FORCE_VERIFY
1139 void verify_region_sets_optional() { }
1140 #endif // HEAP_REGION_SET_FORCE_VERIFY
1142 #ifdef ASSERT
1143 bool is_on_master_free_list(HeapRegion* hr) {
1144 return hr->containing_set() == &_free_list;
1145 }
1147 bool is_in_humongous_set(HeapRegion* hr) {
1148 return hr->containing_set() == &_humongous_set;
1149 }
1150 #endif // ASSERT
1152 // Wrapper for the region list operations that can be called from
1153 // methods outside this class.
1155 void secondary_free_list_add_as_tail(FreeRegionList* list) {
1156 _secondary_free_list.add_as_tail(list);
1157 }
1159 void append_secondary_free_list() {
1160 _free_list.add_as_head(&_secondary_free_list);
1161 }
1163 void append_secondary_free_list_if_not_empty_with_lock() {
1164 // If the secondary free list looks empty there's no reason to
1165 // take the lock and then try to append it.
1166 if (!_secondary_free_list.is_empty()) {
1167 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1168 append_secondary_free_list();
1169 }
1170 }
1172 void old_set_remove(HeapRegion* hr) {
1173 _old_set.remove(hr);
1174 }
1176 size_t non_young_capacity_bytes() {
1177 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1178 }
1180 void set_free_regions_coming();
1181 void reset_free_regions_coming();
1182 bool free_regions_coming() { return _free_regions_coming; }
1183 void wait_while_free_regions_coming();
1185 // Determine whether the given region is one that we are using as an
1186 // old GC alloc region.
1187 bool is_old_gc_alloc_region(HeapRegion* hr) {
1188 return hr == _retained_old_gc_alloc_region;
1189 }
1191 // Perform a collection of the heap; intended for use in implementing
1192 // "System.gc". This probably implies as full a collection as the
1193 // "CollectedHeap" supports.
1194 virtual void collect(GCCause::Cause cause);
1196 // The same as above but assume that the caller holds the Heap_lock.
1197 void collect_locked(GCCause::Cause cause);
1199 // This interface assumes that it's being called by the
1200 // vm thread. It collects the heap assuming that the
1201 // heap lock is already held and that we are executing in
1202 // the context of the vm thread.
1203 virtual void collect_as_vm_thread(GCCause::Cause cause);
1205 // True iff an evacuation has failed in the most-recent collection.
1206 bool evacuation_failed() { return _evacuation_failed; }
1208 // It will free a region if it has allocated objects in it that are
1209 // all dead. It calls either free_region() or
1210 // free_humongous_region() depending on the type of the region that
1211 // is passed to it.
1212 void free_region_if_empty(HeapRegion* hr,
1213 size_t* pre_used,
1214 FreeRegionList* free_list,
1215 OldRegionSet* old_proxy_set,
1216 HumongousRegionSet* humongous_proxy_set,
1217 HRRSCleanupTask* hrrs_cleanup_task,
1218 bool par);
1220 // It appends the free list to the master free list and updates the
1221 // master humongous list according to the contents of the proxy
1222 // list. It also adjusts the total used bytes according to pre_used
1223 // (if par is true, it will do so by taking the ParGCRareEvent_lock).
1224 void update_sets_after_freeing_regions(size_t pre_used,
1225 FreeRegionList* free_list,
1226 OldRegionSet* old_proxy_set,
1227 HumongousRegionSet* humongous_proxy_set,
1228 bool par);
1230 // Returns "TRUE" iff "p" points into the committed areas of the heap.
1231 virtual bool is_in(const void* p) const;
1233 // Return "TRUE" iff the given object address is within the collection
1234 // set.
1235 inline bool obj_in_cs(oop obj);
1237 // Return "TRUE" iff the given object address is in the reserved
1238 // region of g1 (excluding the permanent generation).
1239 bool is_in_g1_reserved(const void* p) const {
1240 return _g1_reserved.contains(p);
1241 }
1243 // Returns a MemRegion that corresponds to the space that has been
1244 // reserved for the heap
1245 MemRegion g1_reserved() {
1246 return _g1_reserved;
1247 }
1249 // Returns a MemRegion that corresponds to the space that has been
1250 // committed in the heap
1251 MemRegion g1_committed() {
1252 return _g1_committed;
1253 }
1255 virtual bool is_in_closed_subset(const void* p) const;
1257 // This resets the card table to all zeros. It is used after
1258 // a collection pause which used the card table to claim cards.
1259 void cleanUpCardTable();
1261 // Iteration functions.
1263 // Iterate over all the ref-containing fields of all objects, calling
1264 // "cl.do_oop" on each.
1265 virtual void oop_iterate(OopClosure* cl) {
1266 oop_iterate(cl, true);
1267 }
1268 void oop_iterate(OopClosure* cl, bool do_perm);
1270 // Same as above, restricted to a memory region.
1271 virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
1272 oop_iterate(mr, cl, true);
1273 }
1274 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
1276 // Iterate over all objects, calling "cl.do_object" on each.
1277 virtual void object_iterate(ObjectClosure* cl) {
1278 object_iterate(cl, true);
1279 }
1280 virtual void safe_object_iterate(ObjectClosure* cl) {
1281 object_iterate(cl, true);
1282 }
1283 void object_iterate(ObjectClosure* cl, bool do_perm);
1285 // Iterate over all objects allocated since the last collection, calling
1286 // "cl.do_object" on each. The heap must have been initialized properly
1287 // to support this function, or else this call will fail.
1288 virtual void object_iterate_since_last_GC(ObjectClosure* cl);
1290 // Iterate over all spaces in use in the heap, in ascending address order.
1291 virtual void space_iterate(SpaceClosure* cl);
1293 // Iterate over heap regions, in address order, terminating the
1294 // iteration early if the "doHeapRegion" method returns "true".
1295 void heap_region_iterate(HeapRegionClosure* blk) const;
1297 // Iterate over heap regions starting with r (or the first region if "r"
1298 // is NULL), in address order, terminating early if the "doHeapRegion"
1299 // method returns "true".
1300 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
1302 // Return the region with the given index. It assumes the index is valid.
1303 HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
1305 // Divide the heap region sequence into "chunks" of some size (the number
1306 // of regions divided by the number of parallel threads times some
1307 // overpartition factor, currently 4). Assumes that this will be called
1308 // in parallel by ParallelGCThreads worker threads with distinct worker
1309 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1310 // calls will use the same "claim_value", and that that claim value is
1311 // different from the claim_value of any heap region before the start of
1312 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1313 // attempting to claim the first region in each chunk, and, if
1314 // successful, applying the closure to each region in the chunk (and
1315 // setting the claim value of the second and subsequent regions of the
1316 // chunk.) For now requires that "doHeapRegion" always returns "false",
1317 // i.e., that a closure never attempt to abort a traversal.
1318 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1319 uint worker,
1320 uint no_of_par_workers,
1321 jint claim_value);
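// Editorial sketch (not part of this change): a minimal closure suitable for
// the chunked parallel iteration above; note that doHeapRegion() returns
// false, as required. The closure name and the statistic gathered are
// hypothetical:
//
//   class SketchRegionUsedClosure : public HeapRegionClosure {
//     size_t _used_bytes;
//   public:
//     SketchRegionUsedClosure() : _used_bytes(0) { }
//     bool doHeapRegion(HeapRegion* r) {
//       _used_bytes += r->used();   // accumulate per-region used bytes
//       return false;               // never abort the traversal
//     }
//     size_t used_bytes() const { return _used_bytes; }
//   };
//
//   // Each of the no_of_par_workers worker threads would then call, with its
//   // own worker id and an agreed-upon, previously unused claim value:
//   //   heap_region_par_iterate_chunked(&cl, worker_id, workers, claim_value);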
1323 // It resets all the region claim values to the default.
1324 void reset_heap_region_claim_values();
1326 // Resets the claim values of regions in the current
1327 // collection set to the default.
1328 void reset_cset_heap_region_claim_values();
1330 #ifdef ASSERT
1331 bool check_heap_region_claim_values(jint claim_value);
1333 // Same as the routine above but only checks regions in the
1334 // current collection set.
1335 bool check_cset_heap_region_claim_values(jint claim_value);
1336 #endif // ASSERT
1338 // Clear the cached cset start regions and (more importantly)
1339 // the time stamps. Called when we reset the GC time stamp.
1340 void clear_cset_start_regions();
1342 // Given the id of a worker, obtain or calculate a suitable
1343 // starting region for iterating over the current collection set.
1344 HeapRegion* start_cset_region_for_worker(int worker_i);
1346 // Iterate over the regions (if any) in the current collection set.
1347 void collection_set_iterate(HeapRegionClosure* blk);
1349 // As above but starting from region r
1350 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1352 // Returns the first (lowest address) compactible space in the heap.
1353 virtual CompactibleSpace* first_compactible_space();
1355 // A CollectedHeap will contain some number of spaces. This finds the
1356 // space containing a given address, or else returns NULL.
1357 virtual Space* space_containing(const void* addr) const;
1359 // A G1CollectedHeap will contain some number of heap regions. This
1360 // finds the region containing a given address, or else returns NULL.
1361 template <class T>
1362 inline HeapRegion* heap_region_containing(const T addr) const;
1364 // Like the above, but requires "addr" to be in the heap (to avoid a
1365 // null-check), and unlike the above, may return a continuing humongous
1366 // region.
1367 template <class T>
1368 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1370 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1371 // each address in the (reserved) heap is a member of exactly
1372 // one block. The defining characteristic of a block is that it is
1373 // possible to find its size, and thus to progress forward to the next
1374 // block. (Blocks may be of different sizes.) Thus, blocks may
1375 // represent Java objects, or they might be free blocks in a
1376 // free-list-based heap (or subheap), as long as the two kinds are
1377 // distinguishable and the size of each is determinable.
1379 // Returns the address of the start of the "block" that contains the
1380 // address "addr". We say "blocks" instead of "object" since some heaps
1381 // may not pack objects densely; a chunk may either be an object or a
1382 // non-object.
1383 virtual HeapWord* block_start(const void* addr) const;
1385 // Requires "addr" to be the start of a chunk, and returns its size.
1386 // "addr + size" is required to be the start of a new chunk, or the end
1387 // of the active area of the heap.
1388 virtual size_t block_size(const HeapWord* addr) const;
1390 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1391 // the block is an object.
1392 virtual bool block_is_obj(const HeapWord* addr) const;
1394 // Does this heap support heap inspection? (+PrintClassHistogram)
1395 virtual bool supports_heap_inspection() const { return true; }
1397 // Section on thread-local allocation buffers (TLABs)
1398 // See CollectedHeap for semantics.
1400 virtual bool supports_tlab_allocation() const;
1401 virtual size_t tlab_capacity(Thread* thr) const;
1402 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
1404 // Can a compiler initialize a new object without store barriers?
1405 // This permission only extends from the creation of a new object
1406 // via a TLAB up to the first subsequent safepoint. If such permission
1407 // is granted for this heap type, the compiler promises to call
1408 // defer_store_barrier() below on any slow path allocation of
1409 // a new object for which such initializing store barriers will
1410 // have been elided. G1, like CMS, allows this, but should be
1411 // ready to provide a compensating write barrier as necessary
1412 // if that storage came out of a non-young region. The efficiency
1413 // of this implementation depends crucially on being able to
1414 // answer very efficiently in constant time whether a piece of
1415 // storage in the heap comes from a young region or not.
1416 // See ReduceInitialCardMarks.
1417 virtual bool can_elide_tlab_store_barriers() const {
1418 return true;
1419 }
1421 virtual bool card_mark_must_follow_store() const {
1422 return true;
1423 }
1425 bool is_in_young(const oop obj) {
1426 HeapRegion* hr = heap_region_containing(obj);
1427 return hr != NULL && hr->is_young();
1428 }
1430 #ifdef ASSERT
1431 virtual bool is_in_partial_collection(const void* p);
1432 #endif
1434 virtual bool is_scavengable(const void* addr);
1436 // We don't need barriers for initializing stores to objects
1437 // in the young gen: for the SATB pre-barrier, there is no
1438 // pre-value that needs to be remembered; for the remembered-set
1439 // update logging post-barrier, we don't maintain remembered set
1440 // information for young gen objects.
1441 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1442 return is_in_young(new_obj);
1443 }
1445 // Can a compiler elide a store barrier when it writes
1446 // a permanent oop into the heap? Applies when the compiler
1447 // is storing x to the heap, where x->is_perm() is true.
1448 virtual bool can_elide_permanent_oop_store_barriers() const {
1449 // At least until perm gen collection is also G1-ified, at
1450 // which point this should return false.
1451 return true;
1452 }
1454 // Returns "true" iff the given word_size is "very large".
1455 static bool isHumongous(size_t word_size) {
1456 // Note this has to be strictly greater-than as the TLABs
1457 // are capped at the humongous threshold and we want to
1458 // ensure that we don't try to allocate a TLAB as
1459 // humongous and that we don't allocate a humongous
1460 // object in a TLAB.
1461 return word_size > _humongous_object_threshold_in_words;
1462 }
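  // Worked example (assuming, as G1 does, a threshold of half a region):
  // with 1 MB regions and 8-byte heap words the threshold is 65536 words.
  // A TLAB capped at exactly 65536 words is therefore *not* humongous
  // (the test is strictly greater-than), while a 65537-word allocation
  // request is, and takes the humongous allocation path instead.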
1464 // Update mod union table with the set of dirty cards.
1465 void updateModUnion();
1467 // Set the mod union bits corresponding to the given memRegion. Note
1468 // that this is always a safe operation, since it doesn't clear any
1469 // bits.
1470 void markModUnionRange(MemRegion mr);
1472 // Records the fact that a marking phase is no longer in progress.
1473 void set_marking_complete() {
1474 _mark_in_progress = false;
1475 }
1476 void set_marking_started() {
1477 _mark_in_progress = true;
1478 }
1479 bool mark_in_progress() {
1480 return _mark_in_progress;
1481 }
1483 // Print the maximum heap capacity.
1484 virtual size_t max_capacity() const;
1486 virtual jlong millis_since_last_gc();
1488 // Perform any cleanup actions necessary before allowing a verification.
1489 virtual void prepare_for_verify();
1491 // Perform verification.
1493 // vo == UsePrevMarking -> use "prev" marking information,
1494 // vo == UseNextMarking -> use "next" marking information
1495 // vo == UseMarkWord -> use the mark word in the object header
1496 //
1497 // NOTE: Only the "prev" marking information is guaranteed to be
1498 // consistent most of the time, so most calls to this should use
1499 // vo == UsePrevMarking.
1500 // Currently, there is only one case where this is called with
1501 // vo == UseNextMarking, which is to verify the "next" marking
1502 // information at the end of remark.
1503 // Currently there is only one place where this is called with
1504 // vo == UseMarkWord, which is to verify the marking during a
1505 // full GC.
1506 void verify(bool allow_dirty, bool silent, VerifyOption vo);
1508 // Override; it uses the "prev" marking information
1509 virtual void verify(bool allow_dirty, bool silent);
1510 virtual void print_on(outputStream* st) const;
1511 virtual void print_extended_on(outputStream* st) const;
1513 virtual void print_gc_threads_on(outputStream* st) const;
1514 virtual void gc_threads_do(ThreadClosure* tc) const;
1516 // Override
1517 void print_tracing_info() const;
1519 // The following two methods are helpful for debugging RSet issues.
1520 void print_cset_rsets() PRODUCT_RETURN;
1521 void print_all_rsets() PRODUCT_RETURN;
1523 // Convenience function to be used in situations where the heap type can be
1524 // asserted to be this type.
1525 static G1CollectedHeap* heap();
1527 void set_region_short_lived_locked(HeapRegion* hr);
1528 // add appropriate methods for any other surv rate groups
1530 YoungList* young_list() { return _young_list; }
1532 // debugging
1533 bool check_young_list_well_formed() {
1534 return _young_list->check_list_well_formed();
1535 }
1537 bool check_young_list_empty(bool check_heap,
1538 bool check_sample = true);
1540 // *** Stuff related to concurrent marking. It's not clear to me that so
1541 // many of these need to be public.
1543 // The functions below are helper functions that a subclass of
1544 // "CollectedHeap" can use in the implementation of its virtual
1545 // functions.
1546 // This performs a concurrent marking of the live objects in a
1547 // bitmap off to the side.
1548 void doConcurrentMark();
1550 bool isMarkedPrev(oop obj) const;
1551 bool isMarkedNext(oop obj) const;
1553 // vo == UsePrevMarking -> use "prev" marking information,
1554 // vo == UseNextMarking -> use "next" marking information,
1555 // vo == UseMarkWord -> use mark word from object header
1556 bool is_obj_dead_cond(const oop obj,
1557 const HeapRegion* hr,
1558 const VerifyOption vo) const {
1560 switch (vo) {
1561 case VerifyOption_G1UsePrevMarking:
1562 return is_obj_dead(obj, hr);
1563 case VerifyOption_G1UseNextMarking:
1564 return is_obj_ill(obj, hr);
1565 default:
1566 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1567 return !obj->is_gc_marked();
1568 }
1569 }
1571 // Determine if an object is dead, given the object and also
1572 // the region to which the object belongs. An object is dead
1573 // iff a) it was not allocated since the last mark and b) it
1574 // is not marked.
1576 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1577 return
1578 !hr->obj_allocated_since_prev_marking(obj) &&
1579 !isMarkedPrev(obj);
1580 }
1582 // This is used when copying an object to survivor space.
1583 // If the object is marked live, then we mark the copy live.
1584 // If the object is allocated since the start of this mark
1585 // cycle, then we mark the copy live.
1586 // If the object has been around since the previous mark
1587 // phase, and hasn't been marked yet during this phase,
1588 // then we don't mark it, we just wait for the
1589 // current marking cycle to get to it.
1591 // This function returns true when an object has been
1592 // around since the previous marking and hasn't yet
1593 // been marked during this marking.
1595 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1596 return
1597 !hr->obj_allocated_since_next_marking(obj) &&
1598 !isMarkedNext(obj);
1599 }
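  // Summarizing the copy-to-survivor policy described above:
  //
  //   allocated since the current (next) marking started -> mark the copy
  //   already marked in the current marking               -> mark the copy
  //   neither, i.e. is_obj_ill() returns true             -> leave the copy
  //                                                          for the marker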
1601 // Determine if an object is dead, given only the object itself.
1602 // This will find the region to which the object belongs and
1603 // then call the region version of the same function.
1605 // Note: if it is in the permanent gen it isn't dead.
1606 // Note: if it is NULL it isn't dead.
1608 // vo == UsePrevMarking -> use "prev" marking information,
1609 // vo == UseNextMarking -> use "next" marking information,
1610 // vo == UseMarkWord -> use mark word from object header
1611 bool is_obj_dead_cond(const oop obj,
1612 const VerifyOption vo) const {
1614 switch (vo) {
1615 case VerifyOption_G1UsePrevMarking:
1616 return is_obj_dead(obj);
1617 case VerifyOption_G1UseNextMarking:
1618 return is_obj_ill(obj);
1619 default:
1620 assert(vo == VerifyOption_G1UseMarkWord, "must be");
1621 return !obj->is_gc_marked();
1622 }
1623 }
1625 bool is_obj_dead(const oop obj) const {
1626 const HeapRegion* hr = heap_region_containing(obj);
1627 if (hr == NULL) {
1628 if (Universe::heap()->is_in_permanent(obj))
1629 return false;
1630 else if (obj == NULL) return false;
1631 else return true;
1632 }
1633 else return is_obj_dead(obj, hr);
1634 }
1636 bool is_obj_ill(const oop obj) const {
1637 const HeapRegion* hr = heap_region_containing(obj);
1638 if (hr == NULL) {
1639 if (Universe::heap()->is_in_permanent(obj))
1640 return false;
1641 else if (obj == NULL) return false;
1642 else return true;
1643 }
1644 else return is_obj_ill(obj, hr);
1645 }
1647 // The following is just to alert the verification code
1648 // that a full collection has occurred and that the
1649 // remembered sets are no longer up to date.
1650 bool _full_collection;
1651 void set_full_collection() { _full_collection = true;}
1652 void clear_full_collection() {_full_collection = false;}
1653 bool full_collection() {return _full_collection;}
1655 ConcurrentMark* concurrent_mark() const { return _cm; }
1656 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1658 // The dirty cards region list is used to record a subset of regions
1659 // whose cards need clearing. The list is populated during the
1660 // remembered set scanning and drained during the card table
1661 // cleanup. Although the methods are reentrant, population/draining
1662 // phases must not overlap. For synchronization purposes the last
1663 // element on the list points to itself.
1664 HeapRegion* _dirty_cards_region_list;
1665 void push_dirty_cards_region(HeapRegion* hr);
1666 HeapRegion* pop_dirty_cards_region();
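  // Shape of the list, as described above (sketch only; the push/pop
  // bodies are defined out of line):
  //
  //   _dirty_cards_region_list -> R1 -> R2 -> R3
  //                                            `--> R3 (tail points to itself)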
1668 public:
1669 void stop_conc_gc_threads();
1671 size_t pending_card_num();
1672 size_t max_pending_card_num();
1673 size_t cards_scanned();
1675 protected:
1676 size_t _max_heap_capacity;
1677 };
1679 #define use_local_bitmaps 1
1680 #define verify_local_bitmaps 0
1681 #define oop_buffer_length 256
1683 #ifndef PRODUCT
1684 class GCLabBitMap;
1685 class GCLabBitMapClosure: public BitMapClosure {
1686 private:
1687 ConcurrentMark* _cm;
1688 GCLabBitMap* _bitmap;
1690 public:
1691 GCLabBitMapClosure(ConcurrentMark* cm,
1692 GCLabBitMap* bitmap) {
1693 _cm = cm;
1694 _bitmap = bitmap;
1695 }
1697 virtual bool do_bit(size_t offset);
1698 };
1699 #endif // !PRODUCT
1701 class GCLabBitMap: public BitMap {
1702 private:
1703 ConcurrentMark* _cm;
1705 int _shifter;
1706 size_t _bitmap_word_covers_words;
1708 // beginning of the heap
1709 HeapWord* _heap_start;
1711 // this is the actual start of the GCLab
1712 HeapWord* _real_start_word;
1714 // this is the actual end of the GCLab
1715 HeapWord* _real_end_word;
1717 // this is the first word, possibly located before the actual start
1718 // of the GCLab, that corresponds to the first bit of the bitmap
1719 HeapWord* _start_word;
1721 // size of a GCLab in words
1722 size_t _gclab_word_size;
1724 static int shifter() {
1725 return MinObjAlignment - 1;
1726 }
1728 // How many heap words does a single bitmap word correspond to?
1729 static size_t bitmap_word_covers_words() {
1730 return BitsPerWord << shifter();
1731 }
1733 size_t gclab_word_size() const {
1734 return _gclab_word_size;
1735 }
1737 // Calculates actual GCLab size in words
1738 size_t gclab_real_word_size() const {
1739 return bitmap_size_in_bits(pointer_delta(_real_end_word, _start_word))
1740 / BitsPerWord;
1741 }
1743 static size_t bitmap_size_in_bits(size_t gclab_word_size) {
1744 size_t bits_in_bitmap = gclab_word_size >> shifter();
1745 // We are going to ensure that the beginning of a word in this
1746 // bitmap also corresponds to the beginning of a word in the
1747 // global marking bitmap. To handle the case where a GCLab
1748 // starts from the middle of the bitmap, we need to add enough
1749 // space (i.e. up to a bitmap word) to ensure that we have
1750 // enough bits in the bitmap.
1751 return bits_in_bitmap + BitsPerWord - 1;
1752 }
1753 public:
1754 GCLabBitMap(HeapWord* heap_start, size_t gclab_word_size)
1755 : BitMap(bitmap_size_in_bits(gclab_word_size)),
1756 _cm(G1CollectedHeap::heap()->concurrent_mark()),
1757 _shifter(shifter()),
1758 _bitmap_word_covers_words(bitmap_word_covers_words()),
1759 _heap_start(heap_start),
1760 _gclab_word_size(gclab_word_size),
1761 _real_start_word(NULL),
1762 _real_end_word(NULL),
1763 _start_word(NULL) {
1764 guarantee(false, "GCLabBitMap::GCLabBitmap(): don't call this any more");
1765 }
1767 inline unsigned heapWordToOffset(HeapWord* addr) {
1768 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
1769 assert(offset < size(), "offset should be within bounds");
1770 return offset;
1771 }
1773 inline HeapWord* offsetToHeapWord(size_t offset) {
1774 HeapWord* addr = _start_word + (offset << _shifter);
1775 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
1776 return addr;
1777 }
1779 bool fields_well_formed() {
1780 bool ret1 = (_real_start_word == NULL) &&
1781 (_real_end_word == NULL) &&
1782 (_start_word == NULL);
1783 if (ret1)
1784 return true;
1786 bool ret2 = _real_start_word >= _start_word &&
1787 _start_word < _real_end_word &&
1788 (_real_start_word + _gclab_word_size) == _real_end_word &&
1789 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
1790 > _real_end_word;
1791 return ret2;
1792 }
1794 inline bool mark(HeapWord* addr) {
1795 guarantee(use_local_bitmaps, "invariant");
1796 assert(fields_well_formed(), "invariant");
1798 if (addr >= _real_start_word && addr < _real_end_word) {
1799 assert(!isMarked(addr), "should not have already been marked");
1801 // first mark it on the bitmap
1802 at_put(heapWordToOffset(addr), true);
1804 return true;
1805 } else {
1806 return false;
1807 }
1808 }
1810 inline bool isMarked(HeapWord* addr) {
1811 guarantee(use_local_bitmaps, "invariant");
1812 assert(fields_well_formed(), "invariant");
1814 return at(heapWordToOffset(addr));
1815 }
1817 void set_buffer(HeapWord* start) {
1818 guarantee(false, "set_buffer(): don't call this any more");
1820 guarantee(use_local_bitmaps, "invariant");
1821 clear();
1823 assert(start != NULL, "invariant");
1824 _real_start_word = start;
1825 _real_end_word = start + _gclab_word_size;
1827 size_t diff =
1828 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
1829 _start_word = start - diff;
1831 assert(fields_well_formed(), "invariant");
1832 }
1834 #ifndef PRODUCT
1835 void verify() {
1836 // verify that the marks have been propagated
1837 GCLabBitMapClosure cl(_cm, this);
1838 iterate(&cl);
1839 }
1840 #endif // PRODUCT
1842 void retire() {
1843 guarantee(false, "retire(): don't call this any more");
1845 guarantee(use_local_bitmaps, "invariant");
1846 assert(fields_well_formed(), "invariant");
1848 if (_start_word != NULL) {
1849 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
1851 // this means that the bitmap was set up for the GCLab
1852 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
1854 mark_bitmap->mostly_disjoint_range_union(this,
1855 0, // always start from the start of the bitmap
1856 _start_word,
1857 gclab_real_word_size());
1858 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
1860 #ifndef PRODUCT
1861 if (use_local_bitmaps && verify_local_bitmaps)
1862 verify();
1863 #endif // PRODUCT
1864 } else {
1865 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
1866 }
1867 }
1869 size_t bitmap_size_in_words() const {
1870 return (bitmap_size_in_bits(gclab_word_size()) + BitsPerWord - 1) / BitsPerWord;
1871 }
1873 };
1875 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1876 private:
1877 bool _retired;
1879 public:
1880 G1ParGCAllocBuffer(size_t gclab_word_size);
1882 void set_buf(HeapWord* buf) {
1883 ParGCAllocBuffer::set_buf(buf);
1884 _retired = false;
1885 }
1887 void retire(bool end_of_gc, bool retain) {
1888 if (_retired)
1889 return;
1890 ParGCAllocBuffer::retire(end_of_gc, retain);
1891 _retired = true;
1892 }
1893 };
1895 class G1ParScanThreadState : public StackObj {
1896 protected:
1897 G1CollectedHeap* _g1h;
1898 RefToScanQueue* _refs;
1899 DirtyCardQueue _dcq;
1900 CardTableModRefBS* _ct_bs;
1901 G1RemSet* _g1_rem;
1903 G1ParGCAllocBuffer _surviving_alloc_buffer;
1904 G1ParGCAllocBuffer _tenured_alloc_buffer;
1905 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1906 ageTable _age_table;
1908 size_t _alloc_buffer_waste;
1909 size_t _undo_waste;
1911 OopsInHeapRegionClosure* _evac_failure_cl;
1912 G1ParScanHeapEvacClosure* _evac_cl;
1913 G1ParScanPartialArrayClosure* _partial_scan_cl;
1915 int _hash_seed;
1916 uint _queue_num;
1918 size_t _term_attempts;
1920 double _start;
1921 double _start_strong_roots;
1922 double _strong_roots_time;
1923 double _start_term;
1924 double _term_time;
1926 // Map from young-age-index (0 == not young, 1 is youngest) to
1927 // surviving words. base is what we get back from the malloc call
1928 size_t* _surviving_young_words_base;
1929 // this points into the array, as we use the first few entries for padding
1930 size_t* _surviving_young_words;
1932 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
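  // For example, with a 64-byte cache line and an 8-byte size_t this is
  // 8 elements of padding in front of the interesting entries of the
  // surviving-young-words array.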
1934 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1936 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1938 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1939 CardTableModRefBS* ctbs() { return _ct_bs; }
1941 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1942 if (!from->is_survivor()) {
1943 _g1_rem->par_write_ref(from, p, tid);
1944 }
1945 }
1947 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1948 // If the new value of the field points to the same region or
1949 // is the to-space, we don't need to include it in the Rset updates.
1950 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1951 size_t card_index = ctbs()->index_for(p);
1952 // If the card hasn't been added to the buffer, do it.
1953 if (ctbs()->mark_card_deferred(card_index)) {
1954 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1955 }
1956 }
1957 }
1959 public:
1960 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num);
1962 ~G1ParScanThreadState() {
1963 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
1964 }
1966 RefToScanQueue* refs() { return _refs; }
1967 ageTable* age_table() { return &_age_table; }
1969 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1970 return _alloc_buffers[purpose];
1971 }
1973 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1974 size_t undo_waste() const { return _undo_waste; }
1976 #ifdef ASSERT
1977 bool verify_ref(narrowOop* ref) const;
1978 bool verify_ref(oop* ref) const;
1979 bool verify_task(StarTask ref) const;
1980 #endif // ASSERT
1982 template <class T> void push_on_queue(T* ref) {
1983 assert(verify_ref(ref), "sanity");
1984 refs()->push(ref);
1985 }
1987 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1988 if (G1DeferredRSUpdate) {
1989 deferred_rs_update(from, p, tid);
1990 } else {
1991 immediate_rs_update(from, p, tid);
1992 }
1993 }
1995 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1997 HeapWord* obj = NULL;
1998 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1999 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
2000 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
2001 assert(gclab_word_size == alloc_buf->word_sz(),
2002 "dynamic resizing is not supported");
2003 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
2004 alloc_buf->retire(false, false);
2006 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
2007 if (buf == NULL) return NULL; // Let caller handle allocation failure.
2008 // Otherwise.
2009 alloc_buf->set_buf(buf);
2011 obj = alloc_buf->allocate(word_sz);
2012 assert(obj != NULL, "buffer was definitely big enough...");
2013 } else {
2014 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
2015 }
2016 return obj;
2017 }
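  // Worked example of the test above (hypothetical numbers): with a
  // desired PLAB of gclab_word_size == 4096 words and
  // ParallelGCBufferWastePct == 10, requests of up to 409 words retire
  // the current buffer and install a fresh one, while anything larger
  // is allocated directly so a nearly-full PLAB is not discarded for it.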
2019 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
2020 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
2021 if (obj != NULL) return obj;
2022 return allocate_slow(purpose, word_sz);
2023 }
2025 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
2026 if (alloc_buffer(purpose)->contains(obj)) {
2027 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
2028 "should contain whole object");
2029 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
2030 } else {
2031 CollectedHeap::fill_with_object(obj, word_sz);
2032 add_to_undo_waste(word_sz);
2033 }
2034 }
2036 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
2037 _evac_failure_cl = evac_failure_cl;
2038 }
2039 OopsInHeapRegionClosure* evac_failure_closure() {
2040 return _evac_failure_cl;
2041 }
2043 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
2044 _evac_cl = evac_cl;
2045 }
2047 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
2048 _partial_scan_cl = partial_scan_cl;
2049 }
2051 int* hash_seed() { return &_hash_seed; }
2052 uint queue_num() { return _queue_num; }
2054 size_t term_attempts() const { return _term_attempts; }
2055 void note_term_attempt() { _term_attempts++; }
2057 void start_strong_roots() {
2058 _start_strong_roots = os::elapsedTime();
2059 }
2060 void end_strong_roots() {
2061 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
2062 }
2063 double strong_roots_time() const { return _strong_roots_time; }
2065 void start_term_time() {
2066 note_term_attempt();
2067 _start_term = os::elapsedTime();
2068 }
2069 void end_term_time() {
2070 _term_time += (os::elapsedTime() - _start_term);
2071 }
2072 double term_time() const { return _term_time; }
2074 double elapsed_time() const {
2075 return os::elapsedTime() - _start;
2076 }
2078 static void
2079 print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
2080 void
2081 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
2083 size_t* surviving_young_words() {
2084 // We add on to hide entry 0 which accumulates surviving words for
2085 // age -1 regions (i.e. non-young ones)
2086 return _surviving_young_words;
2087 }
2089 void retire_alloc_buffers() {
2090 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
2091 size_t waste = _alloc_buffers[ap]->words_remaining();
2092 add_to_alloc_buffer_waste(waste);
2093 _alloc_buffers[ap]->retire(true, false);
2094 }
2095 }
2097 template <class T> void deal_with_reference(T* ref_to_scan) {
2098 if (has_partial_array_mask(ref_to_scan)) {
2099 _partial_scan_cl->do_oop_nv(ref_to_scan);
2100 } else {
2101 // Note: we can use "raw" versions of "region_containing" because
2102 // "obj_to_scan" is definitely in the heap, and is not in a
2103 // humongous region.
2104 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
2105 _evac_cl->set_region(r);
2106 _evac_cl->do_oop_nv(ref_to_scan);
2107 }
2108 }
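  // In other words, a queue entry is either an ordinary reference to be
  // scanned and evacuated, or (when it carries the partial-array mask) a
  // request to continue scanning a chunk of a large object array, which
  // is handed to the partial-scan closure above.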
2110 void deal_with_reference(StarTask ref) {
2111 assert(verify_task(ref), "sanity");
2112 if (ref.is_narrow()) {
2113 deal_with_reference((narrowOop*)ref);
2114 } else {
2115 deal_with_reference((oop*)ref);
2116 }
2117 }
2119 public:
2120 void trim_queue();
2121 };
2123 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP