Fri, 04 Apr 2014 10:43:56 +0200
8038498: Fix includes and C inlining after 8035330
Summary: Change 8035330 ("Remove G1ParScanPartialArrayClosure and G1ParScanHeapEvacClosure") broke the debug build on AIX. The method do_oop_partial_array() was added in a header, but it requires the inline function par_write_ref() through several inlined calls. In some cpp files, such as arguments.cpp, par_write_ref() is never defined, because the corresponding inline header is not included. The AIX debug VM does not start because of the missing symbol. This change fixes that by cleaning up the include dependencies.
Reviewed-by: tschatzl, stefank
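
The failure pattern, as a simplified sketch (the Foo/foo file names below are illustrative, not the actual G1 sources): an inline member function is declared in a .hpp but defined in a companion .inline.hpp; every .cpp file that reaches it, even indirectly through other inlined code, must include the .inline.hpp, otherwise a debug build that does not actually inline the call is left with an unresolved symbol.

  // foo.hpp - declaration only
  class Foo {
  public:
    void bar(int* p);
  };

  // foo.inline.hpp - the inline definition
  #include "foo.hpp"
  inline void Foo::bar(int* p) { *p += 1; }

  // user.cpp - reaches Foo::bar() through inlined calls, so it must include
  // foo.inline.hpp; including only foo.hpp compiles, but can leave a debug
  // VM with a missing symbol, which is what happened on AIX.
  #include "foo.inline.hpp"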
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
28 #include "gc_implementation/g1/concurrentMark.hpp"
29 #include "gc_implementation/g1/evacuationInfo.hpp"
30 #include "gc_implementation/g1/g1AllocRegion.hpp"
31 #include "gc_implementation/g1/g1HRPrinter.hpp"
32 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
33 #include "gc_implementation/g1/g1RemSet.hpp"
34 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
35 #include "gc_implementation/g1/g1YCTypes.hpp"
36 #include "gc_implementation/g1/heapRegionSeq.hpp"
37 #include "gc_implementation/g1/heapRegionSet.hpp"
38 #include "gc_implementation/shared/hSpaceCounters.hpp"
39 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
40 #include "memory/barrierSet.hpp"
41 #include "memory/memRegion.hpp"
42 #include "memory/sharedHeap.hpp"
43 #include "utilities/stack.hpp"
45 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
46 // It uses the "Garbage First" heap organization and algorithm, which
47 // may combine concurrent marking with parallel, incremental compaction of
48 // heap subsets that will yield large amounts of garbage.
50 // Forward declarations
51 class HeapRegion;
52 class HRRSCleanupTask;
53 class GenerationSpec;
54 class OopsInHeapRegionClosure;
55 class G1KlassScanClosure;
56 class G1ScanHeapEvacClosure;
57 class ObjectClosure;
58 class SpaceClosure;
59 class CompactibleSpaceClosure;
60 class Space;
61 class G1CollectorPolicy;
62 class GenRemSet;
63 class G1RemSet;
64 class HeapRegionRemSetIterator;
65 class ConcurrentMark;
66 class ConcurrentMarkThread;
67 class ConcurrentG1Refine;
68 class ConcurrentGCTimer;
69 class GenerationCounters;
70 class STWGCTimer;
71 class G1NewTracer;
72 class G1OldTracer;
73 class EvacuationFailedInfo;
74 class nmethod;
75 class Ticks;
77 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
78 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
80 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
81 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
83 enum GCAllocPurpose {
84 GCAllocForTenured,
85 GCAllocForSurvived,
86 GCAllocPurposeCount
87 };
89 class YoungList : public CHeapObj<mtGC> {
90 private:
91 G1CollectedHeap* _g1h;
93 HeapRegion* _head;
95 HeapRegion* _survivor_head;
96 HeapRegion* _survivor_tail;
98 HeapRegion* _curr;
100 uint _length;
101 uint _survivor_length;
103 size_t _last_sampled_rs_lengths;
104 size_t _sampled_rs_lengths;
106 void empty_list(HeapRegion* list);
108 public:
109 YoungList(G1CollectedHeap* g1h);
111 void push_region(HeapRegion* hr);
112 void add_survivor_region(HeapRegion* hr);
114 void empty_list();
115 bool is_empty() { return _length == 0; }
116 uint length() { return _length; }
117 uint survivor_length() { return _survivor_length; }
119 // Currently we do not keep track of the used byte sum for the
120 // young list and the survivors and it'd be quite a lot of work to
121 // do so. When we eventually replace the young list with
122 // instances of HeapRegionLinkedList we'll get that for free. So,
123 // we'll report the more accurate information then.
124 size_t eden_used_bytes() {
125 assert(length() >= survivor_length(), "invariant");
126 return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
127 }
128 size_t survivor_used_bytes() {
129 return (size_t) survivor_length() * HeapRegion::GrainBytes;
130 }
132 void rs_length_sampling_init();
133 bool rs_length_sampling_more();
134 void rs_length_sampling_next();
136 void reset_sampled_info() {
137 _last_sampled_rs_lengths = 0;
138 }
139 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
141 // for development purposes
142 void reset_auxilary_lists();
143 void clear() { _head = NULL; _length = 0; }
145 void clear_survivors() {
146 _survivor_head = NULL;
147 _survivor_tail = NULL;
148 _survivor_length = 0;
149 }
151 HeapRegion* first_region() { return _head; }
152 HeapRegion* first_survivor_region() { return _survivor_head; }
153 HeapRegion* last_survivor_region() { return _survivor_tail; }
155 // debugging
156 bool check_list_well_formed();
157 bool check_list_empty(bool check_sample = true);
158 void print();
159 };
161 class MutatorAllocRegion : public G1AllocRegion {
162 protected:
163 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
164 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
165 public:
166 MutatorAllocRegion()
167 : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
168 };
170 class SurvivorGCAllocRegion : public G1AllocRegion {
171 protected:
172 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
173 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
174 public:
175 SurvivorGCAllocRegion()
176 : G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
177 };
179 class OldGCAllocRegion : public G1AllocRegion {
180 protected:
181 virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
182 virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
183 public:
184 OldGCAllocRegion()
185 : G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
186 };
188 // The G1 STW is alive closure.
189 // An instance is embedded into the G1CH and used as the
190 // (optional) _is_alive_non_header closure in the STW
191 // reference processor. It is also extensively used during
192 // reference processing in STW evacuation pauses.
193 class G1STWIsAliveClosure: public BoolObjectClosure {
194 G1CollectedHeap* _g1;
195 public:
196 G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
197 bool do_object_b(oop p);
198 };
200 class RefineCardTableEntryClosure;
202 class G1CollectedHeap : public SharedHeap {
203 friend class VM_G1CollectForAllocation;
204 friend class VM_G1CollectFull;
205 friend class VM_G1IncCollectionPause;
206 friend class VMStructs;
207 friend class MutatorAllocRegion;
208 friend class SurvivorGCAllocRegion;
209 friend class OldGCAllocRegion;
211 // Closures used in implementation.
212 template <G1Barrier barrier, bool do_mark_object>
213 friend class G1ParCopyClosure;
214 friend class G1IsAliveClosure;
215 friend class G1EvacuateFollowersClosure;
216 friend class G1ParScanThreadState;
217 friend class G1ParScanClosureSuper;
218 friend class G1ParEvacuateFollowersClosure;
219 friend class G1ParTask;
220 friend class G1FreeGarbageRegionClosure;
221 friend class RefineCardTableEntryClosure;
222 friend class G1PrepareCompactClosure;
223 friend class RegionSorter;
224 friend class RegionResetter;
225 friend class CountRCClosure;
226 friend class EvacPopObjClosure;
227 friend class G1ParCleanupCTTask;
229 // Other related classes.
230 friend class G1MarkSweep;
232 private:
233 // The one and only G1CollectedHeap, so static functions can find it.
234 static G1CollectedHeap* _g1h;
236 static size_t _humongous_object_threshold_in_words;
238 // Storage for the G1 heap.
239 VirtualSpace _g1_storage;
240 MemRegion _g1_reserved;
242 // The part of _g1_storage that is currently committed.
243 MemRegion _g1_committed;
245 // The master free list. It will satisfy all new region allocations.
246 FreeRegionList _free_list;
248 // The secondary free list which contains regions that have been
249 // freed up during the cleanup process. This will be appended to the
250 // master free list when appropriate.
251 FreeRegionList _secondary_free_list;
253 // It keeps track of the old regions.
254 HeapRegionSet _old_set;
256 // It keeps track of the humongous regions.
257 HeapRegionSet _humongous_set;
259 // The number of regions we could create by expansion.
260 uint _expansion_regions;
262 // The block offset table for the G1 heap.
263 G1BlockOffsetSharedArray* _bot_shared;
265 // Tears down the region sets / lists so that they are empty and the
266 // regions on the heap do not belong to a region set / list. The
267 // only exception is the humongous set which we leave unaltered. If
268 // free_list_only is true, it will only tear down the master free
269 // list. It is called before a Full GC (free_list_only == false) or
270 // before heap shrinking (free_list_only == true).
271 void tear_down_region_sets(bool free_list_only);
273 // Rebuilds the region sets / lists so that they are repopulated to
274 // reflect the contents of the heap. The only exception is the
275 // humongous set which was not torn down in the first place. If
276 // free_list_only is true, it will only rebuild the master free
277 // list. It is called after a Full GC (free_list_only == false) or
278 // after heap shrinking (free_list_only == true).
279 void rebuild_region_sets(bool free_list_only);
281 // The sequence of all heap regions in the heap.
282 HeapRegionSeq _hrs;
284 // Alloc region used to satisfy mutator allocation requests.
285 MutatorAllocRegion _mutator_alloc_region;
287 // Alloc region used to satisfy allocation requests by the GC for
288 // survivor objects.
289 SurvivorGCAllocRegion _survivor_gc_alloc_region;
291 // PLAB sizing policy for survivors.
292 PLABStats _survivor_plab_stats;
294 // Alloc region used to satisfy allocation requests by the GC for
295 // old objects.
296 OldGCAllocRegion _old_gc_alloc_region;
298 // PLAB sizing policy for tenured objects.
299 PLABStats _old_plab_stats;
301 PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
302 PLABStats* stats = NULL;
304 switch (purpose) {
305 case GCAllocForSurvived:
306 stats = &_survivor_plab_stats;
307 break;
308 case GCAllocForTenured:
309 stats = &_old_plab_stats;
310 break;
311 default:
312 assert(false, "unrecognized GCAllocPurpose");
313 }
315 return stats;
316 }
318 // The last old region we allocated to during the last GC.
319 // Typically, it is not full so we should re-use it during the next GC.
320 HeapRegion* _retained_old_gc_alloc_region;
322 // It specifies whether we should attempt to expand the heap after a
323 // region allocation failure. If heap expansion fails we set this to
324 // false so that we don't re-attempt the heap expansion (it's likely
325 // that subsequent expansion attempts will also fail if one fails).
326 // Currently, it is only consulted during GC and it's reset at the
327 // start of each GC.
328 bool _expand_heap_after_alloc_failure;
330 // It resets the mutator alloc region before new allocations can take place.
331 void init_mutator_alloc_region();
333 // It releases the mutator alloc region.
334 void release_mutator_alloc_region();
336 // It initializes the GC alloc regions at the start of a GC.
337 void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
339 // It releases the GC alloc regions at the end of a GC.
340 void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
342 // It does any cleanup that needs to be done on the GC alloc regions
343 // before a Full GC.
344 void abandon_gc_alloc_regions();
346 // Helper for monitoring and management support.
347 G1MonitoringSupport* _g1mm;
349 // Determines PLAB size for a particular allocation purpose.
350 size_t desired_plab_sz(GCAllocPurpose purpose);
352 // Outside of GC pauses, the number of bytes used in all regions other
353 // than the current allocation region.
354 size_t _summary_bytes_used;
356 // This is used for a quick test on whether a reference points into
357 // the collection set or not. Basically, we have an array, with one
358 // byte per region, and that byte denotes whether the corresponding
359 // region is in the collection set or not. The entry corresponding
360 // to the bottom of the heap, i.e., region 0, is pointed to by
361 // _in_cset_fast_test_base. The _in_cset_fast_test field has been
362 // biased so that it actually points to address 0 of the address
363 // space, to make the test as fast as possible (we can simply shift
364 // the address to index into it, instead of having to subtract the
365 // bottom of the heap from the address before shifting it; basically
366 // it works in the same way the card table works).
367 bool* _in_cset_fast_test;
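  // As an illustration of the biasing described above (schematic only;
  // "heap_bottom" stands for the start of the reserved heap and
  // HeapRegion::LogOfHRGrainBytes for the region-size shift), the fast test
  // amounts to
  //   _in_cset_fast_test[(uintptr_t) obj >> HeapRegion::LogOfHRGrainBytes]
  // rather than
  //   _in_cset_fast_test_base[((uintptr_t) obj - (uintptr_t) heap_bottom)
  //                           >> HeapRegion::LogOfHRGrainBytes]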
369 // The allocated array used for the fast test on whether a reference
370 // points into the collection set or not. This field is also used to
371 // free the array.
372 bool* _in_cset_fast_test_base;
374 // The length of the _in_cset_fast_test_base array.
375 uint _in_cset_fast_test_length;
377 volatile unsigned _gc_time_stamp;
379 size_t* _surviving_young_words;
381 G1HRPrinter _hr_printer;
383 void setup_surviving_young_words();
384 void update_surviving_young_words(size_t* surv_young_words);
385 void cleanup_surviving_young_words();
387 // It decides whether an explicit GC should start a concurrent cycle
388 // instead of doing a STW GC. Currently, a concurrent cycle is
389 // explicitly started if:
390 // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
391 // (b) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent, or
392 // (c) cause == _g1_humongous_allocation.
393 bool should_do_concurrent_full_gc(GCCause::Cause cause);
395 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
396 // concurrent cycles) we have started.
397 volatile unsigned int _old_marking_cycles_started;
399 // Keeps track of how many "old marking cycles" (i.e., Full GCs or
400 // concurrent cycles) we have completed.
401 volatile unsigned int _old_marking_cycles_completed;
403 bool _concurrent_cycle_started;
405 // This is a non-product method that is helpful for testing. It is
406 // called at the end of a GC and artificially expands the heap by
407 // allocating a number of dead regions. This way we can induce very
408 // frequent marking cycles and stress the cleanup / concurrent
409 // cleanup code more (as all the regions that will be allocated by
410 // this method will be found dead by the marking cycle).
411 void allocate_dummy_regions() PRODUCT_RETURN;
413 // Clear RSets after a compaction. It also resets the GC time stamps.
414 void clear_rsets_post_compaction();
416 // If the HR printer is active, dump the state of the regions in the
417 // heap after a compaction.
418 void print_hrs_post_compaction();
420 double verify(bool guard, const char* msg);
421 void verify_before_gc();
422 void verify_after_gc();
424 void log_gc_header();
425 void log_gc_footer(double pause_time_sec);
427 // These are macros so that, if the assert fires, we get the correct
428 // line number, file, etc.
430 #define heap_locking_asserts_err_msg(_extra_message_) \
431 err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
432 (_extra_message_), \
433 BOOL_TO_STR(Heap_lock->owned_by_self()), \
434 BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
435 BOOL_TO_STR(Thread::current()->is_VM_thread()))
437 #define assert_heap_locked() \
438 do { \
439 assert(Heap_lock->owned_by_self(), \
440 heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
441 } while (0)
443 #define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
444 do { \
445 assert(Heap_lock->owned_by_self() || \
446 (SafepointSynchronize::is_at_safepoint() && \
447 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
448 heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
449 "should be at a safepoint")); \
450 } while (0)
452 #define assert_heap_locked_and_not_at_safepoint() \
453 do { \
454 assert(Heap_lock->owned_by_self() && \
455 !SafepointSynchronize::is_at_safepoint(), \
456 heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
457 "should not be at a safepoint")); \
458 } while (0)
460 #define assert_heap_not_locked() \
461 do { \
462 assert(!Heap_lock->owned_by_self(), \
463 heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
464 } while (0)
466 #define assert_heap_not_locked_and_not_at_safepoint() \
467 do { \
468 assert(!Heap_lock->owned_by_self() && \
469 !SafepointSynchronize::is_at_safepoint(), \
470 heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
471 "should not be at a safepoint")); \
472 } while (0)
474 #define assert_at_safepoint(_should_be_vm_thread_) \
475 do { \
476 assert(SafepointSynchronize::is_at_safepoint() && \
477 ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
478 heap_locking_asserts_err_msg("should be at a safepoint")); \
479 } while (0)
481 #define assert_not_at_safepoint() \
482 do { \
483 assert(!SafepointSynchronize::is_at_safepoint(), \
484 heap_locking_asserts_err_msg("should not be at a safepoint")); \
485 } while (0)
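  // Schematic example of intended use, e.g. a mutator allocation path that
  // must run outside a safepoint and without the Heap_lock (sketch only):
  //
  //   inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size, ...) {
  //     assert_heap_not_locked_and_not_at_safepoint();
  //     ...
  //   }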
487 protected:
489 // The young region list.
490 YoungList* _young_list;
492 // The current policy object for the collector.
493 G1CollectorPolicy* _g1_policy;
495 // This is the second level of trying to allocate a new region. If
496 // new_region() didn't find a region on the free_list, this call will
497 // check whether there's anything available on the
498 // secondary_free_list and/or wait for more regions to appear on
499 // that list, if _free_regions_coming is set.
500 HeapRegion* new_region_try_secondary_free_list(bool is_old);
502 // Try to allocate a single non-humongous HeapRegion sufficient for
503 // an allocation of the given word_size. If do_expand is true,
504 // attempt to expand the heap if necessary to satisfy the allocation
505 // request. If the region is to be used as an old region or for a
506 // humongous object, set is_old to true; otherwise, set it to false.
507 HeapRegion* new_region(size_t word_size, bool is_old, bool do_expand);
509 // Attempt to satisfy a humongous allocation request of the given
510 // size by finding a contiguous set of free regions of num_regions
511 // length and remove them from the master free list. Return the
512 // index of the first region or G1_NULL_HRS_INDEX if the search
513 // was unsuccessful.
514 uint humongous_obj_allocate_find_first(uint num_regions,
515 size_t word_size);
517 // Initialize a contiguous set of free regions of length num_regions
518 // and starting at index first so that they appear as a single
519 // humongous region.
520 HeapWord* humongous_obj_allocate_initialize_regions(uint first,
521 uint num_regions,
522 size_t word_size);
524 // Attempt to allocate a humongous object of the given size. Return
525 // NULL if unsuccessful.
526 HeapWord* humongous_obj_allocate(size_t word_size);
528 // The following two methods, allocate_new_tlab() and
529 // mem_allocate(), are the two main entry points from the runtime
530 // into the G1's allocation routines. They have the following
531 // assumptions:
532 //
533 // * They should both be called outside safepoints.
534 //
535 // * They should both be called without holding the Heap_lock.
536 //
537 // * All allocation requests for new TLABs should go to
538 // allocate_new_tlab().
539 //
540 // * All non-TLAB allocation requests should go to mem_allocate().
541 //
542 // * If either call cannot satisfy the allocation request using the
543 // current allocating region, they will try to get a new one. If
544 // this fails, they will attempt to do an evacuation pause and
545 // retry the allocation.
546 //
547 // * If all allocation attempts fail, even after trying to schedule
548 // an evacuation pause, allocate_new_tlab() will return NULL,
549 // whereas mem_allocate() will attempt a heap expansion and/or
550 // schedule a Full GC.
551 //
552 // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
553 // should never be called with word_size being humongous. All
554 // humongous allocation requests should go to mem_allocate() which
555 // will satisfy them with a special path.
557 virtual HeapWord* allocate_new_tlab(size_t word_size);
559 virtual HeapWord* mem_allocate(size_t word_size,
560 bool* gc_overhead_limit_was_exceeded);
562 // The following three methods take a gc_count_before_ret
563 // parameter which is used to return the GC count if the method
564 // returns NULL. Given that we are required to read the GC count
565 // while holding the Heap_lock, and these paths will take the
566 // Heap_lock at some point, it's easier to get them to read the GC
567 // count while holding the Heap_lock before they return NULL instead
568 // of the caller (namely: mem_allocate()) having to also take the
569 // Heap_lock just to read the GC count.
571 // First-level mutator allocation attempt: try to allocate out of
572 // the mutator alloc region without taking the Heap_lock. This
573 // should only be used for non-humongous allocations.
574 inline HeapWord* attempt_allocation(size_t word_size,
575 unsigned int* gc_count_before_ret,
576 int* gclocker_retry_count_ret);
578 // Second-level mutator allocation attempt: take the Heap_lock and
579 // retry the allocation attempt, potentially scheduling a GC
580 // pause. This should only be used for non-humongous allocations.
581 HeapWord* attempt_allocation_slow(size_t word_size,
582 unsigned int* gc_count_before_ret,
583 int* gclocker_retry_count_ret);
585 // Takes the Heap_lock and attempts a humongous allocation. It can
586 // potentially schedule a GC pause.
587 HeapWord* attempt_allocation_humongous(size_t word_size,
588 unsigned int* gc_count_before_ret,
589 int* gclocker_retry_count_ret);
591 // Allocation attempt that should be called during safepoints (e.g.,
592 // at the end of a successful GC). expect_null_mutator_alloc_region
593 // specifies whether the mutator alloc region is expected to be NULL
594 // or not.
595 HeapWord* attempt_allocation_at_safepoint(size_t word_size,
596 bool expect_null_mutator_alloc_region);
598 // It dirties the cards that cover the block so that the post
599 // write barrier never queues anything when updating objects on this
600 // block. It is assumed (and in fact we assert) that the block
601 // belongs to a young region.
602 inline void dirty_young_block(HeapWord* start, size_t word_size);
604 // Allocate blocks during garbage collection. Will ensure an
605 // allocation region, either by picking one or expanding the
606 // heap, and then allocate a block of the given size. The block
607 // may not be humongous - it must fit into a single heap region.
608 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
610 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
611 HeapRegion* alloc_region,
612 bool par,
613 size_t word_size);
615 // Ensure that no further allocations can happen in "r", bearing in mind
616 // that parallel threads might be attempting allocations.
617 void par_allocate_remaining_space(HeapRegion* r);
619 // Allocation attempt during GC for a survivor object / PLAB.
620 inline HeapWord* survivor_attempt_allocation(size_t word_size);
622 // Allocation attempt during GC for an old object / PLAB.
623 inline HeapWord* old_attempt_allocation(size_t word_size);
625 // These methods are the "callbacks" from the G1AllocRegion class.
627 // For mutator alloc regions.
628 HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
629 void retire_mutator_alloc_region(HeapRegion* alloc_region,
630 size_t allocated_bytes);
632 // For GC alloc regions.
633 HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
634 GCAllocPurpose ap);
635 void retire_gc_alloc_region(HeapRegion* alloc_region,
636 size_t allocated_bytes, GCAllocPurpose ap);
638 // - if explicit_gc is true, the GC is for a System.gc() or a heap
639 // inspection request and should collect the entire heap
640 // - if clear_all_soft_refs is true, all soft references should be
641 // cleared during the GC
642 // - if explicit_gc is false, word_size describes the allocation that
643 // the GC should attempt (at least) to satisfy
644 // - it returns false if it is unable to do the collection due to the
645 // GC locker being active, true otherwise
646 bool do_collection(bool explicit_gc,
647 bool clear_all_soft_refs,
648 size_t word_size);
650 // Callback from VM_G1CollectFull operation.
651 // Perform a full collection.
652 virtual void do_full_collection(bool clear_all_soft_refs);
654 // Resize the heap if necessary after a full collection. If this is
655 // after a collect-for-allocation, "word_size" is the allocation size,
656 // and will be considered part of the used portion of the heap.
657 void resize_if_necessary_after_full_collection(size_t word_size);
659 // Callback from VM_G1CollectForAllocation operation.
660 // This function does everything necessary/possible to satisfy a
661 // failed allocation request (including collection, expansion, etc.)
662 HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
664 // Attempt to expand the heap sufficiently
665 // to support an allocation of the given "word_size". If
666 // successful, perform the allocation and return the address of the
667 // allocated block, or else "NULL".
668 HeapWord* expand_and_allocate(size_t word_size);
670 // Process any reference objects discovered during
671 // an incremental evacuation pause.
672 void process_discovered_references(uint no_of_gc_workers);
674 // Enqueue any remaining discovered references
675 // after processing.
676 void enqueue_discovered_references(uint no_of_gc_workers);
678 public:
680 G1MonitoringSupport* g1mm() {
681 assert(_g1mm != NULL, "should have been initialized");
682 return _g1mm;
683 }
685 // Expand the garbage-first heap by at least the given size (in bytes!).
686 // Returns true if the heap was expanded by the requested amount;
687 // false otherwise.
688 // (Rounds up to a HeapRegion boundary.)
689 bool expand(size_t expand_bytes);
691 // Do anything common to GC's.
692 virtual void gc_prologue(bool full);
693 virtual void gc_epilogue(bool full);
695 // We register a region with the fast "in collection set" test. We
696 // simply set the array slot corresponding to this region to true.
697 void register_region_with_in_cset_fast_test(HeapRegion* r) {
698 assert(_in_cset_fast_test_base != NULL, "sanity");
699 assert(r->in_collection_set(), "invariant");
700 uint index = r->hrs_index();
701 assert(index < _in_cset_fast_test_length, "invariant");
702 assert(!_in_cset_fast_test_base[index], "invariant");
703 _in_cset_fast_test_base[index] = true;
704 }
706 // This is a fast test on whether a reference points into the
707 // collection set or not. Assume that the reference
708 // points into the heap.
709 inline bool in_cset_fast_test(oop obj);
711 void clear_cset_fast_test() {
712 assert(_in_cset_fast_test_base != NULL, "sanity");
713 memset(_in_cset_fast_test_base, false,
714 (size_t) _in_cset_fast_test_length * sizeof(bool));
715 }
717 // This is called at the start of either a concurrent cycle or a Full
718 // GC to update the number of old marking cycles started.
719 void increment_old_marking_cycles_started();
721 // This is called at the end of either a concurrent cycle or a Full
722 // GC to update the number of old marking cycles completed. Those two
723 // can happen in a nested fashion, i.e., we start a concurrent
724 // cycle, a Full GC happens half-way through it which ends first,
725 // and then the cycle notices that a Full GC happened and ends
726 // too. The concurrent parameter is a boolean to help us do a bit
727 // tighter consistency checking in the method. If concurrent is
728 // false, the caller is the inner caller in the nesting (i.e., the
729 // Full GC). If concurrent is true, the caller is the outer caller
730 // in this nesting (i.e., the concurrent cycle). Further nesting is
731 // not currently supported. The end of this call also notifies
732 // the FullGCCount_lock in case a Java thread is waiting for a full
733 // GC to happen (e.g., it called System.gc() with
734 // +ExplicitGCInvokesConcurrent).
735 void increment_old_marking_cycles_completed(bool concurrent);
737 unsigned int old_marking_cycles_completed() {
738 return _old_marking_cycles_completed;
739 }
741 void register_concurrent_cycle_start(const Ticks& start_time);
742 void register_concurrent_cycle_end();
743 void trace_heap_after_concurrent_cycle();
745 G1YCType yc_type();
747 G1HRPrinter* hr_printer() { return &_hr_printer; }
749 // Frees a non-humongous region by initializing its contents and
750 // adding it to the free list that's passed as a parameter (this is
751 // usually a local list which will be appended to the master free
752 // list later). The used bytes of freed regions are accumulated in
753 // pre_used. If par is true, the region's RSet will not be freed
754 // up. The assumption is that this will be done later.
755 // The locked parameter indicates if the caller has already taken
756 // care of proper synchronization. This may allow some optimizations.
757 void free_region(HeapRegion* hr,
758 FreeRegionList* free_list,
759 bool par,
760 bool locked = false);
762 // Frees a humongous region by collapsing it into individual regions
763 // and calling free_region() for each of them. The freed regions
764 // will be added to the free list that's passed as a parameter (this
765 // is usually a local list which will be appended to the master free
766 // list later). The used bytes of freed regions are accumulated in
767 // pre_used. If par is true, the region's RSet will not be freed
768 // up. The assumption is that this will be done later.
769 void free_humongous_region(HeapRegion* hr,
770 FreeRegionList* free_list,
771 bool par);
772 protected:
774 // Shrink the garbage-first heap by at most the given size (in bytes!).
775 // (Rounds down to a HeapRegion boundary.)
776 virtual void shrink(size_t expand_bytes);
777 void shrink_helper(size_t expand_bytes);
779 #if TASKQUEUE_STATS
780 static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
781 void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
782 void reset_taskqueue_stats();
783 #endif // TASKQUEUE_STATS
785 // Schedule the VM operation that will do an evacuation pause to
786 // satisfy an allocation request of word_size. *succeeded will
787 // return whether the VM operation was successful (it did do an
788 // evacuation pause) or not (another thread beat us to it or the GC
789 // locker was active). Given that we should not be holding the
790 // Heap_lock when we enter this method, we will pass the
791 // gc_count_before (i.e., total_collections()) as a parameter since
792 // it has to be read while holding the Heap_lock. Currently, both
793 // methods that call do_collection_pause() release the Heap_lock
794 // before the call, so it's easy to read gc_count_before just before.
795 HeapWord* do_collection_pause(size_t word_size,
796 unsigned int gc_count_before,
797 bool* succeeded,
798 GCCause::Cause gc_cause);
800 // The guts of the incremental collection pause, executed by the vm
801 // thread. It returns false if it is unable to do the collection due
802 // to the GC locker being active, true otherwise
803 bool do_collection_pause_at_safepoint(double target_pause_time_ms);
805 // Actually do the work of evacuating the collection set.
806 void evacuate_collection_set(EvacuationInfo& evacuation_info);
808 // The g1 remembered set of the heap.
809 G1RemSet* _g1_rem_set;
811 // A set of cards that cover the objects for which the RSets should be updated
812 // concurrently after the collection.
813 DirtyCardQueueSet _dirty_card_queue_set;
815 // The closure used to refine a single card.
816 RefineCardTableEntryClosure* _refine_cte_cl;
818 // A function to check the consistency of dirty card logs.
819 void check_ct_logs_at_safepoint();
821 // A DirtyCardQueueSet that is used to hold cards that contain
822 // references into the current collection set. This is used to
823 // update the remembered sets of the regions in the collection
824 // set in the event of an evacuation failure.
825 DirtyCardQueueSet _into_cset_dirty_card_queue_set;
827 // After a collection pause, make the regions in the CS into free
828 // regions.
829 void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
831 // Abandon the current collection set without recording policy
832 // statistics or updating free lists.
833 void abandon_collection_set(HeapRegion* cs_head);
835 // Applies "scan_non_heap_roots" to roots outside the heap,
836 // "scan_rs" to roots inside the heap (having done "set_region" to
837 // indicate the region in which the root resides),
838 // and does "scan_metadata". If "scan_rs" is
839 // NULL, then this step is skipped. The "worker_i"
840 // param is for use with parallel roots processing, and should be
841 // the "i" of the calling parallel worker thread's work(i) function.
842 // In the sequential case this param will be ignored.
843 void g1_process_strong_roots(bool is_scavenging,
844 ScanningOption so,
845 OopClosure* scan_non_heap_roots,
846 OopsInHeapRegionClosure* scan_rs,
847 G1KlassScanClosure* scan_klasses,
848 int worker_i);
850 // Apply "blk" to all the weak roots of the system. These include
851 // JNI weak roots, the code cache, system dictionary, symbol table,
852 // string table, and referents of reachable weak refs.
853 void g1_process_weak_roots(OopClosure* root_closure);
855 // Notifies all the necessary spaces that the committed space has
856 // been updated (either expanded or shrunk). It should be called
857 // after _g1_storage is updated.
858 void update_committed_space(HeapWord* old_end, HeapWord* new_end);
860 // The concurrent marker (and the thread it runs in.)
861 ConcurrentMark* _cm;
862 ConcurrentMarkThread* _cmThread;
863 bool _mark_in_progress;
865 // The concurrent refiner.
866 ConcurrentG1Refine* _cg1r;
868 // The parallel task queues
869 RefToScanQueueSet *_task_queues;
871 // True iff an evacuation has failed in the current collection.
872 bool _evacuation_failed;
874 EvacuationFailedInfo* _evacuation_failed_info_array;
876 // Failed evacuations cause some logical from-space objects to have
877 // forwarding pointers to themselves. Reset them.
878 void remove_self_forwarding_pointers();
880 // Together, these store an object with a preserved mark, and its mark value.
881 Stack<oop, mtGC> _objs_with_preserved_marks;
882 Stack<markOop, mtGC> _preserved_marks_of_objs;
884 // Preserve the mark of "obj", if necessary, in preparation for its mark
885 // word being overwritten with a self-forwarding-pointer.
886 void preserve_mark_if_necessary(oop obj, markOop m);
888 // The stack of evac-failure objects left to be scanned.
889 GrowableArray<oop>* _evac_failure_scan_stack;
890 // The closure to apply to evac-failure objects.
892 OopsInHeapRegionClosure* _evac_failure_closure;
893 // Set the field above.
894 void
895 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
896 _evac_failure_closure = evac_failure_closure;
897 }
899 // Push "obj" on the scan stack.
900 void push_on_evac_failure_scan_stack(oop obj);
901 // Process scan stack entries until the stack is empty.
902 void drain_evac_failure_scan_stack();
903 // True iff an invocation of "drain_evac_failure_scan_stack" is in progress; to
904 // prevent unnecessary recursion.
905 bool _drain_in_progress;
907 // Do any necessary initialization for evacuation-failure handling.
908 // "cl" is the closure that will be used to process evac-failure
909 // objects.
910 void init_for_evac_failure(OopsInHeapRegionClosure* cl);
911 // Do any necessary cleanup for evacuation-failure handling data
912 // structures.
913 void finalize_for_evac_failure();
915 // An attempt to evacuate "obj" has failed; take necessary steps.
916 oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
917 void handle_evacuation_failure_common(oop obj, markOop m);
919 #ifndef PRODUCT
920 // Support for forcing evacuation failures. Analogous to
921 // PromotionFailureALot for the other collectors.
923 // Records whether G1EvacuationFailureALot should be in effect
924 // for the current GC
925 bool _evacuation_failure_alot_for_current_gc;
927 // Used to record the GC number for interval checking when
928 // determining whether G1EvacuationFailureALot is in effect
929 // for the current GC.
930 size_t _evacuation_failure_alot_gc_number;
932 // Count of the number of evacuations between failures.
933 volatile size_t _evacuation_failure_alot_count;
935 // Set whether G1EvacuationFailureALot should be in effect
936 // for the current GC (based upon the type of GC and which
937 // command line flags are set).
938 inline bool evacuation_failure_alot_for_gc_type(bool gcs_are_young,
939 bool during_initial_mark,
940 bool during_marking);
942 inline void set_evacuation_failure_alot_for_current_gc();
944 // Return true if it's time to cause an evacuation failure.
945 inline bool evacuation_should_fail();
947 // Reset the G1EvacuationFailureALot counters. Should be called at
948 // the end of an evacuation pause in which an evacuation failure occurred.
949 inline void reset_evacuation_should_fail();
950 #endif // !PRODUCT
952 // ("Weak") Reference processing support.
953 //
954 // G1 has 2 instances of the reference processor class. One
955 // (_ref_processor_cm) handles reference object discovery
956 // and subsequent processing during concurrent marking cycles.
957 //
958 // The other (_ref_processor_stw) handles reference object
959 // discovery and processing during full GCs and incremental
960 // evacuation pauses.
961 //
962 // During an incremental pause, reference discovery will be
963 // temporarily disabled for _ref_processor_cm and will be
964 // enabled for _ref_processor_stw. At the end of the evacuation
965 // pause references discovered by _ref_processor_stw will be
966 // processed and discovery will be disabled. The previous
967 // setting for reference object discovery for _ref_processor_cm
968 // will be re-instated.
969 //
970 // At the start of marking:
971 // * Discovery by the CM ref processor is verified to be inactive
972 // and its discovered lists are empty.
973 // * Discovery by the CM ref processor is then enabled.
974 //
975 // At the end of marking:
976 // * Any references on the CM ref processor's discovered
977 // lists are processed (possibly MT).
978 //
979 // At the start of full GC we:
980 // * Disable discovery by the CM ref processor and
981 // empty CM ref processor's discovered lists
982 // (without processing any entries).
983 // * Verify that the STW ref processor is inactive and its
984 // discovered lists are empty.
985 // * Temporarily set STW ref processor discovery as single threaded.
986 // * Temporarily clear the STW ref processor's _is_alive_non_header
987 // field.
988 // * Finally enable discovery by the STW ref processor.
989 //
990 // The STW ref processor is used to record any discovered
991 // references during the full GC.
992 //
993 // At the end of a full GC we:
994 // * Enqueue any reference objects discovered by the STW ref processor
995 // that have non-live referents. This has the side-effect of
996 // making the STW ref processor inactive by disabling discovery.
997 // * Verify that the CM ref processor is still inactive
998 // and no references have been placed on its discovered
999 // lists (also checked as a precondition during initial marking).
1001 // The (stw) reference processor...
1002 ReferenceProcessor* _ref_processor_stw;
1004 STWGCTimer* _gc_timer_stw;
1005 ConcurrentGCTimer* _gc_timer_cm;
1007 G1OldTracer* _gc_tracer_cm;
1008 G1NewTracer* _gc_tracer_stw;
1010 // During reference object discovery, the _is_alive_non_header
1011 // closure (if non-null) is applied to the referent object to
1012 // determine whether the referent is live. If so then the
1013 // reference object does not need to be 'discovered' and can
1014 // be treated as a regular oop. This has the benefit of reducing
1015 // the number of 'discovered' reference objects that need to
1016 // be processed.
1017 //
1018 // Instance of the is_alive closure for embedding into the
1019 // STW reference processor as the _is_alive_non_header field.
1020 // Supplying a value for the _is_alive_non_header field is
1021 // optional but doing so prevents unnecessary additions to
1022 // the discovered lists during reference discovery.
1023 G1STWIsAliveClosure _is_alive_closure_stw;
1025 // The (concurrent marking) reference processor...
1026 ReferenceProcessor* _ref_processor_cm;
1028 // Instance of the concurrent mark is_alive closure for embedding
1029 // into the Concurrent Marking reference processor as the
1030 // _is_alive_non_header field. Supplying a value for the
1031 // _is_alive_non_header field is optional but doing so prevents
1032 // unnecessary additions to the discovered lists during reference
1033 // discovery.
1034 G1CMIsAliveClosure _is_alive_closure_cm;
1036 // Cache used by G1CollectedHeap::start_cset_region_for_worker().
1037 HeapRegion** _worker_cset_start_region;
1039 // Time stamp to validate the regions recorded in the cache
1040 // used by G1CollectedHeap::start_cset_region_for_worker().
1041 // The heap region entry for a given worker is valid iff
1042 // the associated time stamp value matches the current value
1043 // of G1CollectedHeap::_gc_time_stamp.
1044 unsigned int* _worker_cset_start_region_time_stamp;
1046 enum G1H_process_strong_roots_tasks {
1047 G1H_PS_filter_satb_buffers,
1048 G1H_PS_refProcessor_oops_do,
1049 // Leave this one last.
1050 G1H_PS_NumElements
1051 };
1053 SubTasksDone* _process_strong_tasks;
1055 volatile bool _free_regions_coming;
1057 public:
1059 SubTasksDone* process_strong_tasks() { return _process_strong_tasks; }
1061 void set_refine_cte_cl_concurrency(bool concurrent);
1063 RefToScanQueue *task_queue(int i) const;
1065 // A set of cards where updates happened during the GC
1066 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
1068 // A DirtyCardQueueSet that is used to hold cards that contain
1069 // references into the current collection set. This is used to
1070 // update the remembered sets of the regions in the collection
1071 // set in the event of an evacuation failure.
1072 DirtyCardQueueSet& into_cset_dirty_card_queue_set()
1073 { return _into_cset_dirty_card_queue_set; }
1075 // Create a G1CollectedHeap with the specified policy.
1076 // Must call the initialize method afterwards.
1077 // May not return if something goes wrong.
1078 G1CollectedHeap(G1CollectorPolicy* policy);
1080 // Initialize the G1CollectedHeap to have the initial and
1081 // maximum sizes and remembered and barrier sets
1082 // specified by the policy object.
1083 jint initialize();
1085 // Return the (conservative) maximum heap alignment for any G1 heap
1086 static size_t conservative_max_heap_alignment();
1088 // Initialize weak reference processing.
1089 virtual void ref_processing_init();
1091 void set_par_threads(uint t) {
1092 SharedHeap::set_par_threads(t);
1093 // Done in SharedHeap but oddly there are
1094 // two _process_strong_tasks's in a G1CollectedHeap
1095 // so do it here too.
1096 _process_strong_tasks->set_n_threads(t);
1097 }
1099 // Set _n_par_threads according to a policy TBD.
1100 void set_par_threads();
1102 void set_n_termination(int t) {
1103 _process_strong_tasks->set_n_threads(t);
1104 }
1106 virtual CollectedHeap::Name kind() const {
1107 return CollectedHeap::G1CollectedHeap;
1108 }
1110 // The current policy object for the collector.
1111 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
1113 virtual CollectorPolicy* collector_policy() const { return (CollectorPolicy*) g1_policy(); }
1115 // Adaptive size policy. No such thing for g1.
1116 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
1118 // The rem set and barrier set.
1119 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
1121 unsigned get_gc_time_stamp() {
1122 return _gc_time_stamp;
1123 }
1125 void reset_gc_time_stamp() {
1126 _gc_time_stamp = 0;
1127 OrderAccess::fence();
1128 // Clear the cached CSet starting regions and time stamps.
1129 // Their validity is dependent on the GC timestamp.
1130 clear_cset_start_regions();
1131 }
1133 void check_gc_time_stamps() PRODUCT_RETURN;
1135 void increment_gc_time_stamp() {
1136 ++_gc_time_stamp;
1137 OrderAccess::fence();
1138 }
1140 // Reset the given region's GC timestamp. If the region is a starts
1141 // humongous region, also reset the GC timestamps of its corresponding
1142 // continues humongous regions.
1143 void reset_gc_time_stamps(HeapRegion* hr);
1145 void iterate_dirty_card_closure(CardTableEntryClosure* cl,
1146 DirtyCardQueue* into_cset_dcq,
1147 bool concurrent, int worker_i);
1149 // The shared block offset table array.
1150 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
1152 // Reference Processing accessors
1154 // The STW reference processor....
1155 ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
1157 // The Concurrent Marking reference processor...
1158 ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
1160 ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
1161 G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
1163 virtual size_t capacity() const;
1164 virtual size_t used() const;
1165 // This should be called when we're not holding the heap lock. The
1166 // result might be a bit inaccurate.
1167 size_t used_unlocked() const;
1168 size_t recalculate_used() const;
1170 // These virtual functions do the actual allocation.
1171 // Some heaps may offer a contiguous region for shared non-blocking
1172 // allocation, via inlined code (by exporting the address of the top and
1173 // end fields defining the extent of the contiguous allocation region.)
1174 // But G1CollectedHeap doesn't yet support this.
1176 // Return an estimate of the maximum allocation that could be performed
1177 // without triggering any collection or expansion activity. In a
1178 // generational collector, for example, this is probably the largest
1179 // allocation that could be supported (without expansion) in the youngest
1180 // generation. It is "unsafe" because no locks are taken; the result
1181 // should be treated as an approximation, not a guarantee, for use in
1182 // heuristic resizing decisions.
1183 virtual size_t unsafe_max_alloc();
1185 virtual bool is_maximal_no_gc() const {
1186 return _g1_storage.uncommitted_size() == 0;
1187 }
1189 // The total number of regions in the heap.
1190 uint n_regions() { return _hrs.length(); }
1192 // The max number of regions in the heap.
1193 uint max_regions() { return _hrs.max_length(); }
1195 // The number of regions that are completely free.
1196 uint free_regions() { return _free_list.length(); }
1198 // The number of regions that are not completely free.
1199 uint used_regions() { return n_regions() - free_regions(); }
1201 // The number of regions available for "regular" expansion.
1202 uint expansion_regions() { return _expansion_regions; }
1204 // Factory method for HeapRegion instances. It will return NULL if
1205 // the allocation fails.
1206 HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
1208 void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1209 void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
1210 void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
1211 void verify_dirty_young_regions() PRODUCT_RETURN;
1213 // verify_region_sets() performs verification over the region
1214 // lists. It will be compiled in the product code to be used when
1215 // necessary (i.e., during heap verification).
1216 void verify_region_sets();
1218 // verify_region_sets_optional() is planted in the code for
1219 // list verification in non-product builds (and it can be enabled in
1220 // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
1221 #if HEAP_REGION_SET_FORCE_VERIFY
1222 void verify_region_sets_optional() {
1223 verify_region_sets();
1224 }
1225 #else // HEAP_REGION_SET_FORCE_VERIFY
1226 void verify_region_sets_optional() { }
1227 #endif // HEAP_REGION_SET_FORCE_VERIFY
1229 #ifdef ASSERT
1230 bool is_on_master_free_list(HeapRegion* hr) {
1231 return hr->containing_set() == &_free_list;
1232 }
1233 #endif // ASSERT
1235 // Wrapper for the region list operations that can be called from
1236 // methods outside this class.
1238 void secondary_free_list_add(FreeRegionList* list) {
1239 _secondary_free_list.add_ordered(list);
1240 }
1242 void append_secondary_free_list() {
1243 _free_list.add_ordered(&_secondary_free_list);
1244 }
1246 void append_secondary_free_list_if_not_empty_with_lock() {
1247 // If the secondary free list looks empty there's no reason to
1248 // take the lock and then try to append it.
1249 if (!_secondary_free_list.is_empty()) {
1250 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
1251 append_secondary_free_list();
1252 }
1253 }
1255 inline void old_set_remove(HeapRegion* hr);
1257 size_t non_young_capacity_bytes() {
1258 return _old_set.total_capacity_bytes() + _humongous_set.total_capacity_bytes();
1259 }
1261 void set_free_regions_coming();
1262 void reset_free_regions_coming();
1263 bool free_regions_coming() { return _free_regions_coming; }
1264 void wait_while_free_regions_coming();
1266 // Determine whether the given region is one that we are using as an
1267 // old GC alloc region.
1268 bool is_old_gc_alloc_region(HeapRegion* hr) {
1269 return hr == _retained_old_gc_alloc_region;
1270 }
1272 // Perform a collection of the heap; intended for use in implementing
1273 // "System.gc". This probably implies as full a collection as the
1274 // "CollectedHeap" supports.
1275 virtual void collect(GCCause::Cause cause);
1277 // The same as above but assume that the caller holds the Heap_lock.
1278 void collect_locked(GCCause::Cause cause);
1280 // True iff an evacuation has failed in the most-recent collection.
1281 bool evacuation_failed() { return _evacuation_failed; }
1283 void remove_from_old_sets(const HeapRegionSetCount& old_regions_removed, const HeapRegionSetCount& humongous_regions_removed);
1284 void prepend_to_freelist(FreeRegionList* list);
1285 void decrement_summary_bytes(size_t bytes);
1287 // Returns "TRUE" iff "p" points into the committed areas of the heap.
1288 virtual bool is_in(const void* p) const;
1290 // Return "TRUE" iff the given object address is within the collection
1291 // set.
1292 inline bool obj_in_cs(oop obj);
1294 // Return "TRUE" iff the given object address is in the reserved
1295 // region of g1.
1296 bool is_in_g1_reserved(const void* p) const {
1297 return _g1_reserved.contains(p);
1298 }
1300 // Returns a MemRegion that corresponds to the space that has been
1301 // reserved for the heap
1302 MemRegion g1_reserved() {
1303 return _g1_reserved;
1304 }
1306 // Returns a MemRegion that corresponds to the space that has been
1307 // committed in the heap
1308 MemRegion g1_committed() {
1309 return _g1_committed;
1310 }
1312 virtual bool is_in_closed_subset(const void* p) const;
1314 G1SATBCardTableModRefBS* g1_barrier_set() {
1315 return (G1SATBCardTableModRefBS*) barrier_set();
1316 }
1318 // This resets the card table to all zeros. It is used after
1319 // a collection pause which used the card table to claim cards.
1320 void cleanUpCardTable();
1322 // Iteration functions.
1324 // Iterate over all the ref-containing fields of all objects, calling
1325 // "cl.do_oop" on each.
1326 virtual void oop_iterate(ExtendedOopClosure* cl);
1328 // Same as above, restricted to a memory region.
1329 void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1331 // Iterate over all objects, calling "cl.do_object" on each.
1332 virtual void object_iterate(ObjectClosure* cl);
1334 virtual void safe_object_iterate(ObjectClosure* cl) {
1335 object_iterate(cl);
1336 }
1338 // Iterate over all spaces in use in the heap, in ascending address order.
1339 virtual void space_iterate(SpaceClosure* cl);
1341 // Iterate over heap regions, in address order, terminating the
1342 // iteration early if the "doHeapRegion" method returns "true".
1343 void heap_region_iterate(HeapRegionClosure* blk) const;
1345 // Return the region with the given index. It assumes the index is valid.
1346 inline HeapRegion* region_at(uint index) const;
1348 // Divide the heap region sequence into "chunks" of some size (the number
1349 // of regions divided by the number of parallel threads times some
1350 // overpartition factor, currently 4). Assumes that this will be called
1351 // in parallel by ParallelGCThreads worker threads with distinct worker
1352 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
1353 // calls will use the same "claim_value", and that that claim value is
1354 // different from the claim_value of any heap region before the start of
1355 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
1356 // attempting to claim the first region in each chunk, and, if
1357 // successful, applying the closure to each region in the chunk (and
1358 // setting the claim value of the second and subsequent regions of the
1359 // chunk.) For now requires that "doHeapRegion" always returns "false",
1360 // i.e., that a closure never attempt to abort a traversal.
1361 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
1362 uint worker,
1363 uint no_of_par_workers,
1364 jint claim_value);
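  // Schematic use from a parallel worker (closure type and claim value are
  // illustrative; the claim value just has to differ from the regions'
  // current claim values, as described above):
  //
  //   void MyParTask::work(uint worker_id) {
  //     MyHeapRegionClosure blk;
  //     _g1h->heap_region_par_iterate_chunked(&blk, worker_id, active_workers,
  //                                           my_claim_value);
  //   }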
1366 // It resets all the region claim values to the default.
1367 void reset_heap_region_claim_values();
1369 // Resets the claim values of regions in the current
1370 // collection set to the default.
1371 void reset_cset_heap_region_claim_values();
1373 #ifdef ASSERT
1374 bool check_heap_region_claim_values(jint claim_value);
1376 // Same as the routine above but only checks regions in the
1377 // current collection set.
1378 bool check_cset_heap_region_claim_values(jint claim_value);
1379 #endif // ASSERT
1381 // Clear the cached cset start regions and (more importantly)
1382 // the time stamps. Called when we reset the GC time stamp.
1383 void clear_cset_start_regions();
1385 // Given the id of a worker, obtain or calculate a suitable
1386 // starting region for iterating over the current collection set.
1387 HeapRegion* start_cset_region_for_worker(int worker_i);
1389 // This is a convenience method that is used by the
1390 // HeapRegionIterator classes to calculate the starting region for
1391 // each worker so that they do not all start from the same region.
1392 HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
1394 // Iterate over the regions (if any) in the current collection set.
1395 void collection_set_iterate(HeapRegionClosure* blk);
1397 // As above but starting from region r
1398 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
1400 // Returns the first (lowest address) compactible space in the heap.
1401 virtual CompactibleSpace* first_compactible_space();
1403 // A CollectedHeap will contain some number of spaces. This finds the
1404 // space containing a given address, or else returns NULL.
1405 virtual Space* space_containing(const void* addr) const;
1407 // A G1CollectedHeap will contain some number of heap regions. This
1408 // finds the region containing a given address, or else returns NULL.
1409 template <class T>
1410 inline HeapRegion* heap_region_containing(const T addr) const;
1412 // Like the above, but requires "addr" to be in the heap (to avoid a
1413 // null-check), and unlike the above, may return a continuing humongous
1414 // region.
1415 template <class T>
1416 inline HeapRegion* heap_region_containing_raw(const T addr) const;
1418 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
1419 // each address in the (reserved) heap is a member of exactly
1420 // one block. The defining characteristic of a block is that it is
1421 // possible to find its size, and thus to progress forward to the next
1422 // block. (Blocks may be of different sizes.) Thus, blocks may
1423 // represent Java objects, or they might be free blocks in a
1424 // free-list-based heap (or subheap), as long as the two kinds are
1425 // distinguishable and the size of each is determinable.
1427 // Returns the address of the start of the "block" that contains the
1428 // address "addr". We say "block" instead of "object" since some heaps
1429 // may not pack objects densely; a block may be either an object or a
1430 // non-object.
1431 virtual HeapWord* block_start(const void* addr) const;
1433 // Requires "addr" to be the start of a block, and returns its size.
1434 // "addr + size" is required to be the start of a new block, or the end
1435 // of the active area of the heap.
1436 virtual size_t block_size(const HeapWord* addr) const;
1438 // Requires "addr" to be the start of a block, and returns "TRUE" iff
1439 // the block is an object.
1440 virtual bool block_is_obj(const HeapWord* addr) const;
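// For illustration only: a hypothetical sketch of walking the blocks of a
// fully-parsed address range [bottom, top) using the three methods above
// (the bounds and the g1h pointer are assumptions for this example):
//
//   HeapWord* cur = bottom;
//   while (cur < top) {
//     size_t sz = g1h->block_size(cur);  // size of the block starting at cur
//     if (g1h->block_is_obj(cur)) {
//       // cur is the start of a live Java object of sz words
//     }
//     cur += sz;                         // advance to the next block
//   }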
1442 // Does this heap support heap inspection? (+PrintClassHistogram)
1443 virtual bool supports_heap_inspection() const { return true; }
1445 // Section on thread-local allocation buffers (TLABs)
1446 // See CollectedHeap for semantics.
1448 bool supports_tlab_allocation() const;
1449 size_t tlab_capacity(Thread* ignored) const;
1450 size_t tlab_used(Thread* ignored) const;
1451 size_t max_tlab_size() const;
1452 size_t unsafe_max_tlab_alloc(Thread* ignored) const;
1454 // Can a compiler initialize a new object without store barriers?
1455 // This permission only extends from the creation of a new object
1456 // via a TLAB up to the first subsequent safepoint. If such permission
1457 // is granted for this heap type, the compiler promises to call
1458 // defer_store_barrier() below on any slow path allocation of
1459 // a new object for which such initializing store barriers will
1460 // have been elided. G1, like CMS, allows this, but should be
1461 // ready to provide a compensating write barrier as necessary
1462 // if that storage came out of a non-young region. The efficiency
1463 // of this implementation depends crucially on being able to
1464 // answer very efficiently in constant time whether a piece of
1465 // storage in the heap comes from a young region or not.
1466 // See ReduceInitialCardMarks.
1467 virtual bool can_elide_tlab_store_barriers() const {
1468 return true;
1469 }
1471 virtual bool card_mark_must_follow_store() const {
1472 return true;
1473 }
1475 inline bool is_in_young(const oop obj);
1477 #ifdef ASSERT
1478 virtual bool is_in_partial_collection(const void* p);
1479 #endif
1481 virtual bool is_scavengable(const void* addr);
1483 // We don't need barriers for initializing stores to objects
1484 // in the young gen: for the SATB pre-barrier, there is no
1485 // pre-value that needs to be remembered; for the remembered-set
1486 // update logging post-barrier, we don't maintain remembered set
1487 // information for young gen objects.
1488 virtual inline bool can_elide_initializing_store_barrier(oop new_obj);
1490 // Returns "true" iff the given word_size is "very large".
1491 static bool isHumongous(size_t word_size) {
1492 // Note this has to be strictly greater-than as the TLABs
1493 // are capped at the humongous threshold and we want to
1494 // ensure that we don't try to allocate a TLAB as
1495 // humongous and that we don't allocate a humongous
1496 // object in a TLAB.
1497 return word_size > _humongous_object_threshold_in_words;
1498 }
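// Worked example (assuming, purely for illustration, 1 MB regions, 8-byte
// words and a threshold of half a region): the threshold is
// 1M / 8 / 2 = 65536 words, so a 65536-word allocation is not humongous
// (the test above is strictly greater-than) while a 65537-word one is.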
1500 // Update mod union table with the set of dirty cards.
1501 void updateModUnion();
1503 // Set the mod union bits corresponding to the given memRegion. Note
1504 // that this is always a safe operation, since it doesn't clear any
1505 // bits.
1506 void markModUnionRange(MemRegion mr);
1508 // Records the fact that a marking phase is no longer in progress.
1509 void set_marking_complete() {
1510 _mark_in_progress = false;
1511 }
1512 void set_marking_started() {
1513 _mark_in_progress = true;
1514 }
1515 bool mark_in_progress() {
1516 return _mark_in_progress;
1517 }
1519 // Returns the maximum heap capacity.
1520 virtual size_t max_capacity() const;
1522 virtual jlong millis_since_last_gc();
1525 // Convenience function to be used in situations where the heap type can be
1526 // asserted to be this type.
1527 static G1CollectedHeap* heap();
1529 void set_region_short_lived_locked(HeapRegion* hr);
1530 // add appropriate methods for any other surv rate groups
1532 YoungList* young_list() const { return _young_list; }
1534 // debugging
1535 bool check_young_list_well_formed() {
1536 return _young_list->check_list_well_formed();
1537 }
1539 bool check_young_list_empty(bool check_heap,
1540 bool check_sample = true);
1542 // *** Stuff related to concurrent marking. It's not clear to me that so
1543 // many of these need to be public.
1545 // The functions below are helper functions that a subclass of
1546 // "CollectedHeap" can use in the implementation of its virtual
1547 // functions.
1548 // This performs a concurrent marking of the live objects in a
1549 // bitmap off to the side.
1550 void doConcurrentMark();
1552 bool isMarkedPrev(oop obj) const;
1553 bool isMarkedNext(oop obj) const;
1555 // Determine if an object is dead, given the object and also
1556 // the region to which the object belongs. An object is dead
1557 // iff a) it was not allocated since the last mark and b) it
1558 // is not marked.
1560 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1561 return
1562 !hr->obj_allocated_since_prev_marking(obj) &&
1563 !isMarkedPrev(obj);
1564 }
1566 // This function returns true when an object has been
1567 // around since the previous marking and hasn't yet
1568 // been marked during this marking.
1570 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1571 return
1572 !hr->obj_allocated_since_next_marking(obj) &&
1573 !isMarkedNext(obj);
1574 }
1576 // Determine if an object is dead, given only the object itself.
1577 // This will find the region to which the object belongs and
1578 // then call the region version of the same function.
1580 // Note: if the object is NULL it is not considered dead.
1582 inline bool is_obj_dead(const oop obj) const;
1584 inline bool is_obj_ill(const oop obj) const;
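// For illustration only: a simplified sketch of what the one-argument
// is_obj_dead() overload above might do, per the comments (the real definition
// presumably lives in the corresponding .inline.hpp file and may handle
// additional cases):
//
//   bool is_obj_dead(const oop obj) const {
//     if (obj == NULL) return false;   // a NULL "object" is not dead
//     return is_obj_dead(obj, heap_region_containing(obj));
//   }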
1586 bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
1587 HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
1588 bool is_marked(oop obj, VerifyOption vo);
1589 const char* top_at_mark_start_str(VerifyOption vo);
1591 ConcurrentMark* concurrent_mark() const { return _cm; }
1593 // Refinement
1595 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1597 // The dirty cards region list is used to record a subset of regions
1598 // whose cards need clearing. The list is populated during the
1599 // remembered set scanning and drained during the card table
1600 // cleanup. Although the methods are reentrant, population/draining
1601 // phases must not overlap. For synchronization purposes the last
1602 // element on the list points to itself.
1603 HeapRegion* _dirty_cards_region_list;
1604 void push_dirty_cards_region(HeapRegion* hr);
1605 HeapRegion* pop_dirty_cards_region();
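// For illustration only: a hypothetical sketch of the draining phase,
// assuming pop_dirty_cards_region() returns NULL once the list is empty:
//
//   HeapRegion* hr;
//   while ((hr = pop_dirty_cards_region()) != NULL) {
//     // clear the card table entries covering hr
//   }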
1607 // Optimized nmethod scanning support routines
1609 // Register the given nmethod with the G1 heap
1610 virtual void register_nmethod(nmethod* nm);
1612 // Unregister the given nmethod from the G1 heap
1613 virtual void unregister_nmethod(nmethod* nm);
1615 // Migrate the nmethods in the code root lists of the regions
1616 // in the collection set to regions in to-space. In the event
1617 // of an evacuation failure, nmethods that reference objects
1618 // that were not successfully evacuated are not migrated.
1619 void migrate_strong_code_roots();
1621 // Free up superfluous code root memory.
1622 void purge_code_root_memory();
1624 // During an initial mark pause, mark all the code roots that
1625 // point into regions *not* in the collection set.
1626 void mark_strong_code_roots(uint worker_id);
1628 // Rebuild the strong code root lists for each region
1629 // after a full GC
1630 void rebuild_strong_code_roots();
1632 // Delete entries for dead interned strings and clean up unreferenced symbols
1633 // in symbol table, possibly in parallel.
1634 void unlink_string_and_symbol_table(BoolObjectClosure* is_alive, bool unlink_strings = true, bool unlink_symbols = true);
1636 // Redirty logged cards in the refinement queue.
1637 void redirty_logged_cards();
1638 // Verification
1640 // The following is just to alert the verification code
1641 // that a full collection has occurred and that the
1642 // remembered sets are no longer up to date.
1643 bool _full_collection;
1644 void set_full_collection() { _full_collection = true; }
1645 void clear_full_collection() { _full_collection = false; }
1646 bool full_collection() { return _full_collection; }
1648 // Perform any cleanup actions necessary before allowing a verification.
1649 virtual void prepare_for_verify();
1651 // Perform verification.
1653 // vo == UsePrevMarking -> use "prev" marking information,
1654 // vo == UseNextMarking -> use "next" marking information
1655 // vo == UseMarkWord -> use the mark word in the object header
1656 //
1657 // NOTE: Only the "prev" marking information is guaranteed to be
1658 // consistent most of the time, so most calls to this should use
1659 // vo == UsePrevMarking.
1660 // Currently, there is only one case where this is called with
1661 // vo == UseNextMarking, which is to verify the "next" marking
1662 // information at the end of remark.
1663 // Currently there is only one place where this is called with
1664 // vo == UseMarkWord, which is to verify the marking during a
1665 // full GC.
1666 void verify(bool silent, VerifyOption vo);
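// For illustration: per the note above, a typical caller would be expected to
// use the "prev" marking information, e.g. (the VerifyOption enumerator name
// is an assumption for this example):
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   g1h->prepare_for_verify();
//   g1h->verify(false /* silent */, VerifyOption_G1UsePrevMarking);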
1668 // Override; it uses the "prev" marking information
1669 virtual void verify(bool silent);
1671 // The methods below are here for convenience and dispatch the
1672 // appropriate method depending on value of the given VerifyOption
1673 // parameter. The values for that parameter, and their meanings,
1674 // are the same as those above.
1676 bool is_obj_dead_cond(const oop obj,
1677 const HeapRegion* hr,
1678 const VerifyOption vo) const;
1680 bool is_obj_dead_cond(const oop obj,
1681 const VerifyOption vo) const;
1683 // Printing
1685 virtual void print_on(outputStream* st) const;
1686 virtual void print_extended_on(outputStream* st) const;
1687 virtual void print_on_error(outputStream* st) const;
1689 virtual void print_gc_threads_on(outputStream* st) const;
1690 virtual void gc_threads_do(ThreadClosure* tc) const;
1692 // Override
1693 void print_tracing_info() const;
1695 // The following two methods are helpful for debugging RSet issues.
1696 void print_cset_rsets() PRODUCT_RETURN;
1697 void print_all_rsets() PRODUCT_RETURN;
1699 public:
1700 void stop_conc_gc_threads();
1702 size_t pending_card_num();
1703 size_t cards_scanned();
1705 protected:
1706 size_t _max_heap_capacity;
1707 };
1709 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1710 private:
1711 bool _retired;
1713 public:
1714 G1ParGCAllocBuffer(size_t gclab_word_size);
1716 void set_buf(HeapWord* buf) {
1717 ParGCAllocBuffer::set_buf(buf);
1718 _retired = false;
1719 }
1721 void retire(bool end_of_gc, bool retain) {
1722 if (_retired)
1723 return;
1724 ParGCAllocBuffer::retire(end_of_gc, retain);
1725 _retired = true;
1726 }
1727 };
1729 class G1ParScanThreadState : public StackObj {
1730 protected:
1731 G1CollectedHeap* _g1h;
1732 RefToScanQueue* _refs;
1733 DirtyCardQueue _dcq;
1734 G1SATBCardTableModRefBS* _ct_bs;
1735 G1RemSet* _g1_rem;
1737 G1ParGCAllocBuffer _surviving_alloc_buffer;
1738 G1ParGCAllocBuffer _tenured_alloc_buffer;
1739 G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
1740 ageTable _age_table;
1742 G1ParScanClosure _scanner;
1744 size_t _alloc_buffer_waste;
1745 size_t _undo_waste;
1747 OopsInHeapRegionClosure* _evac_failure_cl;
1749 int _hash_seed;
1750 uint _queue_num;
1752 size_t _term_attempts;
1754 double _start;
1755 double _start_strong_roots;
1756 double _strong_roots_time;
1757 double _start_term;
1758 double _term_time;
1760 // Map from young-age-index (0 == not young, 1 is youngest) to
1761 // surviving words. The base pointer is what we get back from the malloc call.
1762 size_t* _surviving_young_words_base;
1763 // This points into the array, as we use the first few entries for padding.
1764 size_t* _surviving_young_words;
1766 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
1768 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1770 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1772 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1773 G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }
1775 template <class T> inline void immediate_rs_update(HeapRegion* from, T* p, int tid);
1777 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1778 // If the new value of the field points back into the same region, or the
1779 // region is to-space (a survivor region), we don't need the RSet update.
1780 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1781 size_t card_index = ctbs()->index_for(p);
1782 // If the card hasn't been added to the buffer, do it.
1783 if (ctbs()->mark_card_deferred(card_index)) {
1784 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1785 }
1786 }
1787 }
1789 public:
1790 G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
1792 ~G1ParScanThreadState() {
1793 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
1794 }
1796 RefToScanQueue* refs() { return _refs; }
1797 ageTable* age_table() { return &_age_table; }
1799 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1800 return _alloc_buffers[purpose];
1801 }
1803 size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
1804 size_t undo_waste() const { return _undo_waste; }
1806 #ifdef ASSERT
1807 bool verify_ref(narrowOop* ref) const;
1808 bool verify_ref(oop* ref) const;
1809 bool verify_task(StarTask ref) const;
1810 #endif // ASSERT
1812 template <class T> void push_on_queue(T* ref) {
1813 assert(verify_ref(ref), "sanity");
1814 refs()->push(ref);
1815 }
1817 template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
1819 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1820 HeapWord* obj = NULL;
1821 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
1822 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
1823 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1824 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1825 alloc_buf->retire(false /* end_of_gc */, false /* retain */);
1827 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
1828 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1829 // Otherwise.
1830 alloc_buf->set_word_size(gclab_word_size);
1831 alloc_buf->set_buf(buf);
1833 obj = alloc_buf->allocate(word_sz);
1834 assert(obj != NULL, "buffer was definitely big enough...");
1835 } else {
1836 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1837 }
1838 return obj;
1839 }
1841 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1842 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1843 if (obj != NULL) return obj;
1844 return allocate_slow(purpose, word_sz);
1845 }
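// Note on the two methods above: allocate() is the fast path and simply
// bump-allocates from the current buffer for the given purpose;
// allocate_slow() either retires and refills that buffer (when the request is
// small relative to the desired buffer size, per the ParallelGCBufferWastePct
// test) or, for larger requests, allocates the object directly outside any
// buffer.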
1847 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1848 if (alloc_buffer(purpose)->contains(obj)) {
1849 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1850 "should contain whole object");
1851 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1852 } else {
1853 CollectedHeap::fill_with_object(obj, word_sz);
1854 add_to_undo_waste(word_sz);
1855 }
1856 }
1858 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1859 _evac_failure_cl = evac_failure_cl;
1860 }
1861 OopsInHeapRegionClosure* evac_failure_closure() {
1862 return _evac_failure_cl;
1863 }
1865 int* hash_seed() { return &_hash_seed; }
1866 uint queue_num() { return _queue_num; }
1868 size_t term_attempts() const { return _term_attempts; }
1869 void note_term_attempt() { _term_attempts++; }
1871 void start_strong_roots() {
1872 _start_strong_roots = os::elapsedTime();
1873 }
1874 void end_strong_roots() {
1875 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
1876 }
1877 double strong_roots_time() const { return _strong_roots_time; }
1879 void start_term_time() {
1880 note_term_attempt();
1881 _start_term = os::elapsedTime();
1882 }
1883 void end_term_time() {
1884 _term_time += (os::elapsedTime() - _start_term);
1885 }
1886 double term_time() const { return _term_time; }
1888 double elapsed_time() const {
1889 return os::elapsedTime() - _start;
1890 }
1892 static void
1893 print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
1894 void
1895 print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
1897 size_t* surviving_young_words() {
1898 // We add on to hide entry 0 which accumulates surviving words for
1899 // age -1 regions (i.e. non-young ones)
1900 return _surviving_young_words;
1901 }
1903 void retire_alloc_buffers() {
1904 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1905 size_t waste = _alloc_buffers[ap]->words_remaining();
1906 add_to_alloc_buffer_waste(waste);
1907 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
1908 true /* end_of_gc */,
1909 false /* retain */);
1910 }
1911 }
1912 private:
1913 #define G1_PARTIAL_ARRAY_MASK 0x2
1915 inline bool has_partial_array_mask(oop* ref) const {
1916 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
1917 }
1919 // We never encode partial array oops as narrowOop*, so return false immediately.
1920 // This allows the compiler to create optimized code when popping references from
1921 // the work queue.
1922 inline bool has_partial_array_mask(narrowOop* ref) const {
1923 assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
1924 return false;
1925 }
1927 // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
1928 // We always encode partial arrays as regular oops, to allow the
1929 // specialization of has_partial_array_mask() for narrowOops above.
1930 // This means that unintentional use of this method with narrowOops is caught
1931 // by the compiler.
1932 inline oop* set_partial_array_mask(oop obj) const {
1933 assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
1934 return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
1935 }
1937 inline oop clear_partial_array_mask(oop* ref) const {
1938 return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
1939 }
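// For illustration only: a hypothetical round trip through the mask helpers
// above (big_array stands for some objArray being split into chunks):
//
//   oop  big_array = ...;
//   oop* masked    = set_partial_array_mask(big_array);
//   assert(has_partial_array_mask(masked), "mask must be detectable");
//   oop  unmasked  = clear_partial_array_mask(masked);
//   assert(unmasked == big_array, "round trip must recover the array");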
1941 inline void do_oop_partial_array(oop* p);
1943 // This method is applied to the fields of the objects that have just been copied.
1944 template <class T> void do_oop_evac(T* p, HeapRegion* from) {
1945 assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
1946 "Reference should not be NULL here as such are never pushed to the task queue.");
1947 oop obj = oopDesc::load_decode_heap_oop_not_null(p);
1949 // Although we never intentionally push references outside of the collection
1950 // set, (benign) races in the claim mechanism during RSet scanning mean that
1951 // more than one thread might claim the same card, so the same card may be
1952 // processed multiple times. Hence we redo this check here.
1953 if (_g1h->in_cset_fast_test(obj)) {
1954 oop forwardee;
1955 if (obj->is_forwarded()) {
1956 forwardee = obj->forwardee();
1957 } else {
1958 forwardee = copy_to_survivor_space(obj);
1959 }
1960 assert(forwardee != NULL, "forwardee should not be NULL");
1961 oopDesc::encode_store_heap_oop(p, forwardee);
1962 }
1964 assert(obj != NULL, "Must be");
1965 update_rs(from, p, queue_num());
1966 }
1967 public:
1969 oop copy_to_survivor_space(oop const obj);
1971 template <class T> inline void deal_with_reference(T* ref_to_scan);
1973 inline void deal_with_reference(StarTask ref);
1975 public:
1976 void trim_queue();
1977 };
1979 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP