Thu, 15 Apr 2010 18:45:30 -0400
6939027: G1: assertion failure during the concurrent phase of cleanup
Summary: The outgoing region map is not maintained properly and causes an assert failure. Given that we don't actually use it, I'm removing it. I'm piggy-backing a small change on this which removes a message that is printed before a Full GC when DisableExplicitGC is set.
Reviewed-by: apetrusenko, ysr
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 // A "G1CollectedHeap" is an implementation of a java heap for HotSpot.
26 // It uses the "Garbage First" heap organization and algorithm, which
27 // may combine concurrent marking with parallel, incremental compaction of
28 // heap subsets that will yield large amounts of garbage.
30 class HeapRegion;
31 class HeapRegionSeq;
32 class PermanentGenerationSpec;
33 class GenerationSpec;
34 class OopsInHeapRegionClosure;
35 class G1ScanHeapEvacClosure;
36 class ObjectClosure;
37 class SpaceClosure;
38 class CompactibleSpaceClosure;
39 class Space;
40 class G1CollectorPolicy;
41 class GenRemSet;
42 class G1RemSet;
43 class HeapRegionRemSetIterator;
44 class ConcurrentMark;
45 class ConcurrentMarkThread;
46 class ConcurrentG1Refine;
47 class ConcurrentZFThread;
49 // If you want to accumulate detailed statistics on work queues,
50 // turn this on.
51 #define G1_DETAILED_STATS 0
53 #if G1_DETAILED_STATS
54 # define IF_G1_DETAILED_STATS(code) code
55 #else
56 # define IF_G1_DETAILED_STATS(code)
57 #endif
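// Illustrative usage sketch (not in the original): counters guarded by this
// macro compile to nothing when G1_DETAILED_STATS is 0, e.g.
//   IF_G1_DETAILED_STATS(_pushes++;)
// for the per-queue counters declared in G1ParScanThreadState below.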
59 typedef GenericTaskQueue<StarTask> RefToScanQueue;
60 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
62 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
63 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
65 enum G1GCThreadGroups {
66 G1CRGroup = 0,
67 G1ZFGroup = 1,
68 G1CMGroup = 2,
69 G1CLGroup = 3
70 };
72 enum GCAllocPurpose {
73 GCAllocForTenured,
74 GCAllocForSurvived,
75 GCAllocPurposeCount
76 };
78 class YoungList : public CHeapObj {
79 private:
80 G1CollectedHeap* _g1h;
82 HeapRegion* _head;
84 HeapRegion* _scan_only_head;
85 HeapRegion* _scan_only_tail;
86 size_t _length;
87 size_t _scan_only_length;
89 size_t _last_sampled_rs_lengths;
90 size_t _sampled_rs_lengths;
91 HeapRegion* _curr;
92 HeapRegion* _curr_scan_only;
94 HeapRegion* _survivor_head;
95 HeapRegion* _survivor_tail;
96 size_t _survivor_length;
98 void empty_list(HeapRegion* list);
100 public:
101 YoungList(G1CollectedHeap* g1h);
103 void push_region(HeapRegion* hr);
104 void add_survivor_region(HeapRegion* hr);
105 HeapRegion* pop_region();
106 void empty_list();
107 bool is_empty() { return _length == 0; }
108 size_t length() { return _length; }
109 size_t scan_only_length() { return _scan_only_length; }
110 size_t survivor_length() { return _survivor_length; }
112 void rs_length_sampling_init();
113 bool rs_length_sampling_more();
114 void rs_length_sampling_next();
116 void reset_sampled_info() {
117 _last_sampled_rs_lengths = 0;
118 }
119 size_t sampled_rs_lengths() { return _last_sampled_rs_lengths; }
121 // for development purposes
122 void reset_auxilary_lists();
123 HeapRegion* first_region() { return _head; }
124 HeapRegion* first_scan_only_region() { return _scan_only_head; }
125 HeapRegion* first_survivor_region() { return _survivor_head; }
126 HeapRegion* last_survivor_region() { return _survivor_tail; }
127 HeapRegion* par_get_next_scan_only_region() {
128 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
129 HeapRegion* ret = _curr_scan_only;
130 if (ret != NULL)
131 _curr_scan_only = ret->get_next_young_region();
132 return ret;
133 }
135 // debugging
136 bool check_list_well_formed();
137 bool check_list_empty(bool ignore_scan_only_list,
138 bool check_sample = true);
139 void print();
140 };
142 class RefineCardTableEntryClosure;
143 class G1CollectedHeap : public SharedHeap {
144 friend class VM_G1CollectForAllocation;
145 friend class VM_GenCollectForPermanentAllocation;
146 friend class VM_G1CollectFull;
147 friend class VM_G1IncCollectionPause;
148 friend class VMStructs;
150 // Closures used in implementation.
151 friend class G1ParCopyHelper;
152 friend class G1IsAliveClosure;
153 friend class G1EvacuateFollowersClosure;
154 friend class G1ParScanThreadState;
155 friend class G1ParScanClosureSuper;
156 friend class G1ParEvacuateFollowersClosure;
157 friend class G1ParTask;
158 friend class G1FreeGarbageRegionClosure;
159 friend class RefineCardTableEntryClosure;
160 friend class G1PrepareCompactClosure;
161 friend class RegionSorter;
162 friend class CountRCClosure;
163 friend class EvacPopObjClosure;
164 friend class G1ParCleanupCTTask;
166 // Other related classes.
167 friend class G1MarkSweep;
169 private:
170 // The one and only G1CollectedHeap, so static functions can find it.
171 static G1CollectedHeap* _g1h;
173 static size_t _humongous_object_threshold_in_words;
175 // Storage for the G1 heap (excludes the permanent generation).
176 VirtualSpace _g1_storage;
177 MemRegion _g1_reserved;
179 // The part of _g1_storage that is currently committed.
180 MemRegion _g1_committed;
182 // The maximum part of _g1_storage that has ever been committed.
183 MemRegion _g1_max_committed;
185 // The number of regions that are completely free.
186 size_t _free_regions;
188 // The number of regions we could create by expansion.
189 size_t _expansion_regions;
191 // Return the number of free regions in the heap (by direct counting.)
192 size_t count_free_regions();
193 // Return the number of free regions on the free and unclean lists.
194 size_t count_free_regions_list();
196 // The block offset table for the G1 heap.
197 G1BlockOffsetSharedArray* _bot_shared;
199 // Move all of the regions off the free lists, then rebuild those free
200 // lists, before and after full GC.
201 void tear_down_region_lists();
202 void rebuild_region_lists();
203 // This sets all non-empty regions to need zero-fill (which they will if
204 // they are empty after full collection.)
205 void set_used_regions_to_need_zero_fill();
207 // The sequence of all heap regions in the heap.
208 HeapRegionSeq* _hrs;
210 // The region from which normal-sized objects are currently being
211 // allocated. May be NULL.
212 HeapRegion* _cur_alloc_region;
214 // Postcondition: cur_alloc_region == NULL.
215 void abandon_cur_alloc_region();
216 void abandon_gc_alloc_regions();
218 // The to-space memory regions into which objects are being copied during
219 // a GC.
220 HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
221 size_t _gc_alloc_region_counts[GCAllocPurposeCount];
222 // These are the regions, one per GCAllocPurpose, that are half-full
223 // at the end of a collection and that we want to reuse during the
224 // next collection.
225 HeapRegion* _retained_gc_alloc_regions[GCAllocPurposeCount];
226 // This specifies whether we will keep the last half-full region at
227 // the end of a collection so that it can be reused during the next
228 // collection (this is specified per GCAllocPurpose)
229 bool _retain_gc_alloc_region[GCAllocPurposeCount];
231 // A list of the regions that have been set to be alloc regions in the
232 // current collection.
233 HeapRegion* _gc_alloc_region_list;
235 // When called by a par thread, requires par_alloc_during_gc_lock() to be held.
236 void push_gc_alloc_region(HeapRegion* hr);
238 // This should only be called single-threaded. Undeclares all GC alloc
239 // regions.
240 void forget_alloc_region_list();
242 // Should be used to set an alloc region, because there's other
243 // associated bookkeeping.
244 void set_gc_alloc_region(int purpose, HeapRegion* r);
246 // Check well-formedness of alloc region list.
247 bool check_gc_alloc_regions();
249 // Outside of GC pauses, the number of bytes used in all regions other
250 // than the current allocation region.
251 size_t _summary_bytes_used;
253 // This is used for a quick test on whether a reference points into
254 // the collection set or not. Basically, we have an array, with one
255 // byte per region, and that byte denotes whether the corresponding
256 // region is in the collection set or not. The entry corresponding to
257 // the bottom of the heap, i.e., region 0, is pointed to by
258 // _in_cset_fast_test_base. The _in_cset_fast_test field has been
259 // biased so that it actually points to address 0 of the address
260 // space, to make the test as fast as possible (we can simply shift
261 // the address to index into it, instead of having to subtract the
262 // bottom of the heap from the address before shifting it; basically
263 // it works in the same way the card table works).
264 bool* _in_cset_fast_test;
266 // The allocated array used for the fast test on whether a reference
267 // points into the collection set or not. This field is also used to
268 // free the array.
269 bool* _in_cset_fast_test_base;
271 // The length of the _in_cset_fast_test_base array.
272 size_t _in_cset_fast_test_length;
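// Illustrative sketch of the bias described above (not in the original;
// "heap_bottom" is a hypothetical name for the start of the reserved heap):
//   _in_cset_fast_test =
//     _in_cset_fast_test_base - ((uintptr_t) heap_bottom >> HeapRegion::LogOfHRGrainBytes);
// so a lookup reduces to a single shift and index,
//   _in_cset_fast_test[(uintptr_t) addr >> HeapRegion::LogOfHRGrainBytes]
// which is exactly what in_cset_fast_test() below does.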
274 volatile unsigned _gc_time_stamp;
276 size_t* _surviving_young_words;
278 void setup_surviving_young_words();
279 void update_surviving_young_words(size_t* surv_young_words);
280 void cleanup_surviving_young_words();
282 protected:
284 // Returns "true" iff none of the gc alloc regions have any allocations
285 // since the last call to "save_marks".
286 bool all_alloc_regions_no_allocs_since_save_marks();
287 // Perform finalization stuff on all allocation regions.
288 void retire_all_alloc_regions();
290 // The number of regions allocated to hold humongous objects.
291 int _num_humongous_regions;
292 YoungList* _young_list;
294 // The current policy object for the collector.
295 G1CollectorPolicy* _g1_policy;
297 // Parallel allocation lock to protect the current allocation region.
298 Mutex _par_alloc_during_gc_lock;
299 Mutex* par_alloc_during_gc_lock() { return &_par_alloc_during_gc_lock; }
301 // If possible/desirable, allocate a new HeapRegion for normal object
302 // allocation sufficient for an allocation of the given "word_size".
303 // If "do_expand" is true, will attempt to expand the heap if necessary
304 // to satisfy the request. If "zero_filled" is true, requires a
305 // zero-filled region.
306 // (Returning NULL will trigger a GC.)
307 virtual HeapRegion* newAllocRegion_work(size_t word_size,
308 bool do_expand,
309 bool zero_filled);
311 virtual HeapRegion* newAllocRegion(size_t word_size,
312 bool zero_filled = true) {
313 return newAllocRegion_work(word_size, false, zero_filled);
314 }
315 virtual HeapRegion* newAllocRegionWithExpansion(int purpose,
316 size_t word_size,
317 bool zero_filled = true);
319 // Attempt to allocate an object of the given (very large) "word_size".
320 // Returns "NULL" on failure.
321 virtual HeapWord* humongousObjAllocate(size_t word_size);
323 // If possible, allocate a block of the given word_size, else return "NULL".
324 // Returning NULL will trigger GC or heap expansion.
325 // These two methods have rather awkward pre- and
326 // post-conditions. If they are called outside a safepoint, then
327 // they assume that the caller is holding the heap lock. Upon return
328 // they release the heap lock, if they are returning a non-NULL
329 // value. attempt_allocation_slow() also dirties the cards of a
330 // newly-allocated young region after it releases the heap
331 // lock. This change in interface was the neatest way to achieve
332 // this card dirtying without affecting mem_allocate(), which is a
333 // more frequently called method. We tried two or three different
334 // approaches, but they were even more hacky.
335 HeapWord* attempt_allocation(size_t word_size,
336 bool permit_collection_pause = true);
338 HeapWord* attempt_allocation_slow(size_t word_size,
339 bool permit_collection_pause = true);
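// Hypothetical caller sketch for the locking contract described above (the
// surrounding code is illustrative only, not taken from this file):
//   Heap_lock->lock();
//   HeapWord* result = attempt_allocation(word_size);
//   if (result == NULL) {
//     // the heap lock is still held here; the caller may fall back to a
//     // GC or heap expansion before unlocking
//   } else {
//     // the heap lock was already released by attempt_allocation()
//   }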
341 // Allocate blocks during garbage collection. Will ensure an
342 // allocation region, either by picking one or expanding the
343 // heap, and then allocate a block of the given size. The block
344 // may not be humongous - it must fit into a single heap region.
345 HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
346 HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
348 HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
349 HeapRegion* alloc_region,
350 bool par,
351 size_t word_size);
353 // Ensure that no further allocations can happen in "r", bearing in mind
354 // that parallel threads might be attempting allocations.
355 void par_allocate_remaining_space(HeapRegion* r);
357 // Retires an allocation region when it is full or at the end of a
358 // GC pause.
359 void retire_alloc_region(HeapRegion* alloc_region, bool par);
361 // Helper function for two callbacks below.
362 // "full", if true, indicates that the GC is for a System.gc() request,
363 // and should collect the entire heap. If "clear_all_soft_refs" is true,
364 // all soft references are cleared during the GC. If "full" is false,
365 // "word_size" describes the allocation that the GC should
366 // attempt (at least) to satisfy.
367 void do_collection(bool full, bool clear_all_soft_refs,
368 size_t word_size);
370 // Callback from VM_G1CollectFull operation.
371 // Perform a full collection.
372 void do_full_collection(bool clear_all_soft_refs);
374 // Resize the heap if necessary after a full collection. If this is
375 // after a collect-for-allocation, "word_size" is the allocation size,
376 // and will be considered part of the used portion of the heap.
377 void resize_if_necessary_after_full_collection(size_t word_size);
379 // Callback from VM_G1CollectForAllocation operation.
380 // This function does everything necessary/possible to satisfy a
381 // failed allocation request (including collection, expansion, etc.)
382 HeapWord* satisfy_failed_allocation(size_t word_size);
384 // Attempt to expand the heap sufficiently
385 // to support an allocation of the given "word_size". If
386 // successful, perform the allocation and return the address of the
387 // allocated block, or else "NULL".
388 virtual HeapWord* expand_and_allocate(size_t word_size);
390 public:
391 // Expand the garbage-first heap by at least the given size (in bytes!).
392 // (Rounds up to a HeapRegion boundary.)
393 virtual void expand(size_t expand_bytes);
395 // Do anything common to GC's.
396 virtual void gc_prologue(bool full);
397 virtual void gc_epilogue(bool full);
399 // We register a region with the fast "in collection set" test. We
400 // simply set to true the array slot corresponding to this region.
401 void register_region_with_in_cset_fast_test(HeapRegion* r) {
402 assert(_in_cset_fast_test_base != NULL, "sanity");
403 assert(r->in_collection_set(), "invariant");
404 int index = r->hrs_index();
405 assert(0 <= (size_t) index && (size_t) index < _in_cset_fast_test_length,
406 "invariant");
407 assert(!_in_cset_fast_test_base[index], "invariant");
408 _in_cset_fast_test_base[index] = true;
409 }
411 // This is a fast test on whether a reference points into the
412 // collection set or not. It does not assume that the reference
413 // points into the heap; if it doesn't, it will return false.
414 bool in_cset_fast_test(oop obj) {
415 assert(_in_cset_fast_test != NULL, "sanity");
416 if (_g1_committed.contains((HeapWord*) obj)) {
417 // no need to subtract the bottom of the heap from obj,
418 // _in_cset_fast_test is biased
419 size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
420 bool ret = _in_cset_fast_test[index];
421 // let's make sure the result is consistent with what the slower
422 // test returns
423 assert( ret || !obj_in_cs(obj), "sanity");
424 assert(!ret || obj_in_cs(obj), "sanity");
425 return ret;
426 } else {
427 return false;
428 }
429 }
431 protected:
433 // Shrink the garbage-first heap by at most the given size (in bytes!).
434 // (Rounds down to a HeapRegion boundary.)
435 virtual void shrink(size_t expand_bytes);
436 void shrink_helper(size_t expand_bytes);
438 // Do an incremental collection: identify a collection set, and evacuate
439 // its live objects elsewhere.
440 virtual void do_collection_pause();
442 // The guts of the incremental collection pause, executed by the vm
443 // thread.
444 virtual void do_collection_pause_at_safepoint();
446 // Actually do the work of evacuating the collection set.
447 virtual void evacuate_collection_set();
449 // If this is an appropriate right time, do a collection pause.
450 // The "word_size" argument, if non-zero, indicates the size of an
451 // allocation request that is prompting this query.
452 void do_collection_pause_if_appropriate(size_t word_size);
454 // The g1 remembered set of the heap.
455 G1RemSet* _g1_rem_set;
456 // And its mod ref barrier set, used to track updates for the above.
457 ModRefBarrierSet* _mr_bs;
459 // A set of cards that cover the objects for which the Rsets should be updated
460 // concurrently after the collection.
461 DirtyCardQueueSet _dirty_card_queue_set;
463 // The Heap Region Rem Set Iterator.
464 HeapRegionRemSetIterator** _rem_set_iterator;
466 // The closure used to refine a single card.
467 RefineCardTableEntryClosure* _refine_cte_cl;
469 // A function to check the consistency of dirty card logs.
470 void check_ct_logs_at_safepoint();
472 // After a collection pause, make the regions in the CS into free
473 // regions.
474 void free_collection_set(HeapRegion* cs_head);
476 // Applies "scan_non_heap_roots" to roots outside the heap,
477 // "scan_rs" to roots inside the heap (having done "set_region" to
478 // indicate the region in which the root resides), and does "scan_perm"
479 // (setting the generation to the perm generation.) If "scan_rs" is
480 // NULL, then this step is skipped. The "worker_i"
481 // param is for use with parallel roots processing, and should be
482 // the "i" of the calling parallel worker thread's work(i) function.
483 // In the sequential case this param will be ignored.
484 void g1_process_strong_roots(bool collecting_perm_gen,
485 SharedHeap::ScanningOption so,
486 OopClosure* scan_non_heap_roots,
487 OopsInHeapRegionClosure* scan_rs,
488 OopsInHeapRegionClosure* scan_so,
489 OopsInGenClosure* scan_perm,
490 int worker_i);
492 void scan_scan_only_set(OopsInHeapRegionClosure* oc,
493 int worker_i);
494 void scan_scan_only_region(HeapRegion* hr,
495 OopsInHeapRegionClosure* oc,
496 int worker_i);
498 // Apply "blk" to all the weak roots of the system. These include
499 // JNI weak roots, the code cache, system dictionary, symbol table,
500 // string table, and referents of reachable weak refs.
501 void g1_process_weak_roots(OopClosure* root_closure,
502 OopClosure* non_root_closure);
504 // Invoke "save_marks" on all heap regions.
505 void save_marks();
507 // Free a heap region.
508 void free_region(HeapRegion* hr);
509 // A component of "free_region", exposed for 'batching'.
510 // All the params after "hr" are out params: the used bytes of the freed
511 // region(s), the number of H regions cleared, the number of regions
512 // freed, and pointers to the head and tail of a list of freed contig
513 // regions, linked through the "next_on_unclean_list" field.
514 void free_region_work(HeapRegion* hr,
515 size_t& pre_used,
516 size_t& cleared_h,
517 size_t& freed_regions,
518 UncleanRegionList* list,
519 bool par = false);
522 // The concurrent marker (and the thread it runs in.)
523 ConcurrentMark* _cm;
524 ConcurrentMarkThread* _cmThread;
525 bool _mark_in_progress;
527 // The concurrent refiner.
528 ConcurrentG1Refine* _cg1r;
530 // The concurrent zero-fill thread.
531 ConcurrentZFThread* _czft;
533 // The parallel task queues
534 RefToScanQueueSet *_task_queues;
536 // True iff an evacuation has failed in the current collection.
537 bool _evacuation_failed;
539 // Set the attribute indicating whether evacuation has failed in the
540 // current collection.
541 void set_evacuation_failed(bool b) { _evacuation_failed = b; }
543 // Failed evacuations cause some logical from-space objects to have
544 // forwarding pointers to themselves. Reset them.
545 void remove_self_forwarding_pointers();
547 // When one is non-null, so is the other. Together, each pair is
548 // an object with a preserved mark, and its mark value.
549 GrowableArray<oop>* _objs_with_preserved_marks;
550 GrowableArray<markOop>* _preserved_marks_of_objs;
552 // Preserve the mark of "obj", if necessary, in preparation for its mark
553 // word being overwritten with a self-forwarding-pointer.
554 void preserve_mark_if_necessary(oop obj, markOop m);
556 // The stack of evac-failure objects left to be scanned.
557 GrowableArray<oop>* _evac_failure_scan_stack;
558 // The closure to apply to evac-failure objects.
560 OopsInHeapRegionClosure* _evac_failure_closure;
561 // Set the field above.
562 void
563 set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_closure) {
564 _evac_failure_closure = evac_failure_closure;
565 }
567 // Push "obj" on the scan stack.
568 void push_on_evac_failure_scan_stack(oop obj);
569 // Process scan stack entries until the stack is empty.
570 void drain_evac_failure_scan_stack();
571 // True iff an invocation of "drain_evac_failure_scan_stack" is in progress; to
572 // prevent unnecessary recursion.
573 bool _drain_in_progress;
575 // Do any necessary initialization for evacuation-failure handling.
576 // "cl" is the closure that will be used to process evac-failure
577 // objects.
578 void init_for_evac_failure(OopsInHeapRegionClosure* cl);
579 // Do any necessary cleanup for evacuation-failure handling data
580 // structures.
581 void finalize_for_evac_failure();
583 // An attempt to evacuate "obj" has failed; take necessary steps.
584 void handle_evacuation_failure(oop obj);
585 oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
586 void handle_evacuation_failure_common(oop obj, markOop m);
589 // Ensure that the relevant gc_alloc regions are set.
590 void get_gc_alloc_regions();
591 // We're done with GC alloc regions. We are going to tear down the
592 // gc alloc list and remove the gc alloc tag from all the regions on
593 // that list. However, we will also retain the last (i.e., the one
594 // that is half-full) GC alloc region, per GCAllocPurpose, for
595 // possible reuse during the next collection, provided
596 // _retain_gc_alloc_region[] indicates that it should be the
597 // case. Said regions are kept in the _retained_gc_alloc_regions[]
598 // array. If the parameter totally is set, we will not retain any
599 // regions, irrespective of what _retain_gc_alloc_region[]
600 // indicates.
601 void release_gc_alloc_regions(bool totally);
602 #ifndef PRODUCT
603 // Useful for debugging.
604 void print_gc_alloc_regions();
605 #endif // !PRODUCT
607 // ("Weak") Reference processing support
608 ReferenceProcessor* _ref_processor;
610 enum G1H_process_strong_roots_tasks {
611 G1H_PS_mark_stack_oops_do,
612 G1H_PS_refProcessor_oops_do,
613 // Leave this one last.
614 G1H_PS_NumElements
615 };
617 SubTasksDone* _process_strong_tasks;
619 // List of regions which require zero filling.
620 UncleanRegionList _unclean_region_list;
621 bool _unclean_regions_coming;
623 public:
624 void set_refine_cte_cl_concurrency(bool concurrent);
626 RefToScanQueue *task_queue(int i);
628 // A set of cards where updates happened during the GC
629 DirtyCardQueueSet& dirty_card_queue_set() { return _dirty_card_queue_set; }
631 // Create a G1CollectedHeap with the specified policy.
632 // Must call the initialize method afterwards.
633 // May not return if something goes wrong.
634 G1CollectedHeap(G1CollectorPolicy* policy);
636 // Initialize the G1CollectedHeap to have the initial and
637 // maximum sizes, permanent generation, and remembered and barrier sets
638 // specified by the policy object.
639 jint initialize();
641 void ref_processing_init();
643 void set_par_threads(int t) {
644 SharedHeap::set_par_threads(t);
645 _process_strong_tasks->set_par_threads(t);
646 }
648 virtual CollectedHeap::Name kind() const {
649 return CollectedHeap::G1CollectedHeap;
650 }
652 // The current policy object for the collector.
653 G1CollectorPolicy* g1_policy() const { return _g1_policy; }
655 // Adaptive size policy. No such thing for g1.
656 virtual AdaptiveSizePolicy* size_policy() { return NULL; }
658 // The rem set and barrier set.
659 G1RemSet* g1_rem_set() const { return _g1_rem_set; }
660 ModRefBarrierSet* mr_bs() const { return _mr_bs; }
662 // The rem set iterator.
663 HeapRegionRemSetIterator* rem_set_iterator(int i) {
664 return _rem_set_iterator[i];
665 }
667 HeapRegionRemSetIterator* rem_set_iterator() {
668 return _rem_set_iterator[0];
669 }
671 unsigned get_gc_time_stamp() {
672 return _gc_time_stamp;
673 }
675 void reset_gc_time_stamp() {
676 _gc_time_stamp = 0;
677 OrderAccess::fence();
678 }
680 void increment_gc_time_stamp() {
681 ++_gc_time_stamp;
682 OrderAccess::fence();
683 }
685 void iterate_dirty_card_closure(bool concurrent, int worker_i);
687 // The shared block offset table array.
688 G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
690 // Reference Processing accessor
691 ReferenceProcessor* ref_processor() { return _ref_processor; }
693 // Reserved (g1 only; super method includes perm), capacity and the used
694 // portion in bytes.
695 size_t g1_reserved_obj_bytes() const { return _g1_reserved.byte_size(); }
696 virtual size_t capacity() const;
697 virtual size_t used() const;
698 // This should be called when we're not holding the heap lock. The
699 // result might be a bit inaccurate.
700 size_t used_unlocked() const;
701 size_t recalculate_used() const;
702 #ifndef PRODUCT
703 size_t recalculate_used_regions() const;
704 #endif // PRODUCT
706 // These virtual functions do the actual allocation.
707 virtual HeapWord* mem_allocate(size_t word_size,
708 bool is_noref,
709 bool is_tlab,
710 bool* gc_overhead_limit_was_exceeded);
712 // Some heaps may offer a contiguous region for shared non-blocking
713 // allocation, via inlined code (by exporting the address of the top and
714 // end fields defining the extent of the contiguous allocation region.)
715 // But G1CollectedHeap doesn't yet support this.
717 // Return an estimate of the maximum allocation that could be performed
718 // without triggering any collection or expansion activity. In a
719 // generational collector, for example, this is probably the largest
720 // allocation that could be supported (without expansion) in the youngest
721 // generation. It is "unsafe" because no locks are taken; the result
722 // should be treated as an approximation, not a guarantee, for use in
723 // heuristic resizing decisions.
724 virtual size_t unsafe_max_alloc();
726 virtual bool is_maximal_no_gc() const {
727 return _g1_storage.uncommitted_size() == 0;
728 }
730 // The total number of regions in the heap.
731 size_t n_regions();
733 // The max number of regions in the heap.
734 size_t max_regions();
736 // The number of regions that are completely free.
737 size_t free_regions();
739 // The number of regions that are not completely free.
740 size_t used_regions() { return n_regions() - free_regions(); }
742 // True iff the ZF thread should run.
743 bool should_zf();
745 // The number of regions available for "regular" expansion.
746 size_t expansion_regions() { return _expansion_regions; }
748 #ifndef PRODUCT
749 bool regions_accounted_for();
750 bool print_region_accounting_info();
751 void print_region_counts();
752 #endif
754 HeapRegion* alloc_region_from_unclean_list(bool zero_filled);
755 HeapRegion* alloc_region_from_unclean_list_locked(bool zero_filled);
757 void put_region_on_unclean_list(HeapRegion* r);
758 void put_region_on_unclean_list_locked(HeapRegion* r);
760 void prepend_region_list_on_unclean_list(UncleanRegionList* list);
761 void prepend_region_list_on_unclean_list_locked(UncleanRegionList* list);
763 void set_unclean_regions_coming(bool b);
764 void set_unclean_regions_coming_locked(bool b);
765 // Wait for cleanup to be complete.
766 void wait_for_cleanup_complete();
767 // Like above, but assumes that the calling thread owns the Heap_lock.
768 void wait_for_cleanup_complete_locked();
770 // Return the head of the unclean list.
771 HeapRegion* peek_unclean_region_list_locked();
772 // Remove and return the head of the unclean list.
773 HeapRegion* pop_unclean_region_list_locked();
775 // List of regions which are zero filled and ready for allocation.
776 HeapRegion* _free_region_list;
777 // Number of elements on the free list.
778 size_t _free_region_list_size;
780 // If the head of the unclean list is ZeroFilled, move it to the free
781 // list.
782 bool move_cleaned_region_to_free_list_locked();
783 bool move_cleaned_region_to_free_list();
785 void put_free_region_on_list_locked(HeapRegion* r);
786 void put_free_region_on_list(HeapRegion* r);
788 // Remove and return the head element of the free list.
789 HeapRegion* pop_free_region_list_locked();
791 // If "zero_filled" is true, we first try the free list, then we try the
792 // unclean list, zero-filling the result. If "zero_filled" is false, we
793 // first try the unclean list, then the zero-filled list.
794 HeapRegion* alloc_free_region_from_lists(bool zero_filled);
796 // Verify the integrity of the region lists.
797 void remove_allocated_regions_from_lists();
798 bool verify_region_lists();
799 bool verify_region_lists_locked();
800 size_t unclean_region_list_length();
801 size_t free_region_list_length();
803 // Perform a collection of the heap; intended for use in implementing
804 // "System.gc". This probably implies as full a collection as the
805 // "CollectedHeap" supports.
806 virtual void collect(GCCause::Cause cause);
808 // The same as above but assume that the caller holds the Heap_lock.
809 void collect_locked(GCCause::Cause cause);
811 // This interface assumes that it's being called by the
812 // vm thread. It collects the heap assuming that the
813 // heap lock is already held and that we are executing in
814 // the context of the vm thread.
815 virtual void collect_as_vm_thread(GCCause::Cause cause);
817 // True iff an evacuation has failed in the most-recent collection.
818 bool evacuation_failed() { return _evacuation_failed; }
820 // Free a region if it is totally full of garbage. Returns the number of
821 // bytes freed (0 ==> didn't free it).
822 size_t free_region_if_totally_empty(HeapRegion *hr);
823 void free_region_if_totally_empty_work(HeapRegion *hr,
824 size_t& pre_used,
825 size_t& cleared_h_regions,
826 size_t& freed_regions,
827 UncleanRegionList* list,
828 bool par = false);
830 // If we've done free region work that yields the given changes, update
831 // the relevant global variables.
832 void finish_free_region_work(size_t pre_used,
833 size_t cleared_h_regions,
834 size_t freed_regions,
835 UncleanRegionList* list);
838 // Returns "TRUE" iff "p" points into the allocated area of the heap.
839 virtual bool is_in(const void* p) const;
841 // Return "TRUE" iff the given object address is within the collection
842 // set.
843 inline bool obj_in_cs(oop obj);
845 // Return "TRUE" iff the given object address is in the reserved
846 // region of g1 (excluding the permanent generation).
847 bool is_in_g1_reserved(const void* p) const {
848 return _g1_reserved.contains(p);
849 }
851 // Returns a MemRegion that corresponds to the space that has been
852 // committed in the heap
853 MemRegion g1_committed() {
854 return _g1_committed;
855 }
857 NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;)
859 // Dirty card table entries covering a list of young regions.
860 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
862 // This resets the card table to all zeros. It is used after
863 // a collection pause which used the card table to claim cards.
864 void cleanUpCardTable();
866 // Iteration functions.
868 // Iterate over all the ref-containing fields of all objects, calling
869 // "cl.do_oop" on each.
870 virtual void oop_iterate(OopClosure* cl) {
871 oop_iterate(cl, true);
872 }
873 void oop_iterate(OopClosure* cl, bool do_perm);
875 // Same as above, restricted to a memory region.
876 virtual void oop_iterate(MemRegion mr, OopClosure* cl) {
877 oop_iterate(mr, cl, true);
878 }
879 void oop_iterate(MemRegion mr, OopClosure* cl, bool do_perm);
881 // Iterate over all objects, calling "cl.do_object" on each.
882 virtual void object_iterate(ObjectClosure* cl) {
883 object_iterate(cl, true);
884 }
885 virtual void safe_object_iterate(ObjectClosure* cl) {
886 object_iterate(cl, true);
887 }
888 void object_iterate(ObjectClosure* cl, bool do_perm);
890 // Iterate over all objects allocated since the last collection, calling
891 // "cl.do_object" on each. The heap must have been initialized properly
892 // to support this function, or else this call will fail.
893 virtual void object_iterate_since_last_GC(ObjectClosure* cl);
895 // Iterate over all spaces in use in the heap, in ascending address order.
896 virtual void space_iterate(SpaceClosure* cl);
898 // Iterate over heap regions, in address order, terminating the
899 // iteration early if the "doHeapRegion" method returns "true".
900 void heap_region_iterate(HeapRegionClosure* blk);
902 // Iterate over heap regions starting with r (or the first region if "r"
903 // is NULL), in address order, terminating early if the "doHeapRegion"
904 // method returns "true".
905 void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk);
907 // As above but starting from the region at index idx.
908 void heap_region_iterate_from(int idx, HeapRegionClosure* blk);
910 HeapRegion* region_at(size_t idx);
912 // Divide the heap region sequence into "chunks" of some size (the number
913 // of regions divided by the number of parallel threads times some
914 // overpartition factor, currently 4). Assumes that this will be called
915 // in parallel by ParallelGCThreads worker threads with distinct worker
916 // ids in the range [0..max(ParallelGCThreads-1, 1)], that all parallel
917 // calls will use the same "claim_value", and that that claim value is
918 // different from the claim_value of any heap region before the start of
919 // the iteration. Applies "blk->doHeapRegion" to each of the regions, by
920 // attempting to claim the first region in each chunk, and, if
921 // successful, applying the closure to each region in the chunk (and
922 // setting the claim value of the second and subsequent regions of the
923 // chunk.) For now requires that "doHeapRegion" always returns "false",
924 // i.e., that a closure never attempt to abort a traversal.
925 void heap_region_par_iterate_chunked(HeapRegionClosure* blk,
926 int worker,
927 jint claim_value);
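// Worked example of the chunking above (illustrative numbers only): with
// n_regions() == 2048 and ParallelGCThreads == 4, the overpartition factor
// of 4 gives chunks of roughly 2048 / (4 * 4) == 128 regions; a worker
// claims a chunk by claiming the chunk's first region with "claim_value"
// and, if successful, applies "blk" to every region of that chunk.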
929 // It resets all the region claim values to the default.
930 void reset_heap_region_claim_values();
932 #ifdef ASSERT
933 bool check_heap_region_claim_values(jint claim_value);
934 #endif // ASSERT
936 // Iterate over the regions (if any) in the current collection set.
937 void collection_set_iterate(HeapRegionClosure* blk);
939 // As above but starting from region r
940 void collection_set_iterate_from(HeapRegion* r, HeapRegionClosure *blk);
942 // Returns the first (lowest address) compactible space in the heap.
943 virtual CompactibleSpace* first_compactible_space();
945 // A CollectedHeap will contain some number of spaces. This finds the
946 // space containing a given address, or else returns NULL.
947 virtual Space* space_containing(const void* addr) const;
949 // A G1CollectedHeap will contain some number of heap regions. This
950 // finds the region containing a given address, or else returns NULL.
951 HeapRegion* heap_region_containing(const void* addr) const;
953 // Like the above, but requires "addr" to be in the heap (to avoid a
954 // null-check), and unlike the above, may return a continuing humongous
955 // region.
956 HeapRegion* heap_region_containing_raw(const void* addr) const;
958 // A CollectedHeap is divided into a dense sequence of "blocks"; that is,
959 // each address in the (reserved) heap is a member of exactly
960 // one block. The defining characteristic of a block is that it is
961 // possible to find its size, and thus to progress forward to the next
962 // block. (Blocks may be of different sizes.) Thus, blocks may
963 // represent Java objects, or they might be free blocks in a
964 // free-list-based heap (or subheap), as long as the two kinds are
965 // distinguishable and the size of each is determinable.
967 // Returns the address of the start of the "block" that contains the
968 // address "addr". We say "blocks" instead of "object" since some heaps
969 // may not pack objects densely; a chunk may either be an object or a
970 // non-object.
971 virtual HeapWord* block_start(const void* addr) const;
973 // Requires "addr" to be the start of a chunk, and returns its size.
974 // "addr + size" is required to be the start of a new chunk, or the end
975 // of the active area of the heap.
976 virtual size_t block_size(const HeapWord* addr) const;
978 // Requires "addr" to be the start of a block, and returns "TRUE" iff
979 // the block is an object.
980 virtual bool block_is_obj(const HeapWord* addr) const;
982 // Does this heap support heap inspection? (+PrintClassHistogram)
983 virtual bool supports_heap_inspection() const { return true; }
985 // Section on thread-local allocation buffers (TLABs)
986 // See CollectedHeap for semantics.
988 virtual bool supports_tlab_allocation() const;
989 virtual size_t tlab_capacity(Thread* thr) const;
990 virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
991 virtual HeapWord* allocate_new_tlab(size_t size);
993 // Can a compiler initialize a new object without store barriers?
994 // This permission only extends from the creation of a new object
995 // via a TLAB up to the first subsequent safepoint. If such permission
996 // is granted for this heap type, the compiler promises to call
997 // defer_store_barrier() below on any slow path allocation of
998 // a new object for which such initializing store barriers will
999 // have been elided. G1, like CMS, allows this, but should be
1000 // ready to provide a compensating write barrier as necessary
1001 // if that storage came out of a non-young region. The efficiency
1002 // of this implementation depends crucially on being able to
1003 // answer very efficiently in constant time whether a piece of
1004 // storage in the heap comes from a young region or not.
1005 // See ReduceInitialCardMarks.
1006 virtual bool can_elide_tlab_store_barriers() const {
1007 // 6920090: Temporarily disabled, because of lingering
1008 // instabilities related to RICM with G1. In the
1009 // interim, the option ReduceInitialCardMarksForG1
1010 // below is left solely as a debugging device at least
1011 // until 6920109 fixes the instabilities.
1012 return ReduceInitialCardMarksForG1;
1013 }
1015 virtual bool card_mark_must_follow_store() const {
1016 return true;
1017 }
1019 bool is_in_young(oop obj) {
1020 HeapRegion* hr = heap_region_containing(obj);
1021 return hr != NULL && hr->is_young();
1022 }
1024 // We don't need barriers for initializing stores to objects
1025 // in the young gen: for the SATB pre-barrier, there is no
1026 // pre-value that needs to be remembered; for the remembered-set
1027 // update logging post-barrier, we don't maintain remembered set
1028 // information for young gen objects. Note that non-generational
1029 // G1 does not have any "young" objects, should not elide
1030 // the rs logging barrier, and so should always answer false below.
1031 // However, non-generational G1 (-XX:-G1Gen) appears to have
1032 // bit-rotted, so it was not tested below.
1033 virtual bool can_elide_initializing_store_barrier(oop new_obj) {
1034 // Re 6920090, 6920109 above.
1035 assert(ReduceInitialCardMarksForG1, "Else cannot be here");
1036 assert(G1Gen || !is_in_young(new_obj),
1037 "Non-generational G1 should never return true below");
1038 return is_in_young(new_obj);
1039 }
1041 // Can a compiler elide a store barrier when it writes
1042 // a permanent oop into the heap? Applies when the compiler
1043 // is storing x to the heap, where x->is_perm() is true.
1044 virtual bool can_elide_permanent_oop_store_barriers() const {
1045 // At least until perm gen collection is also G1-ified, at
1046 // which point this should return false.
1047 return true;
1048 }
1050 virtual bool allocs_are_zero_filled();
1052 // The boundary between a "large" and "small" array of primitives, in
1053 // words.
1054 virtual size_t large_typearray_limit();
1056 // Returns "true" iff the given word_size is "very large".
1057 static bool isHumongous(size_t word_size) {
1058 // Note this has to be strictly greater-than as the TLABs
1059 // are capped at the humongous threshold and we want to
1060 // ensure that we don't try to allocate a TLAB as
1061 // humongous and that we don't allocate a humongous
1062 // object in a TLAB.
1063 return word_size > _humongous_object_threshold_in_words;
1064 }
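// Illustrative only: _humongous_object_threshold_in_words is set up during
// heap initialization (not in this file); in G1 it is conventionally half a
// heap region, so with 1 MB regions any allocation larger than 512 KB worth
// of words would be treated as humongous.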
1066 // Update mod union table with the set of dirty cards.
1067 void updateModUnion();
1069 // Set the mod union bits corresponding to the given memRegion. Note
1070 // that this is always a safe operation, since it doesn't clear any
1071 // bits.
1072 void markModUnionRange(MemRegion mr);
1074 // Records the fact that a marking phase is no longer in progress.
1075 void set_marking_complete() {
1076 _mark_in_progress = false;
1077 }
1078 void set_marking_started() {
1079 _mark_in_progress = true;
1080 }
1081 bool mark_in_progress() {
1082 return _mark_in_progress;
1083 }
1085 // Return the maximum heap capacity.
1086 virtual size_t max_capacity() const;
1088 virtual jlong millis_since_last_gc();
1090 // Perform any cleanup actions necessary before allowing a verification.
1091 virtual void prepare_for_verify();
1093 // Perform verification.
1095 // use_prev_marking == true -> use "prev" marking information,
1096 // use_prev_marking == false -> use "next" marking information
1097 // NOTE: Only the "prev" marking information is guaranteed to be
1098 // consistent most of the time, so most calls to this should use
1099 // use_prev_marking == true. Currently, there is only one case where
1100 // this is called with use_prev_marking == false, which is to verify
1101 // the "next" marking information at the end of remark.
1102 void verify(bool allow_dirty, bool silent, bool use_prev_marking);
1104 // Override; it uses the "prev" marking information
1105 virtual void verify(bool allow_dirty, bool silent);
1106 // Default behavior by calling print(tty);
1107 virtual void print() const;
1108 // This calls print_on(st, PrintHeapAtGCExtended).
1109 virtual void print_on(outputStream* st) const;
1110 // If extended is true, it will print out information for all
1111 // regions in the heap by calling print_on_extended(st).
1112 virtual void print_on(outputStream* st, bool extended) const;
1113 virtual void print_on_extended(outputStream* st) const;
1115 virtual void print_gc_threads_on(outputStream* st) const;
1116 virtual void gc_threads_do(ThreadClosure* tc) const;
1118 // Override
1119 void print_tracing_info() const;
1121 // If "addr" is a pointer into the (reserved?) heap, returns a positive
1122 // number indicating the "arena" within the heap in which "addr" falls.
1123 // Or else returns 0.
1124 virtual int addr_to_arena_id(void* addr) const;
1126 // Convenience function to be used in situations where the heap type can be
1127 // asserted to be this type.
1128 static G1CollectedHeap* heap();
1130 void empty_young_list();
1131 bool should_set_young_locked();
1133 void set_region_short_lived_locked(HeapRegion* hr);
1134 // add appropriate methods for any other surv rate groups
1136 void young_list_rs_length_sampling_init() {
1137 _young_list->rs_length_sampling_init();
1138 }
1139 bool young_list_rs_length_sampling_more() {
1140 return _young_list->rs_length_sampling_more();
1141 }
1142 void young_list_rs_length_sampling_next() {
1143 _young_list->rs_length_sampling_next();
1144 }
1145 size_t young_list_sampled_rs_lengths() {
1146 return _young_list->sampled_rs_lengths();
1147 }
1149 size_t young_list_length() { return _young_list->length(); }
1150 size_t young_list_scan_only_length() {
1151 return _young_list->scan_only_length(); }
1153 HeapRegion* pop_region_from_young_list() {
1154 return _young_list->pop_region();
1155 }
1157 HeapRegion* young_list_first_region() {
1158 return _young_list->first_region();
1159 }
1161 // debugging
1162 bool check_young_list_well_formed() {
1163 return _young_list->check_list_well_formed();
1164 }
1165 bool check_young_list_empty(bool ignore_scan_only_list,
1166 bool check_sample = true);
1168 // *** Stuff related to concurrent marking. It's not clear to me that so
1169 // many of these need to be public.
1171 // The functions below are helper functions that a subclass of
1172 // "CollectedHeap" can use in the implementation of its virtual
1173 // functions.
1174 // This performs a concurrent marking of the live objects in a
1175 // bitmap off to the side.
1176 void doConcurrentMark();
1178 // This is called from the marksweep collector which then does
1179 // a concurrent mark and verifies that the results agree with
1180 // the stop the world marking.
1181 void checkConcurrentMark();
1182 void do_sync_mark();
1184 bool isMarkedPrev(oop obj) const;
1185 bool isMarkedNext(oop obj) const;
1187 // use_prev_marking == true -> use "prev" marking information,
1188 // use_prev_marking == false -> use "next" marking information
1189 bool is_obj_dead_cond(const oop obj,
1190 const HeapRegion* hr,
1191 const bool use_prev_marking) const {
1192 if (use_prev_marking) {
1193 return is_obj_dead(obj, hr);
1194 } else {
1195 return is_obj_ill(obj, hr);
1196 }
1197 }
1199 // Determine if an object is dead, given the object and also
1200 // the region to which the object belongs. An object is dead
1201 // iff a) it was not allocated since the last mark and b) it
1202 // is not marked.
1204 bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
1205 return
1206 !hr->obj_allocated_since_prev_marking(obj) &&
1207 !isMarkedPrev(obj);
1208 }
1210 // This is used when copying an object to survivor space.
1211 // If the object is marked live, then we mark the copy live.
1212 // If the object is allocated since the start of this mark
1213 // cycle, then we mark the copy live.
1214 // If the object has been around since the previous mark
1215 // phase, and hasn't been marked yet during this phase,
1216 // then we don't mark it, we just wait for the
1217 // current marking cycle to get to it.
1219 // This function returns true when an object has been
1220 // around since the previous marking and hasn't yet
1221 // been marked during this marking.
1223 bool is_obj_ill(const oop obj, const HeapRegion* hr) const {
1224 return
1225 !hr->obj_allocated_since_next_marking(obj) &&
1226 !isMarkedNext(obj);
1227 }
1229 // Determine if an object is dead, given only the object itself.
1230 // This will find the region to which the object belongs and
1231 // then call the region version of the same function.
1233 // Additionally, if it is in the permanent gen it isn't dead.
1234 // Additionally, if it is NULL it isn't dead.
1236 // use_prev_marking == true -> use "prev" marking information,
1237 // use_prev_marking == false -> use "next" marking information
1238 bool is_obj_dead_cond(const oop obj,
1239 const bool use_prev_marking) {
1240 if (use_prev_marking) {
1241 return is_obj_dead(obj);
1242 } else {
1243 return is_obj_ill(obj);
1244 }
1245 }
1247 bool is_obj_dead(const oop obj) {
1248 const HeapRegion* hr = heap_region_containing(obj);
1249 if (hr == NULL) {
1250 if (Universe::heap()->is_in_permanent(obj))
1251 return false;
1252 else if (obj == NULL) return false;
1253 else return true;
1254 }
1255 else return is_obj_dead(obj, hr);
1256 }
1258 bool is_obj_ill(const oop obj) {
1259 const HeapRegion* hr = heap_region_containing(obj);
1260 if (hr == NULL) {
1261 if (Universe::heap()->is_in_permanent(obj))
1262 return false;
1263 else if (obj == NULL) return false;
1264 else return true;
1265 }
1266 else return is_obj_ill(obj, hr);
1267 }
1269 // The following is just to alert the verification code
1270 // that a full collection has occurred and that the
1271 // remembered sets are no longer up to date.
1272 bool _full_collection;
1273 void set_full_collection() { _full_collection = true;}
1274 void clear_full_collection() {_full_collection = false;}
1275 bool full_collection() {return _full_collection;}
1277 ConcurrentMark* concurrent_mark() const { return _cm; }
1278 ConcurrentG1Refine* concurrent_g1_refine() const { return _cg1r; }
1280 // The dirty cards region list is used to record a subset of regions
1281 // whose cards need clearing. The list is populated during the
1282 // remembered set scanning and drained during the card table
1283 // cleanup. Although the methods are reentrant, population/draining
1284 // phases must not overlap. For synchronization purposes the last
1285 // element on the list points to itself.
1286 HeapRegion* _dirty_cards_region_list;
1287 void push_dirty_cards_region(HeapRegion* hr);
1288 HeapRegion* pop_dirty_cards_region();
1290 public:
1291 void stop_conc_gc_threads();
1293 // <NEW PREDICTION>
1295 double predict_region_elapsed_time_ms(HeapRegion* hr, bool young);
1296 void check_if_region_is_too_expensive(double predicted_time_ms);
1297 size_t pending_card_num();
1298 size_t max_pending_card_num();
1299 size_t cards_scanned();
1301 // </NEW PREDICTION>
1303 protected:
1304 size_t _max_heap_capacity;
1306 // debug_only(static void check_for_valid_allocation_state();)
1308 public:
1309 // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
1310 // MemoryService). In productization, we can make this assert false
1311 // to catch such places (as well as searching for calls to this...)
1312 static void g1_unimplemented();
1314 };
1316 #define use_local_bitmaps 1
1317 #define verify_local_bitmaps 0
1318 #define oop_buffer_length 256
1320 #ifndef PRODUCT
1321 class GCLabBitMap;
1322 class GCLabBitMapClosure: public BitMapClosure {
1323 private:
1324 ConcurrentMark* _cm;
1325 GCLabBitMap* _bitmap;
1327 public:
1328 GCLabBitMapClosure(ConcurrentMark* cm,
1329 GCLabBitMap* bitmap) {
1330 _cm = cm;
1331 _bitmap = bitmap;
1332 }
1334 virtual bool do_bit(size_t offset);
1335 };
1336 #endif // !PRODUCT
1338 class GCLabBitMap: public BitMap {
1339 private:
1340 ConcurrentMark* _cm;
1342 int _shifter;
1343 size_t _bitmap_word_covers_words;
1345 // beginning of the heap
1346 HeapWord* _heap_start;
1348 // this is the actual start of the GCLab
1349 HeapWord* _real_start_word;
1351 // this is the actual end of the GCLab
1352 HeapWord* _real_end_word;
1354 // this is the first word, possibly located before the actual start
1355 // of the GCLab, that corresponds to the first bit of the bitmap
1356 HeapWord* _start_word;
1358 // size of a GCLab in words
1359 size_t _gclab_word_size;
1361 static int shifter() {
1362 return MinObjAlignment - 1;
1363 }
1365 // how many heap words does a single bitmap word correspond to?
1366 static size_t bitmap_word_covers_words() {
1367 return BitsPerWord << shifter();
1368 }
1370 static size_t gclab_word_size() {
1371 return G1ParallelGCAllocBufferSize / HeapWordSize;
1372 }
1374 static size_t bitmap_size_in_bits() {
1375 size_t bits_in_bitmap = gclab_word_size() >> shifter();
1376 // We are going to ensure that the beginning of a word in this
1377 // bitmap also corresponds to the beginning of a word in the
1378 // global marking bitmap. To handle the case where a GCLab
1379 // starts from the middle of the bitmap, we need to add enough
1380 // space (i.e. up to a bitmap word) to ensure that we have
1381 // enough bits in the bitmap.
1382 return bits_in_bitmap + BitsPerWord - 1;
1383 }
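// Illustrative arithmetic (hypothetical numbers): if gclab_word_size() is
// 8K words and shifter() is 0, bits_in_bitmap is 8K; the extra
// BitsPerWord - 1 bits of slack (63 on a 64-bit VM) allow _start_word to be
// rounded down to a bitmap-word boundary in set_buffer() without the last
// GCLab word falling past the end of the bitmap.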
1384 public:
1385 GCLabBitMap(HeapWord* heap_start)
1386 : BitMap(bitmap_size_in_bits()),
1387 _cm(G1CollectedHeap::heap()->concurrent_mark()),
1388 _shifter(shifter()),
1389 _bitmap_word_covers_words(bitmap_word_covers_words()),
1390 _heap_start(heap_start),
1391 _gclab_word_size(gclab_word_size()),
1392 _real_start_word(NULL),
1393 _real_end_word(NULL),
1394 _start_word(NULL)
1395 {
1396 guarantee( size_in_words() >= bitmap_size_in_words(),
1397 "just making sure");
1398 }
1400 inline unsigned heapWordToOffset(HeapWord* addr) {
1401 unsigned offset = (unsigned) pointer_delta(addr, _start_word) >> _shifter;
1402 assert(offset < size(), "offset should be within bounds");
1403 return offset;
1404 }
1406 inline HeapWord* offsetToHeapWord(size_t offset) {
1407 HeapWord* addr = _start_word + (offset << _shifter);
1408 assert(_real_start_word <= addr && addr < _real_end_word, "invariant");
1409 return addr;
1410 }
1412 bool fields_well_formed() {
1413 bool ret1 = (_real_start_word == NULL) &&
1414 (_real_end_word == NULL) &&
1415 (_start_word == NULL);
1416 if (ret1)
1417 return true;
1419 bool ret2 = _real_start_word >= _start_word &&
1420 _start_word < _real_end_word &&
1421 (_real_start_word + _gclab_word_size) == _real_end_word &&
1422 (_start_word + _gclab_word_size + _bitmap_word_covers_words)
1423 > _real_end_word;
1424 return ret2;
1425 }
1427 inline bool mark(HeapWord* addr) {
1428 guarantee(use_local_bitmaps, "invariant");
1429 assert(fields_well_formed(), "invariant");
1431 if (addr >= _real_start_word && addr < _real_end_word) {
1432 assert(!isMarked(addr), "should not have already been marked");
1434 // first mark it on the bitmap
1435 at_put(heapWordToOffset(addr), true);
1437 return true;
1438 } else {
1439 return false;
1440 }
1441 }
1443 inline bool isMarked(HeapWord* addr) {
1444 guarantee(use_local_bitmaps, "invariant");
1445 assert(fields_well_formed(), "invariant");
1447 return at(heapWordToOffset(addr));
1448 }
1450 void set_buffer(HeapWord* start) {
1451 guarantee(use_local_bitmaps, "invariant");
1452 clear();
1454 assert(start != NULL, "invariant");
1455 _real_start_word = start;
1456 _real_end_word = start + _gclab_word_size;
1458 size_t diff =
1459 pointer_delta(start, _heap_start) % _bitmap_word_covers_words;
1460 _start_word = start - diff;
1462 assert(fields_well_formed(), "invariant");
1463 }
1465 #ifndef PRODUCT
1466 void verify() {
1467 // verify that the marks have been propagated
1468 GCLabBitMapClosure cl(_cm, this);
1469 iterate(&cl);
1470 }
1471 #endif // PRODUCT
1473 void retire() {
1474 guarantee(use_local_bitmaps, "invariant");
1475 assert(fields_well_formed(), "invariant");
1477 if (_start_word != NULL) {
1478 CMBitMap* mark_bitmap = _cm->nextMarkBitMap();
1480 // this means that the bitmap was set up for the GCLab
1481 assert(_real_start_word != NULL && _real_end_word != NULL, "invariant");
1483 mark_bitmap->mostly_disjoint_range_union(this,
1484 0, // always start from the start of the bitmap
1485 _start_word,
1486 size_in_words());
1487 _cm->grayRegionIfNecessary(MemRegion(_real_start_word, _real_end_word));
1489 #ifndef PRODUCT
1490 if (use_local_bitmaps && verify_local_bitmaps)
1491 verify();
1492 #endif // PRODUCT
1493 } else {
1494 assert(_real_start_word == NULL && _real_end_word == NULL, "invariant");
1495 }
1496 }
1498 static size_t bitmap_size_in_words() {
1499 return (bitmap_size_in_bits() + BitsPerWord - 1) / BitsPerWord;
1500 }
1501 };
1503 class G1ParGCAllocBuffer: public ParGCAllocBuffer {
1504 private:
1505 bool _retired;
1506 bool _during_marking;
1507 GCLabBitMap _bitmap;
1509 public:
1510 G1ParGCAllocBuffer() :
1511 ParGCAllocBuffer(G1ParallelGCAllocBufferSize / HeapWordSize),
1512 _during_marking(G1CollectedHeap::heap()->mark_in_progress()),
1513 _bitmap(G1CollectedHeap::heap()->reserved_region().start()),
1514 _retired(false)
1515 { }
1517 inline bool mark(HeapWord* addr) {
1518 guarantee(use_local_bitmaps, "invariant");
1519 assert(_during_marking, "invariant");
1520 return _bitmap.mark(addr);
1521 }
1523 inline void set_buf(HeapWord* buf) {
1524 if (use_local_bitmaps && _during_marking)
1525 _bitmap.set_buffer(buf);
1526 ParGCAllocBuffer::set_buf(buf);
1527 _retired = false;
1528 }
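// Retires the buffer at most once: flushes the local bitmap (if in use)
// and then delegates to ParGCAllocBuffer::retire().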
1530 inline void retire(bool end_of_gc, bool retain) {
1531 if (_retired)
1532 return;
1533 if (use_local_bitmaps && _during_marking) {
1534 _bitmap.retire();
1535 }
1536 ParGCAllocBuffer::retire(end_of_gc, retain);
1537 _retired = true;
1538 }
1539 };
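// Per-worker state used during an evacuation pause: the reference-to-scan
// queue (plus its overflow stack), per-purpose allocation buffers, the
// dirty card queue for deferred RSet updates, an age table, and timing /
// waste statistics.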
1541 class G1ParScanThreadState : public StackObj {
1542 protected:
1543 G1CollectedHeap* _g1h;
1544 RefToScanQueue* _refs;
1545 DirtyCardQueue _dcq;
1546 CardTableModRefBS* _ct_bs;
1547 G1RemSet* _g1_rem;
1549 typedef GrowableArray<StarTask> OverflowQueue;
1550 OverflowQueue* _overflowed_refs;
1552 G1ParGCAllocBuffer _alloc_buffers[GCAllocPurposeCount];
1553 ageTable _age_table;
1555 size_t _alloc_buffer_waste;
1556 size_t _undo_waste;
1558 OopsInHeapRegionClosure* _evac_failure_cl;
1559 G1ParScanHeapEvacClosure* _evac_cl;
1560 G1ParScanPartialArrayClosure* _partial_scan_cl;
1562 int _hash_seed;
1563 int _queue_num;
1565 int _term_attempts;
1566 #if G1_DETAILED_STATS
1567 int _pushes, _pops, _steals, _steal_attempts;
1568 int _overflow_pushes;
1569 #endif
1571 double _start;
1572 double _start_strong_roots;
1573 double _strong_roots_time;
1574 double _start_term;
1575 double _term_time;
1577 // Map from young-age-index (0 == not young, 1 is youngest) to
1578 // surviving words; the "_base" pointer is what the C-heap allocation returns.
1579 size_t* _surviving_young_words_base;
1580 // this points into the array, as we use the first few entries for padding
1581 size_t* _surviving_young_words;
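// Number of size_t elements in 64 bytes; used as the amount of padding
// at the start of the surviving-young-words array.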
1583 #define PADDING_ELEM_NUM (64 / sizeof(size_t))
1585 void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
1587 void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
1589 DirtyCardQueue& dirty_card_queue() { return _dcq; }
1590 CardTableModRefBS* ctbs() { return _ct_bs; }
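// Updates the remembered set right away via G1RemSet::par_write_ref(),
// unless 'from' is a survivor region.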
1592 template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
1593 if (!from->is_survivor()) {
1594 _g1_rem->par_write_ref(from, p, tid);
1595 }
1596 }
1598 template <class T> void deferred_rs_update(HeapRegion* from, T* p, int tid) {
1599 // If the new value of the field points into the same region, or if the
1600 // field itself is in a to-space (survivor) region, we don't need an RSet update.
1601 if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
1602 size_t card_index = ctbs()->index_for(p);
1603 // If the card hasn't been added to the buffer, do it.
1604 if (ctbs()->mark_card_deferred(card_index)) {
1605 dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
1606 }
1607 }
1608 }
1610 public:
1611 G1ParScanThreadState(G1CollectedHeap* g1h, int queue_num);
1613 ~G1ParScanThreadState() {
1614 FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
1615 }
1617 RefToScanQueue* refs() { return _refs; }
1618 OverflowQueue* overflowed_refs() { return _overflowed_refs; }
1619 ageTable* age_table() { return &_age_table; }
1621 G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
1622 return &_alloc_buffers[purpose];
1623 }
1625 size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
1626 size_t undo_waste() { return _undo_waste; }
1628 template <class T> void push_on_queue(T* ref) {
1629 assert(ref != NULL, "invariant");
1630 assert(has_partial_array_mask(ref) ||
1631 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(ref)), "invariant");
1632 #ifdef ASSERT
1633 if (has_partial_array_mask(ref)) {
1634 oop p = clear_partial_array_mask(ref);
1635 // Verify that we point into the CS
1636 assert(_g1h->obj_in_cs(p), "Should be in CS");
1637 }
1638 #endif
1639 if (!refs()->push(ref)) {
1640 overflowed_refs()->push(ref);
1641 IF_G1_DETAILED_STATS(note_overflow_push());
1642 } else {
1643 IF_G1_DETAILED_STATS(note_push());
1644 }
1645 }
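// Pops an entry from the local task queue into 'ref'; leaves 'ref' as a
// NULL task if the queue is empty.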
1647 void pop_from_queue(StarTask& ref) {
1648 if (refs()->pop_local(ref)) {
1649 assert((oop*)ref != NULL, "pop_local() returned true");
1650 assert(UseCompressedOops || !ref.is_narrow(), "Error");
1651 assert(has_partial_array_mask((oop*)ref) ||
1652 _g1h->is_in_g1_reserved(ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)ref)
1653 : oopDesc::load_decode_heap_oop((oop*)ref)),
1654 "invariant");
1655 IF_G1_DETAILED_STATS(note_pop());
1656 } else {
1657 StarTask null_task;
1658 ref = null_task;
1659 }
1660 }
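// Pops an entry from the thread-local overflow stack; the stack must be
// non-empty.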
1662 void pop_from_overflow_queue(StarTask& ref) {
1663 StarTask new_ref = overflowed_refs()->pop();
1664 assert((oop*)new_ref != NULL, "pop() from a local non-empty stack");
1665 assert(UseCompressedOops || !new_ref.is_narrow(), "Error");
1666 assert(has_partial_array_mask((oop*)new_ref) ||
1667 _g1h->is_in_g1_reserved(new_ref.is_narrow() ? oopDesc::load_decode_heap_oop((narrowOop*)new_ref)
1668 : oopDesc::load_decode_heap_oop((oop*)new_ref)),
1669 "invariant");
1670 ref = new_ref;
1671 }
1673 int refs_to_scan() { return refs()->size(); }
1674 int overflowed_refs_to_scan() { return overflowed_refs()->length(); }
1676 template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
1677 if (G1DeferredRSUpdate) {
1678 deferred_rs_update(from, p, tid);
1679 } else {
1680 immediate_rs_update(from, p, tid);
1681 }
1682 }
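// Slow allocation path: if the requested size is below the buffer waste
// threshold, retire the current buffer (counting what is left in it as
// waste) and install a fresh one; otherwise allocate the object directly
// from the heap.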
1684 HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
1686 HeapWord* obj = NULL;
1687 if (word_sz * 100 <
1688 (size_t)(G1ParallelGCAllocBufferSize / HeapWordSize) *
1689 ParallelGCBufferWastePct) {
1690 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
1691 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
1692 alloc_buf->retire(false, false);
1694 HeapWord* buf =
1695 _g1h->par_allocate_during_gc(purpose, G1ParallelGCAllocBufferSize / HeapWordSize);
1696 if (buf == NULL) return NULL; // Let caller handle allocation failure.
1697 // Otherwise.
1698 alloc_buf->set_buf(buf);
1700 obj = alloc_buf->allocate(word_sz);
1701 assert(obj != NULL, "buffer was definitely big enough...");
1702 } else {
1703 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
1704 }
1705 return obj;
1706 }
1708 HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
1709 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
1710 if (obj != NULL) return obj;
1711 return allocate_slow(purpose, word_sz);
1712 }
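// Undoes an allocation: gives the space back to the buffer if the object
// lies within it; otherwise fills the space with a dummy object and
// counts it as undo waste.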
1714 void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
1715 if (alloc_buffer(purpose)->contains(obj)) {
1716 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
1717 "should contain whole object");
1718 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
1719 } else {
1720 CollectedHeap::fill_with_object(obj, word_sz);
1721 add_to_undo_waste(word_sz);
1722 }
1723 }
1725 void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
1726 _evac_failure_cl = evac_failure_cl;
1727 }
1728 OopsInHeapRegionClosure* evac_failure_closure() {
1729 return _evac_failure_cl;
1730 }
1732 void set_evac_closure(G1ParScanHeapEvacClosure* evac_cl) {
1733 _evac_cl = evac_cl;
1734 }
1736 void set_partial_scan_closure(G1ParScanPartialArrayClosure* partial_scan_cl) {
1737 _partial_scan_cl = partial_scan_cl;
1738 }
1740 int* hash_seed() { return &_hash_seed; }
1741 int queue_num() { return _queue_num; }
1743 int term_attempts() { return _term_attempts; }
1744 void note_term_attempt() { _term_attempts++; }
1746 #if G1_DETAILED_STATS
1747 int pushes() { return _pushes; }
1748 int pops() { return _pops; }
1749 int steals() { return _steals; }
1750 int steal_attempts() { return _steal_attempts; }
1751 int overflow_pushes() { return _overflow_pushes; }
1753 void note_push() { _pushes++; }
1754 void note_pop() { _pops++; }
1755 void note_steal() { _steals++; }
1756 void note_steal_attempt() { _steal_attempts++; }
1757 void note_overflow_push() { _overflow_pushes++; }
1758 #endif
1760 void start_strong_roots() {
1761 _start_strong_roots = os::elapsedTime();
1762 }
1763 void end_strong_roots() {
1764 _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
1765 }
1766 double strong_roots_time() { return _strong_roots_time; }
1768 void start_term_time() {
1769 note_term_attempt();
1770 _start_term = os::elapsedTime();
1771 }
1772 void end_term_time() {
1773 _term_time += (os::elapsedTime() - _start_term);
1774 }
1775 double term_time() { return _term_time; }
1777 double elapsed() {
1778 return os::elapsedTime() - _start;
1779 }
1781 size_t* surviving_young_words() {
1782 // Entry 0 accumulates surviving words for age -1 (i.e., non-young)
1783 // regions; young regions start at index 1.
1784 return _surviving_young_words;
1785 }
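// Retires all per-purpose allocation buffers, recording any space still
// remaining in them as waste.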
1787 void retire_alloc_buffers() {
1788 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
1789 size_t waste = _alloc_buffers[ap].words_remaining();
1790 add_to_alloc_buffer_waste(waste);
1791 _alloc_buffers[ap].retire(true, false);
1792 }
1793 }
1795 private:
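// Dispatches a single queue entry: partial object array tasks go to the
// partial scan closure, ordinary references to the evacuation closure
// for their region.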
1796 template <class T> void deal_with_reference(T* ref_to_scan) {
1797 if (has_partial_array_mask(ref_to_scan)) {
1798 _partial_scan_cl->do_oop_nv(ref_to_scan);
1799 } else {
1800 // Note: we can use "raw" versions of "region_containing" because
1801 // "ref_to_scan" is definitely in the heap, and is not in a
1802 // humongous region.
1803 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
1804 _evac_cl->set_region(r);
1805 _evac_cl->do_oop_nv(ref_to_scan);
1806 }
1807 }
1809 public:
1810 void trim_queue() {
1811 // The loop is duplicated rather than written once: the first copy
1812 // drains the overflow queue, the second drains the task queue.
1813 // This is better than a single loop that checks both conditions
1814 // and, inside it, pops either the overflow queue or the task
1815 // queue, as each loop is tighter. Also, draining the overflow
1816 // queue first is not arbitrary: the overflow queue is not visible
1817 // to the other workers, whereas the task queue is. So we want to
1818 // drain the "invisible" entries first, while allowing the other
1819 // workers to potentially steal the "visible" entries.
1821 while (refs_to_scan() > 0 || overflowed_refs_to_scan() > 0) {
1822 while (overflowed_refs_to_scan() > 0) {
1823 StarTask ref_to_scan;
1824 assert((oop*)ref_to_scan == NULL, "Constructed above");
1825 pop_from_overflow_queue(ref_to_scan);
1826 // We shouldn't have pushed it on the queue if it was not
1827 // pointing into the CSet.
1828 assert((oop*)ref_to_scan != NULL, "Follows from inner loop invariant");
1829 if (ref_to_scan.is_narrow()) {
1830 assert(UseCompressedOops, "Error");
1831 narrowOop* p = (narrowOop*)ref_to_scan;
1832 assert(!has_partial_array_mask(p) &&
1833 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
1834 deal_with_reference(p);
1835 } else {
1836 oop* p = (oop*)ref_to_scan;
1837 assert((has_partial_array_mask(p) && _g1h->is_in_g1_reserved(clear_partial_array_mask(p))) ||
1838 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
1839 deal_with_reference(p);
1840 }
1841 }
1843 while (refs_to_scan() > 0) {
1844 StarTask ref_to_scan;
1845 assert((oop*)ref_to_scan == NULL, "Constructed above");
1846 pop_from_queue(ref_to_scan);
1847 if ((oop*)ref_to_scan != NULL) {
1848 if (ref_to_scan.is_narrow()) {
1849 assert(UseCompressedOops, "Error");
1850 narrowOop* p = (narrowOop*)ref_to_scan;
1851 assert(!has_partial_array_mask(p) &&
1852 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
1853 deal_with_reference(p);
1854 } else {
1855 oop* p = (oop*)ref_to_scan;
1856 assert((has_partial_array_mask(p) && _g1h->obj_in_cs(clear_partial_array_mask(p))) ||
1857 _g1h->is_in_g1_reserved(oopDesc::load_decode_heap_oop(p)), "sanity");
1858 deal_with_reference(p);
1859 }
1860 }
1861 }
1862 }
1863 }
1864 };