src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

changeset 2643:1216415d8e35
parent    2593:4e0069ff33df
child     2714:455328d90876

--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -54,23 +54,16 @@
 class G1RemSet;
 class HeapRegionRemSetIterator;
 class ConcurrentMark;
 class ConcurrentMarkThread;
 class ConcurrentG1Refine;
-class ConcurrentZFThread;

 typedef OverflowTaskQueue<StarTask> RefToScanQueue;
 typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;

 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
 typedef int CardIdx_t;   // needs to hold [ 0..CardsPerRegion )
-
-enum G1GCThreadGroups {
-  G1CRGroup = 0,
-  G1ZFGroup = 1,
-  G1CMGroup = 2
-};

 enum GCAllocPurpose {
   GCAllocForTenured,
   GCAllocForSurvived,
   GCAllocPurposeCount
@@ -292,28 +285,28 @@
   volatile unsigned int _full_collections_completed;

   // These are macros so that, if the assert fires, we get the correct
   // line number, file, etc.

-#define heap_locking_asserts_err_msg(__extra_message) \
+#define heap_locking_asserts_err_msg(_extra_message_) \
   err_msg("%s : Heap_lock locked: %s, at safepoint: %s, is VM thread: %s", \
-          (__extra_message), \
+          (_extra_message_), \
           BOOL_TO_STR(Heap_lock->owned_by_self()), \
           BOOL_TO_STR(SafepointSynchronize::is_at_safepoint()), \
           BOOL_TO_STR(Thread::current()->is_VM_thread()))

 #define assert_heap_locked() \
   do { \
     assert(Heap_lock->owned_by_self(), \
            heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
   } while (0)

-#define assert_heap_locked_or_at_safepoint(__should_be_vm_thread) \
+#define assert_heap_locked_or_at_safepoint(_should_be_vm_thread_) \
   do { \
     assert(Heap_lock->owned_by_self() || \
            (SafepointSynchronize::is_at_safepoint() && \
-            ((__should_be_vm_thread) == Thread::current()->is_VM_thread())), \
+            ((_should_be_vm_thread_) == Thread::current()->is_VM_thread())), \
            heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
                                         "should be at a safepoint")); \
   } while (0)

 #define assert_heap_locked_and_not_at_safepoint() \
@@ -336,14 +329,14 @@
            !SafepointSynchronize::is_at_safepoint(), \
            heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
                                         "should not be at a safepoint")); \
   } while (0)

-#define assert_at_safepoint(__should_be_vm_thread) \
+#define assert_at_safepoint(_should_be_vm_thread_) \
   do { \
     assert(SafepointSynchronize::is_at_safepoint() && \
-           ((__should_be_vm_thread) == Thread::current()->is_VM_thread()), \
+           ((_should_be_vm_thread_) == Thread::current()->is_VM_thread()), \
            heap_locking_asserts_err_msg("should be at a safepoint")); \
   } while (0)

 #define assert_not_at_safepoint() \
   do { \
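
The comment in the hunks above explains why these checks are macros: a failing assert should report the caller's file and line. Below is a minimal standalone sketch of that point, not HotSpot code; MY_ASSERT, heap_lock_owned_by_self() and ASSERT_HEAP_LOCKED() are invented stand-ins for assert, Heap_lock->owned_by_self() and assert_heap_locked().

// Standalone illustration, not HotSpot code: a macro expands at the call
// site, so __FILE__/__LINE__ in the failure message point at the caller
// rather than at a shared helper function.
#include <cstdio>
#include <cstdlib>

#define MY_ASSERT(cond, msg) \
  do { \
    if (!(cond)) { \
      std::fprintf(stderr, "%s:%d: assert failed: %s\n", \
                   __FILE__, __LINE__, (msg)); \
      std::abort(); \
    } \
  } while (0)

// Hypothetical stand-in for Heap_lock->owned_by_self().
static bool heap_lock_owned_by_self() { return false; }

#define ASSERT_HEAP_LOCKED() \
  MY_ASSERT(heap_lock_owned_by_self(), \
            "should be holding the Heap_lock")

int main() {
  ASSERT_HEAP_LOCKED();  // the report names this file and this line
  return 0;
}

Wrapping the check in a helper function instead would make every failure report the helper's own file and line, which is exactly what the macro form avoids.
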
@@ -369,39 +362,44 @@
   // This is the second level of trying to allocate a new region. If
   // new_region_work didn't find a region in the free_list, this call
   // will check whether there's anything available in the
   // secondary_free_list and/or wait for more regions to appear in that
   // list, if _free_regions_coming is set.
-  HeapRegion* new_region_try_secondary_free_list(size_t word_size);
+  HeapRegion* new_region_try_secondary_free_list();

-  // It will try to allocate a single non-humongous HeapRegion
-  // sufficient for an allocation of the given word_size. If
-  // do_expand is true, it will attempt to expand the heap if
-  // necessary to satisfy the allocation request. Note that word_size
-  // is only used to make sure that we expand sufficiently but, given
-  // that the allocation request is assumed not to be humongous,
-  // having word_size is not strictly necessary (expanding by a single
-  // region will always be sufficient). But let's keep that parameter
-  // in case we need it in the future.
+  // Try to allocate a single non-humongous HeapRegion sufficient for
+  // an allocation of the given word_size. If do_expand is true,
+  // attempt to expand the heap if necessary to satisfy the allocation
+  // request.
   HeapRegion* new_region_work(size_t word_size, bool do_expand);

-  // It will try to allocate a new region to be used for allocation by
-  // mutator threads. It will not try to expand the heap if not region
-  // is available.
+  // Try to allocate a new region to be used for allocation by a
+  // mutator thread. Will not attempt to expand the heap if no region
+  // is available.
   HeapRegion* new_alloc_region(size_t word_size) {
     return new_region_work(word_size, false /* do_expand */);
   }

-  // It will try to allocate a new region to be used for allocation by
-  // a GC thread. It will try to expand the heap if no region is
-  // available.
+  // Try to allocate a new region to be used for allocation by a GC
+  // thread. Attempt to expand the heap if no region is available.
   HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);

+  // Attempt to satisfy a humongous allocation request of the given
+  // size by finding a contiguous set of free regions of num_regions
+  // length and remove them from the master free list. Return the
+  // index of the first region or -1 if the search was unsuccessful.
   int humongous_obj_allocate_find_first(size_t num_regions, size_t word_size);

-  // Attempt to allocate an object of the given (very large) "word_size".
-  // Returns "NULL" on failure.
+  // Initialize a contiguous set of free regions of length num_regions
+  // and starting at index first so that they appear as a single
+  // humongous region.
+  HeapWord* humongous_obj_allocate_initialize_regions(int first,
+                                                      size_t num_regions,
+                                                      size_t word_size);
+
+  // Attempt to allocate a humongous object of the given size. Return
+  // NULL if unsuccessful.
   HeapWord* humongous_obj_allocate(size_t word_size);

   // The following two methods, allocate_new_tlab() and
   // mem_allocate(), are the two main entry points from the runtime
   // into the G1's allocation routines. They have the following
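
The hunk above splits humongous allocation into a search phase (humongous_obj_allocate_find_first, which returns the index of the first region of a contiguous free run, or -1) and an initialization phase (humongous_obj_allocate_initialize_regions, which makes that run behave as a single humongous region). Below is a standalone sketch of how the two phases might compose inside humongous_obj_allocate(); the free-region bitmap, RegionSizeWords and the function names are invented for illustration and are not the G1 implementation.

// Toy model, not HotSpot code: the region map and sizes are invented.
#include <cstddef>
#include <vector>

static const size_t RegionSizeWords = 512;  // arbitrary toy region size

// Search phase: index of the first region of a free run of num_regions,
// or -1 if no such contiguous run exists.
static int find_first_free_run(const std::vector<bool>& is_free,
                               size_t num_regions) {
  size_t run = 0;
  for (size_t i = 0; i < is_free.size(); ++i) {
    run = is_free[i] ? run + 1 : 0;
    if (run == num_regions) {
      return static_cast<int>(i - num_regions + 1);
    }
  }
  return -1;
}

// Initialization phase: claim the run so it is treated as one allocation;
// returns the start index here instead of a HeapWord*.
static int claim_run(std::vector<bool>& is_free, int first,
                     size_t num_regions) {
  for (size_t i = 0; i < num_regions; ++i) {
    is_free[first + i] = false;
  }
  return first;
}

// Composition, mirroring the shape of humongous_obj_allocate():
// compute the region count, search, then initialize; fail with -1.
int humongous_allocate(std::vector<bool>& is_free, size_t word_size) {
  size_t num_regions = (word_size + RegionSizeWords - 1) / RegionSizeWords;
  int first = find_first_free_run(is_free, num_regions);
  if (first == -1) {
    return -1;  // no contiguous run of that length is free
  }
  return claim_run(is_free, first, num_regions);
}
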
@@ -774,28 +772,28 @@
                                 OopClosure* non_root_closure);

   // Invoke "save_marks" on all heap regions.
   void save_marks();

-  // It frees a non-humongous region by initializing its contents and
+  // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
   // list later). The used bytes of freed regions are accumulated in
   // pre_used. If par is true, the region's RSet will not be freed
   // up. The assumption is that this will be done later.
   void free_region(HeapRegion* hr,
                    size_t* pre_used,
                    FreeRegionList* free_list,
                    bool par);

-  // It frees a humongous region by collapsing it into individual
-  // regions and calling free_region() for each of them. The freed
-  // regions will be added to the free list that's passed as a parameter
-  // (this is usually a local list which will be appended to the
-  // master free list later). The used bytes of freed regions are
-  // accumulated in pre_used. If par is true, the region's RSet will
-  // not be freed up. The assumption is that this will be done later.
+  // Frees a humongous region by collapsing it into individual regions
+  // and calling free_region() for each of them. The freed regions
+  // will be added to the free list that's passed as a parameter (this
+  // is usually a local list which will be appended to the master free
+  // list later). The used bytes of freed regions are accumulated in
+  // pre_used. If par is true, the region's RSet will not be freed
+  // up. The assumption is that this will be done later.
   void free_humongous_region(HeapRegion* hr,
                              size_t* pre_used,
                              FreeRegionList* free_list,
                              HumongousRegionSet* humongous_proxy_set,
                              bool par);
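
free_region() and free_humongous_region() above take a caller-supplied free list and a pre_used accumulator rather than updating the master free list directly; the comments say the local list is appended to the master list later. Below is a standalone sketch of that pattern, assuming invented Region and FreeRegionList types and a single master lock (not the actual G1 types): each worker frees regions onto a private list and publishes it with one lock acquisition.

// Toy model, not HotSpot code: types, names and the lock are invented.
#include <cstddef>
#include <list>
#include <mutex>

struct Region { size_t used_bytes; };
struct FreeRegionList { std::list<Region*> regions; };

static FreeRegionList master_free_list;
static std::mutex     master_free_list_lock;
static size_t         total_freed_bytes = 0;

// Mirrors the shape of free_region(): no shared lock is taken here; the
// region goes onto the caller's local list and pre_used is updated locally.
void free_region(Region* hr, size_t* pre_used, FreeRegionList* free_list) {
  *pre_used += hr->used_bytes;
  hr->used_bytes = 0;
  free_list->regions.push_back(hr);
}

// Called once per worker when it is finished: a single lock acquisition
// publishes all of its freed regions and the accumulated byte count.
void append_to_master(FreeRegionList* local, size_t pre_used) {
  std::lock_guard<std::mutex> x(master_free_list_lock);
  master_free_list.regions.splice(master_free_list.regions.end(),
                                  local->regions);
  total_freed_bytes += pre_used;
}
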
@@ -1044,17 +1042,17 @@
 #else // HEAP_REGION_SET_FORCE_VERIFY
   void verify_region_sets_optional() { }
 #endif // HEAP_REGION_SET_FORCE_VERIFY

 #ifdef ASSERT
-  bool is_on_free_list(HeapRegion* hr) {
+  bool is_on_master_free_list(HeapRegion* hr) {
     return hr->containing_set() == &_free_list;
   }

-  bool is_on_humongous_set(HeapRegion* hr) {
+  bool is_in_humongous_set(HeapRegion* hr) {
     return hr->containing_set() == &_humongous_set;
   }
 #endif // ASSERT

   // Wrapper for the region list operations that can be called from
   // methods outside this class.

@@ -1064,11 +1062,13 @@

   void append_secondary_free_list() {
     _free_list.add_as_tail(&_secondary_free_list);
   }

-  void append_secondary_free_list_if_not_empty() {
+  void append_secondary_free_list_if_not_empty_with_lock() {
+    // If the secondary free list looks empty there's no reason to
+    // take the lock and then try to append it.
     if (!_secondary_free_list.is_empty()) {
       MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
       append_secondary_free_list();
     }
   }
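
The new comment inside append_secondary_free_list_if_not_empty_with_lock() describes a check-then-lock shape: peek at the list without the lock so the common empty case pays nothing, and take SecondaryFreeList_lock only when there is something to append. Below is a standalone sketch of that shape with invented stand-ins (std::mutex and an atomic length counter in place of MutexLockerEx and the G1 region lists); a stale "empty" answer from the unlocked check is harmless here because the append is only an optimization that can happen on a later call.

// Toy model, not HotSpot code: the lists, counter and lock are invented.
#include <atomic>
#include <cstddef>
#include <list>
#include <mutex>

static std::list<int>      secondary_free_list;   // stand-in region list
static std::atomic<size_t> secondary_length{0};
static std::list<int>      free_list;             // stand-in master list
static std::mutex          secondary_free_list_lock;

void append_secondary_free_list_if_not_empty_with_lock() {
  // Cheap unlocked check: in the common case the secondary list is empty
  // and the lock is never taken.
  if (secondary_length.load(std::memory_order_relaxed) == 0) {
    return;
  }
  std::lock_guard<std::mutex> x(secondary_free_list_lock);
  free_list.splice(free_list.end(), secondary_free_list);
  secondary_length.store(0, std::memory_order_relaxed);
}
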
