src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

changeset 2715: abdfc822206f
parent    2714: 455328d90876
child     2717: 371bbc844bf1
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -24,10 +24,11 @@
 
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
 #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
 #include "memory/barrierSet.hpp"
 #include "memory/memRegion.hpp"
@@ -126,17 +127,27 @@
   bool check_list_well_formed();
   bool check_list_empty(bool check_sample = true);
   void print();
 };
 
+class MutatorAllocRegion : public G1AllocRegion {
+protected:
+  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
+  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
+public:
+  MutatorAllocRegion()
+    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
+};
+
 class RefineCardTableEntryClosure;
 class G1CollectedHeap : public SharedHeap {
   friend class VM_G1CollectForAllocation;
   friend class VM_GenCollectForPermanentAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1IncCollectionPause;
   friend class VMStructs;
+  friend class MutatorAllocRegion;
 
   // Closures used in implementation.
   friend class G1ParCopyHelper;
   friend class G1IsAliveClosure;
   friend class G1EvacuateFollowersClosure;
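
Note: the MutatorAllocRegion class added above plugs into the G1AllocRegion base class (pulled in by the new g1AllocRegion.hpp include) through two virtual hooks. As a rough guide to the pattern, the self-contained sketch below shows a base class that keeps the fast allocation path and calls the hooks when the current region is exhausted; every name and type in it is invented for illustration, and it is not the actual G1AllocRegion code.

    #include <cstddef>

    struct Region {                              // stand-in for HeapRegion
      static const size_t kWords = 1024;
      size_t top;
      size_t data[kWords];

      Region() : top(0) { }

      void* bump_allocate(size_t words) {        // simplified bump-pointer allocation
        if (top + words > kWords) return nullptr;
        void* result = &data[top];
        top += words;
        return result;
      }
    };

    class AllocRegionBase {
    protected:
      Region* _current;

      // Hooks that a concrete alloc region (such as the mutator one) overrides.
      virtual Region* allocate_new_region(size_t word_size, bool force) = 0;
      virtual void retire_region(Region* region, size_t allocated_bytes) = 0;

    public:
      AllocRegionBase() : _current(nullptr) { }
      virtual ~AllocRegionBase() { }

      void* attempt_allocation(size_t word_size) {
        if (_current != nullptr) {
          void* result = _current->bump_allocate(word_size);      // fast path
          if (result != nullptr) return result;
          retire_region(_current, _current->top * sizeof(size_t)); // region is full
        }
        _current = allocate_new_region(word_size, false /* force */);
        return _current != nullptr ? _current->bump_allocate(word_size) : nullptr;
      }
    };
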
@@ -195,16 +206,19 @@
   void rebuild_region_lists();
 
   // The sequence of all heap regions in the heap.
   HeapRegionSeq* _hrs;
 
-  // The region from which normal-sized objects are currently being
-  // allocated. May be NULL.
-  HeapRegion* _cur_alloc_region;
-
-  // Postcondition: cur_alloc_region == NULL.
-  void abandon_cur_alloc_region();
+  // Alloc region used to satisfy mutator allocation requests.
+  MutatorAllocRegion _mutator_alloc_region;
+
+  // It resets the mutator alloc region before new allocations can take place.
+  void init_mutator_alloc_region();
+
+  // It releases the mutator alloc region.
+  void release_mutator_alloc_region();
+
   void abandon_gc_alloc_regions();
 
   // The to-space memory regions into which objects are being copied during
   // a GC.
   HeapRegion* _gc_alloc_regions[GCAllocPurposeCount];
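
Note: the raw _cur_alloc_region field is replaced by a MutatorAllocRegion object plus an init/release pair. The sketch below is only a guess at how such a pair is typically bracketed around a collection pause, not code from this changeset; the types and the pause body are invented.

    // Invented types; only the release-before / init-after bracketing matters here.
    struct MutatorRegionSketch {
      bool active;

      MutatorRegionSketch() : active(false) { }

      void init()    { active = true;  }   // a fresh region may now be handed out
      void release() { active = false; }   // retire the current region; no mutator
                                           // allocation until init() is called again
    };

    struct HeapLifecycleSketch {
      MutatorRegionSketch _mutator_alloc_region;

      void do_collection_pause() {
        _mutator_alloc_region.release();   // stop satisfying mutator requests
        // ... evacuate live objects, rebuild region lists ...
        _mutator_alloc_region.init();      // mutator allocation can resume
      }
    };
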
@@ -358,31 +372,25 @@
 
   // The current policy object for the collector.
   G1CollectorPolicy* _g1_policy;
 
   // This is the second level of trying to allocate a new region. If
-  // new_region_work didn't find a region in the free_list, this call
-  // will check whether there's anything available in the
-  // secondary_free_list and/or wait for more regions to appear in that
-  // list, if _free_regions_coming is set.
+  // new_region() didn't find a region on the free_list, this call will
+  // check whether there's anything available on the
+  // secondary_free_list and/or wait for more regions to appear on
+  // that list, if _free_regions_coming is set.
   HeapRegion* new_region_try_secondary_free_list();
 
   // Try to allocate a single non-humongous HeapRegion sufficient for
   // an allocation of the given word_size. If do_expand is true,
   // attempt to expand the heap if necessary to satisfy the allocation
   // request.
-  HeapRegion* new_region_work(size_t word_size, bool do_expand);
+  HeapRegion* new_region(size_t word_size, bool do_expand);
 
-  // Try to allocate a new region to be used for allocation by a
-  // mutator thread. Attempt to expand the heap if no region is
-  // available.
-  HeapRegion* new_alloc_region(size_t word_size) {
-    return new_region_work(word_size, false /* do_expand */);
-  }
-
-  // Try to allocate a new region to be used for allocation by a GC
-  // thread. Attempt to expand the heap if no region is available.
+  // Try to allocate a new region to be used for allocation by
+  // a GC thread. It will try to expand the heap if no region is
+  // available.
   HeapRegion* new_gc_alloc_region(int purpose, size_t word_size);
 
   // Attempt to satisfy a humongous allocation request of the given
   // size by finding a contiguous set of free regions of num_regions
   // length and remove them from the master free list. Return the
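
Note: for the renamed new_region() and its companion new_region_try_secondary_free_list(), the comment describes a two-level lookup: the primary free list first, then the secondary free list, with an optional heap expansion. The following is a self-contained sketch of that control flow only, with invented container types and none of the waiting or locking details.

    #include <cstddef>
    #include <deque>

    struct RegionSketch { };

    struct RegionAllocatorSketch {
      std::deque<RegionSketch*> free_list;            // primary free list
      std::deque<RegionSketch*> secondary_free_list;  // filled concurrently, e.g. by cleanup
      bool free_regions_coming;

      RegionAllocatorSketch() : free_regions_coming(false) { }

      // Second level: the secondary free list may still hold (or be about to
      // receive) regions; the real code can also wait while regions are coming.
      RegionSketch* try_secondary_free_list() {
        if (!secondary_free_list.empty()) {
          RegionSketch* r = secondary_free_list.front();
          secondary_free_list.pop_front();
          return r;
        }
        return nullptr;
      }

      // First level: the primary free list; fall back to the secondary list and,
      // if allowed, to heap expansion.
      RegionSketch* new_region(size_t /* word_size */, bool do_expand) {
        if (!free_list.empty()) {
          RegionSketch* r = free_list.front();
          free_list.pop_front();
          return r;
        }
        RegionSketch* r = try_secondary_free_list();
        if (r == nullptr && do_expand) {
          // expand the heap by one region and retry (omitted in this sketch)
        }
        return r;
      }
    };
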
@@ -413,14 +421,10 @@
   //   allocate_new_tlab().
   //
   // * All non-TLAB allocation requests should go to mem_allocate()
   //   and mem_allocate() should never be called with is_tlab == true.
   //
-  // * If the GC locker is active we currently stall until we can
-  //   allocate a new young region. This will be changed in the
-  //   near future (see CR 6994056).
-  //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
   //   this fails, they will attempt to do an evacuation pause and
   //   retry the allocation.
   //
@@ -439,126 +443,42 @@
   virtual HeapWord* mem_allocate(size_t word_size,
                                  bool is_noref,
                                  bool is_tlab, /* expected to be false */
                                  bool* gc_overhead_limit_was_exceeded);
 
-  // The following methods, allocate_from_cur_allocation_region(),
-  // attempt_allocation(), attempt_allocation_locked(),
-  // replace_cur_alloc_region_and_allocate(),
-  // attempt_allocation_slow(), and attempt_allocation_humongous()
-  // have very awkward pre- and post-conditions with respect to
-  // locking:
-  //
-  // If they are called outside a safepoint they assume the caller
-  // holds the Heap_lock when it calls them. However, on exit they
-  // will release the Heap_lock if they return a non-NULL result, but
-  // keep holding the Heap_lock if they return a NULL result. The
-  // reason for this is that we need to dirty the cards that span
-  // allocated blocks on young regions to avoid having to take the
-  // slow path of the write barrier (for performance reasons we don't
-  // update RSets for references whose source is a young region, so we
-  // don't need to look at dirty cards on young regions). But, doing
-  // this card dirtying while holding the Heap_lock can be a
-  // scalability bottleneck, especially given that some allocation
-  // requests might be of non-trivial size (and the larger the region
-  // size is, the fewer allocations requests will be considered
-  // humongous, as the humongous size limit is a fraction of the
-  // region size). So, when one of these calls succeeds in allocating
-  // a block it does the card dirtying after it releases the Heap_lock
-  // which is why it will return without holding it.
-  //
-  // The above assymetry is the reason why locking / unlocking is done
-  // explicitly (i.e., with Heap_lock->lock() and
-  // Heap_lock->unlocked()) instead of using MutexLocker and
-  // MutexUnlocker objects. The latter would ensure that the lock is
-  // unlocked / re-locked at every possible exit out of the basic
-  // block. However, we only want that action to happen in selected
-  // places.
-  //
-  // Further, if the above methods are called during a safepoint, then
-  // naturally there's no assumption about the Heap_lock being held or
-  // there's no attempt to unlock it. The parameter at_safepoint
-  // indicates whether the call is made during a safepoint or not (as
-  // an optimization, to avoid reading the global flag with
-  // SafepointSynchronize::is_at_safepoint()).
-  //
-  // The methods share these parameters:
-  //
-  // * word_size : the size of the allocation request in words
-  // * at_safepoint : whether the call is done at a safepoint; this
-  //   also determines whether a GC is permitted
-  //   (at_safepoint == false) or not (at_safepoint == true)
-  // * do_dirtying : whether the method should dirty the allocated
-  //   block before returning
-  //
-  // They all return either the address of the block, if they
-  // successfully manage to allocate it, or NULL.
-
-  // It tries to satisfy an allocation request out of the current
-  // alloc region, which is passed as a parameter. It assumes that the
-  // caller has checked that the current alloc region is not NULL.
-  // Given that the caller has to check the current alloc region for
-  // at least NULL, it might as well pass it as the first parameter so
-  // that the method doesn't have to read it from the
-  // _cur_alloc_region field again. It is called from both
-  // attempt_allocation() and attempt_allocation_locked() and the
-  // with_heap_lock parameter indicates whether the caller was holding
-  // the heap lock when it called it or not.
-  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                  size_t word_size,
-                                                  bool with_heap_lock);
-
-  // First-level of allocation slow path: it attempts to allocate out
-  // of the current alloc region in a lock-free manner using a CAS. If
-  // that fails it takes the Heap_lock and calls
-  // attempt_allocation_locked() for the second-level slow path.
-  inline HeapWord* attempt_allocation(size_t word_size);
-
-  // Second-level of allocation slow path: while holding the Heap_lock
-  // it tries to allocate out of the current alloc region and, if that
-  // fails, tries to allocate out of a new current alloc region.
-  inline HeapWord* attempt_allocation_locked(size_t word_size);
-
-  // It assumes that the current alloc region has been retired and
-  // tries to allocate a new one. If it's successful, it performs the
-  // allocation out of the new current alloc region and updates
-  // _cur_alloc_region. Normally, it would try to allocate a new
-  // region if the young gen is not full, unless can_expand is true in
-  // which case it would always try to allocate a new region.
-  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
-                                                  bool at_safepoint,
-                                                  bool do_dirtying,
-                                                  bool can_expand);
-
-  // Third-level of allocation slow path: when we are unable to
-  // allocate a new current alloc region to satisfy an allocation
-  // request (i.e., when attempt_allocation_locked() fails). It will
-  // try to do an evacuation pause, which might stall due to the GC
-  // locker, and retry the allocation attempt when appropriate.
-  HeapWord* attempt_allocation_slow(size_t word_size);
-
-  // The method that tries to satisfy a humongous allocation
-  // request. If it cannot satisfy it it will try to do an evacuation
-  // pause to perhaps reclaim enough space to be able to satisfy the
-  // allocation request afterwards.
-  HeapWord* attempt_allocation_humongous(size_t word_size,
-                                         bool at_safepoint);
-
-  // It does the common work when we are retiring the current alloc region.
-  inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
-
-  // It retires the current alloc region, which is passed as a
-  // parameter (since, typically, the caller is already holding on to
-  // it). It sets _cur_alloc_region to NULL.
-  void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
-
-  // It attempts to do an allocation immediately before or after an
-  // evacuation pause and can only be called by the VM thread. It has
-  // slightly different assumptions that the ones before (i.e.,
-  // assumes that the current alloc region has been retired).
-  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
-                                            bool expect_null_cur_alloc_region);
+  // The following three methods take a gc_count_before_ret
+  // parameter which is used to return the GC count if the method
+  // returns NULL. Given that we are required to read the GC count
+  // while holding the Heap_lock, and these paths will take the
+  // Heap_lock at some point, it's easier to get them to read the GC
+  // count while holding the Heap_lock before they return NULL instead
+  // of the caller (namely: mem_allocate()) having to also take the
+  // Heap_lock just to read the GC count.
+
+  // First-level mutator allocation attempt: try to allocate out of
+  // the mutator alloc region without taking the Heap_lock. This
+  // should only be used for non-humongous allocations.
+  inline HeapWord* attempt_allocation(size_t word_size,
+                                      unsigned int* gc_count_before_ret);
+
+  // Second-level mutator allocation attempt: take the Heap_lock and
+  // retry the allocation attempt, potentially scheduling a GC
+  // pause. This should only be used for non-humongous allocations.
+  HeapWord* attempt_allocation_slow(size_t word_size,
+                                    unsigned int* gc_count_before_ret);
+
+  // Takes the Heap_lock and attempts a humongous allocation. It can
+  // potentially schedule a GC pause.
+  HeapWord* attempt_allocation_humongous(size_t word_size,
+                                         unsigned int* gc_count_before_ret);
+
+  // Allocation attempt that should be called during safepoints (e.g.,
+  // at the end of a successful GC). expect_null_mutator_alloc_region
+  // specifies whether the mutator alloc region is expected to be NULL
+  // or not.
+  HeapWord* attempt_allocation_at_safepoint(size_t word_size,
+                                            bool expect_null_mutator_alloc_region);
 
   // It dirties the cards that cover the block so that so that the post
   // write barrier never queues anything when updating objects on this
   // block. It is assumed (and in fact we assert) that the block
   // belongs to a young region.
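
Note: the new comment explains why the allocation entry points return the GC count (read while holding the Heap_lock) through gc_count_before_ret when they fail. The self-contained sketch below illustrates the caller-side retry protocol this enables; HeapSketch, collect_for_allocation() and the bounded retry loop are invented for illustration and are not the real mem_allocate().

    #include <cstddef>

    // Invented stand-in for the heap; only the retry protocol is of interest here.
    struct HeapSketch {
      unsigned int total_collections;

      HeapSketch() : total_collections(0) { }

      // On failure, stores the GC count that the allocation path read while it
      // held the Heap_lock, then returns null.
      void* attempt_allocation(size_t /* word_size */, unsigned int* gc_count_before_ret) {
        *gc_count_before_ret = total_collections;
        return nullptr;                        // pretend the fast path failed
      }

      // Schedules a collection only if no GC has happened since gc_count_before;
      // otherwise another thread's pause may already have freed enough space.
      void collect_for_allocation(size_t /* word_size */, unsigned int gc_count_before) {
        if (gc_count_before == total_collections) {
          ++total_collections;                 // stand-in for an evacuation pause
        }
      }
    };

    // Caller-side loop in the spirit of mem_allocate(): the GC count returned by
    // the failed allocation attempt is handed straight to the collection request,
    // so the caller never takes the Heap_lock just to read it.
    void* mem_allocate_sketch(HeapSketch* heap, size_t word_size) {
      for (int attempts = 0; attempts < 3; ++attempts) {  // the real code keeps retrying
        unsigned int gc_count_before = 0;
        void* result = heap->attempt_allocation(word_size, &gc_count_before);
        if (result != nullptr) {
          return result;
        }
        heap->collect_for_allocation(word_size, gc_count_before);
      }
      return nullptr;                          // give up after a few attempts
    }
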
@@ -580,10 +500,16 @@
   void par_allocate_remaining_space(HeapRegion* r);
 
   // Retires an allocation region when it is full or at the end of a
   // GC pause.
   void retire_alloc_region(HeapRegion* alloc_region, bool par);
+
+  // These two methods are the "callbacks" from the G1AllocRegion class.
+
+  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
+  void retire_mutator_alloc_region(HeapRegion* alloc_region,
+                                   size_t allocated_bytes);
 
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
   // - if clear_all_soft_refs is true, all soft references should be
   //   cleared during the GC
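
Note: new_mutator_alloc_region() and retire_mutator_alloc_region() are described as the callbacks from the G1AllocRegion class, which suggests that the MutatorAllocRegion virtuals declared earlier simply forward to them. The fragment below is a plausible sketch of that forwarding, not code from this changeset; in particular, the _g1h back-pointer to the G1CollectedHeap is an assumption.

    // Plausible wiring only; the actual definitions live outside this hunk.
    HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size, bool force) {
      return _g1h->new_mutator_alloc_region(word_size, force);      // assumed back-pointer
    }

    void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
                                           size_t allocated_bytes) {
      _g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
    }
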
@@ -1024,10 +950,13 @@
   // The number of regions that are not completely free.
   size_t used_regions() { return n_regions() - free_regions(); }
 
   // The number of regions available for "regular" expansion.
   size_t expansion_regions() { return _expansion_regions; }
+
+  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
+  void verify_dirty_young_regions() PRODUCT_RETURN;
 
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
   // necessary (i.e., during heap verification).
   void verify_region_sets();
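
Note: the two new verification methods are declared with PRODUCT_RETURN, the HotSpot macro that gives the declaration an empty inline body in product builds so that debug-only checks compile away. Below is a minimal, self-contained illustration of that idiom; the macro definition mirrors the HotSpot one and the class is invented for the example.

    #ifdef PRODUCT
    #define PRODUCT_RETURN {}
    #else
    #define PRODUCT_RETURN /* next token must be ; */
    #endif

    class DirtyListVerifier {
    public:
      // Empty in product builds; defined out of line in non-product builds only.
      void verify_dirty_young_regions() PRODUCT_RETURN;
    };

    #ifndef PRODUCT
    void DirtyListVerifier::verify_dirty_young_regions() {
      // ... walk the young list and assert the relevant card table entries are dirty ...
    }
    #endif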
