358 |
372 |
359 // The current policy object for the collector. |
373 // The current policy object for the collector. |
360 G1CollectorPolicy* _g1_policy; |
374 G1CollectorPolicy* _g1_policy; |
361 |
375 |
362 // This is the second level of trying to allocate a new region. If |
376 // This is the second level of trying to allocate a new region. If |
363 // new_region_work didn't find a region in the free_list, this call |
377 // new_region() didn't find a region on the free_list, this call will |
364 // will check whether there's anything available in the |
378 // check whether there's anything available on the |
365 // secondary_free_list and/or wait for more regions to appear in that |
379 // secondary_free_list and/or wait for more regions to appear on |
366 // list, if _free_regions_coming is set. |
380 // that list, if _free_regions_coming is set. |
367 HeapRegion* new_region_try_secondary_free_list(); |
381 HeapRegion* new_region_try_secondary_free_list(); |
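The comment above describes a check-then-wait protocol on the secondary free list. Below is a minimal standalone sketch of that shape, with std::condition_variable standing in for the VM-internal monitor; RegionPool, Region, and append_secondary_free_region are illustrative names, not the actual G1 types or entry points.

// Simplified standalone model of the protocol described above; not the
// HotSpot implementation. All names here are illustrative.
#include <condition_variable>
#include <deque>
#include <mutex>

struct Region { };                                   // stand-in for HeapRegion

class RegionPool {
  std::mutex              _lock;                     // stand-in for the VM-internal lock
  std::condition_variable _cv;
  std::deque<Region*>     _secondary_free_list;
  bool                    _free_regions_coming = false;

public:
  // Second-level attempt: take a region from the secondary free list, or wait
  // for more to be appended while regions are still expected to arrive.
  Region* new_region_try_secondary_free_list() {
    std::unique_lock<std::mutex> ml(_lock);
    for (;;) {
      if (!_secondary_free_list.empty()) {
        Region* r = _secondary_free_list.front();
        _secondary_free_list.pop_front();
        return r;
      }
      if (!_free_regions_coming) {
        return nullptr;                              // nothing there and no more expected
      }
      _cv.wait(ml);                                  // wait for regions to appear on the list
    }
  }

  // Producer side: publish a freed region and say whether more are coming.
  void append_secondary_free_region(Region* r, bool more_coming) {
    std::lock_guard<std::mutex> ml(_lock);
    _secondary_free_list.push_back(r);
    _free_regions_coming = more_coming;
    _cv.notify_all();
  }
};

In the real code the regions appear on that list as the cleanup phase frees them; the sketch models that producer side with the hypothetical append_secondary_free_region().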
368 |
382 |
369 // Try to allocate a single non-humongous HeapRegion sufficient for |
383 // Try to allocate a single non-humongous HeapRegion sufficient for |
370 // an allocation of the given word_size. If do_expand is true, |
384 // an allocation of the given word_size. If do_expand is true, |
371 // attempt to expand the heap if necessary to satisfy the allocation |
385 // attempt to expand the heap if necessary to satisfy the allocation |
372 // request. |
386 // request. |
373 HeapRegion* new_region_work(size_t word_size, bool do_expand); |
387 HeapRegion* new_region(size_t word_size, bool do_expand); |
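For contrast with the secondary-list helper, here is a sketch of the first-level lookup that the comment attributes to new_region(): primary free list first, then the secondary list, then (only when do_expand is true) heap expansion. RegionAllocator and its two fallback helpers are illustrative stand-ins, stubbed so the snippet compiles on its own.

#include <cstddef>
#include <deque>

struct Region { };                                  // stand-in for HeapRegion

struct RegionAllocator {                            // illustrative, not the real G1 class
  std::deque<Region*> free_list;                    // primary ("master") free list

  // Hypothetical fallbacks, stubbed so the sketch is self-contained.
  Region* try_secondary_free_list()   { return nullptr; }  // level 2 (see sketch above)
  Region* expand_and_allocate(size_t) { return nullptr; }  // grow the heap, then retry

  // Level 1: take a region straight off the free list; fall back to the
  // secondary list and, only when do_expand is true, to heap expansion.
  Region* new_region(size_t word_size, bool do_expand) {
    if (!free_list.empty()) {
      Region* r = free_list.front();
      free_list.pop_front();
      return r;
    }
    Region* r = try_secondary_free_list();
    if (r == nullptr && do_expand) {
      r = expand_and_allocate(word_size);
    }
    return r;
  }
};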
374 |
388 |
375 // Try to allocate a new region to be used for allocation by a |
389 // Try to allocate a new region to be used for allocation by |
376 // mutator thread. Attempt to expand the heap if no region is |
390 // a GC thread. It will try to expand the heap if no region is |
377 // available. |
391 // available. |
378 HeapRegion* new_alloc_region(size_t word_size) { |
|
379 return new_region_work(word_size, false /* do_expand */); |
|
380 } |
|
381 |
|
382 // Try to allocate a new region to be used for allocation by a GC |
|
383 // thread. Attempt to expand the heap if no region is available. |
|
384 HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); |
392 HeapRegion* new_gc_alloc_region(int purpose, size_t word_size); |
385 |
393 |
386 // Attempt to satisfy a humongous allocation request of the given |
394 // Attempt to satisfy a humongous allocation request of the given |
387 // size by finding a contiguous set of free regions of num_regions |
395 // size by finding a contiguous set of free regions of num_regions |
388 // length and removing them from the master free list. Return the |
396 // length and removing them from the master free list. Return the |
439 virtual HeapWord* mem_allocate(size_t word_size, |
443 virtual HeapWord* mem_allocate(size_t word_size, |
440 bool is_noref, |
444 bool is_noref, |
441 bool is_tlab, /* expected to be false */ |
445 bool is_tlab, /* expected to be false */ |
442 bool* gc_overhead_limit_was_exceeded); |
446 bool* gc_overhead_limit_was_exceeded); |
443 |
447 |
444 // The following methods, allocate_from_cur_allocation_region(), |
448 // The following three methods take a gc_count_before_ret |
445 // attempt_allocation(), attempt_allocation_locked(), |
449 // parameter which is used to return the GC count if the method |
446 // replace_cur_alloc_region_and_allocate(), |
450 // returns NULL. Given that we are required to read the GC count |
447 // attempt_allocation_slow(), and attempt_allocation_humongous() |
451 // while holding the Heap_lock, and these paths will take the |
448 // have very awkward pre- and post-conditions with respect to |
452 // Heap_lock at some point, it's easier to get them to read the GC |
449 // locking: |
453 // count while holding the Heap_lock before they return NULL instead |
450 // |
454 // of the caller (namely: mem_allocate()) having to also take the |
451 // If they are called outside a safepoint they assume the caller |
455 // Heap_lock just to read the GC count. |
452 // holds the Heap_lock when it calls them. However, on exit they |
456 |
453 // will release the Heap_lock if they return a non-NULL result, but |
457 // First-level mutator allocation attempt: try to allocate out of |
454 // keep holding the Heap_lock if they return a NULL result. The |
458 // the mutator alloc region without taking the Heap_lock. This |
455 // reason for this is that we need to dirty the cards that span |
459 // should only be used for non-humongous allocations. |
456 // allocated blocks on young regions to avoid having to take the |
460 inline HeapWord* attempt_allocation(size_t word_size, |
457 // slow path of the write barrier (for performance reasons we don't |
461 unsigned int* gc_count_before_ret); |
458 // update RSets for references whose source is a young region, so we |
462 |
459 // don't need to look at dirty cards on young regions). But, doing |
463 // Second-level mutator allocation attempt: take the Heap_lock and |
460 // this card dirtying while holding the Heap_lock can be a |
464 // retry the allocation attempt, potentially scheduling a GC |
461 // scalability bottleneck, especially given that some allocation |
465 // pause. This should only be used for non-humongous allocations. |
462 // requests might be of non-trivial size (and the larger the region |
466 HeapWord* attempt_allocation_slow(size_t word_size, |
463 // size is, the fewer allocation requests will be considered |
467 unsigned int* gc_count_before_ret); |
464 // humongous, as the humongous size limit is a fraction of the |
468 |
465 // region size). So, when one of these calls succeeds in allocating |
469 // Takes the Heap_lock and attempts a humongous allocation. It can |
466 // a block it does the card dirtying after it releases the Heap_lock |
470 // potentially schedule a GC pause. |
467 // which is why it will return without holding it. |
|
468 // |
|
469 // The above asymmetry is the reason why locking / unlocking is done |
|
470 // explicitly (i.e., with Heap_lock->lock() and |
|
471 // Heap_lock->unlock()) instead of using MutexLocker and |
|
472 // MutexUnlocker objects. The latter would ensure that the lock is |
|
473 // unlocked / re-locked at every possible exit out of the basic |
|
474 // block. However, we only want that action to happen in selected |
|
475 // places. |
|
476 // |
|
477 // Further, if the above methods are called during a safepoint, then |
|
478 // naturally there's no assumption about the Heap_lock being held or |
|
479 // there's no attempt to unlock it. The parameter at_safepoint |
|
480 // indicates whether the call is made during a safepoint or not (as |
|
481 // an optimization, to avoid reading the global flag with |
|
482 // SafepointSynchronize::is_at_safepoint()). |
|
483 // |
|
484 // The methods share these parameters: |
|
485 // |
|
486 // * word_size : the size of the allocation request in words |
|
487 // * at_safepoint : whether the call is done at a safepoint; this |
|
488 // also determines whether a GC is permitted |
|
489 // (at_safepoint == false) or not (at_safepoint == true) |
|
490 // * do_dirtying : whether the method should dirty the allocated |
|
491 // block before returning |
|
492 // |
|
493 // They all return either the address of the block, if they |
|
494 // successfully manage to allocate it, or NULL. |
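The locking asymmetry spelled out above (release the Heap_lock on a successful allocation, keep holding it on NULL, and do the card dirtying only after the release) can be shown with a small standalone model; heap_lock and both helpers below are illustrative stand-ins, not the HotSpot API.

// Standalone model (not HotSpot code) of the asymmetric contract: the caller
// enters holding the heap lock; on success the callee releases it and only
// then dirties the cards covering the new block; on failure it returns with
// the lock still held.
#include <cstddef>
#include <mutex>

std::mutex heap_lock;                                            // stand-in for Heap_lock

void* allocate_with_heap_lock_held(size_t) { return nullptr; }   // stub for the sketch
void  dirty_young_cards(void*, size_t)     { }                   // stub for the sketch

// Precondition:  heap_lock is held by the calling thread.
// Postcondition: heap_lock has been released iff the result is non-null.
void* attempt_allocation_locked_model(size_t word_size) {
  void* block = allocate_with_heap_lock_held(word_size);
  if (block != nullptr) {
    heap_lock.unlock();                    // drop the lock first ...
    dirty_young_cards(block, word_size);   // ... then do the potentially large
    return block;                          //     card dirtying outside of it
  }
  return nullptr;                          // failure: the caller still holds heap_lock
}

Because the release happens only on the success path, RAII helpers that act at every scope exit do not fit, which is what the comment's remark about explicit lock()/unlock() versus MutexLocker/MutexUnlocker is getting at.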
|
495 |
|
496 // It tries to satisfy an allocation request out of the current |
|
497 // alloc region, which is passed as a parameter. It assumes that the |
|
498 // caller has checked that the current alloc region is not NULL. |
|
499 // Given that the caller has to check the current alloc region for |
|
500 // NULL anyway, it might as well pass it as the first parameter so |
|
501 // that the method doesn't have to read it from the |
|
502 // _cur_alloc_region field again. It is called from both |
|
503 // attempt_allocation() and attempt_allocation_locked() and the |
|
504 // with_heap_lock parameter indicates whether the caller was holding |
|
505 // the heap lock when it called it or not. |
|
506 inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region, |
|
507 size_t word_size, |
|
508 bool with_heap_lock); |
|
509 |
|
510 // First-level of allocation slow path: it attempts to allocate out |
|
511 // of the current alloc region in a lock-free manner using a CAS. If |
|
512 // that fails it takes the Heap_lock and calls |
|
513 // attempt_allocation_locked() for the second-level slow path. |
|
514 inline HeapWord* attempt_allocation(size_t word_size); |
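The "lock-free manner using a CAS" mentioned above is a parallel bump of the region's top pointer. A minimal self-contained sketch of that idea follows; it is not the actual HeapRegion code, the type and field names are illustrative, and word sizes are modeled as multiples of sizeof(void*).

#include <atomic>
#include <cstddef>

struct AllocRegion {
  char*              bottom;
  char*              end;
  std::atomic<char*> top;

  // Try to carve word_size words out of the region with a single CAS; return
  // nullptr when the region is too full, so the caller can take the slow path.
  void* par_allocate(size_t word_size) {
    size_t bytes = word_size * sizeof(void*);
    char* old_top = top.load(std::memory_order_relaxed);
    for (;;) {
      if (bytes > (size_t)(end - old_top)) {
        return nullptr;                          // not enough room left
      }
      char* new_top = old_top + bytes;
      if (top.compare_exchange_weak(old_top, new_top)) {
        return old_top;                          // CAS succeeded: old_top is the block
      }
      // CAS failed: old_top now holds the refreshed value; just retry.
    }
  }
};

If the CAS loop returns nullptr, the caller falls back to the locked second-level path, exactly as the comment describes.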
|
515 |
|
516 // Second-level of allocation slow path: while holding the Heap_lock |
|
517 // it tries to allocate out of the current alloc region and, if that |
|
518 // fails, tries to allocate out of a new current alloc region. |
|
519 inline HeapWord* attempt_allocation_locked(size_t word_size); |
|
520 |
|
521 // It assumes that the current alloc region has been retired and |
|
522 // tries to allocate a new one. If it's successful, it performs the |
|
523 // allocation out of the new current alloc region and updates |
|
524 // _cur_alloc_region. Normally, it would try to allocate a new |
|
525 // region if the young gen is not full, unless can_expand is true in |
|
526 // which case it would always try to allocate a new region. |
|
527 HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size, |
|
528 bool at_safepoint, |
|
529 bool do_dirtying, |
|
530 bool can_expand); |
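A rough illustrative model of the step described above: the old current alloc region is assumed to be retired already, a fresh one is requested only if the young gen still has room or can_expand is true, and on success the first block is carved out of it and the field is updated. The "young gen is full" test, the do_expand pairing, and all types and helpers below are assumptions for the sketch, not the real code.

#include <cstddef>

struct Region {
  char* bottom;
  char* top;
  char* end;
};

struct YoungGenModel {
  Region* cur_alloc_region  = nullptr;           // stand-in for _cur_alloc_region
  size_t  young_regions     = 0;
  size_t  max_young_regions = 0;

  Region* new_region(size_t, bool) { return nullptr; }   // stub (see earlier sketch)

  void* replace_cur_alloc_region_and_allocate(size_t word_size, bool can_expand) {
    bool young_full = young_regions >= max_young_regions;
    if (young_full && !can_expand) {
      return nullptr;                            // let the caller fall back to a GC
    }
    Region* r = new_region(word_size, /* do_expand */ can_expand);
    if (r == nullptr) {
      return nullptr;
    }
    void* block = r->top;                        // bump-pointer allocate the first block
    r->top += word_size * sizeof(void*);
    cur_alloc_region = r;                        // publish the new current alloc region
    young_regions += 1;
    return block;
  }
};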
|
531 |
|
532 // Third-level of allocation slow path: when we are unable to |
|
533 // allocate a new current alloc region to satisfy an allocation |
|
534 // request (i.e., when attempt_allocation_locked() fails). It will |
|
535 // try to do an evacuation pause, which might stall due to the GC |
|
536 // locker, and retry the allocation attempt when appropriate. |
|
537 HeapWord* attempt_allocation_slow(size_t word_size); |
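The comment sketches a stall-or-collect-then-retry loop. The snippet below shows only that shape; every helper is a hypothetical stand-in (stubbed so it compiles), and the bounded retry count is an arbitrary choice for the sketch, not the real policy.

#include <cstddef>

void* try_allocate_locked(size_t)    { return nullptr; }   // second-level attempt
bool  gc_locker_is_active()          { return false; }     // a JNI critical section is open
void  stall_until_gc_locker_clears() { }                   // cannot GC yet: wait it out
void  do_evacuation_pause(size_t)    { }                   // collect to free up regions

void* attempt_allocation_slow_model(size_t word_size) {
  for (int attempt = 0; attempt < 3; ++attempt) {
    void* result = try_allocate_locked(word_size);
    if (result != nullptr) {
      return result;
    }
    if (gc_locker_is_active()) {
      stall_until_gc_locker_clears();            // GC locker held: stall, then retry
    } else {
      do_evacuation_pause(word_size);            // try to reclaim space, then retry
    }
  }
  return nullptr;                                // give up; caller reports failure
}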
|
538 |
|
539 // The method that tries to satisfy a humongous allocation |
|
540 // request. If it cannot satisfy it, it will try to do an evacuation |
|
541 // pause to perhaps reclaim enough space to be able to satisfy the |
|
542 // allocation request afterwards. |
|
543 HeapWord* attempt_allocation_humongous(size_t word_size, |
471 HeapWord* attempt_allocation_humongous(size_t word_size, |
544 bool at_safepoint); |
472 unsigned int* gc_count_before_ret); |
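The gc_count_before_ret convention described at the top of this block is easy to see in a small model: the failing path already holds the heap lock, so it records the collection count there instead of making the caller re-take the lock just to read it. Everything below is an illustrative stand-in, not the HotSpot API.

#include <cstddef>
#include <mutex>

std::mutex   heap_lock;                          // stand-in for Heap_lock
unsigned int total_collections = 0;              // only read/written under heap_lock

void* try_allocate_no_gc(size_t) { return nullptr; }   // stub fast path

void* attempt_allocation_model(size_t word_size, unsigned int* gc_count_before_ret) {
  void* result = try_allocate_no_gc(word_size);
  if (result != nullptr) {
    return result;                               // success: the count is irrelevant
  }
  // Failure path: the lock is needed here anyway, so read the count now rather
  // than forcing the caller (mem_allocate() in the real code) to take it again.
  std::lock_guard<std::mutex> ml(heap_lock);
  *gc_count_before_ret = total_collections;
  return nullptr;
}

Typically the caller then hands the recorded count to the GC request, so a pause that another thread has already completed in the meantime can be detected and skipped.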
545 |
473 |
546 // It does the common work when we are retiring the current alloc region. |
474 // Allocation attempt that should be called during safepoints (e.g., |
547 inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region); |
475 // at the end of a successful GC). expect_null_mutator_alloc_region |
548 |
476 // specifies whether the mutator alloc region is expected to be NULL |
549 // It retires the current alloc region, which is passed as a |
477 // or not. |
550 // parameter (since, typically, the caller is already holding on to |
|
551 // it). It sets _cur_alloc_region to NULL. |
|
552 void retire_cur_alloc_region(HeapRegion* cur_alloc_region); |
|
553 |
|
554 // It attempts to do an allocation immediately before or after an |
|
555 // evacuation pause and can only be called by the VM thread. It has |
|
556 // slightly different assumptions than the ones before (i.e., |
|
557 // assumes that the current alloc region has been retired). |
|
558 HeapWord* attempt_allocation_at_safepoint(size_t word_size, |
478 HeapWord* attempt_allocation_at_safepoint(size_t word_size, |
559 bool expect_null_cur_alloc_region); |
479 bool expect_null_mutator_alloc_region); |
560 |
480 |
561 // It dirties the cards that cover the block so that the post |
481 // It dirties the cards that cover the block so that the post |
562 // write barrier never queues anything when updating objects on this |
482 // write barrier never queues anything when updating objects on this |
563 // block. It is assumed (and in fact we assert) that the block |
483 // block. It is assumed (and in fact we assert) that the block |
564 // belongs to a young region. |
484 // belongs to a young region. |
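To make the card-dirtying idea concrete, here is a standalone sketch of what "dirtying the cards that cover the block" amounts to: every card spanning the block is marked dirty up front so the post write barrier's dirty-card check short-circuits for stores into it. The 512-byte card size matches HotSpot, but the table layout, the dirty-value encoding, and the byte-sized interface are illustrative simplifications, not the real card table code.

#include <cstddef>
#include <cstdint>
#include <cstring>

const size_t  card_shift = 9;                    // 512-byte cards
const uint8_t dirty_card = 0;                    // illustrative encoding

void dirty_young_block(uint8_t* card_table, const char* heap_base,
                       const char* start, size_t size_in_bytes) {
  if (size_in_bytes == 0) return;                // nothing to dirty
  size_t first = (size_t)(start - heap_base) >> card_shift;
  size_t last  = (size_t)(start + size_in_bytes - 1 - heap_base) >> card_shift;
  // Pre-dirty every card covering [start, start + size_in_bytes): the post
  // write barrier tests the card first and enqueues nothing if it is already dirty.
  memset(&card_table[first], dirty_card, last - first + 1);
}

This pre-dirtying is only done for young blocks because, as the earlier comment notes, RSets are not updated for references whose source is a young region, so dirty cards on young regions never need to be looked at.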