                                    bool is_noref,
                                    bool is_tlab, /* expected to be false */
                                    bool* gc_overhead_limit_was_exceeded);

  // The following methods, allocate_from_cur_alloc_region(),
  // attempt_allocation(), attempt_allocation_locked(),
  // replace_cur_alloc_region_and_allocate(),
  // attempt_allocation_slow(), and attempt_allocation_humongous()
  // have very awkward pre- and post-conditions with respect to
  // locking:
  //
  // If they are called outside a safepoint they assume the caller
  //
  // They all return either the address of the block, if they
  // successfully manage to allocate it, or NULL.

  // It tries to satisfy an allocation request out of the current
  // alloc region, which is passed as a parameter. It assumes that the
  // caller has checked that the current alloc region is not NULL.
  // Given that the caller has to check the current alloc region for
  // at least NULL, it might as well pass it as the first parameter so
  // that the method doesn't have to read it from the
  // _cur_alloc_region field again. It is called from both
  // attempt_allocation() and attempt_allocation_locked() and the
  // with_heap_lock parameter indicates whether the caller was holding
  // the heap lock when it called it or not.
  inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                  size_t word_size,
                                                  bool with_heap_lock);

  // First-level of allocation slow path: it attempts to allocate out
  // of the current alloc region in a lock-free manner using a CAS. If
  // that fails it takes the Heap_lock and calls
  // attempt_allocation_locked() for the second-level slow path.
  inline HeapWord* attempt_allocation(size_t word_size);

  // Second-level of allocation slow path: while holding the Heap_lock
  // it tries to allocate out of the current alloc region and, if that
  // fails, tries to allocate out of a new current alloc region.
  inline HeapWord* attempt_allocation_locked(size_t word_size);

  // It assumes that the current alloc region has been retired and
  // tries to allocate a new one. If it's successful, it performs the
  // allocation out of the new current alloc region and updates
  // _cur_alloc_region. Normally, it would try to allocate a new
  HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
                                                  bool at_safepoint,
                                                  bool do_dirtying,
                                                  bool can_expand);

  // Third-level of allocation slow path: when we are unable to
  // allocate a new current alloc region to satisfy an allocation
  // request (i.e., when attempt_allocation_locked() fails). It will
  // try to do an evacuation pause, which might stall due to the GC
  // locker, and retry the allocation attempt when appropriate.
  HeapWord* attempt_allocation_slow(size_t word_size);

  // The method that tries to satisfy a humongous allocation
  // request. If it cannot satisfy it it will try to do an evacuation
  // pause to perhaps reclaim enough space to be able to satisfy the