@@ -55,29 +55,31 @@
   HeapWord* res = NULL;
 
   assert( SafepointSynchronize::is_at_safepoint() ||
           Heap_lock->owned_by_self(), "pre-condition of the call" );
 
-  if (_cur_alloc_region != NULL) {
-
+  // All humongous allocation requests should go through the slow path in
+  // attempt_allocation_slow().
+  if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
     // If this allocation causes a region to become non empty,
     // then we need to update our free_regions count.
 
     if (_cur_alloc_region->is_empty()) {
       res = _cur_alloc_region->allocate(word_size);
       if (res != NULL)
         _free_regions--;
     } else {
       res = _cur_alloc_region->allocate(word_size);
     }
-  }
-  if (res != NULL) {
-    if (!SafepointSynchronize::is_at_safepoint()) {
-      assert( Heap_lock->owned_by_self(), "invariant" );
-      Heap_lock->unlock();
-    }
-    return res;
-  }
+
+    if (res != NULL) {
+      if (!SafepointSynchronize::is_at_safepoint()) {
+        assert( Heap_lock->owned_by_self(), "invariant" );
+        Heap_lock->unlock();
+      }
+      return res;
+    }
+  }
   // attempt_allocation_slow will also unlock the heap lock when appropriate.
   return attempt_allocation_slow(word_size, permit_collection_pause);
 }
 
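The net effect of this hunk is that humongous requests never touch the current allocation region: they are routed to attempt_allocation_slow() (which also unlocks the Heap_lock when appropriate), and the early return for a successful fast-path allocation now sits inside the fast-path block. Below is a minimal, self-contained sketch of that fast-path/slow-path split. All names here (Region, Allocator, kHumongousWords) are invented for illustration, the threshold is assumed, and the Heap_lock/safepoint protocol is omitted; this is not the HotSpot implementation.

// Sketch only: invented types and threshold, no locking or safepoints.
#include <cstddef>
#include <cstdio>

const std::size_t kHumongousWords = 1024;   // assumed threshold, not G1's

struct Region {
  std::size_t used;
  std::size_t capacity;
  bool is_empty() const { return used == 0; }
  // Bump allocation; returns false when the request does not fit.
  bool allocate(std::size_t words) {
    if (used + words > capacity) return false;
    used += words;
    return true;
  }
};

struct Allocator {
  Region cur;                 // current allocation region (the fast path)
  std::size_t free_regions;   // count of completely empty regions

  static bool is_humongous(std::size_t words) {
    return words >= kHumongousWords;
  }

  // Fast path: only non-humongous requests may use the current region;
  // humongous requests (and fast-path misses) fall through to the slow path.
  bool attempt_allocation(std::size_t words) {
    if (!is_humongous(words) && cur.capacity != 0) {
      bool was_empty = cur.is_empty();
      if (cur.allocate(words)) {
        if (was_empty) {
          free_regions--;     // the region just became non-empty
        }
        return true;          // fast-path success, return immediately
      }
    }
    // The real slow path would handle new regions, humongous objects and
    // possible collection pauses; here it is only a stub.
    return attempt_allocation_slow(words);
  }

  bool attempt_allocation_slow(std::size_t words) {
    (void)words;
    return false;
  }
};

int main() {
  Allocator a = { Region{0, 4096}, 10 };
  std::printf("small request via fast path: %d\n", a.attempt_allocation(16));
  std::printf("humongous request via slow path: %d\n", a.attempt_allocation(2048));
  return 0;
}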