816 } |
816 } |
817 |
817 |
818 // This version requires locking. |
818 // This version requires locking. |
819 inline HeapWord* ContiguousSpace::allocate_impl(size_t size, |
819 inline HeapWord* ContiguousSpace::allocate_impl(size_t size, |
820 HeapWord* const end_value) { |
820 HeapWord* const end_value) { |
|
821 // In G1 there are places where a GC worker can allocate into a |
|
822 // region using this serial allocation code without being prone to a |
|
823 // race with other GC workers (we ensure that no other GC worker can |
|
824 // access the same region at the same time). So the assert below is |
|
825 // too strong in the case of G1. |
821 assert(Heap_lock->owned_by_self() || |
826 assert(Heap_lock->owned_by_self() || |
822 (SafepointSynchronize::is_at_safepoint() && |
827 (SafepointSynchronize::is_at_safepoint() && |
823 Thread::current()->is_VM_thread()), |
828 (Thread::current()->is_VM_thread() || UseG1GC)), |
824 "not locked"); |
829 "not locked"); |
825 HeapWord* obj = top(); |
830 HeapWord* obj = top(); |
826 if (pointer_delta(end_value, obj) >= size) { |
831 if (pointer_delta(end_value, obj) >= size) { |
827 HeapWord* new_top = obj + size; |
832 HeapWord* new_top = obj + size; |
828 set_top(new_top); |
833 set_top(new_top); |