#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "memory/space.hpp"
#include "runtime/atomic.inline.hpp"
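
// Both allocate_impl() and par_allocate_impl() below use bump-pointer
// allocation: top() is advanced by "size" words when the request fits
// between the current top() and end_value.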
// This version requires locking.
inline HeapWord* G1OffsetTableContigSpace::allocate_impl(size_t size,
                                                         HeapWord* const end_value) {
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* G1OffsetTableContigSpace::par_allocate_impl(size_t size,
                                                             HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the current top value; the exchange failed and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

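// Allocates and records the new block in the block offset table ("BOT") so
// that block_start() can later locate object starts. Not MT-safe.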
inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = allocate_impl(size, end());
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with
// allocations, we serialize these under a lock. It is therefore best to
// use this for larger LAB allocations only.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  return allocate(size);
}

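// Block-start lookups simply delegate to the block offset table.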
inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

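// Every block below top() is an object; the block starting at top() is the
// region's unallocated tail (see block_size() below).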
inline bool
HeapRegion::block_is_obj(const HeapWord* p) const {
  return p < top();
}

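// Returns the size of the block starting at addr: the object's size when
// addr is below top(), otherwise the distance from top() to end() (the
// region's unallocated tail is treated as a single block).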
inline size_t
HeapRegion::block_size(const HeapWord *addr) const {
  const HeapWord* current_top = top();
  if (addr < current_top) {
    return oop(addr)->size();
  } else {
    assert(addr == current_top, "just checking");
    return pointer_delta(end(), addr);
  }
}

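// The *_no_bot_updates allocators skip the BOT update. As the asserts below
// document, this is only done for young regions, whose BOT entries G1 does
// not rely on.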
inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return par_allocate_impl(word_size, end());
}

inline HeapWord* HeapRegion::allocate_no_bot_updates(size_t word_size) {
  assert(is_young(), "we can only skip BOT updates on young regions");
  return allocate_impl(word_size, end());
}

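// Called when a marking cycle starts: resets the "next" marking data and
// records the current top as the next top-at-mark-start.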
inline void HeapRegion::note_start_of_marking() {
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();