71 assert(0 <= _index && _index <= _sz, "Invariant."); |
71 assert(0 <= _index && _index <= _sz, "Invariant."); |
72 } |
72 } |
73 |
73 |
74 void PtrQueue::locking_enqueue_completed_buffer(void** buf) { |
74 void PtrQueue::locking_enqueue_completed_buffer(void** buf) { |
75 assert(_lock->owned_by_self(), "Required."); |
75 assert(_lock->owned_by_self(), "Required."); |
|
76 |
|
77 // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before |
|
78 // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they |
|
79 // have the same rank and we may get the "possible deadlock" message |
76 _lock->unlock(); |
80 _lock->unlock(); |
|
81 |
77 qset()->enqueue_complete_buffer(buf); |
82 qset()->enqueue_complete_buffer(buf); |
78 // We must relock only because the caller will unlock, for the normal |
83 // We must relock only because the caller will unlock, for the normal |
79 // case. |
84 // case. |
80 _lock->lock_without_safepoint_check(); |
85 _lock->lock_without_safepoint_check(); |
81 } |
86 } |
138 assert(0 == _index, "Precondition."); |
143 assert(0 == _index, "Precondition."); |
139 // This thread records the full buffer and allocates a new one (while |
144 // This thread records the full buffer and allocates a new one (while |
140 // holding the lock if there is one). |
145 // holding the lock if there is one). |
141 if (_buf != NULL) { |
146 if (_buf != NULL) { |
142 if (_lock) { |
147 if (_lock) { |
143 locking_enqueue_completed_buffer(_buf); |
148 assert(_lock->owned_by_self(), "Required."); |
|
149 |
|
150 // The current PtrQ may be the shared dirty card queue and |
|
151 // may be being manipulated by more than one worker thread |
|
152 // during a pause. Since the enqueuing of the completed |
|
153 // buffer unlocks the Shared_DirtyCardQ_lock more than one |
|
154 // worker thread can 'race' on reading the shared queue attributes |
|
155 // (_buf and _index) and multiple threads can call into this |
|
156 // routine for the same buffer. This will cause the completed |
|
157 // buffer to be added to the CBL multiple times. |
|
158 |
|
159 // We "claim" the current buffer by caching value of _buf in |
|
160 // a local and clearing the field while holding _lock. When |
|
161 // _lock is released (while enqueueing the completed buffer) |
|
162 // the thread that acquires _lock will skip this code, |
|
163 // preventing the subsequent multiple enqueue, and |
|
164 // install a newly allocated buffer below. |
|
165 |
|
166 void** buf = _buf; // local pointer to completed buffer |
|
167 _buf = NULL; // clear shared _buf field |
|
168 |
|
169 locking_enqueue_completed_buffer(buf); // enqueue completed buffer |
|
170 |
|
171 // While the current thread was enqueuing the buffer another thread |
|
172 // may have allocated a new buffer and inserted it into this pointer |
|
173 // queue. If that happens then we just return so that the current |
|
174 // thread doesn't overwrite the buffer allocated by the other thread |
|
175 // and potentially lose some dirtied cards. |
|
176 |
|
177 if (_buf != NULL) return; |
144 } else { |
178 } else { |
145 if (qset()->process_or_enqueue_complete_buffer(_buf)) { |
179 if (qset()->process_or_enqueue_complete_buffer(_buf)) { |
146 // Recycle the buffer. No allocation. |
180 // Recycle the buffer. No allocation. |
147 _sz = qset()->buffer_size(); |
181 _sz = qset()->buffer_size(); |
148 _index = _sz; |
182 _index = _sz; |