Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
1 /*
2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "utilities/macros.hpp"
27 #if INCLUDE_ALL_GCS
28 #include "utilities/yieldingWorkgroup.hpp"
29 #endif // INCLUDE_ALL_GCS
31 // Forward declaration of classes declared here.
33 class GangWorker;
34 class WorkData;
// Construct a yielding, flexible work gang named "name" with a fixed
// population of "workers" threads. Thread creation is delegated to the
// FlexibleWorkGang base class; the trailing "false" presumably selects
// non-ConcurrentGC-thread treatment — confirm against FlexibleWorkGang's
// constructor. No workers have yielded at construction time.
YieldingFlexibleWorkGang::YieldingFlexibleWorkGang(
    const char* name, uint workers, bool are_GC_task_threads) :
    FlexibleWorkGang(name, workers, are_GC_task_threads, false),
    _yielded_workers(0) {}
41 GangWorker* YieldingFlexibleWorkGang::allocate_worker(uint which) {
42 YieldingFlexibleGangWorker* new_member =
43 new YieldingFlexibleGangWorker(this, which);
44 return (YieldingFlexibleGangWorker*) new_member;
45 }
47 // Run a task; returns when the task is done, or the workers yield,
48 // or the task is aborted, or the work gang is terminated via stop().
49 // A task that has been yielded can be continued via this interface
50 // by using the same task repeatedly as the argument to the call.
51 // It is expected that the YieldingFlexibleGangTask carries the appropriate
52 // continuation information used by workers to continue the task
53 // from its last yield point. Thus, a completed task will return
54 // immediately with no actual work having been done by the workers.
55 /////////////////////
 56 // Implementation notes: remove before checking XXX
57 /*
58 Each gang is working on a task at a certain time.
59 Some subset of workers may have yielded and some may
60 have finished their quota of work. Until this task has
61 been completed, the workers are bound to that task.
 62    Once the task has been completed, the gang unbinds
 63    itself from the task.
 65    The yielding work gang thus exports two invocation
66 interfaces: run_task() and continue_task(). The
67 first is used to initiate a new task and bind it
68 to the workers; the second is used to continue an
69 already bound task that has yielded. Upon completion
70 the binding is released and a new binding may be
71 created.
73 The shape of a yielding work gang is as follows:
75 Overseer invokes run_task(*task).
76 Lock gang monitor
77 Check that there is no existing binding for the gang
78 If so, abort with an error
79 Else, create a new binding of this gang to the given task
80 Set number of active workers (as asked)
81 Notify workers that work is ready to be done
82 [the requisite # workers would then start up
83 and do the task]
84 Wait on the monitor until either
85 all work is completed or the task has yielded
86 -- this is normally done through
87 yielded + completed == active
 88        [completed workers are reset to idle state by overseer?]
89 return appropriate status to caller
91 Overseer invokes continue_task(*task),
92 Lock gang monitor
93 Check that task is the same as current binding
94 If not, abort with an error
95 Else, set the number of active workers as requested?
96 Notify workers that they can continue from yield points
97 New workers can also start up as required
98 while satisfying the constraint that
99 active + yielded does not exceed required number
100 Wait (as above).
102 NOTE: In the above, for simplicity in a first iteration
103 our gangs will be of fixed population and will not
104 therefore be flexible work gangs, just yielding work
105 gangs. Once this works well, we will in a second
106    iteration/refinement introduce flexibility into
107 the work gang.
109 NOTE: we can always create a new gang per each iteration
110 in order to get the flexibility, but we will for now
111    desist from that simplified route.
113 */
114 /////////////////////
115 void YieldingFlexibleWorkGang::start_task(YieldingFlexibleGangTask* new_task) {
116 MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
117 assert(task() == NULL, "Gang currently tied to a task");
118 assert(new_task != NULL, "Null task");
119 // Bind task to gang
120 _task = new_task;
121 new_task->set_gang(this); // Establish 2-way binding to support yielding
122 _sequence_number++;
124 uint requested_size = new_task->requested_size();
125 assert(requested_size >= 0, "Should be non-negative");
126 if (requested_size != 0) {
127 _active_workers = MIN2(requested_size, total_workers());
128 } else {
129 _active_workers = active_workers();
130 }
131 new_task->set_actual_size(_active_workers);
132 new_task->set_for_termination(_active_workers);
134 assert(_started_workers == 0, "Tabula rasa non");
135 assert(_finished_workers == 0, "Tabula rasa non");
136 assert(_yielded_workers == 0, "Tabula rasa non");
137 yielding_task()->set_status(ACTIVE);
139 // Wake up all the workers, the first few will get to work,
140 // and the rest will go back to sleep
141 monitor()->notify_all();
142 wait_for_gang();
143 }
// Block the overseer (the caller of start_task()/continue_task()) until
// the bound task reaches a stable state: COMPLETED, YIELDED, or ABORTED.
// Must be entered with the gang monitor held; the monitor is released
// while waiting and re-held on return.
void YieldingFlexibleWorkGang::wait_for_gang() {

  assert(monitor()->owned_by_self(), "Data race");
  // Wait for task to complete or yield
  for (Status status = yielding_task()->status();
       status != COMPLETED && status != YIELDED && status != ABORTED;
       status = yielding_task()->status()) {
    assert(started_workers() <= active_workers(), "invariant");
    assert(finished_workers() <= active_workers(), "invariant");
    assert(yielded_workers() <= active_workers(), "invariant");
    monitor()->wait(Mutex::_no_safepoint_check_flag);
  }
  switch (yielding_task()->status()) {
    case COMPLETED:
    case ABORTED: {
      // Every active worker finished (normally or by unwinding from an
      // abort); dissolve the gang<->task binding for the next task.
      assert(finished_workers() == active_workers(), "Inconsistent status");
      assert(yielded_workers() == 0, "Invariant");
      reset();   // for next task; gang<->task binding released
      break;
    }
    case YIELDED: {
      // At least one worker is parked at a yield point; the binding is
      // retained so the task can be resumed via continue_task().
      assert(yielded_workers() > 0, "Invariant");
      assert(yielded_workers() + finished_workers() == active_workers(),
             "Inconsistent counts");
      break;
    }
    case ACTIVE:
    case INACTIVE:
    case COMPLETING:
    case YIELDING:
    case ABORTING:
    default:
      // Transient states cannot survive the wait loop above.
      ShouldNotReachHere();
  }
}
// Resume a previously yielded task. "gang_task" must be the very task
// currently bound to this gang. Re-activates the task, wakes the workers
// parked at their yield points, and blocks (in wait_for_gang()) until
// the task completes, yields again, or aborts.
void YieldingFlexibleWorkGang::continue_task(
    YieldingFlexibleGangTask* gang_task) {

  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(task() != NULL && task() == gang_task, "Incorrect usage");
  assert(_started_workers == _active_workers, "Precondition");
  assert(_yielded_workers > 0 && yielding_task()->status() == YIELDED,
         "Else why are we calling continue_task()");
  // Restart the yielded gang workers
  yielding_task()->set_status(ACTIVE);
  monitor()->notify_all();
  wait_for_gang();
}
195 void YieldingFlexibleWorkGang::reset() {
196 _started_workers = 0;
197 _finished_workers = 0;
198 yielding_task()->set_gang(NULL);
199 _task = NULL; // unbind gang from task
200 }
// Called by a worker (via YieldingFlexibleGangTask::yield()) that has
// reached a yield point in its work. Unless the task is already
// aborting, the worker parks here on the gang monitor and does not
// return until the task is continued (ACTIVE), aborted (ABORTING), or
// completing (COMPLETING).
void YieldingFlexibleWorkGang::yield() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
  if (yielding_task()->status() == ABORTING) {
    // Do not yield; we need to abort as soon as possible
    // XXX NOTE: This can cause a performance pathology in the
    // current implementation in Mustang, as of today, and
    // pre-Mustang in that as soon as an overflow occurs,
    // yields will not be honoured. The right way to proceed
    // of course is to fix bug # TBF, so that abort's cause
    // us to return at each potential yield point.
    return;
  }
  // Count this worker as yielded. If every active worker has now either
  // yielded or finished, the task as a whole becomes YIELDED and the
  // overseer waiting in wait_for_gang() is notified; otherwise the task
  // is merely YIELDING (more workers still running).
  if (++_yielded_workers + finished_workers() == active_workers()) {
    yielding_task()->set_status(YIELDED);
    monitor()->notify_all();
  } else {
    yielding_task()->set_status(YIELDING);
  }

  while (true) {
    switch (yielding_task()->status()) {
      case YIELDING:
      case YIELDED: {
        // Still parked at the yield point; wait for a state change.
        monitor()->wait(Mutex::_no_safepoint_check_flag);
        break;                            // from switch
      }
      case ACTIVE:
      case ABORTING:
      case COMPLETING: {
        // The overseer continued, aborted, or is completing the task;
        // un-count ourselves as yielded and rejoin the work.
        assert(_yielded_workers > 0, "Else why am i here?");
        _yielded_workers--;
        return;
      }
      case INACTIVE:
      case ABORTED:
      case COMPLETED:
      default: {
        // These states cannot be observed while a worker is parked here.
        ShouldNotReachHere();
      }
    }
  }
  // Only return is from inside switch statement above
  ShouldNotReachHere();
}
// Called by a worker (via YieldingFlexibleGangTask::abort()) to request
// that the bound task be aborted. Moves the task to ABORTING; if any
// workers were mid-yield (YIELDING), they are woken so they can unwind
// from their yield points promptly.
void YieldingFlexibleWorkGang::abort() {
  assert(task() != NULL, "Inconsistency; should have task binding");
  MutexLockerEx ml(monitor(), Mutex::_no_safepoint_check_flag);
  assert(yielded_workers() < active_workers(), "Consistency check");
#ifndef PRODUCT
  // Debug-only sanity check of the state machine: abort() may only be
  // invoked while work is in flight, never on a quiescent or finished task.
  switch (yielding_task()->status()) {
    // allowed states
    case ACTIVE:
    case ABORTING:
    case COMPLETING:
    case YIELDING:
      break;
    // not allowed states
    case INACTIVE:
    case ABORTED:
    case COMPLETED:
    case YIELDED:
    default:
      ShouldNotReachHere();
  }
#endif // !PRODUCT
  Status prev_status = yielding_task()->status();
  yielding_task()->set_status(ABORTING);
  if (prev_status == YIELDING) {
    assert(yielded_workers() > 0, "Inconsistency");
    // At least one thread has yielded, wake it up
    // so it can go back to waiting stations ASAP.
    monitor()->notify_all();
  }
}
280 ///////////////////////////////
281 // YieldingFlexibleGangTask
282 ///////////////////////////////
283 void YieldingFlexibleGangTask::yield() {
284 assert(gang() != NULL, "No gang to signal");
285 gang()->yield();
286 }
288 void YieldingFlexibleGangTask::abort() {
289 assert(gang() != NULL, "No gang to signal");
290 gang()->abort();
291 }
293 ///////////////////////////////
294 // YieldingFlexibleGangWorker
295 ///////////////////////////////
// Main loop of a yielding gang worker thread: under the gang monitor,
// repeatedly poll for a termination request or a new task (detected via
// the task sequence number), run the task's work (with the monitor
// dropped) when this worker is within the requisite number of active
// workers, and update the task's status when this worker's completion
// is significant (last to finish, or last non-yielded worker).
void YieldingFlexibleGangWorker::loop() {
  int previous_sequence_number = 0;
  Monitor* gang_monitor = gang()->monitor();
  MutexLockerEx ml(gang_monitor, Mutex::_no_safepoint_check_flag);
  WorkData data;
  int id;
  while (true) {
    // Check if there is work to do or if we have been asked
    // to terminate
    gang()->internal_worker_poll(&data);
    if (data.terminate()) {
      // We have been asked to terminate.
      assert(gang()->task() == NULL, "No task binding");
      // set_status(TERMINATED);
      return;
    } else if (data.task() != NULL &&
               data.sequence_number() != previous_sequence_number) {
      // There is work to be done.
      // First check if we need to become active or if there
      // are already the requisite number of workers
      if (gang()->started_workers() == yf_gang()->active_workers()) {
        // There are already enough workers, we do not need to
        // to run; fall through and wait on monitor.
      } else {
        // We need to pitch in and do the work.
        assert(gang()->started_workers() < yf_gang()->active_workers(),
               "Unexpected state");
        // Worker id is the start ordinal of this worker for this task.
        id = gang()->started_workers();
        gang()->internal_note_start();
        // Now, release the gang mutex and do the work.
        {
          MutexUnlockerEx mul(gang_monitor, Mutex::_no_safepoint_check_flag);
          data.task()->work(id);   // This might include yielding
        }
        // Reacquire monitor and note completion of this worker
        gang()->internal_note_finish();
        // Update status of task based on whether all workers have
        // finished or some have yielded
        assert(data.task() == gang()->task(), "Confused task binding");
        if (gang()->finished_workers() == yf_gang()->active_workers()) {
          // This worker is the last active worker to finish: resolve
          // the task's transient state to its terminal state and wake
          // the overseer waiting in wait_for_gang().
          switch (data.yf_task()->status()) {
            case ABORTING: {
              data.yf_task()->set_status(ABORTED);
              break;
            }
            case ACTIVE:
            case COMPLETING: {
              data.yf_task()->set_status(COMPLETED);
              break;
            }
            default:
              ShouldNotReachHere();
          }
          gang_monitor->notify_all();   // Notify overseer
        } else {   // at least one worker is still working or yielded
          assert(gang()->finished_workers() < yf_gang()->active_workers(),
                 "Counts inconsistent");
          switch (data.yf_task()->status()) {
            case ACTIVE: {
              // first, but not only thread to complete
              data.yf_task()->set_status(COMPLETING);
              break;
            }
            case YIELDING: {
              // If this worker's finish means every remaining active
              // worker has yielded, the task as a whole is YIELDED;
              // notify the overseer waiting in wait_for_gang().
              if (gang()->finished_workers() + yf_gang()->yielded_workers()
                  == yf_gang()->active_workers()) {
                data.yf_task()->set_status(YIELDED);
                gang_monitor->notify_all();   // notify overseer
              }
              break;
            }
            case ABORTING:
            case COMPLETING: {
              break;   // nothing to do
            }
            default:   // everything else: INACTIVE, YIELDED, ABORTED, COMPLETED
              ShouldNotReachHere();
          }
        }
      }
    }
    // Remember the sequence number
    previous_sequence_number = data.sequence_number();
    // Wait for more work
    gang_monitor->wait(Mutex::_no_safepoint_check_flag);
  }
}