src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp

author:      tschatzl
date:        Fri, 10 Oct 2014 15:51:58 +0200
changeset:   7257:e7d0505c8a30
parent:      7007:7df07d855c8e
child:       7535:7ae4e26cb1e0
permissions: -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for G1's auxiliary data structures. This causes a footprint regression in startup benchmarks: such workloads hardly touch that memory, so the operating system previously never had to commit the corresponding pages, whereas the unconditional zeroing now touches (and therefore commits) all of them. The fix is to do nothing when the initialization value of a data structure matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
Reviewed-by: jwilhelm, brutisso
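
The idea behind the fix can be illustrated with a minimal, self-contained sketch (hypothetical helper names, not the actual HotSpot patch for JDK-8059758): anonymous memory freshly committed from the operating system already reads as zero and is only backed by physical pages once it is written, so an explicit fill is only worth doing for non-zero initialization values.

    #include <string.h>
    #include <sys/mman.h>

    // Sketch only: a reserve-and-initialize helper illustrating the fix
    // described above. Freshly mapped anonymous pages are guaranteed to read
    // as zero and are only backed by physical memory once first touched, so a
    // zero fill value does not require (and should not force) touching them.
    static void* reserve_and_initialize(size_t bytes, unsigned char fill_value) {
      void* addr = mmap(NULL, bytes, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED) {
        return NULL;
      }
      if (fill_value != 0) {
        // Only a non-zero initialization value makes it necessary to touch,
        // and thereby actually commit, the memory.
        memset(addr, fill_value, bytes);
      }
      return addr;
    }

The file as it stands at this changeset follows.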

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MMUTracker.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/vmThread.hpp"

// ======= Concurrent Mark Thread ========

// The CM thread is created when the G1 garbage collector is used

SurrogateLockerThread*
     ConcurrentMarkThread::_slt = NULL;

ConcurrentMarkThread::ConcurrentMarkThread(ConcurrentMark* cm) :
  ConcurrentGCThread(),
  _cm(cm),
  _started(false),
  _in_progress(false),
  _vtime_accum(0.0),
  _vtime_mark_accum(0.0) {
  create_and_start();
}

class CMCheckpointRootsFinalClosure: public VoidClosure {

  ConcurrentMark* _cm;
public:

  CMCheckpointRootsFinalClosure(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->checkpointRootsFinal(false); // !clear_all_soft_refs
  }
};

class CMCleanUp: public VoidClosure {
  ConcurrentMark* _cm;
public:

  CMCleanUp(ConcurrentMark* cm) :
    _cm(cm) {}

  void do_void(){
    _cm->cleanup();
  }
};



void ConcurrentMarkThread::run() {
  initialize_in_thread();
  _vtime_start = os::elapsedVTime();
  wait_for_universe_init();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();
  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
  Thread *current_thread = Thread::current();

  while (!_should_terminate) {
    // wait until started is set.
    sleepBeforeNextCycle();
    if (_should_terminate) {
      break;
    }

    {
      ResourceMark rm;
      HandleMark hm;
      double cycle_start = os::elapsedVTime();

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions having been scanned, which would be a
      // correctness issue.

      double scan_start = os::elapsedTime();
      if (!cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
        }

        _cm->scanRootRegions();

        double scan_end = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                 scan_end - scan_start);
        }
      }

      double mark_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
      }

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        double mark_end_sec = os::elapsedTime();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
          if (g1_policy->adaptive_young_list_length()) {
            double now = os::elapsedTime();
            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
            os::sleep(current_thread, sleep_time_ms, false);
          }

          if (G1Log::fine()) {
            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
            gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                   mark_end_sec - mark_start_sec);
          }

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          if (G1TraceMarkStackOverflow) {
            gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
                                   "in remark (restart #%d).", iter);
          }
          if (G1Log::fine()) {
            gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
            gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
          }
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before doing this, since it will try
      // to measure it to get the vtime for this marking. We purposely
      // neglect the presumably-short "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
        if (g1_policy->adaptive_young_list_length()) {
          double now = os::elapsedTime();
          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
          os::sleep(current_thread, sleep_time_ms, false);
        }

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        SuspendibleThreadSetJoiner sts;
        g1h->set_marking_complete();
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        double cleanup_start_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
        }

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag) otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification
        // whereas this thread will be blocked for the pause to finish
        // while it's trying to join the STS, which is conditional on
        // the GC workers finishing.
        g1h->reset_free_regions_coming();

        double cleanup_end_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                                 cleanup_end_sec - cleanup_start_sec);
        }
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race between recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      {
        SuspendibleThreadSetJoiner sts;
        if (!cm()->has_aborted()) {
          g1_policy->record_concurrent_mark_cleanup_completed();
        }
      }

      if (cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->gclog_stamp(cm()->concurrent_gc_id());
          gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      // We may have aborted just before the remark. Do not bother clearing the
      // bitmap then, as it has been done during mark abort.
      if (!cm()->has_aborted()) {
        SuspendibleThreadSetJoiner sts;
        _cm->clearNextBitmap();
      } else {
        assert(!G1VerifyBitmaps || _cm->nextMarkBitmapIsClear(), "Next mark bitmap must be clear");
      }
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    {
      SuspendibleThreadSetJoiner sts;
      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
      g1h->register_concurrent_cycle_end();
    }
  }
  assert(_should_terminate, "just checking");

  terminate();
}

void ConcurrentMarkThread::stop() {
  {
    MutexLockerEx ml(Terminator_lock);
    _should_terminate = true;
  }

  {
    MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
    CGC_lock->notify_all();
  }

  {
    MutexLockerEx ml(Terminator_lock);
    while (!_has_terminated) {
      Terminator_lock->wait();
    }
  }
}

void ConcurrentMarkThread::print() const {
  print_on(tty);
}

void ConcurrentMarkThread::print_on(outputStream* st) const {
  st->print("\"G1 Main Concurrent Mark GC Thread\" ");
  Thread::print_on(st);
  st->cr();
}

void ConcurrentMarkThread::sleepBeforeNextCycle() {
  // We join here because we don't want to do the "shouldConcurrentMark()"
  // below while the world is otherwise stopped.
  assert(!in_progress(), "should have been cleared");

  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  while (!started() && !_should_terminate) {
    CGC_lock->wait(Mutex::_no_safepoint_check_flag);
  }

  if (started()) {
    set_in_progress();
    clear_started();
  }
}

// Note: As is the case with CMS - this method, although exported
// by the ConcurrentMarkThread, which is a non-JavaThread, can only
// be called by a JavaThread. Currently this is done at vm creation
// time (post-vm-init) by the main/Primordial (Java)Thread.
// XXX Consider changing this in the future to allow the CM thread
// itself to create this thread?
void ConcurrentMarkThread::makeSurrogateLockerThread(TRAPS) {
  assert(UseG1GC, "SLT thread needed only for concurrent GC");
  assert(THREAD->is_Java_thread(), "must be a Java thread");
  assert(_slt == NULL, "SLT already created");
  _slt = SurrogateLockerThread::make(THREAD);
}
