Thu, 13 Jun 2013 22:02:40 -0700
8014431: cleanup warnings indicated by the -Wunused-value compiler option on linux
Reviewed-by: dholmes, coleenp
Contributed-by: jeremymanson@google.com, calvin.cheung@oracle.com
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/shared/collectorCounters.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/resourceArea.hpp"
#include "memory/sharedHeap.hpp"
#include "memory/space.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/aprofiler.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "services/memoryService.hpp"
#include "utilities/vmError.hpp"
#include "utilities/workgroup.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
#endif // INCLUDE_ALL_GCS

GenCollectedHeap* GenCollectedHeap::_gch;
NOT_PRODUCT(size_t GenCollectedHeap::_skip_header_HeapWords = 0;)

// The set of potentially parallel tasks in strong root scanning.
enum GCH_process_strong_roots_tasks {
  // We probably want to parallelize both of these internally, but for now...
  GCH_PS_younger_gens,
  // Leave this one last.
  GCH_PS_NumElements
};

GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
  SharedHeap(policy),
  _gen_policy(policy),
  _gen_process_strong_tasks(new SubTasksDone(GCH_PS_NumElements)),
  _full_collections_completed(0)
{
  if (_gen_process_strong_tasks == NULL ||
      !_gen_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }
  assert(policy != NULL, "Sanity check");
}

jint GenCollectedHeap::initialize() {
  CollectedHeap::pre_initialize();

  int i;
  _n_gens = gen_policy()->number_of_generations();

  // While there are no constraints in the GC code that HeapWordSize
  // be any particular value, there are multiple other areas in the
  // system which believe this to be true (e.g. oop->object_size in some
  // cases incorrectly returns the size in wordSize units rather than
  // HeapWordSize).
  guarantee(HeapWordSize == wordSize, "HeapWordSize must equal wordSize");

  // The heap must be at least as aligned as generations.
  size_t alignment = Generation::GenGrain;

  _gen_specs = gen_policy()->generations();

  // Make sure the sizes are all aligned.
  for (i = 0; i < _n_gens; i++) {
    _gen_specs[i]->align(alignment);
  }

  // Allocate space for the heap.

  char* heap_address;
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  ReservedSpace heap_rs(0);

  heap_address = allocate(alignment, &total_reserved,
                          &n_covered_regions, &heap_rs);

  if (!heap_rs.is_reserved()) {
    vm_shutdown_during_initialization(
      "Could not reserve enough space for object heap");
    return JNI_ENOMEM;
  }

  _reserved = MemRegion((HeapWord*)heap_rs.base(),
                        (HeapWord*)(heap_rs.base() + heap_rs.size()));

  // It is important to do this in a way such that concurrent readers can't
  // temporarily think something is in the heap. (Seen this happen in asserts.)
  _reserved.set_word_size(0);
  _reserved.set_start((HeapWord*)heap_rs.base());
  size_t actual_heap_size = heap_rs.size();
  _reserved.set_end((HeapWord*)(heap_rs.base() + actual_heap_size));

  _rem_set = collector_policy()->create_rem_set(_reserved, n_covered_regions);
  set_barrier_set(rem_set()->bs());

  _gch = this;

  for (i = 0; i < _n_gens; i++) {
    ReservedSpace this_rs = heap_rs.first_part(_gen_specs[i]->max_size(), false, false);
    _gens[i] = _gen_specs[i]->init(this_rs, i, rem_set());
    heap_rs = heap_rs.last_part(_gen_specs[i]->max_size());
  }
  clear_incremental_collection_failed();

#if INCLUDE_ALL_GCS
  // If we are running CMS, create the collector responsible
  // for collecting the CMS generations.
  if (collector_policy()->is_concurrent_mark_sweep_policy()) {
    bool success = create_cms_collector();
    if (!success) return JNI_ENOMEM;
  }
#endif // INCLUDE_ALL_GCS

  return JNI_OK;
}
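
// Illustrative sketch (not part of this change): the generation loop above
// carves one contiguous reservation into per-generation pieces. first_part()
// returns the leading partition and last_part() the remainder, so each
// iteration peels one generation off the front. With hypothetical sizes:
//
//   ReservedSpace rs       = Universe::reserve_heap(young_size + old_size, alignment);
//   ReservedSpace young_rs = rs.first_part(young_size, false, false);  // [base, base+young_size)
//   ReservedSpace old_rs   = rs.last_part(young_size);                 // the remainder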

char* GenCollectedHeap::allocate(size_t alignment,
                                 size_t* _total_reserved,
                                 int* _n_covered_regions,
                                 ReservedSpace* heap_rs){
  const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                              "the maximum representable size";

  // Now figure out the total size.
  size_t total_reserved = 0;
  int n_covered_regions = 0;
  const size_t pageSize = UseLargePages ?
      os::large_page_size() : os::vm_page_size();

  for (int i = 0; i < _n_gens; i++) {
    total_reserved += _gen_specs[i]->max_size();
    if (total_reserved < _gen_specs[i]->max_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
    n_covered_regions += _gen_specs[i]->n_covered_regions();
  }
  assert(total_reserved % pageSize == 0,
         err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize="
                 SIZE_FORMAT, total_reserved, pageSize));

  // Needed until the cardtable is fixed to have the right number
  // of covered regions.
  n_covered_regions += 2;

  if (UseLargePages) {
    assert(total_reserved != 0, "total_reserved cannot be 0");
    total_reserved = round_to(total_reserved, os::large_page_size());
    if (total_reserved < os::large_page_size()) {
      vm_exit_during_initialization(overflow_msg);
    }
  }

  *_total_reserved = total_reserved;
  *_n_covered_regions = n_covered_regions;
  *heap_rs = Universe::reserve_heap(total_reserved, alignment);
  return heap_rs->base();
}
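
// Illustrative sketch (not part of this change): the sizing loop above
// detects size_t overflow by checking for wraparound after each addition;
// unsigned arithmetic wraps modulo 2^N, so an overflowed sum is smaller than
// either operand. A minimal standalone version of the check:
//
//   bool add_overflows(size_t a, size_t b) {
//     size_t sum = a + b;  // wraps on overflow
//     return sum < a;      // wrapped => the addition overflowed
//   }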

void GenCollectedHeap::post_initialize() {
  SharedHeap::post_initialize();
  TwoGenerationCollectorPolicy *policy =
      (TwoGenerationCollectorPolicy *)collector_policy();
  guarantee(policy->is_two_generation_policy(), "Illegal policy type");
  DefNewGeneration* def_new_gen = (DefNewGeneration*) get_gen(0);
  assert(def_new_gen->kind() == Generation::DefNew ||
         def_new_gen->kind() == Generation::ParNew ||
         def_new_gen->kind() == Generation::ASParNew,
         "Wrong generation kind");

  Generation* old_gen = get_gen(1);
  assert(old_gen->kind() == Generation::ConcurrentMarkSweep ||
         old_gen->kind() == Generation::ASConcurrentMarkSweep ||
         old_gen->kind() == Generation::MarkSweepCompact,
         "Wrong generation kind");

  policy->initialize_size_policy(def_new_gen->eden()->capacity(),
                                 old_gen->capacity(),
                                 def_new_gen->from()->capacity());
  policy->initialize_gc_policy_counters();
}

void GenCollectedHeap::ref_processing_init() {
  SharedHeap::ref_processing_init();
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor_init();
  }
}

size_t GenCollectedHeap::capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->capacity();
  }
  return res;
}

size_t GenCollectedHeap::used() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->used();
  }
  return res;
}

// Save the "used_region" for generations at and below "level".
void GenCollectedHeap::save_used_regions(int level) {
  assert(level < _n_gens, "Illegal level parameter");
  for (int i = level; i >= 0; i--) {
    _gens[i]->save_used_region();
  }
}

size_t GenCollectedHeap::max_capacity() const {
  size_t res = 0;
  for (int i = 0; i < _n_gens; i++) {
    res += _gens[i]->max_capacity();
  }
  return res;
}

// Update the _full_collections_completed counter
// at the end of a stop-world full GC.
unsigned int GenCollectedHeap::update_full_collections_completed() {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert(_full_collections_completed <= _total_full_collections,
         "Can't complete more collections than were started");
  _full_collections_completed = _total_full_collections;
  ml.notify_all();
  return _full_collections_completed;
}

// Update the _full_collections_completed counter, as appropriate,
// at the end of a concurrent GC cycle. Note the conditional update
// below to allow this method to be called by a concurrent collector
// without synchronizing in any manner with the VM thread (which
// may already have initiated a STW full collection "concurrently").
unsigned int GenCollectedHeap::update_full_collections_completed(unsigned int count) {
  MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
  assert((_full_collections_completed <= _total_full_collections) &&
         (count <= _total_full_collections),
         "Can't complete more collections than were started");
  if (count > _full_collections_completed) {
    _full_collections_completed = count;
    ml.notify_all();
  }
  return _full_collections_completed;
}
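
// Illustrative sketch (not part of this change): a concurrent collector can
// block until a given full-collection cycle completes by waiting on
// FullGCCount_lock, which the updaters above notify. The accessor name
// total_full_collections_completed() is assumed here; the real waiter lives
// in the concurrent VM operation code.
//
//   void wait_for_full_gc_cycle(GenCollectedHeap* gch, unsigned int count) {
//     MutexLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
//     while (gch->total_full_collections_completed() < count) {
//       FullGCCount_lock->wait(Mutex::_no_safepoint_check_flag);
//     }
//   }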

#ifndef PRODUCT
// Override of memory state checking method in CollectedHeap:
// Some collectors (CMS for example) can't have badHeapWordVal written
// in the first two words of an object. (For instance, in the case of
// CMS these words hold state used to synchronize between certain
// (concurrent) GC steps and direct allocating mutators.)
// The skip_header_HeapWords() method below allows us to skip
// over the requisite number of HeapWords. Note that (for
// generational collectors) this means that that many words are
// skipped in each object, irrespective of the generation in which
// that object lives. The resultant loss of precision seems to be
// harmless and the pain of avoiding that imprecision appears somewhat
// higher than we are prepared to pay for such rudimentary debugging
// support.
void GenCollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr,
                                                         size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    // We are asked to check a size in HeapWords,
    // but the memory is mangled in juint words.
    juint* start = (juint*) (addr + skip_header_HeapWords());
    juint* end   = (juint*) (addr + size);
    for (juint* slot = start; slot < end; slot += 1) {
      assert(*slot == badHeapWordVal,
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif

HeapWord* GenCollectedHeap::attempt_allocation(size_t size,
                                               bool is_tlab,
                                               bool first_only) {
  HeapWord* res;
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->should_allocate(size, is_tlab)) {
      res = _gens[i]->allocate(size, is_tlab);
      if (res != NULL) return res;
      else if (first_only) break;
    }
  }
  // Otherwise...
  return NULL;
}

HeapWord* GenCollectedHeap::mem_allocate(size_t size,
                                         bool* gc_overhead_limit_was_exceeded) {
  return collector_policy()->mem_allocate_work(size,
                                               false /* is_tlab */,
                                               gc_overhead_limit_was_exceeded);
}

bool GenCollectedHeap::must_clear_all_soft_refs() {
  return _gc_cause == GCCause::_last_ditch_collection;
}

bool GenCollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
  return UseConcMarkSweepGC &&
         ((cause == GCCause::_gc_locker && GCLockerInvokesConcurrent) ||
          (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
}

void GenCollectedHeap::do_collection(bool   full,
                                     bool   clear_all_soft_refs,
                                     size_t size,
                                     bool   is_tlab,
                                     int    max_level) {
  bool prepared_for_verification = false;
  ResourceMark rm;
  DEBUG_ONLY(Thread* my_thread = Thread::current();)

  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(my_thread->is_VM_thread() ||
         my_thread->is_ConcurrentGC_thread(),
         "incorrect thread type capability");
  assert(Heap_lock->is_locked(),
         "the requesting thread should have the Heap_lock");
  guarantee(!is_gc_active(), "collection is not reentrant");
  assert(max_level < n_gens(), "sanity check");

  if (GC_locker::check_active_before_gc()) {
    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
  }

  const bool do_clear_all_soft_refs = clear_all_soft_refs ||
                          collector_policy()->should_clear_all_soft_refs();

  ClearedAllSoftRefs casr(do_clear_all_soft_refs, collector_policy());

  const size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();

  print_heap_before_gc();

  {
    FlagSetting fl(_is_gc_active, true);

    bool complete = full && (max_level == (n_gens()-1));
    const char* gc_cause_prefix = complete ? "Full GC" : "GC";
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);

    gc_prologue(complete);
    increment_total_collections(complete);

    size_t gch_prev_used = used();

    int starting_level = 0;
    if (full) {
      // Search for the oldest generation which will collect all younger
      // generations, and start collection loop there.
      for (int i = max_level; i >= 0; i--) {
        if (_gens[i]->full_collects_younger_generations()) {
          starting_level = i;
          break;
        }
      }
    }

    bool must_restore_marks_for_biased_locking = false;

    int max_level_collected = starting_level;
    for (int i = starting_level; i <= max_level; i++) {
      if (_gens[i]->should_collect(full, size, is_tlab)) {
        if (i == n_gens() - 1) {  // a major collection is to happen
          if (!complete) {
            // The full_collections increment was missed above.
            increment_total_full_collections();
          }
          pre_full_gc_dump(NULL);   // do any pre full gc dumps
        }
        // Timer for individual generations. Last argument is false: no CR
        // FIXME: We should try to start the timing earlier to cover more of the GC pause
        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
        TraceCollectorStats tcs(_gens[i]->counters());
        TraceMemoryManagerStats tmms(_gens[i]->kind(), gc_cause());

        size_t prev_used = _gens[i]->used();
        _gens[i]->stat_record()->invocations++;
        _gens[i]->stat_record()->accumulated_time.start();

        // Must be done anew before each collection because
        // a previous collection will do mangling and will
        // change top of some spaces.
        record_gen_tops_before_GC();

        if (PrintGC && Verbose) {
          gclog_or_tty->print("level=%d invoke=%d size=" SIZE_FORMAT,
                              i,
                              _gens[i]->stat_record()->invocations,
                              size*HeapWordSize);
        }

        if (VerifyBeforeGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          if (!prepared_for_verification) {
            prepare_for_verify();
            prepared_for_verification = true;
          }
          Universe::verify(" VerifyBeforeGC:");
        }
        COMPILER2_PRESENT(DerivedPointerTable::clear());

        if (!must_restore_marks_for_biased_locking &&
            _gens[i]->performs_in_place_marking()) {
          // We perform this mark word preservation work lazily
          // because it's only at this point that we know whether we
          // absolutely have to do it; we want to avoid doing it for
          // scavenge-only collections where it's unnecessary
          must_restore_marks_for_biased_locking = true;
          BiasedLocking::preserve_marks();
        }

        // Do collection work
        {
          // Note on ref discovery: For what appear to be historical reasons,
          // GCH enables and disables (by enqueueing) ref discovery.
          // In the future this should be moved into the generation's
          // collect method so that ref discovery and enqueueing concerns
          // are local to a generation. The collect method could return
          // an appropriate indication in the case that notification on
          // the ref lock was needed. This will make the treatment of
          // weak refs more uniform (and indeed remove such concerns
          // from GCH). XXX

          HandleMark hm;  // Discard invalid handles created during gc
          save_marks();   // save marks for all gens
          // We want to discover references, but not process them yet.
          // This mode is disabled in process_discovered_references if the
          // generation does some collection work, or in
          // enqueue_discovered_references if the generation returns
          // without doing any work.
          ReferenceProcessor* rp = _gens[i]->ref_processor();
          // If the discovery of ("weak") refs in this generation is
          // atomic wrt other collectors in this configuration, we
          // are guaranteed to have empty discovered ref lists.
          if (rp->discovery_is_atomic()) {
            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
            rp->setup_policy(do_clear_all_soft_refs);
          } else {
            // collect() below will enable discovery as appropriate
          }
          _gens[i]->collect(full, do_clear_all_soft_refs, size, is_tlab);
          if (!rp->enqueuing_is_done()) {
            rp->enqueue_discovered_references();
          } else {
            rp->set_enqueuing_is_done(false);
          }
          rp->verify_no_references_recorded();
        }
        max_level_collected = i;

        // Determine if allocation request was met.
        if (size > 0) {
          if (!is_tlab || _gens[i]->supports_tlab_allocation()) {
            if (size*HeapWordSize <= _gens[i]->unsafe_max_alloc_nogc()) {
              size = 0;
            }
          }
        }

        COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

        _gens[i]->stat_record()->accumulated_time.stop();

        update_gc_stats(i, full);

        if (VerifyAfterGC && i >= VerifyGCLevel &&
            total_collections() >= VerifyGCStartAt) {
          HandleMark hm;  // Discard invalid handles created during verification
          Universe::verify(" VerifyAfterGC:");
        }

        if (PrintGCDetails) {
          gclog_or_tty->print(":");
          _gens[i]->print_heap_change(prev_used);
        }
      }
    }

    // Update "complete" boolean wrt what actually transpired --
    // for instance, a promotion failure could have led to
    // a whole heap collection.
    complete = complete || (max_level_collected == n_gens() - 1);

    if (complete) { // We did a "major" collection
      // FIXME: See comment at pre_full_gc_dump call
      post_full_gc_dump(NULL);   // do any post full gc dumps
    }

    if (PrintGCDetails) {
      print_heap_change(gch_prev_used);

      // Print metaspace info for full GC with PrintGCDetails flag.
      if (complete) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    for (int j = max_level_collected; j >= 0; j -= 1) {
      // Adjust generation sizes.
      _gens[j]->compute_new_size();
    }

    if (complete) {
      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
      ClassLoaderDataGraph::purge();
      MetaspaceAux::verify_metrics();
      // Resize the metaspace capacity after full collections
      MetaspaceGC::compute_new_size();
      update_full_collections_completed();
    }

    // Track memory usage and detect low memory after GC finishes
    MemoryService::track_memory_usage();

    gc_epilogue(complete);

    if (must_restore_marks_for_biased_locking) {
      BiasedLocking::restore_marks();
    }
  }

  AdaptiveSizePolicy* sp = gen_policy()->size_policy();
  AdaptiveSizePolicyOutput(sp, total_collections());

  print_heap_after_gc();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}

HeapWord* GenCollectedHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
  return collector_policy()->satisfy_failed_allocation(size, is_tlab);
}

void GenCollectedHeap::set_par_threads(uint t) {
  SharedHeap::set_par_threads(t);
  _gen_process_strong_tasks->set_n_threads(t);
}

void GenCollectedHeap::
gen_process_strong_roots(int level,
                         bool younger_gens_as_roots,
                         bool activate_scope,
                         bool is_scavenging,
                         SharedHeap::ScanningOption so,
                         OopsInGenClosure* not_older_gens,
                         bool do_code_roots,
                         OopsInGenClosure* older_gens,
                         KlassClosure* klass_closure) {
  // General strong roots.

  if (!do_code_roots) {
    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
                                     not_older_gens, NULL, klass_closure);
  } else {
    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
    SharedHeap::process_strong_roots(activate_scope, is_scavenging, so,
                                     not_older_gens, &code_roots, klass_closure);
  }

  if (younger_gens_as_roots) {
    if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
      for (int i = 0; i < level; i++) {
        not_older_gens->set_generation(_gens[i]);
        _gens[i]->oop_iterate(not_older_gens);
      }
      not_older_gens->reset_generation();
    }
  }
  // When collection is parallel, all threads get to cooperate to do
  // older-gen scanning.
  for (int i = level+1; i < _n_gens; i++) {
    older_gens->set_generation(_gens[i]);
    rem_set()->younger_refs_iterate(_gens[i], older_gens);
    older_gens->reset_generation();
  }

  _gen_process_strong_tasks->all_tasks_completed();
}

void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
                                              CodeBlobClosure* code_roots) {
  SharedHeap::process_weak_roots(root_closure, code_roots);
  // "Local" "weak" refs
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->ref_processor()->weak_oops_do(root_closure);
  }
}

#define GCH_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)    \
void GenCollectedHeap::                                                 \
oop_since_save_marks_iterate(int level,                                 \
                             OopClosureType* cur,                       \
                             OopClosureType* older) {                   \
  _gens[level]->oop_since_save_marks_iterate##nv_suffix(cur);           \
  for (int i = level+1; i < n_gens(); i++) {                            \
    _gens[i]->oop_since_save_marks_iterate##nv_suffix(older);           \
  }                                                                     \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(GCH_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef GCH_SINCE_SAVE_MARKS_ITERATE_DEFN
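
// Illustrative expansion (not part of this change): for a hypothetical
// closure type SomeOopClosure with nv_suffix "_nv", the macro above
// generates an overload of roughly this shape:
//
//   void GenCollectedHeap::
//   oop_since_save_marks_iterate(int level,
//                                SomeOopClosure* cur,
//                                SomeOopClosure* older) {
//     _gens[level]->oop_since_save_marks_iterate_nv(cur);
//     for (int i = level+1; i < n_gens(); i++) {
//       _gens[i]->oop_since_save_marks_iterate_nv(older);
//     }
//   }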

bool GenCollectedHeap::no_allocs_since_save_marks(int level) {
  for (int i = level; i < _n_gens; i++) {
    if (!_gens[i]->no_allocs_since_save_marks()) return false;
  }
  return true;
}

bool GenCollectedHeap::supports_inline_contig_alloc() const {
  return _gens[0]->supports_inline_contig_alloc();
}

HeapWord** GenCollectedHeap::top_addr() const {
  return _gens[0]->top_addr();
}

HeapWord** GenCollectedHeap::end_addr() const {
  return _gens[0]->end_addr();
}

size_t GenCollectedHeap::unsafe_max_alloc() {
  return _gens[0]->unsafe_max_alloc_nogc();
}

// public collection interfaces

void GenCollectedHeap::collect(GCCause::Cause cause) {
  if (should_do_concurrent_full_gc(cause)) {
#if INCLUDE_ALL_GCS
    // mostly concurrent full collection
    collect_mostly_concurrent(cause);
#else  // INCLUDE_ALL_GCS
    ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
  } else {
#ifdef ASSERT
    if (cause == GCCause::_scavenge_alot) {
      // minor collection only
      collect(cause, 0);
    } else {
      // Stop-the-world full collection
      collect(cause, n_gens() - 1);
    }
#else
    // Stop-the-world full collection
    collect(cause, n_gens() - 1);
#endif
  }
}

void GenCollectedHeap::collect(GCCause::Cause cause, int max_level) {
  // The caller doesn't have the Heap_lock
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
  MutexLocker ml(Heap_lock);
  collect_locked(cause, max_level);
}

void GenCollectedHeap::collect_locked(GCCause::Cause cause) {
  // The caller has the Heap_lock
  assert(Heap_lock->owned_by_self(), "this thread should own the Heap_lock");
  collect_locked(cause, n_gens() - 1);
}

// this is the private collection interface
// The Heap_lock is expected to be held on entry.

void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
  // Read the GC count while holding the Heap_lock
  unsigned int gc_count_before      = total_collections();
  unsigned int full_gc_count_before = total_full_collections();
  {
    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
    VM_GenCollectFull op(gc_count_before, full_gc_count_before,
                         cause, max_level);
    VMThread::execute(&op);
  }
}

#if INCLUDE_ALL_GCS
bool GenCollectedHeap::create_cms_collector() {

  assert(((_gens[1]->kind() == Generation::ConcurrentMarkSweep) ||
          (_gens[1]->kind() == Generation::ASConcurrentMarkSweep)),
         "Unexpected generation kinds");
  // Skip two header words in the block content verification
  NOT_PRODUCT(_skip_header_HeapWords = CMSCollector::skip_header_HeapWords();)
  CMSCollector* collector = new CMSCollector(
    (ConcurrentMarkSweepGeneration*)_gens[1],
    _rem_set->as_CardTableRS(),
    (ConcurrentMarkSweepPolicy*) collector_policy());

  if (collector == NULL || !collector->completed_initialization()) {
    if (collector) {
      delete collector;  // Be nice in embedded situation
    }
    vm_shutdown_during_initialization("Could not create CMS collector");
    return false;
  }
  return true;  // success
}

void GenCollectedHeap::collect_mostly_concurrent(GCCause::Cause cause) {
  assert(!Heap_lock->owned_by_self(), "Should not own Heap_lock");

  MutexLocker ml(Heap_lock);
  // Read the GC counts while holding the Heap_lock
  unsigned int full_gc_count_before = total_full_collections();
  unsigned int gc_count_before      = total_collections();
  {
    MutexUnlocker mu(Heap_lock);
    VM_GenCollectFullConcurrent op(gc_count_before, full_gc_count_before, cause);
    VMThread::execute(&op);
  }
}
#endif // INCLUDE_ALL_GCS

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
  do_full_collection(clear_all_soft_refs, _n_gens - 1);
}

void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                          int max_level) {
  int local_max_level;
  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
      gc_cause() == GCCause::_gc_locker) {
    local_max_level = 0;
  } else {
    local_max_level = max_level;
  }

  do_collection(true                /* full */,
                clear_all_soft_refs /* clear_all_soft_refs */,
                0                   /* size */,
                false               /* is_tlab */,
                local_max_level     /* max_level */);
  // Hack XXX FIX ME !!!
  // A scavenge may not have been attempted, or may have
  // been attempted and failed, because the old gen was too full
  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
      incremental_collection_will_fail(false /* don't consult_young */)) {
    if (PrintGCDetails) {
      gclog_or_tty->print_cr("GC locker: Trying a full collection "
                             "because scavenge failed");
    }
    // This time allow the old gen to be collected as well
    do_collection(true                /* full */,
                  clear_all_soft_refs /* clear_all_soft_refs */,
                  0                   /* size */,
                  false               /* is_tlab */,
                  n_gens() - 1        /* max_level */);
  }
}

bool GenCollectedHeap::is_in_young(oop p) {
  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
  assert(result == _gens[0]->is_in_reserved(p),
         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
  return result;
}

// Returns "TRUE" iff "p" points into the committed areas of the heap.
bool GenCollectedHeap::is_in(const void* p) const {
#ifndef ASSERT
  guarantee(VerifyBeforeGC      ||
            VerifyDuringGC      ||
            VerifyBeforeExit    ||
            VerifyDuringStartup ||
            PrintAssembly       ||
            tty->count() != 0   ||  // already printing
            VerifyAfterGC       ||
            VMError::fatal_error_in_progress(), "too expensive");

#endif
  // This might be sped up with a cache of the last generation that
  // answered yes.
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in(p)) return true;
  }
  // Otherwise...
  return false;
}

#ifdef ASSERT
// Don't implement this by using is_in_young(). This method is used
// in some cases to check that is_in_young() is correct.
bool GenCollectedHeap::is_in_partial_collection(const void* p) {
  assert(is_in_reserved(p) || p == NULL,
         "Does not work if address is non-null and outside of the heap");
  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
}
#endif

void GenCollectedHeap::oop_iterate(ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(cl);
  }
}

void GenCollectedHeap::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->oop_iterate(mr, cl);
  }
}

void GenCollectedHeap::object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate(cl);
  }
}

void GenCollectedHeap::safe_object_iterate(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->safe_object_iterate(cl);
  }
}

void GenCollectedHeap::object_iterate_since_last_GC(ObjectClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->object_iterate_since_last_GC(cl);
  }
}

Space* GenCollectedHeap::space_containing(const void* addr) const {
  for (int i = 0; i < _n_gens; i++) {
    Space* res = _gens[i]->space_containing(addr);
    if (res != NULL) return res;
  }
  // Otherwise...
  assert(false, "Could not find containing space");
  return NULL;
}


HeapWord* GenCollectedHeap::block_start(const void* addr) const {
  assert(is_in_reserved(addr), "block_start of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_start(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return NULL;
}

size_t GenCollectedHeap::block_size(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_size of address outside of heap");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      assert(_gens[i]->is_in(addr),
             "addr should be in allocated part of generation");
      return _gens[i]->block_size(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return 0;
}

bool GenCollectedHeap::block_is_obj(const HeapWord* addr) const {
  assert(is_in_reserved(addr), "block_is_obj of address outside of heap");
  assert(block_start(addr) == addr, "addr must be a block start");
  for (int i = 0; i < _n_gens; i++) {
    if (_gens[i]->is_in_reserved(addr)) {
      return _gens[i]->block_is_obj(addr);
    }
  }
  assert(false, "Some generation should contain the address");
  return false;
}

bool GenCollectedHeap::supports_tlab_allocation() const {
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      return true;
    }
  }
  return false;
}

size_t GenCollectedHeap::tlab_capacity(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->tlab_capacity();
    }
  }
  return result;
}

size_t GenCollectedHeap::unsafe_max_tlab_alloc(Thread* thr) const {
  size_t result = 0;
  for (int i = 0; i < _n_gens; i += 1) {
    if (_gens[i]->supports_tlab_allocation()) {
      result += _gens[i]->unsafe_max_tlab_alloc();
    }
  }
  return result;
}

HeapWord* GenCollectedHeap::allocate_new_tlab(size_t size) {
  bool gc_overhead_limit_was_exceeded;
  return collector_policy()->mem_allocate_work(size /* size */,
                                               true /* is_tlab */,
                                               &gc_overhead_limit_was_exceeded);
}

// Requires "*prev_ptr" to be non-NULL. Deletes and returns a block of
// minimal size from the list headed by "*prev_ptr".
static ScratchBlock *removeSmallestScratch(ScratchBlock **prev_ptr) {
  bool first = true;
  size_t min_size = 0;   // "first" makes this conceptually infinite.
  ScratchBlock **smallest_ptr, *smallest;
  ScratchBlock  *cur = *prev_ptr;
  while (cur) {
    assert(*prev_ptr == cur, "just checking");
    if (first || cur->num_words < min_size) {
      smallest_ptr = prev_ptr;
      smallest     = cur;
      min_size     = smallest->num_words;
      first        = false;
    }
    prev_ptr = &cur->next;
    cur      =  cur->next;
  }
  smallest = *smallest_ptr;
  *smallest_ptr = smallest->next;
  return smallest;
}

// Sort the scratch block list headed by "list" into decreasing size order,
// and set "list" to the result.
static void sort_scratch_list(ScratchBlock*& list) {
  ScratchBlock* sorted   = NULL;
  ScratchBlock* unsorted = list;
  while (unsorted) {
    ScratchBlock *smallest = removeSmallestScratch(&unsorted);
    smallest->next = sorted;
    sorted = smallest;
  }
  list = sorted;
}
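
// Illustrative note (not part of this change): repeatedly unlinking the
// smallest remaining block and pushing it onto the head of "sorted" is a
// selection sort; the smallest blocks are pushed first and therefore end up
// at the tail, leaving the list in decreasing size order. For hypothetical
// block sizes 5 -> 1 -> 9:
//
//   ScratchBlock* list = ...;  // sizes 5 -> 1 -> 9
//   sort_scratch_list(list);   // removed in order 1, 5, 9; list reads 9 -> 5 -> 1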

ScratchBlock* GenCollectedHeap::gather_scratch(Generation* requestor,
                                               size_t max_alloc_words) {
  ScratchBlock* res = NULL;
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->contribute_scratch(res, requestor, max_alloc_words);
  }
  sort_scratch_list(res);
  return res;
}

void GenCollectedHeap::release_scratch() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->reset_scratch();
  }
}

class GenPrepareForVerifyClosure: public GenCollectedHeap::GenClosure {
  void do_generation(Generation* gen) {
    gen->prepare_for_verify();
  }
};

void GenCollectedHeap::prepare_for_verify() {
  ensure_parsability(false);        // no need to retire TLABs
  GenPrepareForVerifyClosure blk;
  generation_iterate(&blk, false);
}


void GenCollectedHeap::generation_iterate(GenClosure* cl,
                                          bool old_to_young) {
  if (old_to_young) {
    for (int i = _n_gens-1; i >= 0; i--) {
      cl->do_generation(_gens[i]);
    }
  } else {
    for (int i = 0; i < _n_gens; i++) {
      cl->do_generation(_gens[i]);
    }
  }
}

void GenCollectedHeap::space_iterate(SpaceClosure* cl) {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->space_iterate(cl, true);
  }
}

bool GenCollectedHeap::is_maximal_no_gc() const {
  for (int i = 0; i < _n_gens; i++) {
    if (!_gens[i]->is_maximal_no_gc()) {
      return false;
    }
  }
  return true;
}

void GenCollectedHeap::save_marks() {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->save_marks();
  }
}

void GenCollectedHeap::compute_new_generation_sizes(int collectedGen) {
  for (int i = 0; i <= collectedGen; i++) {
    _gens[i]->compute_new_size();
  }
}

GenCollectedHeap* GenCollectedHeap::heap() {
  assert(_gch != NULL, "Uninitialized access to GenCollectedHeap::heap()");
  assert(_gch->kind() == CollectedHeap::GenCollectedHeap, "not a generational heap");
  return _gch;
}


void GenCollectedHeap::prepare_for_compaction() {
  Generation* scanning_gen = _gens[_n_gens-1];
  // Start by compacting into same gen.
  CompactPoint cp(scanning_gen, NULL, NULL);
  while (scanning_gen != NULL) {
    scanning_gen->prepare_for_compaction(&cp);
    scanning_gen = prev_gen(scanning_gen);
  }
}

GCStats* GenCollectedHeap::gc_stats(int level) const {
  return _gens[level]->gc_stats();
}

void GenCollectedHeap::verify(bool silent, VerifyOption option /* ignored */) {
  for (int i = _n_gens-1; i >= 0; i--) {
    Generation* g = _gens[i];
    if (!silent) {
      gclog_or_tty->print(g->name());
      gclog_or_tty->print(" ");
    }
    g->verify();
  }
  if (!silent) {
    gclog_or_tty->print("remset ");
  }
  rem_set()->verify();
}

void GenCollectedHeap::print_on(outputStream* st) const {
  for (int i = 0; i < _n_gens; i++) {
    _gens[i]->print_on(st);
  }
  MetaspaceAux::print_on(st);
}

void GenCollectedHeap::gc_threads_do(ThreadClosure* tc) const {
  if (workers() != NULL) {
    workers()->threads_do(tc);
  }
#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::threads_do(tc);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_gc_threads_on(outputStream* st) const {
#if INCLUDE_ALL_GCS
  if (UseParNewGC) {
    workers()->print_worker_threads_on(st);
  }
  if (UseConcMarkSweepGC) {
    ConcurrentMarkSweepThread::print_all_on(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_on_error(outputStream* st) const {
  this->CollectedHeap::print_on_error(st);

#if INCLUDE_ALL_GCS
  if (UseConcMarkSweepGC) {
    st->cr();
    CMSCollector::print_on_error(st);
  }
#endif // INCLUDE_ALL_GCS
}

void GenCollectedHeap::print_tracing_info() const {
  if (TraceGen0Time) {
    get_gen(0)->print_summary_info();
  }
  if (TraceGen1Time) {
    get_gen(1)->print_summary_info();
  }
}

void GenCollectedHeap::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

class GenGCPrologueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_prologue(_full);
  }
  GenGCPrologueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_prologue(bool full) {
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");

  always_do_update_barrier = false;
  // Fill TLAB's and such
  CollectedHeap::accumulate_statistics_all_tlabs();
  ensure_parsability(true);   // retire TLABs

  // Call allocation profiler
  AllocationProfiler::iterate_since_last_gc();
  // Walk generations
  GenGCPrologueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.
}

class GenGCEpilogueClosure: public GenCollectedHeap::GenClosure {
 private:
  bool _full;
 public:
  void do_generation(Generation* gen) {
    gen->gc_epilogue(_full);
  }
  GenGCEpilogueClosure(bool full) : _full(full) {};
};

void GenCollectedHeap::gc_epilogue(bool full) {
#ifdef COMPILER2
  assert(DerivedPointerTable::is_empty(), "derived pointer present");
  size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
  guarantee(actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
#endif /* COMPILER2 */

  resize_all_tlabs();

  GenGCEpilogueClosure blk(full);
  generation_iterate(&blk, false); // not old-to-young.

  if (!CleanChunkPoolAsync) {
    Chunk::clean_chunk_pool();
  }

  MetaspaceCounters::update_performance_counters();

  always_do_update_barrier = UseConcMarkSweepGC;
}

#ifndef PRODUCT
class GenGCSaveTopsBeforeGCClosure: public GenCollectedHeap::GenClosure {
 private:
 public:
  void do_generation(Generation* gen) {
    gen->record_spaces_top();
  }
};

void GenCollectedHeap::record_gen_tops_before_GC() {
  if (ZapUnusedHeapArea) {
    GenGCSaveTopsBeforeGCClosure blk;
    generation_iterate(&blk, false);  // not old-to-young.
  }
}
#endif  // not PRODUCT

class GenEnsureParsabilityClosure: public GenCollectedHeap::GenClosure {
 public:
  void do_generation(Generation* gen) {
    gen->ensure_parsability();
  }
};

void GenCollectedHeap::ensure_parsability(bool retire_tlabs) {
  CollectedHeap::ensure_parsability(retire_tlabs);
  GenEnsureParsabilityClosure ep_cl;
  generation_iterate(&ep_cl, false);
}

oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
                                              oop obj,
                                              size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
  HeapWord* result = NULL;

  // First give each higher generation a chance to allocate the promoted object.
  Generation* allocator = next_gen(gen);
  if (allocator != NULL) {
    do {
      result = allocator->allocate(obj_size, false);
    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
  }

  if (result == NULL) {
    // Then give gen and higher generations a chance to expand and allocate the
    // object.
    do {
      result = gen->expand_and_allocate(obj_size, false);
    } while (result == NULL && (gen = next_gen(gen)) != NULL);
  }

  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  }
  return oop(result);
}

class GenTimeOfLastGCClosure: public GenCollectedHeap::GenClosure {
  jlong _time;   // in ms
  jlong _now;    // in ms

 public:
  GenTimeOfLastGCClosure(jlong now) : _time(now), _now(now) { }

  jlong time() { return _time; }

  void do_generation(Generation* gen) {
    _time = MIN2(_time, gen->time_of_last_gc(_now));
  }
};

jlong GenCollectedHeap::millis_since_last_gc() {
  // We need a monotonically non-decreasing time in ms but
  // os::javaTimeMillis() does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  GenTimeOfLastGCClosure tolgc_cl(now);
  // iterate over generations getting the oldest
  // time that a generation was collected
  generation_iterate(&tolgc_cl, false);

  // javaTimeNanos() is guaranteed to be monotonically non-decreasing
  // provided the underlying platform provides such a time source
  // (and it is bug free). So we still have to guard against getting
  // back a time later than 'now'.
  jlong retVal = now - tolgc_cl.time();
  if (retVal < 0) {
    NOT_PRODUCT(warning("time warp: " INT64_FORMAT, retVal);)
    return 0;
  }
  return retVal;
}