src/share/vm/gc_interface/collectedHeap.cpp

author       never
date         Wed, 15 Feb 2012 10:12:55 -0800
changeset    3571:09d00c18e323
parent       3499:aa3d708d67c4
child        3668:cc74fa5a91a9
permissions  -rw-r--r--

7145537: minor tweaks to LogEvents
Reviewed-by: kvn, twisti

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "services/heapDumper.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif
#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}
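
// Usage sketch, assuming callers NULL-check _gc_heap_log as set up in the
// constructor below: GC code brackets a collection roughly as
//   if (_gc_heap_log != NULL) _gc_heap_log->log_heap(true);   // before GC
//   ... collect ...
//   if (_gc_heap_log != NULL) _gc_heap_log->log_heap(false);  // after GC
// Each call claims the next ring-buffer slot under _mutex, so only the most
// recent heap snapshots are retained (e.g. for crash reporting).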
// Memory state functions.

CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}
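
// Worked sizing example (a rough sketch for a 64-bit VM; the exact cap
// depends on arrayOopDesc::max_array_length): with HeapWordSize == 8 and
// sizeof(jint) == 4, elements_per_word == 2, so max_len ~ 2^31 int elements
// occupy ~2^30 heap words -- about 8G of heap plus the aligned array header.
// This caps a single filler array; fill_with_objects() below stitches
// multiple fillers together for larger gaps.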
void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}
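
// A descriptive note on the conjunction above: deferral is armed only when
// the compiled fast path may actually elide card-marks (ReduceInitialCardMarks
// plus can_elide_tlab_store_barriers()) and either deferral is explicitly
// allowed (DeferInitialCardMark) or the collector requires card-marks to
// strictly follow the initializing stores (card_mark_must_follow_store(),
// true for collectors with concurrent card scanning).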
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
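
// A sketch of the slow-path policy above: (1) if the current TLAB still has
// more free space than its refill_waste_limit(), keep it and return NULL so
// the caller allocates this one object directly in the shared space;
// (2) otherwise retire the TLAB, size a replacement via compute_size(),
// allocate it from the heap, zero it (ZeroTLAB) or mangle it (debug builds),
// and carve the requested object out of its start with fill().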
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs an RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus the protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
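
// In effect, new_store_pre_barrier() leaves a new object in one of three
// states: no compensating card-mark needed (the collector can elide it, e.g.
// for a young-gen allocation), a card-mark deferred in the thread's
// deferred_card_mark region (cases (b)/(c) above), or a card-mark applied
// immediately via write_region() (case (a) above).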
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}
#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
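
// A note on the length computation above: payload_size is in heap words, so
// multiplying by HeapWordSize / sizeof(jint) (2 on a 64-bit VM) yields the
// jint element count stored in the array header -- the inverse of the sizing
// done in the constructor. Writing the length before installing the klass
// keeps a concurrent GC thread from seeing a klassed-but-unsized array.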
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}
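
// The cur computation above is what keeps the tail fillable: if a full
// max-sized array would leave fewer than min words (words - max < min), the
// loop takes max - min words instead, so the space remaining for the final
// fill_with_object_impl() call is always at least min_fill_size() words.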
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers be careful that you know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}
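
// (Here "parsable" means a collector can walk the heap linearly, object by
// object; make_parsable() plugs each TLAB's unused tail with a filler object
// via fill_with_object() above, so no unformatted gaps remain.)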
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  if (JavaObjectsInPerm) {
    obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
  } else {
    assert(ScavengeRootsInCode > 0, "must be");
    obj = common_mem_allocate_init(size, CHECK_NULL);
  }
  post_allocation_setup_common(klass, obj, size);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  instanceMirrorKlass* mk = instanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}
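
// Sequencing note: the mirror records its own size (set_oop_size above)
// because java.lang.Class instances embed the static fields of the class
// they mirror, so instanceMirrorKlass cannot size a mirror from the klass
// alone; the klass/mirror back-pointers are wired only once real_klass is
// known (it may be null, e.g. for primitive-type mirrors).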
/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
      err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
      err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
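
// (A note on running this: tests like test_is_in() are intended for the VM's
// internal test runner -- presumably reached under -XX:+ExecuteInternalVMTests
// in debug builds -- which is why the whole block is compiled out of product
// builds.)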
#endif
