src/share/vm/gc_interface/collectedHeap.cpp

author:      xdono
date:        Mon, 09 Mar 2009 13:28:46 -0700
changeset:   1014:0fbdb4381b99
parent:      929:d593294016c3
child:       1063:7bb995fbd3c0
permissions: -rw-r--r--

6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif
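
// Upper bound, in words, on the size of a single filler array; computed in
// the CollectedHeap constructor from the largest int array the VM can
// allocate.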
size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.

CollectedHeap::CollectedHeap()
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len * elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
}
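
// Non-product checks on zap words: a just-allocated block must no longer
// contain the badHeapWordVal pattern, while a block about to be allocated
// still must.  Both checks are active only when CheckMemoryInitialization
// and ZapUnusedHeapArea are set.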
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
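
// Assert-only sanity checks performed before an allocation: the current
// thread must not have a pending exception, must not have allocation blocked
// by a No_Allocation_Verifier, and must be in a state where a safepoint could
// safely be taken.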
#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
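
// TLAB slow path: either keep the current TLAB and have the caller allocate
// directly from the shared heap (returns NULL), or retire the TLAB, allocate
// a fresh one, and return the newly allocated object within it.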
HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
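
// Filler objects: dead objects used to plug unused gaps in the heap so that
// it remains parsable.  Gaps of at least filler_array_min_size() words are
// formatted as int arrays; the smallest gaps become plain java.lang.Object
// instances.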
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(arrayOopDesc::header_size(T_INT));
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size());
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}
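
// In debug builds, overwrite the payload of a filler array with the
// 0XDEAFBABE pattern (when ZapFillerObjects is set) so filler memory is easy
// to recognize.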
void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
{
  if (ZapFillerObjects) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
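  // If carving off a full max-size array would leave a remainder smaller than
  // min_fill_size(), take slightly less (max - min) so the leftover region is
  // still large enough to fill.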
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words);
}
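
// Apply the barrier set's write_region operation over the entire newly
// allocated object; only legal when TLAB store barrier elision is supported
// (see the guarantee below).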
oop CollectedHeap::new_store_barrier(oop new_obj) {
  // %%% This needs refactoring.  (It was imported from the server compiler.)
  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
  BarrierSet* bs = this->barrier_set();
  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
  int new_size = new_obj->size();
  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
  return new_obj;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}
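
// Walk every JavaThread and make its TLAB parsable, optionally retiring it.
// Must run at a safepoint (or during single-threaded VM start-up, before
// initialization is complete).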
void CollectedHeap::fill_all_tlabs(bool retire) {
  assert(UseTLAB, "should not reach here");
  // See note in ensure_parsability() below.
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only fill tlabs at safepoint");
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    thread->tlab().make_parsable(retire);
  }
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be certain that mutators aren't going to
  // interfere -- for instance, this is permissible if we are still
  // single-threaded and have either not yet started allocating
  // (nothing much to verify) or we have started allocating but are
  // now a full-fledged JavaThread (and have thus made our TLABs
  // available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  if (UseTLAB) {
    fill_all_tlabs(retire_tlabs);
  }
}
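
// Safepoint-only TLAB bookkeeping: gather per-thread TLAB statistics before a
// GC and recompute the desired TLAB size for all threads.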
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
