/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.

CollectedHeap::CollectedHeap()
{
  // Compute the maximum size, in heap words, of a filler int array:
  // the array header plus the largest possible jint payload.
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
}
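
// A worked example of the computation above (a sketch, assuming a 64-bit
// VM where HeapWordSize == 8 and sizeof(jint) == 4):
//
//   elements_per_word = 8 / 4 = 2
//   max_len           = max length of an int array, about 2^31 elements
//   payload words     = max_len / elements_per_word, about 2^30 words
//
// so a single filler int array can cover roughly 2^30 words, or ~8GB,
// which is why fill_with_objects() below needs multiple filler objects
// only on 64-bit platforms.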

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
{
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
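
// An illustrative walk through the retain-vs-discard decision above (the
// numbers are made up for the example): suppose a thread's TLAB has 4KB
// free and its refill_waste_limit() works out to 1KB.  A 6KB allocation
// that misses the fast path keeps the TLAB (discarding 4KB would waste
// too much) and returns NULL, so the caller allocates the object directly
// in the shared space.  With only 512 bytes free, the TLAB is retired
// instead and a fresh one is allocated to hold the object.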

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(arrayOopDesc::header_size(T_INT));
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size());
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
{
  if (ZapFillerObjects) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words);)
}
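
// For example, filling 10 heap words on a 64-bit VM where the int-array
// header occupies 2 words (an assumed value; the real one comes from
// arrayOopDesc::header_size(T_INT)):
//
//   payload_size = 10 - 2 = 8 words
//   len          = 8 * 8 / 4 = 16 jint elements
//
// i.e. the filler is an int[16] whose header plus payload exactly covers
// the 10 words being filled.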

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words);
}
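
// A note on the loop in fill_with_objects() above: each iteration carves
// off a filler array, but never leaves a remainder smaller than
// min_fill_size().  If taking a full max-sized array would leave fewer
// than `min` words, the chunk is shrunk to `max - min` words so that the
// remainder, (words - max) + min words in that case, is still at least
// `min` and can be filled by the trailing fill_with_object_impl() call.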

oop CollectedHeap::new_store_barrier(oop new_obj) {
  // %%% This needs refactoring. (It was imported from the server compiler.)
  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
  BarrierSet* bs = this->barrier_set();
  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
  int new_size = new_obj->size();
  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
  return new_obj;
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::fill_all_tlabs(bool retire) {
  assert(UseTLAB, "should not reach here");
  // See note in ensure_parsability() below.
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only fill tlabs at safepoint");
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     thread->tlab().make_parsable(retire);
  }
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created.  Callers be careful that you know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  if (UseTLAB) {
    fill_all_tlabs(retire_tlabs);
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
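
// Illustrative usage of the dump hooks above: running the VM with
//   -XX:+HeapDumpBeforeFullGC -XX:+PrintClassHistogramAfterFullGC
// makes pre_full_gc_dump() write a heap snapshot before each full
// collection and post_full_gc_dump() print a class histogram after it.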