/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "services/heapDumper.hpp"
#ifdef TARGET_OS_FAMILY_linux
# include "thread_linux.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "thread_solaris.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "thread_windows.inline.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "thread_bsd.inline.hpp"
#endif


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}
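
// A note on the recording above: the stringStream is laid over the record's
// fixed-size buffer, so capturing a snapshot performs no heap allocation and a
// printout larger than the buffer is simply truncated rather than grown.
// Callers are expected to invoke this once with before == true and once with
// before == false around each collection, so the ring always holds matched
// before/after entries for the most recent GCs.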

// Memory state functions.

CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}
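
// A rough feel for the bound just computed, assuming a 64-bit VM: with
// HeapWordSize == 8 and sizeof(jint) == 4, elements_per_word is 2, so the
// largest filler int[] spans about max_array_length(T_INT) / 2 heap words
// plus the array header -- on the order of 8G of heap, which is why
// fill_with_objects() further down only needs to chain multiple fillers in
// the 64-bit (_LP64) case.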

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
                                             ClassLoaderData* loader_data,
                                             size_t size,
                                             Metaspace::MetadataType mdtype) {
  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
}


void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
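
// Illustration of the retain-vs-refill decision above: a thread keeps its
// current TLAB (and the too-large request is routed to the shared heap via
// the NULL return) until the space still free in the TLAB drops below
// refill_waste_limit(), a small fraction of the TLAB size governed by
// TLABRefillWasteFraction; only then is the TLAB retired and replaced.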

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
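
// Putting the pieces above together, the deferred card-mark lifecycle for an
// allocation that reaches this hook looks roughly like this (a sketch of the
// flow already implemented above, not a new code path):
//
//   new_obj = <fast-path allocation; initializing stores elide card-marks>
//   new_store_pre_barrier(thread, new_obj)
//     -> flush_deferred_store_barrier(thread)   // retire any older deferral
//     -> either remember MemRegion(new_obj, new_obj->size()) on the thread
//        (when _defer_initial_card_mark) or card-mark it immediately via
//        BarrierSet::write_region()
//   ...
//   a remembered region is finally card-marked at this thread's next
//   slow-path allocation, or in ensure_parsability() just before the next
//   collection, whichever comes first.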

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
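
// Why the length is written before the klass above: a concurrent GC thread
// may try to parse this region as soon as the filler starts to look like an
// object, and it derives the filler's size from the klass together with the
// array length; the length therefore has to be valid before
// post_allocation_setup_common() installs the header. (zap_filler_array()
// above likewise leaves the header words untouched and only mangles the
// payload.)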

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill. The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}
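
// The carving loop above never leaves an unusable tail: when splitting off a
// full max-sized filler array would leave fewer than min words behind, it
// carves max - min words instead, so the remainder handed to the final
// fill_with_object_impl() call is always at least min words -- i.e. always
// big enough to be covered by a filler object of its own.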

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful that they know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up"
         " otherwise concurrent mutator activity may make heap "
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
    // The deferred store barriers must all have been flushed to the
    // card-table (or other remembered set structure) before GC starts
    // processing the card-table (or other remembered set).
    if (deferred) flush_deferred_store_barrier(thread);
#else
    assert(!deferred, "Should be false");
    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
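
// These before/after dump hooks (pre_full_gc_dump() above, post_full_gc_dump()
// below) are driven purely by their flags; for example, a run with
// -XX:+HeapDumpBeforeFullGC and -XX:+PrintClassHistogramAfterFullGC writes a
// heap dump ahead of each full collection and prints a class histogram to
// gclog_or_tty after it, at the cost of a correspondingly longer pause.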

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  assert(ScavengeRootsInCode > 0, "must be");
  obj = common_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif
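
// A note on the checks above: only negative cases are asserted (NULL, one
// alignment step before the reserved range, one step past it); addresses
// inside the reservation are not asserted either way, since is_in() answers
// for the allocated part of the heap, which need not cover the whole
// reservation. The test itself is assumed to be invoked from the internal VM
// test hook (ExecuteInternalVMTests) rather than from product code.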