Fri, 21 Mar 2014 10:16:35 +0100
8036696: Add metaspace gc threshold to metaspace summary trace event
Reviewed-by: jmasa, stefank, mgerdin
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif
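
// Maximum size, in words, of a single filler array; set from the maximum
// int[] length in the CollectedHeap constructor below.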
size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
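
// Record a before/after heap snapshot into the events ring buffer; the log
// object is only created when LogEvents is enabled (see the constructor).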
void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}
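
// Build the metaspace summary for the trace event. As of this change
// (8036696) the summary also carries the metaspace GC threshold, i.e. the
// committed size at which the next metadata-triggered GC is induced
// (MetaspaceGC::capacity_until_GC()).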
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::allocated_capacity_bytes(),
      MetaspaceAux::allocated_used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType),
      MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType),
      MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space);
}

void CollectedHeap::print_heap_before_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before();
  }
}

void CollectedHeap::print_heap_after_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after();
  }
}
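
// nmethod registration hooks; the defaults only assert the locking protocol.
// Heaps that need to track nmethods as GC roots override these.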
void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold: {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
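
// TLAB slow path: called when the current TLAB cannot satisfy the request.
// Either keep the TLAB and make the caller fall back to a shared-space
// allocation (when the remaining free space is still worth keeping), or
// retire it and allocate a fresh TLAB sized for this request.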
HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
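
// Filler objects: dead int[] arrays (or a plain java.lang.Object for the
// smallest gaps) used to plug unused heap space so that it stays parsable.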
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}
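
// Illustrative call (hypothetical names): a collector plugging the dead tail
// of a region so heap walkers can skip over it:
//   CollectedHeap::fill_with_objects(dead_start, pointer_delta(region_end, dead_start));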

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
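    // Take a full max-sized chunk only when the leftover stays at least min
    // words; otherwise take max - min so the remaining tail is still fillable.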
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::post_initialize() {
  collector_policy()->post_heap_initialize();
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful that they know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
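
// Optional full-GC diagnostics, gated by HeapDump{Before,After}FullGC and
// PrintClassHistogram{Before,After}FullGC.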
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  if (HeapDumpBeforeFullGC) {
    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  if (HeapDumpAfterFullGC) {
    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}
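
// Allocate a java.lang.Class mirror of the given size and wire up the
// two-way klass <-> mirror indirection; real_klass may be null while
// bootstrapping, in which case only the mirror itself is initialized here.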
oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  assert(ScavengeRootsInCode > 0, "must be");
  obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif