Wed, 26 Mar 2014 14:15:02 +0100
8035667: EventMetaspaceSummary doesn't report committed Metaspace memory
Reviewed-by: jmasa, stefank
/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}

VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::allocated_used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::allocated_used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}
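
As a side note on the change above (each MetaspaceSizes entry in the summary now carries committed bytes), here is a minimal standalone sketch of the committed/used/reserved triple. The struct, names and values are hypothetical, not HotSpot code; they only illustrate the ordering one would expect such a summary to satisfy.

// Illustrative sketch only -- hypothetical types, not part of this file.
#include <cassert>
#include <cstddef>
#include <cstdio>

// Stand-in for a (committed, used, reserved) size triple in bytes.
struct SpaceSizes {
  size_t committed;   // memory actually mapped and usable
  size_t used;        // memory currently occupied by metadata
  size_t reserved;    // address space set aside for the space
};

static void report(const char* name, const SpaceSizes& s) {
  // A summary is only meaningful if used <= committed <= reserved.
  assert(s.used <= s.committed && s.committed <= s.reserved);
  printf("%s: used=%zu committed=%zu reserved=%zu\n",
         name, s.used, s.committed, s.reserved);
}

int main() {
  SpaceSizes metaspace = { 8 << 20, 6 << 20, 64 << 20 };  // made-up values
  report("metaspace", metaspace);
  return 0;
}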

void CollectedHeap::print_heap_before_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before();
  }
}

void CollectedHeap::print_heap_after_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after();
  }
}

void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                         80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                         80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
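
A standalone sketch (hypothetical names and values, not HotSpot code) of the retain-versus-retire decision made at the top of allocate_from_tlab_slow(): a TLAB with more free space than its refill-waste limit is kept and the request is satisfied outside it; otherwise it is retired and replaced.

#include <cstddef>
#include <cstdio>

// Returns true if the current TLAB should be kept (allocate the object
// outside it), false if it should be retired and a new one allocated.
static bool retain_tlab(size_t free_words, size_t refill_waste_limit_words) {
  return free_words > refill_waste_limit_words;
}

int main() {
  // A mostly-full TLAB (little free space) is cheap to throw away...
  printf("%d\n", retain_tlab(16, 64));   // 0: retire and refill
  // ...but a TLAB with lots of free space is retained to avoid the waste.
  printf("%d\n", retain_tlab(512, 64));  // 1: keep it, allocate in shared space
  return 0;
}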

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t)HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}
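
The divide-before-multiply trick described in the comment above can be checked in isolation. This standalone sketch uses illustrative constants (not the real header-size or alignment values) just to show why the division is done first.

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t max_jint       = 0x7fffffff;  // Integer.MAX_VALUE
  const uint32_t heap_word_size = 8;           // a 64-bit HeapWord
  const uint32_t jint_size      = 4;           // sizeof(jint)

  // Divide first: the intermediate value stays well below 2^32.
  uint32_t divide_first = jint_size * (max_jint / heap_word_size);

  // Multiply first: the intermediate (~2^33) wraps around in 32 bits,
  // which is exactly the overflow the comment above avoids.
  uint32_t multiply_first = (jint_size * max_jint) / heap_word_size;

  printf("divide first  : %u words\n", divide_first);
  printf("multiply first: %u words (wrapped)\n", multiply_first);
  return 0;
}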

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
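
A tiny standalone sketch of the three-way outcome implemented above (names are hypothetical, not HotSpot code): the card mark for a newly allocated object is either elided, deferred to be flushed later, or performed immediately.

#include <cstdio>

enum Action { ELIDE, DEFER, MARK_NOW };

// Mirrors the structure of new_store_pre_barrier(): the two flags stand in
// for can_elide_initializing_store_barrier() and _defer_initial_card_mark.
static Action post_store_action(bool can_elide, bool defer_card_mark) {
  if (can_elide) return ELIDE;        // no card mark needed for this object
  return defer_card_mark ? DEFER      // remember the region, mark it later
                         : MARK_NOW;  // dirty the cards right away
}

int main() {
  printf("%d %d %d\n",
         post_store_action(true,  false),   // 0: elided
         post_store_action(false, true),    // 1: deferred
         post_store_action(false, false));  // 2: immediate
  return 0;
}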

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill. The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}
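
The loop above splits a large region so that whatever remains after the last full-size chunk is still big enough to fill. A standalone sketch of that arithmetic with hypothetical word counts (no real allocation):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Carve a region into filler chunks so the remainder never drops below 'min'.
static void fill(size_t words, size_t min, size_t max) {
  while (words > max) {
    // Take a full 'max' chunk unless that would leave a sliver below 'min';
    // in that case take a slightly smaller chunk (max - min) instead.
    const size_t cur = (words - max >= min) ? max : max - min;
    printf("filler chunk: %zu words\n", cur);
    words -= cur;
  }
  assert(words == 0 || words >= min);
  printf("final chunk: %zu words\n", words);
}

int main() {
  fill(2500, 8, 1000);  // two 1000-word chunks, 500 words remain (>= min)
  fill(2004, 8, 1000);  // second chunk shrinks to 992 so 12 words remain (>= min)
  return 0;
}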

void CollectedHeap::post_initialize() {
  collector_policy()->post_heap_initialize();
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful that they know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up"
         " otherwise concurrent mutator activity may make heap "
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  if (HeapDumpBeforeFullGC) {
    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  if (HeapDumpAfterFullGC) {
    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  assert(ScavengeRootsInCode > 0, "must be");
  obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif