src/share/vm/gc_interface/collectedHeap.cpp

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/gcWhen.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/allocTracer.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

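// Record a snapshot of the heap state in the event-log ring buffer,
// tagged with whether it was taken before or after the collection.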
void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}

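// Summarize the reserved and committed extent of the Java heap for GC tracing.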
VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  size_t capacity_in_words = capacity() / HeapWordSize;

  return VirtualSpaceSummary(
    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
}

GCHeapSummary CollectedHeap::create_heap_summary() {
  VirtualSpaceSummary heap_space = create_heap_space_summary();
  return GCHeapSummary(heap_space, used());
}

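// Gather committed/used/reserved sizes for metaspace as a whole and for its
// non-class and class parts, together with the chunk free list summaries.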
MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  const MetaspaceSizes meta_space(
      MetaspaceAux::committed_bytes(),
      MetaspaceAux::used_bytes(),
      MetaspaceAux::reserved_bytes());
  const MetaspaceSizes data_space(
      MetaspaceAux::committed_bytes(Metaspace::NonClassType),
      MetaspaceAux::used_bytes(Metaspace::NonClassType),
      MetaspaceAux::reserved_bytes(Metaspace::NonClassType));
  const MetaspaceSizes class_space(
      MetaspaceAux::committed_bytes(Metaspace::ClassType),
      MetaspaceAux::used_bytes(Metaspace::ClassType),
      MetaspaceAux::reserved_bytes(Metaspace::ClassType));

  const MetaspaceChunkFreeListSummary& ms_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::NonClassType);
  const MetaspaceChunkFreeListSummary& class_chunk_free_list_summary =
    MetaspaceAux::chunk_free_list_summary(Metaspace::ClassType);

  return MetaspaceSummary(MetaspaceGC::capacity_until_GC(), meta_space, data_space, class_space,
                          ms_chunk_free_list_summary, class_chunk_free_list_summary);
}

void CollectedHeap::print_heap_before_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_before();
  }
}

void CollectedHeap::print_heap_after_gc() {
  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }
  if (_gc_heap_log != NULL) {
    _gc_heap_log->log_heap_after();
  }
}

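// The default nmethod registration hooks are no-ops; collectors that need to
// track nmethods (e.g. G1, for code root scanning) override these.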
void CollectedHeap::register_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

void CollectedHeap::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);
}

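// Report heap and metaspace summaries to the GC tracer at the given point
// (before or after the collection).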
void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  const GCHeapSummary& heap_summary = create_heap_summary();
  gc_tracer->report_gc_heap_summary(when, heap_summary);

  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  gc_tracer->report_metaspace_summary(when, metaspace_summary);
}

void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::BeforeGC, gc_tracer);
}

void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
  trace_heap(GCWhen::AfterGC, gc_tracer);
}

// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
                PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}

// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}

void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
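// Memory-zapping checks: freshly allocated storage must no longer contain the
// badHeapWordVal pattern, while storage about to be allocated still should.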
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }

  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

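// Apply a card mark that was deferred for this thread (see new_store_pre_barrier
// below) and clear the thread's deferred_card_mark region.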
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

size_t CollectedHeap::max_tlab_size() const {
  // TLABs can't be bigger than we can fill with an int[Integer.MAX_VALUE].
  // This restriction could be removed by enabling filling with multiple arrays.
  // If we compute that the reasonable way as
  //    header_size + ((sizeof(jint) * max_jint) / HeapWordSize)
  // we'll overflow on the multiply, so we do the divide first.
  // We actually lose a little by dividing first,
  // but that just makes the TLAB somewhat smaller than the biggest array,
  // which is fine, since we'll be able to fill that.
  size_t max_int_size = typeArrayOopDesc::header_size(T_INT) +
              sizeof(jint) *
              ((juint) max_jint / (size_t) HeapWordSize);
  return align_size_down(max_int_size, MinObjAlignment);
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

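// Fill the given range with a single int[] filler object.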
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

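// Called once heap initialization is complete, giving the collector policy a
// chance to finish its own setup.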
void CollectedHeap::post_initialize() {
  collector_policy()->post_heap_initialize();
}

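// Default implementation for heaps that do not support thread-local
// allocation buffers; concrete collectors override this.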
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers be careful that you know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

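// Fold per-thread TLAB statistics into the global statistics before a GC.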
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

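// Recompute the desired TLAB size for each thread based on accumulated
// allocation statistics; done at a safepoint.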
void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

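// Optionally produce a heap dump and/or class histogram around a full GC,
// as requested by HeapDumpBeforeFullGC / PrintClassHistogramBeforeFullGC
// and their "After" counterparts.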
void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
  if (HeapDumpBeforeFullGC) {
    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer, GCId::create());
    // We are doing a "major" collection and a heap dump before
    // the major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer, GCId::create());
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
  if (HeapDumpAfterFullGC) {
    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer, GCId::create());
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer, GCId::create());
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
    inspector.doit();
  }
}

/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(before_heap)));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", p2i(after_heap)));
}
#endif
