Tue, 27 Nov 2012 14:20:21 +0100
8003935: Simplify the needed includes for using Thread::current()
Reviewed-by: dholmes, rbackman, coleenp
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"


#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}

void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread, so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}

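// Illustrative sketch (not part of this changeset): GCHeapLog keeps a ring
// buffer of EventLogBase<GCMessage> records, so a collector brackets a
// collection with before/after snapshots. The wrapper name below is a
// hypothetical stand-in for the real call sites:
//
//   void hypothetical_do_collection(GCHeapLog* log) {
//     if (log != NULL) log->log_heap(true);    // snapshot heap state before
//     // ... perform the collection ...
//     if (log != NULL) log->log_heap(false);   // snapshot heap state after
//   }
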
// Memory state functions.


CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);
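  // Worked example (illustrative): on a 64-bit VM, HeapWordSize is 8 and
  // sizeof(jint) is 4, so elements_per_word is 2. With max_len close to
  // 2^31 - 1 ints, the cap works out to roughly 2^30 words, i.e. about 8G
  // of filler from a single int array (see fill_with_objects() below).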

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}


// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
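
// Illustrative sketch (not part of this changeset): callers typically reach
// collect_as_vm_thread() from a VM_Operation's doit(), which already runs in
// the VM thread with the Heap_lock held, so both preconditions above hold by
// construction. VM_MyHeapOp is a hypothetical subclass for illustration:
//
//   void VM_MyHeapOp::doit() {
//     // VM thread + Heap_lock held here (Preconditions #1 and #2).
//     Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
//   }
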
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
                                             ClassLoaderData* loader_data,
                                             size_t size, Metaspace::MetadataType mdtype) {
  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
}


void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark = ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
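
// Illustrative pairing (a sketch, not from the original file): with
// ZapUnusedHeapArea enabled, freed space is mangled with badHeapWordVal, so
// the two checks above bracket an allocation roughly like this, where `p`
// and `word_size` are hypothetical:
//
//   check_for_non_bad_heap_word_value(p, word_size); // still mangled?
//   // ... hand out [p, p + word_size) and initialize it ...
//   check_for_bad_heap_word_value(p, word_size);     // fully initialized?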

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

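// Sketch of the matching fast path (paraphrased for illustration; the
// authoritative inline version lives in collectedHeap.inline.hpp): try a
// bump-the-pointer allocation in the current TLAB first, and only fall into
// the slow path above when that fails:
//
//   HeapWord* obj = thread->tlab().allocate(size);      // fast path
//   if (obj != NULL) {
//     return obj;
//   }
//   return allocate_from_tlab_slow(thread, size);       // may refill TLAB
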
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}

// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for both
//     G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}

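// Timeline sketch (illustrative, not from the original file) of cases
// (b)/(c) above, where the card-mark must strictly follow the initializing
// stores:
//
//   obj = <slow-path allocation in the old gen>;
//   new_store_pre_barrier(thread, obj);     // remembers [obj, obj + size)
//   // ... compiled code runs the initializing stores, card-marks elided ...
//   flush_deferred_store_barrier(thread);   // card-marks the whole region,
//                                           // at latest before the next
//                                           // scavenge or CMS safepoint
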
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}

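// Worked example (illustrative numbers): filling 100 words on a 64-bit VM
// with an assumed 4-word int-array header gives payload_size = 96 words and
// len = 96 * 8 / 4 = 192 jint elements. The header size here is an
// assumption; the real value comes from filler_array_hdr_size().
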
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill. The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}

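// Worked example of the loop above (illustrative numbers): with max = 1000
// words and min = 2 words, filling 1001 words takes cur = max - min = 998
// (because the leftover 1001 - 1000 = 1 would be smaller than min), leaving
// 3 words, which the final fill_with_object_impl() call can still fill.
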
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers be careful that you know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up"
         " otherwise concurrent mutator activity may make heap "
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
    // The deferred store barriers must all have been flushed to the
    // card-table (or other remembered set structure) before GC starts
    // processing the card-table (or other remembered set).
    if (deferred) flush_deferred_store_barrier(thread);
#else
    assert(!deferred, "Should be false");
    assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}

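// Sketch of a typical GC-prologue/epilogue ordering (an assumption about
// callers, not defined in this file): statistics are gathered while TLAB
// contents are still intact, the heap is then made parsable, and TLABs are
// resized after the collection:
//
//   heap->accumulate_statistics_all_tlabs();
//   heap->ensure_parsability(true /* retire_tlabs */);
//   // ... collect ...
//   heap->resize_all_tlabs();
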
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}

void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection and a heap dump before
    // major collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  assert(ScavengeRootsInCode > 0, "must be");
  obj = common_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}

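// Usage sketch (an assumption about the call site, for illustration only):
// mirror creation during class loading allocates the java.lang.Class
// instance through this helper, roughly:
//
//   oop mirror = Universe::heap()->Class_obj_allocate(class_klass, size,
//                                                     real_klass, CHECK_NULL);
//   // where class_klass, size and real_klass are hypothetical locals
//   // describing the java.lang.Class klass, the mirror's instance size,
//   // and the klass being mirrored.
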
/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif
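
// Illustrative note (an assumption, not shown in this file): in this era of
// HotSpot, debug-build unit tests like test_is_in() are driven from the
// internal VM test runner, along the lines of:
//
//   // e.g. on a debug VM started with -XX:+ExecuteInternalVMTests
//   run_unit_test(CollectedHeap::test_is_in());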