src/share/vm/gc_interface/collectedHeap.cpp

author      xdono
date        Mon, 09 Mar 2009 13:28:46 -0700
changeset   1014:0fbdb4381b99
parent      929:d593294016c3
child       1063:7bb995fbd3c0
permissions -rw-r--r--

6814575: Update copyright year
Summary: Update copyright for files that have been modified in 2009, up to 03/09
Reviewed-by: katleman, tbell, ohair

/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.

CollectedHeap::CollectedHeap()
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  // max_len jints occupy max_len / elements_per_word heap words.
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);
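  // A worked example, assuming LP64 with 8-byte heap words (figures are
  // illustrative, not normative): elements_per_word = 8 / 4 = 2 jints per
  // word, so a maximum-length int array's payload is max_len / 2 words --
  // roughly 2^30 words, i.e. ~8G of bytes -- plus the array header,
  // rounded up to the object alignment.  This bounds the largest single
  // filler object (see fill_with_objects below).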

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
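    // (The names are assumed to surface through jvmstat as sun.gc.cause
    // and sun.gc.lastCause, SUN_GC being the sun.gc namespace; the 80
    // characters bound the length of the stored cause string.)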
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
}

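// In debug builds running with ZapUnusedHeapArea, free heap memory is
// filled with the badHeapWordVal pattern.  The two checks below verify,
// word by word, that an allocation path cleared its memory
// (post-allocation) or that memory about to be handed out is still zapped
// (pre-allocation).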
#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
{
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non-badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError?  Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence the true argument.
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

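// TLAB slow path, taken when the fast-path allocation in the current TLAB
// fails.  Returning NULL tells the caller to allocate the object directly
// in the shared heap; otherwise the old TLAB is retired and the object is
// allocated at the start of a fresh one.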
HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

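// Filler objects: dead int[] arrays (or, for the smallest gaps, plain
// java.lang.Object instances) used to plug unused ranges of the heap so
// that it stays parseable by a linear scan.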
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(arrayOopDesc::header_size(T_INT));
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size());
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
{
  if (ZapFillerObjects) {
    // Mark the filler payload with a recognizable pattern in debug builds.
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0xDEAFBABE);
  }
}
#endif // ASSERT

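// fill_with_array covers [start, start + words) with a single int[] whose
// length is chosen so that header plus payload is exactly 'words'.  As an
// illustration (hypothetical LP64 figures): with an array header of 3 heap
// words and words == 5, the payload is 2 words, so len = 2 * 8 / 4 = 4 ints.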
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words);
}

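// When a region exceeds the largest single filler array (possible only on
// 64-bit), it is carved into max-sized chunks; 'cur' backs off to max - min
// whenever taking a full max would leave a tail smaller than the minimum
// fill size.  A small worked example with hypothetical word counts: for
// min = 2, max = 10 and words = 21, the loop fills chunks of 10 and 8,
// leaving 3 words for the final object.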
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words);
}

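// Used when the compiler has elided the individual store barriers for
// stores into a freshly-allocated object: the object's whole region is
// dirtied in a single write_region call instead.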
oop CollectedHeap::new_store_barrier(oop new_obj) {
  // %%% This needs refactoring. (It was imported from the server compiler.)
  guarantee(can_elide_tlab_store_barriers(), "store barrier elision not supported");
  BarrierSet* bs = this->barrier_set();
  assert(bs->has_write_region_opt(), "Barrier set does not have write_region");
  int new_size = new_obj->size();
  bs->write_region(MemRegion((HeapWord*)new_obj, new_size));
  return new_obj;
}

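// Default implementation for heaps without TLAB support; heaps that run
// with UseTLAB are expected to override this.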
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::fill_all_tlabs(bool retire) {
  assert(UseTLAB, "should not reach here");
  // See note in ensure_parsability() below.
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only fill tlabs at safepoint");
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
    thread->tlab().make_parsable(retire);
  }
}

void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created.  Callers should be certain that mutators aren't going
  // to interfere -- for instance, this is permissible if we are
  // still single-threaded and have either not yet started
  // allocating (nothing much to verify) or we have started
  // allocating but are now a full-fledged JavaThread (and have
  // thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up;"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  if (UseTLAB) {
    fill_all_tlabs(retire_tlabs);
  }
}

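// Global TLAB bookkeeping, delegated to ThreadLocalAllocBuffer.  Both
// operations below must run at a safepoint (or during single-threaded
// start-up), for the same reason as ensure_parsability() above.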
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
