Tue, 27 Nov 2012 14:20:21 +0100
8003935: Simplify the needed includes for using Thread::current()
Reviewed-by: dholmes, rbackman, coleenp
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "runtime/init.hpp"
#include "runtime/thread.inline.hpp"
#include "services/heapDumper.hpp"

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;
template <>
void EventLogBase<GCMessage>::print(outputStream* st, GCMessage& m) {
  st->print_cr("GC heap %s", m.is_before ? "before" : "after");
  st->print_raw(m);
}
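
// Capture a before/after-GC heap summary into the next slot of the GC
// event ring buffer, guarded by should_log().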
void GCHeapLog::log_heap(bool before) {
  if (!should_log()) {
    return;
  }

  double timestamp = fetch_timestamp();
  MutexLockerEx ml(&_mutex, Mutex::_no_safepoint_check_flag);
  int index = compute_log_index();
  _records[index].thread = NULL; // It's the GC thread so it's not that interesting.
  _records[index].timestamp = timestamp;
  _records[index].data.is_before = before;
  stringStream st(_records[index].data.buffer(), _records[index].data.size());
  if (before) {
    Universe::print_heap_before_gc(&st, true);
  } else {
    Universe::print_heap_after_gc(&st, true);
  }
}
// Memory state functions.

CollectedHeap::CollectedHeap() : _n_par_threads(0)
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);

  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
      PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
  _defer_initial_card_mark = false; // strengthened by subclass in pre_initialize() below.
  // Create the ring log
  if (LogEvents) {
    _gc_heap_log = new GCHeapLog();
  } else {
    _gc_heap_log = NULL;
  }
}
// This interface assumes that it's being called by the
// vm thread. It collects the heap assuming that the
// heap lock is already held and that we are executing in
// the context of the vm thread.
void CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
  assert(Thread::current()->is_VM_thread(), "Precondition#1");
  assert(Heap_lock->is_locked(), "Precondition#2");
  GCCauseSetter gcs(this, cause);
  switch (cause) {
    case GCCause::_heap_inspection:
    case GCCause::_heap_dump:
    case GCCause::_metadata_GC_threshold : {
      HandleMark hm;
      do_full_collection(false);        // don't clear all soft refs
      break;
    }
    case GCCause::_last_ditch_collection: {
      HandleMark hm;
      do_full_collection(true);         // do clear all soft refs
      break;
    }
    default:
      ShouldNotReachHere(); // Unexpected use of this function
  }
}
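
// A metadata (metaspace) allocation has failed; delegate to the collector
// policy, which attempts to recover (typically by collecting and/or
// expanding the metaspace) and retries the allocation.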
MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
                                              ClassLoaderData* loader_data,
                                              size_t size, Metaspace::MetadataType mdtype) {
  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
}
void CollectedHeap::pre_initialize() {
  // Used for ReduceInitialCardMarks (when COMPILER2 is used);
  // otherwise remains unused.
#ifdef COMPILER2
  _defer_initial_card_mark =    ReduceInitialCardMarks && can_elide_tlab_store_barriers()
                             && (DeferInitialCardMark || card_mark_must_follow_store());
#else
  assert(_defer_initial_card_mark == false, "Who would set it?");
#endif
}
#ifndef PRODUCT
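// Post-allocation check: with heap zapping enabled, verify that none of
// the words in [addr, addr + size) still contain the badHeapWordVal
// pattern, i.e. the space handed out has been properly initialized.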
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}
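
// Pre-allocation check: with heap zapping enabled, memory that is about to
// be handed out is expected to still contain the badHeapWordVal pattern.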
void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT
#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif
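
// Slow path for TLAB allocation: called when the current TLAB cannot
// satisfy the request. Either keep the TLAB (and let the caller allocate
// directly from the shared heap) or retire it and install a fresh one.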
HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_tlab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}
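
// Apply a card-mark that was deferred by new_store_pre_barrier() below.
// The deferred MemRegion covers exactly one previously allocated object;
// once the region has been card-marked, the per-thread record is cleared.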
void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    assert(_defer_initial_card_mark, "Otherwise should be empty");
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in new_store_pre_barrier()");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen, i.e. in this call.
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When a
//     G1 concurrent marking is in progress an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, the protocol for maintaining the intended invariants turns
//     out, serendipitously, to be the same for both G1 and CMS.
//
// For any future collector, this code should be reexamined with
// that specific collector in mind, and the documentation above suitably
// extended and updated.
oop CollectedHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    MemRegion mr((HeapWord*)new_obj, new_obj->size());
    assert(!mr.is_empty(), "Error");
    if (_defer_initial_card_mark) {
      // Defer the card mark
      thread->set_deferred_card_mark(mr);
    } else {
      // Do the card mark
      BarrierSet* bs = barrier_set();
      assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
      bs->write_region(mr);
    }
  }
  return new_obj;
}
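
// Filler objects keep otherwise unused heap ranges parsable: large gaps
// are formatted as int[] arrays and the smallest gaps as plain
// java.lang.Object instances (see fill_with_object_impl() below).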
size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(align_object_offset(arrayOopDesc::header_size(T_INT))); // align to Long
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
}
#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words, bool zap)
{
  if (ZapFillerObjects && zap) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT
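
// Format [start, start + words) as a single int[] filler object whose
// length is chosen so that the array exactly covers the given range.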
void
CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
  assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));

  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start);
  DEBUG_ONLY(zap_filler_array(start, words, zap);)
}
void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words, zap);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words, zap);
}
void CollectedHeap::fill_with_objects(HeapWord* start, size_t words, bool zap)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef _LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur, zap);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words, zap);
}
HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers should be careful that they know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up"
         " otherwise concurrent mutator activity may make heap "
         " unparsable again");
  const bool use_tlab = UseTLAB;
  const bool deferred = _defer_initial_card_mark;
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(!use_tlab || Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
     if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
#ifdef COMPILER2
     // The deferred store barriers must all have been flushed to the
     // card-table (or other remembered set structure) before GC starts
     // processing the card-table (or other remembered set).
     if (deferred) flush_deferred_store_barrier(thread);
#else
     assert(!deferred, "Should be false");
     assert(thread->deferred_card_mark().is_empty(), "Should be empty");
#endif
  }
}
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
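
// Optionally dump the heap and/or print a class histogram just before a
// full collection, as requested via HeapDumpBeforeFullGC and
// PrintClassHistogramBeforeFullGC; post_full_gc_dump() below is the
// corresponding hook that runs after the collection.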
void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
    // A heap dump before the "major" collection has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}

void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
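
// Allocate and set up a java.lang.Class mirror of the given size. If
// 'real_klass' is non-null, the mirror and the klass are linked to each
// other; JVMTI and DTrace are then notified of the allocation.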
oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle real_klass, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj;
  assert(ScavengeRootsInCode > 0, "must be");
  obj = common_mem_allocate_init(size, CHECK_NULL);
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  oop mirror = (oop)obj;

  java_lang_Class::set_oop_size(mirror, size);

  // Setup indirections
  if (!real_klass.is_null()) {
    java_lang_Class::set_klass(mirror, real_klass());
    real_klass->set_java_mirror(mirror);
  }

  InstanceMirrorKlass* mk = InstanceMirrorKlass::cast(mirror->klass());
  assert(size == mk->instance_size(real_klass), "should have been set");

  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj);

  return mirror;
}
/////////////// Unit tests ///////////////

#ifndef PRODUCT
void CollectedHeap::test_is_in() {
  CollectedHeap* heap = Universe::heap();

  uintptr_t epsilon    = (uintptr_t) MinObjAlignment;
  uintptr_t heap_start = (uintptr_t) heap->_reserved.start();
  uintptr_t heap_end   = (uintptr_t) heap->_reserved.end();

  // Test that NULL is not in the heap.
  assert(!heap->is_in(NULL), "NULL is unexpectedly in the heap");

  // Test that a pointer to before the heap start is reported as outside the heap.
  assert(heap_start >= ((uintptr_t)NULL + epsilon), "sanity");
  void* before_heap = (void*)(heap_start - epsilon);
  assert(!heap->is_in(before_heap),
         err_msg("before_heap: " PTR_FORMAT " is unexpectedly in the heap", before_heap));

  // Test that a pointer to after the heap end is reported as outside the heap.
  assert(heap_end <= ((uintptr_t)-1 - epsilon), "sanity");
  void* after_heap = (void*)(heap_end + epsilon);
  assert(!heap->is_in(after_heap),
         err_msg("after_heap: " PTR_FORMAT " is unexpectedly in the heap", after_heap));
}
#endif