Wed, 06 Jan 2010 14:22:39 -0800
6914300: ciEnv should export all well known classes
Reviewed-by: kvn, twisti
/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_collectedHeap.cpp.incl"

#ifdef ASSERT
int CollectedHeap::_fire_out_of_memory_count = 0;
#endif

size_t CollectedHeap::_filler_array_max_size = 0;

// Memory state functions.

CollectedHeap::CollectedHeap()
{
  const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  const size_t elements_per_word = HeapWordSize / sizeof(jint);
  _filler_array_max_size = align_object_size(filler_array_hdr_size() +
                                             max_len / elements_per_word);
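  // A worked example of the sizing above (illustrative numbers, not
  // normative): on a 64-bit VM, HeapWordSize == 8 and sizeof(jint) == 4,
  // so elements_per_word == 2 and max_len jint elements occupy
  // max_len / 2 heap words; adding the int-array header and aligning
  // gives the largest filler that fits in a single int[] object.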
  _barrier_set = NULL;
  _is_gc_active = false;
  _total_collections = _total_full_collections = 0;
  _gc_cause = _gc_lastcause = GCCause::_no_gc;
  NOT_PRODUCT(_promotion_failure_alot_count = 0;)
  NOT_PRODUCT(_promotion_failure_alot_gc_number = 0;)

  if (UsePerfData) {
    EXCEPTION_MARK;

    // create the gc cause jvmstat counters
    _perf_gc_cause = PerfDataManager::create_string_variable(SUN_GC, "cause",
                             80, GCCause::to_string(_gc_cause), CHECK);

    _perf_gc_lastcause =
        PerfDataManager::create_string_variable(SUN_GC, "lastCause",
                             80, GCCause::to_string(_gc_lastcause), CHECK);
  }
}

#ifndef PRODUCT
void CollectedHeap::check_for_bad_heap_word_value(HeapWord* addr, size_t size) {
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) != ((intptr_t) badHeapWordVal),
             "Found badHeapWordValue in post-allocation check");
    }
  }
}

void CollectedHeap::check_for_non_bad_heap_word_value(HeapWord* addr, size_t size)
{
  if (CheckMemoryInitialization && ZapUnusedHeapArea) {
    for (size_t slot = 0; slot < size; slot += 1) {
      assert((*(intptr_t*) (addr + slot)) == ((intptr_t) badHeapWordVal),
             "Found non badHeapWordValue in pre-allocation check");
    }
  }
}
#endif // PRODUCT

#ifdef ASSERT
void CollectedHeap::check_for_valid_allocation_state() {
  Thread *thread = Thread::current();
  // How to choose between a pending exception and a potential
  // OutOfMemoryError? Don't allow pending exceptions.
  // This is a VM policy failure, so how do we exhaustively test it?
  assert(!thread->has_pending_exception(),
         "shouldn't be allocating with pending exception");
  if (StrictSafepointChecks) {
    assert(thread->allow_allocation(),
           "Allocation done by thread for which allocation is blocked "
           "by No_Allocation_Verifier!");
    // Allocation of an oop can always invoke a safepoint,
    // hence, the true argument
    thread->check_for_valid_safepoint_state(true);
  }
}
#endif

HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
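  // (For context -- a hedged note on policy, not part of the check below:
  // refill_waste_limit() is derived from the TLABRefillWasteFraction flag,
  // so the TLAB is retained whenever discarding it would waste more free
  // space than that policy allows.)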
  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
    thread->tlab().record_slow_allocation(size);
    return NULL;
  }

  // Discard tlab and allocate a new one.
  // To minimize fragmentation, the last TLAB may be smaller than the rest.
  size_t new_tlab_size = thread->tlab().compute_size(size);

  thread->tlab().clear_before_allocation();

  if (new_tlab_size == 0) {
    return NULL;
  }

  // Allocate a new TLAB...
  HeapWord* obj = Universe::heap()->allocate_new_tlab(new_tlab_size);
  if (obj == NULL) {
    return NULL;
  }
  if (ZeroTLAB) {
    // ...and clear it.
    Copy::zero_to_words(obj, new_tlab_size);
  } else {
    // ...and clear just the allocated object.
    Copy::zero_to_words(obj, size);
  }
  thread->tlab().fill(obj, obj + size, new_tlab_size);
  return obj;
}

void CollectedHeap::flush_deferred_store_barrier(JavaThread* thread) {
  MemRegion deferred = thread->deferred_card_mark();
  if (!deferred.is_empty()) {
    {
      // Verify that the storage points to a parsable object in heap
      DEBUG_ONLY(oop old_obj = oop(deferred.start());)
      assert(is_in(old_obj), "Not in allocated heap");
      assert(!can_elide_initializing_store_barrier(old_obj),
             "Else should have been filtered in defer_store_barrier()");
      assert(!is_in_permanent(old_obj), "Sanity: not expected");
      assert(old_obj->is_oop(true), "Not an oop");
      assert(old_obj->is_parsable(), "Will not be concurrently parsable");
      assert(deferred.word_size() == (size_t)(old_obj->size()),
             "Mismatch: multiple objects?");
    }
    BarrierSet* bs = barrier_set();
    assert(bs->has_write_region_opt(), "No write_region() on BarrierSet");
    bs->write_region(deferred);
    // "Clear" the deferred_card_mark field
    thread->set_deferred_card_mark(MemRegion());
  }
  assert(thread->deferred_card_mark().is_empty(), "invariant");
}
// Helper for ReduceInitialCardMarks. For performance,
// compiled code may elide card-marks for initializing stores
// to a newly allocated object along the fast-path. We
// compensate for such elided card-marks as follows:
// (a) Generational, non-concurrent collectors, such as
//     GenCollectedHeap(ParNew,DefNew,Tenured) and
//     ParallelScavengeHeap(ParallelGC, ParallelOldGC)
//     need the card-mark if and only if the region is
//     in the old gen, and do not care if the card-mark
//     succeeds or precedes the initializing stores themselves,
//     so long as the card-mark is completed before the next
//     scavenge. For all these cases, we can do a card mark
//     at the point at which we do a slow path allocation
//     in the old gen. For uniformity, however, we end
//     up using the same scheme (see below) for all three
//     cases (deferring the card-mark appropriately).
// (b) GenCollectedHeap(ConcurrentMarkSweepGeneration) requires
//     in addition that the card-mark for an old gen allocated
//     object strictly follow any associated initializing stores.
//     In these cases, the memRegion remembered below is
//     used to card-mark the entire region either just before the next
//     slow-path allocation by this thread or just before the next scavenge or
//     CMS-associated safepoint, whichever of these events happens first.
//     (The implicit assumption is that the object has been fully
//     initialized by this point, a fact that we assert when doing the
//     card-mark.)
// (c) G1CollectedHeap(G1) uses two kinds of write barriers. When
//     G1 concurrent marking is in progress, an SATB (pre-write-)barrier
//     is used to remember the pre-value of any store. Initializing
//     stores will not need this barrier, so we need not worry about
//     compensating for the missing pre-barrier here. Turning now
//     to the post-barrier, we note that G1 needs a RS update barrier
//     which simply enqueues a (sequence of) dirty cards which may
//     optionally be refined by the concurrent update threads. Note
//     that this barrier need only be applied to a non-young write,
//     but, like in CMS, because of the presence of concurrent refinement
//     (much like CMS' precleaning), must strictly follow the oop-store.
//     Thus, using the same protocol for maintaining the intended
//     invariants turns out, serendipitously, to be the same for all
//     three collectors/heap types above.
//
// For each future collector, this should be reexamined with
// that specific collector in mind.
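// In outline (a summary of the scheme above, not additional behavior): the
// compiled fast path allocates and initializes new_obj with no card-mark;
// defer_store_barrier() below records [new_obj, new_obj + size) in the
// thread's deferred_card_mark; flush_deferred_store_barrier() later applies
// the card-mark via BarrierSet::write_region(), before this thread's next
// slow-path allocation or the next scavenge/safepoint, whichever comes first.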
oop CollectedHeap::defer_store_barrier(JavaThread* thread, oop new_obj) {
  // If a previous card-mark was deferred, flush it now.
  flush_deferred_store_barrier(thread);
  if (can_elide_initializing_store_barrier(new_obj)) {
    // The deferred_card_mark region should be empty
    // following the flush above.
    assert(thread->deferred_card_mark().is_empty(), "Error");
  } else {
    // Remember info for the newly deferred store barrier
    MemRegion deferred = MemRegion((HeapWord*)new_obj, new_obj->size());
    assert(!deferred.is_empty(), "Error");
    thread->set_deferred_card_mark(deferred);
  }
  return new_obj;
}

size_t CollectedHeap::filler_array_hdr_size() {
  return size_t(arrayOopDesc::header_size(T_INT));
}

size_t CollectedHeap::filler_array_min_size() {
  return align_object_size(filler_array_hdr_size());
}

size_t CollectedHeap::filler_array_max_size() {
  return _filler_array_max_size;
}

#ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{
  assert(words >= min_fill_size(), "too small to fill");
  assert(words % MinObjAlignment == 0, "unaligned size");
  assert(Universe::heap()->is_in_reserved(start), "not in heap");
  assert(Universe::heap()->is_in_reserved(start + words - 1), "not in heap");
}

void CollectedHeap::zap_filler_array(HeapWord* start, size_t words)
{
  if (ZapFillerObjects) {
    Copy::fill_to_words(start + filler_array_hdr_size(),
                        words - filler_array_hdr_size(), 0XDEAFBABE);
  }
}
#endif // ASSERT

void
CollectedHeap::fill_with_array(HeapWord* start, size_t words)
{
  assert(words >= filler_array_min_size(), "too small for an array");
  assert(words <= filler_array_max_size(), "too big for a single object");

  const size_t payload_size = words - filler_array_hdr_size();
  const size_t len = payload_size * HeapWordSize / sizeof(jint);
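  // For example (illustrative numbers only): on a 64-bit VM a payload of
  // 10 heap words is 80 bytes, i.e. len == 20 four-byte jint elements; the
  // same payload on a 32-bit VM (HeapWordSize == 4) gives len == 10.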
  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
  post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words);)
}

void
CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words)
{
  assert(words <= filler_array_max_size(), "too big for a single object");

  if (words >= filler_array_min_size()) {
    fill_with_array(start, words);
  } else if (words > 0) {
    assert(words == min_fill_size(), "unaligned size");
    post_allocation_setup_common(SystemDictionary::Object_klass(), start,
                                 words);
  }
}

void CollectedHeap::fill_with_object(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.
  fill_with_object_impl(start, words);
}

void CollectedHeap::fill_with_objects(HeapWord* start, size_t words)
{
  DEBUG_ONLY(fill_args_check(start, words);)
  HandleMark hm;  // Free handles before leaving.

#ifdef LP64
  // A single array can fill ~8G, so multiple objects are needed only in 64-bit.
  // First fill with arrays, ensuring that any remaining space is big enough to
  // fill.  The remainder is filled with a single object.
  const size_t min = min_fill_size();
  const size_t max = filler_array_max_size();
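  // A worked example of the carving below (illustrative): each iteration
  // takes cur == max words when the leftover (words - max) would still be
  // at least min; otherwise it takes max - min words, so the final
  // remainder handed to fill_with_object_impl() is guaranteed to be at
  // least min and hence fillable.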
  while (words > max) {
    const size_t cur = words - max >= min ? max : max - min;
    fill_with_array(start, cur);
    start += cur;
    words -= cur;
  }
#endif

  fill_with_object_impl(start, words);
}

HeapWord* CollectedHeap::allocate_new_tlab(size_t size) {
  guarantee(false, "thread-local allocation buffers not supported");
  return NULL;
}

void CollectedHeap::fill_all_tlabs(bool retire) {
  assert(UseTLAB, "should not reach here");
  // See note in ensure_parsability() below.
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "should only fill tlabs at safepoint");
  // The main thread starts allocating via a TLAB even before it
  // has added itself to the threads list at vm boot-up.
  assert(Threads::first() != NULL,
         "Attempt to fill tlabs before main thread has been added"
         " to threads list is doomed to failure!");
  for (JavaThread* thread = Threads::first(); thread; thread = thread->next()) {
    thread->tlab().make_parsable(retire);
  }
}
void CollectedHeap::ensure_parsability(bool retire_tlabs) {
  // The second disjunct in the assertion below makes a concession
  // for the start-up verification done while the VM is being
  // created. Callers must be careful to know that mutators
  // aren't going to interfere -- for instance, this is permissible
  // if we are still single-threaded and have either not yet
  // started allocating (nothing much to verify) or we have
  // started allocating but are now a full-fledged JavaThread
  // (and have thus made our TLABs available for filling).
  assert(SafepointSynchronize::is_at_safepoint() ||
         !is_init_completed(),
         "Should only be called at a safepoint or at start-up,"
         " otherwise concurrent mutator activity may make the heap"
         " unparsable again");
  if (UseTLAB) {
    fill_all_tlabs(retire_tlabs);
  }
}
void CollectedHeap::accumulate_statistics_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only accumulate statistics on tlabs at safepoint");

    ThreadLocalAllocBuffer::accumulate_statistics_before_gc();
  }
}

void CollectedHeap::resize_all_tlabs() {
  if (UseTLAB) {
    assert(SafepointSynchronize::is_at_safepoint() ||
           !is_init_completed(),
           "should only resize tlabs at safepoint");

    ThreadLocalAllocBuffer::resize_all_tlabs();
  }
}
void CollectedHeap::pre_full_gc_dump() {
  if (HeapDumpBeforeFullGC) {
    TraceTime tt("Heap Dump: ", PrintGCDetails, false, gclog_or_tty);
    // We are doing a "major" collection, and a heap dump before
    // it has been requested.
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramBeforeFullGC) {
    TraceTime tt("Class Histogram: ", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}
void CollectedHeap::post_full_gc_dump() {
  if (HeapDumpAfterFullGC) {
    TraceTime tt("Heap Dump", PrintGCDetails, false, gclog_or_tty);
    HeapDumper::dump_heap();
  }
  if (PrintClassHistogramAfterFullGC) {
    TraceTime tt("Class Histogram", PrintGCDetails, true, gclog_or_tty);
    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
    inspector.doit();
  }
}