1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/classLoader.hpp"
27 #include "classfile/classLoaderData.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/dependencies.hpp"
34 #include "gc_interface/collectedHeap.inline.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "memory/cardTableModRefBS.hpp"
37 #include "memory/gcLocker.inline.hpp"
38 #include "memory/genCollectedHeap.hpp"
39 #include "memory/genRemSet.hpp"
40 #include "memory/generation.hpp"
41 #include "memory/metadataFactory.hpp"
42 #include "memory/metaspaceShared.hpp"
43 #include "memory/oopFactory.hpp"
44 #include "memory/space.hpp"
45 #include "memory/universe.hpp"
46 #include "memory/universe.inline.hpp"
47 #include "oops/constantPool.hpp"
48 #include "oops/instanceClassLoaderKlass.hpp"
49 #include "oops/instanceKlass.hpp"
50 #include "oops/instanceMirrorKlass.hpp"
51 #include "oops/instanceRefKlass.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "oops/typeArrayKlass.hpp"
54 #include "prims/jvmtiRedefineClassesTrace.hpp"
55 #include "runtime/aprofiler.hpp"
56 #include "runtime/arguments.hpp"
57 #include "runtime/deoptimization.hpp"
58 #include "runtime/fprofiler.hpp"
59 #include "runtime/handles.inline.hpp"
60 #include "runtime/init.hpp"
61 #include "runtime/java.hpp"
62 #include "runtime/javaCalls.hpp"
63 #include "runtime/sharedRuntime.hpp"
64 #include "runtime/synchronizer.hpp"
65 #include "runtime/thread.inline.hpp"
66 #include "runtime/timer.hpp"
67 #include "runtime/vm_operations.hpp"
68 #include "services/memoryService.hpp"
69 #include "utilities/copy.hpp"
70 #include "utilities/events.hpp"
71 #include "utilities/hashtable.inline.hpp"
72 #include "utilities/preserveException.hpp"
73 #include "utilities/macros.hpp"
74 #if INCLUDE_ALL_GCS
75 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
76 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
77 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
78 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
79 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
80 #endif // INCLUDE_ALL_GCS
82 // Known objects
83 Klass* Universe::_boolArrayKlassObj = NULL;
84 Klass* Universe::_byteArrayKlassObj = NULL;
85 Klass* Universe::_charArrayKlassObj = NULL;
86 Klass* Universe::_intArrayKlassObj = NULL;
87 Klass* Universe::_shortArrayKlassObj = NULL;
88 Klass* Universe::_longArrayKlassObj = NULL;
89 Klass* Universe::_singleArrayKlassObj = NULL;
90 Klass* Universe::_doubleArrayKlassObj = NULL;
91 Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
92 Klass* Universe::_objectArrayKlassObj = NULL;
93 oop Universe::_int_mirror = NULL;
94 oop Universe::_float_mirror = NULL;
95 oop Universe::_double_mirror = NULL;
96 oop Universe::_byte_mirror = NULL;
97 oop Universe::_bool_mirror = NULL;
98 oop Universe::_char_mirror = NULL;
99 oop Universe::_long_mirror = NULL;
100 oop Universe::_short_mirror = NULL;
101 oop Universe::_void_mirror = NULL;
102 oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
103 oop Universe::_main_thread_group = NULL;
104 oop Universe::_system_thread_group = NULL;
105 objArrayOop Universe::_the_empty_class_klass_array = NULL;
106 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
107 oop Universe::_the_null_string = NULL;
108 oop Universe::_the_min_jint_string = NULL;
109 LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
110 LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
111 ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
112 oop Universe::_out_of_memory_error_java_heap = NULL;
113 oop Universe::_out_of_memory_error_perm_gen = NULL;
114 oop Universe::_out_of_memory_error_array_size = NULL;
115 oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
116 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
117 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
118 bool Universe::_verify_in_progress = false;
119 oop Universe::_null_ptr_exception_instance = NULL;
120 oop Universe::_arithmetic_exception_instance = NULL;
121 oop Universe::_virtual_machine_error_instance = NULL;
122 oop Universe::_vm_exception = NULL;
123 Array<int>* Universe::_the_empty_int_array = NULL;
124 Array<u2>* Universe::_the_empty_short_array = NULL;
125 Array<Klass*>* Universe::_the_empty_klass_array = NULL;
126 Array<Method*>* Universe::_the_empty_method_array = NULL;
128 // These variables are guarded by FullGCALot_lock.
129 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
130 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
132 // Heap
133 int Universe::_verify_count = 0;
135 int Universe::_base_vtable_size = 0;
136 bool Universe::_bootstrapping = false;
137 bool Universe::_fully_initialized = false;
139 size_t Universe::_heap_capacity_at_last_gc;
140 size_t Universe::_heap_used_at_last_gc = 0;
142 CollectedHeap* Universe::_collectedHeap = NULL;
144 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
145 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
146 address Universe::_narrow_ptrs_base;
148 size_t Universe::_class_metaspace_size;
150 void Universe::basic_type_classes_do(void f(Klass*)) {
151 f(boolArrayKlassObj());
152 f(byteArrayKlassObj());
153 f(charArrayKlassObj());
154 f(intArrayKlassObj());
155 f(shortArrayKlassObj());
156 f(longArrayKlassObj());
157 f(singleArrayKlassObj());
158 f(doubleArrayKlassObj());
159 }
161 void Universe::oops_do(OopClosure* f, bool do_all) {
163 f->do_oop((oop*) &_int_mirror);
164 f->do_oop((oop*) &_float_mirror);
165 f->do_oop((oop*) &_double_mirror);
166 f->do_oop((oop*) &_byte_mirror);
167 f->do_oop((oop*) &_bool_mirror);
168 f->do_oop((oop*) &_char_mirror);
169 f->do_oop((oop*) &_long_mirror);
170 f->do_oop((oop*) &_short_mirror);
171 f->do_oop((oop*) &_void_mirror);
173 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
174 f->do_oop((oop*) &_mirrors[i]);
175 }
176 assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
178 f->do_oop((oop*)&_the_empty_class_klass_array);
179 f->do_oop((oop*)&_the_null_string);
180 f->do_oop((oop*)&_the_min_jint_string);
181 f->do_oop((oop*)&_out_of_memory_error_java_heap);
182 f->do_oop((oop*)&_out_of_memory_error_perm_gen);
183 f->do_oop((oop*)&_out_of_memory_error_array_size);
184 f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
185 f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
186 f->do_oop((oop*)&_null_ptr_exception_instance);
187 f->do_oop((oop*)&_arithmetic_exception_instance);
188 f->do_oop((oop*)&_virtual_machine_error_instance);
189 f->do_oop((oop*)&_main_thread_group);
190 f->do_oop((oop*)&_system_thread_group);
191 f->do_oop((oop*)&_vm_exception);
192 debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
193 }
195 // Serialize metadata in and out of CDS archive, not oops.
196 void Universe::serialize(SerializeClosure* f, bool do_all) {
198 f->do_ptr((void**)&_boolArrayKlassObj);
199 f->do_ptr((void**)&_byteArrayKlassObj);
200 f->do_ptr((void**)&_charArrayKlassObj);
201 f->do_ptr((void**)&_intArrayKlassObj);
202 f->do_ptr((void**)&_shortArrayKlassObj);
203 f->do_ptr((void**)&_longArrayKlassObj);
204 f->do_ptr((void**)&_singleArrayKlassObj);
205 f->do_ptr((void**)&_doubleArrayKlassObj);
206 f->do_ptr((void**)&_objectArrayKlassObj);
208 {
209 for (int i = 0; i < T_VOID+1; i++) {
210 if (_typeArrayKlassObjs[i] != NULL) {
211 assert(i >= T_BOOLEAN, "checking");
212 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
213 } else if (do_all) {
214 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
215 }
216 }
217 }
219 f->do_ptr((void**)&_the_array_interfaces_array);
220 f->do_ptr((void**)&_the_empty_int_array);
221 f->do_ptr((void**)&_the_empty_short_array);
222 f->do_ptr((void**)&_the_empty_method_array);
223 f->do_ptr((void**)&_the_empty_klass_array);
224 _finalizer_register_cache->serialize(f);
225 _loader_addClass_cache->serialize(f);
226 _reflect_invoke_cache->serialize(f);
227 }
229 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
230 if (size < alignment || size % alignment != 0) {
231 ResourceMark rm;
232 stringStream st;
233 st.print("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment);
234 char* error = st.as_string();
235 vm_exit_during_initialization(error);
236 }
237 }
239 void initialize_basic_type_klass(Klass* k, TRAPS) {
240 Klass* ok = SystemDictionary::Object_klass();
241 if (UseSharedSpaces) {
242 assert(k->super() == ok, "u3");
243 k->restore_unshareable_info(CHECK);
244 } else {
245 k->initialize_supers(ok, CHECK);
246 }
247 k->append_to_sibling_list();
248 }
250 void Universe::genesis(TRAPS) {
251 ResourceMark rm;
253 { FlagSetting fs(_bootstrapping, true);
255 { MutexLocker mc(Compile_lock);
257 // determine base vtable size; without that we cannot create the array klasses
258 compute_base_vtable_size();
260 if (!UseSharedSpaces) {
261 _boolArrayKlassObj = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
262 _charArrayKlassObj = TypeArrayKlass::create_klass(T_CHAR, sizeof(jchar), CHECK);
263 _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT, sizeof(jfloat), CHECK);
264 _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE, sizeof(jdouble), CHECK);
265 _byteArrayKlassObj = TypeArrayKlass::create_klass(T_BYTE, sizeof(jbyte), CHECK);
266 _shortArrayKlassObj = TypeArrayKlass::create_klass(T_SHORT, sizeof(jshort), CHECK);
267 _intArrayKlassObj = TypeArrayKlass::create_klass(T_INT, sizeof(jint), CHECK);
268 _longArrayKlassObj = TypeArrayKlass::create_klass(T_LONG, sizeof(jlong), CHECK);
270 _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
271 _typeArrayKlassObjs[T_CHAR] = _charArrayKlassObj;
272 _typeArrayKlassObjs[T_FLOAT] = _singleArrayKlassObj;
273 _typeArrayKlassObjs[T_DOUBLE] = _doubleArrayKlassObj;
274 _typeArrayKlassObjs[T_BYTE] = _byteArrayKlassObj;
275 _typeArrayKlassObjs[T_SHORT] = _shortArrayKlassObj;
276 _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
277 _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;
279 ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
281 _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
282 _the_empty_int_array = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
283 _the_empty_short_array = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
284 _the_empty_method_array = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
285 _the_empty_klass_array = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
286 }
287 }
289 vmSymbols::initialize(CHECK);
291 SystemDictionary::initialize(CHECK);
293 Klass* ok = SystemDictionary::Object_klass();
295 _the_null_string = StringTable::intern("null", CHECK);
296 _the_min_jint_string = StringTable::intern("-2147483648", CHECK);
298 if (UseSharedSpaces) {
299 // Verify shared interfaces array.
300 assert(_the_array_interfaces_array->at(0) ==
301 SystemDictionary::Cloneable_klass(), "u3");
302 assert(_the_array_interfaces_array->at(1) ==
303 SystemDictionary::Serializable_klass(), "u3");
304 } else {
305 // Set up shared interfaces array. (Do this before supers are set up.)
306 _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
307 _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
308 }
310 initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
311 initialize_basic_type_klass(charArrayKlassObj(), CHECK);
312 initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
313 initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
314 initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
315 initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
316 initialize_basic_type_klass(intArrayKlassObj(), CHECK);
317 initialize_basic_type_klass(longArrayKlassObj(), CHECK);
318 } // end of core bootstrapping
320 // Maybe this could be lifted up now that object array can be initialized
321 // during the bootstrapping.
323 // OLD
324 // Initialize _objectArrayKlass after core bootstrapping to make
325 // sure the super class is set up properly for _objectArrayKlass.
326 // ---
327 // NEW
328 // Since some of the old system object arrays have been converted to
329 // ordinary object arrays, _objectArrayKlass will be loaded when
330 // SystemDictionary::initialize(CHECK); is run. See the extra check
331 // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
332 _objectArrayKlassObj = InstanceKlass::
333 cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
334 // OLD
335 // Add the class to the class hierarchy manually to make sure that
336 // its vtable is initialized after core bootstrapping is completed.
337 // ---
338 // NEW
339 // The vtable has already been initialized.
340 _objectArrayKlassObj->append_to_sibling_list();
342 // Compute is_jdk version flags.
343 // Only 1.3 or later has the java.lang.Shutdown class.
344 // Only 1.4 or later has the java.lang.CharSequence interface.
345 // Only 1.5 or later has the java.lang.management.MemoryUsage class.
346 if (JDK_Version::is_partially_initialized()) {
347 uint8_t jdk_version;
348 Klass* k = SystemDictionary::resolve_or_null(
349 vmSymbols::java_lang_management_MemoryUsage(), THREAD);
350 CLEAR_PENDING_EXCEPTION; // ignore exceptions
351 if (k == NULL) {
352 k = SystemDictionary::resolve_or_null(
353 vmSymbols::java_lang_CharSequence(), THREAD);
354 CLEAR_PENDING_EXCEPTION; // ignore exceptions
355 if (k == NULL) {
356 k = SystemDictionary::resolve_or_null(
357 vmSymbols::java_lang_Shutdown(), THREAD);
358 CLEAR_PENDING_EXCEPTION; // ignore exceptions
359 if (k == NULL) {
360 jdk_version = 2;
361 } else {
362 jdk_version = 3;
363 }
364 } else {
365 jdk_version = 4;
366 }
367 } else {
368 jdk_version = 5;
369 }
370 JDK_Version::fully_initialize(jdk_version);
371 }
373 #ifdef ASSERT
374 if (FullGCALot) {
375 // Allocate an array of dummy objects.
376 // We'd like these to be at the bottom of the old generation,
377 // so that when we free one and then collect,
378 // (almost) the whole heap moves
379 // and we find out if we actually update all the oops correctly.
380 // But we can't allocate directly in the old generation,
381 // so we allocate wherever, and hope that the first collection
382 // moves these objects to the bottom of the old generation.
383 // We can allocate directly in the permanent generation, so we do.
384 int size;
385 if (UseConcMarkSweepGC) {
386 warning("Using +FullGCALot with concurrent mark sweep gc "
387 "will not force all objects to relocate");
388 size = FullGCALotDummies;
389 } else {
390 size = FullGCALotDummies * 2;
391 }
392 objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
393 objArrayHandle dummy_array(THREAD, naked_array);
394 int i = 0;
395 while (i < size) {
396 // Allocate dummy in old generation
397 oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
398 dummy_array->obj_at_put(i++, dummy);
399 }
400 {
401 // Only modify the global variable inside the mutex.
402 // If we had a race to here, the other dummy_array instances
403 // and their elements just get dropped on the floor, which is fine.
404 MutexLocker ml(FullGCALot_lock);
405 if (_fullgc_alot_dummy_array == NULL) {
406 _fullgc_alot_dummy_array = dummy_array();
407 }
408 }
409 assert(i == _fullgc_alot_dummy_array->length(), "just checking");
410 }
411 #endif
413 // Initialize dependency array for null class loader
414 ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
416 }
418 // CDS support for patching vtables in metadata in the shared archive.
419 // All types inherited from Metadata have vtables, but not types inherited
420 // from MetaspaceObj, because the latter does not have virtual functions.
421 // If the metadata type has a vtable, it cannot be shared in the read-only
422 // section of the CDS archive, because the vtable pointer is patched.
423 static inline void add_vtable(void** list, int* n, void* o, int count) {
424 guarantee((*n) < count, "vtable list too small");
425 void* vtable = dereference_vptr(o);
426 assert(*(void**)(vtable) != NULL, "invalid vtable");
427 list[(*n)++] = vtable;
428 }
430 void Universe::init_self_patching_vtbl_list(void** list, int count) {
431 int n = 0;
432 { InstanceKlass o; add_vtable(list, &n, &o, count); }
433 { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
434 { InstanceMirrorKlass o; add_vtable(list, &n, &o, count); }
435 { InstanceRefKlass o; add_vtable(list, &n, &o, count); }
436 { TypeArrayKlass o; add_vtable(list, &n, &o, count); }
437 { ObjArrayKlass o; add_vtable(list, &n, &o, count); }
438 { Method o; add_vtable(list, &n, &o, count); }
439 { ConstantPool o; add_vtable(list, &n, &o, count); }
440 }
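// Illustrative only -- this sketch is not part of HotSpot and the helper name
// is an assumption. It shows roughly what "patching" means for the list built
// above: when shared metadata is mapped into a new process, the first word of
// each object (the vptr) is overwritten with the vtable pointer recorded here
// for an object of the same dynamic type, so virtual calls dispatch correctly
// in the current process.
//
//   static void patch_vptr_of(void* shared_metadata_obj, void* vtable_for_its_type) {
//     // The vptr is conventionally stored in the object's first word.
//     *(void**)shared_metadata_obj = vtable_for_its_type;
//   }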
442 void Universe::initialize_basic_type_mirrors(TRAPS) {
443 assert(_int_mirror==NULL, "basic type mirrors already initialized");
444 _int_mirror =
445 java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
446 _float_mirror =
447 java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
448 _double_mirror =
449 java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
450 _byte_mirror =
451 java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
452 _bool_mirror =
453 java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
454 _char_mirror =
455 java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
456 _long_mirror =
457 java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
458 _short_mirror =
459 java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
460 _void_mirror =
461 java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);
463 _mirrors[T_INT] = _int_mirror;
464 _mirrors[T_FLOAT] = _float_mirror;
465 _mirrors[T_DOUBLE] = _double_mirror;
466 _mirrors[T_BYTE] = _byte_mirror;
467 _mirrors[T_BOOLEAN] = _bool_mirror;
468 _mirrors[T_CHAR] = _char_mirror;
469 _mirrors[T_LONG] = _long_mirror;
470 _mirrors[T_SHORT] = _short_mirror;
471 _mirrors[T_VOID] = _void_mirror;
472 //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
473 //_mirrors[T_ARRAY] = InstanceKlass::cast(_object_klass)->java_mirror();
474 }
476 void Universe::fixup_mirrors(TRAPS) {
477 // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
478 // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
479 // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
480 // that the number of objects allocated at this point is very small.
481 assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
482 HandleMark hm(THREAD);
483 // Cache the start of the static fields
484 InstanceMirrorKlass::init_offset_of_static_fields();
486 GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
487 int list_length = list->length();
488 for (int i = 0; i < list_length; i++) {
489 Klass* k = list->at(i);
490 assert(k->is_klass(), "List should only hold classes");
491 EXCEPTION_MARK;
492 KlassHandle kh(THREAD, k);
493 java_lang_Class::fixup_mirror(kh, CATCH);
494 }
495 delete java_lang_Class::fixup_mirror_list();
496 java_lang_Class::set_fixup_mirror_list(NULL);
497 }
499 static bool has_run_finalizers_on_exit = false;
501 void Universe::run_finalizers_on_exit() {
502 if (has_run_finalizers_on_exit) return;
503 has_run_finalizers_on_exit = true;
505 // Called on VM exit. This ought to be run in a separate thread.
506 if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
507 {
508 PRESERVE_EXCEPTION_MARK;
509 KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
510 JavaValue result(T_VOID);
511 JavaCalls::call_static(
512 &result,
513 finalizer_klass,
514 vmSymbols::run_finalizers_on_exit_name(),
515 vmSymbols::void_method_signature(),
516 THREAD
517 );
518 // Ignore any pending exceptions
519 CLEAR_PENDING_EXCEPTION;
520 }
521 }
524 // initialize_vtable could cause gc if
525 // 1) we specified true to initialize_vtable and
526 // 2) this ran after gc was enabled
527 // In case those ever change we use handles for oops
528 void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
529 // init vtable of k and all subclasses
530 Klass* ko = k_h();
531 klassVtable* vt = ko->vtable();
532 if (vt) vt->initialize_vtable(false, CHECK);
533 if (ko->oop_is_instance()) {
534 InstanceKlass* ik = (InstanceKlass*)ko;
535 for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) {
536 reinitialize_vtable_of(s_h, CHECK);
537 }
538 }
539 }
542 void initialize_itable_for_klass(Klass* k, TRAPS) {
543 InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
544 }
547 void Universe::reinitialize_itables(TRAPS) {
548 SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
550 }
553 bool Universe::on_page_boundary(void* addr) {
554 return ((uintptr_t) addr) % os::vm_page_size() == 0;
555 }
558 bool Universe::should_fill_in_stack_trace(Handle throwable) {
559 // never attempt to fill in the stack trace of preallocated errors that do not have
560 // backtrace. These errors are kept alive forever and may be "re-used" when all
561 // preallocated errors with backtrace have been consumed. Also need to avoid
562 // a potential loop which could happen if an out-of-memory error occurs when attempting
563 // to allocate the backtrace.
564 return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
565 (throwable() != Universe::_out_of_memory_error_perm_gen) &&
566 (throwable() != Universe::_out_of_memory_error_array_size) &&
567 (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
568 }
571 oop Universe::gen_out_of_memory_error(oop default_err) {
572 // generate an out of memory error:
573 // - if there is a preallocated error with backtrace available then return it with
574 // a filled in stack trace.
575 // - if there are no preallocated errors with backtrace available then return
576 // an error without backtrace.
577 int next;
578 if (_preallocated_out_of_memory_error_avail_count > 0) {
579 next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
580 assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
581 } else {
582 next = -1;
583 }
584 if (next < 0) {
585 // all preallocated errors have been used.
586 // return default
587 return default_err;
588 } else {
589 // get the error object at the slot and set it to NULL so that the
590 // array isn't keeping it alive anymore.
591 oop exc = preallocated_out_of_memory_errors()->obj_at(next);
592 assert(exc != NULL, "slot has been used already");
593 preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
595 // use the message from the default error
596 oop msg = java_lang_Throwable::message(default_err);
597 assert(msg != NULL, "no message");
598 java_lang_Throwable::set_message(exc, msg);
600 // populate the stack trace and return it.
601 java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
602 return exc;
603 }
604 }
606 static intptr_t non_oop_bits = 0;
608 void* Universe::non_oop_word() {
609 // Neither the high bits nor the low bits of this value are allowed
610 // to look like (respectively) the high or low bits of a real oop.
611 //
612 // High and low are CPU-specific notions, but low always includes
613 // the low-order bit. Since oops are always aligned at least mod 4,
614 // setting the low-order bit will ensure that the low half of the
615 // word will never look like that of a real oop.
616 //
617 // Using the OS-supplied non-memory-address word (usually 0 or -1)
618 // will take care of the high bits, however many there are.
620 if (non_oop_bits == 0) {
621 non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
622 }
624 return (void*)non_oop_bits;
625 }
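// Illustrative only (not code from this file): the typical use of this
// sentinel is to prime a cached oop/Klass* slot with a value that can never
// compare equal to a real pointer, so the slot reads as "empty" until set.
//
//   void* cached = Universe::non_oop_word();          // never a valid oop
//   ...
//   if (cached == (void*)some_real_oop) { ... }       // cannot match until cached is filled in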
627 jint universe_init() {
628 assert(!Universe::_fully_initialized, "called after initialize_vtables");
629 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
630 "LogHeapWordSize is incorrect.");
631 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
632 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
633 "oop size is not not a multiple of HeapWord size");
634 TraceTime timer("Genesis", TraceStartupTime);
635 GC_locker::lock(); // do not allow gc during bootstrapping
636 JavaClasses::compute_hard_coded_offsets();
638 jint status = Universe::initialize_heap();
639 if (status != JNI_OK) {
640 return status;
641 }
643 // Create memory for metadata. Must be after initializing heap for
644 // DumpSharedSpaces.
645 ClassLoaderData::init_null_class_loader_data();
647 // We have a heap so create the Method* caches before
648 // Metaspace::initialize_shared_spaces() tries to populate them.
649 Universe::_finalizer_register_cache = new LatestMethodOopCache();
650 Universe::_loader_addClass_cache = new LatestMethodOopCache();
651 Universe::_reflect_invoke_cache = new ActiveMethodOopsCache();
653 if (UseSharedSpaces) {
654 // Read the data structures supporting the shared spaces (shared
655 // system dictionary, symbol table, etc.). After that, access to
656 // the file (other than the mapped regions) is no longer needed, and
657 // the file is closed. Closing the file does not affect the
658 // currently mapped regions.
659 MetaspaceShared::initialize_shared_spaces();
660 StringTable::create_table();
661 } else {
662 SymbolTable::create_table();
663 StringTable::create_table();
664 ClassLoader::create_package_info_table();
665 }
667 return JNI_OK;
668 }
670 // Choose the heap base address and oop encoding mode
671 // when compressed oops are used:
672 // Unscaled - Use 32-bit oops without encoding when
673 // NarrowOopHeapBaseMin + heap_size < 4Gb
674 // ZeroBased - Use zero based compressed oops with encoding when
675 // NarrowOopHeapBaseMin + heap_size < 32Gb
676 // HeapBased - Use compressed oops with heap base + encoding.
678 // 4Gb
679 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
680 // 32Gb
681 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
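// For reference, a sketch (not code used below) of how a narrow oop is decoded
// under the three modes; base and shift are the values chosen by this routine:
//
//   oop decode(narrowOop v) {
//     // Unscaled:  base == NULL, shift == 0
//     // ZeroBased: base == NULL, shift == LogMinObjAlignmentInBytes
//     // HeapBased: base != NULL, shift == LogMinObjAlignmentInBytes
//     return (oop)(Universe::narrow_oop_base() +
//                  ((uintptr_t)v << Universe::narrow_oop_shift()));
//   }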
683 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
684 size_t base = 0;
685 #ifdef _LP64
686 if (UseCompressedOops) {
687 assert(mode == UnscaledNarrowOop ||
688 mode == ZeroBasedNarrowOop ||
689 mode == HeapBasedNarrowOop, "mode is invalid");
690 const size_t total_size = heap_size + HeapBaseMinAddress;
691 // Return specified base for the first request.
692 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
693 base = HeapBaseMinAddress;
695 // If the total size and the metaspace size are small enough to allow
696 // UnscaledNarrowOop then just use UnscaledNarrowOop.
697 } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop) &&
698 (!UseCompressedKlassPointers ||
699 (((OopEncodingHeapMax - heap_size) + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax))) {
700 // We don't need to check the metaspace size here because it is always smaller
701 // than total_size.
702 if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
703 (Universe::narrow_oop_shift() == 0)) {
704 // Use 32-bit oops without encoding and
705 // place heap's top on the 4Gb boundary
706 base = (NarrowOopHeapMax - heap_size);
707 } else {
708 // Can't reserve with NarrowOopShift == 0
709 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
710 if (mode == UnscaledNarrowOop ||
711 (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
712 // Use zero based compressed oops with encoding and
713 // place heap's top on the 32Gb boundary in case
714 // total_size > 4Gb or failed to reserve below 4Gb.
715 base = (OopEncodingHeapMax - heap_size);
716 }
717 }
719 // See if ZeroBasedNarrowOop encoding will work for a heap based at
720 // (KlassEncodingMetaspaceMax - class_metaspace_size()).
721 } else if (UseCompressedKlassPointers && (mode != HeapBasedNarrowOop) &&
722 (Universe::class_metaspace_size() + HeapBaseMinAddress <= KlassEncodingMetaspaceMax) &&
723 (KlassEncodingMetaspaceMax + heap_size - Universe::class_metaspace_size() <= OopEncodingHeapMax)) {
724 base = (KlassEncodingMetaspaceMax - Universe::class_metaspace_size());
725 } else {
726 // UnscaledNarrowOop encoding didn't work and no base was found for ZeroBasedNarrowOop, or
727 // HeapBasedNarrowOop encoding was requested. So we can't reserve below 32Gb.
728 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
729 }
731 // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
732 // used in ReservedHeapSpace() constructors.
733 // The final values will be set in initialize_heap() below.
734 if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax) &&
735 (!UseCompressedKlassPointers || (base + Universe::class_metaspace_size()) <= KlassEncodingMetaspaceMax)) {
736 // Use zero based compressed oops
737 Universe::set_narrow_oop_base(NULL);
738 // Don't need guard page for implicit checks in indexed
739 // addressing mode with zero based Compressed Oops.
740 Universe::set_narrow_oop_use_implicit_null_checks(true);
741 } else {
742 // Set to a non-NULL value so the ReservedSpace ctor computes
743 // the correct no-access prefix.
744 // The final value will be set in initialize_heap() below.
745 Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
746 #ifdef _WIN64
747 if (UseLargePages) {
748 // Cannot allocate guard pages for implicit checks in indexed
749 // addressing mode when large pages are specified on windows.
750 Universe::set_narrow_oop_use_implicit_null_checks(false);
751 }
752 #endif // _WIN64
753 }
754 }
755 #endif
756 return (char*)base; // also return NULL (don't care) for 32-bit VM
757 }
759 jint Universe::initialize_heap() {
761 if (UseParallelGC) {
762 #if INCLUDE_ALL_GCS
763 Universe::_collectedHeap = new ParallelScavengeHeap();
764 #else // INCLUDE_ALL_GCS
765 fatal("UseParallelGC not supported in this VM.");
766 #endif // INCLUDE_ALL_GCS
768 } else if (UseG1GC) {
769 #if INCLUDE_ALL_GCS
770 G1CollectorPolicy* g1p = new G1CollectorPolicy();
771 G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
772 Universe::_collectedHeap = g1h;
773 #else // INCLUDE_ALL_GCS
774 fatal("UseG1GC not supported in java kernel vm.");
775 #endif // INCLUDE_ALL_GCS
777 } else {
778 GenCollectorPolicy *gc_policy;
780 if (UseSerialGC) {
781 gc_policy = new MarkSweepPolicy();
782 } else if (UseConcMarkSweepGC) {
783 #if INCLUDE_ALL_GCS
784 if (UseAdaptiveSizePolicy) {
785 gc_policy = new ASConcurrentMarkSweepPolicy();
786 } else {
787 gc_policy = new ConcurrentMarkSweepPolicy();
788 }
789 #else // INCLUDE_ALL_GCS
790 fatal("UseConcMarkSweepGC not supported in this VM.");
791 #endif // INCLUDE_ALL_GCS
792 } else { // default old generation
793 gc_policy = new MarkSweepPolicy();
794 }
796 Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
797 }
799 jint status = Universe::heap()->initialize();
800 if (status != JNI_OK) {
801 return status;
802 }
804 #ifdef _LP64
805 if (UseCompressedOops) {
806 // Subtract a page because something can get allocated at heap base.
807 // This also makes implicit null checking work, because the
808 // memory+1 page below heap_base needs to cause a signal.
809 // See needs_explicit_null_check.
810 // Only set the heap base for compressed oops because it indicates
811 // compressed oops for pstack code.
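// (Sketch of the effect described above; register names are illustrative.
// With the narrow oop base kept one page below the heap, a compiled load like
//
//   mov rax, [r12 + rnarrow*8 + offset]   // r12 holds the narrow oop base
//
// executed with rnarrow == 0 and a small field offset touches that protected
// page, faults, and the signal handler turns the fault into the
// NullPointerException an explicit null check would have thrown.)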
812 bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
813 if (verbose) {
814 tty->cr();
815 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
816 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
817 }
818 if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) ||
819 (UseCompressedKlassPointers &&
820 ((uint64_t)Universe::heap()->base() + Universe::class_metaspace_size() > KlassEncodingMetaspaceMax))) {
821 // Can't reserve heap below 32Gb.
822 // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
823 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
824 if (verbose) {
825 tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
826 }
827 } else {
828 Universe::set_narrow_oop_base(0);
829 if (verbose) {
830 tty->print(", zero based Compressed Oops");
831 }
832 #ifdef _WIN64
833 if (!Universe::narrow_oop_use_implicit_null_checks()) {
834 // Don't need guard page for implicit checks in indexed addressing
835 // mode with zero based Compressed Oops.
836 Universe::set_narrow_oop_use_implicit_null_checks(true);
837 }
838 #endif // _WIN64
839 if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
840 // Can't reserve heap below 4Gb.
841 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
842 } else {
843 Universe::set_narrow_oop_shift(0);
844 if (verbose) {
845 tty->print(", 32-bits Oops");
846 }
847 }
848 }
849 if (verbose) {
850 tty->cr();
851 tty->cr();
852 }
853 if (UseCompressedKlassPointers) {
854 Universe::set_narrow_klass_base(Universe::narrow_oop_base());
855 Universe::set_narrow_klass_shift(MIN2(Universe::narrow_oop_shift(), LogKlassAlignmentInBytes));
856 }
857 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
858 }
859 // Universe::narrow_oop_base() is one page below the metaspace
860 // base. The actual metaspace base depends on alignment constraints
861 // so we don't know its exact location here.
862 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() - os::vm_page_size() - ClassMetaspaceSize) ||
863 Universe::narrow_oop_base() == NULL, "invalid value");
864 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
865 Universe::narrow_oop_shift() == 0, "invalid value");
866 #endif
868 // We will never reach the CATCH below since Exceptions::_throw will cause
869 // the VM to exit if an exception is thrown during initialization
871 if (UseTLAB) {
872 assert(Universe::heap()->supports_tlab_allocation(),
873 "Should support thread-local allocation buffers");
874 ThreadLocalAllocBuffer::startup_initialization();
875 }
876 return JNI_OK;
877 }
880 // Reserve the Java heap, which is now the same for all GCs.
881 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
882 // Add in the class metaspace area so the classes in the headers can
883 // be compressed the same as instances.
884 // Need to round class space size up because it's below the heap and
885 // the actual alignment depends on its size.
886 Universe::set_class_metaspace_size(align_size_up(ClassMetaspaceSize, alignment));
887 size_t total_reserved = align_size_up(heap_size + Universe::class_metaspace_size(), alignment);
888 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
889 "heap size is too big for compressed oops");
890 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
892 ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
894 if (UseCompressedOops) {
895 if (addr != NULL && !total_rs.is_reserved()) {
896 // Failed to reserve at specified address - the requested memory
897 // region is taken already, for example, by 'java' launcher.
898 // Try again to reserve the heap higher.
899 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
901 ReservedHeapSpace total_rs0(total_reserved, alignment,
902 UseLargePages, addr);
904 if (addr != NULL && !total_rs0.is_reserved()) {
905 // Failed to reserve at specified address again - give up.
906 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
907 assert(addr == NULL, "");
909 ReservedHeapSpace total_rs1(total_reserved, alignment,
910 UseLargePages, addr);
911 total_rs = total_rs1;
912 } else {
913 total_rs = total_rs0;
914 }
915 }
916 }
918 if (!total_rs.is_reserved()) {
919 vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
920 return total_rs;
921 }
923 // Split the reserved space into main Java heap and a space for
924 // classes so that they can be compressed using the same algorithm
925 // as compressed oops. If compress oops and compress klass ptrs are
926 // used we need the meta space first: if the alignment used for
927 // compressed oops is greater than the one used for compressed klass
928 // ptrs, a metadata space on top of the heap could become
929 // unreachable.
930 ReservedSpace class_rs = total_rs.first_part(Universe::class_metaspace_size());
931 ReservedSpace heap_rs = total_rs.last_part(Universe::class_metaspace_size(), alignment);
932 Metaspace::initialize_class_space(class_rs);
934 if (UseCompressedOops) {
935 // Universe::initialize_heap() will reset this to NULL if unscaled
936 // or zero-based narrow oops are actually used.
937 address base = (address)(total_rs.base() - os::vm_page_size());
938 Universe::set_narrow_oop_base(base);
939 }
940 return heap_rs;
941 }
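// Rough picture of the layout produced by reserve_heap() when compressed
// oops/klass pointers are in use (illustrative, addresses increase rightwards):
//
//   narrow_oop_base = total_rs.base() - one page
//   [ class metaspace : class_metaspace_size() ][ Java heap : heap_size (aligned) ]
//   ^ total_rs.base() == class_rs.base()         ^ heap_rs.base()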
944 // It's the caller's responsibility to ensure glitch-freedom
945 // (if required).
946 void Universe::update_heap_info_at_gc() {
947 _heap_capacity_at_last_gc = heap()->capacity();
948 _heap_used_at_last_gc = heap()->used();
949 }
953 void universe2_init() {
954 EXCEPTION_MARK;
955 Universe::genesis(CATCH);
956 }
959 // This function is defined in JVM.cpp
960 extern void initialize_converter_functions();
962 bool universe_post_init() {
963 assert(!is_init_completed(), "Error: initialization not yet completed!");
964 Universe::_fully_initialized = true;
965 EXCEPTION_MARK;
966 { ResourceMark rm;
967 Interpreter::initialize(); // needed for interpreter entry points
968 if (!UseSharedSpaces) {
969 HandleMark hm(THREAD);
970 KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
971 Universe::reinitialize_vtable_of(ok_h, CHECK_false);
972 Universe::reinitialize_itables(CHECK_false);
973 }
974 }
976 HandleMark hm(THREAD);
977 Klass* k;
978 instanceKlassHandle k_h;
979 // Setup preallocated empty java.lang.Class array
980 Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
982 // Setup preallocated OutOfMemoryError errors
983 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
984 k_h = instanceKlassHandle(THREAD, k);
985 Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
986 Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false);
987 Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
988 Universe::_out_of_memory_error_gc_overhead_limit =
989 k_h->allocate_instance(CHECK_false);
991 // Setup preallocated NullPointerException
992 // (this is currently used for a cheap & dirty solution in compiler exception handling)
993 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
994 Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
995 // Setup preallocated ArithmeticException
996 // (this is currently used for a cheap & dirty solution in compiler exception handling)
997 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
998 Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
999 // Virtual Machine Error for when we get into a situation we can't resolve
1000 k = SystemDictionary::resolve_or_fail(
1001 vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
1002 bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
1003 if (!linked) {
1004 tty->print_cr("Unable to link/verify VirtualMachineError class");
1005 return false; // initialization failed
1006 }
1007 Universe::_virtual_machine_error_instance =
1008 InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1010 Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1012 if (!DumpSharedSpaces) {
1013 // These are the only Java fields that are currently set during shared space dumping.
1014 // We prefer to not handle this generally, so we always reinitialize these detail messages.
1015 Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
1016 java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
1018 msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
1019 java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());
1021 msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
1022 java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
1024 msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
1025 java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
1027 msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
1028 java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
1030 // Setup the array of errors that have preallocated backtrace
1031 k = Universe::_out_of_memory_error_java_heap->klass();
1032 assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
1033 k_h = instanceKlassHandle(THREAD, k);
1035 int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
1036 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
1037 for (int i=0; i<len; i++) {
1038 oop err = k_h->allocate_instance(CHECK_false);
1039 Handle err_h = Handle(THREAD, err);
1040 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1041 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1042 }
1043 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1044 }
1047 // Setup static method for registering finalizers
1048 // The finalizer klass must be linked before looking up the method, in
1049 // case it needs to get rewritten.
1050 InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
1051 Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
1052 vmSymbols::register_method_name(),
1053 vmSymbols::register_method_signature());
1054 if (m == NULL || !m->is_static()) {
1055 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1056 "java.lang.ref.Finalizer.register", false);
1057 }
1058 Universe::_finalizer_register_cache->init(
1059 SystemDictionary::Finalizer_klass(), m, CHECK_false);
1061 // Resolve on first use and initialize class.
1062 // Note: No race-condition here, since a resolve will always return the same result
1064 // Setup method for security checks
1065 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
1066 k_h = instanceKlassHandle(THREAD, k);
1067 k_h->link_class(CHECK_false);
1068 m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
1069 if (m == NULL || m->is_static()) {
1070 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1071 "java.lang.reflect.Method.invoke", false);
1072 }
1073 Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
1075 // Setup method for registering loaded classes in class loader vector
1076 InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
1077 m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
1078 if (m == NULL || m->is_static()) {
1079 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1080 "java.lang.ClassLoader.addClass", false);
1081 }
1082 Universe::_loader_addClass_cache->init(
1083 SystemDictionary::ClassLoader_klass(), m, CHECK_false);
1085 // The following initializes converter functions for serialization in
1086 // JVM.cpp. If we clean up the StrictMath code above we may want to find
1087 // a better solution for this as well.
1088 initialize_converter_functions();
1090 // This needs to be done before the first scavenge/gc, since
1091 // it's an input to soft ref clearing policy.
1092 {
1093 MutexLocker x(Heap_lock);
1094 Universe::update_heap_info_at_gc();
1095 }
1097 // ("weak") refs processing infrastructure initialization
1098 Universe::heap()->post_initialize();
1100 // Initialize performance counters for metaspaces
1101 MetaspaceCounters::initialize_performance_counters();
1103 GC_locker::unlock(); // allow gc after bootstrapping
1105 MemoryService::set_universe_heap(Universe::_collectedHeap);
1106 return true;
1107 }
1110 void Universe::compute_base_vtable_size() {
1111 _base_vtable_size = ClassLoader::compute_Object_vtable();
1112 }
1115 // %%% The Universe::flush_foo methods belong in CodeCache.
1117 // Flushes compiled methods dependent on dependee.
1118 void Universe::flush_dependents_on(instanceKlassHandle dependee) {
1119 assert_lock_strong(Compile_lock);
1121 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1123 // CodeCache can only be updated by a thread_in_VM and they will all be
1124 // stopped during the safepoint so CodeCache will be safe to update without
1125 // holding the CodeCache_lock.
1127 KlassDepChange changes(dependee);
1129 // Compute the dependent nmethods
1130 if (CodeCache::mark_for_deoptimization(changes) > 0) {
1131 // At least one nmethod has been marked for deoptimization
1132 VM_Deoptimize op;
1133 VMThread::execute(&op);
1134 }
1135 }
1137 // Flushes compiled methods dependent on a particular CallSite
1138 // instance when its target is different than the given MethodHandle.
1139 void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
1140 assert_lock_strong(Compile_lock);
1142 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1144 // CodeCache can only be updated by a thread_in_VM and they will all be
1145 // stopped during the safepoint so CodeCache will be safe to update without
1146 // holding the CodeCache_lock.
1148 CallSiteDepChange changes(call_site(), method_handle());
1150 // Compute the dependent nmethods that have a reference to a
1151 // CallSite object. We use InstanceKlass::mark_dependent_nmethod
1152 // directly instead of CodeCache::mark_for_deoptimization because we
1153 // want dependents on the call site class only not all classes in
1154 // the ContextStream.
1155 int marked = 0;
1156 {
1157 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1158 InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
1159 marked = call_site_klass->mark_dependent_nmethods(changes);
1160 }
1161 if (marked > 0) {
1162 // At least one nmethod has been marked for deoptimization
1163 VM_Deoptimize op;
1164 VMThread::execute(&op);
1165 }
1166 }
1168 #ifdef HOTSWAP
1169 // Flushes compiled methods dependent on dependee in the evolutionary sense
1170 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1171 // --- Compile_lock is not held. However we are at a safepoint.
1172 assert_locked_or_safepoint(Compile_lock);
1173 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1175 // CodeCache can only be updated by a thread_in_VM and they will all be
1176 // stopped during the safepoint so CodeCache will be safe to update without
1177 // holding the CodeCache_lock.
1179 // Compute the dependent nmethods
1180 if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
1181 // At least one nmethod has been marked for deoptimization
1183 // All this already happens inside a VM_Operation, so we'll do all the work here.
1184 // Stuff copied from VM_Deoptimize and modified slightly.
1186 // We do not want any GCs to happen while we are in the middle of this VM operation
1187 ResourceMark rm;
1188 DeoptimizationMarker dm;
1190 // Deoptimize all activations depending on marked nmethods
1191 Deoptimization::deoptimize_dependents();
1193 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1194 CodeCache::make_marked_nmethods_not_entrant();
1195 }
1196 }
1197 #endif // HOTSWAP
1200 // Flushes compiled methods dependent on dependee
1201 void Universe::flush_dependents_on_method(methodHandle m_h) {
1202 // --- Compile_lock is not held. However we are at a safepoint.
1203 assert_locked_or_safepoint(Compile_lock);
1205 // CodeCache can only be updated by a thread_in_VM and they will all be
1206 // stopped during the safepoint so CodeCache will be safe to update without
1207 // holding the CodeCache_lock.
1209 // Compute the dependent nmethods
1210 if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
1211 // At least one nmethod has been marked for deoptimization
1213 // All this already happens inside a VM_Operation, so we'll do all the work here.
1214 // Stuff copied from VM_Deoptimize and modified slightly.
1216 // We do not want any GCs to happen while we are in the middle of this VM operation
1217 ResourceMark rm;
1218 DeoptimizationMarker dm;
1220 // Deoptimize all activations depending on marked nmethods
1221 Deoptimization::deoptimize_dependents();
1223 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1224 CodeCache::make_marked_nmethods_not_entrant();
1225 }
1226 }
1228 void Universe::print() {
1229 print_on(gclog_or_tty);
1230 }
1232 void Universe::print_on(outputStream* st, bool extended) {
1233 st->print_cr("Heap");
1234 if (!extended) {
1235 heap()->print_on(st);
1236 } else {
1237 heap()->print_extended_on(st);
1238 }
1239 }
1241 void Universe::print_heap_at_SIGBREAK() {
1242 if (PrintHeapAtSIGBREAK) {
1243 MutexLocker hl(Heap_lock);
1244 print_on(tty);
1245 tty->cr();
1246 tty->flush();
1247 }
1248 }
1250 void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
1251 st->print_cr("{Heap before GC invocations=%u (full %u):",
1252 heap()->total_collections(),
1253 heap()->total_full_collections());
1254 if (!PrintHeapAtGCExtended || ignore_extended) {
1255 heap()->print_on(st);
1256 } else {
1257 heap()->print_extended_on(st);
1258 }
1259 }
1261 void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
1262 st->print_cr("Heap after GC invocations=%u (full %u):",
1263 heap()->total_collections(),
1264 heap()->total_full_collections());
1265 if (!PrintHeapAtGCExtended || ignore_extended) {
1266 heap()->print_on(st);
1267 } else {
1268 heap()->print_extended_on(st);
1269 }
1270 st->print_cr("}");
1271 }
1273 void Universe::verify(bool silent, VerifyOption option) {
1274 // The use of _verify_in_progress is a temporary work around for
1275 // 6320749. Don't bother creating a class to set and clear
1276 // it since it is only used in this method and the control flow is
1277 // straightforward.
1278 _verify_in_progress = true;
1280 COMPILER2_PRESENT(
1281 assert(!DerivedPointerTable::is_active(),
1282 "DPT should not be active during verification "
1283 "(of thread stacks below)");
1284 )
1286 ResourceMark rm;
1287 HandleMark hm; // Handles created during verification can be zapped
1288 _verify_count++;
1290 if (!silent) gclog_or_tty->print("[Verifying ");
1291 if (!silent) gclog_or_tty->print("threads ");
1292 Threads::verify();
1293 heap()->verify(silent, option);
1295 if (!silent) gclog_or_tty->print("syms ");
1296 SymbolTable::verify();
1297 if (!silent) gclog_or_tty->print("strs ");
1298 StringTable::verify();
1299 {
1300 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1301 if (!silent) gclog_or_tty->print("zone ");
1302 CodeCache::verify();
1303 }
1304 if (!silent) gclog_or_tty->print("dict ");
1305 SystemDictionary::verify();
1306 #ifndef PRODUCT
1307 if (!silent) gclog_or_tty->print("cldg ");
1308 ClassLoaderDataGraph::verify();
1309 #endif
1310 if (!silent) gclog_or_tty->print("metaspace chunks ");
1311 MetaspaceAux::verify_free_chunks();
1312 if (!silent) gclog_or_tty->print("hand ");
1313 JNIHandles::verify();
1314 if (!silent) gclog_or_tty->print("C-heap ");
1315 os::check_heap();
1316 if (!silent) gclog_or_tty->print("code cache ");
1317 CodeCache::verify_oops();
1318 if (!silent) gclog_or_tty->print_cr("]");
1320 _verify_in_progress = false;
1321 }
1323 // Oop verification (see MacroAssembler::verify_oop)
1325 static uintptr_t _verify_oop_data[2] = {0, (uintptr_t)-1};
1326 static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
1329 #ifndef PRODUCT
1331 static void calculate_verify_data(uintptr_t verify_data[2],
1332 HeapWord* low_boundary,
1333 HeapWord* high_boundary) {
1334 assert(low_boundary < high_boundary, "bad interval");
1336 // decide which low-order bits we require to be clear:
1337 size_t alignSize = MinObjAlignmentInBytes;
1338 size_t min_object_size = CollectedHeap::min_fill_size();
1340 // make an inclusive limit:
1341 uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
1342 uintptr_t min = (uintptr_t)low_boundary;
1343 assert(min < max, "bad interval");
1344 uintptr_t diff = max ^ min;
1346 // throw away enough low-order bits to make the diff vanish
1347 uintptr_t mask = (uintptr_t)(-1);
1348 while ((mask & diff) != 0)
1349 mask <<= 1;
1350 uintptr_t bits = (min & mask);
1351 assert(bits == (max & mask), "correct mask");
1352 // check an intermediate value between min and max, just to make sure:
1353 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1355 // require address alignment, too:
1356 mask |= (alignSize - 1);
1358 if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
1359 assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
1360 }
1361 verify_data[0] = mask;
1362 verify_data[1] = bits;
1363 }
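// Illustrative only: MacroAssembler::verify_oop uses the (mask, bits) pair
// computed above as a cheap plausibility filter on a candidate oop. The
// generated assembly differs, but the test amounts to:
//
//   bool plausible_oop(uintptr_t addr, const uintptr_t verify_data[2]) {
//     uintptr_t mask = verify_data[0];
//     uintptr_t bits = verify_data[1];
//     return (addr & mask) == bits;   // right alignment and heap address prefix
//   }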
1365 // Oop verification (see MacroAssembler::verify_oop)
1367 uintptr_t Universe::verify_oop_mask() {
1368 MemRegion m = heap()->reserved_region();
1369 calculate_verify_data(_verify_oop_data,
1370 m.start(),
1371 m.end());
1372 return _verify_oop_data[0];
1373 }
1377 uintptr_t Universe::verify_oop_bits() {
1378 verify_oop_mask();
1379 return _verify_oop_data[1];
1380 }
1382 uintptr_t Universe::verify_mark_mask() {
1383 return markOopDesc::lock_mask_in_place;
1384 }
1386 uintptr_t Universe::verify_mark_bits() {
1387 intptr_t mask = verify_mark_mask();
1388 intptr_t bits = (intptr_t)markOopDesc::prototype();
1389 assert((bits & ~mask) == 0, "no stray header bits");
1390 return bits;
1391 }
1392 #endif // PRODUCT
1395 void Universe::compute_verify_oop_data() {
1396 verify_oop_mask();
1397 verify_oop_bits();
1398 verify_mark_mask();
1399 verify_mark_bits();
1400 }
1403 void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
1404 if (!UseSharedSpaces) {
1405 _klass = k;
1406 }
1407 #ifndef PRODUCT
1408 else {
1409 // sharing initialization should have already set up _klass
1410 assert(_klass != NULL, "just checking");
1411 }
1412 #endif
1414 _method_idnum = m->method_idnum();
1415 assert(_method_idnum >= 0, "sanity check");
1416 }
1419 ActiveMethodOopsCache::~ActiveMethodOopsCache() {
1420 if (_prev_methods != NULL) {
1421 delete _prev_methods;
1422 _prev_methods = NULL;
1423 }
1424 }
1427 void ActiveMethodOopsCache::add_previous_version(Method* const method) {
1428 assert(Thread::current()->is_VM_thread(),
1429 "only VMThread can add previous versions");
1431 // Only append the previous method if it is executing on the stack.
1432 if (method->on_stack()) {
1434 if (_prev_methods == NULL) {
1435 // This is the first previous version so make some space.
1436 // Start with 2 elements under the assumption that the class
1437 // won't be redefined much.
1438 _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
1439 }
1441 // RC_TRACE macro has an embedded ResourceMark
1442 RC_TRACE(0x00000100,
1443 ("add: %s(%s): adding prev version ref for cached method @%d",
1444 method->name()->as_C_string(), method->signature()->as_C_string(),
1445 _prev_methods->length()));
1447 _prev_methods->append(method);
1448 }
1451 // Since the caller is the VMThread and we are at a safepoint, this is a good
1452 // time to clear out unused method references.
1454 if (_prev_methods == NULL) return;
1456 for (int i = _prev_methods->length() - 1; i >= 0; i--) {
1457 Method* method = _prev_methods->at(i);
1458 assert(method != NULL, "weak method ref was unexpectedly cleared");
1460 if (!method->on_stack()) {
1461 // This method isn't running anymore so remove it
1462 _prev_methods->remove_at(i);
1463 MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
1464 } else {
1465 // RC_TRACE macro has an embedded ResourceMark
1466 RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive",
1467 method->name()->as_C_string(), method->signature()->as_C_string(), i));
1468 }
1469 }
1470 } // end add_previous_version()
1473 bool ActiveMethodOopsCache::is_same_method(Method* const method) const {
1474 InstanceKlass* ik = InstanceKlass::cast(klass());
1475 Method* check_method = ik->method_with_idnum(method_idnum());
1476 assert(check_method != NULL, "sanity check");
1477 if (check_method == method) {
1478 // done with the easy case
1479 return true;
1480 }
1482 if (_prev_methods != NULL) {
1483 // The cached method has been redefined at least once so search
1484 // the previous versions for a match.
1485 for (int i = 0; i < _prev_methods->length(); i++) {
1486 check_method = _prev_methods->at(i);
1487 if (check_method == method) {
1488 // a previous version matches
1489 return true;
1490 }
1491 }
1492 }
1494 // either no previous versions or no previous version matched
1495 return false;
1496 }
1499 Method* LatestMethodOopCache::get_Method() {
1500 InstanceKlass* ik = InstanceKlass::cast(klass());
1501 Method* m = ik->method_with_idnum(method_idnum());
1502 assert(m != NULL, "sanity check");
1503 return m;
1504 }
1507 #ifdef ASSERT
1508 // Release dummy object(s) at bottom of heap
1509 bool Universe::release_fullgc_alot_dummy() {
1510 MutexLocker ml(FullGCALot_lock);
1511 if (_fullgc_alot_dummy_array != NULL) {
1512 if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
1513 // No more dummies to release, release entire array instead
1514 _fullgc_alot_dummy_array = NULL;
1515 return false;
1516 }
1517 if (!UseConcMarkSweepGC) {
1518 // Release dummy at bottom of old generation
1519 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1520 }
1521 // Release dummy at bottom of permanent generation
1522 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1523 }
1524 return true;
1525 }
1527 #endif // ASSERT