Thu, 21 Aug 2014 13:57:51 -0700
8046070: Class Data Sharing clean up and refactoring
Summary: Cleaned up CDS to be more configurable, maintainable and extensible
Reviewed-by: dholmes, coleenp, acorn, mchung
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
#endif
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/dependencies.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genRemSet.hpp"
#include "memory/generation.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/space.hpp"
#include "memory/universe.hpp"
#include "memory/universe.inline.hpp"
#include "oops/constantPool.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "runtime/arguments.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/synchronizer.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/timer.hpp"
#include "runtime/vm_operations.hpp"
#include "services/memoryService.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/preserveException.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

// Known objects
Klass* Universe::_boolArrayKlassObj   = NULL;
Klass* Universe::_byteArrayKlassObj   = NULL;
Klass* Universe::_charArrayKlassObj   = NULL;
Klass* Universe::_intArrayKlassObj    = NULL;
Klass* Universe::_shortArrayKlassObj  = NULL;
Klass* Universe::_longArrayKlassObj   = NULL;
Klass* Universe::_singleArrayKlassObj = NULL;
Klass* Universe::_doubleArrayKlassObj = NULL;
Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
Klass* Universe::_objectArrayKlassObj = NULL;
oop Universe::_int_mirror    = NULL;
oop Universe::_float_mirror  = NULL;
oop Universe::_double_mirror = NULL;
oop Universe::_byte_mirror   = NULL;
oop Universe::_bool_mirror   = NULL;
oop Universe::_char_mirror   = NULL;
oop Universe::_long_mirror   = NULL;
oop Universe::_short_mirror  = NULL;
oop Universe::_void_mirror   = NULL;
oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
oop Universe::_main_thread_group = NULL;
oop Universe::_system_thread_group = NULL;
objArrayOop Universe::_the_empty_class_klass_array = NULL;
Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
oop Universe::_the_null_string = NULL;
oop Universe::_the_min_jint_string = NULL;
LatestMethodCache* Universe::_finalizer_register_cache = NULL;
LatestMethodCache* Universe::_loader_addClass_cache = NULL;
LatestMethodCache* Universe::_pd_implies_cache = NULL;
oop Universe::_out_of_memory_error_java_heap = NULL;
oop Universe::_out_of_memory_error_metaspace = NULL;
oop Universe::_out_of_memory_error_class_metaspace = NULL;
oop Universe::_out_of_memory_error_array_size = NULL;
oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
bool Universe::_verify_in_progress = false;
oop Universe::_null_ptr_exception_instance = NULL;
oop Universe::_arithmetic_exception_instance = NULL;
oop Universe::_virtual_machine_error_instance = NULL;
oop Universe::_vm_exception = NULL;
Method* Universe::_throw_illegal_access_error = NULL;
Array<int>* Universe::_the_empty_int_array = NULL;
Array<u2>* Universe::_the_empty_short_array = NULL;
Array<Klass*>* Universe::_the_empty_klass_array = NULL;
Array<Method*>* Universe::_the_empty_method_array = NULL;

// These variables are guarded by FullGCALot_lock.
debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
debug_only(int Universe::_fullgc_alot_dummy_next = 0;)

// Heap
int Universe::_verify_count = 0;

int Universe::_base_vtable_size = 0;
bool Universe::_bootstrapping = false;
bool Universe::_fully_initialized = false;

size_t Universe::_heap_capacity_at_last_gc;
size_t Universe::_heap_used_at_last_gc = 0;

CollectedHeap* Universe::_collectedHeap = NULL;

NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
address Universe::_narrow_ptrs_base;

void Universe::basic_type_classes_do(void f(Klass*)) {
  f(boolArrayKlassObj());
  f(byteArrayKlassObj());
  f(charArrayKlassObj());
  f(intArrayKlassObj());
  f(shortArrayKlassObj());
  f(longArrayKlassObj());
  f(singleArrayKlassObj());
  f(doubleArrayKlassObj());
}

void Universe::oops_do(OopClosure* f, bool do_all) {

  f->do_oop((oop*) &_int_mirror);
  f->do_oop((oop*) &_float_mirror);
  f->do_oop((oop*) &_double_mirror);
  f->do_oop((oop*) &_byte_mirror);
  f->do_oop((oop*) &_bool_mirror);
  f->do_oop((oop*) &_char_mirror);
  f->do_oop((oop*) &_long_mirror);
  f->do_oop((oop*) &_short_mirror);
  f->do_oop((oop*) &_void_mirror);

  for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
    f->do_oop((oop*) &_mirrors[i]);
  }
  assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");

  f->do_oop((oop*)&_the_empty_class_klass_array);
  f->do_oop((oop*)&_the_null_string);
  f->do_oop((oop*)&_the_min_jint_string);
  f->do_oop((oop*)&_out_of_memory_error_java_heap);
  f->do_oop((oop*)&_out_of_memory_error_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
  f->do_oop((oop*)&_out_of_memory_error_array_size);
  f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
  f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
  f->do_oop((oop*)&_null_ptr_exception_instance);
  f->do_oop((oop*)&_arithmetic_exception_instance);
  f->do_oop((oop*)&_virtual_machine_error_instance);
  f->do_oop((oop*)&_main_thread_group);
  f->do_oop((oop*)&_system_thread_group);
  f->do_oop((oop*)&_vm_exception);
  debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
}

// Serialize metadata in and out of CDS archive, not oops.
void Universe::serialize(SerializeClosure* f, bool do_all) {

  f->do_ptr((void**)&_boolArrayKlassObj);
  f->do_ptr((void**)&_byteArrayKlassObj);
  f->do_ptr((void**)&_charArrayKlassObj);
  f->do_ptr((void**)&_intArrayKlassObj);
  f->do_ptr((void**)&_shortArrayKlassObj);
  f->do_ptr((void**)&_longArrayKlassObj);
  f->do_ptr((void**)&_singleArrayKlassObj);
  f->do_ptr((void**)&_doubleArrayKlassObj);
  f->do_ptr((void**)&_objectArrayKlassObj);

  {
    for (int i = 0; i < T_VOID+1; i++) {
      if (_typeArrayKlassObjs[i] != NULL) {
        assert(i >= T_BOOLEAN, "checking");
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      } else if (do_all) {
        f->do_ptr((void**)&_typeArrayKlassObjs[i]);
      }
    }
  }

  f->do_ptr((void**)&_the_array_interfaces_array);
  f->do_ptr((void**)&_the_empty_int_array);
  f->do_ptr((void**)&_the_empty_short_array);
  f->do_ptr((void**)&_the_empty_method_array);
  f->do_ptr((void**)&_the_empty_klass_array);
  _finalizer_register_cache->serialize(f);
  _loader_addClass_cache->serialize(f);
  _pd_implies_cache->serialize(f);
}

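// Sketch of how the do_ptr() calls above behave (illustrative only; the
// concrete closures are defined by the CDS code, not in this file): at dump
// time f is a writer that appends each pointer to the archive, and at
// restore time a reader that assigns it back, roughly:
//
//   dump:    archive_write(*p);    // WriteClosure-style do_ptr (hypothetical name)
//   restore: *p = archive_read();  // ReadClosure-style do_ptr (hypothetical name)
//
// Driving both directions through this single walk keeps the read order
// identical to the write order by construction.
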
void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
  if (size < alignment || size % alignment != 0) {
    vm_exit_during_initialization(
      err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
  }
}

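// Example (hypothetical values):
//   check_alignment(16*M, 8*M, "heap size");  // ok: 16M is a multiple of 8M
//   check_alignment(10*M, 8*M, "heap size");  // exits: 10M % 8M != 0
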
void initialize_basic_type_klass(Klass* k, TRAPS) {
  Klass* ok = SystemDictionary::Object_klass();
  if (UseSharedSpaces) {
    ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
    assert(k->super() == ok, "u3");
    k->restore_unshareable_info(loader_data, Handle(), CHECK);
  } else {
    k->initialize_supers(ok, CHECK);
  }
  k->append_to_sibling_list();
}

void Universe::genesis(TRAPS) {
  ResourceMark rm;

  { FlagSetting fs(_bootstrapping, true);

    { MutexLocker mc(Compile_lock);

      // determine base vtable size; without that we cannot create the array klasses
      compute_base_vtable_size();

      if (!UseSharedSpaces) {
        _boolArrayKlassObj   = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
        _charArrayKlassObj   = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
        _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
        _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
        _byteArrayKlassObj   = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
        _shortArrayKlassObj  = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
        _intArrayKlassObj    = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
        _longArrayKlassObj   = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);

        _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
        _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
        _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
        _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
        _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
        _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
        _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
        _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;

        ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();

        _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
        _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
        _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
        _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
        _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
      }
    }

    vmSymbols::initialize(CHECK);

    SystemDictionary::initialize(CHECK);

    Klass* ok = SystemDictionary::Object_klass();

    _the_null_string = StringTable::intern("null", CHECK);
    _the_min_jint_string = StringTable::intern("-2147483648", CHECK);

    if (UseSharedSpaces) {
      // Verify shared interfaces array.
      assert(_the_array_interfaces_array->at(0) ==
             SystemDictionary::Cloneable_klass(), "u3");
      assert(_the_array_interfaces_array->at(1) ==
             SystemDictionary::Serializable_klass(), "u3");
    } else {
      // Set up shared interfaces array.  (Do this before supers are set up.)
      _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
      _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
    }

    initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
    initialize_basic_type_klass(charArrayKlassObj(), CHECK);
    initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
    initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
    initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
    initialize_basic_type_klass(intArrayKlassObj(), CHECK);
    initialize_basic_type_klass(longArrayKlassObj(), CHECK);
  } // end of core bootstrapping

  // Maybe this could be lifted up now that object array can be initialized
  // during the bootstrapping.

  // OLD
  // Initialize _objectArrayKlass after core bootstrapping to make
  // sure the super class is set up properly for _objectArrayKlass.
  // ---
  // NEW
  // Since some of the old system object arrays have been converted to
  // ordinary object arrays, _objectArrayKlass will be loaded when
  // SystemDictionary::initialize(CHECK); is run. See the extra check
  // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
  _objectArrayKlassObj = InstanceKlass::
    cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
  // OLD
  // Add the class to the class hierarchy manually to make sure that
  // its vtable is initialized after core bootstrapping is completed.
  // ---
  // NEW
  // Its vtable has already been initialized during core bootstrapping.
  _objectArrayKlassObj->append_to_sibling_list();

  // Compute is_jdk version flags.
  // Only 1.3 or later has the java.lang.Shutdown class.
  // Only 1.4 or later has the java.lang.CharSequence interface.
  // Only 1.5 or later has the java.lang.management.MemoryUsage class.
  if (JDK_Version::is_partially_initialized()) {
    uint8_t jdk_version;
    Klass* k = SystemDictionary::resolve_or_null(
        vmSymbols::java_lang_management_MemoryUsage(), THREAD);
    CLEAR_PENDING_EXCEPTION; // ignore exceptions
    if (k == NULL) {
      k = SystemDictionary::resolve_or_null(
          vmSymbols::java_lang_CharSequence(), THREAD);
      CLEAR_PENDING_EXCEPTION; // ignore exceptions
      if (k == NULL) {
        k = SystemDictionary::resolve_or_null(
            vmSymbols::java_lang_Shutdown(), THREAD);
        CLEAR_PENDING_EXCEPTION; // ignore exceptions
        if (k == NULL) {
          jdk_version = 2;
        } else {
          jdk_version = 3;
        }
      } else {
        jdk_version = 4;
      }
    } else {
      jdk_version = 5;
    }
    JDK_Version::fully_initialize(jdk_version);
  }

#ifdef ASSERT
  if (FullGCALot) {
    // Allocate an array of dummy objects.
    // We'd like these to be at the bottom of the old generation,
    // so that when we free one and then collect,
    // (almost) the whole heap moves
    // and we find out if we actually update all the oops correctly.
    // But we can't allocate directly in the old generation,
    // so we allocate wherever, and hope that the first collection
    // moves these objects to the bottom of the old generation.
    // We can allocate directly in the permanent generation, so we do.
    int size;
    if (UseConcMarkSweepGC) {
      warning("Using +FullGCALot with concurrent mark sweep gc "
              "will not force all objects to relocate");
      size = FullGCALotDummies;
    } else {
      size = FullGCALotDummies * 2;
    }
    objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
    objArrayHandle dummy_array(THREAD, naked_array);
    int i = 0;
    while (i < size) {
      // Allocate dummy in old generation
      oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
      dummy_array->obj_at_put(i++, dummy);
    }
    {
      // Only modify the global variable inside the mutex.
      // If we had a race to here, the other dummy_array instances
      // and their elements just get dropped on the floor, which is fine.
      MutexLocker ml(FullGCALot_lock);
      if (_fullgc_alot_dummy_array == NULL) {
        _fullgc_alot_dummy_array = dummy_array();
      }
    }
    assert(i == _fullgc_alot_dummy_array->length(), "just checking");
  }
#endif

  // Initialize dependency array for null class loader
  ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);

}

// CDS support for patching vtables in metadata in the shared archive.
// All types inherited from Metadata have vtables, but not types inherited
// from MetaspaceObj, because the latter does not have virtual functions.
// If the metadata type has a vtable, it cannot be shared in the read-only
// section of the CDS archive, because the vtable pointer is patched.
static inline void add_vtable(void** list, int* n, void* o, int count) {
  guarantee((*n) < count, "vtable list too small");
  void* vtable = dereference_vptr(o);
  assert(*(void**)(vtable) != NULL, "invalid vtable");
  list[(*n)++] = vtable;
}

void Universe::init_self_patching_vtbl_list(void** list, int count) {
  int n = 0;
  { InstanceKlass o;            add_vtable(list, &n, &o, count); }
  { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
  { InstanceMirrorKlass o;      add_vtable(list, &n, &o, count); }
  { InstanceRefKlass o;         add_vtable(list, &n, &o, count); }
  { TypeArrayKlass o;           add_vtable(list, &n, &o, count); }
  { ObjArrayKlass o;            add_vtable(list, &n, &o, count); }
  { Method o;                   add_vtable(list, &n, &o, count); }
  { ConstantPool o;             add_vtable(list, &n, &o, count); }
}

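// Illustrative note (a sketch, not part of the patching machinery itself):
// dereference_vptr() relies on the common C++ ABI layout in which the
// compiler-generated vtable pointer occupies the first word of a polymorphic
// object, i.e. roughly:
//
//   // hypothetical equivalent of dereference_vptr(o)
//   void* vptr = *(void**)o;
//
// At restore time the pointers collected here are written back into the
// archived metadata objects, which is why metadata with vtables cannot live
// in the read-only region of the archive (see the comment above add_vtable).
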
void Universe::initialize_basic_type_mirrors(TRAPS) {
  assert(_int_mirror == NULL, "basic type mirrors already initialized");
  _int_mirror    =
    java_lang_Class::create_basic_type_mirror("int",    T_INT,     CHECK);
  _float_mirror  =
    java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
  _double_mirror =
    java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
  _byte_mirror   =
    java_lang_Class::create_basic_type_mirror("byte",   T_BYTE,    CHECK);
  _bool_mirror   =
    java_lang_Class::create_basic_type_mirror("boolean", T_BOOLEAN, CHECK);
  _char_mirror   =
    java_lang_Class::create_basic_type_mirror("char",   T_CHAR,    CHECK);
  _long_mirror   =
    java_lang_Class::create_basic_type_mirror("long",   T_LONG,    CHECK);
  _short_mirror  =
    java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
  _void_mirror   =
    java_lang_Class::create_basic_type_mirror("void",   T_VOID,    CHECK);

  _mirrors[T_INT]     = _int_mirror;
  _mirrors[T_FLOAT]   = _float_mirror;
  _mirrors[T_DOUBLE]  = _double_mirror;
  _mirrors[T_BYTE]    = _byte_mirror;
  _mirrors[T_BOOLEAN] = _bool_mirror;
  _mirrors[T_CHAR]    = _char_mirror;
  _mirrors[T_LONG]    = _long_mirror;
  _mirrors[T_SHORT]   = _short_mirror;
  _mirrors[T_VOID]    = _void_mirror;
  //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
  //_mirrors[T_ARRAY]  = InstanceKlass::cast(_object_klass)->java_mirror();
}

void Universe::fixup_mirrors(TRAPS) {
  // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
  // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
  // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
  // that the number of objects allocated at this point is very small.
  assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
  HandleMark hm(THREAD);
  // Cache the start of the static fields
  InstanceMirrorKlass::init_offset_of_static_fields();

  GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
  int list_length = list->length();
  for (int i = 0; i < list_length; i++) {
    Klass* k = list->at(i);
    assert(k->is_klass(), "List should only hold classes");
    EXCEPTION_MARK;
    KlassHandle kh(THREAD, k);
    java_lang_Class::fixup_mirror(kh, CATCH);
  }
  delete java_lang_Class::fixup_mirror_list();
  java_lang_Class::set_fixup_mirror_list(NULL);
}

static bool has_run_finalizers_on_exit = false;

void Universe::run_finalizers_on_exit() {
  if (has_run_finalizers_on_exit) return;
  has_run_finalizers_on_exit = true;

  // Called on VM exit. This ought to be run in a separate thread.
  if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
  {
    PRESERVE_EXCEPTION_MARK;
    KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
    JavaValue result(T_VOID);
    JavaCalls::call_static(
      &result,
      finalizer_klass,
      vmSymbols::run_finalizers_on_exit_name(),
      vmSymbols::void_method_signature(),
      THREAD
    );
    // Ignore any pending exceptions
    CLEAR_PENDING_EXCEPTION;
  }
}

// initialize_vtable could cause a GC if
// 1) we specified true to initialize_vtable and
// 2) this ran after GC was enabled
// In case either of those ever changes, we use handles for oops
void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
  // init vtable of k and all subclasses
  Klass* ko = k_h();
  klassVtable* vt = ko->vtable();
  if (vt) vt->initialize_vtable(false, CHECK);
  if (ko->oop_is_instance()) {
    InstanceKlass* ik = (InstanceKlass*)ko;
    for (KlassHandle s_h(THREAD, ik->subklass());
         s_h() != NULL;
         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
      reinitialize_vtable_of(s_h, CHECK);
    }
  }
}

void initialize_itable_for_klass(Klass* k, TRAPS) {
  InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
}


void Universe::reinitialize_itables(TRAPS) {
  SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
}

bool Universe::on_page_boundary(void* addr) {
  return ((uintptr_t) addr) % os::vm_page_size() == 0;
}

bool Universe::should_fill_in_stack_trace(Handle throwable) {
  // Never attempt to fill in the stack trace of preallocated errors that do not have
  // backtrace. These errors are kept alive forever and may be "re-used" when all
  // preallocated errors with backtrace have been consumed. Also need to avoid
  // a potential loop which could happen if an out-of-memory condition occurs when
  // attempting to allocate the backtrace.
  return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
          (throwable() != Universe::_out_of_memory_error_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
          (throwable() != Universe::_out_of_memory_error_array_size) &&
          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
}

oop Universe::gen_out_of_memory_error(oop default_err) {
  // generate an out of memory error:
  // - if there is a preallocated error with backtrace available then return it with
  //   a filled in stack trace.
  // - if there are no preallocated errors with backtrace available then return
  //   an error without backtrace.
  int next;
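  // Claim a slot by atomically decrementing the available count.
  // Atomic::add returns the updated value, so a non-negative result is an
  // exclusively claimed slot index, while a negative result means every
  // preallocated error is already in use.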
  if (_preallocated_out_of_memory_error_avail_count > 0) {
    next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
    assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
  } else {
    next = -1;
  }
  if (next < 0) {
    // all preallocated errors have been used.
    // return default
    return default_err;
  } else {
    // get the error object at the slot and set it to NULL so that the
    // array isn't keeping it alive anymore.
    oop exc = preallocated_out_of_memory_errors()->obj_at(next);
    assert(exc != NULL, "slot has been used already");
    preallocated_out_of_memory_errors()->obj_at_put(next, NULL);

    // use the message from the default error
    oop msg = java_lang_Throwable::message(default_err);
    assert(msg != NULL, "no message");
    java_lang_Throwable::set_message(exc, msg);

    // populate the stack trace and return it.
    java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
    return exc;
  }
}

intptr_t Universe::_non_oop_bits = 0;

void* Universe::non_oop_word() {
  // Neither the high bits nor the low bits of this value is allowed
  // to look like (respectively) the high or low bits of a real oop.
  //
  // High and low are CPU-specific notions, but low always includes
  // the low-order bit.  Since oops are always aligned at least mod 4,
  // setting the low-order bit will ensure that the low half of the
  // word will never look like that of a real oop.
  //
  // Using the OS-supplied non-memory-address word (usually 0 or -1)
  // will take care of the high bits, however many there are.

  if (_non_oop_bits == 0) {
    _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
  }

  return (void*)_non_oop_bits;
}

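// Example (assuming os::non_memory_address_word() returns 0, as it does on
// typical platforms): _non_oop_bits becomes 0x1, which can never equal a
// real oop because oops are aligned mod 4 or more, so their low bit is
// always clear.
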
jint universe_init() {
  assert(!Universe::_fully_initialized, "called after initialize_vtables");
  guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
            "LogHeapWordSize is incorrect.");
  guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
  guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
            "oop size is not a multiple of HeapWord size");
  TraceTime timer("Genesis", TraceStartupTime);
  JavaClasses::compute_hard_coded_offsets();

  jint status = Universe::initialize_heap();
  if (status != JNI_OK) {
    return status;
  }

  Metaspace::global_initialize();

  // Create memory for metadata.  Must be after initializing heap for
  // DumpSharedSpaces.
  ClassLoaderData::init_null_class_loader_data();

  // We have a heap so create the Method* caches before
  // Metaspace::initialize_shared_spaces() tries to populate them.
  Universe::_finalizer_register_cache = new LatestMethodCache();
  Universe::_loader_addClass_cache    = new LatestMethodCache();
  Universe::_pd_implies_cache         = new LatestMethodCache();

  if (UseSharedSpaces) {
    // Read the data structures supporting the shared spaces (shared
    // system dictionary, symbol table, etc.).  After that, access to
    // the file (other than the mapped regions) is no longer needed, and
    // the file is closed. Closing the file does not affect the
    // currently mapped regions.
    MetaspaceShared::initialize_shared_spaces();
    StringTable::create_table();
  } else {
    SymbolTable::create_table();
    StringTable::create_table();
    ClassLoader::create_package_info_table();

    if (DumpSharedSpaces) {
      MetaspaceShared::prepare_for_dumping();
    }
  }

  return JNI_OK;
}

// Choose the heap base address and oop encoding mode
// when compressed oops are used:
// Unscaled  - Use 32-bit oops without encoding when
//             NarrowOopHeapBaseMin + heap_size < 4Gb
// ZeroBased - Use zero based compressed oops with encoding when
//             NarrowOopHeapBaseMin + heap_size < 32Gb
// HeapBased - Use compressed oops with heap base + encoding.

// 4Gb
static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
// 32Gb
// OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;

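// To make the three modes concrete, decoding a narrow oop works roughly as
// sketched below in terms of the NarrowPtrStruct fields (illustrative only;
// the real decode lives with the oop code, not here):
//
//   Unscaled:  oop = (oop)(uintptr_t)narrow;                    // base 0, shift 0
//   ZeroBased: oop = (oop)((uintptr_t)narrow << shift);         // base 0
//   HeapBased: oop = (oop)(base + ((uintptr_t)narrow << shift));
//
// where shift is LogMinObjAlignmentInBytes whenever scaling is in use.
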
char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
  assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
  assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
  assert(is_size_aligned(heap_size, alignment), "Must be");

  uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);

  size_t base = 0;
#ifdef _LP64
  if (UseCompressedOops) {
    assert(mode == UnscaledNarrowOop  ||
           mode == ZeroBasedNarrowOop ||
           mode == HeapBasedNarrowOop, "mode is invalid");
    const size_t total_size = heap_size + heap_base_min_address_aligned;
    // Return specified base for the first request.
    if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
      base = heap_base_min_address_aligned;

    // If the total size is small enough to allow UnscaledNarrowOop then
    // just use UnscaledNarrowOop.
    } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
      if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
          (Universe::narrow_oop_shift() == 0)) {
        // Use 32-bit oops without encoding and
        // place heap's top on the 4Gb boundary
        base = (UnscaledOopHeapMax - heap_size);
      } else {
        // Can't reserve with NarrowOopShift == 0
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);

        if (mode == UnscaledNarrowOop ||
            (mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax)) {

          // Use zero based compressed oops with encoding and
          // place heap's top on the 32Gb boundary in case
          // total_size > 4Gb or failed to reserve below 4Gb.
          uint64_t heap_top = OopEncodingHeapMax;

          // For small heaps, save some space for compressed class pointer
          // space so it can be decoded with no base.
          if (UseCompressedClassPointers && !UseSharedSpaces &&
              OopEncodingHeapMax <= 32*G) {

            uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
            assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
                   alignment), "difference must be aligned too");
            uint64_t new_top = OopEncodingHeapMax-class_space;

            if (total_size <= new_top) {
              heap_top = new_top;
            }
          }

          // Align base to the adjusted top of the heap
          base = heap_top - heap_size;
        }
      }
    } else {
      // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
      // HeapBasedNarrowOop encoding was requested.  So, can't reserve below 32Gb.
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
    }

    // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
    // used in ReservedHeapSpace() constructors.
    // The final values will be set in initialize_heap() below.
    if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
      // Use zero based compressed oops
      Universe::set_narrow_oop_base(NULL);
      // Don't need guard page for implicit checks in indexed
      // addressing mode with zero based Compressed Oops.
      Universe::set_narrow_oop_use_implicit_null_checks(true);
    } else {
      // Set to a non-NULL value so the ReservedSpace ctor computes
      // the correct no-access prefix.
      // The final value will be set in initialize_heap() below.
      Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
#if defined(_WIN64) || defined(AIX)
      if (UseLargePages) {
        // Cannot allocate guard pages for implicit checks in indexed
        // addressing mode when large pages are specified on Windows.
        Universe::set_narrow_oop_use_implicit_null_checks(false);
      }
#endif // _WIN64 || AIX
    }
  }
#endif

  assert(is_ptr_aligned((char*)base, alignment), "Must be");
  return (char*)base; // also return NULL (don't care) for 32-bit VM
}

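// Example of the unscaled case above (hypothetical sizes): with a 2Gb heap,
// a default HeapBaseMinAddress, and mode == UnscaledNarrowOop, total_size
// stays within UnscaledOopHeapMax, so the function returns
// UnscaledOopHeapMax - heap_size = 4Gb - 2Gb = 2Gb, placing the heap's top
// exactly on the 4Gb boundary.
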
jint Universe::initialize_heap() {

  if (UseParallelGC) {
#if INCLUDE_ALL_GCS
    Universe::_collectedHeap = new ParallelScavengeHeap();
#else  // INCLUDE_ALL_GCS
    fatal("UseParallelGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else if (UseG1GC) {
#if INCLUDE_ALL_GCS
    G1CollectorPolicy* g1p = new G1CollectorPolicy();
    g1p->initialize_all();
    G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
    Universe::_collectedHeap = g1h;
#else  // INCLUDE_ALL_GCS
    fatal("UseG1GC not supported in this VM.");
#endif // INCLUDE_ALL_GCS

  } else {
    GenCollectorPolicy* gc_policy;

    if (UseSerialGC) {
      gc_policy = new MarkSweepPolicy();
    } else if (UseConcMarkSweepGC) {
#if INCLUDE_ALL_GCS
      if (UseAdaptiveSizePolicy) {
        gc_policy = new ASConcurrentMarkSweepPolicy();
      } else {
        gc_policy = new ConcurrentMarkSweepPolicy();
      }
#else  // INCLUDE_ALL_GCS
      fatal("UseConcMarkSweepGC not supported in this VM.");
#endif // INCLUDE_ALL_GCS
    } else { // default old generation
      gc_policy = new MarkSweepPolicy();
    }
    gc_policy->initialize_all();

    Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
  }

  ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());

  jint status = Universe::heap()->initialize();
  if (status != JNI_OK) {
    return status;
  }

#ifdef _LP64
  if (UseCompressedOops) {
    // Subtract a page because something can get allocated at heap base.
    // This also makes implicit null checking work, because the
    // memory+1 page below heap_base needs to cause a signal.
    // See needs_explicit_null_check.
    // Only set the heap base for compressed oops because it indicates
    // compressed oops for pstack code.
    bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
    if (verbose) {
      tty->cr();
      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
    }
    if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
      // Can't reserve heap below 32Gb.
      // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
      Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
#ifdef AIX
      // There is no protected page before the heap. This assures all oops
      // are decoded so that NULL is preserved, so this page will not be accessed.
      Universe::set_narrow_oop_use_implicit_null_checks(false);
#endif
      if (verbose) {
        tty->print(", %s: "PTR_FORMAT,
                   narrow_oop_mode_to_string(HeapBasedNarrowOop),
                   Universe::narrow_oop_base());
      }
    } else {
      Universe::set_narrow_oop_base(0);
      if (verbose) {
        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
      }
#ifdef _WIN64
      if (!Universe::narrow_oop_use_implicit_null_checks()) {
        // Don't need guard page for implicit checks in indexed addressing
        // mode with zero based Compressed Oops.
        Universe::set_narrow_oop_use_implicit_null_checks(true);
      }
#endif // _WIN64
      if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
        // Can't reserve heap below 4Gb.
        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
      } else {
        Universe::set_narrow_oop_shift(0);
        if (verbose) {
          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
        }
      }
    }

    if (verbose) {
      tty->cr();
      tty->cr();
    }
    Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
  }
  // Universe::narrow_oop_base() is one page below the heap.
  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
         os::vm_page_size()) ||
         Universe::narrow_oop_base() == NULL, "invalid value");
  assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
         Universe::narrow_oop_shift() == 0, "invalid value");
#endif

  // We will never reach the CATCH below since Exceptions::_throw will cause
  // the VM to exit if an exception is thrown during initialization

  if (UseTLAB) {
    assert(Universe::heap()->supports_tlab_allocation(),
           "Should support thread-local allocation buffers");
    ThreadLocalAllocBuffer::startup_initialization();
  }
  return JNI_OK;
}

// Reserve the Java heap, which is now the same for all GCs.
ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
  assert(alignment <= Arguments::conservative_max_heap_alignment(),
      err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
          alignment, Arguments::conservative_max_heap_alignment()));
  size_t total_reserved = align_size_up(heap_size, alignment);
  assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
      "heap size is too big for compressed oops");

  bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
  assert(!UseLargePages
      || UseParallelGC
      || use_large_pages, "Wrong alignment to use large pages");

  char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);

  ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !total_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap at a higher address.
      addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);

      ReservedHeapSpace total_rs0(total_reserved, alignment,
                                  use_large_pages, addr);

      if (addr != NULL && !total_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");

        ReservedHeapSpace total_rs1(total_reserved, alignment,
                                    use_large_pages, addr);
        total_rs = total_rs1;
      } else {
        total_rs = total_rs0;
      }
    }
  }

  if (!total_rs.is_reserved()) {
    vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
    return total_rs;
  }

  if (UseCompressedOops) {
    // Universe::initialize_heap() will reset this to NULL if unscaled
    // or zero-based narrow oops are actually used.
    address base = (address)(total_rs.base() - os::vm_page_size());
    Universe::set_narrow_oop_base(base);
  }
  return total_rs;
}

// It's the caller's responsibility to ensure glitch-freedom
// (if required).
void Universe::update_heap_info_at_gc() {
  _heap_capacity_at_last_gc = heap()->capacity();
  _heap_used_at_last_gc     = heap()->used();
}

const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
  switch (mode) {
    case UnscaledNarrowOop:
      return "32-bits Oops";
    case ZeroBasedNarrowOop:
      return "zero based Compressed Oops";
    case HeapBasedNarrowOop:
      return "Compressed Oops with base";
  }

  ShouldNotReachHere();
  return "";
}


Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
  if (narrow_oop_base() != 0) {
    return HeapBasedNarrowOop;
  }

  if (narrow_oop_shift() != 0) {
    return ZeroBasedNarrowOop;
  }

  return UnscaledNarrowOop;
}

void universe2_init() {
  EXCEPTION_MARK;
  Universe::genesis(CATCH);
}


// This function is defined in JVM.cpp
extern void initialize_converter_functions();

bool universe_post_init() {
  assert(!is_init_completed(), "Error: initialization not yet completed!");
  Universe::_fully_initialized = true;
  EXCEPTION_MARK;
  { ResourceMark rm;
    Interpreter::initialize();      // needed for interpreter entry points
    if (!UseSharedSpaces) {
      HandleMark hm(THREAD);
      KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
      Universe::reinitialize_vtable_of(ok_h, CHECK_false);
      Universe::reinitialize_itables(CHECK_false);
    }
  }

  HandleMark hm(THREAD);
  Klass* k;
  instanceKlassHandle k_h;
  // Setup preallocated empty java.lang.Class array
  Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);

  // Setup preallocated OutOfMemoryError errors
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
  k_h = instanceKlassHandle(THREAD, k);
  Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  Universe::_out_of_memory_error_gc_overhead_limit =
    k_h->allocate_instance(CHECK_false);

  // Setup preallocated NullPointerException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Setup preallocated ArithmeticException
  // (this is currently used for a cheap & dirty solution in compiler exception handling)
  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  // Virtual Machine Error for when we get into a situation we can't resolve
  k = SystemDictionary::resolve_or_fail(
      vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  if (!linked) {
    tty->print_cr("Unable to link/verify VirtualMachineError class");
    return false; // initialization failed
  }
  Universe::_virtual_machine_error_instance =
    InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);

  if (!DumpSharedSpaces) {
    // These are the only Java fields that are currently set during shared space dumping.
    // We prefer to not handle this generally, so we always reinitialize these detail messages.
    Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());

    msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
    msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());

    msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());

    msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
    java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());

    msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
    java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());

    // Setup the array of errors that have preallocated backtrace
    k = Universe::_out_of_memory_error_java_heap->klass();
    assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
    k_h = instanceKlassHandle(THREAD, k);

    int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
    Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
    for (int i = 0; i < len; i++) {
      oop err = k_h->allocate_instance(CHECK_false);
      Handle err_h = Handle(THREAD, err);
      java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
      Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
    }
    Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
  }

  // Setup static method for registering finalizers
  // The finalizer klass must be linked before looking up the method, in
  // case it needs to get rewritten.
  InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
                                  vmSymbols::register_method_name(),
                                  vmSymbols::register_method_signature());
  if (m == NULL || !m->is_static()) {
    tty->print_cr("Unable to link/verify Finalizer.register method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_finalizer_register_cache->init(
    SystemDictionary::Finalizer_klass(), m);

  InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->find_method(
                                  vmSymbols::throwIllegalAccessError_name(),
                                  vmSymbols::void_method_signature());
  if (m != NULL && !m->is_static()) {
    // Note null is okay; this method is used in itables, and if it is null,
    // then AbstractMethodError is thrown instead.
    tty->print_cr("Unable to link/verify Unsafe.throwIllegalAccessError method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_throw_illegal_access_error = m;

  // Setup method for registering loaded classes in class loader vector
  InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  if (m == NULL || m->is_static()) {
    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
    return false; // initialization failed (cannot throw exception yet)
  }
  Universe::_loader_addClass_cache->init(
    SystemDictionary::ClassLoader_klass(), m);

  // Setup method for checking protection domain
  InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
  m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
            find_method(vmSymbols::impliesCreateAccessControlContext_name(),
                        vmSymbols::void_boolean_signature());
  // Allow NULL which should only happen with bootstrapping.
  if (m != NULL) {
    if (m->is_static()) {
      // NoSuchMethodException doesn't actually work because it tries to run the
      // <init> function before java_lang_Class is linked. Print error and exit.
      tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
      return false; // initialization failed
    }
    Universe::_pd_implies_cache->init(
      SystemDictionary::ProtectionDomain_klass(), m);
  }

  // The following initializes the converter functions for serialization in
  // JVM.cpp. If we clean up the StrictMath code above we may want to find
  // a better solution for this as well.
  initialize_converter_functions();

  // This needs to be done before the first scavenge/gc, since
  // it's an input to soft ref clearing policy.
  {
    MutexLocker x(Heap_lock);
    Universe::update_heap_info_at_gc();
  }

  // ("weak") refs processing infrastructure initialization
  Universe::heap()->post_initialize();

  // Initialize performance counters for metaspaces
  MetaspaceCounters::initialize_performance_counters();
  CompressedClassSpaceCounters::initialize_performance_counters();

  MemoryService::add_metaspace_memory_pools();

  MemoryService::set_universe_heap(Universe::_collectedHeap);
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    SharedClassUtil::initialize(CHECK_false);
  }
#endif
  return true;
}

void Universe::compute_base_vtable_size() {
  _base_vtable_size = ClassLoader::compute_Object_vtable();
}

// %%% The Universe::flush_foo methods belong in CodeCache.

// Flushes compiled methods dependent on dependee.
void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  KlassDepChange changes(dependee);

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(changes) > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

// Flushes compiled methods dependent on a particular CallSite
// instance when its target is different than the given MethodHandle.
void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  assert_lock_strong(Compile_lock);

  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  CallSiteDepChange changes(call_site(), method_handle());

  // Compute the dependent nmethods that have a reference to a
  // CallSite object.  We use InstanceKlass::mark_dependent_nmethod
  // directly instead of CodeCache::mark_for_deoptimization because we
  // want dependents on the call site class only, not all classes in
  // the ContextStream.
  int marked = 0;
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
    marked = call_site_klass->mark_dependent_nmethods(changes);
  }
  if (marked > 0) {
    // At least one nmethod has been marked for deoptimization
    VM_Deoptimize op;
    VMThread::execute(&op);
  }
}

#ifdef HOTSWAP
// Flushes compiled methods dependent on dependee in the evolutionary sense
void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);
  if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}
#endif // HOTSWAP

// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // CodeCache can only be updated by a thread_in_VM and they will all be
  // stopped during the safepoint so CodeCache will be safe to update without
  // holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}

void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
  // The use of _verify_in_progress is a temporary work around for
  // 6320749.  Don't bother creating a class to set and clear
  // it since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
           "DPT should not be active during verification "
           "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print("%s", prefix);
  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  if (!silent) gclog_or_tty->print("heap ");
  heap()->verify(silent, option);
  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};


#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}

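// Worked example (hypothetical boundaries): with low_boundary 0x10000000 and
// high_boundary 0x20000000, diff = min ^ max has its highest set bit below
// bit 28, so the loop above produces mask 0xF0000000 and bits 0x10000000
// (with the low alignment bits then OR'ed into mask). An address p therefore
// "looks like" an oop to MacroAssembler::verify_oop iff (p & mask) == bits,
// i.e. p falls inside the heap range and is properly aligned.
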
// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}


uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
#endif // PRODUCT

void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}

void LatestMethodCache::init(Klass* k, Method* m) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}

Method* LatestMethodCache::get_method() {
  if (klass() == NULL) return NULL;
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}

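// Design note (our reading of the code, not an authoritative statement):
// caching the method by idnum rather than by Method* is what lets the cache
// survive class redefinition. The idnum stays stable across RedefineClasses,
// so get_method() above always re-resolves to the latest Method* for the
// klass instead of holding a possibly stale pointer.
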
#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}

#endif // ASSERT