1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/classLoader.hpp"
27 #include "classfile/classLoaderData.hpp"
28 #include "classfile/javaClasses.hpp"
29 #if INCLUDE_CDS
30 #include "classfile/sharedClassUtil.hpp"
31 #endif
32 #include "classfile/symbolTable.hpp"
33 #include "classfile/systemDictionary.hpp"
34 #include "classfile/vmSymbols.hpp"
35 #include "code/codeCache.hpp"
36 #include "code/dependencies.hpp"
37 #include "gc_interface/collectedHeap.inline.hpp"
38 #include "interpreter/interpreter.hpp"
39 #include "memory/cardTableModRefBS.hpp"
40 #include "memory/filemap.hpp"
41 #include "memory/gcLocker.inline.hpp"
42 #include "memory/genCollectedHeap.hpp"
43 #include "memory/genRemSet.hpp"
44 #include "memory/generation.hpp"
45 #include "memory/metadataFactory.hpp"
46 #include "memory/metaspaceShared.hpp"
47 #include "memory/oopFactory.hpp"
48 #include "memory/space.hpp"
49 #include "memory/universe.hpp"
50 #include "memory/universe.inline.hpp"
51 #include "oops/constantPool.hpp"
52 #include "oops/instanceClassLoaderKlass.hpp"
53 #include "oops/instanceKlass.hpp"
54 #include "oops/instanceMirrorKlass.hpp"
55 #include "oops/instanceRefKlass.hpp"
56 #include "oops/oop.inline.hpp"
57 #include "oops/typeArrayKlass.hpp"
58 #include "prims/jvmtiRedefineClassesTrace.hpp"
59 #include "runtime/arguments.hpp"
60 #include "runtime/deoptimization.hpp"
61 #include "runtime/fprofiler.hpp"
62 #include "runtime/handles.inline.hpp"
63 #include "runtime/init.hpp"
64 #include "runtime/java.hpp"
65 #include "runtime/javaCalls.hpp"
66 #include "runtime/sharedRuntime.hpp"
67 #include "runtime/synchronizer.hpp"
68 #include "runtime/thread.inline.hpp"
69 #include "runtime/timer.hpp"
70 #include "runtime/vm_operations.hpp"
71 #include "services/memoryService.hpp"
72 #include "utilities/copy.hpp"
73 #include "utilities/events.hpp"
74 #include "utilities/hashtable.inline.hpp"
75 #include "utilities/preserveException.hpp"
76 #include "utilities/macros.hpp"
77 #if INCLUDE_ALL_GCS
78 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
79 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
80 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
81 #include "gc_implementation/g1/g1CollectorPolicy_ext.hpp"
82 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
83 #endif // INCLUDE_ALL_GCS
85 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
87 // Known objects
88 Klass* Universe::_boolArrayKlassObj = NULL;
89 Klass* Universe::_byteArrayKlassObj = NULL;
90 Klass* Universe::_charArrayKlassObj = NULL;
91 Klass* Universe::_intArrayKlassObj = NULL;
92 Klass* Universe::_shortArrayKlassObj = NULL;
93 Klass* Universe::_longArrayKlassObj = NULL;
94 Klass* Universe::_singleArrayKlassObj = NULL;
95 Klass* Universe::_doubleArrayKlassObj = NULL;
96 Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
97 Klass* Universe::_objectArrayKlassObj = NULL;
98 oop Universe::_int_mirror = NULL;
99 oop Universe::_float_mirror = NULL;
100 oop Universe::_double_mirror = NULL;
101 oop Universe::_byte_mirror = NULL;
102 oop Universe::_bool_mirror = NULL;
103 oop Universe::_char_mirror = NULL;
104 oop Universe::_long_mirror = NULL;
105 oop Universe::_short_mirror = NULL;
106 oop Universe::_void_mirror = NULL;
107 oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
108 oop Universe::_main_thread_group = NULL;
109 oop Universe::_system_thread_group = NULL;
110 objArrayOop Universe::_the_empty_class_klass_array = NULL;
111 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
112 oop Universe::_the_null_string = NULL;
113 oop Universe::_the_min_jint_string = NULL;
114 LatestMethodCache* Universe::_finalizer_register_cache = NULL;
115 LatestMethodCache* Universe::_loader_addClass_cache = NULL;
116 LatestMethodCache* Universe::_pd_implies_cache = NULL;
117 oop Universe::_out_of_memory_error_java_heap = NULL;
118 oop Universe::_out_of_memory_error_metaspace = NULL;
119 oop Universe::_out_of_memory_error_class_metaspace = NULL;
120 oop Universe::_out_of_memory_error_array_size = NULL;
121 oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
122 oop Universe::_out_of_memory_error_realloc_objects = NULL;
123 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
124 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
125 bool Universe::_verify_in_progress = false;
126 oop Universe::_null_ptr_exception_instance = NULL;
127 oop Universe::_arithmetic_exception_instance = NULL;
128 oop Universe::_virtual_machine_error_instance = NULL;
129 oop Universe::_vm_exception = NULL;
130 oop Universe::_allocation_context_notification_obj = NULL;
132 Method* Universe::_throw_illegal_access_error = NULL;
133 Array<int>* Universe::_the_empty_int_array = NULL;
134 Array<u2>* Universe::_the_empty_short_array = NULL;
135 Array<Klass*>* Universe::_the_empty_klass_array = NULL;
136 Array<Method*>* Universe::_the_empty_method_array = NULL;
138 // These variables are guarded by FullGCALot_lock.
139 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
140 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
142 // Heap
143 int Universe::_verify_count = 0;
145 int Universe::_base_vtable_size = 0;
146 bool Universe::_bootstrapping = false;
147 bool Universe::_fully_initialized = false;
149 size_t Universe::_heap_capacity_at_last_gc;
150 size_t Universe::_heap_used_at_last_gc = 0;
152 CollectedHeap* Universe::_collectedHeap = NULL;
154 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
155 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
156 address Universe::_narrow_ptrs_base;
158 void Universe::basic_type_classes_do(void f(Klass*)) {
159 f(boolArrayKlassObj());
160 f(byteArrayKlassObj());
161 f(charArrayKlassObj());
162 f(intArrayKlassObj());
163 f(shortArrayKlassObj());
164 f(longArrayKlassObj());
165 f(singleArrayKlassObj());
166 f(doubleArrayKlassObj());
167 }
169 void Universe::oops_do(OopClosure* f, bool do_all) {
171 f->do_oop((oop*) &_int_mirror);
172 f->do_oop((oop*) &_float_mirror);
173 f->do_oop((oop*) &_double_mirror);
174 f->do_oop((oop*) &_byte_mirror);
175 f->do_oop((oop*) &_bool_mirror);
176 f->do_oop((oop*) &_char_mirror);
177 f->do_oop((oop*) &_long_mirror);
178 f->do_oop((oop*) &_short_mirror);
179 f->do_oop((oop*) &_void_mirror);
181 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
182 f->do_oop((oop*) &_mirrors[i]);
183 }
184 assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
186 f->do_oop((oop*)&_the_empty_class_klass_array);
187 f->do_oop((oop*)&_the_null_string);
188 f->do_oop((oop*)&_the_min_jint_string);
189 f->do_oop((oop*)&_out_of_memory_error_java_heap);
190 f->do_oop((oop*)&_out_of_memory_error_metaspace);
191 f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
192 f->do_oop((oop*)&_out_of_memory_error_array_size);
193 f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
194 f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
195 f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
196 f->do_oop((oop*)&_null_ptr_exception_instance);
197 f->do_oop((oop*)&_arithmetic_exception_instance);
198 f->do_oop((oop*)&_virtual_machine_error_instance);
199 f->do_oop((oop*)&_main_thread_group);
200 f->do_oop((oop*)&_system_thread_group);
201 f->do_oop((oop*)&_vm_exception);
202 f->do_oop((oop*)&_allocation_context_notification_obj);
203 debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
204 }
206 // Serialize metadata in and out of CDS archive, not oops.
207 void Universe::serialize(SerializeClosure* f, bool do_all) {
209 f->do_ptr((void**)&_boolArrayKlassObj);
210 f->do_ptr((void**)&_byteArrayKlassObj);
211 f->do_ptr((void**)&_charArrayKlassObj);
212 f->do_ptr((void**)&_intArrayKlassObj);
213 f->do_ptr((void**)&_shortArrayKlassObj);
214 f->do_ptr((void**)&_longArrayKlassObj);
215 f->do_ptr((void**)&_singleArrayKlassObj);
216 f->do_ptr((void**)&_doubleArrayKlassObj);
217 f->do_ptr((void**)&_objectArrayKlassObj);
219 {
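    // Serialize every type-array klass slot that is populated; when do_all is
    // true the NULL slots are written as well, keeping the layout symmetric
    // between the write (dump) and read (restore) passes.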
220 for (int i = 0; i < T_VOID+1; i++) {
221 if (_typeArrayKlassObjs[i] != NULL) {
222 assert(i >= T_BOOLEAN, "checking");
223 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
224 } else if (do_all) {
225 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
226 }
227 }
228 }
230 f->do_ptr((void**)&_the_array_interfaces_array);
231 f->do_ptr((void**)&_the_empty_int_array);
232 f->do_ptr((void**)&_the_empty_short_array);
233 f->do_ptr((void**)&_the_empty_method_array);
234 f->do_ptr((void**)&_the_empty_klass_array);
235 _finalizer_register_cache->serialize(f);
236 _loader_addClass_cache->serialize(f);
237 _pd_implies_cache->serialize(f);
238 }
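// Verifies that a configured size is a non-zero multiple of its required
// alignment; any violation is reported and the VM exits during initialization.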
240 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
241 if (size < alignment || size % alignment != 0) {
242 vm_exit_during_initialization(
243 err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
244 }
245 }
247 void initialize_basic_type_klass(Klass* k, TRAPS) {
248 Klass* ok = SystemDictionary::Object_klass();
249 if (UseSharedSpaces) {
250 ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
251 assert(k->super() == ok, "u3");
252 k->restore_unshareable_info(loader_data, Handle(), CHECK);
253 } else {
254 k->initialize_supers(ok, CHECK);
255 }
256 k->append_to_sibling_list();
257 }
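// Bootstraps the core type system: creates (or, with CDS, restores) the
// primitive array klasses and the shared empty/interface arrays, interns a few
// well-known strings, and wires java.lang.Object's array klass into the
// hierarchy before any Java code runs.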
259 void Universe::genesis(TRAPS) {
260 ResourceMark rm;
262 { FlagSetting fs(_bootstrapping, true);
264 { MutexLocker mc(Compile_lock);
266 // determine base vtable size; without that we cannot create the array klasses
267 compute_base_vtable_size();
269 if (!UseSharedSpaces) {
270 _boolArrayKlassObj = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
271 _charArrayKlassObj = TypeArrayKlass::create_klass(T_CHAR, sizeof(jchar), CHECK);
272 _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT, sizeof(jfloat), CHECK);
273 _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE, sizeof(jdouble), CHECK);
274 _byteArrayKlassObj = TypeArrayKlass::create_klass(T_BYTE, sizeof(jbyte), CHECK);
275 _shortArrayKlassObj = TypeArrayKlass::create_klass(T_SHORT, sizeof(jshort), CHECK);
276 _intArrayKlassObj = TypeArrayKlass::create_klass(T_INT, sizeof(jint), CHECK);
277 _longArrayKlassObj = TypeArrayKlass::create_klass(T_LONG, sizeof(jlong), CHECK);
279 _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
280 _typeArrayKlassObjs[T_CHAR] = _charArrayKlassObj;
281 _typeArrayKlassObjs[T_FLOAT] = _singleArrayKlassObj;
282 _typeArrayKlassObjs[T_DOUBLE] = _doubleArrayKlassObj;
283 _typeArrayKlassObjs[T_BYTE] = _byteArrayKlassObj;
284 _typeArrayKlassObjs[T_SHORT] = _shortArrayKlassObj;
285 _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
286 _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;
288 ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
290 _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
291 _the_empty_int_array = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
292 _the_empty_short_array = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
293 _the_empty_method_array = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
294 _the_empty_klass_array = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
295 }
296 }
298 vmSymbols::initialize(CHECK);
300 SystemDictionary::initialize(CHECK);
302 Klass* ok = SystemDictionary::Object_klass();
304 _the_null_string = StringTable::intern("null", CHECK);
305 _the_min_jint_string = StringTable::intern("-2147483648", CHECK);
307 if (UseSharedSpaces) {
308 // Verify shared interfaces array.
309 assert(_the_array_interfaces_array->at(0) ==
310 SystemDictionary::Cloneable_klass(), "u3");
311 assert(_the_array_interfaces_array->at(1) ==
312 SystemDictionary::Serializable_klass(), "u3");
313 } else {
314 // Set up shared interfaces array. (Do this before supers are set up.)
315 _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
316 _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
317 }
319 initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
320 initialize_basic_type_klass(charArrayKlassObj(), CHECK);
321 initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
322 initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
323 initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
324 initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
325 initialize_basic_type_klass(intArrayKlassObj(), CHECK);
326 initialize_basic_type_klass(longArrayKlassObj(), CHECK);
327 } // end of core bootstrapping
329 // Maybe this could be lifted up now that object array can be initialized
330 // during bootstrapping.
332 // OLD
333 // Initialize _objectArrayKlass after core bootstrapping to make
334 // sure the super class is set up properly for _objectArrayKlass.
335 // ---
336 // NEW
337 // Since some of the old system object arrays have been converted to
338 // ordinary object arrays, _objectArrayKlass will be loaded when
339 // SystemDictionary::initialize(CHECK); is run. See the extra check
340 // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
341 _objectArrayKlassObj = InstanceKlass::
342 cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
343 // OLD
344 // Add the class to the class hierarchy manually to make sure that
345 // its vtable is initialized after core bootstrapping is completed.
346 // ---
347 // New
348 // Have already been initialized.
349 _objectArrayKlassObj->append_to_sibling_list();
351 // Compute is_jdk version flags.
352 // Only 1.3 or later has the java.lang.Shutdown class.
353 // Only 1.4 or later has the java.lang.CharSequence interface.
354 // Only 1.5 or later has the java.lang.management.MemoryUsage class.
355 if (JDK_Version::is_partially_initialized()) {
356 uint8_t jdk_version;
357 Klass* k = SystemDictionary::resolve_or_null(
358 vmSymbols::java_lang_management_MemoryUsage(), THREAD);
359 CLEAR_PENDING_EXCEPTION; // ignore exceptions
360 if (k == NULL) {
361 k = SystemDictionary::resolve_or_null(
362 vmSymbols::java_lang_CharSequence(), THREAD);
363 CLEAR_PENDING_EXCEPTION; // ignore exceptions
364 if (k == NULL) {
365 k = SystemDictionary::resolve_or_null(
366 vmSymbols::java_lang_Shutdown(), THREAD);
367 CLEAR_PENDING_EXCEPTION; // ignore exceptions
368 if (k == NULL) {
369 jdk_version = 2;
370 } else {
371 jdk_version = 3;
372 }
373 } else {
374 jdk_version = 4;
375 }
376 } else {
377 jdk_version = 5;
378 }
379 JDK_Version::fully_initialize(jdk_version);
380 }
382 #ifdef ASSERT
383 if (FullGCALot) {
384 // Allocate an array of dummy objects.
385 // We'd like these to be at the bottom of the old generation,
386 // so that when we free one and then collect,
387 // (almost) the whole heap moves
388 // and we find out if we actually update all the oops correctly.
389 // But we can't allocate directly in the old generation,
390 // so we allocate wherever, and hope that the first collection
391 // moves these objects to the bottom of the old generation.
392 // We can allocate directly in the permanent generation, so we do.
393 int size;
394 if (UseConcMarkSweepGC) {
395 warning("Using +FullGCALot with concurrent mark sweep gc "
396 "will not force all objects to relocate");
397 size = FullGCALotDummies;
398 } else {
399 size = FullGCALotDummies * 2;
400 }
401 objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
402 objArrayHandle dummy_array(THREAD, naked_array);
403 int i = 0;
404 while (i < size) {
405 // Allocate dummy in old generation
406 oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
407 dummy_array->obj_at_put(i++, dummy);
408 }
409 {
410 // Only modify the global variable inside the mutex.
411 // If we had a race to here, the other dummy_array instances
412 // and their elements just get dropped on the floor, which is fine.
413 MutexLocker ml(FullGCALot_lock);
414 if (_fullgc_alot_dummy_array == NULL) {
415 _fullgc_alot_dummy_array = dummy_array();
416 }
417 }
418 assert(i == _fullgc_alot_dummy_array->length(), "just checking");
419 }
420 #endif
422 // Initialize dependency array for null class loader
423 ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
425 }
427 // CDS support for patching vtables in metadata in the shared archive.
428 // All types inherited from Metadata have vtables, but not types inherited
429 // from MetaspaceObj, because the latter does not have virtual functions.
430 // If the metadata type has a vtable, it cannot be shared in the read-only
431 // section of the CDS archive, because the vtable pointer is patched.
432 static inline void add_vtable(void** list, int* n, void* o, int count) {
433 guarantee((*n) < count, "vtable list too small");
434 void* vtable = dereference_vptr(o);
435 assert(*(void**)(vtable) != NULL, "invalid vtable");
436 list[(*n)++] = vtable;
437 }
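// Record one live vtable pointer per shareable metadata type by instantiating
// a dummy object of each type; the CDS code later uses this list to patch the
// vptrs of metadata objects read back from the archive.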
439 void Universe::init_self_patching_vtbl_list(void** list, int count) {
440 int n = 0;
441 { InstanceKlass o; add_vtable(list, &n, &o, count); }
442 { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
443 { InstanceMirrorKlass o; add_vtable(list, &n, &o, count); }
444 { InstanceRefKlass o; add_vtable(list, &n, &o, count); }
445 { TypeArrayKlass o; add_vtable(list, &n, &o, count); }
446 { ObjArrayKlass o; add_vtable(list, &n, &o, count); }
447 { Method o; add_vtable(list, &n, &o, count); }
448 { ConstantPool o; add_vtable(list, &n, &o, count); }
449 }
451 void Universe::initialize_basic_type_mirrors(TRAPS) {
452 assert(_int_mirror==NULL, "basic type mirrors already initialized");
453 _int_mirror =
454 java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
455 _float_mirror =
456 java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
457 _double_mirror =
458 java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
459 _byte_mirror =
460 java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
461 _bool_mirror =
462 java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
463 _char_mirror =
464 java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
465 _long_mirror =
466 java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
467 _short_mirror =
468 java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
469 _void_mirror =
470 java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);
472 _mirrors[T_INT] = _int_mirror;
473 _mirrors[T_FLOAT] = _float_mirror;
474 _mirrors[T_DOUBLE] = _double_mirror;
475 _mirrors[T_BYTE] = _byte_mirror;
476 _mirrors[T_BOOLEAN] = _bool_mirror;
477 _mirrors[T_CHAR] = _char_mirror;
478 _mirrors[T_LONG] = _long_mirror;
479 _mirrors[T_SHORT] = _short_mirror;
480 _mirrors[T_VOID] = _void_mirror;
481 //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
482 //_mirrors[T_ARRAY] = InstanceKlass::cast(_object_klass)->java_mirror();
483 }
485 void Universe::fixup_mirrors(TRAPS) {
486 // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
487 // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
488 // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
489 // that the number of objects allocated at this point is very small.
490 assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
491 HandleMark hm(THREAD);
492 // Cache the start of the static fields
493 InstanceMirrorKlass::init_offset_of_static_fields();
495 GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
496 int list_length = list->length();
497 for (int i = 0; i < list_length; i++) {
498 Klass* k = list->at(i);
499 assert(k->is_klass(), "List should only hold classes");
500 EXCEPTION_MARK;
501 KlassHandle kh(THREAD, k);
502 java_lang_Class::fixup_mirror(kh, CATCH);
503 }
504 delete java_lang_Class::fixup_mirror_list();
505 java_lang_Class::set_fixup_mirror_list(NULL);
506 }
508 static bool has_run_finalizers_on_exit = false;
510 void Universe::run_finalizers_on_exit() {
511 if (has_run_finalizers_on_exit) return;
512 has_run_finalizers_on_exit = true;
514 // Called on VM exit. This ought to be run in a separate thread.
515 if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
516 {
517 PRESERVE_EXCEPTION_MARK;
518 KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
519 JavaValue result(T_VOID);
520 JavaCalls::call_static(
521 &result,
522 finalizer_klass,
523 vmSymbols::run_finalizers_on_exit_name(),
524 vmSymbols::void_method_signature(),
525 THREAD
526 );
527 // Ignore any pending exceptions
528 CLEAR_PENDING_EXCEPTION;
529 }
530 }
533 // initialize_vtable could cause gc if
534 // 1) we specified true to initialize_vtable and
535 // 2) this ran after gc was enabled
536 // In case those ever change we use handles for oops
537 void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
538 // init vtable of k and all subclasses
539 Klass* ko = k_h();
540 klassVtable* vt = ko->vtable();
541 if (vt) vt->initialize_vtable(false, CHECK);
542 if (ko->oop_is_instance()) {
543 InstanceKlass* ik = (InstanceKlass*)ko;
544 for (KlassHandle s_h(THREAD, ik->subklass());
545 s_h() != NULL;
546 s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
547 reinitialize_vtable_of(s_h, CHECK);
548 }
549 }
550 }
553 void initialize_itable_for_klass(Klass* k, TRAPS) {
554 InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
555 }
558 void Universe::reinitialize_itables(TRAPS) {
559 SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
561 }
564 bool Universe::on_page_boundary(void* addr) {
565 return ((uintptr_t) addr) % os::vm_page_size() == 0;
566 }
569 bool Universe::should_fill_in_stack_trace(Handle throwable) {
570 // never attempt to fill in the stack trace of preallocated errors that do not have
571 // backtrace. These errors are kept alive forever and may be "re-used" when all
572 // preallocated errors with backtrace have been consumed. Also need to avoid
573 // a potential loop which could happen if an out of memory occurs when attempting
574 // to allocate the backtrace.
575 return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
576 (throwable() != Universe::_out_of_memory_error_metaspace) &&
577 (throwable() != Universe::_out_of_memory_error_class_metaspace) &&
578 (throwable() != Universe::_out_of_memory_error_array_size) &&
579 (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
580 (throwable() != Universe::_out_of_memory_error_realloc_objects));
581 }
584 oop Universe::gen_out_of_memory_error(oop default_err) {
585 // generate an out of memory error:
586 // - if there is a preallocated error with backtrace available then return it with
587 // a filled-in stack trace.
588 // - if there are no preallocated errors with backtrace available then return
589 // an error without backtrace.
590 int next;
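  // Atomically claim a slot: the decremented count is the index of the
  // preallocated error to use; a negative result (possible under a race)
  // means the pool is exhausted and the default error is returned instead.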
591 if (_preallocated_out_of_memory_error_avail_count > 0) {
592 next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
593 assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
594 } else {
595 next = -1;
596 }
597 if (next < 0) {
598 // all preallocated errors have been used.
599 // return default
600 return default_err;
601 } else {
602 // get the error object at the slot and set it to NULL so that the
603 // array isn't keeping it alive anymore.
604 oop exc = preallocated_out_of_memory_errors()->obj_at(next);
605 assert(exc != NULL, "slot has been used already");
606 preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
608 // use the message from the default error
609 oop msg = java_lang_Throwable::message(default_err);
610 assert(msg != NULL, "no message");
611 java_lang_Throwable::set_message(exc, msg);
613 // populate the stack trace and return it.
614 java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
615 return exc;
616 }
617 }
619 intptr_t Universe::_non_oop_bits = 0;
621 void* Universe::non_oop_word() {
622 // Neither the high bits nor the low bits of this value are allowed
623 // to look like (respectively) the high or low bits of a real oop.
624 //
625 // High and low are CPU-specific notions, but low always includes
626 // the low-order bit. Since oops are always aligned at least mod 4,
627 // setting the low-order bit will ensure that the low half of the
628 // word will never look like that of a real oop.
629 //
630 // Using the OS-supplied non-memory-address word (usually 0 or -1)
631 // will take care of the high bits, however many there are.
633 if (_non_oop_bits == 0) {
634 _non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
635 }
637 return (void*)_non_oop_bits;
638 }
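// First stage of VM bootstrap: sanity-check word and oop sizes, create the
// Java heap and metaspace, set up the null class loader data, and either map
// the shared (CDS) archive or create fresh symbol and string tables.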
640 jint universe_init() {
641 assert(!Universe::_fully_initialized, "called after initialize_vtables");
642 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
643 "LogHeapWordSize is incorrect.");
644 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
645 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
646 "oop size is not not a multiple of HeapWord size");
647 TraceTime timer("Genesis", TraceStartupTime);
648 JavaClasses::compute_hard_coded_offsets();
650 jint status = Universe::initialize_heap();
651 if (status != JNI_OK) {
652 return status;
653 }
655 Metaspace::global_initialize();
657 // Create memory for metadata. Must be after initializing heap for
658 // DumpSharedSpaces.
659 ClassLoaderData::init_null_class_loader_data();
661 // We have a heap so create the Method* caches before
662 // Metaspace::initialize_shared_spaces() tries to populate them.
663 Universe::_finalizer_register_cache = new LatestMethodCache();
664 Universe::_loader_addClass_cache = new LatestMethodCache();
665 Universe::_pd_implies_cache = new LatestMethodCache();
667 if (UseSharedSpaces) {
668 // Read the data structures supporting the shared spaces (shared
669 // system dictionary, symbol table, etc.). After that, access to
670 // the file (other than the mapped regions) is no longer needed, and
671 // the file is closed. Closing the file does not affect the
672 // currently mapped regions.
673 MetaspaceShared::initialize_shared_spaces();
674 StringTable::create_table();
675 } else {
676 SymbolTable::create_table();
677 StringTable::create_table();
678 ClassLoader::create_package_info_table();
680 if (DumpSharedSpaces) {
681 MetaspaceShared::prepare_for_dumping();
682 }
683 }
685 return JNI_OK;
686 }
688 // Choose the heap base address and oop encoding mode
689 // when compressed oops are used:
690 // Unscaled - Use 32-bit oops without encoding when
691 // NarrowOopHeapBaseMin + heap_size < 4Gb
692 // ZeroBased - Use zero based compressed oops with encoding when
693 // NarrowOopHeapBaseMin + heap_size < 32Gb
694 // HeapBased - Use compressed oops with heap base + encoding.
696 // 4Gb
697 static const uint64_t UnscaledOopHeapMax = (uint64_t(max_juint) + 1);
698 // 32Gb
699 // OopEncodingHeapMax == UnscaledOopHeapMax << LogMinObjAlignmentInBytes;
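// Example (assuming the default 8-byte object alignment): an 8G heap does not
// fit below the 4G unscaled limit, but its top can still be placed below 32G,
// so a zero-based encoding with shift == LogMinObjAlignmentInBytes is preferred.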
701 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
702 assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
703 assert(is_size_aligned((size_t)UnscaledOopHeapMax, alignment), "Must be");
704 assert(is_size_aligned(heap_size, alignment), "Must be");
706 uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
708 size_t base = 0;
709 #ifdef _LP64
710 if (UseCompressedOops) {
711 assert(mode == UnscaledNarrowOop ||
712 mode == ZeroBasedNarrowOop ||
713 mode == HeapBasedNarrowOop, "mode is invalid");
714 const size_t total_size = heap_size + heap_base_min_address_aligned;
715 // Return specified base for the first request.
716 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
717 base = heap_base_min_address_aligned;
719 // If the total size is small enough to allow UnscaledNarrowOop then
720 // just use UnscaledNarrowOop.
721 } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
722 if ((total_size <= UnscaledOopHeapMax) && (mode == UnscaledNarrowOop) &&
723 (Universe::narrow_oop_shift() == 0)) {
724 // Use 32-bit oops without encoding and
725 // place heap's top on the 4Gb boundary
726 base = (UnscaledOopHeapMax - heap_size);
727 } else {
728 // Can't reserve with NarrowOopShift == 0
729 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
731 if (mode == UnscaledNarrowOop ||
732 mode == ZeroBasedNarrowOop && total_size <= UnscaledOopHeapMax) {
734 // Use zero based compressed oops with encoding and
735 // place heap's top on the 32Gb boundary in case
736 // total_size > 4Gb or failed to reserve below 4Gb.
737 uint64_t heap_top = OopEncodingHeapMax;
739 // For small heaps, save some space for compressed class pointer
740 // space so it can be decoded with no base.
741 if (UseCompressedClassPointers && !UseSharedSpaces &&
742 OopEncodingHeapMax <= 32*G) {
744 uint64_t class_space = align_size_up(CompressedClassSpaceSize, alignment);
745 assert(is_size_aligned((size_t)OopEncodingHeapMax-class_space,
746 alignment), "difference must be aligned too");
747 uint64_t new_top = OopEncodingHeapMax-class_space;
749 if (total_size <= new_top) {
750 heap_top = new_top;
751 }
752 }
754 // Align base to the adjusted top of the heap
755 base = heap_top - heap_size;
756 }
757 }
758 } else {
759 // UnscaledNarrowOop encoding didn't work, and no base was found for ZeroBasedOops or
760 // HeapBasedNarrowOop encoding was requested. So, can't reserve below 32Gb.
761 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
762 }
764 // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
765 // used in ReservedHeapSpace() constructors.
766 // The final values will be set in initialize_heap() below.
767 if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
768 // Use zero based compressed oops
769 Universe::set_narrow_oop_base(NULL);
770 // Don't need guard page for implicit checks in indexed
771 // addressing mode with zero based Compressed Oops.
772 Universe::set_narrow_oop_use_implicit_null_checks(true);
773 } else {
774 // Set to a non-NULL value so the ReservedSpace ctor computes
775 // the correct no-access prefix.
776 // The final value will be set in initialize_heap() below.
777 Universe::set_narrow_oop_base((address)UnscaledOopHeapMax);
778 #if defined(_WIN64) || defined(AIX)
779 if (UseLargePages) {
780 // Cannot allocate guard pages for implicit checks in indexed
781 // addressing mode when large pages are specified on windows.
782 Universe::set_narrow_oop_use_implicit_null_checks(false);
783 }
784 #endif // _WIN64
785 }
786 }
787 #endif
789 assert(is_ptr_aligned((char*)base, alignment), "Must be");
790 return (char*)base; // also return NULL (don't care) for 32-bit VM
791 }
793 jint Universe::initialize_heap() {
795 if (UseParallelGC) {
796 #if INCLUDE_ALL_GCS
797 Universe::_collectedHeap = new ParallelScavengeHeap();
798 #else // INCLUDE_ALL_GCS
799 fatal("UseParallelGC not supported in this VM.");
800 #endif // INCLUDE_ALL_GCS
802 } else if (UseG1GC) {
803 #if INCLUDE_ALL_GCS
804 G1CollectorPolicyExt* g1p = new G1CollectorPolicyExt();
805 g1p->initialize_all();
806 G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
807 Universe::_collectedHeap = g1h;
808 #else // INCLUDE_ALL_GCS
809 fatal("UseG1GC not supported in java kernel vm.");
810 #endif // INCLUDE_ALL_GCS
812 } else {
813 GenCollectorPolicy *gc_policy;
815 if (UseSerialGC) {
816 gc_policy = new MarkSweepPolicy();
817 } else if (UseConcMarkSweepGC) {
818 #if INCLUDE_ALL_GCS
819 if (UseAdaptiveSizePolicy) {
820 gc_policy = new ASConcurrentMarkSweepPolicy();
821 } else {
822 gc_policy = new ConcurrentMarkSweepPolicy();
823 }
824 #else // INCLUDE_ALL_GCS
825 fatal("UseConcMarkSweepGC not supported in this VM.");
826 #endif // INCLUDE_ALL_GCS
827 } else { // default old generation
828 gc_policy = new MarkSweepPolicy();
829 }
830 gc_policy->initialize_all();
832 Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
833 }
835 ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
837 jint status = Universe::heap()->initialize();
838 if (status != JNI_OK) {
839 return status;
840 }
842 #ifdef _LP64
843 if (UseCompressedOops) {
844 // Subtract a page because something can get allocated at heap base.
845 // This also makes implicit null checking work, because the
846 // memory+1 page below heap_base needs to cause a signal.
847 // See needs_explicit_null_check.
848 // Only set the heap base for compressed oops because it indicates
849 // compressed oops for pstack code.
850 bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
851 if (verbose) {
852 tty->cr();
853 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
854 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
855 }
856 if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
857 // Can't reserve heap below 32Gb.
858 // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
859 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
860 #ifdef AIX
861 // There is no protected page before the heap. This assures all oops
862 // are decoded so that NULL is preserved, so this page will not be accessed.
863 Universe::set_narrow_oop_use_implicit_null_checks(false);
864 #endif
865 if (verbose) {
866 tty->print(", %s: "PTR_FORMAT,
867 narrow_oop_mode_to_string(HeapBasedNarrowOop),
868 Universe::narrow_oop_base());
869 }
870 } else {
871 Universe::set_narrow_oop_base(0);
872 if (verbose) {
873 tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
874 }
875 #ifdef _WIN64
876 if (!Universe::narrow_oop_use_implicit_null_checks()) {
877 // Don't need guard page for implicit checks in indexed addressing
878 // mode with zero based Compressed Oops.
879 Universe::set_narrow_oop_use_implicit_null_checks(true);
880 }
881 #endif // _WIN64
882 if((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
883 // Can't reserve heap below 4Gb.
884 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
885 } else {
886 Universe::set_narrow_oop_shift(0);
887 if (verbose) {
888 tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
889 }
890 }
891 }
893 if (verbose) {
894 tty->cr();
895 tty->cr();
896 }
897 Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
898 }
899 // Universe::narrow_oop_base() is one page below the heap.
900 assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
901 os::vm_page_size()) ||
902 Universe::narrow_oop_base() == NULL, "invalid value");
903 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
904 Universe::narrow_oop_shift() == 0, "invalid value");
905 #endif
907 // We will never reach the CATCH below since Exceptions::_throw will cause
908 // the VM to exit if an exception is thrown during initialization
910 if (UseTLAB) {
911 assert(Universe::heap()->supports_tlab_allocation(),
912 "Should support thread-local allocation buffers");
913 ThreadLocalAllocBuffer::startup_initialization();
914 }
915 return JNI_OK;
916 }
919 // Reserve the Java heap, which is now the same for all GCs.
920 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
921 assert(alignment <= Arguments::conservative_max_heap_alignment(),
922 err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
923 alignment, Arguments::conservative_max_heap_alignment()));
924 size_t total_reserved = align_size_up(heap_size, alignment);
925 assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
926 "heap size is too big for compressed oops");
928 bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
929 assert(!UseLargePages
930 || UseParallelGC
931 || use_large_pages, "Wrong alignment to use large pages");
933 char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
935 ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
937 if (UseCompressedOops) {
938 if (addr != NULL && !total_rs.is_reserved()) {
939 // Failed to reserve at specified address - the requested memory
940 // region is taken already, for example, by 'java' launcher.
941 // Try again to reserve the heap higher.
942 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
944 ReservedHeapSpace total_rs0(total_reserved, alignment,
945 use_large_pages, addr);
947 if (addr != NULL && !total_rs0.is_reserved()) {
948 // Failed to reserve at specified address again - give up.
949 addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
950 assert(addr == NULL, "");
952 ReservedHeapSpace total_rs1(total_reserved, alignment,
953 use_large_pages, addr);
954 total_rs = total_rs1;
955 } else {
956 total_rs = total_rs0;
957 }
958 }
959 }
961 if (!total_rs.is_reserved()) {
962 vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
963 return total_rs;
964 }
966 if (UseCompressedOops) {
967 // Universe::initialize_heap() will reset this to NULL if unscaled
968 // or zero-based narrow oops are actually used.
969 address base = (address)(total_rs.base() - os::vm_page_size());
970 Universe::set_narrow_oop_base(base);
971 }
972 return total_rs;
973 }
976 // It's the caller's responsibility to ensure glitch-freedom
977 // (if required).
978 void Universe::update_heap_info_at_gc() {
979 _heap_capacity_at_last_gc = heap()->capacity();
980 _heap_used_at_last_gc = heap()->used();
981 }
984 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
985 switch (mode) {
986 case UnscaledNarrowOop:
987 return "32-bits Oops";
988 case ZeroBasedNarrowOop:
989 return "zero based Compressed Oops";
990 case HeapBasedNarrowOop:
991 return "Compressed Oops with base";
992 }
994 ShouldNotReachHere();
995 return "";
996 }
999 Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
1000 if (narrow_oop_base() != 0) {
1001 return HeapBasedNarrowOop;
1002 }
1004 if (narrow_oop_shift() != 0) {
1005 return ZeroBasedNarrowOop;
1006 }
1008 return UnscaledNarrowOop;
1009 }
1012 void universe2_init() {
1013 EXCEPTION_MARK;
1014 Universe::genesis(CATCH);
1015 }
1018 // This function is defined in JVM.cpp
1019 extern void initialize_converter_functions();
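// Second stage of bootstrap, run once Java classes can be loaded: preallocates
// the OutOfMemoryError and other VM-internal exception instances, caches the
// Finalizer.register, ClassLoader.addClass and
// ProtectionDomain.impliesCreateAccessControlContext methods, and initializes
// heap and metaspace monitoring support.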
1021 bool universe_post_init() {
1022 assert(!is_init_completed(), "Error: initialization not yet completed!");
1023 Universe::_fully_initialized = true;
1024 EXCEPTION_MARK;
1025 { ResourceMark rm;
1026 Interpreter::initialize(); // needed for interpreter entry points
1027 if (!UseSharedSpaces) {
1028 HandleMark hm(THREAD);
1029 KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
1030 Universe::reinitialize_vtable_of(ok_h, CHECK_false);
1031 Universe::reinitialize_itables(CHECK_false);
1032 }
1033 }
1035 HandleMark hm(THREAD);
1036 Klass* k;
1037 instanceKlassHandle k_h;
1038 // Setup preallocated empty java.lang.Class array
1039 Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
1041 // Setup preallocated OutOfMemoryError errors
1042 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
1043 k_h = instanceKlassHandle(THREAD, k);
1044 Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
1045 Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
1046 Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
1047 Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
1048 Universe::_out_of_memory_error_gc_overhead_limit =
1049 k_h->allocate_instance(CHECK_false);
1050 Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
1052 // Setup preallocated NullPointerException
1053 // (this is currently used for a cheap & dirty solution in compiler exception handling)
1054 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
1055 Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1056 // Setup preallocated ArithmeticException
1057 // (this is currently used for a cheap & dirty solution in compiler exception handling)
1058 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
1059 Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1060 // Virtual Machine Error for when we get into a situation we can't resolve
1061 k = SystemDictionary::resolve_or_fail(
1062 vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
1063 bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
1064 if (!linked) {
1065 tty->print_cr("Unable to link/verify VirtualMachineError class");
1066 return false; // initialization failed
1067 }
1068 Universe::_virtual_machine_error_instance =
1069 InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1071 Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
1073 if (!DumpSharedSpaces) {
1074 // These are the only Java fields that are currently set during shared space dumping.
1075 // We prefer to not handle this generally, so we always reinitialize these detail messages.
1076 Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
1077 java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
1079 msg = java_lang_String::create_from_str("Metaspace", CHECK_false);
1080 java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
1081 msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
1082 java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
1084 msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
1085 java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
1087 msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
1088 java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
1090 msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
1091 java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
1093 msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
1094 java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
1096 // Setup the array of errors that have preallocated backtrace
1097 k = Universe::_out_of_memory_error_java_heap->klass();
1098 assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
1099 k_h = instanceKlassHandle(THREAD, k);
1101 int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
1102 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
1103 for (int i=0; i<len; i++) {
1104 oop err = k_h->allocate_instance(CHECK_false);
1105 Handle err_h = Handle(THREAD, err);
1106 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1107 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1108 }
1109 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1110 }
1113 // Setup static method for registering finalizers
1114 // The finalizer klass must be linked before looking up the method, in
1115 // case it needs to get rewritten.
1116 InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
1117 Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
1118 vmSymbols::register_method_name(),
1119 vmSymbols::register_method_signature());
1120 if (m == NULL || !m->is_static()) {
1121 tty->print_cr("Unable to link/verify Finalizer.register method");
1122 return false; // initialization failed (cannot throw exception yet)
1123 }
1124 Universe::_finalizer_register_cache->init(
1125 SystemDictionary::Finalizer_klass(), m);
1127 InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->link_class(CHECK_false);
1128 m = InstanceKlass::cast(SystemDictionary::misc_Unsafe_klass())->find_method(
1129 vmSymbols::throwIllegalAccessError_name(),
1130 vmSymbols::void_method_signature());
1131 if (m != NULL && !m->is_static()) {
1132 // Note null is okay; this method is used in itables, and if it is null,
1133 // then AbstractMethodError is thrown instead.
1134 tty->print_cr("Unable to link/verify Unsafe.throwIllegalAccessError method");
1135 return false; // initialization failed (cannot throw exception yet)
1136 }
1137 Universe::_throw_illegal_access_error = m;
1139 // Setup method for registering loaded classes in class loader vector
1140 InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
1141 m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
1142 if (m == NULL || m->is_static()) {
1143 tty->print_cr("Unable to link/verify ClassLoader.addClass method");
1144 return false; // initialization failed (cannot throw exception yet)
1145 }
1146 Universe::_loader_addClass_cache->init(
1147 SystemDictionary::ClassLoader_klass(), m);
1149 // Setup method for checking protection domain
1150 InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
1151 m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
1152 find_method(vmSymbols::impliesCreateAccessControlContext_name(),
1153 vmSymbols::void_boolean_signature());
1154 // Allow NULL which should only happen with bootstrapping.
1155 if (m != NULL) {
1156 if (m->is_static()) {
1157 // NoSuchMethodException doesn't actually work because it tries to run the
1158 // <init> function before java_lang_Class is linked. Print error and exit.
1159 tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
1160 return false; // initialization failed
1161 }
1162 Universe::_pd_implies_cache->init(
1163 SystemDictionary::ProtectionDomain_klass(), m);
1164 }
1166 // The following initializes converter functions for serialization in
1167 // JVM.cpp. If we clean up the StrictMath code above we may want to find
1168 // a better solution for this as well.
1169 initialize_converter_functions();
1171 // This needs to be done before the first scavenge/gc, since
1172 // it's an input to soft ref clearing policy.
1173 {
1174 MutexLocker x(Heap_lock);
1175 Universe::update_heap_info_at_gc();
1176 }
1178 // ("weak") refs processing infrastructure initialization
1179 Universe::heap()->post_initialize();
1181 // Initialize performance counters for metaspaces
1182 MetaspaceCounters::initialize_performance_counters();
1183 CompressedClassSpaceCounters::initialize_performance_counters();
1185 MemoryService::add_metaspace_memory_pools();
1187 MemoryService::set_universe_heap(Universe::_collectedHeap);
1188 #if INCLUDE_CDS
1189 if (UseSharedSpaces) {
1190 SharedClassUtil::initialize(CHECK_false);
1191 }
1192 #endif
1193 return true;
1194 }
1197 void Universe::compute_base_vtable_size() {
1198 _base_vtable_size = ClassLoader::compute_Object_vtable();
1199 }
1202 // %%% The Universe::flush_foo methods belong in CodeCache.
1204 // Flushes compiled methods dependent on dependee.
1205 void Universe::flush_dependents_on(instanceKlassHandle dependee) {
1206 assert_lock_strong(Compile_lock);
1208 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1210 // CodeCache can only be updated by a thread_in_VM and they will all be
1211 // stopped during the safepoint so CodeCache will be safe to update without
1212 // holding the CodeCache_lock.
1214 KlassDepChange changes(dependee);
1216 // Compute the dependent nmethods
1217 if (CodeCache::mark_for_deoptimization(changes) > 0) {
1218 // At least one nmethod has been marked for deoptimization
1219 VM_Deoptimize op;
1220 VMThread::execute(&op);
1221 }
1222 }
1224 // Flushes compiled methods dependent on a particular CallSite
1225 // instance when its target is different from the given MethodHandle.
1226 void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
1227 assert_lock_strong(Compile_lock);
1229 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1231 // CodeCache can only be updated by a thread_in_VM and they will all be
1232 // stopped during the safepoint so CodeCache will be safe to update without
1233 // holding the CodeCache_lock.
1235 CallSiteDepChange changes(call_site(), method_handle());
1237 // Compute the dependent nmethods that have a reference to a
1238 // CallSite object. We use InstanceKlass::mark_dependent_nmethod
1239 // directly instead of CodeCache::mark_for_deoptimization because we
1240 // want dependents on the call site class only not all classes in
1241 // the ContextStream.
1242 int marked = 0;
1243 {
1244 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1245 InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
1246 marked = call_site_klass->mark_dependent_nmethods(changes);
1247 }
1248 if (marked > 0) {
1249 // At least one nmethod has been marked for deoptimization
1250 VM_Deoptimize op;
1251 VMThread::execute(&op);
1252 }
1253 }
1255 #ifdef HOTSWAP
1256 // Flushes compiled methods dependent on dependee in the evolutionary sense
1257 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1258 // --- Compile_lock is not held. However we are at a safepoint.
1259 assert_locked_or_safepoint(Compile_lock);
1260 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1262 // CodeCache can only be updated by a thread_in_VM and they will all be
1263 // stopped during the safepoint so CodeCache will be safe to update without
1264 // holding the CodeCache_lock.
1266 // Compute the dependent nmethods
1267 if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
1268 // At least one nmethod has been marked for deoptimization
1270 // All this already happens inside a VM_Operation, so we'll do all the work here.
1271 // Stuff copied from VM_Deoptimize and modified slightly.
1273 // We do not want any GCs to happen while we are in the middle of this VM operation
1274 ResourceMark rm;
1275 DeoptimizationMarker dm;
1277 // Deoptimize all activations depending on marked nmethods
1278 Deoptimization::deoptimize_dependents();
1280 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1281 CodeCache::make_marked_nmethods_not_entrant();
1282 }
1283 }
1284 #endif // HOTSWAP
1287 // Flushes compiled methods dependent on dependee
1288 void Universe::flush_dependents_on_method(methodHandle m_h) {
1289 // --- Compile_lock is not held. However we are at a safepoint.
1290 assert_locked_or_safepoint(Compile_lock);
1292 // CodeCache can only be updated by a thread_in_VM and they will all be
1293 // stopped during the safepoint so CodeCache will be safe to update without
1294 // holding the CodeCache_lock.
1296 // Compute the dependent nmethods
1297 if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
1298 // At least one nmethod has been marked for deoptimization
1300 // All this already happens inside a VM_Operation, so we'll do all the work here.
1301 // Stuff copied from VM_Deoptimize and modified slightly.
1303 // We do not want any GCs to happen while we are in the middle of this VM operation
1304 ResourceMark rm;
1305 DeoptimizationMarker dm;
1307 // Deoptimize all activations depending on marked nmethods
1308 Deoptimization::deoptimize_dependents();
1310 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1311 CodeCache::make_marked_nmethods_not_entrant();
1312 }
1313 }
1315 void Universe::print() {
1316 print_on(gclog_or_tty);
1317 }
1319 void Universe::print_on(outputStream* st, bool extended) {
1320 st->print_cr("Heap");
1321 if (!extended) {
1322 heap()->print_on(st);
1323 } else {
1324 heap()->print_extended_on(st);
1325 }
1326 }
1328 void Universe::print_heap_at_SIGBREAK() {
1329 if (PrintHeapAtSIGBREAK) {
1330 MutexLocker hl(Heap_lock);
1331 print_on(tty);
1332 tty->cr();
1333 tty->flush();
1334 }
1335 }
1337 void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
1338 st->print_cr("{Heap before GC invocations=%u (full %u):",
1339 heap()->total_collections(),
1340 heap()->total_full_collections());
1341 if (!PrintHeapAtGCExtended || ignore_extended) {
1342 heap()->print_on(st);
1343 } else {
1344 heap()->print_extended_on(st);
1345 }
1346 }
1348 void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
1349 st->print_cr("Heap after GC invocations=%u (full %u):",
1350 heap()->total_collections(),
1351 heap()->total_full_collections());
1352 if (!PrintHeapAtGCExtended || ignore_extended) {
1353 heap()->print_on(st);
1354 } else {
1355 heap()->print_extended_on(st);
1356 }
1357 st->print_cr("}");
1358 }
1360 void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
1361 // The use of _verify_in_progress is a temporary workaround for
1362 // 6320749. Don't bother with creating a class to set and clear
1363 // it since it is only used in this method and the control flow is
1364 // straightforward.
1365 _verify_in_progress = true;
1367 COMPILER2_PRESENT(
1368 assert(!DerivedPointerTable::is_active(),
1369 "DPT should not be active during verification "
1370 "(of thread stacks below)");
1371 )
1373 ResourceMark rm;
1374 HandleMark hm; // Handles created during verification can be zapped
1375 _verify_count++;
1377 if (!silent) gclog_or_tty->print("%s", prefix);
1378 if (!silent) gclog_or_tty->print("[Verifying ");
1379 if (!silent) gclog_or_tty->print("threads ");
1380 Threads::verify();
1381 if (!silent) gclog_or_tty->print("heap ");
1382 heap()->verify(silent, option);
1383 if (!silent) gclog_or_tty->print("syms ");
1384 SymbolTable::verify();
1385 if (!silent) gclog_or_tty->print("strs ");
1386 StringTable::verify();
1387 {
1388 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1389 if (!silent) gclog_or_tty->print("zone ");
1390 CodeCache::verify();
1391 }
1392 if (!silent) gclog_or_tty->print("dict ");
1393 SystemDictionary::verify();
1394 #ifndef PRODUCT
1395 if (!silent) gclog_or_tty->print("cldg ");
1396 ClassLoaderDataGraph::verify();
1397 #endif
1398 if (!silent) gclog_or_tty->print("metaspace chunks ");
1399 MetaspaceAux::verify_free_chunks();
1400 if (!silent) gclog_or_tty->print("hand ");
1401 JNIHandles::verify();
1402 if (!silent) gclog_or_tty->print("C-heap ");
1403 os::check_heap();
1404 if (!silent) gclog_or_tty->print("code cache ");
1405 CodeCache::verify_oops();
1406 if (!silent) gclog_or_tty->print_cr("]");
1408 _verify_in_progress = false;
1409 }
1411 // Oop verification (see MacroAssembler::verify_oop)
1413 static uintptr_t _verify_oop_data[2] = {0, (uintptr_t)-1};
1414 static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
1417 #ifndef PRODUCT
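// Computes a (mask, bits) pair such that any properly aligned address in
// [low_boundary, high_boundary) satisfies (addr & mask) == bits; the assembler
// oop verifier uses this as a cheap plausibility check for oop values.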
1419 static void calculate_verify_data(uintptr_t verify_data[2],
1420 HeapWord* low_boundary,
1421 HeapWord* high_boundary) {
1422 assert(low_boundary < high_boundary, "bad interval");
1424 // decide which low-order bits we require to be clear:
1425 size_t alignSize = MinObjAlignmentInBytes;
1426 size_t min_object_size = CollectedHeap::min_fill_size();
1428 // make an inclusive limit:
1429 uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
1430 uintptr_t min = (uintptr_t)low_boundary;
1431 assert(min < max, "bad interval");
1432 uintptr_t diff = max ^ min;
1434 // throw away enough low-order bits to make the diff vanish
1435 uintptr_t mask = (uintptr_t)(-1);
1436 while ((mask & diff) != 0)
1437 mask <<= 1;
1438 uintptr_t bits = (min & mask);
1439 assert(bits == (max & mask), "correct mask");
1440 // check an intermediate value between min and max, just to make sure:
1441 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1443 // require address alignment, too:
1444 mask |= (alignSize - 1);
1446 if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
1447 assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
1448 }
1449 verify_data[0] = mask;
1450 verify_data[1] = bits;
1451 }
1453 // Oop verification (see MacroAssembler::verify_oop)
1455 uintptr_t Universe::verify_oop_mask() {
1456 MemRegion m = heap()->reserved_region();
1457 calculate_verify_data(_verify_oop_data,
1458 m.start(),
1459 m.end());
1460 return _verify_oop_data[0];
1461 }
1465 uintptr_t Universe::verify_oop_bits() {
1466 verify_oop_mask();
1467 return _verify_oop_data[1];
1468 }
1470 uintptr_t Universe::verify_mark_mask() {
1471 return markOopDesc::lock_mask_in_place;
1472 }
1474 uintptr_t Universe::verify_mark_bits() {
1475 intptr_t mask = verify_mark_mask();
1476 intptr_t bits = (intptr_t)markOopDesc::prototype();
1477 assert((bits & ~mask) == 0, "no stray header bits");
1478 return bits;
1479 }
1480 #endif // PRODUCT
1483 void Universe::compute_verify_oop_data() {
1484 verify_oop_mask();
1485 verify_oop_bits();
1486 verify_mark_mask();
1487 verify_mark_bits();
1488 }
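// LatestMethodCache stores a klass plus a method idnum rather than a raw
// Method*, so the cached entry stays valid across class redefinition;
// get_method() re-resolves the current Method* from the idnum on each call.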
1491 void LatestMethodCache::init(Klass* k, Method* m) {
1492 if (!UseSharedSpaces) {
1493 _klass = k;
1494 }
1495 #ifndef PRODUCT
1496 else {
1497 // sharing initialization should have already set up _klass
1498 assert(_klass != NULL, "just checking");
1499 }
1500 #endif
1502 _method_idnum = m->method_idnum();
1503 assert(_method_idnum >= 0, "sanity check");
1504 }
1507 Method* LatestMethodCache::get_method() {
1508 if (klass() == NULL) return NULL;
1509 InstanceKlass* ik = InstanceKlass::cast(klass());
1510 Method* m = ik->method_with_idnum(method_idnum());
1511 assert(m != NULL, "sanity check");
1512 return m;
1513 }
1516 #ifdef ASSERT
1517 // Release dummy object(s) at bottom of heap
1518 bool Universe::release_fullgc_alot_dummy() {
1519 MutexLocker ml(FullGCALot_lock);
1520 if (_fullgc_alot_dummy_array != NULL) {
1521 if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
1522 // No more dummies to release, release entire array instead
1523 _fullgc_alot_dummy_array = NULL;
1524 return false;
1525 }
1526 if (!UseConcMarkSweepGC) {
1527 // Release dummy at bottom of old generation
1528 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1529 }
1530 // Release dummy at bottom of permanent generation
1531 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1532 }
1533 return true;
1534 }
1536 #endif // ASSERT