Wed, 10 Oct 2012 17:04:33 -0400
7199068: NPG: SharedSkipVerify is meaningless
Summary: Remove the SharedSkipVerify flag
Reviewed-by: kamg, sspitsyn, coleenp
Contributed-by: harold.seigel@oracle.com
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/classLoader.hpp"
27 #include "classfile/classLoaderData.hpp"
28 #include "classfile/javaClasses.hpp"
29 #include "classfile/symbolTable.hpp"
30 #include "classfile/systemDictionary.hpp"
31 #include "classfile/vmSymbols.hpp"
32 #include "code/codeCache.hpp"
33 #include "code/dependencies.hpp"
34 #include "gc_interface/collectedHeap.inline.hpp"
35 #include "interpreter/interpreter.hpp"
36 #include "memory/cardTableModRefBS.hpp"
37 #include "memory/gcLocker.inline.hpp"
38 #include "memory/genCollectedHeap.hpp"
39 #include "memory/genRemSet.hpp"
40 #include "memory/generation.hpp"
41 #include "memory/metadataFactory.hpp"
42 #include "memory/metaspaceShared.hpp"
43 #include "memory/oopFactory.hpp"
44 #include "memory/space.hpp"
45 #include "memory/universe.hpp"
46 #include "memory/universe.inline.hpp"
47 #include "oops/constantPool.hpp"
48 #include "oops/instanceClassLoaderKlass.hpp"
49 #include "oops/instanceKlass.hpp"
50 #include "oops/instanceMirrorKlass.hpp"
51 #include "oops/instanceRefKlass.hpp"
52 #include "oops/oop.inline.hpp"
53 #include "oops/typeArrayKlass.hpp"
54 #include "prims/jvmtiRedefineClassesTrace.hpp"
55 #include "runtime/aprofiler.hpp"
56 #include "runtime/arguments.hpp"
57 #include "runtime/deoptimization.hpp"
58 #include "runtime/fprofiler.hpp"
59 #include "runtime/handles.inline.hpp"
60 #include "runtime/init.hpp"
61 #include "runtime/java.hpp"
62 #include "runtime/javaCalls.hpp"
63 #include "runtime/sharedRuntime.hpp"
64 #include "runtime/synchronizer.hpp"
65 #include "runtime/timer.hpp"
66 #include "runtime/vm_operations.hpp"
67 #include "services/memoryService.hpp"
68 #include "utilities/copy.hpp"
69 #include "utilities/events.hpp"
70 #include "utilities/hashtable.inline.hpp"
71 #include "utilities/preserveException.hpp"
72 #ifdef TARGET_OS_FAMILY_linux
73 # include "thread_linux.inline.hpp"
74 #endif
75 #ifdef TARGET_OS_FAMILY_solaris
76 # include "thread_solaris.inline.hpp"
77 #endif
78 #ifdef TARGET_OS_FAMILY_windows
79 # include "thread_windows.inline.hpp"
80 #endif
81 #ifdef TARGET_OS_FAMILY_bsd
82 # include "thread_bsd.inline.hpp"
83 #endif
84 #ifndef SERIALGC
85 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
86 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
87 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
88 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
89 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
90 #endif
92 // Known objects
93 Klass* Universe::_boolArrayKlassObj = NULL;
94 Klass* Universe::_byteArrayKlassObj = NULL;
95 Klass* Universe::_charArrayKlassObj = NULL;
96 Klass* Universe::_intArrayKlassObj = NULL;
97 Klass* Universe::_shortArrayKlassObj = NULL;
98 Klass* Universe::_longArrayKlassObj = NULL;
99 Klass* Universe::_singleArrayKlassObj = NULL;
100 Klass* Universe::_doubleArrayKlassObj = NULL;
101 Klass* Universe::_typeArrayKlassObjs[T_VOID+1] = { NULL /*, NULL...*/ };
102 Klass* Universe::_objectArrayKlassObj = NULL;
103 oop Universe::_int_mirror = NULL;
104 oop Universe::_float_mirror = NULL;
105 oop Universe::_double_mirror = NULL;
106 oop Universe::_byte_mirror = NULL;
107 oop Universe::_bool_mirror = NULL;
108 oop Universe::_char_mirror = NULL;
109 oop Universe::_long_mirror = NULL;
110 oop Universe::_short_mirror = NULL;
111 oop Universe::_void_mirror = NULL;
112 oop Universe::_mirrors[T_VOID+1] = { NULL /*, NULL...*/ };
113 oop Universe::_main_thread_group = NULL;
114 oop Universe::_system_thread_group = NULL;
115 objArrayOop Universe::_the_empty_class_klass_array = NULL;
116 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
117 oop Universe::_the_null_string = NULL;
118 oop Universe::_the_min_jint_string = NULL;
119 LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
120 LatestMethodOopCache* Universe::_loader_addClass_cache = NULL;
121 ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL;
122 oop Universe::_out_of_memory_error_java_heap = NULL;
123 oop Universe::_out_of_memory_error_perm_gen = NULL;
124 oop Universe::_out_of_memory_error_array_size = NULL;
125 oop Universe::_out_of_memory_error_gc_overhead_limit = NULL;
126 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
127 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
128 bool Universe::_verify_in_progress = false;
129 oop Universe::_null_ptr_exception_instance = NULL;
130 oop Universe::_arithmetic_exception_instance = NULL;
131 oop Universe::_virtual_machine_error_instance = NULL;
132 oop Universe::_vm_exception = NULL;
133 Array<int>* Universe::_the_empty_int_array = NULL;
134 Array<u2>* Universe::_the_empty_short_array = NULL;
135 Array<Klass*>* Universe::_the_empty_klass_array = NULL;
136 Array<Method*>* Universe::_the_empty_method_array = NULL;
138 // These variables are guarded by FullGCALot_lock.
139 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
140 debug_only(int Universe::_fullgc_alot_dummy_next = 0;)
142 // Heap
143 int Universe::_verify_count = 0;
145 int Universe::_base_vtable_size = 0;
146 bool Universe::_bootstrapping = false;
147 bool Universe::_fully_initialized = false;
149 size_t Universe::_heap_capacity_at_last_gc;
150 size_t Universe::_heap_used_at_last_gc = 0;
152 CollectedHeap* Universe::_collectedHeap = NULL;
154 NarrowOopStruct Universe::_narrow_oop = { NULL, 0, true };
157 void Universe::basic_type_classes_do(void f(Klass*)) {
158 f(boolArrayKlassObj());
159 f(byteArrayKlassObj());
160 f(charArrayKlassObj());
161 f(intArrayKlassObj());
162 f(shortArrayKlassObj());
163 f(longArrayKlassObj());
164 f(singleArrayKlassObj());
165 f(doubleArrayKlassObj());
166 }
168 void Universe::oops_do(OopClosure* f, bool do_all) {
170 f->do_oop((oop*) &_int_mirror);
171 f->do_oop((oop*) &_float_mirror);
172 f->do_oop((oop*) &_double_mirror);
173 f->do_oop((oop*) &_byte_mirror);
174 f->do_oop((oop*) &_bool_mirror);
175 f->do_oop((oop*) &_char_mirror);
176 f->do_oop((oop*) &_long_mirror);
177 f->do_oop((oop*) &_short_mirror);
178 f->do_oop((oop*) &_void_mirror);
180 for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
181 f->do_oop((oop*) &_mirrors[i]);
182 }
183 assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
185 f->do_oop((oop*)&_the_empty_class_klass_array);
186 f->do_oop((oop*)&_the_null_string);
187 f->do_oop((oop*)&_the_min_jint_string);
188 f->do_oop((oop*)&_out_of_memory_error_java_heap);
189 f->do_oop((oop*)&_out_of_memory_error_perm_gen);
190 f->do_oop((oop*)&_out_of_memory_error_array_size);
191 f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
192 f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
193 f->do_oop((oop*)&_null_ptr_exception_instance);
194 f->do_oop((oop*)&_arithmetic_exception_instance);
195 f->do_oop((oop*)&_virtual_machine_error_instance);
196 f->do_oop((oop*)&_main_thread_group);
197 f->do_oop((oop*)&_system_thread_group);
198 f->do_oop((oop*)&_vm_exception);
199 debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
200 }
202 // Serialize metadata in and out of CDS archive, not oops.
203 void Universe::serialize(SerializeClosure* f, bool do_all) {
205 f->do_ptr((void**)&_boolArrayKlassObj);
206 f->do_ptr((void**)&_byteArrayKlassObj);
207 f->do_ptr((void**)&_charArrayKlassObj);
208 f->do_ptr((void**)&_intArrayKlassObj);
209 f->do_ptr((void**)&_shortArrayKlassObj);
210 f->do_ptr((void**)&_longArrayKlassObj);
211 f->do_ptr((void**)&_singleArrayKlassObj);
212 f->do_ptr((void**)&_doubleArrayKlassObj);
213 f->do_ptr((void**)&_objectArrayKlassObj);
215 {
216 for (int i = 0; i < T_VOID+1; i++) {
217 if (_typeArrayKlassObjs[i] != NULL) {
218 assert(i >= T_BOOLEAN, "checking");
219 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
220 } else if (do_all) {
221 f->do_ptr((void**)&_typeArrayKlassObjs[i]);
222 }
223 }
224 }
226 f->do_ptr((void**)&_the_array_interfaces_array);
227 f->do_ptr((void**)&_the_empty_int_array);
228 f->do_ptr((void**)&_the_empty_short_array);
229 f->do_ptr((void**)&_the_empty_method_array);
230 f->do_ptr((void**)&_the_empty_klass_array);
231 _finalizer_register_cache->serialize(f);
232 _loader_addClass_cache->serialize(f);
233 _reflect_invoke_cache->serialize(f);
234 }
236 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
237 if (size < alignment || size % alignment != 0) {
238 ResourceMark rm;
239 stringStream st;
240 st.print("Size of %s (%ld bytes) must be aligned to %ld bytes", name, size, alignment);
241 char* error = st.as_string();
242 vm_exit_during_initialization(error);
243 }
244 }
246 void initialize_basic_type_klass(Klass* k, TRAPS) {
247 Klass* ok = SystemDictionary::Object_klass();
248 if (UseSharedSpaces) {
249 assert(k->super() == ok, "u3");
250 k->restore_unshareable_info(CHECK);
251 } else {
252 k->initialize_supers(ok, CHECK);
253 }
254 k->append_to_sibling_list();
255 }
257 void Universe::genesis(TRAPS) {
258 ResourceMark rm;
260 { FlagSetting fs(_bootstrapping, true);
262 { MutexLocker mc(Compile_lock);
264 // determine base vtable size; without that we cannot create the array klasses
265 compute_base_vtable_size();
267 if (!UseSharedSpaces) {
268 _boolArrayKlassObj = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
269 _charArrayKlassObj = TypeArrayKlass::create_klass(T_CHAR, sizeof(jchar), CHECK);
270 _singleArrayKlassObj = TypeArrayKlass::create_klass(T_FLOAT, sizeof(jfloat), CHECK);
271 _doubleArrayKlassObj = TypeArrayKlass::create_klass(T_DOUBLE, sizeof(jdouble), CHECK);
272 _byteArrayKlassObj = TypeArrayKlass::create_klass(T_BYTE, sizeof(jbyte), CHECK);
273 _shortArrayKlassObj = TypeArrayKlass::create_klass(T_SHORT, sizeof(jshort), CHECK);
274 _intArrayKlassObj = TypeArrayKlass::create_klass(T_INT, sizeof(jint), CHECK);
275 _longArrayKlassObj = TypeArrayKlass::create_klass(T_LONG, sizeof(jlong), CHECK);
277 _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
278 _typeArrayKlassObjs[T_CHAR] = _charArrayKlassObj;
279 _typeArrayKlassObjs[T_FLOAT] = _singleArrayKlassObj;
280 _typeArrayKlassObjs[T_DOUBLE] = _doubleArrayKlassObj;
281 _typeArrayKlassObjs[T_BYTE] = _byteArrayKlassObj;
282 _typeArrayKlassObjs[T_SHORT] = _shortArrayKlassObj;
283 _typeArrayKlassObjs[T_INT] = _intArrayKlassObj;
284 _typeArrayKlassObjs[T_LONG] = _longArrayKlassObj;
286 ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
288 _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
289 _the_empty_int_array = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
290 _the_empty_short_array = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
291 _the_empty_method_array = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
292 _the_empty_klass_array = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
293 }
294 }
296 vmSymbols::initialize(CHECK);
298 SystemDictionary::initialize(CHECK);
300 Klass* ok = SystemDictionary::Object_klass();
302 _the_null_string = StringTable::intern("null", CHECK);
303 _the_min_jint_string = StringTable::intern("-2147483648", CHECK);
305 if (UseSharedSpaces) {
306 // Verify shared interfaces array.
307 assert(_the_array_interfaces_array->at(0) ==
308 SystemDictionary::Cloneable_klass(), "u3");
309 assert(_the_array_interfaces_array->at(1) ==
310 SystemDictionary::Serializable_klass(), "u3");
311 } else {
312 // Set up shared interfaces array. (Do this before supers are set up.)
313 _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
314 _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
315 }
317 initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
318 initialize_basic_type_klass(charArrayKlassObj(), CHECK);
319 initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
320 initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
321 initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
322 initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
323 initialize_basic_type_klass(intArrayKlassObj(), CHECK);
324 initialize_basic_type_klass(longArrayKlassObj(), CHECK);
325 } // end of core bootstrapping
327 // Maybe this could be lifted up now that object array can be initialized
328 // during the bootstrapping.
330 // OLD
331 // Initialize _objectArrayKlass after core bootstrapping to make
332 // sure the super class is set up properly for _objectArrayKlass.
333 // ---
334 // NEW
335 // Since some of the old system object arrays have been converted to
336 // ordinary object arrays, _objectArrayKlass will be loaded when
337 // SystemDictionary::initialize(CHECK); is run. See the extra check
338 // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
339 _objectArrayKlassObj = InstanceKlass::
340 cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
341 // OLD
342 // Add the class to the class hierarchy manually to make sure that
343 // its vtable is initialized after core bootstrapping is completed.
344 // ---
345 // New
346 // Have already been initialized.
347 Klass::cast(_objectArrayKlassObj)->append_to_sibling_list();
349 // Compute is_jdk version flags.
350 // Only 1.3 or later has the java.lang.Shutdown class.
351 // Only 1.4 or later has the java.lang.CharSequence interface.
352 // Only 1.5 or later has the java.lang.management.MemoryUsage class.
353 if (JDK_Version::is_partially_initialized()) {
354 uint8_t jdk_version;
355 Klass* k = SystemDictionary::resolve_or_null(
356 vmSymbols::java_lang_management_MemoryUsage(), THREAD);
357 CLEAR_PENDING_EXCEPTION; // ignore exceptions
358 if (k == NULL) {
359 k = SystemDictionary::resolve_or_null(
360 vmSymbols::java_lang_CharSequence(), THREAD);
361 CLEAR_PENDING_EXCEPTION; // ignore exceptions
362 if (k == NULL) {
363 k = SystemDictionary::resolve_or_null(
364 vmSymbols::java_lang_Shutdown(), THREAD);
365 CLEAR_PENDING_EXCEPTION; // ignore exceptions
366 if (k == NULL) {
367 jdk_version = 2;
368 } else {
369 jdk_version = 3;
370 }
371 } else {
372 jdk_version = 4;
373 }
374 } else {
375 jdk_version = 5;
376 }
377 JDK_Version::fully_initialize(jdk_version);
378 }
380 #ifdef ASSERT
381 if (FullGCALot) {
382 // Allocate an array of dummy objects.
383 // We'd like these to be at the bottom of the old generation,
384 // so that when we free one and then collect,
385 // (almost) the whole heap moves
386 // and we find out if we actually update all the oops correctly.
387 // But we can't allocate directly in the old generation,
388 // so we allocate wherever, and hope that the first collection
389 // moves these objects to the bottom of the old generation.
390 // We can allocate directly in the permanent generation, so we do.
391 int size;
392 if (UseConcMarkSweepGC) {
393 warning("Using +FullGCALot with concurrent mark sweep gc "
394 "will not force all objects to relocate");
395 size = FullGCALotDummies;
396 } else {
397 size = FullGCALotDummies * 2;
398 }
399 objArrayOop naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
400 objArrayHandle dummy_array(THREAD, naked_array);
401 int i = 0;
402 while (i < size) {
403 // Allocate dummy in old generation
404 oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
405 dummy_array->obj_at_put(i++, dummy);
406 }
407 {
408 // Only modify the global variable inside the mutex.
409 // If we had a race to here, the other dummy_array instances
410 // and their elements just get dropped on the floor, which is fine.
411 MutexLocker ml(FullGCALot_lock);
412 if (_fullgc_alot_dummy_array == NULL) {
413 _fullgc_alot_dummy_array = dummy_array();
414 }
415 }
416 assert(i == _fullgc_alot_dummy_array->length(), "just checking");
417 }
418 #endif
419 }
421 // CDS support for patching vtables in metadata in the shared archive.
422 // All types inherited from Metadata have vtables, but not types inherited
423 // from MetaspaceObj, because the latter does not have virtual functions.
424 // If the metadata type has a vtable, it cannot be shared in the read-only
425 // section of the CDS archive, because the vtable pointer is patched.
426 static inline void* dereference(void* addr) {
427 return *(void**)addr;
428 }
430 static inline void add_vtable(void** list, int* n, void* o, int count) {
431 guarantee((*n) < count, "vtable list too small");
432 void* vtable = dereference(o);
433 assert(dereference(vtable) != NULL, "invalid vtable");
434 list[(*n)++] = vtable;
435 }
437 void Universe::init_self_patching_vtbl_list(void** list, int count) {
438 int n = 0;
439 { InstanceKlass o; add_vtable(list, &n, &o, count); }
440 { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
441 { InstanceMirrorKlass o; add_vtable(list, &n, &o, count); }
442 { InstanceRefKlass o; add_vtable(list, &n, &o, count); }
443 { TypeArrayKlass o; add_vtable(list, &n, &o, count); }
444 { ObjArrayKlass o; add_vtable(list, &n, &o, count); }
445 { Method o; add_vtable(list, &n, &o, count); }
446 { ConstantPool o; add_vtable(list, &n, &o, count); }
447 }
449 void Universe::initialize_basic_type_mirrors(TRAPS) {
450 assert(_int_mirror==NULL, "basic type mirrors already initialized");
451 _int_mirror =
452 java_lang_Class::create_basic_type_mirror("int", T_INT, CHECK);
453 _float_mirror =
454 java_lang_Class::create_basic_type_mirror("float", T_FLOAT, CHECK);
455 _double_mirror =
456 java_lang_Class::create_basic_type_mirror("double", T_DOUBLE, CHECK);
457 _byte_mirror =
458 java_lang_Class::create_basic_type_mirror("byte", T_BYTE, CHECK);
459 _bool_mirror =
460 java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
461 _char_mirror =
462 java_lang_Class::create_basic_type_mirror("char", T_CHAR, CHECK);
463 _long_mirror =
464 java_lang_Class::create_basic_type_mirror("long", T_LONG, CHECK);
465 _short_mirror =
466 java_lang_Class::create_basic_type_mirror("short", T_SHORT, CHECK);
467 _void_mirror =
468 java_lang_Class::create_basic_type_mirror("void", T_VOID, CHECK);
470 _mirrors[T_INT] = _int_mirror;
471 _mirrors[T_FLOAT] = _float_mirror;
472 _mirrors[T_DOUBLE] = _double_mirror;
473 _mirrors[T_BYTE] = _byte_mirror;
474 _mirrors[T_BOOLEAN] = _bool_mirror;
475 _mirrors[T_CHAR] = _char_mirror;
476 _mirrors[T_LONG] = _long_mirror;
477 _mirrors[T_SHORT] = _short_mirror;
478 _mirrors[T_VOID] = _void_mirror;
479 //_mirrors[T_OBJECT] = InstanceKlass::cast(_object_klass)->java_mirror();
480 //_mirrors[T_ARRAY] = InstanceKlass::cast(_object_klass)->java_mirror();
481 }
483 void Universe::fixup_mirrors(TRAPS) {
484 // Bootstrap problem: all classes get a mirror (java.lang.Class instance) assigned eagerly,
485 // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
486 // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
487 // that the number of objects allocated at this point is very small.
488 assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
489 HandleMark hm(THREAD);
490 // Cache the start of the static fields
491 InstanceMirrorKlass::init_offset_of_static_fields();
493 GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
494 int list_length = list->length();
495 for (int i = 0; i < list_length; i++) {
496 Klass* k = list->at(i);
497 assert(k->is_klass(), "List should only hold classes");
498 EXCEPTION_MARK;
499 KlassHandle kh(THREAD, k);
500 java_lang_Class::fixup_mirror(kh, CATCH);
501 }
502 delete java_lang_Class::fixup_mirror_list();
503 java_lang_Class::set_fixup_mirror_list(NULL);
504 }
506 static bool has_run_finalizers_on_exit = false;
508 void Universe::run_finalizers_on_exit() {
509 if (has_run_finalizers_on_exit) return;
510 has_run_finalizers_on_exit = true;
512 // Called on VM exit. This ought to be run in a separate thread.
513 if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
514 {
515 PRESERVE_EXCEPTION_MARK;
516 KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
517 JavaValue result(T_VOID);
518 JavaCalls::call_static(
519 &result,
520 finalizer_klass,
521 vmSymbols::run_finalizers_on_exit_name(),
522 vmSymbols::void_method_signature(),
523 THREAD
524 );
525 // Ignore any pending exceptions
526 CLEAR_PENDING_EXCEPTION;
527 }
528 }
531 // initialize_vtable could cause gc if
532 // 1) we specified true to initialize_vtable and
533 // 2) this ran after gc was enabled
534 // In case those ever change we use handles for oops
535 void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
536 // init vtable of k and all subclasses
537 Klass* ko = k_h();
538 klassVtable* vt = ko->vtable();
539 if (vt) vt->initialize_vtable(false, CHECK);
540 if (ko->oop_is_instance()) {
541 InstanceKlass* ik = (InstanceKlass*)ko;
542 for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) {
543 reinitialize_vtable_of(s_h, CHECK);
544 }
545 }
546 }
549 void initialize_itable_for_klass(Klass* k, TRAPS) {
550 InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
551 }
554 void Universe::reinitialize_itables(TRAPS) {
555 SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
557 }
560 bool Universe::on_page_boundary(void* addr) {
561 return ((uintptr_t) addr) % os::vm_page_size() == 0;
562 }
565 bool Universe::should_fill_in_stack_trace(Handle throwable) {
566 // never attempt to fill in the stack trace of preallocated errors that do not have
567 // backtrace. These errors are kept alive forever and may be "re-used" when all
568 // preallocated errors with backtrace have been consumed. Also need to avoid
569 // a potential loop which could happen if an out of memory occurs when attempting
570 // to allocate the backtrace.
571 return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
572 (throwable() != Universe::_out_of_memory_error_perm_gen) &&
573 (throwable() != Universe::_out_of_memory_error_array_size) &&
574 (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
575 }
578 oop Universe::gen_out_of_memory_error(oop default_err) {
579 // generate an out of memory error:
580 // - if there is a preallocated error with backtrace available then return it with
581 // a filled in stack trace.
582 // - if there are no preallocated errors with backtrace available then return
583 // an error without backtrace.
584 int next;
585 if (_preallocated_out_of_memory_error_avail_count > 0) {
586 next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
587 assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
588 } else {
589 next = -1;
590 }
591 if (next < 0) {
592 // all preallocated errors have been used.
593 // return default
594 return default_err;
595 } else {
596 // get the error object at the slot and set it to NULL so that the
597 // array isn't keeping it alive anymore.
598 oop exc = preallocated_out_of_memory_errors()->obj_at(next);
599 assert(exc != NULL, "slot has been used already");
600 preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
602 // use the message from the default error
603 oop msg = java_lang_Throwable::message(default_err);
604 assert(msg != NULL, "no message");
605 java_lang_Throwable::set_message(exc, msg);
607 // populate the stack trace and return it.
608 java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
609 return exc;
610 }
611 }
613 static intptr_t non_oop_bits = 0;
615 void* Universe::non_oop_word() {
616 // Neither the high bits nor the low bits of this value are allowed
617 // to look like (respectively) the high or low bits of a real oop.
618 //
619 // High and low are CPU-specific notions, but low always includes
620 // the low-order bit. Since oops are always aligned at least mod 4,
621 // setting the low-order bit will ensure that the low half of the
622 // word will never look like that of a real oop.
623 //
624 // Using the OS-supplied non-memory-address word (usually 0 or -1)
625 // will take care of the high bits, however many there are.
627 if (non_oop_bits == 0) {
628 non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
629 }
631 return (void*)non_oop_bits;
632 }
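// Illustrative sketch (hypothetical helper, not part of this file): because real
// oops are aligned to at least 4 bytes, any word with its low bit set, such as
// the value produced above, can never compare equal to a real oop.
static inline bool example_is_non_oop_word(void* p) {
  return ((uintptr_t)p & 1) != 0;   // true for Universe::non_oop_word()
}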
634 jint universe_init() {
635 assert(!Universe::_fully_initialized, "called after initialize_vtables");
636 guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
637 "LogHeapWordSize is incorrect.");
638 guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
639 guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
640 "oop size is not not a multiple of HeapWord size");
641 TraceTime timer("Genesis", TraceStartupTime);
642 GC_locker::lock(); // do not allow gc during bootstrapping
643 JavaClasses::compute_hard_coded_offsets();
645 jint status = Universe::initialize_heap();
646 if (status != JNI_OK) {
647 return status;
648 }
650 // Create memory for metadata. Must be after initializing heap for
651 // DumpSharedSpaces.
652 ClassLoaderData::init_null_class_loader_data();
654 // We have a heap so create the Method* caches before
655 // Metaspace::initialize_shared_spaces() tries to populate them.
656 Universe::_finalizer_register_cache = new LatestMethodOopCache();
657 Universe::_loader_addClass_cache = new LatestMethodOopCache();
658 Universe::_reflect_invoke_cache = new ActiveMethodOopsCache();
660 if (UseSharedSpaces) {
661 // Read the data structures supporting the shared spaces (shared
662 // system dictionary, symbol table, etc.). After that, access to
663 // the file (other than the mapped regions) is no longer needed, and
664 // the file is closed. Closing the file does not affect the
665 // currently mapped regions.
666 MetaspaceShared::initialize_shared_spaces();
667 StringTable::create_table();
668 } else {
669 SymbolTable::create_table();
670 StringTable::create_table();
671 ClassLoader::create_package_info_table();
672 }
674 return JNI_OK;
675 }
677 // Choose the heap base address and oop encoding mode
678 // when compressed oops are used:
680 // Unscaled - Use 32-bit oops without encoding when
680 // NarrowOopHeapBaseMin + heap_size < 4Gb
681 // ZeroBased - Use zero based compressed oops with encoding when
682 // NarrowOopHeapBaseMin + heap_size < 32Gb
683 // HeapBased - Use compressed oops with heap base + encoding.
685 // 4Gb
686 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
687 // 32Gb
688 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
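// A minimal decode sketch under the scheme above (hypothetical helper, not
// HotSpot's implementation): a compressed oop is recovered as
// base + (narrow_value << shift).  Unscaled mode uses base == NULL and
// shift == 0, ZeroBased mode uses base == NULL with a non-zero shift, and
// only HeapBased mode needs a non-NULL base.
static inline oop example_decode_narrow_oop(narrowOop v, address base, int shift) {
  // NULL handling omitted; real decoding special-cases a zero narrow oop.
  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
}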
690 char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
691 size_t base = 0;
692 #ifdef _LP64
693 if (UseCompressedOops) {
694 assert(mode == UnscaledNarrowOop ||
695 mode == ZeroBasedNarrowOop ||
696 mode == HeapBasedNarrowOop, "mode is invalid");
697 const size_t total_size = heap_size + HeapBaseMinAddress;
698 // Return specified base for the first request.
699 if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
700 base = HeapBaseMinAddress;
701 } else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
702 if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
703 (Universe::narrow_oop_shift() == 0)) {
704 // Use 32-bit oops without encoding and
705 // place heap's top on the 4Gb boundary
706 base = (NarrowOopHeapMax - heap_size);
707 } else {
708 // Can't reserve with NarrowOopShift == 0
709 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
710 if (mode == UnscaledNarrowOop ||
711 mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax) {
712 // Use zero based compressed oops with encoding and
713 // place heap's top on the 32Gb boundary in case
714 // total_size > 4Gb or failed to reserve below 4Gb.
715 base = (OopEncodingHeapMax - heap_size);
716 }
717 }
718 } else {
719 // Can't reserve below 32Gb.
720 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
721 }
722 // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
723 // used in ReservedHeapSpace() constructors.
724 // The final values will be set in initialize_heap() below.
725 if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
726 // Use zero based compressed oops
727 Universe::set_narrow_oop_base(NULL);
728 // Don't need guard page for implicit checks in indexed
729 // addressing mode with zero based Compressed Oops.
730 Universe::set_narrow_oop_use_implicit_null_checks(true);
731 } else {
732 // Set to a non-NULL value so the ReservedSpace ctor computes
733 // the correct no-access prefix.
734 // The final value will be set in initialize_heap() below.
735 Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
736 #ifdef _WIN64
737 if (UseLargePages) {
738 // Cannot allocate guard pages for implicit checks in indexed
739 // addressing mode when large pages are specified on windows.
740 Universe::set_narrow_oop_use_implicit_null_checks(false);
741 }
742 #endif // _WIN64
743 }
744 }
745 #endif
746 return (char*)base; // also return NULL (don't care) for 32-bit VM
747 }
749 jint Universe::initialize_heap() {
751 if (UseParallelGC) {
752 #ifndef SERIALGC
753 Universe::_collectedHeap = new ParallelScavengeHeap();
754 #else // SERIALGC
755 fatal("UseParallelGC not supported in java kernel vm.");
756 #endif // SERIALGC
758 } else if (UseG1GC) {
759 #ifndef SERIALGC
760 G1CollectorPolicy* g1p = new G1CollectorPolicy();
761 G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
762 Universe::_collectedHeap = g1h;
763 #else // SERIALGC
764 fatal("UseG1GC not supported in java kernel vm.");
765 #endif // SERIALGC
767 } else {
768 GenCollectorPolicy *gc_policy;
770 if (UseSerialGC) {
771 gc_policy = new MarkSweepPolicy();
772 } else if (UseConcMarkSweepGC) {
773 #ifndef SERIALGC
774 if (UseAdaptiveSizePolicy) {
775 gc_policy = new ASConcurrentMarkSweepPolicy();
776 } else {
777 gc_policy = new ConcurrentMarkSweepPolicy();
778 }
779 #else // SERIALGC
780 fatal("UseConcMarkSweepGC not supported in java kernel vm.");
781 #endif // SERIALGC
782 } else { // default old generation
783 gc_policy = new MarkSweepPolicy();
784 }
786 Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
787 }
789 jint status = Universe::heap()->initialize();
790 if (status != JNI_OK) {
791 return status;
792 }
794 #ifdef _LP64
795 if (UseCompressedOops) {
796 // Subtract a page because something can get allocated at heap base.
797 // This also makes implicit null checking work, because the
798 // memory+1 page below heap_base needs to cause a signal.
799 // See needs_explicit_null_check.
800 // Only set the heap base for compressed oops because it indicates
801 // compressed oops for pstack code.
802 bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
803 if (verbose) {
804 tty->cr();
805 tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
806 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
807 }
808 if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
809 // Can't reserve heap below 32Gb.
810 Universe::set_narrow_oop_base(Universe::heap()->base() - os::vm_page_size());
811 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
812 if (verbose) {
813 tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
814 }
815 } else {
816 Universe::set_narrow_oop_base(0);
817 if (verbose) {
818 tty->print(", zero based Compressed Oops");
819 }
820 #ifdef _WIN64
821 if (!Universe::narrow_oop_use_implicit_null_checks()) {
822 // Don't need guard page for implicit checks in indexed addressing
823 // mode with zero based Compressed Oops.
824 Universe::set_narrow_oop_use_implicit_null_checks(true);
825 }
826 #endif // _WIN64
827 if((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
828 // Can't reserve heap below 4Gb.
829 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
830 } else {
831 Universe::set_narrow_oop_shift(0);
832 if (verbose) {
833 tty->print(", 32-bits Oops");
834 }
835 }
836 }
837 if (verbose) {
838 tty->cr();
839 tty->cr();
840 }
841 }
842 assert(Universe::narrow_oop_base() == (Universe::heap()->base() - os::vm_page_size()) ||
843 Universe::narrow_oop_base() == NULL, "invalid value");
844 assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
845 Universe::narrow_oop_shift() == 0, "invalid value");
846 #endif
848 // We will never reach the CATCH below since Exceptions::_throw will cause
849 // the VM to exit if an exception is thrown during initialization
851 if (UseTLAB) {
852 assert(Universe::heap()->supports_tlab_allocation(),
853 "Should support thread-local allocation buffers");
854 ThreadLocalAllocBuffer::startup_initialization();
855 }
856 return JNI_OK;
857 }
860 // Reserve the Java heap, which is now the same for all GCs.
861 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
862 // Add in the class metaspace area so the classes in the headers can
863 // be compressed the same as instances.
864 size_t total_reserved = align_size_up(heap_size + ClassMetaspaceSize, alignment);
865 char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
867 ReservedHeapSpace total_rs(total_reserved, alignment, UseLargePages, addr);
869 if (UseCompressedOops) {
870 if (addr != NULL && !total_rs.is_reserved()) {
871 // Failed to reserve at specified address - the requested memory
872 // region is taken already, for example, by 'java' launcher.
873 // Try again to reserve the heap higher.
874 addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
876 ReservedHeapSpace total_rs0(total_reserved, alignment,
877 UseLargePages, addr);
879 if (addr != NULL && !total_rs0.is_reserved()) {
880 // Failed to reserve at specified address again - give up.
881 addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
882 assert(addr == NULL, "");
884 ReservedHeapSpace total_rs1(total_reserved, alignment,
885 UseLargePages, addr);
886 total_rs = total_rs1;
887 } else {
888 total_rs = total_rs0;
889 }
890 }
891 }
893 if (!total_rs.is_reserved()) {
894 vm_exit_during_initialization(err_msg("Could not reserve enough space for object heap %d bytes", total_reserved));
895 return total_rs;
896 }
898 // Split the reserved space into main Java heap and a space for classes
899 // so that they can be compressed using the same algorithm as compressed oops
900 ReservedSpace heap_rs = total_rs.first_part(heap_size);
901 ReservedSpace class_rs = total_rs.last_part(heap_size, alignment);
902 Metaspace::initialize_class_space(class_rs);
903 return heap_rs;
904 }
907 // It's the caller's responsibility to ensure glitch-freedom
908 // (if required).
909 void Universe::update_heap_info_at_gc() {
910 _heap_capacity_at_last_gc = heap()->capacity();
911 _heap_used_at_last_gc = heap()->used();
912 }
916 void universe2_init() {
917 EXCEPTION_MARK;
918 Universe::genesis(CATCH);
919 // Although we'd like to verify here that the state of the heap
920 // is good, we can't because the main thread has not yet added
921 // itself to the threads list (so, using current interfaces
922 // we can't "fill" its TLAB), unless TLABs are disabled.
923 if (VerifyBeforeGC && !UseTLAB &&
924 Universe::heap()->total_collections() >= VerifyGCStartAt) {
925 Universe::heap()->prepare_for_verify();
926 Universe::verify(); // make sure we're starting with a clean slate
927 }
928 }
931 // This function is defined in JVM.cpp
932 extern void initialize_converter_functions();
934 bool universe_post_init() {
935 assert(!is_init_completed(), "Error: initialization not yet completed!");
936 Universe::_fully_initialized = true;
937 EXCEPTION_MARK;
938 { ResourceMark rm;
939 Interpreter::initialize(); // needed for interpreter entry points
940 if (!UseSharedSpaces) {
941 HandleMark hm(THREAD);
942 KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
943 Universe::reinitialize_vtable_of(ok_h, CHECK_false);
944 Universe::reinitialize_itables(CHECK_false);
945 }
946 }
948 HandleMark hm(THREAD);
949 Klass* k;
950 instanceKlassHandle k_h;
951 // Setup preallocated empty java.lang.Class array
952 Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
954 // Setup preallocated OutOfMemoryError errors
955 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
956 k_h = instanceKlassHandle(THREAD, k);
957 Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
958 Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false);
959 Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
960 Universe::_out_of_memory_error_gc_overhead_limit =
961 k_h->allocate_instance(CHECK_false);
963 // Setup preallocated NullPointerException
964 // (this is currently used for a cheap & dirty solution in compiler exception handling)
965 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
966 Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
967 // Setup preallocated ArithmeticException
968 // (this is currently used for a cheap & dirty solution in compiler exception handling)
969 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
970 Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
971 // Virtual Machine Error for when we get into a situation we can't resolve
972 k = SystemDictionary::resolve_or_fail(
973 vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
974 bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
975 if (!linked) {
976 tty->print_cr("Unable to link/verify VirtualMachineError class");
977 return false; // initialization failed
978 }
979 Universe::_virtual_machine_error_instance =
980 InstanceKlass::cast(k)->allocate_instance(CHECK_false);
982 Universe::_vm_exception = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
984 if (!DumpSharedSpaces) {
985 // These are the only Java fields that are currently set during shared space dumping.
986 // We prefer to not handle this generally, so we always reinitialize these detail messages.
987 Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
988 java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
990 msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
991 java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg());
993 msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
994 java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
996 msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
997 java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
999 msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
1000 java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
1002 // Setup the array of errors that have preallocated backtrace
1003 k = Universe::_out_of_memory_error_java_heap->klass();
1004 assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
1005 k_h = instanceKlassHandle(THREAD, k);
1007 int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
1008 Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
1009 for (int i=0; i<len; i++) {
1010 oop err = k_h->allocate_instance(CHECK_false);
1011 Handle err_h = Handle(THREAD, err);
1012 java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
1013 Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
1014 }
1015 Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
1016 }
1019 // Setup static method for registering finalizers
1020 // The finalizer klass must be linked before looking up the method, in
1021 // case it needs to get rewritten.
1022 InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
1023 Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
1024 vmSymbols::register_method_name(),
1025 vmSymbols::register_method_signature());
1026 if (m == NULL || !m->is_static()) {
1027 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1028 "java.lang.ref.Finalizer.register", false);
1029 }
1030 Universe::_finalizer_register_cache->init(
1031 SystemDictionary::Finalizer_klass(), m, CHECK_false);
1033 // Resolve on first use and initialize class.
1034 // Note: No race-condition here, since a resolve will always return the same result
1036 // Setup method for security checks
1037 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
1038 k_h = instanceKlassHandle(THREAD, k);
1039 k_h->link_class(CHECK_false);
1040 m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
1041 if (m == NULL || m->is_static()) {
1042 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1043 "java.lang.reflect.Method.invoke", false);
1044 }
1045 Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
1047 // Setup method for registering loaded classes in class loader vector
1048 InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
1049 m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
1050 if (m == NULL || m->is_static()) {
1051 THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
1052 "java.lang.ClassLoader.addClass", false);
1053 }
1054 Universe::_loader_addClass_cache->init(
1055 SystemDictionary::ClassLoader_klass(), m, CHECK_false);
1057 // The following initializes converter functions for serialization in
1058 // JVM.cpp. If we clean up the StrictMath code above we may want to find
1059 // a better solution for this as well.
1060 initialize_converter_functions();
1062 // This needs to be done before the first scavenge/gc, since
1063 // it's an input to soft ref clearing policy.
1064 {
1065 MutexLocker x(Heap_lock);
1066 Universe::update_heap_info_at_gc();
1067 }
1069 // ("weak") refs processing infrastructure initialization
1070 Universe::heap()->post_initialize();
1072 // Initialize performance counters for metaspaces
1073 MetaspaceCounters::initialize_performance_counters();
1075 GC_locker::unlock(); // allow gc after bootstrapping
1077 MemoryService::set_universe_heap(Universe::_collectedHeap);
1078 return true;
1079 }
1082 void Universe::compute_base_vtable_size() {
1083 _base_vtable_size = ClassLoader::compute_Object_vtable();
1084 }
1087 // %%% The Universe::flush_foo methods belong in CodeCache.
1089 // Flushes compiled methods dependent on dependee.
1090 void Universe::flush_dependents_on(instanceKlassHandle dependee) {
1091 assert_lock_strong(Compile_lock);
1093 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1095 // CodeCache can only be updated by a thread_in_VM and they will all be
1096 // stopped during the safepoint so CodeCache will be safe to update without
1097 // holding the CodeCache_lock.
1099 KlassDepChange changes(dependee);
1101 // Compute the dependent nmethods
1102 if (CodeCache::mark_for_deoptimization(changes) > 0) {
1103 // At least one nmethod has been marked for deoptimization
1104 VM_Deoptimize op;
1105 VMThread::execute(&op);
1106 }
1107 }
1109 // Flushes compiled methods dependent on a particular CallSite
1110 // instance when its target is different than the given MethodHandle.
1111 void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
1112 assert_lock_strong(Compile_lock);
1114 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1116 // CodeCache can only be updated by a thread_in_VM and they will all be
1117 // stopped during the safepoint so CodeCache will be safe to update without
1118 // holding the CodeCache_lock.
1120 CallSiteDepChange changes(call_site(), method_handle());
1122 // Compute the dependent nmethods that have a reference to a
1123 // CallSite object. We use InstanceKlass::mark_dependent_nmethod
1124 // directly instead of CodeCache::mark_for_deoptimization because we
1125 // want dependents on the call site class only not all classes in
1126 // the ContextStream.
1127 int marked = 0;
1128 {
1129 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1130 InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
1131 marked = call_site_klass->mark_dependent_nmethods(changes);
1132 }
1133 if (marked > 0) {
1134 // At least one nmethod has been marked for deoptimization
1135 VM_Deoptimize op;
1136 VMThread::execute(&op);
1137 }
1138 }
1140 #ifdef HOTSWAP
1141 // Flushes compiled methods dependent on dependee in the evolutionary sense
1142 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
1143 // --- Compile_lock is not held. However we are at a safepoint.
1144 assert_locked_or_safepoint(Compile_lock);
1145 if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
1147 // CodeCache can only be updated by a thread_in_VM and they will all be
1148 // stopped during the safepoint so CodeCache will be safe to update without
1149 // holding the CodeCache_lock.
1151 // Compute the dependent nmethods
1152 if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
1153 // At least one nmethod has been marked for deoptimization
1155 // All this already happens inside a VM_Operation, so we'll do all the work here.
1156 // Stuff copied from VM_Deoptimize and modified slightly.
1158 // We do not want any GCs to happen while we are in the middle of this VM operation
1159 ResourceMark rm;
1160 DeoptimizationMarker dm;
1162 // Deoptimize all activations depending on marked nmethods
1163 Deoptimization::deoptimize_dependents();
1165 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1166 CodeCache::make_marked_nmethods_not_entrant();
1167 }
1168 }
1169 #endif // HOTSWAP
1172 // Flushes compiled methods dependent on dependee
1173 void Universe::flush_dependents_on_method(methodHandle m_h) {
1174 // --- Compile_lock is not held. However we are at a safepoint.
1175 assert_locked_or_safepoint(Compile_lock);
1177 // CodeCache can only be updated by a thread_in_VM and they will all be
1178 // stopped during the safepoint so CodeCache will be safe to update without
1179 // holding the CodeCache_lock.
1181 // Compute the dependent nmethods
1182 if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
1183 // At least one nmethod has been marked for deoptimization
1185 // All this already happens inside a VM_Operation, so we'll do all the work here.
1186 // Stuff copied from VM_Deoptimize and modified slightly.
1188 // We do not want any GCs to happen while we are in the middle of this VM operation
1189 ResourceMark rm;
1190 DeoptimizationMarker dm;
1192 // Deoptimize all activations depending on marked nmethods
1193 Deoptimization::deoptimize_dependents();
1195 // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
1196 CodeCache::make_marked_nmethods_not_entrant();
1197 }
1198 }
1200 void Universe::print() {
1201 print_on(gclog_or_tty);
1202 }
1204 void Universe::print_on(outputStream* st, bool extended) {
1205 st->print_cr("Heap");
1206 if (!extended) {
1207 heap()->print_on(st);
1208 } else {
1209 heap()->print_extended_on(st);
1210 }
1211 }
1213 void Universe::print_heap_at_SIGBREAK() {
1214 if (PrintHeapAtSIGBREAK) {
1215 MutexLocker hl(Heap_lock);
1216 print_on(tty);
1217 tty->cr();
1218 tty->flush();
1219 }
1220 }
1222 void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
1223 st->print_cr("{Heap before GC invocations=%u (full %u):",
1224 heap()->total_collections(),
1225 heap()->total_full_collections());
1226 if (!PrintHeapAtGCExtended || ignore_extended) {
1227 heap()->print_on(st);
1228 } else {
1229 heap()->print_extended_on(st);
1230 }
1231 }
1233 void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
1234 st->print_cr("Heap after GC invocations=%u (full %u):",
1235 heap()->total_collections(),
1236 heap()->total_full_collections());
1237 if (!PrintHeapAtGCExtended || ignore_extended) {
1238 heap()->print_on(st);
1239 } else {
1240 heap()->print_extended_on(st);
1241 }
1242 st->print_cr("}");
1243 }
1245 void Universe::verify(bool silent, VerifyOption option) {
1246 // The use of _verify_in_progress is a temporary workaround for
1247 // 6320749. Don't bother with creating a class to set and clear
1248 // it since it is only used in this method and the control flow is
1249 // straightforward.
1250 _verify_in_progress = true;
1252 COMPILER2_PRESENT(
1253 assert(!DerivedPointerTable::is_active(),
1254 "DPT should not be active during verification "
1255 "(of thread stacks below)");
1256 )
1258 ResourceMark rm;
1259 HandleMark hm; // Handles created during verification can be zapped
1260 _verify_count++;
1262 if (!silent) gclog_or_tty->print("[Verifying ");
1263 if (!silent) gclog_or_tty->print("threads ");
1264 Threads::verify();
1265 heap()->verify(silent, option);
1267 if (!silent) gclog_or_tty->print("syms ");
1268 SymbolTable::verify();
1269 if (!silent) gclog_or_tty->print("strs ");
1270 StringTable::verify();
1271 {
1272 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1273 if (!silent) gclog_or_tty->print("zone ");
1274 CodeCache::verify();
1275 }
1276 if (!silent) gclog_or_tty->print("dict ");
1277 SystemDictionary::verify();
1278 #ifndef PRODUCT
1279 if (!silent) gclog_or_tty->print("cldg ");
1280 ClassLoaderDataGraph::verify();
1281 #endif
1282 if (!silent) gclog_or_tty->print("hand ");
1283 JNIHandles::verify();
1284 if (!silent) gclog_or_tty->print("C-heap ");
1285 os::check_heap();
1286 if (!silent) gclog_or_tty->print("code cache ");
1287 CodeCache::verify_oops();
1288 if (!silent) gclog_or_tty->print_cr("]");
1290 _verify_in_progress = false;
1291 }
1293 // Oop verification (see MacroAssembler::verify_oop)
1295 static uintptr_t _verify_oop_data[2] = {0, (uintptr_t)-1};
1296 static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
1299 static void calculate_verify_data(uintptr_t verify_data[2],
1300 HeapWord* low_boundary,
1301 HeapWord* high_boundary) {
1302 assert(low_boundary < high_boundary, "bad interval");
1304 // decide which low-order bits we require to be clear:
1305 size_t alignSize = MinObjAlignmentInBytes;
1306 size_t min_object_size = CollectedHeap::min_fill_size();
1308 // make an inclusive limit:
1309 uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
1310 uintptr_t min = (uintptr_t)low_boundary;
1311 assert(min < max, "bad interval");
1312 uintptr_t diff = max ^ min;
1314 // throw away enough low-order bits to make the diff vanish
1315 uintptr_t mask = (uintptr_t)(-1);
1316 while ((mask & diff) != 0)
1317 mask <<= 1;
1318 uintptr_t bits = (min & mask);
1319 assert(bits == (max & mask), "correct mask");
1320 // check an intermediate value between min and max, just to make sure:
1321 assert(bits == ((min + (max-min)/2) & mask), "correct mask");
1323 // require address alignment, too:
1324 mask |= (alignSize - 1);
1326 if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
1327 assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
1328 }
1329 verify_data[0] = mask;
1330 verify_data[1] = bits;
1331 }
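// A minimal sketch of how the (mask, bits) pair computed above can be used
// (hypothetical helper, not part of this file): an address that lies in the
// verified range and is aligned to MinObjAlignmentInBytes passes a single
// and-compare, which is the filter MacroAssembler::verify_oop relies on.
static inline bool example_verify_oop_address(uintptr_t addr, const uintptr_t verify_data[2]) {
  // verify_data[0] is the mask, verify_data[1] the expected bit pattern.
  return (addr & verify_data[0]) == verify_data[1];
}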
1334 // Oop verification (see MacroAssembler::verify_oop)
1335 #ifndef PRODUCT
1337 uintptr_t Universe::verify_oop_mask() {
1338 MemRegion m = heap()->reserved_region();
1339 calculate_verify_data(_verify_oop_data,
1340 m.start(),
1341 m.end());
1342 return _verify_oop_data[0];
1343 }
1347 uintptr_t Universe::verify_oop_bits() {
1348 verify_oop_mask();
1349 return _verify_oop_data[1];
1350 }
1352 uintptr_t Universe::verify_mark_mask() {
1353 return markOopDesc::lock_mask_in_place;
1354 }
1356 uintptr_t Universe::verify_mark_bits() {
1357 intptr_t mask = verify_mark_mask();
1358 intptr_t bits = (intptr_t)markOopDesc::prototype();
1359 assert((bits & ~mask) == 0, "no stray header bits");
1360 return bits;
1361 }
1362 #endif // PRODUCT
1365 void Universe::compute_verify_oop_data() {
1366 verify_oop_mask();
1367 verify_oop_bits();
1368 verify_mark_mask();
1369 verify_mark_bits();
1370 }
1373 void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
1374 if (!UseSharedSpaces) {
1375 _klass = k;
1376 }
1377 #ifndef PRODUCT
1378 else {
1379 // sharing initialization should have already set up _klass
1380 assert(_klass != NULL, "just checking");
1381 }
1382 #endif
1384 _method_idnum = m->method_idnum();
1385 assert(_method_idnum >= 0, "sanity check");
1386 }
1389 ActiveMethodOopsCache::~ActiveMethodOopsCache() {
1390 if (_prev_methods != NULL) {
1391 delete _prev_methods;
1392 _prev_methods = NULL;
1393 }
1394 }
1397 void ActiveMethodOopsCache::add_previous_version(Method* const method) {
1398 assert(Thread::current()->is_VM_thread(),
1399 "only VMThread can add previous versions");
1401 // Only append the previous method if it is executing on the stack.
1402 if (method->on_stack()) {
1404 if (_prev_methods == NULL) {
1405 // This is the first previous version so make some space.
1406 // Start with 2 elements under the assumption that the class
1407 // won't be redefined much.
1408 _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
1409 }
1411 // RC_TRACE macro has an embedded ResourceMark
1412 RC_TRACE(0x00000100,
1413 ("add: %s(%s): adding prev version ref for cached method @%d",
1414 method->name()->as_C_string(), method->signature()->as_C_string(),
1415 _prev_methods->length()));
1417 _prev_methods->append(method);
1418 }
1421 // Since the caller is the VMThread and we are at a safepoint, this is a good
1422 // time to clear out unused method references.
1424 if (_prev_methods == NULL) return;
1426 for (int i = _prev_methods->length() - 1; i >= 0; i--) {
1427 Method* method = _prev_methods->at(i);
1428 assert(method != NULL, "weak method ref was unexpectedly cleared");
1430 if (!method->on_stack()) {
1431 // This method isn't running anymore so remove it
1432 _prev_methods->remove_at(i);
1433 MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
1434 } else {
1435 // RC_TRACE macro has an embedded ResourceMark
1436 RC_TRACE(0x00000400, ("add: %s(%s): previous cached method @%d is alive",
1437 method->name()->as_C_string(), method->signature()->as_C_string(), i));
1438 }
1439 }
1440 } // end add_previous_version()
1443 bool ActiveMethodOopsCache::is_same_method(Method* const method) const {
1444 InstanceKlass* ik = InstanceKlass::cast(klass());
1445 Method* check_method = ik->method_with_idnum(method_idnum());
1446 assert(check_method != NULL, "sanity check");
1447 if (check_method == method) {
1448 // done with the easy case
1449 return true;
1450 }
1452 if (_prev_methods != NULL) {
1453 // The cached method has been redefined at least once so search
1454 // the previous versions for a match.
1455 for (int i = 0; i < _prev_methods->length(); i++) {
1456 check_method = _prev_methods->at(i);
1457 if (check_method == method) {
1458 // a previous version matches
1459 return true;
1460 }
1461 }
1462 }
1464 // either no previous versions or no previous version matched
1465 return false;
1466 }
1469 Method* LatestMethodOopCache::get_Method() {
1470 InstanceKlass* ik = InstanceKlass::cast(klass());
1471 Method* m = ik->method_with_idnum(method_idnum());
1472 assert(m != NULL, "sanity check");
1473 return m;
1474 }
1477 #ifdef ASSERT
1478 // Release dummy object(s) at bottom of heap
1479 bool Universe::release_fullgc_alot_dummy() {
1480 MutexLocker ml(FullGCALot_lock);
1481 if (_fullgc_alot_dummy_array != NULL) {
1482 if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
1483 // No more dummies to release, release entire array instead
1484 _fullgc_alot_dummy_array = NULL;
1485 return false;
1486 }
1487 if (!UseConcMarkSweepGC) {
1488 // Release dummy at bottom of old generation
1489 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1490 }
1491 // Release dummy at bottom of permanent generation
1492 _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
1493 }
1494 return true;
1495 }
1497 #endif // ASSERT