src/share/vm/memory/universe.cpp

author       tschatzl
date         Wed, 11 Sep 2013 16:25:02 +0200
changeset    5701:40136aa2cdb1
parent       5694:7944aba7ba41
child        5711:23ae5a04724d
permissions  -rw-r--r--

8010722: assert: failed: heap size is too big for compressed oops
Summary: Take conservative assumptions about the alignment required by the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes
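
A minimal sketch of the idea behind the fix, using two names that also appear in this file (OopEncodingHeapMax and Arguments::conservative_max_heap_alignment()); the body below is illustrative, not the actual patch:

    // Sketch: derive the compressed-oops heap-size limit from a conservative
    // upper bound over all collectors' alignment requirements, computed up
    // front, instead of from alignment values that themselves depend on the
    // chosen heap size (which is what caused the circular dependencies).
    size_t max_heap_for_compressed_oops() {
      size_t alignment = Arguments::conservative_max_heap_alignment();
      return OopEncodingHeapMax - alignment;
    }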

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/classLoader.hpp"
    27 #include "classfile/classLoaderData.hpp"
    28 #include "classfile/javaClasses.hpp"
    29 #include "classfile/symbolTable.hpp"
    30 #include "classfile/systemDictionary.hpp"
    31 #include "classfile/vmSymbols.hpp"
    32 #include "code/codeCache.hpp"
    33 #include "code/dependencies.hpp"
    34 #include "gc_interface/collectedHeap.inline.hpp"
    35 #include "interpreter/interpreter.hpp"
    36 #include "memory/cardTableModRefBS.hpp"
    37 #include "memory/gcLocker.inline.hpp"
    38 #include "memory/genCollectedHeap.hpp"
    39 #include "memory/genRemSet.hpp"
    40 #include "memory/generation.hpp"
    41 #include "memory/metadataFactory.hpp"
    42 #include "memory/metaspaceShared.hpp"
    43 #include "memory/oopFactory.hpp"
    44 #include "memory/space.hpp"
    45 #include "memory/universe.hpp"
    46 #include "memory/universe.inline.hpp"
    47 #include "oops/constantPool.hpp"
    48 #include "oops/instanceClassLoaderKlass.hpp"
    49 #include "oops/instanceKlass.hpp"
    50 #include "oops/instanceMirrorKlass.hpp"
    51 #include "oops/instanceRefKlass.hpp"
    52 #include "oops/oop.inline.hpp"
    53 #include "oops/typeArrayKlass.hpp"
    54 #include "prims/jvmtiRedefineClassesTrace.hpp"
    55 #include "runtime/arguments.hpp"
    56 #include "runtime/deoptimization.hpp"
    57 #include "runtime/fprofiler.hpp"
    58 #include "runtime/handles.inline.hpp"
    59 #include "runtime/init.hpp"
    60 #include "runtime/java.hpp"
    61 #include "runtime/javaCalls.hpp"
    62 #include "runtime/sharedRuntime.hpp"
    63 #include "runtime/synchronizer.hpp"
    64 #include "runtime/thread.inline.hpp"
    65 #include "runtime/timer.hpp"
    66 #include "runtime/vm_operations.hpp"
    67 #include "services/memoryService.hpp"
    68 #include "utilities/copy.hpp"
    69 #include "utilities/events.hpp"
    70 #include "utilities/hashtable.inline.hpp"
    71 #include "utilities/preserveException.hpp"
    72 #include "utilities/macros.hpp"
    73 #if INCLUDE_ALL_GCS
    74 #include "gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp"
    75 #include "gc_implementation/concurrentMarkSweep/cmsCollectorPolicy.hpp"
    76 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
    77 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    78 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
    79 #endif // INCLUDE_ALL_GCS
    81 // Known objects
    82 Klass* Universe::_boolArrayKlassObj                 = NULL;
    83 Klass* Universe::_byteArrayKlassObj                 = NULL;
    84 Klass* Universe::_charArrayKlassObj                 = NULL;
    85 Klass* Universe::_intArrayKlassObj                  = NULL;
    86 Klass* Universe::_shortArrayKlassObj                = NULL;
    87 Klass* Universe::_longArrayKlassObj                 = NULL;
    88 Klass* Universe::_singleArrayKlassObj               = NULL;
    89 Klass* Universe::_doubleArrayKlassObj               = NULL;
    90 Klass* Universe::_typeArrayKlassObjs[T_VOID+1]      = { NULL /*, NULL...*/ };
    91 Klass* Universe::_objectArrayKlassObj               = NULL;
    92 oop Universe::_int_mirror                             = NULL;
    93 oop Universe::_float_mirror                           = NULL;
    94 oop Universe::_double_mirror                          = NULL;
    95 oop Universe::_byte_mirror                            = NULL;
    96 oop Universe::_bool_mirror                            = NULL;
    97 oop Universe::_char_mirror                            = NULL;
    98 oop Universe::_long_mirror                            = NULL;
    99 oop Universe::_short_mirror                           = NULL;
   100 oop Universe::_void_mirror                            = NULL;
   101 oop Universe::_mirrors[T_VOID+1]                      = { NULL /*, NULL...*/ };
   102 oop Universe::_main_thread_group                      = NULL;
   103 oop Universe::_system_thread_group                    = NULL;
   104 objArrayOop Universe::_the_empty_class_klass_array    = NULL;
   105 Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
   106 oop Universe::_the_null_string                        = NULL;
   107 oop Universe::_the_min_jint_string                   = NULL;
   108 LatestMethodCache* Universe::_finalizer_register_cache = NULL;
   109 LatestMethodCache* Universe::_loader_addClass_cache    = NULL;
   110 LatestMethodCache* Universe::_pd_implies_cache         = NULL;
   111 oop Universe::_out_of_memory_error_java_heap          = NULL;
   112 oop Universe::_out_of_memory_error_metaspace          = NULL;
   113 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
   114 oop Universe::_out_of_memory_error_array_size         = NULL;
   115 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
   116 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
   117 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
   118 bool Universe::_verify_in_progress                    = false;
   119 oop Universe::_null_ptr_exception_instance            = NULL;
   120 oop Universe::_arithmetic_exception_instance          = NULL;
   121 oop Universe::_virtual_machine_error_instance         = NULL;
   122 oop Universe::_vm_exception                           = NULL;
   123 Array<int>* Universe::_the_empty_int_array            = NULL;
   124 Array<u2>* Universe::_the_empty_short_array           = NULL;
   125 Array<Klass*>* Universe::_the_empty_klass_array     = NULL;
   126 Array<Method*>* Universe::_the_empty_method_array   = NULL;
   128 // These variables are guarded by FullGCALot_lock.
   129 debug_only(objArrayOop Universe::_fullgc_alot_dummy_array = NULL;)
   130 debug_only(int Universe::_fullgc_alot_dummy_next      = 0;)
   132 // Heap
   133 int             Universe::_verify_count = 0;
   135 int             Universe::_base_vtable_size = 0;
   136 bool            Universe::_bootstrapping = false;
   137 bool            Universe::_fully_initialized = false;
   139 size_t          Universe::_heap_capacity_at_last_gc;
   140 size_t          Universe::_heap_used_at_last_gc = 0;
   142 CollectedHeap*  Universe::_collectedHeap = NULL;
   144 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
   145 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
   146 address Universe::_narrow_ptrs_base;
   148 void Universe::basic_type_classes_do(void f(Klass*)) {
   149   f(boolArrayKlassObj());
   150   f(byteArrayKlassObj());
   151   f(charArrayKlassObj());
   152   f(intArrayKlassObj());
   153   f(shortArrayKlassObj());
   154   f(longArrayKlassObj());
   155   f(singleArrayKlassObj());
   156   f(doubleArrayKlassObj());
   157 }
   159 void Universe::oops_do(OopClosure* f, bool do_all) {
   161   f->do_oop((oop*) &_int_mirror);
   162   f->do_oop((oop*) &_float_mirror);
   163   f->do_oop((oop*) &_double_mirror);
   164   f->do_oop((oop*) &_byte_mirror);
   165   f->do_oop((oop*) &_bool_mirror);
   166   f->do_oop((oop*) &_char_mirror);
   167   f->do_oop((oop*) &_long_mirror);
   168   f->do_oop((oop*) &_short_mirror);
   169   f->do_oop((oop*) &_void_mirror);
   171   for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
   172     f->do_oop((oop*) &_mirrors[i]);
   173   }
   174   assert(_mirrors[0] == NULL && _mirrors[T_BOOLEAN - 1] == NULL, "checking");
   176   f->do_oop((oop*)&_the_empty_class_klass_array);
   177   f->do_oop((oop*)&_the_null_string);
   178   f->do_oop((oop*)&_the_min_jint_string);
   179   f->do_oop((oop*)&_out_of_memory_error_java_heap);
   180   f->do_oop((oop*)&_out_of_memory_error_metaspace);
   181   f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
   182   f->do_oop((oop*)&_out_of_memory_error_array_size);
   183   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
    184   f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
   185   f->do_oop((oop*)&_null_ptr_exception_instance);
   186   f->do_oop((oop*)&_arithmetic_exception_instance);
   187   f->do_oop((oop*)&_virtual_machine_error_instance);
   188   f->do_oop((oop*)&_main_thread_group);
   189   f->do_oop((oop*)&_system_thread_group);
   190   f->do_oop((oop*)&_vm_exception);
   191   debug_only(f->do_oop((oop*)&_fullgc_alot_dummy_array);)
   192 }
   194 // Serialize metadata in and out of CDS archive, not oops.
   195 void Universe::serialize(SerializeClosure* f, bool do_all) {
   197   f->do_ptr((void**)&_boolArrayKlassObj);
   198   f->do_ptr((void**)&_byteArrayKlassObj);
   199   f->do_ptr((void**)&_charArrayKlassObj);
   200   f->do_ptr((void**)&_intArrayKlassObj);
   201   f->do_ptr((void**)&_shortArrayKlassObj);
   202   f->do_ptr((void**)&_longArrayKlassObj);
   203   f->do_ptr((void**)&_singleArrayKlassObj);
   204   f->do_ptr((void**)&_doubleArrayKlassObj);
   205   f->do_ptr((void**)&_objectArrayKlassObj);
   207   {
   208     for (int i = 0; i < T_VOID+1; i++) {
   209       if (_typeArrayKlassObjs[i] != NULL) {
   210         assert(i >= T_BOOLEAN, "checking");
   211         f->do_ptr((void**)&_typeArrayKlassObjs[i]);
   212       } else if (do_all) {
   213         f->do_ptr((void**)&_typeArrayKlassObjs[i]);
   214       }
   215     }
   216   }
   218   f->do_ptr((void**)&_the_array_interfaces_array);
   219   f->do_ptr((void**)&_the_empty_int_array);
   220   f->do_ptr((void**)&_the_empty_short_array);
   221   f->do_ptr((void**)&_the_empty_method_array);
   222   f->do_ptr((void**)&_the_empty_klass_array);
   223   _finalizer_register_cache->serialize(f);
   224   _loader_addClass_cache->serialize(f);
   225   _pd_implies_cache->serialize(f);
   226 }
   228 void Universe::check_alignment(uintx size, uintx alignment, const char* name) {
   229   if (size < alignment || size % alignment != 0) {
   230     vm_exit_during_initialization(
   231       err_msg("Size of %s (" UINTX_FORMAT " bytes) must be aligned to " UINTX_FORMAT " bytes", name, size, alignment));
   232   }
   233 }
   235 void initialize_basic_type_klass(Klass* k, TRAPS) {
   236   Klass* ok = SystemDictionary::Object_klass();
   237   if (UseSharedSpaces) {
   238     assert(k->super() == ok, "u3");
   239     k->restore_unshareable_info(CHECK);
   240   } else {
   241     k->initialize_supers(ok, CHECK);
   242   }
   243   k->append_to_sibling_list();
   244 }
   246 void Universe::genesis(TRAPS) {
   247   ResourceMark rm;
   249   { FlagSetting fs(_bootstrapping, true);
   251     { MutexLocker mc(Compile_lock);
   253       // determine base vtable size; without that we cannot create the array klasses
   254       compute_base_vtable_size();
   256       if (!UseSharedSpaces) {
   257         _boolArrayKlassObj      = TypeArrayKlass::create_klass(T_BOOLEAN, sizeof(jboolean), CHECK);
   258         _charArrayKlassObj      = TypeArrayKlass::create_klass(T_CHAR,    sizeof(jchar),    CHECK);
   259         _singleArrayKlassObj    = TypeArrayKlass::create_klass(T_FLOAT,   sizeof(jfloat),   CHECK);
   260         _doubleArrayKlassObj    = TypeArrayKlass::create_klass(T_DOUBLE,  sizeof(jdouble),  CHECK);
   261         _byteArrayKlassObj      = TypeArrayKlass::create_klass(T_BYTE,    sizeof(jbyte),    CHECK);
   262         _shortArrayKlassObj     = TypeArrayKlass::create_klass(T_SHORT,   sizeof(jshort),   CHECK);
   263         _intArrayKlassObj       = TypeArrayKlass::create_klass(T_INT,     sizeof(jint),     CHECK);
   264         _longArrayKlassObj      = TypeArrayKlass::create_klass(T_LONG,    sizeof(jlong),    CHECK);
   266         _typeArrayKlassObjs[T_BOOLEAN] = _boolArrayKlassObj;
   267         _typeArrayKlassObjs[T_CHAR]    = _charArrayKlassObj;
   268         _typeArrayKlassObjs[T_FLOAT]   = _singleArrayKlassObj;
   269         _typeArrayKlassObjs[T_DOUBLE]  = _doubleArrayKlassObj;
   270         _typeArrayKlassObjs[T_BYTE]    = _byteArrayKlassObj;
   271         _typeArrayKlassObjs[T_SHORT]   = _shortArrayKlassObj;
   272         _typeArrayKlassObjs[T_INT]     = _intArrayKlassObj;
   273         _typeArrayKlassObjs[T_LONG]    = _longArrayKlassObj;
   275         ClassLoaderData* null_cld = ClassLoaderData::the_null_class_loader_data();
   277         _the_array_interfaces_array = MetadataFactory::new_array<Klass*>(null_cld, 2, NULL, CHECK);
   278         _the_empty_int_array        = MetadataFactory::new_array<int>(null_cld, 0, CHECK);
   279         _the_empty_short_array      = MetadataFactory::new_array<u2>(null_cld, 0, CHECK);
   280         _the_empty_method_array     = MetadataFactory::new_array<Method*>(null_cld, 0, CHECK);
   281         _the_empty_klass_array      = MetadataFactory::new_array<Klass*>(null_cld, 0, CHECK);
   282       }
   283     }
   285     vmSymbols::initialize(CHECK);
   287     SystemDictionary::initialize(CHECK);
   289     Klass* ok = SystemDictionary::Object_klass();
   291     _the_null_string            = StringTable::intern("null", CHECK);
   292     _the_min_jint_string       = StringTable::intern("-2147483648", CHECK);
   294     if (UseSharedSpaces) {
   295       // Verify shared interfaces array.
   296       assert(_the_array_interfaces_array->at(0) ==
   297              SystemDictionary::Cloneable_klass(), "u3");
   298       assert(_the_array_interfaces_array->at(1) ==
   299              SystemDictionary::Serializable_klass(), "u3");
   300     } else {
   301       // Set up shared interfaces array.  (Do this before supers are set up.)
   302       _the_array_interfaces_array->at_put(0, SystemDictionary::Cloneable_klass());
   303       _the_array_interfaces_array->at_put(1, SystemDictionary::Serializable_klass());
   304     }
   306     initialize_basic_type_klass(boolArrayKlassObj(), CHECK);
   307     initialize_basic_type_klass(charArrayKlassObj(), CHECK);
   308     initialize_basic_type_klass(singleArrayKlassObj(), CHECK);
   309     initialize_basic_type_klass(doubleArrayKlassObj(), CHECK);
   310     initialize_basic_type_klass(byteArrayKlassObj(), CHECK);
   311     initialize_basic_type_klass(shortArrayKlassObj(), CHECK);
   312     initialize_basic_type_klass(intArrayKlassObj(), CHECK);
   313     initialize_basic_type_klass(longArrayKlassObj(), CHECK);
   314   } // end of core bootstrapping
   316   // Maybe this could be lifted up now that object array can be initialized
    317   // during bootstrapping.
   319   // OLD
    320   // Initialize _objectArrayKlass after core bootstrapping to make
   321   // sure the super class is set up properly for _objectArrayKlass.
   322   // ---
   323   // NEW
   324   // Since some of the old system object arrays have been converted to
   325   // ordinary object arrays, _objectArrayKlass will be loaded when
   326   // SystemDictionary::initialize(CHECK); is run. See the extra check
   327   // for Object_klass_loaded in objArrayKlassKlass::allocate_objArray_klass_impl.
   328   _objectArrayKlassObj = InstanceKlass::
   329     cast(SystemDictionary::Object_klass())->array_klass(1, CHECK);
   330   // OLD
   331   // Add the class to the class hierarchy manually to make sure that
   332   // its vtable is initialized after core bootstrapping is completed.
   333   // ---
   334   // New
   335   // Have already been initialized.
   336   _objectArrayKlassObj->append_to_sibling_list();
   338   // Compute is_jdk version flags.
   339   // Only 1.3 or later has the java.lang.Shutdown class.
   340   // Only 1.4 or later has the java.lang.CharSequence interface.
   341   // Only 1.5 or later has the java.lang.management.MemoryUsage class.
   342   if (JDK_Version::is_partially_initialized()) {
   343     uint8_t jdk_version;
   344     Klass* k = SystemDictionary::resolve_or_null(
   345         vmSymbols::java_lang_management_MemoryUsage(), THREAD);
   346     CLEAR_PENDING_EXCEPTION; // ignore exceptions
   347     if (k == NULL) {
   348       k = SystemDictionary::resolve_or_null(
   349           vmSymbols::java_lang_CharSequence(), THREAD);
   350       CLEAR_PENDING_EXCEPTION; // ignore exceptions
   351       if (k == NULL) {
   352         k = SystemDictionary::resolve_or_null(
   353             vmSymbols::java_lang_Shutdown(), THREAD);
   354         CLEAR_PENDING_EXCEPTION; // ignore exceptions
   355         if (k == NULL) {
   356           jdk_version = 2;
   357         } else {
   358           jdk_version = 3;
   359         }
   360       } else {
   361         jdk_version = 4;
   362       }
   363     } else {
   364       jdk_version = 5;
   365     }
   366     JDK_Version::fully_initialize(jdk_version);
   367   }
   369   #ifdef ASSERT
   370   if (FullGCALot) {
   371     // Allocate an array of dummy objects.
   372     // We'd like these to be at the bottom of the old generation,
   373     // so that when we free one and then collect,
   374     // (almost) the whole heap moves
   375     // and we find out if we actually update all the oops correctly.
   376     // But we can't allocate directly in the old generation,
   377     // so we allocate wherever, and hope that the first collection
   378     // moves these objects to the bottom of the old generation.
   379     // We can allocate directly in the permanent generation, so we do.
   380     int size;
   381     if (UseConcMarkSweepGC) {
   382       warning("Using +FullGCALot with concurrent mark sweep gc "
   383               "will not force all objects to relocate");
   384       size = FullGCALotDummies;
   385     } else {
   386       size = FullGCALotDummies * 2;
   387     }
   388     objArrayOop    naked_array = oopFactory::new_objArray(SystemDictionary::Object_klass(), size, CHECK);
   389     objArrayHandle dummy_array(THREAD, naked_array);
   390     int i = 0;
   391     while (i < size) {
   392         // Allocate dummy in old generation
   393       oop dummy = InstanceKlass::cast(SystemDictionary::Object_klass())->allocate_instance(CHECK);
   394       dummy_array->obj_at_put(i++, dummy);
   395     }
   396     {
   397       // Only modify the global variable inside the mutex.
   398       // If we had a race to here, the other dummy_array instances
   399       // and their elements just get dropped on the floor, which is fine.
   400       MutexLocker ml(FullGCALot_lock);
   401       if (_fullgc_alot_dummy_array == NULL) {
   402         _fullgc_alot_dummy_array = dummy_array();
   403       }
   404     }
   405     assert(i == _fullgc_alot_dummy_array->length(), "just checking");
   406   }
   407   #endif
   409   // Initialize dependency array for null class loader
   410   ClassLoaderData::the_null_class_loader_data()->init_dependencies(CHECK);
   412 }
   414 // CDS support for patching vtables in metadata in the shared archive.
   415 // All types inherited from Metadata have vtables, but not types inherited
   416 // from MetaspaceObj, because the latter does not have virtual functions.
   417 // If the metadata type has a vtable, it cannot be shared in the read-only
   418 // section of the CDS archive, because the vtable pointer is patched.
   419 static inline void add_vtable(void** list, int* n, void* o, int count) {
   420   guarantee((*n) < count, "vtable list too small");
   421   void* vtable = dereference_vptr(o);
   422   assert(*(void**)(vtable) != NULL, "invalid vtable");
   423   list[(*n)++] = vtable;
   424 }
   426 void Universe::init_self_patching_vtbl_list(void** list, int count) {
   427   int n = 0;
   428   { InstanceKlass o;          add_vtable(list, &n, &o, count); }
   429   { InstanceClassLoaderKlass o; add_vtable(list, &n, &o, count); }
   430   { InstanceMirrorKlass o;    add_vtable(list, &n, &o, count); }
   431   { InstanceRefKlass o;       add_vtable(list, &n, &o, count); }
   432   { TypeArrayKlass o;         add_vtable(list, &n, &o, count); }
   433   { ObjArrayKlass o;          add_vtable(list, &n, &o, count); }
   434   { Method o;                 add_vtable(list, &n, &o, count); }
   435   { ConstantPool o;           add_vtable(list, &n, &o, count); }
   436 }
   438 void Universe::initialize_basic_type_mirrors(TRAPS) {
   439     assert(_int_mirror==NULL, "basic type mirrors already initialized");
   440     _int_mirror     =
   441       java_lang_Class::create_basic_type_mirror("int",    T_INT, CHECK);
   442     _float_mirror   =
   443       java_lang_Class::create_basic_type_mirror("float",  T_FLOAT,   CHECK);
   444     _double_mirror  =
   445       java_lang_Class::create_basic_type_mirror("double", T_DOUBLE,  CHECK);
   446     _byte_mirror    =
   447       java_lang_Class::create_basic_type_mirror("byte",   T_BYTE, CHECK);
   448     _bool_mirror    =
   449       java_lang_Class::create_basic_type_mirror("boolean",T_BOOLEAN, CHECK);
   450     _char_mirror    =
   451       java_lang_Class::create_basic_type_mirror("char",   T_CHAR, CHECK);
   452     _long_mirror    =
   453       java_lang_Class::create_basic_type_mirror("long",   T_LONG, CHECK);
   454     _short_mirror   =
   455       java_lang_Class::create_basic_type_mirror("short",  T_SHORT,   CHECK);
   456     _void_mirror    =
   457       java_lang_Class::create_basic_type_mirror("void",   T_VOID, CHECK);
   459     _mirrors[T_INT]     = _int_mirror;
   460     _mirrors[T_FLOAT]   = _float_mirror;
   461     _mirrors[T_DOUBLE]  = _double_mirror;
   462     _mirrors[T_BYTE]    = _byte_mirror;
   463     _mirrors[T_BOOLEAN] = _bool_mirror;
   464     _mirrors[T_CHAR]    = _char_mirror;
   465     _mirrors[T_LONG]    = _long_mirror;
   466     _mirrors[T_SHORT]   = _short_mirror;
   467     _mirrors[T_VOID]    = _void_mirror;
   468   //_mirrors[T_OBJECT]  = InstanceKlass::cast(_object_klass)->java_mirror();
   469   //_mirrors[T_ARRAY]   = InstanceKlass::cast(_object_klass)->java_mirror();
   470 }
   472 void Universe::fixup_mirrors(TRAPS) {
   473   // Bootstrap problem: all classes gets a mirror (java.lang.Class instance) assigned eagerly,
   474   // but we cannot do that for classes created before java.lang.Class is loaded. Here we simply
   475   // walk over permanent objects created so far (mostly classes) and fixup their mirrors. Note
   476   // that the number of objects allocated at this point is very small.
   477   assert(SystemDictionary::Class_klass_loaded(), "java.lang.Class should be loaded");
   478   HandleMark hm(THREAD);
   479   // Cache the start of the static fields
   480   InstanceMirrorKlass::init_offset_of_static_fields();
   482   GrowableArray <Klass*>* list = java_lang_Class::fixup_mirror_list();
   483   int list_length = list->length();
   484   for (int i = 0; i < list_length; i++) {
   485     Klass* k = list->at(i);
   486     assert(k->is_klass(), "List should only hold classes");
   487     EXCEPTION_MARK;
   488     KlassHandle kh(THREAD, k);
   489     java_lang_Class::fixup_mirror(kh, CATCH);
    490   }
   491   delete java_lang_Class::fixup_mirror_list();
   492   java_lang_Class::set_fixup_mirror_list(NULL);
   493 }
   495 static bool has_run_finalizers_on_exit = false;
   497 void Universe::run_finalizers_on_exit() {
   498   if (has_run_finalizers_on_exit) return;
   499   has_run_finalizers_on_exit = true;
   501   // Called on VM exit. This ought to be run in a separate thread.
   502   if (TraceReferenceGC) tty->print_cr("Callback to run finalizers on exit");
   503   {
   504     PRESERVE_EXCEPTION_MARK;
   505     KlassHandle finalizer_klass(THREAD, SystemDictionary::Finalizer_klass());
   506     JavaValue result(T_VOID);
   507     JavaCalls::call_static(
   508       &result,
   509       finalizer_klass,
   510       vmSymbols::run_finalizers_on_exit_name(),
   511       vmSymbols::void_method_signature(),
   512       THREAD
   513     );
   514     // Ignore any pending exceptions
   515     CLEAR_PENDING_EXCEPTION;
   516   }
   517 }
   520 // initialize_vtable could cause gc if
   521 // 1) we specified true to initialize_vtable and
   522 // 2) this ran after gc was enabled
   523 // In case those ever change we use handles for oops
   524 void Universe::reinitialize_vtable_of(KlassHandle k_h, TRAPS) {
   525   // init vtable of k and all subclasses
   526   Klass* ko = k_h();
   527   klassVtable* vt = ko->vtable();
   528   if (vt) vt->initialize_vtable(false, CHECK);
   529   if (ko->oop_is_instance()) {
   530     InstanceKlass* ik = (InstanceKlass*)ko;
   531     for (KlassHandle s_h(THREAD, ik->subklass());
   532          s_h() != NULL;
   533          s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
   534       reinitialize_vtable_of(s_h, CHECK);
   535     }
   536   }
   537 }
   540 void initialize_itable_for_klass(Klass* k, TRAPS) {
   541   InstanceKlass::cast(k)->itable()->initialize_itable(false, CHECK);
   542 }
   545 void Universe::reinitialize_itables(TRAPS) {
   546   SystemDictionary::classes_do(initialize_itable_for_klass, CHECK);
   548 }
   551 bool Universe::on_page_boundary(void* addr) {
   552   return ((uintptr_t) addr) % os::vm_page_size() == 0;
   553 }
   556 bool Universe::should_fill_in_stack_trace(Handle throwable) {
    557   // Never attempt to fill in the stack trace of preallocated errors that do not have
    558   // a backtrace. These errors are kept alive forever and may be "re-used" when all
    559   // preallocated errors with backtrace have been consumed. Also need to avoid
    560   // a potential loop which could happen if an out-of-memory error occurs when attempting
    561   // to allocate the backtrace.
   562   return ((throwable() != Universe::_out_of_memory_error_java_heap) &&
   563           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
   564           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
   565           (throwable() != Universe::_out_of_memory_error_array_size) &&
   566           (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
   567 }
   570 oop Universe::gen_out_of_memory_error(oop default_err) {
   571   // generate an out of memory error:
    572   // - if there is a preallocated error with backtrace available then return it with
   573   //   a filled in stack trace.
   574   // - if there are no preallocated errors with backtrace available then return
   575   //   an error without backtrace.
   576   int next;
   577   if (_preallocated_out_of_memory_error_avail_count > 0) {
   578     next = (int)Atomic::add(-1, &_preallocated_out_of_memory_error_avail_count);
   579     assert(next < (int)PreallocatedOutOfMemoryErrorCount, "avail count is corrupt");
   580   } else {
   581     next = -1;
   582   }
   583   if (next < 0) {
   584     // all preallocated errors have been used.
   585     // return default
   586     return default_err;
   587   } else {
    588     // get the error object at the slot and set it to NULL so that the
   589     // array isn't keeping it alive anymore.
   590     oop exc = preallocated_out_of_memory_errors()->obj_at(next);
   591     assert(exc != NULL, "slot has been used already");
   592     preallocated_out_of_memory_errors()->obj_at_put(next, NULL);
   594     // use the message from the default error
   595     oop msg = java_lang_Throwable::message(default_err);
   596     assert(msg != NULL, "no message");
   597     java_lang_Throwable::set_message(exc, msg);
   599     // populate the stack trace and return it.
   600     java_lang_Throwable::fill_in_stack_trace_of_preallocated_backtrace(exc);
   601     return exc;
   602   }
   603 }
   605 static intptr_t non_oop_bits = 0;
   607 void* Universe::non_oop_word() {
    608   // Neither the high bits nor the low bits of this value are allowed
   609   // to look like (respectively) the high or low bits of a real oop.
   610   //
   611   // High and low are CPU-specific notions, but low always includes
   612   // the low-order bit.  Since oops are always aligned at least mod 4,
   613   // setting the low-order bit will ensure that the low half of the
   614   // word will never look like that of a real oop.
   615   //
   616   // Using the OS-supplied non-memory-address word (usually 0 or -1)
   617   // will take care of the high bits, however many there are.
   619   if (non_oop_bits == 0) {
   620     non_oop_bits = (intptr_t)os::non_memory_address_word() | 1;
   621   }
   623   return (void*)non_oop_bits;
   624 }
   626 jint universe_init() {
   627   assert(!Universe::_fully_initialized, "called after initialize_vtables");
   628   guarantee(1 << LogHeapWordSize == sizeof(HeapWord),
   629          "LogHeapWordSize is incorrect.");
   630   guarantee(sizeof(oop) >= sizeof(HeapWord), "HeapWord larger than oop?");
   631   guarantee(sizeof(oop) % sizeof(HeapWord) == 0,
    632             "oop size is not a multiple of HeapWord size");
   633   TraceTime timer("Genesis", TraceStartupTime);
   634   GC_locker::lock();  // do not allow gc during bootstrapping
   635   JavaClasses::compute_hard_coded_offsets();
   637   jint status = Universe::initialize_heap();
   638   if (status != JNI_OK) {
   639     return status;
   640   }
   642   Metaspace::global_initialize();
   644   // Create memory for metadata.  Must be after initializing heap for
   645   // DumpSharedSpaces.
   646   ClassLoaderData::init_null_class_loader_data();
   648   // We have a heap so create the Method* caches before
   649   // Metaspace::initialize_shared_spaces() tries to populate them.
   650   Universe::_finalizer_register_cache = new LatestMethodCache();
   651   Universe::_loader_addClass_cache    = new LatestMethodCache();
   652   Universe::_pd_implies_cache         = new LatestMethodCache();
   654   if (UseSharedSpaces) {
   655     // Read the data structures supporting the shared spaces (shared
   656     // system dictionary, symbol table, etc.).  After that, access to
   657     // the file (other than the mapped regions) is no longer needed, and
   658     // the file is closed. Closing the file does not affect the
   659     // currently mapped regions.
   660     MetaspaceShared::initialize_shared_spaces();
   661     StringTable::create_table();
   662   } else {
   663     SymbolTable::create_table();
   664     StringTable::create_table();
   665     ClassLoader::create_package_info_table();
   666   }
   668   return JNI_OK;
   669 }
   671 // Choose the heap base address and oop encoding mode
   672 // when compressed oops are used:
    673 // Unscaled  - Use 32-bit oops without encoding when
   674 //     NarrowOopHeapBaseMin + heap_size < 4Gb
   675 // ZeroBased - Use zero based compressed oops with encoding when
   676 //     NarrowOopHeapBaseMin + heap_size < 32Gb
   677 // HeapBased - Use compressed oops with heap base + encoding.
   679 // 4Gb
   680 static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
   681 // 32Gb
   682 // OopEncodingHeapMax == NarrowOopHeapMax << LogMinObjAlignmentInBytes;
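// [Editorial sketch, not part of the original file] A worked example of the
// two limits above, assuming the default 8-byte object alignment
// (LogMinObjAlignmentInBytes == 3):
//   NarrowOopHeapMax   = 2^32      =  4Gb  (oops stored as raw 32-bit values)
//   OopEncodingHeapMax = 2^32 << 3 = 32Gb  (32-bit values decoded with a
//                                           3-bit shift)
// So a heap whose top stays below 4Gb can run Unscaled, one below 32Gb can
// run ZeroBased, and anything larger needs HeapBased with a non-NULL base.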
   684 char* Universe::preferred_heap_base(size_t heap_size, size_t alignment, NARROW_OOP_MODE mode) {
   685   assert(is_size_aligned((size_t)OopEncodingHeapMax, alignment), "Must be");
   686   assert(is_size_aligned((size_t)NarrowOopHeapMax, alignment), "Must be");
   687   assert(is_size_aligned(heap_size, alignment), "Must be");
   689   uintx heap_base_min_address_aligned = align_size_up(HeapBaseMinAddress, alignment);
   691   size_t base = 0;
   692 #ifdef _LP64
   693   if (UseCompressedOops) {
   694     assert(mode == UnscaledNarrowOop  ||
   695            mode == ZeroBasedNarrowOop ||
   696            mode == HeapBasedNarrowOop, "mode is invalid");
   697     const size_t total_size = heap_size + heap_base_min_address_aligned;
   698     // Return specified base for the first request.
   699     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
   700       base = heap_base_min_address_aligned;
   702     // If the total size is small enough to allow UnscaledNarrowOop then
   703     // just use UnscaledNarrowOop.
   704     } else if ((total_size <= OopEncodingHeapMax) && (mode != HeapBasedNarrowOop)) {
   705       if ((total_size <= NarrowOopHeapMax) && (mode == UnscaledNarrowOop) &&
   706           (Universe::narrow_oop_shift() == 0)) {
    707         // Use 32-bit oops without encoding and
   708         // place heap's top on the 4Gb boundary
   709         base = (NarrowOopHeapMax - heap_size);
   710       } else {
   711         // Can't reserve with NarrowOopShift == 0
   712         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
   713         if (mode == UnscaledNarrowOop ||
    714             (mode == ZeroBasedNarrowOop && total_size <= NarrowOopHeapMax)) {
   715           // Use zero based compressed oops with encoding and
   716           // place heap's top on the 32Gb boundary in case
   717           // total_size > 4Gb or failed to reserve below 4Gb.
   718           base = (OopEncodingHeapMax - heap_size);
   719         }
   720       }
   721     } else {
    722       // UnscaledNarrowOop encoding didn't work, no base was found for ZeroBasedNarrowOop,
    723       // or HeapBasedNarrowOop encoding was requested.  So we can't reserve below 32Gb.
   724       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
   725     }
   727     // Set narrow_oop_base and narrow_oop_use_implicit_null_checks
   728     // used in ReservedHeapSpace() constructors.
   729     // The final values will be set in initialize_heap() below.
   730     if ((base != 0) && ((base + heap_size) <= OopEncodingHeapMax)) {
   731       // Use zero based compressed oops
   732       Universe::set_narrow_oop_base(NULL);
   733       // Don't need guard page for implicit checks in indexed
   734       // addressing mode with zero based Compressed Oops.
   735       Universe::set_narrow_oop_use_implicit_null_checks(true);
   736     } else {
   737       // Set to a non-NULL value so the ReservedSpace ctor computes
   738       // the correct no-access prefix.
   739       // The final value will be set in initialize_heap() below.
   740       Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
   741 #ifdef _WIN64
   742       if (UseLargePages) {
   743         // Cannot allocate guard pages for implicit checks in indexed
   744         // addressing mode when large pages are specified on windows.
   745         Universe::set_narrow_oop_use_implicit_null_checks(false);
   746       }
   747 #endif //  _WIN64
   748     }
   749   }
   750 #endif
   752   assert(is_ptr_aligned((char*)base, alignment), "Must be");
   753   return (char*)base; // also return NULL (don't care) for 32-bit VM
   754 }
   756 jint Universe::initialize_heap() {
   758   if (UseParallelGC) {
   759 #if INCLUDE_ALL_GCS
   760     Universe::_collectedHeap = new ParallelScavengeHeap();
   761 #else  // INCLUDE_ALL_GCS
   762     fatal("UseParallelGC not supported in this VM.");
   763 #endif // INCLUDE_ALL_GCS
   765   } else if (UseG1GC) {
   766 #if INCLUDE_ALL_GCS
   767     G1CollectorPolicy* g1p = new G1CollectorPolicy();
   768     G1CollectedHeap* g1h = new G1CollectedHeap(g1p);
   769     Universe::_collectedHeap = g1h;
   770 #else  // INCLUDE_ALL_GCS
    771     fatal("UseG1GC not supported in this VM.");
   772 #endif // INCLUDE_ALL_GCS
   774   } else {
   775     GenCollectorPolicy *gc_policy;
   777     if (UseSerialGC) {
   778       gc_policy = new MarkSweepPolicy();
   779     } else if (UseConcMarkSweepGC) {
   780 #if INCLUDE_ALL_GCS
   781       if (UseAdaptiveSizePolicy) {
   782         gc_policy = new ASConcurrentMarkSweepPolicy();
   783       } else {
   784         gc_policy = new ConcurrentMarkSweepPolicy();
   785       }
   786 #else  // INCLUDE_ALL_GCS
   787     fatal("UseConcMarkSweepGC not supported in this VM.");
   788 #endif // INCLUDE_ALL_GCS
   789     } else { // default old generation
   790       gc_policy = new MarkSweepPolicy();
   791     }
   793     Universe::_collectedHeap = new GenCollectedHeap(gc_policy);
   794   }
   796   jint status = Universe::heap()->initialize();
   797   if (status != JNI_OK) {
   798     return status;
   799   }
   801 #ifdef _LP64
   802   if (UseCompressedOops) {
   803     // Subtract a page because something can get allocated at heap base.
   804     // This also makes implicit null checking work, because the
   805     // memory+1 page below heap_base needs to cause a signal.
   806     // See needs_explicit_null_check.
   807     // Only set the heap base for compressed oops because it indicates
   808     // compressed oops for pstack code.
   809     bool verbose = PrintCompressedOopsMode || (PrintMiscellaneous && Verbose);
   810     if (verbose) {
   811       tty->cr();
   812       tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
   813                  Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
   814     }
   815     if (((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax)) {
   816       // Can't reserve heap below 32Gb.
   817       // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
   818       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
   819       if (verbose) {
   820         tty->print(", %s: "PTR_FORMAT,
   821             narrow_oop_mode_to_string(HeapBasedNarrowOop),
   822             Universe::narrow_oop_base());
   823       }
   824     } else {
   825       Universe::set_narrow_oop_base(0);
   826       if (verbose) {
   827         tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
   828       }
   829 #ifdef _WIN64
   830       if (!Universe::narrow_oop_use_implicit_null_checks()) {
   831         // Don't need guard page for implicit checks in indexed addressing
   832         // mode with zero based Compressed Oops.
   833         Universe::set_narrow_oop_use_implicit_null_checks(true);
   834       }
   835 #endif //  _WIN64
    836       if ((uint64_t)Universe::heap()->reserved_region().end() > NarrowOopHeapMax) {
   837         // Can't reserve heap below 4Gb.
   838         Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
   839       } else {
   840         Universe::set_narrow_oop_shift(0);
   841         if (verbose) {
   842           tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
   843         }
   844       }
   845     }
   847     if (verbose) {
   848       tty->cr();
   849       tty->cr();
   850     }
   851     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());
   852   }
   853   // Universe::narrow_oop_base() is one page below the heap.
   854   assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
   855          os::vm_page_size()) ||
   856          Universe::narrow_oop_base() == NULL, "invalid value");
   857   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
   858          Universe::narrow_oop_shift() == 0, "invalid value");
   859 #endif
   861   // We will never reach the CATCH below since Exceptions::_throw will cause
   862   // the VM to exit if an exception is thrown during initialization
   864   if (UseTLAB) {
   865     assert(Universe::heap()->supports_tlab_allocation(),
   866            "Should support thread-local allocation buffers");
   867     ThreadLocalAllocBuffer::startup_initialization();
   868   }
   869   return JNI_OK;
   870 }
   873 // Reserve the Java heap, which is now the same for all GCs.
   874 ReservedSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
   875   assert(alignment <= Arguments::conservative_max_heap_alignment(),
   876       err_msg("actual alignment "SIZE_FORMAT" must be within maximum heap alignment "SIZE_FORMAT,
   877           alignment, Arguments::conservative_max_heap_alignment()));
   878   size_t total_reserved = align_size_up(heap_size, alignment);
   879   assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
   880       "heap size is too big for compressed oops");
   882   bool use_large_pages = UseLargePages && is_size_aligned(alignment, os::large_page_size());
   883   assert(!UseLargePages
   884       || UseParallelOldGC
   885       || use_large_pages, "Wrong alignment to use large pages");
   887   char* addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::UnscaledNarrowOop);
   889   ReservedHeapSpace total_rs(total_reserved, alignment, use_large_pages, addr);
   891   if (UseCompressedOops) {
   892     if (addr != NULL && !total_rs.is_reserved()) {
   893       // Failed to reserve at specified address - the requested memory
   894       // region is taken already, for example, by 'java' launcher.
    895       // Try again to reserve the heap at a higher address.
   896       addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::ZeroBasedNarrowOop);
   898       ReservedHeapSpace total_rs0(total_reserved, alignment,
   899           use_large_pages, addr);
   901       if (addr != NULL && !total_rs0.is_reserved()) {
   902         // Failed to reserve at specified address again - give up.
   903         addr = Universe::preferred_heap_base(total_reserved, alignment, Universe::HeapBasedNarrowOop);
   904         assert(addr == NULL, "");
   906         ReservedHeapSpace total_rs1(total_reserved, alignment,
   907             use_large_pages, addr);
   908         total_rs = total_rs1;
   909       } else {
   910         total_rs = total_rs0;
   911       }
   912     }
   913   }
   915   if (!total_rs.is_reserved()) {
   916     vm_exit_during_initialization(err_msg("Could not reserve enough space for " SIZE_FORMAT "KB object heap", total_reserved/K));
   917     return total_rs;
   918   }
   920   if (UseCompressedOops) {
   921     // Universe::initialize_heap() will reset this to NULL if unscaled
   922     // or zero-based narrow oops are actually used.
   923     address base = (address)(total_rs.base() - os::vm_page_size());
   924     Universe::set_narrow_oop_base(base);
   925   }
   926   return total_rs;
   927 }
   930 // It's the caller's responsibility to ensure glitch-freedom
   931 // (if required).
   932 void Universe::update_heap_info_at_gc() {
   933   _heap_capacity_at_last_gc = heap()->capacity();
   934   _heap_used_at_last_gc     = heap()->used();
   935 }
   938 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
   939   switch (mode) {
   940     case UnscaledNarrowOop:
   941       return "32-bits Oops";
   942     case ZeroBasedNarrowOop:
   943       return "zero based Compressed Oops";
   944     case HeapBasedNarrowOop:
   945       return "Compressed Oops with base";
   946   }
   948   ShouldNotReachHere();
   949   return "";
   950 }
   953 Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
   954   if (narrow_oop_base() != 0) {
   955     return HeapBasedNarrowOop;
   956   }
   958   if (narrow_oop_shift() != 0) {
   959     return ZeroBasedNarrowOop;
   960   }
   962   return UnscaledNarrowOop;
   963 }
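// [Editorial sketch, not part of the original file] The mode reported above
// corresponds to the decode step applied to a narrow oop; 'decode_sketch' is
// a hypothetical name for illustration:
//
//   oop decode_sketch(narrowOop v) {
//     return (oop)(void*)((uintptr_t)Universe::narrow_oop_base() +
//                         ((uintptr_t)v << Universe::narrow_oop_shift()));
//   }
//
// HeapBasedNarrowOop needs both the add and the shift, ZeroBasedNarrowOop
// drops the add (base is NULL), and UnscaledNarrowOop drops both, so the
// 32-bit value is the address itself.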
   966 void universe2_init() {
   967   EXCEPTION_MARK;
   968   Universe::genesis(CATCH);
   969 }
   972 // This function is defined in JVM.cpp
   973 extern void initialize_converter_functions();
   975 bool universe_post_init() {
   976   assert(!is_init_completed(), "Error: initialization not yet completed!");
   977   Universe::_fully_initialized = true;
   978   EXCEPTION_MARK;
   979   { ResourceMark rm;
   980     Interpreter::initialize();      // needed for interpreter entry points
   981     if (!UseSharedSpaces) {
   982       HandleMark hm(THREAD);
   983       KlassHandle ok_h(THREAD, SystemDictionary::Object_klass());
   984       Universe::reinitialize_vtable_of(ok_h, CHECK_false);
   985       Universe::reinitialize_itables(CHECK_false);
   986     }
   987   }
   989   HandleMark hm(THREAD);
   990   Klass* k;
   991   instanceKlassHandle k_h;
   992     // Setup preallocated empty java.lang.Class array
   993     Universe::_the_empty_class_klass_array = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_false);
   995     // Setup preallocated OutOfMemoryError errors
   996     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false);
   997     k_h = instanceKlassHandle(THREAD, k);
   998     Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false);
   999     Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false);
  1000     Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false);
  1001     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
  1002     Universe::_out_of_memory_error_gc_overhead_limit =
  1003       k_h->allocate_instance(CHECK_false);
  1005     // Setup preallocated NullPointerException
  1006     // (this is currently used for a cheap & dirty solution in compiler exception handling)
  1007     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_NullPointerException(), true, CHECK_false);
  1008     Universe::_null_ptr_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  1009     // Setup preallocated ArithmeticException
  1010     // (this is currently used for a cheap & dirty solution in compiler exception handling)
  1011     k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ArithmeticException(), true, CHECK_false);
  1012     Universe::_arithmetic_exception_instance = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  1013     // Virtual Machine Error for when we get into a situation we can't resolve
  1014     k = SystemDictionary::resolve_or_fail(
  1015       vmSymbols::java_lang_VirtualMachineError(), true, CHECK_false);
  1016     bool linked = InstanceKlass::cast(k)->link_class_or_fail(CHECK_false);
  1017     if (!linked) {
  1018       tty->print_cr("Unable to link/verify VirtualMachineError class");
  1019       return false; // initialization failed
   1020     }
  1021     Universe::_virtual_machine_error_instance =
  1022       InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  1024     Universe::_vm_exception               = InstanceKlass::cast(k)->allocate_instance(CHECK_false);
  1026   if (!DumpSharedSpaces) {
  1027     // These are the only Java fields that are currently set during shared space dumping.
  1028     // We prefer to not handle this generally, so we always reinitialize these detail messages.
  1029     Handle msg = java_lang_String::create_from_str("Java heap space", CHECK_false);
  1030     java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg());
  1032     msg = java_lang_String::create_from_str("Metadata space", CHECK_false);
  1033     java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg());
  1034     msg = java_lang_String::create_from_str("Compressed class space", CHECK_false);
  1035     java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg());
  1037     msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false);
  1038     java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg());
  1040     msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
  1041     java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
  1043     msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
  1044     java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
  1046     // Setup the array of errors that have preallocated backtrace
  1047     k = Universe::_out_of_memory_error_java_heap->klass();
  1048     assert(k->name() == vmSymbols::java_lang_OutOfMemoryError(), "should be out of memory error");
  1049     k_h = instanceKlassHandle(THREAD, k);
  1051     int len = (StackTraceInThrowable) ? (int)PreallocatedOutOfMemoryErrorCount : 0;
  1052     Universe::_preallocated_out_of_memory_error_array = oopFactory::new_objArray(k_h(), len, CHECK_false);
  1053     for (int i=0; i<len; i++) {
  1054       oop err = k_h->allocate_instance(CHECK_false);
  1055       Handle err_h = Handle(THREAD, err);
  1056       java_lang_Throwable::allocate_backtrace(err_h, CHECK_false);
  1057       Universe::preallocated_out_of_memory_errors()->obj_at_put(i, err_h());
   1058     }
  1059     Universe::_preallocated_out_of_memory_error_avail_count = (jint)len;
   1060   }
  1063   // Setup static method for registering finalizers
  1064   // The finalizer klass must be linked before looking up the method, in
  1065   // case it needs to get rewritten.
  1066   InstanceKlass::cast(SystemDictionary::Finalizer_klass())->link_class(CHECK_false);
  1067   Method* m = InstanceKlass::cast(SystemDictionary::Finalizer_klass())->find_method(
  1068                                   vmSymbols::register_method_name(),
  1069                                   vmSymbols::register_method_signature());
  1070   if (m == NULL || !m->is_static()) {
  1071     tty->print_cr("Unable to link/verify Finalizer.register method");
  1072     return false; // initialization failed (cannot throw exception yet)
   1073   }
  1074   Universe::_finalizer_register_cache->init(
  1075     SystemDictionary::Finalizer_klass(), m);
  1077   // Setup method for registering loaded classes in class loader vector
  1078   InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
  1079   m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
  1080   if (m == NULL || m->is_static()) {
  1081     tty->print_cr("Unable to link/verify ClassLoader.addClass method");
  1082     return false; // initialization failed (cannot throw exception yet)
   1083   }
  1084   Universe::_loader_addClass_cache->init(
  1085     SystemDictionary::ClassLoader_klass(), m);
  1087   // Setup method for checking protection domain
  1088   InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
  1089   m = InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->
  1090             find_method(vmSymbols::impliesCreateAccessControlContext_name(),
  1091                         vmSymbols::void_boolean_signature());
  1092   // Allow NULL which should only happen with bootstrapping.
  1093   if (m != NULL) {
  1094     if (m->is_static()) {
  1095       // NoSuchMethodException doesn't actually work because it tries to run the
  1096       // <init> function before java_lang_Class is linked. Print error and exit.
  1097       tty->print_cr("ProtectionDomain.impliesCreateAccessControlContext() has the wrong linkage");
  1098       return false; // initialization failed
   1099     }
   1100     Universe::_pd_implies_cache->init(
   1101       SystemDictionary::ProtectionDomain_klass(), m);
   1102   }
   1104   // The following initializes converter functions for serialization in
  1105   // JVM.cpp. If we clean up the StrictMath code above we may want to find
  1106   // a better solution for this as well.
  1107   initialize_converter_functions();
  1109   // This needs to be done before the first scavenge/gc, since
  1110   // it's an input to soft ref clearing policy.
   1111   {
   1112     MutexLocker x(Heap_lock);
   1113     Universe::update_heap_info_at_gc();
   1114   }
  1116   // ("weak") refs processing infrastructure initialization
  1117   Universe::heap()->post_initialize();
  1119   // Initialize performance counters for metaspaces
  1120   MetaspaceCounters::initialize_performance_counters();
  1121   CompressedClassSpaceCounters::initialize_performance_counters();
  1123   MemoryService::add_metaspace_memory_pools();
  1125   GC_locker::unlock();  // allow gc after bootstrapping
  1127   MemoryService::set_universe_heap(Universe::_collectedHeap);
  1128   return true;
   1129 }
  1132 void Universe::compute_base_vtable_size() {
  1133   _base_vtable_size = ClassLoader::compute_Object_vtable();
   1134 }
  1137 // %%% The Universe::flush_foo methods belong in CodeCache.
  1139 // Flushes compiled methods dependent on dependee.
  1140 void Universe::flush_dependents_on(instanceKlassHandle dependee) {
  1141   assert_lock_strong(Compile_lock);
  1143   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
  1145   // CodeCache can only be updated by a thread_in_VM and they will all be
   1146   // stopped during the safepoint so CodeCache will be safe to update without
  1147   // holding the CodeCache_lock.
  1149   KlassDepChange changes(dependee);
  1151   // Compute the dependent nmethods
  1152   if (CodeCache::mark_for_deoptimization(changes) > 0) {
  1153     // At least one nmethod has been marked for deoptimization
  1154     VM_Deoptimize op;
  1155     VMThread::execute(&op);
   1156   }
   1157 }
  1159 // Flushes compiled methods dependent on a particular CallSite
  1160 // instance when its target is different than the given MethodHandle.
  1161 void Universe::flush_dependents_on(Handle call_site, Handle method_handle) {
  1162   assert_lock_strong(Compile_lock);
  1164   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
  1166   // CodeCache can only be updated by a thread_in_VM and they will all be
   1167   // stopped during the safepoint so CodeCache will be safe to update without
  1168   // holding the CodeCache_lock.
  1170   CallSiteDepChange changes(call_site(), method_handle());
  1172   // Compute the dependent nmethods that have a reference to a
  1173   // CallSite object.  We use InstanceKlass::mark_dependent_nmethod
  1174   // directly instead of CodeCache::mark_for_deoptimization because we
   1175   // want dependents on the call site class only, not all classes in
  1176   // the ContextStream.
  1177   int marked = 0;
   1178   {
  1179     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  1180     InstanceKlass* call_site_klass = InstanceKlass::cast(call_site->klass());
  1181     marked = call_site_klass->mark_dependent_nmethods(changes);
   1182   }
  1183   if (marked > 0) {
  1184     // At least one nmethod has been marked for deoptimization
  1185     VM_Deoptimize op;
  1186     VMThread::execute(&op);
   1187   }
   1188 }
  1190 #ifdef HOTSWAP
  1191 // Flushes compiled methods dependent on dependee in the evolutionary sense
  1192 void Universe::flush_evol_dependents_on(instanceKlassHandle ev_k_h) {
  1193   // --- Compile_lock is not held. However we are at a safepoint.
  1194   assert_locked_or_safepoint(Compile_lock);
  1195   if (CodeCache::number_of_nmethods_with_dependencies() == 0) return;
  1197   // CodeCache can only be updated by a thread_in_VM and they will all be
   1198   // stopped during the safepoint so CodeCache will be safe to update without
  1199   // holding the CodeCache_lock.
  1201   // Compute the dependent nmethods
  1202   if (CodeCache::mark_for_evol_deoptimization(ev_k_h) > 0) {
  1203     // At least one nmethod has been marked for deoptimization
  1205     // All this already happens inside a VM_Operation, so we'll do all the work here.
  1206     // Stuff copied from VM_Deoptimize and modified slightly.
  1208     // We do not want any GCs to happen while we are in the middle of this VM operation
  1209     ResourceMark rm;
  1210     DeoptimizationMarker dm;
  1212     // Deoptimize all activations depending on marked nmethods
  1213     Deoptimization::deoptimize_dependents();
  1215     // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
  1216     CodeCache::make_marked_nmethods_not_entrant();
  1219 #endif // HOTSWAP

// Flushes compiled methods dependent on dependee
void Universe::flush_dependents_on_method(methodHandle m_h) {
  // --- Compile_lock is not held. However we are at a safepoint.
  assert_locked_or_safepoint(Compile_lock);

  // The CodeCache can only be updated by a thread_in_VM, and all such
  // threads are stopped during the safepoint, so the CodeCache is safe
  // to update without holding the CodeCache_lock.

  // Compute the dependent nmethods
  if (CodeCache::mark_for_deoptimization(m_h()) > 0) {
    // At least one nmethod has been marked for deoptimization

    // All this already happens inside a VM_Operation, so we'll do all the work here.
    // Stuff copied from VM_Deoptimize and modified slightly.

    // We do not want any GCs to happen while we are in the middle of this VM operation
    ResourceMark rm;
    DeoptimizationMarker dm;

    // Deoptimize all activations depending on marked nmethods
    Deoptimization::deoptimize_dependents();

    // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
    CodeCache::make_marked_nmethods_not_entrant();
  }
}

void Universe::print() {
  print_on(gclog_or_tty);
}

void Universe::print_on(outputStream* st, bool extended) {
  st->print_cr("Heap");
  if (!extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_at_SIGBREAK() {
  if (PrintHeapAtSIGBREAK) {
    MutexLocker hl(Heap_lock);
    print_on(tty);
    tty->cr();
    tty->flush();
  }
}

void Universe::print_heap_before_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("{Heap before GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
}

void Universe::print_heap_after_gc(outputStream* st, bool ignore_extended) {
  st->print_cr("Heap after GC invocations=%u (full %u):",
               heap()->total_collections(),
               heap()->total_full_collections());
  if (!PrintHeapAtGCExtended || ignore_extended) {
    heap()->print_on(st);
  } else {
    heap()->print_extended_on(st);
  }
  st->print_cr("}");
}
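
// Added note (illustrative only; the counts and indentation are made-up
// sample values): with -XX:+PrintHeapAtGC the two methods above bracket
// each collection in the GC log roughly like this:
//
//   {Heap before GC invocations=4 (full 1):
//    ... output of heap()->print_on(st) ...
//   Heap after GC invocations=5 (full 1):
//    ... output of heap()->print_on(st) ...
//   }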

void Universe::verify(VerifyOption option, const char* prefix, bool silent) {
  // The use of _verify_in_progress is a temporary workaround for
  // 6320749.  Don't bother creating a class to set and clear it,
  // since it is only used in this method and the control flow is
  // straightforward.
  _verify_in_progress = true;

  COMPILER2_PRESENT(
    assert(!DerivedPointerTable::is_active(),
         "DPT should not be active during verification "
         "(of thread stacks below)");
  )

  ResourceMark rm;
  HandleMark hm;  // Handles created during verification can be zapped
  _verify_count++;

  if (!silent) gclog_or_tty->print(prefix);
  if (!silent) gclog_or_tty->print("[Verifying ");
  if (!silent) gclog_or_tty->print("threads ");
  Threads::verify();
  if (!silent) gclog_or_tty->print("heap ");
  heap()->verify(silent, option);
  if (!silent) gclog_or_tty->print("syms ");
  SymbolTable::verify();
  if (!silent) gclog_or_tty->print("strs ");
  StringTable::verify();
  {
    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    if (!silent) gclog_or_tty->print("zone ");
    CodeCache::verify();
  }
  if (!silent) gclog_or_tty->print("dict ");
  SystemDictionary::verify();
#ifndef PRODUCT
  if (!silent) gclog_or_tty->print("cldg ");
  ClassLoaderDataGraph::verify();
#endif
  if (!silent) gclog_or_tty->print("metaspace chunks ");
  MetaspaceAux::verify_free_chunks();
  if (!silent) gclog_or_tty->print("hand ");
  JNIHandles::verify();
  if (!silent) gclog_or_tty->print("C-heap ");
  os::check_heap();
  if (!silent) gclog_or_tty->print("code cache ");
  CodeCache::verify_oops();
  if (!silent) gclog_or_tty->print_cr("]");

  _verify_in_progress = false;
}
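
// Added note (derived from the print calls above): when not silent, the
// method emits a single progress line as each subsystem is verified, e.g.
//
//   [Verifying threads heap syms strs zone dict cldg metaspace chunks hand C-heap code cache ]
//
// ("cldg" appears only in non-product builds, per the #ifndef PRODUCT above).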

// Oop verification (see MacroAssembler::verify_oop)

static uintptr_t _verify_oop_data[2]   = {0, (uintptr_t)-1};
static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};

#ifndef PRODUCT

static void calculate_verify_data(uintptr_t verify_data[2],
                                  HeapWord* low_boundary,
                                  HeapWord* high_boundary) {
  assert(low_boundary < high_boundary, "bad interval");

  // decide which low-order bits we require to be clear:
  size_t alignSize = MinObjAlignmentInBytes;
  size_t min_object_size = CollectedHeap::min_fill_size();

  // make an inclusive limit:
  uintptr_t max = (uintptr_t)high_boundary - min_object_size*wordSize;
  uintptr_t min = (uintptr_t)low_boundary;
  assert(min < max, "bad interval");
  uintptr_t diff = max ^ min;

  // throw away enough low-order bits to make the diff vanish
  uintptr_t mask = (uintptr_t)(-1);
  while ((mask & diff) != 0)
    mask <<= 1;
  uintptr_t bits = (min & mask);
  assert(bits == (max & mask), "correct mask");
  // check an intermediate value between min and max, just to make sure:
  assert(bits == ((min + (max-min)/2) & mask), "correct mask");

  // require address alignment, too:
  mask |= (alignSize - 1);

  if (!(verify_data[0] == 0 && verify_data[1] == (uintptr_t)-1)) {
    assert(verify_data[0] == mask && verify_data[1] == bits, "mask stability");
  }
  verify_data[0] = mask;
  verify_data[1] = bits;
}
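
// Worked example (added for clarity; hypothetical 32-bit values): for a heap
// reserved at [0x20000000, 0x30000000), min = 0x20000000 and max is just
// below 0x30000000, so diff = min ^ max has only bits below bit 28 set.
// The loop shifts mask up to 0xF0000000, and bits = min & mask = 0x20000000.
// With 8-byte object alignment, mask |= 7 yields 0xF0000007, so a pointer p
// passes the (p & mask) == bits check exactly when it lies inside the heap's
// address range and is properly aligned.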

// Oop verification (see MacroAssembler::verify_oop)

uintptr_t Universe::verify_oop_mask() {
  MemRegion m = heap()->reserved_region();
  calculate_verify_data(_verify_oop_data,
                        m.start(),
                        m.end());
  return _verify_oop_data[0];
}

uintptr_t Universe::verify_oop_bits() {
  verify_oop_mask();
  return _verify_oop_data[1];
}
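
// Added note: MacroAssembler::verify_oop consumes this mask/bits pair as a
// cheap range-and-alignment filter. Conceptually (a hedged sketch, not the
// actual generated assembler sequence):
//
//   uintptr_t p = (uintptr_t)obj;
//   assert((p & Universe::verify_oop_mask()) == Universe::verify_oop_bits(),
//          "pointer is outside the heap or misaligned");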

uintptr_t Universe::verify_mark_mask() {
  return markOopDesc::lock_mask_in_place;
}

uintptr_t Universe::verify_mark_bits() {
  intptr_t mask = verify_mark_mask();
  intptr_t bits = (intptr_t)markOopDesc::prototype();
  assert((bits & ~mask) == 0, "no stray header bits");
  return bits;
}
#endif // PRODUCT

void Universe::compute_verify_oop_data() {
  verify_oop_mask();
  verify_oop_bits();
  verify_mark_mask();
  verify_mark_bits();
}

void LatestMethodCache::init(Klass* k, Method* m) {
  if (!UseSharedSpaces) {
    _klass = k;
  }
#ifndef PRODUCT
  else {
    // sharing initialization should have already set up _klass
    assert(_klass != NULL, "just checking");
  }
#endif

  _method_idnum = m->method_idnum();
  assert(_method_idnum >= 0, "sanity check");
}

Method* LatestMethodCache::get_method() {
  if (klass() == NULL) return NULL;
  InstanceKlass* ik = InstanceKlass::cast(klass());
  Method* m = ik->method_with_idnum(method_idnum());
  assert(m != NULL, "sanity check");
  return m;
}
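
// Added note (an interpretation, not from the original source): the cache
// deliberately stores a method idnum rather than a raw Method*, re-resolving
// via method_with_idnum() on every lookup. A method's idnum stays stable
// across RedefineClasses, so the cache keeps returning the latest version of
// the method even after its class has been redefined.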

#ifdef ASSERT
// Release dummy object(s) at bottom of heap
bool Universe::release_fullgc_alot_dummy() {
  MutexLocker ml(FullGCALot_lock);
  if (_fullgc_alot_dummy_array != NULL) {
    if (_fullgc_alot_dummy_next >= _fullgc_alot_dummy_array->length()) {
      // No more dummies to release, release entire array instead
      _fullgc_alot_dummy_array = NULL;
      return false;
    }
    if (!UseConcMarkSweepGC) {
      // Release dummy at bottom of old generation
      _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
    }
    // Release dummy at bottom of permanent generation
    _fullgc_alot_dummy_array->obj_at_put(_fullgc_alot_dummy_next++, NULL);
  }
  return true;
}
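
// Added note (an interpretation, not from the original source): this appears
// to support the debug-only FullGCALot stress mode. One dummy object is
// dropped from the array on each call, so successive full collections find
// newly unreachable objects near the bottom of the heap and the compaction
// paths get exercised.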

#endif // ASSERT
