src/share/vm/runtime/os.cpp

author       tschatzl
date         Wed, 11 Sep 2013 16:25:02 +0200
changeset    5701:40136aa2cdb1
parent       5615:c636758ea616
child        5721:179cd89fb279
permissions  -rw-r--r--

8010722: assert: failed: heap size is too big for compressed oops
Summary: Take conservative assumptions about the required alignment of the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes

     1 /*
     2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/classLoader.hpp"
    27 #include "classfile/javaClasses.hpp"
    28 #include "classfile/systemDictionary.hpp"
    29 #include "classfile/vmSymbols.hpp"
    30 #include "code/icBuffer.hpp"
    31 #include "code/vtableStubs.hpp"
    32 #include "gc_implementation/shared/vmGCOperations.hpp"
    33 #include "interpreter/interpreter.hpp"
    34 #include "memory/allocation.inline.hpp"
    35 #include "oops/oop.inline.hpp"
    36 #include "prims/jvm.h"
    37 #include "prims/jvm_misc.hpp"
    38 #include "prims/privilegedStack.hpp"
    39 #include "runtime/arguments.hpp"
    40 #include "runtime/frame.inline.hpp"
    41 #include "runtime/interfaceSupport.hpp"
    42 #include "runtime/java.hpp"
    43 #include "runtime/javaCalls.hpp"
    44 #include "runtime/mutexLocker.hpp"
    45 #include "runtime/os.hpp"
    46 #include "runtime/stubRoutines.hpp"
    47 #include "runtime/thread.inline.hpp"
    48 #include "services/attachListener.hpp"
    49 #include "services/memTracker.hpp"
    50 #include "services/threadService.hpp"
    51 #include "utilities/defaultStream.hpp"
    52 #include "utilities/events.hpp"
    53 #ifdef TARGET_OS_FAMILY_linux
    54 # include "os_linux.inline.hpp"
    55 #endif
    56 #ifdef TARGET_OS_FAMILY_solaris
    57 # include "os_solaris.inline.hpp"
    58 #endif
    59 #ifdef TARGET_OS_FAMILY_windows
    60 # include "os_windows.inline.hpp"
    61 #endif
    62 #ifdef TARGET_OS_FAMILY_bsd
    63 # include "os_bsd.inline.hpp"
    64 #endif
    66 # include <signal.h>
    68 OSThread*         os::_starting_thread    = NULL;
    69 address           os::_polling_page       = NULL;
    70 volatile int32_t* os::_mem_serialize_page = NULL;
    71 uintptr_t         os::_serialize_page_mask = 0;
    72 long              os::_rand_seed          = 1;
    73 int               os::_processor_count    = 0;
    74 size_t            os::_page_sizes[os::page_sizes_max];
    76 #ifndef PRODUCT
    77 julong os::num_mallocs = 0;         // # of calls to malloc/realloc
    78 julong os::alloc_bytes = 0;         // # of bytes allocated
    79 julong os::num_frees = 0;           // # of calls to free
    80 julong os::free_bytes = 0;          // # of bytes freed
    81 #endif
    83 static juint cur_malloc_words = 0;  // current size for MallocMaxTestWords
    85 void os_init_globals() {
    86   // Called from init_globals().
    87   // See Threads::create_vm() in thread.cpp, and init.cpp.
    88   os::init_globals();
    89 }
    91 // Fill in buffer with current local time as an ISO-8601 string.
    92 // E.g., yyyy-mm-ddThh:mm:ss-zzzz.
    93 // Returns buffer, or NULL if it failed.
    94 // This would mostly be a call to
    95 //     strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
    96 // except that on Windows the %z behaves badly, so we do it ourselves.
    97 // Also, people wanted milliseconds on there,
    98 // and strftime doesn't do milliseconds.
    99 char* os::iso8601_time(char* buffer, size_t buffer_length) {
   100   // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
   101   //                                      1         2
   102   //                             12345678901234567890123456789
   103   static const char* iso8601_format =
   104     "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d";
   105   static const size_t needed_buffer = 29;
   107   // Sanity check the arguments
   108   if (buffer == NULL) {
   109     assert(false, "NULL buffer");
   110     return NULL;
   111   }
   112   if (buffer_length < needed_buffer) {
   113     assert(false, "buffer_length too small");
   114     return NULL;
   115   }
   116   // Get the current time
   117   jlong milliseconds_since_19700101 = javaTimeMillis();
   118   const int milliseconds_per_microsecond = 1000;
   119   const time_t seconds_since_19700101 =
   120     milliseconds_since_19700101 / milliseconds_per_microsecond;
   121   const int milliseconds_after_second =
   122     milliseconds_since_19700101 % milliseconds_per_microsecond;
   123   // Convert the time value to a tm and timezone variable
   124   struct tm time_struct;
   125   if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
   126     assert(false, "Failed localtime_pd");
   127     return NULL;
   128   }
   129 #if defined(_ALLBSD_SOURCE)
   130   const time_t zone = (time_t) time_struct.tm_gmtoff;
   131 #else
   132   const time_t zone = timezone;
   133 #endif
   135   // If daylight savings time is in effect,
   136   // we are 1 hour East of our time zone
   137   const time_t seconds_per_minute = 60;
   138   const time_t minutes_per_hour = 60;
   139   const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour;
   140   time_t UTC_to_local = zone;
   141   if (time_struct.tm_isdst > 0) {
   142     UTC_to_local = UTC_to_local - seconds_per_hour;
   143   }
   144   // Compute the time zone offset.
   145   //    localtime_pd() sets timezone to the difference (in seconds)
   146   //    between UTC and and local time.
   147   //    ISO 8601 says we need the difference between local time and UTC,
   148   //    we change the sign of the localtime_pd() result.
   149   const time_t local_to_UTC = -(UTC_to_local);
    150   // Then we have to figure out if we are ahead (+) or behind (-) UTC.
   151   char sign_local_to_UTC = '+';
   152   time_t abs_local_to_UTC = local_to_UTC;
   153   if (local_to_UTC < 0) {
   154     sign_local_to_UTC = '-';
   155     abs_local_to_UTC = -(abs_local_to_UTC);
   156   }
   157   // Convert time zone offset seconds to hours and minutes.
   158   const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour);
   159   const time_t zone_min =
   160     ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute);
   162   // Print an ISO 8601 date and time stamp into the buffer
   163   const int year = 1900 + time_struct.tm_year;
   164   const int month = 1 + time_struct.tm_mon;
   165   const int printed = jio_snprintf(buffer, buffer_length, iso8601_format,
   166                                    year,
   167                                    month,
   168                                    time_struct.tm_mday,
   169                                    time_struct.tm_hour,
   170                                    time_struct.tm_min,
   171                                    time_struct.tm_sec,
   172                                    milliseconds_after_second,
   173                                    sign_local_to_UTC,
   174                                    zone_hours,
   175                                    zone_min);
   176   if (printed == 0) {
   177     assert(false, "Failed jio_printf");
   178     return NULL;
   179   }
   180   return buffer;
   181 }
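// Illustrative usage sketch (hypothetical caller, not part of this file):
// the destination buffer must be at least needed_buffer (29) bytes long, e.g.
//
//   char buf[32];
//   if (os::iso8601_time(buf, sizeof(buf)) != NULL) {
//     tty->print_cr("timestamp: %s", buf);
//   }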
   183 OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
   184 #ifdef ASSERT
   185   if (!(!thread->is_Java_thread() ||
   186          Thread::current() == thread  ||
   187          Threads_lock->owned_by_self()
   188          || thread->is_Compiler_thread()
   189         )) {
   190     assert(false, "possibility of dangling Thread pointer");
   191   }
   192 #endif
   194   if (p >= MinPriority && p <= MaxPriority) {
   195     int priority = java_to_os_priority[p];
   196     return set_native_priority(thread, priority);
   197   } else {
   198     assert(false, "Should not happen");
   199     return OS_ERR;
   200   }
   201 }
   203 // The mapping from OS priority back to Java priority may be inexact because
   204 // Java priorities can map M:1 with native priorities. If you want the definite
   205 // Java priority then use JavaThread::java_priority()
   206 OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) {
   207   int p;
   208   int os_prio;
   209   OSReturn ret = get_native_priority(thread, &os_prio);
   210   if (ret != OS_OK) return ret;
   212   if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) {
   213     for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ;
   214   } else {
   215     // niceness values are in reverse order
   216     for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ;
   217   }
   218   priority = (ThreadPriority)p;
   219   return OS_OK;
   220 }
   223 // --------------------- sun.misc.Signal (optional) ---------------------
   226 // SIGBREAK is sent by the keyboard to query the VM state
   227 #ifndef SIGBREAK
   228 #define SIGBREAK SIGQUIT
   229 #endif
   231 // sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread.
   234 static void signal_thread_entry(JavaThread* thread, TRAPS) {
   235   os::set_priority(thread, NearMaxPriority);
   236   while (true) {
   237     int sig;
   238     {
    239       // FIXME : Currently we have not decided what the status of this
    240       //         Java thread blocked here should be. Once we decide,
    241       //         we should fix this.
   242       sig = os::signal_wait();
   243     }
   244     if (sig == os::sigexitnum_pd()) {
   245        // Terminate the signal thread
   246        return;
   247     }
   249     switch (sig) {
   250       case SIGBREAK: {
   251         // Check if the signal is a trigger to start the Attach Listener - in that
   252         // case don't print stack traces.
   253         if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
   254           continue;
   255         }
   256         // Print stack traces
   257         // Any SIGBREAK operations added here should make sure to flush
   258         // the output stream (e.g. tty->flush()) after output.  See 4803766.
   259         // Each module also prints an extra carriage return after its output.
   260         VM_PrintThreads op;
   261         VMThread::execute(&op);
   262         VM_PrintJNI jni_op;
   263         VMThread::execute(&jni_op);
   264         VM_FindDeadlocks op1(tty);
   265         VMThread::execute(&op1);
   266         Universe::print_heap_at_SIGBREAK();
   267         if (PrintClassHistogram) {
   268           VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
   269           VMThread::execute(&op1);
   270         }
   271         if (JvmtiExport::should_post_data_dump()) {
   272           JvmtiExport::post_data_dump();
   273         }
   274         break;
   275       }
   276       default: {
   277         // Dispatch the signal to java
   278         HandleMark hm(THREAD);
   279         Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_Signal(), THREAD);
   280         KlassHandle klass (THREAD, k);
   281         if (klass.not_null()) {
   282           JavaValue result(T_VOID);
   283           JavaCallArguments args;
   284           args.push_int(sig);
   285           JavaCalls::call_static(
   286             &result,
   287             klass,
   288             vmSymbols::dispatch_name(),
   289             vmSymbols::int_void_signature(),
   290             &args,
   291             THREAD
   292           );
   293         }
   294         if (HAS_PENDING_EXCEPTION) {
   295           // tty is initialized early so we don't expect it to be null, but
   296           // if it is we can't risk doing an initialization that might
   297           // trigger additional out-of-memory conditions
   298           if (tty != NULL) {
   299             char klass_name[256];
   300             char tmp_sig_name[16];
   301             const char* sig_name = "UNKNOWN";
   302             InstanceKlass::cast(PENDING_EXCEPTION->klass())->
   303               name()->as_klass_external_name(klass_name, 256);
   304             if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
   305               sig_name = tmp_sig_name;
    306             warning("Exception %s occurred dispatching signal %s to handler "
   307                     "- the VM may need to be forcibly terminated",
   308                     klass_name, sig_name );
   309           }
   310           CLEAR_PENDING_EXCEPTION;
   311         }
   312       }
   313     }
   314   }
   315 }
   317 void os::init_before_ergo() {
   318   // We need to initialize large page support here because ergonomics takes some
   319   // decisions depending on large page support and the calculated large page size.
   320   large_page_init();
   321 }
   323 void os::signal_init() {
   324   if (!ReduceSignalUsage) {
   325     // Setup JavaThread for processing signals
   326     EXCEPTION_MARK;
   327     Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
   328     instanceKlassHandle klass (THREAD, k);
   329     instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
   331     const char thread_name[] = "Signal Dispatcher";
   332     Handle string = java_lang_String::create_from_str(thread_name, CHECK);
   334     // Initialize thread_oop to put it into the system threadGroup
   335     Handle thread_group (THREAD, Universe::system_thread_group());
   336     JavaValue result(T_VOID);
   337     JavaCalls::call_special(&result, thread_oop,
   338                            klass,
   339                            vmSymbols::object_initializer_name(),
   340                            vmSymbols::threadgroup_string_void_signature(),
   341                            thread_group,
   342                            string,
   343                            CHECK);
   345     KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
   346     JavaCalls::call_special(&result,
   347                             thread_group,
   348                             group,
   349                             vmSymbols::add_method_name(),
   350                             vmSymbols::thread_void_signature(),
   351                             thread_oop,         // ARG 1
   352                             CHECK);
   354     os::signal_init_pd();
   356     { MutexLocker mu(Threads_lock);
   357       JavaThread* signal_thread = new JavaThread(&signal_thread_entry);
   359       // At this point it may be possible that no osthread was created for the
   360       // JavaThread due to lack of memory. We would have to throw an exception
   361       // in that case. However, since this must work and we do not allow
   362       // exceptions anyway, check and abort if this fails.
   363       if (signal_thread == NULL || signal_thread->osthread() == NULL) {
   364         vm_exit_during_initialization("java.lang.OutOfMemoryError",
   365                                       "unable to create new native thread");
   366       }
   368       java_lang_Thread::set_thread(thread_oop(), signal_thread);
   369       java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
   370       java_lang_Thread::set_daemon(thread_oop());
   372       signal_thread->set_threadObj(thread_oop());
   373       Threads::add(signal_thread);
   374       Thread::start(signal_thread);
   375     }
   376     // Handle ^BREAK
   377     os::signal(SIGBREAK, os::user_handler());
   378   }
   379 }
   382 void os::terminate_signal_thread() {
   383   if (!ReduceSignalUsage)
   384     signal_notify(sigexitnum_pd());
   385 }
   388 // --------------------- loading libraries ---------------------
   390 typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
   391 extern struct JavaVM_ main_vm;
   393 static void* _native_java_library = NULL;
   395 void* os::native_java_library() {
   396   if (_native_java_library == NULL) {
   397     char buffer[JVM_MAXPATHLEN];
   398     char ebuf[1024];
   400     // Try to load verify dll first. In 1.3 java dll depends on it and is not
   401     // always able to find it when the loading executable is outside the JDK.
   402     // In order to keep working with 1.2 we ignore any loading errors.
   403     if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
   404                        "verify")) {
   405       dll_load(buffer, ebuf, sizeof(ebuf));
   406     }
   408     // Load java dll
   409     if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
   410                        "java")) {
   411       _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
   412     }
   413     if (_native_java_library == NULL) {
   414       vm_exit_during_initialization("Unable to load native library", ebuf);
   415     }
   417 #if defined(__OpenBSD__)
   418     // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so
   419     // ignore errors
   420     if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
   421                        "net")) {
   422       dll_load(buffer, ebuf, sizeof(ebuf));
   423     }
   424 #endif
   425   }
   426   static jboolean onLoaded = JNI_FALSE;
   427   if (onLoaded) {
   428     // We may have to wait to fire OnLoad until TLS is initialized.
   429     if (ThreadLocalStorage::is_initialized()) {
   430       // The JNI_OnLoad handling is normally done by method load in
   431       // java.lang.ClassLoader$NativeLibrary, but the VM loads the base library
   432       // explicitly so we have to check for JNI_OnLoad as well
   433       const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS;
   434       JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(
   435           JNI_OnLoad_t, dll_lookup(_native_java_library, onLoadSymbols[0]));
   436       if (JNI_OnLoad != NULL) {
   437         JavaThread* thread = JavaThread::current();
   438         ThreadToNativeFromVM ttn(thread);
   439         HandleMark hm(thread);
   440         jint ver = (*JNI_OnLoad)(&main_vm, NULL);
   441         onLoaded = JNI_TRUE;
   442         if (!Threads::is_supported_jni_version_including_1_1(ver)) {
   443           vm_exit_during_initialization("Unsupported JNI version");
   444         }
   445       }
   446     }
   447   }
   448   return _native_java_library;
   449 }
   451 /*
   452  * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
   453  * If check_lib == true then we are looking for an
   454  * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
   455  * this library is statically linked into the image.
   456  * If check_lib == false then we will look for the appropriate symbol in the
   457  * executable if agent_lib->is_static_lib() == true or in the shared library
   458  * referenced by 'handle'.
   459  */
   460 void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
   461                               const char *syms[], size_t syms_len) {
   462   const char *lib_name;
   463   void *handle = agent_lib->os_lib();
   464   void *entryName = NULL;
   465   char *agent_function_name;
   466   size_t i;
   468   // If checking then use the agent name otherwise test is_static_lib() to
   469   // see how to process this lookup
   470   lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
   471   for (i = 0; i < syms_len; i++) {
   472     agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
   473     if (agent_function_name == NULL) {
   474       break;
   475     }
   476     entryName = dll_lookup(handle, agent_function_name);
   477     FREE_C_HEAP_ARRAY(char, agent_function_name, mtThread);
   478     if (entryName != NULL) {
   479       break;
   480     }
   481   }
   482   return entryName;
   483 }
   485 // See if the passed in agent is statically linked into the VM image.
   486 bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
   487                             size_t syms_len) {
   488   void *ret;
   489   void *proc_handle;
   490   void *save_handle;
   492   if (agent_lib->name() == NULL) {
   493     return false;
   494   }
   495   proc_handle = get_default_process_handle();
   496   // Check for Agent_OnLoad/Attach_lib_name function
   497   save_handle = agent_lib->os_lib();
   498   // We want to look in this process' symbol table.
   499   agent_lib->set_os_lib(proc_handle);
   500   ret = find_agent_function(agent_lib, true, syms, syms_len);
   501   agent_lib->set_os_lib(save_handle);
   502   if (ret != NULL) {
   503     // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
   504     agent_lib->set_os_lib(proc_handle);
   505     agent_lib->set_valid();
   506     agent_lib->set_static_lib(true);
   507     return true;
   508   }
   509   return false;
   510 }
   512 // --------------------- heap allocation utilities ---------------------
   514 char *os::strdup(const char *str, MEMFLAGS flags) {
   515   size_t size = strlen(str);
   516   char *dup_str = (char *)malloc(size + 1, flags);
   517   if (dup_str == NULL) return NULL;
   518   strcpy(dup_str, str);
   519   return dup_str;
   520 }
   524 #ifdef ASSERT
   525 #define space_before             (MallocCushion + sizeof(double))
   526 #define space_after              MallocCushion
   527 #define size_addr_from_base(p)   (size_t*)(p + space_before - sizeof(size_t))
   528 #define size_addr_from_obj(p)    ((size_t*)p - 1)
   529 // MallocCushion: size of extra cushion allocated around objects with +UseMallocOnly
   530 // NB: cannot be debug variable, because these aren't set from the command line until
   531 // *after* the first few allocs already happened
   532 #define MallocCushion            16
   533 #else
   534 #define space_before             0
   535 #define space_after              0
   536 #define size_addr_from_base(p)   should not use w/o ASSERT
   537 #define size_addr_from_obj(p)    should not use w/o ASSERT
   538 #define MallocCushion            0
   539 #endif
   540 #define paranoid                 0  /* only set to 1 if you suspect checking code has bug */
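// Sketch of the debug-build block layout implied by the macros above and by
// os::malloc() below (illustrative only, not part of the original source):
//
//   [ MallocCushion guard | pad | size_t size | user data | MallocCushion guard ]
//    \_______ space_before = MallocCushion + sizeof(double) _______/
//
// The guard bytes are filled with badResourceValue and later checked in
// verify_block(); size_addr_from_obj(p) reads the size word stored just
// before the user data.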
   542 #ifdef ASSERT
   543 inline size_t get_size(void* obj) {
   544   size_t size = *size_addr_from_obj(obj);
   545   if (size < 0) {
   546     fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten ("
   547                   SIZE_FORMAT ")", obj, size));
   548   }
   549   return size;
   550 }
   552 u_char* find_cushion_backwards(u_char* start) {
   553   u_char* p = start;
   554   while (p[ 0] != badResourceValue || p[-1] != badResourceValue ||
   555          p[-2] != badResourceValue || p[-3] != badResourceValue) p--;
   556   // ok, we have four consecutive marker bytes; find start
   557   u_char* q = p - 4;
   558   while (*q == badResourceValue) q--;
   559   return q + 1;
   560 }
   562 u_char* find_cushion_forwards(u_char* start) {
   563   u_char* p = start;
   564   while (p[0] != badResourceValue || p[1] != badResourceValue ||
   565          p[2] != badResourceValue || p[3] != badResourceValue) p++;
   566   // ok, we have four consecutive marker bytes; find end of cushion
   567   u_char* q = p + 4;
   568   while (*q == badResourceValue) q++;
   569   return q - MallocCushion;
   570 }
   572 void print_neighbor_blocks(void* ptr) {
   573   // find block allocated before ptr (not entirely crash-proof)
   574   if (MallocCushion < 4) {
   575     tty->print_cr("### cannot find previous block (MallocCushion < 4)");
   576     return;
   577   }
   578   u_char* start_of_this_block = (u_char*)ptr - space_before;
   579   u_char* end_of_prev_block_data = start_of_this_block - space_after -1;
   580   // look for cushion in front of prev. block
   581   u_char* start_of_prev_block = find_cushion_backwards(end_of_prev_block_data);
   582   ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
   583   u_char* obj = start_of_prev_block + space_before;
   584   if (size <= 0 ) {
    585     // start is bad; may have been confused by OS data in between objects
   586     // search one more backwards
   587     start_of_prev_block = find_cushion_backwards(start_of_prev_block);
   588     size = *size_addr_from_base(start_of_prev_block);
   589     obj = start_of_prev_block + space_before;
   590   }
   592   if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
   593     tty->print_cr("### previous object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
   594   } else {
   595     tty->print_cr("### previous object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
   596   }
   598   // now find successor block
   599   u_char* start_of_next_block = (u_char*)ptr + *size_addr_from_obj(ptr) + space_after;
   600   start_of_next_block = find_cushion_forwards(start_of_next_block);
   601   u_char* next_obj = start_of_next_block + space_before;
   602   ptrdiff_t next_size = *size_addr_from_base(start_of_next_block);
   603   if (start_of_next_block[0] == badResourceValue &&
   604       start_of_next_block[1] == badResourceValue &&
   605       start_of_next_block[2] == badResourceValue &&
   606       start_of_next_block[3] == badResourceValue) {
   607     tty->print_cr("### next object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
   608   } else {
   609     tty->print_cr("### next object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
   610   }
   611 }
   614 void report_heap_error(void* memblock, void* bad, const char* where) {
   615   tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
   616   tty->print_cr("## memory stomp: byte at " PTR_FORMAT " %s object " PTR_FORMAT, bad, where, memblock);
   617   print_neighbor_blocks(memblock);
   618   fatal("memory stomping error");
   619 }
   621 void verify_block(void* memblock) {
   622   size_t size = get_size(memblock);
   623   if (MallocCushion) {
   624     u_char* ptr = (u_char*)memblock - space_before;
   625     for (int i = 0; i < MallocCushion; i++) {
   626       if (ptr[i] != badResourceValue) {
   627         report_heap_error(memblock, ptr+i, "in front of");
   628       }
   629     }
   630     u_char* end = (u_char*)memblock + size + space_after;
   631     for (int j = -MallocCushion; j < 0; j++) {
   632       if (end[j] != badResourceValue) {
   633         report_heap_error(memblock, end+j, "after");
   634       }
   635     }
   636   }
   637 }
   638 #endif
   640 //
   641 // This function supports testing of the malloc out of memory
   642 // condition without really running the system out of memory.
   643 //
   644 static u_char* testMalloc(size_t alloc_size) {
   645   assert(MallocMaxTestWords > 0, "sanity check");
   647   if ((cur_malloc_words + (alloc_size / BytesPerWord)) > MallocMaxTestWords) {
   648     return NULL;
   649   }
   651   u_char* ptr = (u_char*)::malloc(alloc_size);
   653   if (ptr != NULL) {
   654     Atomic::add(((jint) (alloc_size / BytesPerWord)),
   655                 (volatile jint *) &cur_malloc_words);
   656   }
   657   return ptr;
   658 }
   660 void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
   661   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   662   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   664 #ifdef ASSERT
   665   // checking for the WatcherThread and crash_protection first
   666   // since os::malloc can be called when the libjvm.{dll,so} is
   667   // first loaded and we don't have a thread yet.
   668   // try to find the thread after we see that the watcher thread
   669   // exists and has crash protection.
   670   WatcherThread *wt = WatcherThread::watcher_thread();
   671   if (wt != NULL && wt->has_crash_protection()) {
   672     Thread* thread = ThreadLocalStorage::get_thread_slow();
   673     if (thread == wt) {
   674       assert(!wt->has_crash_protection(),
   675           "Can't malloc with crash protection from WatcherThread");
   676     }
   677   }
   678 #endif
   680   if (size == 0) {
   681     // return a valid pointer if size is zero
   682     // if NULL is returned the calling functions assume out of memory.
   683     size = 1;
   684   }
   686   const size_t alloc_size = size + space_before + space_after;
   688   if (size > alloc_size) { // Check for rollover.
   689     return NULL;
   690   }
   692   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   694   u_char* ptr;
   696   if (MallocMaxTestWords > 0) {
   697     ptr = testMalloc(alloc_size);
   698   } else {
   699     ptr = (u_char*)::malloc(alloc_size);
   700   }
   702 #ifdef ASSERT
   703   if (ptr == NULL) return NULL;
   704   if (MallocCushion) {
   705     for (u_char* p = ptr; p < ptr + MallocCushion; p++) *p = (u_char)badResourceValue;
   706     u_char* end = ptr + space_before + size;
   707     for (u_char* pq = ptr+MallocCushion; pq < end; pq++) *pq = (u_char)uninitBlockPad;
   708     for (u_char* q = end; q < end + MallocCushion; q++) *q = (u_char)badResourceValue;
   709   }
   710   // put size just before data
   711   *size_addr_from_base(ptr) = size;
   712 #endif
   713   u_char* memblock = ptr + space_before;
   714   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
   715     tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
   716     breakpoint();
   717   }
   718   debug_only(if (paranoid) verify_block(memblock));
   719   if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
   721   // we do not track MallocCushion memory
   722     MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
   724   return memblock;
   725 }
   728 void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
   729 #ifndef ASSERT
   730   NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
   731   NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
   732   MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
   733   void* ptr = ::realloc(memblock, size);
   734   if (ptr != NULL) {
   735     tkr.record((address)memblock, (address)ptr, size, memflags,
   736      caller == 0 ? CALLER_PC : caller);
   737   } else {
   738     tkr.discard();
   739   }
   740   return ptr;
   741 #else
   742   if (memblock == NULL) {
   743     return malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
   744   }
   745   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
   746     tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
   747     breakpoint();
   748   }
   749   verify_block(memblock);
   750   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   751   if (size == 0) return NULL;
   752   // always move the block
   753   void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
   754   if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
   755   // Copy to new memory if malloc didn't fail
   756   if ( ptr != NULL ) {
   757     memcpy(ptr, memblock, MIN2(size, get_size(memblock)));
   758     if (paranoid) verify_block(ptr);
   759     if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
   760       tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
   761       breakpoint();
   762     }
   763     free(memblock);
   764   }
   765   return ptr;
   766 #endif
   767 }
   770 void  os::free(void *memblock, MEMFLAGS memflags) {
   771   NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
   772 #ifdef ASSERT
   773   if (memblock == NULL) return;
   774   if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
   775     if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
   776     breakpoint();
   777   }
   778   verify_block(memblock);
   779   NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
   780   // Added by detlefs.
   781   if (MallocCushion) {
   782     u_char* ptr = (u_char*)memblock - space_before;
   783     for (u_char* p = ptr; p < ptr + MallocCushion; p++) {
   784       guarantee(*p == badResourceValue,
   785                 "Thing freed should be malloc result.");
   786       *p = (u_char)freeBlockPad;
   787     }
   788     size_t size = get_size(memblock);
   789     inc_stat_counter(&free_bytes, size);
   790     u_char* end = ptr + space_before + size;
   791     for (u_char* q = end; q < end + MallocCushion; q++) {
   792       guarantee(*q == badResourceValue,
   793                 "Thing freed should be malloc result.");
   794       *q = (u_char)freeBlockPad;
   795     }
   796     if (PrintMalloc && tty != NULL)
   797       fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
   798   } else if (PrintMalloc && tty != NULL) {
   799     // tty->print_cr("os::free %p", memblock);
   800     fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock);
   801   }
   802 #endif
   803   MemTracker::record_free((address)memblock, memflags);
   805   ::free((char*)memblock - space_before);
   806 }
   808 void os::init_random(long initval) {
   809   _rand_seed = initval;
   810 }
   813 long os::random() {
   814   /* standard, well-known linear congruential random generator with
   815    * next_rand = (16807*seed) mod (2**31-1)
   816    * see
   817    * (1) "Random Number Generators: Good Ones Are Hard to Find",
   818    *      S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
   819    * (2) "Two Fast Implementations of the 'Minimal Standard' Random
   820    *     Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
   821   */
   822   const long a = 16807;
   823   const unsigned long m = 2147483647;
   824   const long q = m / a;        assert(q == 127773, "weird math");
   825   const long r = m % a;        assert(r == 2836, "weird math");
   827   // compute az=2^31p+q
   828   unsigned long lo = a * (long)(_rand_seed & 0xFFFF);
   829   unsigned long hi = a * (long)((unsigned long)_rand_seed >> 16);
   830   lo += (hi & 0x7FFF) << 16;
   832   // if q overflowed, ignore the overflow and increment q
   833   if (lo > m) {
   834     lo &= m;
   835     ++lo;
   836   }
   837   lo += hi >> 15;
   839   // if (p+q) overflowed, ignore the overflow and increment (p+q)
   840   if (lo > m) {
   841     lo &= m;
   842     ++lo;
   843   }
   844   return (_rand_seed = lo);
   845 }
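// For reference, the recurrence the overflow-avoiding code above computes is
// next = (16807 * seed) mod (2^31 - 1). A minimal 64-bit sketch (illustrative,
// not part of this file):
//
//   static jlong park_miller_next(jlong seed) {
//     return (16807 * seed) % 2147483647;  // product fits comfortably in 64 bits
//   }
//
// The lo/hi split above gets the same result without ever forming the full
// ~46-bit intermediate product, which matters when a long is only 32 bits.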
   847 // The INITIALIZED state is distinguished from the SUSPENDED state because the
   848 // conditions in which a thread is first started are different from those in which
   849 // a suspension is resumed.  These differences make it hard for us to apply the
   850 // tougher checks when starting threads that we want to do when resuming them.
   851 // However, when start_thread is called as a result of Thread.start, on a Java
   852 // thread, the operation is synchronized on the Java Thread object.  So there
   853 // cannot be a race to start the thread and hence for the thread to exit while
   854 // we are working on it.  Non-Java threads that start Java threads either have
   855 // to do so in a context in which races are impossible, or should do appropriate
   856 // locking.
   858 void os::start_thread(Thread* thread) {
   859   // guard suspend/resume
   860   MutexLockerEx ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
   861   OSThread* osthread = thread->osthread();
   862   osthread->set_state(RUNNABLE);
   863   pd_start_thread(thread);
   864 }
   866 //---------------------------------------------------------------------------
   867 // Helper functions for fatal error handler
   869 void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) {
   870   assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking");
   872   int cols = 0;
   873   int cols_per_line = 0;
   874   switch (unitsize) {
   875     case 1: cols_per_line = 16; break;
   876     case 2: cols_per_line = 8;  break;
   877     case 4: cols_per_line = 4;  break;
   878     case 8: cols_per_line = 2;  break;
   879     default: return;
   880   }
   882   address p = start;
   883   st->print(PTR_FORMAT ":   ", start);
   884   while (p < end) {
   885     switch (unitsize) {
   886       case 1: st->print("%02x", *(u1*)p); break;
   887       case 2: st->print("%04x", *(u2*)p); break;
   888       case 4: st->print("%08x", *(u4*)p); break;
   889       case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break;
   890     }
   891     p += unitsize;
   892     cols++;
   893     if (cols >= cols_per_line && p < end) {
   894        cols = 0;
   895        st->cr();
   896        st->print(PTR_FORMAT ":   ", p);
   897     } else {
   898        st->print(" ");
   899     }
   900   }
   901   st->cr();
   902 }
   904 void os::print_environment_variables(outputStream* st, const char** env_list,
   905                                      char* buffer, int len) {
   906   if (env_list) {
   907     st->print_cr("Environment Variables:");
   909     for (int i = 0; env_list[i] != NULL; i++) {
   910       if (getenv(env_list[i], buffer, len)) {
   911         st->print(env_list[i]);
   912         st->print("=");
   913         st->print_cr(buffer);
   914       }
   915     }
   916   }
   917 }
   919 void os::print_cpu_info(outputStream* st) {
   920   // cpu
   921   st->print("CPU:");
   922   st->print("total %d", os::processor_count());
   923   // It's not safe to query number of active processors after crash
   924   // st->print("(active %d)", os::active_processor_count());
   925   st->print(" %s", VM_Version::cpu_features());
   926   st->cr();
   927   pd_print_cpu_info(st);
   928 }
   930 void os::print_date_and_time(outputStream *st) {
   931   time_t tloc;
   932   (void)time(&tloc);
   933   st->print("time: %s", ctime(&tloc));  // ctime adds newline.
   935   double t = os::elapsedTime();
   936   // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
   937   //       Linux. Must be a bug in glibc ? Workaround is to round "t" to int
   938   //       before printf. We lost some precision, but who cares?
   939   st->print_cr("elapsed time: %d seconds", (int)t);
   940 }
   942 // moved from debug.cpp (used to be find()) but still called from there
   943 // The verbose parameter is only set by the debug code in one case
   944 void os::print_location(outputStream* st, intptr_t x, bool verbose) {
   945   address addr = (address)x;
   946   CodeBlob* b = CodeCache::find_blob_unsafe(addr);
   947   if (b != NULL) {
   948     if (b->is_buffer_blob()) {
   949       // the interpreter is generated into a buffer blob
   950       InterpreterCodelet* i = Interpreter::codelet_containing(addr);
   951       if (i != NULL) {
   952         st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", addr, (int)(addr - i->code_begin()));
   953         i->print_on(st);
   954         return;
   955       }
   956       if (Interpreter::contains(addr)) {
   957         st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
   958                      " (not bytecode specific)", addr);
   959         return;
   960       }
   961       //
   962       if (AdapterHandlerLibrary::contains(b)) {
   963         st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", addr, (int)(addr - b->code_begin()));
   964         AdapterHandlerLibrary::print_handler_on(st, b);
   965       }
   966       // the stubroutines are generated into a buffer blob
   967       StubCodeDesc* d = StubCodeDesc::desc_for(addr);
   968       if (d != NULL) {
   969         st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", addr, (int)(addr - d->begin()));
   970         d->print_on(st);
   971         st->cr();
   972         return;
   973       }
   974       if (StubRoutines::contains(addr)) {
   975         st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) "
   976                      "stub routine", addr);
   977         return;
   978       }
   979       // the InlineCacheBuffer is using stubs generated into a buffer blob
   980       if (InlineCacheBuffer::contains(addr)) {
   981         st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr);
   982         return;
   983       }
   984       VtableStub* v = VtableStubs::stub_containing(addr);
   985       if (v != NULL) {
   986         st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", addr, (int)(addr - v->entry_point()));
   987         v->print_on(st);
   988         st->cr();
   989         return;
   990       }
   991     }
   992     nmethod* nm = b->as_nmethod_or_null();
   993     if (nm != NULL) {
   994       ResourceMark rm;
   995       st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
   996                 addr, (int)(addr - nm->entry_point()), nm);
   997       if (verbose) {
   998         st->print(" for ");
    999         nm->method()->print_value_on(st);
   1000       }
   1001       st->cr();
   1002       nm->print_nmethod(verbose);
   1003       return;
   1004     }
   1005     st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", addr, (int)(addr - b->code_begin()));
   1006     b->print_on(st);
   1007     return;
   1008   }
  1010   if (Universe::heap()->is_in(addr)) {
  1011     HeapWord* p = Universe::heap()->block_start(addr);
  1012     bool print = false;
   1013     // If we couldn't find it, it just may mean that the heap wasn't parseable
  1014     // See if we were just given an oop directly
  1015     if (p != NULL && Universe::heap()->block_is_obj(p)) {
  1016       print = true;
  1017     } else if (p == NULL && ((oopDesc*)addr)->is_oop()) {
  1018       p = (HeapWord*) addr;
   1019       print = true;
   1020     }
   1021     if (print) {
   1022       if (p == (HeapWord*) addr) {
   1023         st->print_cr(INTPTR_FORMAT " is an oop", addr);
   1024       } else {
   1025         st->print_cr(INTPTR_FORMAT " is pointing into object: " INTPTR_FORMAT, addr, p);
   1026       }
   1027       oop(p)->print_on(st);
   1028       return;
   1029     }
  1030   } else {
  1031     if (Universe::heap()->is_in_reserved(addr)) {
  1032       st->print_cr(INTPTR_FORMAT " is an unallocated location "
  1033                    "in the heap", addr);
   1034       return;
   1035     }
   1036   }
   1037   if (JNIHandles::is_global_handle((jobject) addr)) {
   1038     st->print_cr(INTPTR_FORMAT " is a global jni handle", addr);
   1039     return;
   1040   }
   1041   if (JNIHandles::is_weak_global_handle((jobject) addr)) {
   1042     st->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr);
   1043     return;
   1044   }
   1045 #ifndef PRODUCT
   1046   // we don't keep the block list in product mode
   1047   if (JNIHandleBlock::any_contains((jobject) addr)) {
   1048     st->print_cr(INTPTR_FORMAT " is a local jni handle", addr);
   1049     return;
   1050   }
  1051 #endif
  1053   for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
  1054     // Check for privilege stack
  1055     if (thread->privileged_stack_top() != NULL &&
  1056         thread->privileged_stack_top()->contains(addr)) {
  1057       st->print_cr(INTPTR_FORMAT " is pointing into the privilege stack "
  1058                    "for thread: " INTPTR_FORMAT, addr, thread);
  1059       if (verbose) thread->print_on(st);
   1060       return;
   1061     }
   1062     // If the addr is a java thread print information about that.
   1063     if (addr == (address)thread) {
   1064       if (verbose) {
   1065         thread->print_on(st);
   1066       } else {
   1067         st->print_cr(INTPTR_FORMAT " is a thread", addr);
   1068       }
   1069       return;
   1070     }
   1071     // If the addr is in the stack region for this thread then report that
   1072     // and print thread info
   1073     if (thread->stack_base() >= addr &&
   1074         addr > (thread->stack_base() - thread->stack_size())) {
   1075       st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
   1076                    INTPTR_FORMAT, addr, thread);
   1077       if (verbose) thread->print_on(st);
   1078       return;
   1079     }
   1080   }
  1083 #ifndef PRODUCT
  1084   // Check if in metaspace.
  1085   if (ClassLoaderDataGraph::contains((address)addr)) {
  1086     // Use addr->print() from the debugger instead (not here)
  1087     st->print_cr(INTPTR_FORMAT
  1088                  " is pointing into metadata", addr);
   1089     return;
   1090   }
   1091 #endif
   1093   // Try an OS specific find
   1094   if (os::find(addr, st)) {
   1095     return;
   1096   }
   1098   st->print_cr(INTPTR_FORMAT " is an unknown value", addr);
   1099 }
  1101 // Looks like all platforms except IA64 can use the same function to check
  1102 // if C stack is walkable beyond current frame. The check for fp() is not
  1103 // necessary on Sparc, but it's harmless.
  1104 bool os::is_first_C_frame(frame* fr) {
  1105 #if defined(IA64) && !defined(_WIN32)
   1106   // On IA64 we have to check if the caller's bsp is still valid
  1107   // (i.e. within the register stack bounds).
  1108   // Notice: this only works for threads created by the VM and only if
  1109   // we walk the current stack!!! If we want to be able to walk
  1110   // arbitrary other threads, we'll have to somehow store the thread
  1111   // object in the frame.
  1112   Thread *thread = Thread::current();
  1113   if ((address)fr->fp() <=
  1114       thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) {
  1115     // This check is a little hacky, because on Linux the first C
  1116     // frame's ('start_thread') register stack frame starts at
  1117     // "register_stack_base + 0x48" while on HPUX, the first C frame's
  1118     // ('__pthread_bound_body') register stack frame seems to really
  1119     // start at "register_stack_base".
  1120     return true;
  1121   } else {
   1122     return false;
   1123   }
  1124 #elif defined(IA64) && defined(_WIN32)
  1125   return true;
  1126 #else
  1127   // Load up sp, fp, sender sp and sender fp, check for reasonable values.
  1128   // Check usp first, because if that's bad the other accessors may fault
  1129   // on some architectures.  Ditto ufp second, etc.
  1130   uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1);
  1131   // sp on amd can be 32 bit aligned.
  1132   uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1);
  1134   uintptr_t usp    = (uintptr_t)fr->sp();
  1135   if ((usp & sp_align_mask) != 0) return true;
  1137   uintptr_t ufp    = (uintptr_t)fr->fp();
  1138   if ((ufp & fp_align_mask) != 0) return true;
  1140   uintptr_t old_sp = (uintptr_t)fr->sender_sp();
  1141   if ((old_sp & sp_align_mask) != 0) return true;
  1142   if (old_sp == 0 || old_sp == (uintptr_t)-1) return true;
  1144   uintptr_t old_fp = (uintptr_t)fr->link();
  1145   if ((old_fp & fp_align_mask) != 0) return true;
  1146   if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true;
  1148   // stack grows downwards; if old_fp is below current fp or if the stack
  1149   // frame is too large, either the stack is corrupted or fp is not saved
  1150   // on stack (i.e. on x86, ebp may be used as general register). The stack
  1151   // is not walkable beyond current frame.
  1152   if (old_fp < ufp) return true;
  1153   if (old_fp - ufp > 64 * K) return true;
  1155   return false;
   1156 #endif
   1157 }
  1159 #ifdef ASSERT
  1160 extern "C" void test_random() {
  1161   const double m = 2147483647;
  1162   double mean = 0.0, variance = 0.0, t;
  1163   long reps = 10000;
  1164   unsigned long seed = 1;
  1166   tty->print_cr("seed %ld for %ld repeats...", seed, reps);
  1167   os::init_random(seed);
  1168   long num;
  1169   for (int k = 0; k < reps; k++) {
  1170     num = os::random();
  1171     double u = (double)num / m;
  1172     assert(u >= 0.0 && u <= 1.0, "bad random number!");
  1174     // calculate mean and variance of the random sequence
  1175     mean += u;
   1176     variance += (u*u);
   1177   }
  1178   mean /= reps;
  1179   variance /= (reps - 1);
  1181   assert(num == 1043618065, "bad seed");
  1182   tty->print_cr("mean of the 1st 10000 numbers: %f", mean);
  1183   tty->print_cr("variance of the 1st 10000 numbers: %f", variance);
  1184   const double eps = 0.0001;
  1185   t = fabsd(mean - 0.5018);
  1186   assert(t < eps, "bad mean");
  1187   t = (variance - 0.3355) < 0.0 ? -(variance - 0.3355) : variance - 0.3355;
   1188   assert(t < eps, "bad variance");
   1189 }
  1190 #endif
  1193 // Set up the boot classpath.
  1195 char* os::format_boot_path(const char* format_string,
  1196                            const char* home,
  1197                            int home_len,
  1198                            char fileSep,
  1199                            char pathSep) {
  1200     assert((fileSep == '/' && pathSep == ':') ||
   1201            (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
  1203     // Scan the format string to determine the length of the actual
  1204     // boot classpath, and handle platform dependencies as well.
  1205     int formatted_path_len = 0;
  1206     const char* p;
  1207     for (p = format_string; *p != 0; ++p) {
  1208         if (*p == '%') formatted_path_len += home_len - 1;
   1209         ++formatted_path_len;
   1210     }
  1212     char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal);
  1213     if (formatted_path == NULL) {
   1214         return NULL;
   1215     }
  1217     // Create boot classpath from format, substituting separator chars and
  1218     // java home directory.
  1219     char* q = formatted_path;
  1220     for (p = format_string; *p != 0; ++p) {
  1221         switch (*p) {
  1222         case '%':
  1223             strcpy(q, home);
  1224             q += home_len;
  1225             break;
  1226         case '/':
  1227             *q++ = fileSep;
  1228             break;
  1229         case ':':
  1230             *q++ = pathSep;
  1231             break;
  1232         default:
   1233             *q++ = *p;
   1234         }
   1235     }
  1236     *q = '\0';
  1238     assert((q - formatted_path) == formatted_path_len, "formatted_path size botched");
   1239     return formatted_path;
   1240 }
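// Illustrative example (hypothetical values): with home = "/usr/java",
// fileSep = '/' and pathSep = ':', the format string "%/lib/rt.jar:%/classes"
// expands to "/usr/java/lib/rt.jar:/usr/java/classes".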
  1243 bool os::set_boot_path(char fileSep, char pathSep) {
  1244     const char* home = Arguments::get_java_home();
  1245     int home_len = (int)strlen(home);
  1247     static const char* meta_index_dir_format = "%/lib/";
  1248     static const char* meta_index_format = "%/lib/meta-index";
  1249     char* meta_index = format_boot_path(meta_index_format, home, home_len, fileSep, pathSep);
  1250     if (meta_index == NULL) return false;
  1251     char* meta_index_dir = format_boot_path(meta_index_dir_format, home, home_len, fileSep, pathSep);
  1252     if (meta_index_dir == NULL) return false;
  1253     Arguments::set_meta_index_path(meta_index, meta_index_dir);
   1255     // Any modification to the JAR-file list for the boot classpath must be
   1256     // aligned with install/install/make/common/Pack.gmk. Note: boot class
   1257     // path class JARs are stripped for StackMapTable to reduce download size.
  1258     static const char classpath_format[] =
  1259         "%/lib/resources.jar:"
  1260         "%/lib/rt.jar:"
  1261         "%/lib/sunrsasign.jar:"
  1262         "%/lib/jsse.jar:"
  1263         "%/lib/jce.jar:"
  1264         "%/lib/charsets.jar:"
  1265         "%/lib/jfr.jar:"
  1266 #ifdef __APPLE__
  1267         "%/lib/JObjC.jar:"
  1268 #endif
  1269         "%/classes";
  1270     char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep);
  1271     if (sysclasspath == NULL) return false;
  1272     Arguments::set_sysclasspath(sysclasspath);
   1274     return true;
   1275 }
  1277 /*
  1278  * Splits a path, based on its separator, the number of
  1279  * elements is returned back in n.
   1280  * It is the caller's responsibility to:
  1281  *   a> check the value of n, and n may be 0.
  1282  *   b> ignore any empty path elements
  1283  *   c> free up the data.
  1284  */
  1285 char** os::split_path(const char* path, int* n) {
  1286   *n = 0;
  1287   if (path == NULL || strlen(path) == 0) {
   1288     return NULL;
   1289   }
  1290   const char psepchar = *os::path_separator();
  1291   char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal);
  1292   if (inpath == NULL) {
   1293     return NULL;
   1294   }
  1295   strcpy(inpath, path);
  1296   int count = 1;
  1297   char* p = strchr(inpath, psepchar);
  1298   // Get a count of elements to allocate memory
  1299   while (p != NULL) {
  1300     count++;
  1301     p++;
   1302     p = strchr(p, psepchar);
   1303   }
  1304   char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal);
  1305   if (opath == NULL) {
   1306     return NULL;
   1307   }
  1309   // do the actual splitting
  1310   p = inpath;
  1311   for (int i = 0 ; i < count ; i++) {
  1312     size_t len = strcspn(p, os::path_separator());
  1313     if (len > JVM_MAXPATHLEN) {
   1314       return NULL;
   1315     }
  1316     // allocate the string and add terminator storage
  1317     char* s  = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
  1318     if (s == NULL) {
   1319       return NULL;
   1320     }
  1321     strncpy(s, p, len);
  1322     s[len] = '\0';
  1323     opath[i] = s;
   1324     p += len + 1;
   1325   }
  1326   FREE_C_HEAP_ARRAY(char, inpath, mtInternal);
  1327   *n = count;
   1328   return opath;
   1329 }
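// Minimal usage sketch (hypothetical caller, following the contract in the
// comment above: check n, skip empty elements, free everything afterwards):
//
//   int n = 0;
//   char** elems = os::split_path(some_path, &n);   // some_path is hypothetical
//   if (elems != NULL) {
//     for (int i = 0; i < n; i++) {
//       if (elems[i] != NULL && elems[i][0] != '\0') {
//         // ... use elems[i] ...
//       }
//       FREE_C_HEAP_ARRAY(char, elems[i], mtInternal);
//     }
//     FREE_C_HEAP_ARRAY(char*, elems, mtInternal);
//   }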
  1331 void os::set_memory_serialize_page(address page) {
  1332   int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
  1333   _mem_serialize_page = (volatile int32_t *)page;
  1334   // We initialize the serialization page shift count here
  1335   // We assume a cache line size of 64 bytes
  1336   assert(SerializePageShiftCount == count,
  1337          "thread size changed, fix SerializePageShiftCount constant");
   1338   set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t)));
   1339 }
  1341 static volatile intptr_t SerializePageLock = 0;
  1343 // This method is called from signal handler when SIGSEGV occurs while the current
  1344 // thread tries to store to the "read-only" memory serialize page during state
  1345 // transition.
  1346 void os::block_on_serialize_page_trap() {
  1347   if (TraceSafepoint) {
   1348     tty->print_cr("Block until the serialize page permission restored");
   1349   }
  1350   // When VMThread is holding the SerializePageLock during modifying the
  1351   // access permission of the memory serialize page, the following call
  1352   // will block until the permission of that page is restored to rw.
  1353   // Generally, it is unsafe to manipulate locks in signal handlers, but in
  1354   // this case, it's OK as the signal is synchronous and we know precisely when
  1355   // it can occur.
  1356   Thread::muxAcquire(&SerializePageLock, "set_memory_serialize_page");
   1357   Thread::muxRelease(&SerializePageLock);
   1358 }
  1360 // Serialize all thread state variables
  1361 void os::serialize_thread_states() {
  1362   // On some platforms, such as Solaris and Linux, restoring the page
  1363   // permission has been observed to take much longer than expected, due to
  1364   // scheduler starvation and similar problems. To avoid the long synchronization
  1365   // time and expensive page trap spinning, 'SerializePageLock' is used to block
  1366   // the mutator thread if such a case is encountered. See bug 6546278 for details.
  1367   Thread::muxAcquire(&SerializePageLock, "serialize_thread_states");
  1368   os::protect_memory((char *)os::get_memory_serialize_page(),
  1369                      os::vm_page_size(), MEM_PROT_READ);
  1370   os::protect_memory((char *)os::get_memory_serialize_page(),
  1371                      os::vm_page_size(), MEM_PROT_RW);
  1372   Thread::muxRelease(&SerializePageLock);
  1373 }
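// Sketch of the interaction implemented by the two functions above
// (illustrative only, not additional VM code):
//
//   VMThread:                               Mutator thread (state transition):
//     muxAcquire(&SerializePageLock);         stores to the serialize page ...
//     protect page MEM_PROT_READ;   <-------- ... and faults if it hits the
//     protect page MEM_PROT_RW;               read-only window; its SIGSEGV
//     muxRelease(&SerializePageLock);         handler calls
//                                             block_on_serialize_page_trap(),
//                                             which waits on SerializePageLock
//                                             until the page is writable again.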
  1375 // Returns true if the current stack pointer is above the stack shadow
  1376 // pages, false otherwise.
  1378 bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
  1379   assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check");
  1380   address sp = current_stack_pointer();
  1381   // Check whether we have StackShadowPages above the yellow zone.  This
  1382   // parameter depends on the depth of the maximum VM call stack possible
  1383   // from the stack overflow handler.  'instanceof' in the stack overflow
  1384   // handler uses at least 8k of VM stack, and a println uses at least 8k
  1385   // of native stack.
  1386   const int framesize_in_bytes =
  1387     Interpreter::size_top_interpreter_activation(method()) * wordSize;
  1388   int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages)
  1389                       * vm_page_size()) + framesize_in_bytes;
  1390   // The very lowest address of the stack
  1391   address stack_limit = thread->stack_base() - thread->stack_size();
  1392   return (sp > (stack_limit + reserved_area));
  1393 }
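// Worked example (illustrative numbers only, not the platform defaults):
// with StackShadowPages=20, StackRedPages=1, StackYellowPages=2, a 4K page
// size and a 1K top interpreter activation, the reserved area is
//   (20 + 1 + 2) * 4096 + 1024 = 95232 bytes,
// so this returns true only if sp lies more than ~93K above the lowest
// address of the stack.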
  1395 size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
  1396                                 uint min_pages)
  1397 {
  1398   assert(min_pages > 0, "sanity");
  1399   if (UseLargePages) {
  1400     const size_t max_page_size = region_max_size / min_pages;
  1402     for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
  1403       const size_t sz = _page_sizes[i];
  1404       const size_t mask = sz - 1;
  1405       if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
  1406         // The largest page size with no fragmentation.
  1407         return sz;
  1408       }
  1410       if (sz <= max_page_size) {
  1411         // The largest page size that satisfies the min_pages requirement.
  1412         return sz;
  1413       }
  1414     }
  1415   }
  1417   return vm_page_size();
  1418 }
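// Example (illustrative sizes): with UseLargePages enabled, _page_sizes =
// {1G, 2M, 4K}, region_min_size=16M, region_max_size=64M and min_pages=4:
//   - 1G is skipped: 16M and 64M are not 1G-aligned, and 1G exceeds
//     max_page_size = 64M / 4 = 16M;
//   - 2M divides both 16M and 64M evenly, so 2M is returned as the largest
//     page size with no fragmentation.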
  1420 #ifndef PRODUCT
  1421 void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
  1422 {
  1423   if (TracePageSizes) {
  1424     tty->print("%s: ", str);
  1425     for (int i = 0; i < count; ++i) {
  1426       tty->print(" " SIZE_FORMAT, page_sizes[i]);
  1427     }
  1428     tty->cr();
  1429   }
  1430 }
  1432 void os::trace_page_sizes(const char* str, const size_t region_min_size,
  1433                           const size_t region_max_size, const size_t page_size,
  1434                           const char* base, const size_t size)
  1435 {
  1436   if (TracePageSizes) {
  1437     tty->print_cr("%s:  min=" SIZE_FORMAT " max=" SIZE_FORMAT
  1438                   " pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT
  1439                   " size=" SIZE_FORMAT,
  1440                   str, region_min_size, region_max_size,
  1441                   page_size, base, size);
  1442   }
  1443 }
  1444 #endif  // #ifndef PRODUCT
  1446 // This is the working definition of a server class machine:
  1447 // >= 2 physical CPUs and >= 2GB of memory, with some fuzz
  1448 // because the graphics memory (?) sometimes masks physical memory.
  1449 // If you want to change the definition of a server class machine
  1450 // on some OS or platform, e.g., >= 4GB on Windows platforms,
  1451 // then you'll have to parameterize this method based on that state,
  1452 // as was done for logical processors here, or replicate and
  1453 // specialize this method for each platform.  (Or fix os to have
  1454 // some inheritance structure and use subclassing.  Sigh.)
  1455 // If you want some platform to always or never behave as a server
  1456 // class machine, change the setting of AlwaysActAsServerClassMachine
  1457 // and NeverActAsServerClassMachine in globals*.hpp.
  1458 bool os::is_server_class_machine() {
  1459   // First check for the early returns
  1460   if (NeverActAsServerClassMachine) {
  1461     return false;
  1462   }
  1463   if (AlwaysActAsServerClassMachine) {
  1464     return true;
  1465   }
  1466   // Then actually look at the machine
  1467   bool         result            = false;
  1468   const unsigned int    server_processors = 2;
  1469   const julong server_memory     = 2UL * G;
  1470   // We seem not to get our full complement of memory.
  1471   //     We allow some part (1/8?) of the memory to be "missing",
  1472   //     based on the sizes of DIMMs, and maybe graphics cards.
  1473   const julong missing_memory   = 256UL * M;
  1475   /* Is this a server class machine? */
  1476   if ((os::active_processor_count() >= (int)server_processors) &&
  1477       (os::physical_memory() >= (server_memory - missing_memory))) {
  1478     const unsigned int logical_processors =
  1479       VM_Version::logical_processors_per_package();
  1480     if (logical_processors > 1) {
  1481       const unsigned int physical_packages =
  1482         os::active_processor_count() / logical_processors;
  1483       if (physical_packages > server_processors) {
  1484         result = true;
  1485       }
  1486     } else {
  1487       result = true;
  1488     }
  1489   }
  1490   return result;
  1491 }
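// Example of the thresholds above (illustrative): a machine reporting 2 or
// more active processors and at least 2G - 256M = 1792M of physical memory
// qualifies, provided it either reports one logical processor per package or
// enough logical processors to imply more than two physical packages.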
  1493 void os::SuspendedThreadTask::run() {
  1494   assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
  1495   internal_do_task();
  1496   _done = true;
  1497 }
  1499 bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  1500   return os::pd_create_stack_guard_pages(addr, bytes);
  1501 }
  1503 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  1504   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  1505   if (result != NULL) {
  1506     MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  1507   }
  1509   return result;
  1510 }
  1512 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
  1513    MEMFLAGS flags) {
  1514   char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  1515   if (result != NULL) {
  1516     MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  1517     MemTracker::record_virtual_memory_type((address)result, flags);
  1518   }
  1520   return result;
  1521 }
  1523 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
  1524   char* result = pd_attempt_reserve_memory_at(bytes, addr);
  1525   if (result != NULL) {
  1526     MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
  1527   }
  1528   return result;
  1529 }
  1531 void os::split_reserved_memory(char *base, size_t size,
  1532                                  size_t split, bool realloc) {
  1533   pd_split_reserved_memory(base, size, split, realloc);
  1534 }
  1536 bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  1537   bool res = pd_commit_memory(addr, bytes, executable);
  1538   if (res) {
  1539     MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  1540   }
  1541   return res;
  1542 }
  1544 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
  1545                               bool executable) {
  1546   bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
  1547   if (res) {
  1548     MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  1549   }
  1550   return res;
  1551 }
  1553 void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
  1554                                const char* mesg) {
  1555   pd_commit_memory_or_exit(addr, bytes, executable, mesg);
  1556   MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  1557 }
  1559 void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
  1560                                bool executable, const char* mesg) {
  1561   os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
  1562   MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  1563 }
  1565 bool os::uncommit_memory(char* addr, size_t bytes) {
  1566   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
  1567   bool res = pd_uncommit_memory(addr, bytes);
  1568   if (res) {
  1569     tkr.record((address)addr, bytes);
  1570   } else {
  1571     tkr.discard();
  1572   }
  1573   return res;
  1574 }
  1576 bool os::release_memory(char* addr, size_t bytes) {
  1577   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  1578   bool res = pd_release_memory(addr, bytes);
  1579   if (res) {
  1580     tkr.record((address)addr, bytes);
  1581   } else {
  1582     tkr.discard();
  1583   }
  1584   return res;
  1585 }
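// Typical lifecycle of the wrappers above (illustrative sketch; error handling
// is elided and the 1*M size is hypothetical):
//
//   char* base = os::reserve_memory(1*M, NULL, 0);       // reserve address space
//   if (base != NULL && os::commit_memory(base, 1*M, false)) {
//     // ... use [base, base + 1*M) ...
//     os::uncommit_memory(base, 1*M);                    // return the pages
//   }
//   if (base != NULL) {
//     os::release_memory(base, 1*M);                     // drop the reservation
//   }
//
// Each wrapper forwards to its pd_* platform implementation and, on success,
// records the operation with MemTracker.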
  1588 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
  1589                            char *addr, size_t bytes, bool read_only,
  1590                            bool allow_exec) {
  1591   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  1592   if (result != NULL) {
  1593     MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
  1594   }
  1595   return result;
  1596 }
  1598 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
  1599                              char *addr, size_t bytes, bool read_only,
  1600                              bool allow_exec) {
  1601   return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
  1602                     read_only, allow_exec);
  1603 }
  1605 bool os::unmap_memory(char *addr, size_t bytes) {
  1606   MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  1607   bool result = pd_unmap_memory(addr, bytes);
  1608   if (result) {
  1609     tkr.record((address)addr, bytes);
  1610   } else {
  1611     tkr.discard();
  1612   }
  1613   return result;
  1614 }
  1616 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  1617   pd_free_memory(addr, bytes, alignment_hint);
  1618 }
  1620 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  1621   pd_realign_memory(addr, bytes, alignment_hint);
  1622 }
  1624 #ifndef TARGET_OS_FAMILY_windows
  1625 /* Try to switch the state from "from" to "to". Returns the state that is set
  1626  * when the method completes: "to" on success, otherwise the observed state.
  1627  */
  1628 os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
  1629                                                          os::SuspendResume::State to)
  1630 {
  1631   os::SuspendResume::State result =
  1632     (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
  1633   if (result == from) {
  1634     // success
  1635     return to;
  1636   }
  1637   return result;
  1638 }
  1639 #endif
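// Illustrative caller pattern for switch_state() above (hypothetical; the
// state names assume the SR_* values declared for os::SuspendResume in os.hpp):
//
//   os::SuspendResume::State res =
//     sr.switch_state(os::SuspendResume::SR_RUNNING,
//                     os::SuspendResume::SR_SUSPEND_REQUEST);
//   if (res == os::SuspendResume::SR_SUSPEND_REQUEST) {
//     // CAS succeeded; the request state is now set.
//   } else {
//     // CAS failed; 'res' is the state that was observed instead.
//   }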
