src/share/vm/runtime/os.cpp

author       tschatzl
date         Wed, 11 Sep 2013 16:25:02 +0200
changeset    5701:40136aa2cdb1
parent       5615:c636758ea616
child        5721:179cd89fb279
permissions  -rw-r--r--

8010722: assert: failed: heap size is too big for compressed oops
Summary: Take conservative assumptions about the required alignment of the various garbage collector components into account when determining the maximum heap size that supports compressed oops. Using this conservative value avoids several circular dependencies in the calculation.
Reviewed-by: stefank, dholmes

duke@435 1 /*
rdurbin@4802 2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/classLoader.hpp"
stefank@2314 27 #include "classfile/javaClasses.hpp"
stefank@2314 28 #include "classfile/systemDictionary.hpp"
stefank@2314 29 #include "classfile/vmSymbols.hpp"
stefank@2314 30 #include "code/icBuffer.hpp"
stefank@2314 31 #include "code/vtableStubs.hpp"
stefank@2314 32 #include "gc_implementation/shared/vmGCOperations.hpp"
stefank@2314 33 #include "interpreter/interpreter.hpp"
stefank@2314 34 #include "memory/allocation.inline.hpp"
stefank@2314 35 #include "oops/oop.inline.hpp"
stefank@2314 36 #include "prims/jvm.h"
stefank@2314 37 #include "prims/jvm_misc.hpp"
stefank@2314 38 #include "prims/privilegedStack.hpp"
stefank@2314 39 #include "runtime/arguments.hpp"
stefank@2314 40 #include "runtime/frame.inline.hpp"
stefank@2314 41 #include "runtime/interfaceSupport.hpp"
stefank@2314 42 #include "runtime/java.hpp"
stefank@2314 43 #include "runtime/javaCalls.hpp"
stefank@2314 44 #include "runtime/mutexLocker.hpp"
stefank@2314 45 #include "runtime/os.hpp"
stefank@2314 46 #include "runtime/stubRoutines.hpp"
stefank@4299 47 #include "runtime/thread.inline.hpp"
stefank@2314 48 #include "services/attachListener.hpp"
zgu@3900 49 #include "services/memTracker.hpp"
stefank@2314 50 #include "services/threadService.hpp"
stefank@2314 51 #include "utilities/defaultStream.hpp"
stefank@2314 52 #include "utilities/events.hpp"
stefank@2314 53 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 54 # include "os_linux.inline.hpp"
stefank@2314 55 #endif
stefank@2314 56 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 57 # include "os_solaris.inline.hpp"
stefank@2314 58 #endif
stefank@2314 59 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 60 # include "os_windows.inline.hpp"
stefank@2314 61 #endif
never@3156 62 #ifdef TARGET_OS_FAMILY_bsd
never@3156 63 # include "os_bsd.inline.hpp"
never@3156 64 #endif
duke@435 65
duke@435 66 # include <signal.h>
duke@435 67
duke@435 68 OSThread* os::_starting_thread = NULL;
duke@435 69 address os::_polling_page = NULL;
duke@435 70 volatile int32_t* os::_mem_serialize_page = NULL;
duke@435 71 uintptr_t os::_serialize_page_mask = 0;
duke@435 72 long os::_rand_seed = 1;
duke@435 73 int os::_processor_count = 0;
duke@435 74 size_t os::_page_sizes[os::page_sizes_max];
duke@435 75
duke@435 76 #ifndef PRODUCT
kvn@2557 77 julong os::num_mallocs = 0; // # of calls to malloc/realloc
kvn@2557 78 julong os::alloc_bytes = 0; // # of bytes allocated
kvn@2557 79 julong os::num_frees = 0; // # of calls to free
kvn@2557 80 julong os::free_bytes = 0; // # of bytes freed
duke@435 81 #endif
duke@435 82
rdurbin@4802 83 static juint cur_malloc_words = 0; // current size for MallocMaxTestWords
rdurbin@4802 84
phh@3378 85 void os_init_globals() {
phh@3378 86 // Called from init_globals().
phh@3378 87 // See Threads::create_vm() in thread.cpp, and init.cpp.
phh@3378 88 os::init_globals();
phh@3378 89 }
phh@3378 90
duke@435 91 // Fill in buffer with current local time as an ISO-8601 string.
duke@435 92 // E.g., yyyy-mm-ddThh:mm:ss-zzzz.
duke@435 93 // Returns buffer, or NULL if it failed.
duke@435 94 // This would mostly be a call to
duke@435 95 // strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....)
duke@435 96 // except that on Windows the %z behaves badly, so we do it ourselves.
duke@435 97 // Also, people wanted milliseconds on there,
duke@435 98 // and strftime doesn't do milliseconds.
duke@435 99 char* os::iso8601_time(char* buffer, size_t buffer_length) {
duke@435 100 // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0"
duke@435 101 // 1 2
duke@435 102 // 12345678901234567890123456789
duke@435 103 static const char* iso8601_format =
duke@435 104 "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d";
duke@435 105 static const size_t needed_buffer = 29;
duke@435 106
duke@435 107 // Sanity check the arguments
duke@435 108 if (buffer == NULL) {
duke@435 109 assert(false, "NULL buffer");
duke@435 110 return NULL;
duke@435 111 }
duke@435 112 if (buffer_length < needed_buffer) {
duke@435 113 assert(false, "buffer_length too small");
duke@435 114 return NULL;
duke@435 115 }
duke@435 116 // Get the current time
sbohne@496 117 jlong milliseconds_since_19700101 = javaTimeMillis();
duke@435 118 const int milliseconds_per_microsecond = 1000;
duke@435 119 const time_t seconds_since_19700101 =
duke@435 120 milliseconds_since_19700101 / milliseconds_per_microsecond;
duke@435 121 const int milliseconds_after_second =
duke@435 122 milliseconds_since_19700101 % milliseconds_per_microsecond;
duke@435 123 // Convert the time value to a tm and timezone variable
ysr@983 124 struct tm time_struct;
ysr@983 125 if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) {
ysr@983 126 assert(false, "Failed localtime_pd");
duke@435 127 return NULL;
duke@435 128 }
never@3156 129 #if defined(_ALLBSD_SOURCE)
never@3156 130 const time_t zone = (time_t) time_struct.tm_gmtoff;
never@3156 131 #else
duke@435 132 const time_t zone = timezone;
never@3156 133 #endif
duke@435 134
duke@435 135 // If daylight savings time is in effect,
duke@435 136 // we are 1 hour East of our time zone
duke@435 137 const time_t seconds_per_minute = 60;
duke@435 138 const time_t minutes_per_hour = 60;
duke@435 139 const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour;
duke@435 140 time_t UTC_to_local = zone;
duke@435 141 if (time_struct.tm_isdst > 0) {
duke@435 142 UTC_to_local = UTC_to_local - seconds_per_hour;
duke@435 143 }
duke@435 144 // Compute the time zone offset.
ysr@983 145 // localtime_pd() sets timezone to the difference (in seconds)
duke@435 146 // between UTC and and local time.
duke@435 147 // ISO 8601 says we need the difference between local time and UTC,
ysr@983 148 // we change the sign of the localtime_pd() result.
duke@435 149 const time_t local_to_UTC = -(UTC_to_local);
duke@435 150 // Then we have to figure out if we are ahead (+) or behind (-) UTC.
duke@435 151 char sign_local_to_UTC = '+';
duke@435 152 time_t abs_local_to_UTC = local_to_UTC;
duke@435 153 if (local_to_UTC < 0) {
duke@435 154 sign_local_to_UTC = '-';
duke@435 155 abs_local_to_UTC = -(abs_local_to_UTC);
duke@435 156 }
duke@435 157 // Convert time zone offset seconds to hours and minutes.
duke@435 158 const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour);
duke@435 159 const time_t zone_min =
duke@435 160 ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute);
duke@435 161
duke@435 162 // Print an ISO 8601 date and time stamp into the buffer
duke@435 163 const int year = 1900 + time_struct.tm_year;
duke@435 164 const int month = 1 + time_struct.tm_mon;
duke@435 165 const int printed = jio_snprintf(buffer, buffer_length, iso8601_format,
duke@435 166 year,
duke@435 167 month,
duke@435 168 time_struct.tm_mday,
duke@435 169 time_struct.tm_hour,
duke@435 170 time_struct.tm_min,
duke@435 171 time_struct.tm_sec,
duke@435 172 milliseconds_after_second,
duke@435 173 sign_local_to_UTC,
duke@435 174 zone_hours,
duke@435 175 zone_min);
duke@435 176 if (printed == 0) {
duke@435 177 assert(false, "Failed jio_printf");
duke@435 178 return NULL;
duke@435 179 }
duke@435 180 return buffer;
duke@435 181 }
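// A minimal usage sketch, assuming a caller that only wants to print a
// timestamp (the helper name below is made up for illustration). The
// needed_buffer constant already counts the trailing '\0', so a 29-byte
// buffer is the smallest one that succeeds.
static void print_iso8601_timestamp_example() {
  char buf[29];                             // "YYYY-MM-DDThh:mm:ss.mmm+zzzz" + '\0'
  if (os::iso8601_time(buf, sizeof(buf)) != NULL) {
    tty->print_cr("timestamp: %s", buf);    // e.g. 2013-09-11T16:25:02.000+0200
  }
}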
duke@435 182
duke@435 183 OSReturn os::set_priority(Thread* thread, ThreadPriority p) {
duke@435 184 #ifdef ASSERT
duke@435 185 if (!(!thread->is_Java_thread() ||
duke@435 186 Thread::current() == thread ||
duke@435 187 Threads_lock->owned_by_self()
duke@435 188 || thread->is_Compiler_thread()
duke@435 189 )) {
duke@435 190 assert(false, "possibility of dangling Thread pointer");
duke@435 191 }
duke@435 192 #endif
duke@435 193
duke@435 194 if (p >= MinPriority && p <= MaxPriority) {
duke@435 195 int priority = java_to_os_priority[p];
duke@435 196 return set_native_priority(thread, priority);
duke@435 197 } else {
duke@435 198 assert(false, "Should not happen");
duke@435 199 return OS_ERR;
duke@435 200 }
duke@435 201 }
duke@435 202
dholmes@4077 203 // The mapping from OS priority back to Java priority may be inexact because
dholmes@4077 204 // Java priorities can map M:1 with native priorities. If you want the definite
dholmes@4077 205 // Java priority then use JavaThread::java_priority()
duke@435 206 OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) {
duke@435 207 int p;
duke@435 208 int os_prio;
duke@435 209 OSReturn ret = get_native_priority(thread, &os_prio);
duke@435 210 if (ret != OS_OK) return ret;
duke@435 211
dholmes@4077 212 if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) {
dholmes@4077 213 for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ;
dholmes@4077 214 } else {
dholmes@4077 215 // niceness values are in reverse order
dholmes@4077 216 for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ;
dholmes@4077 217 }
duke@435 218 priority = (ThreadPriority)p;
duke@435 219 return OS_OK;
duke@435 220 }
duke@435 221
duke@435 222
duke@435 223 // --------------------- sun.misc.Signal (optional) ---------------------
duke@435 224
duke@435 225
duke@435 226 // SIGBREAK is sent by the keyboard to query the VM state
duke@435 227 #ifndef SIGBREAK
duke@435 228 #define SIGBREAK SIGQUIT
duke@435 229 #endif
duke@435 230
duke@435 231 // sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread.
duke@435 232
duke@435 233
duke@435 234 static void signal_thread_entry(JavaThread* thread, TRAPS) {
duke@435 235 os::set_priority(thread, NearMaxPriority);
duke@435 236 while (true) {
duke@435 237 int sig;
duke@435 238 {
duke@435 239 // FIXME : Currently we have not decided what the status should be
duke@435 240 // for this java thread blocked here. Once we decide about
duke@435 241 // that, we should fix this.
duke@435 242 sig = os::signal_wait();
duke@435 243 }
duke@435 244 if (sig == os::sigexitnum_pd()) {
duke@435 245 // Terminate the signal thread
duke@435 246 return;
duke@435 247 }
duke@435 248
duke@435 249 switch (sig) {
duke@435 250 case SIGBREAK: {
duke@435 251 // Check if the signal is a trigger to start the Attach Listener - in that
duke@435 252 // case don't print stack traces.
duke@435 253 if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
duke@435 254 continue;
duke@435 255 }
duke@435 256 // Print stack traces
duke@435 257 // Any SIGBREAK operations added here should make sure to flush
duke@435 258 // the output stream (e.g. tty->flush()) after output. See 4803766.
duke@435 259 // Each module also prints an extra carriage return after its output.
duke@435 260 VM_PrintThreads op;
duke@435 261 VMThread::execute(&op);
duke@435 262 VM_PrintJNI jni_op;
duke@435 263 VMThread::execute(&jni_op);
duke@435 264 VM_FindDeadlocks op1(tty);
duke@435 265 VMThread::execute(&op1);
duke@435 266 Universe::print_heap_at_SIGBREAK();
duke@435 267 if (PrintClassHistogram) {
sla@5237 268 VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
duke@435 269 VMThread::execute(&op1);
duke@435 270 }
duke@435 271 if (JvmtiExport::should_post_data_dump()) {
duke@435 272 JvmtiExport::post_data_dump();
duke@435 273 }
duke@435 274 break;
duke@435 275 }
duke@435 276 default: {
duke@435 277 // Dispatch the signal to java
duke@435 278 HandleMark hm(THREAD);
coleenp@4037 279 Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_Signal(), THREAD);
duke@435 280 KlassHandle klass (THREAD, k);
duke@435 281 if (klass.not_null()) {
duke@435 282 JavaValue result(T_VOID);
duke@435 283 JavaCallArguments args;
duke@435 284 args.push_int(sig);
duke@435 285 JavaCalls::call_static(
duke@435 286 &result,
duke@435 287 klass,
coleenp@2497 288 vmSymbols::dispatch_name(),
coleenp@2497 289 vmSymbols::int_void_signature(),
duke@435 290 &args,
duke@435 291 THREAD
duke@435 292 );
duke@435 293 }
duke@435 294 if (HAS_PENDING_EXCEPTION) {
duke@435 295 // tty is initialized early so we don't expect it to be null, but
duke@435 296 // if it is we can't risk doing an initialization that might
duke@435 297 // trigger additional out-of-memory conditions
duke@435 298 if (tty != NULL) {
duke@435 299 char klass_name[256];
duke@435 300 char tmp_sig_name[16];
duke@435 301 const char* sig_name = "UNKNOWN";
coleenp@4037 302 InstanceKlass::cast(PENDING_EXCEPTION->klass())->
duke@435 303 name()->as_klass_external_name(klass_name, 256);
duke@435 304 if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
duke@435 305 sig_name = tmp_sig_name;
duke@435 306 warning("Exception %s occurred dispatching signal %s to handler"
duke@435 307 "- the VM may need to be forcibly terminated",
duke@435 308 klass_name, sig_name );
duke@435 309 }
duke@435 310 CLEAR_PENDING_EXCEPTION;
duke@435 311 }
duke@435 312 }
duke@435 313 }
duke@435 314 }
duke@435 315 }
duke@435 316
tschatzl@5701 317 void os::init_before_ergo() {
tschatzl@5701 318 // We need to initialize large page support here because ergonomics makes some
tschatzl@5701 319 // decisions based on large page support and the calculated large page size.
tschatzl@5701 320 large_page_init();
tschatzl@5701 321 }
duke@435 322
duke@435 323 void os::signal_init() {
duke@435 324 if (!ReduceSignalUsage) {
duke@435 325 // Setup JavaThread for processing signals
duke@435 326 EXCEPTION_MARK;
coleenp@4037 327 Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
duke@435 328 instanceKlassHandle klass (THREAD, k);
duke@435 329 instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
duke@435 330
duke@435 331 const char thread_name[] = "Signal Dispatcher";
duke@435 332 Handle string = java_lang_String::create_from_str(thread_name, CHECK);
duke@435 333
duke@435 334 // Initialize thread_oop to put it into the system threadGroup
duke@435 335 Handle thread_group (THREAD, Universe::system_thread_group());
duke@435 336 JavaValue result(T_VOID);
duke@435 337 JavaCalls::call_special(&result, thread_oop,
duke@435 338 klass,
coleenp@2497 339 vmSymbols::object_initializer_name(),
coleenp@2497 340 vmSymbols::threadgroup_string_void_signature(),
duke@435 341 thread_group,
duke@435 342 string,
duke@435 343 CHECK);
duke@435 344
never@1577 345 KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
duke@435 346 JavaCalls::call_special(&result,
duke@435 347 thread_group,
duke@435 348 group,
coleenp@2497 349 vmSymbols::add_method_name(),
coleenp@2497 350 vmSymbols::thread_void_signature(),
duke@435 351 thread_oop, // ARG 1
duke@435 352 CHECK);
duke@435 353
duke@435 354 os::signal_init_pd();
duke@435 355
duke@435 356 { MutexLocker mu(Threads_lock);
duke@435 357 JavaThread* signal_thread = new JavaThread(&signal_thread_entry);
duke@435 358
duke@435 359 // At this point it may be possible that no osthread was created for the
duke@435 360 // JavaThread due to lack of memory. We would have to throw an exception
duke@435 361 // in that case. However, since this must work and we do not allow
duke@435 362 // exceptions anyway, check and abort if this fails.
duke@435 363 if (signal_thread == NULL || signal_thread->osthread() == NULL) {
duke@435 364 vm_exit_during_initialization("java.lang.OutOfMemoryError",
duke@435 365 "unable to create new native thread");
duke@435 366 }
duke@435 367
duke@435 368 java_lang_Thread::set_thread(thread_oop(), signal_thread);
duke@435 369 java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
duke@435 370 java_lang_Thread::set_daemon(thread_oop());
duke@435 371
duke@435 372 signal_thread->set_threadObj(thread_oop());
duke@435 373 Threads::add(signal_thread);
duke@435 374 Thread::start(signal_thread);
duke@435 375 }
duke@435 376 // Handle ^BREAK
duke@435 377 os::signal(SIGBREAK, os::user_handler());
duke@435 378 }
duke@435 379 }
duke@435 380
duke@435 381
duke@435 382 void os::terminate_signal_thread() {
duke@435 383 if (!ReduceSignalUsage)
duke@435 384 signal_notify(sigexitnum_pd());
duke@435 385 }
duke@435 386
duke@435 387
duke@435 388 // --------------------- loading libraries ---------------------
duke@435 389
duke@435 390 typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *);
duke@435 391 extern struct JavaVM_ main_vm;
duke@435 392
duke@435 393 static void* _native_java_library = NULL;
duke@435 394
duke@435 395 void* os::native_java_library() {
duke@435 396 if (_native_java_library == NULL) {
duke@435 397 char buffer[JVM_MAXPATHLEN];
duke@435 398 char ebuf[1024];
duke@435 399
kamg@677 400 // Try to load verify dll first. In 1.3 java dll depends on it and is not
kamg@677 401 // always able to find it when the loading executable is outside the JDK.
duke@435 402 // In order to keep working with 1.2 we ignore any loading errors.
bpittore@4261 403 if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
bpittore@4261 404 "verify")) {
bpittore@4261 405 dll_load(buffer, ebuf, sizeof(ebuf));
bpittore@4261 406 }
duke@435 407
duke@435 408 // Load java dll
bpittore@4261 409 if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
bpittore@4261 410 "java")) {
bpittore@4261 411 _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
bpittore@4261 412 }
duke@435 413 if (_native_java_library == NULL) {
duke@435 414 vm_exit_during_initialization("Unable to load native library", ebuf);
duke@435 415 }
never@3156 416
never@3156 417 #if defined(__OpenBSD__)
never@3156 418 // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so
never@3156 419 // ignore errors
bpittore@4261 420 if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
bpittore@4261 421 "net")) {
bpittore@4261 422 dll_load(buffer, ebuf, sizeof(ebuf));
bpittore@4261 423 }
never@3156 424 #endif
kamg@677 425 }
kamg@677 426 static jboolean onLoaded = JNI_FALSE;
kamg@677 427 if (onLoaded) {
kamg@677 428 // We may have to wait to fire OnLoad until TLS is initialized.
kamg@677 429 if (ThreadLocalStorage::is_initialized()) {
kamg@677 430 // The JNI_OnLoad handling is normally done by method load in
kamg@677 431 // java.lang.ClassLoader$NativeLibrary, but the VM loads the base library
kamg@677 432 // explicitly so we have to check for JNI_OnLoad as well
kamg@677 433 const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS;
kamg@677 434 JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(
kamg@677 435 JNI_OnLoad_t, dll_lookup(_native_java_library, onLoadSymbols[0]));
kamg@677 436 if (JNI_OnLoad != NULL) {
kamg@677 437 JavaThread* thread = JavaThread::current();
kamg@677 438 ThreadToNativeFromVM ttn(thread);
kamg@677 439 HandleMark hm(thread);
kamg@677 440 jint ver = (*JNI_OnLoad)(&main_vm, NULL);
kamg@677 441 onLoaded = JNI_TRUE;
kamg@677 442 if (!Threads::is_supported_jni_version_including_1_1(ver)) {
kamg@677 443 vm_exit_during_initialization("Unsupported JNI version");
kamg@677 444 }
duke@435 445 }
duke@435 446 }
duke@435 447 }
duke@435 448 return _native_java_library;
duke@435 449 }
duke@435 450
bpittore@5585 451 /*
bpittore@5585 452 * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists.
bpittore@5585 453 * If check_lib == true then we are looking for an
bpittore@5585 454 * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if
bpittore@5585 455 * this library is statically linked into the image.
bpittore@5585 456 * If check_lib == false then we will look for the appropriate symbol in the
bpittore@5585 457 * executable if agent_lib->is_static_lib() == true or in the shared library
bpittore@5585 458 * referenced by 'handle'.
bpittore@5585 459 */
bpittore@5585 460 void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib,
bpittore@5585 461 const char *syms[], size_t syms_len) {
bpittore@5585 462 const char *lib_name;
bpittore@5585 463 void *handle = agent_lib->os_lib();
bpittore@5585 464 void *entryName = NULL;
bpittore@5585 465 char *agent_function_name;
bpittore@5585 466 size_t i;
bpittore@5585 467
bpittore@5585 468 // If checking, then use the agent name; otherwise test is_static_lib() to
bpittore@5585 469 // see how to process this lookup
bpittore@5585 470 lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL);
bpittore@5585 471 for (i = 0; i < syms_len; i++) {
bpittore@5585 472 agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path());
bpittore@5585 473 if (agent_function_name == NULL) {
bpittore@5585 474 break;
bpittore@5585 475 }
bpittore@5585 476 entryName = dll_lookup(handle, agent_function_name);
bpittore@5585 477 FREE_C_HEAP_ARRAY(char, agent_function_name, mtThread);
bpittore@5585 478 if (entryName != NULL) {
bpittore@5585 479 break;
bpittore@5585 480 }
bpittore@5585 481 }
bpittore@5585 482 return entryName;
bpittore@5585 483 }
bpittore@5585 484
bpittore@5585 485 // See if the passed in agent is statically linked into the VM image.
bpittore@5585 486 bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
bpittore@5585 487 size_t syms_len) {
bpittore@5585 488 void *ret;
bpittore@5585 489 void *proc_handle;
bpittore@5585 490 void *save_handle;
bpittore@5585 491
bpittore@5585 492 if (agent_lib->name() == NULL) {
bpittore@5585 493 return false;
bpittore@5585 494 }
bpittore@5585 495 proc_handle = get_default_process_handle();
bpittore@5585 496 // Check for Agent_OnLoad/Attach_lib_name function
bpittore@5585 497 save_handle = agent_lib->os_lib();
bpittore@5585 498 // We want to look in this process' symbol table.
bpittore@5585 499 agent_lib->set_os_lib(proc_handle);
bpittore@5585 500 ret = find_agent_function(agent_lib, true, syms, syms_len);
bpittore@5585 501 agent_lib->set_os_lib(save_handle);
bpittore@5585 502 if (ret != NULL) {
bpittore@5585 503 // Found an entry point like Agent_OnLoad_lib_name so we have a static agent
bpittore@5585 504 agent_lib->set_os_lib(proc_handle);
bpittore@5585 505 agent_lib->set_valid();
bpittore@5585 506 agent_lib->set_static_lib(true);
bpittore@5585 507 return true;
bpittore@5585 508 }
bpittore@5585 509 return false;
bpittore@5585 510 }
bpittore@5585 511
duke@435 512 // --------------------- heap allocation utilities ---------------------
duke@435 513
zgu@3900 514 char *os::strdup(const char *str, MEMFLAGS flags) {
duke@435 515 size_t size = strlen(str);
zgu@3900 516 char *dup_str = (char *)malloc(size + 1, flags);
duke@435 517 if (dup_str == NULL) return NULL;
duke@435 518 strcpy(dup_str, str);
duke@435 519 return dup_str;
duke@435 520 }
duke@435 521
duke@435 522
duke@435 523
duke@435 524 #ifdef ASSERT
duke@435 525 #define space_before (MallocCushion + sizeof(double))
duke@435 526 #define space_after MallocCushion
duke@435 527 #define size_addr_from_base(p) (size_t*)(p + space_before - sizeof(size_t))
duke@435 528 #define size_addr_from_obj(p) ((size_t*)p - 1)
duke@435 529 // MallocCushion: size of extra cushion allocated around objects with +UseMallocOnly
duke@435 530 // NB: cannot be debug variable, because these aren't set from the command line until
duke@435 531 // *after* the first few allocs already happened
duke@435 532 #define MallocCushion 16
duke@435 533 #else
duke@435 534 #define space_before 0
duke@435 535 #define space_after 0
duke@435 536 #define size_addr_from_base(p) should not use w/o ASSERT
duke@435 537 #define size_addr_from_obj(p) should not use w/o ASSERT
duke@435 538 #define MallocCushion 0
duke@435 539 #endif
duke@435 540 #define paranoid 0 /* only set to 1 if you suspect checking code has bug */
duke@435 541
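// For orientation, a sketch of the debug (ASSERT) block layout implied by the
// macros above and produced by os::malloc() below; offsets assume the common
// 64-bit case where sizeof(double) == sizeof(size_t) == 8:
//
//   base                                                 memblock (returned to caller)
//   |<-- MallocCushion -->|<----- sizeof(double) ----->| |
//   [ guard bytes         |   ...      size_t size     ] [ user data: size bytes ][ guard bytes ]
//                                                                                 |<- MallocCushion ->|
//
//   size_addr_from_base(base) == base + space_before - sizeof(size_t)
//   size_addr_from_obj(obj)   == (size_t*)obj - 1       // same word, reached from memblock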
duke@435 542 #ifdef ASSERT
duke@435 543 inline size_t get_size(void* obj) {
duke@435 544 size_t size = *size_addr_from_obj(obj);
jcoomes@1845 545 if (size < 0) {
jcoomes@1845 546 fatal(err_msg("free: size field of object #" PTR_FORMAT " was overwritten ("
jcoomes@1845 547 SIZE_FORMAT ")", obj, size));
jcoomes@1845 548 }
duke@435 549 return size;
duke@435 550 }
duke@435 551
duke@435 552 u_char* find_cushion_backwards(u_char* start) {
duke@435 553 u_char* p = start;
duke@435 554 while (p[ 0] != badResourceValue || p[-1] != badResourceValue ||
duke@435 555 p[-2] != badResourceValue || p[-3] != badResourceValue) p--;
duke@435 556 // ok, we have four consecutive marker bytes; find start
duke@435 557 u_char* q = p - 4;
duke@435 558 while (*q == badResourceValue) q--;
duke@435 559 return q + 1;
duke@435 560 }
duke@435 561
duke@435 562 u_char* find_cushion_forwards(u_char* start) {
duke@435 563 u_char* p = start;
duke@435 564 while (p[0] != badResourceValue || p[1] != badResourceValue ||
duke@435 565 p[2] != badResourceValue || p[3] != badResourceValue) p++;
duke@435 566 // ok, we have four consecutive marker bytes; find end of cushion
duke@435 567 u_char* q = p + 4;
duke@435 568 while (*q == badResourceValue) q++;
duke@435 569 return q - MallocCushion;
duke@435 570 }
duke@435 571
duke@435 572 void print_neighbor_blocks(void* ptr) {
duke@435 573 // find block allocated before ptr (not entirely crash-proof)
duke@435 574 if (MallocCushion < 4) {
duke@435 575 tty->print_cr("### cannot find previous block (MallocCushion < 4)");
duke@435 576 return;
duke@435 577 }
duke@435 578 u_char* start_of_this_block = (u_char*)ptr - space_before;
duke@435 579 u_char* end_of_prev_block_data = start_of_this_block - space_after -1;
duke@435 580 // look for cushion in front of prev. block
duke@435 581 u_char* start_of_prev_block = find_cushion_backwards(end_of_prev_block_data);
duke@435 582 ptrdiff_t size = *size_addr_from_base(start_of_prev_block);
duke@435 583 u_char* obj = start_of_prev_block + space_before;
duke@435 584 if (size <= 0 ) {
duke@435 585 // start is bad; may have been confused by OS data in between objects
duke@435 586 // search one more backwards
duke@435 587 start_of_prev_block = find_cushion_backwards(start_of_prev_block);
duke@435 588 size = *size_addr_from_base(start_of_prev_block);
duke@435 589 obj = start_of_prev_block + space_before;
duke@435 590 }
duke@435 591
duke@435 592 if (start_of_prev_block + space_before + size + space_after == start_of_this_block) {
kvn@2557 593 tty->print_cr("### previous object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
duke@435 594 } else {
kvn@2557 595 tty->print_cr("### previous object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", obj, size);
duke@435 596 }
duke@435 597
duke@435 598 // now find successor block
duke@435 599 u_char* start_of_next_block = (u_char*)ptr + *size_addr_from_obj(ptr) + space_after;
duke@435 600 start_of_next_block = find_cushion_forwards(start_of_next_block);
duke@435 601 u_char* next_obj = start_of_next_block + space_before;
duke@435 602 ptrdiff_t next_size = *size_addr_from_base(start_of_next_block);
duke@435 603 if (start_of_next_block[0] == badResourceValue &&
duke@435 604 start_of_next_block[1] == badResourceValue &&
duke@435 605 start_of_next_block[2] == badResourceValue &&
duke@435 606 start_of_next_block[3] == badResourceValue) {
kvn@2557 607 tty->print_cr("### next object: " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
duke@435 608 } else {
kvn@2557 609 tty->print_cr("### next object (not sure if correct): " PTR_FORMAT " (" SSIZE_FORMAT " bytes)", next_obj, next_size);
duke@435 610 }
duke@435 611 }
duke@435 612
duke@435 613
duke@435 614 void report_heap_error(void* memblock, void* bad, const char* where) {
kvn@2557 615 tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
kvn@2557 616 tty->print_cr("## memory stomp: byte at " PTR_FORMAT " %s object " PTR_FORMAT, bad, where, memblock);
duke@435 617 print_neighbor_blocks(memblock);
duke@435 618 fatal("memory stomping error");
duke@435 619 }
duke@435 620
duke@435 621 void verify_block(void* memblock) {
duke@435 622 size_t size = get_size(memblock);
duke@435 623 if (MallocCushion) {
duke@435 624 u_char* ptr = (u_char*)memblock - space_before;
duke@435 625 for (int i = 0; i < MallocCushion; i++) {
duke@435 626 if (ptr[i] != badResourceValue) {
duke@435 627 report_heap_error(memblock, ptr+i, "in front of");
duke@435 628 }
duke@435 629 }
duke@435 630 u_char* end = (u_char*)memblock + size + space_after;
duke@435 631 for (int j = -MallocCushion; j < 0; j++) {
duke@435 632 if (end[j] != badResourceValue) {
duke@435 633 report_heap_error(memblock, end+j, "after");
duke@435 634 }
duke@435 635 }
duke@435 636 }
duke@435 637 }
duke@435 638 #endif
duke@435 639
rdurbin@4802 640 //
rdurbin@4802 641 // This function supports testing of the malloc out of memory
rdurbin@4802 642 // condition without really running the system out of memory.
rdurbin@4802 643 //
rdurbin@4802 644 static u_char* testMalloc(size_t alloc_size) {
rdurbin@4808 645 assert(MallocMaxTestWords > 0, "sanity check");
rdurbin@4802 646
rdurbin@4808 647 if ((cur_malloc_words + (alloc_size / BytesPerWord)) > MallocMaxTestWords) {
rdurbin@4802 648 return NULL;
rdurbin@4802 649 }
rdurbin@4802 650
rdurbin@4802 651 u_char* ptr = (u_char*)::malloc(alloc_size);
rdurbin@4802 652
rdurbin@4808 653 if (ptr != NULL) {
rdurbin@4802 654 Atomic::add(((jint) (alloc_size / BytesPerWord)),
rdurbin@4802 655 (volatile jint *) &cur_malloc_words);
rdurbin@4802 656 }
rdurbin@4802 657 return ptr;
rdurbin@4802 658 }
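// Usage note (a sketch of the intended setup; exact flag availability depends
// on the build flavor): this path is only reached when MallocMaxTestWords is
// non-zero. Once the cumulative number of words handed out through os::malloc()
// crosses that limit, testMalloc() returns NULL and callers observe a simulated
// out-of-memory condition. The flag is meant for testing, so setting it on the
// command line (e.g. -XX:MallocMaxTestWords=<n>) is typically only possible on
// debug (non-product) builds.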
rdurbin@4802 659
zgu@3900 660 void* os::malloc(size_t size, MEMFLAGS memflags, address caller) {
kvn@2557 661 NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
kvn@2557 662 NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
duke@435 663
rbackman@5424 664 #ifdef ASSERT
rbackman@5424 665 // checking for the WatcherThread and crash_protection first
rbackman@5424 666 // since os::malloc can be called when the libjvm.{dll,so} is
rbackman@5424 667 // first loaded and we don't have a thread yet.
rbackman@5424 668 // try to find the thread after we see that the watcher thread
rbackman@5424 669 // exists and has crash protection.
rbackman@5424 670 WatcherThread *wt = WatcherThread::watcher_thread();
rbackman@5424 671 if (wt != NULL && wt->has_crash_protection()) {
rbackman@5424 672 Thread* thread = ThreadLocalStorage::get_thread_slow();
rbackman@5424 673 if (thread == wt) {
rbackman@5424 674 assert(!wt->has_crash_protection(),
rbackman@5424 675 "Can't malloc with crash protection from WatcherThread");
rbackman@5424 676 }
rbackman@5424 677 }
rbackman@5424 678 #endif
rbackman@5424 679
duke@435 680 if (size == 0) {
duke@435 681 // return a valid pointer if size is zero
duke@435 682 // if NULL is returned the calling functions assume out of memory.
duke@435 683 size = 1;
duke@435 684 }
rdurbin@4802 685
rdurbin@4802 686 const size_t alloc_size = size + space_before + space_after;
rdurbin@4802 687
rdurbin@4802 688 if (size > alloc_size) { // Check for rollover.
hseigel@4277 689 return NULL;
hseigel@4277 690 }
rdurbin@4802 691
duke@435 692 NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
rdurbin@4802 693
rdurbin@4802 694 u_char* ptr;
rdurbin@4802 695
rdurbin@4802 696 if (MallocMaxTestWords > 0) {
rdurbin@4802 697 ptr = testMalloc(alloc_size);
rdurbin@4802 698 } else {
rdurbin@4802 699 ptr = (u_char*)::malloc(alloc_size);
rdurbin@4802 700 }
zgu@3900 701
duke@435 702 #ifdef ASSERT
duke@435 703 if (ptr == NULL) return NULL;
duke@435 704 if (MallocCushion) {
duke@435 705 for (u_char* p = ptr; p < ptr + MallocCushion; p++) *p = (u_char)badResourceValue;
duke@435 706 u_char* end = ptr + space_before + size;
duke@435 707 for (u_char* pq = ptr+MallocCushion; pq < end; pq++) *pq = (u_char)uninitBlockPad;
duke@435 708 for (u_char* q = end; q < end + MallocCushion; q++) *q = (u_char)badResourceValue;
duke@435 709 }
duke@435 710 // put size just before data
duke@435 711 *size_addr_from_base(ptr) = size;
duke@435 712 #endif
duke@435 713 u_char* memblock = ptr + space_before;
duke@435 714 if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
kvn@2557 715 tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
duke@435 716 breakpoint();
duke@435 717 }
duke@435 718 debug_only(if (paranoid) verify_block(memblock));
kvn@2557 719 if (PrintMalloc && tty != NULL) tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, memblock);
zgu@3900 720
zgu@3900 721 // we do not track MallocCushion memory
zgu@3900 722 MemTracker::record_malloc((address)memblock, size, memflags, caller == 0 ? CALLER_PC : caller);
zgu@3900 723
duke@435 724 return memblock;
duke@435 725 }
duke@435 726
duke@435 727
zgu@3900 728 void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, address caller) {
duke@435 729 #ifndef ASSERT
kvn@2557 730 NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
kvn@2557 731 NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
zgu@5272 732 MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
zgu@3900 733 void* ptr = ::realloc(memblock, size);
zgu@4193 734 if (ptr != NULL) {
zgu@5272 735 tkr.record((address)memblock, (address)ptr, size, memflags,
zgu@3900 736 caller == 0 ? CALLER_PC : caller);
zgu@5272 737 } else {
zgu@5272 738 tkr.discard();
zgu@3900 739 }
zgu@3900 740 return ptr;
duke@435 741 #else
duke@435 742 if (memblock == NULL) {
zgu@3900 743 return malloc(size, memflags, (caller == 0 ? CALLER_PC : caller));
duke@435 744 }
duke@435 745 if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
kvn@2557 746 tty->print_cr("os::realloc caught " PTR_FORMAT, memblock);
duke@435 747 breakpoint();
duke@435 748 }
duke@435 749 verify_block(memblock);
duke@435 750 NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
duke@435 751 if (size == 0) return NULL;
duke@435 752 // always move the block
zgu@3900 753 void* ptr = malloc(size, memflags, caller == 0 ? CALLER_PC : caller);
kvn@2557 754 if (PrintMalloc) tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr);
duke@435 755 // Copy to new memory if malloc didn't fail
duke@435 756 if ( ptr != NULL ) {
duke@435 757 memcpy(ptr, memblock, MIN2(size, get_size(memblock)));
duke@435 758 if (paranoid) verify_block(ptr);
duke@435 759 if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
kvn@2557 760 tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
duke@435 761 breakpoint();
duke@435 762 }
duke@435 763 free(memblock);
duke@435 764 }
duke@435 765 return ptr;
duke@435 766 #endif
duke@435 767 }
duke@435 768
duke@435 769
zgu@3900 770 void os::free(void *memblock, MEMFLAGS memflags) {
kvn@2557 771 NOT_PRODUCT(inc_stat_counter(&num_frees, 1));
duke@435 772 #ifdef ASSERT
duke@435 773 if (memblock == NULL) return;
duke@435 774 if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) {
kvn@2557 775 if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock);
duke@435 776 breakpoint();
duke@435 777 }
duke@435 778 verify_block(memblock);
duke@435 779 NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());
duke@435 780 // Added by detlefs.
duke@435 781 if (MallocCushion) {
duke@435 782 u_char* ptr = (u_char*)memblock - space_before;
duke@435 783 for (u_char* p = ptr; p < ptr + MallocCushion; p++) {
duke@435 784 guarantee(*p == badResourceValue,
duke@435 785 "Thing freed should be malloc result.");
duke@435 786 *p = (u_char)freeBlockPad;
duke@435 787 }
duke@435 788 size_t size = get_size(memblock);
kvn@2557 789 inc_stat_counter(&free_bytes, size);
duke@435 790 u_char* end = ptr + space_before + size;
duke@435 791 for (u_char* q = end; q < end + MallocCushion; q++) {
duke@435 792 guarantee(*q == badResourceValue,
duke@435 793 "Thing freed should be malloc result.");
duke@435 794 *q = (u_char)freeBlockPad;
duke@435 795 }
kvn@2557 796 if (PrintMalloc && tty != NULL)
coleenp@2615 797 fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)memblock);
kvn@2557 798 } else if (PrintMalloc && tty != NULL) {
kvn@2557 799 // tty->print_cr("os::free %p", memblock);
coleenp@2615 800 fprintf(stderr, "os::free " PTR_FORMAT "\n", (uintptr_t)memblock);
duke@435 801 }
duke@435 802 #endif
zgu@3900 803 MemTracker::record_free((address)memblock, memflags);
zgu@3900 804
duke@435 805 ::free((char*)memblock - space_before);
duke@435 806 }
duke@435 807
duke@435 808 void os::init_random(long initval) {
duke@435 809 _rand_seed = initval;
duke@435 810 }
duke@435 811
duke@435 812
duke@435 813 long os::random() {
duke@435 814 /* standard, well-known linear congruential random generator with
duke@435 815 * next_rand = (16807*seed) mod (2**31-1)
duke@435 816 * see
duke@435 817 * (1) "Random Number Generators: Good Ones Are Hard to Find",
duke@435 818 * S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988),
duke@435 819 * (2) "Two Fast Implementations of the 'Minimal Standard' Random
duke@435 820 * Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88.
duke@435 821 */
duke@435 822 const long a = 16807;
duke@435 823 const unsigned long m = 2147483647;
duke@435 824 const long q = m / a; assert(q == 127773, "weird math");
duke@435 825 const long r = m % a; assert(r == 2836, "weird math");
duke@435 826
duke@435 827 // compute az=2^31p+q
duke@435 828 unsigned long lo = a * (long)(_rand_seed & 0xFFFF);
duke@435 829 unsigned long hi = a * (long)((unsigned long)_rand_seed >> 16);
duke@435 830 lo += (hi & 0x7FFF) << 16;
duke@435 831
duke@435 832 // if q overflowed, ignore the overflow and increment q
duke@435 833 if (lo > m) {
duke@435 834 lo &= m;
duke@435 835 ++lo;
duke@435 836 }
duke@435 837 lo += hi >> 15;
duke@435 838
duke@435 839 // if (p+q) overflowed, ignore the overflow and increment (p+q)
duke@435 840 if (lo > m) {
duke@435 841 lo &= m;
duke@435 842 ++lo;
duke@435 843 }
duke@435 844 return (_rand_seed = lo);
duke@435 845 }
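// A reference version of the comment's recurrence, written with plain 64-bit
// arithmetic instead of Carta's overflow trick (sketch only; nothing calls it).
// Starting from seed 1, the 10000th value produced this way is 1043618065,
// the same check value that test_random() asserts for os::random() below.
static long park_miller_reference_next(long seed) {
  const julong a = 16807;
  const julong m = 2147483647;              // 2^31 - 1
  return (long)(((julong)seed * a) % m);    // next = (a * seed) mod m, for seed in [1, m-1]
}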
duke@435 846
duke@435 847 // The INITIALIZED state is distinguished from the SUSPENDED state because the
duke@435 848 // conditions in which a thread is first started are different from those in which
duke@435 849 // a suspension is resumed. These differences make it hard for us to apply,
duke@435 850 // when starting threads, the tougher checks that we want to do when resuming them.
duke@435 851 // However, when start_thread is called as a result of Thread.start on a Java
duke@435 852 // thread, the operation is synchronized on the Java Thread object. So there
duke@435 853 // cannot be a race to start the thread and hence for the thread to exit while
duke@435 854 // we are working on it. Non-Java threads that start Java threads either have
duke@435 855 // to do so in a context in which races are impossible, or should do appropriate
duke@435 856 // locking.
duke@435 857
duke@435 858 void os::start_thread(Thread* thread) {
duke@435 859 // guard suspend/resume
duke@435 860 MutexLockerEx ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
duke@435 861 OSThread* osthread = thread->osthread();
duke@435 862 osthread->set_state(RUNNABLE);
duke@435 863 pd_start_thread(thread);
duke@435 864 }
duke@435 865
duke@435 866 //---------------------------------------------------------------------------
duke@435 867 // Helper functions for fatal error handler
duke@435 868
duke@435 869 void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) {
duke@435 870 assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking");
duke@435 871
duke@435 872 int cols = 0;
duke@435 873 int cols_per_line = 0;
duke@435 874 switch (unitsize) {
duke@435 875 case 1: cols_per_line = 16; break;
duke@435 876 case 2: cols_per_line = 8; break;
duke@435 877 case 4: cols_per_line = 4; break;
duke@435 878 case 8: cols_per_line = 2; break;
duke@435 879 default: return;
duke@435 880 }
duke@435 881
duke@435 882 address p = start;
duke@435 883 st->print(PTR_FORMAT ": ", start);
duke@435 884 while (p < end) {
duke@435 885 switch (unitsize) {
duke@435 886 case 1: st->print("%02x", *(u1*)p); break;
duke@435 887 case 2: st->print("%04x", *(u2*)p); break;
duke@435 888 case 4: st->print("%08x", *(u4*)p); break;
duke@435 889 case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break;
duke@435 890 }
duke@435 891 p += unitsize;
duke@435 892 cols++;
duke@435 893 if (cols >= cols_per_line && p < end) {
duke@435 894 cols = 0;
duke@435 895 st->cr();
duke@435 896 st->print(PTR_FORMAT ": ", p);
duke@435 897 } else {
duke@435 898 st->print(" ");
duke@435 899 }
duke@435 900 }
duke@435 901 st->cr();
duke@435 902 }
duke@435 903
duke@435 904 void os::print_environment_variables(outputStream* st, const char** env_list,
duke@435 905 char* buffer, int len) {
duke@435 906 if (env_list) {
duke@435 907 st->print_cr("Environment Variables:");
duke@435 908
duke@435 909 for (int i = 0; env_list[i] != NULL; i++) {
duke@435 910 if (getenv(env_list[i], buffer, len)) {
duke@435 911 st->print(env_list[i]);
duke@435 912 st->print("=");
duke@435 913 st->print_cr(buffer);
duke@435 914 }
duke@435 915 }
duke@435 916 }
duke@435 917 }
duke@435 918
duke@435 919 void os::print_cpu_info(outputStream* st) {
duke@435 920 // cpu
duke@435 921 st->print("CPU:");
duke@435 922 st->print("total %d", os::processor_count());
duke@435 923 // It's not safe to query number of active processors after crash
duke@435 924 // st->print("(active %d)", os::active_processor_count());
duke@435 925 st->print(" %s", VM_Version::cpu_features());
duke@435 926 st->cr();
jcoomes@2997 927 pd_print_cpu_info(st);
duke@435 928 }
duke@435 929
duke@435 930 void os::print_date_and_time(outputStream *st) {
duke@435 931 time_t tloc;
duke@435 932 (void)time(&tloc);
duke@435 933 st->print("time: %s", ctime(&tloc)); // ctime adds newline.
duke@435 934
duke@435 935 double t = os::elapsedTime();
duke@435 936 // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in
duke@435 937 // Linux. Must be a bug in glibc ? Workaround is to round "t" to int
duke@435 938 // before printf. We lost some precision, but who cares?
duke@435 939 st->print_cr("elapsed time: %d seconds", (int)t);
duke@435 940 }
duke@435 941
bobv@2036 942 // moved from debug.cpp (used to be find()) but still called from there
never@2262 943 // The verbose parameter is only set by the debug code in one case
never@2262 944 void os::print_location(outputStream* st, intptr_t x, bool verbose) {
bobv@2036 945 address addr = (address)x;
bobv@2036 946 CodeBlob* b = CodeCache::find_blob_unsafe(addr);
bobv@2036 947 if (b != NULL) {
bobv@2036 948 if (b->is_buffer_blob()) {
bobv@2036 949 // the interpreter is generated into a buffer blob
bobv@2036 950 InterpreterCodelet* i = Interpreter::codelet_containing(addr);
bobv@2036 951 if (i != NULL) {
twisti@3969 952 st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", addr, (int)(addr - i->code_begin()));
bobv@2036 953 i->print_on(st);
bobv@2036 954 return;
bobv@2036 955 }
bobv@2036 956 if (Interpreter::contains(addr)) {
bobv@2036 957 st->print_cr(INTPTR_FORMAT " is pointing into interpreter code"
bobv@2036 958 " (not bytecode specific)", addr);
bobv@2036 959 return;
bobv@2036 960 }
bobv@2036 961 //
bobv@2036 962 if (AdapterHandlerLibrary::contains(b)) {
twisti@3969 963 st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", addr, (int)(addr - b->code_begin()));
bobv@2036 964 AdapterHandlerLibrary::print_handler_on(st, b);
bobv@2036 965 }
bobv@2036 966 // the stubroutines are generated into a buffer blob
bobv@2036 967 StubCodeDesc* d = StubCodeDesc::desc_for(addr);
bobv@2036 968 if (d != NULL) {
twisti@3969 969 st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", addr, (int)(addr - d->begin()));
bobv@2036 970 d->print_on(st);
twisti@3969 971 st->cr();
bobv@2036 972 return;
bobv@2036 973 }
bobv@2036 974 if (StubRoutines::contains(addr)) {
bobv@2036 975 st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) "
bobv@2036 976 "stub routine", addr);
bobv@2036 977 return;
bobv@2036 978 }
bobv@2036 979 // the InlineCacheBuffer is using stubs generated into a buffer blob
bobv@2036 980 if (InlineCacheBuffer::contains(addr)) {
bobv@2036 981 st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr);
bobv@2036 982 return;
bobv@2036 983 }
bobv@2036 984 VtableStub* v = VtableStubs::stub_containing(addr);
bobv@2036 985 if (v != NULL) {
twisti@3969 986 st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", addr, (int)(addr - v->entry_point()));
bobv@2036 987 v->print_on(st);
twisti@3969 988 st->cr();
bobv@2036 989 return;
bobv@2036 990 }
bobv@2036 991 }
twisti@3969 992 nmethod* nm = b->as_nmethod_or_null();
twisti@3969 993 if (nm != NULL) {
bobv@2036 994 ResourceMark rm;
twisti@3969 995 st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT,
twisti@3969 996 addr, (int)(addr - nm->entry_point()), nm);
twisti@3969 997 if (verbose) {
twisti@3969 998 st->print(" for ");
twisti@3969 999 nm->method()->print_value_on(st);
twisti@3969 1000 }
stefank@4127 1001 st->cr();
twisti@3969 1002 nm->print_nmethod(verbose);
bobv@2036 1003 return;
bobv@2036 1004 }
twisti@3969 1005 st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", addr, (int)(addr - b->code_begin()));
bobv@2036 1006 b->print_on(st);
bobv@2036 1007 return;
bobv@2036 1008 }
bobv@2036 1009
bobv@2036 1010 if (Universe::heap()->is_in(addr)) {
bobv@2036 1011 HeapWord* p = Universe::heap()->block_start(addr);
bobv@2036 1012 bool print = false;
bobv@2036 1013 // If we couldn't find it, it just may mean that the heap wasn't parseable
bobv@2036 1014 // See if we were just given an oop directly
bobv@2036 1015 if (p != NULL && Universe::heap()->block_is_obj(p)) {
bobv@2036 1016 print = true;
bobv@2036 1017 } else if (p == NULL && ((oopDesc*)addr)->is_oop()) {
bobv@2036 1018 p = (HeapWord*) addr;
bobv@2036 1019 print = true;
bobv@2036 1020 }
bobv@2036 1021 if (print) {
stefank@4125 1022 if (p == (HeapWord*) addr) {
stefank@4125 1023 st->print_cr(INTPTR_FORMAT " is an oop", addr);
stefank@4125 1024 } else {
stefank@4125 1025 st->print_cr(INTPTR_FORMAT " is pointing into object: " INTPTR_FORMAT, addr, p);
stefank@4125 1026 }
bobv@2036 1027 oop(p)->print_on(st);
bobv@2036 1028 return;
bobv@2036 1029 }
bobv@2036 1030 } else {
bobv@2036 1031 if (Universe::heap()->is_in_reserved(addr)) {
bobv@2036 1032 st->print_cr(INTPTR_FORMAT " is an unallocated location "
bobv@2036 1033 "in the heap", addr);
bobv@2036 1034 return;
bobv@2036 1035 }
bobv@2036 1036 }
bobv@2036 1037 if (JNIHandles::is_global_handle((jobject) addr)) {
bobv@2036 1038 st->print_cr(INTPTR_FORMAT " is a global jni handle", addr);
bobv@2036 1039 return;
bobv@2036 1040 }
bobv@2036 1041 if (JNIHandles::is_weak_global_handle((jobject) addr)) {
bobv@2036 1042 st->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr);
bobv@2036 1043 return;
bobv@2036 1044 }
bobv@2036 1045 #ifndef PRODUCT
bobv@2036 1046 // we don't keep the block list in product mode
bobv@2036 1047 if (JNIHandleBlock::any_contains((jobject) addr)) {
bobv@2036 1048 st->print_cr(INTPTR_FORMAT " is a local jni handle", addr);
bobv@2036 1049 return;
bobv@2036 1050 }
bobv@2036 1051 #endif
bobv@2036 1052
bobv@2036 1053 for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
bobv@2036 1054 // Check for privilege stack
bobv@2036 1055 if (thread->privileged_stack_top() != NULL &&
bobv@2036 1056 thread->privileged_stack_top()->contains(addr)) {
bobv@2036 1057 st->print_cr(INTPTR_FORMAT " is pointing into the privilege stack "
bobv@2036 1058 "for thread: " INTPTR_FORMAT, addr, thread);
never@2262 1059 if (verbose) thread->print_on(st);
bobv@2036 1060 return;
bobv@2036 1061 }
bobv@2036 1062 // If the addr is a java thread print information about that.
bobv@2036 1063 if (addr == (address)thread) {
never@2262 1064 if (verbose) {
never@2262 1065 thread->print_on(st);
never@2262 1066 } else {
never@2262 1067 st->print_cr(INTPTR_FORMAT " is a thread", addr);
never@2262 1068 }
bobv@2036 1069 return;
bobv@2036 1070 }
bobv@2036 1071 // If the addr is in the stack region for this thread then report that
bobv@2036 1072 // and print thread info
bobv@2036 1073 if (thread->stack_base() >= addr &&
bobv@2036 1074 addr > (thread->stack_base() - thread->stack_size())) {
bobv@2036 1075 st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: "
bobv@2036 1076 INTPTR_FORMAT, addr, thread);
never@2262 1077 if (verbose) thread->print_on(st);
bobv@2036 1078 return;
bobv@2036 1079 }
bobv@2036 1080
bobv@2036 1081 }
coleenp@4037 1082
coleenp@4037 1083 #ifndef PRODUCT
coleenp@4037 1084 // Check if in metaspace.
coleenp@4037 1085 if (ClassLoaderDataGraph::contains((address)addr)) {
coleenp@4037 1086 // Use addr->print() from the debugger instead (not here)
coleenp@4037 1087 st->print_cr(INTPTR_FORMAT
coleenp@4037 1088 " is pointing into metadata", addr);
coleenp@4037 1089 return;
coleenp@4037 1090 }
coleenp@4037 1091 #endif
coleenp@4037 1092
bobv@2036 1093 // Try an OS specific find
bobv@2036 1094 if (os::find(addr, st)) {
bobv@2036 1095 return;
bobv@2036 1096 }
bobv@2036 1097
never@2262 1098 st->print_cr(INTPTR_FORMAT " is an unknown value", addr);
bobv@2036 1099 }
duke@435 1100
duke@435 1101 // Looks like all platforms except IA64 can use the same function to check
duke@435 1102 // if C stack is walkable beyond current frame. The check for fp() is not
duke@435 1103 // necessary on Sparc, but it's harmless.
duke@435 1104 bool os::is_first_C_frame(frame* fr) {
morris@4535 1105 #if defined(IA64) && !defined(_WIN32)
morris@4535 1106 // On IA64 we have to check if the callers bsp is still valid
morris@4535 1107 // (i.e. within the register stack bounds).
morris@4535 1108 // Notice: this only works for threads created by the VM and only if
morris@4535 1109 // we walk the current stack!!! If we want to be able to walk
morris@4535 1110 // arbitrary other threads, we'll have to somehow store the thread
morris@4535 1111 // object in the frame.
morris@4535 1112 Thread *thread = Thread::current();
morris@4535 1113 if ((address)fr->fp() <=
morris@4535 1114 thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) {
morris@4535 1115 // This check is a little hacky, because on Linux the first C
morris@4535 1116 // frame's ('start_thread') register stack frame starts at
morris@4535 1117 // "register_stack_base + 0x48" while on HPUX, the first C frame's
morris@4535 1118 // ('__pthread_bound_body') register stack frame seems to really
morris@4535 1119 // start at "register_stack_base".
morris@4535 1120 return true;
morris@4535 1121 } else {
morris@4535 1122 return false;
morris@4535 1123 }
morris@4535 1124 #elif defined(IA64) && defined(_WIN32)
duke@435 1125 return true;
morris@4535 1126 #else
duke@435 1127 // Load up sp, fp, sender sp and sender fp, check for reasonable values.
duke@435 1128 // Check usp first, because if that's bad the other accessors may fault
duke@435 1129 // on some architectures. Ditto ufp second, etc.
duke@435 1130 uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1);
duke@435 1131 // sp on amd can be 32 bit aligned.
duke@435 1132 uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1);
duke@435 1133
duke@435 1134 uintptr_t usp = (uintptr_t)fr->sp();
duke@435 1135 if ((usp & sp_align_mask) != 0) return true;
duke@435 1136
duke@435 1137 uintptr_t ufp = (uintptr_t)fr->fp();
duke@435 1138 if ((ufp & fp_align_mask) != 0) return true;
duke@435 1139
duke@435 1140 uintptr_t old_sp = (uintptr_t)fr->sender_sp();
duke@435 1141 if ((old_sp & sp_align_mask) != 0) return true;
duke@435 1142 if (old_sp == 0 || old_sp == (uintptr_t)-1) return true;
duke@435 1143
duke@435 1144 uintptr_t old_fp = (uintptr_t)fr->link();
duke@435 1145 if ((old_fp & fp_align_mask) != 0) return true;
duke@435 1146 if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true;
duke@435 1147
duke@435 1148 // stack grows downwards; if old_fp is below current fp or if the stack
duke@435 1149 // frame is too large, either the stack is corrupted or fp is not saved
duke@435 1150 // on stack (i.e. on x86, ebp may be used as general register). The stack
duke@435 1151 // is not walkable beyond current frame.
duke@435 1152 if (old_fp < ufp) return true;
duke@435 1153 if (old_fp - ufp > 64 * K) return true;
duke@435 1154
duke@435 1155 return false;
morris@4535 1156 #endif
duke@435 1157 }
duke@435 1158
duke@435 1159 #ifdef ASSERT
duke@435 1160 extern "C" void test_random() {
duke@435 1161 const double m = 2147483647;
duke@435 1162 double mean = 0.0, variance = 0.0, t;
duke@435 1163 long reps = 10000;
duke@435 1164 unsigned long seed = 1;
duke@435 1165
duke@435 1166 tty->print_cr("seed %ld for %ld repeats...", seed, reps);
duke@435 1167 os::init_random(seed);
duke@435 1168 long num;
duke@435 1169 for (int k = 0; k < reps; k++) {
duke@435 1170 num = os::random();
duke@435 1171 double u = (double)num / m;
duke@435 1172 assert(u >= 0.0 && u <= 1.0, "bad random number!");
duke@435 1173
duke@435 1174 // calculate mean and variance of the random sequence
duke@435 1175 mean += u;
duke@435 1176 variance += (u*u);
duke@435 1177 }
duke@435 1178 mean /= reps;
duke@435 1179 variance /= (reps - 1);
duke@435 1180
duke@435 1181 assert(num == 1043618065, "bad seed");
duke@435 1182 tty->print_cr("mean of the 1st 10000 numbers: %f", mean);
duke@435 1183 tty->print_cr("variance of the 1st 10000 numbers: %f", variance);
duke@435 1184 const double eps = 0.0001;
duke@435 1185 t = fabsd(mean - 0.5018);
duke@435 1186 assert(t < eps, "bad mean");
duke@435 1187 t = (variance - 0.3355) < 0.0 ? -(variance - 0.3355) : variance - 0.3355;
duke@435 1188 assert(t < eps, "bad variance");
duke@435 1189 }
duke@435 1190 #endif
duke@435 1191
duke@435 1192
duke@435 1193 // Set up the boot classpath.
duke@435 1194
duke@435 1195 char* os::format_boot_path(const char* format_string,
duke@435 1196 const char* home,
duke@435 1197 int home_len,
duke@435 1198 char fileSep,
duke@435 1199 char pathSep) {
duke@435 1200 assert((fileSep == '/' && pathSep == ':') ||
duke@435 1201 (fileSep == '\\' && pathSep == ';'), "unexpected separator chars");
duke@435 1202
duke@435 1203 // Scan the format string to determine the length of the actual
duke@435 1204 // boot classpath, and handle platform dependencies as well.
duke@435 1205 int formatted_path_len = 0;
duke@435 1206 const char* p;
duke@435 1207 for (p = format_string; *p != 0; ++p) {
duke@435 1208 if (*p == '%') formatted_path_len += home_len - 1;
duke@435 1209 ++formatted_path_len;
duke@435 1210 }
duke@435 1211
zgu@3900 1212 char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal);
duke@435 1213 if (formatted_path == NULL) {
duke@435 1214 return NULL;
duke@435 1215 }
duke@435 1216
duke@435 1217 // Create boot classpath from format, substituting separator chars and
duke@435 1218 // java home directory.
duke@435 1219 char* q = formatted_path;
duke@435 1220 for (p = format_string; *p != 0; ++p) {
duke@435 1221 switch (*p) {
duke@435 1222 case '%':
duke@435 1223 strcpy(q, home);
duke@435 1224 q += home_len;
duke@435 1225 break;
duke@435 1226 case '/':
duke@435 1227 *q++ = fileSep;
duke@435 1228 break;
duke@435 1229 case ':':
duke@435 1230 *q++ = pathSep;
duke@435 1231 break;
duke@435 1232 default:
duke@435 1233 *q++ = *p;
duke@435 1234 }
duke@435 1235 }
duke@435 1236 *q = '\0';
duke@435 1237
duke@435 1238 assert((q - formatted_path) == formatted_path_len, "formatted_path size botched");
duke@435 1239 return formatted_path;
duke@435 1240 }
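// A worked example (the java home value is made up): with the Unix separators
// asserted above, fileSep == '/' and pathSep == ':',
//
//   format_boot_path("%/lib/rt.jar:%/classes", "/opt/jdk", 8, '/', ':')
//
// returns the caller-owned C-heap string "/opt/jdk/lib/rt.jar:/opt/jdk/classes".
// Each '%' expands to the java home, while '/' and ':' in the format are
// rewritten to fileSep and pathSep, so the same format string also works with
// '\\' and ';' on Windows.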
duke@435 1241
duke@435 1242
duke@435 1243 bool os::set_boot_path(char fileSep, char pathSep) {
duke@435 1244 const char* home = Arguments::get_java_home();
duke@435 1245 int home_len = (int)strlen(home);
duke@435 1246
duke@435 1247 static const char* meta_index_dir_format = "%/lib/";
duke@435 1248 static const char* meta_index_format = "%/lib/meta-index";
duke@435 1249 char* meta_index = format_boot_path(meta_index_format, home, home_len, fileSep, pathSep);
duke@435 1250 if (meta_index == NULL) return false;
duke@435 1251 char* meta_index_dir = format_boot_path(meta_index_dir_format, home, home_len, fileSep, pathSep);
duke@435 1252 if (meta_index_dir == NULL) return false;
duke@435 1253 Arguments::set_meta_index_path(meta_index, meta_index_dir);
duke@435 1254
duke@435 1255 // Any modification to the JAR-file list for the boot classpath must be
duke@435 1256 // aligned with install/install/make/common/Pack.gmk. Note: boot class
duke@435 1257 // path class JARs are stripped for StackMapTable to reduce download size.
duke@435 1258 static const char classpath_format[] =
duke@435 1259 "%/lib/resources.jar:"
duke@435 1260 "%/lib/rt.jar:"
duke@435 1261 "%/lib/sunrsasign.jar:"
duke@435 1262 "%/lib/jsse.jar:"
duke@435 1263 "%/lib/jce.jar:"
duke@435 1264 "%/lib/charsets.jar:"
phh@3427 1265 "%/lib/jfr.jar:"
dcubed@3202 1266 #ifdef __APPLE__
dcubed@3202 1267 "%/lib/JObjC.jar:"
dcubed@3202 1268 #endif
duke@435 1269 "%/classes";
duke@435 1270 char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep);
duke@435 1271 if (sysclasspath == NULL) return false;
duke@435 1272 Arguments::set_sysclasspath(sysclasspath);
duke@435 1273
duke@435 1274 return true;
duke@435 1275 }
duke@435 1276
phh@1126 1277 /*
phh@1126 1278 * Splits a path based on its separator; the number of
phh@1126 1279 * elements is returned in n.
phh@1126 1280 * It is the caller's responsibility to:
phh@1126 1281 * a> check the value of n, which may be 0.
phh@1126 1282 * b> ignore any empty path elements.
phh@1126 1283 * c> free up the data.
phh@1126 1284 */
phh@1126 1285 char** os::split_path(const char* path, int* n) {
phh@1126 1286 *n = 0;
phh@1126 1287 if (path == NULL || strlen(path) == 0) {
phh@1126 1288 return NULL;
phh@1126 1289 }
phh@1126 1290 const char psepchar = *os::path_separator();
zgu@3900 1291 char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal);
phh@1126 1292 if (inpath == NULL) {
phh@1126 1293 return NULL;
phh@1126 1294 }
bpittore@4261 1295 strcpy(inpath, path);
phh@1126 1296 int count = 1;
phh@1126 1297 char* p = strchr(inpath, psepchar);
phh@1126 1298 // Get a count of elements to allocate memory
phh@1126 1299 while (p != NULL) {
phh@1126 1300 count++;
phh@1126 1301 p++;
phh@1126 1302 p = strchr(p, psepchar);
phh@1126 1303 }
zgu@3900 1304 char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal);
phh@1126 1305 if (opath == NULL) {
              FREE_C_HEAP_ARRAY(char, inpath, mtInternal);
phh@1126 1306 return NULL;
phh@1126 1307 }
phh@1126 1308
phh@1126 1309 // do the actual splitting
phh@1126 1310 p = inpath;
phh@1126 1311 for (int i = 0 ; i < count ; i++) {
phh@1126 1312 size_t len = strcspn(p, os::path_separator());
phh@1126 1313 if (len > JVM_MAXPATHLEN) {
              // free everything allocated so far before bailing out
              for (int j = 0; j < i; j++) {
                FREE_C_HEAP_ARRAY(char, opath[j], mtInternal);
              }
              FREE_C_HEAP_ARRAY(char*, opath, mtInternal);
              FREE_C_HEAP_ARRAY(char, inpath, mtInternal);
phh@1126 1314 return NULL;
phh@1126 1315 }
phh@1126 1316 // allocate the string and add terminator storage
zgu@3900 1317 char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal);
phh@1126 1318 if (s == NULL) {
phh@1126 1319 return NULL;
phh@1126 1320 }
phh@1126 1321 strncpy(s, p, len);
phh@1126 1322 s[len] = '\0';
phh@1126 1323 opath[i] = s;
phh@1126 1324 p += len + 1;
phh@1126 1325 }
zgu@3900 1326 FREE_C_HEAP_ARRAY(char, inpath, mtInternal);
phh@1126 1327 *n = count;
phh@1126 1328 return opath;
phh@1126 1329 }
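// A usage sketch with assumed inputs (not taken from this file): on a platform
// whose path separator is ':',
//   int n;
//   char** elems = os::split_path("a.jar:b.jar", &n);
// leaves n == 2 with elems[0] == "a.jar" and elems[1] == "b.jar". As the
// contract above notes, the caller must check n, skip empty elements (for
// example "a.jar::b.jar" produces an empty middle entry), and eventually
// release every element and the array itself with FREE_C_HEAP_ARRAY.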
phh@1126 1330
duke@435 1331 void os::set_memory_serialize_page(address page) {
duke@435 1332 int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
duke@435 1333 _mem_serialize_page = (volatile int32_t *)page;
duke@435 1334 // We initialize the serialization page shift count here
duke@435 1335 // We assume a cache line size of 64 bytes
duke@435 1336 assert(SerializePageShiftCount == count,
duke@435 1337 "thread size changed, fix SerializePageShiftCount constant");
duke@435 1338 set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t)));
duke@435 1339 }
duke@435 1340
xlu@490 1341 static volatile intptr_t SerializePageLock = 0;
xlu@490 1342
duke@435 1343 // This method is called from the signal handler when a SIGSEGV occurs while the
duke@435 1344 // current thread tries to store to the "read-only" memory serialize page during a
duke@435 1345 // state transition.
duke@435 1346 void os::block_on_serialize_page_trap() {
duke@435 1347 if (TraceSafepoint) {
duke@435 1348 tty->print_cr("Block until the serialize page permission restored");
duke@435 1349 }
xlu@490 1350 // While the VMThread holds the SerializePageLock to modify the access
duke@435 1351 // permission of the memory serialize page, the following call
duke@435 1352 // will block until the permission of that page is restored to rw.
duke@435 1353 // Generally, it is unsafe to manipulate locks in signal handlers, but in
duke@435 1354 // this case, it's OK as the signal is synchronous and we know precisely when
xlu@490 1355 // it can occur.
xlu@490 1356 Thread::muxAcquire(&SerializePageLock, "set_memory_serialize_page");
xlu@490 1357 Thread::muxRelease(&SerializePageLock);
duke@435 1358 }
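// Note: acquiring and then immediately releasing SerializePageLock does not
// change any state; it simply parks the faulting thread until
// serialize_thread_states() below has released the lock, i.e. until the page
// is writable again, so that the interrupted store can be retried once the
// signal handler returns.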
duke@435 1359
duke@435 1360 // Serialize all thread state variables
duke@435 1361 void os::serialize_thread_states() {
duke@435 1362 // On some platforms such as Solaris and Linux, restoring the page permission
duke@435 1363 // has been observed to take much longer than expected, due to scheduler
duke@435 1364 // starvation and similar problems. To avoid the long synchronization time and
xlu@490 1365 // expensive page-trap spinning, 'SerializePageLock' is used to block the
xlu@490 1366 // mutator thread if such a case is encountered. See bug 6546278 for details.
xlu@490 1367 Thread::muxAcquire(&SerializePageLock, "serialize_thread_states");
coleenp@672 1368 os::protect_memory((char *)os::get_memory_serialize_page(),
coleenp@912 1369 os::vm_page_size(), MEM_PROT_READ);
coleenp@912 1370 os::protect_memory((char *)os::get_memory_serialize_page(),
coleenp@912 1371 os::vm_page_size(), MEM_PROT_RW);
xlu@490 1372 Thread::muxRelease(&SerializePageLock);
duke@435 1373 }
duke@435 1374
duke@435 1375 // Returns true if the current stack pointer is above the stack shadow
duke@435 1376 // pages, false otherwise.
duke@435 1377
duke@435 1378 bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) {
duke@435 1379 assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check");
duke@435 1380 address sp = current_stack_pointer();
duke@435 1381 // Check if we have StackShadowPages above the yellow zone. This parameter
twisti@1040 1382 // depends on the maximum depth of VM call stack that is possible from
duke@435 1383 // the stack overflow handler. An 'instanceof' in the stack overflow
duke@435 1384 // handler or a println uses at least 8k of stack in VM code and native
duke@435 1385 // code, respectively.
duke@435 1386 const int framesize_in_bytes =
duke@435 1387 Interpreter::size_top_interpreter_activation(method()) * wordSize;
duke@435 1388 int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages)
duke@435 1389 * vm_page_size()) + framesize_in_bytes;
duke@435 1390 // The very bottom of the stack
duke@435 1391 address stack_limit = thread->stack_base() - thread->stack_size();
duke@435 1392 return (sp > (stack_limit + reserved_area));
duke@435 1393 }
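// A worked example with assumed, platform-dependent values: for a 4K page
// size, StackShadowPages = 20, StackRedPages = 1, StackYellowPages = 2 and a
// 2K top interpreter activation, reserved_area is (20 + 1 + 2) * 4K + 2K = 94K,
// so the call is considered safe only if sp lies more than 94K above the
// lowest address of the thread's stack.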
duke@435 1394
duke@435 1395 size_t os::page_size_for_region(size_t region_min_size, size_t region_max_size,
duke@435 1396 uint min_pages)
duke@435 1397 {
duke@435 1398 assert(min_pages > 0, "sanity");
duke@435 1399 if (UseLargePages) {
duke@435 1400 const size_t max_page_size = region_max_size / min_pages;
duke@435 1401
duke@435 1402 for (unsigned int i = 0; _page_sizes[i] != 0; ++i) {
duke@435 1403 const size_t sz = _page_sizes[i];
duke@435 1404 const size_t mask = sz - 1;
duke@435 1405 if ((region_min_size & mask) == 0 && (region_max_size & mask) == 0) {
duke@435 1406 // The largest page size with no fragmentation.
duke@435 1407 return sz;
duke@435 1408 }
duke@435 1409
duke@435 1410 if (sz <= max_page_size) {
duke@435 1411 // The largest page size that satisfies the min_pages requirement.
duke@435 1412 return sz;
duke@435 1413 }
duke@435 1414 }
duke@435 1415 }
duke@435 1416
duke@435 1417 return vm_page_size();
duke@435 1418 }
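// A worked example with assumed page sizes: if _page_sizes holds { 2M, 4K }
// (largest first, as populated by the platform code) and the caller asks for a
// 64M region with min_pages = 8, the 2M entry divides both bounds evenly and
// is returned. For a 3M region, 2M neither avoids fragmentation nor fits the
// 384K (= 3M / 8) per-page limit, so the scan falls through to 4K.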
duke@435 1419
duke@435 1420 #ifndef PRODUCT
jcoomes@3057 1421 void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count)
jcoomes@3057 1422 {
jcoomes@3057 1423 if (TracePageSizes) {
jcoomes@3057 1424 tty->print("%s: ", str);
jcoomes@3057 1425 for (int i = 0; i < count; ++i) {
jcoomes@3057 1426 tty->print(" " SIZE_FORMAT, page_sizes[i]);
jcoomes@3057 1427 }
jcoomes@3057 1428 tty->cr();
jcoomes@3057 1429 }
jcoomes@3057 1430 }
jcoomes@3057 1431
duke@435 1432 void os::trace_page_sizes(const char* str, const size_t region_min_size,
duke@435 1433 const size_t region_max_size, const size_t page_size,
duke@435 1434 const char* base, const size_t size)
duke@435 1435 {
duke@435 1436 if (TracePageSizes) {
duke@435 1437 tty->print_cr("%s: min=" SIZE_FORMAT " max=" SIZE_FORMAT
duke@435 1438 " pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT
duke@435 1439 " size=" SIZE_FORMAT,
duke@435 1440 str, region_min_size, region_max_size,
duke@435 1441 page_size, base, size);
duke@435 1442 }
duke@435 1443 }
duke@435 1444 #endif // #ifndef PRODUCT
duke@435 1445
duke@435 1446 // This is the working definition of a server class machine:
duke@435 1447 // >= 2 physical CPUs and >= 2GB of memory, with some fuzz
duke@435 1448 // because the graphics memory (?) sometimes masks physical memory.
duke@435 1449 // If you want to change the definition of a server class machine
duke@435 1450 // on some OS or platform, e.g., >= 4GB on Windows platforms,
duke@435 1451 // then you'll have to parameterize this method based on that state,
duke@435 1452 // as was done for logical processors here, or replicate and
duke@435 1453 // specialize this method for each platform. (Or fix os to have
duke@435 1454 // some inheritance structure and use subclassing. Sigh.)
duke@435 1455 // If you want some platform to always or never behave as a server
duke@435 1456 // class machine, change the setting of AlwaysActAsServerClassMachine
duke@435 1457 // and NeverActAsServerClassMachine in globals*.hpp.
duke@435 1458 bool os::is_server_class_machine() {
duke@435 1459 // First check for the early returns
duke@435 1460 if (NeverActAsServerClassMachine) {
duke@435 1461 return false;
duke@435 1462 }
duke@435 1463 if (AlwaysActAsServerClassMachine) {
duke@435 1464 return true;
duke@435 1465 }
duke@435 1466 // Then actually look at the machine
duke@435 1467 bool result = false;
duke@435 1468 const unsigned int server_processors = 2;
duke@435 1469 const julong server_memory = 2UL * G;
duke@435 1470 // We seem not to get our full complement of memory.
duke@435 1471 // We allow some part (1/8?) of the memory to be "missing",
duke@435 1472 // based on the sizes of DIMMs, and maybe graphics cards.
duke@435 1473 const julong missing_memory = 256UL * M;
duke@435 1474
duke@435 1475 /* Is this a server class machine? */
duke@435 1476 if ((os::active_processor_count() >= (int)server_processors) &&
duke@435 1477 (os::physical_memory() >= (server_memory - missing_memory))) {
duke@435 1478 const unsigned int logical_processors =
duke@435 1479 VM_Version::logical_processors_per_package();
duke@435 1480 if (logical_processors > 1) {
duke@435 1481 const unsigned int physical_packages =
duke@435 1482 os::active_processor_count() / logical_processors;
duke@435 1483 if (physical_packages > server_processors) {
duke@435 1484 result = true;
duke@435 1485 }
duke@435 1486 } else {
duke@435 1487 result = true;
duke@435 1488 }
duke@435 1489 }
duke@435 1490 return result;
duke@435 1491 }
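// For illustration: a machine with 8 active processors, 2 logical processors
// per package and 8G of memory reports 4 physical packages, which is greater
// than server_processors, so it qualifies. Note that, as written, a
// two-package machine with hyper-threading does not qualify via this branch,
// because the test is '>' rather than '>='.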
dsamersoff@2751 1492
sla@5237 1493 void os::SuspendedThreadTask::run() {
sla@5237 1494 assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
sla@5237 1495 internal_do_task();
sla@5237 1496 _done = true;
sla@5237 1497 }
sla@5237 1498
zgu@3900 1499 bool os::create_stack_guard_pages(char* addr, size_t bytes) {
zgu@3900 1500 return os::pd_create_stack_guard_pages(addr, bytes);
zgu@3900 1501 }
zgu@3900 1502
zgu@3900 1503 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
zgu@3900 1504 char* result = pd_reserve_memory(bytes, addr, alignment_hint);
zgu@4193 1505 if (result != NULL) {
zgu@5272 1506 MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
zgu@3900 1507 }
zgu@3900 1508
zgu@3900 1509 return result;
zgu@3900 1510 }
zgu@5053 1511
zgu@5053 1512 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
zgu@5053 1513 MEMFLAGS flags) {
zgu@5053 1514 char* result = pd_reserve_memory(bytes, addr, alignment_hint);
zgu@5053 1515 if (result != NULL) {
zgu@5272 1516 MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
zgu@5053 1517 MemTracker::record_virtual_memory_type((address)result, flags);
zgu@5053 1518 }
zgu@5053 1519
zgu@5053 1520 return result;
zgu@5053 1521 }
zgu@5053 1522
zgu@3900 1523 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
zgu@3900 1524 char* result = pd_attempt_reserve_memory_at(bytes, addr);
zgu@4193 1525 if (result != NULL) {
zgu@5272 1526 MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
zgu@3900 1527 }
zgu@3900 1528 return result;
zgu@3900 1529 }
zgu@3900 1530
zgu@3900 1531 void os::split_reserved_memory(char *base, size_t size,
zgu@3900 1532 size_t split, bool realloc) {
zgu@3900 1533 pd_split_reserved_memory(base, size, split, realloc);
zgu@3900 1534 }
zgu@3900 1535
zgu@3900 1536 bool os::commit_memory(char* addr, size_t bytes, bool executable) {
zgu@3900 1537 bool res = pd_commit_memory(addr, bytes, executable);
zgu@4193 1538 if (res) {
zgu@3900 1539 MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
zgu@3900 1540 }
zgu@3900 1541 return res;
zgu@3900 1542 }
zgu@3900 1543
zgu@3900 1544 bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
zgu@3900 1545 bool executable) {
zgu@3900 1546 bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
zgu@4193 1547 if (res) {
zgu@3900 1548 MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
zgu@3900 1549 }
zgu@3900 1550 return res;
zgu@3900 1551 }
zgu@3900 1552
dcubed@5255 1553 void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
dcubed@5255 1554 const char* mesg) {
dcubed@5255 1555 pd_commit_memory_or_exit(addr, bytes, executable, mesg);
dcubed@5255 1556 MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
dcubed@5255 1557 }
dcubed@5255 1558
dcubed@5255 1559 void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
dcubed@5255 1560 bool executable, const char* mesg) {
dcubed@5255 1561 os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
dcubed@5255 1562 MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
dcubed@5255 1563 }
dcubed@5255 1564
zgu@3900 1565 bool os::uncommit_memory(char* addr, size_t bytes) {
zgu@5272 1566 MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
zgu@3900 1567 bool res = pd_uncommit_memory(addr, bytes);
zgu@3900 1568 if (res) {
zgu@5272 1569 tkr.record((address)addr, bytes);
zgu@5272 1570 } else {
zgu@5272 1571 tkr.discard();
zgu@3900 1572 }
zgu@3900 1573 return res;
zgu@3900 1574 }
zgu@3900 1575
zgu@3900 1576 bool os::release_memory(char* addr, size_t bytes) {
zgu@5272 1577 MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
zgu@3900 1578 bool res = pd_release_memory(addr, bytes);
zgu@3900 1579 if (res) {
zgu@5272 1580 tkr.record((address)addr, bytes);
zgu@5272 1581 } else {
zgu@5272 1582 tkr.discard();
zgu@3900 1583 }
zgu@3900 1584 return res;
zgu@3900 1585 }
zgu@3900 1586
zgu@3900 1587
zgu@3900 1588 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
zgu@3900 1589 char *addr, size_t bytes, bool read_only,
zgu@3900 1590 bool allow_exec) {
zgu@3900 1591 char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
zgu@4193 1592 if (result != NULL) {
zgu@5272 1593 MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
zgu@3900 1594 }
zgu@3900 1595 return result;
zgu@3900 1596 }
zgu@3900 1597
zgu@3900 1598 char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
zgu@3900 1599 char *addr, size_t bytes, bool read_only,
zgu@3900 1600 bool allow_exec) {
zgu@3900 1601 return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
zgu@3900 1602 read_only, allow_exec);
zgu@3900 1603 }
zgu@3900 1604
zgu@3900 1605 bool os::unmap_memory(char *addr, size_t bytes) {
zgu@5272 1606 MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
zgu@3900 1607 bool result = pd_unmap_memory(addr, bytes);
zgu@3900 1608 if (result) {
zgu@5272 1609 tkr.record((address)addr, bytes);
zgu@5272 1610 } else {
zgu@5272 1611 tkr.discard();
zgu@3900 1612 }
zgu@3900 1613 return result;
zgu@3900 1614 }
zgu@3900 1615
zgu@3900 1616 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
zgu@3900 1617 pd_free_memory(addr, bytes, alignment_hint);
zgu@3900 1618 }
zgu@3900 1619
zgu@3900 1620 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
zgu@3900 1621 pd_realign_memory(addr, bytes, alignment_hint);
zgu@3900 1622 }
zgu@3900 1623
sla@5237 1624 #ifndef TARGET_OS_FAMILY_windows
sla@5237 1625 /* Try to switch the state from "from" to "to".
sla@5237 1626 * Returns the state that is set once the method completes.
sla@5237 1627 */
sla@5237 1628 os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
sla@5237 1629 os::SuspendResume::State to)
sla@5237 1630 {
sla@5237 1631 os::SuspendResume::State result =
sla@5237 1632 (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
sla@5237 1633 if (result == from) {
sla@5237 1634 // success
sla@5237 1635 return to;
sla@5237 1636 }
sla@5237 1637 return result;
sla@5237 1638 }
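// For illustration (state names as declared for SuspendResume in os.hpp): a
// suspend request would attempt switch_state(SR_RUNNING, SR_SUSPEND_REQUEST).
// If the compare-and-exchange wins, the 'to' state is returned; if another
// thread changed _state first, the state observed by the cmpxchg is returned
// and the caller must act on that value rather than assume the transition
// took place.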
sla@5237 1639 #endif