Thu, 07 Sep 2017 09:17:43 -0400
8072428: Enable UseLoopCounter ergonomically if on-stack-replacement is enabled
Summary: Set UseLoopCounter ergonomically if on-stack-replacement is enabled. Print warning.
Reviewed-by: kvn
duke@435 | 1 | /* |
tschatzl@8661 | 2 | * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "classfile/classLoader.hpp" |
stefank@2314 | 27 | #include "classfile/javaClasses.hpp" |
stefank@2314 | 28 | #include "classfile/systemDictionary.hpp" |
stefank@2314 | 29 | #include "classfile/vmSymbols.hpp" |
stefank@2314 | 30 | #include "code/icBuffer.hpp" |
stefank@2314 | 31 | #include "code/vtableStubs.hpp" |
stefank@2314 | 32 | #include "gc_implementation/shared/vmGCOperations.hpp" |
stefank@2314 | 33 | #include "interpreter/interpreter.hpp" |
stefank@2314 | 34 | #include "memory/allocation.inline.hpp" |
dsimms@7032 | 35 | #ifdef ASSERT |
dsimms@7032 | 36 | #include "memory/guardedMemory.hpp" |
dsimms@7032 | 37 | #endif |
stefank@2314 | 38 | #include "oops/oop.inline.hpp" |
stefank@2314 | 39 | #include "prims/jvm.h" |
stefank@2314 | 40 | #include "prims/jvm_misc.hpp" |
stefank@2314 | 41 | #include "prims/privilegedStack.hpp" |
stefank@2314 | 42 | #include "runtime/arguments.hpp" |
stefank@2314 | 43 | #include "runtime/frame.inline.hpp" |
stefank@2314 | 44 | #include "runtime/interfaceSupport.hpp" |
stefank@2314 | 45 | #include "runtime/java.hpp" |
stefank@2314 | 46 | #include "runtime/javaCalls.hpp" |
stefank@2314 | 47 | #include "runtime/mutexLocker.hpp" |
stefank@2314 | 48 | #include "runtime/os.hpp" |
stefank@2314 | 49 | #include "runtime/stubRoutines.hpp" |
stefank@4299 | 50 | #include "runtime/thread.inline.hpp" |
stefank@2314 | 51 | #include "services/attachListener.hpp" |
zgu@7074 | 52 | #include "services/nmtCommon.hpp" |
zgu@7177 | 53 | #include "services/mallocTracker.hpp" |
zgu@3900 | 54 | #include "services/memTracker.hpp" |
stefank@2314 | 55 | #include "services/threadService.hpp" |
stefank@2314 | 56 | #include "utilities/defaultStream.hpp" |
stefank@2314 | 57 | #include "utilities/events.hpp" |
stefank@2314 | 58 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 59 | # include "os_linux.inline.hpp" |
stefank@2314 | 60 | #endif |
stefank@2314 | 61 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 62 | # include "os_solaris.inline.hpp" |
stefank@2314 | 63 | #endif |
stefank@2314 | 64 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 65 | # include "os_windows.inline.hpp" |
stefank@2314 | 66 | #endif |
never@3156 | 67 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 68 | # include "os_bsd.inline.hpp" |
never@3156 | 69 | #endif |
duke@435 | 70 | |
duke@435 | 71 | # include <signal.h> |
duke@435 | 72 | |
drchase@6680 | 73 | PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
drchase@6680 | 74 | |
// Static state for the platform-independent os layer, defined here and
// initialized during VM startup.
OSThread* os::_starting_thread = NULL;          // OSThread of the thread that created the VM
address os::_polling_page = NULL;               // page used for safepoint polling
volatile int32_t* os::_mem_serialize_page = NULL; // page used for memory-serialization tricks
uintptr_t os::_serialize_page_mask = 0;
long os::_rand_seed = 1;                        // seed for os::random()
int os::_processor_count = 0;                   // number of processors (set during init)
int os::_initial_active_processor_count = 0;    // active processors observed at startup (for ergonomics)
size_t os::_page_sizes[os::page_sizes_max];     // supported page sizes, zero-terminated

#ifndef PRODUCT
// Debug-build allocation statistics, maintained by os::malloc/realloc/free.
julong os::num_mallocs = 0;         // # of calls to malloc/realloc
julong os::alloc_bytes = 0;         // # of bytes allocated
julong os::num_frees = 0;           // # of calls to free
julong os::free_bytes = 0;          // # of bytes freed
#endif
rdurbin@4802 | 92 | |
// Free-function bridge to the platform-specific os::init_globals(),
// invoked once during VM startup.
void os_init_globals() {
  // Called from init_globals().
  // See Threads::create_vm() in thread.cpp, and init.cpp.
  os::init_globals();
}
phh@3378 | 98 | |
duke@435 | 99 | // Fill in buffer with current local time as an ISO-8601 string. |
duke@435 | 100 | // E.g., yyyy-mm-ddThh:mm:ss-zzzz. |
duke@435 | 101 | // Returns buffer, or NULL if it failed. |
duke@435 | 102 | // This would mostly be a call to |
duke@435 | 103 | // strftime(...., "%Y-%m-%d" "T" "%H:%M:%S" "%z", ....) |
duke@435 | 104 | // except that on Windows the %z behaves badly, so we do it ourselves. |
duke@435 | 105 | // Also, people wanted milliseconds on there, |
duke@435 | 106 | // and strftime doesn't do milliseconds. |
duke@435 | 107 | char* os::iso8601_time(char* buffer, size_t buffer_length) { |
duke@435 | 108 | // Output will be of the form "YYYY-MM-DDThh:mm:ss.mmm+zzzz\0" |
duke@435 | 109 | // 1 2 |
duke@435 | 110 | // 12345678901234567890123456789 |
duke@435 | 111 | static const char* iso8601_format = |
duke@435 | 112 | "%04d-%02d-%02dT%02d:%02d:%02d.%03d%c%02d%02d"; |
duke@435 | 113 | static const size_t needed_buffer = 29; |
duke@435 | 114 | |
duke@435 | 115 | // Sanity check the arguments |
duke@435 | 116 | if (buffer == NULL) { |
duke@435 | 117 | assert(false, "NULL buffer"); |
duke@435 | 118 | return NULL; |
duke@435 | 119 | } |
duke@435 | 120 | if (buffer_length < needed_buffer) { |
duke@435 | 121 | assert(false, "buffer_length too small"); |
duke@435 | 122 | return NULL; |
duke@435 | 123 | } |
duke@435 | 124 | // Get the current time |
sbohne@496 | 125 | jlong milliseconds_since_19700101 = javaTimeMillis(); |
duke@435 | 126 | const int milliseconds_per_microsecond = 1000; |
duke@435 | 127 | const time_t seconds_since_19700101 = |
duke@435 | 128 | milliseconds_since_19700101 / milliseconds_per_microsecond; |
duke@435 | 129 | const int milliseconds_after_second = |
duke@435 | 130 | milliseconds_since_19700101 % milliseconds_per_microsecond; |
duke@435 | 131 | // Convert the time value to a tm and timezone variable |
ysr@983 | 132 | struct tm time_struct; |
ysr@983 | 133 | if (localtime_pd(&seconds_since_19700101, &time_struct) == NULL) { |
ysr@983 | 134 | assert(false, "Failed localtime_pd"); |
duke@435 | 135 | return NULL; |
duke@435 | 136 | } |
never@3156 | 137 | #if defined(_ALLBSD_SOURCE) |
never@3156 | 138 | const time_t zone = (time_t) time_struct.tm_gmtoff; |
never@3156 | 139 | #else |
duke@435 | 140 | const time_t zone = timezone; |
never@3156 | 141 | #endif |
duke@435 | 142 | |
duke@435 | 143 | // If daylight savings time is in effect, |
duke@435 | 144 | // we are 1 hour East of our time zone |
duke@435 | 145 | const time_t seconds_per_minute = 60; |
duke@435 | 146 | const time_t minutes_per_hour = 60; |
duke@435 | 147 | const time_t seconds_per_hour = seconds_per_minute * minutes_per_hour; |
duke@435 | 148 | time_t UTC_to_local = zone; |
duke@435 | 149 | if (time_struct.tm_isdst > 0) { |
duke@435 | 150 | UTC_to_local = UTC_to_local - seconds_per_hour; |
duke@435 | 151 | } |
duke@435 | 152 | // Compute the time zone offset. |
ysr@983 | 153 | // localtime_pd() sets timezone to the difference (in seconds) |
duke@435 | 154 | // between UTC and and local time. |
duke@435 | 155 | // ISO 8601 says we need the difference between local time and UTC, |
ysr@983 | 156 | // we change the sign of the localtime_pd() result. |
duke@435 | 157 | const time_t local_to_UTC = -(UTC_to_local); |
duke@435 | 158 | // Then we have to figure out if if we are ahead (+) or behind (-) UTC. |
duke@435 | 159 | char sign_local_to_UTC = '+'; |
duke@435 | 160 | time_t abs_local_to_UTC = local_to_UTC; |
duke@435 | 161 | if (local_to_UTC < 0) { |
duke@435 | 162 | sign_local_to_UTC = '-'; |
duke@435 | 163 | abs_local_to_UTC = -(abs_local_to_UTC); |
duke@435 | 164 | } |
duke@435 | 165 | // Convert time zone offset seconds to hours and minutes. |
duke@435 | 166 | const time_t zone_hours = (abs_local_to_UTC / seconds_per_hour); |
duke@435 | 167 | const time_t zone_min = |
duke@435 | 168 | ((abs_local_to_UTC % seconds_per_hour) / seconds_per_minute); |
duke@435 | 169 | |
duke@435 | 170 | // Print an ISO 8601 date and time stamp into the buffer |
duke@435 | 171 | const int year = 1900 + time_struct.tm_year; |
duke@435 | 172 | const int month = 1 + time_struct.tm_mon; |
duke@435 | 173 | const int printed = jio_snprintf(buffer, buffer_length, iso8601_format, |
duke@435 | 174 | year, |
duke@435 | 175 | month, |
duke@435 | 176 | time_struct.tm_mday, |
duke@435 | 177 | time_struct.tm_hour, |
duke@435 | 178 | time_struct.tm_min, |
duke@435 | 179 | time_struct.tm_sec, |
duke@435 | 180 | milliseconds_after_second, |
duke@435 | 181 | sign_local_to_UTC, |
duke@435 | 182 | zone_hours, |
duke@435 | 183 | zone_min); |
duke@435 | 184 | if (printed == 0) { |
duke@435 | 185 | assert(false, "Failed jio_printf"); |
duke@435 | 186 | return NULL; |
duke@435 | 187 | } |
duke@435 | 188 | return buffer; |
duke@435 | 189 | } |
duke@435 | 190 | |
duke@435 | 191 | OSReturn os::set_priority(Thread* thread, ThreadPriority p) { |
duke@435 | 192 | #ifdef ASSERT |
duke@435 | 193 | if (!(!thread->is_Java_thread() || |
duke@435 | 194 | Thread::current() == thread || |
duke@435 | 195 | Threads_lock->owned_by_self() |
duke@435 | 196 | || thread->is_Compiler_thread() |
duke@435 | 197 | )) { |
duke@435 | 198 | assert(false, "possibility of dangling Thread pointer"); |
duke@435 | 199 | } |
duke@435 | 200 | #endif |
duke@435 | 201 | |
duke@435 | 202 | if (p >= MinPriority && p <= MaxPriority) { |
duke@435 | 203 | int priority = java_to_os_priority[p]; |
duke@435 | 204 | return set_native_priority(thread, priority); |
duke@435 | 205 | } else { |
duke@435 | 206 | assert(false, "Should not happen"); |
duke@435 | 207 | return OS_ERR; |
duke@435 | 208 | } |
duke@435 | 209 | } |
duke@435 | 210 | |
dholmes@4077 | 211 | // The mapping from OS priority back to Java priority may be inexact because |
dholmes@4077 | 212 | // Java priorities can map M:1 with native priorities. If you want the definite |
dholmes@4077 | 213 | // Java priority then use JavaThread::java_priority() |
duke@435 | 214 | OSReturn os::get_priority(const Thread* const thread, ThreadPriority& priority) { |
duke@435 | 215 | int p; |
duke@435 | 216 | int os_prio; |
duke@435 | 217 | OSReturn ret = get_native_priority(thread, &os_prio); |
duke@435 | 218 | if (ret != OS_OK) return ret; |
duke@435 | 219 | |
dholmes@4077 | 220 | if (java_to_os_priority[MaxPriority] > java_to_os_priority[MinPriority]) { |
dholmes@4077 | 221 | for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] > os_prio; p--) ; |
dholmes@4077 | 222 | } else { |
dholmes@4077 | 223 | // niceness values are in reverse order |
dholmes@4077 | 224 | for (p = MaxPriority; p > MinPriority && java_to_os_priority[p] < os_prio; p--) ; |
dholmes@4077 | 225 | } |
duke@435 | 226 | priority = (ThreadPriority)p; |
duke@435 | 227 | return OS_OK; |
duke@435 | 228 | } |
duke@435 | 229 | |
duke@435 | 230 | |
duke@435 | 231 | // --------------------- sun.misc.Signal (optional) --------------------- |
duke@435 | 232 | |
duke@435 | 233 | |
duke@435 | 234 | // SIGBREAK is sent by the keyboard to query the VM state |
duke@435 | 235 | #ifndef SIGBREAK |
duke@435 | 236 | #define SIGBREAK SIGQUIT |
duke@435 | 237 | #endif |
duke@435 | 238 | |
duke@435 | 239 | // sigexitnum_pd is a platform-specific special signal used for terminating the Signal thread. |
duke@435 | 240 | |
duke@435 | 241 | |
// Entry point of the "Signal Dispatcher" JavaThread (started by
// os::signal_init). Loops forever waiting for signals posted via
// os::signal_notify: the platform exit signal terminates the thread,
// SIGBREAK dumps VM diagnostics, and every other signal is forwarded to
// the Java-level sun.misc.Signal dispatcher.
static void signal_thread_entry(JavaThread* thread, TRAPS) {
  os::set_priority(thread, NearMaxPriority);
  while (true) {
    int sig;
    {
      // FIXME : Currently we have not decided what should be the status
      // for this java thread blocked here. Once we decide about
      // that we should fix this.
      sig = os::signal_wait();
    }
    if (sig == os::sigexitnum_pd()) {
      // Terminate the signal thread
      return;
    }

    switch (sig) {
      case SIGBREAK: {
        // Check if the signal is a trigger to start the Attach Listener - in that
        // case don't print stack traces.
        if (!DisableAttachMechanism && AttachListener::is_init_trigger()) {
          continue;
        }
        // Print stack traces
        // Any SIGBREAK operations added here should make sure to flush
        // the output stream (e.g. tty->flush()) after output.  See 4803766.
        // Each module also prints an extra carriage return after its output.
        VM_PrintThreads op;
        VMThread::execute(&op);
        VM_PrintJNI jni_op;
        VMThread::execute(&jni_op);
        VM_FindDeadlocks op1(tty);
        VMThread::execute(&op1);
        Universe::print_heap_at_SIGBREAK();
        if (PrintClassHistogram) {
          VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
          VMThread::execute(&op1);
        }
        if (JvmtiExport::should_post_data_dump()) {
          JvmtiExport::post_data_dump();
        }
        break;
      }
      default: {
        // Dispatch the signal to java
        HandleMark hm(THREAD);
        // Resolve sun.misc.Signal lazily; it may not be present in all images.
        Klass* k = SystemDictionary::resolve_or_null(vmSymbols::sun_misc_Signal(), THREAD);
        KlassHandle klass (THREAD, k);
        if (klass.not_null()) {
          JavaValue result(T_VOID);
          JavaCallArguments args;
          args.push_int(sig);
          // Invoke static sun.misc.Signal.dispatch(int) with the signal number.
          JavaCalls::call_static(
            &result,
            klass,
            vmSymbols::dispatch_name(),
            vmSymbols::int_void_signature(),
            &args,
            THREAD
          );
        }
        if (HAS_PENDING_EXCEPTION) {
          // tty is initialized early so we don't expect it to be null, but
          // if it is we can't risk doing an initialization that might
          // trigger additional out-of-memory conditions
          if (tty != NULL) {
            char klass_name[256];
            char tmp_sig_name[16];
            const char* sig_name = "UNKNOWN";
            InstanceKlass::cast(PENDING_EXCEPTION->klass())->
              name()->as_klass_external_name(klass_name, 256);
            if (os::exception_name(sig, tmp_sig_name, 16) != NULL)
              sig_name = tmp_sig_name;
            warning("Exception %s occurred dispatching signal %s to handler"
                    "- the VM may need to be forcibly terminated",
                    klass_name, sig_name );
          }
          // Swallow the exception: the dispatcher loop must keep running.
          CLEAR_PENDING_EXCEPTION;
        }
      }
    }
  }
}
duke@435 | 324 | |
// Initialization that must happen before ergonomic flag processing runs.
// The call order here matters: ergonomics reads the values set up below.
void os::init_before_ergo() {
  // Capture the active processor count first; ergonomic sizing decisions
  // (threads, heap) are based on this snapshot.
  initialize_initial_active_processor_count();
  // We need to initialize large page support here because ergonomics takes some
  // decisions depending on large page support and the calculated large page size.
  large_page_init();

  // VM version initialization identifies some characteristics of the
  // platform that are used during ergonomic decisions.
  VM_Version::init_before_ergo();
}
duke@435 | 335 | |
// Create and start the "Signal Dispatcher" JavaThread (unless
// -XX:+ReduceSignalUsage disables VM signal handling). The thread object is
// constructed at the Java level, added to the system thread group, and then
// bound to a native thread running signal_thread_entry. Finally SIGBREAK is
// routed to the VM's user handler so keyboard break triggers thread dumps.
void os::signal_init() {
  if (!ReduceSignalUsage) {
    // Setup JavaThread for processing signals
    EXCEPTION_MARK;
    Klass* k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_Thread(), true, CHECK);
    instanceKlassHandle klass (THREAD, k);
    instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);

    const char thread_name[] = "Signal Dispatcher";
    Handle string = java_lang_String::create_from_str(thread_name, CHECK);

    // Initialize thread_oop to put it into the system threadGroup
    Handle thread_group (THREAD, Universe::system_thread_group());
    JavaValue result(T_VOID);
    // Call java.lang.Thread.<init>(ThreadGroup, String) on the new thread oop.
    JavaCalls::call_special(&result, thread_oop,
                           klass,
                           vmSymbols::object_initializer_name(),
                           vmSymbols::threadgroup_string_void_signature(),
                           thread_group,
                           string,
                           CHECK);

    // Register the thread with the system group: ThreadGroup.add(Thread).
    KlassHandle group(THREAD, SystemDictionary::ThreadGroup_klass());
    JavaCalls::call_special(&result,
                            thread_group,
                            group,
                            vmSymbols::add_method_name(),
                            vmSymbols::thread_void_signature(),
                            thread_oop,         // ARG 1
                            CHECK);

    // Platform-specific signal plumbing (e.g. installing handlers).
    os::signal_init_pd();

    { MutexLocker mu(Threads_lock);
      JavaThread* signal_thread = new JavaThread(&signal_thread_entry);

      // At this point it may be possible that no osthread was created for the
      // JavaThread due to lack of memory. We would have to throw an exception
      // in that case. However, since this must work and we do not allow
      // exceptions anyway, check and abort if this fails.
      if (signal_thread == NULL || signal_thread->osthread() == NULL) {
        vm_exit_during_initialization("java.lang.OutOfMemoryError",
                                      "unable to create new native thread");
      }

      // Wire the Java thread object and the native thread together and start it.
      java_lang_Thread::set_thread(thread_oop(), signal_thread);
      java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
      java_lang_Thread::set_daemon(thread_oop());

      signal_thread->set_threadObj(thread_oop());
      Threads::add(signal_thread);
      Thread::start(signal_thread);
    }
    // Handle ^BREAK
    os::signal(SIGBREAK, os::user_handler());
  }
}
duke@435 | 393 | |
duke@435 | 394 | |
duke@435 | 395 | void os::terminate_signal_thread() { |
duke@435 | 396 | if (!ReduceSignalUsage) |
duke@435 | 397 | signal_notify(sigexitnum_pd()); |
duke@435 | 398 | } |
duke@435 | 399 | |
duke@435 | 400 | |
duke@435 | 401 | // --------------------- loading libraries --------------------- |
duke@435 | 402 | |
duke@435 | 403 | typedef jint (JNICALL *JNI_OnLoad_t)(JavaVM *, void *); |
duke@435 | 404 | extern struct JavaVM_ main_vm; |
duke@435 | 405 | |
duke@435 | 406 | static void* _native_java_library = NULL; |
duke@435 | 407 | |
// Lazily load the base native library (libjava) and fire its JNI_OnLoad.
// The handle is cached in _native_java_library; the VM exits if the library
// cannot be loaded. JNI_OnLoad invocation is deferred until thread-local
// storage is initialized, hence the separate onLoaded latch below.
// NOTE(review): assumes single-threaded use during startup - the static
// locals are not synchronized.
void* os::native_java_library() {
  if (_native_java_library == NULL) {
    char buffer[JVM_MAXPATHLEN];
    char ebuf[1024];

    // Try to load verify dll first. In 1.3 java dll depends on it and is not
    // always able to find it when the loading executable is outside the JDK.
    // In order to keep working with 1.2 we ignore any loading errors.
    if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
                       "verify")) {
      dll_load(buffer, ebuf, sizeof(ebuf));
    }

    // Load java dll
    if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
                       "java")) {
      _native_java_library = dll_load(buffer, ebuf, sizeof(ebuf));
    }
    if (_native_java_library == NULL) {
      // Fatal: the VM cannot run without libjava.
      vm_exit_during_initialization("Unable to load native library", ebuf);
    }

#if defined(__OpenBSD__)
    // Work-around OpenBSD's lack of $ORIGIN support by pre-loading libnet.so
    // ignore errors
    if (dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
                       "net")) {
      dll_load(buffer, ebuf, sizeof(ebuf));
    }
#endif
  }
  // One-shot latch so JNI_OnLoad runs at most once.
  static jboolean onLoaded = JNI_FALSE;
  if (onLoaded) {
    // We may have to wait to fire OnLoad until TLS is initialized.
    if (ThreadLocalStorage::is_initialized()) {
      // The JNI_OnLoad handling is normally done by method load in
      // java.lang.ClassLoader$NativeLibrary, but the VM loads the base library
      // explicitly so we have to check for JNI_OnLoad as well
      const char *onLoadSymbols[] = JNI_ONLOAD_SYMBOLS;
      JNI_OnLoad_t JNI_OnLoad = CAST_TO_FN_PTR(
          JNI_OnLoad_t, dll_lookup(_native_java_library, onLoadSymbols[0]));
      if (JNI_OnLoad != NULL) {
        JavaThread* thread = JavaThread::current();
        // JNI_OnLoad must run in native state with a fresh handle scope.
        ThreadToNativeFromVM ttn(thread);
        HandleMark hm(thread);
        jint ver = (*JNI_OnLoad)(&main_vm, NULL);
        onLoaded = JNI_TRUE;
        if (!Threads::is_supported_jni_version_including_1_1(ver)) {
          vm_exit_during_initialization("Unsupported JNI version");
        }
      }
    }
  }
  return _native_java_library;
}
duke@435 | 463 | |
bpittore@5585 | 464 | /* |
bpittore@5585 | 465 | * Support for finding Agent_On(Un)Load/Attach<_lib_name> if it exists. |
bpittore@5585 | 466 | * If check_lib == true then we are looking for an |
bpittore@5585 | 467 | * Agent_OnLoad_lib_name or Agent_OnAttach_lib_name function to determine if |
bpittore@5585 | 468 | * this library is statically linked into the image. |
bpittore@5585 | 469 | * If check_lib == false then we will look for the appropriate symbol in the |
bpittore@5585 | 470 | * executable if agent_lib->is_static_lib() == true or in the shared library |
bpittore@5585 | 471 | * referenced by 'handle'. |
bpittore@5585 | 472 | */ |
bpittore@5585 | 473 | void* os::find_agent_function(AgentLibrary *agent_lib, bool check_lib, |
bpittore@5585 | 474 | const char *syms[], size_t syms_len) { |
bpittore@5688 | 475 | assert(agent_lib != NULL, "sanity check"); |
bpittore@5585 | 476 | const char *lib_name; |
bpittore@5585 | 477 | void *handle = agent_lib->os_lib(); |
bpittore@5585 | 478 | void *entryName = NULL; |
bpittore@5585 | 479 | char *agent_function_name; |
bpittore@5585 | 480 | size_t i; |
bpittore@5585 | 481 | |
bpittore@5585 | 482 | // If checking then use the agent name otherwise test is_static_lib() to |
bpittore@5585 | 483 | // see how to process this lookup |
bpittore@5585 | 484 | lib_name = ((check_lib || agent_lib->is_static_lib()) ? agent_lib->name() : NULL); |
bpittore@5585 | 485 | for (i = 0; i < syms_len; i++) { |
bpittore@5585 | 486 | agent_function_name = build_agent_function_name(syms[i], lib_name, agent_lib->is_absolute_path()); |
bpittore@5585 | 487 | if (agent_function_name == NULL) { |
bpittore@5585 | 488 | break; |
bpittore@5585 | 489 | } |
bpittore@5585 | 490 | entryName = dll_lookup(handle, agent_function_name); |
bpittore@5585 | 491 | FREE_C_HEAP_ARRAY(char, agent_function_name, mtThread); |
bpittore@5585 | 492 | if (entryName != NULL) { |
bpittore@5585 | 493 | break; |
bpittore@5585 | 494 | } |
bpittore@5585 | 495 | } |
bpittore@5585 | 496 | return entryName; |
bpittore@5585 | 497 | } |
bpittore@5585 | 498 | |
bpittore@5585 | 499 | // See if the passed in agent is statically linked into the VM image. |
bpittore@5585 | 500 | bool os::find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], |
bpittore@5585 | 501 | size_t syms_len) { |
bpittore@5585 | 502 | void *ret; |
bpittore@5585 | 503 | void *proc_handle; |
bpittore@5585 | 504 | void *save_handle; |
bpittore@5585 | 505 | |
bpittore@5688 | 506 | assert(agent_lib != NULL, "sanity check"); |
bpittore@5585 | 507 | if (agent_lib->name() == NULL) { |
bpittore@5585 | 508 | return false; |
bpittore@5585 | 509 | } |
bpittore@5585 | 510 | proc_handle = get_default_process_handle(); |
bpittore@5585 | 511 | // Check for Agent_OnLoad/Attach_lib_name function |
bpittore@5585 | 512 | save_handle = agent_lib->os_lib(); |
bpittore@5585 | 513 | // We want to look in this process' symbol table. |
bpittore@5585 | 514 | agent_lib->set_os_lib(proc_handle); |
bpittore@5585 | 515 | ret = find_agent_function(agent_lib, true, syms, syms_len); |
bpittore@5585 | 516 | if (ret != NULL) { |
bpittore@5585 | 517 | // Found an entry point like Agent_OnLoad_lib_name so we have a static agent |
bpittore@5585 | 518 | agent_lib->set_valid(); |
bpittore@5585 | 519 | agent_lib->set_static_lib(true); |
bpittore@5585 | 520 | return true; |
bpittore@5585 | 521 | } |
bpittore@5688 | 522 | agent_lib->set_os_lib(save_handle); |
bpittore@5585 | 523 | return false; |
bpittore@5585 | 524 | } |
bpittore@5585 | 525 | |
duke@435 | 526 | // --------------------- heap allocation utilities --------------------- |
duke@435 | 527 | |
zgu@3900 | 528 | char *os::strdup(const char *str, MEMFLAGS flags) { |
duke@435 | 529 | size_t size = strlen(str); |
zgu@3900 | 530 | char *dup_str = (char *)malloc(size + 1, flags); |
duke@435 | 531 | if (dup_str == NULL) return NULL; |
duke@435 | 532 | strcpy(dup_str, str); |
duke@435 | 533 | return dup_str; |
duke@435 | 534 | } |
duke@435 | 535 | |
duke@435 | 536 | |
duke@435 | 537 | |
duke@435 | 538 | #define paranoid 0 /* only set to 1 if you suspect checking code has bug */ |
duke@435 | 539 | |
#ifdef ASSERT
// Debug-only check of the guard zones GuardedMemory wrapped around an
// os::malloc'd block. 'ptr' is the user pointer returned by os::malloc.
// Aborts the VM with a diagnostic dump if either guard has been stomped.
static void verify_memory(void* ptr) {
  GuardedMemory guarded(ptr);
  if (!guarded.verify_guards()) {
    tty->print_cr("## nof_mallocs = " UINT64_FORMAT ", nof_frees = " UINT64_FORMAT, os::num_mallocs, os::num_frees);
    tty->print_cr("## memory stomp:");
    guarded.print_on(tty);
    fatal("memory stomping error");
  }
}
#endif
duke@435 | 551 | |
//
// This function supports testing of the malloc out of memory
// condition without really running the system out of memory.
//
// Returns NULL (simulating allocation failure) once the running total in
// cur_malloc_words would exceed MallocMaxTestWords; otherwise forwards to
// the C heap and accounts for the words handed out.
// NOTE(review): the limit check and the Atomic::add are not performed
// atomically, so concurrent callers can slightly overshoot the cap --
// presumably acceptable for this test-only knob; confirm if it matters.
static u_char* testMalloc(size_t alloc_size) {
  assert(MallocMaxTestWords > 0, "sanity check");

  if ((cur_malloc_words + (alloc_size / BytesPerWord)) > MallocMaxTestWords) {
    return NULL;
  }

  u_char* ptr = (u_char*)::malloc(alloc_size);

  if (ptr != NULL) {
    // Only count words for allocations that actually succeeded.
    Atomic::add(((jint) (alloc_size / BytesPerWord)),
                (volatile jint *) &cur_malloc_words);
  }
  return ptr;
}
rdurbin@4802 | 571 | |
// Convenience overload: allocate with the immediate caller recorded as the
// NMT allocation site.
void* os::malloc(size_t size, MEMFLAGS flags) {
  return os::malloc(size, flags, CALLER_PC);
}
zgu@7074 | 575 | |
// Central VM malloc. Layers, inside-out: the raw C-heap block, an optional
// NMT (Native Memory Tracking) header, and - in debug builds - GuardedMemory
// guard zones around everything. Returns the user pointer (past header and
// guard), or NULL on failure. 'stack' is the allocation site recorded by NMT.
void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) {
  NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
  NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));

#ifdef ASSERT
  // checking for the WatcherThread and crash_protection first
  // since os::malloc can be called when the libjvm.{dll,so} is
  // first loaded and we don't have a thread yet.
  // try to find the thread after we see that the watcher thread
  // exists and has crash protection.
  WatcherThread *wt = WatcherThread::watcher_thread();
  if (wt != NULL && wt->has_crash_protection()) {
    Thread* thread = ThreadLocalStorage::get_thread_slow();
    if (thread == wt) {
      assert(!wt->has_crash_protection(),
          "Can't malloc with crash protection from WatcherThread");
    }
  }
#endif

  if (size == 0) {
    // return a valid pointer if size is zero
    // if NULL is returned the calling functions assume out of memory.
    size = 1;
  }

  // NMT support
  NMT_TrackingLevel level = MemTracker::tracking_level();
  size_t nmt_header_size = MemTracker::malloc_header_size(level);

#ifndef ASSERT
  const size_t alloc_size = size + nmt_header_size;
#else
  // Debug builds additionally reserve space for GuardedMemory's guard zones.
  const size_t alloc_size = GuardedMemory::get_total_size(size + nmt_header_size);
  if (size + nmt_header_size > alloc_size) { // Check for rollover.
    return NULL;
  }
#endif

  NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap());

  // Route through testMalloc when -XX:MallocMaxTestWords is in effect to
  // simulate out-of-memory conditions.
  u_char* ptr;
  if (MallocMaxTestWords > 0) {
    ptr = testMalloc(alloc_size);
  } else {
    ptr = (u_char*)::malloc(alloc_size);
  }

#ifdef ASSERT
  if (ptr == NULL) {
    return NULL;
  }
  // Wrap memory with guard
  GuardedMemory guarded(ptr, size + nmt_header_size);
  ptr = guarded.get_user_ptr();
#endif
  // -XX:MallocCatchPtr debugging aid: break when a specific address comes back.
  if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) {
    tty->print_cr("os::malloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
    breakpoint();
  }
  debug_only(if (paranoid) verify_memory(ptr));
  if (PrintMalloc && tty != NULL) {
    tty->print_cr("os::malloc " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr);
  }

  // we do not track guard memory
  return MemTracker::record_malloc((address)ptr, size, memflags, stack, level);
}
duke@435 | 644 | |
zgu@7074 | 645 | void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { |
zgu@7074 | 646 | return os::realloc(memblock, size, flags, CALLER_PC); |
zgu@7074 | 647 | } |
duke@435 | 648 | |
zgu@7074 | 649 | void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { |
zgu@7177 | 650 | |
duke@435 | 651 | #ifndef ASSERT |
kvn@2557 | 652 | NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); |
kvn@2557 | 653 | NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); |
zgu@7074 | 654 | // NMT support |
zgu@7074 | 655 | void* membase = MemTracker::record_free(memblock); |
zgu@7074 | 656 | NMT_TrackingLevel level = MemTracker::tracking_level(); |
zgu@7074 | 657 | size_t nmt_header_size = MemTracker::malloc_header_size(level); |
zgu@7074 | 658 | void* ptr = ::realloc(membase, size + nmt_header_size); |
zgu@7074 | 659 | return MemTracker::record_malloc(ptr, size, memflags, stack, level); |
duke@435 | 660 | #else |
duke@435 | 661 | if (memblock == NULL) { |
zgu@7074 | 662 | return os::malloc(size, memflags, stack); |
duke@435 | 663 | } |
duke@435 | 664 | if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { |
kvn@2557 | 665 | tty->print_cr("os::realloc caught " PTR_FORMAT, memblock); |
duke@435 | 666 | breakpoint(); |
duke@435 | 667 | } |
zgu@7074 | 668 | // NMT support |
zgu@7074 | 669 | void* membase = MemTracker::malloc_base(memblock); |
zgu@7074 | 670 | verify_memory(membase); |
duke@435 | 671 | NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); |
dsimms@7032 | 672 | if (size == 0) { |
dsimms@7032 | 673 | return NULL; |
dsimms@7032 | 674 | } |
duke@435 | 675 | // always move the block |
zgu@7074 | 676 | void* ptr = os::malloc(size, memflags, stack); |
dsimms@7032 | 677 | if (PrintMalloc) { |
dsimms@7032 | 678 | tty->print_cr("os::remalloc " SIZE_FORMAT " bytes, " PTR_FORMAT " --> " PTR_FORMAT, size, memblock, ptr); |
dsimms@7032 | 679 | } |
duke@435 | 680 | // Copy to new memory if malloc didn't fail |
duke@435 | 681 | if ( ptr != NULL ) { |
zgu@7074 | 682 | GuardedMemory guarded(MemTracker::malloc_base(memblock)); |
zgu@7074 | 683 | // Guard's user data contains NMT header |
zgu@7074 | 684 | size_t memblock_size = guarded.get_user_size() - MemTracker::malloc_header_size(memblock); |
zgu@7074 | 685 | memcpy(ptr, memblock, MIN2(size, memblock_size)); |
zgu@7074 | 686 | if (paranoid) verify_memory(MemTracker::malloc_base(ptr)); |
duke@435 | 687 | if ((intptr_t)ptr == (intptr_t)MallocCatchPtr) { |
kvn@2557 | 688 | tty->print_cr("os::realloc caught, " SIZE_FORMAT " bytes --> " PTR_FORMAT, size, ptr); |
duke@435 | 689 | breakpoint(); |
duke@435 | 690 | } |
dsimms@7032 | 691 | os::free(memblock); |
duke@435 | 692 | } |
duke@435 | 693 | return ptr; |
duke@435 | 694 | #endif |
duke@435 | 695 | } |
duke@435 | 696 | |
duke@435 | 697 | |
zgu@3900 | 698 | void os::free(void *memblock, MEMFLAGS memflags) { |
kvn@2557 | 699 | NOT_PRODUCT(inc_stat_counter(&num_frees, 1)); |
duke@435 | 700 | #ifdef ASSERT |
duke@435 | 701 | if (memblock == NULL) return; |
duke@435 | 702 | if ((intptr_t)memblock == (intptr_t)MallocCatchPtr) { |
kvn@2557 | 703 | if (tty != NULL) tty->print_cr("os::free caught " PTR_FORMAT, memblock); |
duke@435 | 704 | breakpoint(); |
duke@435 | 705 | } |
zgu@7074 | 706 | void* membase = MemTracker::record_free(memblock); |
zgu@7074 | 707 | verify_memory(membase); |
duke@435 | 708 | NOT_PRODUCT(if (MallocVerifyInterval > 0) check_heap()); |
dsimms@7032 | 709 | |
zgu@7074 | 710 | GuardedMemory guarded(membase); |
dsimms@7032 | 711 | size_t size = guarded.get_user_size(); |
dsimms@7032 | 712 | inc_stat_counter(&free_bytes, size); |
zgu@7074 | 713 | membase = guarded.release_for_freeing(); |
dsimms@7032 | 714 | if (PrintMalloc && tty != NULL) { |
zgu@7074 | 715 | fprintf(stderr, "os::free " SIZE_FORMAT " bytes --> " PTR_FORMAT "\n", size, (uintptr_t)membase); |
duke@435 | 716 | } |
zgu@7074 | 717 | ::free(membase); |
zgu@7074 | 718 | #else |
zgu@7074 | 719 | void* membase = MemTracker::record_free(memblock); |
zgu@7074 | 720 | ::free(membase); |
duke@435 | 721 | #endif |
duke@435 | 722 | } |
duke@435 | 723 | |
duke@435 | 724 | void os::init_random(long initval) { |
duke@435 | 725 | _rand_seed = initval; |
duke@435 | 726 | } |
duke@435 | 727 | |
duke@435 | 728 | |
duke@435 | 729 | long os::random() { |
duke@435 | 730 | /* standard, well-known linear congruential random generator with |
duke@435 | 731 | * next_rand = (16807*seed) mod (2**31-1) |
duke@435 | 732 | * see |
duke@435 | 733 | * (1) "Random Number Generators: Good Ones Are Hard to Find", |
duke@435 | 734 | * S.K. Park and K.W. Miller, Communications of the ACM 31:10 (Oct 1988), |
duke@435 | 735 | * (2) "Two Fast Implementations of the 'Minimal Standard' Random |
duke@435 | 736 | * Number Generator", David G. Carta, Comm. ACM 33, 1 (Jan 1990), pp. 87-88. |
duke@435 | 737 | */ |
duke@435 | 738 | const long a = 16807; |
duke@435 | 739 | const unsigned long m = 2147483647; |
duke@435 | 740 | const long q = m / a; assert(q == 127773, "weird math"); |
duke@435 | 741 | const long r = m % a; assert(r == 2836, "weird math"); |
duke@435 | 742 | |
duke@435 | 743 | // compute az=2^31p+q |
duke@435 | 744 | unsigned long lo = a * (long)(_rand_seed & 0xFFFF); |
duke@435 | 745 | unsigned long hi = a * (long)((unsigned long)_rand_seed >> 16); |
duke@435 | 746 | lo += (hi & 0x7FFF) << 16; |
duke@435 | 747 | |
duke@435 | 748 | // if q overflowed, ignore the overflow and increment q |
duke@435 | 749 | if (lo > m) { |
duke@435 | 750 | lo &= m; |
duke@435 | 751 | ++lo; |
duke@435 | 752 | } |
duke@435 | 753 | lo += hi >> 15; |
duke@435 | 754 | |
duke@435 | 755 | // if (p+q) overflowed, ignore the overflow and increment (p+q) |
duke@435 | 756 | if (lo > m) { |
duke@435 | 757 | lo &= m; |
duke@435 | 758 | ++lo; |
duke@435 | 759 | } |
duke@435 | 760 | return (_rand_seed = lo); |
duke@435 | 761 | } |
duke@435 | 762 | |
duke@435 | 763 | // The INITIALIZED state is distinguished from the SUSPENDED state because the |
duke@435 | 764 | // conditions in which a thread is first started are different from those in which |
duke@435 | 765 | // a suspension is resumed. These differences make it hard for us to apply the |
duke@435 | 766 | // tougher checks when starting threads that we want to do when resuming them. |
duke@435 | 767 | // However, when start_thread is called as a result of Thread.start, on a Java |
duke@435 | 768 | // thread, the operation is synchronized on the Java Thread object. So there |
duke@435 | 769 | // cannot be a race to start the thread and hence for the thread to exit while |
duke@435 | 770 | // we are working on it. Non-Java threads that start Java threads either have |
duke@435 | 771 | // to do so in a context in which races are impossible, or should do appropriate |
duke@435 | 772 | // locking. |
duke@435 | 773 | |
duke@435 | 774 | void os::start_thread(Thread* thread) { |
duke@435 | 775 | // guard suspend/resume |
duke@435 | 776 | MutexLockerEx ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag); |
duke@435 | 777 | OSThread* osthread = thread->osthread(); |
duke@435 | 778 | osthread->set_state(RUNNABLE); |
duke@435 | 779 | pd_start_thread(thread); |
duke@435 | 780 | } |
duke@435 | 781 | |
duke@435 | 782 | //--------------------------------------------------------------------------- |
duke@435 | 783 | // Helper functions for fatal error handler |
duke@435 | 784 | |
duke@435 | 785 | void os::print_hex_dump(outputStream* st, address start, address end, int unitsize) { |
duke@435 | 786 | assert(unitsize == 1 || unitsize == 2 || unitsize == 4 || unitsize == 8, "just checking"); |
duke@435 | 787 | |
duke@435 | 788 | int cols = 0; |
duke@435 | 789 | int cols_per_line = 0; |
duke@435 | 790 | switch (unitsize) { |
duke@435 | 791 | case 1: cols_per_line = 16; break; |
duke@435 | 792 | case 2: cols_per_line = 8; break; |
duke@435 | 793 | case 4: cols_per_line = 4; break; |
duke@435 | 794 | case 8: cols_per_line = 2; break; |
duke@435 | 795 | default: return; |
duke@435 | 796 | } |
duke@435 | 797 | |
duke@435 | 798 | address p = start; |
duke@435 | 799 | st->print(PTR_FORMAT ": ", start); |
duke@435 | 800 | while (p < end) { |
duke@435 | 801 | switch (unitsize) { |
duke@435 | 802 | case 1: st->print("%02x", *(u1*)p); break; |
duke@435 | 803 | case 2: st->print("%04x", *(u2*)p); break; |
duke@435 | 804 | case 4: st->print("%08x", *(u4*)p); break; |
duke@435 | 805 | case 8: st->print("%016" FORMAT64_MODIFIER "x", *(u8*)p); break; |
duke@435 | 806 | } |
duke@435 | 807 | p += unitsize; |
duke@435 | 808 | cols++; |
duke@435 | 809 | if (cols >= cols_per_line && p < end) { |
duke@435 | 810 | cols = 0; |
duke@435 | 811 | st->cr(); |
duke@435 | 812 | st->print(PTR_FORMAT ": ", p); |
duke@435 | 813 | } else { |
duke@435 | 814 | st->print(" "); |
duke@435 | 815 | } |
duke@435 | 816 | } |
duke@435 | 817 | st->cr(); |
duke@435 | 818 | } |
duke@435 | 819 | |
duke@435 | 820 | void os::print_environment_variables(outputStream* st, const char** env_list, |
duke@435 | 821 | char* buffer, int len) { |
duke@435 | 822 | if (env_list) { |
duke@435 | 823 | st->print_cr("Environment Variables:"); |
duke@435 | 824 | |
duke@435 | 825 | for (int i = 0; env_list[i] != NULL; i++) { |
duke@435 | 826 | if (getenv(env_list[i], buffer, len)) { |
drchase@6680 | 827 | st->print("%s", env_list[i]); |
duke@435 | 828 | st->print("="); |
drchase@6680 | 829 | st->print_cr("%s", buffer); |
duke@435 | 830 | } |
duke@435 | 831 | } |
duke@435 | 832 | } |
duke@435 | 833 | } |
duke@435 | 834 | |
duke@435 | 835 | void os::print_cpu_info(outputStream* st) { |
duke@435 | 836 | // cpu |
duke@435 | 837 | st->print("CPU:"); |
duke@435 | 838 | st->print("total %d", os::processor_count()); |
duke@435 | 839 | // It's not safe to query number of active processors after crash |
tschatzl@8661 | 840 | // st->print("(active %d)", os::active_processor_count()); but we can |
tschatzl@8661 | 841 | // print the initial number of active processors. |
tschatzl@8661 | 842 | // We access the raw value here because the assert in the accessor will |
tschatzl@8661 | 843 | // fail if the crash occurs before initialization of this value. |
tschatzl@8661 | 844 | st->print(" (initial active %d)", _initial_active_processor_count); |
duke@435 | 845 | st->print(" %s", VM_Version::cpu_features()); |
duke@435 | 846 | st->cr(); |
jcoomes@2997 | 847 | pd_print_cpu_info(st); |
duke@435 | 848 | } |
duke@435 | 849 | |
duke@435 | 850 | void os::print_date_and_time(outputStream *st) { |
dbuck@6547 | 851 | const int secs_per_day = 86400; |
dbuck@6547 | 852 | const int secs_per_hour = 3600; |
dbuck@6547 | 853 | const int secs_per_min = 60; |
dbuck@6547 | 854 | |
duke@435 | 855 | time_t tloc; |
duke@435 | 856 | (void)time(&tloc); |
duke@435 | 857 | st->print("time: %s", ctime(&tloc)); // ctime adds newline. |
duke@435 | 858 | |
duke@435 | 859 | double t = os::elapsedTime(); |
duke@435 | 860 | // NOTE: It tends to crash after a SEGV if we want to printf("%f",...) in |
duke@435 | 861 | // Linux. Must be a bug in glibc ? Workaround is to round "t" to int |
duke@435 | 862 | // before printf. We lost some precision, but who cares? |
dbuck@6547 | 863 | int eltime = (int)t; // elapsed time in seconds |
dbuck@6547 | 864 | |
dbuck@6547 | 865 | // print elapsed time in a human-readable format: |
dbuck@6547 | 866 | int eldays = eltime / secs_per_day; |
dbuck@6547 | 867 | int day_secs = eldays * secs_per_day; |
dbuck@6547 | 868 | int elhours = (eltime - day_secs) / secs_per_hour; |
dbuck@6547 | 869 | int hour_secs = elhours * secs_per_hour; |
dbuck@6547 | 870 | int elmins = (eltime - day_secs - hour_secs) / secs_per_min; |
dbuck@6547 | 871 | int minute_secs = elmins * secs_per_min; |
dbuck@6547 | 872 | int elsecs = (eltime - day_secs - hour_secs - minute_secs); |
dbuck@6547 | 873 | st->print_cr("elapsed time: %d seconds (%dd %dh %dm %ds)", eltime, eldays, elhours, elmins, elsecs); |
duke@435 | 874 | } |
duke@435 | 875 | |
bobv@2036 | 876 | // moved from debug.cpp (used to be find()) but still called from there |
never@2262 | 877 | // The verbose parameter is only set by the debug code in one case |
never@2262 | 878 | void os::print_location(outputStream* st, intptr_t x, bool verbose) { |
bobv@2036 | 879 | address addr = (address)x; |
bobv@2036 | 880 | CodeBlob* b = CodeCache::find_blob_unsafe(addr); |
bobv@2036 | 881 | if (b != NULL) { |
bobv@2036 | 882 | if (b->is_buffer_blob()) { |
bobv@2036 | 883 | // the interpreter is generated into a buffer blob |
bobv@2036 | 884 | InterpreterCodelet* i = Interpreter::codelet_containing(addr); |
bobv@2036 | 885 | if (i != NULL) { |
twisti@3969 | 886 | st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an Interpreter codelet", addr, (int)(addr - i->code_begin())); |
bobv@2036 | 887 | i->print_on(st); |
bobv@2036 | 888 | return; |
bobv@2036 | 889 | } |
bobv@2036 | 890 | if (Interpreter::contains(addr)) { |
bobv@2036 | 891 | st->print_cr(INTPTR_FORMAT " is pointing into interpreter code" |
bobv@2036 | 892 | " (not bytecode specific)", addr); |
bobv@2036 | 893 | return; |
bobv@2036 | 894 | } |
bobv@2036 | 895 | // |
bobv@2036 | 896 | if (AdapterHandlerLibrary::contains(b)) { |
twisti@3969 | 897 | st->print_cr(INTPTR_FORMAT " is at code_begin+%d in an AdapterHandler", addr, (int)(addr - b->code_begin())); |
bobv@2036 | 898 | AdapterHandlerLibrary::print_handler_on(st, b); |
bobv@2036 | 899 | } |
bobv@2036 | 900 | // the stubroutines are generated into a buffer blob |
bobv@2036 | 901 | StubCodeDesc* d = StubCodeDesc::desc_for(addr); |
bobv@2036 | 902 | if (d != NULL) { |
twisti@3969 | 903 | st->print_cr(INTPTR_FORMAT " is at begin+%d in a stub", addr, (int)(addr - d->begin())); |
bobv@2036 | 904 | d->print_on(st); |
twisti@3969 | 905 | st->cr(); |
bobv@2036 | 906 | return; |
bobv@2036 | 907 | } |
bobv@2036 | 908 | if (StubRoutines::contains(addr)) { |
bobv@2036 | 909 | st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) " |
bobv@2036 | 910 | "stub routine", addr); |
bobv@2036 | 911 | return; |
bobv@2036 | 912 | } |
bobv@2036 | 913 | // the InlineCacheBuffer is using stubs generated into a buffer blob |
bobv@2036 | 914 | if (InlineCacheBuffer::contains(addr)) { |
bobv@2036 | 915 | st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", addr); |
bobv@2036 | 916 | return; |
bobv@2036 | 917 | } |
bobv@2036 | 918 | VtableStub* v = VtableStubs::stub_containing(addr); |
bobv@2036 | 919 | if (v != NULL) { |
twisti@3969 | 920 | st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", addr, (int)(addr - v->entry_point())); |
bobv@2036 | 921 | v->print_on(st); |
twisti@3969 | 922 | st->cr(); |
bobv@2036 | 923 | return; |
bobv@2036 | 924 | } |
bobv@2036 | 925 | } |
twisti@3969 | 926 | nmethod* nm = b->as_nmethod_or_null(); |
twisti@3969 | 927 | if (nm != NULL) { |
bobv@2036 | 928 | ResourceMark rm; |
twisti@3969 | 929 | st->print(INTPTR_FORMAT " is at entry_point+%d in (nmethod*)" INTPTR_FORMAT, |
twisti@3969 | 930 | addr, (int)(addr - nm->entry_point()), nm); |
twisti@3969 | 931 | if (verbose) { |
twisti@3969 | 932 | st->print(" for "); |
twisti@3969 | 933 | nm->method()->print_value_on(st); |
twisti@3969 | 934 | } |
stefank@4127 | 935 | st->cr(); |
twisti@3969 | 936 | nm->print_nmethod(verbose); |
bobv@2036 | 937 | return; |
bobv@2036 | 938 | } |
twisti@3969 | 939 | st->print_cr(INTPTR_FORMAT " is at code_begin+%d in ", addr, (int)(addr - b->code_begin())); |
bobv@2036 | 940 | b->print_on(st); |
bobv@2036 | 941 | return; |
bobv@2036 | 942 | } |
bobv@2036 | 943 | |
bobv@2036 | 944 | if (Universe::heap()->is_in(addr)) { |
bobv@2036 | 945 | HeapWord* p = Universe::heap()->block_start(addr); |
bobv@2036 | 946 | bool print = false; |
bobv@2036 | 947 | // If we couldn't find it it just may mean that heap wasn't parseable |
bobv@2036 | 948 | // See if we were just given an oop directly |
bobv@2036 | 949 | if (p != NULL && Universe::heap()->block_is_obj(p)) { |
bobv@2036 | 950 | print = true; |
bobv@2036 | 951 | } else if (p == NULL && ((oopDesc*)addr)->is_oop()) { |
bobv@2036 | 952 | p = (HeapWord*) addr; |
bobv@2036 | 953 | print = true; |
bobv@2036 | 954 | } |
bobv@2036 | 955 | if (print) { |
stefank@4125 | 956 | if (p == (HeapWord*) addr) { |
stefank@4125 | 957 | st->print_cr(INTPTR_FORMAT " is an oop", addr); |
stefank@4125 | 958 | } else { |
stefank@4125 | 959 | st->print_cr(INTPTR_FORMAT " is pointing into object: " INTPTR_FORMAT, addr, p); |
stefank@4125 | 960 | } |
bobv@2036 | 961 | oop(p)->print_on(st); |
bobv@2036 | 962 | return; |
bobv@2036 | 963 | } |
bobv@2036 | 964 | } else { |
bobv@2036 | 965 | if (Universe::heap()->is_in_reserved(addr)) { |
bobv@2036 | 966 | st->print_cr(INTPTR_FORMAT " is an unallocated location " |
bobv@2036 | 967 | "in the heap", addr); |
bobv@2036 | 968 | return; |
bobv@2036 | 969 | } |
bobv@2036 | 970 | } |
bobv@2036 | 971 | if (JNIHandles::is_global_handle((jobject) addr)) { |
bobv@2036 | 972 | st->print_cr(INTPTR_FORMAT " is a global jni handle", addr); |
bobv@2036 | 973 | return; |
bobv@2036 | 974 | } |
bobv@2036 | 975 | if (JNIHandles::is_weak_global_handle((jobject) addr)) { |
bobv@2036 | 976 | st->print_cr(INTPTR_FORMAT " is a weak global jni handle", addr); |
bobv@2036 | 977 | return; |
bobv@2036 | 978 | } |
bobv@2036 | 979 | #ifndef PRODUCT |
bobv@2036 | 980 | // we don't keep the block list in product mode |
bobv@2036 | 981 | if (JNIHandleBlock::any_contains((jobject) addr)) { |
bobv@2036 | 982 | st->print_cr(INTPTR_FORMAT " is a local jni handle", addr); |
bobv@2036 | 983 | return; |
bobv@2036 | 984 | } |
bobv@2036 | 985 | #endif |
bobv@2036 | 986 | |
bobv@2036 | 987 | for(JavaThread *thread = Threads::first(); thread; thread = thread->next()) { |
bobv@2036 | 988 | // Check for privilege stack |
bobv@2036 | 989 | if (thread->privileged_stack_top() != NULL && |
bobv@2036 | 990 | thread->privileged_stack_top()->contains(addr)) { |
bobv@2036 | 991 | st->print_cr(INTPTR_FORMAT " is pointing into the privilege stack " |
bobv@2036 | 992 | "for thread: " INTPTR_FORMAT, addr, thread); |
never@2262 | 993 | if (verbose) thread->print_on(st); |
bobv@2036 | 994 | return; |
bobv@2036 | 995 | } |
bobv@2036 | 996 | // If the addr is a java thread print information about that. |
bobv@2036 | 997 | if (addr == (address)thread) { |
never@2262 | 998 | if (verbose) { |
never@2262 | 999 | thread->print_on(st); |
never@2262 | 1000 | } else { |
never@2262 | 1001 | st->print_cr(INTPTR_FORMAT " is a thread", addr); |
never@2262 | 1002 | } |
bobv@2036 | 1003 | return; |
bobv@2036 | 1004 | } |
bobv@2036 | 1005 | // If the addr is in the stack region for this thread then report that |
bobv@2036 | 1006 | // and print thread info |
bobv@2036 | 1007 | if (thread->stack_base() >= addr && |
bobv@2036 | 1008 | addr > (thread->stack_base() - thread->stack_size())) { |
bobv@2036 | 1009 | st->print_cr(INTPTR_FORMAT " is pointing into the stack for thread: " |
bobv@2036 | 1010 | INTPTR_FORMAT, addr, thread); |
never@2262 | 1011 | if (verbose) thread->print_on(st); |
bobv@2036 | 1012 | return; |
bobv@2036 | 1013 | } |
bobv@2036 | 1014 | |
bobv@2036 | 1015 | } |
coleenp@4037 | 1016 | |
coleenp@6678 | 1017 | // Check if in metaspace and print types that have vptrs (only method now) |
coleenp@6678 | 1018 | if (Metaspace::contains(addr)) { |
coleenp@6678 | 1019 | if (Method::has_method_vptr((const void*)addr)) { |
coleenp@6678 | 1020 | ((Method*)addr)->print_value_on(st); |
coleenp@6678 | 1021 | st->cr(); |
coleenp@6678 | 1022 | } else { |
coleenp@6678 | 1023 | // Use addr->print() from the debugger instead (not here) |
coleenp@6678 | 1024 | st->print_cr(INTPTR_FORMAT " is pointing into metadata", addr); |
coleenp@6678 | 1025 | } |
coleenp@4037 | 1026 | return; |
coleenp@4037 | 1027 | } |
coleenp@4037 | 1028 | |
bobv@2036 | 1029 | // Try an OS specific find |
bobv@2036 | 1030 | if (os::find(addr, st)) { |
bobv@2036 | 1031 | return; |
bobv@2036 | 1032 | } |
bobv@2036 | 1033 | |
never@2262 | 1034 | st->print_cr(INTPTR_FORMAT " is an unknown value", addr); |
bobv@2036 | 1035 | } |
duke@435 | 1036 | |
duke@435 | 1037 | // Looks like all platforms except IA64 can use the same function to check |
duke@435 | 1038 | // if C stack is walkable beyond current frame. The check for fp() is not |
duke@435 | 1039 | // necessary on Sparc, but it's harmless. |
duke@435 | 1040 | bool os::is_first_C_frame(frame* fr) { |
goetz@6453 | 1041 | #if (defined(IA64) && !defined(AIX)) && !defined(_WIN32) |
morris@4535 | 1042 | // On IA64 we have to check if the callers bsp is still valid |
morris@4535 | 1043 | // (i.e. within the register stack bounds). |
morris@4535 | 1044 | // Notice: this only works for threads created by the VM and only if |
morris@4535 | 1045 | // we walk the current stack!!! If we want to be able to walk |
morris@4535 | 1046 | // arbitrary other threads, we'll have to somehow store the thread |
morris@4535 | 1047 | // object in the frame. |
morris@4535 | 1048 | Thread *thread = Thread::current(); |
morris@4535 | 1049 | if ((address)fr->fp() <= |
morris@4535 | 1050 | thread->register_stack_base() HPUX_ONLY(+ 0x0) LINUX_ONLY(+ 0x50)) { |
morris@4535 | 1051 | // This check is a little hacky, because on Linux the first C |
morris@4535 | 1052 | // frame's ('start_thread') register stack frame starts at |
morris@4535 | 1053 | // "register_stack_base + 0x48" while on HPUX, the first C frame's |
morris@4535 | 1054 | // ('__pthread_bound_body') register stack frame seems to really |
morris@4535 | 1055 | // start at "register_stack_base". |
morris@4535 | 1056 | return true; |
morris@4535 | 1057 | } else { |
morris@4535 | 1058 | return false; |
morris@4535 | 1059 | } |
morris@4535 | 1060 | #elif defined(IA64) && defined(_WIN32) |
duke@435 | 1061 | return true; |
morris@4535 | 1062 | #else |
duke@435 | 1063 | // Load up sp, fp, sender sp and sender fp, check for reasonable values. |
duke@435 | 1064 | // Check usp first, because if that's bad the other accessors may fault |
duke@435 | 1065 | // on some architectures. Ditto ufp second, etc. |
duke@435 | 1066 | uintptr_t fp_align_mask = (uintptr_t)(sizeof(address)-1); |
duke@435 | 1067 | // sp on amd can be 32 bit aligned. |
duke@435 | 1068 | uintptr_t sp_align_mask = (uintptr_t)(sizeof(int)-1); |
duke@435 | 1069 | |
duke@435 | 1070 | uintptr_t usp = (uintptr_t)fr->sp(); |
duke@435 | 1071 | if ((usp & sp_align_mask) != 0) return true; |
duke@435 | 1072 | |
duke@435 | 1073 | uintptr_t ufp = (uintptr_t)fr->fp(); |
duke@435 | 1074 | if ((ufp & fp_align_mask) != 0) return true; |
duke@435 | 1075 | |
duke@435 | 1076 | uintptr_t old_sp = (uintptr_t)fr->sender_sp(); |
duke@435 | 1077 | if ((old_sp & sp_align_mask) != 0) return true; |
duke@435 | 1078 | if (old_sp == 0 || old_sp == (uintptr_t)-1) return true; |
duke@435 | 1079 | |
duke@435 | 1080 | uintptr_t old_fp = (uintptr_t)fr->link(); |
duke@435 | 1081 | if ((old_fp & fp_align_mask) != 0) return true; |
duke@435 | 1082 | if (old_fp == 0 || old_fp == (uintptr_t)-1 || old_fp == ufp) return true; |
duke@435 | 1083 | |
duke@435 | 1084 | // stack grows downwards; if old_fp is below current fp or if the stack |
duke@435 | 1085 | // frame is too large, either the stack is corrupted or fp is not saved |
duke@435 | 1086 | // on stack (i.e. on x86, ebp may be used as general register). The stack |
duke@435 | 1087 | // is not walkable beyond current frame. |
duke@435 | 1088 | if (old_fp < ufp) return true; |
duke@435 | 1089 | if (old_fp - ufp > 64 * K) return true; |
duke@435 | 1090 | |
duke@435 | 1091 | return false; |
morris@4535 | 1092 | #endif |
duke@435 | 1093 | } |
duke@435 | 1094 | |
duke@435 | 1095 | #ifdef ASSERT |
duke@435 | 1096 | extern "C" void test_random() { |
duke@435 | 1097 | const double m = 2147483647; |
duke@435 | 1098 | double mean = 0.0, variance = 0.0, t; |
duke@435 | 1099 | long reps = 10000; |
duke@435 | 1100 | unsigned long seed = 1; |
duke@435 | 1101 | |
duke@435 | 1102 | tty->print_cr("seed %ld for %ld repeats...", seed, reps); |
duke@435 | 1103 | os::init_random(seed); |
duke@435 | 1104 | long num; |
duke@435 | 1105 | for (int k = 0; k < reps; k++) { |
duke@435 | 1106 | num = os::random(); |
duke@435 | 1107 | double u = (double)num / m; |
duke@435 | 1108 | assert(u >= 0.0 && u <= 1.0, "bad random number!"); |
duke@435 | 1109 | |
duke@435 | 1110 | // calculate mean and variance of the random sequence |
duke@435 | 1111 | mean += u; |
duke@435 | 1112 | variance += (u*u); |
duke@435 | 1113 | } |
duke@435 | 1114 | mean /= reps; |
duke@435 | 1115 | variance /= (reps - 1); |
duke@435 | 1116 | |
duke@435 | 1117 | assert(num == 1043618065, "bad seed"); |
duke@435 | 1118 | tty->print_cr("mean of the 1st 10000 numbers: %f", mean); |
duke@435 | 1119 | tty->print_cr("variance of the 1st 10000 numbers: %f", variance); |
duke@435 | 1120 | const double eps = 0.0001; |
duke@435 | 1121 | t = fabsd(mean - 0.5018); |
duke@435 | 1122 | assert(t < eps, "bad mean"); |
duke@435 | 1123 | t = (variance - 0.3355) < 0.0 ? -(variance - 0.3355) : variance - 0.3355; |
duke@435 | 1124 | assert(t < eps, "bad variance"); |
duke@435 | 1125 | } |
duke@435 | 1126 | #endif |
duke@435 | 1127 | |
duke@435 | 1128 | |
duke@435 | 1129 | // Set up the boot classpath. |
duke@435 | 1130 | |
duke@435 | 1131 | char* os::format_boot_path(const char* format_string, |
duke@435 | 1132 | const char* home, |
duke@435 | 1133 | int home_len, |
duke@435 | 1134 | char fileSep, |
duke@435 | 1135 | char pathSep) { |
duke@435 | 1136 | assert((fileSep == '/' && pathSep == ':') || |
duke@435 | 1137 | (fileSep == '\\' && pathSep == ';'), "unexpected seperator chars"); |
duke@435 | 1138 | |
duke@435 | 1139 | // Scan the format string to determine the length of the actual |
duke@435 | 1140 | // boot classpath, and handle platform dependencies as well. |
duke@435 | 1141 | int formatted_path_len = 0; |
duke@435 | 1142 | const char* p; |
duke@435 | 1143 | for (p = format_string; *p != 0; ++p) { |
duke@435 | 1144 | if (*p == '%') formatted_path_len += home_len - 1; |
duke@435 | 1145 | ++formatted_path_len; |
duke@435 | 1146 | } |
duke@435 | 1147 | |
zgu@3900 | 1148 | char* formatted_path = NEW_C_HEAP_ARRAY(char, formatted_path_len + 1, mtInternal); |
duke@435 | 1149 | if (formatted_path == NULL) { |
duke@435 | 1150 | return NULL; |
duke@435 | 1151 | } |
duke@435 | 1152 | |
duke@435 | 1153 | // Create boot classpath from format, substituting separator chars and |
duke@435 | 1154 | // java home directory. |
duke@435 | 1155 | char* q = formatted_path; |
duke@435 | 1156 | for (p = format_string; *p != 0; ++p) { |
duke@435 | 1157 | switch (*p) { |
duke@435 | 1158 | case '%': |
duke@435 | 1159 | strcpy(q, home); |
duke@435 | 1160 | q += home_len; |
duke@435 | 1161 | break; |
duke@435 | 1162 | case '/': |
duke@435 | 1163 | *q++ = fileSep; |
duke@435 | 1164 | break; |
duke@435 | 1165 | case ':': |
duke@435 | 1166 | *q++ = pathSep; |
duke@435 | 1167 | break; |
duke@435 | 1168 | default: |
duke@435 | 1169 | *q++ = *p; |
duke@435 | 1170 | } |
duke@435 | 1171 | } |
duke@435 | 1172 | *q = '\0'; |
duke@435 | 1173 | |
duke@435 | 1174 | assert((q - formatted_path) == formatted_path_len, "formatted_path size botched"); |
duke@435 | 1175 | return formatted_path; |
duke@435 | 1176 | } |
duke@435 | 1177 | |
duke@435 | 1178 | |
duke@435 | 1179 | bool os::set_boot_path(char fileSep, char pathSep) { |
duke@435 | 1180 | const char* home = Arguments::get_java_home(); |
duke@435 | 1181 | int home_len = (int)strlen(home); |
duke@435 | 1182 | |
duke@435 | 1183 | static const char* meta_index_dir_format = "%/lib/"; |
duke@435 | 1184 | static const char* meta_index_format = "%/lib/meta-index"; |
duke@435 | 1185 | char* meta_index = format_boot_path(meta_index_format, home, home_len, fileSep, pathSep); |
duke@435 | 1186 | if (meta_index == NULL) return false; |
duke@435 | 1187 | char* meta_index_dir = format_boot_path(meta_index_dir_format, home, home_len, fileSep, pathSep); |
duke@435 | 1188 | if (meta_index_dir == NULL) return false; |
duke@435 | 1189 | Arguments::set_meta_index_path(meta_index, meta_index_dir); |
duke@435 | 1190 | |
duke@435 | 1191 | // Any modification to the JAR-file list, for the boot classpath must be |
duke@435 | 1192 | // aligned with install/install/make/common/Pack.gmk. Note: boot class |
duke@435 | 1193 | // path class JARs, are stripped for StackMapTable to reduce download size. |
duke@435 | 1194 | static const char classpath_format[] = |
duke@435 | 1195 | "%/lib/resources.jar:" |
duke@435 | 1196 | "%/lib/rt.jar:" |
duke@435 | 1197 | "%/lib/sunrsasign.jar:" |
duke@435 | 1198 | "%/lib/jsse.jar:" |
duke@435 | 1199 | "%/lib/jce.jar:" |
duke@435 | 1200 | "%/lib/charsets.jar:" |
phh@3427 | 1201 | "%/lib/jfr.jar:" |
duke@435 | 1202 | "%/classes"; |
duke@435 | 1203 | char* sysclasspath = format_boot_path(classpath_format, home, home_len, fileSep, pathSep); |
duke@435 | 1204 | if (sysclasspath == NULL) return false; |
duke@435 | 1205 | Arguments::set_sysclasspath(sysclasspath); |
duke@435 | 1206 | |
duke@435 | 1207 | return true; |
duke@435 | 1208 | } |
duke@435 | 1209 | |
phh@1126 | 1210 | /* |
phh@1126 | 1211 | * Splits a path, based on its separator, the number of |
phh@1126 | 1212 | * elements is returned back in n. |
phh@1126 | 1213 | * It is the callers responsibility to: |
phh@1126 | 1214 | * a> check the value of n, and n may be 0. |
phh@1126 | 1215 | * b> ignore any empty path elements |
phh@1126 | 1216 | * c> free up the data. |
phh@1126 | 1217 | */ |
phh@1126 | 1218 | char** os::split_path(const char* path, int* n) { |
phh@1126 | 1219 | *n = 0; |
phh@1126 | 1220 | if (path == NULL || strlen(path) == 0) { |
phh@1126 | 1221 | return NULL; |
phh@1126 | 1222 | } |
phh@1126 | 1223 | const char psepchar = *os::path_separator(); |
zgu@3900 | 1224 | char* inpath = (char*)NEW_C_HEAP_ARRAY(char, strlen(path) + 1, mtInternal); |
phh@1126 | 1225 | if (inpath == NULL) { |
phh@1126 | 1226 | return NULL; |
phh@1126 | 1227 | } |
bpittore@4261 | 1228 | strcpy(inpath, path); |
phh@1126 | 1229 | int count = 1; |
phh@1126 | 1230 | char* p = strchr(inpath, psepchar); |
phh@1126 | 1231 | // Get a count of elements to allocate memory |
phh@1126 | 1232 | while (p != NULL) { |
phh@1126 | 1233 | count++; |
phh@1126 | 1234 | p++; |
phh@1126 | 1235 | p = strchr(p, psepchar); |
phh@1126 | 1236 | } |
zgu@3900 | 1237 | char** opath = (char**) NEW_C_HEAP_ARRAY(char*, count, mtInternal); |
phh@1126 | 1238 | if (opath == NULL) { |
phh@1126 | 1239 | return NULL; |
phh@1126 | 1240 | } |
phh@1126 | 1241 | |
phh@1126 | 1242 | // do the actual splitting |
phh@1126 | 1243 | p = inpath; |
phh@1126 | 1244 | for (int i = 0 ; i < count ; i++) { |
phh@1126 | 1245 | size_t len = strcspn(p, os::path_separator()); |
phh@1126 | 1246 | if (len > JVM_MAXPATHLEN) { |
phh@1126 | 1247 | return NULL; |
phh@1126 | 1248 | } |
phh@1126 | 1249 | // allocate the string and add terminator storage |
zgu@3900 | 1250 | char* s = (char*)NEW_C_HEAP_ARRAY(char, len + 1, mtInternal); |
phh@1126 | 1251 | if (s == NULL) { |
phh@1126 | 1252 | return NULL; |
phh@1126 | 1253 | } |
phh@1126 | 1254 | strncpy(s, p, len); |
phh@1126 | 1255 | s[len] = '\0'; |
phh@1126 | 1256 | opath[i] = s; |
phh@1126 | 1257 | p += len + 1; |
phh@1126 | 1258 | } |
zgu@3900 | 1259 | FREE_C_HEAP_ARRAY(char, inpath, mtInternal); |
phh@1126 | 1260 | *n = count; |
phh@1126 | 1261 | return opath; |
phh@1126 | 1262 | } |
phh@1126 | 1263 | |
// Install 'page' as the global memory serialize page. The expected shift
// count is derived from the JavaThread size relative to a 64-byte cache
// line and cross-checked against the SerializePageShiftCount constant.
void os::set_memory_serialize_page(address page) {
  int count = log2_intptr(sizeof(class JavaThread)) - log2_intptr(64);
  _mem_serialize_page = (volatile int32_t *)page;
  // We initialize the serialization page shift count here
  // We assume a cache line size of 64 bytes
  assert(SerializePageShiftCount == count,
         "thread size changed, fix SerializePageShiftCount constant");
  // Mask selects an int32_t-aligned offset within the page.
  set_serialize_page_mask((uintptr_t)(vm_page_size() - sizeof(int32_t)));
}
duke@435 | 1273 | |
xlu@490 | 1274 | static volatile intptr_t SerializePageLock = 0; |
xlu@490 | 1275 | |
// This method is called from signal handler when SIGSEGV occurs while the current
// thread tries to store to the "read-only" memory serialize page during state
// transition.
void os::block_on_serialize_page_trap() {
  if (TraceSafepoint) {
    tty->print_cr("Block until the serialize page permission restored");
  }
  // When VMThread is holding the SerializePageLock during modifying the
  // access permission of the memory serialize page, the following call
  // will block until the permission of that page is restored to rw.
  // Generally, it is unsafe to manipulate locks in signal handlers, but in
  // this case, it's OK as the signal is synchronous and we know precisely when
  // it can occur.
  // NOTE(review): the muxAcquire tag below says "set_memory_serialize_page"
  // although this is block_on_serialize_page_trap -- looks like a stale
  // diagnostic label; confirm before changing.
  Thread::muxAcquire(&SerializePageLock, "set_memory_serialize_page");
  Thread::muxRelease(&SerializePageLock);
}
duke@435 | 1292 | |
// Serialize all thread state variables
void os::serialize_thread_states() {
  // On some platforms such as Solaris & Linux, the time duration of the page
  // permission restoration is observed to be much longer than expected due to
  // scheduler starvation problem etc. To avoid the long synchronization
  // time and expensive page trap spinning, 'SerializePageLock' is used to block
  // the mutator thread if such case is encountered. See bug 6546278 for details.
  Thread::muxAcquire(&SerializePageLock, "serialize_thread_states");
  // Flip the serialize page to read-only and back to read-write while holding
  // the lock; a mutator storing to the page meanwhile faults and blocks in
  // block_on_serialize_page_trap() until the permission is restored.
  os::protect_memory((char *)os::get_memory_serialize_page(),
                     os::vm_page_size(), MEM_PROT_READ);
  os::protect_memory((char *)os::get_memory_serialize_page(),
                     os::vm_page_size(), MEM_PROT_RW);
  Thread::muxRelease(&SerializePageLock);
}
duke@435 | 1307 | |
duke@435 | 1308 | // Returns true if the current stack pointer is above the stack shadow |
duke@435 | 1309 | // pages, false otherwise. |
duke@435 | 1310 | |
duke@435 | 1311 | bool os::stack_shadow_pages_available(Thread *thread, methodHandle method) { |
duke@435 | 1312 | assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check"); |
duke@435 | 1313 | address sp = current_stack_pointer(); |
duke@435 | 1314 | // Check if we have StackShadowPages above the yellow zone. This parameter |
twisti@1040 | 1315 | // is dependent on the depth of the maximum VM call stack possible from |
duke@435 | 1316 | // the handler for stack overflow. 'instanceof' in the stack overflow |
duke@435 | 1317 | // handler or a println uses at least 8k stack of VM and native code |
duke@435 | 1318 | // respectively. |
duke@435 | 1319 | const int framesize_in_bytes = |
duke@435 | 1320 | Interpreter::size_top_interpreter_activation(method()) * wordSize; |
duke@435 | 1321 | int reserved_area = ((StackShadowPages + StackRedPages + StackYellowPages) |
duke@435 | 1322 | * vm_page_size()) + framesize_in_bytes; |
duke@435 | 1323 | // The very lower end of the stack |
duke@435 | 1324 | address stack_limit = thread->stack_base() - thread->stack_size(); |
duke@435 | 1325 | return (sp > (stack_limit + reserved_area)); |
duke@435 | 1326 | } |
duke@435 | 1327 | |
ehelin@7780 | 1328 | size_t os::page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned) { |
duke@435 | 1329 | assert(min_pages > 0, "sanity"); |
duke@435 | 1330 | if (UseLargePages) { |
ehelin@7778 | 1331 | const size_t max_page_size = region_size / min_pages; |
duke@435 | 1332 | |
ehelin@7778 | 1333 | for (size_t i = 0; _page_sizes[i] != 0; ++i) { |
ehelin@7778 | 1334 | const size_t page_size = _page_sizes[i]; |
ehelin@7780 | 1335 | if (page_size <= max_page_size) { |
ehelin@7780 | 1336 | if (!must_be_aligned || is_size_aligned(region_size, page_size)) { |
ehelin@7780 | 1337 | return page_size; |
ehelin@7780 | 1338 | } |
duke@435 | 1339 | } |
duke@435 | 1340 | } |
duke@435 | 1341 | } |
duke@435 | 1342 | |
duke@435 | 1343 | return vm_page_size(); |
duke@435 | 1344 | } |
duke@435 | 1345 | |
// Page size for a region that must be an exact multiple of the chosen page size.
size_t os::page_size_for_region_aligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, true);
}
ehelin@7780 | 1349 | |
// Page size for a region that need not be a multiple of the chosen page size.
size_t os::page_size_for_region_unaligned(size_t region_size, size_t min_pages) {
  return page_size_for_region(region_size, min_pages, false);
}
ehelin@7780 | 1353 | |
duke@435 | 1354 | #ifndef PRODUCT |
jcoomes@3057 | 1355 | void os::trace_page_sizes(const char* str, const size_t* page_sizes, int count) |
jcoomes@3057 | 1356 | { |
jcoomes@3057 | 1357 | if (TracePageSizes) { |
jcoomes@3057 | 1358 | tty->print("%s: ", str); |
jcoomes@3057 | 1359 | for (int i = 0; i < count; ++i) { |
jcoomes@3057 | 1360 | tty->print(" " SIZE_FORMAT, page_sizes[i]); |
jcoomes@3057 | 1361 | } |
jcoomes@3057 | 1362 | tty->cr(); |
jcoomes@3057 | 1363 | } |
jcoomes@3057 | 1364 | } |
jcoomes@3057 | 1365 | |
duke@435 | 1366 | void os::trace_page_sizes(const char* str, const size_t region_min_size, |
duke@435 | 1367 | const size_t region_max_size, const size_t page_size, |
duke@435 | 1368 | const char* base, const size_t size) |
duke@435 | 1369 | { |
duke@435 | 1370 | if (TracePageSizes) { |
duke@435 | 1371 | tty->print_cr("%s: min=" SIZE_FORMAT " max=" SIZE_FORMAT |
duke@435 | 1372 | " pg_sz=" SIZE_FORMAT " base=" PTR_FORMAT |
duke@435 | 1373 | " size=" SIZE_FORMAT, |
duke@435 | 1374 | str, region_min_size, region_max_size, |
duke@435 | 1375 | page_size, base, size); |
duke@435 | 1376 | } |
duke@435 | 1377 | } |
duke@435 | 1378 | #endif // #ifndef PRODUCT |
duke@435 | 1379 | |
duke@435 | 1380 | // This is the working definition of a server class machine: |
duke@435 | 1381 | // >= 2 physical CPU's and >=2GB of memory, with some fuzz |
duke@435 | 1382 | // because the graphics memory (?) sometimes masks physical memory. |
duke@435 | 1383 | // If you want to change the definition of a server class machine |
duke@435 | 1384 | // on some OS or platform, e.g., >=4GB on Windohs platforms, |
duke@435 | 1385 | // then you'll have to parameterize this method based on that state, |
duke@435 | 1386 | // as was done for logical processors here, or replicate and |
duke@435 | 1387 | // specialize this method for each platform. (Or fix os to have |
duke@435 | 1388 | // some inheritance structure and use subclassing. Sigh.) |
duke@435 | 1389 | // If you want some platform to always or never behave as a server |
duke@435 | 1390 | // class machine, change the setting of AlwaysActAsServerClassMachine |
duke@435 | 1391 | // and NeverActAsServerClassMachine in globals*.hpp. |
duke@435 | 1392 | bool os::is_server_class_machine() { |
duke@435 | 1393 | // First check for the early returns |
duke@435 | 1394 | if (NeverActAsServerClassMachine) { |
duke@435 | 1395 | return false; |
duke@435 | 1396 | } |
duke@435 | 1397 | if (AlwaysActAsServerClassMachine) { |
duke@435 | 1398 | return true; |
duke@435 | 1399 | } |
duke@435 | 1400 | // Then actually look at the machine |
duke@435 | 1401 | bool result = false; |
duke@435 | 1402 | const unsigned int server_processors = 2; |
duke@435 | 1403 | const julong server_memory = 2UL * G; |
duke@435 | 1404 | // We seem not to get our full complement of memory. |
duke@435 | 1405 | // We allow some part (1/8?) of the memory to be "missing", |
duke@435 | 1406 | // based on the sizes of DIMMs, and maybe graphics cards. |
duke@435 | 1407 | const julong missing_memory = 256UL * M; |
duke@435 | 1408 | |
duke@435 | 1409 | /* Is this a server class machine? */ |
duke@435 | 1410 | if ((os::active_processor_count() >= (int)server_processors) && |
duke@435 | 1411 | (os::physical_memory() >= (server_memory - missing_memory))) { |
duke@435 | 1412 | const unsigned int logical_processors = |
duke@435 | 1413 | VM_Version::logical_processors_per_package(); |
duke@435 | 1414 | if (logical_processors > 1) { |
duke@435 | 1415 | const unsigned int physical_packages = |
duke@435 | 1416 | os::active_processor_count() / logical_processors; |
duke@435 | 1417 | if (physical_packages > server_processors) { |
duke@435 | 1418 | result = true; |
duke@435 | 1419 | } |
duke@435 | 1420 | } else { |
duke@435 | 1421 | result = true; |
duke@435 | 1422 | } |
duke@435 | 1423 | } |
duke@435 | 1424 | return result; |
duke@435 | 1425 | } |
dsamersoff@2751 | 1426 | |
// Capture the active processor count once at startup; the assert guarantees
// this runs exactly once.
void os::initialize_initial_active_processor_count() {
  assert(_initial_active_processor_count == 0, "Initial active processor count already set.");
  _initial_active_processor_count = active_processor_count();
}
tschatzl@8661 | 1431 | |
// Execute the task body (internal_do_task) and mark completion. Caller must
// hold the Threads_lock unless running on the VM thread itself.
void os::SuspendedThreadTask::run() {
  assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
  internal_do_task();
  _done = true;
}
sla@5237 | 1437 | |
// Platform-independent entry point; defers to the pd_ implementation.
bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  return os::pd_create_stack_guard_pages(addr, bytes);
}
zgu@3900 | 1441 | |
// Reserve virtual address space via pd_reserve_memory and, on success,
// record the reservation with NMT.
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  }

  return result;
}
zgu@5053 | 1450 | |
// Reserve virtual address space and, on success, record both the
// reservation and its NMT memory type ('flags').
char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint,
                         MEMFLAGS flags) {
  char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
    MemTracker::record_virtual_memory_type((address)result, flags);
  }

  return result;
}
zgu@5053 | 1461 | |
// Try to reserve memory at a specific address; records with NMT on success.
char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
  char* result = pd_attempt_reserve_memory_at(bytes, addr);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
  }
  return result;
}
zgu@3900 | 1469 | |
// Split a previously reserved region at offset 'split'; passed straight
// through to the platform layer.
void os::split_reserved_memory(char *base, size_t size,
                               size_t split, bool realloc) {
  pd_split_reserved_memory(base, size, split, realloc);
}
zgu@3900 | 1474 | |
// Commit reserved memory; records the commit with NMT only on success.
bool os::commit_memory(char* addr, size_t bytes, bool executable) {
  bool res = pd_commit_memory(addr, bytes, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
  }
  return res;
}
zgu@3900 | 1482 | |
// Commit reserved memory with a page-size/alignment hint; records the
// commit with NMT only on success.
bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
                       bool executable) {
  bool res = os::pd_commit_memory(addr, size, alignment_hint, executable);
  if (res) {
    MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
  }
  return res;
}
zgu@3900 | 1491 | |
// Commit or abort the VM with 'mesg'; NMT recording only runs if the
// platform call returned (i.e. the commit succeeded).
void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
                               const char* mesg) {
  pd_commit_memory_or_exit(addr, bytes, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
}
dcubed@5255 | 1497 | |
// As above, with a page-size/alignment hint.
void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
                               bool executable, const char* mesg) {
  os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
  MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
}
dcubed@5255 | 1503 | |
// Uncommit memory. When detailed NMT tracking is on, the Tracker is scoped
// around the platform call and records only if the uncommit succeeded.
bool os::uncommit_memory(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
    res = pd_uncommit_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    // NMT off/minimal: skip the bookkeeping entirely.
    res = pd_uncommit_memory(addr, bytes);
  }
  return res;
}
zgu@3900 | 1517 | |
// Release reserved memory. Mirrors uncommit_memory: the NMT Tracker wraps
// the platform call and records only on success.
bool os::release_memory(char* addr, size_t bytes) {
  bool res;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    res = pd_release_memory(addr, bytes);
    if (res) {
      tkr.record((address)addr, bytes);
    }
  } else {
    // NMT off/minimal: skip the bookkeeping entirely.
    res = pd_release_memory(addr, bytes);
  }
  return res;
}
zgu@3900 | 1531 | |
// Write one byte on every page in [start, end) so the OS backs the range
// with actual memory up front. 'volatile' keeps the compiler from eliding
// the stores.
void os::pretouch_memory(char* start, char* end) {
  for (volatile char *p = start; p < end; p += os::vm_page_size()) {
    *p = 0;
  }
}
zgu@3900 | 1537 | |
// Map a file region into memory; on success NMT records it as a combined
// reserve-and-commit.
char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                     char *addr, size_t bytes, bool read_only,
                     bool allow_exec) {
  char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
  if (result != NULL) {
    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
  }
  return result;
}
zgu@3900 | 1547 | |
// Remap an already-mapped file region; no NMT bookkeeping here since the
// original mapping was already recorded.
char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
                       char *addr, size_t bytes, bool read_only,
                       bool allow_exec) {
  return pd_remap_memory(fd, file_name, file_offset, addr, bytes,
                         read_only, allow_exec);
}
zgu@3900 | 1554 | |
// Unmap a mapped region. Mirrors release_memory: the NMT release Tracker
// wraps the platform call and records only on success.
bool os::unmap_memory(char *addr, size_t bytes) {
  bool result;
  if (MemTracker::tracking_level() > NMT_minimal) {
    Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
    result = pd_unmap_memory(addr, bytes);
    if (result) {
      tkr.record((address)addr, bytes);
    }
  } else {
    // NMT off/minimal: skip the bookkeeping entirely.
    result = pd_unmap_memory(addr, bytes);
  }
  return result;
}
zgu@3900 | 1568 | |
// Advise the OS that the range is no longer needed; platform-specific.
void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  pd_free_memory(addr, bytes, alignment_hint);
}
zgu@3900 | 1572 | |
// Re-establish the preferred page alignment/size for the range; platform-specific.
void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  pd_realign_memory(addr, bytes, alignment_hint);
}
zgu@3900 | 1576 | |
sla@5237 | 1577 | #ifndef TARGET_OS_FAMILY_windows |
sla@5237 | 1578 | /* try to switch state from state "from" to state "to" |
sla@5237 | 1579 | * returns the state set after the method is complete |
sla@5237 | 1580 | */ |
os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
                                                         os::SuspendResume::State to)
{
  // Atomically replace _state with 'to' iff it currently equals 'from';
  // cmpxchg returns the value found in _state before the attempt.
  os::SuspendResume::State result =
    (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
  if (result == from) {
    // success
    return to;
  }
  // CAS failed: report the state that was actually found.
  return result;
}
sla@5237 | 1592 | #endif |
ehelin@7778 | 1593 | |
ehelin@7778 | 1594 | /////////////// Unit tests /////////////// |
ehelin@7778 | 1595 | |
ehelin@7778 | 1596 | #ifndef PRODUCT |
ehelin@7778 | 1597 | |
ehelin@7778 | 1598 | #define assert_eq(a,b) assert(a == b, err_msg(SIZE_FORMAT " != " SIZE_FORMAT, a, b)) |
ehelin@7778 | 1599 | |
// In-VM unit tests for os::page_size_for_region_{aligned,unaligned};
// non-product builds only.
class TestOS : AllStatic {
  // The regular (small) VM page size.
  static size_t small_page_size() {
    return os::vm_page_size();
  }

  // A representative large page size: whatever the aligned query picks for
  // an example 4M region holding a single page.
  static size_t large_page_size() {
    const size_t large_page_size_example = 4 * M;
    return os::page_size_for_region_aligned(large_page_size_example, 1);
  }

  // A large-page-sized region required to hold as many pages as fit in it
  // at small-page granularity must come back with the small page size.
  static void test_page_size_for_region_aligned() {
    if (UseLargePages) {
      const size_t small_page = small_page_size();
      const size_t large_page = large_page_size();

      if (large_page > small_page) {
        size_t num_small_pages_in_large = large_page / small_page;
        size_t page = os::page_size_for_region_aligned(large_page, num_small_pages_in_large);

        assert_eq(page, small_page);
      }
    }
  }

  // Alignment behavior: a region that is not a multiple of the large page
  // falls back to small pages; an exact multiple may use large pages.
  static void test_page_size_for_region_alignment() {
    if (UseLargePages) {
      const size_t small_page = small_page_size();
      const size_t large_page = large_page_size();
      if (large_page > small_page) {
        const size_t unaligned_region = large_page + 17;
        size_t page = os::page_size_for_region_aligned(unaligned_region, 1);
        assert_eq(page, small_page);

        const size_t num_pages = 5;
        const size_t aligned_region = large_page * num_pages;
        page = os::page_size_for_region_aligned(aligned_region, num_pages);
        assert_eq(page, large_page);
      }
    }
  }

  // Unaligned queries: exact, slightly-over, and slightly-under page sizes.
  static void test_page_size_for_region_unaligned() {
    if (UseLargePages) {
      // Given exact page size, should return that page size.
      for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
        size_t expected = os::_page_sizes[i];
        size_t actual = os::page_size_for_region_unaligned(expected, 1);
        assert_eq(expected, actual);
      }

      // Given slightly larger size than a page size, return the page size.
      for (size_t i = 0; os::_page_sizes[i] != 0; i++) {
        size_t expected = os::_page_sizes[i];
        size_t actual = os::page_size_for_region_unaligned(expected + 17, 1);
        assert_eq(expected, actual);
      }

      // Given a slightly smaller size than a page size,
      // return the next smaller page size.
      if (os::_page_sizes[1] > os::_page_sizes[0]) {
        size_t expected = os::_page_sizes[0];
        size_t actual = os::page_size_for_region_unaligned(os::_page_sizes[1] - 17, 1);
        assert_eq(actual, expected);
      }

      // Return small page size for values less than a small page.
      size_t small_page = small_page_size();
      size_t actual = os::page_size_for_region_unaligned(small_page - 17, 1);
      assert_eq(small_page, actual);
    }
  }

 public:
  // Run all TestOS cases; called from TestOS_test().
  static void run_tests() {
    test_page_size_for_region_aligned();
    test_page_size_for_region_alignment();
    test_page_size_for_region_unaligned();
  }
};
ehelin@7778 | 1679 | |
// Free-function entry point for the internal VM test machinery.
// NOTE(review): the caller is outside this file; presumably the internal
// VM tests runner -- confirm before relying on invocation context.
void TestOS_test() {
  TestOS::run_tests();
}
ehelin@7778 | 1683 | |
ehelin@7778 | 1684 | #endif // PRODUCT |