src/share/vm/runtime/os.hpp

author:      tschatzl
date:        Thu, 15 Dec 2016 19:48:32 -0500
changeset:   8661:27ae9bbef86a
parent:      7780:5788dbd1f2d6
child:       8856:ac27a9c85bea
child:       9289:427b2fb1944f
child:       9413:5aa3d728164a
permissions: -rw-r--r--

8147910: Cache initial active_processor_count
Summary: Introduce and initialize active_processor_count variable in VM.
Reviewed-by: dholmes, jprovino

duke@435 1 /*
hseigel@6755 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_RUNTIME_OS_HPP
stefank@2314 26 #define SHARE_VM_RUNTIME_OS_HPP
stefank@2314 27
stefank@2314 28 #include "jvmtifiles/jvmti.h"
stefank@2314 29 #include "runtime/atomic.hpp"
stefank@2314 30 #include "runtime/extendedPC.hpp"
stefank@2314 31 #include "runtime/handles.hpp"
stefank@2314 32 #include "utilities/top.hpp"
stefank@2314 33 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 34 # include "jvm_linux.h"
rbackman@5424 35 # include <setjmp.h>
stefank@2314 36 #endif
stefank@2314 37 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 38 # include "jvm_solaris.h"
rbackman@5424 39 # include <setjmp.h>
stefank@2314 40 #endif
stefank@2314 41 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 42 # include "jvm_windows.h"
stefank@2314 43 #endif
goetz@6461 44 #ifdef TARGET_OS_FAMILY_aix
goetz@6461 45 # include "jvm_aix.h"
goetz@6461 46 # include <setjmp.h>
goetz@6461 47 #endif
never@3156 48 #ifdef TARGET_OS_FAMILY_bsd
never@3156 49 # include "jvm_bsd.h"
rbackman@5424 50 # include <setjmp.h>
sla@6667 51 # ifdef __APPLE__
sla@6667 52 # include <mach/mach_time.h>
sla@6667 53 # endif
never@3156 54 #endif
stefank@2314 55
bpittore@5585 56 class AgentLibrary;
bpittore@5585 57
duke@435 58 // os defines the interface to the operating system; this includes traditional
duke@435 59 // OS services (time, I/O) as well as other functionality with system-
duke@435 60 // dependent code.
duke@435 61
duke@435 62 typedef void (*dll_func)(...);
duke@435 63
duke@435 64 class Thread;
duke@435 65 class JavaThread;
duke@435 66 class Event;
duke@435 67 class DLL;
duke@435 68 class FileHandle;
zgu@7074 69 class NativeCallStack;
zgu@7074 70
iveresov@576 71 template<class E> class GrowableArray;
duke@435 72
duke@435 73 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
duke@435 74
duke@435 75 // Platform-independent error return values from OS functions
duke@435 76 enum OSReturn {
duke@435 77 OS_OK = 0, // Operation was successful
duke@435 78 OS_ERR = -1, // Operation failed
duke@435 79 OS_INTRPT = -2, // Operation was interrupted
duke@435 80 OS_TIMEOUT = -3, // Operation timed out
duke@435 81 OS_NOMEM = -5, // Operation failed for lack of memory
duke@435 82 OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource
duke@435 83 };
duke@435 84
duke@435 85 enum ThreadPriority { // JLS 20.20.1-3
duke@435 86 NoPriority = -1, // Initial non-priority value
duke@435 87 MinPriority = 1, // Minimum priority
duke@435 88 NormPriority = 5, // Normal (non-daemon) priority
duke@435 89 NearMaxPriority = 9, // High priority, used for VMThread
phh@3481 90 MaxPriority = 10, // Highest priority, used for WatcherThread
duke@435 91 // ensures that VMThread doesn't starve profiler
phh@3481 92 CriticalPriority = 11 // Critical thread priority
duke@435 93 };
duke@435 94
dcubed@5255 95 // Executable parameter flag for os::commit_memory() and
dcubed@5255 96 // os::commit_memory_or_exit().
dcubed@5255 97 const bool ExecMem = true;
dcubed@5255 98
duke@435 99 // Typedef for structured exception handling support
duke@435 100 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
duke@435 101
zgu@7074 102 class MallocTracker;
zgu@7074 103
duke@435 104 class os: AllStatic {
twisti@5726 105 friend class VMStructs;
zgu@7074 106 friend class MallocTracker;
phh@1558 107 public:
duke@435 108 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
duke@435 109
phh@1558 110 private:
duke@435 111 static OSThread* _starting_thread;
duke@435 112 static address _polling_page;
duke@435 113 static volatile int32_t * _mem_serialize_page;
duke@435 114 static uintptr_t _serialize_page_mask;
phh@1558 115 public:
duke@435 116 static size_t _page_sizes[page_sizes_max];
duke@435 117
phh@1558 118 private:
duke@435 119 static void init_page_sizes(size_t default_page_size) {
duke@435 120 _page_sizes[0] = default_page_size;
duke@435 121 _page_sizes[1] = 0; // sentinel
duke@435 122 }
duke@435 123
zgu@3900 124 static char* pd_reserve_memory(size_t bytes, char* addr = 0,
zgu@3900 125 size_t alignment_hint = 0);
zgu@3900 126 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
zgu@3900 127 static void pd_split_reserved_memory(char *base, size_t size,
zgu@3900 128 size_t split, bool realloc);
dcubed@5255 129 static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
zgu@3900 130 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
dcubed@5255 131 bool executable);
dcubed@5255 132 // Same as pd_commit_memory(), except that it either succeeds or calls
dcubed@5255 133 // vm_exit_out_of_memory() with the specified mesg.
dcubed@5255 134 static void pd_commit_memory_or_exit(char* addr, size_t bytes,
dcubed@5255 135 bool executable, const char* mesg);
dcubed@5255 136 static void pd_commit_memory_or_exit(char* addr, size_t size,
dcubed@5255 137 size_t alignment_hint,
dcubed@5255 138 bool executable, const char* mesg);
zgu@3900 139 static bool pd_uncommit_memory(char* addr, size_t bytes);
zgu@3900 140 static bool pd_release_memory(char* addr, size_t bytes);
zgu@3900 141
zgu@3900 142 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
zgu@3900 143 char *addr, size_t bytes, bool read_only = false,
zgu@3900 144 bool allow_exec = false);
zgu@3900 145 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset,
zgu@3900 146 char *addr, size_t bytes, bool read_only,
zgu@3900 147 bool allow_exec);
zgu@3900 148 static bool pd_unmap_memory(char *addr, size_t bytes);
zgu@3900 149 static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
zgu@3900 150 static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
zgu@3900 151
ehelin@7780 152 static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
zgu@3900 153
tschatzl@8661 154 static void initialize_initial_active_processor_count();
duke@435 155 public:
bobv@2036 156 static void init(void); // Called before command line parsing
tschatzl@5701 157 static void init_before_ergo(void); // Called after command line parsing
tschatzl@5701 158 // before VM ergonomics processing.
bobv@2036 159 static jint init_2(void); // Called after command line parsing
tschatzl@5701 160 // and VM ergonomics processing
phh@3378 161 static void init_globals(void) { // Called from init_globals() in init.cpp
phh@3378 162 init_globals_ext();
phh@3378 163 }
duke@435 164
duke@435 165 // File names are case-insensitive on Windows only
duke@435 166 // Override me as needed
duke@435 167 static int file_name_strcmp(const char* s1, const char* s2);
duke@435 168
zgu@7074 169 // get/unset environment variable
duke@435 170 static bool getenv(const char* name, char* buffer, int len);
zgu@7074 171 static bool unsetenv(const char* name);
zgu@7074 172
duke@435 173 static bool have_special_privileges();
duke@435 174
duke@435 175 static jlong javaTimeMillis();
duke@435 176 static jlong javaTimeNanos();
duke@435 177 static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
duke@435 178 static void run_periodic_checks();
duke@435 179
duke@435 180
duke@435 181 // Returns the elapsed time in seconds since the vm started.
duke@435 182 static double elapsedTime();
duke@435 183
duke@435 184 // Returns real time in seconds since an arbitrary point
duke@435 185 // in the past.
duke@435 186 static bool getTimesSecs(double* process_real_time,
duke@435 187 double* process_user_time,
duke@435 188 double* process_system_time);
duke@435 189
duke@435 190 // Interface to the performance counter
duke@435 191 static jlong elapsed_counter();
duke@435 192 static jlong elapsed_frequency();
duke@435 193
ysr@777 194 // The "virtual time" of a thread is the amount of time a thread has
ysr@777 195 // actually run. The first function indicates whether the OS supports
ysr@777 196 // this functionality for the current thread, and if so:
ysr@777 197 // * the second enables vtime tracking (if that is required).
ysr@777 198 // * the third tells whether vtime is enabled.
ysr@777 199 // * the fourth returns the elapsed virtual time for the current
ysr@777 200 // thread.
ysr@777 201 static bool supports_vtime();
ysr@777 202 static bool enable_vtime();
ysr@777 203 static bool vtime_enabled();
ysr@777 204 static double elapsedVTime();
ysr@777 205
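// Typical use of the virtual-time interface above (an illustrative sketch,
// not an existing HotSpot function):
//
//   if (os::supports_vtime()) {
//     if (!os::vtime_enabled()) {
//       os::enable_vtime();
//     }
//     double cpu_seconds = os::elapsedVTime(); // CPU time consumed by this thread
//   }
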
duke@435 206 // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
duke@435 207 // It is MT safe, but not async-safe, as reading time zone
duke@435 208 // information may require a lock on some platforms.
ysr@983 209 static char* local_time_string(char *buf, size_t buflen);
ysr@983 210 static struct tm* localtime_pd (const time_t* clock, struct tm* res);
duke@435 211 // Fill in buffer with current local time as an ISO-8601 string.
duke@435 212 // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
duke@435 213 // Returns buffer, or NULL if it failed.
duke@435 214 static char* iso8601_time(char* buffer, size_t buffer_length);
duke@435 215
duke@435 216 // Interface for detecting multiprocessor system
duke@435 217 static inline bool is_MP() {
dholmes@7273 218 // During bootstrap, if _processor_count is not yet initialized, we
dholmes@7273 219 // claim to be MP, as that is the safest assumption. If any platform
dholmes@7273 220 // has a stub generator that might be triggered in this phase, and for
dholmes@7273 221 // which being wrongly declared MP is a problem, then the bootstrap
dholmes@7273 222 // routine for that stub generator needs to check the processor count
dholmes@7273 223 // directly and leave the bootstrap routine in place until called
dholmes@7273 224 // after initialization has occurred.
dholmes@7273 225 return (_processor_count != 1) || AssumeMP;
duke@435 226 }
duke@435 227 static julong available_memory();
duke@435 228 static julong physical_memory();
tschatzl@4854 229 static bool has_allocatable_memory_limit(julong* limit);
duke@435 230 static bool is_server_class_machine();
duke@435 231
duke@435 232 // number of CPUs
duke@435 233 static int processor_count() {
duke@435 234 return _processor_count;
duke@435 235 }
phh@1558 236 static void set_processor_count(int count) { _processor_count = count; }
duke@435 237
duke@435 238 // Returns the number of CPUs this process is currently allowed to run on.
duke@435 239 // Note that on some OSes this can change dynamically.
duke@435 240 static int active_processor_count();
duke@435 241
tschatzl@8661 242 // The number of active CPUs this process was allowed to run on at startup.
tschatzl@8661 243 // This value does not change dynamically; it may differ from active_processor_count().
tschatzl@8661 244 static int initial_active_processor_count() {
tschatzl@8661 245 assert(_initial_active_processor_count > 0, "Initial active processor count not set yet.");
tschatzl@8661 246 return _initial_active_processor_count;
tschatzl@8661 247 }
tschatzl@8661 248
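// An illustrative contrast between the two queries above (the helper names are
// hypothetical, not part of this interface): code that must stay stable for the
// lifetime of the VM, e.g. ergonomic sizing done at startup, should use the
// cached initial value, while code reacting to the current environment should
// re-query the OS:
//
//   uint ergo_worker_count() {
//     return (uint) os::initial_active_processor_count(); // fixed at VM init
//   }
//   uint current_worker_count() {
//     return (uint) os::active_processor_count();         // may change at runtime
//   }
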
duke@435 249 // Bind processes to processors.
duke@435 250 // This is a two step procedure:
duke@435 251 // first you generate a distribution of processes to processors,
duke@435 252 // then you bind processes according to that distribution.
duke@435 253 // Compute a distribution for number of processes to processors.
duke@435 254 // Stores the processor id's into the distribution array argument.
duke@435 255 // Returns true if it worked, false if it didn't.
duke@435 256 static bool distribute_processes(uint length, uint* distribution);
duke@435 257 // Binds the current process to a processor.
duke@435 258 // Returns true if it worked, false if it didn't.
duke@435 259 static bool bind_to_processor(uint processor_id);
duke@435 260
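// A minimal sketch of the two-step procedure described above (the fixed array
// bound and the choice to bind only slot 0 are illustrative assumptions):
//
//   uint distribution[16];
//   uint length = MIN2((uint) os::processor_count(), (uint) 16);
//   if (os::distribute_processes(length, distribution)) {
//     os::bind_to_processor(distribution[0]); // bind this process to the chosen CPU
//   }
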
dcubed@3202 261 // Give a name to the current thread.
dcubed@3202 262 static void set_native_thread_name(const char *name);
dcubed@3202 263
duke@435 264 // Interface for stack banging (predetect possible stack overflow for
duke@435 265 // exception processing) There are guard pages, and above that shadow
duke@435 266 // pages for stack overflow checking.
duke@435 267 static bool uses_stack_guard_pages();
duke@435 268 static bool allocate_stack_guard_pages();
duke@435 269 static void bang_stack_shadow_pages();
duke@435 270 static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
duke@435 271
duke@435 272 // OS interface to Virtual Memory
duke@435 273
duke@435 274 // Return the default page size.
duke@435 275 static int vm_page_size();
duke@435 276
ehelin@7778 277 // Returns the page size to use for a region of memory.
ehelin@7778 278 // region_size / min_pages will always be greater than or equal to the
ehelin@7780 279 // returned value. The returned value will divide region_size.
ehelin@7780 280 static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
ehelin@7780 281
ehelin@7780 282 // Returns the page size to use for a region of memory.
ehelin@7780 283 // region_size / min_pages will always be greater than or equal to the
ehelin@7780 284 // returned value. The returned value might not divide region_size.
ehelin@7780 285 static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
ehelin@7778 286
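// A worked example of the contract above, assuming 4K and 2M page sizes are
// available: for region_size = 8M and min_pages = 4 both variants may return 2M,
// since 8M / 4 >= 2M and 2M divides 8M evenly; for region_size = 10M and
// min_pages = 4 the aligned variant must fall back to a smaller page size
// (2M does not divide 10M), while the unaligned variant may still return 2M
// because 10M / 4 >= 2M.
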
tschatzl@5701 287 // Return the largest page size that can be used
tschatzl@5701 288 static size_t max_page_size() {
tschatzl@5701 289 // The _page_sizes array is sorted in descending order.
tschatzl@5701 290 return _page_sizes[0];
tschatzl@5701 291 }
duke@435 292
jcoomes@3057 293 // Methods for tracing page sizes returned by the above methods; enabled by
duke@435 294 // TracePageSizes. The region_{min,max}_size parameters should be the values
duke@435 295 // passed to page_size_for_region_aligned()/_unaligned() and page_size should be
duke@435 296 // the result of that call. The (optional) base and size parameters should come
duke@435 297 // from the ReservedSpace base() and size() methods.
jcoomes@3057 298 static void trace_page_sizes(const char* str, const size_t* page_sizes,
jcoomes@3057 299 int count) PRODUCT_RETURN;
duke@435 300 static void trace_page_sizes(const char* str, const size_t region_min_size,
duke@435 301 const size_t region_max_size,
duke@435 302 const size_t page_size,
duke@435 303 const char* base = NULL,
duke@435 304 const size_t size = 0) PRODUCT_RETURN;
duke@435 305
duke@435 306 static int vm_allocation_granularity();
duke@435 307 static char* reserve_memory(size_t bytes, char* addr = 0,
duke@435 308 size_t alignment_hint = 0);
zgu@5053 309 static char* reserve_memory(size_t bytes, char* addr,
zgu@5053 310 size_t alignment_hint, MEMFLAGS flags);
brutisso@4369 311 static char* reserve_memory_aligned(size_t size, size_t alignment);
duke@435 312 static char* attempt_reserve_memory_at(size_t bytes, char* addr);
duke@435 313 static void split_reserved_memory(char *base, size_t size,
duke@435 314 size_t split, bool realloc);
dcubed@5255 315 static bool commit_memory(char* addr, size_t bytes, bool executable);
coleenp@1091 316 static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
dcubed@5255 317 bool executable);
dcubed@5255 318 // Same as commit_memory(), except that it either succeeds or calls
dcubed@5255 319 // vm_exit_out_of_memory() with the specified mesg.
dcubed@5255 320 static void commit_memory_or_exit(char* addr, size_t bytes,
dcubed@5255 321 bool executable, const char* mesg);
dcubed@5255 322 static void commit_memory_or_exit(char* addr, size_t size,
dcubed@5255 323 size_t alignment_hint,
dcubed@5255 324 bool executable, const char* mesg);
duke@435 325 static bool uncommit_memory(char* addr, size_t bytes);
duke@435 326 static bool release_memory(char* addr, size_t bytes);
coleenp@672 327
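// A minimal reserve/commit/release lifecycle using the interface above (the
// size and the lack of error handling are illustrative):
//
//   size_t bytes = 1 * M;
//   char* base = os::reserve_memory(bytes);
//   if (base != NULL && os::commit_memory(base, bytes, !ExecMem)) {
//     // ... use [base, base + bytes) ...
//     os::uncommit_memory(base, bytes);
//   }
//   if (base != NULL) {
//     os::release_memory(base, bytes);
//   }
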
tschatzl@7777 328 // Touch memory pages that cover the memory range from start to end (exclusive)
tschatzl@7777 329 // to make the OS back the memory range with actual memory.
tschatzl@7777 330 // Current implementation may not touch the last page if unaligned addresses
tschatzl@7777 331 // are passed.
tschatzl@7777 332 static void pretouch_memory(char* start, char* end);
tschatzl@7777 333
coleenp@672 334 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
coleenp@672 335 static bool protect_memory(char* addr, size_t bytes, ProtType prot,
coleenp@912 336 bool is_committed = true);
coleenp@672 337
duke@435 338 static bool guard_memory(char* addr, size_t bytes);
duke@435 339 static bool unguard_memory(char* addr, size_t bytes);
coleenp@1755 340 static bool create_stack_guard_pages(char* addr, size_t bytes);
zgu@3900 341 static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
coleenp@1755 342 static bool remove_stack_guard_pages(char* addr, size_t bytes);
coleenp@1755 343
duke@435 344 static char* map_memory(int fd, const char* file_name, size_t file_offset,
duke@435 345 char *addr, size_t bytes, bool read_only = false,
duke@435 346 bool allow_exec = false);
duke@435 347 static char* remap_memory(int fd, const char* file_name, size_t file_offset,
duke@435 348 char *addr, size_t bytes, bool read_only,
duke@435 349 bool allow_exec);
duke@435 350 static bool unmap_memory(char *addr, size_t bytes);
iveresov@3363 351 static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
duke@435 352 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
duke@435 353
duke@435 354 // NUMA-specific interface
iveresov@576 355 static bool numa_has_static_binding();
iveresov@576 356 static bool numa_has_group_homing();
iveresov@576 357 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint);
duke@435 358 static void numa_make_global(char *addr, size_t bytes);
duke@435 359 static size_t numa_get_groups_num();
duke@435 360 static size_t numa_get_leaf_groups(int *ids, size_t size);
duke@435 361 static bool numa_topology_changed();
duke@435 362 static int numa_get_group_id();
duke@435 363
duke@435 364 // Page manipulation
duke@435 365 struct page_info {
duke@435 366 size_t size;
duke@435 367 int lgrp_id;
duke@435 368 };
duke@435 369 static bool get_page_info(char *start, page_info* info);
duke@435 370 static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);
duke@435 371
duke@435 372 static char* non_memory_address_word();
duke@435 373 // reserve, commit and pin the entire memory region
stefank@5578 374 static char* reserve_memory_special(size_t size, size_t alignment,
stefank@5578 375 char* addr, bool executable);
duke@435 376 static bool release_memory_special(char* addr, size_t bytes);
iveresov@2850 377 static void large_page_init();
duke@435 378 static size_t large_page_size();
duke@435 379 static bool can_commit_large_page_memory();
jcoomes@514 380 static bool can_execute_large_page_memory();
duke@435 381
duke@435 382 // OS interface to polling page
duke@435 383 static address get_polling_page() { return _polling_page; }
duke@435 384 static void set_polling_page(address page) { _polling_page = page; }
duke@435 385 static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
duke@435 386 static void make_polling_page_unreadable();
duke@435 387 static void make_polling_page_readable();
duke@435 388
duke@435 389 // Routines used to serialize the thread state without using membars
duke@435 390 static void serialize_thread_states();
duke@435 391
duke@435 392 // Since we write to the serialize page from every thread, we
duke@435 393 // want stores to be on unique cache lines whenever possible
duke@435 394 // in order to minimize CPU cross talk. We pre-compute the
duke@435 395 // amount to shift the thread* to make this offset unique to
duke@435 396 // each thread.
duke@435 397 static int get_serialize_page_shift_count() {
duke@435 398 return SerializePageShiftCount;
duke@435 399 }
duke@435 400
duke@435 401 static void set_serialize_page_mask(uintptr_t mask) {
duke@435 402 _serialize_page_mask = mask;
duke@435 403 }
duke@435 404
duke@435 405 static unsigned int get_serialize_page_mask() {
duke@435 406 return _serialize_page_mask;
duke@435 407 }
duke@435 408
duke@435 409 static void set_memory_serialize_page(address page);
duke@435 410
duke@435 411 static address get_memory_serialize_page() {
duke@435 412 return (address)_mem_serialize_page;
duke@435 413 }
duke@435 414
duke@435 415 static inline void write_memory_serialize_page(JavaThread *thread) {
duke@435 416 uintptr_t page_offset = ((uintptr_t)thread >>
duke@435 417 get_serialize_page_shift_count()) &
duke@435 418 get_serialize_page_mask();
duke@435 419 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
duke@435 420 }
duke@435 421
duke@435 422 static bool is_memory_serialize_page(JavaThread *thread, address addr) {
duke@435 423 if (UseMembar) return false;
twisti@1513 424 // Previously this function calculated the exact address of this
twisti@1513 425 // thread's serialize page, and checked if the faulting address
twisti@1513 426 // was equal. However, some platforms mask off faulting addresses
twisti@1513 427 // to the page size, so now we just check that the address is
twisti@1513 428 // within the page. This makes the thread argument unnecessary,
twisti@1513 429 // but we retain the NULL check to preserve existing behaviour.
duke@435 430 if (thread == NULL) return false;
twisti@1513 431 address page = (address) _mem_serialize_page;
twisti@1513 432 return addr >= page && addr < (page + os::vm_page_size());
duke@435 433 }
duke@435 434
duke@435 435 static void block_on_serialize_page_trap();
duke@435 436
duke@435 437 // threads
duke@435 438
duke@435 439 enum ThreadType {
duke@435 440 vm_thread,
duke@435 441 cgc_thread, // Concurrent GC thread
duke@435 442 pgc_thread, // Parallel GC thread
duke@435 443 java_thread,
duke@435 444 compiler_thread,
bobv@2036 445 watcher_thread,
bobv@2036 446 os_thread
duke@435 447 };
duke@435 448
duke@435 449 static bool create_thread(Thread* thread,
duke@435 450 ThreadType thr_type,
duke@435 451 size_t stack_size = 0);
duke@435 452 static bool create_main_thread(JavaThread* thread);
duke@435 453 static bool create_attached_thread(JavaThread* thread);
duke@435 454 static void pd_start_thread(Thread* thread);
duke@435 455 static void start_thread(Thread* thread);
duke@435 456
zgu@4079 457 static void initialize_thread(Thread* thr);
duke@435 458 static void free_thread(OSThread* osthread);
duke@435 459
duke@435 460 // thread id on Linux/64-bit is 64-bit; on Windows and Solaris it is 32-bit
duke@435 461 static intx current_thread_id();
duke@435 462 static int current_process_id();
duke@435 463 static int sleep(Thread* thread, jlong ms, bool interruptable);
dsimms@6348 464 // Short standalone OS sleep suitable for slow path spin loop.
dsimms@6348 465 // Ignores Thread.interrupt() (so keep it short).
dsimms@6348 466 // ms = 0 will sleep for the least amount of time allowed by the OS.
dsimms@6348 467 static void naked_short_sleep(jlong ms);
duke@435 468 static void infinite_sleep(); // never returns, use with CAUTION
duke@435 469 static void yield(); // Yields to all threads with same priority
duke@435 470 enum YieldResult {
duke@435 471 YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran
duke@435 472 YIELD_NONEREADY = 0, // No other runnable/ready threads.
duke@435 473 // platform-specific yield return immediately
duke@435 474 YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY
duke@435 475 // YIELD_SWITCHED and YIELD_NONEREADY imply the platform supports a "strong"
duke@435 476 // yield that can be used in lieu of blocking.
duke@435 477 } ;
duke@435 478 static YieldResult NakedYield () ;
duke@435 479 static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
duke@435 480 static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing
duke@435 481 static OSReturn set_priority(Thread* thread, ThreadPriority priority);
duke@435 482 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
duke@435 483
duke@435 484 static void interrupt(Thread* thread);
duke@435 485 static bool is_interrupted(Thread* thread, bool clear_interrupted);
duke@435 486
duke@435 487 static int pd_self_suspend_thread(Thread* thread);
duke@435 488
duke@435 489 static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
duke@435 490 static frame fetch_frame_from_context(void* ucVoid);
duke@435 491
duke@435 492 static ExtendedPC get_thread_pc(Thread *thread);
duke@435 493 static void breakpoint();
duke@435 494
duke@435 495 static address current_stack_pointer();
duke@435 496 static address current_stack_base();
duke@435 497 static size_t current_stack_size();
duke@435 498
roland@3606 499 static void verify_stack_alignment() PRODUCT_RETURN;
roland@3606 500
duke@435 501 static int message_box(const char* title, const char* message);
duke@435 502 static char* do_you_want_to_debug(const char* message);
duke@435 503
duke@435 504 // run cmd in a separate process and return its exit code, or -1 on failure
duke@435 505 static int fork_and_exec(char *cmd);
duke@435 506
duke@435 507 // os::exit() is merged with vm_exit()
duke@435 508 // static void exit(int num);
duke@435 509
duke@435 510 // Terminate the VM, but don't exit the process
duke@435 511 static void shutdown();
duke@435 512
duke@435 513 // Terminate with an error. Default is to generate a core file on platforms
duke@435 514 // that support such things. This calls shutdown() and then aborts.
duke@435 515 static void abort(bool dump_core = true);
duke@435 516
duke@435 517 // Die immediately, no exit hook, no abort hook, no cleanup.
duke@435 518 static void die();
duke@435 519
ikrylov@2322 520 // File i/o operations
ikrylov@2322 521 static const int default_file_open_flags();
ikrylov@2322 522 static int open(const char *path, int oflag, int mode);
vlivanov@5027 523 static FILE* open(int fd, const char* mode);
ikrylov@2322 524 static int close(int fd);
ikrylov@2322 525 static jlong lseek(int fd, jlong offset, int whence);
ikrylov@2322 526 static char* native_path(char *path);
ikrylov@2322 527 static int ftruncate(int fd, jlong length);
ikrylov@2322 528 static int fsync(int fd);
ikrylov@2322 529 static int available(int fd, jlong *bytes);
ikrylov@2322 530
ikrylov@2322 531 // File i/o operations
ikrylov@2322 532
ikrylov@2322 533 static size_t read(int fd, void *buf, unsigned int nBytes);
ikrylov@2322 534 static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
ikrylov@2322 535 static size_t write(int fd, const void *buf, unsigned int nBytes);
ikrylov@2322 536
duke@435 537 // Reading directories.
duke@435 538 static DIR* opendir(const char* dirname);
duke@435 539 static int readdir_buf_size(const char *path);
duke@435 540 static struct dirent* readdir(DIR* dirp, dirent* dbuf);
duke@435 541 static int closedir(DIR* dirp);
duke@435 542
duke@435 543 // Dynamic library extension
duke@435 544 static const char* dll_file_extension();
duke@435 545
duke@435 546 static const char* get_temp_directory();
vlivanov@5027 547 static const char* get_current_directory(char *buf, size_t buflen);
duke@435 548
kamg@677 549 // Builds a platform-specific full library path given an ld path and lib name
bpittore@4261 550 // Returns true if buffer contains full path to existing file, false otherwise
bpittore@4261 551 static bool dll_build_name(char* buffer, size_t size,
kamg@677 552 const char* pathname, const char* fname);
kamg@677 553
duke@435 554 // Symbol lookup, find nearest function name; basically it implements
duke@435 555 // dladdr() for all platforms. Name of the nearest function is copied
dcubed@5365 556 // to buf. Distance from its base address is optionally returned as offset.
duke@435 557 // If function name is not found, buf[0] is set to '\0' and offset is
dcubed@5365 558 // set to -1 (if offset is non-NULL).
duke@435 559 static bool dll_address_to_function_name(address addr, char* buf,
duke@435 560 int buflen, int* offset);
duke@435 561
duke@435 562 // Locate DLL/DSO. On success, full path of the library is copied to
dcubed@5365 563 // buf, and offset is optionally set to be the distance between addr
dcubed@5365 564 // and the library's base address. On failure, buf[0] is set to '\0'
dcubed@5365 565 // and offset is set to -1 (if offset is non-NULL).
duke@435 566 static bool dll_address_to_library_name(address addr, char* buf,
duke@435 567 int buflen, int* offset);
duke@435 568
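// A sketch of how an error reporter might resolve a pc using the two lookups
// above (pc is the address being resolved, st an outputStream*; the buffer
// size and output format are illustrative assumptions):
//
//   char buf[256];
//   int offset;
//   if (os::dll_address_to_function_name(pc, buf, sizeof(buf), &offset)) {
//     st->print("%s+0x%x", buf, offset);               // nearest function
//   } else if (os::dll_address_to_library_name(pc, buf, sizeof(buf), &offset)) {
//     st->print("%s+0x%x", buf, offset);               // containing library
//   }
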
duke@435 569 // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
duke@435 570 static bool address_is_in_vm(address addr);
duke@435 571
duke@435 572 // Loads .dll/.so and, in case of error, checks whether the
duke@435 573 // .dll/.so was built for the same architecture as HotSpot
duke@435 574 // is running on
duke@435 575 static void* dll_load(const char *name, char *ebuf, int ebuflen);
duke@435 576
kamg@677 577 // lookup symbol in a shared library
kamg@677 578 static void* dll_lookup(void* handle, const char* name);
kamg@677 579
ikrylov@2322 580 // Unload library
ikrylov@2322 581 static void dll_unload(void *lib);
ikrylov@2322 582
bpittore@5585 583 // Return the handle of this process
bpittore@5585 584 static void* get_default_process_handle();
bpittore@5585 585
bpittore@5585 586 // Check for static linked agent library
bpittore@5585 587 static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
bpittore@5585 588 size_t syms_len);
bpittore@5585 589
bpittore@5585 590 // Find agent entry point
bpittore@5585 591 static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
bpittore@5585 592 const char *syms[], size_t syms_len);
bpittore@5585 593
duke@435 594 // Print out system information; these functions are called by the fatal error handler.
duke@435 595 // Output format may be different on different platforms.
duke@435 596 static void print_os_info(outputStream* st);
nloodin@3783 597 static void print_os_info_brief(outputStream* st);
duke@435 598 static void print_cpu_info(outputStream* st);
jcoomes@2997 599 static void pd_print_cpu_info(outputStream* st);
duke@435 600 static void print_memory_info(outputStream* st);
duke@435 601 static void print_dll_info(outputStream* st);
duke@435 602 static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
duke@435 603 static void print_context(outputStream* st, void* context);
never@2262 604 static void print_register_info(outputStream* st, void* context);
duke@435 605 static void print_siginfo(outputStream* st, void* siginfo);
duke@435 606 static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
duke@435 607 static void print_date_and_time(outputStream* st);
duke@435 608
never@2262 609 static void print_location(outputStream* st, intptr_t x, bool verbose = false);
ikrylov@2322 610 static size_t lasterror(char *buf, size_t len);
phh@3379 611 static int get_last_error();
bobv@2036 612
sla@2584 613 // Determines whether the calling process is being debugged by a user-mode debugger.
sla@2584 614 static bool is_debugger_attached();
sla@2584 615
sla@2584 616 // wait for a key press if PauseAtExit is set
sla@2584 617 static void wait_for_keypress_at_exit(void);
sla@2584 618
duke@435 619 // The following two functions are used by fatal error handler to trace
duke@435 620 // native (C) frames. They are not part of frame.hpp/frame.cpp because
duke@435 621 // frame.hpp/cpp assume thread is JavaThread, and also because different
duke@435 622 // OS/compiler may have different convention or provide different API to
duke@435 623 // walk C frames.
duke@435 624 //
duke@435 625 // We don't attempt to become a debugger, so we only follow frames if that
duke@435 626 // does not require a lookup in the unwind table, which is part of the binary
duke@435 627 // file but may be unsafe to read after a fatal error. So on x86, we can
duke@435 628 // only walk stack if %ebp is used as frame pointer; on ia64, it's not
duke@435 629 // possible to walk C stack without having the unwind table.
duke@435 630 static bool is_first_C_frame(frame *fr);
duke@435 631 static frame get_sender_for_C_frame(frame *fr);
duke@435 632
duke@435 633 // return current frame. pc() and sp() are set to NULL on failure.
duke@435 634 static frame current_frame();
duke@435 635
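// A typical native stack walk built from the helpers above (the depth limit is
// an illustrative bound; the walk stops as soon as a frame cannot be followed
// safely):
//
//   frame fr = os::current_frame();
//   for (int depth = 0; depth < 32 && !os::is_first_C_frame(&fr); depth++) {
//     // ... record or print fr.pc() ...
//     fr = os::get_sender_for_C_frame(&fr);
//   }
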
duke@435 636 static void print_hex_dump(outputStream* st, address start, address end, int unitsize);
duke@435 637
duke@435 638 // returns a string to describe the exception/signal;
duke@435 639 // returns NULL if exception_code is not an OS exception/signal.
duke@435 640 static const char* exception_name(int exception_code, char* buf, size_t buflen);
duke@435 641
duke@435 642 // Returns native Java library, loads if necessary
duke@435 643 static void* native_java_library();
duke@435 644
ikrylov@2322 645 // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
duke@435 646 static void jvm_path(char *buf, jint buflen);
duke@435 647
bobv@2036 648 // Returns true if we are running in a headless jre.
bobv@2036 649 static bool is_headless_jre();
bobv@2036 650
duke@435 651 // JNI names
duke@435 652 static void print_jni_name_prefix_on(outputStream* st, int args_size);
duke@435 653 static void print_jni_name_suffix_on(outputStream* st, int args_size);
duke@435 654
duke@435 655 // File conventions
duke@435 656 static const char* file_separator();
duke@435 657 static const char* line_separator();
duke@435 658 static const char* path_separator();
duke@435 659
duke@435 660 // Init OS-specific system property values
duke@435 661 static void init_system_properties_values();
duke@435 662
duke@435 663 // IO operations, non-JVM_ version.
duke@435 664 static int stat(const char* path, struct stat* sbuf);
duke@435 665 static bool dir_is_empty(const char* path);
duke@435 666
duke@435 667 // IO operations on binary files
duke@435 668 static int create_binary_file(const char* path, bool rewrite_existing);
duke@435 669 static jlong current_file_offset(int fd);
duke@435 670 static jlong seek_to_file_offset(int fd, jlong offset);
duke@435 671
duke@435 672 // Thread Local Storage
duke@435 673 static int allocate_thread_local_storage();
duke@435 674 static void thread_local_storage_at_put(int index, void* value);
duke@435 675 static void* thread_local_storage_at(int index);
duke@435 676 static void free_thread_local_storage(int index);
duke@435 677
zgu@7074 678 // Retrieve native stack frames.
zgu@7074 679 // Parameters:
zgu@7074 680 // stack: an array in which to store the captured frame addresses.
zgu@7074 681 // size: size of the above array.
zgu@7074 682 // toSkip: number of stack frames to skip at the beginning.
zgu@7074 683 // Return: number of stack frames captured.
zgu@7074 684 static int get_native_stack(address* stack, int size, int toSkip = 0);
zgu@3900 685
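// Example use (the array size and number of skipped frames are illustrative):
//
//   address stack[8];
//   int frames = os::get_native_stack(stack, 8, 1 /* skip this caller */);
//   for (int i = 0; i < frames; i++) {
//     // stack[i] is the pc of the i-th captured frame
//   }
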
duke@435 686 // General allocation (must be MT-safe)
zgu@7074 687 static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
zgu@7074 688 static void* malloc (size_t size, MEMFLAGS flags);
zgu@7074 689 static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
zgu@7074 690 static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
zgu@7074 691
zgu@3900 692 static void free (void *memblock, MEMFLAGS flags = mtNone);
duke@435 693 static bool check_heap(bool force = false); // verify C heap integrity
zgu@3900 694 static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
duke@435 695
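// Illustrative use of the C-heap interface above with an NMT memory flag:
//
//   char* p = (char*) os::malloc(64, mtInternal);
//   if (p != NULL) {
//     char* q = (char*) os::realloc(p, 128, mtInternal);
//     os::free(q != NULL ? q : p);   // on failed realloc the original block survives
//   }
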
duke@435 696 #ifndef PRODUCT
kvn@2557 697 static julong num_mallocs; // # of calls to malloc/realloc
kvn@2557 698 static julong alloc_bytes; // # of bytes allocated
kvn@2557 699 static julong num_frees; // # of calls to free
kvn@2557 700 static julong free_bytes; // # of bytes freed
duke@435 701 #endif
duke@435 702
ikrylov@2322 703 // SocketInterface (ex HPI SocketInterface)
ikrylov@2322 704 static int socket(int domain, int type, int protocol);
ikrylov@2322 705 static int socket_close(int fd);
ikrylov@2322 706 static int socket_shutdown(int fd, int howto);
phh@3344 707 static int recv(int fd, char* buf, size_t nBytes, uint flags);
phh@3344 708 static int send(int fd, char* buf, size_t nBytes, uint flags);
phh@3344 709 static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
ikrylov@2322 710 static int timeout(int fd, long timeout);
ikrylov@2322 711 static int listen(int fd, int count);
phh@3344 712 static int connect(int fd, struct sockaddr* him, socklen_t len);
phh@3344 713 static int bind(int fd, struct sockaddr* him, socklen_t len);
phh@3344 714 static int accept(int fd, struct sockaddr* him, socklen_t* len);
phh@3344 715 static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
phh@3344 716 struct sockaddr* from, socklen_t* fromlen);
phh@3344 717 static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
phh@3344 718 static int sendto(int fd, char* buf, size_t len, uint flags,
phh@3344 719 struct sockaddr* to, socklen_t tolen);
phh@3344 720 static int socket_available(int fd, jint* pbytes);
ikrylov@2322 721
ikrylov@2322 722 static int get_sock_opt(int fd, int level, int optname,
phh@3344 723 char* optval, socklen_t* optlen);
ikrylov@2322 724 static int set_sock_opt(int fd, int level, int optname,
phh@3344 725 const char* optval, socklen_t optlen);
ikrylov@2322 726 static int get_host_name(char* name, int namelen);
ikrylov@2322 727
phh@3344 728 static struct hostent* get_host_by_name(char* name);
ikrylov@2322 729
duke@435 730 // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
duke@435 731 static void signal_init();
duke@435 732 static void signal_init_pd();
duke@435 733 static void signal_notify(int signal_number);
duke@435 734 static void* signal(int signal_number, void* handler);
duke@435 735 static void signal_raise(int signal_number);
duke@435 736 static int signal_wait();
duke@435 737 static int signal_lookup();
duke@435 738 static void* user_handler();
duke@435 739 static void terminate_signal_thread();
duke@435 740 static int sigexitnum_pd();
duke@435 741
duke@435 742 // random number generation
duke@435 743 static long random(); // return 32bit pseudorandom number
duke@435 744 static void init_random(long initval); // initialize random sequence
duke@435 745
duke@435 746 // Structured OS Exception support
duke@435 747 static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
duke@435 748
ctornqvi@2520 749 // On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
ctornqvi@2520 750 static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);
ctornqvi@2520 751
mikael@3903 752 // Get the default path to the core file
mikael@3903 753 // Returns the length of the string
mikael@3903 754 static int get_core_path(char* buffer, size_t bufferSize);
mikael@3903 755
duke@435 756 // JVMTI & JVM monitoring and management support
duke@435 757 // The thread_cpu_time() and current_thread_cpu_time() are only
duke@435 758 // supported if is_thread_cpu_time_supported() returns true.
duke@435 759 // They are not supported on Solaris T1.
duke@435 760
duke@435 761 // Thread CPU Time - return the fast estimate on a platform
duke@435 762 // On Solaris - call gethrvtime (fast) - user time only
duke@435 763 // On Linux - fast clock_gettime where available - user+sys
duke@435 764 // - otherwise: very slow /proc fs - user+sys
duke@435 765 // On Windows - GetThreadTimes - user+sys
duke@435 766 static jlong current_thread_cpu_time();
duke@435 767 static jlong thread_cpu_time(Thread* t);
duke@435 768
duke@435 769 // Thread CPU Time with user_sys_cpu_time parameter.
duke@435 770 //
duke@435 771 // If user_sys_cpu_time is true, user+sys time is returned.
duke@435 772 // Otherwise, only user time is returned
duke@435 773 static jlong current_thread_cpu_time(bool user_sys_cpu_time);
duke@435 774 static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);
duke@435 775
duke@435 776 // Return a bunch of info about the timers.
duke@435 777 // Note that the returned info for these two functions may be different
duke@435 778 // on some platforms
duke@435 779 static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
duke@435 780 static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
duke@435 781
duke@435 782 static bool is_thread_cpu_time_supported();
duke@435 783
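// Illustrative use, guarded by the capability check described above:
//
//   if (os::is_thread_cpu_time_supported()) {
//     jlong t = os::current_thread_cpu_time(true /* user + system time */);
//   }
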
duke@435 784 // System loadavg support. Returns -1 if load average cannot be obtained.
duke@435 785 static int loadavg(double loadavg[], int nelem);
duke@435 786
duke@435 787 // Hook for OS-specific JVM options that we don't want to abort on when seen
duke@435 788 static bool obsolete_option(const JavaVMOption *option);
duke@435 789
phh@3378 790 // Extensions
phh@3378 791 #include "runtime/os_ext.hpp"
phh@3378 792
phh@3378 793 public:
rbackman@5424 794 class CrashProtectionCallback : public StackObj {
rbackman@5424 795 public:
rbackman@5424 796 virtual void call() = 0;
rbackman@5424 797 };
phh@3378 798
duke@435 799 // Platform dependent stuff
stefank@2314 800 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 801 # include "os_linux.hpp"
nloodin@3783 802 # include "os_posix.hpp"
stefank@2314 803 #endif
stefank@2314 804 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 805 # include "os_solaris.hpp"
nloodin@3783 806 # include "os_posix.hpp"
stefank@2314 807 #endif
stefank@2314 808 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 809 # include "os_windows.hpp"
stefank@2314 810 #endif
goetz@6461 811 #ifdef TARGET_OS_FAMILY_aix
goetz@6461 812 # include "os_aix.hpp"
goetz@6461 813 # include "os_posix.hpp"
goetz@6461 814 #endif
never@3156 815 #ifdef TARGET_OS_FAMILY_bsd
nloodin@3783 816 # include "os_posix.hpp"
never@3156 817 # include "os_bsd.hpp"
never@3156 818 #endif
stefank@2314 819 #ifdef TARGET_OS_ARCH_linux_x86
stefank@2314 820 # include "os_linux_x86.hpp"
stefank@2314 821 #endif
stefank@2314 822 #ifdef TARGET_OS_ARCH_linux_sparc
stefank@2314 823 # include "os_linux_sparc.hpp"
stefank@2314 824 #endif
stefank@2314 825 #ifdef TARGET_OS_ARCH_linux_zero
stefank@2314 826 # include "os_linux_zero.hpp"
stefank@2314 827 #endif
stefank@2314 828 #ifdef TARGET_OS_ARCH_solaris_x86
stefank@2314 829 # include "os_solaris_x86.hpp"
stefank@2314 830 #endif
stefank@2314 831 #ifdef TARGET_OS_ARCH_solaris_sparc
stefank@2314 832 # include "os_solaris_sparc.hpp"
stefank@2314 833 #endif
stefank@2314 834 #ifdef TARGET_OS_ARCH_windows_x86
stefank@2314 835 # include "os_windows_x86.hpp"
stefank@2314 836 #endif
bobv@2508 837 #ifdef TARGET_OS_ARCH_linux_arm
bobv@2508 838 # include "os_linux_arm.hpp"
bobv@2508 839 #endif
bobv@2508 840 #ifdef TARGET_OS_ARCH_linux_ppc
bobv@2508 841 # include "os_linux_ppc.hpp"
bobv@2508 842 #endif
goetz@6461 843 #ifdef TARGET_OS_ARCH_aix_ppc
goetz@6461 844 # include "os_aix_ppc.hpp"
goetz@6461 845 #endif
never@3156 846 #ifdef TARGET_OS_ARCH_bsd_x86
never@3156 847 # include "os_bsd_x86.hpp"
never@3156 848 #endif
never@3156 849 #ifdef TARGET_OS_ARCH_bsd_zero
never@3156 850 # include "os_bsd_zero.hpp"
never@3156 851 #endif
stefank@2314 852
phh@3378 853 public:
iklam@5667 854 #ifndef PLATFORM_PRINT_NATIVE_STACK
iklam@5667 855 // No platform-specific code for printing the native stack.
iklam@5667 856 static bool platform_print_native_stack(outputStream* st, void* context,
iklam@5667 857 char *buf, int buf_size) {
iklam@5667 858 return false;
iklam@5667 859 }
iklam@5667 860 #endif
iklam@5667 861
bobv@2036 862 // debugging support (mostly used by debug.cpp but also fatal error handler)
bobv@2036 863 static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address
duke@435 864
duke@435 865 static bool dont_yield(); // when true, JVM_Yield() is nop
duke@435 866 static void print_statistics();
duke@435 867
duke@435 868 // Thread priority helpers (implemented in OS-specific part)
duke@435 869 static OSReturn set_native_priority(Thread* thread, int native_prio);
duke@435 870 static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
phh@3481 871 static int java_to_os_priority[CriticalPriority + 1];
duke@435 872 // Hint to the underlying OS that a task switch would not be good.
duke@435 873 // Void return because it's a hint and can fail.
duke@435 874 static void hint_no_preempt();
duke@435 875
duke@435 876 // Used at creation if requested by the diagnostic flag PauseAtStartup.
duke@435 877 // Causes the VM to wait until an external stimulus has been applied
duke@435 878 // (for Unix, that stimulus is a signal, for Windows, an external
duke@435 879 // ResumeThread call)
duke@435 880 static void pause();
duke@435 881
bpittore@5585 882 // Builds a platform dependent Agent_OnLoad_<libname> function name
bpittore@5585 883 // which is used to find statically linked in agents.
bpittore@5585 884 static char* build_agent_function_name(const char *sym, const char *cname,
bpittore@5585 885 bool is_absolute_path);
bpittore@5585 886
sla@5237 887 class SuspendedThreadTaskContext {
sla@5237 888 public:
sla@5237 889 SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
sla@5237 890 Thread* thread() const { return _thread; }
sla@5237 891 void* ucontext() const { return _ucontext; }
sla@5237 892 private:
sla@5237 893 Thread* _thread;
sla@5237 894 void* _ucontext;
sla@5237 895 };
sla@5237 896
sla@5237 897 class SuspendedThreadTask {
sla@5237 898 public:
sla@5237 899 SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
sla@5237 900 virtual ~SuspendedThreadTask() {}
sla@5237 901 void run();
sla@5237 902 bool is_done() { return _done; }
sla@5237 903 virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
sla@5237 904 protected:
sla@5237 905 private:
sla@5237 906 void internal_do_task();
sla@5237 907 Thread* _thread;
sla@5237 908 bool _done;
sla@5237 909 };
sla@5237 910
sla@5237 911 #ifndef TARGET_OS_FAMILY_windows
sla@5237 912 // Suspend/resume support
sla@5237 913 // Protocol:
sla@5237 914 //
sla@5237 915 // a thread starts in SR_RUNNING
sla@5237 916 //
sla@5237 917 // SR_RUNNING can go to
sla@5237 918 // * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
sla@5237 919 // SR_SUSPEND_REQUEST can go to
sla@5237 920 // * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
sla@5237 921 // * SR_SUSPENDED if the stopped thread receives the signal and switches state
sla@5237 922 // SR_SUSPENDED can go to
sla@5237 923 // * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
sla@5237 924 // SR_WAKEUP_REQUEST can go to
sla@5237 925 // * SR_RUNNING when the stopped thread receives the signal
sla@5237 926 // * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
sla@5237 927 class SuspendResume {
sla@5237 928 public:
sla@5237 929 enum State {
sla@5237 930 SR_RUNNING,
sla@5237 931 SR_SUSPEND_REQUEST,
sla@5237 932 SR_SUSPENDED,
sla@5237 933 SR_WAKEUP_REQUEST
sla@5237 934 };
sla@5237 935
sla@5237 936 private:
sla@5237 937 volatile State _state;
sla@5237 938
sla@5237 939 private:
sla@5237 940 /* try to switch state from state "from" to state "to"
sla@5237 941 * returns the state set after the method is complete
sla@5237 942 */
sla@5237 943 State switch_state(State from, State to);
sla@5237 944
sla@5237 945 public:
sla@5237 946 SuspendResume() : _state(SR_RUNNING) { }
sla@5237 947
sla@5237 948 State state() const { return _state; }
sla@5237 949
sla@5237 950 State request_suspend() {
sla@5237 951 return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
sla@5237 952 }
sla@5237 953
sla@5237 954 State cancel_suspend() {
sla@5237 955 return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
sla@5237 956 }
sla@5237 957
sla@5237 958 State suspended() {
sla@5237 959 return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
sla@5237 960 }
sla@5237 961
sla@5237 962 State request_wakeup() {
sla@5237 963 return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
sla@5237 964 }
sla@5237 965
sla@5237 966 State running() {
sla@5237 967 return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
sla@5237 968 }
sla@5237 969
sla@5237 970 bool is_running() const {
sla@5237 971 return _state == SR_RUNNING;
sla@5237 972 }
sla@5237 973
sla@5237 974 bool is_suspend_request() const {
sla@5237 975 return _state == SR_SUSPEND_REQUEST;
sla@5237 976 }
sla@5237 977
sla@5237 978 bool is_suspended() const {
sla@5237 979 return _state == SR_SUSPENDED;
sla@5237 980 }
sla@5237 981 };
sla@5237 982 #endif
sla@5237 983
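// A sketch of the suspender-side sequence implied by the protocol above
// (signal delivery and the timeout policy live in platform code and are only
// indicated by comments here):
//
//   os::SuspendResume sr;
//   if (sr.request_suspend() == os::SuspendResume::SR_SUSPEND_REQUEST) {
//     // send the suspend signal to the target thread, then wait ...
//     if (!sr.is_suspended()) {
//       sr.cancel_suspend();            // timed out waiting for SR_SUSPENDED
//     }
//   }
//   ...
//   if (sr.is_suspended()) {
//     sr.request_wakeup();              // resend the wakeup signal until running
//   }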
sla@5237 984
duke@435 985 protected:
tschatzl@8661 986 static long _rand_seed; // seed for random number generator
tschatzl@8661 987 static int _processor_count; // number of processors
tschatzl@8661 988 static int _initial_active_processor_count; // number of active processors during initialization.
duke@435 989
duke@435 990 static char* format_boot_path(const char* format_string,
duke@435 991 const char* home,
duke@435 992 int home_len,
duke@435 993 char fileSep,
duke@435 994 char pathSep);
duke@435 995 static bool set_boot_path(char fileSep, char pathSep);
phh@1126 996 static char** split_path(const char* path, int* n);
rbackman@5424 997
duke@435 998 };
duke@435 999
duke@435 1000 // Note that "PAUSE" is almost always used with synchronization
duke@435 1001 // so arguably we should provide Atomic::SpinPause() instead
duke@435 1002 // of the global SpinPause() with C linkage.
duke@435 1003 // It'd also be eligible for inlining on many platforms.
duke@435 1004
goetz@5400 1005 extern "C" int SpinPause();
stefank@2314 1006
stefank@2314 1007 #endif // SHARE_VM_RUNTIME_OS_HPP
