Tue, 24 Dec 2013 11:48:39 -0800
8029233: Update copyright year to match last edit in jdk8 hotspot repository for 2013
Summary: Copyright year updated for files modified during 2013
Reviewed-by: twisti, iveresov
duke@435 | 1 | /* |
hseigel@4465 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_RUNTIME_OS_HPP |
stefank@2314 | 26 | #define SHARE_VM_RUNTIME_OS_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "jvmtifiles/jvmti.h" |
stefank@2314 | 29 | #include "runtime/atomic.hpp" |
stefank@2314 | 30 | #include "runtime/extendedPC.hpp" |
stefank@2314 | 31 | #include "runtime/handles.hpp" |
stefank@2314 | 32 | #include "utilities/top.hpp" |
stefank@2314 | 33 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 34 | # include "jvm_linux.h" |
rbackman@5424 | 35 | # include <setjmp.h> |
stefank@2314 | 36 | #endif |
stefank@2314 | 37 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 38 | # include "jvm_solaris.h" |
rbackman@5424 | 39 | # include <setjmp.h> |
stefank@2314 | 40 | #endif |
stefank@2314 | 41 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 42 | # include "jvm_windows.h" |
stefank@2314 | 43 | #endif |
never@3156 | 44 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 45 | # include "jvm_bsd.h" |
rbackman@5424 | 46 | # include <setjmp.h> |
never@3156 | 47 | #endif |
stefank@2314 | 48 | |
bpittore@5585 | 49 | class AgentLibrary; |
bpittore@5585 | 50 | |
duke@435 | 51 | // os defines the interface to operating system; this includes traditional |
duke@435 | 52 | // OS services (time, I/O) as well as other functionality with system- |
duke@435 | 53 | // dependent code. |
duke@435 | 54 | |
duke@435 | 55 | typedef void (*dll_func)(...); |
duke@435 | 56 | |
duke@435 | 57 | class Thread; |
duke@435 | 58 | class JavaThread; |
duke@435 | 59 | class Event; |
duke@435 | 60 | class DLL; |
duke@435 | 61 | class FileHandle; |
iveresov@576 | 62 | template<class E> class GrowableArray; |
duke@435 | 63 | |
duke@435 | 64 | // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose |
duke@435 | 65 | |
duke@435 | 66 | // Platform-independent error return values from OS functions |
duke@435 | 67 | enum OSReturn { |
duke@435 | 68 | OS_OK = 0, // Operation was successful |
duke@435 | 69 | OS_ERR = -1, // Operation failed |
duke@435 | 70 | OS_INTRPT = -2, // Operation was interrupted |
duke@435 | 71 | OS_TIMEOUT = -3, // Operation timed out |
duke@435 | 72 | OS_NOMEM = -5, // Operation failed for lack of memory |
duke@435 | 73 | OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource |
duke@435 | 74 | }; |
duke@435 | 75 | |
duke@435 | 76 | enum ThreadPriority { // JLS 20.20.1-3 |
duke@435 | 77 | NoPriority = -1, // Initial non-priority value |
duke@435 | 78 | MinPriority = 1, // Minimum priority |
duke@435 | 79 | NormPriority = 5, // Normal (non-daemon) priority |
duke@435 | 80 | NearMaxPriority = 9, // High priority, used for VMThread |
phh@3481 | 81 | MaxPriority = 10, // Highest priority, used for WatcherThread |
duke@435 | 82 | // ensures that VMThread doesn't starve profiler |
phh@3481 | 83 | CriticalPriority = 11 // Critical thread priority |
duke@435 | 84 | }; |
duke@435 | 85 | |
dcubed@5255 | 86 | // Executable parameter flag for os::commit_memory() and |
dcubed@5255 | 87 | // os::commit_memory_or_exit(). |
dcubed@5255 | 88 | const bool ExecMem = true; |
dcubed@5255 | 89 | |
duke@435 | 90 | // Typedef for structured exception handling support |
duke@435 | 91 | typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); |
duke@435 | 92 | |
duke@435 | 93 | class os: AllStatic { |
twisti@5726 | 94 | friend class VMStructs; |
twisti@5726 | 95 | |
phh@1558 | 96 | public: |
duke@435 | 97 | enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel) |
duke@435 | 98 | |
phh@1558 | 99 | private: |
duke@435 | 100 | static OSThread* _starting_thread; |
duke@435 | 101 | static address _polling_page; |
duke@435 | 102 | static volatile int32_t * _mem_serialize_page; |
duke@435 | 103 | static uintptr_t _serialize_page_mask; |
phh@1558 | 104 | public: |
duke@435 | 105 | static size_t _page_sizes[page_sizes_max]; |
duke@435 | 106 | |
phh@1558 | 107 | private: |
duke@435 | 108 | static void init_page_sizes(size_t default_page_size) { |
duke@435 | 109 | _page_sizes[0] = default_page_size; |
duke@435 | 110 | _page_sizes[1] = 0; // sentinel |
duke@435 | 111 | } |
duke@435 | 112 | |
zgu@3900 | 113 | static char* pd_reserve_memory(size_t bytes, char* addr = 0, |
zgu@3900 | 114 | size_t alignment_hint = 0); |
zgu@3900 | 115 | static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); |
zgu@3900 | 116 | static void pd_split_reserved_memory(char *base, size_t size, |
zgu@3900 | 117 | size_t split, bool realloc); |
dcubed@5255 | 118 | static bool pd_commit_memory(char* addr, size_t bytes, bool executable); |
zgu@3900 | 119 | static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, |
dcubed@5255 | 120 | bool executable); |
dcubed@5255 | 121 | // Same as pd_commit_memory() that either succeeds or calls |
dcubed@5255 | 122 | // vm_exit_out_of_memory() with the specified mesg. |
dcubed@5255 | 123 | static void pd_commit_memory_or_exit(char* addr, size_t bytes, |
dcubed@5255 | 124 | bool executable, const char* mesg); |
dcubed@5255 | 125 | static void pd_commit_memory_or_exit(char* addr, size_t size, |
dcubed@5255 | 126 | size_t alignment_hint, |
dcubed@5255 | 127 | bool executable, const char* mesg); |
zgu@3900 | 128 | static bool pd_uncommit_memory(char* addr, size_t bytes); |
zgu@3900 | 129 | static bool pd_release_memory(char* addr, size_t bytes); |
zgu@3900 | 130 | |
zgu@3900 | 131 | static char* pd_map_memory(int fd, const char* file_name, size_t file_offset, |
zgu@3900 | 132 | char *addr, size_t bytes, bool read_only = false, |
zgu@3900 | 133 | bool allow_exec = false); |
zgu@3900 | 134 | static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset, |
zgu@3900 | 135 | char *addr, size_t bytes, bool read_only, |
zgu@3900 | 136 | bool allow_exec); |
zgu@3900 | 137 | static bool pd_unmap_memory(char *addr, size_t bytes); |
zgu@3900 | 138 | static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint); |
zgu@3900 | 139 | static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint); |
zgu@3900 | 140 | |
zgu@3900 | 141 | |
duke@435 | 142 | public: |
bobv@2036 | 143 | static void init(void); // Called before command line parsing |
tschatzl@5701 | 144 | static void init_before_ergo(void); // Called after command line parsing |
tschatzl@5701 | 145 | // before VM ergonomics processing. |
bobv@2036 | 146 | static jint init_2(void); // Called after command line parsing |
tschatzl@5701 | 147 | // and VM ergonomics processing |
phh@3378 | 148 | static void init_globals(void) { // Called from init_globals() in init.cpp |
phh@3378 | 149 | init_globals_ext(); |
phh@3378 | 150 | } |
bobv@2036 | 151 | static void init_3(void); // Called at the end of vm init |
duke@435 | 152 | |
duke@435 | 153 | // File names are case-insensitive on windows only |
duke@435 | 154 | // Override me as needed |
duke@435 | 155 | static int file_name_strcmp(const char* s1, const char* s2); |
duke@435 | 156 | |
duke@435 | 157 | static bool getenv(const char* name, char* buffer, int len); |
duke@435 | 158 | static bool have_special_privileges(); |
duke@435 | 159 | |
duke@435 | 160 | static jlong javaTimeMillis(); |
duke@435 | 161 | static jlong javaTimeNanos(); |
duke@435 | 162 | static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); |
duke@435 | 163 | static void run_periodic_checks(); |
duke@435 | 164 | |
duke@435 | 165 | |
duke@435 | 166 | // Returns the elapsed time in seconds since the vm started. |
duke@435 | 167 | static double elapsedTime(); |
duke@435 | 168 | |
duke@435 | 169 | // Returns real time in seconds since an arbitrary point |
duke@435 | 170 | // in the past. |
duke@435 | 171 | static bool getTimesSecs(double* process_real_time, |
duke@435 | 172 | double* process_user_time, |
duke@435 | 173 | double* process_system_time); |
duke@435 | 174 | |
duke@435 | 175 | // Interface to the performance counter |
duke@435 | 176 | static jlong elapsed_counter(); |
duke@435 | 177 | static jlong elapsed_frequency(); |
duke@435 | 178 | |
ysr@777 | 179 | // The "virtual time" of a thread is the amount of time a thread has |
ysr@777 | 180 | // actually run. The first function indicates whether the OS supports |
ysr@777 | 181 | // this functionality for the current thread, and if so: |
ysr@777 | 182 | // * the second enables vtime tracking (if that is required). |
ysr@777 | 183 | // * the third tells whether vtime is enabled. |
ysr@777 | 184 | // * the fourth returns the elapsed virtual time for the current |
ysr@777 | 185 | // thread. |
ysr@777 | 186 | static bool supports_vtime(); |
ysr@777 | 187 | static bool enable_vtime(); |
ysr@777 | 188 | static bool vtime_enabled(); |
ysr@777 | 189 | static double elapsedVTime(); |
ysr@777 | 190 | |
duke@435 | 191 | // Return current local time in a string (YYYY-MM-DD HH:MM:SS). |
duke@435 | 192 | // It is MT safe, but not async-safe, as reading time zone |
duke@435 | 193 | // information may require a lock on some platforms. |
ysr@983 | 194 | static char* local_time_string(char *buf, size_t buflen); |
ysr@983 | 195 | static struct tm* localtime_pd (const time_t* clock, struct tm* res); |
duke@435 | 196 | // Fill in buffer with current local time as an ISO-8601 string. |
duke@435 | 197 | // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. |
duke@435 | 198 | // Returns buffer, or NULL if it failed. |
duke@435 | 199 | static char* iso8601_time(char* buffer, size_t buffer_length); |
duke@435 | 200 | |
duke@435 | 201 | // Interface for detecting multiprocessor system |
duke@435 | 202 | static inline bool is_MP() { |
duke@435 | 203 | assert(_processor_count > 0, "invalid processor count"); |
minqi@4845 | 204 | return _processor_count > 1 || AssumeMP; |
duke@435 | 205 | } |
duke@435 | 206 | static julong available_memory(); |
duke@435 | 207 | static julong physical_memory(); |
tschatzl@4854 | 208 | static bool has_allocatable_memory_limit(julong* limit); |
duke@435 | 209 | static bool is_server_class_machine(); |
duke@435 | 210 | |
duke@435 | 211 | // number of CPUs |
duke@435 | 212 | static int processor_count() { |
duke@435 | 213 | return _processor_count; |
duke@435 | 214 | } |
phh@1558 | 215 | static void set_processor_count(int count) { _processor_count = count; } |
duke@435 | 216 | |
duke@435 | 217 | // Returns the number of CPUs this process is currently allowed to run on. |
duke@435 | 218 | // Note that on some OSes this can change dynamically. |
duke@435 | 219 | static int active_processor_count(); |
duke@435 | 220 | |
duke@435 | 221 | // Bind processes to processors. |
duke@435 | 222 | // This is a two step procedure: |
duke@435 | 223 | // first you generate a distribution of processes to processors, |
duke@435 | 224 | // then you bind processes according to that distribution. |
duke@435 | 225 | // Compute a distribution for number of processes to processors. |
duke@435 | 226 | // Stores the processor id's into the distribution array argument. |
duke@435 | 227 | // Returns true if it worked, false if it didn't. |
duke@435 | 228 | static bool distribute_processes(uint length, uint* distribution); |
duke@435 | 229 | // Binds the current process to a processor. |
duke@435 | 230 | // Returns true if it worked, false if it didn't. |
duke@435 | 231 | static bool bind_to_processor(uint processor_id); |
duke@435 | 232 | |
dcubed@3202 | 233 | // Give a name to the current thread. |
dcubed@3202 | 234 | static void set_native_thread_name(const char *name); |
dcubed@3202 | 235 | |
duke@435 | 236 | // Interface for stack banging (predetect possible stack overflow for |
duke@435 | 237 | // exception processing) There are guard pages, and above that shadow |
duke@435 | 238 | // pages for stack overflow checking. |
duke@435 | 239 | static bool uses_stack_guard_pages(); |
duke@435 | 240 | static bool allocate_stack_guard_pages(); |
duke@435 | 241 | static void bang_stack_shadow_pages(); |
duke@435 | 242 | static bool stack_shadow_pages_available(Thread *thread, methodHandle method); |
duke@435 | 243 | |
duke@435 | 244 | // OS interface to Virtual Memory |
duke@435 | 245 | |
duke@435 | 246 | // Return the default page size. |
duke@435 | 247 | static int vm_page_size(); |
duke@435 | 248 | |
duke@435 | 249 | // Return the page size to use for a region of memory. The min_pages argument |
duke@435 | 250 | // is a hint intended to limit fragmentation; it says the returned page size |
duke@435 | 251 | // should be <= region_max_size / min_pages. Because min_pages is a hint, |
duke@435 | 252 | // this routine may return a size larger than region_max_size / min_pages. |
duke@435 | 253 | // |
duke@435 | 254 | // The current implementation ignores min_pages if a larger page size is an |
duke@435 | 255 | // exact multiple of both region_min_size and region_max_size. This allows |
duke@435 | 256 | // larger pages to be used when doing so would not cause fragmentation; in |
duke@435 | 257 | // particular, a single page can be used when region_min_size == |
duke@435 | 258 | // region_max_size == a supported page size. |
duke@435 | 259 | static size_t page_size_for_region(size_t region_min_size, |
duke@435 | 260 | size_t region_max_size, |
duke@435 | 261 | uint min_pages); |
tschatzl@5701 | 262 | // Return the largest page size that can be used |
tschatzl@5701 | 263 | static size_t max_page_size() { |
tschatzl@5701 | 264 | // The _page_sizes array is sorted in descending order. |
tschatzl@5701 | 265 | return _page_sizes[0]; |
tschatzl@5701 | 266 | } |
duke@435 | 267 | |
jcoomes@3057 | 268 | // Methods for tracing page sizes returned by the above method; enabled by |
duke@435 | 269 | // TracePageSizes. The region_{min,max}_size parameters should be the values |
duke@435 | 270 | // passed to page_size_for_region() and page_size should be the result of that |
duke@435 | 271 | // call. The (optional) base and size parameters should come from the |
duke@435 | 272 | // ReservedSpace base() and size() methods. |
jcoomes@3057 | 273 | static void trace_page_sizes(const char* str, const size_t* page_sizes, |
jcoomes@3057 | 274 | int count) PRODUCT_RETURN; |
duke@435 | 275 | static void trace_page_sizes(const char* str, const size_t region_min_size, |
duke@435 | 276 | const size_t region_max_size, |
duke@435 | 277 | const size_t page_size, |
duke@435 | 278 | const char* base = NULL, |
duke@435 | 279 | const size_t size = 0) PRODUCT_RETURN; |
duke@435 | 280 | |
duke@435 | 281 | static int vm_allocation_granularity(); |
duke@435 | 282 | static char* reserve_memory(size_t bytes, char* addr = 0, |
duke@435 | 283 | size_t alignment_hint = 0); |
zgu@5053 | 284 | static char* reserve_memory(size_t bytes, char* addr, |
zgu@5053 | 285 | size_t alignment_hint, MEMFLAGS flags); |
brutisso@4369 | 286 | static char* reserve_memory_aligned(size_t size, size_t alignment); |
duke@435 | 287 | static char* attempt_reserve_memory_at(size_t bytes, char* addr); |
duke@435 | 288 | static void split_reserved_memory(char *base, size_t size, |
duke@435 | 289 | size_t split, bool realloc); |
dcubed@5255 | 290 | static bool commit_memory(char* addr, size_t bytes, bool executable); |
coleenp@1091 | 291 | static bool commit_memory(char* addr, size_t size, size_t alignment_hint, |
dcubed@5255 | 292 | bool executable); |
dcubed@5255 | 293 | // Same as commit_memory() that either succeeds or calls |
dcubed@5255 | 294 | // vm_exit_out_of_memory() with the specified mesg. |
dcubed@5255 | 295 | static void commit_memory_or_exit(char* addr, size_t bytes, |
dcubed@5255 | 296 | bool executable, const char* mesg); |
dcubed@5255 | 297 | static void commit_memory_or_exit(char* addr, size_t size, |
dcubed@5255 | 298 | size_t alignment_hint, |
dcubed@5255 | 299 | bool executable, const char* mesg); |
duke@435 | 300 | static bool uncommit_memory(char* addr, size_t bytes); |
duke@435 | 301 | static bool release_memory(char* addr, size_t bytes); |
coleenp@672 | 302 | |
coleenp@672 | 303 | enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; |
coleenp@672 | 304 | static bool protect_memory(char* addr, size_t bytes, ProtType prot, |
coleenp@912 | 305 | bool is_committed = true); |
coleenp@672 | 306 | |
duke@435 | 307 | static bool guard_memory(char* addr, size_t bytes); |
duke@435 | 308 | static bool unguard_memory(char* addr, size_t bytes); |
coleenp@1755 | 309 | static bool create_stack_guard_pages(char* addr, size_t bytes); |
zgu@3900 | 310 | static bool pd_create_stack_guard_pages(char* addr, size_t bytes); |
coleenp@1755 | 311 | static bool remove_stack_guard_pages(char* addr, size_t bytes); |
coleenp@1755 | 312 | |
duke@435 | 313 | static char* map_memory(int fd, const char* file_name, size_t file_offset, |
duke@435 | 314 | char *addr, size_t bytes, bool read_only = false, |
duke@435 | 315 | bool allow_exec = false); |
duke@435 | 316 | static char* remap_memory(int fd, const char* file_name, size_t file_offset, |
duke@435 | 317 | char *addr, size_t bytes, bool read_only, |
duke@435 | 318 | bool allow_exec); |
duke@435 | 319 | static bool unmap_memory(char *addr, size_t bytes); |
iveresov@3363 | 320 | static void free_memory(char *addr, size_t bytes, size_t alignment_hint); |
duke@435 | 321 | static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); |
duke@435 | 322 | |
duke@435 | 323 | // NUMA-specific interface |
iveresov@576 | 324 | static bool numa_has_static_binding(); |
iveresov@576 | 325 | static bool numa_has_group_homing(); |
iveresov@576 | 326 | static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); |
duke@435 | 327 | static void numa_make_global(char *addr, size_t bytes); |
duke@435 | 328 | static size_t numa_get_groups_num(); |
duke@435 | 329 | static size_t numa_get_leaf_groups(int *ids, size_t size); |
duke@435 | 330 | static bool numa_topology_changed(); |
duke@435 | 331 | static int numa_get_group_id(); |
duke@435 | 332 | |
duke@435 | 333 | // Page manipulation |
duke@435 | 334 | struct page_info { |
duke@435 | 335 | size_t size; |
duke@435 | 336 | int lgrp_id; |
duke@435 | 337 | }; |
duke@435 | 338 | static bool get_page_info(char *start, page_info* info); |
duke@435 | 339 | static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found); |
duke@435 | 340 | |
duke@435 | 341 | static char* non_memory_address_word(); |
duke@435 | 342 | // reserve, commit and pin the entire memory region |
stefank@5578 | 343 | static char* reserve_memory_special(size_t size, size_t alignment, |
stefank@5578 | 344 | char* addr, bool executable); |
duke@435 | 345 | static bool release_memory_special(char* addr, size_t bytes); |
iveresov@2850 | 346 | static void large_page_init(); |
duke@435 | 347 | static size_t large_page_size(); |
duke@435 | 348 | static bool can_commit_large_page_memory(); |
jcoomes@514 | 349 | static bool can_execute_large_page_memory(); |
duke@435 | 350 | |
duke@435 | 351 | // OS interface to polling page |
duke@435 | 352 | static address get_polling_page() { return _polling_page; } |
duke@435 | 353 | static void set_polling_page(address page) { _polling_page = page; } |
duke@435 | 354 | static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); } |
duke@435 | 355 | static void make_polling_page_unreadable(); |
duke@435 | 356 | static void make_polling_page_readable(); |
duke@435 | 357 | |
duke@435 | 358 | // Routines used to serialize the thread state without using membars |
duke@435 | 359 | static void serialize_thread_states(); |
duke@435 | 360 | |
duke@435 | 361 | // Since we write to the serialize page from every thread, we |
duke@435 | 362 | // want stores to be on unique cache lines whenever possible |
duke@435 | 363 | // in order to minimize CPU cross talk. We pre-compute the |
duke@435 | 364 | // amount to shift the thread* to make this offset unique to |
duke@435 | 365 | // each thread. |
duke@435 | 366 | static int get_serialize_page_shift_count() { |
duke@435 | 367 | return SerializePageShiftCount; |
duke@435 | 368 | } |
duke@435 | 369 | |
duke@435 | 370 | static void set_serialize_page_mask(uintptr_t mask) { |
duke@435 | 371 | _serialize_page_mask = mask; |
duke@435 | 372 | } |
duke@435 | 373 | |
duke@435 | 374 | static unsigned int get_serialize_page_mask() { |
duke@435 | 375 | return _serialize_page_mask; |
duke@435 | 376 | } |
duke@435 | 377 | |
duke@435 | 378 | static void set_memory_serialize_page(address page); |
duke@435 | 379 | |
duke@435 | 380 | static address get_memory_serialize_page() { |
duke@435 | 381 | return (address)_mem_serialize_page; |
duke@435 | 382 | } |
duke@435 | 383 | |
duke@435 | 384 | static inline void write_memory_serialize_page(JavaThread *thread) { |
duke@435 | 385 | uintptr_t page_offset = ((uintptr_t)thread >> |
duke@435 | 386 | get_serialize_page_shift_count()) & |
duke@435 | 387 | get_serialize_page_mask(); |
duke@435 | 388 | *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; |
duke@435 | 389 | } |
duke@435 | 390 | |
duke@435 | 391 | static bool is_memory_serialize_page(JavaThread *thread, address addr) { |
duke@435 | 392 | if (UseMembar) return false; |
twisti@1513 | 393 | // Previously this function calculated the exact address of this |
twisti@1513 | 394 | // thread's serialize page, and checked if the faulting address |
twisti@1513 | 395 | // was equal. However, some platforms mask off faulting addresses |
twisti@1513 | 396 | // to the page size, so now we just check that the address is |
twisti@1513 | 397 | // within the page. This makes the thread argument unnecessary, |
twisti@1513 | 398 | // but we retain the NULL check to preserve existing behaviour. |
duke@435 | 399 | if (thread == NULL) return false; |
twisti@1513 | 400 | address page = (address) _mem_serialize_page; |
twisti@1513 | 401 | return addr >= page && addr < (page + os::vm_page_size()); |
duke@435 | 402 | } |
duke@435 | 403 | |
duke@435 | 404 | static void block_on_serialize_page_trap(); |
duke@435 | 405 | |
duke@435 | 406 | // threads |
duke@435 | 407 | |
duke@435 | 408 | enum ThreadType { |
duke@435 | 409 | vm_thread, |
duke@435 | 410 | cgc_thread, // Concurrent GC thread |
duke@435 | 411 | pgc_thread, // Parallel GC thread |
duke@435 | 412 | java_thread, |
duke@435 | 413 | compiler_thread, |
bobv@2036 | 414 | watcher_thread, |
bobv@2036 | 415 | os_thread |
duke@435 | 416 | }; |
duke@435 | 417 | |
duke@435 | 418 | static bool create_thread(Thread* thread, |
duke@435 | 419 | ThreadType thr_type, |
duke@435 | 420 | size_t stack_size = 0); |
duke@435 | 421 | static bool create_main_thread(JavaThread* thread); |
duke@435 | 422 | static bool create_attached_thread(JavaThread* thread); |
duke@435 | 423 | static void pd_start_thread(Thread* thread); |
duke@435 | 424 | static void start_thread(Thread* thread); |
duke@435 | 425 | |
zgu@4079 | 426 | static void initialize_thread(Thread* thr); |
duke@435 | 427 | static void free_thread(OSThread* osthread); |
duke@435 | 428 | |
duke@435 | 429 | // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit |
duke@435 | 430 | static intx current_thread_id(); |
duke@435 | 431 | static int current_process_id(); |
duke@435 | 432 | static int sleep(Thread* thread, jlong ms, bool interruptable); |
duke@435 | 433 | static int naked_sleep(); |
duke@435 | 434 | static void infinite_sleep(); // never returns, use with CAUTION |
duke@435 | 435 | static void yield(); // Yields to all threads with same priority |
duke@435 | 436 | enum YieldResult { |
duke@435 | 437 | YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran |
duke@435 | 438 | YIELD_NONEREADY = 0, // No other runnable/ready threads. |
duke@435 | 439 | // platform-specific yield return immediately |
duke@435 | 440 | YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY |
duke@435 | 441 | // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong" |
duke@435 | 442 | // yield that can be used in lieu of blocking. |
duke@435 | 443 | } ; |
duke@435 | 444 | static YieldResult NakedYield () ; |
duke@435 | 445 | static void yield_all(int attempts = 0); // Yields to all other threads including lower priority |
duke@435 | 446 | static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing |
duke@435 | 447 | static OSReturn set_priority(Thread* thread, ThreadPriority priority); |
duke@435 | 448 | static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); |
duke@435 | 449 | |
duke@435 | 450 | static void interrupt(Thread* thread); |
duke@435 | 451 | static bool is_interrupted(Thread* thread, bool clear_interrupted); |
duke@435 | 452 | |
duke@435 | 453 | static int pd_self_suspend_thread(Thread* thread); |
duke@435 | 454 | |
duke@435 | 455 | static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp); |
duke@435 | 456 | static frame fetch_frame_from_context(void* ucVoid); |
duke@435 | 457 | |
duke@435 | 458 | static ExtendedPC get_thread_pc(Thread *thread); |
duke@435 | 459 | static void breakpoint(); |
duke@435 | 460 | |
duke@435 | 461 | static address current_stack_pointer(); |
duke@435 | 462 | static address current_stack_base(); |
duke@435 | 463 | static size_t current_stack_size(); |
duke@435 | 464 | |
roland@3606 | 465 | static void verify_stack_alignment() PRODUCT_RETURN; |
roland@3606 | 466 | |
duke@435 | 467 | static int message_box(const char* title, const char* message); |
duke@435 | 468 | static char* do_you_want_to_debug(const char* message); |
duke@435 | 469 | |
duke@435 | 470 | // run cmd in a separate process and return its exit code; or -1 on failures |
duke@435 | 471 | static int fork_and_exec(char *cmd); |
duke@435 | 472 | |
duke@435 | 473 | // Set file to send error reports. |
duke@435 | 474 | static void set_error_file(const char *logfile); |
duke@435 | 475 | |
duke@435 | 476 | // os::exit() is merged with vm_exit() |
duke@435 | 477 | // static void exit(int num); |
duke@435 | 478 | |
duke@435 | 479 | // Terminate the VM, but don't exit the process |
duke@435 | 480 | static void shutdown(); |
duke@435 | 481 | |
duke@435 | 482 | // Terminate with an error. Default is to generate a core file on platforms |
duke@435 | 483 | // that support such things. This calls shutdown() and then aborts. |
duke@435 | 484 | static void abort(bool dump_core = true); |
duke@435 | 485 | |
duke@435 | 486 | // Die immediately, no exit hook, no abort hook, no cleanup. |
duke@435 | 487 | static void die(); |
duke@435 | 488 | |
ikrylov@2322 | 489 | // File i/o operations |
ikrylov@2322 | 490 | static const int default_file_open_flags(); |
ikrylov@2322 | 491 | static int open(const char *path, int oflag, int mode); |
vlivanov@5027 | 492 | static FILE* open(int fd, const char* mode); |
ikrylov@2322 | 493 | static int close(int fd); |
ikrylov@2322 | 494 | static jlong lseek(int fd, jlong offset, int whence); |
ikrylov@2322 | 495 | static char* native_path(char *path); |
ikrylov@2322 | 496 | static int ftruncate(int fd, jlong length); |
ikrylov@2322 | 497 | static int fsync(int fd); |
ikrylov@2322 | 498 | static int available(int fd, jlong *bytes); |
ikrylov@2322 | 499 | |
ikrylov@2322 | 500 | //File i/o operations |
ikrylov@2322 | 501 | |
ikrylov@2322 | 502 | static size_t read(int fd, void *buf, unsigned int nBytes); |
ikrylov@2322 | 503 | static size_t restartable_read(int fd, void *buf, unsigned int nBytes); |
ikrylov@2322 | 504 | static size_t write(int fd, const void *buf, unsigned int nBytes); |
ikrylov@2322 | 505 | |
duke@435 | 506 | // Reading directories. |
duke@435 | 507 | static DIR* opendir(const char* dirname); |
duke@435 | 508 | static int readdir_buf_size(const char *path); |
duke@435 | 509 | static struct dirent* readdir(DIR* dirp, dirent* dbuf); |
duke@435 | 510 | static int closedir(DIR* dirp); |
duke@435 | 511 | |
duke@435 | 512 | // Dynamic library extension |
duke@435 | 513 | static const char* dll_file_extension(); |
duke@435 | 514 | |
duke@435 | 515 | static const char* get_temp_directory(); |
vlivanov@5027 | 516 | static const char* get_current_directory(char *buf, size_t buflen); |
duke@435 | 517 | |
kamg@677 | 518 | // Builds a platform-specific full library path given a ld path and lib name |
bpittore@4261 | 519 | // Returns true if buffer contains full path to existing file, false otherwise |
  // Builds a platform-dependent full library file name into buffer from
  // pathname and fname (e.g. "lib<fname>.so" on Linux, "<fname>.dll" on
  // Windows).  NOTE(review): presumably returns false when the resulting
  // name does not fit in buffer -- confirm against the platform files.
  static bool dll_build_name(char* buffer, size_t size,
                             const char* pathname, const char* fname);

  // Symbol lookup, find nearest function name; basically it implements
  // dladdr() for all platforms. Name of the nearest function is copied
  // to buf. Distance from its base address is optionally returned as offset.
  // If function name is not found, buf[0] is set to '\0' and offset is
  // set to -1 (if offset is non-NULL).
  static bool dll_address_to_function_name(address addr, char* buf,
                                           int buflen, int* offset);

  // Locate DLL/DSO. On success, full path of the library is copied to
  // buf, and offset is optionally set to be the distance between addr
  // and the library's base address. On failure, buf[0] is set to '\0'
  // and offset is set to -1 (if offset is non-NULL).
  static bool dll_address_to_library_name(address addr, char* buf,
                                          int buflen, int* offset);

  // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
  static bool address_is_in_vm(address addr);

  // Loads .dll/.so and
  // in case of error it checks if .dll/.so was built for the
  // same architecture as Hotspot is running on.
  // NOTE(review): ebuf/ebuflen look like an error-message buffer filled on
  // failure -- verify per-platform before relying on its contents.
  static void* dll_load(const char *name, char *ebuf, int ebuflen);

  // lookup symbol in a shared library
  static void* dll_lookup(void* handle, const char* name);

  // Unload library
  static void dll_unload(void *lib);

  // Return the handle of this process
  static void* get_default_process_handle();

  // Check for static linked agent library
  static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
                                 size_t syms_len);

  // Find agent entry point
  static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
                                   const char *syms[], size_t syms_len);
bpittore@5585 | 562 | |
  // Print out system information; they are called by fatal error handler.
  // Output format may be different on different platforms.
  static void print_os_info(outputStream* st);
  static void print_os_info_brief(outputStream* st);
  static void print_cpu_info(outputStream* st);
  static void pd_print_cpu_info(outputStream* st);   // platform-dependent part
  static void print_memory_info(outputStream* st);
  static void print_dll_info(outputStream* st);
  static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
  static void print_context(outputStream* st, void* context);
  static void print_register_info(outputStream* st, void* context);
  static void print_siginfo(outputStream* st, void* siginfo);
  static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
  static void print_date_and_time(outputStream* st);

  // Best-effort description of the value/address x (used by debug.cpp and
  // the fatal error handler).
  static void print_location(outputStream* st, intptr_t x, bool verbose = false);
  // Text of the most recent OS-level error, copied into buf.
  // NOTE(review): presumably returns the length written -- confirm in the
  // platform implementations.
  static size_t lasterror(char *buf, size_t len);
  // Raw numeric value of the most recent OS-level error.
  static int get_last_error();

  // Determines whether the calling process is being debugged by a user-mode debugger.
  static bool is_debugger_attached();

  // wait for a key press if PauseAtExit is set
  static void wait_for_keypress_at_exit(void);

  // The following two functions are used by fatal error handler to trace
  // native (C) frames. They are not part of frame.hpp/frame.cpp because
  // frame.hpp/cpp assume thread is JavaThread, and also because different
  // OS/compiler may have different convention or provide different API to
  // walk C frames.
  //
  // We don't attempt to become a debugger, so we only follow frames if that
  // does not require a lookup in the unwind table, which is part of the binary
  // file but may be unsafe to read after a fatal error. So on x86, we can
  // only walk stack if %ebp is used as frame pointer; on ia64, it's not
  // possible to walk C stack without having the unwind table.
  static bool is_first_C_frame(frame *fr);
  static frame get_sender_for_C_frame(frame *fr);

  // return current frame. pc() and sp() are set to NULL on failure.
  static frame current_frame();

  // Dump raw memory from start to end to st, unitsize bytes at a time.
  static void print_hex_dump(outputStream* st, address start, address end, int unitsize);

  // returns a string to describe the exception/signal;
  // returns NULL if exception_code is not an OS exception/signal.
  static const char* exception_name(int exception_code, char* buf, size_t buflen);
duke@435 | 610 | |
  // Returns native Java library, loads if necessary
  static void* native_java_library();

  // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
  static void jvm_path(char *buf, jint buflen);

  // Returns true if we are running in a headless jre.
  static bool is_headless_jre();

  // JNI names
  static void print_jni_name_prefix_on(outputStream* st, int args_size);
  static void print_jni_name_suffix_on(outputStream* st, int args_size);

  // File conventions (platform-specific separators, e.g. '/' vs '\\').
  static const char* file_separator();
  static const char* line_separator();
  static const char* path_separator();

  // Init os specific system properties values
  static void init_system_properties_values();

  // IO operations, non-JVM_ version.
  static int stat(const char* path, struct stat* sbuf);
  static bool dir_is_empty(const char* path);

  // IO operations on binary files
  static int create_binary_file(const char* path, bool rewrite_existing);
  static jlong current_file_offset(int fd);
  static jlong seek_to_file_offset(int fd, jlong offset);

  // Thread Local Storage -- VM-managed TLS slots addressed by index.
  static int allocate_thread_local_storage();
  static void thread_local_storage_at_put(int index, void* value);
  static void* thread_local_storage_at(int index);
  static void free_thread_local_storage(int index);

  // Stack walk -- pc of the n-th caller frame.
  static address get_caller_pc(int n = 0);

  // General allocation (must be MT-safe).
  // NOTE(review): MEMFLAGS/caller_pc appear to tag allocations for native
  // memory tracking -- confirm against memTracker usage.
  static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0);
  static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
  static void free (void *memblock, MEMFLAGS flags = mtNone);
  static bool check_heap(bool force = false); // verify C heap integrity
  static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup

#ifndef PRODUCT
  // Allocation statistics, maintained only in non-product builds.
  static julong num_mallocs;  // # of calls to malloc/realloc
  static julong alloc_bytes;  // # of bytes allocated
  static julong num_frees;    // # of calls to free
  static julong free_bytes;   // # of bytes freed
#endif
duke@435 | 663 | |
  // SocketInterface (ex HPI SocketInterface )
  // Thin wrappers around the platform's BSD-style socket calls; the
  // signatures mirror the underlying OS API.
  static int socket(int domain, int type, int protocol);
  static int socket_close(int fd);
  static int socket_shutdown(int fd, int howto);
  static int recv(int fd, char* buf, size_t nBytes, uint flags);
  static int send(int fd, char* buf, size_t nBytes, uint flags);
  // Like send(), but usable from signal/fatal-error context.
  // NOTE(review): exact difference from send() is platform-defined -- confirm.
  static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
  // Wait up to 'timeout' (NOTE(review): presumably milliseconds -- confirm)
  // for fd to become readable.
  static int timeout(int fd, long timeout);
  static int listen(int fd, int count);
  static int connect(int fd, struct sockaddr* him, socklen_t len);
  static int bind(int fd, struct sockaddr* him, socklen_t len);
  static int accept(int fd, struct sockaddr* him, socklen_t* len);
  static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
                      struct sockaddr* from, socklen_t* fromlen);
  static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
  static int sendto(int fd, char* buf, size_t len, uint flags,
                    struct sockaddr* to, socklen_t tolen);
  // Number of bytes available to read, stored in *pbytes.
  static int socket_available(int fd, jint* pbytes);

  static int get_sock_opt(int fd, int level, int optname,
                          char* optval, socklen_t* optlen);
  static int set_sock_opt(int fd, int level, int optname,
                          const char* optval, socklen_t optlen);
  static int get_host_name(char* name, int namelen);

  static struct hostent* get_host_by_name(char* name);
ikrylov@2322 | 690 | |
  // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
  static void signal_init();
  static void signal_init_pd();       // platform-dependent part of signal_init()
  static void signal_notify(int signal_number);
  // Install handler for signal_number; returns the previous handler.
  static void* signal(int signal_number, void* handler);
  static void signal_raise(int signal_number);
  static int signal_wait();
  static int signal_lookup();
  static void* user_handler();
  static void terminate_signal_thread();
  static int sigexitnum_pd();

  // random number generation
  static long random(); // return 32bit pseudorandom number
  static void init_random(long initval); // initialize random sequence

  // Structured OS Exception support
  static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

  // On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
  static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);

  // Get the default path to the core file
  // Returns the length of the string
  static int get_core_path(char* buffer, size_t bufferSize);

  // JVMTI & JVM monitoring and management support
  // The thread_cpu_time() and current_thread_cpu_time() are only
  // supported if is_thread_cpu_time_supported() returns true.
  // They are not supported on Solaris T1.

  // Thread CPU Time - return the fast estimate on a platform
  // On Solaris - call gethrvtime (fast) - user time only
  // On Linux - fast clock_gettime where available - user+sys
  //          - otherwise: very slow /proc fs - user+sys
  // On Windows - GetThreadTimes - user+sys
  static jlong current_thread_cpu_time();
  static jlong thread_cpu_time(Thread* t);

  // Thread CPU Time with user_sys_cpu_time parameter.
  //
  // If user_sys_cpu_time is true, user+sys time is returned.
  // Otherwise, only user time is returned
  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

  // Return a bunch of info about the timers.
  // Note that the returned info for these two functions may be different
  // on some platforms
  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

  static bool is_thread_cpu_time_supported();

  // System loadavg support. Returns -1 if load average cannot be obtained.
  static int loadavg(double loadavg[], int nelem);

  // Hook for os specific jvm options that we don't want to abort on seeing
  static bool obsolete_option(const JavaVMOption *option);
duke@435 | 750 | |
phh@3378 | 751 | // Extensions |
phh@3378 | 752 | #include "runtime/os_ext.hpp" |
phh@3378 | 753 | |
phh@3378 | 754 | public: |
  // Abstract callback: subclasses put the work to be executed in call().
  // Stack-allocated only (StackObj).  The name suggests it is run under
  // the OS crash-protection mechanism -- see the platform os files.
  class CrashProtectionCallback : public StackObj {
  public:
    virtual void call() = 0;
  };
phh@3378 | 759 | |
duke@435 | 760 | // Platform dependent stuff |
stefank@2314 | 761 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 762 | # include "os_linux.hpp" |
nloodin@3783 | 763 | # include "os_posix.hpp" |
stefank@2314 | 764 | #endif |
stefank@2314 | 765 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 766 | # include "os_solaris.hpp" |
nloodin@3783 | 767 | # include "os_posix.hpp" |
stefank@2314 | 768 | #endif |
stefank@2314 | 769 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 770 | # include "os_windows.hpp" |
stefank@2314 | 771 | #endif |
never@3156 | 772 | #ifdef TARGET_OS_FAMILY_bsd |
nloodin@3783 | 773 | # include "os_posix.hpp" |
never@3156 | 774 | # include "os_bsd.hpp" |
never@3156 | 775 | #endif |
stefank@2314 | 776 | #ifdef TARGET_OS_ARCH_linux_x86 |
stefank@2314 | 777 | # include "os_linux_x86.hpp" |
stefank@2314 | 778 | #endif |
stefank@2314 | 779 | #ifdef TARGET_OS_ARCH_linux_sparc |
stefank@2314 | 780 | # include "os_linux_sparc.hpp" |
stefank@2314 | 781 | #endif |
stefank@2314 | 782 | #ifdef TARGET_OS_ARCH_linux_zero |
stefank@2314 | 783 | # include "os_linux_zero.hpp" |
stefank@2314 | 784 | #endif |
stefank@2314 | 785 | #ifdef TARGET_OS_ARCH_solaris_x86 |
stefank@2314 | 786 | # include "os_solaris_x86.hpp" |
stefank@2314 | 787 | #endif |
stefank@2314 | 788 | #ifdef TARGET_OS_ARCH_solaris_sparc |
stefank@2314 | 789 | # include "os_solaris_sparc.hpp" |
stefank@2314 | 790 | #endif |
stefank@2314 | 791 | #ifdef TARGET_OS_ARCH_windows_x86 |
stefank@2314 | 792 | # include "os_windows_x86.hpp" |
stefank@2314 | 793 | #endif |
bobv@2508 | 794 | #ifdef TARGET_OS_ARCH_linux_arm |
bobv@2508 | 795 | # include "os_linux_arm.hpp" |
bobv@2508 | 796 | #endif |
bobv@2508 | 797 | #ifdef TARGET_OS_ARCH_linux_ppc |
bobv@2508 | 798 | # include "os_linux_ppc.hpp" |
bobv@2508 | 799 | #endif |
never@3156 | 800 | #ifdef TARGET_OS_ARCH_bsd_x86 |
never@3156 | 801 | # include "os_bsd_x86.hpp" |
never@3156 | 802 | #endif |
never@3156 | 803 | #ifdef TARGET_OS_ARCH_bsd_zero |
never@3156 | 804 | # include "os_bsd_zero.hpp" |
never@3156 | 805 | #endif |
stefank@2314 | 806 | |
phh@3378 | 807 | public: |
#ifndef PLATFORM_PRINT_NATIVE_STACK
  // No platform-specific code for printing the native stack.
  // Default stub: returns false to tell the caller that nothing was
  // printed, so a generic fallback can be used instead.
  static bool platform_print_native_stack(outputStream* st, void* context,
                                          char *buf, int buf_size) {
    return false;
  }
#endif
iklam@5667 | 815 | |
  // debugging support (mostly used by debug.cpp but also fatal error handler)
  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

  static bool dont_yield(); // when true, JVM_Yield() is nop
  static void print_statistics();

  // Thread priority helpers (implemented in OS-specific part)
  static OSReturn set_native_priority(Thread* thread, int native_prio);
  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  // Maps Java priorities (indices 0 .. CriticalPriority) to native values.
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Builds a platform dependent Agent_OnLoad_<libname> function name
  // which is used to find statically linked in agents.
  static char* build_agent_function_name(const char *sym, const char *cname,
                                         bool is_absolute_path);
bpittore@5585 | 840 | |
sla@5237 | 841 | class SuspendedThreadTaskContext { |
sla@5237 | 842 | public: |
sla@5237 | 843 | SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {} |
sla@5237 | 844 | Thread* thread() const { return _thread; } |
sla@5237 | 845 | void* ucontext() const { return _ucontext; } |
sla@5237 | 846 | private: |
sla@5237 | 847 | Thread* _thread; |
sla@5237 | 848 | void* _ucontext; |
sla@5237 | 849 | }; |
sla@5237 | 850 | |
sla@5237 | 851 | class SuspendedThreadTask { |
sla@5237 | 852 | public: |
sla@5237 | 853 | SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {} |
sla@5237 | 854 | virtual ~SuspendedThreadTask() {} |
sla@5237 | 855 | void run(); |
sla@5237 | 856 | bool is_done() { return _done; } |
sla@5237 | 857 | virtual void do_task(const SuspendedThreadTaskContext& context) = 0; |
sla@5237 | 858 | protected: |
sla@5237 | 859 | private: |
sla@5237 | 860 | void internal_do_task(); |
sla@5237 | 861 | Thread* _thread; |
sla@5237 | 862 | bool _done; |
sla@5237 | 863 | }; |
sla@5237 | 864 | |
#ifndef TARGET_OS_FAMILY_windows
  // Suspend/resume support (non-Windows).
  //
  // State machine, driven by the WatcherThread on one side and by the
  // signal handler of the stopped thread on the other.  A thread starts
  // in SR_RUNNING.  Legal transitions:
  //
  //   SR_RUNNING         -> SR_SUSPEND_REQUEST  WatcherThread wants to suspend it
  //   SR_SUSPEND_REQUEST -> SR_RUNNING          WatcherThread waited too long for
  //                                             SR_SUSPENDED (timeout)
  //   SR_SUSPEND_REQUEST -> SR_SUSPENDED        stopped thread receives the signal
  //                                             and switches state
  //   SR_SUSPENDED       -> SR_WAKEUP_REQUEST   WatcherThread has done the work and
  //                                             wants to resume
  //   SR_WAKEUP_REQUEST  -> SR_RUNNING          stopped thread receives the signal
  //   SR_WAKEUP_REQUEST  -> SR_WAKEUP_REQUEST   timeout: resend the signal, try again
  class SuspendResume {
   public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

   private:
    volatile State _state;

    // Try to switch from state 'from' to state 'to'; the return value is
    // whatever state is in effect once the call completes.
    State switch_state(State from, State to);

   public:
    SuspendResume() : _state(SR_RUNNING) { }

    State state() const { return _state; }

    // Transition requests -- each returns the resulting state.
    State request_suspend() { return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST); }
    State cancel_suspend()  { return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING); }
    State suspended()       { return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED); }
    State request_wakeup()  { return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST); }
    State running()         { return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING); }

    // State queries.
    bool is_running() const         { return _state == SR_RUNNING; }
    bool is_suspend_request() const { return _state == SR_SUSPEND_REQUEST; }
    bool is_suspended() const       { return _state == SR_SUSPENDED; }
  };
#endif
sla@5237 | 937 | |
sla@5237 | 938 | |
 protected:
  static long _rand_seed;      // seed for random number generator
  static int _processor_count; // number of processors

  // Expand format_string using home/fileSep/pathSep to build a boot path
  // string.  NOTE(review): presumably a printf-style pattern -- confirm
  // against the implementation in os.cpp.
  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  static bool set_boot_path(char fileSep, char pathSep);
  // Split 'path' into its components; the count is returned through *n.
  // NOTE(review): ownership of the returned array is not documented here --
  // check callers before freeing.
  static char** split_path(const char* path, int* n);
rbackman@5424 | 950 | |
duke@435 | 951 | }; |
duke@435 | 952 | |
// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

// Platform-specific spin-wait hint for busy-wait loops; implemented in
// the per-platform sources.
extern "C" int SpinPause();
stefank@2314 | 959 | |
stefank@2314 | 960 | #endif // SHARE_VM_RUNTIME_OS_HPP |