/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_RUNTIME_OS_HPP stefank@2314: #define SHARE_VM_RUNTIME_OS_HPP stefank@2314: stefank@2314: #include "jvmtifiles/jvmti.h" stefank@2314: #include "runtime/atomic.hpp" stefank@2314: #include "runtime/extendedPC.hpp" stefank@2314: #include "runtime/handles.hpp" stefank@2314: #include "utilities/top.hpp" stefank@2314: #ifdef TARGET_OS_FAMILY_linux stefank@2314: # include "jvm_linux.h" rbackman@5424: # include stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_solaris stefank@2314: # include "jvm_solaris.h" rbackman@5424: # include stefank@2314: #endif stefank@2314: #ifdef TARGET_OS_FAMILY_windows stefank@2314: # include "jvm_windows.h" stefank@2314: #endif goetz@6461: #ifdef TARGET_OS_FAMILY_aix goetz@6461: # include "jvm_aix.h" goetz@6461: # include goetz@6461: #endif never@3156: #ifdef TARGET_OS_FAMILY_bsd never@3156: # include "jvm_bsd.h" rbackman@5424: # include never@3156: #endif stefank@2314: bpittore@5585: class AgentLibrary; bpittore@5585: duke@435: // os defines the interface to operating system; this includes traditional duke@435: // OS services (time, I/O) as well as other functionality with system- duke@435: // dependent code. duke@435: duke@435: typedef void (*dll_func)(...); duke@435: duke@435: class Thread; duke@435: class JavaThread; duke@435: class Event; duke@435: class DLL; duke@435: class FileHandle; iveresov@576: template class GrowableArray; duke@435: duke@435: // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. 
-- Rose duke@435: duke@435: // Platform-independent error return values from OS functions duke@435: enum OSReturn { duke@435: OS_OK = 0, // Operation was successful duke@435: OS_ERR = -1, // Operation failed duke@435: OS_INTRPT = -2, // Operation was interrupted duke@435: OS_TIMEOUT = -3, // Operation timed out duke@435: OS_NOMEM = -5, // Operation failed for lack of memory duke@435: OS_NORESOURCE = -6 // Operation failed for lack of nonmemory resource duke@435: }; duke@435: duke@435: enum ThreadPriority { // JLS 20.20.1-3 duke@435: NoPriority = -1, // Initial non-priority value duke@435: MinPriority = 1, // Minimum priority duke@435: NormPriority = 5, // Normal (non-daemon) priority duke@435: NearMaxPriority = 9, // High priority, used for VMThread phh@3481: MaxPriority = 10, // Highest priority, used for WatcherThread duke@435: // ensures that VMThread doesn't starve profiler phh@3481: CriticalPriority = 11 // Critical thread priority duke@435: }; duke@435: dcubed@5255: // Executable parameter flag for os::commit_memory() and dcubed@5255: // os::commit_memory_or_exit(). 
dcubed@5255: const bool ExecMem = true; dcubed@5255: duke@435: // Typedef for structured exception handling support duke@435: typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); duke@435: duke@435: class os: AllStatic { twisti@5726: friend class VMStructs; twisti@5726: phh@1558: public: duke@435: enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel) duke@435: phh@1558: private: duke@435: static OSThread* _starting_thread; duke@435: static address _polling_page; duke@435: static volatile int32_t * _mem_serialize_page; duke@435: static uintptr_t _serialize_page_mask; phh@1558: public: duke@435: static size_t _page_sizes[page_sizes_max]; duke@435: phh@1558: private: duke@435: static void init_page_sizes(size_t default_page_size) { duke@435: _page_sizes[0] = default_page_size; duke@435: _page_sizes[1] = 0; // sentinel duke@435: } duke@435: zgu@3900: static char* pd_reserve_memory(size_t bytes, char* addr = 0, zgu@3900: size_t alignment_hint = 0); zgu@3900: static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); zgu@3900: static void pd_split_reserved_memory(char *base, size_t size, zgu@3900: size_t split, bool realloc); dcubed@5255: static bool pd_commit_memory(char* addr, size_t bytes, bool executable); zgu@3900: static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, dcubed@5255: bool executable); dcubed@5255: // Same as pd_commit_memory() that either succeeds or calls dcubed@5255: // vm_exit_out_of_memory() with the specified mesg. 
dcubed@5255: static void pd_commit_memory_or_exit(char* addr, size_t bytes, dcubed@5255: bool executable, const char* mesg); dcubed@5255: static void pd_commit_memory_or_exit(char* addr, size_t size, dcubed@5255: size_t alignment_hint, dcubed@5255: bool executable, const char* mesg); zgu@3900: static bool pd_uncommit_memory(char* addr, size_t bytes); zgu@3900: static bool pd_release_memory(char* addr, size_t bytes); zgu@3900: zgu@3900: static char* pd_map_memory(int fd, const char* file_name, size_t file_offset, zgu@3900: char *addr, size_t bytes, bool read_only = false, zgu@3900: bool allow_exec = false); zgu@3900: static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset, zgu@3900: char *addr, size_t bytes, bool read_only, zgu@3900: bool allow_exec); zgu@3900: static bool pd_unmap_memory(char *addr, size_t bytes); zgu@3900: static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint); zgu@3900: static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint); zgu@3900: zgu@3900: duke@435: public: bobv@2036: static void init(void); // Called before command line parsing tschatzl@5701: static void init_before_ergo(void); // Called after command line parsing tschatzl@5701: // before VM ergonomics processing. 
bobv@2036: static jint init_2(void); // Called after command line parsing tschatzl@5701: // and VM ergonomics processing phh@3378: static void init_globals(void) { // Called from init_globals() in init.cpp phh@3378: init_globals_ext(); phh@3378: } bobv@2036: static void init_3(void); // Called at the end of vm init duke@435: duke@435: // File names are case-insensitive on windows only duke@435: // Override me as needed duke@435: static int file_name_strcmp(const char* s1, const char* s2); duke@435: duke@435: static bool getenv(const char* name, char* buffer, int len); duke@435: static bool have_special_privileges(); duke@435: duke@435: static jlong javaTimeMillis(); duke@435: static jlong javaTimeNanos(); duke@435: static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); duke@435: static void run_periodic_checks(); duke@435: duke@435: duke@435: // Returns the elapsed time in seconds since the vm started. duke@435: static double elapsedTime(); duke@435: duke@435: // Returns real time in seconds since an arbitrary point duke@435: // in the past. duke@435: static bool getTimesSecs(double* process_real_time, duke@435: double* process_user_time, duke@435: double* process_system_time); duke@435: duke@435: // Interface to the performance counter duke@435: static jlong elapsed_counter(); duke@435: static jlong elapsed_frequency(); duke@435: ysr@777: // The "virtual time" of a thread is the amount of time a thread has ysr@777: // actually run. The first function indicates whether the OS supports ysr@777: // this functionality for the current thread, and if so: ysr@777: // * the second enables vtime tracking (if that is required). ysr@777: // * the third tells whether vtime is enabled. ysr@777: // * the fourth returns the elapsed virtual time for the current ysr@777: // thread. 
ysr@777: static bool supports_vtime(); ysr@777: static bool enable_vtime(); ysr@777: static bool vtime_enabled(); ysr@777: static double elapsedVTime(); ysr@777: duke@435: // Return current local time in a string (YYYY-MM-DD HH:MM:SS). duke@435: // It is MT safe, but not async-safe, as reading time zone duke@435: // information may require a lock on some platforms. ysr@983: static char* local_time_string(char *buf, size_t buflen); ysr@983: static struct tm* localtime_pd (const time_t* clock, struct tm* res); duke@435: // Fill in buffer with current local time as an ISO-8601 string. duke@435: // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. duke@435: // Returns buffer, or NULL if it failed. duke@435: static char* iso8601_time(char* buffer, size_t buffer_length); duke@435: duke@435: // Interface for detecting multiprocessor system duke@435: static inline bool is_MP() { duke@435: assert(_processor_count > 0, "invalid processor count"); minqi@4845: return _processor_count > 1 || AssumeMP; duke@435: } duke@435: static julong available_memory(); duke@435: static julong physical_memory(); tschatzl@4854: static bool has_allocatable_memory_limit(julong* limit); duke@435: static bool is_server_class_machine(); duke@435: duke@435: // number of CPUs duke@435: static int processor_count() { duke@435: return _processor_count; duke@435: } phh@1558: static void set_processor_count(int count) { _processor_count = count; } duke@435: duke@435: // Returns the number of CPUs this process is currently allowed to run on. duke@435: // Note that on some OSes this can change dynamically. duke@435: static int active_processor_count(); duke@435: duke@435: // Bind processes to processors. duke@435: // This is a two step procedure: duke@435: // first you generate a distribution of processes to processors, duke@435: // then you bind processes according to that distribution. duke@435: // Compute a distribution for number of processes to processors. 
duke@435: // Stores the processor id's into the distribution array argument. duke@435: // Returns true if it worked, false if it didn't. duke@435: static bool distribute_processes(uint length, uint* distribution); duke@435: // Binds the current process to a processor. duke@435: // Returns true if it worked, false if it didn't. duke@435: static bool bind_to_processor(uint processor_id); duke@435: dcubed@3202: // Give a name to the current thread. dcubed@3202: static void set_native_thread_name(const char *name); dcubed@3202: duke@435: // Interface for stack banging (predetect possible stack overflow for duke@435: // exception processing) There are guard pages, and above that shadow duke@435: // pages for stack overflow checking. duke@435: static bool uses_stack_guard_pages(); duke@435: static bool allocate_stack_guard_pages(); duke@435: static void bang_stack_shadow_pages(); duke@435: static bool stack_shadow_pages_available(Thread *thread, methodHandle method); duke@435: duke@435: // OS interface to Virtual Memory duke@435: duke@435: // Return the default page size. duke@435: static int vm_page_size(); duke@435: duke@435: // Return the page size to use for a region of memory. The min_pages argument duke@435: // is a hint intended to limit fragmentation; it says the returned page size duke@435: // should be <= region_max_size / min_pages. Because min_pages is a hint, duke@435: // this routine may return a size larger than region_max_size / min_pages. duke@435: // duke@435: // The current implementation ignores min_pages if a larger page size is an duke@435: // exact multiple of both region_min_size and region_max_size. This allows duke@435: // larger pages to be used when doing so would not cause fragmentation; in duke@435: // particular, a single page can be used when region_min_size == duke@435: // region_max_size == a supported page size. 
duke@435: static size_t page_size_for_region(size_t region_min_size, duke@435: size_t region_max_size, duke@435: uint min_pages); tschatzl@5701: // Return the largest page size that can be used tschatzl@5701: static size_t max_page_size() { tschatzl@5701: // The _page_sizes array is sorted in descending order. tschatzl@5701: return _page_sizes[0]; tschatzl@5701: } duke@435: jcoomes@3057: // Methods for tracing page sizes returned by the above method; enabled by duke@435: // TracePageSizes. The region_{min,max}_size parameters should be the values duke@435: // passed to page_size_for_region() and page_size should be the result of that duke@435: // call. The (optional) base and size parameters should come from the duke@435: // ReservedSpace base() and size() methods. jcoomes@3057: static void trace_page_sizes(const char* str, const size_t* page_sizes, jcoomes@3057: int count) PRODUCT_RETURN; duke@435: static void trace_page_sizes(const char* str, const size_t region_min_size, duke@435: const size_t region_max_size, duke@435: const size_t page_size, duke@435: const char* base = NULL, duke@435: const size_t size = 0) PRODUCT_RETURN; duke@435: duke@435: static int vm_allocation_granularity(); duke@435: static char* reserve_memory(size_t bytes, char* addr = 0, duke@435: size_t alignment_hint = 0); zgu@5053: static char* reserve_memory(size_t bytes, char* addr, zgu@5053: size_t alignment_hint, MEMFLAGS flags); brutisso@4369: static char* reserve_memory_aligned(size_t size, size_t alignment); duke@435: static char* attempt_reserve_memory_at(size_t bytes, char* addr); duke@435: static void split_reserved_memory(char *base, size_t size, duke@435: size_t split, bool realloc); dcubed@5255: static bool commit_memory(char* addr, size_t bytes, bool executable); coleenp@1091: static bool commit_memory(char* addr, size_t size, size_t alignment_hint, dcubed@5255: bool executable); dcubed@5255: // Same as commit_memory() that either succeeds or calls dcubed@5255: // 
vm_exit_out_of_memory() with the specified mesg. dcubed@5255: static void commit_memory_or_exit(char* addr, size_t bytes, dcubed@5255: bool executable, const char* mesg); dcubed@5255: static void commit_memory_or_exit(char* addr, size_t size, dcubed@5255: size_t alignment_hint, dcubed@5255: bool executable, const char* mesg); duke@435: static bool uncommit_memory(char* addr, size_t bytes); duke@435: static bool release_memory(char* addr, size_t bytes); coleenp@672: coleenp@672: enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; coleenp@672: static bool protect_memory(char* addr, size_t bytes, ProtType prot, coleenp@912: bool is_committed = true); coleenp@672: duke@435: static bool guard_memory(char* addr, size_t bytes); duke@435: static bool unguard_memory(char* addr, size_t bytes); coleenp@1755: static bool create_stack_guard_pages(char* addr, size_t bytes); zgu@3900: static bool pd_create_stack_guard_pages(char* addr, size_t bytes); coleenp@1755: static bool remove_stack_guard_pages(char* addr, size_t bytes); coleenp@1755: duke@435: static char* map_memory(int fd, const char* file_name, size_t file_offset, duke@435: char *addr, size_t bytes, bool read_only = false, duke@435: bool allow_exec = false); duke@435: static char* remap_memory(int fd, const char* file_name, size_t file_offset, duke@435: char *addr, size_t bytes, bool read_only, duke@435: bool allow_exec); duke@435: static bool unmap_memory(char *addr, size_t bytes); iveresov@3363: static void free_memory(char *addr, size_t bytes, size_t alignment_hint); duke@435: static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); duke@435: duke@435: // NUMA-specific interface iveresov@576: static bool numa_has_static_binding(); iveresov@576: static bool numa_has_group_homing(); iveresov@576: static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); duke@435: static void numa_make_global(char *addr, size_t bytes); duke@435: static size_t numa_get_groups_num(); 
duke@435: static size_t numa_get_leaf_groups(int *ids, size_t size); duke@435: static bool numa_topology_changed(); duke@435: static int numa_get_group_id(); duke@435: duke@435: // Page manipulation duke@435: struct page_info { duke@435: size_t size; duke@435: int lgrp_id; duke@435: }; duke@435: static bool get_page_info(char *start, page_info* info); duke@435: static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found); duke@435: duke@435: static char* non_memory_address_word(); duke@435: // reserve, commit and pin the entire memory region stefank@5578: static char* reserve_memory_special(size_t size, size_t alignment, stefank@5578: char* addr, bool executable); duke@435: static bool release_memory_special(char* addr, size_t bytes); iveresov@2850: static void large_page_init(); duke@435: static size_t large_page_size(); duke@435: static bool can_commit_large_page_memory(); jcoomes@514: static bool can_execute_large_page_memory(); duke@435: duke@435: // OS interface to polling page duke@435: static address get_polling_page() { return _polling_page; } duke@435: static void set_polling_page(address page) { _polling_page = page; } duke@435: static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); } duke@435: static void make_polling_page_unreadable(); duke@435: static void make_polling_page_readable(); duke@435: duke@435: // Routines used to serialize the thread state without using membars duke@435: static void serialize_thread_states(); duke@435: duke@435: // Since we write to the serialize page from every thread, we duke@435: // want stores to be on unique cache lines whenever possible duke@435: // in order to minimize CPU cross talk. We pre-compute the duke@435: // amount to shift the thread* to make this offset unique to duke@435: // each thread. 
duke@435: static int get_serialize_page_shift_count() { duke@435: return SerializePageShiftCount; duke@435: } duke@435: duke@435: static void set_serialize_page_mask(uintptr_t mask) { duke@435: _serialize_page_mask = mask; duke@435: } duke@435: duke@435: static unsigned int get_serialize_page_mask() { duke@435: return _serialize_page_mask; duke@435: } duke@435: duke@435: static void set_memory_serialize_page(address page); duke@435: duke@435: static address get_memory_serialize_page() { duke@435: return (address)_mem_serialize_page; duke@435: } duke@435: duke@435: static inline void write_memory_serialize_page(JavaThread *thread) { duke@435: uintptr_t page_offset = ((uintptr_t)thread >> duke@435: get_serialize_page_shift_count()) & duke@435: get_serialize_page_mask(); duke@435: *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; duke@435: } duke@435: duke@435: static bool is_memory_serialize_page(JavaThread *thread, address addr) { duke@435: if (UseMembar) return false; twisti@1513: // Previously this function calculated the exact address of this twisti@1513: // thread's serialize page, and checked if the faulting address twisti@1513: // was equal. However, some platforms mask off faulting addresses twisti@1513: // to the page size, so now we just check that the address is twisti@1513: // within the page. This makes the thread argument unnecessary, twisti@1513: // but we retain the NULL check to preserve existing behaviour. 
duke@435: if (thread == NULL) return false; twisti@1513: address page = (address) _mem_serialize_page; twisti@1513: return addr >= page && addr < (page + os::vm_page_size()); duke@435: } duke@435: duke@435: static void block_on_serialize_page_trap(); duke@435: duke@435: // threads duke@435: duke@435: enum ThreadType { duke@435: vm_thread, duke@435: cgc_thread, // Concurrent GC thread duke@435: pgc_thread, // Parallel GC thread duke@435: java_thread, duke@435: compiler_thread, bobv@2036: watcher_thread, bobv@2036: os_thread duke@435: }; duke@435: duke@435: static bool create_thread(Thread* thread, duke@435: ThreadType thr_type, duke@435: size_t stack_size = 0); duke@435: static bool create_main_thread(JavaThread* thread); duke@435: static bool create_attached_thread(JavaThread* thread); duke@435: static void pd_start_thread(Thread* thread); duke@435: static void start_thread(Thread* thread); duke@435: zgu@4079: static void initialize_thread(Thread* thr); duke@435: static void free_thread(OSThread* osthread); duke@435: duke@435: // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit duke@435: static intx current_thread_id(); duke@435: static int current_process_id(); duke@435: static int sleep(Thread* thread, jlong ms, bool interruptable); dsimms@6348: // Short standalone OS sleep suitable for slow path spin loop. dsimms@6348: // Ignores Thread.interrupt() (so keep it short). dsimms@6348: // ms = 0, will sleep for the least amount of time allowed by the OS. dsimms@6348: static void naked_short_sleep(jlong ms); duke@435: static void infinite_sleep(); // never returns, use with CAUTION duke@435: static void yield(); // Yields to all threads with same priority duke@435: enum YieldResult { duke@435: YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran duke@435: YIELD_NONEREADY = 0, // No other runnable/ready threads. 
duke@435: // platform-specific yield return immediately duke@435: YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY duke@435: // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong" duke@435: // yield that can be used in lieu of blocking. duke@435: } ; duke@435: static YieldResult NakedYield () ; duke@435: static void yield_all(int attempts = 0); // Yields to all other threads including lower priority duke@435: static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing duke@435: static OSReturn set_priority(Thread* thread, ThreadPriority priority); duke@435: static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); duke@435: duke@435: static void interrupt(Thread* thread); duke@435: static bool is_interrupted(Thread* thread, bool clear_interrupted); duke@435: duke@435: static int pd_self_suspend_thread(Thread* thread); duke@435: duke@435: static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp); duke@435: static frame fetch_frame_from_context(void* ucVoid); duke@435: duke@435: static ExtendedPC get_thread_pc(Thread *thread); duke@435: static void breakpoint(); duke@435: duke@435: static address current_stack_pointer(); duke@435: static address current_stack_base(); duke@435: static size_t current_stack_size(); duke@435: roland@3606: static void verify_stack_alignment() PRODUCT_RETURN; roland@3606: duke@435: static int message_box(const char* title, const char* message); duke@435: static char* do_you_want_to_debug(const char* message); duke@435: duke@435: // run cmd in a separate process and return its exit code; or -1 on failures duke@435: static int fork_and_exec(char *cmd); duke@435: duke@435: // Set file to send error reports. 
duke@435: static void set_error_file(const char *logfile); duke@435: duke@435: // os::exit() is merged with vm_exit() duke@435: // static void exit(int num); duke@435: duke@435: // Terminate the VM, but don't exit the process duke@435: static void shutdown(); duke@435: duke@435: // Terminate with an error. Default is to generate a core file on platforms duke@435: // that support such things. This calls shutdown() and then aborts. duke@435: static void abort(bool dump_core = true); duke@435: duke@435: // Die immediately, no exit hook, no abort hook, no cleanup. duke@435: static void die(); duke@435: ikrylov@2322: // File i/o operations ikrylov@2322: static const int default_file_open_flags(); ikrylov@2322: static int open(const char *path, int oflag, int mode); vlivanov@5027: static FILE* open(int fd, const char* mode); ikrylov@2322: static int close(int fd); ikrylov@2322: static jlong lseek(int fd, jlong offset, int whence); ikrylov@2322: static char* native_path(char *path); ikrylov@2322: static int ftruncate(int fd, jlong length); ikrylov@2322: static int fsync(int fd); ikrylov@2322: static int available(int fd, jlong *bytes); ikrylov@2322: ikrylov@2322: //File i/o operations ikrylov@2322: ikrylov@2322: static size_t read(int fd, void *buf, unsigned int nBytes); ikrylov@2322: static size_t restartable_read(int fd, void *buf, unsigned int nBytes); ikrylov@2322: static size_t write(int fd, const void *buf, unsigned int nBytes); ikrylov@2322: duke@435: // Reading directories. 
duke@435: static DIR* opendir(const char* dirname); duke@435: static int readdir_buf_size(const char *path); duke@435: static struct dirent* readdir(DIR* dirp, dirent* dbuf); duke@435: static int closedir(DIR* dirp); duke@435: duke@435: // Dynamic library extension duke@435: static const char* dll_file_extension(); duke@435: duke@435: static const char* get_temp_directory(); vlivanov@5027: static const char* get_current_directory(char *buf, size_t buflen); duke@435: kamg@677: // Builds a platform-specific full library path given a ld path and lib name bpittore@4261: // Returns true if buffer contains full path to existing file, false otherwise bpittore@4261: static bool dll_build_name(char* buffer, size_t size, kamg@677: const char* pathname, const char* fname); kamg@677: duke@435: // Symbol lookup, find nearest function name; basically it implements duke@435: // dladdr() for all platforms. Name of the nearest function is copied dcubed@5365: // to buf. Distance from its base address is optionally returned as offset. duke@435: // If function name is not found, buf[0] is set to '\0' and offset is dcubed@5365: // set to -1 (if offset is non-NULL). duke@435: static bool dll_address_to_function_name(address addr, char* buf, duke@435: int buflen, int* offset); duke@435: duke@435: // Locate DLL/DSO. On success, full path of the library is copied to dcubed@5365: // buf, and offset is optionally set to be the distance between addr dcubed@5365: // and the library's base address. On failure, buf[0] is set to '\0' dcubed@5365: // and offset is set to -1 (if offset is non-NULL). duke@435: static bool dll_address_to_library_name(address addr, char* buf, duke@435: int buflen, int* offset); duke@435: duke@435: // Find out whether the pc is in the static code for jvm.dll/libjvm.so. 
duke@435: static bool address_is_in_vm(address addr); duke@435: duke@435: // Loads .dll/.so and duke@435: // in case of error it checks if .dll/.so was built for the duke@435: // same architecture as Hotspot is running on duke@435: static void* dll_load(const char *name, char *ebuf, int ebuflen); duke@435: kamg@677: // lookup symbol in a shared library kamg@677: static void* dll_lookup(void* handle, const char* name); kamg@677: ikrylov@2322: // Unload library ikrylov@2322: static void dll_unload(void *lib); ikrylov@2322: bpittore@5585: // Return the handle of this process bpittore@5585: static void* get_default_process_handle(); bpittore@5585: bpittore@5585: // Check for static linked agent library bpittore@5585: static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], bpittore@5585: size_t syms_len); bpittore@5585: bpittore@5585: // Find agent entry point bpittore@5585: static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib, bpittore@5585: const char *syms[], size_t syms_len); bpittore@5585: duke@435: // Print out system information; they are called by fatal error handler. duke@435: // Output format may be different on different platforms. 
duke@435: static void print_os_info(outputStream* st); nloodin@3783: static void print_os_info_brief(outputStream* st); duke@435: static void print_cpu_info(outputStream* st); jcoomes@2997: static void pd_print_cpu_info(outputStream* st); duke@435: static void print_memory_info(outputStream* st); duke@435: static void print_dll_info(outputStream* st); duke@435: static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len); duke@435: static void print_context(outputStream* st, void* context); never@2262: static void print_register_info(outputStream* st, void* context); duke@435: static void print_siginfo(outputStream* st, void* siginfo); duke@435: static void print_signal_handlers(outputStream* st, char* buf, size_t buflen); duke@435: static void print_date_and_time(outputStream* st); duke@435: never@2262: static void print_location(outputStream* st, intptr_t x, bool verbose = false); ikrylov@2322: static size_t lasterror(char *buf, size_t len); phh@3379: static int get_last_error(); bobv@2036: sla@2584: // Determines whether the calling process is being debugged by a user-mode debugger. sla@2584: static bool is_debugger_attached(); sla@2584: sla@2584: // wait for a key press if PauseAtExit is set sla@2584: static void wait_for_keypress_at_exit(void); sla@2584: duke@435: // The following two functions are used by fatal error handler to trace duke@435: // native (C) frames. They are not part of frame.hpp/frame.cpp because duke@435: // frame.hpp/cpp assume thread is JavaThread, and also because different duke@435: // OS/compiler may have different convention or provide different API to duke@435: // walk C frames. duke@435: // duke@435: // We don't attempt to become a debugger, so we only follow frames if that duke@435: // does not require a lookup in the unwind table, which is part of the binary duke@435: // file but may be unsafe to read after a fatal error. 
So on x86, we can duke@435: // only walk stack if %ebp is used as frame pointer; on ia64, it's not duke@435: // possible to walk C stack without having the unwind table. duke@435: static bool is_first_C_frame(frame *fr); duke@435: static frame get_sender_for_C_frame(frame *fr); duke@435: duke@435: // return current frame. pc() and sp() are set to NULL on failure. duke@435: static frame current_frame(); duke@435: duke@435: static void print_hex_dump(outputStream* st, address start, address end, int unitsize); duke@435: duke@435: // returns a string to describe the exception/signal; duke@435: // returns NULL if exception_code is not an OS exception/signal. duke@435: static const char* exception_name(int exception_code, char* buf, size_t buflen); duke@435: duke@435: // Returns native Java library, loads if necessary duke@435: static void* native_java_library(); duke@435: ikrylov@2322: // Fills in path to jvm.dll/libjvm.so (used by the Disassembler) duke@435: static void jvm_path(char *buf, jint buflen); duke@435: bobv@2036: // Returns true if we are running in a headless jre. bobv@2036: static bool is_headless_jre(); bobv@2036: duke@435: // JNI names duke@435: static void print_jni_name_prefix_on(outputStream* st, int args_size); duke@435: static void print_jni_name_suffix_on(outputStream* st, int args_size); duke@435: duke@435: // File conventions duke@435: static const char* file_separator(); duke@435: static const char* line_separator(); duke@435: static const char* path_separator(); duke@435: duke@435: // Init os specific system properties values duke@435: static void init_system_properties_values(); duke@435: duke@435: // IO operations, non-JVM_ version. 
duke@435: static int stat(const char* path, struct stat* sbuf); duke@435: static bool dir_is_empty(const char* path); duke@435: duke@435: // IO operations on binary files duke@435: static int create_binary_file(const char* path, bool rewrite_existing); duke@435: static jlong current_file_offset(int fd); duke@435: static jlong seek_to_file_offset(int fd, jlong offset); duke@435: duke@435: // Thread Local Storage duke@435: static int allocate_thread_local_storage(); duke@435: static void thread_local_storage_at_put(int index, void* value); duke@435: static void* thread_local_storage_at(int index); duke@435: static void free_thread_local_storage(int index); duke@435: zgu@3900: // Stack walk zgu@3900: static address get_caller_pc(int n = 0); zgu@3900: duke@435: // General allocation (must be MT-safe) zgu@3900: static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0); zgu@3900: static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0); zgu@3900: static void free (void *memblock, MEMFLAGS flags = mtNone); duke@435: static bool check_heap(bool force = false); // verify C heap integrity zgu@3900: static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup duke@435: duke@435: #ifndef PRODUCT kvn@2557: static julong num_mallocs; // # of calls to malloc/realloc kvn@2557: static julong alloc_bytes; // # of bytes allocated kvn@2557: static julong num_frees; // # of calls to free kvn@2557: static julong free_bytes; // # of bytes freed duke@435: #endif duke@435: ikrylov@2322: // SocketInterface (ex HPI SocketInterface ) ikrylov@2322: static int socket(int domain, int type, int protocol); ikrylov@2322: static int socket_close(int fd); ikrylov@2322: static int socket_shutdown(int fd, int howto); phh@3344: static int recv(int fd, char* buf, size_t nBytes, uint flags); phh@3344: static int send(int fd, char* buf, size_t nBytes, uint flags); phh@3344: static int raw_send(int fd, char* buf, size_t nBytes, uint flags); 
// Wait up to 'timeout' ms for fd to become readable
// (NOTE(review): return convention is platform-defined — confirm in os_<platform>.cpp).
static int timeout(int fd, long timeout);
static int listen(int fd, int count);
static int connect(int fd, struct sockaddr* him, socklen_t len);
static int bind(int fd, struct sockaddr* him, socklen_t len);
static int accept(int fd, struct sockaddr* him, socklen_t* len);
static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
                    struct sockaddr* from, socklen_t* fromlen);
static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
static int sendto(int fd, char* buf, size_t len, uint flags,
                  struct sockaddr* to, socklen_t tolen);
// Store the number of bytes available to read on fd into *pbytes.
static int socket_available(int fd, jint* pbytes);

static int get_sock_opt(int fd, int level, int optname,
                        char* optval, socklen_t* optlen);
static int set_sock_opt(int fd, int level, int optname,
                        const char* optval, socklen_t optlen);
static int get_host_name(char* name, int namelen);

static struct hostent* get_host_by_name(char* name);

// Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
static void signal_init();
static void signal_init_pd();       // platform-dependent part of signal_init()
static void signal_notify(int signal_number);
static void* signal(int signal_number, void* handler);
static void signal_raise(int signal_number);
static int signal_wait();
static int signal_lookup();
static void* user_handler();
static void terminate_signal_thread();
static int sigexitnum_pd();         // platform-dependent exit signal number

// random number generation
static long random(); // return 32bit pseudorandom number
static void init_random(long initval); // initialize random sequence

// Structured OS Exception support
// Invoke f (a Java call stub) under platform exception protection
// (SEH on Windows), so native faults during the call can be handled.
static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

// On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);

// Get the default path to the core file
// Returns the length of the string
static int get_core_path(char* buffer, size_t bufferSize);

// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// Thread CPU Time - return the fast estimate on a platform
// On Solaris - call gethrvtime (fast) - user time only
// On Linux - fast clock_gettime where available - user+sys
// - otherwise: very slow /proc fs - user+sys
// On Windows - GetThreadTimes - user+sys
static jlong current_thread_cpu_time();
static jlong thread_cpu_time(Thread* t);

// Thread CPU Time with user_sys_cpu_time parameter.
//
// If user_sys_cpu_time is true, user+sys time is returned.
// Otherwise, only user time is returned
static jlong current_thread_cpu_time(bool user_sys_cpu_time);
static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

// Return a bunch of info about the timers.
// Note that the returned info for these two functions may be different
// on some platforms
static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

static bool is_thread_cpu_time_supported();

// System loadavg support.
// Returns -1 if load average cannot be obtained.
static int loadavg(double loadavg[], int nelem);

// Hook for os specific jvm options that we don't want to abort on seeing
static bool obsolete_option(const JavaVMOption *option);

// Extensions
// (Textually spliced into this class body; declares extension members.)
#include "runtime/os_ext.hpp"

public:
// Callback run under platform crash protection; subclasses supply the
// work in call() (see the platform os_*.hpp for the protection mechanism).
class CrashProtectionCallback : public StackObj {
public:
  virtual void call() = 0;
};

// Platform dependent stuff
// Each os_<family>/<arch>.hpp is spliced into this class body and adds
// platform-specific members; exactly one family/arch pair is active.
#ifdef TARGET_OS_FAMILY_linux
# include "os_linux.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_solaris
# include "os_solaris.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_windows
# include "os_windows.hpp"
#endif
#ifdef TARGET_OS_FAMILY_aix
# include "os_aix.hpp"
# include "os_posix.hpp"
#endif
#ifdef TARGET_OS_FAMILY_bsd
# include "os_posix.hpp"
# include "os_bsd.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_x86
# include "os_linux_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_sparc
# include "os_linux_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_zero
# include "os_linux_zero.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_x86
# include "os_solaris_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_solaris_sparc
# include "os_solaris_sparc.hpp"
#endif
#ifdef TARGET_OS_ARCH_windows_x86
# include "os_windows_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_arm
# include "os_linux_arm.hpp"
#endif
#ifdef TARGET_OS_ARCH_linux_ppc
# include "os_linux_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_aix_ppc
# include "os_aix_ppc.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_x86
# include "os_bsd_x86.hpp"
#endif
#ifdef TARGET_OS_ARCH_bsd_zero
# include "os_bsd_zero.hpp"
#endif

public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
// No platform-specific code for printing the native stack.
// Fallback stub: reports "not handled" so the generic printer runs.
static bool platform_print_native_stack(outputStream* st, void* context,
                                        char *buf, int buf_size) {
  return false;
}
#endif

// debugging support (mostly used by debug.cpp but also fatal error handler)
static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

static bool dont_yield(); // when true, JVM_Yield() is nop
static void print_statistics();

// Thread priority helpers (implemented in OS-specific part)
static OSReturn set_native_priority(Thread* thread, int native_prio);
static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
// Maps each Java priority (0..CriticalPriority) to a native priority value.
static int java_to_os_priority[CriticalPriority + 1];
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
static void hint_no_preempt();

// Used at creation if requested by the diagnostic flag PauseAtStartup.
// Causes the VM to wait until an external stimulus has been applied
// (for Unix, that stimulus is a signal, for Windows, an external
// ResumeThread call)
static void pause();

// Builds a platform dependent Agent_OnLoad_ function name
// which is used to find statically linked in agents.
static char* build_agent_function_name(const char *sym, const char *cname,
                                       bool is_absolute_path);

// Immutable (thread, ucontext) pair handed to SuspendedThreadTask::do_task
// while the target thread is suspended; ucontext is the raw, OS-specific
// thread context.
class SuspendedThreadTaskContext {
public:
  SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
  Thread* thread() const { return _thread; }
  void* ucontext() const { return _ucontext; }
private:
  Thread* _thread;
  void* _ucontext;
};

// Work to be performed on a suspended thread: run() suspends _thread,
// invokes the subclass's do_task() with its context, then resumes it
// (run()/internal_do_task() are implemented in the platform os files).
class SuspendedThreadTask {
public:
  SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
  virtual ~SuspendedThreadTask() {}
  void run();
  bool is_done() { return _done; }
  // Subclass hook; called exactly while the target thread is suspended.
  virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
protected:
private:
  void internal_do_task();
  Thread* _thread;
  bool _done;
};

#ifndef TARGET_OS_FAMILY_windows
// Suspend/resume support
// Protocol:
//
// a thread starts in SR_RUNNING
//
// SR_RUNNING can go to
//   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
// SR_SUSPEND_REQUEST can go to
//   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
//   * SR_SUSPENDED if the stopped thread receives the signal and switches state
// SR_SUSPENDED can go to
//   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to
// resume
// SR_WAKEUP_REQUEST can go to
//   * SR_RUNNING when the stopped thread receives the signal
//   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
// State machine implementing the signal-based suspend/resume protocol
// described above (non-Windows only). All transitions funnel through
// switch_state(), which is defined in the platform os files.
class SuspendResume {
public:
  enum State {
    SR_RUNNING,
    SR_SUSPEND_REQUEST,
    SR_SUSPENDED,
    SR_WAKEUP_REQUEST
  };

private:
  // volatile: read/written from both the WatcherThread and the target
  // thread's signal handler.
  volatile State _state;

private:
  /* try to switch state from state "from" to state "to"
   * returns the state set after the method is complete
   */
  State switch_state(State from, State to);

public:
  SuspendResume() : _state(SR_RUNNING) { }

  State state() const { return _state; }

  // WatcherThread side: ask the target thread to suspend.
  State request_suspend() {
    return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
  }

  // WatcherThread side: withdraw a pending suspend request (timeout path).
  State cancel_suspend() {
    return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
  }

  // Target-thread side: acknowledge the suspend request.
  State suspended() {
    return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
  }

  // WatcherThread side: ask the suspended thread to resume.
  State request_wakeup() {
    return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
  }

  // Target-thread side: acknowledge the wakeup request.
  State running() {
    return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
  }

  bool is_running() const {
    return _state == SR_RUNNING;
  }

  bool is_suspend_request() const {
    return _state == SR_SUSPEND_REQUEST;
  }

  bool is_suspended() const {
    return _state == SR_SUSPENDED;
  }
};
#endif


protected:
static long _rand_seed; // seed for random number generator
static int _processor_count; // number of processors

// Expand 'format_string' (boot-classpath template) using the given home
// directory and separator characters.
static char* format_boot_path(const char* format_string,
                              const char* home,
                              int home_len,
                              char fileSep,
                              char pathSep);
static bool set_boot_path(char fileSep, char pathSep);
// Split 'path' on the path separator; returns a malloc'ed array of n entries.
static char** split_path(const char* path, int* n);

};

// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();

#endif // SHARE_VM_RUNTIME_OS_HPP