// NOTE(review): merge artifact neutralized — the file began with a stray
// timestamp ("Tue, 17 Oct 2017 12:58:25 +0800") and the word "merge",
// which are not valid C++ and would break compilation.
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 /*
26 * This file has been modified by Loongson Technology in 2015. These
27 * modifications are Copyright (c) 2015 Loongson Technology, and are made
28 * available on the same license terms set forth above.
29 */
31 #ifndef SHARE_VM_RUNTIME_OS_HPP
32 #define SHARE_VM_RUNTIME_OS_HPP
34 #include "jvmtifiles/jvmti.h"
35 #include "runtime/atomic.hpp"
36 #include "runtime/extendedPC.hpp"
37 #include "runtime/handles.hpp"
38 #include "utilities/top.hpp"
39 #ifdef TARGET_OS_FAMILY_linux
40 # include "jvm_linux.h"
41 # include <setjmp.h>
42 #endif
43 #ifdef TARGET_OS_FAMILY_solaris
44 # include "jvm_solaris.h"
45 # include <setjmp.h>
46 #endif
47 #ifdef TARGET_OS_FAMILY_windows
48 # include "jvm_windows.h"
49 #endif
50 #ifdef TARGET_OS_FAMILY_aix
51 # include "jvm_aix.h"
52 # include <setjmp.h>
53 #endif
54 #ifdef TARGET_OS_FAMILY_bsd
55 # include "jvm_bsd.h"
56 # include <setjmp.h>
57 # ifdef __APPLE__
58 # include <mach/mach_time.h>
59 # endif
60 #endif
62 class AgentLibrary;
64 // os defines the interface to operating system; this includes traditional
65 // OS services (time, I/O) as well as other functionality with system-
66 // dependent code.
68 typedef void (*dll_func)(...);
70 class Thread;
71 class JavaThread;
72 class Event;
73 class DLL;
74 class FileHandle;
75 class NativeCallStack;
77 template<class E> class GrowableArray;
79 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose
81 // Platform-independent error return values from OS functions
// Platform-independent error return values from OS functions.
enum OSReturn {
  OS_OK         =  0,        // Operation was successful
  OS_ERR        = -1,        // Operation failed
  OS_INTRPT     = -2,        // Operation was interrupted
  OS_TIMEOUT    = -3,        // Operation timed out
  OS_NOMEM      = -5,        // Operation failed for lack of memory
                             // (note: -4 is unused — historical gap in the numbering)
  OS_NORESOURCE = -6         // Operation failed for lack of non-memory resource
};
// Java-visible thread priorities plus VM-internal extremes.
enum ThreadPriority {        // JLS 20.20.1-3
  NoPriority       = -1,     // Initial non-priority value
  MinPriority      =  1,     // Minimum priority
  NormPriority     =  5,     // Normal (non-daemon) priority
  NearMaxPriority  =  9,     // High priority, used for VMThread
  MaxPriority      = 10,     // Highest priority, used for WatcherThread
                             // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11      // Critical thread priority (above any Java priority)
};
101 // Executable parameter flag for os::commit_memory() and
102 // os::commit_memory_or_exit().
103 const bool ExecMem = true;
105 // Typedef for structured exception handling support
106 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
108 class MallocTracker;
110 class os: AllStatic {
111 friend class VMStructs;
112 friend class MallocTracker;
113 public:
114 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel)
116 private:
117 static OSThread* _starting_thread;
118 static address _polling_page;
119 static volatile int32_t * _mem_serialize_page;
120 static uintptr_t _serialize_page_mask;
121 public:
122 static size_t _page_sizes[page_sizes_max];
124 private:
125 static void init_page_sizes(size_t default_page_size) {
126 _page_sizes[0] = default_page_size;
127 _page_sizes[1] = 0; // sentinel
128 }
130 static char* pd_reserve_memory(size_t bytes, char* addr = 0,
131 size_t alignment_hint = 0);
132 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr);
133 static void pd_split_reserved_memory(char *base, size_t size,
134 size_t split, bool realloc);
135 static bool pd_commit_memory(char* addr, size_t bytes, bool executable);
136 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
137 bool executable);
138 // Same as pd_commit_memory() that either succeeds or calls
139 // vm_exit_out_of_memory() with the specified mesg.
140 static void pd_commit_memory_or_exit(char* addr, size_t bytes,
141 bool executable, const char* mesg);
142 static void pd_commit_memory_or_exit(char* addr, size_t size,
143 size_t alignment_hint,
144 bool executable, const char* mesg);
145 static bool pd_uncommit_memory(char* addr, size_t bytes);
146 static bool pd_release_memory(char* addr, size_t bytes);
148 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset,
149 char *addr, size_t bytes, bool read_only = false,
150 bool allow_exec = false);
151 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset,
152 char *addr, size_t bytes, bool read_only,
153 bool allow_exec);
154 static bool pd_unmap_memory(char *addr, size_t bytes);
155 static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint);
156 static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint);
158 static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
160 public:
161 static void init(void); // Called before command line parsing
162 static void init_before_ergo(void); // Called after command line parsing
163 // before VM ergonomics processing.
164 static jint init_2(void); // Called after command line parsing
165 // and VM ergonomics processing
// Called from init_globals() in init.cpp. Forwards to the extension
// hook declared in runtime/os_ext.hpp so platform/vendor extensions
// can perform their own global initialization.
static void init_globals(void) { // Called from init_globals() in init.cpp
  init_globals_ext();
}
170 // File names are case-insensitive on windows only
171 // Override me as needed
172 static int file_name_strcmp(const char* s1, const char* s2);
174 // get/unset environment variable
175 static bool getenv(const char* name, char* buffer, int len);
176 static bool unsetenv(const char* name);
178 static bool have_special_privileges();
180 static jlong javaTimeMillis();
181 static jlong javaTimeNanos();
182 static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr);
183 static void run_periodic_checks();
186 // Returns the elapsed time in seconds since the vm started.
187 static double elapsedTime();
189 // Returns real time in seconds since an arbitrary point
190 // in the past.
191 static bool getTimesSecs(double* process_real_time,
192 double* process_user_time,
193 double* process_system_time);
195 // Interface to the performance counter
196 static jlong elapsed_counter();
197 static jlong elapsed_frequency();
199 // The "virtual time" of a thread is the amount of time a thread has
200 // actually run. The first function indicates whether the OS supports
201 // this functionality for the current thread, and if so:
202 // * the second enables vtime tracking (if that is required).
203 // * the third tells whether vtime is enabled.
204 // * the fourth returns the elapsed virtual time for the current
205 // thread.
206 static bool supports_vtime();
207 static bool enable_vtime();
208 static bool vtime_enabled();
209 static double elapsedVTime();
211 // Return current local time in a string (YYYY-MM-DD HH:MM:SS).
212 // It is MT safe, but not async-safe, as reading time zone
213 // information may require a lock on some platforms.
214 static char* local_time_string(char *buf, size_t buflen);
215 static struct tm* localtime_pd (const time_t* clock, struct tm* res);
216 // Fill in buffer with current local time as an ISO-8601 string.
217 // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz.
218 // Returns buffer, or NULL if it failed.
219 static char* iso8601_time(char* buffer, size_t buffer_length);
221 // Interface for detecting multiprocessor system
222 static inline bool is_MP() {
223 // During bootstrap if _processor_count is not yet initialized
224 // we claim to be MP as that is safest. If any platform has a
225 // stub generator that might be triggered in this phase and for
226 // which being declared MP when in fact not, is a problem - then
227 // the bootstrap routine for the stub generator needs to check
228 // the processor count directly and leave the bootstrap routine
229 // in place until called after initialization has ocurred.
230 return (_processor_count != 1) || AssumeMP;
231 }
232 static julong available_memory();
233 static julong physical_memory();
234 static bool has_allocatable_memory_limit(julong* limit);
235 static bool is_server_class_machine();
237 // number of CPUs
// Number of CPUs recorded for this machine (see set_processor_count).
static int processor_count() {
  return _processor_count;
}
// Records the CPU count; called during VM initialization.
static void set_processor_count(int count) { _processor_count = count; }
243 // Returns the number of CPUs this process is currently allowed to run on.
244 // Note that on some OSes this can change dynamically.
245 static int active_processor_count();
247 // Bind processes to processors.
248 // This is a two step procedure:
249 // first you generate a distribution of processes to processors,
250 // then you bind processes according to that distribution.
251 // Compute a distribution for number of processes to processors.
252 // Stores the processor id's into the distribution array argument.
253 // Returns true if it worked, false if it didn't.
254 static bool distribute_processes(uint length, uint* distribution);
255 // Binds the current process to a processor.
256 // Returns true if it worked, false if it didn't.
257 static bool bind_to_processor(uint processor_id);
259 // Give a name to the current thread.
260 static void set_native_thread_name(const char *name);
262 // Interface for stack banging (predetect possible stack overflow for
263 // exception processing) There are guard pages, and above that shadow
264 // pages for stack overflow checking.
265 static bool uses_stack_guard_pages();
266 static bool allocate_stack_guard_pages();
267 static void bang_stack_shadow_pages();
268 static bool stack_shadow_pages_available(Thread *thread, methodHandle method);
270 // OS interface to Virtual Memory
272 // Return the default page size.
273 static int vm_page_size();
275 // Returns the page size to use for a region of memory.
276 // region_size / min_pages will always be greater than or equal to the
277 // returned value. The returned value will divide region_size.
278 static size_t page_size_for_region_aligned(size_t region_size, size_t min_pages);
280 // Returns the page size to use for a region of memory.
281 // region_size / min_pages will always be greater than or equal to the
282 // returned value. The returned value might not divide region_size.
283 static size_t page_size_for_region_unaligned(size_t region_size, size_t min_pages);
285 // Return the largest page size that can be used
// Return the largest page size that can be used.
static size_t max_page_size() {
  // The _page_sizes array is sorted in descending order, so the
  // first entry is the largest supported page size.
  return _page_sizes[0];
}
291 // Methods for tracing page sizes returned by the above method; enabled by
292 // TracePageSizes. The region_{min,max}_size parameters should be the values
293 // passed to page_size_for_region() and page_size should be the result of that
294 // call. The (optional) base and size parameters should come from the
295 // ReservedSpace base() and size() methods.
296 static void trace_page_sizes(const char* str, const size_t* page_sizes,
297 int count) PRODUCT_RETURN;
298 static void trace_page_sizes(const char* str, const size_t region_min_size,
299 const size_t region_max_size,
300 const size_t page_size,
301 const char* base = NULL,
302 const size_t size = 0) PRODUCT_RETURN;
304 static int vm_allocation_granularity();
305 static char* reserve_memory(size_t bytes, char* addr = 0,
306 size_t alignment_hint = 0);
307 static char* reserve_memory(size_t bytes, char* addr,
308 size_t alignment_hint, MEMFLAGS flags);
309 static char* reserve_memory_aligned(size_t size, size_t alignment);
310 static char* attempt_reserve_memory_at(size_t bytes, char* addr);
311 static void split_reserved_memory(char *base, size_t size,
312 size_t split, bool realloc);
313 static bool commit_memory(char* addr, size_t bytes, bool executable);
314 static bool commit_memory(char* addr, size_t size, size_t alignment_hint,
315 bool executable);
316 // Same as commit_memory() that either succeeds or calls
317 // vm_exit_out_of_memory() with the specified mesg.
318 static void commit_memory_or_exit(char* addr, size_t bytes,
319 bool executable, const char* mesg);
320 static void commit_memory_or_exit(char* addr, size_t size,
321 size_t alignment_hint,
322 bool executable, const char* mesg);
323 static bool uncommit_memory(char* addr, size_t bytes);
324 static bool release_memory(char* addr, size_t bytes);
326 // Touch memory pages that cover the memory range from start to end (exclusive)
327 // to make the OS back the memory range with actual memory.
328 // Current implementation may not touch the last page if unaligned addresses
329 // are passed.
330 static void pretouch_memory(char* start, char* end);
332 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX };
333 static bool protect_memory(char* addr, size_t bytes, ProtType prot,
334 bool is_committed = true);
336 static bool guard_memory(char* addr, size_t bytes);
337 static bool unguard_memory(char* addr, size_t bytes);
338 static bool create_stack_guard_pages(char* addr, size_t bytes);
339 static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
340 static bool remove_stack_guard_pages(char* addr, size_t bytes);
342 static char* map_memory(int fd, const char* file_name, size_t file_offset,
343 char *addr, size_t bytes, bool read_only = false,
344 bool allow_exec = false);
345 static char* remap_memory(int fd, const char* file_name, size_t file_offset,
346 char *addr, size_t bytes, bool read_only,
347 bool allow_exec);
348 static bool unmap_memory(char *addr, size_t bytes);
349 static void free_memory(char *addr, size_t bytes, size_t alignment_hint);
350 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
352 // NUMA-specific interface
353 static bool numa_has_static_binding();
354 static bool numa_has_group_homing();
355 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint);
356 static void numa_make_global(char *addr, size_t bytes);
357 static size_t numa_get_groups_num();
358 static size_t numa_get_leaf_groups(int *ids, size_t size);
359 static bool numa_topology_changed();
360 static int numa_get_group_id();
362 // Page manipulation
// Per-page attributes reported by get_page_info()/scan_pages().
struct page_info {
  size_t size;    // page size in bytes
  int lgrp_id;    // NUMA locality group of the page (cf. numa_get_group_id)
};
367 static bool get_page_info(char *start, page_info* info);
368 static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found);
370 static char* non_memory_address_word();
371 // reserve, commit and pin the entire memory region
372 static char* reserve_memory_special(size_t size, size_t alignment,
373 char* addr, bool executable);
374 static bool release_memory_special(char* addr, size_t bytes);
375 static void large_page_init();
376 static size_t large_page_size();
377 static bool can_commit_large_page_memory();
378 static bool can_execute_large_page_memory();
380 // OS interface to polling page
381 static address get_polling_page() { return _polling_page; }
382 static void set_polling_page(address page) { _polling_page = page; }
383 static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); }
384 static void make_polling_page_unreadable();
385 static void make_polling_page_readable();
387 // Routines used to serialize the thread state without using membars
388 static void serialize_thread_states();
390 // Since we write to the serialize page from every thread, we
391 // want stores to be on unique cache lines whenever possible
392 // in order to minimize CPU cross talk. We pre-compute the
393 // amount to shift the thread* to make this offset unique to
394 // each thread.
// Amount to shift a Thread* by when computing that thread's offset
// into the serialize page (keeps per-thread stores on distinct
// cache lines; see write_memory_serialize_page).
static int get_serialize_page_shift_count() {
  return SerializePageShiftCount;
}
// Records the precomputed mask that bounds per-thread offsets into
// the serialize page (applied in write_memory_serialize_page).
static void set_serialize_page_mask(uintptr_t mask) {
  _serialize_page_mask = mask;
}
403 static unsigned int get_serialize_page_mask() {
404 return _serialize_page_mask;
405 }
407 static void set_memory_serialize_page(address page);
// Base address of the memory-serialize page (NULL until
// set_memory_serialize_page has been called).
static address get_memory_serialize_page() {
  return (address)_mem_serialize_page;
}
413 static inline void write_memory_serialize_page(JavaThread *thread) {
414 uintptr_t page_offset = ((uintptr_t)thread >>
415 get_serialize_page_shift_count()) &
416 get_serialize_page_mask();
417 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1;
418 }
420 static bool is_memory_serialize_page(JavaThread *thread, address addr) {
421 if (UseMembar) return false;
422 // Previously this function calculated the exact address of this
423 // thread's serialize page, and checked if the faulting address
424 // was equal. However, some platforms mask off faulting addresses
425 // to the page size, so now we just check that the address is
426 // within the page. This makes the thread argument unnecessary,
427 // but we retain the NULL check to preserve existing behaviour.
428 if (thread == NULL) return false;
429 address page = (address) _mem_serialize_page;
430 return addr >= page && addr < (page + os::vm_page_size());
431 }
433 static void block_on_serialize_page_trap();
435 // threads
// Kinds of native threads passed to create_thread().
enum ThreadType {
  vm_thread,
  cgc_thread,       // Concurrent GC thread
  pgc_thread,       // Parallel GC thread
  java_thread,
  compiler_thread,
  watcher_thread,
  os_thread
};
447 static bool create_thread(Thread* thread,
448 ThreadType thr_type,
449 size_t stack_size = 0);
450 static bool create_main_thread(JavaThread* thread);
451 static bool create_attached_thread(JavaThread* thread);
452 static void pd_start_thread(Thread* thread);
453 static void start_thread(Thread* thread);
455 static void initialize_thread(Thread* thr);
456 static void free_thread(OSThread* osthread);
458 // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit
459 static intx current_thread_id();
460 static int current_process_id();
461 static int sleep(Thread* thread, jlong ms, bool interruptable);
462 // Short standalone OS sleep suitable for slow path spin loop.
463 // Ignores Thread.interrupt() (so keep it short).
464 // ms = 0, will sleep for the least amount of time allowed by the OS.
465 static void naked_short_sleep(jlong ms);
466 static void infinite_sleep(); // never returns, use with CAUTION
467 static void yield(); // Yields to all threads with same priority
// Outcome of NakedYield().
enum YieldResult {
  YIELD_SWITCHED = 1,  // caller descheduled; other ready threads exist & ran
  YIELD_NONEREADY = 0, // no other runnable/ready threads —
                       // the platform-specific yield returned immediately
  YIELD_UNKNOWN = -1   // unknown: platform doesn't report _SWITCHED or _NONEREADY
  // YIELD_SWITCHED and YIELD_NONEREADY imply the platform supports a "strong"
  // yield that can be used in lieu of blocking.
} ;
476 static YieldResult NakedYield () ;
477 static void yield_all(int attempts = 0); // Yields to all other threads including lower priority
478 static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing
479 static OSReturn set_priority(Thread* thread, ThreadPriority priority);
480 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority);
482 static void interrupt(Thread* thread);
483 static bool is_interrupted(Thread* thread, bool clear_interrupted);
485 static int pd_self_suspend_thread(Thread* thread);
487 static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp);
488 static frame fetch_frame_from_context(void* ucVoid);
490 static ExtendedPC get_thread_pc(Thread *thread);
491 static void breakpoint();
493 static address current_stack_pointer();
494 static address current_stack_base();
495 static size_t current_stack_size();
497 static void verify_stack_alignment() PRODUCT_RETURN;
499 static int message_box(const char* title, const char* message);
500 static char* do_you_want_to_debug(const char* message);
502 // run cmd in a separate process and return its exit code; or -1 on failures
503 static int fork_and_exec(char *cmd);
505 // os::exit() is merged with vm_exit()
506 // static void exit(int num);
508 // Terminate the VM, but don't exit the process
509 static void shutdown();
511 // Terminate with an error. Default is to generate a core file on platforms
512 // that support such things. This calls shutdown() and then aborts.
513 static void abort(bool dump_core = true);
515 // Die immediately, no exit hook, no abort hook, no cleanup.
516 static void die();
518 // File i/o operations
519 static const int default_file_open_flags();
520 static int open(const char *path, int oflag, int mode);
521 static FILE* open(int fd, const char* mode);
522 static int close(int fd);
523 static jlong lseek(int fd, jlong offset, int whence);
524 static char* native_path(char *path);
525 static int ftruncate(int fd, jlong length);
526 static int fsync(int fd);
527 static int available(int fd, jlong *bytes);
// File i/o operations (raw read/write primitives)
531 static size_t read(int fd, void *buf, unsigned int nBytes);
532 static size_t restartable_read(int fd, void *buf, unsigned int nBytes);
533 static size_t write(int fd, const void *buf, unsigned int nBytes);
535 // Reading directories.
536 static DIR* opendir(const char* dirname);
537 static int readdir_buf_size(const char *path);
538 static struct dirent* readdir(DIR* dirp, dirent* dbuf);
539 static int closedir(DIR* dirp);
541 // Dynamic library extension
542 static const char* dll_file_extension();
544 static const char* get_temp_directory();
545 static const char* get_current_directory(char *buf, size_t buflen);
547 // Builds a platform-specific full library path given a ld path and lib name
548 // Returns true if buffer contains full path to existing file, false otherwise
549 static bool dll_build_name(char* buffer, size_t size,
550 const char* pathname, const char* fname);
552 // Symbol lookup, find nearest function name; basically it implements
553 // dladdr() for all platforms. Name of the nearest function is copied
554 // to buf. Distance from its base address is optionally returned as offset.
555 // If function name is not found, buf[0] is set to '\0' and offset is
556 // set to -1 (if offset is non-NULL).
557 static bool dll_address_to_function_name(address addr, char* buf,
558 int buflen, int* offset);
560 // Locate DLL/DSO. On success, full path of the library is copied to
561 // buf, and offset is optionally set to be the distance between addr
562 // and the library's base address. On failure, buf[0] is set to '\0'
563 // and offset is set to -1 (if offset is non-NULL).
564 static bool dll_address_to_library_name(address addr, char* buf,
565 int buflen, int* offset);
567 // Find out whether the pc is in the static code for jvm.dll/libjvm.so.
568 static bool address_is_in_vm(address addr);
570 // Loads .dll/.so and
571 // in case of error it checks if .dll/.so was built for the
572 // same architecture as Hotspot is running on
573 static void* dll_load(const char *name, char *ebuf, int ebuflen);
575 // lookup symbol in a shared library
576 static void* dll_lookup(void* handle, const char* name);
578 // Unload library
579 static void dll_unload(void *lib);
581 // Return the handle of this process
582 static void* get_default_process_handle();
584 // Check for static linked agent library
585 static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[],
586 size_t syms_len);
588 // Find agent entry point
589 static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib,
590 const char *syms[], size_t syms_len);
592 // Print out system information; they are called by fatal error handler.
593 // Output format may be different on different platforms.
594 static void print_os_info(outputStream* st);
595 static void print_os_info_brief(outputStream* st);
596 static void print_cpu_info(outputStream* st);
597 static void pd_print_cpu_info(outputStream* st);
598 static void print_memory_info(outputStream* st);
599 static void print_dll_info(outputStream* st);
600 static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len);
601 static void print_context(outputStream* st, void* context);
602 static void print_register_info(outputStream* st, void* context);
603 static void print_siginfo(outputStream* st, void* siginfo);
604 static void print_signal_handlers(outputStream* st, char* buf, size_t buflen);
605 static void print_date_and_time(outputStream* st);
607 static void print_location(outputStream* st, intptr_t x, bool verbose = false);
608 static size_t lasterror(char *buf, size_t len);
609 static int get_last_error();
611 // Determines whether the calling process is being debugged by a user-mode debugger.
612 static bool is_debugger_attached();
614 // wait for a key press if PauseAtExit is set
615 static void wait_for_keypress_at_exit(void);
617 // The following two functions are used by fatal error handler to trace
618 // native (C) frames. They are not part of frame.hpp/frame.cpp because
619 // frame.hpp/cpp assume thread is JavaThread, and also because different
620 // OS/compiler may have different convention or provide different API to
621 // walk C frames.
622 //
623 // We don't attempt to become a debugger, so we only follow frames if that
624 // does not require a lookup in the unwind table, which is part of the binary
625 // file but may be unsafe to read after a fatal error. So on x86, we can
626 // only walk stack if %ebp is used as frame pointer; on ia64, it's not
627 // possible to walk C stack without having the unwind table.
628 static bool is_first_C_frame(frame *fr);
629 static frame get_sender_for_C_frame(frame *fr);
631 // return current frame. pc() and sp() are set to NULL on failure.
632 static frame current_frame();
634 static void print_hex_dump(outputStream* st, address start, address end, int unitsize);
636 // returns a string to describe the exception/signal;
637 // returns NULL if exception_code is not an OS exception/signal.
638 static const char* exception_name(int exception_code, char* buf, size_t buflen);
640 // Returns native Java library, loads if necessary
641 static void* native_java_library();
643 // Fills in path to jvm.dll/libjvm.so (used by the Disassembler)
644 static void jvm_path(char *buf, jint buflen);
646 // Returns true if we are running in a headless jre.
647 static bool is_headless_jre();
649 // JNI names
650 static void print_jni_name_prefix_on(outputStream* st, int args_size);
651 static void print_jni_name_suffix_on(outputStream* st, int args_size);
653 // File conventions
654 static const char* file_separator();
655 static const char* line_separator();
656 static const char* path_separator();
658 // Init os specific system properties values
659 static void init_system_properties_values();
661 // IO operations, non-JVM_ version.
662 static int stat(const char* path, struct stat* sbuf);
663 static bool dir_is_empty(const char* path);
665 // IO operations on binary files
666 static int create_binary_file(const char* path, bool rewrite_existing);
667 static jlong current_file_offset(int fd);
668 static jlong seek_to_file_offset(int fd, jlong offset);
670 // Thread Local Storage
671 static int allocate_thread_local_storage();
672 static void thread_local_storage_at_put(int index, void* value);
673 static void* thread_local_storage_at(int index);
674 static void free_thread_local_storage(int index);
676 // Retrieve native stack frames.
677 // Parameter:
// stack: an array in which to store stack pointers.
679 // frames: size of above array.
680 // toSkip: number of stack frames to skip at the beginning.
681 // Return: number of stack frames captured.
682 static int get_native_stack(address* stack, int size, int toSkip = 0);
684 // General allocation (must be MT-safe)
685 static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack);
686 static void* malloc (size_t size, MEMFLAGS flags);
687 static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack);
688 static void* realloc (void *memblock, size_t size, MEMFLAGS flag);
690 static void free (void *memblock, MEMFLAGS flags = mtNone);
691 static bool check_heap(bool force = false); // verify C heap integrity
692 static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup
694 #ifndef PRODUCT
695 static julong num_mallocs; // # of calls to malloc/realloc
696 static julong alloc_bytes; // # of bytes allocated
697 static julong num_frees; // # of calls to free
698 static julong free_bytes; // # of bytes freed
699 #endif
701 // SocketInterface (ex HPI SocketInterface )
702 static int socket(int domain, int type, int protocol);
703 static int socket_close(int fd);
704 static int socket_shutdown(int fd, int howto);
705 static int recv(int fd, char* buf, size_t nBytes, uint flags);
706 static int send(int fd, char* buf, size_t nBytes, uint flags);
707 static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
708 static int timeout(int fd, long timeout);
709 static int listen(int fd, int count);
710 static int connect(int fd, struct sockaddr* him, socklen_t len);
711 static int bind(int fd, struct sockaddr* him, socklen_t len);
712 static int accept(int fd, struct sockaddr* him, socklen_t* len);
713 static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
714 struct sockaddr* from, socklen_t* fromlen);
715 static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
716 static int sendto(int fd, char* buf, size_t len, uint flags,
717 struct sockaddr* to, socklen_t tolen);
718 static int socket_available(int fd, jint* pbytes);
720 static int get_sock_opt(int fd, int level, int optname,
721 char* optval, socklen_t* optlen);
722 static int set_sock_opt(int fd, int level, int optname,
723 const char* optval, socklen_t optlen);
724 static int get_host_name(char* name, int namelen);
726 static struct hostent* get_host_by_name(char* name);
728 // Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
729 static void signal_init();
730 static void signal_init_pd();
731 static void signal_notify(int signal_number);
732 static void* signal(int signal_number, void* handler);
733 static void signal_raise(int signal_number);
734 static int signal_wait();
735 static int signal_lookup();
736 static void* user_handler();
737 static void terminate_signal_thread();
738 static int sigexitnum_pd();
740 // random number generation
741 static long random(); // return 32bit pseudorandom number
742 static void init_random(long initval); // initialize random sequence
744 // Structured OS Exception support
745 static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
747 // On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
748 static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);
750 // Get the default path to the core file
751 // Returns the length of the string
752 static int get_core_path(char* buffer, size_t bufferSize);
  // JVMTI & JVM monitoring and management support
  // The thread_cpu_time() and current_thread_cpu_time() are only
  // supported if is_thread_cpu_time_supported() returns true.
  // They are not supported on Solaris T1.

  // Thread CPU Time - return the fast estimate on a platform
  // On Solaris - call gethrvtime (fast) - user time only
  // On Linux   - fast clock_gettime where available - user+sys
  //            - otherwise: very slow /proc fs - user+sys
  // On Windows - GetThreadTimes - user+sys
  static jlong current_thread_cpu_time();       // CPU time of the calling thread
  static jlong thread_cpu_time(Thread* t);      // CPU time of an arbitrary thread t

  // Thread CPU Time with user_sys_cpu_time parameter.
  //
  // If user_sys_cpu_time is true, user+sys time is returned.
  // Otherwise, only user time is returned
  static jlong current_thread_cpu_time(bool user_sys_cpu_time);
  static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

  // Fill *info_ptr with a description of the timer backing the calls above
  // (kind, resolution); see the JVMTI jvmtiTimerInfo structure.
  // Note that the returned info for these two functions may be different
  // on some platforms.
  static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
  static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

  static bool is_thread_cpu_time_supported();

  // System loadavg support. Returns -1 if load average cannot be obtained.
  static int loadavg(double loadavg[], int nelem);

  // Hook for os specific jvm options that we don't want to abort on seeing
  static bool obsolete_option(const JavaVMOption *option);
788 // Extensions
789 #include "runtime/os_ext.hpp"
791 public:
  // Abstract, stack-allocated callback: implementors override call() with the
  // work to run (used by the crash-protection machinery pulled in via
  // os_ext.hpp above — confirm usage in the platform files).
  class CrashProtectionCallback : public StackObj {
  public:
    virtual void call() = 0;
  };
797 // Platform dependent stuff
798 #ifdef TARGET_OS_FAMILY_linux
799 # include "os_linux.hpp"
800 # include "os_posix.hpp"
801 #endif
802 #ifdef TARGET_OS_FAMILY_solaris
803 # include "os_solaris.hpp"
804 # include "os_posix.hpp"
805 #endif
806 #ifdef TARGET_OS_FAMILY_windows
807 # include "os_windows.hpp"
808 #endif
809 #ifdef TARGET_OS_FAMILY_aix
810 # include "os_aix.hpp"
811 # include "os_posix.hpp"
812 #endif
813 #ifdef TARGET_OS_FAMILY_bsd
814 # include "os_posix.hpp"
815 # include "os_bsd.hpp"
816 #endif
817 #ifdef TARGET_OS_ARCH_linux_x86
818 # include "os_linux_x86.hpp"
819 #endif
820 #ifdef TARGET_OS_ARCH_linux_mips
821 # include "os_linux_mips.hpp"
822 #endif
823 #ifdef TARGET_OS_ARCH_linux_sparc
824 # include "os_linux_sparc.hpp"
825 #endif
826 #ifdef TARGET_OS_ARCH_linux_zero
827 # include "os_linux_zero.hpp"
828 #endif
829 #ifdef TARGET_OS_ARCH_solaris_x86
830 # include "os_solaris_x86.hpp"
831 #endif
832 #ifdef TARGET_OS_ARCH_solaris_sparc
833 # include "os_solaris_sparc.hpp"
834 #endif
835 #ifdef TARGET_OS_ARCH_windows_x86
836 # include "os_windows_x86.hpp"
837 #endif
838 #ifdef TARGET_OS_ARCH_linux_arm
839 # include "os_linux_arm.hpp"
840 #endif
841 #ifdef TARGET_OS_ARCH_linux_ppc
842 # include "os_linux_ppc.hpp"
843 #endif
844 #ifdef TARGET_OS_ARCH_aix_ppc
845 # include "os_aix_ppc.hpp"
846 #endif
847 #ifdef TARGET_OS_ARCH_bsd_x86
848 # include "os_bsd_x86.hpp"
849 #endif
850 #ifdef TARGET_OS_ARCH_bsd_zero
851 # include "os_bsd_zero.hpp"
852 #endif
854 public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
  // No platform-specific code for printing the native stack.
  // This stub returns false to tell the caller nothing was printed, so the
  // caller presumably falls back to the shared stack printer — confirm in
  // the fatal-error handler.
  static bool platform_print_native_stack(outputStream* st, void* context,
                                          char *buf, int buf_size) {
    return false;
  }
#endif
  // debugging support (mostly used by debug.cpp but also fatal error handler)
  static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

  static bool dont_yield();                     // when true, JVM_Yield() is nop
  static void print_statistics();

  // Thread priority helpers (implemented in OS-specific part)
  static OSReturn set_native_priority(Thread* thread, int native_prio);
  static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
  // Mapping table from Java thread priorities to OS-native priorities,
  // indexed by Java priority (0 .. CriticalPriority).
  static int java_to_os_priority[CriticalPriority + 1];
  // Hint to the underlying OS that a task switch would not be good.
  // Void return because it's a hint and can fail.
  static void hint_no_preempt();

  // Used at creation if requested by the diagnostic flag PauseAtStartup.
  // Causes the VM to wait until an external stimulus has been applied
  // (for Unix, that stimulus is a signal, for Windows, an external
  // ResumeThread call)
  static void pause();

  // Builds a platform dependent Agent_OnLoad_<libname> function name
  // which is used to find statically linked in agents.
  static char* build_agent_function_name(const char *sym, const char *cname,
                                         bool is_absolute_path);
888 class SuspendedThreadTaskContext {
889 public:
890 SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
891 Thread* thread() const { return _thread; }
892 void* ucontext() const { return _ucontext; }
893 private:
894 Thread* _thread;
895 void* _ucontext;
896 };
898 class SuspendedThreadTask {
899 public:
900 SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
901 virtual ~SuspendedThreadTask() {}
902 void run();
903 bool is_done() { return _done; }
904 virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
905 protected:
906 private:
907 void internal_do_task();
908 Thread* _thread;
909 bool _done;
910 };
#ifndef TARGET_OS_FAMILY_windows
  // Suspend/resume support
  // Protocol:
  //
  // a thread starts in SR_RUNNING
  //
  // SR_RUNNING can go to
  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  // SR_SUSPEND_REQUEST can go to
  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  // SR_SUSPENDED can go to
  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  // SR_WAKEUP_REQUEST can go to
  //   * SR_RUNNING when the stopped thread receives the signal
  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
  //
  // State machine implementing the protocol above. Every transition funnels
  // through switch_state() (defined out of line); each helper below encodes
  // exactly one legal edge and returns the state in effect afterwards, which
  // may differ from the requested one if the transition did not apply —
  // TODO(review): confirm switch_state() semantics in the platform files.
  class SuspendResume {
  public:
    enum State {
      SR_RUNNING,
      SR_SUSPEND_REQUEST,
      SR_SUSPENDED,
      SR_WAKEUP_REQUEST
    };

  private:
    // Current protocol state; volatile because it is accessed by both the
    // WatcherThread and the target thread (see protocol comment above).
    volatile State _state;

  private:
    /* try to switch state from state "from" to state "to"
     * returns the state set after the method is complete
     */
    State switch_state(State from, State to);

  public:
    SuspendResume() : _state(SR_RUNNING) { }

    // Plain read of the current state (no synchronization implied).
    State state() const { return _state; }

    // WatcherThread asks the target to stop: RUNNING -> SUSPEND_REQUEST.
    State request_suspend() {
      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
    }

    // WatcherThread withdraws a pending request: SUSPEND_REQUEST -> RUNNING.
    State cancel_suspend() {
      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
    }

    // Target thread acknowledges the request: SUSPEND_REQUEST -> SUSPENDED.
    State suspended() {
      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
    }

    // WatcherThread asks the target to continue: SUSPENDED -> WAKEUP_REQUEST.
    State request_wakeup() {
      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
    }

    // Target thread resumes: WAKEUP_REQUEST -> RUNNING.
    State running() {
      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
    }

    bool is_running() const {
      return _state == SR_RUNNING;
    }

    bool is_suspend_request() const {
      return _state == SR_SUSPEND_REQUEST;
    }

    bool is_suspended() const {
      return _state == SR_SUSPENDED;
    }
  };
#endif
 protected:
  static long _rand_seed;          // seed for random number generator
  static int _processor_count;     // number of processors

  // Expand format_string using home / fileSep / pathSep to build a boot
  // class path string — TODO(review): confirm allocation/ownership of the
  // returned buffer in os.cpp.
  static char* format_boot_path(const char* format_string,
                                const char* home,
                                int home_len,
                                char fileSep,
                                char pathSep);
  // Compute and install the boot class path; returns false on failure.
  static bool set_boot_path(char fileSep, char pathSep);
  // Split 'path' at path separators; *n receives the element count.
  static char** split_path(const char* path, int* n);
998 };
// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.
// (Return value semantics are platform-defined — see the per-platform
// implementations.)
extern "C" int SpinPause();
1007 #endif // SHARE_VM_RUNTIME_OS_HPP