|
1 /* |
|
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #ifndef SHARE_VM_RUNTIME_OS_HPP |
|
26 #define SHARE_VM_RUNTIME_OS_HPP |
|
27 |
|
28 #include "jvmtifiles/jvmti.h" |
|
29 #include "runtime/atomic.hpp" |
|
30 #include "runtime/extendedPC.hpp" |
|
31 #include "runtime/handles.hpp" |
|
32 #include "utilities/top.hpp" |
|
33 #ifdef TARGET_OS_FAMILY_linux |
|
34 # include "jvm_linux.h" |
|
35 # include <setjmp.h> |
|
36 #endif |
|
37 #ifdef TARGET_OS_FAMILY_solaris |
|
38 # include "jvm_solaris.h" |
|
39 # include <setjmp.h> |
|
40 #endif |
|
41 #ifdef TARGET_OS_FAMILY_windows |
|
42 # include "jvm_windows.h" |
|
43 #endif |
|
44 #ifdef TARGET_OS_FAMILY_aix |
|
45 # include "jvm_aix.h" |
|
46 # include <setjmp.h> |
|
47 #endif |
|
48 #ifdef TARGET_OS_FAMILY_bsd |
|
49 # include "jvm_bsd.h" |
|
50 # include <setjmp.h> |
|
51 # ifdef __APPLE__ |
|
52 # include <mach/mach_time.h> |
|
53 # endif |
|
54 #endif |
|
55 |
|
56 class AgentLibrary; |
|
57 |
|
58 // os defines the interface to operating system; this includes traditional |
|
59 // OS services (time, I/O) as well as other functionality with system- |
|
60 // dependent code. |
|
61 |
|
62 typedef void (*dll_func)(...); |
|
63 |
|
64 class Thread; |
|
65 class JavaThread; |
|
66 class Event; |
|
67 class DLL; |
|
68 class FileHandle; |
|
69 template<class E> class GrowableArray; |
|
70 |
|
71 // %%%%% Moved ThreadState, START_FN, OSThread to new osThread.hpp. -- Rose |
|
72 |
|
// Platform-independent error return values from OS functions
enum OSReturn {
  OS_OK         =  0,  // Operation was successful
  OS_ERR        = -1,  // Operation failed
  OS_INTRPT     = -2,  // Operation was interrupted
  OS_TIMEOUT    = -3,  // Operation timed out
  OS_NOMEM      = -5,  // Operation failed for lack of memory (-4 is unassigned)
  OS_NORESOURCE = -6   // Operation failed for lack of non-memory resource
};
|
82 |
|
// Thread priority levels, modeled on JLS 20.20.1-3 (Java priorities 1..10,
// with two VM-internal extensions below/above the Java range).
enum ThreadPriority {
  NoPriority       = -1,  // Initial non-priority value
  MinPriority      =  1,  // Minimum priority
  NormPriority     =  5,  // Normal (non-daemon) priority
  NearMaxPriority  =  9,  // High priority, used for VMThread
  MaxPriority      = 10,  // Highest priority, used for WatcherThread
                          // ensures that VMThread doesn't starve profiler
  CriticalPriority = 11   // Critical thread priority
};
|
92 |
|
// Named constant for the 'executable' parameter of os::commit_memory()
// and os::commit_memory_or_exit(), so call sites read
// commit_memory(addr, bytes, ExecMem) rather than a bare 'true'.
const bool ExecMem = true;
|
96 |
|
97 // Typedef for structured exception handling support |
|
98 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); |
|
99 |
|
100 class os: AllStatic { |
|
101 friend class VMStructs; |
|
102 |
|
103 public: |
|
104 enum { page_sizes_max = 9 }; // Size of _page_sizes array (8 plus a sentinel) |
|
105 |
|
106 private: |
|
107 static OSThread* _starting_thread; |
|
108 static address _polling_page; |
|
109 static volatile int32_t * _mem_serialize_page; |
|
110 static uintptr_t _serialize_page_mask; |
|
111 public: |
|
112 static size_t _page_sizes[page_sizes_max]; |
|
113 |
|
114 private: |
|
115 static void init_page_sizes(size_t default_page_size) { |
|
116 _page_sizes[0] = default_page_size; |
|
117 _page_sizes[1] = 0; // sentinel |
|
118 } |
|
119 |
|
120 static char* pd_reserve_memory(size_t bytes, char* addr = 0, |
|
121 size_t alignment_hint = 0); |
|
122 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); |
|
123 static void pd_split_reserved_memory(char *base, size_t size, |
|
124 size_t split, bool realloc); |
|
125 static bool pd_commit_memory(char* addr, size_t bytes, bool executable); |
|
126 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, |
|
127 bool executable); |
|
128 // Same as pd_commit_memory() that either succeeds or calls |
|
129 // vm_exit_out_of_memory() with the specified mesg. |
|
130 static void pd_commit_memory_or_exit(char* addr, size_t bytes, |
|
131 bool executable, const char* mesg); |
|
132 static void pd_commit_memory_or_exit(char* addr, size_t size, |
|
133 size_t alignment_hint, |
|
134 bool executable, const char* mesg); |
|
135 static bool pd_uncommit_memory(char* addr, size_t bytes); |
|
136 static bool pd_release_memory(char* addr, size_t bytes); |
|
137 |
|
138 static char* pd_map_memory(int fd, const char* file_name, size_t file_offset, |
|
139 char *addr, size_t bytes, bool read_only = false, |
|
140 bool allow_exec = false); |
|
141 static char* pd_remap_memory(int fd, const char* file_name, size_t file_offset, |
|
142 char *addr, size_t bytes, bool read_only, |
|
143 bool allow_exec); |
|
144 static bool pd_unmap_memory(char *addr, size_t bytes); |
|
145 static void pd_free_memory(char *addr, size_t bytes, size_t alignment_hint); |
|
146 static void pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint); |
|
147 |
|
148 |
|
149 public: |
|
150 static void init(void); // Called before command line parsing |
|
151 static void init_before_ergo(void); // Called after command line parsing |
|
152 // before VM ergonomics processing. |
|
153 static jint init_2(void); // Called after command line parsing |
|
154 // and VM ergonomics processing |
|
155 static void init_globals(void) { // Called from init_globals() in init.cpp |
|
156 init_globals_ext(); |
|
157 } |
|
158 static void init_3(void); // Called at the end of vm init |
|
159 |
|
160 // File names are case-insensitive on windows only |
|
161 // Override me as needed |
|
162 static int file_name_strcmp(const char* s1, const char* s2); |
|
163 |
|
164 static bool getenv(const char* name, char* buffer, int len); |
|
165 static bool have_special_privileges(); |
|
166 |
|
167 static jlong javaTimeMillis(); |
|
168 static jlong javaTimeNanos(); |
|
169 static void javaTimeNanos_info(jvmtiTimerInfo *info_ptr); |
|
170 static void run_periodic_checks(); |
|
171 |
|
172 |
|
173 // Returns the elapsed time in seconds since the vm started. |
|
174 static double elapsedTime(); |
|
175 |
|
176 // Returns real time in seconds since an arbitrary point |
|
177 // in the past. |
|
178 static bool getTimesSecs(double* process_real_time, |
|
179 double* process_user_time, |
|
180 double* process_system_time); |
|
181 |
|
182 // Interface to the performance counter |
|
183 static jlong elapsed_counter(); |
|
184 static jlong elapsed_frequency(); |
|
185 |
|
186 // The "virtual time" of a thread is the amount of time a thread has |
|
187 // actually run. The first function indicates whether the OS supports |
|
188 // this functionality for the current thread, and if so: |
|
189 // * the second enables vtime tracking (if that is required). |
|
190 // * the third tells whether vtime is enabled. |
|
191 // * the fourth returns the elapsed virtual time for the current |
|
192 // thread. |
|
193 static bool supports_vtime(); |
|
194 static bool enable_vtime(); |
|
195 static bool vtime_enabled(); |
|
196 static double elapsedVTime(); |
|
197 |
|
198 // Return current local time in a string (YYYY-MM-DD HH:MM:SS). |
|
199 // It is MT safe, but not async-safe, as reading time zone |
|
200 // information may require a lock on some platforms. |
|
201 static char* local_time_string(char *buf, size_t buflen); |
|
202 static struct tm* localtime_pd (const time_t* clock, struct tm* res); |
|
203 // Fill in buffer with current local time as an ISO-8601 string. |
|
204 // E.g., YYYY-MM-DDThh:mm:ss.mmm+zzzz. |
|
205 // Returns buffer, or NULL if it failed. |
|
206 static char* iso8601_time(char* buffer, size_t buffer_length); |
|
207 |
|
208 // Interface for detecting multiprocessor system |
|
209 static inline bool is_MP() { |
|
210 assert(_processor_count > 0, "invalid processor count"); |
|
211 return _processor_count > 1 || AssumeMP; |
|
212 } |
|
213 static julong available_memory(); |
|
214 static julong physical_memory(); |
|
215 static bool has_allocatable_memory_limit(julong* limit); |
|
216 static bool is_server_class_machine(); |
|
217 |
|
218 // number of CPUs |
|
219 static int processor_count() { |
|
220 return _processor_count; |
|
221 } |
|
222 static void set_processor_count(int count) { _processor_count = count; } |
|
223 |
|
224 // Returns the number of CPUs this process is currently allowed to run on. |
|
225 // Note that on some OSes this can change dynamically. |
|
226 static int active_processor_count(); |
|
227 |
|
228 // Bind processes to processors. |
|
229 // This is a two step procedure: |
|
230 // first you generate a distribution of processes to processors, |
|
231 // then you bind processes according to that distribution. |
|
232 // Compute a distribution for number of processes to processors. |
|
233 // Stores the processor id's into the distribution array argument. |
|
234 // Returns true if it worked, false if it didn't. |
|
235 static bool distribute_processes(uint length, uint* distribution); |
|
236 // Binds the current process to a processor. |
|
237 // Returns true if it worked, false if it didn't. |
|
238 static bool bind_to_processor(uint processor_id); |
|
239 |
|
240 // Give a name to the current thread. |
|
241 static void set_native_thread_name(const char *name); |
|
242 |
|
243 // Interface for stack banging (predetect possible stack overflow for |
|
244 // exception processing) There are guard pages, and above that shadow |
|
245 // pages for stack overflow checking. |
|
246 static bool uses_stack_guard_pages(); |
|
247 static bool allocate_stack_guard_pages(); |
|
248 static void bang_stack_shadow_pages(); |
|
249 static bool stack_shadow_pages_available(Thread *thread, methodHandle method); |
|
250 |
|
251 // OS interface to Virtual Memory |
|
252 |
|
253 // Return the default page size. |
|
254 static int vm_page_size(); |
|
255 |
|
256 // Return the page size to use for a region of memory. The min_pages argument |
|
257 // is a hint intended to limit fragmentation; it says the returned page size |
|
258 // should be <= region_max_size / min_pages. Because min_pages is a hint, |
|
259 // this routine may return a size larger than region_max_size / min_pages. |
|
260 // |
|
261 // The current implementation ignores min_pages if a larger page size is an |
|
262 // exact multiple of both region_min_size and region_max_size. This allows |
|
263 // larger pages to be used when doing so would not cause fragmentation; in |
|
264 // particular, a single page can be used when region_min_size == |
|
265 // region_max_size == a supported page size. |
|
266 static size_t page_size_for_region(size_t region_min_size, |
|
267 size_t region_max_size, |
|
268 uint min_pages); |
|
269 // Return the largest page size that can be used |
|
270 static size_t max_page_size() { |
|
271 // The _page_sizes array is sorted in descending order. |
|
272 return _page_sizes[0]; |
|
273 } |
|
274 |
|
275 // Methods for tracing page sizes returned by the above method; enabled by |
|
276 // TracePageSizes. The region_{min,max}_size parameters should be the values |
|
277 // passed to page_size_for_region() and page_size should be the result of that |
|
278 // call. The (optional) base and size parameters should come from the |
|
279 // ReservedSpace base() and size() methods. |
|
280 static void trace_page_sizes(const char* str, const size_t* page_sizes, |
|
281 int count) PRODUCT_RETURN; |
|
282 static void trace_page_sizes(const char* str, const size_t region_min_size, |
|
283 const size_t region_max_size, |
|
284 const size_t page_size, |
|
285 const char* base = NULL, |
|
286 const size_t size = 0) PRODUCT_RETURN; |
|
287 |
|
288 static int vm_allocation_granularity(); |
|
289 static char* reserve_memory(size_t bytes, char* addr = 0, |
|
290 size_t alignment_hint = 0); |
|
291 static char* reserve_memory(size_t bytes, char* addr, |
|
292 size_t alignment_hint, MEMFLAGS flags); |
|
293 static char* reserve_memory_aligned(size_t size, size_t alignment); |
|
294 static char* attempt_reserve_memory_at(size_t bytes, char* addr); |
|
295 static void split_reserved_memory(char *base, size_t size, |
|
296 size_t split, bool realloc); |
|
297 static bool commit_memory(char* addr, size_t bytes, bool executable); |
|
298 static bool commit_memory(char* addr, size_t size, size_t alignment_hint, |
|
299 bool executable); |
|
300 // Same as commit_memory() that either succeeds or calls |
|
301 // vm_exit_out_of_memory() with the specified mesg. |
|
302 static void commit_memory_or_exit(char* addr, size_t bytes, |
|
303 bool executable, const char* mesg); |
|
304 static void commit_memory_or_exit(char* addr, size_t size, |
|
305 size_t alignment_hint, |
|
306 bool executable, const char* mesg); |
|
307 static bool uncommit_memory(char* addr, size_t bytes); |
|
308 static bool release_memory(char* addr, size_t bytes); |
|
309 |
|
310 enum ProtType { MEM_PROT_NONE, MEM_PROT_READ, MEM_PROT_RW, MEM_PROT_RWX }; |
|
311 static bool protect_memory(char* addr, size_t bytes, ProtType prot, |
|
312 bool is_committed = true); |
|
313 |
|
314 static bool guard_memory(char* addr, size_t bytes); |
|
315 static bool unguard_memory(char* addr, size_t bytes); |
|
316 static bool create_stack_guard_pages(char* addr, size_t bytes); |
|
317 static bool pd_create_stack_guard_pages(char* addr, size_t bytes); |
|
318 static bool remove_stack_guard_pages(char* addr, size_t bytes); |
|
319 |
|
320 static char* map_memory(int fd, const char* file_name, size_t file_offset, |
|
321 char *addr, size_t bytes, bool read_only = false, |
|
322 bool allow_exec = false); |
|
323 static char* remap_memory(int fd, const char* file_name, size_t file_offset, |
|
324 char *addr, size_t bytes, bool read_only, |
|
325 bool allow_exec); |
|
326 static bool unmap_memory(char *addr, size_t bytes); |
|
327 static void free_memory(char *addr, size_t bytes, size_t alignment_hint); |
|
328 static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); |
|
329 |
|
330 // NUMA-specific interface |
|
331 static bool numa_has_static_binding(); |
|
332 static bool numa_has_group_homing(); |
|
333 static void numa_make_local(char *addr, size_t bytes, int lgrp_hint); |
|
334 static void numa_make_global(char *addr, size_t bytes); |
|
335 static size_t numa_get_groups_num(); |
|
336 static size_t numa_get_leaf_groups(int *ids, size_t size); |
|
337 static bool numa_topology_changed(); |
|
338 static int numa_get_group_id(); |
|
339 |
|
340 // Page manipulation |
|
341 struct page_info { |
|
342 size_t size; |
|
343 int lgrp_id; |
|
344 }; |
|
345 static bool get_page_info(char *start, page_info* info); |
|
346 static char* scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found); |
|
347 |
|
348 static char* non_memory_address_word(); |
|
349 // reserve, commit and pin the entire memory region |
|
350 static char* reserve_memory_special(size_t size, size_t alignment, |
|
351 char* addr, bool executable); |
|
352 static bool release_memory_special(char* addr, size_t bytes); |
|
353 static void large_page_init(); |
|
354 static size_t large_page_size(); |
|
355 static bool can_commit_large_page_memory(); |
|
356 static bool can_execute_large_page_memory(); |
|
357 |
|
358 // OS interface to polling page |
|
359 static address get_polling_page() { return _polling_page; } |
|
360 static void set_polling_page(address page) { _polling_page = page; } |
|
361 static bool is_poll_address(address addr) { return addr >= _polling_page && addr < (_polling_page + os::vm_page_size()); } |
|
362 static void make_polling_page_unreadable(); |
|
363 static void make_polling_page_readable(); |
|
364 |
|
365 // Routines used to serialize the thread state without using membars |
|
366 static void serialize_thread_states(); |
|
367 |
|
368 // Since we write to the serialize page from every thread, we |
|
369 // want stores to be on unique cache lines whenever possible |
|
370 // in order to minimize CPU cross talk. We pre-compute the |
|
371 // amount to shift the thread* to make this offset unique to |
|
372 // each thread. |
|
373 static int get_serialize_page_shift_count() { |
|
374 return SerializePageShiftCount; |
|
375 } |
|
376 |
|
377 static void set_serialize_page_mask(uintptr_t mask) { |
|
378 _serialize_page_mask = mask; |
|
379 } |
|
380 |
|
381 static unsigned int get_serialize_page_mask() { |
|
382 return _serialize_page_mask; |
|
383 } |
|
384 |
|
385 static void set_memory_serialize_page(address page); |
|
386 |
|
387 static address get_memory_serialize_page() { |
|
388 return (address)_mem_serialize_page; |
|
389 } |
|
390 |
|
391 static inline void write_memory_serialize_page(JavaThread *thread) { |
|
392 uintptr_t page_offset = ((uintptr_t)thread >> |
|
393 get_serialize_page_shift_count()) & |
|
394 get_serialize_page_mask(); |
|
395 *(volatile int32_t *)((uintptr_t)_mem_serialize_page+page_offset) = 1; |
|
396 } |
|
397 |
|
398 static bool is_memory_serialize_page(JavaThread *thread, address addr) { |
|
399 if (UseMembar) return false; |
|
400 // Previously this function calculated the exact address of this |
|
401 // thread's serialize page, and checked if the faulting address |
|
402 // was equal. However, some platforms mask off faulting addresses |
|
403 // to the page size, so now we just check that the address is |
|
404 // within the page. This makes the thread argument unnecessary, |
|
405 // but we retain the NULL check to preserve existing behaviour. |
|
406 if (thread == NULL) return false; |
|
407 address page = (address) _mem_serialize_page; |
|
408 return addr >= page && addr < (page + os::vm_page_size()); |
|
409 } |
|
410 |
|
411 static void block_on_serialize_page_trap(); |
|
412 |
|
413 // threads |
|
414 |
|
415 enum ThreadType { |
|
416 vm_thread, |
|
417 cgc_thread, // Concurrent GC thread |
|
418 pgc_thread, // Parallel GC thread |
|
419 java_thread, |
|
420 compiler_thread, |
|
421 watcher_thread, |
|
422 os_thread |
|
423 }; |
|
424 |
|
425 static bool create_thread(Thread* thread, |
|
426 ThreadType thr_type, |
|
427 size_t stack_size = 0); |
|
428 static bool create_main_thread(JavaThread* thread); |
|
429 static bool create_attached_thread(JavaThread* thread); |
|
430 static void pd_start_thread(Thread* thread); |
|
431 static void start_thread(Thread* thread); |
|
432 |
|
433 static void initialize_thread(Thread* thr); |
|
434 static void free_thread(OSThread* osthread); |
|
435 |
|
436 // thread id on Linux/64bit is 64bit, on Windows and Solaris, it's 32bit |
|
437 static intx current_thread_id(); |
|
438 static int current_process_id(); |
|
439 static int sleep(Thread* thread, jlong ms, bool interruptable); |
|
440 // Short standalone OS sleep suitable for slow path spin loop. |
|
441 // Ignores Thread.interrupt() (so keep it short). |
|
442 // ms = 0, will sleep for the least amount of time allowed by the OS. |
|
443 static void naked_short_sleep(jlong ms); |
|
444 static void infinite_sleep(); // never returns, use with CAUTION |
|
445 static void yield(); // Yields to all threads with same priority |
|
446 enum YieldResult { |
|
447 YIELD_SWITCHED = 1, // caller descheduled, other ready threads exist & ran |
|
448 YIELD_NONEREADY = 0, // No other runnable/ready threads. |
|
449 // platform-specific yield return immediately |
|
450 YIELD_UNKNOWN = -1 // Unknown: platform doesn't support _SWITCHED or _NONEREADY |
|
451 // YIELD_SWITCHED and YIELD_NONREADY imply the platform supports a "strong" |
|
452 // yield that can be used in lieu of blocking. |
|
453 } ; |
|
454 static YieldResult NakedYield () ; |
|
455 static void yield_all(int attempts = 0); // Yields to all other threads including lower priority |
|
456 static void loop_breaker(int attempts); // called from within tight loops to possibly influence time-sharing |
|
457 static OSReturn set_priority(Thread* thread, ThreadPriority priority); |
|
458 static OSReturn get_priority(const Thread* const thread, ThreadPriority& priority); |
|
459 |
|
460 static void interrupt(Thread* thread); |
|
461 static bool is_interrupted(Thread* thread, bool clear_interrupted); |
|
462 |
|
463 static int pd_self_suspend_thread(Thread* thread); |
|
464 |
|
465 static ExtendedPC fetch_frame_from_context(void* ucVoid, intptr_t** sp, intptr_t** fp); |
|
466 static frame fetch_frame_from_context(void* ucVoid); |
|
467 |
|
468 static ExtendedPC get_thread_pc(Thread *thread); |
|
469 static void breakpoint(); |
|
470 |
|
471 static address current_stack_pointer(); |
|
472 static address current_stack_base(); |
|
473 static size_t current_stack_size(); |
|
474 |
|
475 static void verify_stack_alignment() PRODUCT_RETURN; |
|
476 |
|
477 static int message_box(const char* title, const char* message); |
|
478 static char* do_you_want_to_debug(const char* message); |
|
479 |
|
480 // run cmd in a separate process and return its exit code; or -1 on failures |
|
481 static int fork_and_exec(char *cmd); |
|
482 |
|
483 // os::exit() is merged with vm_exit() |
|
484 // static void exit(int num); |
|
485 |
|
486 // Terminate the VM, but don't exit the process |
|
487 static void shutdown(); |
|
488 |
|
489 // Terminate with an error. Default is to generate a core file on platforms |
|
490 // that support such things. This calls shutdown() and then aborts. |
|
491 static void abort(bool dump_core = true); |
|
492 |
|
493 // Die immediately, no exit hook, no abort hook, no cleanup. |
|
494 static void die(); |
|
495 |
|
496 // File i/o operations |
|
497 static const int default_file_open_flags(); |
|
498 static int open(const char *path, int oflag, int mode); |
|
499 static FILE* open(int fd, const char* mode); |
|
500 static int close(int fd); |
|
501 static jlong lseek(int fd, jlong offset, int whence); |
|
502 static char* native_path(char *path); |
|
503 static int ftruncate(int fd, jlong length); |
|
504 static int fsync(int fd); |
|
505 static int available(int fd, jlong *bytes); |
|
506 |
|
507 //File i/o operations |
|
508 |
|
509 static size_t read(int fd, void *buf, unsigned int nBytes); |
|
510 static size_t restartable_read(int fd, void *buf, unsigned int nBytes); |
|
511 static size_t write(int fd, const void *buf, unsigned int nBytes); |
|
512 |
|
513 // Reading directories. |
|
514 static DIR* opendir(const char* dirname); |
|
515 static int readdir_buf_size(const char *path); |
|
516 static struct dirent* readdir(DIR* dirp, dirent* dbuf); |
|
517 static int closedir(DIR* dirp); |
|
518 |
|
519 // Dynamic library extension |
|
520 static const char* dll_file_extension(); |
|
521 |
|
522 static const char* get_temp_directory(); |
|
523 static const char* get_current_directory(char *buf, size_t buflen); |
|
524 |
|
525 // Builds a platform-specific full library path given a ld path and lib name |
|
526 // Returns true if buffer contains full path to existing file, false otherwise |
|
527 static bool dll_build_name(char* buffer, size_t size, |
|
528 const char* pathname, const char* fname); |
|
529 |
|
530 // Symbol lookup, find nearest function name; basically it implements |
|
531 // dladdr() for all platforms. Name of the nearest function is copied |
|
532 // to buf. Distance from its base address is optionally returned as offset. |
|
533 // If function name is not found, buf[0] is set to '\0' and offset is |
|
534 // set to -1 (if offset is non-NULL). |
|
535 static bool dll_address_to_function_name(address addr, char* buf, |
|
536 int buflen, int* offset); |
|
537 |
|
538 // Locate DLL/DSO. On success, full path of the library is copied to |
|
539 // buf, and offset is optionally set to be the distance between addr |
|
540 // and the library's base address. On failure, buf[0] is set to '\0' |
|
541 // and offset is set to -1 (if offset is non-NULL). |
|
542 static bool dll_address_to_library_name(address addr, char* buf, |
|
543 int buflen, int* offset); |
|
544 |
|
545 // Find out whether the pc is in the static code for jvm.dll/libjvm.so. |
|
546 static bool address_is_in_vm(address addr); |
|
547 |
|
548 // Loads .dll/.so and |
|
549 // in case of error it checks if .dll/.so was built for the |
|
550 // same architecture as Hotspot is running on |
|
551 static void* dll_load(const char *name, char *ebuf, int ebuflen); |
|
552 |
|
553 // lookup symbol in a shared library |
|
554 static void* dll_lookup(void* handle, const char* name); |
|
555 |
|
556 // Unload library |
|
557 static void dll_unload(void *lib); |
|
558 |
|
559 // Return the handle of this process |
|
560 static void* get_default_process_handle(); |
|
561 |
|
562 // Check for static linked agent library |
|
563 static bool find_builtin_agent(AgentLibrary *agent_lib, const char *syms[], |
|
564 size_t syms_len); |
|
565 |
|
566 // Find agent entry point |
|
567 static void *find_agent_function(AgentLibrary *agent_lib, bool check_lib, |
|
568 const char *syms[], size_t syms_len); |
|
569 |
|
570 // Print out system information; they are called by fatal error handler. |
|
571 // Output format may be different on different platforms. |
|
572 static void print_os_info(outputStream* st); |
|
573 static void print_os_info_brief(outputStream* st); |
|
574 static void print_cpu_info(outputStream* st); |
|
575 static void pd_print_cpu_info(outputStream* st); |
|
576 static void print_memory_info(outputStream* st); |
|
577 static void print_dll_info(outputStream* st); |
|
578 static void print_environment_variables(outputStream* st, const char** env_list, char* buffer, int len); |
|
579 static void print_context(outputStream* st, void* context); |
|
580 static void print_register_info(outputStream* st, void* context); |
|
581 static void print_siginfo(outputStream* st, void* siginfo); |
|
582 static void print_signal_handlers(outputStream* st, char* buf, size_t buflen); |
|
583 static void print_date_and_time(outputStream* st); |
|
584 |
|
585 static void print_location(outputStream* st, intptr_t x, bool verbose = false); |
|
586 static size_t lasterror(char *buf, size_t len); |
|
587 static int get_last_error(); |
|
588 |
|
589 // Determines whether the calling process is being debugged by a user-mode debugger. |
|
590 static bool is_debugger_attached(); |
|
591 |
|
592 // wait for a key press if PauseAtExit is set |
|
593 static void wait_for_keypress_at_exit(void); |
|
594 |
|
595 // The following two functions are used by fatal error handler to trace |
|
596 // native (C) frames. They are not part of frame.hpp/frame.cpp because |
|
597 // frame.hpp/cpp assume thread is JavaThread, and also because different |
|
598 // OS/compiler may have different convention or provide different API to |
|
599 // walk C frames. |
|
600 // |
|
601 // We don't attempt to become a debugger, so we only follow frames if that |
|
602 // does not require a lookup in the unwind table, which is part of the binary |
|
603 // file but may be unsafe to read after a fatal error. So on x86, we can |
|
604 // only walk stack if %ebp is used as frame pointer; on ia64, it's not |
|
605 // possible to walk C stack without having the unwind table. |
|
606 static bool is_first_C_frame(frame *fr); |
|
607 static frame get_sender_for_C_frame(frame *fr); |
|
608 |
|
609 // return current frame. pc() and sp() are set to NULL on failure. |
|
610 static frame current_frame(); |
|
611 |
|
612 static void print_hex_dump(outputStream* st, address start, address end, int unitsize); |
|
613 |
|
614 // returns a string to describe the exception/signal; |
|
615 // returns NULL if exception_code is not an OS exception/signal. |
|
616 static const char* exception_name(int exception_code, char* buf, size_t buflen); |
|
617 |
|
618 // Returns native Java library, loads if necessary |
|
619 static void* native_java_library(); |
|
620 |
|
621 // Fills in path to jvm.dll/libjvm.so (used by the Disassembler) |
|
622 static void jvm_path(char *buf, jint buflen); |
|
623 |
|
624 // Returns true if we are running in a headless jre. |
|
625 static bool is_headless_jre(); |
|
626 |
|
627 // JNI names |
|
628 static void print_jni_name_prefix_on(outputStream* st, int args_size); |
|
629 static void print_jni_name_suffix_on(outputStream* st, int args_size); |
|
630 |
|
631 // File conventions |
|
632 static const char* file_separator(); |
|
633 static const char* line_separator(); |
|
634 static const char* path_separator(); |
|
635 |
|
636 // Init os specific system properties values |
|
637 static void init_system_properties_values(); |
|
638 |
|
639 // IO operations, non-JVM_ version. |
|
640 static int stat(const char* path, struct stat* sbuf); |
|
641 static bool dir_is_empty(const char* path); |
|
642 |
|
643 // IO operations on binary files |
|
644 static int create_binary_file(const char* path, bool rewrite_existing); |
|
645 static jlong current_file_offset(int fd); |
|
646 static jlong seek_to_file_offset(int fd, jlong offset); |
|
647 |
|
648 // Thread Local Storage |
|
649 static int allocate_thread_local_storage(); |
|
// Thread Local Storage: per-thread slots addressed by a small integer index.
static void thread_local_storage_at_put(int index, void* value);
static void* thread_local_storage_at(int index);
static void free_thread_local_storage(int index);

// Stack walk: return the pc of the n-th caller frame (n == 0 is the
// immediate caller).
static address get_caller_pc(int n = 0);

// General allocation (must be MT-safe)
// All VM C-heap traffic funnels through these wrappers so allocations can
// be tagged with a MEMFLAGS category; caller_pc (default 0) optionally
// records the requesting call site.
static void* malloc (size_t size, MEMFLAGS flags, address caller_pc = 0);
static void* realloc (void *memblock, size_t size, MEMFLAGS flags, address caller_pc = 0);
static void free (void *memblock, MEMFLAGS flags = mtNone);
static bool check_heap(bool force = false); // verify C heap integrity
static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup

#ifndef PRODUCT
// Allocation statistics, maintained only in non-PRODUCT builds.
static julong num_mallocs; // # of calls to malloc/realloc
static julong alloc_bytes; // # of bytes allocated
static julong num_frees; // # of calls to free
static julong free_bytes; // # of bytes freed
#endif

// SocketInterface (ex HPI SocketInterface )
// OS-neutral wrappers over the platform's BSD-style socket calls.
static int socket(int domain, int type, int protocol);
static int socket_close(int fd);
static int socket_shutdown(int fd, int howto);
static int recv(int fd, char* buf, size_t nBytes, uint flags);
static int send(int fd, char* buf, size_t nBytes, uint flags);
static int raw_send(int fd, char* buf, size_t nBytes, uint flags);
static int timeout(int fd, long timeout);
static int listen(int fd, int count);
static int connect(int fd, struct sockaddr* him, socklen_t len);
static int bind(int fd, struct sockaddr* him, socklen_t len);
static int accept(int fd, struct sockaddr* him, socklen_t* len);
static int recvfrom(int fd, char* buf, size_t nbytes, uint flags,
                    struct sockaddr* from, socklen_t* fromlen);
static int get_sock_name(int fd, struct sockaddr* him, socklen_t* len);
static int sendto(int fd, char* buf, size_t len, uint flags,
                  struct sockaddr* to, socklen_t tolen);
static int socket_available(int fd, jint* pbytes);

static int get_sock_opt(int fd, int level, int optname,
                        char* optval, socklen_t* optlen);
static int set_sock_opt(int fd, int level, int optname,
                        const char* optval, socklen_t optlen);
static int get_host_name(char* name, int namelen);

static struct hostent* get_host_by_name(char* name);

// Support for signals (see JVM_RaiseSignal, JVM_RegisterSignal)
static void signal_init();
static void signal_init_pd();     // platform-dependent part of signal_init
static void signal_notify(int signal_number);
static void* signal(int signal_number, void* handler);
static void signal_raise(int signal_number);
static int signal_wait();
static int signal_lookup();
static void* user_handler();
static void terminate_signal_thread();
static int sigexitnum_pd();       // platform-dependent exit signal number

// random number generation
static long random(); // return 32bit pseudorandom number
static void init_random(long initval); // initialize random sequence

// Structured OS Exception support
static void os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);

// On Windows this will create an actual minidump, on Linux/Solaris it will simply check core dump limits
static void check_or_create_dump(void* exceptionRecord, void* contextRecord, char* buffer, size_t bufferSize);

// Get the default path to the core file
// Returns the length of the string
static int get_core_path(char* buffer, size_t bufferSize);

// JVMTI & JVM monitoring and management support
// The thread_cpu_time() and current_thread_cpu_time() are only
// supported if is_thread_cpu_time_supported() returns true.
// They are not supported on Solaris T1.

// Thread CPU Time - return the fast estimate on a platform
// On Solaris - call gethrvtime (fast) - user time only
// On Linux - fast clock_gettime where available - user+sys
// - otherwise: very slow /proc fs - user+sys
// On Windows - GetThreadTimes - user+sys
static jlong current_thread_cpu_time();
static jlong thread_cpu_time(Thread* t);

// Thread CPU Time with user_sys_cpu_time parameter.
//
// If user_sys_cpu_time is true, user+sys time is returned.
// Otherwise, only user time is returned
static jlong current_thread_cpu_time(bool user_sys_cpu_time);
static jlong thread_cpu_time(Thread* t, bool user_sys_cpu_time);

// Return a bunch of info about the timers.
// Note that the returned info for these two functions may be different
// on some platforms
static void current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr);
static void thread_cpu_time_info(jvmtiTimerInfo *info_ptr);

static bool is_thread_cpu_time_supported();

// System loadavg support. Returns -1 if load average cannot be obtained.
static int loadavg(double loadavg[], int nelem);

// Hook for os specific jvm options that we don't want to abort on seeing
static bool obsolete_option(const JavaVMOption *option);

// Extensions
#include "runtime/os_ext.hpp"
|
760 |
|
public:
// Abstract stack-allocated callback: subclasses override call() with the
// work to execute. NOTE(review): appears to be used with the platform
// crash-protection support pulled in via the os_*.hpp includes below —
// confirm against the platform implementations.
class CrashProtectionCallback : public StackObj {
public:
  virtual void call() = 0;
};
|
766 |
|
767 // Platform dependent stuff |
|
768 #ifdef TARGET_OS_FAMILY_linux |
|
769 # include "os_linux.hpp" |
|
770 # include "os_posix.hpp" |
|
771 #endif |
|
772 #ifdef TARGET_OS_FAMILY_solaris |
|
773 # include "os_solaris.hpp" |
|
774 # include "os_posix.hpp" |
|
775 #endif |
|
776 #ifdef TARGET_OS_FAMILY_windows |
|
777 # include "os_windows.hpp" |
|
778 #endif |
|
779 #ifdef TARGET_OS_FAMILY_aix |
|
780 # include "os_aix.hpp" |
|
781 # include "os_posix.hpp" |
|
782 #endif |
|
783 #ifdef TARGET_OS_FAMILY_bsd |
|
784 # include "os_posix.hpp" |
|
785 # include "os_bsd.hpp" |
|
786 #endif |
|
787 #ifdef TARGET_OS_ARCH_linux_x86 |
|
788 # include "os_linux_x86.hpp" |
|
789 #endif |
|
790 #ifdef TARGET_OS_ARCH_linux_sparc |
|
791 # include "os_linux_sparc.hpp" |
|
792 #endif |
|
793 #ifdef TARGET_OS_ARCH_linux_zero |
|
794 # include "os_linux_zero.hpp" |
|
795 #endif |
|
796 #ifdef TARGET_OS_ARCH_solaris_x86 |
|
797 # include "os_solaris_x86.hpp" |
|
798 #endif |
|
799 #ifdef TARGET_OS_ARCH_solaris_sparc |
|
800 # include "os_solaris_sparc.hpp" |
|
801 #endif |
|
802 #ifdef TARGET_OS_ARCH_windows_x86 |
|
803 # include "os_windows_x86.hpp" |
|
804 #endif |
|
805 #ifdef TARGET_OS_ARCH_linux_arm |
|
806 # include "os_linux_arm.hpp" |
|
807 #endif |
|
808 #ifdef TARGET_OS_ARCH_linux_ppc |
|
809 # include "os_linux_ppc.hpp" |
|
810 #endif |
|
811 #ifdef TARGET_OS_ARCH_aix_ppc |
|
812 # include "os_aix_ppc.hpp" |
|
813 #endif |
|
814 #ifdef TARGET_OS_ARCH_bsd_x86 |
|
815 # include "os_bsd_x86.hpp" |
|
816 #endif |
|
817 #ifdef TARGET_OS_ARCH_bsd_zero |
|
818 # include "os_bsd_zero.hpp" |
|
819 #endif |
|
820 |
|
public:
#ifndef PLATFORM_PRINT_NATIVE_STACK
// No platform-specific code for printing the native stack.
// Ports that supply their own native-stack printer define
// PLATFORM_PRINT_NATIVE_STACK (presumably in a platform header included
// above — confirm) and provide a real implementation; this default
// returns false to indicate nothing was printed.
static bool platform_print_native_stack(outputStream* st, void* context,
                                        char *buf, int buf_size) {
  return false;
}
#endif
|
829 |
|
// debugging support (mostly used by debug.cpp but also fatal error handler)
static bool find(address pc, outputStream* st = tty); // OS specific function to make sense out of an address

static bool dont_yield(); // when true, JVM_Yield() is nop
static void print_statistics();

// Thread priority helpers (implemented in OS-specific part)
static OSReturn set_native_priority(Thread* thread, int native_prio);
static OSReturn get_native_priority(const Thread* const thread, int* priority_ptr);
// Mapping table from Java priorities (including the critical priority)
// to native OS priorities.
static int java_to_os_priority[CriticalPriority + 1];
// Hint to the underlying OS that a task switch would not be good.
// Void return because it's a hint and can fail.
static void hint_no_preempt();

// Used at creation if requested by the diagnostic flag PauseAtStartup.
// Causes the VM to wait until an external stimulus has been applied
// (for Unix, that stimulus is a signal, for Windows, an external
// ResumeThread call)
static void pause();

// Builds a platform dependent Agent_OnLoad_<libname> function name
// which is used to find statically linked in agents.
static char* build_agent_function_name(const char *sym, const char *cname,
                                       bool is_absolute_path);
|
854 |
|
855 class SuspendedThreadTaskContext { |
|
856 public: |
|
857 SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {} |
|
858 Thread* thread() const { return _thread; } |
|
859 void* ucontext() const { return _ucontext; } |
|
860 private: |
|
861 Thread* _thread; |
|
862 void* _ucontext; |
|
863 }; |
|
864 |
|
// A unit of work to perform on a thread while it is held suspended.
// Usage: construct with the target thread, override do_task(), then call
// run(). NOTE(review): run()/internal_do_task() are implemented in the
// OS-specific sources and appear to suspend _thread, invoke do_task()
// with the captured context, and resume it — confirm in the platform
// implementation.
class SuspendedThreadTask {
public:
  SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
  virtual ~SuspendedThreadTask() {}
  void run();                               // drive the suspend/do_task/resume cycle
  bool is_done() { return _done; }          // true once run() has completed the task
  virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
protected:
private:
  void internal_do_task();                  // OS-specific helper used by run()
  Thread* _thread;                          // target thread
  bool _done;                               // set when the task has been performed
};
|
878 |
|
#ifndef TARGET_OS_FAMILY_windows
// Suspend/resume support
// Protocol:
//
// a thread starts in SR_RUNNING
//
// SR_RUNNING can go to
// * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
// SR_SUSPEND_REQUEST can go to
// * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
// * SR_SUSPENDED if the stopped thread receives the signal and switches state
// SR_SUSPENDED can go to
// * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
// SR_WAKEUP_REQUEST can go to
// * SR_RUNNING when the stopped thread receives the signal
// * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
class SuspendResume {
public:
  enum State {
    SR_RUNNING,
    SR_SUSPEND_REQUEST,
    SR_SUSPENDED,
    SR_WAKEUP_REQUEST
  };

private:
  // Current protocol state; volatile because it is read and written by
  // different threads (the target thread and the WatcherThread).
  volatile State _state;

private:
  /* try to switch state from state "from" to state "to"
   * returns the state set after the method is complete
   */
  State switch_state(State from, State to);

public:
  SuspendResume() : _state(SR_RUNNING) { }

  State state() const { return _state; }

  // Each helper below attempts exactly one edge of the protocol above
  // via switch_state() and returns the resulting state.
  State request_suspend() {
    return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
  }

  State cancel_suspend() {
    return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
  }

  State suspended() {
    return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
  }

  State request_wakeup() {
    return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
  }

  State running() {
    return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
  }

  // Unsynchronized state queries.
  bool is_running() const {
    return _state == SR_RUNNING;
  }

  bool is_suspend_request() const {
    return _state == SR_SUSPEND_REQUEST;
  }

  bool is_suspended() const {
    return _state == SR_SUSPENDED;
  }
};
#endif
|
951 |
|
952 |
|
protected:
static long _rand_seed; // seed for random number generator
static int _processor_count; // number of processors

// Build a boot classpath string from format_string using the given home
// directory (home/home_len) and the platform file/path separator characters.
static char* format_boot_path(const char* format_string,
                              const char* home,
                              int home_len,
                              char fileSep,
                              char pathSep);
static bool set_boot_path(char fileSep, char pathSep);
// Split path into its components; the component count is returned
// through n. NOTE(review): ownership/freeing of the returned array is
// defined by the implementation — confirm before use.
static char** split_path(const char* path, int* n);
|
965 }; |
|
966 |
|
// CPU relaxation hint for spin-wait loops (e.g. the x86 PAUSE instruction).
// Note that "PAUSE" is almost always used with synchronization
// so arguably we should provide Atomic::SpinPause() instead
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.

extern "C" int SpinPause();
|
973 |
|
974 #endif // SHARE_VM_RUNTIME_OS_HPP |