src/os/aix/vm/os_aix.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
equal deleted inserted replaced
-1:000000000000 0:f90c822e73f8
/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// According to the AIX OS documentation, #pragma alloca must be used
// with the C++ compiler before referencing alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.inline.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add a missing declaration (it should be in procinfo.h, but is not until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3.
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD 100
#define ERROR_MP_EXTSHM_ACTIVE 101
#define ERROR_MP_VMGETINFO_FAILED 102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are that codeptr_t is a *real code pointer*:
// any function taking a codeptr_t argument assumes a real code pointer and
// will not handle function descriptors (e.g. getFuncName), whereas functions
// taking an address argument will deal with function descriptors
// (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
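// Background (AIX ABI): a C function pointer does not point at code but at a
// function descriptor residing in the data segment, a tuple of code entry
// point, TOC anchor and environment pointer. A codeptr_t corresponds to the
// first slot of such a descriptor.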

// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
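// (The stack grows downward; a valid sp must be 8-byte aligned and lie within
// [stack_base - stack_size, stack_base], which is exactly what the three
// checks below enforce.)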
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// Returns true if p is a valid code pointer.
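// (That is: non-NULL, 4-byte aligned (PPC instructions are word-sized), and
// located within the text segment of a loaded module.)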
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if the test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stack limits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}

////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error = -1;  // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;       // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals = true;
static pid_t    _initial_pid = 0;
static int      SR_signum = SIGUSR2;     // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;         // Used to protect dlsym() calls.

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    return mi.real_free;
  } else {
    return 0xFFFFFFFFFFFFFFFFLL;
  }
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0; // return a null string
  return false;
}


// Return true if the process runs with extra (setuid/setgid) privileges.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function; emulates disclaim64 using multiple 32-bit disclaims,
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size a 32-bit disclaim() accepts. (Theoretically 4 GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      // if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      // if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
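// For example: disclaiming 5 GB results in two full 2 GB disclaims
// (maxDisclaimSize == 0x80000000 bytes) followed by one 1 GB remainder
// disclaim.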

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif


// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");


  // Query default data page size (default page size for C-Heap, pthread stacks and .bss).
  // Default data page size is influenced either by linker options (-bdatapsize)
  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // the default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = ::malloc(SIZE_16M);
    guarantee(p != NULL, "malloc failed");
    data_page_size = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as the
  // primordial thread (because the primordial thread's stack may have a different
  // page size than pthread thread stacks). Running a VM on the primordial thread
  // won't work for a number of reasons, so we may just as well guarantee it here.
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from the C heap, so
    // the stack page size should be the same as the data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting the pagesize dynamically
  // for System V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // Now check which page sizes the OS claims it supports, and of those, which can actually be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" : "no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" : "no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {

#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // Buffer that fits several sprintfs.
  // Note that the space for the trailing null is provided
  // by the nulls included by the sizeof operator.
  const size_t bufsize =
    MAX3((size_t)MAXPATHLEN,                           // For dll_dir & friends.
         (size_t)MAXPATHLEN + sizeof(EXTENSIONS_DIR),  // extensions dir
         (size_t)MAXPATHLEN + sizeof(ENDORSED_DIR));   // endorsed dir
  char *buf = (char *)NEW_C_HEAP_ARRAY(char, bufsize, mtInternal);

  // sysclasspath, java_home, dll_dir
  {
    char *pslash;
    os::jvm_path(buf, bufsize);

    // Found the full path to libjvm.so.
    // Now cut the path to <java_home>/jre if we can.
    *(strrchr(buf, '/')) = '\0'; // Get rid of /libjvm.so.
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';            // Get rid of /{client|server|hotspot}.
    }
    Arguments::set_dll_dir(buf);

    if (pslash != NULL) {
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';          // Get rid of /<arch>.
        pslash = strrchr(buf, '/');
        if (pslash != NULL) {
          *pslash = '\0';        // Get rid of /lib.
        }
      }
    }
    Arguments::set_java_home(buf);
    set_boot_path('/', ':');
  }

  // Where to look for native libraries.

  // On AIX we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  // Get the user setting of LIBPATH.
  const char *v = ::getenv("LIBPATH");
  const char *v_colon = ":";
  if (v == NULL) { v = ""; v_colon = ""; }

  // Concatenate user and invariant part of ld_library_path.
  // That's +1 for the colon and +1 for the trailing '\0'.
  char *ld_library_path = (char *)NEW_C_HEAP_ARRAY(char, strlen(v) + 1 + sizeof(DEFAULT_LIBPATH) + 1, mtInternal);
  sprintf(ld_library_path, "%s%s" DEFAULT_LIBPATH, v, v_colon);
  Arguments::set_library_path(ld_library_path);
  FREE_C_HEAP_ARRAY(char, ld_library_path, mtInternal);

  // Extensions directories.
  sprintf(buf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(buf);

  // Endorsed standards default directory.
  sprintf(buf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(buf);

  FREE_C_HEAP_ARRAY(char, buf, mtInternal);

#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
                                 : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  // by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs.)
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
    }
    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
    }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //   ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //   ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //   ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total  Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total  Total real memory (in 4 KB pages).
    // u_longlong_t real_free   Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total  Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free   Free paging space (in 4 KB pages).

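    // Convert the reported 4 KB page counts into bytes.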
    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
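  // psct.loadavg[] is delivered as a fixed-point value with SBITS fractional
  // bits; dividing by (1 << SBITS) yields the usual floating-point load average.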
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} // end os::Aix::get_cpuinfo

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack traces evict each other's
  // cache lines. The threads can be from the same JVM instance or from
  // different JVM instances. The benefit is especially pronounced on
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
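  // (pid ^ counter) & 7 selects one of eight offsets (0, 128, ..., 896 bytes),
  // i.e. it shifts this thread's frames by up to seven 128-byte cache lines.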
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is the kernel thread id (similar to a Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize, whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
        // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let pthread_create() pick the default value (96 K on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
  thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by the VM, as we don't destroy the TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to declare mread_real_time manually,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  } else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time base register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We'd better use mread_real_time here instead of read_real_time,
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  {
    // gettimeofday is based on time in seconds since the Epoch, thus it does not wrap
    info_ptr->max_value = ALL_64_BITS;

    // gettimeofday follows the real time clock, so it may skip
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  }

  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed, not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  struct tms ticks;
  clock_t real_ticks = times(&ticks);

  if (real_ticks == (clock_t) (-1)) {
    return false;
  } else {
    double ticks_per_second = (double) clock_tics_per_sec;
    *process_user_time = ((double) ticks.tms_utime) / ticks_per_second;
    *process_system_time = ((double) ticks.tms_stime) / ticks_per_second;
    *process_real_time = ((double) real_ticks) / ticks_per_second;

    return true;
  }
}


char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }

}

// Note: os::abort() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }

int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // If you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in the osThread_linux.hpp file.

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, ala java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
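// Example (illustrative): dll_build_name(buf, sizeof(buf), "/usr/lib", "foo")
// yields "/usr/lib/libfoo.so"; an empty pname yields "libfoo.so", and a
// path-separated pname returns the first candidate that actually exists.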

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
// it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
// code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
// NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in]  program counter
                         char* p_name, size_t namelen,    // [out] optional: module name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      snprintf(p_name, namelen, "%s", lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module\n");
  }

  return -1;

}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads a shared library. In case of an error, ebuf (if non-NULL) receives
// a diagnostic message.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The library is loaded immediately with all its dependencies.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// Not all libdl implementations are MT-safe, so serialize dlsym() calls
// with dl_mutex.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}

void* os::get_default_process_handle() {
  return (void*)::dlopen(NULL, RTLD_LAZY);
}

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" : "no"));
  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" : "no"));
  if (g_multipage_error != 0) {
    st->print_cr("  multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   "  physical total : %llu\n"
                   "  physical free  : %llu\n"
                   "  swap total     : %llu\n"
                   "  swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr("  (no more information available)");
  }
}

void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query the number of active processors after a crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}


static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL, buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  print_signal_handler(st, SIGDANGER, buf, buflen);
}

static char saved_jvm_path[MAXPATHLEN] = {0};

// Find the full path to the current module, libjvm.so or libjvm_g.so.
void os::jvm_path(char *buf, jint buflen) {
  // Error checking.
  if (buflen < MAXPATHLEN) {
    assert(false, "must use a large-enough buffer");
    buf[0] = '\0';
    return;
  }
  // Lazy resolve the path to the current module.
  if (saved_jvm_path[0] != 0) {
    strcpy(buf, saved_jvm_path);
    return;
  }

  Dl_info dlinfo;
  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  assert(ret != 0, "cannot locate libjvm");
  char* rp = realpath((char *)dlinfo.dli_fname, buf);
  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");

  strcpy(saved_jvm_path, buf);
}

void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  // no prefix required, not even "_"
}

void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  // no suffix required
}

1655 ////////////////////////////////////////////////////////////////////////////////
1656 // sun.misc.Signal support
1657
1658 static volatile jint sigint_count = 0;
1659
1660 static void
1661 UserHandler(int sig, void *siginfo, void *context) {
1662 // 4511530 - sem_post is serialized and handled by the manager thread. When
1663 // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
1664 // don't want to flood the manager thread with sem_post requests.
1665 if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
1666 return;
1667
1668 // Ctrl-C is pressed during error reporting, likely because the error
1669 // handler fails to abort. Let VM die immediately.
1670 if (sig == SIGINT && is_error_reported()) {
1671 os::die();
1672 }
1673
1674 os::signal_notify(sig);
1675 }
1676
1677 void* os::user_handler() {
1678 return CAST_FROM_FN_PTR(void*, UserHandler);
1679 }
1680
1681 extern "C" {
1682 typedef void (*sa_handler_t)(int);
1683 typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
1684 }
1685
1686 void* os::signal(int signal_number, void* handler) {
1687 struct sigaction sigAct, oldSigAct;
1688
1689 sigfillset(&(sigAct.sa_mask));
1690
1691 // Do not block out synchronous signals in the signal handler.
1692 // Blocking synchronous signals only makes sense if you can really
1693 // be sure that those signals won't happen during signal handling,
1694 // when the blocking applies. Normal signal handlers are lean and
1695 // do not cause signals. But our signal handlers tend to be "risky"
1696 // - secondary SIGSEGVs, SIGILLs and SIGBUSes may and do happen.
1697 // On AIX/PASE there was a case where a SIGSEGV happened, followed
1698 // by a SIGILL, which was blocked due to the signal mask. The process
1699 // just hung forever. Better to crash from a secondary signal than to hang.
1700 sigdelset(&(sigAct.sa_mask), SIGSEGV);
1701 sigdelset(&(sigAct.sa_mask), SIGBUS);
1702 sigdelset(&(sigAct.sa_mask), SIGILL);
1703 sigdelset(&(sigAct.sa_mask), SIGFPE);
1704 sigdelset(&(sigAct.sa_mask), SIGTRAP);
1705
1706 sigAct.sa_flags = SA_RESTART|SA_SIGINFO;
1707
1708 sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
1709
1710 if (sigaction(signal_number, &sigAct, &oldSigAct)) {
1711 // -1 means registration failed
1712 return (void *)-1;
1713 }
1714
1715 return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
1716 }
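// Usage sketch (hypothetical, illustration only): installing a handler through
// os::signal() and later restoring whatever was installed before.
//
//   extern "C" void my_handler(int sig) { /* async-signal-safe work only */ }
//
//   void* old = os::signal(SIGUSR2, CAST_FROM_FN_PTR(void*, my_handler));
//   if (old == (void*)-1) {
//     // registration failed
//   }
//   // ... later:
//   os::signal(SIGUSR2, old);  // reinstate the previous handler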
1717
1718 void os::signal_raise(int signal_number) {
1719 ::raise(signal_number);
1720 }
1721
1722 //
1723 // The following code is moved from os.cpp for making this
1724 // code platform specific, which it is by its very nature.
1725 //
1726
1727 // Will be modified when max signal is changed to be dynamic
1728 int os::sigexitnum_pd() {
1729 return NSIG;
1730 }
1731
1732 // a counter for each possible signal value
1733 static volatile jint pending_signals[NSIG+1] = { 0 };
1734
1735 // POSIX handshaking semaphore.
1736 static sem_t sig_sem;
1737
1738 void os::signal_init_pd() {
1739 // Initialize signal structures
1740 ::memset((void*)pending_signals, 0, sizeof(pending_signals));
1741
1742 // Initialize signal semaphore
1743 int rc = ::sem_init(&sig_sem, 0, 0);
1744 guarantee(rc != -1, "sem_init failed");
1745 }
1746
1747 void os::signal_notify(int sig) {
1748 Atomic::inc(&pending_signals[sig]);
1749 ::sem_post(&sig_sem);
1750 }
1751
1752 static int check_pending_signals(bool wait) {
1753 Atomic::store(0, &sigint_count);
1754 for (;;) {
1755 for (int i = 0; i < NSIG + 1; i++) {
1756 jint n = pending_signals[i];
1757 if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
1758 return i;
1759 }
1760 }
1761 if (!wait) {
1762 return -1;
1763 }
1764 JavaThread *thread = JavaThread::current();
1765 ThreadBlockInVM tbivm(thread);
1766
1767 bool threadIsSuspended;
1768 do {
1769 thread->set_suspend_equivalent();
1770 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
1771
1772 ::sem_wait(&sig_sem);
1773
1774 // were we externally suspended while we were waiting?
1775 threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
1776 if (threadIsSuspended) {
1777 //
1778 // The semaphore has been incremented, but while we were waiting
1779 // another thread suspended us. We don't want to continue running
1780 // while suspended because that would surprise the thread that
1781 // suspended us.
1782 //
1783 ::sem_post(&sig_sem);
1784
1785 thread->java_suspend_self();
1786 }
1787 } while (threadIsSuspended);
1788 }
1789 }
1790
1791 int os::signal_lookup() {
1792 return check_pending_signals(false);
1793 }
1794
1795 int os::signal_wait() {
1796 return check_pending_signals(true);
1797 }
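// The two entry points above wrap a simple producer/consumer protocol:
// os::signal_notify() (producer, safe to call from a signal handler) bumps a
// per-signal counter and posts the semaphore; check_pending_signals()
// (consumer, the signal dispatcher thread) atomically claims one pending
// occurrence at a time. A minimal sketch of the same pattern, with
// hypothetical names:
//
//   // producer side:
//   Atomic::inc(&pending[sig]);
//   ::sem_post(&sem);
//
//   // consumer side:
//   ::sem_wait(&sem);
//   for (int i = 0; i < NSIG + 1; i++) {
//     jint n = pending[i];
//     // claim one occurrence of signal i, racing with other decrements
//     if (n > 0 && Atomic::cmpxchg(n - 1, &pending[i], n) == n) {
//       dispatch(i);  // hypothetical dispatch routine
//     }
//   }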
1798
1799 ////////////////////////////////////////////////////////////////////////////////
1800 // Virtual Memory
1801
1802 // AddrRange describes an immutable address range
1803 //
1804 // This is a helper class for the 'shared memory bookkeeping' below.
1805 class AddrRange {
1806 friend class ShmBkBlock;
1807
1808 char* _start;
1809 size_t _size;
1810
1811 public:
1812
1813 AddrRange(char* start, size_t size)
1814 : _start(start), _size(size)
1815 {}
1816
1817 AddrRange(const AddrRange& r)
1818 : _start(r.start()), _size(r.size())
1819 {}
1820
1821 char* start() const { return _start; }
1822 size_t size() const { return _size; }
1823 char* end() const { return _start + _size; }
1824 bool is_empty() const { return _size == 0; }
1825
1826 static AddrRange empty_range() { return AddrRange(NULL, 0); }
1827
1828 bool contains(const char* p) const {
1829 return start() <= p && end() > p;
1830 }
1831
1832 bool contains(const AddrRange& range) const {
1833 return start() <= range.start() && end() >= range.end();
1834 }
1835
1836 bool intersects(const AddrRange& range) const {
1837 return (range.start() <= start() && range.end() > start()) ||
1838 (range.start() < end() && range.end() >= end()) ||
1839 contains(range);
1840 }
1841
1842 bool is_same_range(const AddrRange& range) const {
1843 return start() == range.start() && size() == range.size();
1844 }
1845
1846 // return the closest inside range consisting of whole pages
1847 AddrRange find_closest_aligned_range(size_t pagesize) const {
1848 if (pagesize == 0 || is_empty()) {
1849 return empty_range();
1850 }
1851 char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
1852 char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
1853 if (from > to) {
1854 return empty_range();
1855 }
1856 return AddrRange(from, to - from);
1857 }
1858 };
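// Worked example for find_closest_aligned_range (illustration only): with a
// 4K pagesize and the range [0x10000010, 0x10002010), 'from' aligns up to
// 0x10001000 and 'to' aligns down to 0x10002000, so the returned inner range
// is [0x10001000, 0x10002000) - exactly one whole page. For an input range
// smaller than one page, from > to and empty_range() is returned.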
1859
1860 ////////////////////////////////////////////////////////////////////////////
1861 // shared memory bookkeeping
1862 //
1863 // the os::reserve_memory() API and friends hand out different kind of memory, depending
1864 // on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
1865 //
1866 // But these memory types have to be treated differently. For example, to uncommit
1867 // mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
1868 // disclaim64() is needed.
1869 //
1870 // Therefore we need to keep track of the allocated memory segments and their
1871 // properties.
1872
1873 // ShmBkBlock: base class for all blocks in the shared memory bookkeeping
1874 class ShmBkBlock {
1875
1876 ShmBkBlock* _next;
1877
1878 protected:
1879
1880 AddrRange _range;
1881 const size_t _pagesize;
1882 const bool _pinned;
1883
1884 public:
1885
1886 ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
1887 : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
1888
1889 assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
1890 assert(!_range.is_empty(), "invalid range");
1891 }
1892
1893 virtual void print(outputStream* st) const {
1894 st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
1895 _range.start(), _range.end(), _range.size(),
1896 _range.size() / _pagesize, describe_pagesize(_pagesize),
1897 _pinned ? "pinned" : "");
1898 }
1899
1900 enum Type { MMAP, SHMAT };
1901 virtual Type getType() = 0;
1902
1903 char* base() const { return _range.start(); }
1904 size_t size() const { return _range.size(); }
1905
1906 void setAddrRange(AddrRange range) {
1907 _range = range;
1908 }
1909
1910 bool containsAddress(const char* p) const {
1911 return _range.contains(p);
1912 }
1913
1914 bool containsRange(const char* p, size_t size) const {
1915 return _range.contains(AddrRange((char*)p, size));
1916 }
1917
1918 bool isSameRange(const char* p, size_t size) const {
1919 return _range.is_same_range(AddrRange((char*)p, size));
1920 }
1921
1922 virtual bool disclaim(char* p, size_t size) = 0;
1923 virtual bool release() = 0;
1924
1925 // blocks live in a list.
1926 ShmBkBlock* next() const { return _next; }
1927 void set_next(ShmBkBlock* blk) { _next = blk; }
1928
1929 }; // end: ShmBkBlock
1930
1931
1932 // ShmBkMappedBlock: describes a block allocated with mmap()
1933 class ShmBkMappedBlock : public ShmBkBlock {
1934 public:
1935
1936 ShmBkMappedBlock(AddrRange range)
1937 : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
1938
1939 void print(outputStream* st) const {
1940 ShmBkBlock::print(st);
1941 st->print_cr(" - mmap'ed");
1942 }
1943
1944 Type getType() {
1945 return MMAP;
1946 }
1947
1948 bool disclaim(char* p, size_t size) {
1949
1950 AddrRange r(p, size);
1951
1952 guarantee(_range.contains(r), "invalid disclaim");
1953
1954 // only disclaim whole ranges.
1955 const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
1956 if (r2.is_empty()) {
1957 return true;
1958 }
1959
1960 const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
1961
1962 if (rc != 0) {
1963 warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
1964 }
1965
1966 return rc == 0;
1967 }
1968
1969 bool release() {
1970 // mmap'ed blocks are released using munmap
1971 if (::munmap(_range.start(), _range.size()) != 0) {
1972 warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
1973 return false;
1974 }
1975 return true;
1976 }
1977 }; // end: ShmBkMappedBlock
1978
1979 // ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
1980 class ShmBkShmatedBlock : public ShmBkBlock {
1981 public:
1982
1983 ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
1984 : ShmBkBlock(range, pagesize, pinned) {}
1985
1986 void print(outputStream* st) const {
1987 ShmBkBlock::print(st);
1988 st->print_cr(" - shmat'ed");
1989 }
1990
1991 Type getType() {
1992 return SHMAT;
1993 }
1994
1995 bool disclaim(char* p, size_t size) {
1996
1997 AddrRange r(p, size);
1998
1999 if (_pinned) {
2000 return true;
2001 }
2002
2003 // shmat'ed blocks are disclaimed using disclaim64
2004 guarantee(_range.contains(r), "invalid disclaim");
2005
2006 // only disclaim whole ranges.
2007 const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
2008 if (r2.is_empty()) {
2009 return true;
2010 }
2011
2012 const bool rc = my_disclaim64(r2.start(), r2.size());
2013
2014 if (Verbose && !rc) {
2015 warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
2016 }
2017
2018 return rc;
2019 }
2020
2021 bool release() {
2022 bool rc = false;
2023 if (::shmdt(_range.start()) != 0) {
2024 warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
2025 } else {
2026 rc = true;
2027 }
2028 return rc;
2029 }
2030
2031 }; // end: ShmBkShmatedBlock
2032
2033 static ShmBkBlock* g_shmbk_list = NULL;
2034 static volatile jint g_shmbk_table_lock = 0;
2035
2036 // keep some usage statistics
2037 static struct {
2038 int nodes; // number of nodes in list
2039 size_t bytes; // reserved - not committed - bytes.
2040 int reserves; // how often reserve was called
2041 int lookups; // how often a lookup was made
2042 } g_shmbk_stats = { 0, 0, 0, 0 };
2043
2044 // add information about a shared memory segment to the bookkeeping
2045 static void shmbk_register(ShmBkBlock* p_block) {
2046 guarantee(p_block, "logic error");
2047 p_block->set_next(g_shmbk_list);
2048 g_shmbk_list = p_block;
2049 g_shmbk_stats.reserves ++;
2050 g_shmbk_stats.bytes += p_block->size();
2051 g_shmbk_stats.nodes ++;
2052 }
2053
2054 // remove information about a shared memory segment by its starting address
2055 static void shmbk_unregister(ShmBkBlock* p_block) {
2056 ShmBkBlock* p = g_shmbk_list;
2057 ShmBkBlock* prev = NULL;
2058 while (p) {
2059 if (p == p_block) {
2060 if (prev) {
2061 prev->set_next(p->next());
2062 } else {
2063 g_shmbk_list = p->next();
2064 }
2065 g_shmbk_stats.nodes --;
2066 g_shmbk_stats.bytes -= p->size();
2067 return;
2068 }
2069 prev = p;
2070 p = p->next();
2071 }
2072 assert(false, "should not happen");
2073 }
2074
2075 // given a pointer, return shared memory bookkeeping record for the segment it points into
2076 // using the returned block info must happen under lock protection
2077 static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
2078 g_shmbk_stats.lookups ++;
2079 ShmBkBlock* p = g_shmbk_list;
2080 while (p) {
2081 if (p->containsAddress(addr)) {
2082 return p;
2083 }
2084 p = p->next();
2085 }
2086 return NULL;
2087 }
2088
2089 // dump all information about all memory segments allocated with os::reserve_memory()
2090 void shmbk_dump_info() {
2091 tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
2092 "total reserves: %d total lookups: %d)",
2093 g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
2094 const ShmBkBlock* p = g_shmbk_list;
2095 int i = 0;
2096 while (p) {
2097 p->print(tty);
2098 p = p->next();
2099 i ++;
2100 }
2101 }
2102
2103 #define LOCK_SHMBK { ThreadCritical _LOCK_SHMBK;
2104 #define UNLOCK_SHMBK }
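// Usage pattern for the macros above: they open and close a C++ scope holding
// a ThreadCritical, so the lock is dropped whenever the scope is left - also
// via an early return, which the commit/uncommit paths below rely on:
//
//   LOCK_SHMBK
//     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
//     // ... use block only while inside the scope ...
//   UNLOCK_SHMBK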
2105
2106 // End: shared memory bookkeeping
2107 ////////////////////////////////////////////////////////////////////////////////////////////////////
2108
2109 int os::vm_page_size() {
2110 // Seems redundant as all get out
2111 assert(os::Aix::page_size() != -1, "must call os::init");
2112 return os::Aix::page_size();
2113 }
2114
2115 // Aix allocates memory by pages.
2116 int os::vm_allocation_granularity() {
2117 assert(os::Aix::page_size() != -1, "must call os::init");
2118 return os::Aix::page_size();
2119 }
2120
2121 int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
2122
2123 // Commit is a noop. There is no explicit commit
2124 // needed on AIX. Memory is committed when touched.
2125 //
2126 // Debug : check address range for validity
2127 #ifdef ASSERT
2128 LOCK_SHMBK
2129 ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2130 if (!block) {
2131 fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
2132 shmbk_dump_info();
2133 assert(false, "invalid pointer");
2134 return false;
2135 } else if (!block->containsRange(addr, size)) {
2136 fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
2137 shmbk_dump_info();
2138 assert(false, "invalid range");
2139 return false;
2140 }
2141 UNLOCK_SHMBK
2142 #endif // ASSERT
2143
2144 return 0;
2145 }
2146
2147 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
2148 return os::Aix::commit_memory_impl(addr, size, exec) == 0;
2149 }
2150
2151 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
2152 const char* mesg) {
2153 assert(mesg != NULL, "mesg must be specified");
2154 os::Aix::commit_memory_impl(addr, size, exec);
2155 }
2156
2157 int os::Aix::commit_memory_impl(char* addr, size_t size,
2158 size_t alignment_hint, bool exec) {
2159 return os::Aix::commit_memory_impl(addr, size, exec);
2160 }
2161
2162 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
2163 bool exec) {
2164 return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
2165 }
2166
2167 void os::pd_commit_memory_or_exit(char* addr, size_t size,
2168 size_t alignment_hint, bool exec,
2169 const char* mesg) {
2170 os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
2171 }
2172
2173 bool os::pd_uncommit_memory(char* addr, size_t size) {
2174
2175 // Delegate to ShmBkBlock class which knows how to uncommit its memory.
2176
2177 bool rc = false;
2178 LOCK_SHMBK
2179 ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2180 if (!block) {
2181 fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2182 shmbk_dump_info();
2183 assert(false, "invalid pointer");
2184 return false;
2185 } else if (!block->containsRange(addr, size)) {
2186 fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
2187 shmbk_dump_info();
2188 assert(false, "invalid range");
2189 return false;
2190 }
2191 rc = block->disclaim(addr, size);
2192 UNLOCK_SHMBK
2193
2194 if (Verbose && !rc) {
2195 warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
2196 }
2197 return rc;
2198 }
2199
2200 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
2201 return os::guard_memory(addr, size);
2202 }
2203
2204 bool os::remove_stack_guard_pages(char* addr, size_t size) {
2205 return os::unguard_memory(addr, size);
2206 }
2207
2208 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
2209 }
2210
2211 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
2212 }
2213
2214 void os::numa_make_global(char *addr, size_t bytes) {
2215 }
2216
2217 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
2218 }
2219
2220 bool os::numa_topology_changed() {
2221 return false;
2222 }
2223
2224 size_t os::numa_get_groups_num() {
2225 return 1;
2226 }
2227
2228 int os::numa_get_group_id() {
2229 return 0;
2230 }
2231
2232 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
2233 if (size > 0) {
2234 ids[0] = 0;
2235 return 1;
2236 }
2237 return 0;
2238 }
2239
2240 bool os::get_page_info(char *start, page_info* info) {
2241 return false;
2242 }
2243
2244 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
2245 return end;
2246 }
2247
2248 // Flags for reserve_shmatted_memory:
2249 #define RESSHM_WISHADDR_OR_FAIL 1
2250 #define RESSHM_TRY_16M_PAGES 2
2251 #define RESSHM_16M_PAGES_OR_FAIL 4
2252
2253 // Result of reserve_shmatted_memory:
2254 struct shmatted_memory_info_t {
2255 char* addr;
2256 size_t pagesize;
2257 bool pinned;
2258 };
2259
2260 // Reserve a section of shmatted memory.
2261 // params:
2262 // bytes [in]: size of memory, in bytes
2263 // requested_addr [in]: wish address.
2264 // NULL = no wish.
2265 // If RESSHM_WISHADDR_OR_FAIL is set in flags and the wish address cannot
2266 // be obtained, the function will fail. Otherwise the wish address is treated as a hint and
2267 // another pointer is returned.
2268 // flags [in]: some flags. Valid flags are:
2269 // RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
2270 // RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
2271 // (requires UseLargePages and Use16MPages)
2272 // RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
2273 // Otherwise any other page size will do.
2274 // p_info [out] : holds information about the created shared memory segment.
2275 static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
2276
2277 assert(p_info, "parameter error");
2278
2279 // init output struct.
2280 p_info->addr = NULL;
2281
2282 // We should also not be here for EXTSHM=ON.
2283 if (os::Aix::extshm()) {
2284 ShouldNotReachHere();
2285 }
2286
2287 // extract flags. sanity checks.
2288 const bool wishaddr_or_fail =
2289 flags & RESSHM_WISHADDR_OR_FAIL;
2290 const bool try_16M_pages =
2291 flags & RESSHM_TRY_16M_PAGES;
2292 const bool f16M_pages_or_fail =
2293 flags & RESSHM_16M_PAGES_OR_FAIL;
2294
2295 // First check: if a wish address is given but is not aligned to a segment boundary,
2296 // shmat will fail anyway, so save some cycles by failing right away
2297 if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
2298 if (wishaddr_or_fail) {
2299 return false;
2300 } else {
2301 requested_addr = NULL;
2302 }
2303 }
2304
2305 char* addr = NULL;
2306
2307 // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
2308 // pagesize dynamically.
2309 const size_t size = align_size_up(bytes, SIZE_16M);
2310
2311 // reserve the shared segment
2312 int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
2313 if (shmid == -1) {
2314 warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
2315 return false;
2316 }
2317
2318 // Important note:
2319 // It is very important that we, upon leaving this function, do not leave a shm segment alive.
2320 // We must remove it from the system right after attaching it. System V shm segments are global and
2321 // survive the process.
2322 // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
2323
2324 // try forcing the page size
2325 size_t pagesize = -1; // unknown so far
2326
2327 if (UseLargePages) {
2328
2329 struct shmid_ds shmbuf;
2330 memset(&shmbuf, 0, sizeof(shmbuf));
2331
2332 // First, try to take from 16M page pool if...
2333 if (os::Aix::can_use_16M_pages() // we can ...
2334 && Use16MPages // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
2335 && try_16M_pages) { // caller wants us to.
2336 shmbuf.shm_pagesize = SIZE_16M;
2337 if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2338 pagesize = SIZE_16M;
2339 } else {
2340 warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
2341 size / SIZE_16M, errno);
2342 if (f16M_pages_or_fail) {
2343 goto cleanup_shm;
2344 }
2345 }
2346 }
2347
2348 // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
2349 // because the 64K page pool may also be exhausted.
2350 if (pagesize == -1) {
2351 shmbuf.shm_pagesize = SIZE_64K;
2352 if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
2353 pagesize = SIZE_64K;
2354 } else {
2355 warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
2356 size / SIZE_64K, errno);
2357 // Give up here. Leave pagesize at -1 - later, after attaching, we will query the
2358 // real page size of the attached memory. (In theory, it may be something different
2359 // from 4K if LDR_CNTRL SHM_PSIZE is set.)
2360 }
2361 }
2362 }
2363
2364 // sanity point
2365 assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
2366
2367 // Now attach the shared segment.
2368 addr = (char*) shmat(shmid, requested_addr, 0);
2369 if (addr == (char*)-1) {
2370 // How to handle attach failure:
2371 // If it failed for a specific wish address, tolerate this: in that case, if wish address was
2372 // mandatory, fail, if not, retry anywhere.
2373 // If it failed for any other reason, treat that as fatal error.
2374 addr = NULL;
2375 if (requested_addr) {
2376 if (wishaddr_or_fail) {
2377 goto cleanup_shm;
2378 } else {
2379 addr = (char*) shmat(shmid, NULL, 0);
2380 if (addr == (char*)-1) { // fatal
2381 addr = NULL;
2382 warning("shmat failed (errno: %d)", errno);
2383 goto cleanup_shm;
2384 }
2385 }
2386 } else { // fatal
2387 addr = NULL;
2388 warning("shmat failed (errno: %d)", errno);
2389 goto cleanup_shm;
2390 }
2391 }
2392
2393 // sanity point
2394 assert(addr && addr != (char*) -1, "wrong address");
2395
2396 // After a successful attach, remove the segment - right away.
2397 if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2398 warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2399 guarantee(false, "failed to remove shared memory segment!");
2400 }
2401 shmid = -1;
2402
2403 // query the real page size. In case setting the page size did not work (see above), the system
2404 // may have given us something other than 4K (LDR_CNTRL).
2405 {
2406 const size_t real_pagesize = os::Aix::query_pagesize(addr);
2407 if (pagesize != -1) {
2408 assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
2409 } else {
2410 pagesize = real_pagesize;
2411 }
2412 }
2413
2414 // Now register the reserved block with internal book keeping.
2415 LOCK_SHMBK
2416 const bool pinned = (pagesize >= SIZE_16M);
2417 ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
2418 assert(p_block, "");
2419 shmbk_register(p_block);
2420 UNLOCK_SHMBK
2421
2422 cleanup_shm:
2423
2424 // if we have not done so yet, remove the shared memory segment. This is very important.
2425 if (shmid != -1) {
2426 if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
2427 warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
2428 guarantee(false, "failed to remove shared memory segment!");
2429 }
2430 shmid = -1;
2431 }
2432
2433 // trace
2434 if (Verbose && !addr) {
2435 if (requested_addr != NULL) {
2436 warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
2437 } else {
2438 warning("failed to shm-allocate 0x%llX bytes at any address.", size);
2439 }
2440 }
2441
2442 // hand info to caller
2443 if (addr) {
2444 p_info->addr = addr;
2445 p_info->pagesize = pagesize;
2446 p_info->pinned = (pagesize == SIZE_16M);
2447 }
2448
2449 // sanity test:
2450 if (requested_addr && addr && wishaddr_or_fail) {
2451 guarantee(addr == requested_addr, "shmat error");
2452 }
2453
2454 // just one more test to really make sure we have no dangling shm segments.
2455 guarantee(shmid == -1, "dangling shm segments");
2456
2457 return addr != NULL;
2458
2459 } // end: reserve_shmatted_memory
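// Usage sketch (hypothetical values): reserve a segment at a fixed address,
// trying the 16M page pool but tolerating fallback to smaller pages:
//
//   shmatted_memory_info_t info;
//   const int flags = RESSHM_WISHADDR_OR_FAIL | RESSHM_TRY_16M_PAGES;
//   if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
//     // info.addr == requested_addr; info.pagesize is 16M, 64K, or whatever
//     // LDR_CNTRL SHM_PSIZE yielded; info.pinned is true for 16M pages.
//   }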
2460
2461 // Reserve memory using mmap. Analogous to reserve_shmatted_memory();
2462 // returns NULL in case of an error.
2463 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
2464
2465 // If a wish address is given but not aligned to a 4K page boundary, mmap will fail.
2466 if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
2467 warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
2468 return NULL;
2469 }
2470
2471 const size_t size = align_size_up(bytes, SIZE_4K);
2472
2473 // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
2474 // msync(MS_INVALIDATE) (see os::uncommit_memory)
2475 int flags = MAP_ANONYMOUS | MAP_SHARED;
2476
2477 // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
2478 // it means if a wish address is given but MAP_FIXED is not set.
2479 //
2480 // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
2481 // clobbers the address range, which is probably not what the caller wants. That's
2482 // why I assert here (again) that the SPEC1170 compat mode is off.
2483 // If we want to be able to run under SPEC1170, we have to do some porting and
2484 // testing.
2485 if (requested_addr != NULL) {
2486 assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
2487 flags |= MAP_FIXED;
2488 }
2489
2490 char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
2491
2492 if (addr == MAP_FAILED) {
2493 // attach failed: tolerate for specific wish addresses. Not being able to attach
2494 // anywhere is a fatal error.
2495 if (requested_addr == NULL) {
2496 // It's ok to fail here if the machine does not have enough memory.
2497 warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
2498 }
2499 addr = NULL;
2500 goto cleanup_mmap;
2501 }
2502
2503 // If we did request a specific address and that address was not available, fail.
2504 if (addr && requested_addr) {
2505 guarantee(addr == requested_addr, "unexpected");
2506 }
2507
2508 // register this mmap'ed segment with book keeping
2509 LOCK_SHMBK
2510 ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
2511 assert(p_block, "");
2512 shmbk_register(p_block);
2513 UNLOCK_SHMBK
2514
2515 cleanup_mmap:
2516
2517 // trace
2518 if (Verbose) {
2519 if (addr) {
2520 fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
2521 }
2522 else {
2523 if (requested_addr != NULL) {
2524 warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
2525 } else {
2526 warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
2527 }
2528 }
2529 }
2530
2531 return addr;
2532
2533 } // end: reserve_mmaped_memory
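// Usage sketch: an anonymous reservation anywhere in the address space. A NULL
// wish address lets mmap place the mapping freely; the result is always
// 4K-paged and never pinned (see ShmBkMappedBlock above).
//
//   char* p = reserve_mmaped_memory(bytes, NULL);
//   if (p != NULL) {
//     // uncommit later via msync(MS_INVALIDATE), release via munmap
//   }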
2534
2535 // Reserves and attaches a shared memory segment.
2536 // Will assert if a wish address is given and could not be obtained.
2537 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
2538 return os::attempt_reserve_memory_at(bytes, requested_addr);
2539 }
2540
2541 bool os::pd_release_memory(char* addr, size_t size) {
2542
2543 // delegate to ShmBkBlock class which knows how to uncommit its memory.
2544
2545 bool rc = false;
2546 LOCK_SHMBK
2547 ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
2548 if (!block) {
2549 fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
2550 shmbk_dump_info();
2551 assert(false, "invalid pointer");
2552 return false;
2553 }
2554 else if (!block->isSameRange(addr, size)) {
2555 if (block->getType() == ShmBkBlock::MMAP) {
2556 // Release only the same range, or the beginning or the end of a range.
2557 if (block->base() == addr && size < block->size()) {
2558 ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
2559 assert(b, "");
2560 shmbk_register(b);
2561 block->setAddrRange(AddrRange(addr, size));
2562 }
2563 else if (addr > block->base() && addr + size == block->base() + block->size()) {
2564 ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
2565 assert(b, "");
2566 shmbk_register(b);
2567 block->setAddrRange(AddrRange(addr, size));
2568 }
2569 else {
2570 fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
2571 shmbk_dump_info();
2572 assert(false, "invalid mmap range");
2573 return false;
2574 }
2575 }
2576 else {
2577 // Release only the same range. No partial release allowed.
2578 // Soften the requirement a bit, because the caller may think it owns a smaller size
2579 // than the block actually has, due to alignment etc.
2580 if (block->base() != addr || block->size() < size) {
2581 fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
2582 shmbk_dump_info();
2583 assert(false, "invalid shmget range");
2584 return false;
2585 }
2586 }
2587 }
2588 rc = block->release();
2589 assert(rc, "release failed");
2590 // remove block from bookkeeping
2591 shmbk_unregister(block);
2592 delete block;
2593 UNLOCK_SHMBK
2594
2595 if (!rc) {
2596 warning("failed to released %lu bytes at 0x%p", size, addr);
2597 }
2598
2599 return rc;
2600 }
2601
2602 static bool checked_mprotect(char* addr, size_t size, int prot) {
2603
2604 // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
2605 // not tell me if protection failed when trying to protect an un-protectable range.
2606 //
2607 // This means if the memory was allocated using shmget/shmat, protection won't work
2608 // but mprotect will still return 0:
2609 //
2610 // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
2611
2612 bool rc = (::mprotect(addr, size, prot) == 0);
2613
2614 if (!rc) {
2615 const char* const s_errno = strerror(errno);
2616 warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
2617 return false;
2618 }
2619
2620 // mprotect success check
2621 //
2622 // Mprotect said it changed the protection but can I believe it?
2623 //
2624 // To be sure I need to check the protection afterwards. Try to
2625 // read from protected memory and check whether that causes a segfault.
2626 //
2627 if (!os::Aix::xpg_sus_mode()) {
2628
2629 if (StubRoutines::SafeFetch32_stub()) {
2630
2631 const bool read_protected =
2632 (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
2633 SafeFetch32((int*)addr, 0x76543210) == 0x76543210);
2634
2635 if (prot & PROT_READ) {
2636 rc = !read_protected;
2637 } else {
2638 rc = read_protected;
2639 }
2640 }
2641 }
2642 if (!rc) {
2643 assert(false, "mprotect failed.");
2644 }
2645 return rc;
2646 }
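// Worked example of the verification step above (illustration only): after
// checked_mprotect(addr, size, PROT_NONE), both probes
// SafeFetch32((int*)addr, 0x12345678) and SafeFetch32((int*)addr, 0x76543210)
// must come back with their respective default values - only then is the page
// really unreadable and the protection considered to have taken effect.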
2647
2648 // Set protections specified
2649 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
2650 unsigned int p = 0;
2651 switch (prot) {
2652 case MEM_PROT_NONE: p = PROT_NONE; break;
2653 case MEM_PROT_READ: p = PROT_READ; break;
2654 case MEM_PROT_RW: p = PROT_READ|PROT_WRITE; break;
2655 case MEM_PROT_RWX: p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
2656 default:
2657 ShouldNotReachHere();
2658 }
2659 // is_committed is unused.
2660 return checked_mprotect(addr, size, p);
2661 }
2662
2663 bool os::guard_memory(char* addr, size_t size) {
2664 return checked_mprotect(addr, size, PROT_NONE);
2665 }
2666
2667 bool os::unguard_memory(char* addr, size_t size) {
2668 return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
2669 }
2670
2671 // Large page support
2672
2673 static size_t _large_page_size = 0;
2674
2675 // Enable large page support if OS allows that.
2676 void os::large_page_init() {
2677
2678 // Note: os::Aix::query_multipage_support must run first.
2679
2680 if (!UseLargePages) {
2681 return;
2682 }
2683
2684 if (!Aix::can_use_64K_pages()) {
2685 assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
2686 UseLargePages = false;
2687 return;
2688 }
2689
2690 if (!Aix::can_use_16M_pages() && Use16MPages) {
2691 fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
2692 " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
2693 }
2694
2695 // Do not report 16M page alignment as part of os::_page_sizes if we are
2696 // explicitly forbidden from using 16M pages. Doing so would increase the
2697 // alignment the garbage collector calculates with, slightly increasing
2698 // heap usage. We should only pay for 16M alignment if we really want to
2699 // use 16M pages.
2700 if (Use16MPages && Aix::can_use_16M_pages()) {
2701 _large_page_size = SIZE_16M;
2702 _page_sizes[0] = SIZE_16M;
2703 _page_sizes[1] = SIZE_64K;
2704 _page_sizes[2] = SIZE_4K;
2705 _page_sizes[3] = 0;
2706 } else if (Aix::can_use_64K_pages()) {
2707 _large_page_size = SIZE_64K;
2708 _page_sizes[0] = SIZE_64K;
2709 _page_sizes[1] = SIZE_4K;
2710 _page_sizes[2] = 0;
2711 }
2712
2713 if (Verbose) {
2714 ("Default large page size is 0x%llX.", _large_page_size);
2715 }
2716 } // end: os::large_page_init()
2717
2718 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
2719 // "exec" is passed in but not used. Creating the shared image for
2720 // the code cache doesn't have an SHM_X executable permission to check.
2721 Unimplemented();
2722 return 0;
2723 }
2724
2725 bool os::release_memory_special(char* base, size_t bytes) {
2726 // detaching the SHM segment will also delete it, see reserve_memory_special()
2727 Unimplemented();
2728 return false;
2729 }
2730
2731 size_t os::large_page_size() {
2732 return _large_page_size;
2733 }
2734
2735 bool os::can_commit_large_page_memory() {
2736 // Well, sadly we cannot commit anything at all (see comment in
2737 // os::commit_memory), but we claim to, so we can make use of large pages.
2738 return true;
2739 }
2740
2741 bool os::can_execute_large_page_memory() {
2742 // We can do that
2743 return true;
2744 }
2745
2746 // Reserve memory at an arbitrary address, only if that area is
2747 // available (and not reserved for something else).
2748 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
2749
2750 bool use_mmap = false;
2751
2752 // mmap: smaller graining, no large page support
2753 // shm: large graining (256M), large page support, limited number of shm segments
2754 //
2755 // Prefer mmap wherever we either do not need large page support or have OS limits
2756
2757 if (!UseLargePages || bytes < SIZE_16M) {
2758 use_mmap = true;
2759 }
2760
2761 char* addr = NULL;
2762 if (use_mmap) {
2763 addr = reserve_mmaped_memory(bytes, requested_addr);
2764 } else {
2765 // shmat: wish address is mandatory, and do not try 16M pages here.
2766 shmatted_memory_info_t info;
2767 const int flags = RESSHM_WISHADDR_OR_FAIL;
2768 if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
2769 addr = info.addr;
2770 }
2771 }
2772
2773 return addr;
2774 }
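// Examples for the decision above (hypothetical sizes):
//   - a 4M request, or any request with -XX:-UseLargePages -> mmap
//     (4K pages, fine graining)
//   - a 64M request with -XX:+UseLargePages -> shmat (64K/16M pages possible,
//     but coarse 256M graining and a limited number of segments)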
2775
2776 size_t os::read(int fd, void *buf, unsigned int nBytes) {
2777 return ::read(fd, buf, nBytes);
2778 }
2779
2780 #define NANOSECS_PER_MILLISEC 1000000
2781
2782 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
2783 assert(thread == Thread::current(), "thread consistency check");
2784
2785 // Prevent nasty overflow in deadline calculation
2786 // by handling long sleeps similar to solaris or windows.
2787 const jlong limit = INT_MAX;
2788 int result;
2789 while (millis > limit) {
2790 if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
2791 return result;
2792 }
2793 millis -= limit;
2794 }
2795
2796 ParkEvent * const slp = thread->_SleepEvent;
2797 slp->reset();
2798 OrderAccess::fence();
2799
2800 if (interruptible) {
2801 jlong prevtime = javaTimeNanos();
2802
2803 // Prevent precision loss and too long sleeps
2804 jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
2805
2806 for (;;) {
2807 if (os::is_interrupted(thread, true)) {
2808 return OS_INTRPT;
2809 }
2810
2811 jlong newtime = javaTimeNanos();
2812
2813 assert(newtime >= prevtime, "time moving backwards");
2814 // Doing prevtime and newtime in microseconds doesn't help precision,
2815 // and trying to round up to avoid lost milliseconds can result in a
2816 // too-short delay.
2817 millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
2818
2819 if (millis <= 0) {
2820 return OS_OK;
2821 }
2822
2823 // Stop sleeping if we passed the deadline
2824 if (newtime >= deadline) {
2825 return OS_OK;
2826 }
2827
2828 prevtime = newtime;
2829
2830 {
2831 assert(thread->is_Java_thread(), "sanity check");
2832 JavaThread *jt = (JavaThread *) thread;
2833 ThreadBlockInVM tbivm(jt);
2834 OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
2835
2836 jt->set_suspend_equivalent();
2837
2838 slp->park(millis);
2839
2840 // were we externally suspended while we were waiting?
2841 jt->check_and_wait_while_suspended();
2842 }
2843 }
2844 } else {
2845 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
2846 jlong prevtime = javaTimeNanos();
2847
2848 // Prevent precision loss and too long sleeps
2849 jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
2850
2851 for (;;) {
2852 // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
2853 // the 1st iteration ...
2854 jlong newtime = javaTimeNanos();
2855
2856 if (newtime - prevtime < 0) {
2857 // time moving backwards, should only happen if no monotonic clock
2858 // not a guarantee() because JVM should not abort on kernel/glibc bugs
2859 // - HS14 Commented out as not implemented.
2860 // - TODO Maybe we should implement it?
2861 //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
2862 } else {
2863 millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
2864 }
2865
2866 if (millis <= 0) break;
2867
2868 if (newtime >= deadline) {
2869 break;
2870 }
2871
2872 prevtime = newtime;
2873 slp->park(millis);
2874 }
2875 return OS_OK;
2876 }
2877 }
2878
2879 void os::naked_short_sleep(jlong ms) {
2880 struct timespec req;
2881
2882 assert(ms < 1000, "Uninterruptible sleep, short time use only");
2883 req.tv_sec = 0;
2884 if (ms > 0) {
2885 req.tv_nsec = (ms % 1000) * 1000000;
2886 }
2887 else {
2888 req.tv_nsec = 1;
2889 }
2890
2891 nanosleep(&req, NULL);
2892
2893 return;
2894 }
2895
2896 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
2897 void os::infinite_sleep() {
2898 while (true) { // sleep forever ...
2899 ::sleep(100); // ... 100 seconds at a time
2900 }
2901 }
2902
2903 // Used to convert frequent JVM_Yield() to nops
2904 bool os::dont_yield() {
2905 return DontYieldALot;
2906 }
2907
2908 void os::yield() {
2909 sched_yield();
2910 }
2911
2912 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
2913
2914 void os::yield_all(int attempts) {
2915 // Yields to all threads, including threads with lower priorities
2916 // Threads on Linux all have the same priority. The Solaris style
2917 // os::yield_all() with nanosleep(1ms) is not necessary.
2918 sched_yield();
2919 }
2920
2921 // Called from the tight loops to possibly influence time-sharing heuristics
2922 void os::loop_breaker(int attempts) {
2923 os::yield_all(attempts);
2924 }
2925
2926 ////////////////////////////////////////////////////////////////////////////////
2927 // thread priority support
2928
2929 // From AIX manpage to pthread_setschedparam
2930 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
2931 // topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
2932 //
2933 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
2934 // range from 40 to 80, where 40 is the least favored priority and 80
2935 // is the most favored."
2936 //
2937 // (Actually, I doubt this even has an impact on AIX, as we do kernel
2938 // scheduling there; however, this still leaves iSeries.)
2939 //
2940 // We use the same values for AIX and PASE.
2941 int os::java_to_os_priority[CriticalPriority + 1] = {
2942 54, // 0 Entry should never be used
2943
2944 55, // 1 MinPriority
2945 55, // 2
2946 56, // 3
2947
2948 56, // 4
2949 57, // 5 NormPriority
2950 57, // 6
2951
2952 58, // 7
2953 58, // 8
2954 59, // 9 NearMaxPriority
2955
2956 60, // 10 MaxPriority
2957
2958 60 // 11 CriticalPriority
2959 };
2960
2961 OSReturn os::set_native_priority(Thread* thread, int newpri) {
2962 if (!UseThreadPriorities) return OS_OK;
2963 pthread_t thr = thread->osthread()->pthread_id();
2964 int policy = SCHED_OTHER;
2965 struct sched_param param;
2966 param.sched_priority = newpri;
2967 int ret = pthread_setschedparam(thr, policy, &param);
2968
2969 if (Verbose) {
2970 if (ret == 0) {
2971 fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
2972 } else {
2973 fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
2974 (int)thr, newpri, ret, strerror(ret));
2975 }
2976 }
2977 return (ret == 0) ? OS_OK : OS_ERR;
2978 }
2979
2980 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
2981 if (!UseThreadPriorities) {
2982 *priority_ptr = java_to_os_priority[NormPriority];
2983 return OS_OK;
2984 }
2985 pthread_t thr = thread->osthread()->pthread_id();
2986 int policy = SCHED_OTHER;
2987 struct sched_param param;
2988 int ret = pthread_getschedparam(thr, &policy, &param);
2989 *priority_ptr = param.sched_priority;
2990
2991 return (ret == 0) ? OS_OK : OS_ERR;
2992 }
2993
2994 // Hint to the underlying OS that a task switch would not be good.
2995 // Void return because it's a hint and can fail.
2996 void os::hint_no_preempt() {}
2997
2998 ////////////////////////////////////////////////////////////////////////////////
2999 // suspend/resume support
3000
3001 // the low-level signal-based suspend/resume support is a remnant from the
3002 // old VM-suspension that used to be for java-suspension, safepoints etc,
3003 // within hotspot. Now there is a single use-case for this:
3004 // - calling get_thread_pc() on the VMThread by the flat-profiler task
3005 // that runs in the watcher thread.
3006 // The remaining code is greatly simplified from the more general suspension
3007 // code that used to be used.
3008 //
3009 // The protocol is quite simple:
3010 // - suspend:
3011 // - sends a signal to the target thread
3012 // - polls the suspend state of the osthread using a yield loop
3013 // - target thread signal handler (SR_handler) sets suspend state
3014 // and blocks in sigsuspend until continued
3015 // - resume:
3016 // - sets target osthread state to continue
3017 // - sends signal to end the sigsuspend loop in the SR_handler
3018 //
3019 // Note that the SR_lock plays no role in this suspend/resume protocol.
3020 //
3021
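// State transitions driven by the code below (sketch; states and accessors
// are those of os::SuspendResume as used in do_suspend/do_resume/SR_handler):
//
//   SR_RUNNING         --do_suspend: request_suspend()--> SR_SUSPEND_REQUEST
//   SR_SUSPEND_REQUEST --SR_handler: suspended()--------> SR_SUSPENDED
//   SR_SUSPENDED       --do_resume: request_wakeup()----> SR_WAKEUP_REQUEST
//   SR_WAKEUP_REQUEST  --SR_handler: running()----------> SR_RUNNING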
3022 static void resume_clear_context(OSThread *osthread) {
3023 osthread->set_ucontext(NULL);
3024 osthread->set_siginfo(NULL);
3025 }
3026
3027 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
3028 osthread->set_ucontext(context);
3029 osthread->set_siginfo(siginfo);
3030 }
3031
3032 //
3033 // Handler function invoked when a thread's execution is suspended or
3034 // resumed. We have to be careful that only async-safe functions are
3035 // called here (Note: most pthread functions are not async safe and
3036 // should be avoided.)
3037 //
3038 // Note: sigwait() is a more natural fit than sigsuspend() from an
3039 // interface point of view, but sigwait() prevents the signal handler
3040 // from being run. libpthread would get very confused by not having
3041 // its signal handlers run and prevents sigwait()'s use with the
3042 // mutex granting signal.
3043 //
3044 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
3045 //
3046 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
3047 // Save and restore errno to avoid confusing native code with EINTR
3048 // after sigsuspend.
3049 int old_errno = errno;
3050
3051 Thread* thread = Thread::current();
3052 OSThread* osthread = thread->osthread();
3053 assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
3054
3055 os::SuspendResume::State current = osthread->sr.state();
3056 if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
3057 suspend_save_context(osthread, siginfo, context);
3058
3059 // attempt to switch the state, we assume we had a SUSPEND_REQUEST
3060 os::SuspendResume::State state = osthread->sr.suspended();
3061 if (state == os::SuspendResume::SR_SUSPENDED) {
3062 sigset_t suspend_set; // signals for sigsuspend()
3063
3064 // get current set of blocked signals and unblock resume signal
3065 pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
3066 sigdelset(&suspend_set, SR_signum);
3067
3068 // wait here until we are resumed
3069 while (1) {
3070 sigsuspend(&suspend_set);
3071
3072 os::SuspendResume::State result = osthread->sr.running();
3073 if (result == os::SuspendResume::SR_RUNNING) {
3074 break;
3075 }
3076 }
3077
3078 } else if (state == os::SuspendResume::SR_RUNNING) {
3079 // request was cancelled, continue
3080 } else {
3081 ShouldNotReachHere();
3082 }
3083
3084 resume_clear_context(osthread);
3085 } else if (current == os::SuspendResume::SR_RUNNING) {
3086 // request was cancelled, continue
3087 } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
3088 // ignore
3089 } else {
3090 ShouldNotReachHere();
3091 }
3092
3093 errno = old_errno;
3094 }
3095
3096
3097 static int SR_initialize() {
3098 struct sigaction act;
3099 char *s;
3100 // Get signal number to use for suspend/resume
3101 if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
3102 int sig = ::strtol(s, 0, 10);
3103 if (sig > 0 && sig < NSIG) {
3104 SR_signum = sig;
3105 }
3106 }
3107
3108 assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
3109 "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
3110
3111 sigemptyset(&SR_sigset);
3112 sigaddset(&SR_sigset, SR_signum);
3113
3114 // Set up signal handler for suspend/resume.
3115 act.sa_flags = SA_RESTART|SA_SIGINFO;
3116 act.sa_handler = (void (*)(int)) SR_handler;
3117
3118 // SR_signum is blocked by default.
3119 // 4528190 - We also need to block pthread restart signal (32 on all
3120 // supported Linux platforms). Note that LinuxThreads need to block
3121 // this signal for all threads to work properly. So we don't have
3122 // to use a hard-coded signal number when setting up the mask.
3123 pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
3124
3125 if (sigaction(SR_signum, &act, 0) == -1) {
3126 return -1;
3127 }
3128
3129 // Save signal flag
3130 os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
3131 return 0;
3132 }
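// Example (hypothetical value): if a native library in the process already
// uses the default suspend/resume signal, the VM can be started with the
// environment variable set to move it, e.g.
//
//   _JAVA_SR_SIGNUM=40 java ...
//
// subject to the assertion above that the number exceeds SIGSEGV and SIGBUS.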
3133
3134 static int SR_finalize() {
3135 return 0;
3136 }
3137
3138 static int sr_notify(OSThread* osthread) {
3139 int status = pthread_kill(osthread->pthread_id(), SR_signum);
3140 assert_status(status == 0, status, "pthread_kill");
3141 return status;
3142 }
3143
3144 // "Randomly" selected value for how long we want to spin
3145 // before bailing out on suspending a thread, also how often
3146 // we send a signal to a thread we want to resume
3147 static const int RANDOMLY_LARGE_INTEGER = 1000000;
3148 static const int RANDOMLY_LARGE_INTEGER2 = 100;
3149
3150 // returns true on success and false on error - really an error is fatal
3151 // but this seems the normal response to library errors
3152 static bool do_suspend(OSThread* osthread) {
3153 assert(osthread->sr.is_running(), "thread should be running");
3154 // mark as suspended and send signal
3155
3156 if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
3157 // failed to switch, state wasn't running?
3158 ShouldNotReachHere();
3159 return false;
3160 }
3161
3162 if (sr_notify(osthread) != 0) {
3163 // try to cancel, switch to running
3164
3165 os::SuspendResume::State result = osthread->sr.cancel_suspend();
3166 if (result == os::SuspendResume::SR_RUNNING) {
3167 // cancelled
3168 return false;
3169 } else if (result == os::SuspendResume::SR_SUSPENDED) {
3170 // somehow managed to suspend
3171 return true;
3172 } else {
3173 ShouldNotReachHere();
3174 return false;
3175 }
3176 }
3177
3178 // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
3179
3180 for (int n = 0; !osthread->sr.is_suspended(); n++) {
3181 for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
3182 os::yield_all(i);
3183 }
3184
3185 // timeout, try to cancel the request
3186 if (n >= RANDOMLY_LARGE_INTEGER) {
3187 os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
3188 if (cancelled == os::SuspendResume::SR_RUNNING) {
3189 return false;
3190 } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
3191 return true;
3192 } else {
3193 ShouldNotReachHere();
3194 return false;
3195 }
3196 }
3197 }
3198
3199 guarantee(osthread->sr.is_suspended(), "Must be suspended");
3200 return true;
3201 }
3202
3203 static void do_resume(OSThread* osthread) {
3204 //assert(osthread->sr.is_suspended(), "thread should be suspended");
3205
3206 if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
3207 // failed to switch to WAKEUP_REQUEST
3208 ShouldNotReachHere();
3209 return;
3210 }
3211
3212 while (!osthread->sr.is_running()) {
3213 if (sr_notify(osthread) == 0) {
3214 for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
3215 for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
3216 os::yield_all(i);
3217 }
3218 }
3219 } else {
3220 ShouldNotReachHere();
3221 }
3222 }
3223
3224 guarantee(osthread->sr.is_running(), "Must be running!");
3225 }
3226
3227 ////////////////////////////////////////////////////////////////////////////////
3228 // interrupt support
3229
3230 void os::interrupt(Thread* thread) {
3231 assert(Thread::current() == thread || Threads_lock->owned_by_self(),
3232 "possibility of dangling Thread pointer");
3233
3234 OSThread* osthread = thread->osthread();
3235
3236 if (!osthread->interrupted()) {
3237 osthread->set_interrupted(true);
3238 // More than one thread can get here with the same value of osthread,
3239 // resulting in multiple notifications. We do, however, want the store
3240 // to interrupted() to be visible to other threads before we execute unpark().
3241 OrderAccess::fence();
3242 ParkEvent * const slp = thread->_SleepEvent;
3243 if (slp != NULL) slp->unpark();
3244 }
3245
3246 // For JSR166. Unpark even if interrupt status already was set
3247 if (thread->is_Java_thread())
3248 ((JavaThread*)thread)->parker()->unpark();
3249
3250 ParkEvent * ev = thread->_ParkEvent;
3251 if (ev != NULL) ev->unpark();
3252
3253 }
3254
3255 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
3256 assert(Thread::current() == thread || Threads_lock->owned_by_self(),
3257 "possibility of dangling Thread pointer");
3258
3259 OSThread* osthread = thread->osthread();
3260
3261 bool interrupted = osthread->interrupted();
3262
3263 if (interrupted && clear_interrupted) {
3264 osthread->set_interrupted(false);
3265 // consider thread->_SleepEvent->reset() ... optional optimization
3266 }
3267
3268 return interrupted;
3269 }
3270
3271 ///////////////////////////////////////////////////////////////////////////////////
3272 // signal handling (except suspend/resume)
3273
3274 // This routine may be used by user applications as a "hook" to catch signals.
3275 // The user-defined signal handler must pass unrecognized signals to this
3276 // routine, and if it returns true (non-zero), then the signal handler must
3277 // return immediately. If the flag "abort_if_unrecognized" is true, then this
3278 // routine will never return false (zero), but instead will execute a VM panic
3279 // routine to kill the process.
3280 //
3281 // If this routine returns false, it is OK to call it again. This allows
3282 // the user-defined signal handler to perform checks either before or after
3283 // the VM performs its own checks. Naturally, the user code would be making
3284 // a serious error if it tried to handle an exception (such as a null check
3285 // or breakpoint) that the VM was generating for its own correct operation.
3286 //
3287 // This routine may recognize any of the following kinds of signals:
3288 // SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
3289 // It should be consulted by handlers for any of those signals.
3290 //
3291 // The caller of this routine must pass in the three arguments supplied
3292 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
3293 // field of the structure passed to sigaction(). This routine assumes that
3294 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
3295 //
3296 // Note that the VM will print warnings if it detects conflicting signal
3297 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
3298 //
3299 extern "C" JNIEXPORT int
3300 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
3301
3302 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
3303 // to be the thing to call; documentation is not terribly clear about whether
3304 // pthread_sigmask also works, and if it does, whether it does the same).
3305 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
3306 const int rc = ::pthread_sigmask(how, set, oset);
3307 // return value semantics differ slightly for error case:
3308 // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
3309 // (so, pthread_sigmask is more threadsafe for error handling)
3310 // But success is always 0.
3311 return rc == 0;
3312 }
3313
3314 // Function to unblock all signals which are, according
3315 // to POSIX, typical program error signals. If they happen while being blocked,
3316 // they typically will bring down the process immediately.
3317 bool unblock_program_error_signals() {
3318 sigset_t set;
3319 ::sigemptyset(&set);
3320 ::sigaddset(&set, SIGILL);
3321 ::sigaddset(&set, SIGBUS);
3322 ::sigaddset(&set, SIGFPE);
3323 ::sigaddset(&set, SIGSEGV);
3324 return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
3325 }
3326
3327 // Renamed from 'signalHandler' to avoid collision with other shared libs.
3328 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
3329 assert(info != NULL && uc != NULL, "it must be old kernel");
3330
3331 // Never leave program error signals blocked;
3332 // on all our platforms they would bring down the process immediately when
3333 // getting raised while being blocked.
3334 unblock_program_error_signals();
3335
3336 JVM_handle_aix_signal(sig, info, uc, true);
3337 }
3338
3339
3340 // This boolean allows users to forward their own non-matching signals
3341 // to JVM_handle_aix_signal, harmlessly.
3342 bool os::Aix::signal_handlers_are_installed = false;
3343
3344 // For signal-chaining
3345 struct sigaction os::Aix::sigact[MAXSIGNUM];
3346 unsigned int os::Aix::sigs = 0;
3347 bool os::Aix::libjsig_is_loaded = false;
3348 typedef struct sigaction *(*get_signal_t)(int);
3349 get_signal_t os::Aix::get_signal_action = NULL;
3350
3351 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
3352 struct sigaction *actp = NULL;
3353
3354 if (libjsig_is_loaded) {
3355 // Retrieve the old signal handler from libjsig
3356 actp = (*get_signal_action)(sig);
3357 }
3358 if (actp == NULL) {
3359 // Retrieve the preinstalled signal handler from jvm
3360 actp = get_preinstalled_handler(sig);
3361 }
3362
3363 return actp;
3364 }
3365
3366 static bool call_chained_handler(struct sigaction *actp, int sig,
3367 siginfo_t *siginfo, void *context) {
3368 // Call the old signal handler
3369 if (actp->sa_handler == SIG_DFL) {
3370 // It's more reasonable to let jvm treat it as an unexpected exception
3371 // instead of taking the default action.
3372 return false;
3373 } else if (actp->sa_handler != SIG_IGN) {
3374 if ((actp->sa_flags & SA_NODEFER) == 0) {
3375 // automatically block the signal
3376 sigaddset(&(actp->sa_mask), sig);
3377 }
3378
3379 sa_handler_t hand = NULL;
3380 sa_sigaction_t sa = NULL;
3381 bool siginfo_flag_set = (actp->sa_flags & SA_SIGINFO) != 0;
3382 // retrieve the chained handler
3383 if (siginfo_flag_set) {
3384 sa = actp->sa_sigaction;
3385 } else {
3386 hand = actp->sa_handler;
3387 }
3388
3389 if ((actp->sa_flags & SA_RESETHAND) != 0) {
3390 actp->sa_handler = SIG_DFL;
3391 }
3392
3393 // try to honor the signal mask
3394 sigset_t oset;
3395 pthread_sigmask(SIG_SETMASK, &(actp->sa_mask), &oset);
3396
3397 // call into the chained handler
3398 if (siginfo_flag_set) {
3399 (*sa)(sig, siginfo, context);
3400 } else {
3401 (*hand)(sig);
3402 }
3403
3404 // restore the signal mask
3405 pthread_sigmask(SIG_SETMASK, &oset, 0);
3406 }
3407 // Tell jvm's signal handler the signal is taken care of.
3408 return true;
3409 }
3410
3411 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
3412 bool chained = false;
3413 // signal-chaining
3414 if (UseSignalChaining) {
3415 struct sigaction *actp = get_chained_signal_action(sig);
3416 if (actp != NULL) {
3417 chained = call_chained_handler(actp, sig, siginfo, context);
3418 }
3419 }
3420 return chained;
3421 }
3422
3423 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
3424 if ((((unsigned int)1 << sig) & sigs) != 0) {
3425 return &sigact[sig];
3426 }
3427 return NULL;
3428 }
3429
3430 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
3431 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3432 sigact[sig] = oldAct;
3433 sigs |= (unsigned int)1 << sig;
3434 }
3435
3436 // For diagnostics.
3437 int os::Aix::sigflags[MAXSIGNUM];
3438
3439 int os::Aix::get_our_sigflags(int sig) {
3440 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3441 return sigflags[sig];
3442 }
3443
3444 void os::Aix::set_our_sigflags(int sig, int flags) {
3445 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3446 sigflags[sig] = flags;
3447 }
3448
3449 void os::Aix::set_signal_handler(int sig, bool set_installed) {
3450 // Check for overwrite.
3451 struct sigaction oldAct;
3452 sigaction(sig, (struct sigaction*)NULL, &oldAct);
3453
3454 void* oldhand = oldAct.sa_sigaction
3455 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3456 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3457 // Renamed 'signalHandler' to avoid collision with other shared libs.
3458 if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
3459 oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
3460 oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
3461 if (AllowUserSignalHandlers || !set_installed) {
3462 // Do not overwrite; user takes responsibility to forward to us.
3463 return;
3464 } else if (UseSignalChaining) {
3465 // save the old handler in jvm
3466 save_preinstalled_handler(sig, oldAct);
3467 // libjsig also interposes the sigaction() call below and saves the
3468 // old sigaction on its own.
3469 } else {
3470 fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
3471 "%#lx for signal %d.", (long)oldhand, sig));
3472 }
3473 }
3474
3475 struct sigaction sigAct;
3476 sigfillset(&(sigAct.sa_mask));
3477 if (!set_installed) {
3478 sigAct.sa_handler = SIG_DFL;
3479 sigAct.sa_flags = SA_RESTART;
3480 } else {
3481 // Renamed 'signalHandler' to avoid collision with other shared libs.
3482 sigAct.sa_sigaction = javaSignalHandler;
3483 sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
3484 }
3485 // Save the flags we are about to set for our handler.
3486 assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
3487 sigflags[sig] = sigAct.sa_flags;
3488
3489 int ret = sigaction(sig, &sigAct, &oldAct);
3490 assert(ret == 0, "check");
3491
3492 void* oldhand2 = oldAct.sa_sigaction
3493 ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
3494 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
3495 assert(oldhand2 == oldhand, "no concurrent signal handler installation");
3496 }
3497
3498 // Install signal handlers for signals that HotSpot needs to
3499 // handle in order to support Java-level exception handling.
3500 void os::Aix::install_signal_handlers() {
3501 if (!signal_handlers_are_installed) {
3502 signal_handlers_are_installed = true;
3503
3504 // signal-chaining
3505 typedef void (*signal_setting_t)();
3506 signal_setting_t begin_signal_setting = NULL;
3507 signal_setting_t end_signal_setting = NULL;
3508 begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3509 dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
3510 if (begin_signal_setting != NULL) {
3511 end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
3512 dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
3513 get_signal_action = CAST_TO_FN_PTR(get_signal_t,
3514 dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
3515 libjsig_is_loaded = true;
3516 assert(UseSignalChaining, "should enable signal-chaining");
3517 }
3518 if (libjsig_is_loaded) {
3519 // Tell libjsig jvm is setting signal handlers
3520 (*begin_signal_setting)();
3521 }
3522
3523 set_signal_handler(SIGSEGV, true);
3524 set_signal_handler(SIGPIPE, true);
3525 set_signal_handler(SIGBUS, true);
3526 set_signal_handler(SIGILL, true);
3527 set_signal_handler(SIGFPE, true);
3528 set_signal_handler(SIGTRAP, true);
3529 set_signal_handler(SIGXFSZ, true);
3530 set_signal_handler(SIGDANGER, true);
3531
3532 if (libjsig_is_loaded) {
3533 // Tell libjsig jvm finishes setting signal handlers
3534 (*end_signal_setting)();
3535 }
3536
3537 // We don't activate the signal checker if libjsig is in place; we trust ourselves.
3538 // And if AllowUserSignalHandlers is set, all bets are off.
3539 // Log that signal checking is off only if -verbose:jni is specified.
3540 if (CheckJNICalls) {
3541 if (libjsig_is_loaded) {
3542 tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
3543 check_signals = false;
3544 }
3545 if (AllowUserSignalHandlers) {
3546 tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
3547 check_signals = false;
3548 }
3549 // need to initialize check_signal_done
3550 ::sigemptyset(&check_signal_done);
3551 }
3552 }
3553 }
3554
3555 static const char* get_signal_handler_name(address handler,
3556 char* buf, int buflen) {
3557 int offset;
3558 bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
3559 if (found) {
3560 // skip directory names
3561 const char *p1, *p2;
3562 p1 = buf;
3563 size_t len = strlen(os::file_separator());
3564 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
3565 // The way os::dll_address_to_library_name is implemented on Aix
3566 // right now, it always returns -1 for the offset, which is not
3567 // terribly informative.
3568 // Will fix that. For now, omit the offset.
3569 jio_snprintf(buf, buflen, "%s", p1);
3570 } else {
3571 jio_snprintf(buf, buflen, PTR_FORMAT, handler);
3572 }
3573 return buf;
3574 }
3575
3576 static void print_signal_handler(outputStream* st, int sig,
3577 char* buf, size_t buflen) {
3578 struct sigaction sa;
3579 sigaction(sig, NULL, &sa);
3580
3581 st->print("%s: ", os::exception_name(sig, buf, buflen));
3582
3583 address handler = (sa.sa_flags & SA_SIGINFO)
3584 ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
3585 : CAST_FROM_FN_PTR(address, sa.sa_handler);
3586
3587 if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
3588 st->print("SIG_DFL");
3589 } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
3590 st->print("SIG_IGN");
3591 } else {
3592 st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
3593 }
3594
3595 // Print readable mask.
3596 st->print(", sa_mask[0]=");
3597 os::Posix::print_signal_set_short(st, &sa.sa_mask);
3598
3599 address rh = VMError::get_resetted_sighandler(sig);
3600 // Maybe the handler was reset by VMError?
3601 if (rh != NULL) {
3602 handler = rh;
3603 sa.sa_flags = VMError::get_resetted_sigflags(sig);
3604 }
3605
3606 // Print textual representation of sa_flags.
3607 st->print(", sa_flags=");
3608 os::Posix::print_sa_flags(st, sa.sa_flags);
3609
3610 // Check: is it our handler?
3611 if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
3612 handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
3613 // It is our signal handler.
3614 // Check the flags; warn if they differ from the ones we set.
3615 if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
3616 st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
3617 os::Aix::get_our_sigflags(sig));
3618 }
3619 }
3620 st->cr();
3621 }
3622
3623
3624 #define DO_SIGNAL_CHECK(sig) \
3625 if (!sigismember(&check_signal_done, sig)) \
3626 os::Aix::check_signal_handler(sig)
3627
3628 // This method is a periodic task to check for misbehaving JNI applications
3629 // under CheckJNI; we can add any periodic checks here.
3630
3631 void os::run_periodic_checks() {
3632
3633 if (check_signals == false) return;
3634
3635 // SEGV and BUS, if overridden, could potentially prevent
3636 // generation of hs*.log in the event of a crash; debugging
3637 // such a case can be very challenging, so we absolutely
3638 // check the following for good measure:
3639 DO_SIGNAL_CHECK(SIGSEGV);
3640 DO_SIGNAL_CHECK(SIGILL);
3641 DO_SIGNAL_CHECK(SIGFPE);
3642 DO_SIGNAL_CHECK(SIGBUS);
3643 DO_SIGNAL_CHECK(SIGPIPE);
3644 DO_SIGNAL_CHECK(SIGXFSZ);
3645 if (UseSIGTRAP) {
3646 DO_SIGNAL_CHECK(SIGTRAP);
3647 }
3648 DO_SIGNAL_CHECK(SIGDANGER);
3649
3650 // ReduceSignalUsage allows the user to override these handlers;
3651 // see comments at the very top and in jvm_solaris.h.
3652 if (!ReduceSignalUsage) {
3653 DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
3654 DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
3655 DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
3656 DO_SIGNAL_CHECK(BREAK_SIGNAL);
3657 }
3658
3659 DO_SIGNAL_CHECK(SR_signum);
3660 DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
3661 }
3662
3663 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
3664
3665 static os_sigaction_t os_sigaction = NULL;
3666
3667 void os::Aix::check_signal_handler(int sig) {
3668 char buf[O_BUFLEN];
3669 address jvmHandler = NULL;
3670
3671 struct sigaction act;
3672 if (os_sigaction == NULL) {
3673 // only trust the default sigaction, in case it has been interposed
3674 os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
3675 if (os_sigaction == NULL) return;
3676 }
3677
3678 os_sigaction(sig, (struct sigaction*)NULL, &act);
3679
3680 address thisHandler = (act.sa_flags & SA_SIGINFO)
3681 ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
3682 : CAST_FROM_FN_PTR(address, act.sa_handler);
3683
3684
3685 switch(sig) {
3686 case SIGSEGV:
3687 case SIGBUS:
3688 case SIGFPE:
3689 case SIGPIPE:
3690 case SIGILL:
3691 case SIGXFSZ:
3692 // Renamed 'signalHandler' to avoid collision with other shared libs.
3693 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
3694 break;
3695
3696 case SHUTDOWN1_SIGNAL:
3697 case SHUTDOWN2_SIGNAL:
3698 case SHUTDOWN3_SIGNAL:
3699 case BREAK_SIGNAL:
3700 jvmHandler = (address)user_handler();
3701 break;
3702
3703 case INTERRUPT_SIGNAL:
3704 jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
3705 break;
3706
3707 default:
3708 if (sig == SR_signum) {
3709 jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
3710 } else {
3711 return;
3712 }
3713 break;
3714 }
3715
3716 if (thisHandler != jvmHandler) {
3717 tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
3718 tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
3719 tty->print_cr(" found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
3720 // No need to check this sig any longer
3721 sigaddset(&check_signal_done, sig);
3722 } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
3723 tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
3724 tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
3725 tty->print_cr(" found:" PTR32_FORMAT, act.sa_flags);
3726 // No need to check this sig any longer
3727 sigaddset(&check_signal_done, sig);
3728 }
3729
3730 // Dump all the signal handlers.
3731 if (sigismember(&check_signal_done, sig)) {
3732 print_signal_handlers(tty, buf, O_BUFLEN);
3733 }
3734 }
3735
3736 extern bool signal_name(int signo, char* buf, size_t len);
3737
3738 const char* os::exception_name(int exception_code, char* buf, size_t size) {
3739 if (0 < exception_code && exception_code <= SIGRTMAX) {
3740 // signal
3741 if (!signal_name(exception_code, buf, size)) {
3742 jio_snprintf(buf, size, "SIG%d", exception_code);
3743 }
3744 return buf;
3745 } else {
3746 return NULL;
3747 }
3748 }
3749
3750 // To install functions for atexit system call
3751 extern "C" {
3752 static void perfMemory_exit_helper() {
3753 perfMemory_exit();
3754 }
3755 }
3756
3757 // This is called _before_ most of the global arguments have been parsed.
3758 void os::init(void) {
3759 // This is basic; we want to know if that ever changes.
3760 // (The shared memory boundary is supposed to be 256M aligned.)
3761 assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
3762
3763 // First off, we need to know whether we run on AIX or PASE, and
3764 // the OS level we run on.
3765 os::Aix::initialize_os_info();
3766
3767 // Scan environment (SPEC1170 behaviour, etc)
3768 os::Aix::scan_environment();
3769
3770 // Check which pages are supported by AIX.
3771 os::Aix::query_multipage_support();
3772
3773 // Next, we need to initialize libo4 and libperfstat libraries.
3774 if (os::Aix::on_pase()) {
3775 os::Aix::initialize_libo4();
3776 } else {
3777 os::Aix::initialize_libperfstat();
3778 }
3779
3780 // Reset the perfstat information provided by ODM.
3781 if (os::Aix::on_aix()) {
3782 libperfstat::perfstat_reset();
3783 }
3784
3785 // Now initialize basic system properties. Note that for some of the values we
3786 // need libperfstat etc.
3787 os::Aix::initialize_system_info();
3788
3789 // Initialize large page support.
3790 if (UseLargePages) {
3791 os::large_page_init();
3792 if (!UseLargePages) {
3793 // initialize os::_page_sizes
3794 _page_sizes[0] = Aix::page_size();
3795 _page_sizes[1] = 0;
3796 if (Verbose) {
3797 fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
3798 }
3799 }
3800 } else {
3801 // initialize os::_page_sizes
3802 _page_sizes[0] = Aix::page_size();
3803 _page_sizes[1] = 0;
3804 }
3805
3806 // debug trace
3807 if (Verbose) {
3808 fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
3809 fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
3810 fprintf(stderr, "os::_page_sizes = ( ");
3811 for (int i = 0; _page_sizes[i]; i ++) {
3812 fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
3813 }
3814 fprintf(stderr, ")\n");
3815 }
3816
3817 _initial_pid = getpid();
3818
3819 clock_tics_per_sec = sysconf(_SC_CLK_TCK);
3820
3821 init_random(1234567);
3822
3823 ThreadCritical::initialize();
3824
3825 // Main_thread points to the aboriginal thread.
3826 Aix::_main_thread = pthread_self();
3827
3828 initial_time_count = os::elapsed_counter();
3829 pthread_mutex_init(&dl_mutex, NULL);
3830 }
3831
3832 // this is called _after_ the global arguments have been parsed
3833 jint os::init_2(void) {
3834
3835 if (Verbose) {
3836 fprintf(stderr, "processor count: %d\n", os::_processor_count);
3837 fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
3838 }
3839
3840 // initially build up the loaded dll map
3841 LoadedLibraries::reload();
3842
3843 const int page_size = Aix::page_size();
3844 const int map_size = page_size;
3845
3846 address map_address = (address) MAP_FAILED;
3847 const int prot = PROT_READ;
3848 const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
3849
3850 // use optimized addresses for the polling page,
3851 // e.g. map it to a special 32-bit address.
3852 if (OptimizePollingPageLocation) {
3853 // architecture-specific list of address wishes:
3854 address address_wishes[] = {
3855 // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
3856 // PPC64: all address wishes are non-negative 32 bit values where
3857 // the lower 16 bits are all zero. We can load these addresses
3858 // with a single ppc_lis instruction.
3859 (address) 0x30000000, (address) 0x31000000,
3860 (address) 0x32000000, (address) 0x33000000,
3861 (address) 0x40000000, (address) 0x41000000,
3862 (address) 0x42000000, (address) 0x43000000,
3863 (address) 0x50000000, (address) 0x51000000,
3864 (address) 0x52000000, (address) 0x53000000,
3865 (address) 0x60000000, (address) 0x61000000,
3866 (address) 0x62000000, (address) 0x63000000
3867 };
3868 int address_wishes_length = sizeof(address_wishes)/sizeof(address);
3869
3870 // iterate over the list of address wishes:
3871 for (int i=0; i<address_wishes_length; i++) {
3872 // try to map with current address wish.
3873 // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
3874 // fail if the address is already mapped.
3875 map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
3876 map_size, prot,
3877 flags | MAP_FIXED,
3878 -1, 0);
3879 if (Verbose) {
3880 fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
3881 address_wishes[i], map_address + (ssize_t)page_size);
3882 }
3883
3884 if (map_address + (ssize_t)page_size == address_wishes[i]) {
3885 // map succeeded and map_address is at wished address, exit loop.
3886 break;
3887 }
3888
3889 if (map_address != (address) MAP_FAILED) {
3890 // map succeeded, but polling_page is not at wished address, unmap and continue.
3891 ::munmap(map_address, map_size);
3892 map_address = (address) MAP_FAILED;
3893 }
3894 // map failed, continue loop.
3895 }
3896 } // end OptimizePollingPageLocation
3897
3898 if (map_address == (address) MAP_FAILED) {
3899 map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
3900 }
3901 guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
3902 os::set_polling_page(map_address);
3903
3904 if (!UseMembar) {
3905 address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
3906 guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
3907 os::set_memory_serialize_page(mem_serialize_page);
3908
3909 #ifndef PRODUCT
3910 if (Verbose && PrintMiscellaneous)
3911 tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
3912 #endif
3913 }
3914
3915 // initialize suspend/resume support - must do this before signal_sets_init()
3916 if (SR_initialize() != 0) {
3917 perror("SR_initialize failed");
3918 return JNI_ERR;
3919 }
3920
3921 Aix::signal_sets_init();
3922 Aix::install_signal_handlers();
3923
3924 // Check minimum allowable stack size for thread creation and to initialize
3925 // the java system classes, including StackOverflowError - depends on page
3926 // size. Add a page for compiler2 recursion in main thread.
3927 // Add in 2*BytesPerWord times page size to account for VM stack during
3928 // class initialization depending on 32 or 64 bit VM.
3929 os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
3930 (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
3931 2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
3932
3933 size_t threadStackSizeInBytes = ThreadStackSize * K;
3934 if (threadStackSizeInBytes != 0 &&
3935 threadStackSizeInBytes < os::Aix::min_stack_allowed) {
3936 tty->print_cr("\nThe stack size specified is too small. "
3937 "Specify at least %dk",
3938 os::Aix::min_stack_allowed / K);
3939 return JNI_ERR;
3940 }
3941
3942 // Make the stack size a multiple of the page size so that
3943 // the yellow/red zones can be guarded.
3944 // note that this can be 0, if no default stacksize was set
3945 JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
3946
3947 Aix::libpthread_init();
3948
3949 if (MaxFDLimit) {
3950 // set the number of file descriptors to max. print out error
3951 // if getrlimit/setrlimit fails but continue regardless.
3952 struct rlimit nbr_files;
3953 int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
3954 if (status != 0) {
3955 if (PrintMiscellaneous && (Verbose || WizardMode))
3956 perror("os::init_2 getrlimit failed");
3957 } else {
3958 nbr_files.rlim_cur = nbr_files.rlim_max;
3959 status = setrlimit(RLIMIT_NOFILE, &nbr_files);
3960 if (status != 0) {
3961 if (PrintMiscellaneous && (Verbose || WizardMode))
3962 perror("os::init_2 setrlimit failed");
3963 }
3964 }
3965 }
3966
3967 if (PerfAllowAtExitRegistration) {
3968 // only register atexit functions if PerfAllowAtExitRegistration is set.
3969 // atexit functions can be delayed until process exit time, which
3970 // can be problematic for embedded VM situations. Embedded VMs should
3971 // call DestroyJavaVM() to assure that VM resources are released.
3972
3973 // note: perfMemory_exit_helper atexit function may be removed in
3974 // the future if the appropriate cleanup code can be added to the
3975 // VM_Exit VMOperation's doit method.
3976 if (atexit(perfMemory_exit_helper) != 0) {
3977 warning("os::init_2 atexit(perfMemory_exit_helper) failed");
3978 }
3979 }
3980
3981 return JNI_OK;
3982 }
3983
3984 // this is called at the end of vm_initialization
3985 void os::init_3(void) {
3986 return;
3987 }
3988
3989 // Mark the polling page as unreadable
3990 void os::make_polling_page_unreadable(void) {
3991 if (!guard_memory((char*)_polling_page, Aix::page_size())) {
3992 fatal("Could not disable polling page");
3993 }
3994 };
3995
3996 // Mark the polling page as readable
3997 void os::make_polling_page_readable(void) {
3998 // Changed according to os_linux.cpp.
3999 if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
4000 fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
4001 }
4002 };
4003
4004 int os::active_processor_count() {
4005 int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
4006 assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
4007 return online_cpus;
4008 }
4009
4010 void os::set_native_thread_name(const char *name) {
4011 // Not yet implemented.
4012 return;
4013 }
4014
4015 bool os::distribute_processes(uint length, uint* distribution) {
4016 // Not yet implemented.
4017 return false;
4018 }
4019
4020 bool os::bind_to_processor(uint processor_id) {
4021 // Not yet implemented.
4022 return false;
4023 }
4024
4025 void os::SuspendedThreadTask::internal_do_task() {
4026 if (do_suspend(_thread->osthread())) {
4027 SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
4028 do_task(context);
4029 do_resume(_thread->osthread());
4030 }
4031 }
4032
4033 class PcFetcher : public os::SuspendedThreadTask {
4034 public:
4035 PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
4036 ExtendedPC result();
4037 protected:
4038 void do_task(const os::SuspendedThreadTaskContext& context);
4039 private:
4040 ExtendedPC _epc;
4041 };
4042
4043 ExtendedPC PcFetcher::result() {
4044 guarantee(is_done(), "task is not done yet.");
4045 return _epc;
4046 }
4047
4048 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
4049 Thread* thread = context.thread();
4050 OSThread* osthread = thread->osthread();
4051 if (osthread->ucontext() != NULL) {
4052 _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
4053 } else {
4054 // NULL context is unexpected, double-check this is the VMThread.
4055 guarantee(thread->is_VM_thread(), "can only be called for VMThread");
4056 }
4057 }
4058
4059 // Suspends the target using the signal mechanism and then grabs the PC before
4060 // resuming the target. Used by the flat-profiler only.
4061 ExtendedPC os::get_thread_pc(Thread* thread) {
4062 // Make sure that it is called by the watcher for the VMThread.
4063 assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
4064 assert(thread->is_VM_thread(), "Can only be called for VMThread");
4065
4066 PcFetcher fetcher(thread);
4067 fetcher.run();
4068 return fetcher.result();
4069 }
4070
4071 // Not needed on Aix.
4072 // int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
4073 // }
4074
4075 ////////////////////////////////////////////////////////////////////////////////
4076 // debug support
4077
4078 static address same_page(address x, address y) {
4079 intptr_t page_bits = -os::vm_page_size();
4080 if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
4081 return x;
4082 else if (x > y)
4083 return (address)(intptr_t(y) | ~page_bits) + 1;
4084 else
4085 return (address)(intptr_t(y) & page_bits);
4086 }
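// Worked example (illustrative, assuming a 4K page size, so page_bits masks
// off the low 12 bits): same_page clamps x to the page holding y.
//   same_page((address)0x5123, (address)0x5456) == (address)0x5123 // same page
//   same_page((address)0x7123, (address)0x5456) == (address)0x6000 // x above: one past y's page
//   same_page((address)0x3123, (address)0x5456) == (address)0x5000 // x below: start of y's page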
4087
4088 bool os::find(address addr, outputStream* st) {
4089
4090 st->print(PTR_FORMAT ": ", addr);
4091
4092 const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
4093 if (lib) {
4094 lib->print(st);
4095 return true;
4096 } else {
4097 lib = LoadedLibraries::find_for_data_address(addr);
4098 if (lib) {
4099 lib->print(st);
4100 return true;
4101 } else {
4102 st->print_cr("(outside any module)");
4103 }
4104 }
4105
4106 return false;
4107 }
4108
4109 ////////////////////////////////////////////////////////////////////////////////
4110 // misc
4111
4112 // This does not do anything on Aix. This is basically a hook for being
4113 // able to use structured exception handling (thread-local exception filters)
4114 // on, e.g., Win32.
4115 void
4116 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
4117 JavaCallArguments* args, Thread* thread) {
4118 f(value, method, args, thread);
4119 }
4120
4121 void os::print_statistics() {
4122 }
4123
4124 int os::message_box(const char* title, const char* message) {
4125 int i;
4126 fdStream err(defaultStream::error_fd());
4127 for (i = 0; i < 78; i++) err.print_raw("=");
4128 err.cr();
4129 err.print_raw_cr(title);
4130 for (i = 0; i < 78; i++) err.print_raw("-");
4131 err.cr();
4132 err.print_raw_cr(message);
4133 for (i = 0; i < 78; i++) err.print_raw("=");
4134 err.cr();
4135
4136 char buf[16];
4137 // Prevent process from exiting upon "read error" without consuming all CPU
4138 while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
4139
4140 return buf[0] == 'y' || buf[0] == 'Y';
4141 }
4142
4143 int os::stat(const char *path, struct stat *sbuf) {
4144 char pathbuf[MAX_PATH];
4145 if (strlen(path) > MAX_PATH - 1) {
4146 errno = ENAMETOOLONG;
4147 return -1;
4148 }
4149 os::native_path(strcpy(pathbuf, path));
4150 return ::stat(pathbuf, sbuf);
4151 }
4152
4153 bool os::check_heap(bool force) {
4154 return true;
4155 }
4156
4157 // int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
4158 // return ::vsnprintf(buf, count, format, args);
4159 // }
4160
4161 // Is a (classpath) directory empty?
4162 bool os::dir_is_empty(const char* path) {
4163 DIR *dir = NULL;
4164 struct dirent *ptr;
4165
4166 dir = opendir(path);
4167 if (dir == NULL) return true;
4168
4169 /* Scan the directory */
4170 bool result = true;
4171 char buf[sizeof(struct dirent) + MAX_PATH];
4172 while (result && (ptr = ::readdir(dir)) != NULL) {
4173 if (strcmp(ptr->d_name, ".") != 0 && strcmp(ptr->d_name, "..") != 0) {
4174 result = false;
4175 }
4176 }
4177 closedir(dir);
4178 return result;
4179 }
4180
4181 // This code originates from JDK's sysOpen and open64_w
4182 // from src/solaris/hpi/src/system_md.c
4183
4184 #ifndef O_DELETE
4185 #define O_DELETE 0x10000
4186 #endif
4187
4188 // Open a file. Unlink the file immediately after open returns
4189 // if the specified oflag has the O_DELETE flag set.
4190 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
4191
4192 int os::open(const char *path, int oflag, int mode) {
4193
4194 if (strlen(path) > MAX_PATH - 1) {
4195 errno = ENAMETOOLONG;
4196 return -1;
4197 }
4198 int fd;
4199 int o_delete = (oflag & O_DELETE);
4200 oflag = oflag & ~O_DELETE;
4201
4202 fd = ::open64(path, oflag, mode);
4203 if (fd == -1) return -1;
4204
4205 // If the open succeeded, the file might still be a directory.
4206 {
4207 struct stat64 buf64;
4208 int ret = ::fstat64(fd, &buf64);
4209 int st_mode = buf64.st_mode;
4210
4211 if (ret != -1) {
4212 if ((st_mode & S_IFMT) == S_IFDIR) {
4213 errno = EISDIR;
4214 ::close(fd);
4215 return -1;
4216 }
4217 } else {
4218 ::close(fd);
4219 return -1;
4220 }
4221 }
4222
4223 // All file descriptors that are opened in the JVM and not
4224 // specifically destined for a subprocess should have the
4225 // close-on-exec flag set. If we don't set it, then careless 3rd
4226 // party native code might fork and exec without closing all
4227 // appropriate file descriptors (e.g. as we do in closeDescriptors in
4228 // UNIXProcess.c), and this in turn might:
4229 //
4230 // - cause end-of-file to fail to be detected on some file
4231 // descriptors, resulting in mysterious hangs, or
4232 //
4233 // - might cause an fopen in the subprocess to fail on a system
4234 // suffering from bug 1085341.
4235 //
4236 // (Yes, the default setting of the close-on-exec flag is a Unix
4237 // design flaw.)
4238 //
4239 // See:
4240 // 1085341: 32-bit stdio routines should support file descriptors >255
4241 // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
4242 // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
4243 #ifdef FD_CLOEXEC
4244 {
4245 int flags = ::fcntl(fd, F_GETFD);
4246 if (flags != -1)
4247 ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
4248 }
4249 #endif
4250
4251 if (o_delete != 0) {
4252 ::unlink(path);
4253 }
4254 return fd;
4255 }
4256
4257
4258 // create binary file, rewriting existing file if required
4259 int os::create_binary_file(const char* path, bool rewrite_existing) {
4260 int oflags = O_WRONLY | O_CREAT;
4261 if (!rewrite_existing) {
4262 oflags |= O_EXCL;
4263 }
4264 return ::open64(path, oflags, S_IREAD | S_IWRITE);
4265 }
4266
4267 // return current position of file pointer
4268 jlong os::current_file_offset(int fd) {
4269 return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
4270 }
4271
4272 // move file pointer to the specified offset
4273 jlong os::seek_to_file_offset(int fd, jlong offset) {
4274 return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
4275 }
4276
4277 // This code originates from JDK's sysAvailable
4278 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
4279
4280 int os::available(int fd, jlong *bytes) {
4281 jlong cur, end;
4282 int mode;
4283 struct stat64 buf64;
4284
4285 if (::fstat64(fd, &buf64) >= 0) {
4286 mode = buf64.st_mode;
4287 if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
4288 // XXX: is the following call interruptible? If so, this might
4289 // need to go through the INTERRUPT_IO() wrapper as for other
4290 // blocking, interruptible calls in this file.
4291 int n;
4292 if (::ioctl(fd, FIONREAD, &n) >= 0) {
4293 *bytes = n;
4294 return 1;
4295 }
4296 }
4297 }
4298 if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
4299 return 0;
4300 } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
4301 return 0;
4302 } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
4303 return 0;
4304 }
4305 *bytes = end - cur;
4306 return 1;
4307 }
4308
4309 int os::socket_available(int fd, jint *pbytes) {
4310 // Linux doc says EINTR not returned, unlike Solaris
4311 int ret = ::ioctl(fd, FIONREAD, pbytes);
4312
4313 //%% note ioctl can return 0 when successful, JVM_SocketAvailable
4314 // is expected to return 0 on failure and 1 on success to the jdk.
4315 return (ret < 0) ? 0 : 1;
4316 }
4317
4318 // Map a block of memory.
4319 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
4320 char *addr, size_t bytes, bool read_only,
4321 bool allow_exec) {
4322 Unimplemented();
4323 return NULL;
4324 }
4325
4326
4327 // Remap a block of memory.
4328 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
4329 char *addr, size_t bytes, bool read_only,
4330 bool allow_exec) {
4331 // same as map_memory() on this OS
4332 return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
4333 allow_exec);
4334 }
4335
4336 // Unmap a block of memory.
4337 bool os::pd_unmap_memory(char* addr, size_t bytes) {
4338 return munmap(addr, bytes) == 0;
4339 }
4340
4341 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
4342 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
4343 // of a thread.
4344 //
4345 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
4346 // the fast estimate available on the platform.
4347
4348 jlong os::current_thread_cpu_time() {
4349 // return user + sys since the cost is the same
4350 const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
4351 assert(n >= 0, "negative CPU time");
4352 return n;
4353 }
4354
4355 jlong os::thread_cpu_time(Thread* thread) {
4356 // consistent with what current_thread_cpu_time() returns
4357 const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
4358 assert(n >= 0, "negative CPU time");
4359 return n;
4360 }
4361
4362 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
4363 const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
4364 assert(n >= 0, "negative CPU time");
4365 return n;
4366 }
4367
4368 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
4369 bool error = false;
4370
4371 jlong sys_time = 0;
4372 jlong user_time = 0;
4373
4374 // reimplemented using getthrds64().
4375 //
4376 // goes like this:
4377 // For the thread in question, get the kernel thread id. Then get the
4378 // kernel thread statistics using that id.
4379 //
4380 // This only works, of course, when no pthread scheduling is used,
4381 // i.e. there is a 1:1 relationship to kernel threads.
4382 // On AIX, see the AIXTHREAD_SCOPE variable.
4383
4384 pthread_t pthtid = thread->osthread()->pthread_id();
4385
4386 // retrieve kernel thread id for the pthread:
4387 tid64_t tid = 0;
4388 struct __pthrdsinfo pinfo;
4389 // I just love those otherworldly IBM APIs which force me to hand down
4390 // dummy buffers for stuff I don't care for...
4391 char dummy[1];
4392 int dummy_size = sizeof(dummy);
4393 if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
4394 dummy, &dummy_size) == 0) {
4395 tid = pinfo.__pi_tid;
4396 } else {
4397 tty->print_cr("pthread_getthrds_np failed.");
4398 error = true;
4399 }
4400
4401 // retrieve kernel timing info for that kernel thread
4402 if (!error) {
4403 struct thrdentry64 thrdentry;
4404 if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
4405 sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
4406 user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
4407 } else {
4408 tty->print_cr("getthrds64 failed.");
4409 error = true;
4410 }
4411 }
4412
4413 if (p_sys_time) {
4414 *p_sys_time = sys_time;
4415 }
4416
4417 if (p_user_time) {
4418 *p_user_time = user_time;
4419 }
4420
4421 if (error) {
4422 return false;
4423 }
4424
4425 return true;
4426 }
4427
4428 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
4429 jlong sys_time;
4430 jlong user_time;
4431
4432 if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
4433 return -1;
4434 }
4435
4436 return user_sys_cpu_time ? sys_time + user_time : user_time;
4437 }
4438
4439 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4440 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
4441 info_ptr->may_skip_backward = false; // elapsed time not wall time
4442 info_ptr->may_skip_forward = false; // elapsed time not wall time
4443 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4444 }
4445
4446 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
4447 info_ptr->max_value = ALL_64_BITS; // will not wrap in less than 64 bits
4448 info_ptr->may_skip_backward = false; // elapsed time not wall time
4449 info_ptr->may_skip_forward = false; // elapsed time not wall time
4450 info_ptr->kind = JVMTI_TIMER_TOTAL_CPU; // user+system time is returned
4451 }
4452
4453 bool os::is_thread_cpu_time_supported() {
4454 return true;
4455 }
4456
4457 // System loadavg support. Returns -1 if load average cannot be obtained.
4458 // For now just return the system wide load average (no processor sets).
4459 int os::loadavg(double values[], int nelem) {
4460
4461 // Implemented using libperfstat on AIX.
4462
4463 guarantee(nelem >= 0 && nelem <= 3, "argument error");
4464 guarantee(values, "argument error");
4465
4466 if (os::Aix::on_pase()) {
4467 Unimplemented();
4468 return -1;
4469 } else {
4470 // AIX: use libperfstat
4471 //
4472 // See also:
4473 // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
4474 // /usr/include/libperfstat.h:
4475
4476 // Use get_cpuinfo, which is already AIX-version independent.
4477 os::Aix::cpuinfo_t ci;
4478 if (os::Aix::get_cpuinfo(&ci)) {
4479 for (int i = 0; i < nelem; i++) {
4480 values[i] = ci.loadavg[i];
4481 }
4482 } else {
4483 return -1;
4484 }
4485 return nelem;
4486 }
4487 }
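// A minimal usage sketch (hypothetical caller, for illustration only):
//
//   double loads[3];
//   const int n = os::loadavg(loads, 3);
//   if (n == 3) {
//     // loads[0..2] now hold the 1-, 5- and 15-minute load averages.
//   } else {
//     // the load average could not be obtained (n == -1).
//   }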
4488
4489 void os::pause() {
4490 char filename[MAX_PATH];
4491 if (PauseAtStartupFile && PauseAtStartupFile[0]) {
4492 jio_snprintf(filename, MAX_PATH, PauseAtStartupFile);
4493 } else {
4494 jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
4495 }
4496
4497 int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
4498 if (fd != -1) {
4499 struct stat buf;
4500 ::close(fd);
4501 while (::stat(filename, &buf) == 0) {
4502 (void)::poll(NULL, 0, 100);
4503 }
4504 } else {
4505 jio_fprintf(stderr,
4506 "Could not open pause file '%s', continuing immediately.\n", filename);
4507 }
4508 }
4509
4510 bool os::Aix::is_primordial_thread() {
4511 if (pthread_self() == (pthread_t)1) {
4512 return true;
4513 } else {
4514 return false;
4515 }
4516 }
4517
4518 // OS recognition (PASE/AIX, OS level). Call this before calling any
4519 // of the Aix::on_pase() or Aix::os_version() statics.
4520 void os::Aix::initialize_os_info() {
4521
4522 assert(_on_pase == -1 && _os_version == -1, "already called.");
4523
4524 struct utsname uts;
4525 memset(&uts, 0, sizeof(uts));
4526 strcpy(uts.sysname, "?");
4527 if (::uname(&uts) == -1) {
4528 fprintf(stderr, "uname failed (%d)\n", errno);
4529 guarantee(0, "Could not determine whether we run on AIX or PASE");
4530 } else {
4531 if (Verbose) {
4532 fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
4533 "node \"%s\" machine \"%s\"\n",
4534 uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
4535 }
4536 const int major = atoi(uts.version);
4537 assert(major > 0, "invalid OS version");
4538 const int minor = atoi(uts.release);
4539 assert(minor > 0, "invalid OS release");
4540 _os_version = (major << 8) | minor;
4541 if (strcmp(uts.sysname, "OS400") == 0) {
4542 Unimplemented();
4543 } else if (strcmp(uts.sysname, "AIX") == 0) {
4544 // We run on AIX. We do not support versions older than AIX 5.3.
4545 _on_pase = 0;
4546 if (_os_version < 0x0503) {
4547 fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
4548 assert(false, "AIX release too old.");
4549 } else {
4550 if (Verbose) {
4551 fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
4552 }
4553 }
4554 } else {
4555 assert(false, "unknown OS");
4556 }
4557 }
4558
4559 guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
4560
4561 } // end: os::Aix::initialize_os_info()
4562
4563 // Scan environment for important settings which might affect the VM.
4564 // Trace out settings. Warn about invalid settings and/or correct them.
4565 //
4566 // Must run after os::Aix::initialize_os_info().
4567 void os::Aix::scan_environment() {
4568
4569 char* p;
4570 int rc;
4571
4572 // Warn explicitly if EXTSHM=ON is used. That switch changes how
4573 // System V shared memory behaves. One effect is that the page size of
4574 // shared memory cannot be changed dynamically, effectively preventing
4575 // large pages from working.
4576 // This switch was needed on AIX 32bit, but on AIX 64bit the general
4577 // recommendation is (in OSS notes) to switch it off.
4578 p = ::getenv("EXTSHM");
4579 if (Verbose) {
4580 fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
4581 }
4582 if (p && strcmp(p, "ON") == 0) {
4583 fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
4584 _extshm = 1;
4585 } else {
4586 _extshm = 0;
4587 }
4588
4589 // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
4590 // Not tested, not supported.
4591 //
4592 // Note that it might be worth the trouble to test and to require it, if only to
4593 // get useful return codes for mprotect.
4594 //
4595 // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
4596 // exec() ? before loading the libjvm ? ....)
4597 p = ::getenv("XPG_SUS_ENV");
4598 if (Verbose) {
4599 fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
4600 }
4601 if (p && strcmp(p, "ON") == 0) {
4602 _xpg_sus_mode = 1;
4603 fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
4604 // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
4605 // clobber address ranges. If we ever want to support that, we have to do some
4606 // testing first.
4607 guarantee(false, "XPG_SUS_ENV=ON not supported");
4608 } else {
4609 _xpg_sus_mode = 0;
4610 }
4611
4612 // Switch off AIX internal (pthread) guard pages. This has
4613 // immediate effect for any pthread_create calls which follow.
4614 p = ::getenv("AIXTHREAD_GUARDPAGES");
4615 if (Verbose) {
4616 fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
4617 fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
4618 }
4619 rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
4620 guarantee(rc == 0, "");
4621
4622 } // end: os::Aix::scan_environment()
4623
4624 // PASE: initialize the libo4 library (AS400 PASE porting library).
4625 void os::Aix::initialize_libo4() {
4626 Unimplemented();
4627 }
4628
4629 // AIX: initialize the libperfstat library (we load this dynamically
4630 // because it is only available on AIX).
4631 void os::Aix::initialize_libperfstat() {
4632
4633 assert(os::Aix::on_aix(), "AIX only");
4634
4635 if (!libperfstat::init()) {
4636 fprintf(stderr, "libperfstat initialization failed.\n");
4637 assert(false, "libperfstat initialization failed");
4638 } else {
4639 if (Verbose) {
4640 fprintf(stderr, "libperfstat initialized.\n");
4641 }
4642 }
4643 } // end: os::Aix::initialize_libperfstat
4644
4645 /////////////////////////////////////////////////////////////////////////////
4646 // thread stack
4647
4648 // function to query the current stack size using pthread_getthrds_np
4649 //
4650 // ! do not change anything here unless you know what you are doing !
4651 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
4652
4653 // This only works when invoked on a pthread. As we agreed not to use
4654 // primordial threads anyway, I assert that here.
4655 guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
4656
4657 // information about this api can be found (a) in the pthread.h header and
4658 // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
4659 //
4660 // The use of this API to find out the current stack is kind of undefined.
4661 // But after a lot of tries and asking IBM about it, I concluded that it is safe
4662 // enough for cases where I let the pthread library create its stacks. For cases
4663 // where I create an own stack and pass this to pthread_create, it seems not to
4664 // work (the returned stack size in that case is 0).
4665
4666 pthread_t tid = pthread_self();
4667 struct __pthrdsinfo pinfo;
4668 char dummy[1]; // we only need this to satisfy the api and to not get E
4669 int dummy_size = sizeof(dummy);
4670
4671 memset(&pinfo, 0, sizeof(pinfo));
4672
4673 const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
4674 sizeof(pinfo), dummy, &dummy_size);
4675
4676 if (rc != 0) {
4677 fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
4678 guarantee(0, "pthread_getthrds_np failed");
4679 }
4680
4681 guarantee(pinfo.__pi_stackend, "returned stack base invalid");
4682
4683 // The following can happen when invoking pthread_getthrds_np on a pthread running on a user-provided stack
4684 // (when handing down a stack to pthread_create, see pthread_attr_setstackaddr).
4685 // Not sure what to do here - I feel inclined to forbid this use case completely.
4686 guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
4687
4688 // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
4689 if (p_stack_base) {
4690 (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
4691 }
4692
4693 if (p_stack_size) {
4694 (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
4695 }
4696
4697 #ifndef PRODUCT
4698 if (Verbose) {
4699 fprintf(stderr,
4700 "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
4701 ", real stack_size=" INTPTR_FORMAT
4702 ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
4703 (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
4704 (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
4705 pinfo.__pi_stacksize - os::Aix::stack_page_size());
4706 }
4707 #endif
4708
4709 } // end query_stack_dimensions
4710
4711 // get the current stack base from the OS (actually, the pthread library)
4712 address os::current_stack_base() {
4713 address p;
4714 query_stack_dimensions(&p, 0);
4715 return p;
4716 }
4717
4718 // get the current stack size from the OS (actually, the pthread library)
4719 size_t os::current_stack_size() {
4720 size_t s;
4721 query_stack_dimensions(0, &s);
4722 return s;
4723 }
4724
4725 // Refer to the comments in os_solaris.cpp park-unpark.
4726 //
4727 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
4728 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
4729 // For specifics regarding the bug see GLIBC BUGID 261237 :
4730 // http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
4731 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
4732 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
4733 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
4734 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
4735 // and monitorenter when we're using 1-0 locking. All those operations may result in
4736 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
4737 // of libpthread avoids the problem, but isn't practical.
4738 //
4739 // Possible remedies:
4740 //
4741 // 1. Establish a minimum relative wait time. 50 to 100 msecs seems to work.
4742 // This is palliative and probabilistic, however. If the thread is preempted
4743 // between the call to compute_abstime() and pthread_cond_timedwait(), more
4744 // than the minimum period may have passed, and the abstime may be stale (in the
4745 // past) resulting in a hang. Using this technique reduces the odds of a hang
4746 // but the JVM is still vulnerable, particularly on heavily loaded systems.
4747 //
4748 // 2. Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
4749 // of the usual flag-condvar-mutex idiom. The write side of the pipe is set
4750 // NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
4751 // reduces to poll()+read(). This works well, but consumes 2 FDs per extant
4752 // thread.
4753 //
4754 // 3. Embargo pthread_cond_timedwait() and implement a native "chron" thread
4755 // that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
4756 // a timeout request to the chron thread and then blocking via pthread_cond_wait().
4757 // This also works well. In fact it avoids kernel-level scalability impediments
4758 // on certain platforms that don't handle lots of active pthread_cond_timedwait()
4759 // timers in a graceful fashion.
4760 //
4761 // 4. When the abstime value is in the past it appears that control returns
4762 // correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
4763 // Subsequent timedwait/wait calls may hang indefinitely. Given that, we
4764 // can avoid the problem by reinitializing the condvar -- by cond_destroy()
4765 // followed by cond_init() -- after all calls to pthread_cond_timedwait().
4766 // It may be possible to avoid reinitialization by checking the return
4767 // value from pthread_cond_timedwait(). In addition to reinitializing the
4768 // condvar we must establish the invariant that cond_signal() is only called
4769 // within critical sections protected by the adjunct mutex. This prevents
4770 // cond_signal() from "seeing" a condvar that's in the midst of being
4771 // reinitialized or that is corrupt. Sadly, this invariant obviates the
4772 // desirable signal-after-unlock optimization that avoids futile context switching.
4773 //
4774 // I'm also concerned that some versions of NPTL might allocate an auxiliary
4775 // structure when a condvar is used or initialized. cond_destroy() would
4776 // release the helper structure. Our reinitialize-after-timedwait fix
4777 // put excessive stress on malloc/free and locks protecting the c-heap.
4778 //
4779 // We currently use (4). See the WorkAroundNTPLTimedWaitHang flag.
4780 // It may be possible to refine (4) by checking the kernel and NPTL versions
4781 // and only enabling the work-around for vulnerable environments.
4782
4783 // utility to compute the abstime argument to timedwait:
4784 // millis is the relative timeout time
4785 // abstime will be the absolute timeout time
4786 // TODO: replace compute_abstime() with unpackTime()
4787
4788 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
4789 if (millis < 0) millis = 0;
4790 struct timeval now;
4791 int status = gettimeofday(&now, NULL);
4792 assert(status == 0, "gettimeofday");
4793 jlong seconds = millis / 1000;
4794 millis %= 1000;
4795 if (seconds > 50000000) { // see man cond_timedwait(3T)
4796 seconds = 50000000;
4797 }
4798 abstime->tv_sec = now.tv_sec + seconds;
4799 long usec = now.tv_usec + millis * 1000;
4800 if (usec >= 1000000) {
4801 abstime->tv_sec += 1;
4802 usec -= 1000000;
4803 }
4804 abstime->tv_nsec = usec * 1000;
4805 return abstime;
4806 }
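// Worked example (values chosen for illustration): with now = {tv_sec=100,
// tv_usec=900000} and millis=1500 we get seconds=1 and millis=500, so
// usec = 900000 + 500*1000 = 1400000 >= 1000000, which normalizes to
// abstime = {tv_sec=102, tv_nsec=400000*1000}.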
4807
4808
4809 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
4810 // Conceptually TryPark() should be equivalent to park(0).
4811
4812 int os::PlatformEvent::TryPark() {
4813 for (;;) {
4814 const int v = _Event;
4815 guarantee ((v == 0) || (v == 1), "invariant");
4816 if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
4817 }
4818 }
4819
4820 void os::PlatformEvent::park() { // AKA "down()"
4821 // Invariant: Only the thread associated with the Event/PlatformEvent
4822 // may call park().
4823 // TODO: assert that _Assoc != NULL or _Assoc == Self
4824 int v;
4825 for (;;) {
4826 v = _Event;
4827 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4828 }
4829 guarantee (v >= 0, "invariant");
4830 if (v == 0) {
4831 // Do this the hard way by blocking ...
4832 int status = pthread_mutex_lock(_mutex);
4833 assert_status(status == 0, status, "mutex_lock");
4834 guarantee (_nParked == 0, "invariant");
4835 ++ _nParked;
4836 while (_Event < 0) {
4837 status = pthread_cond_wait(_cond, _mutex);
4838 assert_status(status == 0 || status == ETIMEDOUT, status, "cond_timedwait");
4839 }
4840 -- _nParked;
4841
4842 // In theory we could move the ST of 0 into _Event past the unlock(),
4843 // but then we'd need a MEMBAR after the ST.
4844 _Event = 0;
4845 status = pthread_mutex_unlock(_mutex);
4846 assert_status(status == 0, status, "mutex_unlock");
4847 }
4848 guarantee (_Event >= 0, "invariant");
4849 }
4850
4851 int os::PlatformEvent::park(jlong millis) {
4852 guarantee (_nParked == 0, "invariant");
4853
4854 int v;
4855 for (;;) {
4856 v = _Event;
4857 if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
4858 }
4859 guarantee (v >= 0, "invariant");
4860 if (v != 0) return OS_OK;
4861
4862 // We do this the hard way, by blocking the thread.
4863 // Consider enforcing a minimum timeout value.
4864 struct timespec abst;
4865 compute_abstime(&abst, millis);
4866
4867 int ret = OS_TIMEOUT;
4868 int status = pthread_mutex_lock(_mutex);
4869 assert_status(status == 0, status, "mutex_lock");
4870 guarantee (_nParked == 0, "invariant");
4871 ++_nParked;
4872
4873 // Object.wait(timo) will return because of
4874 // (a) notification
4875 // (b) timeout
4876 // (c) thread.interrupt
4877 //
4878 // Thread.interrupt and object.notify{All} both call Event::set.
4879 // That is, we treat thread.interrupt as a special case of notification.
4880 // The underlying Solaris implementation, cond_timedwait, admits
4881 // spurious/premature wakeups, but the JLS/JVM spec prevents the
4882 // JVM from making those visible to Java code. As such, we must
4883 // filter out spurious wakeups. We assume all ETIME returns are valid.
4884 //
4885 // TODO: properly differentiate simultaneous notify+interrupt.
4886 // In that case, we should propagate the notify to another waiter.
4887
4888 while (_Event < 0) {
4889 status = pthread_cond_timedwait(_cond, _mutex, &abst);
4890 assert_status(status == 0 || status == ETIMEDOUT,
4891 status, "cond_timedwait");
4892 if (!FilterSpuriousWakeups) break; // previous semantics
4893 if (status == ETIMEDOUT) break;
4894 // We consume and ignore EINTR and spurious wakeups.
4895 }
4896 --_nParked;
4897 if (_Event >= 0) {
4898 ret = OS_OK;
4899 }
4900 _Event = 0;
4901 status = pthread_mutex_unlock(_mutex);
4902 assert_status(status == 0, status, "mutex_unlock");
4903 assert (_nParked == 0, "invariant");
4904 return ret;
4905 }
4906
4907 void os::PlatformEvent::unpark() {
4908 int v, AnyWaiters;
4909 for (;;) {
4910 v = _Event;
4911 if (v > 0) {
4912 // The LD of _Event could have reordered or be satisfied
4913 // by a read-aside from this processor's write buffer.
4914 // To avoid problems execute a barrier and then
4915 // ratify the value.
4916 OrderAccess::fence();
4917 if (_Event == v) return;
4918 continue;
4919 }
4920 if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
4921 }
4922 if (v < 0) {
4923 // Wait for the thread associated with the event to vacate
4924 int status = pthread_mutex_lock(_mutex);
4925 assert_status(status == 0, status, "mutex_lock");
4926 AnyWaiters = _nParked;
4927
4928 if (AnyWaiters != 0) {
4929 // We intentionally signal *after* dropping the lock
4930 // to avoid a common class of futile wakeups.
4931 status = pthread_cond_signal(_cond);
4932 assert_status(status == 0, status, "cond_signal");
4933 }
4934 // Mutex should be locked for pthread_cond_signal(_cond).
4935 status = pthread_mutex_unlock(_mutex);
4936 assert_status(status == 0, status, "mutex_unlock");
4937 }
4938
4939 // Note that we signal() _after_ dropping the lock for "immortal" Events.
4940 // This is safe and avoids a common class of futile wakeups. In rare
4941 // circumstances this can cause a thread to return prematurely from
4942 // cond_{timed}wait() but the spurious wakeup is benign and the victim will
4943 // simply re-test the condition and re-park itself.
4944 }
4945
4946
4947 // JSR166
4948 // -------------------------------------------------------
4949
4950 //
4951 // The solaris and linux implementations of park/unpark are fairly
4952 // conservative for now, but can be improved. They currently use a
4953 // mutex/condvar pair, plus a count.
4954 // Park decrements count if > 0, else does a condvar wait. Unpark
4955 // sets count to 1 and signals condvar. Only one thread ever waits
4956 // on the condvar. Contention seen when trying to park implies that someone
4957 // is unparking you, so don't wait. And spurious returns are fine, so there
4958 // is no need to track notifications.
4959 //
4960
4961 #define MAX_SECS 100000000
4962 //
4963 // This code is common to linux and solaris and will be moved to a
4964 // common place in dolphin.
4965 //
4966 // The passed in time value is either a relative time in nanoseconds
4967 // or an absolute time in milliseconds. Either way it has to be unpacked
4968 // into suitable seconds and nanoseconds components and stored in the
4969 // given timespec structure.
4970 // Since the given time is a 64-bit value and the time_t used in the timespec
4971 // is only a signed 32-bit value (except on 64-bit Linux), we have to watch
4972 // for overflow if a time far in the future is given. Further, on Solaris
4973 // versions prior to 10 there is a restriction (see cond_timedwait) that the
4974 // specified number of seconds, in abstime, be less than current_time +
4975 // 100,000,000. As it will be 28 years before "now + 100,000,000" overflows,
4976 // we can ignore that and simply impose a hard limit on the seconds, using
4977 // "now + 100,000,000". This limits the timeout to about 3.17 years from
4978 // "now".
4979 //
4980
4981 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
4982 assert (time > 0, "unpackTime");
4983
4984 struct timeval now;
4985 int status = gettimeofday(&now, NULL);
4986 assert(status == 0, "gettimeofday");
4987
4988 time_t max_secs = now.tv_sec + MAX_SECS;
4989
4990 if (isAbsolute) {
4991 jlong secs = time / 1000;
4992 if (secs > max_secs) {
4993 absTime->tv_sec = max_secs;
4994 }
4995 else {
4996 absTime->tv_sec = secs;
4997 }
4998 absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
4999 }
5000 else {
5001 jlong secs = time / NANOSECS_PER_SEC;
5002 if (secs >= MAX_SECS) {
5003 absTime->tv_sec = max_secs;
5004 absTime->tv_nsec = 0;
5005 }
5006 else {
5007 absTime->tv_sec = now.tv_sec + secs;
5008 absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
5009 if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
5010 absTime->tv_nsec -= NANOSECS_PER_SEC;
5011 ++absTime->tv_sec; // note: this must be <= max_secs
5012 }
5013 }
5014 }
5015 assert(absTime->tv_sec >= 0, "tv_sec < 0");
5016 assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
5017 assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
5018 assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
5019 }
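// Editorial example (hypothetical values): with now.tv_sec == 1,000,000,000
// and now.tv_usec == 0, a relative request of 2.5 s (time == 2,500,000,000 ns)
// yields tv_sec == 1,000,000,002 and tv_nsec == 500,000,000. A relative
// request of 1e17 ns (~3.17 years) has secs >= MAX_SECS and is clamped to
// tv_sec == now.tv_sec + 100,000,000, tv_nsec == 0.
#if 0
  // Hypothetical usage: wait at most 500 ms relative to now.
  timespec abst;
  unpackTime(&abst, false /* relative */, 500 * NANOSECS_PER_MILLISEC);
  int rc = pthread_cond_timedwait(&cond, &mutex, &abst);  // ETIMEDOUT after ~500 ms
#endif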
5020
5021 void Parker::park(bool isAbsolute, jlong time) {
5022 // Optional fast-path check:
5023 // Return immediately if a permit is available.
5024 if (_counter > 0) {
5025 _counter = 0;
5026 OrderAccess::fence();
5027 return;
5028 }
5029
5030 Thread* thread = Thread::current();
5031 assert(thread->is_Java_thread(), "Must be JavaThread");
5032 JavaThread *jt = (JavaThread *)thread;
5033
5034 // Optional optimization -- avoid state transitions if there's an interrupt pending.
5035 // Check interrupt before trying to wait
5036 if (Thread::is_interrupted(thread, false)) {
5037 return;
5038 }
5039
5040 // Next, demultiplex/decode time arguments
5041 timespec absTime;
5042 if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
5043 return;
5044 }
5045 if (time > 0) {
5046 unpackTime(&absTime, isAbsolute, time);
5047 }
5048
5050 // Enter safepoint region
5051 // Beware of deadlocks such as 6317397.
5052 // The per-thread Parker:: mutex is a classic leaf-lock.
5053 // In particular a thread must never block on the Threads_lock while
5054 // holding the Parker:: mutex. If safepoints are pending, both the
5055 // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
5056 ThreadBlockInVM tbivm(jt);
5057
5058 // Don't wait if we cannot get the lock, since interference arises from
5059 // unblocking. Also, check for a pending interrupt before trying to wait.
5060 if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
5061 return;
5062 }
5063
5064 int status;
5065 if (_counter > 0) { // no wait needed
5066 _counter = 0;
5067 status = pthread_mutex_unlock(_mutex);
5068 assert (status == 0, "invariant");
5069 OrderAccess::fence();
5070 return;
5071 }
5072
5073 #ifdef ASSERT
5074 // Don't catch signals while blocked; let the running threads have the signals.
5075 // (This allows a debugger to break into the running thread.)
5076 sigset_t oldsigs;
5077 sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
5078 pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
5079 #endif
5080
5081 OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
5082 jt->set_suspend_equivalent();
5083 // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
5084
5085 if (time == 0) {
5086 status = pthread_cond_wait (_cond, _mutex);
5087 } else {
5088 status = pthread_cond_timedwait (_cond, _mutex, &absTime);
5089 if (status != 0 && WorkAroundNPTLTimedWaitHang) {
5090 pthread_cond_destroy (_cond);
5091 pthread_cond_init (_cond, NULL);
5092 }
5093 }
5094 assert_status(status == 0 || status == EINTR ||
5095 status == ETIME || status == ETIMEDOUT,
5096 status, "cond_timedwait");
5097
5098 #ifdef ASSERT
5099 pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
5100 #endif
5101
5102 _counter = 0;
5103 status = pthread_mutex_unlock(_mutex);
5104 assert_status(status == 0, status, "invariant");
5105 // If externally suspended while waiting, re-suspend
5106 if (jt->handle_special_suspend_equivalent_condition()) {
5107 jt->java_suspend_self();
5108 }
5109
5110 OrderAccess::fence();
5111 }
5112
5113 void Parker::unpark() {
5114 int s, status;
5115 status = pthread_mutex_lock(_mutex);
5116 assert (status == 0, "invariant");
5117 s = _counter;
5118 _counter = 1;
5119 if (s < 1) {
5120 if (WorkAroundNPTLTimedWaitHang) {
5121 status = pthread_cond_signal (_cond);
5122 assert (status == 0, "invariant");
5123 status = pthread_mutex_unlock(_mutex);
5124 assert (status == 0, "invariant");
5125 } else {
5126 status = pthread_mutex_unlock(_mutex);
5127 assert (status == 0, "invariant");
5128 status = pthread_cond_signal (_cond);
5129 assert (status == 0, "invariant");
5130 }
5131 } else {
5132 status = pthread_mutex_unlock(_mutex);
5133 assert (status == 0, "invariant");
5134 }
5135 }
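// Editorial note: when WorkAroundNPTLTimedWaitHang is set, the signal is
// issued while the mutex is still held; otherwise the mutex is released
// first, trading a possible futile wakeup of the parked thread for a
// shorter critical section. (The flag's name refers to a hang observed
// with NPTL's timed wait on Linux; its relevance on AIX is inherited
// from the shared code, not established here.)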
5136
5137
5138 extern char** environ;
5139
5140 // Run the specified command in a separate process. Return its exit value,
5141 // or -1 on failure (e.g. can't fork a new process).
5142 // Unlike system(), this function can be called from a signal handler. It
5143 // doesn't block SIGINT et al.
5144 int os::fork_and_exec(char* cmd) {
5145 char* argv[4] = { (char*) "sh", (char*) "-c", cmd, NULL };
5146
5147 pid_t pid = fork();
5148
5149 if (pid < 0) {
5150 // fork failed
5151 return -1;
5152
5153 } else if (pid == 0) {
5154 // child process
5155
5156 // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
5157 execve("/usr/bin/sh", argv, environ);
5158
5159 // execve failed
5160 _exit(-1);
5161
5162 } else {
5163 // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
5164 // care about the actual exit code, for now.
5165
5166 int status;
5167
5168 // Wait for the child process to exit. This returns immediately if
5169 // the child has already exited.
5170 while (waitpid(pid, &status, 0) < 0) {
5171 switch (errno) {
5172 case ECHILD: return 0;
5173 case EINTR: break;
5174 default: return -1;
5175 }
5176 }
5177
5178 if (WIFEXITED(status)) {
5179 // The child exited normally; get its exit code.
5180 return WEXITSTATUS(status);
5181 } else if (WIFSIGNALED(status)) {
5182 // The child exited because of a signal.
5183 // The best value to return is 0x80 + signal number,
5184 // because that is what all Unix shells do, and because
5185 // it allows callers to distinguish between process exit and
5186 // process death by signal.
5187 return 0x80 + WTERMSIG(status);
5188 } else {
5189 // Unknown exit code; pass it through
5190 return status;
5191 }
5192 }
5193 // Not reached - avoids a compiler warning about a missing return.
5194 return -1;
5195 }
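// Editorial example (hypothetical calls): the 0x80 + signal-number
// convention above matches what POSIX shells report via $? (128 + signo),
// so a child killed by SIGKILL (signal 9) yields 0x80 + 9 == 137.
#if 0
  int rc = os::fork_and_exec((char*) "exit 3");      // rc == 3 (normal exit)
  rc     = os::fork_and_exec((char*) "kill -9 $$");  // rc == 0x89 == 137
#endif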
5196
5197 // is_headless_jre()
5198 //
5199 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
5200 // in order to report if we are running in a headless jre.
5201 //
5202 // Since JDK8, xawt/libmawt.so has been moved into the same directory
5203 // as libawt.so and renamed libawt_xawt.so.
5204 bool os::is_headless_jre() {
5205 struct stat statbuf;
5206 char buf[MAXPATHLEN];
5207 char libmawtpath[MAXPATHLEN];
5208 const char *xawtstr = "/xawt/libmawt.so";
5209 const char *new_xawtstr = "/libawt_xawt.so";
5210
5211 char *p;
5212
5213 // Get path to libjvm.so
5214 os::jvm_path(buf, sizeof(buf));
5215
5216 // Get rid of libjvm.so
5217 p = strrchr(buf, '/');
5218 if (p == NULL) return false;
5219 else *p = '\0';
5220
5221 // Get rid of client or server
5222 p = strrchr(buf, '/');
5223 if (p == NULL) return false;
5224 else *p = '\0';
5225
5226 // check xawt/libmawt.so
5227 strcpy(libmawtpath, buf);
5228 strcat(libmawtpath, xawtstr);
5229 if (::stat(libmawtpath, &statbuf) == 0) return false;
5230
5231 // check libawt_xawt.so
5232 strcpy(libmawtpath, buf);
5233 strcat(libmawtpath, new_xawtstr);
5234 if (::stat(libmawtpath, &statbuf) == 0) return false;
5235
5236 return true;
5237 }
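// Editorial example (hypothetical layout): if os::jvm_path() returns
// "/opt/jdk/jre/lib/ppc64/server/libjvm.so", stripping the last two path
// components leaves "/opt/jdk/jre/lib/ppc64"; the function then stats
// ".../lib/ppc64/xawt/libmawt.so" and ".../lib/ppc64/libawt_xawt.so" and
// reports a headless JRE only if neither library exists.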
5238
5239 // Get the default path to the core file
5240 // Returns the length of the string
5241 int os::get_core_path(char* buffer, size_t bufferSize) {
5242 const char* p = get_current_directory(buffer, bufferSize);
5243
5244 if (p == NULL) {
5245 assert(p != NULL, "failed to get current directory");
5246 return 0;
5247 }
5248
5249 return strlen(buffer);
5250 }
5251
5252 #ifndef PRODUCT
5253 void TestReserveMemorySpecial_test() {
5254 // No tests available for this platform
5255 }
5256 #endif
