src/os/aix/vm/os_aix.cpp

author:      simonis
date:        Fri, 06 Sep 2013 20:16:09 +0200
changeset:   6465:666e6ce3976c
child:       6471:3068270ba476
permissions: -rw-r--r--

8023038: PPC64 (part 15): Platform files for AIX/PPC64 support
Reviewed-by: kvn

/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2012, 2013 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
// According to the AIX OS doc, #pragma alloca must be used
// with the C++ compiler before referencing the function alloca().
#pragma alloca

// no precompiled headers
#include "classfile/classLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm_aix.h"
#include "libperfstat_aix.hpp"
#include "loadlib_aix.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/filemap.hpp"
#include "mutex_aix.inline.hpp"
#include "oops/oop.inline.hpp"
#include "os_share_aix.hpp"
#include "porting_aix.hpp"
#include "prims/jniFastGetField.hpp"
#include "prims/jvm.h"
#include "prims/jvm_misc.hpp"
#include "runtime/arguments.hpp"
#include "runtime/extendedPC.hpp"
#include "runtime/globals.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/osThread.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/statSampler.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/timer.hpp"
#include "services/attachListener.hpp"
#include "services/runtimeService.hpp"
#include "thread_aix.inline.hpp"
#include "utilities/decoder.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/events.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/vmError.hpp"
#ifdef TARGET_ARCH_ppc
# include "assembler_ppc.inline.hpp"
# include "nativeInst_ppc.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif

// put OS-includes here (sorted alphabetically)
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <poll.h>
#include <procinfo.h>
#include <pthread.h>
#include <pwd.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/ipc.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/select.h>
#include <sys/shm.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/sysinfo.h>
#include <sys/systemcfg.h>
#include <sys/time.h>
#include <sys/times.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/vminfo.h>
#include <sys/wait.h>

// Add missing declarations (should be in procinfo.h, but aren't until AIX 6.1).
#if !defined(_AIXVERSION_610)
extern "C" {
  int getthrds64(pid_t ProcessIdentifier,
                 struct thrdentry64* ThreadBuffer,
                 int ThreadSize,
                 tid64_t* IndexPointer,
                 int Count);
}
#endif

// Excerpts from systemcfg.h definitions newer than AIX 5.3
#ifndef PV_7
# define PV_7 0x200000          // Power PC 7
# define PV_7_Compat 0x208000   // Power PC 7
#endif

#define MAX_PATH (2 * K)

// for timer info max values which include all bits
#define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
// for multipage initialization error analysis (in 'g_multipage_error')
#define ERROR_MP_OS_TOO_OLD                          100
#define ERROR_MP_EXTSHM_ACTIVE                       101
#define ERROR_MP_VMGETINFO_FAILED                    102
#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103

// The semantics in this file are thus that codeptr_t is a *real code pointer*.
// This means that any function taking codeptr_t as argument will assume
// a real code pointer and won't handle function descriptors (e.g. getFuncName),
// whereas functions taking address as argument will deal with function
// descriptors (e.g. os::dll_address_to_library_name).
typedef unsigned int* codeptr_t;
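
// For reference (not part of the original change): on AIX/PPC64 a function
// pointer literal points at a function descriptor in the data segment rather
// than at code. A sketch of the conventional three-slot layout (the class
// actually used below is FunctionDescriptor from porting_aix.hpp; the field
// names here are illustrative):
//
//   struct FunctionDescriptorSketch {
//     address entry;   // code entry point - this is the real codeptr_t
//     address toc;     // TOC base of the owning module
//     address env;     // environment pointer (unused for C/C++)
//   };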
// typedefs for stackslots, stack pointers, pointers to op codes
typedef unsigned long stackslot_t;
typedef stackslot_t* stackptr_t;

// query dimensions of the stack of the calling thread
static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);

// function to check a given stack pointer against given stack limits
inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
  if (((uintptr_t)sp) & 0x7) {
    return false;
  }
  if (sp > stack_base) {
    return false;
  }
  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
    return false;
  }
  return true;
}

// returns true if function is a valid codepointer
inline bool is_valid_codepointer(codeptr_t p) {
  if (!p) {
    return false;
  }
  if (((uintptr_t)p) & 0x3) {
    return false;
  }
  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
    return false;
  }
  return true;
}

// macro to check a given stack pointer against given stack limits and to die if test fails
#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
}

// macro to check the current stack pointer against given stacklimits
#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
  address sp; \
  sp = os::current_stack_pointer(); \
  CHECK_STACK_PTR(sp, stack_base, stack_size); \
}
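
// Usage sketch (not part of the original change): a caller that holds a
// Thread* - 'thread' is a hypothetical local here - would sanity-check its
// own stack like this (the same pattern appears in java_start() below):
//
//   CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());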
////////////////////////////////////////////////////////////////////////////////
// global variables (for a description see os_aix.hpp)

julong    os::Aix::_physical_memory = 0;
pthread_t os::Aix::_main_thread = ((pthread_t)0);
int       os::Aix::_page_size = -1;
int       os::Aix::_on_pase = -1;
int       os::Aix::_os_version = -1;
int       os::Aix::_stack_page_size = -1;
size_t    os::Aix::_shm_default_page_size = -1;
int       os::Aix::_can_use_64K_pages = -1;
int       os::Aix::_can_use_16M_pages = -1;
int       os::Aix::_xpg_sus_mode = -1;
int       os::Aix::_extshm = -1;
int       os::Aix::_logical_cpus = -1;

////////////////////////////////////////////////////////////////////////////////
// local variables

static int      g_multipage_error  = -1;   // error analysis for multipage initialization
static jlong    initial_time_count = 0;
static int      clock_tics_per_sec = 100;
static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
static bool     check_signals      = true;
static pid_t    _initial_pid       = 0;
static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
static sigset_t SR_sigset;
static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls

julong os::available_memory() {
  return Aix::available_memory();
}

julong os::Aix::available_memory() {
  Unimplemented();
  return 0;
}

julong os::physical_memory() {
  return Aix::physical_memory();
}

////////////////////////////////////////////////////////////////////////////////
// environment support

bool os::getenv(const char* name, char* buf, int len) {
  const char* val = ::getenv(name);
  if (val != NULL && strlen(val) < (size_t)len) {
    strcpy(buf, val);
    return true;
  }
  if (len > 0) buf[0] = 0;  // return a null string
  return false;
}
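
// Usage sketch (not part of the original change): the value is copied only if
// it fits, so a too-short buffer simply yields an empty string and 'false':
//
//   char buf[64];
//   if (os::getenv("EXTSHM", buf, sizeof(buf))) {
//     // buf now holds the EXTSHM setting, e.g. "ON"
//   }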

// Return true if the process runs with special (setuid/setgid) privileges.

bool os::have_special_privileges() {
  static bool init = false;
  static bool privileges = false;
  if (!init) {
    privileges = (getuid() != geteuid()) || (getgid() != getegid());
    init = true;
  }
  return privileges;
}

// Helper function, emulates disclaim64 using multiple 32-bit disclaims
// because we cannot use disclaim64() on AS/400 and old AIX releases.
static bool my_disclaim64(char* addr, size_t size) {

  if (size == 0) {
    return true;
  }

  // Maximum size a 32-bit disclaim() accepts. (Theoretically 4GB, but I just do not trust that.)
  const unsigned int maxDisclaimSize = 0x80000000;

  const unsigned int numFullDisclaimsNeeded = (size / maxDisclaimSize);
  const unsigned int lastDisclaimSize = (size % maxDisclaimSize);

  char* p = addr;

  for (unsigned int i = 0; i < numFullDisclaimsNeeded; i++) {
    if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
      return false;
    }
    p += maxDisclaimSize;
  }

  if (lastDisclaimSize > 0) {
    if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
      //if (Verbose)
      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
      return false;
    }
  }

  return true;
}
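
// Worked example (hypothetical sizes, not part of the original change):
// disclaiming a 5 GB region splits into two full 2 GB disclaims plus a 1 GB
// remainder, since maxDisclaimSize is 0x80000000 (2 GB):
//
//   numFullDisclaimsNeeded = 5G / 2G = 2
//   lastDisclaimSize       = 5G % 2G = 1G
//   my_disclaim64(base, 5*G);   // disclaim(2G), disclaim(2G), disclaim(1G)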

// Cpu architecture string
#if defined(PPC32)
static char cpu_arch[] = "ppc";
#elif defined(PPC64)
static char cpu_arch[] = "ppc64";
#else
#error Add appropriate cpu_arch setting
#endif

// Given an address, returns the size of the page backing that address.
size_t os::Aix::query_pagesize(void* addr) {

  vm_page_info pi;
  pi.addr = (uint64_t)addr;
  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
    return pi.pagesize;
  } else {
    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
    assert(false, "vmgetinfo failed to retrieve page size");
    return SIZE_4K;
  }

}

// Returns the kernel thread id of the currently running thread.
pid_t os::Aix::gettid() {
  return (pid_t) thread_self();
}

void os::Aix::initialize_system_info() {

  // get the number of online (logical) cpus instead of configured
  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
  assert(_processor_count > 0, "_processor_count must be > 0");

  // retrieve total physical storage
  os::Aix::meminfo_t mi;
  if (!os::Aix::get_meminfo(&mi)) {
    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
    assert(false, "os::Aix::get_meminfo failed.");
  }
  _physical_memory = (julong) mi.real_total;
}

// Helper function for tracing page sizes.
static const char* describe_pagesize(size_t pagesize) {
  switch (pagesize) {
    case SIZE_4K : return "4K";
    case SIZE_64K: return "64K";
    case SIZE_16M: return "16M";
    case SIZE_16G: return "16G";
    default:
      assert(false, "surprise");
      return "??";
  }
}

// Retrieve information about multipage size support. Will initialize
// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
// Aix::_can_use_16M_pages.
// Must be called before calling os::large_page_init().
void os::Aix::query_multipage_support() {

  guarantee(_page_size == -1 &&
            _stack_page_size == -1 &&
            _can_use_64K_pages == -1 &&
            _can_use_16M_pages == -1 &&
            g_multipage_error == -1,
            "do not call twice");

  _page_size = ::sysconf(_SC_PAGESIZE);

  // This really would surprise me.
  assert(_page_size == SIZE_4K, "surprise!");

  // Query the default data page size (default page size for C-Heap, pthread stacks and .bss).
  // The default data page size is influenced either by linker options (-bdatapsize)
  // or by the environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
  // the default should be 4K.
  size_t data_page_size = SIZE_4K;
  {
    void* p = ::malloc(SIZE_16M);
    data_page_size = os::Aix::query_pagesize(p);
    ::free(p);
  }

  // query default shm page size (LDR_CNTRL SHMPSIZE)
  {
    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
    guarantee(shmid != -1, "shmget failed");
    void* p = ::shmat(shmid, NULL, 0);
    ::shmctl(shmid, IPC_RMID, NULL);
    guarantee(p != (void*) -1, "shmat failed");
    _shm_default_page_size = os::Aix::query_pagesize(p);
    ::shmdt(p);
  }

  // Before querying the stack page size, make sure we are not running as the primordial
  // thread (because the primordial thread's stack may have a different page size than
  // pthread thread stacks). Running a VM on the primordial thread won't work for a
  // number of reasons, so we may just as well guarantee it here.
  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");

  // query stack page size
  {
    int dummy = 0;
    _stack_page_size = os::Aix::query_pagesize(&dummy);
    // everything else would surprise me and should be looked into
    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
    // also, just for completeness: pthread stacks are allocated from C heap, so
    // stack page size should be the same as data page size
    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
  }

  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
  // for System V shm.
  if (Aix::extshm()) {
    if (Verbose) {
      fprintf(stderr, "EXTSHM is active - will disable large page support.\n"
                      "Please make sure EXTSHM is OFF for large page support.\n");
    }
    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
    _can_use_64K_pages = _can_use_16M_pages = 0;
    goto query_multipage_support_end;
  }

  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
  {
    const int MAX_PAGE_SIZES = 4;
    psize_t sizes[MAX_PAGE_SIZES];
    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
    if (num_psizes == -1) {
      if (Verbose) {
        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
        fprintf(stderr, "disabling multipage support.\n");
      }
      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
      _can_use_64K_pages = _can_use_16M_pages = 0;
      goto query_multipage_support_end;
    }
    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
    if (Verbose) {
      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
      for (int i = 0; i < num_psizes; i++) {
        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
      }
      fprintf(stderr, " .\n");
    }

    // Can we use 64K, 16M pages?
    _can_use_64K_pages = 0;
    _can_use_16M_pages = 0;
    for (int i = 0; i < num_psizes; i++) {
      if (sizes[i] == SIZE_64K) {
        _can_use_64K_pages = 1;
      } else if (sizes[i] == SIZE_16M) {
        _can_use_16M_pages = 1;
      }
    }

    if (!_can_use_64K_pages) {
      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
    }

    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
    // there must be an actual 16M page pool, and we must run with enough rights.
    if (_can_use_16M_pages) {
      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
      guarantee(shmid != -1, "shmget failed");
      struct shmid_ds shm_buf = { 0 };
      shm_buf.shm_pagesize = SIZE_16M;
      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0;
      const int en = errno;
      ::shmctl(shmid, IPC_RMID, NULL);
      if (!can_set_pagesize) {
        if (Verbose) {
          fprintf(stderr, "Failed to allocate even one measly 16M page. shmctl failed with %d (%s).\n"
                          "Will deactivate 16M support.\n", en, strerror(en));
        }
        _can_use_16M_pages = 0;
      }
    }

  } // end: check which pages can be used for shared memory

query_multipage_support_end:

  guarantee(_page_size != -1 &&
            _stack_page_size != -1 &&
            _can_use_64K_pages != -1 &&
            _can_use_16M_pages != -1, "Page sizes not properly initialized");

  if (_can_use_64K_pages) {
    g_multipage_error = 0;
  }

  if (Verbose) {
    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
    fprintf(stderr, "Can use 64K pages dynamically with shared memory: %s\n", (_can_use_64K_pages ? "yes" : "no"));
    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" : "no"));
    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
  }

} // end os::Aix::query_multipage_support()
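
// For reference (a sketch mirroring the 16M probe above, not part of the
// original change): requesting a 64K page size dynamically for a System V shm
// segment uses the same pattern - the page size must be set via SHM_PAGESIZE
// before the memory is first touched:
//
//   int shmid = ::shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
//   struct shmid_ds ds = { 0 };
//   ds.shm_pagesize = SIZE_64K;
//   if (::shmctl(shmid, SHM_PAGESIZE, &ds) == 0) {
//     void* p = ::shmat(shmid, NULL, 0);   // backed by 64K pages on success
//   }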

// The code for this method was initially derived from the version in os_linux.cpp.
void os::init_system_properties_values() {
  // The next few definitions allow the code to be verbatim:
#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n), mtInternal)
#define DEFAULT_LIBPATH "/usr/lib:/lib"
#define EXTENSIONS_DIR  "/lib/ext"
#define ENDORSED_DIR    "/lib/endorsed"

  // sysclasspath, java_home, dll_dir
  char *home_path;
  char *dll_path;
  char *pslash;
  char buf[MAXPATHLEN];
  os::jvm_path(buf, sizeof(buf));

  // Found the full path to libjvm.so.
  // Now cut the path to <java_home>/jre if we can.
  *(strrchr(buf, '/')) = '\0'; // get rid of /libjvm.so
  pslash = strrchr(buf, '/');
  if (pslash != NULL) {
    *pslash = '\0';            // get rid of /{client|server|hotspot}
  }

  dll_path = malloc(strlen(buf) + 1);
  strcpy(dll_path, buf);
  Arguments::set_dll_dir(dll_path);

  if (pslash != NULL) {
    pslash = strrchr(buf, '/');
    if (pslash != NULL) {
      *pslash = '\0';          // get rid of /<arch>
      pslash = strrchr(buf, '/');
      if (pslash != NULL) {
        *pslash = '\0';        // get rid of /lib
      }
    }
  }

  home_path = malloc(strlen(buf) + 1);
  strcpy(home_path, buf);
  Arguments::set_java_home(home_path);

  if (!set_boot_path('/', ':')) return;

  // Where to look for native libraries.

  // On Aix we get the user setting of LIBPATH.
  // Eventually, all the library path setting will be done here.
  char *ld_library_path;

  // Construct the invariant part of ld_library_path.
  ld_library_path = (char *) malloc(sizeof(DEFAULT_LIBPATH));
  sprintf(ld_library_path, DEFAULT_LIBPATH);

  // Get the user setting of LIBPATH, and prepend it.
  char *v = ::getenv("LIBPATH");
  if (v == NULL) {
    v = "";
  }

  char *t = ld_library_path;
  // That's +1 for the colon and +1 for the trailing '\0'.
  ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
  sprintf(ld_library_path, "%s:%s", v, t);

  Arguments::set_library_path(ld_library_path);

  // Extensions directories.
  char* cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(EXTENSIONS_DIR));
  sprintf(cbuf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
  Arguments::set_ext_dirs(cbuf);

  // Endorsed standards default directory.
  cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
  sprintf(cbuf, "%s" ENDORSED_DIR, Arguments::get_java_home());
  Arguments::set_endorsed_dirs(cbuf);

#undef malloc
#undef DEFAULT_LIBPATH
#undef EXTENSIONS_DIR
#undef ENDORSED_DIR
}

////////////////////////////////////////////////////////////////////////////////
// breakpoint support

void os::breakpoint() {
  BREAKPOINT;
}

extern "C" void breakpoint() {
  // use debugger to set breakpoint here
}

////////////////////////////////////////////////////////////////////////////////
// signal support

debug_only(static bool signal_sets_initialized = false);
static sigset_t unblocked_sigs, vm_sigs, allowdebug_blocked_sigs;

bool os::Aix::is_sig_ignored(int sig) {
  struct sigaction oact;
  sigaction(sig, (struct sigaction*)NULL, &oact);
  void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
    : CAST_FROM_FN_PTR(void*, oact.sa_handler);
  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
    return true;
  else
    return false;
}

void os::Aix::signal_sets_init() {
  // Should also have an assertion stating we are still single-threaded.
  assert(!signal_sets_initialized, "Already initialized");
  // Fill in signals that are necessarily unblocked for all threads in
  // the VM. Currently, we unblock the following signals:
  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless overridden
  //                         by -Xrs (=ReduceSignalUsage));
  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
  // the dispositions or masks wrt these signals.
  // Programs embedding the VM that want to use the above signals for their
  // own purposes must, at this time, use the "-Xrs" option to prevent
  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
  // (See bug 4345157, and other related bugs.)
  // In reality, though, unblocking these signals is really a nop, since
  // these signals are not blocked by default.
  sigemptyset(&unblocked_sigs);
  sigemptyset(&allowdebug_blocked_sigs);
  sigaddset(&unblocked_sigs, SIGILL);
  sigaddset(&unblocked_sigs, SIGSEGV);
  sigaddset(&unblocked_sigs, SIGBUS);
  sigaddset(&unblocked_sigs, SIGFPE);
  sigaddset(&unblocked_sigs, SIGTRAP);
  sigaddset(&unblocked_sigs, SIGDANGER);
  sigaddset(&unblocked_sigs, SR_signum);

  if (!ReduceSignalUsage) {
   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
   }
   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
     sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
     sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
   }
  }
  // Fill in signals that are blocked by all but the VM thread.
  sigemptyset(&vm_sigs);
  if (!ReduceSignalUsage)
    sigaddset(&vm_sigs, BREAK_SIGNAL);
  debug_only(signal_sets_initialized = true);
}

// These are signals that are unblocked while a thread is running Java.
// (For some reason, they get blocked by default.)
sigset_t* os::Aix::unblocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &unblocked_sigs;
}

// These are the signals that are blocked while a (non-VM) thread is
// running Java. Only the VM thread handles these signals.
sigset_t* os::Aix::vm_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &vm_sigs;
}

// These are signals that are blocked during cond_wait to allow the debugger in.
sigset_t* os::Aix::allowdebug_blocked_signals() {
  assert(signal_sets_initialized, "Not initialized");
  return &allowdebug_blocked_sigs;
}

void os::Aix::hotspot_sigmask(Thread* thread) {

  // Save caller's signal mask before setting VM signal mask.
  sigset_t caller_sigmask;
  pthread_sigmask(SIG_BLOCK, NULL, &caller_sigmask);

  OSThread* osthread = thread->osthread();
  osthread->set_caller_sigmask(caller_sigmask);

  pthread_sigmask(SIG_UNBLOCK, os::Aix::unblocked_signals(), NULL);

  if (!ReduceSignalUsage) {
    if (thread->is_VM_thread()) {
      // Only the VM thread handles BREAK_SIGNAL ...
      pthread_sigmask(SIG_UNBLOCK, vm_signals(), NULL);
    } else {
      // ... all other threads block BREAK_SIGNAL
      pthread_sigmask(SIG_BLOCK, vm_signals(), NULL);
    }
  }
}

// Retrieve memory information.
// Returns false if something went wrong;
// the content of pmi is undefined in this case.
bool os::Aix::get_meminfo(meminfo_t* pmi) {

  assert(pmi, "get_meminfo: invalid parameter");

  memset(pmi, 0, sizeof(meminfo_t));

  if (os::Aix::on_pase()) {

    Unimplemented();
    return false;

  } else {

    // On AIX, use the (dynamically loaded) perfstat library to retrieve memory statistics.
    // See:
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm

    perfstat_memory_total_t psmt;
    memset(&psmt, '\0', sizeof(psmt));
    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
    if (rc == -1) {
      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)\n", errno);
      assert(0, "perfstat_memory_total() failed");
      return false;
    }

    assert(rc == 1, "perfstat_memory_total() - weird return code");

    // excerpt from
    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
    // The fields of perfstat_memory_total_t:
    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
    // u_longlong_t real_total         Total real memory (in 4 KB pages).
    // u_longlong_t real_free          Free real memory (in 4 KB pages).
    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).

    pmi->virt_total = psmt.virt_total * 4096;
    pmi->real_total = psmt.real_total * 4096;
    pmi->real_free = psmt.real_free * 4096;
    pmi->pgsp_total = psmt.pgsp_total * 4096;
    pmi->pgsp_free = psmt.pgsp_free * 4096;

    return true;

  }
} // end os::Aix::get_meminfo
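
// Usage sketch (not part of the original change): all meminfo_t fields are in
// bytes here, since get_meminfo() scales the perfstat 4 KB page counts:
//
//   os::Aix::meminfo_t mi;
//   if (os::Aix::get_meminfo(&mi)) {
//     fprintf(stderr, "physical: %llu free of %llu, paging space: %llu free of %llu\n",
//             mi.real_free, mi.real_total, mi.pgsp_free, mi.pgsp_total);
//   }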

// Retrieve global cpu information.
// Returns false if something went wrong;
// the content of pci is undefined in this case.
bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
  assert(pci, "get_cpuinfo: invalid parameter");
  memset(pci, 0, sizeof(cpuinfo_t));

  perfstat_cpu_total_t psct;
  memset(&psct, '\0', sizeof(psct));

  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)\n", errno);
    assert(0, "perfstat_cpu_total() failed");
    return false;
  }

  // global cpu information
  strcpy(pci->description, psct.description);
  pci->processorHZ = psct.processorHZ;
  pci->ncpus = psct.ncpus;
  os::Aix::_logical_cpus = psct.ncpus;
  for (int i = 0; i < 3; i++) {
    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
  }

  // get the processor version from _system_configuration
  switch (_system_configuration.version) {
  case PV_7:
    strcpy(pci->version, "Power PC 7");
    break;
  case PV_6_1:
    strcpy(pci->version, "Power PC 6 DD1.x");
    break;
  case PV_6:
    strcpy(pci->version, "Power PC 6");
    break;
  case PV_5:
    strcpy(pci->version, "Power PC 5");
    break;
  case PV_5_2:
    strcpy(pci->version, "Power PC 5_2");
    break;
  case PV_5_3:
    strcpy(pci->version, "Power PC 5_3");
    break;
  case PV_5_Compat:
    strcpy(pci->version, "PV_5_Compat");
    break;
  case PV_6_Compat:
    strcpy(pci->version, "PV_6_Compat");
    break;
  case PV_7_Compat:
    strcpy(pci->version, "PV_7_Compat");
    break;
  default:
    strcpy(pci->version, "unknown");
  }

  return true;

} //end os::Aix::get_cpuinfo
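
// Usage sketch (not part of the original change; field types as declared in
// libperfstat_aix.hpp / os_aix.hpp are assumed):
//
//   os::Aix::cpuinfo_t ci;
//   if (os::Aix::get_cpuinfo(&ci)) {
//     fprintf(stderr, "%d logical cpus, version '%s', 1-min load %.2f\n",
//             ci.ncpus, ci.version, ci.loadavg[0]);
//   }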

//////////////////////////////////////////////////////////////////////////////
// detecting pthread library

void os::Aix::libpthread_init() {
  return;
}

//////////////////////////////////////////////////////////////////////////////
// create new thread

// Thread start routine for all newly created threads
static void *java_start(Thread *thread) {

  // find out my own stack dimensions
  {
    // actually, this should do exactly the same as thread->record_stack_base_and_size...
    address base = 0;
    size_t size = 0;
    query_stack_dimensions(&base, &size);
    thread->set_stack_base(base);
    thread->set_stack_size(size);
  }

  // Do some sanity checks.
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Try to randomize the cache line index of hot stack frames.
  // This helps when threads with the same stack trace evict each other's
  // cache lines. The threads can be either from the same JVM instance, or
  // from different JVM instances. The benefit is especially noticeable for
  // processors with hyperthreading technology.

  static int counter = 0;
  int pid = os::current_process_id();
  alloca(((pid ^ counter++) & 7) * 128);

  ThreadLocalStorage::set_thread(thread);

  OSThread* osthread = thread->osthread();

  // thread_id is the kernel thread id (similar to a Solaris LWP id)
  osthread->set_thread_id(os::Aix::gettid());

  // initialize signal mask for this thread
  os::Aix::hotspot_sigmask(thread);

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");

  // call one more level of start routine
  thread->run();

  return 0;
}

bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {

  // We want the whole function to be synchronized.
  ThreadCritical cs;

  assert(thread->osthread() == NULL, "caller responsible");

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);
  if (osthread == NULL) {
    return false;
  }

  // set the correct thread state
  osthread->set_thread_type(thr_type);

  // Initial state is ALLOCATED but not INITIALIZED
  osthread->set_state(ALLOCATED);

  thread->set_osthread(osthread);

  // init thread attributes
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");

  // Make sure we run in 1:1 kernel-user-thread mode.
  if (os::Aix::on_aix()) {
    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
  } // end: aix

  // Start in suspended state, and in os::thread_start, wake the thread up.
  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");

  // calculate stack size if it's not specified by caller
  if (os::Aix::supports_variable_stack_size()) {
    if (stack_size == 0) {
      stack_size = os::Aix::default_stack_size(thr_type);

      switch (thr_type) {
      case os::java_thread:
        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss.
        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
        stack_size = JavaThread::stack_size_at_create();
        break;
      case os::compiler_thread:
        if (CompilerThreadStackSize > 0) {
          stack_size = (size_t)(CompilerThreadStackSize * K);
          break;
        } // else fall through:
          // use VMThreadStackSize if CompilerThreadStackSize is not defined
      case os::vm_thread:
      case os::pgc_thread:
      case os::cgc_thread:
      case os::watcher_thread:
        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
        break;
      }
    }

    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
    pthread_attr_setstacksize(&attr, stack_size);
  } // else let pthread_create() pick the default value (96 KB on AIX)

  pthread_t tid;
  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);

  pthread_attr_destroy(&attr);

  if (ret != 0) {
    if (PrintMiscellaneous && (Verbose || WizardMode)) {
      perror("pthread_create()");
    }
    // Need to clean up stuff we've allocated so far
    thread->set_osthread(NULL);
    delete osthread;
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_pthread_id(tid);

  return true;
}

/////////////////////////////////////////////////////////////////////////////
// attach existing thread

// bootstrap the main thread
bool os::create_main_thread(JavaThread* thread) {
  assert(os::Aix::_main_thread == pthread_self(), "should be called inside main thread");
  return create_attached_thread(thread);
}

bool os::create_attached_thread(JavaThread* thread) {
#ifdef ASSERT
    thread->verify_not_published();
#endif

  // Allocate the OSThread object
  OSThread* osthread = new OSThread(NULL, NULL);

  if (osthread == NULL) {
    return false;
  }

  // Store pthread info into the OSThread
  osthread->set_thread_id(os::Aix::gettid());
  osthread->set_pthread_id(::pthread_self());

  // initialize floating point control register
  os::Aix::init_thread_fpu_state();

  // some sanity checks
  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());

  // Initial thread state is RUNNABLE
  osthread->set_state(RUNNABLE);

  thread->set_osthread(osthread);

  if (UseNUMA) {
    int lgrp_id = os::numa_get_group_id();
    if (lgrp_id != -1) {
      thread->set_lgrp_id(lgrp_id);
    }
  }

  // initialize signal mask for this thread
  // and save the caller's signal mask
  os::Aix::hotspot_sigmask(thread);

  return true;
}

void os::pd_start_thread(Thread* thread) {
  int status = pthread_continue_np(thread->osthread()->pthread_id());
  assert(status == 0, "thr_continue failed");
}

// Free OS resources related to the OSThread
void os::free_thread(OSThread* osthread) {
  assert(osthread != NULL, "osthread not set");

  if (Thread::current()->osthread() == osthread) {
    // Restore caller's signal mask
    sigset_t sigmask = osthread->caller_sigmask();
    pthread_sigmask(SIG_SETMASK, &sigmask, NULL);
  }

  delete osthread;
}

//////////////////////////////////////////////////////////////////////////////
// thread local storage

int os::allocate_thread_local_storage() {
  pthread_key_t key;
  int rslt = pthread_key_create(&key, NULL);
  assert(rslt == 0, "cannot allocate thread local storage");
  return (int)key;
}

// Note: This is currently not used by VM, as we don't destroy TLS key
// on VM exit.
void os::free_thread_local_storage(int index) {
  int rslt = pthread_key_delete((pthread_key_t)index);
  assert(rslt == 0, "invalid index");
}

void os::thread_local_storage_at_put(int index, void* value) {
  int rslt = pthread_setspecific((pthread_key_t)index, value);
  assert(rslt == 0, "pthread_setspecific failed");
}

extern "C" Thread* get_thread() {
  return ThreadLocalStorage::thread();
}

////////////////////////////////////////////////////////////////////////////////
// time support

// Time since start-up in seconds to a fine granularity.
// Used by VMSelfDestructTimer and the MemProfiler.
double os::elapsedTime() {
  return (double)(os::elapsed_counter()) * 0.000001;
}

jlong os::elapsed_counter() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  return jlong(time.tv_sec) * 1000 * 1000 + jlong(time.tv_usec) - initial_time_count;
}

jlong os::elapsed_frequency() {
  return (1000 * 1000);
}

// For now, we say that AIX does not support vtime. I have no idea
// whether it can actually be made to (DLD, 9/13/05).

bool os::supports_vtime() { return false; }
bool os::enable_vtime()   { return false; }
bool os::vtime_enabled()  { return false; }
double os::elapsedVTime() {
  // better than nothing, but not much
  return elapsedTime();
}

jlong os::javaTimeMillis() {
  timeval time;
  int status = gettimeofday(&time, NULL);
  assert(status != -1, "aix error at gettimeofday()");
  return jlong(time.tv_sec) * 1000 + jlong(time.tv_usec / 1000);
}

// We need to manually declare mread_real_time,
// because IBM didn't provide a prototype in time.h.
// (They probably only ever tested in C, not C++.)
extern "C"
int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);

jlong os::javaTimeNanos() {
  if (os::Aix::on_pase()) {
    Unimplemented();
    return 0;
  }
  else {
    // On AIX use the precision of the processor's real time clock
    // or time base registers.
    timebasestruct_t time;
    int rc;

    // If the CPU has a time register, it will be used and
    // we have to convert to real time first. After conversion we have the following data:
    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
    // time.tb_low  [nanoseconds after the last full second above]
    // We better use mread_real_time here instead of read_real_time
    // to ensure that we will get a monotonically increasing time.
    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
      rc = time_base_to_time(&time, TIMEBASE_SZ);
      assert(rc != -1, "aix error at time_base_to_time()");
    }
    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
  }
}

void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
  {
    // gettimeofday - based on time in seconds since the Epoch thus does not wrap
    info_ptr->max_value = ALL_64_BITS;

    // gettimeofday is a real time clock so it skips
    info_ptr->may_skip_backward = true;
    info_ptr->may_skip_forward = true;
  }

  info_ptr->kind = JVMTI_TIMER_ELAPSED;    // elapsed not CPU time
}

// Return the real, user, and system times in seconds from an
// arbitrary fixed point in the past.
bool os::getTimesSecs(double* process_real_time,
                      double* process_user_time,
                      double* process_system_time) {
  Unimplemented();
  return false;
}

char * os::local_time_string(char *buf, size_t buflen) {
  struct tm t;
  time_t long_time;
  time(&long_time);
  localtime_r(&long_time, &t);
  jio_snprintf(buf, buflen, "%d-%02d-%02d %02d:%02d:%02d",
               t.tm_year + 1900, t.tm_mon + 1, t.tm_mday,
               t.tm_hour, t.tm_min, t.tm_sec);
  return buf;
}

struct tm* os::localtime_pd(const time_t* clock, struct tm* res) {
  return localtime_r(clock, res);
}

////////////////////////////////////////////////////////////////////////////////
// runtime exit support

// Note: os::shutdown() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::shutdown(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::shutdown() {

  // allow PerfMemory to attempt cleanup of any persistent resources
  perfMemory_exit();

  // needs to remove object in file system
  AttachListener::abort();

  // flush buffered output, finish log files
  ostream_abort();

  // Check for abort hook
  abort_hook_t abort_hook = Arguments::abort_hook();
  if (abort_hook != NULL) {
    abort_hook();
  }
}

// Note: os::abort() might be called very early during initialization, or
// called from a signal handler. Before adding something to os::abort(), make
// sure it is async-safe and can handle a partially initialized VM.
void os::abort(bool dump_core) {
  os::shutdown();
  if (dump_core) {
#ifndef PRODUCT
    fdStream out(defaultStream::output_fd());
    out.print_raw("Current thread is ");
    char buf[16];
    jio_snprintf(buf, sizeof(buf), UINTX_FORMAT, os::current_thread_id());
    out.print_raw_cr(buf);
    out.print_raw_cr("Dumping core ...");
#endif
    ::abort(); // dump core
  }

  ::exit(1);
}

// Die immediately, no exit hook, no abort hook, no cleanup.
void os::die() {
  ::abort();
}

// Unused on Aix for now.
void os::set_error_file(const char *logfile) {}

// This method is a copy of JDK's sysGetLastErrorString
// from src/solaris/hpi/src/system_md.c

size_t os::lasterror(char *buf, size_t len) {

  if (errno == 0) return 0;

  const char *s = ::strerror(errno);
  size_t n = ::strlen(s);
  if (n >= len) {
    n = len - 1;
  }
  ::strncpy(buf, s, n);
  buf[n] = '\0';
  return n;
}

intx os::current_thread_id() { return (intx)pthread_self(); }
int os::current_process_id() {

  // This implementation returns a unique pid, the pid of the
  // launcher thread that starts the vm 'process'.

  // Under POSIX, getpid() returns the same pid as the
  // launcher thread rather than a unique pid per thread.
  // Use gettid() if you want the old pre-NPTL behaviour.

  // if you are looking for the result of a call to getpid() that
  // returns a unique pid for the calling thread, then look at the
  // OSThread::thread_id() method in the osThread_linux.hpp file

  return (int)(_initial_pid ? _initial_pid : getpid());
}

// DLL functions

const char* os::dll_file_extension() { return ".so"; }

// This must be hard coded because it's the system's temporary
// directory, not the java application's temp directory, a la java.io.tmpdir.
const char* os::get_temp_directory() { return "/tmp"; }

static bool file_exists(const char* filename) {
  struct stat statbuf;
  if (filename == NULL || strlen(filename) == 0) {
    return false;
  }
  return os::stat(filename, &statbuf) == 0;
}

bool os::dll_build_name(char* buffer, size_t buflen,
                        const char* pname, const char* fname) {
  bool retval = false;
  // Copied from libhpi
  const size_t pnamelen = pname ? strlen(pname) : 0;

  // Return error on buffer overflow.
  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
    *buffer = '\0';
    return retval;
  }

  if (pnamelen == 0) {
    snprintf(buffer, buflen, "lib%s.so", fname);
    retval = true;
  } else if (strchr(pname, *os::path_separator()) != NULL) {
    int n;
    char** pelements = split_path(pname, &n);
    for (int i = 0; i < n; i++) {
      // Really shouldn't be NULL, but check can't hurt
      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
        continue; // skip the empty path values
      }
      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
      if (file_exists(buffer)) {
        retval = true;
        break;
      }
    }
    // release the storage
    for (int i = 0; i < n; i++) {
      if (pelements[i] != NULL) {
        FREE_C_HEAP_ARRAY(char, pelements[i], mtInternal);
      }
    }
    if (pelements != NULL) {
      FREE_C_HEAP_ARRAY(char*, pelements, mtInternal);
    }
  } else {
    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
    retval = true;
  }
  return retval;
}
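
// Examples of what this produces (a sketch, not part of the original change;
// buf/len are a caller's buffer, "jsig" a hypothetical library base name):
//
//   os::dll_build_name(buf, len, "",         "jsig");  // -> "libjsig.so"
//   os::dll_build_name(buf, len, "/usr/lib", "jsig");  // -> "/usr/lib/libjsig.so"
//   os::dll_build_name(buf, len, "/a:/b",    "jsig");  // -> first existing of
//                                                      //    "/a/libjsig.so", "/b/libjsig.so"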

// Check if addr is inside libjvm.so.
bool os::address_is_in_vm(address addr) {

  // Input could be a real pc or a function pointer literal. The latter
  // would be a function descriptor residing in the data segment of a module.

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(addr);
  if (lib) {
    if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
      return true;
    } else {
      return false;
    }
  } else {
    lib = LoadedLibraries::find_for_data_address(addr);
    if (lib) {
      if (strcmp(lib->get_shortname(), "libjvm.so") == 0) {
        return true;
      } else {
        return false;
      }
    } else {
      return false;
    }
  }
}

// Resolve an AIX function descriptor literal to a code pointer.
// If the input is a valid code pointer to a text segment of a loaded module,
//   it is returned unchanged.
// If the input is a valid AIX function descriptor, it is resolved to the
//   code entry point.
// If the input is neither a valid function descriptor nor a valid code pointer,
//   NULL is returned.
static address resolve_function_descriptor_to_code_pointer(address p) {

  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
  if (lib) {
    // it's a real code pointer
    return p;
  } else {
    lib = LoadedLibraries::find_for_data_address(p);
    if (lib) {
      // pointer to data segment, potential function descriptor
      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
      if (LoadedLibraries::find_for_text_address(code_entry)) {
        // it's a function descriptor
        return code_entry;
      }
    }
  }
  return NULL;
}
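
// Usage sketch (not part of the original change): taking the address of a
// function in C++ on AIX yields the descriptor in the data segment, not the
// code itself; 'some_function' is hypothetical:
//
//   address p  = (address)&some_function;                    // descriptor
//   address pc = resolve_function_descriptor_to_code_pointer(p);
//   // pc now points into a text segment, or is NULL if p was bogus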

bool os::dll_address_to_function_name(address addr, char *buf,
                                      int buflen, int *offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  // Go through Decoder::decode to call getFuncName which reads the name from the traceback table.
  return Decoder::decode(addr, buf, buflen, offset);
}

static int getModuleName(codeptr_t pc,                    // [in] program counter
                         char* p_name, size_t namelen,    // [out] optional: function name
                         char* p_errmsg, size_t errmsglen // [out] optional: user provided buffer for error messages
                         ) {

  // initialize output parameters
  if (p_name && namelen > 0) {
    *p_name = '\0';
  }
  if (p_errmsg && errmsglen > 0) {
    *p_errmsg = '\0';
  }

  const LoadedLibraryModule* const lib = LoadedLibraries::find_for_text_address((address)pc);
  if (lib) {
    if (p_name && namelen > 0) {
      sprintf(p_name, "%.*s", (int)namelen, lib->get_shortname());
    }
    return 0;
  }

  if (Verbose) {
    fprintf(stderr, "pc outside any module");
  }

  return -1;
}

bool os::dll_address_to_library_name(address addr, char* buf,
                                     int buflen, int* offset) {
  if (offset) {
    *offset = -1;
  }
  if (buf) {
    buf[0] = '\0';
  }

  // Resolve function ptr literals first.
  addr = resolve_function_descriptor_to_code_pointer(addr);
  if (!addr) {
    return false;
  }

  if (::getModuleName((codeptr_t) addr, buf, buflen, 0, 0) == 0) {
    return true;
  }
  return false;
}

// Loads a .dll/.so and in case of error checks if the .dll/.so was built
// for the same architecture as HotSpot is running on.
void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {

  if (ebuf && ebuflen > 0) {
    ebuf[0] = '\0';
    ebuf[ebuflen - 1] = '\0';
  }

  if (!filename || strlen(filename) == 0) {
    ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
    return NULL;
  }

  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
  void* result = ::dlopen(filename, RTLD_LAZY);
  if (result != NULL) {
    // Reload dll cache. Don't do this in signal handling.
    LoadedLibraries::reload();
    return result;
  } else {
    // error analysis when dlopen fails
    const char* const error_report = ::dlerror();
    if (error_report && ebuf && ebuflen > 0) {
      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
               filename, ::getenv("LIBPATH"), ::getenv("LD_LIBRARY_PATH"), error_report);
    }
  }
  return NULL;
}

// Glibc-2.0 libdl is not MT safe. If you are building with any glibc,
// chances are you might want to run the generated bits against glibc-2.0
// libdl.so, so always use locking for any version of glibc.
void* os::dll_lookup(void* handle, const char* name) {
  pthread_mutex_lock(&dl_mutex);
  void* res = dlsym(handle, name);
  pthread_mutex_unlock(&dl_mutex);
  return res;
}
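
// Usage sketch (not part of the original change; the library name is
// hypothetical): load a library, then resolve a symbol under dl_mutex:
//
//   char ebuf[1024];
//   void* handle = os::dll_load("libfoo.so", ebuf, sizeof(ebuf));
//   if (handle != NULL) {
//     void* sym = os::dll_lookup(handle, "foo_init");
//   } else {
//     // ebuf holds filename, LIBPATH, LD_LIBRARY_PATH and the dlerror() text
//   }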

void os::print_dll_info(outputStream *st) {
  st->print_cr("Dynamic libraries:");
  LoadedLibraries::print(st);
}

void os::print_os_info(outputStream* st) {
  st->print("OS:");

  st->print("uname:");
  struct utsname name;
  uname(&name);
  st->print(name.sysname); st->print(" ");
  st->print(name.nodename); st->print(" ");
  st->print(name.release); st->print(" ");
  st->print(name.version); st->print(" ");
  st->print(name.machine);
  st->cr();

  // rlimit
  st->print("rlimit:");
  struct rlimit rlim;

  st->print(" STACK ");
  getrlimit(RLIMIT_STACK, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", CORE ");
  getrlimit(RLIMIT_CORE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  st->print(", NPROC ");
  st->print("%d", sysconf(_SC_CHILD_MAX));

  st->print(", NOFILE ");
  getrlimit(RLIMIT_NOFILE, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%d", rlim.rlim_cur);

  st->print(", AS ");
  getrlimit(RLIMIT_AS, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);

  // Print limits on DATA, because it limits the C-heap.
  st->print(", DATA ");
  getrlimit(RLIMIT_DATA, &rlim);
  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
  else st->print("%uk", rlim.rlim_cur >> 10);
  st->cr();

  // load average
  st->print("load average:");
  double loadavg[3] = {-1.L, -1.L, -1.L};
  os::loadavg(loadavg, 3);
  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
  st->cr();
}

void os::print_memory_info(outputStream* st) {

  st->print_cr("Memory:");

  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ? "yes" : "no"));
  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ? "yes" : "no"));
  if (g_multipage_error != 0) {
    st->print_cr("  multipage error: %d", g_multipage_error);
  }

  // print out LDR_CNTRL because it affects the default page sizes
  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");

  const char* const extshm = ::getenv("EXTSHM");
  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");

  // Call os::Aix::get_meminfo() to retrieve memory statistics.
  os::Aix::meminfo_t mi;
  if (os::Aix::get_meminfo(&mi)) {
    char buffer[256];
    if (os::Aix::on_aix()) {
      jio_snprintf(buffer, sizeof(buffer),
                   "  physical total : %llu\n"
                   "  physical free  : %llu\n"
                   "  swap total     : %llu\n"
                   "  swap free      : %llu\n",
                   mi.real_total,
                   mi.real_free,
                   mi.pgsp_total,
                   mi.pgsp_free);
    } else {
      Unimplemented();
    }
    st->print_raw(buffer);
  } else {
    st->print_cr("  (no more information available)");
  }
}

void os::pd_print_cpu_info(outputStream* st) {
  // cpu
  st->print("CPU:");
  st->print("total %d", os::processor_count());
  // It's not safe to query the number of active processors after a crash.
  // st->print("(active %d)", os::active_processor_count());
  st->print(" %s", VM_Version::cpu_features());
  st->cr();
}

void os::print_siginfo(outputStream* st, void* siginfo) {
  // Use the common posix version.
  os::Posix::print_siginfo_brief(st, (const siginfo_t*) siginfo);
  st->cr();
}

static void print_signal_handler(outputStream* st, int sig,
                                 char* buf, size_t buflen);

void os::print_signal_handlers(outputStream* st, char* buf, size_t buflen) {
  st->print_cr("Signal Handlers:");
  print_signal_handler(st, SIGSEGV, buf, buflen);
  print_signal_handler(st, SIGBUS , buf, buflen);
  print_signal_handler(st, SIGFPE , buf, buflen);
  print_signal_handler(st, SIGPIPE, buf, buflen);
  print_signal_handler(st, SIGXFSZ, buf, buflen);
  print_signal_handler(st, SIGILL , buf, buflen);
  print_signal_handler(st, INTERRUPT_SIGNAL, buf, buflen);
  print_signal_handler(st, SR_signum, buf, buflen);
  print_signal_handler(st, SHUTDOWN1_SIGNAL, buf, buflen);
  print_signal_handler(st, SHUTDOWN2_SIGNAL , buf, buflen);
  print_signal_handler(st, SHUTDOWN3_SIGNAL , buf, buflen);
  print_signal_handler(st, BREAK_SIGNAL, buf, buflen);
  print_signal_handler(st, SIGTRAP, buf, buflen);
  1622   print_signal_handler(st, SIGDANGER, buf, buflen);
  1625 static char saved_jvm_path[MAXPATHLEN] = {0};
  1627 // Find the full path to the current module, libjvm.so or libjvm_g.so
  1628 void os::jvm_path(char *buf, jint buflen) {
  1629   // Error checking.
  1630   if (buflen < MAXPATHLEN) {
  1631     assert(false, "must use a large-enough buffer");
  1632     buf[0] = '\0';
  1633     return;
  1635   // Lazy resolve the path to current module.
  1636   if (saved_jvm_path[0] != 0) {
  1637     strcpy(buf, saved_jvm_path);
  1638     return;
  1641   Dl_info dlinfo;
  1642   int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
  1643   assert(ret != 0, "cannot locate libjvm");
  1644   char* rp = realpath((char *)dlinfo.dli_fname, buf);
  1645   assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
  1647   strcpy(saved_jvm_path, buf);
  1650 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
  1651   // no prefix required, not even "_"
  1654 void os::print_jni_name_suffix_on(outputStream* st, int args_size) {
  1655   // no suffix required
  1658 ////////////////////////////////////////////////////////////////////////////////
  1659 // sun.misc.Signal support
  1661 static volatile jint sigint_count = 0;
  1663 static void
  1664 UserHandler(int sig, void *siginfo, void *context) {
  1665   // 4511530 - sem_post is serialized and handled by the manager thread. When
  1666   // the program is interrupted by Ctrl-C, SIGINT is sent to every thread. We
  1667   // don't want to flood the manager thread with sem_post requests.
  1668   if (sig == SIGINT && Atomic::add(1, &sigint_count) > 1)
  1669     return;
  1671   // Ctrl-C is pressed during error reporting, likely because the error
  1672   // handler fails to abort. Let VM die immediately.
  1673   if (sig == SIGINT && is_error_reported()) {
  1674     os::die();
  1677   os::signal_notify(sig);
  1680 void* os::user_handler() {
  1681   return CAST_FROM_FN_PTR(void*, UserHandler);
  1684 extern "C" {
  1685   typedef void (*sa_handler_t)(int);
  1686   typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
  1689 void* os::signal(int signal_number, void* handler) {
  1690   struct sigaction sigAct, oldSigAct;
  1692   sigfillset(&(sigAct.sa_mask));
  1694   // Do not block out synchronous signals in the signal handler.
  1695   // Blocking synchronous signals only makes sense if you can really
  1696   // be sure that those signals won't happen during signal handling,
  1697   // when the blocking applies.  Normal signal handlers are lean and
  1698   // do not cause signals. But our signal handlers tend to be "risky"
  1699   // - secondary SIGSEGVs, SIGILLs or SIGBUSes may and do happen.
  1700   // On AIX/PASE there was a case where a SIGSEGV happened, followed
  1701   // by a SIGILL, which was blocked due to the signal mask. The process
  1702   // just hung forever. Better to crash from a secondary signal than to hang.
  1703   sigdelset(&(sigAct.sa_mask), SIGSEGV);
  1704   sigdelset(&(sigAct.sa_mask), SIGBUS);
  1705   sigdelset(&(sigAct.sa_mask), SIGILL);
  1706   sigdelset(&(sigAct.sa_mask), SIGFPE);
  1707   sigdelset(&(sigAct.sa_mask), SIGTRAP);
  1709   sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
  1711   sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
  1713   if (sigaction(signal_number, &sigAct, &oldSigAct)) {
  1714     // -1 means registration failed
  1715     return (void *)-1;
  1718   return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
  1719 }
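       // Illustrative only (not from the original source): installing a handler
       // and checking the sentinel return value; my_handler is hypothetical.
       //
       //   void* old = os::signal(SIGUSR1, CAST_FROM_FN_PTR(void*, my_handler));
       //   if (old == (void*)-1) {
       //     // registration failed
       //   }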
  1721 void os::signal_raise(int signal_number) {
  1722   ::raise(signal_number);
  1725 //
  1726 // The following code was moved here from os.cpp to make it
  1727 // platform specific, which it is by its very nature.
  1728 //
  1730 // Will be modified when max signal is changed to be dynamic
  1731 int os::sigexitnum_pd() {
  1732   return NSIG;
  1735 // a counter for each possible signal value
  1736 static volatile jint pending_signals[NSIG+1] = { 0 };
  1738 // POSIX-style handshaking semaphore.
  1739 static sem_t sig_sem;
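       // The pair below works as a classic producer/consumer: signal_notify()
       // increments the per-signal counter and posts sig_sem, while
       // check_pending_signals() waits on sig_sem and claims one pending signal
       // via its cmpxchg loop.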
  1741 void os::signal_init_pd() {
  1742   // Initialize signal structures
  1743   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
  1745   // Initialize signal semaphore
  1746   int rc = ::sem_init(&sig_sem, 0, 0);
  1747   guarantee(rc != -1, "sem_init failed");
  1750 void os::signal_notify(int sig) {
  1751   Atomic::inc(&pending_signals[sig]);
  1752   ::sem_post(&sig_sem);
  1755 static int check_pending_signals(bool wait) {
  1756   Atomic::store(0, &sigint_count);
  1757   for (;;) {
  1758     for (int i = 0; i < NSIG + 1; i++) {
  1759       jint n = pending_signals[i];
  1760       if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
  1761         return i;
  1764     if (!wait) {
  1765       return -1;
  1767     JavaThread *thread = JavaThread::current();
  1768     ThreadBlockInVM tbivm(thread);
  1770     bool threadIsSuspended;
  1771     do {
  1772       thread->set_suspend_equivalent();
  1773       // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  1775       ::sem_wait(&sig_sem);
  1777       // were we externally suspended while we were waiting?
  1778       threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
  1779       if (threadIsSuspended) {
  1780         //
  1781         // The semaphore has been incremented, but while we were waiting
  1782         // another thread suspended us. We don't want to continue running
  1783         // while suspended because that would surprise the thread that
  1784         // suspended us.
  1785         //
  1786         ::sem_post(&sig_sem);
  1788         thread->java_suspend_self();
  1790     } while (threadIsSuspended);
  1794 int os::signal_lookup() {
  1795   return check_pending_signals(false);
  1798 int os::signal_wait() {
  1799   return check_pending_signals(true);
  1802 ////////////////////////////////////////////////////////////////////////////////
  1803 // Virtual Memory
  1805 // AddrRange describes an immutable address range
  1806 //
  1807 // This is a helper class for the 'shared memory bookkeeping' below.
  1808 class AddrRange {
  1809   friend class ShmBkBlock;
  1811   char* _start;
  1812   size_t _size;
  1814 public:
  1816   AddrRange(char* start, size_t size)
  1817     : _start(start), _size(size)
  1818   {}
  1820   AddrRange(const AddrRange& r)
  1821     : _start(r.start()), _size(r.size())
  1822   {}
  1824   char* start() const { return _start; }
  1825   size_t size() const { return _size; }
  1826   char* end() const { return _start + _size; }
  1827   bool is_empty() const { return _size == 0 ? true : false; }
  1829   static AddrRange empty_range() { return AddrRange(NULL, 0); }
  1831   bool contains(const char* p) const {
  1832     return start() <= p && end() > p;
  1835   bool contains(const AddrRange& range) const {
  1836     return start() <= range.start() && end() >= range.end();
  1839   bool intersects(const AddrRange& range) const {
  1840     return (range.start() <= start() && range.end() > start()) ||
  1841            (range.start() < end() && range.end() >= end()) ||
  1842            contains(range);
  1845   bool is_same_range(const AddrRange& range) const {
  1846     return start() == range.start() && size() == range.size();
  1849   // return the closest inside range consisting of whole pages
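         // Example (illustrative): for the range [0x1003 .. 0x5001) and a 4K
         // pagesize, the closest inside page-aligned range is [0x2000 .. 0x5000).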
  1850   AddrRange find_closest_aligned_range(size_t pagesize) const {
  1851     if (pagesize == 0 || is_empty()) {
  1852       return empty_range();
  1854     char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
  1855     char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
  1856     if (from > to) {
  1857       return empty_range();
  1859     return AddrRange(from, to - from);
  1861 };
  1863 ////////////////////////////////////////////////////////////////////////////
  1864 // shared memory bookkeeping
  1865 //
  1866 // The os::reserve_memory() API and friends hand out different kinds of memory, depending
  1867 // on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
  1868 //
  1869 // But these memory types have to be treated differently. For example, to uncommit
  1870 // mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
  1871 // disclaim64() is needed.
  1872 //
  1873 // Therefore we need to keep track of the allocated memory segments and their
  1874 // properties.
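       // A minimal sketch of the dispatch this bookkeeping enables (illustrative
       // only, locking elided):
       //
       //   ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
       //   if (block != NULL) {
       //     block->disclaim(addr, size); // virtual: msync() for MMAP blocks,
       //   }                              // disclaim64() for SHMAT blocks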
  1876 // ShmBkBlock: base class for all blocks in the shared memory bookkeeping
  1877 class ShmBkBlock {
  1879   ShmBkBlock* _next;
  1881 protected:
  1883   AddrRange _range;
  1884   const size_t _pagesize;
  1885   const bool _pinned;
  1887 public:
  1889   ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
  1890     : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
  1892     assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
  1893     assert(!_range.is_empty(), "invalid range");
  1896   virtual void print(outputStream* st) const {
  1897     st->print("0x%p ... 0x%p (%llu) - %d %s pages - %s",
  1898               _range.start(), _range.end(), _range.size(),
  1899               _range.size() / _pagesize, describe_pagesize(_pagesize),
  1900               _pinned ? "pinned" : "");
  1903   enum Type { MMAP, SHMAT };
  1904   virtual Type getType() = 0;
  1906   char* base() const { return _range.start(); }
  1907   size_t size() const { return _range.size(); }
  1909   void setAddrRange(AddrRange range) {
  1910     _range = range;
  1913   bool containsAddress(const char* p) const {
  1914     return _range.contains(p);
  1917   bool containsRange(const char* p, size_t size) const {
  1918     return _range.contains(AddrRange((char*)p, size));
  1921   bool isSameRange(const char* p, size_t size) const {
  1922     return _range.is_same_range(AddrRange((char*)p, size));
  1925   virtual bool disclaim(char* p, size_t size) = 0;
  1926   virtual bool release() = 0;
  1928   // blocks live in a list.
  1929   ShmBkBlock* next() const { return _next; }
  1930   void set_next(ShmBkBlock* blk) { _next = blk; }
  1932 }; // end: ShmBkBlock
  1935 // ShmBkMappedBlock: describes a block allocated with mmap()
  1936 class ShmBkMappedBlock : public ShmBkBlock {
  1937 public:
  1939   ShmBkMappedBlock(AddrRange range)
  1940     : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
  1942   void print(outputStream* st) const {
  1943     ShmBkBlock::print(st);
  1944     st->print_cr(" - mmap'ed");
  1947   Type getType() {
  1948     return MMAP;
  1951   bool disclaim(char* p, size_t size) {
  1953     AddrRange r(p, size);
  1955     guarantee(_range.contains(r), "invalid disclaim");
  1957     // only disclaim whole ranges.
  1958     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
  1959     if (r2.is_empty()) {
  1960       return true;
  1963     const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
  1965     if (rc != 0) {
  1966       warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
  1969     return rc == 0 ? true : false;
  1972   bool release() {
  1973     // mmap'ed blocks are released using munmap
  1974     if (::munmap(_range.start(), _range.size()) != 0) {
  1975       warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
  1976       return false;
  1978     return true;
  1980 }; // end: ShmBkMappedBlock
  1982 // ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
  1983 class ShmBkShmatedBlock : public ShmBkBlock {
  1984 public:
  1986   ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
  1987     : ShmBkBlock(range, pagesize, pinned) {}
  1989   void print(outputStream* st) const {
  1990     ShmBkBlock::print(st);
  1991     st->print_cr(" - shmat'ed");
  1994   Type getType() {
  1995     return SHMAT;
  1998   bool disclaim(char* p, size_t size) {
  2000     AddrRange r(p, size);
  2002     if (_pinned) {
  2003       return true;
  2006     // shmat'ed blocks are disclaimed using disclaim64
  2007     guarantee(_range.contains(r), "invalid disclaim");
  2009     // only disclaim whole ranges.
  2010     const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
  2011     if (r2.is_empty()) {
  2012       return true;
  2015     const bool rc = my_disclaim64(r2.start(), r2.size());
  2017     if (Verbose && !rc) {
  2018       warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
  2021     return rc;
  2024   bool release() {
  2025     bool rc = false;
  2026     if (::shmdt(_range.start()) != 0) {
  2027       warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
  2028     } else {
  2029       rc = true;
  2031     return rc;
  2034 }; // end: ShmBkShmatedBlock
  2036 static ShmBkBlock* g_shmbk_list = NULL;
  2037 static volatile jint g_shmbk_table_lock = 0;
  2039 // keep some usage statistics
  2040 static struct {
  2041   int nodes;    // number of nodes in list
  2042   size_t bytes; // reserved - not committed - bytes.
  2043   int reserves; // how often reserve was called
  2044   int lookups;  // how often a lookup was made
  2045 } g_shmbk_stats = { 0, 0, 0, 0 };
  2047 // add information about a shared memory segment to the bookkeeping
  2048 static void shmbk_register(ShmBkBlock* p_block) {
  2049   guarantee(p_block, "logic error");
  2050   p_block->set_next(g_shmbk_list);
  2051   g_shmbk_list = p_block;
  2052   g_shmbk_stats.reserves ++;
  2053   g_shmbk_stats.bytes += p_block->size();
  2054   g_shmbk_stats.nodes ++;
  2057 // remove information about a shared memory segment by its starting address
  2058 static void shmbk_unregister(ShmBkBlock* p_block) {
  2059   ShmBkBlock* p = g_shmbk_list;
  2060   ShmBkBlock* prev = NULL;
  2061   while (p) {
  2062     if (p == p_block) {
  2063       if (prev) {
  2064         prev->set_next(p->next());
  2065       } else {
  2066         g_shmbk_list = p->next();
  2068       g_shmbk_stats.nodes --;
  2069       g_shmbk_stats.bytes -= p->size();
  2070       return;
  2072     prev = p;
  2073     p = p->next();
  2075   assert(false, "should not happen");
  2078 // Given a pointer, return the shared memory bookkeeping record for the segment it points into.
  2079 // Using the returned block info must happen under lock protection.
  2080 static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
  2081   g_shmbk_stats.lookups ++;
  2082   ShmBkBlock* p = g_shmbk_list;
  2083   while (p) {
  2084     if (p->containsAddress(addr)) {
  2085       return p;
  2087     p = p->next();
  2089   return NULL;
  2092 // dump all information about all memory segments allocated with os::reserve_memory()
  2093 void shmbk_dump_info() {
  2094   tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
  2095     "total reserves: %d total lookups: %d)",
  2096     g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
  2097   const ShmBkBlock* p = g_shmbk_list;
  2098   int i = 0;
  2099   while (p) {
  2100     p->print(tty);
  2101     p = p->next();
  2102     i ++;
  2106 #define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
  2107 #define UNLOCK_SHMBK   }
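       // Usage pattern (see e.g. os::pd_uncommit_memory below): the macros open
       // and close a ThreadCritical scope, so all bookkeeping accesses between
       // them are serialized:
       //
       //   LOCK_SHMBK
       //     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
       //     // ... use block ...
       //   UNLOCK_SHMBK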
  2109 // End: shared memory bookkeeping
  2110 ////////////////////////////////////////////////////////////////////////////////////////////////////
  2112 int os::vm_page_size() {
  2113   // Seems redundant as all get out
  2114   assert(os::Aix::page_size() != -1, "must call os::init");
  2115   return os::Aix::page_size();
  2118 // Aix allocates memory by pages.
  2119 int os::vm_allocation_granularity() {
  2120   assert(os::Aix::page_size() != -1, "must call os::init");
  2121   return os::Aix::page_size();
  2124 int os::Aix::commit_memory_impl(char* addr, size_t size, bool exec) {
  2126   // Commit is a noop. There is no explicit commit
  2127   // needed on AIX. Memory is committed when touched.
  2128   //
  2129   // Debug : check address range for validity
  2130 #ifdef ASSERT
  2131   LOCK_SHMBK
  2132     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
  2133     if (!block) {
  2134       fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
  2135       shmbk_dump_info();
  2136       assert(false, "invalid pointer");
  2137       return false;
  2138     } else if (!block->containsRange(addr, size)) {
  2139       fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
  2140       shmbk_dump_info();
  2141       assert(false, "invalid range");
  2142       return false;
  2144   UNLOCK_SHMBK
  2145 #endif // ASSERT
  2147   return 0;
  2150 bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
  2151   return os::Aix::commit_memory_impl(addr, size, exec) == 0;
  2154 void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
  2155                                   const char* mesg) {
  2156   assert(mesg != NULL, "mesg must be specified");
  2157   os::Aix::commit_memory_impl(addr, size, exec);
  2160 int os::Aix::commit_memory_impl(char* addr, size_t size,
  2161                                 size_t alignment_hint, bool exec) {
  2162   return os::Aix::commit_memory_impl(addr, size, exec);
  2165 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
  2166                           bool exec) {
  2167   return os::Aix::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
  2170 void os::pd_commit_memory_or_exit(char* addr, size_t size,
  2171                                   size_t alignment_hint, bool exec,
  2172                                   const char* mesg) {
  2173   os::Aix::commit_memory_impl(addr, size, alignment_hint, exec);
  2176 bool os::pd_uncommit_memory(char* addr, size_t size) {
  2178   // Delegate to ShmBkBlock class which knows how to uncommit its memory.
  2180   bool rc = false;
  2181   LOCK_SHMBK
  2182     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
  2183     if (!block) {
  2184       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
  2185       shmbk_dump_info();
  2186       assert(false, "invalid pointer");
  2187       return false;
  2188     } else if (!block->containsRange(addr, size)) {
  2189       fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
  2190       shmbk_dump_info();
  2191       assert(false, "invalid range");
  2192       return false;
  2194     rc = block->disclaim(addr, size);
  2195   UNLOCK_SHMBK
  2197   if (Verbose && !rc) {
  2198     warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
  2200   return rc;
  2203 bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  2204   return os::guard_memory(addr, size);
  2207 bool os::remove_stack_guard_pages(char* addr, size_t size) {
  2208   return os::unguard_memory(addr, size);
  2211 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  2214 void os::pd_free_memory(char *addr, size_t bytes, size_t alignment_hint) {
  2217 void os::numa_make_global(char *addr, size_t bytes) {
  2220 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
  2223 bool os::numa_topology_changed() {
  2224   return false;
  2227 size_t os::numa_get_groups_num() {
  2228   return 1;
  2231 int os::numa_get_group_id() {
  2232   return 0;
  2235 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
  2236   if (size > 0) {
  2237     ids[0] = 0;
  2238     return 1;
  2240   return 0;
  2243 bool os::get_page_info(char *start, page_info* info) {
  2244   return false;
  2247 char *os::scan_pages(char *start, char* end, page_info* page_expected, page_info* page_found) {
  2248   return end;
  2251 // Flags for reserve_shmatted_memory:
  2252 #define RESSHM_WISHADDR_OR_FAIL                     1
  2253 #define RESSHM_TRY_16M_PAGES                        2
  2254 #define RESSHM_16M_PAGES_OR_FAIL                    4
  2256 // Result of reserve_shmatted_memory:
  2257 struct shmatted_memory_info_t {
  2258   char* addr;
  2259   size_t pagesize;
  2260   bool pinned;
  2261 };
  2263 // Reserve a section of shmatted memory.
  2264 // params:
  2265 // bytes [in]: size of memory, in bytes
  2266 // requested_addr [in]: wish address.
  2267 //                      NULL = no wish.
  2268 //                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
  2269 //                      be obtained, function will fail. Otherwise wish address is treated as hint and
  2270 //                      another pointer is returned.
  2271 // flags [in]:          some flags. Valid flags are:
  2272 //                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
  2273 //                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
  2274 //                          (requires UseLargePages and Use16MPages)
  2275 //                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
  2276 //                          Otherwise any other page size will do.
  2277 // p_info [out] :       holds information about the created shared memory segment.
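       // Illustrative call (not from the original source): reserve 1G anywhere,
       // preferring 16M pages but tolerating fallback to smaller page sizes:
       //
       //   shmatted_memory_info_t info;
       //   if (reserve_shmatted_memory(0x40000000, NULL, RESSHM_TRY_16M_PAGES, &info)) {
       //     // info.addr, info.pagesize and info.pinned describe the segment
       //   }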
  2278 static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
  2280   assert(p_info, "parameter error");
  2282   // init output struct.
  2283   p_info->addr = NULL;
  2285   // neither should we be here for EXTSHM=ON.
  2286   if (os::Aix::extshm()) {
  2287     ShouldNotReachHere();
  2290   // extract flags. sanity checks.
  2291   const bool wishaddr_or_fail =
  2292     flags & RESSHM_WISHADDR_OR_FAIL;
  2293   const bool try_16M_pages =
  2294     flags & RESSHM_TRY_16M_PAGES;
  2295   const bool f16M_pages_or_fail =
  2296     flags & RESSHM_16M_PAGES_OR_FAIL;
  2298   // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
  2299   // shmat will fail anyway, so save some cycles by failing right away
  2300   if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M != 0)) {
  2301     if (wishaddr_or_fail) {
  2302       return false;
  2303     } else {
  2304       requested_addr = NULL;
  2308   char* addr = NULL;
  2310   // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
  2311   // pagesize dynamically.
  2312   const size_t size = align_size_up(bytes, SIZE_16M);
  2314   // reserve the shared segment
  2315   int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
  2316   if (shmid == -1) {
  2317     warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
  2318     return false;
  2321   // Important note:
  2322   // It is very important that we, upon leaving this function, do not leave a shm segment alive.
  2323 // We must remove it from the system right after attaching it. System V shm segments are global and
  2324   // survive the process.
  2325   // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
  2327   // try forcing the page size
  2328   size_t pagesize = -1; // unknown so far
  2330   if (UseLargePages) {
  2332     struct shmid_ds shmbuf;
  2333     memset(&shmbuf, 0, sizeof(shmbuf));
  2335     // First, try to take from 16M page pool if...
  2336     if (os::Aix::can_use_16M_pages()  // we can ...
  2337         && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
  2338         && try_16M_pages) {           // caller wants us to.
  2339       shmbuf.shm_pagesize = SIZE_16M;
  2340       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
  2341         pagesize = SIZE_16M;
  2342       } else {
  2343         warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
  2344                 size / SIZE_16M, errno);
  2345         if (f16M_pages_or_fail) {
  2346           goto cleanup_shm;
  2351     // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
  2352     // because the 64K page pool may also be exhausted.
  2353     if (pagesize == -1) {
  2354       shmbuf.shm_pagesize = SIZE_64K;
  2355       if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
  2356         pagesize = SIZE_64K;
  2357       } else {
  2358         warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
  2359                 size / SIZE_64K, errno);
  2360         // here I give up. leave page_size -1 - later, after attaching, we will query the
  2361         // real page size of the attached memory. (in theory, it may be something different
  2362         // from 4K if LDR_CNTRL SHM_PSIZE is set)
  2367   // sanity point
  2368   assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
  2370   // Now attach the shared segment.
  2371   addr = (char*) shmat(shmid, requested_addr, 0);
  2372   if (addr == (char*)-1) {
  2373     // How to handle attach failure:
  2374     // If it failed for a specific wish address, tolerate this: in that case, if wish address was
  2375     // mandatory, fail, if not, retry anywhere.
  2376     // If it failed for any other reason, treat that as fatal error.
  2377     addr = NULL;
  2378     if (requested_addr) {
  2379       if (wishaddr_or_fail) {
  2380         goto cleanup_shm;
  2381       } else {
  2382         addr = (char*) shmat(shmid, NULL, 0);
  2383         if (addr == (char*)-1) { // fatal
  2384           addr = NULL;
  2385           warning("shmat failed (errno: %d)", errno);
  2386           goto cleanup_shm;
  2389     } else { // fatal
  2390       addr = NULL;
  2391       warning("shmat failed (errno: %d)", errno);
  2392       goto cleanup_shm;
  2396   // sanity point
  2397   assert(addr && addr != (char*) -1, "wrong address");
  2399   // After a successful attach, remove the segment - right away.
  2400   if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
  2401     warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
  2402     guarantee(false, "failed to remove shared memory segment!");
  2404   shmid = -1;
  2406   // query the real page size. In case setting the page size did not work (see above), the system
  2407   // may have given us something other than 4K (LDR_CNTRL)
  2409     const size_t real_pagesize = os::Aix::query_pagesize(addr);
  2410     if (pagesize != -1) {
  2411       assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
  2412     } else {
  2413       pagesize = real_pagesize;
  2417   // Now register the reserved block with internal book keeping.
  2418   LOCK_SHMBK
  2419     const bool pinned = pagesize >= SIZE_16M ? true : false;
  2420     ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
  2421     assert(p_block, "");
  2422     shmbk_register(p_block);
  2423   UNLOCK_SHMBK
  2425 cleanup_shm:
  2427   // if we have not done so yet, remove the shared memory segment. This is very important.
  2428   if (shmid != -1) {
  2429     if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
  2430       warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
  2431       guarantee(false, "failed to remove shared memory segment!");
  2433     shmid = -1;
  2436   // trace
  2437   if (Verbose && !addr) {
  2438     if (requested_addr != NULL) {
  2439       warning("failed to shm-allocate 0x%llX bytes at with address 0x%p.", size, requested_addr);
  2440     } else {
  2441       warning("failed to shm-allocate 0x%llX bytes at any address.", size);
  2445   // hand info to caller
  2446   if (addr) {
  2447     p_info->addr = addr;
  2448     p_info->pagesize = pagesize;
  2449     p_info->pinned = pagesize == SIZE_16M ? true : false;
  2452   // sanity test:
  2453   if (requested_addr && addr && wishaddr_or_fail) {
  2454     guarantee(addr == requested_addr, "shmat error");
  2457   // just one more test to really make sure we have no dangling shm segments.
  2458   guarantee(shmid == -1, "dangling shm segments");
  2460   return addr ? true : false;
  2462 } // end: reserve_shmatted_memory
  2464 // Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
  2465 // will return NULL in case of an error.
  2466 static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
  2468   // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
  2469   if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
  2470     warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
  2471     return NULL;
  2474   const size_t size = align_size_up(bytes, SIZE_4K);
  2476   // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
  2477   // msync(MS_INVALIDATE) (see os::uncommit_memory)
  2478   int flags = MAP_ANONYMOUS | MAP_SHARED;
  2480   // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
  2481   // it means if wishaddress is given but MAP_FIXED is not set.
  2482   //
  2483   // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
  2484   // clobbers the address range, which is probably not what the caller wants. That's
  2485   // why I assert here (again) that the SPEC1170 compat mode is off.
  2486   // If we want to be able to run under SPEC1170, we have to do some porting and
  2487   // testing.
  2488   if (requested_addr != NULL) {
  2489     assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
  2490     flags |= MAP_FIXED;
  2493   char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
  2495   if (addr == MAP_FAILED) {
  2496     // attach failed: tolerate for specific wish addresses. Not being able to attach
  2497     // anywhere is a fatal error.
  2498     if (requested_addr == NULL) {
  2499       // It is ok to fail here if the machine does not have enough memory.
  2500       warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
  2502     addr = NULL;
  2503     goto cleanup_mmap;
  2506   // If we did request a specific address and that address was not available, fail.
  2507   if (addr && requested_addr) {
  2508     guarantee(addr == requested_addr, "unexpected");
  2511   // register this mmap'ed segment with book keeping
  2512   LOCK_SHMBK
  2513     ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
  2514     assert(p_block, "");
  2515     shmbk_register(p_block);
  2516   UNLOCK_SHMBK
  2518 cleanup_mmap:
  2520   if (addr) {
  2521     if (Verbose) {
  2522       fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)\n", addr, addr + bytes, bytes);
  2525   else {
  2526     if (requested_addr != NULL) {
  2527       warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
  2528     } else {
  2529       warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
  2533   return addr;
  2535 } // end: reserve_mmaped_memory
  2537 // Reserves and attaches a shared memory segment.
  2538 // Will assert if a wish address is given and could not be obtained.
  2539 char* os::pd_reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
  2540   return os::attempt_reserve_memory_at(bytes, requested_addr);
  2543 bool os::pd_release_memory(char* addr, size_t size) {
  2545   // Delegate to ShmBkBlock class which knows how to release its memory.
  2547   bool rc = false;
  2548   LOCK_SHMBK
  2549     ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
  2550     if (!block) {
  2551       fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
  2552       shmbk_dump_info();
  2553       assert(false, "invalid pointer");
  2554       return false;
  2556     else if (!block->isSameRange(addr, size)) {
  2557       if (block->getType() == ShmBkBlock::MMAP) {
  2558         // Release only the same range, or a part at the beginning or the end of a range.
  2559         if (block->base() == addr && size < block->size()) {
  2560           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base() + size, block->size() - size));
  2561           assert(b, "");
  2562           shmbk_register(b);
  2563           block->setAddrRange(AddrRange(addr, size));
  2565         else if (addr > block->base() && addr + size == block->base() + block->size()) {
  2566           ShmBkMappedBlock* const b = new ShmBkMappedBlock(AddrRange(block->base(), block->size() - size));
  2567           assert(b, "");
  2568           shmbk_register(b);
  2569           block->setAddrRange(AddrRange(addr, size));
  2571         else {
  2572           fprintf(stderr, "invalid mmap range: 0x%p .. 0x%p.\n", addr, addr + size);
  2573           shmbk_dump_info();
  2574           assert(false, "invalid mmap range");
  2575           return false;
  2578       else {
  2579         // Release only the same range. No partial release allowed.
  2580         // Soften the requirement a bit, because the caller may think it owns a smaller
  2581         // size than the block actually has, due to alignment etc.
  2582         if (block->base() != addr || block->size() < size) {
  2583           fprintf(stderr, "invalid shmget range: 0x%p .. 0x%p.\n", addr, addr + size);
  2584           shmbk_dump_info();
  2585           assert(false, "invalid shmget range");
  2586           return false;
  2590     rc = block->release();
  2591     assert(rc, "release failed");
  2592     // remove block from bookkeeping
  2593     shmbk_unregister(block);
  2594     delete block;
  2595   UNLOCK_SHMBK
  2597   if (!rc) {
  2598     warning("failed to released %lu bytes at 0x%p", size, addr);
  2601   return rc;
  2604 static bool checked_mprotect(char* addr, size_t size, int prot) {
  2606   // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
  2607   // not tell me if protection failed when trying to protect an un-protectable range.
  2608   //
  2609   // This means if the memory was allocated using shmget/shmat, protection won't work
  2610   // but mprotect will still return 0:
  2611   //
  2612   // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
  2614   bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
  2616   if (!rc) {
  2617     const char* const s_errno = strerror(errno);
  2618     warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
  2619     return false;
  2622   // mprotect success check
  2623   //
  2624   // Mprotect said it changed the protection but can I believe it?
  2625   //
  2626   // To be sure I need to check the protection afterwards. Try to
  2627   // read from protected memory and check whether that causes a segfault.
  2628   //
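         // For example (illustrative): after mprotect(addr, size, PROT_NONE), the
         // two probes below must both come back with their default values - a
         // readable page would return its real contents for at least one of the
         // two distinct probe defaults.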
  2629   if (!os::Aix::xpg_sus_mode()) {
  2631     if (StubRoutines::SafeFetch32_stub()) {
  2633       const bool read_protected =
  2634         (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
  2635          SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
  2637       if (prot & PROT_READ) {
  2638         rc = !read_protected;
  2639       } else {
  2640         rc = read_protected;
  2644   if (!rc) {
  2645     assert(false, "mprotect failed.");
  2647   return rc;
  2650 // Set protections specified
  2651 bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
  2652   unsigned int p = 0;
  2653   switch (prot) {
  2654   case MEM_PROT_NONE: p = PROT_NONE; break;
  2655   case MEM_PROT_READ: p = PROT_READ; break;
  2656   case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
  2657   case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
  2658   default:
  2659     ShouldNotReachHere();
  2661   // is_committed is unused.
  2662   return checked_mprotect(addr, size, p);
  2665 bool os::guard_memory(char* addr, size_t size) {
  2666   return checked_mprotect(addr, size, PROT_NONE);
  2669 bool os::unguard_memory(char* addr, size_t size) {
  2670   return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
  2673 // Large page support
  2675 static size_t _large_page_size = 0;
  2677 // Enable large page support if OS allows that.
  2678 void os::large_page_init() {
  2680   // Note: os::Aix::query_multipage_support must run first.
  2682   if (!UseLargePages) {
  2683     return;
  2686   if (!Aix::can_use_64K_pages()) {
  2687     assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
  2688     UseLargePages = false;
  2689     return;
  2692   if (!Aix::can_use_16M_pages() && Use16MPages) {
  2693     fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
  2694             " and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.\n");
  2697   // Do not report 16M page alignment as part of os::_page_sizes if we are
  2698   // explicitly forbidden from using 16M pages. Doing so would increase the
  2699   // alignment the garbage collector calculates with, slightly increasing
  2700   // heap usage. We should only pay for 16M alignment if we really want to
  2701   // use 16M pages.
  2702   if (Use16MPages && Aix::can_use_16M_pages()) {
  2703     _large_page_size = SIZE_16M;
  2704     _page_sizes[0] = SIZE_16M;
  2705     _page_sizes[1] = SIZE_64K;
  2706     _page_sizes[2] = SIZE_4K;
  2707     _page_sizes[3] = 0;
  2708   } else if (Aix::can_use_64K_pages()) {
  2709     _large_page_size = SIZE_64K;
  2710     _page_sizes[0] = SIZE_64K;
  2711     _page_sizes[1] = SIZE_4K;
  2712     _page_sizes[2] = 0;
  2715   if (Verbose) {
  2716     ("Default large page size is 0x%llX.", _large_page_size);
  2718 } // end: os::large_page_init()
  2720 char* os::reserve_memory_special(size_t bytes, size_t alignment, char* req_addr, bool exec) {
  2721   // "exec" is passed in but not used. Creating the shared image for
  2722   // the code cache doesn't have an SHM_X executable permission to check.
  2723   Unimplemented();
  2724   return 0;
  2727 bool os::release_memory_special(char* base, size_t bytes) {
  2728   // detaching the SHM segment will also delete it, see reserve_memory_special()
  2729   Unimplemented();
  2730   return false;
  2733 size_t os::large_page_size() {
  2734   return _large_page_size;
  2737 bool os::can_commit_large_page_memory() {
  2738   // Well, sadly we cannot commit anything at all (see the comment in
  2739   // os::commit_memory), but we claim to, so we can make use of large pages.
  2740   return true;
  2743 bool os::can_execute_large_page_memory() {
  2744   // We can do that
  2745   return true;
  2748 // Reserve memory at an arbitrary address, only if that area is
  2749 // available (and not reserved for something else).
  2750 char* os::pd_attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
  2752   bool use_mmap = false;
  2754   // mmap: smaller graining, no large page support
  2755   // shm: large graining (256M), large page support, limited number of shm segments
  2756   //
  2757   // Prefer mmap wherever we either do not need large page support or have OS limits
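         // For instance (illustrative): a 4M request, or any request with
         // -XX:-UseLargePages, takes the mmap path; a 64M request with large
         // pages enabled goes through shmget/shmat.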
  2759   if (!UseLargePages || bytes < SIZE_16M) {
  2760     use_mmap = true;
  2763   char* addr = NULL;
  2764   if (use_mmap) {
  2765     addr = reserve_mmaped_memory(bytes, requested_addr);
  2766   } else {
  2767     // shmat: wish address is mandatory, and do not try 16M pages here.
  2768     shmatted_memory_info_t info;
  2769     const int flags = RESSHM_WISHADDR_OR_FAIL;
  2770     if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
  2771       addr = info.addr;
  2775   return addr;
  2778 size_t os::read(int fd, void *buf, unsigned int nBytes) {
  2779   return ::read(fd, buf, nBytes);
  2782 #define NANOSECS_PER_MILLISEC 1000000
  2784 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
  2785   assert(thread == Thread::current(), "thread consistency check");
  2787   // Prevent nasty overflow in the deadline calculation
  2788   // by handling long sleeps similarly to Solaris or Windows.
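         // For example (illustrative): a sleep of 5,000,000,000 ms is carried out
         // as two full INT_MAX ms chunks followed by the remaining 705,032,706 ms.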
  2789   const jlong limit = INT_MAX;
  2790   int result;
  2791   while (millis > limit) {
  2792     if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
  2793       return result;
  2795     millis -= limit;
  2798   ParkEvent * const slp = thread->_SleepEvent;
  2799   slp->reset();
  2800   OrderAccess::fence();
  2802   if (interruptible) {
  2803     jlong prevtime = javaTimeNanos();
  2805     // Prevent precision loss and too long sleeps
  2806     jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
  2808     for (;;) {
  2809       if (os::is_interrupted(thread, true)) {
  2810         return OS_INTRPT;
  2813       jlong newtime = javaTimeNanos();
  2815       assert(newtime >= prevtime, "time moving backwards");
  2816       // Doing prevtime and newtime in microseconds doesn't help precision,
  2817       // and trying to round up to avoid lost milliseconds can result in a
  2818       // too-short delay.
  2819       millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
  2821       if (millis <= 0) {
  2822         return OS_OK;
  2825       // Stop sleeping if we passed the deadline
  2826       if (newtime >= deadline) {
  2827         return OS_OK;
  2830       prevtime = newtime;
  2833         assert(thread->is_Java_thread(), "sanity check");
  2834         JavaThread *jt = (JavaThread *) thread;
  2835         ThreadBlockInVM tbivm(jt);
  2836         OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
  2838         jt->set_suspend_equivalent();
  2840         slp->park(millis);
  2842         // were we externally suspended while we were waiting?
  2843         jt->check_and_wait_while_suspended();
  2846   } else {
  2847     OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  2848     jlong prevtime = javaTimeNanos();
  2850     // Prevent precision loss and too long sleeps
  2851     jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
  2853     for (;;) {
  2854       // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
  2855       // the 1st iteration ...
  2856       jlong newtime = javaTimeNanos();
  2858       if (newtime - prevtime < 0) {
  2859         // time moving backwards, should only happen if no monotonic clock
  2860         // not a guarantee() because JVM should not abort on kernel/glibc bugs
  2861         // - HS14 Commented out as not implemented.
  2862         // - TODO Maybe we should implement it?
  2863         //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
  2864       } else {
  2865         millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
  2868       if (millis <= 0) break;
  2870       if (newtime >= deadline) {
  2871         break;
  2874       prevtime = newtime;
  2875       slp->park(millis);
  2877     return OS_OK;
  2881 int os::naked_sleep() {
  2882   // %% make the sleep time an integer flag. for now use 1 millisec.
  2883   return os::sleep(Thread::current(), 1, false);
  2886 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
  2887 void os::infinite_sleep() {
  2888   while (true) {    // sleep forever ...
  2889     ::sleep(100);   // ... 100 seconds at a time
  2893 // Used to convert frequent JVM_Yield() to nops
  2894 bool os::dont_yield() {
  2895   return DontYieldALot;
  2898 void os::yield() {
  2899   sched_yield();
  2902 os::YieldResult os::NakedYield() { sched_yield(); return os::YIELD_UNKNOWN; }
  2904 void os::yield_all(int attempts) {
  2905   // Yields to all threads, including threads with lower priorities.
  2906   // Threads on Linux all have the same priority. The Solaris-style
  2907   // os::yield_all() with nanosleep(1ms) is not necessary.
  2908   sched_yield();
  2911 // Called from the tight loops to possibly influence time-sharing heuristics
  2912 void os::loop_breaker(int attempts) {
  2913   os::yield_all(attempts);
  2916 ////////////////////////////////////////////////////////////////////////////////
  2917 // thread priority support
  2919 // From AIX manpage to pthread_setschedparam
  2920 // (see: http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?
  2921 //    topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_setschedparam.htm):
  2922 //
  2923 // "If schedpolicy is SCHED_OTHER, then sched_priority must be in the
  2924 // range from 40 to 80, where 40 is the least favored priority and 80
  2925 // is the most favored."
  2926 //
  2927 // (Actually, I doubt this even has an impact on AIX, as we do kernel
  2928 // scheduling there; however, this still leaves iSeries.)
  2929 //
  2930 // We use the same values for AIX and PASE.
  2931 int os::java_to_os_priority[CriticalPriority + 1] = {
  2932   54,             // 0 Entry should never be used
  2934   55,             // 1 MinPriority
  2935   55,             // 2
  2936   56,             // 3
  2938   56,             // 4
  2939   57,             // 5 NormPriority
  2940   57,             // 6
  2942   58,             // 7
  2943   58,             // 8
  2944   59,             // 9 NearMaxPriority
  2946   60,             // 10 MaxPriority
  2948   60              // 11 CriticalPriority
  2949 };
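       // Example (from the table above): Java NormPriority (5) maps to AIX
       // priority 57, which set_native_priority() below hands to
       // pthread_setschedparam() with policy SCHED_OTHER.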
  2951 OSReturn os::set_native_priority(Thread* thread, int newpri) {
  2952   if (!UseThreadPriorities) return OS_OK;
  2953   pthread_t thr = thread->osthread()->pthread_id();
  2954   int policy = SCHED_OTHER;
  2955   struct sched_param param;
  2956   param.sched_priority = newpri;
  2957   int ret = pthread_setschedparam(thr, policy, &param);
  2959   if (Verbose) {
  2960     if (ret == 0) {
  2961       fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
  2962     } else {
  2963       fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
  2964               (int)thr, newpri, ret, strerror(ret));
  2967   return (ret == 0) ? OS_OK : OS_ERR;
  2970 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
  2971   if (!UseThreadPriorities) {
  2972     *priority_ptr = java_to_os_priority[NormPriority];
  2973     return OS_OK;
  2975   pthread_t thr = thread->osthread()->pthread_id();
  2976   int policy = SCHED_OTHER;
  2977   struct sched_param param;
  2978   int ret = pthread_getschedparam(thr, &policy, &param);
  2979   *priority_ptr = param.sched_priority;
  2981   return (ret == 0) ? OS_OK : OS_ERR;
  2984 // Hint to the underlying OS that a task switch would not be good.
  2985 // Void return because it's a hint and can fail.
  2986 void os::hint_no_preempt() {}
  2988 ////////////////////////////////////////////////////////////////////////////////
  2989 // suspend/resume support
  2991 //  the low-level signal-based suspend/resume support is a remnant from the
  2992 //  old VM-suspension that used to be for java-suspension, safepoints etc,
  2993 //  within hotspot. Now there is a single use-case for this:
  2994 //    - calling get_thread_pc() on the VMThread by the flat-profiler task
  2995 //      that runs in the watcher thread.
  2996 //  The remaining code is greatly simplified from the more general suspension
  2997 //  code that used to be used.
  2998 //
  2999 //  The protocol is quite simple:
  3000 //  - suspend:
  3001 //      - sends a signal to the target thread
  3002 //      - polls the suspend state of the osthread using a yield loop
  3003 //      - target thread signal handler (SR_handler) sets suspend state
  3004 //        and blocks in sigsuspend until continued
  3005 //  - resume:
  3006 //      - sets target osthread state to continue
  3007 //      - sends signal to end the sigsuspend loop in the SR_handler
  3008 //
  3009 //  Note that the SR_lock plays no role in this suspend/resume protocol.
  3010 //
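       // A typical successful round trip (illustrative): do_suspend() moves the
       // state from RUNNING to SR_SUSPEND_REQUEST and signals the thread;
       // SR_handler() moves it to SR_SUSPENDED and parks in sigsuspend();
       // do_resume() requests SR_WAKEUP_REQUEST and signals again, and
       // SR_handler() returns once the state reads SR_RUNNING.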
  3012 static void resume_clear_context(OSThread *osthread) {
  3013   osthread->set_ucontext(NULL);
  3014   osthread->set_siginfo(NULL);
  3017 static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  3018   osthread->set_ucontext(context);
  3019   osthread->set_siginfo(siginfo);
  3022 //
  3023 // Handler function invoked when a thread's execution is suspended or
  3024 // resumed. We have to be careful that only async-safe functions are
  3025 // called here (Note: most pthread functions are not async safe and
  3026 // should be avoided.)
  3027 //
  3028 // Note: sigwait() is a more natural fit than sigsuspend() from an
  3029 // interface point of view, but sigwait() prevents the signal handler
  3030 // from being run. libpthread would get very confused by not having
  3031 // its signal handlers run and prevents sigwait()'s use with the
  3032 // mutex-granting signal.
  3033 //
  3034 // Currently only ever called on the VMThread and JavaThreads (PC sampling).
  3035 //
  3036 static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  3037   // Save and restore errno to avoid confusing native code with EINTR
  3038   // after sigsuspend.
  3039   int old_errno = errno;
  3041   Thread* thread = Thread::current();
  3042   OSThread* osthread = thread->osthread();
  3043   assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
  3045   os::SuspendResume::State current = osthread->sr.state();
  3046   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
  3047     suspend_save_context(osthread, siginfo, context);
  3049     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
  3050     os::SuspendResume::State state = osthread->sr.suspended();
  3051     if (state == os::SuspendResume::SR_SUSPENDED) {
  3052       sigset_t suspend_set;  // signals for sigsuspend()
  3054       // get current set of blocked signals and unblock resume signal
  3055       pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
  3056       sigdelset(&suspend_set, SR_signum);
  3058       // wait here until we are resumed
  3059       while (1) {
  3060         sigsuspend(&suspend_set);
  3062         os::SuspendResume::State result = osthread->sr.running();
  3063         if (result == os::SuspendResume::SR_RUNNING) {
  3064           break;
  3068     } else if (state == os::SuspendResume::SR_RUNNING) {
  3069       // request was cancelled, continue
  3070     } else {
  3071       ShouldNotReachHere();
  3074     resume_clear_context(osthread);
  3075   } else if (current == os::SuspendResume::SR_RUNNING) {
  3076     // request was cancelled, continue
  3077   } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
  3078     // ignore
  3079   } else {
  3080     ShouldNotReachHere();
  3083   errno = old_errno;
  3087 static int SR_initialize() {
  3088   struct sigaction act;
  3089   char *s;
  3090   // Get signal number to use for suspend/resume
  3091   if ((s = ::getenv("_JAVA_SR_SIGNUM")) != 0) {
  3092     int sig = ::strtol(s, 0, 10);
  3093     if (sig > 0 && sig < NSIG) {
  3094       SR_signum = sig;
  3098   assert(SR_signum > SIGSEGV && SR_signum > SIGBUS,
  3099         "SR_signum must be greater than max(SIGSEGV, SIGBUS), see 4355769");
  3101   sigemptyset(&SR_sigset);
  3102   sigaddset(&SR_sigset, SR_signum);
  3104   // Set up signal handler for suspend/resume.
  3105   act.sa_flags = SA_RESTART|SA_SIGINFO;
  3106   act.sa_handler = (void (*)(int)) SR_handler;
  3108   // SR_signum is blocked by default.
  3109   // 4528190 - We also need to block pthread restart signal (32 on all
  3110   // supported Linux platforms). Note that LinuxThreads need to block
  3111   // this signal for all threads to work properly. So we don't have
  3112   // to use hard-coded signal number when setting up the mask.
  3113   pthread_sigmask(SIG_BLOCK, NULL, &act.sa_mask);
  3115   if (sigaction(SR_signum, &act, 0) == -1) {
  3116     return -1;
  3119   // Save signal flag
  3120   os::Aix::set_our_sigflags(SR_signum, act.sa_flags);
  3121   return 0;
  3124 static int SR_finalize() {
  3125   return 0;
  3128 static int sr_notify(OSThread* osthread) {
  3129   int status = pthread_kill(osthread->pthread_id(), SR_signum);
  3130   assert_status(status == 0, status, "pthread_kill");
  3131   return status;
  3134 // "Randomly" selected value for how long we want to spin
  3135 // before bailing out on suspending a thread, also how often
  3136 // we send a signal to a thread we want to resume
  3137 static const int RANDOMLY_LARGE_INTEGER = 1000000;
  3138 static const int RANDOMLY_LARGE_INTEGER2 = 100;
  3140 // Returns true on success and false on error. Really, an error is fatal,
  3141 // but this seems to be the normal response to library errors.
  3142 static bool do_suspend(OSThread* osthread) {
  3143   assert(osthread->sr.is_running(), "thread should be running");
  3144   // mark as suspended and send signal
  3146   if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
  3147     // failed to switch, state wasn't running?
  3148     ShouldNotReachHere();
  3149     return false;
  3152   if (sr_notify(osthread) != 0) {
  3153     // try to cancel, switch to running
  3155     os::SuspendResume::State result = osthread->sr.cancel_suspend();
  3156     if (result == os::SuspendResume::SR_RUNNING) {
  3157       // cancelled
  3158       return false;
  3159     } else if (result == os::SuspendResume::SR_SUSPENDED) {
  3160       // somehow managed to suspend
  3161       return true;
  3162     } else {
  3163       ShouldNotReachHere();
  3164       return false;
  3168   // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  3170   for (int n = 0; !osthread->sr.is_suspended(); n++) {
  3171     for (int i = 0; i < RANDOMLY_LARGE_INTEGER2 && !osthread->sr.is_suspended(); i++) {
  3172       os::yield_all(i);
  3175     // timeout, try to cancel the request
  3176     if (n >= RANDOMLY_LARGE_INTEGER) {
  3177       os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
  3178       if (cancelled == os::SuspendResume::SR_RUNNING) {
  3179         return false;
  3180       } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
  3181         return true;
  3182       } else {
  3183         ShouldNotReachHere();
  3184         return false;
  3189   guarantee(osthread->sr.is_suspended(), "Must be suspended");
  3190   return true;
  3193 static void do_resume(OSThread* osthread) {
  3194   //assert(osthread->sr.is_suspended(), "thread should be suspended");
  3196   if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
  3197     // failed to switch to WAKEUP_REQUEST
  3198     ShouldNotReachHere();
  3199     return;
  3202   while (!osthread->sr.is_running()) {
  3203     if (sr_notify(osthread) == 0) {
  3204       for (int n = 0; n < RANDOMLY_LARGE_INTEGER && !osthread->sr.is_running(); n++) {
  3205         for (int i = 0; i < 100 && !osthread->sr.is_running(); i++) {
  3206           os::yield_all(i);
  3209     } else {
  3210       ShouldNotReachHere();
  3214   guarantee(osthread->sr.is_running(), "Must be running!");
  3217 ////////////////////////////////////////////////////////////////////////////////
  3218 // interrupt support
  3220 void os::interrupt(Thread* thread) {
  3221   assert(Thread::current() == thread || Threads_lock->owned_by_self(),
  3222     "possibility of dangling Thread pointer");
  3224   OSThread* osthread = thread->osthread();
  3226   if (!osthread->interrupted()) {
  3227     osthread->set_interrupted(true);
  3228     // More than one thread can get here with the same value of osthread,
  3229     // resulting in multiple notifications.  We do, however, want the store
  3230     // to interrupted() to be visible to other threads before we execute unpark().
  3231     OrderAccess::fence();
  3232     ParkEvent * const slp = thread->_SleepEvent;
  3233     if (slp != NULL) slp->unpark();
  3236   // For JSR166: unpark even if the interrupt status was already set.
  3237   if (thread->is_Java_thread())
  3238     ((JavaThread*)thread)->parker()->unpark();
  3240   ParkEvent * ev = thread->_ParkEvent;
  3241   if (ev != NULL) ev->unpark();
  3245 bool os::is_interrupted(Thread* thread, bool clear_interrupted) {
  3246   assert(Thread::current() == thread || Threads_lock->owned_by_self(),
  3247     "possibility of dangling Thread pointer");
  3249   OSThread* osthread = thread->osthread();
  3251   bool interrupted = osthread->interrupted();
  3253   if (interrupted && clear_interrupted) {
  3254     osthread->set_interrupted(false);
  3255     // consider thread->_SleepEvent->reset() ... optional optimization
  3258   return interrupted;
  3261 ///////////////////////////////////////////////////////////////////////////////////
  3262 // signal handling (except suspend/resume)
  3264 // This routine may be used by user applications as a "hook" to catch signals.
  3265 // The user-defined signal handler must pass unrecognized signals to this
  3266 // routine, and if it returns true (non-zero), then the signal handler must
  3267 // return immediately. If the flag "abort_if_unrecognized" is true, then this
  3268 // routine will never return false (zero), but instead will execute a VM panic
  3269 // routine to kill the process.
  3270 //
  3271 // If this routine returns false, it is OK to call it again. This allows
  3272 // the user-defined signal handler to perform checks either before or after
  3273 // the VM performs its own checks. Naturally, the user code would be making
  3274 // a serious error if it tried to handle an exception (such as a null check
  3275 // or breakpoint) that the VM was generating for its own correct operation.
  3276 //
  3277 // This routine may recognize any of the following kinds of signals:
  3278 //   SIGBUS, SIGSEGV, SIGILL, SIGFPE, SIGQUIT, SIGPIPE, SIGXFSZ, SIGUSR1.
  3279 // It should be consulted by handlers for any of those signals.
  3280 //
  3281 // The caller of this routine must pass in the three arguments supplied
  3282 // to the function referred to in the "sa_sigaction" (not the "sa_handler")
  3283 // field of the structure passed to sigaction(). This routine assumes that
  3284 // the sa_flags field passed to sigaction() includes SA_SIGINFO and SA_RESTART.
  3285 //
  3286 // Note that the VM will print warnings if it detects conflicting signal
  3287 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
  3288 //
  3289 extern "C" JNIEXPORT int
  3290 JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
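// A minimal sketch (hypothetical user code, not part of the VM) of the hook
// described above; the handler name and its installation are up to the
// application:
//
//   static void user_handler(int sig, siginfo_t* info, void* uc) {
//     if (JVM_handle_aix_signal(sig, info, uc, 0 /* !abort_if_unrecognized */)) {
//       return; // the VM recognized and handled the signal
//     }
//     // ... application-specific handling ...
//   }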
  3292 // Set thread signal mask (for some reason on AIX sigthreadmask() seems
  3293 // to be the thing to call; documentation is not terribly clear about whether
  3294 // pthread_sigmask also works, and if it does, whether it does the same).
  3295 bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
  3296   const int rc = ::pthread_sigmask(how, set, oset);
  3297   // return value semantics differ slightly for error case:
  3298   // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
  3299 // (so, pthread_sigmask is more thread-safe for error handling).
  3300 // But success is always 0.
  3301   return rc == 0;
  3304 // Function to unblock all signals which are, according
  3305 // to POSIX, typical program error signals. If they happen while being blocked,
  3306 // they typically will bring down the process immediately.
  3307 bool unblock_program_error_signals() {
  3308   sigset_t set;
  3309   ::sigemptyset(&set);
  3310   ::sigaddset(&set, SIGILL);
  3311   ::sigaddset(&set, SIGBUS);
  3312   ::sigaddset(&set, SIGFPE);
  3313   ::sigaddset(&set, SIGSEGV);
  3314   return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
  3317 // Renamed from 'signalHandler' to avoid collision with other shared libs.
  3318 void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
  3319   assert(info != NULL && uc != NULL, "it must be old kernel");
  3321   // Never leave program error signals blocked;
  3322   // on all our platforms they would bring down the process immediately when
  3323   // getting raised while being blocked.
  3324   unblock_program_error_signals();
  3326   JVM_handle_aix_signal(sig, info, uc, true);
  3330 // This boolean allows users to forward their own non-matching signals
  3331 // to JVM_handle_aix_signal, harmlessly.
  3332 bool os::Aix::signal_handlers_are_installed = false;
  3334 // For signal-chaining
  3335 struct sigaction os::Aix::sigact[MAXSIGNUM];
  3336 unsigned int os::Aix::sigs = 0;
  3337 bool os::Aix::libjsig_is_loaded = false;
  3338 typedef struct sigaction *(*get_signal_t)(int);
  3339 get_signal_t os::Aix::get_signal_action = NULL;
  3341 struct sigaction* os::Aix::get_chained_signal_action(int sig) {
  3342   struct sigaction *actp = NULL;
  3344   if (libjsig_is_loaded) {
  3345     // Retrieve the old signal handler from libjsig
  3346     actp = (*get_signal_action)(sig);
  3348   if (actp == NULL) {
  3349     // Retrieve the preinstalled signal handler from jvm
  3350     actp = get_preinstalled_handler(sig);
  3353   return actp;
  3356 static bool call_chained_handler(struct sigaction *actp, int sig,
  3357                                  siginfo_t *siginfo, void *context) {
  3358   Unimplemented();
  3359   return true;
  3362 bool os::Aix::chained_handler(int sig, siginfo_t* siginfo, void* context) {
  3363   bool chained = false;
  3364   // signal-chaining
  3365   if (UseSignalChaining) {
  3366     struct sigaction *actp = get_chained_signal_action(sig);
  3367     if (actp != NULL) {
  3368       chained = call_chained_handler(actp, sig, siginfo, context);
  3371   return chained;
  3374 struct sigaction* os::Aix::get_preinstalled_handler(int sig) {
  3375   if ((((unsigned int)1 << sig) & sigs) != 0) {
  3376     return &sigact[sig];
  3378   return NULL;
  3381 void os::Aix::save_preinstalled_handler(int sig, struct sigaction& oldAct) {
  3382   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  3383   sigact[sig] = oldAct;
  3384   sigs |= (unsigned int)1 << sig;
  3387 // For diagnostics.
  3388 int os::Aix::sigflags[MAXSIGNUM];
  3390 int os::Aix::get_our_sigflags(int sig) {
  3391   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  3392   return sigflags[sig];
  3395 void os::Aix::set_our_sigflags(int sig, int flags) {
  3396   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  3397   sigflags[sig] = flags;
  3400 void os::Aix::set_signal_handler(int sig, bool set_installed) {
  3401   // Check for overwrite.
  3402   struct sigaction oldAct;
  3403   sigaction(sig, (struct sigaction*)NULL, &oldAct);
  3405   void* oldhand = oldAct.sa_sigaction
  3406     ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
  3407     : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  3408   // Renamed 'signalHandler' to avoid collision with other shared libs.
  3409   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
  3410       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
  3411       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
  3412     if (AllowUserSignalHandlers || !set_installed) {
  3413       // Do not overwrite; user takes responsibility to forward to us.
  3414       return;
  3415     } else if (UseSignalChaining) {
  3416       // save the old handler in jvm
  3417       save_preinstalled_handler(sig, oldAct);
  3418       // libjsig also interposes the sigaction() call below and saves the
  3419       // old sigaction on its own.
  3420     } else {
  3421       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
  3422                     "%#lx for signal %d.", (long)oldhand, sig));
  3426   struct sigaction sigAct;
  3427   sigfillset(&(sigAct.sa_mask));
  3428   if (!set_installed) {
  3429     sigAct.sa_handler = SIG_DFL;
  3430     sigAct.sa_flags = SA_RESTART;
  3431   } else {
  3432     // Renamed 'signalHandler' to avoid collision with other shared libs.
  3433     sigAct.sa_sigaction = javaSignalHandler;
  3434     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
  3436   // Save the flags we are about to set for our handler.
  3437   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
  3438   sigflags[sig] = sigAct.sa_flags;
  3440   int ret = sigaction(sig, &sigAct, &oldAct);
  3441   assert(ret == 0, "check");
  3443   void* oldhand2 = oldAct.sa_sigaction
  3444                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
  3445                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
  3446   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
  3449 // install signal handlers for signals that HotSpot needs to
  3450 // handle in order to support Java-level exception handling.
  3451 void os::Aix::install_signal_handlers() {
  3452   if (!signal_handlers_are_installed) {
  3453     signal_handlers_are_installed = true;
  3455     // signal-chaining
  3456     typedef void (*signal_setting_t)();
  3457     signal_setting_t begin_signal_setting = NULL;
  3458     signal_setting_t end_signal_setting = NULL;
  3459     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  3460                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
  3461     if (begin_signal_setting != NULL) {
  3462       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
  3463                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
  3464       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
  3465                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
  3466       libjsig_is_loaded = true;
  3467       assert(UseSignalChaining, "should enable signal-chaining");
  3469     if (libjsig_is_loaded) {
  3470       // Tell libjsig jvm is setting signal handlers
  3471       (*begin_signal_setting)();
  3474     set_signal_handler(SIGSEGV, true);
  3475     set_signal_handler(SIGPIPE, true);
  3476     set_signal_handler(SIGBUS, true);
  3477     set_signal_handler(SIGILL, true);
  3478     set_signal_handler(SIGFPE, true);
  3479     set_signal_handler(SIGTRAP, true);
  3480     set_signal_handler(SIGXFSZ, true);
  3481     set_signal_handler(SIGDANGER, true);
  3483     if (libjsig_is_loaded) {
  3484       // Tell libjsig jvm finishes setting signal handlers
  3485       (*end_signal_setting)();
  3488     // We don't activate the signal checker if libjsig is in place - we trust
  3489     // ourselves - and if a UserSignalHandler is installed, all bets are off.
  3490     // Log that signal checking is off only if -verbose:jni is specified.
  3491     if (CheckJNICalls) {
  3492       if (libjsig_is_loaded) {
  3493         tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
  3494         check_signals = false;
  3496       if (AllowUserSignalHandlers) {
  3497         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
  3498         check_signals = false;
  3500       // need to initialize check_signal_done
  3501       ::sigemptyset(&check_signal_done);
  3506 static const char* get_signal_handler_name(address handler,
  3507                                            char* buf, int buflen) {
  3508   int offset;
  3509   bool found = os::dll_address_to_library_name(handler, buf, buflen, &offset);
  3510   if (found) {
  3511     // skip directory names
  3512     const char *p1, *p2;
  3513     p1 = buf;
  3514     size_t len = strlen(os::file_separator());
  3515     while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
  3516     // The way os::dll_address_to_library_name is implemented on Aix
  3517     // right now, it always returns -1 for the offset which is not
  3518     // terribly informative.
  3519     // Will fix that. For now, omit the offset.
  3520     jio_snprintf(buf, buflen, "%s", p1);
  3521   } else {
  3522     jio_snprintf(buf, buflen, PTR_FORMAT, handler);
  3524   return buf;
  3527 static void print_signal_handler(outputStream* st, int sig,
  3528                                  char* buf, size_t buflen) {
  3529   struct sigaction sa;
  3530   sigaction(sig, NULL, &sa);
  3532   st->print("%s: ", os::exception_name(sig, buf, buflen));
  3534   address handler = (sa.sa_flags & SA_SIGINFO)
  3535     ? CAST_FROM_FN_PTR(address, sa.sa_sigaction)
  3536     : CAST_FROM_FN_PTR(address, sa.sa_handler);
  3538   if (handler == CAST_FROM_FN_PTR(address, SIG_DFL)) {
  3539     st->print("SIG_DFL");
  3540   } else if (handler == CAST_FROM_FN_PTR(address, SIG_IGN)) {
  3541     st->print("SIG_IGN");
  3542   } else {
  3543     st->print("[%s]", get_signal_handler_name(handler, buf, buflen));
  3546   // Print readable mask.
  3547   st->print(", sa_mask[0]=");
  3548   os::Posix::print_signal_set_short(st, &sa.sa_mask);
  3550   address rh = VMError::get_resetted_sighandler(sig);
  3551   // Maybe the handler was reset by VMError?
  3552   if (rh != NULL) {
  3553     handler = rh;
  3554     sa.sa_flags = VMError::get_resetted_sigflags(sig);
  3557   // Print textual representation of sa_flags.
  3558   st->print(", sa_flags=");
  3559   os::Posix::print_sa_flags(st, sa.sa_flags);
  3561   // Check: is it our handler?
  3562   if (handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler) ||
  3563       handler == CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler)) {
  3564     // It is our signal handler.
  3565     // Check the flags: warn if the system changed them.
  3566     if ((int)sa.sa_flags != os::Aix::get_our_sigflags(sig)) {
  3567       st->print(", flags were changed from " PTR32_FORMAT ", consider using jsig library",
  3568                 os::Aix::get_our_sigflags(sig));
  3571   st->cr();
  3575 #define DO_SIGNAL_CHECK(sig) \
  3576   if (!sigismember(&check_signal_done, sig)) \
  3577     os::Aix::check_signal_handler(sig)
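// For illustration, DO_SIGNAL_CHECK(SIGSEGV) expands to:
//
//   if (!sigismember(&check_signal_done, SIGSEGV))
//     os::Aix::check_signal_handler(SIGSEGV);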
  3579 // This method is a periodic task to check for misbehaving JNI applications
  3580 // under CheckJNI; we can add any other periodic checks here.
  3582 void os::run_periodic_checks() {
  3584   if (check_signals == false) return;
  3586   // SEGV and BUS, if overridden, could potentially prevent
  3587   // generation of hs*.log in the event of a crash; debugging
  3588   // such a case can be very challenging, so we absolutely
  3589   // check the following for good measure:
  3590   DO_SIGNAL_CHECK(SIGSEGV);
  3591   DO_SIGNAL_CHECK(SIGILL);
  3592   DO_SIGNAL_CHECK(SIGFPE);
  3593   DO_SIGNAL_CHECK(SIGBUS);
  3594   DO_SIGNAL_CHECK(SIGPIPE);
  3595   DO_SIGNAL_CHECK(SIGXFSZ);
  3596   if (UseSIGTRAP) {
  3597     DO_SIGNAL_CHECK(SIGTRAP);
  3599   DO_SIGNAL_CHECK(SIGDANGER);
  3601   // ReduceSignalUsage allows the user to override these handlers
  3602   // see comments at the very top and jvm_solaris.h
  3603   if (!ReduceSignalUsage) {
  3604     DO_SIGNAL_CHECK(SHUTDOWN1_SIGNAL);
  3605     DO_SIGNAL_CHECK(SHUTDOWN2_SIGNAL);
  3606     DO_SIGNAL_CHECK(SHUTDOWN3_SIGNAL);
  3607     DO_SIGNAL_CHECK(BREAK_SIGNAL);
  3610   DO_SIGNAL_CHECK(SR_signum);
  3611   DO_SIGNAL_CHECK(INTERRUPT_SIGNAL);
  3614 typedef int (*os_sigaction_t)(int, const struct sigaction *, struct sigaction *);
  3616 static os_sigaction_t os_sigaction = NULL;
  3618 void os::Aix::check_signal_handler(int sig) {
  3619   char buf[O_BUFLEN];
  3620   address jvmHandler = NULL;
  3622   struct sigaction act;
  3623   if (os_sigaction == NULL) {
  3624     // only trust the default sigaction, in case it has been interposed
  3625     os_sigaction = (os_sigaction_t)dlsym(RTLD_DEFAULT, "sigaction");
  3626     if (os_sigaction == NULL) return;
  3629   os_sigaction(sig, (struct sigaction*)NULL, &act);
  3631   address thisHandler = (act.sa_flags & SA_SIGINFO)
  3632     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
  3633     : CAST_FROM_FN_PTR(address, act.sa_handler);
  3636   switch(sig) {
  3637   case SIGSEGV:
  3638   case SIGBUS:
  3639   case SIGFPE:
  3640   case SIGPIPE:
  3641   case SIGILL:
  3642   case SIGXFSZ:
  3643     // Renamed 'signalHandler' to avoid collision with other shared libs.
  3644     jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
  3645     break;
  3647   case SHUTDOWN1_SIGNAL:
  3648   case SHUTDOWN2_SIGNAL:
  3649   case SHUTDOWN3_SIGNAL:
  3650   case BREAK_SIGNAL:
  3651     jvmHandler = (address)user_handler();
  3652     break;
  3654   case INTERRUPT_SIGNAL:
  3655     jvmHandler = CAST_FROM_FN_PTR(address, SIG_DFL);
  3656     break;
  3658   default:
  3659     if (sig == SR_signum) {
  3660       jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)SR_handler);
  3661     } else {
  3662       return;
  3664     break;
  3667   if (thisHandler != jvmHandler) {
  3668     tty->print("Warning: %s handler ", exception_name(sig, buf, O_BUFLEN));
  3669     tty->print("expected:%s", get_signal_handler_name(jvmHandler, buf, O_BUFLEN));
  3670     tty->print_cr("  found:%s", get_signal_handler_name(thisHandler, buf, O_BUFLEN));
  3671     // No need to check this sig any longer
  3672     sigaddset(&check_signal_done, sig);
  3673   } else if (os::Aix::get_our_sigflags(sig) != 0 && (int)act.sa_flags != os::Aix::get_our_sigflags(sig)) {
  3674     tty->print("Warning: %s handler flags ", exception_name(sig, buf, O_BUFLEN));
  3675     tty->print("expected:" PTR32_FORMAT, os::Aix::get_our_sigflags(sig));
  3676     tty->print_cr("  found:" PTR32_FORMAT, act.sa_flags);
  3677     // No need to check this sig any longer
  3678     sigaddset(&check_signal_done, sig);
  3681   // Dump all the signal handlers
  3682   if (sigismember(&check_signal_done, sig)) {
  3683     print_signal_handlers(tty, buf, O_BUFLEN);
  3687 extern bool signal_name(int signo, char* buf, size_t len);
  3689 const char* os::exception_name(int exception_code, char* buf, size_t size) {
  3690   if (0 < exception_code && exception_code <= SIGRTMAX) {
  3691     // signal
  3692     if (!signal_name(exception_code, buf, size)) {
  3693       jio_snprintf(buf, size, "SIG%d", exception_code);
  3695     return buf;
  3696   } else {
  3697     return NULL;
  3701 // To install functions for atexit processing.
  3702 extern "C" {
  3703   static void perfMemory_exit_helper() {
  3704     perfMemory_exit();
  3708 // This is called _before_ most of the global arguments have been parsed.
  3709 void os::init(void) {
  3710   // This is basic, we want to know if that ever changes.
  3711   // (the shared memory boundary is supposed to be 256M aligned)
  3712   assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
  3714   // First off, we need to know whether we run on AIX or PASE, and
  3715   // the OS level we run on.
  3716   os::Aix::initialize_os_info();
  3718   // Scan environment (SPEC1170 behaviour, etc)
  3719   os::Aix::scan_environment();
  3721   // Check which pages are supported by AIX.
  3722   os::Aix::query_multipage_support();
  3724   // Next, we need to initialize libo4 and libperfstat libraries.
  3725   if (os::Aix::on_pase()) {
  3726     os::Aix::initialize_libo4();
  3727   } else {
  3728     os::Aix::initialize_libperfstat();
  3731   // Reset the perfstat information provided by ODM.
  3732   if (os::Aix::on_aix()) {
  3733     libperfstat::perfstat_reset();
  3736   // Now initialize basic system properties. Note that for some of the values we
  3737   // need libperfstat etc.
  3738   os::Aix::initialize_system_info();
  3740   // Initialize large page support.
  3741   if (UseLargePages) {
  3742     os::large_page_init();
  3743     if (!UseLargePages) {
  3744       // initialize os::_page_sizes
  3745       _page_sizes[0] = Aix::page_size();
  3746       _page_sizes[1] = 0;
  3747       if (Verbose) {
  3748         fprintf(stderr, "Large Page initialization failed: setting UseLargePages=0.\n");
  3751   } else {
  3752     // initialize os::_page_sizes
  3753     _page_sizes[0] = Aix::page_size();
  3754     _page_sizes[1] = 0;
  3757   // debug trace
  3758   if (Verbose) {
  3759     fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
  3760     fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
  3761     fprintf(stderr, "os::_page_sizes = ( ");
  3762     for (int i = 0; _page_sizes[i]; i ++) {
  3763       fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
  3765     fprintf(stderr, ")\n");
  3768   _initial_pid = getpid();
  3770   clock_tics_per_sec = sysconf(_SC_CLK_TCK);
  3772   init_random(1234567);
  3774   ThreadCritical::initialize();
  3776   // Main_thread points to the aboriginal thread.
  3777   Aix::_main_thread = pthread_self();
  3779   initial_time_count = os::elapsed_counter();
  3780   pthread_mutex_init(&dl_mutex, NULL);
  3783 // this is called _after_ the global arguments have been parsed
  3784 jint os::init_2(void) {
  3786   if (Verbose) {
  3787     fprintf(stderr, "processor count: %d\n", os::_processor_count);
  3788     fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
  3791   // initially build up the loaded dll map
  3792   LoadedLibraries::reload();
  3794   const int page_size = Aix::page_size();
  3795   const int map_size = page_size;
  3797   address map_address = (address) MAP_FAILED;
  3798   const int prot  = PROT_READ;
  3799   const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
  3801   // use optimized addresses for the polling page,
  3802   // e.g. map it to a special 32-bit address.
  3803   if (OptimizePollingPageLocation) {
  3804     // architecture-specific list of address wishes:
  3805     address address_wishes[] = {
  3806       // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
  3807       // PPC64: all address wishes are non-negative 32 bit values where
  3808       // the lower 16 bits are all zero. We can load these addresses
  3809       // with a single ppc_lis instruction.
  3810       (address) 0x30000000, (address) 0x31000000,
  3811       (address) 0x32000000, (address) 0x33000000,
  3812       (address) 0x40000000, (address) 0x41000000,
  3813       (address) 0x42000000, (address) 0x43000000,
  3814       (address) 0x50000000, (address) 0x51000000,
  3815       (address) 0x52000000, (address) 0x53000000,
  3816       (address) 0x60000000, (address) 0x61000000,
  3817       (address) 0x62000000, (address) 0x63000000
  3818     };
  3819     int address_wishes_length = sizeof(address_wishes)/sizeof(address);
  3821     // iterate over the list of address wishes:
  3822     for (int i=0; i<address_wishes_length; i++) {
  3823       // try to map with current address wish.
  3824       // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
  3825       // fail if the address is already mapped.
  3826       map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
  3827                                      map_size, prot,
  3828                                      flags | MAP_FIXED,
  3829                                      -1, 0);
  3830       if (Verbose) {
  3831         fprintf(stderr, "SafePoint Polling Page address: %p (wish) => %p\n",
  3832                 address_wishes[i], map_address + (ssize_t)page_size);
  3835       if (map_address + (ssize_t)page_size == address_wishes[i]) {
  3836         // map succeeded and map_address is at wished address, exit loop.
  3837         break;
  3840       if (map_address != (address) MAP_FAILED) {
  3841         // map succeeded, but polling_page is not at wished address, unmap and continue.
  3842         ::munmap(map_address, map_size);
  3843         map_address = (address) MAP_FAILED;
  3845       // map failed, continue loop.
  3847   } // end OptimizePollingPageLocation
  3849   if (map_address == (address) MAP_FAILED) {
  3850     map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
  3852   guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
  3853   os::set_polling_page(map_address);
  3855   if (!UseMembar) {
  3856     address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  3857     guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  3858     os::set_memory_serialize_page(mem_serialize_page);
  3860 #ifndef PRODUCT
  3861     if (Verbose && PrintMiscellaneous)
  3862       tty->print("[Memory Serialize Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
  3863 #endif
  3866   // initialize suspend/resume support - must do this before signal_sets_init()
  3867   if (SR_initialize() != 0) {
  3868     perror("SR_initialize failed");
  3869     return JNI_ERR;
  3872   Aix::signal_sets_init();
  3873   Aix::install_signal_handlers();
  3875   // Check minimum allowable stack size for thread creation and to initialize
  3876   // the java system classes, including StackOverflowError - depends on page
  3877   // size. Add a page for compiler2 recursion in main thread.
  3878   // Add in 2*BytesPerWord times page size to account for VM stack during
  3879   // class initialization depending on 32 or 64 bit VM.
  3880   os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
  3881             (size_t)(StackYellowPages+StackRedPages+StackShadowPages +
  3882                      2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
  3884   size_t threadStackSizeInBytes = ThreadStackSize * K;
  3885   if (threadStackSizeInBytes != 0 &&
  3886       threadStackSizeInBytes < os::Aix::min_stack_allowed) {
  3887         tty->print_cr("\nThe stack size specified is too small. "
  3888                       "Specify at least %dk",
  3889                       os::Aix::min_stack_allowed / K);
  3890         return JNI_ERR;
  3893   // Make the stack size a multiple of the page size so that
  3894   // the yellow/red zones can be guarded.
  3895   // note that this can be 0, if no default stacksize was set
  3896   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
  3898   Aix::libpthread_init();
  3900   if (MaxFDLimit) {
  3901     // Set the number of file descriptors to the max. Print out an error
  3902     // if getrlimit/setrlimit fails, but continue regardless.
  3903     struct rlimit nbr_files;
  3904     int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
  3905     if (status != 0) {
  3906       if (PrintMiscellaneous && (Verbose || WizardMode))
  3907         perror("os::init_2 getrlimit failed");
  3908     } else {
  3909       nbr_files.rlim_cur = nbr_files.rlim_max;
  3910       status = setrlimit(RLIMIT_NOFILE, &nbr_files);
  3911       if (status != 0) {
  3912         if (PrintMiscellaneous && (Verbose || WizardMode))
  3913           perror("os::init_2 setrlimit failed");
  3918   if (PerfAllowAtExitRegistration) {
  3919     // only register atexit functions if PerfAllowAtExitRegistration is set.
  3920     // atexit functions can be delayed until process exit time, which
  3921     // can be problematic for embedded VM situations. Embedded VMs should
  3922     // call DestroyJavaVM() to assure that VM resources are released.
  3924     // note: perfMemory_exit_helper atexit function may be removed in
  3925     // the future if the appropriate cleanup code can be added to the
  3926     // VM_Exit VMOperation's doit method.
  3927     if (atexit(perfMemory_exit_helper) != 0) {
  3928       warning("os::init_2 atexit(perfMemory_exit_helper) failed");
  3932   return JNI_OK;
  3935 // this is called at the end of vm_initialization
  3936 void os::init_3(void) {
  3937   return;
  3940 // Mark the polling page as unreadable
  3941 void os::make_polling_page_unreadable(void) {
  3942   if (!guard_memory((char*)_polling_page, Aix::page_size())) {
  3943     fatal("Could not disable polling page");
  3945 };
  3947 // Mark the polling page as readable
  3948 void os::make_polling_page_readable(void) {
  3949   // Changed according to os_linux.cpp.
  3950   if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
  3951     fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
  3953 };
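// Note on how the two functions above are used: Java threads periodically
// execute a load from the polling page. To bring threads to a safepoint, the
// VM makes the page unreadable; the resulting SIGSEGV is recognized by the
// signal handler as a safepoint poll rather than a real fault.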
  3955 int os::active_processor_count() {
  3956   int online_cpus = ::sysconf(_SC_NPROCESSORS_ONLN);
  3957   assert(online_cpus > 0 && online_cpus <= processor_count(), "sanity check");
  3958   return online_cpus;
  3961 void os::set_native_thread_name(const char *name) {
  3962   // Not yet implemented.
  3963   return;
  3966 bool os::distribute_processes(uint length, uint* distribution) {
  3967   // Not yet implemented.
  3968   return false;
  3971 bool os::bind_to_processor(uint processor_id) {
  3972   // Not yet implemented.
  3973   return false;
  3976 void os::SuspendedThreadTask::internal_do_task() {
  3977   if (do_suspend(_thread->osthread())) {
  3978     SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
  3979     do_task(context);
  3980     do_resume(_thread->osthread());
  3984 class PcFetcher : public os::SuspendedThreadTask {
  3985 public:
  3986   PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  3987   ExtendedPC result();
  3988 protected:
  3989   void do_task(const os::SuspendedThreadTaskContext& context);
  3990 private:
  3991   ExtendedPC _epc;
  3992 };
  3994 ExtendedPC PcFetcher::result() {
  3995   guarantee(is_done(), "task is not done yet.");
  3996   return _epc;
  3999 void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  4000   Thread* thread = context.thread();
  4001   OSThread* osthread = thread->osthread();
  4002   if (osthread->ucontext() != NULL) {
  4003     _epc = os::Aix::ucontext_get_pc((ucontext_t *) context.ucontext());
  4004   } else {
  4005     // NULL context is unexpected, double-check this is the VMThread.
  4006     guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  4010 // Suspends the target using the signal mechanism and then grabs the PC before
  4011 // resuming the target. Used by the flat-profiler only
  4012 ExtendedPC os::get_thread_pc(Thread* thread) {
  4013   // Make sure that it is called by the watcher for the VMThread.
  4014   assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  4015   assert(thread->is_VM_thread(), "Can only be called for VMThread");
  4017   PcFetcher fetcher(thread);
  4018   fetcher.run();
  4019   return fetcher.result();
  4022 // Not needed on AIX.
  4023 // int os::Aix::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime) {
  4024 // }
  4026 ////////////////////////////////////////////////////////////////////////////////
  4027 // debug support
  4029 static address same_page(address x, address y) {
  4030   intptr_t page_bits = -os::vm_page_size();
  4031   if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
  4032     return x;
  4033   else if (x > y)
  4034     return (address)(intptr_t(y) | ~page_bits) + 1;
  4035   else
  4036     return (address)(intptr_t(y) & page_bits);
  4039 bool os::find(address addr, outputStream* st) {
  4040   Unimplemented();
  4041   return false;
  4044 ////////////////////////////////////////////////////////////////////////////////
  4045 // misc
  4047 // This does not do anything on Aix. This is basically a hook for being
  4048 // able to use structured exception handling (thread-local exception filters)
  4049 // on, e.g., Win32.
  4050 void
  4051 os::os_exception_wrapper(java_call_t f, JavaValue* value, methodHandle* method,
  4052                          JavaCallArguments* args, Thread* thread) {
  4053   f(value, method, args, thread);
  4056 void os::print_statistics() {
  4059 int os::message_box(const char* title, const char* message) {
  4060   int i;
  4061   fdStream err(defaultStream::error_fd());
  4062   for (i = 0; i < 78; i++) err.print_raw("=");
  4063   err.cr();
  4064   err.print_raw_cr(title);
  4065   for (i = 0; i < 78; i++) err.print_raw("-");
  4066   err.cr();
  4067   err.print_raw_cr(message);
  4068   for (i = 0; i < 78; i++) err.print_raw("=");
  4069   err.cr();
  4071   char buf[16];
  4072   // Prevent process from exiting upon "read error" without consuming all CPU
  4073   while (::read(0, buf, sizeof(buf)) <= 0) { ::sleep(100); }
  4075   return buf[0] == 'y' || buf[0] == 'Y';
  4078 int os::stat(const char *path, struct stat *sbuf) {
  4079   char pathbuf[MAX_PATH];
  4080   if (strlen(path) > MAX_PATH - 1) {
  4081     errno = ENAMETOOLONG;
  4082     return -1;
  4084   os::native_path(strcpy(pathbuf, path));
  4085   return ::stat(pathbuf, sbuf);
  4088 bool os::check_heap(bool force) {
  4089   return true;
  4092 // int local_vsnprintf(char* buf, size_t count, const char* format, va_list args) {
  4093 //   return ::vsnprintf(buf, count, format, args);
  4094 // }
  4096 // Is a (classpath) directory empty?
  4097 bool os::dir_is_empty(const char* path) {
  4098   Unimplemented();
  4099   return false;
  4102 // This code originates from JDK's sysOpen and open64_w
  4103 // from src/solaris/hpi/src/system_md.c
  4105 #ifndef O_DELETE
  4106 #define O_DELETE 0x10000
  4107 #endif
  4109 // Open a file. Unlink the file immediately after open returns
  4110 // if the specified oflag has the O_DELETE flag set.
  4111 // O_DELETE is used only in j2se/src/share/native/java/util/zip/ZipFile.c
  4113 int os::open(const char *path, int oflag, int mode) {
  4115   if (strlen(path) > MAX_PATH - 1) {
  4116     errno = ENAMETOOLONG;
  4117     return -1;
  4119   int fd;
  4120   int o_delete = (oflag & O_DELETE);
  4121   oflag = oflag & ~O_DELETE;
  4123   fd = ::open64(path, oflag, mode);
  4124   if (fd == -1) return -1;
  4126   // If the open succeeded, the file might still be a directory.
  4128     struct stat64 buf64;
  4129     int ret = ::fstat64(fd, &buf64);
  4130     int st_mode = buf64.st_mode;
  4132     if (ret != -1) {
  4133       if ((st_mode & S_IFMT) == S_IFDIR) {
  4134         errno = EISDIR;
  4135         ::close(fd);
  4136         return -1;
  4138     } else {
  4139       ::close(fd);
  4140       return -1;
  4144   // All file descriptors that are opened in the JVM and not
  4145   // specifically destined for a subprocess should have the
  4146   // close-on-exec flag set. If we don't set it, then careless 3rd
  4147   // party native code might fork and exec without closing all
  4148   // appropriate file descriptors (e.g. as we do in closeDescriptors in
  4149   // UNIXProcess.c), and this in turn might:
  4150   //
  4151   // - cause end-of-file to fail to be detected on some file
  4152   //   descriptors, resulting in mysterious hangs, or
  4153   //
  4154   // - might cause an fopen in the subprocess to fail on a system
  4155   //   suffering from bug 1085341.
  4156   //
  4157   // (Yes, the default setting of the close-on-exec flag is a Unix
  4158   // design flaw.)
  4159   //
  4160   // See:
  4161   // 1085341: 32-bit stdio routines should support file descriptors >255
  4162   // 4843136: (process) pipe file descriptor from Runtime.exec not being closed
  4163   // 6339493: (process) Runtime.exec does not close all file descriptors on Solaris 9
  4164 #ifdef FD_CLOEXEC
  4166     int flags = ::fcntl(fd, F_GETFD);
  4167     if (flags != -1)
  4168       ::fcntl(fd, F_SETFD, flags | FD_CLOEXEC);
  4170 #endif
  4172   if (o_delete != 0) {
  4173     ::unlink(path);
  4175   return fd;
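// A hedged example (path is hypothetical): opening a scratch file that loses
// its directory entry as soon as the descriptor exists, courtesy of O_DELETE:
//
//   int fd = os::open("/tmp/scratch.tmp", O_RDWR | O_CREAT | O_DELETE, 0600);
//   // ... use fd; the file vanishes completely when fd is closed ...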
  4179 // create binary file, rewriting existing file if required
  4180 int os::create_binary_file(const char* path, bool rewrite_existing) {
  4181   Unimplemented();
  4182   return 0;
  4185 // return current position of file pointer
  4186 jlong os::current_file_offset(int fd) {
  4187   return (jlong)::lseek64(fd, (off64_t)0, SEEK_CUR);
  4190 // move file pointer to the specified offset
  4191 jlong os::seek_to_file_offset(int fd, jlong offset) {
  4192   return (jlong)::lseek64(fd, (off64_t)offset, SEEK_SET);
  4195 // This code originates from JDK's sysAvailable
  4196 // from src/solaris/hpi/src/native_threads/src/sys_api_td.c
  4198 int os::available(int fd, jlong *bytes) {
  4199   jlong cur, end;
  4200   int mode;
  4201   struct stat64 buf64;
  4203   if (::fstat64(fd, &buf64) >= 0) {
  4204     mode = buf64.st_mode;
  4205     if (S_ISCHR(mode) || S_ISFIFO(mode) || S_ISSOCK(mode)) {
  4206       // XXX: is the following call interruptible? If so, this might
  4207       // need to go through the INTERRUPT_IO() wrapper as for other
  4208       // blocking, interruptible calls in this file.
  4209       int n;
  4210       if (::ioctl(fd, FIONREAD, &n) >= 0) {
  4211         *bytes = n;
  4212         return 1;
  4216   if ((cur = ::lseek64(fd, 0L, SEEK_CUR)) == -1) {
  4217     return 0;
  4218   } else if ((end = ::lseek64(fd, 0L, SEEK_END)) == -1) {
  4219     return 0;
  4220   } else if (::lseek64(fd, cur, SEEK_SET) == -1) {
  4221     return 0;
  4223   *bytes = end - cur;
  4224   return 1;
  4227 int os::socket_available(int fd, jint *pbytes) {
  4228   // Linux doc says EINTR not returned, unlike Solaris
  4229   int ret = ::ioctl(fd, FIONREAD, pbytes);
  4231   //%% note ioctl can return 0 when successful, JVM_SocketAvailable
  4232   // is expected to return 0 on failure and 1 on success to the jdk.
  4233   return (ret < 0) ? 0 : 1;
  4236 // Map a block of memory.
  4237 char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
  4238                         char *addr, size_t bytes, bool read_only,
  4239                         bool allow_exec) {
  4240   Unimplemented();
  4241   return NULL;
  4245 // Remap a block of memory.
  4246 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
  4247                           char *addr, size_t bytes, bool read_only,
  4248                           bool allow_exec) {
  4249   // same as map_memory() on this OS
  4250   return os::map_memory(fd, file_name, file_offset, addr, bytes, read_only,
  4251                         allow_exec);
  4254 // Unmap a block of memory.
  4255 bool os::pd_unmap_memory(char* addr, size_t bytes) {
  4256   return munmap(addr, bytes) == 0;
  4259 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
  4260 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
  4261 // of a thread.
  4262 //
  4263 // current_thread_cpu_time() and thread_cpu_time(Thread*) returns
  4264 // the fast estimate available on the platform.
  4266 jlong os::current_thread_cpu_time() {
  4267   // return user + sys since the cost is the same
  4268   const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
  4269   assert(n >= 0, "negative CPU time");
  4270   return n;
  4273 jlong os::thread_cpu_time(Thread* thread) {
  4274   // consistent with what current_thread_cpu_time() returns
  4275   const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
  4276   assert(n >= 0, "negative CPU time");
  4277   return n;
  4280 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
  4281   const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
  4282   assert(n >= 0, "negative CPU time");
  4283   return n;
  4286 static bool thread_cpu_time_unchecked(Thread* thread, jlong* p_sys_time, jlong* p_user_time) {
  4287   bool error = false;
  4289   jlong sys_time = 0;
  4290   jlong user_time = 0;
  4292   // Reimplemented using getthrds64().
  4293   //
  4294   // It goes like this:
  4295   // For the thread in question, get the kernel thread id. Then get the
  4296   // kernel thread statistics using that id.
  4297   //
  4298   // Of course this only works when no pthread scheduling is used,
  4299   // i.e. there is a 1:1 relationship to kernel threads.
  4300   // On AIX, see the AIXTHREAD_SCOPE variable.
  4302   pthread_t pthtid = thread->osthread()->pthread_id();
  4304   // retrieve kernel thread id for the pthread:
  4305   tid64_t tid = 0;
  4306   struct __pthrdsinfo pinfo;
  4307   // I just love those otherworldly IBM APIs which force me to hand down
  4308   // dummy buffers for stuff I don't care about...
  4309   char dummy[1];
  4310   int dummy_size = sizeof(dummy);
  4311   if (pthread_getthrds_np(&pthtid, PTHRDSINFO_QUERY_TID, &pinfo, sizeof(pinfo),
  4312                           dummy, &dummy_size) == 0) {
  4313     tid = pinfo.__pi_tid;
  4314   } else {
  4315     tty->print_cr("pthread_getthrds_np failed.");
  4316     error = true;
  4319   // retrieve kernel timing info for that kernel thread
  4320   if (!error) {
  4321     struct thrdentry64 thrdentry;
  4322     if (getthrds64(getpid(), &thrdentry, sizeof(thrdentry), &tid, 1) == 1) {
  4323       sys_time = thrdentry.ti_ru.ru_stime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_stime.tv_usec * 1000LL;
  4324       user_time = thrdentry.ti_ru.ru_utime.tv_sec * 1000000000LL + thrdentry.ti_ru.ru_utime.tv_usec * 1000LL;
  4325     } else {
  4326       tty->print_cr("getthrds64 failed.");
  4327       error = true;
  4331   if (p_sys_time) {
  4332     *p_sys_time = sys_time;
  4335   if (p_user_time) {
  4336     *p_user_time = user_time;
  4339   if (error) {
  4340     return false;
  4343   return true;
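// Illustrative use of the helper above:
//
//   jlong sys_ns, user_ns;
//   if (thread_cpu_time_unchecked(thread, &sys_ns, &user_ns)) {
//     // both values are in nanoseconds, taken from the kernel thread stats
//   }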
  4346 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
  4347   jlong sys_time;
  4348   jlong user_time;
  4350   if (!thread_cpu_time_unchecked(thread, &sys_time, &user_time)) {
  4351     return -1;
  4354   return user_sys_cpu_time ? sys_time + user_time : user_time;
  4357 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  4358   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  4359   info_ptr->may_skip_backward = false;     // elapsed time not wall time
  4360   info_ptr->may_skip_forward = false;      // elapsed time not wall time
  4361   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
  4364 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
  4365   info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
  4366   info_ptr->may_skip_backward = false;     // elapsed time not wall time
  4367   info_ptr->may_skip_forward = false;      // elapsed time not wall time
  4368   info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
  4371 bool os::is_thread_cpu_time_supported() {
  4372   return true;
  4375 // System loadavg support. Returns -1 if load average cannot be obtained.
  4376 // For now just return the system wide load average (no processor sets).
  4377 int os::loadavg(double values[], int nelem) {
  4379   // Implemented using libperfstat on AIX.
  4381   guarantee(nelem >= 0 && nelem <= 3, "argument error");
  4382   guarantee(values, "argument error");
  4384   if (os::Aix::on_pase()) {
  4385     Unimplemented();
  4386     return -1;
  4387   } else {
  4388     // AIX: use libperfstat
  4389     //
  4390     // See also:
  4391     // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
  4392     // /usr/include/libperfstat.h:
  4394     // Use get_cpuinfo, which is already AIX-version independent.
  4395     os::Aix::cpuinfo_t ci;
  4396     if (os::Aix::get_cpuinfo(&ci)) {
  4397       for (int i = 0; i < nelem; i++) {
  4398         values[i] = ci.loadavg[i];
  4400     } else {
  4401       return -1;
  4403     return nelem;
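// Illustrative use:
//
//   double avg[3];
//   if (os::loadavg(avg, 3) == 3) {
//     // avg[0..2] hold the 1-, 5- and 15-minute system load averages
//   }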
  4407 void os::pause() {
  4408   char filename[MAX_PATH];
  4409   if (PauseAtStartupFile && PauseAtStartupFile[0]) {
  4410     jio_snprintf(filename, MAX_PATH, "%s", PauseAtStartupFile);
  4411   } else {
  4412     jio_snprintf(filename, MAX_PATH, "./vm.paused.%d", current_process_id());
  4415   int fd = ::open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0666);
  4416   if (fd != -1) {
  4417     struct stat buf;
  4418     ::close(fd);
  4419     while (::stat(filename, &buf) == 0) {
  4420       (void)::poll(NULL, 0, 100);
  4422   } else {
  4423     jio_fprintf(stderr,
  4424       "Could not open pause file '%s', continuing immediately.\n", filename);
  4428 bool os::Aix::is_primordial_thread() {
  4429   if (pthread_self() == (pthread_t)1) {
  4430     return true;
  4431   } else {
  4432     return false;
  4436 // OS recognition (PASE/AIX, OS level). Call this before calling any of
  4437 // the static functions Aix::on_pase() or Aix::os_version().
  4438 void os::Aix::initialize_os_info() {
  4440   assert(_on_pase == -1 && _os_version == -1, "already called.");
  4442   struct utsname uts;
  4443   memset(&uts, 0, sizeof(uts));
  4444   strcpy(uts.sysname, "?");
  4445   if (::uname(&uts) == -1) {
  4446     fprintf(stderr, "uname failed (%d)\n", errno);
  4447     guarantee(0, "Could not determine whether we run on AIX or PASE");
  4448   } else {
  4449     if (Verbose) {
  4450       fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
  4451               "node \"%s\" machine \"%s\"\n",
  4452               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
  4454     const int major = atoi(uts.version);
  4455     assert(major > 0, "invalid OS version");
  4456     const int minor = atoi(uts.release);
  4457     assert(minor > 0, "invalid OS release");
  4458     _os_version = (major << 8) | minor;
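    // Example encoding: AIX 6.1 is stored as 0x0601, AIX 5.3 as 0x0503
    // (compare with the 0x0503 check below).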
  4459     if (strcmp(uts.sysname, "OS400") == 0) {
  4460       Unimplemented();
  4461     } else if (strcmp(uts.sysname, "AIX") == 0) {
  4462       // We run on AIX. We do not support versions older than AIX 5.3.
  4463       _on_pase = 0;
  4464       if (_os_version < 0x0503) {
  4465         fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
  4466         assert(false, "AIX release too old.");
  4467       } else {
  4468         if (Verbose) {
  4469           fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
  4472     } else {
  4473       assert(false, "unknown OS");
  4477   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
  4479 } // end: os::Aix::initialize_os_info()
  4481 // Scan the environment for important settings which might affect the VM.
  4482 // Trace out settings. Warn about invalid settings and/or correct them.
  4483 //
  4484 // Must run after os::Aix::initialize_os_info().
  4485 void os::Aix::scan_environment() {
  4487   char* p;
  4488   int rc;
  4490   // Warn explicitly if EXTSHM=ON is used. That switch changes how
  4491   // System V shared memory behaves. One effect is that the page size of
  4492   // shared memory cannot be changed dynamically, effectively preventing
  4493   // large pages from working.
  4494   // This switch was needed on AIX 32bit, but on AIX 64bit the general
  4495   // recommendation is (in OSS notes) to switch it off.
  4496   p = ::getenv("EXTSHM");
  4497   if (Verbose) {
  4498     fprintf(stderr, "EXTSHM=%s.\n", p ? p : "<unset>");
  4500   if (p && strcmp(p, "ON") == 0) {
  4501     fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.\n");
  4502     _extshm = 1;
  4503   } else {
  4504     _extshm = 0;
  4507   // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
  4508   // Not tested, not supported.
  4509   //
  4510   // Note that it might be worth the trouble to test and to require it, if only to
  4511   // get useful return codes for mprotect.
  4512   //
  4513   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
  4514   // exec() ? before loading the libjvm ? ....)
  4515   p = ::getenv("XPG_SUS_ENV");
  4516   if (Verbose) {
  4517     fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
  4519   if (p && strcmp(p, "ON") == 0) {
  4520     _xpg_sus_mode = 1;
  4521     fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
  4522     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
  4523     // clobber address ranges. If we ever want to support that, we have to do some
  4524     // testing first.
  4525     guarantee(false, "XPG_SUS_ENV=ON not supported");
  4526   } else {
  4527     _xpg_sus_mode = 0;
  4530   // Switch off AIX internal (pthread) guard pages. This has
  4531   // immediate effect for any pthread_create calls which follow.
  4532   p = ::getenv("AIXTHREAD_GUARDPAGES");
  4533   if (Verbose) {
  4534     fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
  4535     fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
  4537   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
  4538   guarantee(rc == 0, "");
  4540 } // end: os::Aix::scan_environment()
  4542 // PASE: initialize the libo4 library (AS400 PASE porting library).
  4543 void os::Aix::initialize_libo4() {
  4544   Unimplemented();
  4547 // AIX: initialize the libperfstat library (we load this dynamically
  4548 // because it is only available on AIX).
  4549 void os::Aix::initialize_libperfstat() {
  4551   assert(os::Aix::on_aix(), "AIX only");
  4553   if (!libperfstat::init()) {
  4554     fprintf(stderr, "libperfstat initialization failed.\n");
  4555     assert(false, "libperfstat initialization failed");
  4556   } else {
  4557     if (Verbose) {
  4558       fprintf(stderr, "libperfstat initialized.\n");
  4561 } // end: os::Aix::initialize_libperfstat
  4563 /////////////////////////////////////////////////////////////////////////////
  4564 // thread stack
  4566 // function to query the current stack size using pthread_getthrds_np
  4567 //
  4568 // ! do not change anything here unless you know what you are doing !
  4569 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
  4571   // This only works when invoked on a pthread. As we agreed not to use
  4572   // primordial threads anyway, I assert that here.
  4573   guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
  4575   // information about this api can be found (a) in the pthread.h header and
  4576   // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
  4577   //
  4578   // The use of this API to find out the current stack is kind of undefined.
  4579   // But after a lot of trying and asking IBM about it, I concluded that it is safe
  4580   // enough for cases where I let the pthread library create its stacks. For cases
  4581   // where I create my own stack and pass it to pthread_create, it seems not to
  4582   // work (the returned stack size in that case is 0).
  4584   pthread_t tid = pthread_self();
  4585   struct __pthrdsinfo pinfo;
  4586   char dummy[1]; // we only need this to satisfy the api and to not get E
  4587   int dummy_size = sizeof(dummy);
  4589   memset(&pinfo, 0, sizeof(pinfo));
  4591   const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
  4592                                       sizeof(pinfo), dummy, &dummy_size);
  4594   if (rc != 0) {
  4595     fprintf(stderr, "pthread_getthrds_np failed (%d)\n", rc);
  4596     guarantee(0, "pthread_getthrds_np failed");
  4599   guarantee(pinfo.__pi_stackend, "returned stack base invalid");
  4601   // The following can happen when invoking pthread_getthrds_np on a pthread running on a user-provided stack
  4602   // (when handing down a stack to pthread_create, see pthread_attr_setstackaddr).
  4603   // Not sure what to do here - I feel inclined to forbid this use case completely.
  4604   guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
  4606   // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
  4607   if (p_stack_base) {
  4608     (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
  4611   if (p_stack_size) {
  4612     (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
  4615 #ifndef PRODUCT
  4616   if (Verbose) {
  4617     fprintf(stderr,
  4618             "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT
  4619             ", real stack_size=" INTPTR_FORMAT
  4620             ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n",
  4621             (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
  4622             (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()),
  4623             pinfo.__pi_stacksize - os::Aix::stack_page_size());
  4625 #endif
  4627 } // end query_stack_dimensions
  4629 // get the current stack base from the OS (actually, the pthread library)
  4630 address os::current_stack_base() {
  4631   address p;
  4632   query_stack_dimensions(&p, 0);
  4633   return p;
  4636 // get the current stack size from the OS (actually, the pthread library)
  4637 size_t os::current_stack_size() {
  4638   size_t s;
  4639   query_stack_dimensions(0, &s);
  4640   return s;
  4643 // Refer to the comments in os_solaris.cpp park-unpark.
  4644 //
  4645 // Beware -- Some versions of NPTL embody a flaw where pthread_cond_timedwait() can
  4646 // hang indefinitely. For instance NPTL 0.60 on 2.4.21-4ELsmp is vulnerable.
  4647 // For specifics regarding the bug see GLIBC BUGID 261237 :
  4648 //    http://www.mail-archive.com/debian-glibc@lists.debian.org/msg10837.html.
  4649 // Briefly, pthread_cond_timedwait() calls with an expiry time that's not in the future
  4650 // will either hang or corrupt the condvar, resulting in subsequent hangs if the condvar
  4651 // is used. (The simple C test-case provided in the GLIBC bug report manifests the
  4652 // hang). The JVM is vulnerable via sleep(), Object.wait(timo), LockSupport.parkNanos()
  4653 // and monitorenter when we're using 1-0 locking. All those operations may result in
  4654 // calls to pthread_cond_timedwait(). Using LD_ASSUME_KERNEL to use an older version
  4655 // of libpthread avoids the problem, but isn't practical.
  4656 //
  4657 // Possible remedies:
  4658 //
  4659 // 1.   Establish a minimum relative wait time. 50 to 100 msecs seems to work.
  4660 //      This is palliative and probabilistic, however. If the thread is preempted
  4661 //      between the call to compute_abstime() and pthread_cond_timedwait(), more
  4662 //      than the minimum period may have passed, and the abstime may be stale (in the
  4663 //      past) resulting in a hang. Using this technique reduces the odds of a hang
  4664 //      but the JVM is still vulnerable, particularly on heavily loaded systems.
  4665 //
  4666 // 2.   Modify park-unpark to use per-thread (per ParkEvent) pipe-pairs instead
  4667 //      of the usual flag-condvar-mutex idiom. The write side of the pipe is set
  4668 //      NDELAY. unpark() reduces to write(), park() reduces to read() and park(timo)
  4669 //      reduces to poll()+read(). This works well, but consumes 2 FDs per extant
  4670 //      thread.
  4671 //
  4672 // 3.   Embargo pthread_cond_timedwait() and implement a native "chron" thread
  4673 //      that manages timeouts. We'd emulate pthread_cond_timedwait() by enqueuing
  4674 //      a timeout request to the chron thread and then blocking via pthread_cond_wait().
  4675 //      This also works well. In fact it avoids kernel-level scalability impediments
  4676 //      on certain platforms that don't handle lots of active pthread_cond_timedwait()
  4677 //      timers in a graceful fashion.
  4678 //
  4679 // 4.   When the abstime value is in the past it appears that control returns
  4680 //      correctly from pthread_cond_timedwait(), but the condvar is left corrupt.
  4681 //      Subsequent timedwait/wait calls may hang indefinitely. Given that, we
  4682 //      can avoid the problem by reinitializing the condvar -- by cond_destroy()
  4683 //      followed by cond_init() -- after all calls to pthread_cond_timedwait().
  4684 //      It may be possible to avoid reinitialization by checking the return
  4685 //      value from pthread_cond_timedwait(). In addition to reinitializing the
  4686 //      condvar we must establish the invariant that cond_signal() is only called
  4687 //      within critical sections protected by the adjunct mutex. This prevents
  4688 //      cond_signal() from "seeing" a condvar that's in the midst of being
  4689 //      reinitialized or that is corrupt. Sadly, this invariant obviates the
  4690 //      desirable signal-after-unlock optimization that avoids futile context switching.
  4691 //
  4692 //      I'm also concerned that some versions of NPTL might allocate an auxiliary
  4693 //      structure when a condvar is used or initialized. cond_destroy() would
  4694 //      release the helper structure. Our reinitialize-after-timedwait fix
  4695 //      would put excessive stress on malloc/free and the locks protecting the C-heap.
  4696 //
  4697 // We currently use (4). See the WorkAroundNPTLTimedWaitHang flag.
  4698 // It may be possible to refine (4) by checking the kernel and NPTL versions
  4699 // and only enabling the work-around for vulnerable environments.
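       //
       // A minimal sketch of the remedy-(4) pattern (the helper name
       // guarded_cond_timedwait is hypothetical and only for illustration;
       // the real code below inlines this logic, see Parker::park):
       //
       //   static int guarded_cond_timedwait(pthread_cond_t* cond,
       //                                     pthread_mutex_t* mutex,
       //                                     const struct timespec* abst) {
       //     // Caller must hold 'mutex'.
       //     int status = pthread_cond_timedwait(cond, mutex, abst);
       //     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
       //       // The condvar may be corrupt after returning an error; while
       //       // still holding the adjunct mutex, reinitialize it.
       //       pthread_cond_destroy(cond);
       //       pthread_cond_init(cond, NULL);
       //     }
       //     return status;
       //   }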
  4701 // utility to compute the abstime argument to timedwait:
  4702 // millis is the relative timeout time
  4703 // abstime will be the absolute timeout time
  4704 // TODO: replace compute_abstime() with unpackTime()
  4706 static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
  4707   if (millis < 0) millis = 0;
  4708   struct timeval now;
  4709   int status = gettimeofday(&now, NULL);
  4710   assert(status == 0, "gettimeofday");
  4711   jlong seconds = millis / 1000;
  4712   millis %= 1000;
  4713   if (seconds > 50000000) { // see man cond_timedwait(3T)
  4714     seconds = 50000000;
  4715   }
  4716   abstime->tv_sec = now.tv_sec  + seconds;
  4717   long       usec = now.tv_usec + millis * 1000;
  4718   if (usec >= 1000000) {
  4719     abstime->tv_sec += 1;
  4720     usec -= 1000000;
  4721   }
  4722   abstime->tv_nsec = usec * 1000;
  4723   return abstime;
  4724 }
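       // Worked example (illustrative numbers): millis == 1500 with
       // gettimeofday() returning now = { tv_sec = 1000, tv_usec = 700000 }
       // gives seconds = 1, millis = 500, so usec = 700000 + 500000 = 1200000;
       // since usec >= 1000000 the result is normalized to
       // abstime = { tv_sec = 1002, tv_nsec = 200000000 }.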
  4727 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
  4728 // Conceptually TryPark() should be equivalent to park(0).
  4730 int os::PlatformEvent::TryPark() {
  4731   for (;;) {
  4732     const int v = _Event;
  4733     guarantee ((v == 0) || (v == 1), "invariant");
  4734     if (Atomic::cmpxchg (0, &_Event, v) == v) return v;
  4735   }
  4736 }
  4738 void os::PlatformEvent::park() {       // AKA "down()"
  4739   // Invariant: Only the thread associated with the Event/PlatformEvent
  4740   // may call park().
  4741   // TODO: assert that _Assoc != NULL or _Assoc == Self
  4742   int v;
  4743   for (;;) {
  4744     v = _Event;
  4745     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  4746   }
  4747   guarantee (v >= 0, "invariant");
  4748   if (v == 0) {
  4749     // Do this the hard way by blocking ...
  4750     int status = pthread_mutex_lock(_mutex);
  4751     assert_status(status == 0, status, "mutex_lock");
  4752     guarantee (_nParked == 0, "invariant");
  4753     ++ _nParked;
  4754     while (_Event < 0) {
  4755       status = pthread_cond_wait(_cond, _mutex);
  4756       assert_status(status == 0 || status == ETIMEDOUT, status, "cond_wait");
  4757     }
  4758     -- _nParked;
  4760     // In theory we could move the ST of 0 into _Event past the unlock(),
  4761     // but then we'd need a MEMBAR after the ST.
  4762     _Event = 0;
  4763     status = pthread_mutex_unlock(_mutex);
  4764     assert_status(status == 0, status, "mutex_unlock");
  4765   }
  4766   guarantee (_Event >= 0, "invariant");
  4767 }
  4769 int os::PlatformEvent::park(jlong millis) {
  4770   guarantee (_nParked == 0, "invariant");
  4772   int v;
  4773   for (;;) {
  4774     v = _Event;
  4775     if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
  4776   }
  4777   guarantee (v >= 0, "invariant");
  4778   if (v != 0) return OS_OK;
  4780   // We do this the hard way, by blocking the thread.
  4781   // Consider enforcing a minimum timeout value.
  4782   struct timespec abst;
  4783   compute_abstime(&abst, millis);
  4785   int ret = OS_TIMEOUT;
  4786   int status = pthread_mutex_lock(_mutex);
  4787   assert_status(status == 0, status, "mutex_lock");
  4788   guarantee (_nParked == 0, "invariant");
  4789   ++_nParked;
  4791   // Object.wait(timo) will return because of
  4792   // (a) notification
  4793   // (b) timeout
  4794   // (c) thread.interrupt
  4795   //
  4796   // Thread.interrupt and object.notify{All} both call Event::set.
  4797   // That is, we treat thread.interrupt as a special case of notification.
  4798   // The underlying pthread implementation, pthread_cond_timedwait, admits
  4799   // spurious/premature wakeups, but the JLS/JVM spec prevents the
  4800   // JVM from making those visible to Java code. As such, we must
  4801   // filter out spurious wakeups. We assume all ETIME returns are valid.
  4802   //
  4803   // TODO: properly differentiate simultaneous notify+interrupt.
  4804   // In that case, we should propagate the notify to another waiter.
  4806   while (_Event < 0) {
  4807     status = pthread_cond_timedwait(_cond, _mutex, &abst);
  4808     assert_status(status == 0 || status == ETIMEDOUT,
  4809           status, "cond_timedwait");
  4810     if (!FilterSpuriousWakeups) break;         // previous semantics
  4811     if (status == ETIMEDOUT) break;
  4812     // We consume and ignore EINTR and spurious wakeups.
  4813   }
  4814   --_nParked;
  4815   if (_Event >= 0) {
  4816     ret = OS_OK;
  4817   }
  4818   _Event = 0;
  4819   status = pthread_mutex_unlock(_mutex);
  4820   assert_status(status == 0, status, "mutex_unlock");
  4821   assert (_nParked == 0, "invariant");
  4822   return ret;
  4823 }
  4825 void os::PlatformEvent::unpark() {
  4826   int v, AnyWaiters;
  4827   for (;;) {
  4828     v = _Event;
  4829     if (v > 0) {
  4830       // The LD of _Event could have reordered or be satisfied
  4831       // by a read-aside from this processor's write buffer.
  4832       // To avoid problems execute a barrier and then
  4833       // ratify the value.
  4834       OrderAccess::fence();
  4835       if (_Event == v) return;
  4836       continue;
  4837     }
  4838     if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
  4839   }
  4840   if (v < 0) {
  4841     // Wait for the thread associated with the event to vacate
  4842     int status = pthread_mutex_lock(_mutex);
  4843     assert_status(status == 0, status, "mutex_lock");
  4844     AnyWaiters = _nParked;
  4846     if (AnyWaiters != 0) {
  4847       // We intentionally signal *after* dropping the lock
  4848       // to avoid a common class of futile wakeups.
  4849       status = pthread_cond_signal(_cond);
  4850       assert_status(status == 0, status, "cond_signal");
  4851     }
  4852     // Mutex should be locked for pthread_cond_signal(_cond).
  4853     status = pthread_mutex_unlock(_mutex);
  4854     assert_status(status == 0, status, "mutex_unlock");
  4855   }
  4857   // Note that we signal() *after* dropping the lock for "immortal" Events.
  4858   // This is safe and avoids a common class of futile wakeups. In rare
  4859   // circumstances this can cause a thread to return prematurely from
  4860   // cond_{timed}wait() but the spurious wakeup is benign and the victim will
  4861   // simply re-test the condition and re-park itself.
  4862 }
  4865 // JSR166
  4866 // -------------------------------------------------------
  4868 //
  4869 // The solaris and linux implementations of park/unpark are fairly
  4870 // conservative for now, but can be improved. They currently use a
  4871 // mutex/condvar pair, plus a count.
  4872 // Park decrements count if > 0, else does a condvar wait. Unpark
  4873 // sets count to 1 and signals condvar. Only one thread ever waits
  4874 // on the condvar. Contention seen when trying to park implies that someone
  4875 // is unparking you, so don't wait. And spurious returns are fine, so there
  4876 // is no need to track notifications.
  4877 //
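       // (In Java terms: java.util.concurrent.locks.LockSupport.park/unpark
       // bottom out in the Parker::park/unpark implementations below.)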
  4879 #define MAX_SECS 100000000
  4880 //
  4881 // This code is common to linux and solaris and will be moved to a
  4882 // common place in dolphin.
  4883 //
  4884 // The passed in time value is either a relative time in nanoseconds
  4885 // or an absolute time in milliseconds. Either way it has to be unpacked
  4886 // into suitable seconds and nanoseconds components and stored in the
  4887 // given timespec structure.
  4888 // Given time is a 64-bit value and the time_t used in the timespec is only
  4889 // a signed-32-bit value (except on 64-bit Linux) we have to watch for
  4890 // overflow if times way in the future are given. Further, on Solaris versions
  4891 // prior to 10 there is a restriction (see cond_timedwait) that the specified
  4892 // number of seconds, in abstime, is less than current_time + 100,000,000.
  4893 // As it will be 28 years before "now + 100000000" will overflow we can
  4894 // ignore overflow and just impose a hard-limit on seconds using the value
  4895 // of "now + 100,000,000". This places a limit on the timeout of about 3.17
  4896 // years from "now".
  4897 //
  4899 static void unpackTime(timespec* absTime, bool isAbsolute, jlong time) {
  4900   assert (time > 0, "convertTime");
  4902   struct timeval now;
  4903   int status = gettimeofday(&now, NULL);
  4904   assert(status == 0, "gettimeofday");
  4906   time_t max_secs = now.tv_sec + MAX_SECS;
  4908   if (isAbsolute) {
  4909     jlong secs = time / 1000;
  4910     if (secs > max_secs) {
  4911       absTime->tv_sec = max_secs;
  4912     }
  4913     else {
  4914       absTime->tv_sec = secs;
  4915     }
  4916     absTime->tv_nsec = (time % 1000) * NANOSECS_PER_MILLISEC;
  4917   }
  4918   else {
  4919     jlong secs = time / NANOSECS_PER_SEC;
  4920     if (secs >= MAX_SECS) {
  4921       absTime->tv_sec = max_secs;
  4922       absTime->tv_nsec = 0;
  4923     }
  4924     else {
  4925       absTime->tv_sec = now.tv_sec + secs;
  4926       absTime->tv_nsec = (time % NANOSECS_PER_SEC) + now.tv_usec*1000;
  4927       if (absTime->tv_nsec >= NANOSECS_PER_SEC) {
  4928         absTime->tv_nsec -= NANOSECS_PER_SEC;
  4929         ++absTime->tv_sec; // note: this must be <= max_secs
  4930       }
  4931     }
  4932   }
  4933   assert(absTime->tv_sec >= 0, "tv_sec < 0");
  4934   assert(absTime->tv_sec <= max_secs, "tv_sec > max_secs");
  4935   assert(absTime->tv_nsec >= 0, "tv_nsec < 0");
  4936   assert(absTime->tv_nsec < NANOSECS_PER_SEC, "tv_nsec >= nanos_per_sec");
  4937 }
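       // Worked example (illustrative numbers): a relative timeout of
       // time == 2500000000 ns with now = { tv_sec = 1000, tv_usec = 600000 }
       // gives secs = 2, so tv_sec = 1002 and a nanosecond component of
       // 500000000 + 600000000 = 1100000000; this is then normalized to
       // absTime = { tv_sec = 1003, tv_nsec = 100000000 }.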
  4939 void Parker::park(bool isAbsolute, jlong time) {
  4940   // Optional fast-path check:
  4941   // Return immediately if a permit is available.
  4942   if (_counter > 0) {
  4943       _counter = 0;
  4944       OrderAccess::fence();
  4945       return;
  4946   }
  4948   Thread* thread = Thread::current();
  4949   assert(thread->is_Java_thread(), "Must be JavaThread");
  4950   JavaThread *jt = (JavaThread *)thread;
  4952   // Optional optimization -- avoid state transitions if there's an interrupt pending.
  4953   // Check interrupt before trying to wait
  4954   if (Thread::is_interrupted(thread, false)) {
  4955     return;
  4956   }
  4958   // Next, demultiplex/decode time arguments
  4959   timespec absTime;
  4960   if (time < 0 || (isAbsolute && time == 0)) { // don't wait at all
  4961     return;
  4962   }
  4963   if (time > 0) {
  4964     unpackTime(&absTime, isAbsolute, time);
  4965   }
  4968   // Enter safepoint region
  4969   // Beware of deadlocks such as 6317397.
  4970   // The per-thread Parker:: mutex is a classic leaf-lock.
  4971   // In particular a thread must never block on the Threads_lock while
  4972   // holding the Parker:: mutex. If safepoints are pending, both the
  4973   // ThreadBlockInVM() CTOR and DTOR may grab Threads_lock.
  4974   ThreadBlockInVM tbivm(jt);
  4976   // Don't wait if we cannot get the lock, since interference arises from
  4977   // unblocking. Also, check interrupt before trying to wait.
  4978   if (Thread::is_interrupted(thread, false) || pthread_mutex_trylock(_mutex) != 0) {
  4979     return;
  4980   }
  4982   int status;
  4983   if (_counter > 0) { // no wait needed
  4984     _counter = 0;
  4985     status = pthread_mutex_unlock(_mutex);
  4986     assert (status == 0, "invariant");
  4987     OrderAccess::fence();
  4988     return;
  4989   }
  4991 #ifdef ASSERT
  4992   // Don't catch signals while blocked; let the running threads have the signals.
  4993   // (This allows a debugger to break into the running thread.)
  4994   sigset_t oldsigs;
  4995   sigset_t* allowdebug_blocked = os::Aix::allowdebug_blocked_signals();
  4996   pthread_sigmask(SIG_BLOCK, allowdebug_blocked, &oldsigs);
  4997 #endif
  4999   OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
  5000   jt->set_suspend_equivalent();
  5001   // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
  5003   if (time == 0) {
  5004     status = pthread_cond_wait (_cond, _mutex);
  5005   } else {
  5006     status = pthread_cond_timedwait (_cond, _mutex, &absTime);
  5007     if (status != 0 && WorkAroundNPTLTimedWaitHang) {
  5008       pthread_cond_destroy (_cond);
  5009       pthread_cond_init    (_cond, NULL);
  5010     }
  5011   }
  5012   assert_status(status == 0 || status == EINTR ||
  5013                 status == ETIME || status == ETIMEDOUT,
  5014                 status, "cond_timedwait");
  5016 #ifdef ASSERT
  5017   pthread_sigmask(SIG_SETMASK, &oldsigs, NULL);
  5018 #endif
  5020   _counter = 0;
  5021   status = pthread_mutex_unlock(_mutex);
  5022   assert_status(status == 0, status, "invariant");
  5023   // If externally suspended while waiting, re-suspend
  5024   if (jt->handle_special_suspend_equivalent_condition()) {
  5025     jt->java_suspend_self();
  5026   }
  5028   OrderAccess::fence();
  5029 }
  5031 void Parker::unpark() {
  5032   int s, status;
  5033   status = pthread_mutex_lock(_mutex);
  5034   assert (status == 0, "invariant");
  5035   s = _counter;
  5036   _counter = 1;
  5037   if (s < 1) {
  5038     if (WorkAroundNPTLTimedWaitHang) {
  5039       status = pthread_cond_signal (_cond);
  5040       assert (status == 0, "invariant");
  5041       status = pthread_mutex_unlock(_mutex);
  5042       assert (status == 0, "invariant");
  5043     } else {
  5044       status = pthread_mutex_unlock(_mutex);
  5045       assert (status == 0, "invariant");
  5046       status = pthread_cond_signal (_cond);
  5047       assert (status == 0, "invariant");
  5048     }
  5049   } else {
  5050     status = pthread_mutex_unlock(_mutex);
  5051     assert (status == 0, "invariant");
  5052   }
  5053 }
  5056 extern char** environ;
  5058 // Run the specified command in a separate process. Return its exit value,
  5059 // or -1 on failure (e.g. can't fork a new process).
  5060 // Unlike system(), this function can be called from a signal handler. It
  5061 // doesn't block SIGINT et al.
  5062 int os::fork_and_exec(char* cmd) {
  5063   Unimplemented();
  5064   return 0;
  5065 }
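       // Should this ever be implemented on AIX, a sketch along the lines of the
       // Linux port (illustrative only, not compiled or tested here) could be:
       //
       //   const char* argv[4] = { "sh", "-c", cmd, NULL };
       //   pid_t pid = fork();
       //   if (pid < 0) return -1;                  // fork failed
       //   if (pid == 0) {                          // child: run the command
       //     execve("/bin/sh", (char* const*) argv, environ);
       //     _exit(-1);                             // only reached if execve failed
       //   }
       //   int status;                              // parent: reap the child
       //   while (waitpid(pid, &status, 0) < 0) {
       //     if (errno != EINTR) return -1;         // retry only on EINTR
       //   }
       //   return WIFEXITED(status) ? WEXITSTATUS(status) : -1;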
  5067 // is_headless_jre()
  5068 //
  5069 // Test for the existence of xawt/libmawt.so or libawt_xawt.so
  5070 // in order to report if we are running in a headless jre.
  5071 //
  5072 // Since JDK8, xawt/libmawt.so has been moved into the same directory
  5073 // as libawt.so and renamed to libawt_xawt.so.
  5074 bool os::is_headless_jre() {
  5075   struct stat statbuf;
  5076   char buf[MAXPATHLEN];
  5077   char libmawtpath[MAXPATHLEN];
  5078   const char *xawtstr  = "/xawt/libmawt.so";
  5079   const char *new_xawtstr = "/libawt_xawt.so";
  5081   char *p;
  5083   // Get path to libjvm.so
  5084   os::jvm_path(buf, sizeof(buf));
  5086   // Get rid of libjvm.so
  5087   p = strrchr(buf, '/');
  5088   if (p == NULL) return false;
  5089   else *p = '\0';
  5091   // Get rid of client or server
  5092   p = strrchr(buf, '/');
  5093   if (p == NULL) return false;
  5094   else *p = '\0';
  5096   // check xawt/libmawt.so
  5097   strcpy(libmawtpath, buf);
  5098   strcat(libmawtpath, xawtstr);
  5099   if (::stat(libmawtpath, &statbuf) == 0) return false;
  5101   // check libawt_xawt.so
  5102   strcpy(libmawtpath, buf);
  5103   strcat(libmawtpath, new_xawtstr);
  5104   if (::stat(libmawtpath, &statbuf) == 0) return false;
  5106   return true;
  5107 }
  5109 // Get the default path to the core file
  5110 // Returns the length of the string
  5111 int os::get_core_path(char* buffer, size_t bufferSize) {
  5112   const char* p = get_current_directory(buffer, bufferSize);
  5114   if (p == NULL) {
  5115     assert(p != NULL, "failed to get current directory");
  5116     return 0;
  5117   }
  5119   return strlen(buffer);
  5120 }
  5122 #ifndef PRODUCT
  5123 void TestReserveMemorySpecial_test() {
  5124   // No tests available for this platform
  5125 }
  5126 #endif
