src/os_cpu/linux_zero/vm/os_linux_zero.cpp

author:      twisti
date:        Wed, 18 Aug 2010 01:22:16 -0700
changeset:   2084:13b87063b4d8
parent:      1907:c18cbe5936b8
child:       2312:b675ff1ca7a3
permissions: -rw-r--r--

6977640: Zero and Shark fixes
Summary: A number of fixes for Zero and Shark.
Reviewed-by: twisti
Contributed-by: Gary Benson <gbenson@redhat.com>

/*
 * Copyright (c) 2003, 2007, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// do not include precompiled header file
#include "incls/_os_linux_zero.cpp.incl"

address os::current_stack_pointer() {
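  // The address of a stack-allocated local is a close approximation of
  // the stack pointer at this point; exactness is not required here.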
  address dummy = (address) &dummy;
  return dummy;
}

frame os::get_sender_for_C_frame(frame* fr) {
  ShouldNotCallThis();
}

frame os::current_frame() {
  // The only thing that calls this is the stack printing code in
  // VMError::report:
  //   - Step 110 (printing stack bounds) uses the sp in the frame
  //     to determine the amount of free space on the stack.  We
  //     set the sp to a close approximation of the real value in
  //     order to allow this step to complete.
  //   - Step 120 (printing native stack) tries to walk the stack.
  //     The frame we create has a NULL pc, which is ignored as an
  //     invalid frame.
  frame dummy = frame();
  dummy.set_sp((intptr_t *) current_stack_pointer());
  return dummy;
}

char* os::non_memory_address_word() {
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
#ifdef SPARC
  // On SPARC, 0 != %hi(any real address), because there is no
  // allocation in the first 1Kb of the virtual address space.
  return (char *) 0;
#else
  // This is the value for x86; works pretty well for PPC too.
  return (char *) -1;
#endif // SPARC
}

void os::initialize_thread() {
  // Nothing to do.
}

address os::Linux::ucontext_get_pc(ucontext_t* uc) {
  ShouldNotCallThis();
}

ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp,
                                        intptr_t** ret_fp) {
  ShouldNotCallThis();
}

frame os::fetch_frame_from_context(void* ucVoid) {
  ShouldNotCallThis();
}

extern "C" int
JVM_handle_linux_signal(int sig,
                        siginfo_t* info,
                        void* ucVoid,
                        int abort_if_unrecognized) {
  ucontext_t* uc = (ucontext_t*) ucVoid;

  Thread* t = ThreadLocalStorage::get_thread_slow();

  SignalHandlerMark shm(t);

  // Note: it's not uncommon for JNI code to use signal/sigset to
  // install and then restore a signal handler (e.g. to temporarily
  // block SIGPIPE, or to have a SIGILL handler when detecting CPU
  // type). When that happens, JVM_handle_linux_signal() might be
  // invoked with junk info/ucVoid. To avoid an unnecessary crash when
  // libjsig is not preloaded, try to handle signals that do not require
  // siginfo/ucontext first.

  if (sig == SIGPIPE || sig == SIGXFSZ) {
    // allow chained handler to go first
    if (os::Linux::chained_handler(sig, info, ucVoid)) {
      return true;
    } else {
      if (PrintMiscellaneous && (WizardMode || Verbose)) {
        char buf[64];
        warning("Ignoring %s - see bugs 4229104 or 646499219",
                os::exception_name(sig, buf, sizeof(buf)));
      }
      return true;
    }
  }

  JavaThread* thread = NULL;
  VMThread* vmthread = NULL;
  if (os::Linux::signal_handlers_are_installed) {
    if (t != NULL) {
      if (t->is_Java_thread()) {
        thread = (JavaThread*)t;
      }
      else if (t->is_VM_thread()) {
        vmthread = (VMThread *)t;
      }
    }
  }

  if (info != NULL && thread != NULL) {
    // Handle ALL stack overflow variations here
    if (sig == SIGSEGV) {
      address addr = (address) info->si_addr;

      // check if fault address is within thread stack
      if (addr < thread->stack_base() &&
          addr >= thread->stack_base() - thread->stack_size()) {
        // stack overflow
        if (thread->in_stack_yellow_zone(addr)) {
          thread->disable_stack_yellow_zone();
          ShouldNotCallThis();
        }
        else if (thread->in_stack_red_zone(addr)) {
          thread->disable_stack_red_zone();
          ShouldNotCallThis();
        }
        else {
          // Accessing a stack address below sp may cause SEGV if the
          // current thread has a MAP_GROWSDOWN stack.  This should
          // only happen when the current thread was created by user
          // code with the MAP_GROWSDOWN flag and then attached to the
          // VM.  See notes in os_linux.cpp.
          if (thread->osthread()->expanding_stack() == 0) {
            thread->osthread()->set_expanding_stack();
            if (os::Linux::manually_expand_stack(thread, addr)) {
              thread->osthread()->clear_expanding_stack();
              return true;
            }
            thread->osthread()->clear_expanding_stack();
          }
          else {
            fatal("recursive SEGV while expanding stack");
          }
        }
      }
    }

    /*if (thread->thread_state() == _thread_in_Java) {
      ShouldNotCallThis();
    }
    else*/ if (thread->thread_state() == _thread_in_vm &&
               sig == SIGBUS && thread->doing_unsafe_access()) {
      ShouldNotCallThis();
    }

    // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC
    // kicks in and the heap gets shrunk before the field access.
    /*if (sig == SIGSEGV || sig == SIGBUS) {
      address addr = JNI_FastGetField::find_slowcase_pc(pc);
      if (addr != (address)-1) {
        stub = addr;
      }
    }*/

    // Check to see if we caught the safepoint code in the process
    // of write protecting the memory serialization page.  It write
    // enables the page immediately after protecting it so we can
    // just return to retry the write.
    if (sig == SIGSEGV &&
        os::is_memory_serialize_page(thread, (address) info->si_addr)) {
      // Block current thread until permission is restored.
      os::block_on_serialize_page_trap();
      return true;
    }
  }

  // signal-chaining
  if (os::Linux::chained_handler(sig, info, ucVoid)) {
    return true;
  }

  if (!abort_if_unrecognized) {
    // caller wants another chance, so give it to him
    return false;
  }

#ifndef PRODUCT
  if (sig == SIGSEGV) {
    fatal("\n#"
          "\n#    /--------------------\\"
          "\n#    | segmentation fault |"
          "\n#    \\---\\ /--------------/"
          "\n#        /"
          "\n#    [-]        |\\_/|    "
          "\n#    (+)=C      |o o|__  "
          "\n#    | |        =-*-=__\\ "
          "\n#    OOO        c_c_(___)");
  }
#endif // !PRODUCT

  const char *fmt = "caught unhandled signal %d";
  char buf[64];

  snprintf(buf, sizeof(buf), fmt, sig);
  fatal(buf);
}

void os::Linux::init_thread_fpu_state(void) {
  // Nothing to do
}

int os::Linux::get_fpu_control_word() {
  ShouldNotCallThis();
}

void os::Linux::set_fpu_control_word(int fpu) {
  ShouldNotCallThis();
}

bool os::is_allocatable(size_t bytes) {
#ifdef _LP64
  return true;
#else
  if (bytes < 2 * G) {
    return true;
  }

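  // Probe for address space: reserve the requested amount and release it
  // immediately; if the reservation succeeds, the request is allocatable.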
  char* addr = reserve_memory(bytes, NULL);

  if (addr != NULL) {
    release_memory(addr, bytes);
  }

  return addr != NULL;
#endif // _LP64
}

///////////////////////////////////////////////////////////////////////////////
// thread stack

size_t os::Linux::min_stack_allowed = 64 * K;

bool os::Linux::supports_variable_stack_size() {
  return true;
}

size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
#ifdef _LP64
  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
#else
  size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
#endif // _LP64
  return s;
}

size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
  // Only enable glibc guard pages for non-Java threads
  // (Java threads have HotSpot guard pages)
  return (thr_type == java_thread ? 0 : page_size());
}

static void current_stack_region(address *bottom, size_t *size) {
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
    }
    else {
      fatal(err_msg("pthread_getattr_np failed with error = %d", res));
    }
  }

  address stack_bottom;
  size_t stack_bytes;
  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getstack failed with error = %d", res));
  }
  address stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Linux::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getguardsize failed with error = %d", res));
  }
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

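  // Any guard pages occupy the low end of the region reported by
  // pthread_attr_getstack(); stepping stack_bottom past them leaves
  // only the usable stack:
  //
  //   stack_top    -> +--------------------+
  //                   |    usable stack    |
  //   stack_bottom -> +--------------------+  (after adjustment)
  //                   |    guard pages     |
  //                   +--------------------+  (start of region as returned)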
  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be, given what is currently mapped.  This can be huge, so we cap it.
  if (os::Linux::is_initial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }

  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}

address os::current_stack_base() {
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return bottom + size;
}

size_t os::current_stack_size() {
  // stack size includes normal stack and HotSpot guard pages
  address bottom;
  size_t size;
  current_stack_region(&bottom, &size);
  return size;
}

/////////////////////////////////////////////////////////////////////////////
// helper functions for fatal error handler

void os::print_context(outputStream* st, void* context) {
  ShouldNotCallThis();
}

/////////////////////////////////////////////////////////////////////////////
// Stubs for things that would be in linux_zero.s if it existed.
// You probably want to disassemble these monkeys to check they're ok.

extern "C" {
  int SpinPause() {
    // Zero has no spin-pause instruction; return a benign value rather
    // than falling off the end of a non-void function, which is
    // undefined behaviour.
    return 0;
  }

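  // Note: unlike the assembly versions on other ports, these SafeFetch
  // stubs are plain loads with no fault protection; nothing arranges for
  // errValue to be returned on a bad address, so a faulting adr will
  // still crash.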
  int SafeFetch32(int *adr, int errValue) {
    int value = errValue;
    value = *adr;
    return value;
  }

  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
    intptr_t value = errValue;
    value = *adr;
    return value;
  }

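  // The conjoint copies below support overlapping ranges by picking the
  // copy direction: forwards when the destination lies below the source,
  // backwards when it lies above, so no element is overwritten before it
  // has been read.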
  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    if (from > to) {
      jshort *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jshort *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }

  void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    if (from > to) {
      jint *end = from + count;
      while (from < end)
        *(to++) = *(from++);
    }
    else if (from < to) {
      jint *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        *(to--) = *(from--);
    }
  }
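
  // jlongs go through os::atomic_copy64() so each 64-bit element is
  // copied atomically even on 32-bit platforms, where a plain load/store
  // pair could tear the value.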
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }

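  // The arrayof variants copy aligned array data, so they simply
  // delegate to memmove (which already handles overlapping ranges),
  // scaling the element count to bytes.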
  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }

  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }

  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }

  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
};

/////////////////////////////////////////////////////////////////////////////
// Implementations of atomic operations not supported by processors.
//  -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html

#ifndef _LP64
extern "C" {
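  // On 32-bit targets GCC may emit a call to this out-of-line helper
  // when it cannot inline an 8-byte compare-and-swap; defining it here
  // satisfies the linker, and ShouldNotCallThis() catches any
  // unexpected use at runtime.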
  long long unsigned int __sync_val_compare_and_swap_8(
    volatile void *ptr,
    long long unsigned int oldval,
    long long unsigned int newval) {
    ShouldNotCallThis();
  }
};
#endif // !_LP64
