src/os_cpu/linux_zero/vm/os_linux_zero.cpp

Mon, 27 Feb 2012 09:17:44 +0100

author
roland
date
Mon, 27 Feb 2012 09:17:44 +0100
changeset 3606
da4be62fb889
parent 2708
1d1603768966
child 4079
716e6ef4482a
permissions
-rw-r--r--

7147740: add assertions to check stack alignment on VM entry from generated code (x64)
Summary: check stack alignment on VM entry on x64.
Reviewed-by: kvn, never

     1 /*
     2  * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
     3  * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
     4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5  *
     6  * This code is free software; you can redistribute it and/or modify it
     7  * under the terms of the GNU General Public License version 2 only, as
     8  * published by the Free Software Foundation.
     9  *
    10  * This code is distributed in the hope that it will be useful, but WITHOUT
    11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    13  * version 2 for more details (a copy is included in the LICENSE file that
    14  * accompanied this code).
    15  *
    16  * You should have received a copy of the GNU General Public License version
    17  * 2 along with this work; if not, write to the Free Software Foundation,
    18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    19  *
    20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    21  * or visit www.oracle.com if you need additional information or have any
    22  * questions.
    23  *
    24  */
    26 // no precompiled headers
    27 #include "assembler_zero.inline.hpp"
    28 #include "classfile/classLoader.hpp"
    29 #include "classfile/systemDictionary.hpp"
    30 #include "classfile/vmSymbols.hpp"
    31 #include "code/icBuffer.hpp"
    32 #include "code/vtableStubs.hpp"
    33 #include "interpreter/interpreter.hpp"
    34 #include "jvm_linux.h"
    35 #include "memory/allocation.inline.hpp"
    36 #include "mutex_linux.inline.hpp"
    37 #include "nativeInst_zero.hpp"
    38 #include "os_share_linux.hpp"
    39 #include "prims/jniFastGetField.hpp"
    40 #include "prims/jvm.h"
    41 #include "prims/jvm_misc.hpp"
    42 #include "runtime/arguments.hpp"
    43 #include "runtime/extendedPC.hpp"
    44 #include "runtime/frame.inline.hpp"
    45 #include "runtime/interfaceSupport.hpp"
    46 #include "runtime/java.hpp"
    47 #include "runtime/javaCalls.hpp"
    48 #include "runtime/mutexLocker.hpp"
    49 #include "runtime/osThread.hpp"
    50 #include "runtime/sharedRuntime.hpp"
    51 #include "runtime/stubRoutines.hpp"
    52 #include "runtime/timer.hpp"
    53 #include "thread_linux.inline.hpp"
    54 #include "utilities/events.hpp"
    55 #include "utilities/vmError.hpp"
    56 #ifdef COMPILER1
    57 #include "c1/c1_Runtime1.hpp"
    58 #endif
    59 #ifdef COMPILER2
    60 #include "opto/runtime.hpp"
    61 #endif
    63 address os::current_stack_pointer() {
    64   address dummy = (address) &dummy;
    65   return dummy;
    66 }
frame os::get_sender_for_C_frame(frame* fr) {
  // The Zero port does not walk native C frames, so nobody should ever
  // ask for the sender of one.
  ShouldNotCallThis();
}
    72 frame os::current_frame() {
    73   // The only thing that calls this is the stack printing code in
    74   // VMError::report:
    75   //   - Step 110 (printing stack bounds) uses the sp in the frame
    76   //     to determine the amount of free space on the stack.  We
    77   //     set the sp to a close approximation of the real value in
    78   //     order to allow this step to complete.
    79   //   - Step 120 (printing native stack) tries to walk the stack.
    80   //     The frame we create has a NULL pc, which is ignored as an
    81   //     invalid frame.
    82   frame dummy = frame();
    83   dummy.set_sp((intptr_t *) current_stack_pointer());
    84   return dummy;
    85 }
char* os::non_memory_address_word() {
  // Returns a word that can never be mistaken for a real address.
  // Must never look like an address returned by reserve_memory,
  // even in its subfields (as defined by the CPU immediate fields,
  // if the CPU splits constants across multiple instructions).
#ifdef SPARC
  // On SPARC, 0 != %hi(any real address), because there is no
  // allocation in the first 1Kb of the virtual address space.
  return (char *) 0;
#else
  // This is the value for x86; works pretty well for PPC too.
  return (char *) -1;
#endif // SPARC
}
void os::initialize_thread() {
  // Nothing to do: Zero keeps no per-thread OS-level state that needs
  // initializing here.
}
address os::Linux::ucontext_get_pc(ucontext_t* uc) {
  // Zero has no machine pc in the ucontext (bytecodes are interpreted),
  // so this must never be called.
  ShouldNotCallThis();
}
ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                        intptr_t** ret_sp,
                                        intptr_t** ret_fp) {
  // No frame can be reconstructed from a signal context on Zero.
  ShouldNotCallThis();
}
frame os::fetch_frame_from_context(void* ucVoid) {
  // No frame can be reconstructed from a signal context on Zero.
  ShouldNotCallThis();
}
   119 extern "C" JNIEXPORT int
   120 JVM_handle_linux_signal(int sig,
   121                         siginfo_t* info,
   122                         void* ucVoid,
   123                         int abort_if_unrecognized) {
   124   ucontext_t* uc = (ucontext_t*) ucVoid;
   126   Thread* t = ThreadLocalStorage::get_thread_slow();
   128   SignalHandlerMark shm(t);
   130   // Note: it's not uncommon that JNI code uses signal/sigset to
   131   // install then restore certain signal handler (e.g. to temporarily
   132   // block SIGPIPE, or have a SIGILL handler when detecting CPU
   133   // type). When that happens, JVM_handle_linux_signal() might be
   134   // invoked with junk info/ucVoid. To avoid unnecessary crash when
   135   // libjsig is not preloaded, try handle signals that do not require
   136   // siginfo/ucontext first.
   138   if (sig == SIGPIPE || sig == SIGXFSZ) {
   139     // allow chained handler to go first
   140     if (os::Linux::chained_handler(sig, info, ucVoid)) {
   141       return true;
   142     } else {
   143       if (PrintMiscellaneous && (WizardMode || Verbose)) {
   144         char buf[64];
   145         warning("Ignoring %s - see bugs 4229104 or 646499219",
   146                 os::exception_name(sig, buf, sizeof(buf)));
   147       }
   148       return true;
   149     }
   150   }
   152   JavaThread* thread = NULL;
   153   VMThread* vmthread = NULL;
   154   if (os::Linux::signal_handlers_are_installed) {
   155     if (t != NULL ){
   156       if(t->is_Java_thread()) {
   157         thread = (JavaThread*)t;
   158       }
   159       else if(t->is_VM_thread()){
   160         vmthread = (VMThread *)t;
   161       }
   162     }
   163   }
   165   if (info != NULL && thread != NULL) {
   166     // Handle ALL stack overflow variations here
   167     if (sig == SIGSEGV) {
   168       address addr = (address) info->si_addr;
   170       // check if fault address is within thread stack
   171       if (addr < thread->stack_base() &&
   172           addr >= thread->stack_base() - thread->stack_size()) {
   173         // stack overflow
   174         if (thread->in_stack_yellow_zone(addr)) {
   175           thread->disable_stack_yellow_zone();
   176           ShouldNotCallThis();
   177         }
   178         else if (thread->in_stack_red_zone(addr)) {
   179           thread->disable_stack_red_zone();
   180           ShouldNotCallThis();
   181         }
   182         else {
   183           // Accessing stack address below sp may cause SEGV if
   184           // current thread has MAP_GROWSDOWN stack. This should
   185           // only happen when current thread was created by user
   186           // code with MAP_GROWSDOWN flag and then attached to VM.
   187           // See notes in os_linux.cpp.
   188           if (thread->osthread()->expanding_stack() == 0) {
   189             thread->osthread()->set_expanding_stack();
   190             if (os::Linux::manually_expand_stack(thread, addr)) {
   191               thread->osthread()->clear_expanding_stack();
   192               return true;
   193             }
   194             thread->osthread()->clear_expanding_stack();
   195           }
   196           else {
   197             fatal("recursive segv. expanding stack.");
   198           }
   199         }
   200       }
   201     }
   203     /*if (thread->thread_state() == _thread_in_Java) {
   204       ShouldNotCallThis();
   205     }
   206     else*/ if (thread->thread_state() == _thread_in_vm &&
   207                sig == SIGBUS && thread->doing_unsafe_access()) {
   208       ShouldNotCallThis();
   209     }
   211     // jni_fast_Get<Primitive>Field can trap at certain pc's if a GC
   212     // kicks in and the heap gets shrunk before the field access.
   213     /*if (sig == SIGSEGV || sig == SIGBUS) {
   214       address addr = JNI_FastGetField::find_slowcase_pc(pc);
   215       if (addr != (address)-1) {
   216         stub = addr;
   217       }
   218     }*/
   220     // Check to see if we caught the safepoint code in the process
   221     // of write protecting the memory serialization page.  It write
   222     // enables the page immediately after protecting it so we can
   223     // just return to retry the write.
   224     if (sig == SIGSEGV &&
   225         os::is_memory_serialize_page(thread, (address) info->si_addr)) {
   226       // Block current thread until permission is restored.
   227       os::block_on_serialize_page_trap();
   228       return true;
   229     }
   230   }
   232   // signal-chaining
   233   if (os::Linux::chained_handler(sig, info, ucVoid)) {
   234      return true;
   235   }
   237   if (!abort_if_unrecognized) {
   238     // caller wants another chance, so give it to him
   239     return false;
   240   }
   242 #ifndef PRODUCT
   243   if (sig == SIGSEGV) {
   244     fatal("\n#"
   245           "\n#    /--------------------\\"
   246           "\n#    | segmentation fault |"
   247           "\n#    \\---\\ /--------------/"
   248           "\n#        /"
   249           "\n#    [-]        |\\_/|    "
   250           "\n#    (+)=C      |o o|__  "
   251           "\n#    | |        =-*-=__\\ "
   252           "\n#    OOO        c_c_(___)");
   253   }
   254 #endif // !PRODUCT
   256   const char *fmt = "caught unhandled signal %d";
   257   char buf[64];
   259   sprintf(buf, fmt, sig);
   260   fatal(buf);
   261 }
void os::Linux::init_thread_fpu_state(void) {
  // Nothing to do: Zero leaves the FPU in its default state.
}
int os::Linux::get_fpu_control_word() {
  // Zero never manipulates the FPU control word directly.
  ShouldNotCallThis();
}
void os::Linux::set_fpu_control_word(int fpu) {
  // Zero never manipulates the FPU control word directly.
  ShouldNotCallThis();
}
   275 bool os::is_allocatable(size_t bytes) {
   276 #ifdef _LP64
   277   return true;
   278 #else
   279   if (bytes < 2 * G) {
   280     return true;
   281   }
   283   char* addr = reserve_memory(bytes, NULL);
   285   if (addr != NULL) {
   286     release_memory(addr, bytes);
   287   }
   289   return addr != NULL;
   290 #endif // _LP64
   291 }
   293 ///////////////////////////////////////////////////////////////////////////////
   294 // thread stack
// Minimum usable thread stack size accepted by the VM.
size_t os::Linux::min_stack_allowed = 64 * K;
bool os::Linux::supports_variable_stack_size() {
  // Threads may be created with caller-specified stack sizes.
  return true;
}
   302 size_t os::Linux::default_stack_size(os::ThreadType thr_type) {
   303 #ifdef _LP64
   304   size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   305 #else
   306   size_t s = (thr_type == os::compiler_thread ? 2 * M : 512 * K);
   307 #endif // _LP64
   308   return s;
   309 }
   311 size_t os::Linux::default_guard_size(os::ThreadType thr_type) {
   312   // Only enable glibc guard pages for non-Java threads
   313   // (Java threads have HotSpot guard pages)
   314   return (thr_type == java_thread ? 0 : page_size());
   315 }
// Compute the usable stack region of the current thread, excluding any
// glibc guard pages.  On return *bottom is the lowest usable address
// and *size the number of usable bytes.  Aborts the VM on any pthread
// query failure.
static void current_stack_region(address *bottom, size_t *size) {
  pthread_attr_t attr;
  int res = pthread_getattr_np(pthread_self(), &attr);
  if (res != 0) {
    if (res == ENOMEM) {
      vm_exit_out_of_memory(0, "pthread_getattr_np");
    }
    else {
      fatal(err_msg("pthread_getattr_np failed with errno = %d", res));
    }
  }

  address stack_bottom;
  size_t stack_bytes;
  res = pthread_attr_getstack(&attr, (void **) &stack_bottom, &stack_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getstack failed with errno = %d", res));
  }
  address stack_top = stack_bottom + stack_bytes;

  // The block of memory returned by pthread_attr_getstack() includes
  // guard pages where present.  We need to trim these off.
  size_t page_bytes = os::Linux::page_size();
  assert(((intptr_t) stack_bottom & (page_bytes - 1)) == 0, "unaligned stack");

  size_t guard_bytes;
  res = pthread_attr_getguardsize(&attr, &guard_bytes);
  if (res != 0) {
    fatal(err_msg("pthread_attr_getguardsize failed with errno = %d", res));
  }
  // Round the guard size up to whole pages before converting to a count.
  int guard_pages = align_size_up(guard_bytes, page_bytes) / page_bytes;
  assert(guard_bytes == guard_pages * page_bytes, "unaligned guard");

#ifdef IA64
  // IA64 has two stacks sharing the same area of memory, a normal
  // stack growing downwards and a register stack growing upwards.
  // Guard pages, if present, are in the centre.  This code splits
  // the stack in two even without guard pages, though in theory
  // there's nothing to stop us allocating more to the normal stack
  // or more to the register stack if one or the other were found
  // to grow faster.
  int total_pages = align_size_down(stack_bytes, page_bytes) / page_bytes;
  stack_bottom += (total_pages - guard_pages) / 2 * page_bytes;
#endif // IA64

  // Skip past the guard pages at the low end of the region.
  stack_bottom += guard_bytes;

  pthread_attr_destroy(&attr);

  // The initial thread has a growable stack, and the size reported
  // by pthread_attr_getstack is the maximum size it could possibly
  // be given what currently mapped.  This can be huge, so we cap it.
  if (os::Linux::is_initial_thread()) {
    stack_bytes = stack_top - stack_bottom;

    if (stack_bytes > JavaThread::stack_size_at_create())
      stack_bytes = JavaThread::stack_size_at_create();

    stack_bottom = stack_top - stack_bytes;
  }

  // Sanity check: the current stack pointer must lie inside the region.
  assert(os::current_stack_pointer() >= stack_bottom, "should do");
  assert(os::current_stack_pointer() < stack_top, "should do");

  *bottom = stack_bottom;
  *size = stack_top - stack_bottom;
}
   385 address os::current_stack_base() {
   386   address bottom;
   387   size_t size;
   388   current_stack_region(&bottom, &size);
   389   return bottom + size;
   390 }
   392 size_t os::current_stack_size() {
   393   // stack size includes normal stack and HotSpot guard pages
   394   address bottom;
   395   size_t size;
   396   current_stack_region(&bottom, &size);
   397   return size;
   398 }
   400 /////////////////////////////////////////////////////////////////////////////
   401 // helper functions for fatal error handler
void os::print_context(outputStream* st, void* context) {
  // Zero has no machine context worth printing in error reports.
  ShouldNotCallThis();
}
void os::print_register_info(outputStream *st, void *context) {
  // Zero has no machine registers worth printing in error reports.
  ShouldNotCallThis();
}
   411 /////////////////////////////////////////////////////////////////////////////
   412 // Stubs for things that would be in linux_zero.s if it existed.
   413 // You probably want to disassemble these monkeys to check they're ok.
   415 extern "C" {
   416   int SpinPause() {
   417   }
  // Fetch *adr, which may be an unmapped address.  The result is staged
  // through a local pre-set to errValue; presumably the VM's fault
  // handling is expected to recover past the faulting load so errValue
  // is returned — NOTE(review): confirm fault recovery is wired up for
  // Zero; as written a faulting load will reach the signal handler.
  int SafeFetch32(int *adr, int errValue) {
    int value = errValue;
    value = *adr;
    return value;
  }
  // Word-sized variant of SafeFetch32; same staging pattern and the
  // same caveat about fault recovery (see NOTE on SafeFetch32 above
  // does not apply here — restated: presumably fault handling skips
  // the load so errValue survives; TODO confirm for Zero).
  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
    intptr_t value = errValue;
    value = *adr;
    return value;
  }
   430   void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
   431     if (from > to) {
   432       jshort *end = from + count;
   433       while (from < end)
   434         *(to++) = *(from++);
   435     }
   436     else if (from < to) {
   437       jshort *end = from;
   438       from += count - 1;
   439       to   += count - 1;
   440       while (from >= end)
   441         *(to--) = *(from--);
   442     }
   443   }
   444   void _Copy_conjoint_jints_atomic(jint* from, jint* to, size_t count) {
   445     if (from > to) {
   446       jint *end = from + count;
   447       while (from < end)
   448         *(to++) = *(from++);
   449     }
   450     else if (from < to) {
   451       jint *end = from;
   452       from += count - 1;
   453       to   += count - 1;
   454       while (from >= end)
   455         *(to--) = *(from--);
   456     }
   457   }
  // Overlap-safe element-wise copy of jlongs.  Each element goes
  // through os::atomic_copy64 so 64-bit values are never torn, even on
  // 32-bit platforms.  Direction is chosen from the relative order of
  // the ranges: forwards when the destination is below the source,
  // backwards when it is above.
  void _Copy_conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    if (from > to) {
      jlong *end = from + count;
      while (from < end)
        os::atomic_copy64(from++, to++);
    }
    else if (from < to) {
      jlong *end = from;
      from += count - 1;
      to   += count - 1;
      while (from >= end)
        os::atomic_copy64(from--, to--);
    }
  }
  // Array copy of raw bytes; memmove handles any overlap.
  void _Copy_arrayof_conjoint_bytes(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count);
  }
  // Array copy of jshorts; count is in elements (2 bytes each) and
  // memmove handles any overlap.
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
                                      HeapWord* to,
                                      size_t    count) {
    memmove(to, from, count * 2);
  }
  // Array copy of jints; count is in elements (4 bytes each) and
  // memmove handles any overlap.
  void _Copy_arrayof_conjoint_jints(HeapWord* from,
                                    HeapWord* to,
                                    size_t    count) {
    memmove(to, from, count * 4);
  }
  // Array copy of jlongs; count is in elements (8 bytes each) and
  // memmove handles any overlap.
  void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
                                     HeapWord* to,
                                     size_t    count) {
    memmove(to, from, count * 8);
  }
   493 };
   495 /////////////////////////////////////////////////////////////////////////////
   496 // Implementations of atomic operations not supported by processors.
   497 //  -- http://gcc.gnu.org/onlinedocs/gcc-4.2.1/gcc/Atomic-Builtins.html
#ifndef _LP64
extern "C" {
  // gcc emits calls to this helper on 32-bit targets that lack a
  // native 8-byte compare-and-swap (see the gcc atomic builtins
  // documentation).  Zero is not expected to perform 64-bit CAS on
  // such platforms, so reaching this stub indicates a bug.
  long long unsigned int __sync_val_compare_and_swap_8(
    volatile void *ptr,
    long long unsigned int oldval,
    long long unsigned int newval) {
    ShouldNotCallThis();
  }
};
#endif // !_LP64
#ifndef PRODUCT
void os::verify_stack_alignment() {
  // Debug-build hook added by 7147740, which asserts stack alignment on
  // VM entry from generated code on x64.  Zero generates no machine
  // code, so there is no alignment contract to check here.
}
#endif

mercurial