Merge

author      asaha
date        Tue, 17 Jan 2017 09:21:05 -0800
changeset   8711:e6d246d3fdfc
parent      8710:229d5dd40a3f
parent      8671:b4bdf3484720
child       8715:567e410935e5

     1.1 --- a/src/os/linux/vm/os_linux.cpp	Thu Dec 22 15:55:08 2016 -0800
     1.2 +++ b/src/os/linux/vm/os_linux.cpp	Tue Jan 17 09:21:05 2017 -0800
     1.3 @@ -1075,29 +1075,30 @@
     1.4  
     1.5  // Locate initial thread stack. This special handling of initial thread stack
     1.6  // is needed because pthread_getattr_np() on most (all?) Linux distros returns
     1.7 -// bogus value for initial thread.
     1.8 +// bogus value for the primordial process thread. While the launcher has created
     1.9 +// the VM in a new thread since JDK 6, we still have to allow for the use of the
    1.10 +// JNI invocation API from a primordial thread.
    1.11  void os::Linux::capture_initial_stack(size_t max_size) {
    1.12 -  // stack size is the easy part, get it from RLIMIT_STACK
    1.13 -  size_t stack_size;
    1.14 +
    1.15 +  // max_size is either 0 (which means accept OS default for thread stacks) or
    1.16 +  // a user-specified value known to be at least the minimum needed. If we
    1.17 +  // are actually on the primordial thread we can make it appear that we have a
    1.18 +  // smaller max_size stack by inserting the guard pages at that location. But we
    1.19 +  // cannot do anything to emulate a larger stack than what has been provided by
    1.20 +  // the OS or threading library. In fact if we try to use a stack greater than
    1.21 +  // what is set by rlimit then we will crash the hosting process.
    1.22 +
    1.23 +  // Maximum stack size is the easy part, get it from RLIMIT_STACK.
    1.24 +  // If this is "unlimited" then it will be a huge value.
    1.25    struct rlimit rlim;
    1.26    getrlimit(RLIMIT_STACK, &rlim);
    1.27 -  stack_size = rlim.rlim_cur;
    1.28 +  size_t stack_size = rlim.rlim_cur;
    1.29  
    1.30    // 6308388: a bug in ld.so will relocate its own .data section to the
    1.31    //   lower end of primordial stack; reduce ulimit -s value a little bit
    1.32    //   so we won't install guard page on ld.so's data section.
    1.33    stack_size -= 2 * page_size();
    1.34  
    1.35 -  // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
    1.36 -  //   7.1, in both cases we will get 2G in return value.
    1.37 -  // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
    1.38 -  //   SuSE 7.2, Debian) can not handle alternate signal stack correctly
    1.39 -  //   for initial thread if its stack size exceeds 6M. Cap it at 2M,
    1.40 -  //   in case other parts in glibc still assumes 2M max stack size.
    1.41 -  // FIXME: alt signal stack is gone, maybe we can relax this constraint?
    1.42 -  // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
    1.43 -  if (stack_size > 2 * K * K IA64_ONLY(*2))
    1.44 -      stack_size = 2 * K * K IA64_ONLY(*2);
    1.45    // Try to figure out where the stack base (top) is. This is harder.
    1.46    //
    1.47    // When an application is started, glibc saves the initial stack pointer in
    1.48 @@ -1257,14 +1258,18 @@
    1.49    // stack_top could be partially down the page so align it
    1.50    stack_top = align_size_up(stack_top, page_size());
    1.51  
    1.52 -  if (max_size && stack_size > max_size) {
    1.53 -     _initial_thread_stack_size = max_size;
    1.54 +  // Allowed stack value is minimum of max_size and what we derived from rlimit
    1.55 +  if (max_size > 0) {
    1.56 +    _initial_thread_stack_size = MIN2(max_size, stack_size);
    1.57    } else {
    1.58 -     _initial_thread_stack_size = stack_size;
    1.59 +    // Accept the rlimit max, but if stack is unlimited then it will be huge, so
    1.60 +    // clamp it at 8MB as we do on Solaris
    1.61 +    _initial_thread_stack_size = MIN2(stack_size, 8*M);
    1.62    }
    1.63  
    1.64    _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
    1.65    _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
    1.66 +  assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!");
    1.67  }
    1.68  
    1.69  ////////////////////////////////////////////////////////////////////////////////
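
As context for the new clamping logic above, a minimal standalone sketch (not the HotSpot code; MIN2, the page-alignment helper and the 8 MB Solaris-style cap are modelled in plain C++ with illustrative names):

#include <sys/resource.h>
#include <algorithm>
#include <cstddef>

// Illustrative stand-in for capture_initial_stack(): derive the usable
// primordial-thread stack size from RLIMIT_STACK and the requested max_size.
static size_t initial_stack_size(size_t max_size, size_t page_size) {
  struct rlimit rlim;
  getrlimit(RLIMIT_STACK, &rlim);                             // may be "unlimited", i.e. huge
  size_t stack_size = (size_t)rlim.rlim_cur - 2 * page_size;  // stay clear of ld.so's .data (6308388)
  const size_t eight_mb = 8u * 1024u * 1024u;
  size_t chosen = (max_size > 0) ? std::min(max_size, stack_size)  // user limit wins only if smaller
                                 : std::min(stack_size, eight_mb); // otherwise cap an unlimited stack
  return chosen & ~(page_size - 1);                           // align down (page_size is a power of two)
}
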
     2.1 --- a/src/os/windows/vm/os_windows.cpp	Thu Dec 22 15:55:08 2016 -0800
     2.2 +++ b/src/os/windows/vm/os_windows.cpp	Tue Jan 17 09:21:05 2017 -0800
     2.3 @@ -1,5 +1,5 @@
     2.4  /*
     2.5 - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     2.6 + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
     2.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8   *
     2.9   * This code is free software; you can redistribute it and/or modify it
    2.10 @@ -1747,8 +1747,7 @@
    2.11      if (is_workstation) {
    2.12        st->print("10");
    2.13      } else {
    2.14 -      // The server version name of Windows 10 is not known at this time
    2.15 -      st->print("%d.%d", major_version, minor_version);
    2.16 +      st->print("Server 2016");
    2.17      }
    2.18      break;
    2.19  
     3.1 --- a/src/share/vm/adlc/formssel.cpp	Thu Dec 22 15:55:08 2016 -0800
     3.2 +++ b/src/share/vm/adlc/formssel.cpp	Tue Jan 17 09:21:05 2017 -0800
     3.3 @@ -648,6 +648,7 @@
     3.4    if( strcmp(_matrule->_opType,"MemBarReleaseLock") == 0 ) return true;
     3.5    if( strcmp(_matrule->_opType,"MemBarAcquireLock") == 0 ) return true;
     3.6    if( strcmp(_matrule->_opType,"MemBarStoreStore") == 0 ) return true;
     3.7 +  if( strcmp(_matrule->_opType,"MemBarVolatile") == 0 ) return true;
     3.8    if( strcmp(_matrule->_opType,"StoreFence") == 0 ) return true;
     3.9    if( strcmp(_matrule->_opType,"LoadFence") == 0 ) return true;
    3.10  
     4.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Dec 22 15:55:08 2016 -0800
     4.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Jan 17 09:21:05 2017 -0800
     4.3 @@ -1485,6 +1485,21 @@
     4.4    // Check to see whether we are inlining. If so, Return
     4.5    // instructions become Gotos to the continuation point.
     4.6    if (continuation() != NULL) {
     4.7 +
     4.8 +    int invoke_bci = state()->caller_state()->bci();
     4.9 +
    4.10 +    if (x != NULL) {
    4.11 +      ciMethod* caller = state()->scope()->caller()->method();
    4.12 +      Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci);
    4.13 +      if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) {
    4.14 +        ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type();
    4.15 +        if (declared_ret_type->is_klass() && x->exact_type() == NULL &&
    4.16 +            x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) {
    4.17 +          x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before()));
    4.18 +        }
    4.19 +      }
    4.20 +    }
    4.21 +
    4.22      assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet");
    4.23  
    4.24      if (compilation()->env()->dtrace_method_probes()) {
    4.25 @@ -1508,7 +1523,6 @@
    4.26      // State at end of inlined method is the state of the caller
    4.27      // without the method parameters on stack, including the
    4.28      // return value, if any, of the inlined method on operand stack.
    4.29 -    int invoke_bci = state()->caller_state()->bci();
    4.30      set_state(state()->caller_state()->copy_for_parsing());
    4.31      if (x != NULL) {
    4.32        state()->push(x->type(), x);
     5.1 --- a/src/share/vm/c1/c1_Instruction.cpp	Thu Dec 22 15:55:08 2016 -0800
     5.2 +++ b/src/share/vm/c1/c1_Instruction.cpp	Tue Jan 17 09:21:05 2017 -0800
     5.3 @@ -360,7 +360,8 @@
     5.4  }
     5.5  
     5.6  ciType* Invoke::declared_type() const {
     5.7 -  ciType *t = _target->signature()->return_type();
     5.8 +  ciSignature* declared_signature = state()->scope()->method()->get_declared_signature_at_bci(state()->bci());
     5.9 +  ciType *t = declared_signature->return_type();
    5.10    assert(t->basic_type() != T_VOID, "need return value of void method?");
    5.11    return t;
    5.12  }
     6.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Dec 22 15:55:08 2016 -0800
     6.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jan 17 09:21:05 2017 -0800
     6.3 @@ -3191,14 +3191,14 @@
     6.4        Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
     6.5        int start = 0;
     6.6        int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
     6.7 -      if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
     6.8 +      if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
     6.9          // first argument is not profiled at call (method handle invoke)
    6.10          assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
    6.11          start = 1;
    6.12        }
    6.13        ciSignature* callee_signature = x->callee()->signature();
    6.14        // method handle call to virtual method
    6.15 -      bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
    6.16 +      bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
    6.17        ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
    6.18  
    6.19        bool ignored_will_link;
     7.1 --- a/src/share/vm/ci/ciField.cpp	Thu Dec 22 15:55:08 2016 -0800
     7.2 +++ b/src/share/vm/ci/ciField.cpp	Tue Jan 17 09:21:05 2017 -0800
     7.3 @@ -207,7 +207,7 @@
     7.4    // Check to see if the field is constant.
     7.5    bool is_final = this->is_final();
     7.6    bool is_stable = FoldStableValues && this->is_stable();
     7.7 -  if (_holder->is_initialized() && (is_final || is_stable)) {
     7.8 +  if (_holder->is_initialized() && ((is_final && !has_initialized_final_update()) || is_stable)) {
     7.9      if (!this->is_static()) {
    7.10        // A field can be constant if it's a final static field or if
    7.11        // it's a final non-static field of a trusted class (classes in
     8.1 --- a/src/share/vm/ci/ciField.hpp	Thu Dec 22 15:55:08 2016 -0800
     8.2 +++ b/src/share/vm/ci/ciField.hpp	Tue Jan 17 09:21:05 2017 -0800
     8.3 @@ -124,22 +124,8 @@
     8.4      return _holder->is_shared() && !is_static();
     8.5    }
     8.6  
     8.7 -  // Is this field a constant?
     8.8 -  //
     8.9 -  // Clarification: A field is considered constant if:
    8.10 -  //   1. The field is both static and final
    8.11 -  //   2. The canonical holder of the field has undergone
    8.12 -  //      static initialization.
    8.13 -  //   3. If the field is an object or array, then the oop
    8.14 -  //      in question is allocated in perm space.
    8.15 -  //   4. The field is not one of the special static/final
    8.16 -  //      non-constant fields.  These are java.lang.System.in
    8.17 -  //      and java.lang.System.out.  Abomination.
    8.18 -  //
    8.19 -  // A field is also considered constant if it is marked @Stable
    8.20 -  // and is non-null (or non-zero, if a primitive).
    8.21 -  // For non-static fields, the null/zero check must be
    8.22 -  // arranged by the user, as constant_value().is_null_or_zero().
    8.23 +  // Is this field a constant? See ciField::initialize_from() for details
    8.24 +  // about how a field is determined to be constant.
    8.25    bool is_constant() { return _is_constant; }
    8.26  
    8.27    // Get the constant value of this field.
    8.28 @@ -176,6 +162,9 @@
    8.29    bool is_stable      () { return flags().is_stable(); }
    8.30    bool is_volatile    () { return flags().is_volatile(); }
    8.31    bool is_transient   () { return flags().is_transient(); }
    8.32 +  // The field is modified outside of instance initializer methods
    8.33 +  // (or class/initializer methods if the field is static).
    8.34 +  bool has_initialized_final_update() { return flags().has_initialized_final_update(); }
    8.35  
    8.36    bool is_call_site_target() {
    8.37      ciInstanceKlass* callsite_klass = CURRENT_ENV->CallSite_klass();
     9.1 --- a/src/share/vm/ci/ciFlags.hpp	Thu Dec 22 15:55:08 2016 -0800
     9.2 +++ b/src/share/vm/ci/ciFlags.hpp	Tue Jan 17 09:21:05 2017 -0800
     9.3 @@ -46,20 +46,25 @@
     9.4  
     9.5  public:
     9.6    // Java access flags
     9.7 -  bool is_public      () const         { return (_flags & JVM_ACC_PUBLIC      ) != 0; }
     9.8 -  bool is_private     () const         { return (_flags & JVM_ACC_PRIVATE     ) != 0; }
     9.9 -  bool is_protected   () const         { return (_flags & JVM_ACC_PROTECTED   ) != 0; }
    9.10 -  bool is_static      () const         { return (_flags & JVM_ACC_STATIC      ) != 0; }
    9.11 -  bool is_final       () const         { return (_flags & JVM_ACC_FINAL       ) != 0; }
    9.12 -  bool is_synchronized() const         { return (_flags & JVM_ACC_SYNCHRONIZED) != 0; }
    9.13 -  bool is_super       () const         { return (_flags & JVM_ACC_SUPER       ) != 0; }
    9.14 -  bool is_volatile    () const         { return (_flags & JVM_ACC_VOLATILE    ) != 0; }
    9.15 -  bool is_transient   () const         { return (_flags & JVM_ACC_TRANSIENT   ) != 0; }
    9.16 -  bool is_native      () const         { return (_flags & JVM_ACC_NATIVE      ) != 0; }
    9.17 -  bool is_interface   () const         { return (_flags & JVM_ACC_INTERFACE   ) != 0; }
    9.18 -  bool is_abstract    () const         { return (_flags & JVM_ACC_ABSTRACT    ) != 0; }
    9.19 -  bool is_strict      () const         { return (_flags & JVM_ACC_STRICT      ) != 0; }
    9.20 -  bool is_stable      () const         { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
    9.21 +  bool is_public               () const { return (_flags & JVM_ACC_PUBLIC                    ) != 0; }
    9.22 +  bool is_private              () const { return (_flags & JVM_ACC_PRIVATE                   ) != 0; }
    9.23 +  bool is_protected            () const { return (_flags & JVM_ACC_PROTECTED                 ) != 0; }
    9.24 +  bool is_static               () const { return (_flags & JVM_ACC_STATIC                    ) != 0; }
    9.25 +  bool is_final                () const { return (_flags & JVM_ACC_FINAL                     ) != 0; }
    9.26 +  bool is_synchronized         () const { return (_flags & JVM_ACC_SYNCHRONIZED              ) != 0; }
    9.27 +  bool is_super                () const { return (_flags & JVM_ACC_SUPER                     ) != 0; }
    9.28 +  bool is_volatile             () const { return (_flags & JVM_ACC_VOLATILE                  ) != 0; }
    9.29 +  bool is_transient            () const { return (_flags & JVM_ACC_TRANSIENT                 ) != 0; }
    9.30 +  bool is_native               () const { return (_flags & JVM_ACC_NATIVE                    ) != 0; }
    9.31 +  bool is_interface            () const { return (_flags & JVM_ACC_INTERFACE                 ) != 0; }
    9.32 +  bool is_abstract             () const { return (_flags & JVM_ACC_ABSTRACT                  ) != 0; }
    9.33 +  bool is_strict               () const { return (_flags & JVM_ACC_STRICT                    ) != 0; }
    9.34 +  bool is_stable               () const { return (_flags & JVM_ACC_FIELD_STABLE              ) != 0; }
    9.35 +  // In case the current object represents a field, return true if
    9.36 +  // the field is modified outside of instance initializer methods
    9.37 +  // (or class/initializer methods if the field is static) and false
    9.38 +  // otherwise.
    9.39 +  bool has_initialized_final_update() const { return (_flags & JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE) != 0; };
    9.40  
    9.41    // Conversion
    9.42    jint   as_int()                      { return _flags; }
    10.1 --- a/src/share/vm/ci/ciMethod.hpp	Thu Dec 22 15:55:08 2016 -0800
    10.2 +++ b/src/share/vm/ci/ciMethod.hpp	Tue Jan 17 09:21:05 2017 -0800
    10.3 @@ -243,6 +243,21 @@
    10.4  
    10.5    ciField*      get_field_at_bci( int bci, bool &will_link);
    10.6    ciMethod*     get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature);
    10.7 +
    10.8 +  ciSignature*  get_declared_signature_at_bci(int bci) {
    10.9 +    bool ignored_will_link;
   10.10 +    ciSignature* declared_signature;
   10.11 +    get_method_at_bci(bci, ignored_will_link, &declared_signature);
   10.12 +    assert(declared_signature != NULL, "cannot be null");
   10.13 +    return declared_signature;
   10.14 +  }
   10.15 +
   10.16 +  ciMethod*     get_method_at_bci(int bci) {
   10.17 +    bool ignored_will_link;
   10.18 +    ciSignature* ignored_declared_signature;
   10.19 +    return get_method_at_bci(bci, ignored_will_link, &ignored_declared_signature);
   10.20 +  }
   10.21 +
   10.22    // Given a certain calling environment, find the monomorphic target
   10.23    // for the call.  Return NULL if the call is not monomorphic in
   10.24    // its calling environment.
    11.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Dec 22 15:55:08 2016 -0800
    11.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jan 17 09:21:05 2017 -0800
    11.3 @@ -1,5 +1,5 @@
    11.4  /*
    11.5 - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
    11.6 + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
    11.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8   *
    11.9   * This code is free software; you can redistribute it and/or modify it
   11.10 @@ -631,11 +631,10 @@
   11.11        double overall_cm_overhead =
   11.12          (double) MaxGCPauseMillis * marking_overhead /
   11.13          (double) GCPauseIntervalMillis;
   11.14 -      double cpu_ratio = 1.0 / (double) os::processor_count();
   11.15 +      double cpu_ratio = 1.0 / os::initial_active_processor_count();
   11.16        double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio);
   11.17        double marking_task_overhead =
   11.18 -        overall_cm_overhead / marking_thread_num *
   11.19 -                                                (double) os::processor_count();
   11.20 +        overall_cm_overhead / marking_thread_num * os::initial_active_processor_count();
   11.21        double sleep_factor =
   11.22                           (1.0 - marking_task_overhead) / marking_task_overhead;
   11.23  
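
A rough worked example of the sizing math above, now based on the processor count sampled once at VM start-up; the flag values below are illustrative, not VM defaults:

#include <cmath>
#include <cstdio>

int main() {
  // Assumed inputs: 8 active processors at start-up, a 200 ms pause goal out of
  // a 1000 ms interval, and a 30% marking overhead.
  const int initial_active_processors = 8;
  const double MaxGCPauseMillis = 200.0, GCPauseIntervalMillis = 1000.0, marking_overhead_pct = 30.0;

  double overall_cm_overhead   = MaxGCPauseMillis * (marking_overhead_pct / 100.0) / GCPauseIntervalMillis;
  double cpu_ratio             = 1.0 / initial_active_processors;
  double marking_thread_num    = std::ceil(overall_cm_overhead / cpu_ratio);
  double marking_task_overhead = overall_cm_overhead / marking_thread_num * initial_active_processors;

  printf("marking threads = %.0f, per-task overhead = %.3f\n",
         marking_thread_num, marking_task_overhead);   // 1 thread, 0.480 with these inputs
  return 0;
}
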
    12.1 --- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Thu Dec 22 15:55:08 2016 -0800
    12.2 +++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp	Tue Jan 17 09:21:05 2017 -0800
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    12.6 + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -80,7 +80,7 @@
   12.11  
   12.12  // Determines how many mutator threads can process the buffers in parallel.
   12.13  uint DirtyCardQueueSet::num_par_ids() {
   12.14 -  return (uint)os::processor_count();
   12.15 +  return (uint)os::initial_active_processor_count();
   12.16  }
   12.17  
   12.18  void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
    13.1 --- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Thu Dec 22 15:55:08 2016 -0800
    13.2 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Tue Jan 17 09:21:05 2017 -0800
    13.3 @@ -452,9 +452,13 @@
    13.4    // event lock and do the read again in case some other thread had already
    13.5    // succeeded and done the resize.
    13.6    int cur_collection = Universe::heap()->total_collections();
    13.7 -  if (_last_LNC_resizing_collection[i] != cur_collection) {
    13.8 +  // Updated _last_LNC_resizing_collection[i] must not be visible before
    13.9 +  // _lowest_non_clean and friends are visible. Therefore use acquire/release
    13.10 +  // to guarantee this on non-TSO architectures.
   13.11 +  if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
   13.12      MutexLocker x(ParGCRareEvent_lock);
   13.13 -    if (_last_LNC_resizing_collection[i] != cur_collection) {
   13.14 +    // This load_acquire is here for clarity only. The MutexLocker already fences.
   13.15 +    if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) {
   13.16        if (_lowest_non_clean[i] == NULL ||
   13.17            n_chunks != _lowest_non_clean_chunk_size[i]) {
   13.18  
   13.19 @@ -474,7 +478,8 @@
   13.20              _lowest_non_clean[i][j] = NULL;
   13.21          }
   13.22        }
   13.23 -      _last_LNC_resizing_collection[i] = cur_collection;
   13.24 +      // Make sure this gets visible only after _lowest_non_clean* was initialized
   13.25 +      OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection);
   13.26      }
   13.27    }
   13.28    // In any case, now do the initialization.
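
A minimal sketch of the acquire/release double-checked pattern used above, written against std::atomic instead of HotSpot's OrderAccess; names and types are placeholders for illustration only:

#include <atomic>
#include <cstddef>
#include <mutex>

static std::atomic<int> last_resizing_collection{-1};
static std::mutex rare_event_lock;
static int* lowest_non_clean = nullptr;   // stands in for the per-region chunk array

void maybe_resize(int cur_collection, size_t n_chunks) {
  // Fast path: the acquire load pairs with the release store below, so a thread
  // that observes cur_collection is guaranteed to also see the initialized array.
  if (last_resizing_collection.load(std::memory_order_acquire) != cur_collection) {
    std::lock_guard<std::mutex> x(rare_event_lock);
    if (last_resizing_collection.load(std::memory_order_acquire) != cur_collection) {
      delete[] lowest_non_clean;
      lowest_non_clean = new int[n_chunks]();          // (re)initialize the data first
      // Publish the flag only after the data is fully initialized.
      last_resizing_collection.store(cur_collection, std::memory_order_release);
    }
  }
}
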
    14.1 --- a/src/share/vm/interpreter/rewriter.cpp	Thu Dec 22 15:55:08 2016 -0800
    14.2 +++ b/src/share/vm/interpreter/rewriter.cpp	Tue Jan 17 09:21:05 2017 -0800
    14.3 @@ -396,10 +396,45 @@
    14.4            break;
    14.5          }
    14.6  
    14.7 +        case Bytecodes::_putstatic      :
    14.8 +        case Bytecodes::_putfield       : {
    14.9 +          if (!reverse) {
   14.10 +            // Check if any final field of the class given as parameter is modified
   14.11 +            // outside of initializer methods of the class. Fields that are modified
   14.12 +            // are marked with a flag. For marked fields, the compilers do not perform
   14.13 +            // constant folding (as the field can be changed after initialization).
   14.14 +            //
   14.15 +            // The check is performed after verification and only if verification has
   14.16 +            // succeeded. Therefore, the class is guaranteed to be well-formed.
   14.17 +            InstanceKlass* klass = method->method_holder();
   14.18 +            u2 bc_index = Bytes::get_Java_u2(bcp + prefix_length + 1);
   14.19 +            constantPoolHandle cp(method->constants());
   14.20 +            Symbol* ref_class_name = cp->klass_name_at(cp->klass_ref_index_at(bc_index));
   14.21 +
   14.22 +            if (klass->name() == ref_class_name) {
   14.23 +              Symbol* field_name = cp->name_ref_at(bc_index);
   14.24 +              Symbol* field_sig = cp->signature_ref_at(bc_index);
   14.25 +
   14.26 +              fieldDescriptor fd;
   14.27 +              if (klass->find_field(field_name, field_sig, &fd) != NULL) {
   14.28 +                if (fd.access_flags().is_final()) {
   14.29 +                  if (fd.access_flags().is_static()) {
   14.30 +                    if (!method->is_static_initializer()) {
   14.31 +                      fd.set_has_initialized_final_update(true);
   14.32 +                    }
   14.33 +                  } else {
   14.34 +                    if (!method->is_object_initializer()) {
   14.35 +                      fd.set_has_initialized_final_update(true);
   14.36 +                    }
   14.37 +                  }
   14.38 +                }
   14.39 +              }
   14.40 +            }
   14.41 +          }
   14.42 +        }
   14.43 +        // fall through
   14.44          case Bytecodes::_getstatic      : // fall through
   14.45 -        case Bytecodes::_putstatic      : // fall through
   14.46          case Bytecodes::_getfield       : // fall through
   14.47 -        case Bytecodes::_putfield       : // fall through
   14.48          case Bytecodes::_invokevirtual  : // fall through
   14.49          case Bytecodes::_invokestatic   :
   14.50          case Bytecodes::_invokeinterface:
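
The intent of the new putfield/putstatic case can be condensed into the following standalone sketch; the types are simplified stand-ins, whereas the real code resolves the field through the constant pool as shown above:

#include <string>

struct FieldInfo {
  bool is_final;
  bool is_static;
  bool has_initialized_final_update;   // the flag the rewriter sets
};

// A final field written by any method other than its matching initializer
// (<clinit> for static fields, <init> for instance fields) gets flagged;
// ciField later refuses to constant-fold flagged fields.
void mark_final_update(FieldInfo& fd, const std::string& writer_method_name) {
  if (!fd.is_final) return;
  const bool writer_is_initializer =
      fd.is_static ? (writer_method_name == "<clinit>")
                   : (writer_method_name == "<init>");
  if (!writer_is_initializer) {
    fd.has_initialized_final_update = true;
  }
}
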
    15.1 --- a/src/share/vm/memory/cardTableModRefBS.hpp	Thu Dec 22 15:55:08 2016 -0800
    15.2 +++ b/src/share/vm/memory/cardTableModRefBS.hpp	Tue Jan 17 09:21:05 2017 -0800
    15.3 @@ -217,7 +217,7 @@
    15.4    CardArr* _lowest_non_clean;
    15.5    size_t*  _lowest_non_clean_chunk_size;
    15.6    uintptr_t* _lowest_non_clean_base_chunk_index;
    15.7 -  int* _last_LNC_resizing_collection;
    15.8 +  volatile int* _last_LNC_resizing_collection;
    15.9  
   15.10    // Initializes "lowest_non_clean" to point to the array for the region
   15.11    // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
    16.1 --- a/src/share/vm/oops/method.cpp	Thu Dec 22 15:55:08 2016 -0800
    16.2 +++ b/src/share/vm/oops/method.cpp	Tue Jan 17 09:21:05 2017 -0800
    16.3 @@ -590,7 +590,7 @@
    16.4  }
    16.5  
    16.6  bool Method::is_initializer() const {
    16.7 -  return name() == vmSymbols::object_initializer_name() || is_static_initializer();
    16.8 +  return is_object_initializer() || is_static_initializer();
    16.9  }
   16.10  
   16.11  bool Method::has_valid_initializer_flags() const {
   16.12 @@ -606,6 +606,9 @@
   16.13           has_valid_initializer_flags();
   16.14  }
   16.15  
   16.16 +bool Method::is_object_initializer() const {
   16.17 +   return name() == vmSymbols::object_initializer_name();
   16.18 +}
   16.19  
   16.20  objArrayHandle Method::resolved_checked_exceptions_impl(Method* this_oop, TRAPS) {
   16.21    int length = this_oop->checked_exceptions_length();
    17.1 --- a/src/share/vm/oops/method.hpp	Thu Dec 22 15:55:08 2016 -0800
    17.2 +++ b/src/share/vm/oops/method.hpp	Tue Jan 17 09:21:05 2017 -0800
    17.3 @@ -627,6 +627,9 @@
    17.4    // valid static initializer flags.
    17.5    bool is_static_initializer() const;
    17.6  
    17.7 +  // returns true if the method name is <init>
    17.8 +  bool is_object_initializer() const;
    17.9 +
   17.10    // compiled code support
   17.11    // NOTE: code() is inherently racy as deopt can be clearing code
   17.12    // simultaneously. Use with caution.
    18.1 --- a/src/share/vm/opto/callGenerator.cpp	Thu Dec 22 15:55:08 2016 -0800
    18.2 +++ b/src/share/vm/opto/callGenerator.cpp	Tue Jan 17 09:21:05 2017 -0800
    18.3 @@ -188,7 +188,10 @@
    18.4    // the call instruction will have a seemingly deficient out-count.
    18.5    // (The bailout says something misleading about an "infinite loop".)
    18.6    if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) {
    18.7 -    kit.inc_sp(method()->arg_size());  // restore arguments
    18.8 +    assert(Bytecodes::is_invoke(kit.java_bc()), err_msg("%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc())));
    18.9 +    ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
   18.10 +    int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc());
   18.11 +    kit.inc_sp(arg_size);  // restore arguments
   18.12      kit.uncommon_trap(Deoptimization::Reason_null_check,
   18.13                        Deoptimization::Action_none,
   18.14                        NULL, "null receiver");
   18.15 @@ -1119,7 +1122,10 @@
   18.16  JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
   18.17    GraphKit kit(jvms);
   18.18    // Take the trap with arguments pushed on the stack.  (Cf. null_check_receiver).
   18.19 -  int nargs = method()->arg_size();
   18.20 +  // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
   18.21 +  // Use callsite signature always.
   18.22 +  ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci());
   18.23 +  int nargs = declared_method->arg_size();
   18.24    kit.inc_sp(nargs);
   18.25    assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed");
   18.26    if (_reason == Deoptimization::Reason_class_check &&
    19.1 --- a/src/share/vm/opto/compile.cpp	Thu Dec 22 15:55:08 2016 -0800
    19.2 +++ b/src/share/vm/opto/compile.cpp	Tue Jan 17 09:21:05 2017 -0800
    19.3 @@ -1595,6 +1595,17 @@
    19.4    }
    19.5  }
    19.6  
    19.7 +BasicType Compile::AliasType::basic_type() const {
    19.8 +  if (element() != NULL) {
    19.9 +    const Type* element = adr_type()->is_aryptr()->elem();
   19.10 +    return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
   19.11 +  } if (field() != NULL) {
   19.12 +    return field()->layout_type();
   19.13 +  } else {
   19.14 +    return T_ILLEGAL; // unknown
   19.15 +  }
   19.16 +}
   19.17 +
   19.18  //---------------------------------print_on------------------------------------
   19.19  #ifndef PRODUCT
   19.20  void Compile::AliasType::print_on(outputStream* st) {
    20.1 --- a/src/share/vm/opto/compile.hpp	Thu Dec 22 15:55:08 2016 -0800
    20.2 +++ b/src/share/vm/opto/compile.hpp	Tue Jan 17 09:21:05 2017 -0800
    20.3 @@ -152,6 +152,8 @@
    20.4        _element = e;
    20.5      }
    20.6  
    20.7 +    BasicType basic_type() const;
    20.8 +
    20.9      void print_on(outputStream* st) PRODUCT_RETURN;
   20.10    };
   20.11  
    21.1 --- a/src/share/vm/opto/graphKit.cpp	Thu Dec 22 15:55:08 2016 -0800
    21.2 +++ b/src/share/vm/opto/graphKit.cpp	Tue Jan 17 09:21:05 2017 -0800
    21.3 @@ -1452,7 +1452,11 @@
    21.4  // factory methods in "int adr_idx"
    21.5  Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
    21.6                            int adr_idx,
    21.7 -                          MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) {
    21.8 +                          MemNode::MemOrd mo,
    21.9 +                          LoadNode::ControlDependency control_dependency,
   21.10 +                          bool require_atomic_access,
   21.11 +                          bool unaligned,
   21.12 +                          bool mismatched) {
   21.13    assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
   21.14    const TypePtr* adr_type = NULL; // debug-mode-only argument
   21.15    debug_only(adr_type = C->get_adr_type(adr_idx));
   21.16 @@ -1465,6 +1469,12 @@
   21.17    } else {
   21.18      ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency);
   21.19    }
   21.20 +  if (unaligned) {
   21.21 +    ld->as_Load()->set_unaligned_access();
   21.22 +  }
   21.23 +  if (mismatched) {
   21.24 +    ld->as_Load()->set_mismatched_access();
   21.25 +  }
   21.26    ld = _gvn.transform(ld);
   21.27    if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) {
   21.28      // Improve graph before escape analysis and boxing elimination.
   21.29 @@ -1476,7 +1486,9 @@
   21.30  Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt,
   21.31                                  int adr_idx,
   21.32                                  MemNode::MemOrd mo,
   21.33 -                                bool require_atomic_access) {
   21.34 +                                bool require_atomic_access,
   21.35 +                                bool unaligned,
   21.36 +                                bool mismatched) {
   21.37    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
   21.38    const TypePtr* adr_type = NULL;
   21.39    debug_only(adr_type = C->get_adr_type(adr_idx));
   21.40 @@ -1489,6 +1501,12 @@
   21.41    } else {
   21.42      st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   21.43    }
   21.44 +  if (unaligned) {
   21.45 +    st->as_Store()->set_unaligned_access();
   21.46 +  }
   21.47 +  if (mismatched) {
   21.48 +    st->as_Store()->set_mismatched_access();
   21.49 +  }
   21.50    st = _gvn.transform(st);
   21.51    set_memory(st, adr_idx);
   21.52    // Back-to-back stores can only remove intermediate store with DU info
   21.53 @@ -1588,7 +1606,8 @@
   21.54                            const TypeOopPtr* val_type,
   21.55                            BasicType bt,
   21.56                            bool use_precise,
   21.57 -                          MemNode::MemOrd mo) {
   21.58 +                          MemNode::MemOrd mo,
   21.59 +                          bool mismatched) {
   21.60    // Transformation of a value which could be NULL pointer (CastPP #NULL)
   21.61    // could be delayed during Parse (for example, in adjust_map_after_if()).
   21.62    // Execute transformation here to avoid barrier generation in such case.
   21.63 @@ -1608,7 +1627,7 @@
   21.64                NULL /* pre_val */,
   21.65                bt);
   21.66  
   21.67 -  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo);
   21.68 +  Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched);
   21.69    post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise);
   21.70    return store;
   21.71  }
   21.72 @@ -1620,7 +1639,8 @@
   21.73                               const TypePtr* adr_type,
   21.74                               Node* val,
   21.75                               BasicType bt,
   21.76 -                             MemNode::MemOrd mo) {
   21.77 +                             MemNode::MemOrd mo,
   21.78 +                             bool mismatched) {
   21.79    Compile::AliasType* at = C->alias_type(adr_type);
   21.80    const TypeOopPtr* val_type = NULL;
   21.81    if (adr_type->isa_instptr()) {
   21.82 @@ -1639,7 +1659,7 @@
   21.83    if (val_type == NULL) {
   21.84      val_type = TypeInstPtr::BOTTOM;
   21.85    }
   21.86 -  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo);
   21.87 +  return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched);
   21.88  }
   21.89  
   21.90  
    22.1 --- a/src/share/vm/opto/graphKit.hpp	Thu Dec 22 15:55:08 2016 -0800
    22.2 +++ b/src/share/vm/opto/graphKit.hpp	Tue Jan 17 09:21:05 2017 -0800
    22.3 @@ -517,23 +517,28 @@
    22.4    // of volatile fields.
    22.5    Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
    22.6                    MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
    22.7 -                  bool require_atomic_access = false) {
    22.8 +                  bool require_atomic_access = false, bool unaligned = false,
    22.9 +                  bool mismatched = false) {
   22.10      // This version computes alias_index from bottom_type
   22.11      return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
   22.12 -                     mo, control_dependency, require_atomic_access);
   22.13 +                     mo, control_dependency, require_atomic_access,
   22.14 +                     unaligned, mismatched);
   22.15    }
   22.16    Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type,
   22.17                    MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
   22.18 -                  bool require_atomic_access = false) {
   22.19 +                  bool require_atomic_access = false, bool unaligned = false,
   22.20 +                  bool mismatched = false) {
   22.21      // This version computes alias_index from an address type
   22.22      assert(adr_type != NULL, "use other make_load factory");
   22.23      return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
   22.24 -                     mo, control_dependency, require_atomic_access);
   22.25 +                     mo, control_dependency, require_atomic_access,
   22.26 +                     unaligned, mismatched);
   22.27    }
   22.28    // This is the base version which is given an alias index.
   22.29    Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx,
   22.30                    MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest,
   22.31 -                  bool require_atomic_access = false);
   22.32 +                  bool require_atomic_access = false, bool unaligned = false,
   22.33 +                  bool mismatched = false);
   22.34  
   22.35    // Create & transform a StoreNode and store the effect into the
   22.36    // parser's memory state.
   22.37 @@ -546,19 +551,24 @@
   22.38    Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
   22.39                          const TypePtr* adr_type,
   22.40                          MemNode::MemOrd mo,
   22.41 -                        bool require_atomic_access = false) {
   22.42 +                        bool require_atomic_access = false,
   22.43 +                        bool unaligned = false,
   22.44 +                        bool mismatched = false) {
   22.45      // This version computes alias_index from an address type
   22.46      assert(adr_type != NULL, "use other store_to_memory factory");
   22.47      return store_to_memory(ctl, adr, val, bt,
   22.48                             C->get_alias_index(adr_type),
   22.49 -                           mo, require_atomic_access);
   22.50 +                           mo, require_atomic_access,
   22.51 +                           unaligned, mismatched);
   22.52    }
   22.53    // This is the base version which is given alias index
   22.54    // Return the new StoreXNode
   22.55    Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
   22.56                          int adr_idx,
   22.57                          MemNode::MemOrd,
   22.58 -                        bool require_atomic_access = false);
   22.59 +                        bool require_atomic_access = false,
   22.60 +                        bool unaligned = false,
   22.61 +                        bool mismatched = false);
   22.62  
   22.63  
   22.64    // All in one pre-barrier, store, post_barrier
   22.65 @@ -581,7 +591,8 @@
   22.66                    const TypeOopPtr* val_type,
   22.67                    BasicType bt,
   22.68                    bool use_precise,
   22.69 -                  MemNode::MemOrd mo);
   22.70 +                  MemNode::MemOrd mo,
   22.71 +                  bool mismatched = false);
   22.72  
   22.73    Node* store_oop_to_object(Node* ctl,
   22.74                              Node* obj,   // containing obj
   22.75 @@ -612,7 +623,8 @@
   22.76                               const TypePtr* adr_type,
   22.77                               Node* val,
   22.78                               BasicType bt,
   22.79 -                             MemNode::MemOrd mo);
   22.80 +                             MemNode::MemOrd mo,
   22.81 +                             bool mismatched = false);
   22.82  
   22.83    // For the few case where the barriers need special help
   22.84    void pre_barrier(bool do_load, Node* ctl,
   22.85 @@ -656,7 +668,10 @@
   22.86    // callee (with all arguments still on the stack).
   22.87    Node* null_check_receiver_before_call(ciMethod* callee) {
   22.88      assert(!callee->is_static(), "must be a virtual method");
   22.89 -    const int nargs = callee->arg_size();
   22.90 +    // Callsite signature can be different from actual method being called (i.e _linkTo* sites).
   22.91 +    // Use callsite signature always.
   22.92 +    ciMethod* declared_method = method()->get_method_at_bci(bci());
   22.93 +    const int nargs = declared_method->arg_size();
   22.94      inc_sp(nargs);
   22.95      Node* n = null_check_receiver();
   22.96      dec_sp(nargs);
    23.1 --- a/src/share/vm/opto/idealKit.cpp	Thu Dec 22 15:55:08 2016 -0800
    23.2 +++ b/src/share/vm/opto/idealKit.cpp	Tue Jan 17 09:21:05 2017 -0800
    23.3 @@ -368,7 +368,8 @@
    23.4  
    23.5  Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
    23.6                        int adr_idx,
    23.7 -                      MemNode::MemOrd mo, bool require_atomic_access) {
    23.8 +                      MemNode::MemOrd mo, bool require_atomic_access,
    23.9 +                      bool mismatched) {
   23.10    assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory");
   23.11    const TypePtr* adr_type = NULL;
   23.12    debug_only(adr_type = C->get_adr_type(adr_idx));
   23.13 @@ -379,6 +380,9 @@
   23.14    } else {
   23.15      st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo);
   23.16    }
   23.17 +  if (mismatched) {
   23.18 +    st->as_Store()->set_mismatched_access();
   23.19 +  }
   23.20    st = transform(st);
   23.21    set_memory(st, adr_idx);
   23.22  
    24.1 --- a/src/share/vm/opto/idealKit.hpp	Thu Dec 22 15:55:08 2016 -0800
    24.2 +++ b/src/share/vm/opto/idealKit.hpp	Tue Jan 17 09:21:05 2017 -0800
    24.3 @@ -227,7 +227,9 @@
    24.4                BasicType bt,
    24.5                int adr_idx,
    24.6                MemNode::MemOrd mo,
    24.7 -              bool require_atomic_access = false);
    24.8 +              bool require_atomic_access = false,
    24.9 +              bool mismatched = false
   24.10 +              );
   24.11  
   24.12    // Store a card mark ordered after store_oop
   24.13    Node* storeCM(Node* ctl,
    25.1 --- a/src/share/vm/opto/library_call.cpp	Thu Dec 22 15:55:08 2016 -0800
    25.2 +++ b/src/share/vm/opto/library_call.cpp	Tue Jan 17 09:21:05 2017 -0800
    25.3 @@ -230,7 +230,7 @@
    25.4    // Generates the guards that check whether the result of
    25.5    // Unsafe.getObject should be recorded in an SATB log buffer.
    25.6    void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar);
    25.7 -  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile);
    25.8 +  bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned);
    25.9    bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static);
   25.10    static bool klass_needs_init_guard(Node* kls);
   25.11    bool inline_unsafe_allocate();
   25.12 @@ -795,63 +795,63 @@
   25.13    case vmIntrinsics::_indexOf:                  return inline_string_indexOf();
   25.14    case vmIntrinsics::_equals:                   return inline_string_equals();
   25.15  
   25.16 -  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile);
   25.17 -  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile);
   25.18 -  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile);
   25.19 -  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile);
   25.20 -  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile);
   25.21 -  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile);
   25.22 -  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile);
   25.23 -  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
   25.24 -  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
   25.25 -
   25.26 -  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile);
   25.27 -  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile);
   25.28 -  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile);
   25.29 -  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile);
   25.30 -  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile);
   25.31 -  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile);
   25.32 -  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile);
   25.33 -  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
   25.34 -  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
   25.35 -
   25.36 -  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile);
   25.37 -  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile);
   25.38 -  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile);
   25.39 -  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile);
   25.40 -  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile);
   25.41 -  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile);
   25.42 -  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile);
   25.43 -  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile);
   25.44 -
   25.45 -  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile);
   25.46 -  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile);
   25.47 -  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile);
   25.48 -  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile);
   25.49 -  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile);
   25.50 -  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile);
   25.51 -  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile);
   25.52 -  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile);
   25.53 -
   25.54 -  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile);
   25.55 -  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile);
   25.56 -  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile);
   25.57 -  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile);
   25.58 -  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile);
   25.59 -  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile);
   25.60 -  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile);
   25.61 -  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile);
   25.62 -  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile);
   25.63 -
   25.64 -  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile);
   25.65 -  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile);
   25.66 -  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile);
   25.67 -  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile);
   25.68 -  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile);
   25.69 -  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile);
   25.70 -  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile);
   25.71 -  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile);
   25.72 -  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile);
   25.73 +  case vmIntrinsics::_getObject:                return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,  !is_volatile, false);
   25.74 +  case vmIntrinsics::_getBoolean:               return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false);
   25.75 +  case vmIntrinsics::_getByte:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
   25.76 +  case vmIntrinsics::_getShort:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
   25.77 +  case vmIntrinsics::_getChar:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
   25.78 +  case vmIntrinsics::_getInt:                   return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,     !is_volatile, false);
   25.79 +  case vmIntrinsics::_getLong:                  return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
   25.80 +  case vmIntrinsics::_getFloat:                 return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
   25.81 +  case vmIntrinsics::_getDouble:                return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
   25.82 +
   25.83 +  case vmIntrinsics::_putObject:                return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,  !is_volatile, false);
   25.84 +  case vmIntrinsics::_putBoolean:               return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN, !is_volatile, false);
   25.85 +  case vmIntrinsics::_putByte:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
   25.86 +  case vmIntrinsics::_putShort:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
   25.87 +  case vmIntrinsics::_putChar:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
   25.88 +  case vmIntrinsics::_putInt:                   return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,     !is_volatile, false);
   25.89 +  case vmIntrinsics::_putLong:                  return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
   25.90 +  case vmIntrinsics::_putFloat:                 return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
   25.91 +  case vmIntrinsics::_putDouble:                return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
   25.92 +
   25.93 +  case vmIntrinsics::_getByte_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE,    !is_volatile, false);
   25.94 +  case vmIntrinsics::_getShort_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT,   !is_volatile, false);
   25.95 +  case vmIntrinsics::_getChar_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR,    !is_volatile, false);
   25.96 +  case vmIntrinsics::_getInt_raw:               return inline_unsafe_access( is_native_ptr, !is_store, T_INT,     !is_volatile, false);
   25.97 +  case vmIntrinsics::_getLong_raw:              return inline_unsafe_access( is_native_ptr, !is_store, T_LONG,    !is_volatile, false);
   25.98 +  case vmIntrinsics::_getFloat_raw:             return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT,   !is_volatile, false);
   25.99 +  case vmIntrinsics::_getDouble_raw:            return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE,  !is_volatile, false);
  25.100 +  case vmIntrinsics::_getAddress_raw:           return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false);
  25.101 +
  25.102 +  case vmIntrinsics::_putByte_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_BYTE,    !is_volatile, false);
  25.103 +  case vmIntrinsics::_putShort_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_SHORT,   !is_volatile, false);
  25.104 +  case vmIntrinsics::_putChar_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_CHAR,    !is_volatile, false);
  25.105 +  case vmIntrinsics::_putInt_raw:               return inline_unsafe_access( is_native_ptr,  is_store, T_INT,     !is_volatile, false);
  25.106 +  case vmIntrinsics::_putLong_raw:              return inline_unsafe_access( is_native_ptr,  is_store, T_LONG,    !is_volatile, false);
  25.107 +  case vmIntrinsics::_putFloat_raw:             return inline_unsafe_access( is_native_ptr,  is_store, T_FLOAT,   !is_volatile, false);
  25.108 +  case vmIntrinsics::_putDouble_raw:            return inline_unsafe_access( is_native_ptr,  is_store, T_DOUBLE,  !is_volatile, false);
  25.109 +  case vmIntrinsics::_putAddress_raw:           return inline_unsafe_access( is_native_ptr,  is_store, T_ADDRESS, !is_volatile, false);
  25.110 +
  25.111 +  case vmIntrinsics::_getObjectVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT,   is_volatile, false);
  25.112 +  case vmIntrinsics::_getBooleanVolatile:       return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN,  is_volatile, false);
  25.113 +  case vmIntrinsics::_getByteVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE,     is_volatile, false);
  25.114 +  case vmIntrinsics::_getShortVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT,    is_volatile, false);
  25.115 +  case vmIntrinsics::_getCharVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR,     is_volatile, false);
  25.116 +  case vmIntrinsics::_getIntVolatile:           return inline_unsafe_access(!is_native_ptr, !is_store, T_INT,      is_volatile, false);
  25.117 +  case vmIntrinsics::_getLongVolatile:          return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG,     is_volatile, false);
  25.118 +  case vmIntrinsics::_getFloatVolatile:         return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT,    is_volatile, false);
  25.119 +  case vmIntrinsics::_getDoubleVolatile:        return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE,   is_volatile, false);
  25.120 +
  25.121 +  case vmIntrinsics::_putObjectVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_OBJECT,   is_volatile, false);
  25.122 +  case vmIntrinsics::_putBooleanVolatile:       return inline_unsafe_access(!is_native_ptr,  is_store, T_BOOLEAN,  is_volatile, false);
  25.123 +  case vmIntrinsics::_putByteVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_BYTE,     is_volatile, false);
  25.124 +  case vmIntrinsics::_putShortVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_SHORT,    is_volatile, false);
  25.125 +  case vmIntrinsics::_putCharVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_CHAR,     is_volatile, false);
  25.126 +  case vmIntrinsics::_putIntVolatile:           return inline_unsafe_access(!is_native_ptr,  is_store, T_INT,      is_volatile, false);
  25.127 +  case vmIntrinsics::_putLongVolatile:          return inline_unsafe_access(!is_native_ptr,  is_store, T_LONG,     is_volatile, false);
  25.128 +  case vmIntrinsics::_putFloatVolatile:         return inline_unsafe_access(!is_native_ptr,  is_store, T_FLOAT,    is_volatile, false);
  25.129 +  case vmIntrinsics::_putDoubleVolatile:        return inline_unsafe_access(!is_native_ptr,  is_store, T_DOUBLE,   is_volatile, false);
  25.130  
  25.131    case vmIntrinsics::_prefetchRead:             return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static);
  25.132    case vmIntrinsics::_prefetchWrite:            return inline_unsafe_prefetch(!is_native_ptr,  is_store, !is_static);
  25.133 @@ -2554,8 +2554,9 @@
  25.134    return NULL;
  25.135  }
  25.136  
  25.137 -bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) {
  25.138 +bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) {
  25.139    if (callee()->is_static())  return false;  // caller must have the capability!
  25.140 +  assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
  25.141  
  25.142  #ifndef PRODUCT
  25.143    {
  25.144 @@ -2629,16 +2630,45 @@
  25.145      val = is_store ? argument(3) : NULL;
  25.146    }
  25.147  
  25.148 +  // Can base be NULL? Otherwise, always on-heap access.
  25.149 +  bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop));
  25.150 +
  25.151    const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
  25.152  
  25.153 +  // Try to categorize the address.
  25.154 +  Compile::AliasType* alias_type = C->alias_type(adr_type);
  25.155 +  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  25.156 +
  25.157 +  if (alias_type->adr_type() == TypeInstPtr::KLASS ||
  25.158 +      alias_type->adr_type() == TypeAryPtr::RANGE) {
  25.159 +    return false; // not supported
  25.160 +  }
  25.161 +
  25.162 +  bool mismatched = false;
  25.163 +  BasicType bt = alias_type->basic_type();
  25.164 +  if (bt != T_ILLEGAL) {
  25.165 +    assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access");
  25.166 +    if (bt == T_BYTE && adr_type->isa_aryptr()) {
   25.167 +      // Alias type doesn't differentiate between byte[] and boolean[].
  25.168 +      // Use address type to get the element type.
  25.169 +      bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
  25.170 +    }
  25.171 +    if (bt == T_ARRAY || bt == T_NARROWOOP) {
  25.172 +      // accessing an array field with getObject is not a mismatch
  25.173 +      bt = T_OBJECT;
  25.174 +    }
  25.175 +    if ((bt == T_OBJECT) != (type == T_OBJECT)) {
  25.176 +      // Don't intrinsify mismatched object accesses
  25.177 +      return false;
  25.178 +    }
  25.179 +    mismatched = (bt != type);
  25.180 +  }
  25.181 +
  25.182 +  assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched");
  25.183 +
  25.184    // First guess at the value type.
  25.185    const Type *value_type = Type::get_const_basic_type(type);
  25.186  
  25.187 -  // Try to categorize the address.  If it comes up as TypeJavaPtr::BOTTOM,
  25.188 -  // there was not enough information to nail it down.
  25.189 -  Compile::AliasType* alias_type = C->alias_type(adr_type);
  25.190 -  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  25.191 -
  25.192    // We will need memory barriers unless we can determine a unique
  25.193    // alias category for this reference.  (Note:  If for some reason
  25.194    // the barriers get omitted and the unsafe reference begins to "pollute"
  25.195 @@ -2701,7 +2731,7 @@
  25.196      MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered;
  25.197      // To be valid, unsafe loads may depend on other conditions than
  25.198      // the one that guards them: pin the Load node
  25.199 -    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile);
  25.200 +    Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched);
  25.201      // load value
  25.202      switch (type) {
  25.203      case T_BOOLEAN:
  25.204 @@ -2747,12 +2777,12 @@
  25.205  
  25.206      MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered;
  25.207      if (type != T_OBJECT ) {
  25.208 -      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile);
  25.209 +      (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched);
  25.210      } else {
  25.211        // Possibly an oop being stored to Java heap or native memory
  25.212 -      if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) {
  25.213 +      if (!can_access_non_heap) {
  25.214          // oop to Java heap.
  25.215 -        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
  25.216 +        (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
  25.217        } else {
  25.218          // We can't tell at compile time if we are storing in the Java heap or outside
  25.219          // of it. So we need to emit code to conditionally do the proper type of
  25.220 @@ -2764,11 +2794,11 @@
  25.221          __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); {
  25.222            // Sync IdealKit and graphKit.
  25.223            sync_kit(ideal);
  25.224 -          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo);
  25.225 +          Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched);
  25.226            // Update IdealKit memory.
  25.227            __ sync_kit(this);
  25.228          } __ else_(); {
  25.229 -          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile);
  25.230 +          __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched);
  25.231          } __ end_if();
  25.232          // Final sync IdealKit and GraphKit.
  25.233          final_sync(ideal);
  25.234 @@ -2939,12 +2969,6 @@
  25.235      newval   = argument(4);  // type: oop, int, or long
  25.236    }
  25.237  
  25.238 -  // Null check receiver.
  25.239 -  receiver = null_check(receiver);
  25.240 -  if (stopped()) {
  25.241 -    return true;
  25.242 -  }
  25.243 -
  25.244    // Build field offset expression.
  25.245    // We currently rely on the cookies produced by Unsafe.xxxFieldOffset
  25.246    // to be plain byte offsets, which are also the same as those accepted
  25.247 @@ -2955,11 +2979,18 @@
  25.248    Node* adr = make_unsafe_address(base, offset);
  25.249    const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
  25.250  
  25.251 +  Compile::AliasType* alias_type = C->alias_type(adr_type);
  25.252 +  BasicType bt = alias_type->basic_type();
  25.253 +  if (bt != T_ILLEGAL &&
  25.254 +      ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
  25.255 +    // Don't intrinsify mismatched object accesses.
  25.256 +    return false;
  25.257 +  }
  25.258 +
  25.259    // For CAS, unlike inline_unsafe_access, there seems no point in
  25.260    // trying to refine types. Just use the coarse types here.
  25.261 +  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  25.262    const Type *value_type = Type::get_const_basic_type(type);
  25.263 -  Compile::AliasType* alias_type = C->alias_type(adr_type);
  25.264 -  assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
  25.265  
  25.266    if (kind == LS_xchg && type == T_OBJECT) {
  25.267      const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type);
  25.268 @@ -2968,6 +2999,12 @@
  25.269      }
  25.270    }
  25.271  
  25.272 +  // Null check receiver.
  25.273 +  receiver = null_check(receiver);
  25.274 +  if (stopped()) {
  25.275 +    return true;
  25.276 +  }
  25.277 +
  25.278    int alias_idx = C->get_alias_index(adr_type);
  25.279  
  25.280    // Memory-model-wise, a LoadStore acts like a little synchronized
    26.1 --- a/src/share/vm/opto/memnode.cpp	Thu Dec 22 15:55:08 2016 -0800
    26.2 +++ b/src/share/vm/opto/memnode.cpp	Tue Jan 17 09:21:05 2017 -0800
    26.3 @@ -67,8 +67,15 @@
    26.4    dump_adr_type(this, _adr_type, st);
    26.5  
    26.6    Compile* C = Compile::current();
    26.7 -  if( C->alias_type(_adr_type)->is_volatile() )
    26.8 +  if (C->alias_type(_adr_type)->is_volatile()) {
    26.9      st->print(" Volatile!");
   26.10 +  }
   26.11 +  if (_unaligned_access) {
   26.12 +    st->print(" unaligned");
   26.13 +  }
   26.14 +  if (_mismatched_access) {
   26.15 +    st->print(" mismatched");
   26.16 +  }
   26.17  }
   26.18  
   26.19  void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) {
   26.20 @@ -3322,6 +3329,9 @@
   26.21  // within the initialized memory.
   26.22  intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
   26.23    const int FAIL = 0;
   26.24 +  if (st->is_unaligned_access()) {
   26.25 +    return FAIL;
   26.26 +  }
   26.27    if (st->req() != MemNode::ValueIn + 1)
   26.28      return FAIL;                // an inscrutable StoreNode (card mark?)
   26.29    Node* ctl = st->in(MemNode::Control);
    27.1 --- a/src/share/vm/opto/memnode.hpp	Thu Dec 22 15:55:08 2016 -0800
    27.2 +++ b/src/share/vm/opto/memnode.hpp	Tue Jan 17 09:21:05 2017 -0800
    27.3 @@ -39,11 +39,14 @@
    27.4  //------------------------------MemNode----------------------------------------
    27.5  // Load or Store, possibly throwing a NULL pointer exception
    27.6  class MemNode : public Node {
    27.7 +private:
    27.8 +  bool _unaligned_access; // Unaligned access from unsafe
    27.9 +  bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance
   27.10  protected:
   27.11  #ifdef ASSERT
   27.12    const TypePtr* _adr_type;     // What kind of memory is being addressed?
   27.13  #endif
   27.14 -  virtual uint size_of() const; // Size is bigger (ASSERT only)
   27.15 +  virtual uint size_of() const;
   27.16  public:
   27.17    enum { Control,               // When is it safe to do this load?
   27.18           Memory,                // Chunk of memory is being loaded from
   27.19 @@ -57,17 +60,17 @@
   27.20    } MemOrd;
   27.21  protected:
   27.22    MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
   27.23 -    : Node(c0,c1,c2   ) {
   27.24 +    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
   27.25      init_class_id(Class_Mem);
   27.26      debug_only(_adr_type=at; adr_type();)
   27.27    }
   27.28    MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
   27.29 -    : Node(c0,c1,c2,c3) {
   27.30 +    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
   27.31      init_class_id(Class_Mem);
   27.32      debug_only(_adr_type=at; adr_type();)
   27.33    }
   27.34    MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
   27.35 -    : Node(c0,c1,c2,c3,c4) {
   27.36 +    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
   27.37      init_class_id(Class_Mem);
   27.38      debug_only(_adr_type=at; adr_type();)
   27.39    }
   27.40 @@ -129,6 +132,11 @@
   27.41    // the given memory state?  (The state may or may not be in(Memory).)
   27.42    Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
   27.43  
   27.44 +  void set_unaligned_access() { _unaligned_access = true; }
   27.45 +  bool is_unaligned_access() const { return _unaligned_access; }
   27.46 +  void set_mismatched_access() { _mismatched_access = true; }
   27.47 +  bool is_mismatched_access() const { return _mismatched_access; }
   27.48 +
   27.49  #ifndef PRODUCT
   27.50    static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
   27.51    virtual void dump_spec(outputStream *st) const;
    28.1 --- a/src/share/vm/opto/type.hpp	Thu Dec 22 15:55:08 2016 -0800
    28.2 +++ b/src/share/vm/opto/type.hpp	Tue Jan 17 09:21:05 2017 -0800
    28.3 @@ -209,11 +209,11 @@
    28.4    static int cmp( const Type *const t1, const Type *const t2 );
    28.5    // Test for higher or equal in lattice
    28.6    // Variant that drops the speculative part of the types
    28.7 -  int higher_equal(const Type *t) const {
    28.8 +  bool higher_equal(const Type *t) const {
    28.9      return !cmp(meet(t),t->remove_speculative());
   28.10    }
   28.11    // Variant that keeps the speculative part of the types
   28.12 -  int higher_equal_speculative(const Type *t) const {
   28.13 +  bool higher_equal_speculative(const Type *t) const {
   28.14      return !cmp(meet_speculative(t),t);
   28.15    }
   28.16  
    29.1 --- a/src/share/vm/runtime/fieldDescriptor.hpp	Thu Dec 22 15:55:08 2016 -0800
    29.2 +++ b/src/share/vm/runtime/fieldDescriptor.hpp	Tue Jan 17 09:21:05 2017 -0800
    29.3 @@ -106,6 +106,7 @@
    29.4    bool is_field_access_watched()  const    { return access_flags().is_field_access_watched(); }
    29.5    bool is_field_modification_watched() const
    29.6                                             { return access_flags().is_field_modification_watched(); }
    29.7 +  bool has_initialized_final_update() const { return access_flags().has_field_initialized_final_update(); }
    29.8    bool has_generic_signature()    const    { return access_flags().field_has_generic_signature(); }
    29.9  
   29.10    void set_is_field_access_watched(const bool value) {
   29.11 @@ -118,6 +119,11 @@
   29.12      update_klass_field_access_flag();
   29.13    }
   29.14  
   29.15 +  void set_has_initialized_final_update(const bool value) {
   29.16 +    _access_flags.set_has_field_initialized_final_update(value);
   29.17 +    update_klass_field_access_flag();
   29.18 +  }
   29.19 +
   29.20    // Initialization
   29.21    void reinitialize(InstanceKlass* ik, int index);
   29.22  
    30.1 --- a/src/share/vm/runtime/os.cpp	Thu Dec 22 15:55:08 2016 -0800
    30.2 +++ b/src/share/vm/runtime/os.cpp	Tue Jan 17 09:21:05 2017 -0800
    30.3 @@ -1,5 +1,5 @@
    30.4  /*
    30.5 - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    30.6 + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
    30.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.8   *
    30.9   * This code is free software; you can redistribute it and/or modify it
   30.10 @@ -78,6 +78,7 @@
   30.11  uintptr_t         os::_serialize_page_mask = 0;
   30.12  long              os::_rand_seed          = 1;
   30.13  int               os::_processor_count    = 0;
   30.14 +int               os::_initial_active_processor_count = 0;
   30.15  size_t            os::_page_sizes[os::page_sizes_max];
   30.16  
   30.17  #ifndef PRODUCT
   30.18 @@ -322,6 +323,7 @@
   30.19  }
   30.20  
   30.21  void os::init_before_ergo() {
   30.22 +  initialize_initial_active_processor_count();
   30.23    // We need to initialize large page support here because ergonomics takes some
   30.24    // decisions depending on large page support and the calculated large page size.
   30.25    large_page_init();
   30.26 @@ -835,7 +837,11 @@
   30.27    st->print("CPU:");
   30.28    st->print("total %d", os::processor_count());
   30.29    // It's not safe to query number of active processors after crash
   30.30 -  // st->print("(active %d)", os::active_processor_count());
   30.31 +  // st->print("(active %d)", os::active_processor_count()); but we can
   30.32 +  // print the initial number of active processors.
   30.33 +  // We access the raw value here because the assert in the accessor will
   30.34 +  // fail if the crash occurs before initialization of this value.
   30.35 +  st->print(" (initial active %d)", _initial_active_processor_count);
   30.36    st->print(" %s", VM_Version::cpu_features());
   30.37    st->cr();
   30.38    pd_print_cpu_info(st);
   30.39 @@ -1418,6 +1424,11 @@
   30.40    return result;
   30.41  }
   30.42  
   30.43 +void os::initialize_initial_active_processor_count() {
   30.44 +  assert(_initial_active_processor_count == 0, "Initial active processor count already set.");
   30.45 +  _initial_active_processor_count = active_processor_count();
   30.46 +}
   30.47 +
   30.48  void os::SuspendedThreadTask::run() {
   30.49    assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
   30.50    internal_do_task();
    31.1 --- a/src/share/vm/runtime/os.hpp	Thu Dec 22 15:55:08 2016 -0800
    31.2 +++ b/src/share/vm/runtime/os.hpp	Tue Jan 17 09:21:05 2017 -0800
    31.3 @@ -151,6 +151,7 @@
    31.4  
    31.5    static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned);
    31.6  
    31.7 +  static void initialize_initial_active_processor_count();
    31.8   public:
    31.9    static void init(void);                      // Called before command line parsing
   31.10    static void init_before_ergo(void);          // Called after command line parsing
   31.11 @@ -238,6 +239,13 @@
   31.12    // Note that on some OSes this can change dynamically.
   31.13    static int active_processor_count();
   31.14  
   31.15 +  // At startup the number of active CPUs this process is allowed to run on.
   31.16 +  // This value does not change dynamically. May be different from active_processor_count().
   31.17 +  static int initial_active_processor_count() {
   31.18 +    assert(_initial_active_processor_count > 0, "Initial active processor count not set yet.");
   31.19 +    return _initial_active_processor_count;
   31.20 +  }
   31.21 +
   31.22    // Bind processes to processors.
   31.23    //     This is a two step procedure:
   31.24    //     first you generate a distribution of processes to processors,
   31.25 @@ -975,8 +983,9 @@
   31.26  
   31.27  
   31.28   protected:
   31.29 -  static long _rand_seed;                   // seed for random number generator
   31.30 -  static int _processor_count;              // number of processors
   31.31 +  static long _rand_seed;                     // seed for random number generator
   31.32 +  static int _processor_count;                // number of processors
   31.33 +  static int _initial_active_processor_count; // number of active processors during initialization.
   31.34  
   31.35    static char* format_boot_path(const char* format_string,
   31.36                                  const char* home,
    32.1 --- a/src/share/vm/runtime/vm_version.cpp	Thu Dec 22 15:55:08 2016 -0800
    32.2 +++ b/src/share/vm/runtime/vm_version.cpp	Tue Jan 17 09:21:05 2017 -0800
    32.3 @@ -1,5 +1,5 @@
    32.4  /*
    32.5 - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
    32.6 + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
    32.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    32.8   *
    32.9   * This code is free software; you can redistribute it and/or modify it
   32.10 @@ -296,7 +296,7 @@
   32.11      // processor after the first 8.  For example, on a 72 cpu machine
   32.12      // and a chosen fraction of 5/8
   32.13      // use 8 + (72 - 8) * (5/8) == 48 worker threads.
   32.14 -    unsigned int ncpus = (unsigned int) os::active_processor_count();
   32.15 +    unsigned int ncpus = (unsigned int) os::initial_active_processor_count();
   32.16      return (ncpus <= switch_pt) ?
   32.17             ncpus :
   32.18            (switch_pt + ((ncpus - switch_pt) * num) / den);
    33.1 --- a/src/share/vm/services/attachListener.cpp	Thu Dec 22 15:55:08 2016 -0800
    33.2 +++ b/src/share/vm/services/attachListener.cpp	Tue Jan 17 09:21:05 2017 -0800
    33.3 @@ -1,5 +1,5 @@
    33.4  /*
    33.5 - * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
    33.6 + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
    33.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    33.8   *
    33.9   * This code is free software; you can redistribute it and/or modify it
   33.10 @@ -271,13 +271,17 @@
   33.11  // set a uintx global flag using value from AttachOperation
   33.12  static jint set_uintx_flag(const char* name, AttachOperation* op, outputStream* out) {
   33.13    uintx value;
   33.14 -  const char* arg1;
   33.15 -  if ((arg1 = op->arg(1)) != NULL) {
   33.16 -    int n = sscanf(arg1, UINTX_FORMAT, &value);
   33.17 -    if (n != 1) {
   33.18 -      out->print_cr("flag value must be an unsigned integer");
   33.19 -      return JNI_ERR;
   33.20 -    }
   33.21 +
   33.22 +  const char* arg1 = op->arg(1);
   33.23 +  if (arg1 == NULL) {
   33.24 +    out->print_cr("flag value must be specified");
   33.25 +    return JNI_ERR;
   33.26 +  }
   33.27 +
   33.28 +  int n = sscanf(arg1, UINTX_FORMAT, &value);
   33.29 +  if (n != 1) {
   33.30 +    out->print_cr("flag value must be an unsigned integer");
   33.31 +    return JNI_ERR;
   33.32    }
   33.33  
   33.34    if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
    34.1 --- a/src/share/vm/utilities/accessFlags.hpp	Thu Dec 22 15:55:08 2016 -0800
    34.2 +++ b/src/share/vm/utilities/accessFlags.hpp	Tue Jan 17 09:21:05 2017 -0800
    34.3 @@ -76,11 +76,12 @@
    34.4    // These bits must not conflict with any other field-related access flags
    34.5    // (e.g., ACC_ENUM).
    34.6    // Note that the class-related ACC_ANNOTATION bit conflicts with these flags.
    34.7 -  JVM_ACC_FIELD_ACCESS_WATCHED       = 0x00002000,  // field access is watched by JVMTI
    34.8 -  JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000,  // field modification is watched by JVMTI
    34.9 -  JVM_ACC_FIELD_INTERNAL             = 0x00000400,  // internal field, same as JVM_ACC_ABSTRACT
   34.10 -  JVM_ACC_FIELD_STABLE               = 0x00000020,  // @Stable field, same as JVM_ACC_SYNCHRONIZED
   34.11 -  JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature
   34.12 +  JVM_ACC_FIELD_ACCESS_WATCHED            = 0x00002000, // field access is watched by JVMTI
   34.13 +  JVM_ACC_FIELD_MODIFICATION_WATCHED      = 0x00008000, // field modification is watched by JVMTI
   34.14 +  JVM_ACC_FIELD_INTERNAL                  = 0x00000400, // internal field, same as JVM_ACC_ABSTRACT
   34.15 +  JVM_ACC_FIELD_STABLE                    = 0x00000020, // @Stable field, same as JVM_ACC_SYNCHRONIZED and JVM_ACC_SUPER
   34.16 +  JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE  = 0x00000100, // (static) final field updated outside (class) initializer, same as JVM_ACC_NATIVE
   34.17 +  JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE     = 0x00000800, // field has generic signature
   34.18  
   34.19    JVM_ACC_FIELD_INTERNAL_FLAGS       = JVM_ACC_FIELD_ACCESS_WATCHED |
   34.20                                         JVM_ACC_FIELD_MODIFICATION_WATCHED |
   34.21 @@ -150,6 +151,8 @@
   34.22    bool is_field_access_watched() const  { return (_flags & JVM_ACC_FIELD_ACCESS_WATCHED) != 0; }
   34.23    bool is_field_modification_watched() const
   34.24                                          { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; }
   34.25 +  bool has_field_initialized_final_update() const
   34.26 +                                        { return (_flags & JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE) != 0; }
   34.27    bool on_stack() const                 { return (_flags & JVM_ACC_ON_STACK) != 0; }
   34.28    bool is_internal() const              { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; }
   34.29    bool is_stable() const                { return (_flags & JVM_ACC_FIELD_STABLE) != 0; }
   34.30 @@ -229,6 +232,15 @@
   34.31                                             atomic_clear_bits(JVM_ACC_FIELD_MODIFICATION_WATCHED);
   34.32                                           }
   34.33                                         }
   34.34 +
   34.35 +  void set_has_field_initialized_final_update(const bool value) {
   34.36 +    if (value) {
   34.37 +      atomic_set_bits(JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE);
   34.38 +    } else {
   34.39 +      atomic_clear_bits(JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE);
   34.40 +    }
   34.41 +  }
   34.42 +
   34.43    void set_field_has_generic_signature()
   34.44                                         {
   34.45                                           atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
    35.1 --- a/test/compiler/jsr292/NullConstantReceiver.java	Thu Dec 22 15:55:08 2016 -0800
    35.2 +++ b/test/compiler/jsr292/NullConstantReceiver.java	Tue Jan 17 09:21:05 2017 -0800
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    35.6 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -23,8 +23,11 @@
   35.11  
   35.12  /**
   35.13   * @test
   35.14 - * @bug 8059556
   35.15 + * @bug 8059556 8158639 8164508
   35.16 + *
   35.17   * @run main/othervm -Xbatch NullConstantReceiver
   35.18 + * @run main/othervm -Xbatch -XX:CompileCommand=exclude,*::run NullConstantReceiver
   35.19 + * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,*::run NullConstantReceiver
   35.20   */
   35.21  
   35.22  import java.lang.invoke.MethodHandle;
    36.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    36.2 +++ b/test/compiler/profiling/UnsafeAccess.java	Tue Jan 17 09:21:05 2017 -0800
    36.3 @@ -0,0 +1,88 @@
    36.4 +/*
    36.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
    36.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    36.7 + *
    36.8 + * This code is free software; you can redistribute it and/or modify it
    36.9 + * under the terms of the GNU General Public License version 2 only, as
   36.10 + * published by the Free Software Foundation.
   36.11 + *
   36.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   36.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   36.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   36.15 + * version 2 for more details (a copy is included in the LICENSE file that
   36.16 + * accompanied this code).
   36.17 + *
   36.18 + * You should have received a copy of the GNU General Public License version
   36.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   36.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   36.21 + *
   36.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   36.23 + * or visit www.oracle.com if you need additional information or have any
   36.24 + * questions.
   36.25 + */
   36.26 +/*
   36.27 + * @test
   36.28 + * @bug 8134918
   36.29 + * @modules java.base/jdk.internal.misc
   36.30 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation -Xbatch
   36.31 + *                         -XX:CompileCommand=dontinline,UnsafeAccess::test*
   36.32 + *                         UnsafeAccess
   36.33 + */
   36.34 +import sun.misc.Unsafe;
   36.35 +
   36.36 +public class UnsafeAccess {
   36.37 +    private static final Unsafe U = Unsafe.getUnsafe();
   36.38 +
   36.39 +    static Class cls = Object.class;
   36.40 +    static long off = U.ARRAY_OBJECT_BASE_OFFSET;
   36.41 +
   36.42 +    static Object testUnsafeAccess(Object o, boolean isObjArray) {
   36.43 +        if (o != null && cls.isInstance(o)) { // speculates "o" type to int[]
   36.44 +            return helperUnsafeAccess(o, isObjArray);
   36.45 +        }
   36.46 +        return null;
   36.47 +    }
   36.48 +
   36.49 +    static Object helperUnsafeAccess(Object o, boolean isObjArray) {
   36.50 +        if (isObjArray) {
   36.51 +            U.putObject(o, off, new Object());
   36.52 +        }
   36.53 +        return o;
   36.54 +    }
   36.55 +
   36.56 +    static Object testUnsafeLoadStore(Object o, boolean isObjArray) {
   36.57 +        if (o != null && cls.isInstance(o)) { // speculates "o" type to int[]
   36.58 +            return helperUnsafeLoadStore(o, isObjArray);
   36.59 +        }
   36.60 +        return null;
   36.61 +    }
   36.62 +
   36.63 +    static Object helperUnsafeLoadStore(Object o, boolean isObjArray) {
   36.64 +        if (isObjArray) {
   36.65 +            Object o1 = U.getObject(o, off);
   36.66 +            U.compareAndSwapObject(o, off, o1, new Object());
   36.67 +        }
   36.68 +        return o;
   36.69 +    }
   36.70 +
   36.71 +    public static void main(String[] args) {
   36.72 +        Object[] objArray = new Object[10];
   36.73 +        int[]    intArray = new    int[10];
   36.74 +
   36.75 +        for (int i = 0; i < 20_000; i++) {
   36.76 +            helperUnsafeAccess(objArray, true);
   36.77 +        }
   36.78 +        for (int i = 0; i < 20_000; i++) {
   36.79 +            testUnsafeAccess(intArray, false);
   36.80 +        }
   36.81 +
   36.82 +        for (int i = 0; i < 20_000; i++) {
   36.83 +            helperUnsafeLoadStore(objArray, true);
   36.84 +        }
   36.85 +        for (int i = 0; i < 20_000; i++) {
   36.86 +            testUnsafeLoadStore(intArray, false);
   36.87 +        }
   36.88 +
   36.89 +        System.out.println("TEST PASSED");
   36.90 +    }
   36.91 +}
    37.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    37.2 +++ b/test/compiler/unsafe/OpaqueAccesses.java	Tue Jan 17 09:21:05 2017 -0800
    37.3 @@ -0,0 +1,181 @@
    37.4 +/*
    37.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
    37.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.7 + *
    37.8 + * This code is free software; you can redistribute it and/or modify it
    37.9 + * under the terms of the GNU General Public License version 2 only, as
   37.10 + * published by the Free Software Foundation.  Oracle designates this
   37.11 + * particular file as subject to the "Classpath" exception as provided
   37.12 + * by Oracle in the LICENSE file that accompanied this code.
   37.13 + *
   37.14 + * This code is distributed in the hope that it will be useful, but WITHOUT
   37.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   37.16 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   37.17 + * version 2 for more details (a copy is included in the LICENSE file that
   37.18 + * accompanied this code).
   37.19 + *
   37.20 + * You should have received a copy of the GNU General Public License version
   37.21 + * 2 along with this work; if not, write to the Free Software Foundation,
   37.22 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   37.23 + *
   37.24 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   37.25 + * or visit www.oracle.com if you need additional information or have any
   37.26 + * questions.
   37.27 + */
   37.28 +
   37.29 +/*
   37.30 + * @test
   37.31 + * @bug 8155781
   37.32 + * @modules java.base/jdk.internal.misc
   37.33 + *
   37.34 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
   37.35 + *                                 -XX:-TieredCompilation -Xbatch
   37.36 + *                                 -XX:+UseCompressedOops -XX:+UseCompressedClassPointers
   37.37 + *                                 -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test*
   37.38 + *                                 compiler.unsafe.OpaqueAccesses
   37.39 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
   37.40 + *                                 -XX:-TieredCompilation -Xbatch
   37.41 + *                                 -XX:+UseCompressedOops -XX:-UseCompressedClassPointers
   37.42 + *                                 -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test*
   37.43 + *                                 compiler.unsafe.OpaqueAccesses
   37.44 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
   37.45 + *                                 -XX:-TieredCompilation -Xbatch
   37.46 + *                                 -XX:-UseCompressedOops -XX:+UseCompressedClassPointers
   37.47 + *                                 -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test*
   37.48 + *                                 compiler.unsafe.OpaqueAccesses
   37.49 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions
   37.50 + *                                 -XX:-TieredCompilation -Xbatch
   37.51 + *                                 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers
   37.52 + *                                 -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test*
   37.53 + *                                 compiler.unsafe.OpaqueAccesses
   37.54 + */
   37.55 +package compiler.unsafe;
   37.56 +
   37.57 +import sun.misc.Unsafe;
   37.58 +
   37.59 +import java.lang.reflect.Field;
   37.60 +
   37.61 +public class OpaqueAccesses {
   37.62 +    private static final Unsafe UNSAFE = Unsafe.getUnsafe();
   37.63 +
   37.64 +    private static final Object INSTANCE = new OpaqueAccesses();
   37.65 +
   37.66 +    private static final Object[] ARRAY = new Object[10];
   37.67 +
   37.68 +    private static final long F_OFFSET;
   37.69 +    private static final long E_OFFSET;
   37.70 +
   37.71 +    static {
   37.72 +        try {
   37.73 +            Field field = OpaqueAccesses.class.getDeclaredField("f");
   37.74 +            F_OFFSET = UNSAFE.objectFieldOffset(field);
   37.75 +
   37.76 +            E_OFFSET = UNSAFE.arrayBaseOffset(ARRAY.getClass());
   37.77 +        } catch (NoSuchFieldException e) {
   37.78 +            throw new Error(e);
   37.79 +        }
   37.80 +    }
   37.81 +
   37.82 +    private Object f = new Object();
   37.83 +    private long l1, l2;
   37.84 +
   37.85 +    static Object testFixedOffsetField(Object o) {
   37.86 +        return UNSAFE.getObject(o, F_OFFSET);
   37.87 +    }
   37.88 +
   37.89 +    static int testFixedOffsetHeader0(Object o) {
   37.90 +        return UNSAFE.getInt(o, 0);
   37.91 +    }
   37.92 +
   37.93 +    static int testFixedOffsetHeader4(Object o) {
   37.94 +        return UNSAFE.getInt(o, 4);
   37.95 +    }
   37.96 +
   37.97 +    static int testFixedOffsetHeader8(Object o) {
   37.98 +        return UNSAFE.getInt(o, 8);
   37.99 +    }
  37.100 +
  37.101 +    static int testFixedOffsetHeader12(Object o) {
  37.102 +        return UNSAFE.getInt(o, 12);
  37.103 +    }
  37.104 +
  37.105 +    static int testFixedOffsetHeader16(Object o) {
  37.106 +        return UNSAFE.getInt(o, 16);
  37.107 +    }
  37.108 +
  37.109 +    static Object testFixedBase(long off) {
  37.110 +        return UNSAFE.getObject(INSTANCE, off);
  37.111 +    }
  37.112 +
  37.113 +    static Object testOpaque(Object o, long off) {
  37.114 +        return UNSAFE.getObject(o, off);
  37.115 +    }
  37.116 +
  37.117 +    static int testFixedOffsetHeaderArray0(Object[] arr) {
  37.118 +        return UNSAFE.getInt(arr, 0);
  37.119 +    }
  37.120 +
  37.121 +    static int testFixedOffsetHeaderArray4(Object[] arr) {
  37.122 +        return UNSAFE.getInt(arr, 4);
  37.123 +    }
  37.124 +
  37.125 +    static int testFixedOffsetHeaderArray8(Object[] arr) {
  37.126 +        return UNSAFE.getInt(arr, 8);
  37.127 +    }
  37.128 +
  37.129 +    static int testFixedOffsetHeaderArray12(Object[] arr) {
  37.130 +        return UNSAFE.getInt(arr, 12);
  37.131 +    }
  37.132 +
  37.133 +    static int testFixedOffsetHeaderArray16(Object[] arr) {
  37.134 +        return UNSAFE.getInt(arr, 16);
  37.135 +    }
  37.136 +
  37.137 +    static Object testFixedOffsetArray(Object[] arr) {
  37.138 +        return UNSAFE.getObject(arr, E_OFFSET);
  37.139 +    }
  37.140 +
  37.141 +    static Object testFixedBaseArray(long off) {
  37.142 +        return UNSAFE.getObject(ARRAY, off);
  37.143 +    }
  37.144 +
  37.145 +    static Object testOpaqueArray(Object[] o, long off) {
  37.146 +        return UNSAFE.getObject(o, off);
  37.147 +    }
  37.148 +
  37.149 +    static final long ADDR = UNSAFE.allocateMemory(10);
  37.150 +    static boolean flag;
  37.151 +
  37.152 +    static int testMixedAccess() {
  37.153 +        flag = !flag;
  37.154 +        Object o = (flag ? INSTANCE : null);
  37.155 +        long off = (flag ? F_OFFSET : ADDR);
  37.156 +        return UNSAFE.getInt(o, off);
  37.157 +    }
  37.158 +
  37.159 +    public static void main(String[] args) {
  37.160 +        for (int i = 0; i < 20_000; i++) {
  37.161 +            // Instance
  37.162 +            testFixedOffsetField(INSTANCE);
  37.163 +            testFixedOffsetHeader0(INSTANCE);
  37.164 +            testFixedOffsetHeader4(INSTANCE);
  37.165 +            testFixedOffsetHeader8(INSTANCE);
  37.166 +            testFixedOffsetHeader12(INSTANCE);
  37.167 +            testFixedOffsetHeader16(INSTANCE);
  37.168 +            testFixedBase(F_OFFSET);
  37.169 +            testOpaque(INSTANCE, F_OFFSET);
  37.170 +            testMixedAccess();
  37.171 +
  37.172 +            // Array
  37.173 +            testFixedOffsetHeaderArray0(ARRAY);
  37.174 +            testFixedOffsetHeaderArray4(ARRAY);
  37.175 +            testFixedOffsetHeaderArray8(ARRAY);
  37.176 +            testFixedOffsetHeaderArray12(ARRAY);
  37.177 +            testFixedOffsetHeaderArray16(ARRAY);
  37.178 +            testFixedOffsetArray(ARRAY);
  37.179 +            testFixedBaseArray(E_OFFSET);
  37.180 +            testOpaqueArray(ARRAY, E_OFFSET);
  37.181 +        }
  37.182 +        System.out.println("TEST PASSED");
  37.183 +    }
  37.184 +}
    38.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    38.2 +++ b/test/runtime/Final/Bad.jasm	Tue Jan 17 09:21:05 2017 -0800
    38.3 @@ -0,0 +1,55 @@
    38.4 +/*
    38.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
    38.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    38.7 + *
    38.8 + * This code is free software; you can redistribute it and/or modify it
    38.9 + * under the terms of the GNU General Public License version 2 only, as
   38.10 + * published by the Free Software Foundation.
   38.11 + *
   38.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   38.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   38.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   38.15 + * version 2 for more details (a copy is included in the LICENSE file that
   38.16 + * accompanied this code).
   38.17 + *
   38.18 + * You should have received a copy of the GNU General Public License version
   38.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   38.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   38.21 + *
   38.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   38.23 + * or visit www.oracle.com if you need additional information or have any
   38.24 + * questions.
   38.25 + */
   38.26 +
   38.27 +/* Recoded in jasm to provoke an ICCE assigning a non-static final field with putstatic.
   38.28 +class Bad {
   38.29 +  public static final int i; //rewritten
   38.30 +  //rewritten to: public final int i;
   38.31 +  static { i = 5; } // putstatic instruction
   38.32 +}
   38.33 +*/
   38.34 +
   38.35 +super class Bad
   38.36 +	version 53:0
   38.37 +{
   38.38 +
   38.39 +// Remove 'static' keyword
   38.40 +public final Field i:I;
   38.41 +
   38.42 +Method "<init>":"()V"
   38.43 +	stack 1 locals 1
   38.44 +{
   38.45 +		aload_0;
   38.46 +		invokespecial	Method java/lang/Object."<init>":"()V";
   38.47 +		return;
   38.48 +}
   38.49 +
   38.50 +static Method "<clinit>":"()V"
   38.51 +	stack 1 locals 0
   38.52 +{
   38.53 +		iconst_5;
   38.54 +		putstatic	Field i:"I";
   38.55 +		return;
   38.56 +}
   38.57 +
   38.58 +} // end Class Bad
    39.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    39.2 +++ b/test/runtime/Final/PutfieldError.java	Tue Jan 17 09:21:05 2017 -0800
    39.3 @@ -0,0 +1,42 @@
    39.4 +/*
    39.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
    39.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.7 + *
    39.8 + * This code is free software; you can redistribute it and/or modify it
    39.9 + * under the terms of the GNU General Public License version 2 only, as
   39.10 + * published by the Free Software Foundation.
   39.11 + *
   39.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   39.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   39.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   39.15 + * version 2 for more details (a copy is included in the LICENSE file that
   39.16 + * accompanied this code).
   39.17 + *
   39.18 + * You should have received a copy of the GNU General Public License version
   39.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   39.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   39.21 + *
   39.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   39.23 + * or visit www.oracle.com if you need additional information or have any
   39.24 + * questions.
   39.25 + */
   39.26 +
   39.27 +/*
   39.28 + * @test PutfieldError
   39.29 + * @bug 8160551
   39.30 + * @summary Throw ICCE rather than crashing for nonstatic final field in static initializer
   39.31 + * @compile Bad.jasm
   39.32 + * @run main PutfieldError
   39.33 + */
   39.34 +
   39.35 +public class PutfieldError {
   39.36 +  public static void main(java.lang.String[] unused) {
   39.37 +    try {
   39.38 +      Bad b = new Bad();
   39.39 +      System.out.println("Bad.i = " + 5);
   39.40 +      throw new RuntimeException("ICCE NOT thrown as expected");
   39.41 +    } catch (IncompatibleClassChangeError icce) {
   39.42 +      System.out.println("ICCE thrown as expected");
   39.43 +    }
   39.44 +  }
   39.45 +}
