Wed, 04 Jan 2017 19:44:02 +0000
Merge
1.1 --- a/src/os/linux/vm/os_linux.cpp Tue Dec 20 15:36:08 2016 -0800 1.2 +++ b/src/os/linux/vm/os_linux.cpp Wed Jan 04 19:44:02 2017 +0000 1.3 @@ -1075,29 +1075,30 @@ 1.4 1.5 // Locate initial thread stack. This special handling of initial thread stack 1.6 // is needed because pthread_getattr_np() on most (all?) Linux distros returns 1.7 -// bogus value for initial thread. 1.8 +// bogus value for the primordial process thread. While the launcher has created 1.9 +// the VM in a new thread since JDK 6, we still have to allow for the use of the 1.10 +// JNI invocation API from a primordial thread. 1.11 void os::Linux::capture_initial_stack(size_t max_size) { 1.12 - // stack size is the easy part, get it from RLIMIT_STACK 1.13 - size_t stack_size; 1.14 + 1.15 + // max_size is either 0 (which means accept OS default for thread stacks) or 1.16 + // a user-specified value known to be at least the minimum needed. If we 1.17 + // are actually on the primordial thread we can make it appear that we have a 1.18 + // smaller max_size stack by inserting the guard pages at that location. But we 1.19 + // cannot do anything to emulate a larger stack than what has been provided by 1.20 + // the OS or threading library. In fact if we try to use a stack greater than 1.21 + // what is set by rlimit then we will crash the hosting process. 1.22 + 1.23 + // Maximum stack size is the easy part, get it from RLIMIT_STACK. 1.24 + // If this is "unlimited" then it will be a huge value. 1.25 struct rlimit rlim; 1.26 getrlimit(RLIMIT_STACK, &rlim); 1.27 - stack_size = rlim.rlim_cur; 1.28 + size_t stack_size = rlim.rlim_cur; 1.29 1.30 // 6308388: a bug in ld.so will relocate its own .data section to the 1.31 // lower end of primordial stack; reduce ulimit -s value a little bit 1.32 // so we won't install guard page on ld.so's data section. 
1.33 stack_size -= 2 * page_size(); 1.34 1.35 - // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat 1.36 - // 7.1, in both cases we will get 2G in return value. 1.37 - // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0, 1.38 - // SuSE 7.2, Debian) can not handle alternate signal stack correctly 1.39 - // for initial thread if its stack size exceeds 6M. Cap it at 2M, 1.40 - // in case other parts in glibc still assumes 2M max stack size. 1.41 - // FIXME: alt signal stack is gone, maybe we can relax this constraint? 1.42 - // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small 1.43 - if (stack_size > 2 * K * K IA64_ONLY(*2)) 1.44 - stack_size = 2 * K * K IA64_ONLY(*2); 1.45 // Try to figure out where the stack base (top) is. This is harder. 1.46 // 1.47 // When an application is started, glibc saves the initial stack pointer in 1.48 @@ -1257,14 +1258,18 @@ 1.49 // stack_top could be partially down the page so align it 1.50 stack_top = align_size_up(stack_top, page_size()); 1.51 1.52 - if (max_size && stack_size > max_size) { 1.53 - _initial_thread_stack_size = max_size; 1.54 + // Allowed stack value is minimum of max_size and what we derived from rlimit 1.55 + if (max_size > 0) { 1.56 + _initial_thread_stack_size = MIN2(max_size, stack_size); 1.57 } else { 1.58 - _initial_thread_stack_size = stack_size; 1.59 + // Accept the rlimit max, but if stack is unlimited then it will be huge, so 1.60 + // clamp it at 8MB as we do on Solaris 1.61 + _initial_thread_stack_size = MIN2(stack_size, 8*M); 1.62 } 1.63 1.64 _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size()); 1.65 _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size; 1.66 + assert(_initial_thread_stack_bottom < (address)stack_top, "overflow!"); 1.67 } 1.68 1.69 ////////////////////////////////////////////////////////////////////////////////
2.1 --- a/src/os/windows/vm/os_windows.cpp Tue Dec 20 15:36:08 2016 -0800 2.2 +++ b/src/os/windows/vm/os_windows.cpp Wed Jan 04 19:44:02 2017 +0000 2.3 @@ -1,5 +1,5 @@ 2.4 /* 2.5 - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 2.6 + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 2.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.8 * 2.9 * This code is free software; you can redistribute it and/or modify it 2.10 @@ -1747,8 +1747,7 @@ 2.11 if (is_workstation) { 2.12 st->print("10"); 2.13 } else { 2.14 - // The server version name of Windows 10 is not known at this time 2.15 - st->print("%d.%d", major_version, minor_version); 2.16 + st->print("Server 2016"); 2.17 } 2.18 break; 2.19
3.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Tue Dec 20 15:36:08 2016 -0800 3.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Wed Jan 04 19:44:02 2017 +0000 3.3 @@ -1485,6 +1485,21 @@ 3.4 // Check to see whether we are inlining. If so, Return 3.5 // instructions become Gotos to the continuation point. 3.6 if (continuation() != NULL) { 3.7 + 3.8 + int invoke_bci = state()->caller_state()->bci(); 3.9 + 3.10 + if (x != NULL) { 3.11 + ciMethod* caller = state()->scope()->caller()->method(); 3.12 + Bytecodes::Code invoke_raw_bc = caller->raw_code_at_bci(invoke_bci); 3.13 + if (invoke_raw_bc == Bytecodes::_invokehandle || invoke_raw_bc == Bytecodes::_invokedynamic) { 3.14 + ciType* declared_ret_type = caller->get_declared_signature_at_bci(invoke_bci)->return_type(); 3.15 + if (declared_ret_type->is_klass() && x->exact_type() == NULL && 3.16 + x->declared_type() != declared_ret_type && declared_ret_type != compilation()->env()->Object_klass()) { 3.17 + x = append(new TypeCast(declared_ret_type->as_klass(), x, copy_state_before())); 3.18 + } 3.19 + } 3.20 + } 3.21 + 3.22 assert(!method()->is_synchronized() || InlineSynchronizedMethods, "can not inline synchronized methods yet"); 3.23 3.24 if (compilation()->env()->dtrace_method_probes()) { 3.25 @@ -1508,7 +1523,6 @@ 3.26 // State at end of inlined method is the state of the caller 3.27 // without the method parameters on stack, including the 3.28 // return value, if any, of the inlined method on operand stack. 3.29 - int invoke_bci = state()->caller_state()->bci(); 3.30 set_state(state()->caller_state()->copy_for_parsing()); 3.31 if (x != NULL) { 3.32 state()->push(x->type(), x);
4.1 --- a/src/share/vm/c1/c1_Instruction.cpp Tue Dec 20 15:36:08 2016 -0800 4.2 +++ b/src/share/vm/c1/c1_Instruction.cpp Wed Jan 04 19:44:02 2017 +0000 4.3 @@ -360,7 +360,8 @@ 4.4 } 4.5 4.6 ciType* Invoke::declared_type() const { 4.7 - ciType *t = _target->signature()->return_type(); 4.8 + ciSignature* declared_signature = state()->scope()->method()->get_declared_signature_at_bci(state()->bci()); 4.9 + ciType *t = declared_signature->return_type(); 4.10 assert(t->basic_type() != T_VOID, "need return value of void method?"); 4.11 return t; 4.12 }
5.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp Tue Dec 20 15:36:08 2016 -0800 5.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Wed Jan 04 19:44:02 2017 +0000 5.3 @@ -3191,14 +3191,14 @@ 5.4 Bytecodes::Code bc = x->method()->java_code_at_bci(bci); 5.5 int start = 0; 5.6 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments(); 5.7 - if (x->inlined() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 5.8 + if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) { 5.9 // first argument is not profiled at call (method handle invoke) 5.10 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected"); 5.11 start = 1; 5.12 } 5.13 ciSignature* callee_signature = x->callee()->signature(); 5.14 // method handle call to virtual method 5.15 - bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 5.16 + bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc); 5.17 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL); 5.18 5.19 bool ignored_will_link;
6.1 --- a/src/share/vm/ci/ciField.cpp Tue Dec 20 15:36:08 2016 -0800 6.2 +++ b/src/share/vm/ci/ciField.cpp Wed Jan 04 19:44:02 2017 +0000 6.3 @@ -207,7 +207,7 @@ 6.4 // Check to see if the field is constant. 6.5 bool is_final = this->is_final(); 6.6 bool is_stable = FoldStableValues && this->is_stable(); 6.7 - if (_holder->is_initialized() && (is_final || is_stable)) { 6.8 + if (_holder->is_initialized() && ((is_final && !has_initialized_final_update()) || is_stable)) { 6.9 if (!this->is_static()) { 6.10 // A field can be constant if it's a final static field or if 6.11 // it's a final non-static field of a trusted class (classes in
7.1 --- a/src/share/vm/ci/ciField.hpp Tue Dec 20 15:36:08 2016 -0800 7.2 +++ b/src/share/vm/ci/ciField.hpp Wed Jan 04 19:44:02 2017 +0000 7.3 @@ -124,22 +124,8 @@ 7.4 return _holder->is_shared() && !is_static(); 7.5 } 7.6 7.7 - // Is this field a constant? 7.8 - // 7.9 - // Clarification: A field is considered constant if: 7.10 - // 1. The field is both static and final 7.11 - // 2. The canonical holder of the field has undergone 7.12 - // static initialization. 7.13 - // 3. If the field is an object or array, then the oop 7.14 - // in question is allocated in perm space. 7.15 - // 4. The field is not one of the special static/final 7.16 - // non-constant fields. These are java.lang.System.in 7.17 - // and java.lang.System.out. Abomination. 7.18 - // 7.19 - // A field is also considered constant if it is marked @Stable 7.20 - // and is non-null (or non-zero, if a primitive). 7.21 - // For non-static fields, the null/zero check must be 7.22 - // arranged by the user, as constant_value().is_null_or_zero(). 7.23 + // Is this field a constant? See ciField::initialize_from() for details 7.24 + // about how a field is determined to be constant. 7.25 bool is_constant() { return _is_constant; } 7.26 7.27 // Get the constant value of this field. 7.28 @@ -176,6 +162,9 @@ 7.29 bool is_stable () { return flags().is_stable(); } 7.30 bool is_volatile () { return flags().is_volatile(); } 7.31 bool is_transient () { return flags().is_transient(); } 7.32 + // The field is modified outside of instance initializer methods 7.33 + // (or class/initializer methods if the field is static). 7.34 + bool has_initialized_final_update() { return flags().has_initialized_final_update(); } 7.35 7.36 bool is_call_site_target() { 7.37 ciInstanceKlass* callsite_klass = CURRENT_ENV->CallSite_klass();
8.1 --- a/src/share/vm/ci/ciFlags.hpp Tue Dec 20 15:36:08 2016 -0800 8.2 +++ b/src/share/vm/ci/ciFlags.hpp Wed Jan 04 19:44:02 2017 +0000 8.3 @@ -46,20 +46,25 @@ 8.4 8.5 public: 8.6 // Java access flags 8.7 - bool is_public () const { return (_flags & JVM_ACC_PUBLIC ) != 0; } 8.8 - bool is_private () const { return (_flags & JVM_ACC_PRIVATE ) != 0; } 8.9 - bool is_protected () const { return (_flags & JVM_ACC_PROTECTED ) != 0; } 8.10 - bool is_static () const { return (_flags & JVM_ACC_STATIC ) != 0; } 8.11 - bool is_final () const { return (_flags & JVM_ACC_FINAL ) != 0; } 8.12 - bool is_synchronized() const { return (_flags & JVM_ACC_SYNCHRONIZED) != 0; } 8.13 - bool is_super () const { return (_flags & JVM_ACC_SUPER ) != 0; } 8.14 - bool is_volatile () const { return (_flags & JVM_ACC_VOLATILE ) != 0; } 8.15 - bool is_transient () const { return (_flags & JVM_ACC_TRANSIENT ) != 0; } 8.16 - bool is_native () const { return (_flags & JVM_ACC_NATIVE ) != 0; } 8.17 - bool is_interface () const { return (_flags & JVM_ACC_INTERFACE ) != 0; } 8.18 - bool is_abstract () const { return (_flags & JVM_ACC_ABSTRACT ) != 0; } 8.19 - bool is_strict () const { return (_flags & JVM_ACC_STRICT ) != 0; } 8.20 - bool is_stable () const { return (_flags & JVM_ACC_FIELD_STABLE) != 0; } 8.21 + bool is_public () const { return (_flags & JVM_ACC_PUBLIC ) != 0; } 8.22 + bool is_private () const { return (_flags & JVM_ACC_PRIVATE ) != 0; } 8.23 + bool is_protected () const { return (_flags & JVM_ACC_PROTECTED ) != 0; } 8.24 + bool is_static () const { return (_flags & JVM_ACC_STATIC ) != 0; } 8.25 + bool is_final () const { return (_flags & JVM_ACC_FINAL ) != 0; } 8.26 + bool is_synchronized () const { return (_flags & JVM_ACC_SYNCHRONIZED ) != 0; } 8.27 + bool is_super () const { return (_flags & JVM_ACC_SUPER ) != 0; } 8.28 + bool is_volatile () const { return (_flags & JVM_ACC_VOLATILE ) != 0; } 8.29 + bool is_transient () const { return (_flags & JVM_ACC_TRANSIENT ) != 0; } 8.30 + 
bool is_native () const { return (_flags & JVM_ACC_NATIVE ) != 0; } 8.31 + bool is_interface () const { return (_flags & JVM_ACC_INTERFACE ) != 0; } 8.32 + bool is_abstract () const { return (_flags & JVM_ACC_ABSTRACT ) != 0; } 8.33 + bool is_strict () const { return (_flags & JVM_ACC_STRICT ) != 0; } 8.34 + bool is_stable () const { return (_flags & JVM_ACC_FIELD_STABLE ) != 0; } 8.35 + // In case the current object represents a field, return true if 8.36 + // the field is modified outside of instance initializer methods 8.37 + // (or class/initializer methods if the field is static) and false 8.38 + // otherwise. 8.39 + bool has_initialized_final_update() const { return (_flags & JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE) != 0; }; 8.40 8.41 // Conversion 8.42 jint as_int() { return _flags; }
9.1 --- a/src/share/vm/ci/ciMethod.hpp Tue Dec 20 15:36:08 2016 -0800 9.2 +++ b/src/share/vm/ci/ciMethod.hpp Wed Jan 04 19:44:02 2017 +0000 9.3 @@ -243,6 +243,21 @@ 9.4 9.5 ciField* get_field_at_bci( int bci, bool &will_link); 9.6 ciMethod* get_method_at_bci(int bci, bool &will_link, ciSignature* *declared_signature); 9.7 + 9.8 + ciSignature* get_declared_signature_at_bci(int bci) { 9.9 + bool ignored_will_link; 9.10 + ciSignature* declared_signature; 9.11 + get_method_at_bci(bci, ignored_will_link, &declared_signature); 9.12 + assert(declared_signature != NULL, "cannot be null"); 9.13 + return declared_signature; 9.14 + } 9.15 + 9.16 + ciMethod* get_method_at_bci(int bci) { 9.17 + bool ignored_will_link; 9.18 + ciSignature* ignored_declared_signature; 9.19 + return get_method_at_bci(bci, ignored_will_link, &ignored_declared_signature); 9.20 + } 9.21 + 9.22 // Given a certain calling environment, find the monomorphic target 9.23 // for the call. Return NULL if the call is not monomorphic in 9.24 // its calling environment.
10.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Dec 20 15:36:08 2016 -0800 10.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jan 04 19:44:02 2017 +0000 10.3 @@ -1,5 +1,5 @@ 10.4 /* 10.5 - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 10.6 + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 10.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 10.8 * 10.9 * This code is free software; you can redistribute it and/or modify it 10.10 @@ -631,11 +631,10 @@ 10.11 double overall_cm_overhead = 10.12 (double) MaxGCPauseMillis * marking_overhead / 10.13 (double) GCPauseIntervalMillis; 10.14 - double cpu_ratio = 1.0 / (double) os::processor_count(); 10.15 + double cpu_ratio = 1.0 / os::initial_active_processor_count(); 10.16 double marking_thread_num = ceil(overall_cm_overhead / cpu_ratio); 10.17 double marking_task_overhead = 10.18 - overall_cm_overhead / marking_thread_num * 10.19 - (double) os::processor_count(); 10.20 + overall_cm_overhead / marking_thread_num * os::initial_active_processor_count(); 10.21 double sleep_factor = 10.22 (1.0 - marking_task_overhead) / marking_task_overhead; 10.23
11.1 --- a/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Tue Dec 20 15:36:08 2016 -0800 11.2 +++ b/src/share/vm/gc_implementation/g1/dirtyCardQueue.cpp Wed Jan 04 19:44:02 2017 +0000 11.3 @@ -1,5 +1,5 @@ 11.4 /* 11.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 11.6 + * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved. 11.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 11.8 * 11.9 * This code is free software; you can redistribute it and/or modify it 11.10 @@ -80,7 +80,7 @@ 11.11 11.12 // Determines how many mutator threads can process the buffers in parallel. 11.13 uint DirtyCardQueueSet::num_par_ids() { 11.14 - return (uint)os::processor_count(); 11.15 + return (uint)os::initial_active_processor_count(); 11.16 } 11.17 11.18 void DirtyCardQueueSet::initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock,
12.1 --- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Tue Dec 20 15:36:08 2016 -0800 12.2 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp Wed Jan 04 19:44:02 2017 +0000 12.3 @@ -452,9 +452,13 @@ 12.4 // event lock and do the read again in case some other thread had already 12.5 // succeeded and done the resize. 12.6 int cur_collection = Universe::heap()->total_collections(); 12.7 - if (_last_LNC_resizing_collection[i] != cur_collection) { 12.8 + // Updated _last_LNC_resizing_collection[i] must not be visible before 12.9 + // _lowest_non_clean and friends are visible. Therefore use acquire/release 12.10 + // to guarantee this on non TSO architectures. 12.11 + if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) { 12.12 MutexLocker x(ParGCRareEvent_lock); 12.13 - if (_last_LNC_resizing_collection[i] != cur_collection) { 12.14 + // This load_acquire is here for clarity only. The MutexLocker already fences. 12.15 + if (OrderAccess::load_acquire(&_last_LNC_resizing_collection[i]) != cur_collection) { 12.16 if (_lowest_non_clean[i] == NULL || 12.17 n_chunks != _lowest_non_clean_chunk_size[i]) { 12.18 12.19 @@ -474,7 +478,8 @@ 12.20 _lowest_non_clean[i][j] = NULL; 12.21 } 12.22 } 12.23 - _last_LNC_resizing_collection[i] = cur_collection; 12.24 + // Make sure this gets visible only after _lowest_non_clean* was initialized 12.25 + OrderAccess::release_store(&_last_LNC_resizing_collection[i], cur_collection); 12.26 } 12.27 } 12.28 // In any case, now do the initialization.
13.1 --- a/src/share/vm/interpreter/rewriter.cpp Tue Dec 20 15:36:08 2016 -0800 13.2 +++ b/src/share/vm/interpreter/rewriter.cpp Wed Jan 04 19:44:02 2017 +0000 13.3 @@ -396,10 +396,45 @@ 13.4 break; 13.5 } 13.6 13.7 + case Bytecodes::_putstatic : 13.8 + case Bytecodes::_putfield : { 13.9 + if (!reverse) { 13.10 + // Check if any final field of the class given as parameter is modified 13.11 + // outside of initializer methods of the class. Fields that are modified 13.12 + // are marked with a flag. For marked fields, the compilers do not perform 13.13 + // constant folding (as the field can be changed after initialization). 13.14 + // 13.15 + // The check is performed after verification and only if verification has 13.16 + // succeeded. Therefore, the class is guaranteed to be well-formed. 13.17 + InstanceKlass* klass = method->method_holder(); 13.18 + u2 bc_index = Bytes::get_Java_u2(bcp + prefix_length + 1); 13.19 + constantPoolHandle cp(method->constants()); 13.20 + Symbol* ref_class_name = cp->klass_name_at(cp->klass_ref_index_at(bc_index)); 13.21 + 13.22 + if (klass->name() == ref_class_name) { 13.23 + Symbol* field_name = cp->name_ref_at(bc_index); 13.24 + Symbol* field_sig = cp->signature_ref_at(bc_index); 13.25 + 13.26 + fieldDescriptor fd; 13.27 + if (klass->find_field(field_name, field_sig, &fd) != NULL) { 13.28 + if (fd.access_flags().is_final()) { 13.29 + if (fd.access_flags().is_static()) { 13.30 + if (!method->is_static_initializer()) { 13.31 + fd.set_has_initialized_final_update(true); 13.32 + } 13.33 + } else { 13.34 + if (!method->is_object_initializer()) { 13.35 + fd.set_has_initialized_final_update(true); 13.36 + } 13.37 + } 13.38 + } 13.39 + } 13.40 + } 13.41 + } 13.42 + } 13.43 + // fall through 13.44 case Bytecodes::_getstatic : // fall through 13.45 - case Bytecodes::_putstatic : // fall through 13.46 case Bytecodes::_getfield : // fall through 13.47 - case Bytecodes::_putfield : // fall through 13.48 case Bytecodes::_invokevirtual : // fall 
through 13.49 case Bytecodes::_invokestatic : 13.50 case Bytecodes::_invokeinterface:
14.1 --- a/src/share/vm/memory/cardTableModRefBS.hpp Tue Dec 20 15:36:08 2016 -0800 14.2 +++ b/src/share/vm/memory/cardTableModRefBS.hpp Wed Jan 04 19:44:02 2017 +0000 14.3 @@ -217,7 +217,7 @@ 14.4 CardArr* _lowest_non_clean; 14.5 size_t* _lowest_non_clean_chunk_size; 14.6 uintptr_t* _lowest_non_clean_base_chunk_index; 14.7 - int* _last_LNC_resizing_collection; 14.8 + volatile int* _last_LNC_resizing_collection; 14.9 14.10 // Initializes "lowest_non_clean" to point to the array for the region 14.11 // covering "sp", and "lowest_non_clean_base_chunk_index" to the chunk
15.1 --- a/src/share/vm/oops/method.cpp Tue Dec 20 15:36:08 2016 -0800 15.2 +++ b/src/share/vm/oops/method.cpp Wed Jan 04 19:44:02 2017 +0000 15.3 @@ -590,7 +590,7 @@ 15.4 } 15.5 15.6 bool Method::is_initializer() const { 15.7 - return name() == vmSymbols::object_initializer_name() || is_static_initializer(); 15.8 + return is_object_initializer() || is_static_initializer(); 15.9 } 15.10 15.11 bool Method::has_valid_initializer_flags() const { 15.12 @@ -606,6 +606,9 @@ 15.13 has_valid_initializer_flags(); 15.14 } 15.15 15.16 +bool Method::is_object_initializer() const { 15.17 + return name() == vmSymbols::object_initializer_name(); 15.18 +} 15.19 15.20 objArrayHandle Method::resolved_checked_exceptions_impl(Method* this_oop, TRAPS) { 15.21 int length = this_oop->checked_exceptions_length();
16.1 --- a/src/share/vm/oops/method.hpp Tue Dec 20 15:36:08 2016 -0800 16.2 +++ b/src/share/vm/oops/method.hpp Wed Jan 04 19:44:02 2017 +0000 16.3 @@ -627,6 +627,9 @@ 16.4 // valid static initializer flags. 16.5 bool is_static_initializer() const; 16.6 16.7 + // returns true if the method name is <init> 16.8 + bool is_object_initializer() const; 16.9 + 16.10 // compiled code support 16.11 // NOTE: code() is inherently racy as deopt can be clearing code 16.12 // simultaneously. Use with caution.
17.1 --- a/src/share/vm/opto/callGenerator.cpp Tue Dec 20 15:36:08 2016 -0800 17.2 +++ b/src/share/vm/opto/callGenerator.cpp Wed Jan 04 19:44:02 2017 +0000 17.3 @@ -188,7 +188,10 @@ 17.4 // the call instruction will have a seemingly deficient out-count. 17.5 // (The bailout says something misleading about an "infinite loop".) 17.6 if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) { 17.7 - kit.inc_sp(method()->arg_size()); // restore arguments 17.8 + assert(Bytecodes::is_invoke(kit.java_bc()), err_msg("%d: %s", kit.java_bc(), Bytecodes::name(kit.java_bc()))); 17.9 + ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci()); 17.10 + int arg_size = declared_method->signature()->arg_size_for_bc(kit.java_bc()); 17.11 + kit.inc_sp(arg_size); // restore arguments 17.12 kit.uncommon_trap(Deoptimization::Reason_null_check, 17.13 Deoptimization::Action_none, 17.14 NULL, "null receiver"); 17.15 @@ -1119,7 +1122,10 @@ 17.16 JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { 17.17 GraphKit kit(jvms); 17.18 // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). 17.19 - int nargs = method()->arg_size(); 17.20 + // Callsite signature can be different from actual method being called (i.e _linkTo* sites). 17.21 + // Use callsite signature always. 17.22 + ciMethod* declared_method = kit.method()->get_method_at_bci(kit.bci()); 17.23 + int nargs = declared_method->arg_size(); 17.24 kit.inc_sp(nargs); 17.25 assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed"); 17.26 if (_reason == Deoptimization::Reason_class_check &&
18.1 --- a/src/share/vm/opto/compile.cpp Tue Dec 20 15:36:08 2016 -0800 18.2 +++ b/src/share/vm/opto/compile.cpp Wed Jan 04 19:44:02 2017 +0000 18.3 @@ -1595,6 +1595,17 @@ 18.4 } 18.5 } 18.6 18.7 +BasicType Compile::AliasType::basic_type() const { 18.8 + if (element() != NULL) { 18.9 + const Type* element = adr_type()->is_aryptr()->elem(); 18.10 + return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type(); 18.11 + } if (field() != NULL) { 18.12 + return field()->layout_type(); 18.13 + } else { 18.14 + return T_ILLEGAL; // unknown 18.15 + } 18.16 +} 18.17 + 18.18 //---------------------------------print_on------------------------------------ 18.19 #ifndef PRODUCT 18.20 void Compile::AliasType::print_on(outputStream* st) {
19.1 --- a/src/share/vm/opto/compile.hpp Tue Dec 20 15:36:08 2016 -0800 19.2 +++ b/src/share/vm/opto/compile.hpp Wed Jan 04 19:44:02 2017 +0000 19.3 @@ -152,6 +152,8 @@ 19.4 _element = e; 19.5 } 19.6 19.7 + BasicType basic_type() const; 19.8 + 19.9 void print_on(outputStream* st) PRODUCT_RETURN; 19.10 }; 19.11
20.1 --- a/src/share/vm/opto/graphKit.cpp Tue Dec 20 15:36:08 2016 -0800 20.2 +++ b/src/share/vm/opto/graphKit.cpp Wed Jan 04 19:44:02 2017 +0000 20.3 @@ -1452,7 +1452,11 @@ 20.4 // factory methods in "int adr_idx" 20.5 Node* GraphKit::make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, 20.6 int adr_idx, 20.7 - MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency, bool require_atomic_access) { 20.8 + MemNode::MemOrd mo, 20.9 + LoadNode::ControlDependency control_dependency, 20.10 + bool require_atomic_access, 20.11 + bool unaligned, 20.12 + bool mismatched) { 20.13 assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" ); 20.14 const TypePtr* adr_type = NULL; // debug-mode-only argument 20.15 debug_only(adr_type = C->get_adr_type(adr_idx)); 20.16 @@ -1465,6 +1469,12 @@ 20.17 } else { 20.18 ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency); 20.19 } 20.20 + if (unaligned) { 20.21 + ld->as_Load()->set_unaligned_access(); 20.22 + } 20.23 + if (mismatched) { 20.24 + ld->as_Load()->set_mismatched_access(); 20.25 + } 20.26 ld = _gvn.transform(ld); 20.27 if ((bt == T_OBJECT) && C->do_escape_analysis() || C->eliminate_boxing()) { 20.28 // Improve graph before escape analysis and boxing elimination. 
20.29 @@ -1476,7 +1486,9 @@ 20.30 Node* GraphKit::store_to_memory(Node* ctl, Node* adr, Node *val, BasicType bt, 20.31 int adr_idx, 20.32 MemNode::MemOrd mo, 20.33 - bool require_atomic_access) { 20.34 + bool require_atomic_access, 20.35 + bool unaligned, 20.36 + bool mismatched) { 20.37 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); 20.38 const TypePtr* adr_type = NULL; 20.39 debug_only(adr_type = C->get_adr_type(adr_idx)); 20.40 @@ -1489,6 +1501,12 @@ 20.41 } else { 20.42 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); 20.43 } 20.44 + if (unaligned) { 20.45 + st->as_Store()->set_unaligned_access(); 20.46 + } 20.47 + if (mismatched) { 20.48 + st->as_Store()->set_mismatched_access(); 20.49 + } 20.50 st = _gvn.transform(st); 20.51 set_memory(st, adr_idx); 20.52 // Back-to-back stores can only remove intermediate store with DU info 20.53 @@ -1588,7 +1606,8 @@ 20.54 const TypeOopPtr* val_type, 20.55 BasicType bt, 20.56 bool use_precise, 20.57 - MemNode::MemOrd mo) { 20.58 + MemNode::MemOrd mo, 20.59 + bool mismatched) { 20.60 // Transformation of a value which could be NULL pointer (CastPP #NULL) 20.61 // could be delayed during Parse (for example, in adjust_map_after_if()). 20.62 // Execute transformation here to avoid barrier generation in such case. 
20.63 @@ -1608,7 +1627,7 @@ 20.64 NULL /* pre_val */, 20.65 bt); 20.66 20.67 - Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo); 20.68 + Node* store = store_to_memory(control(), adr, val, bt, adr_idx, mo, mismatched); 20.69 post_barrier(control(), store, obj, adr, adr_idx, val, bt, use_precise); 20.70 return store; 20.71 } 20.72 @@ -1620,7 +1639,8 @@ 20.73 const TypePtr* adr_type, 20.74 Node* val, 20.75 BasicType bt, 20.76 - MemNode::MemOrd mo) { 20.77 + MemNode::MemOrd mo, 20.78 + bool mismatched) { 20.79 Compile::AliasType* at = C->alias_type(adr_type); 20.80 const TypeOopPtr* val_type = NULL; 20.81 if (adr_type->isa_instptr()) { 20.82 @@ -1639,7 +1659,7 @@ 20.83 if (val_type == NULL) { 20.84 val_type = TypeInstPtr::BOTTOM; 20.85 } 20.86 - return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo); 20.87 + return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true, mo, mismatched); 20.88 } 20.89 20.90
21.1 --- a/src/share/vm/opto/graphKit.hpp Tue Dec 20 15:36:08 2016 -0800 21.2 +++ b/src/share/vm/opto/graphKit.hpp Wed Jan 04 19:44:02 2017 +0000 21.3 @@ -517,23 +517,28 @@ 21.4 // of volatile fields. 21.5 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, 21.6 MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest, 21.7 - bool require_atomic_access = false) { 21.8 + bool require_atomic_access = false, bool unaligned = false, 21.9 + bool mismatched = false) { 21.10 // This version computes alias_index from bottom_type 21.11 return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(), 21.12 - mo, control_dependency, require_atomic_access); 21.13 + mo, control_dependency, require_atomic_access, 21.14 + unaligned, mismatched); 21.15 } 21.16 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, 21.17 MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest, 21.18 - bool require_atomic_access = false) { 21.19 + bool require_atomic_access = false, bool unaligned = false, 21.20 + bool mismatched = false) { 21.21 // This version computes alias_index from an address type 21.22 assert(adr_type != NULL, "use other make_load factory"); 21.23 return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type), 21.24 - mo, control_dependency, require_atomic_access); 21.25 + mo, control_dependency, require_atomic_access, 21.26 + unaligned, mismatched); 21.27 } 21.28 // This is the base version which is given an alias index. 
21.29 Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, 21.30 MemNode::MemOrd mo, LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest, 21.31 - bool require_atomic_access = false); 21.32 + bool require_atomic_access = false, bool unaligned = false, 21.33 + bool mismatched = false); 21.34 21.35 // Create & transform a StoreNode and store the effect into the 21.36 // parser's memory state. 21.37 @@ -546,19 +551,24 @@ 21.38 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt, 21.39 const TypePtr* adr_type, 21.40 MemNode::MemOrd mo, 21.41 - bool require_atomic_access = false) { 21.42 + bool require_atomic_access = false, 21.43 + bool unaligned = false, 21.44 + bool mismatched = false) { 21.45 // This version computes alias_index from an address type 21.46 assert(adr_type != NULL, "use other store_to_memory factory"); 21.47 return store_to_memory(ctl, adr, val, bt, 21.48 C->get_alias_index(adr_type), 21.49 - mo, require_atomic_access); 21.50 + mo, require_atomic_access, 21.51 + unaligned, mismatched); 21.52 } 21.53 // This is the base version which is given alias index 21.54 // Return the new StoreXNode 21.55 Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt, 21.56 int adr_idx, 21.57 MemNode::MemOrd, 21.58 - bool require_atomic_access = false); 21.59 + bool require_atomic_access = false, 21.60 + bool unaligned = false, 21.61 + bool mismatched = false); 21.62 21.63 21.64 // All in one pre-barrier, store, post_barrier 21.65 @@ -581,7 +591,8 @@ 21.66 const TypeOopPtr* val_type, 21.67 BasicType bt, 21.68 bool use_precise, 21.69 - MemNode::MemOrd mo); 21.70 + MemNode::MemOrd mo, 21.71 + bool mismatched = false); 21.72 21.73 Node* store_oop_to_object(Node* ctl, 21.74 Node* obj, // containing obj 21.75 @@ -612,7 +623,8 @@ 21.76 const TypePtr* adr_type, 21.77 Node* val, 21.78 BasicType bt, 21.79 - MemNode::MemOrd mo); 21.80 + MemNode::MemOrd mo, 21.81 + bool mismatched = false); 21.82 21.83 
// For the few case where the barriers need special help 21.84 void pre_barrier(bool do_load, Node* ctl, 21.85 @@ -656,7 +668,10 @@ 21.86 // callee (with all arguments still on the stack). 21.87 Node* null_check_receiver_before_call(ciMethod* callee) { 21.88 assert(!callee->is_static(), "must be a virtual method"); 21.89 - const int nargs = callee->arg_size(); 21.90 + // Callsite signature can be different from actual method being called (i.e _linkTo* sites). 21.91 + // Use callsite signature always. 21.92 + ciMethod* declared_method = method()->get_method_at_bci(bci()); 21.93 + const int nargs = declared_method->arg_size(); 21.94 inc_sp(nargs); 21.95 Node* n = null_check_receiver(); 21.96 dec_sp(nargs);
22.1 --- a/src/share/vm/opto/idealKit.cpp Tue Dec 20 15:36:08 2016 -0800 22.2 +++ b/src/share/vm/opto/idealKit.cpp Wed Jan 04 19:44:02 2017 +0000 22.3 @@ -368,7 +368,8 @@ 22.4 22.5 Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt, 22.6 int adr_idx, 22.7 - MemNode::MemOrd mo, bool require_atomic_access) { 22.8 + MemNode::MemOrd mo, bool require_atomic_access, 22.9 + bool mismatched) { 22.10 assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory"); 22.11 const TypePtr* adr_type = NULL; 22.12 debug_only(adr_type = C->get_adr_type(adr_idx)); 22.13 @@ -379,6 +380,9 @@ 22.14 } else { 22.15 st = StoreNode::make(_gvn, ctl, mem, adr, adr_type, val, bt, mo); 22.16 } 22.17 + if (mismatched) { 22.18 + st->as_Store()->set_mismatched_access(); 22.19 + } 22.20 st = transform(st); 22.21 set_memory(st, adr_idx); 22.22
23.1 --- a/src/share/vm/opto/idealKit.hpp Tue Dec 20 15:36:08 2016 -0800 23.2 +++ b/src/share/vm/opto/idealKit.hpp Wed Jan 04 19:44:02 2017 +0000 23.3 @@ -227,7 +227,9 @@ 23.4 BasicType bt, 23.5 int adr_idx, 23.6 MemNode::MemOrd mo, 23.7 - bool require_atomic_access = false); 23.8 + bool require_atomic_access = false, 23.9 + bool mismatched = false 23.10 + ); 23.11 23.12 // Store a card mark ordered after store_oop 23.13 Node* storeCM(Node* ctl,
24.1 --- a/src/share/vm/opto/library_call.cpp Tue Dec 20 15:36:08 2016 -0800 24.2 +++ b/src/share/vm/opto/library_call.cpp Wed Jan 04 19:44:02 2017 +0000 24.3 @@ -230,7 +230,7 @@ 24.4 // Generates the guards that check whether the result of 24.5 // Unsafe.getObject should be recorded in an SATB log buffer. 24.6 void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar); 24.7 - bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); 24.8 + bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool is_unaligned); 24.9 bool inline_unsafe_prefetch(bool is_native_ptr, bool is_store, bool is_static); 24.10 static bool klass_needs_init_guard(Node* kls); 24.11 bool inline_unsafe_allocate(); 24.12 @@ -795,63 +795,63 @@ 24.13 case vmIntrinsics::_indexOf: return inline_string_indexOf(); 24.14 case vmIntrinsics::_equals: return inline_string_equals(); 24.15 24.16 - case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile); 24.17 - case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile); 24.18 - case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile); 24.19 - case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile); 24.20 - case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile); 24.21 - case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile); 24.22 - case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile); 24.23 - case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile); 24.24 - case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, 
T_DOUBLE, !is_volatile); 24.25 - 24.26 - case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile); 24.27 - case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile); 24.28 - case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile); 24.29 - case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile); 24.30 - case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile); 24.31 - case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile); 24.32 - case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile); 24.33 - case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile); 24.34 - case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile); 24.35 - 24.36 - case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile); 24.37 - case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile); 24.38 - case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile); 24.39 - case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile); 24.40 - case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile); 24.41 - case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile); 24.42 - case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile); 24.43 - case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( 
is_native_ptr, !is_store, T_ADDRESS, !is_volatile); 24.44 - 24.45 - case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile); 24.46 - case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile); 24.47 - case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile); 24.48 - case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile); 24.49 - case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile); 24.50 - case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile); 24.51 - case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile); 24.52 - case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile); 24.53 - 24.54 - case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile); 24.55 - case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile); 24.56 - case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile); 24.57 - case vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile); 24.58 - case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile); 24.59 - case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile); 24.60 - case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile); 24.61 - case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, 
is_volatile); 24.62 - case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile); 24.63 - 24.64 - case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile); 24.65 - case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile); 24.66 - case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile); 24.67 - case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile); 24.68 - case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile); 24.69 - case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile); 24.70 - case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile); 24.71 - case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile); 24.72 - case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile); 24.73 + case vmIntrinsics::_getObject: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, !is_volatile, false); 24.74 + case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, !is_volatile, false); 24.75 + case vmIntrinsics::_getByte: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, !is_volatile, false); 24.76 + case vmIntrinsics::_getShort: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile, false); 24.77 + case vmIntrinsics::_getChar: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile, false); 24.78 + case vmIntrinsics::_getInt: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile, false); 
24.79 + case vmIntrinsics::_getLong: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile, false); 24.80 + case vmIntrinsics::_getFloat: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, !is_volatile, false); 24.81 + case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, !is_volatile, false); 24.82 + 24.83 + case vmIntrinsics::_putObject: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, !is_volatile, false); 24.84 + case vmIntrinsics::_putBoolean: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, !is_volatile, false); 24.85 + case vmIntrinsics::_putByte: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, !is_volatile, false); 24.86 + case vmIntrinsics::_putShort: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile, false); 24.87 + case vmIntrinsics::_putChar: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile, false); 24.88 + case vmIntrinsics::_putInt: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile, false); 24.89 + case vmIntrinsics::_putLong: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile, false); 24.90 + case vmIntrinsics::_putFloat: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, !is_volatile, false); 24.91 + case vmIntrinsics::_putDouble: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, !is_volatile, false); 24.92 + 24.93 + case vmIntrinsics::_getByte_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_BYTE, !is_volatile, false); 24.94 + case vmIntrinsics::_getShort_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_SHORT, !is_volatile, false); 24.95 + case vmIntrinsics::_getChar_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_CHAR, !is_volatile, false); 24.96 + case vmIntrinsics::_getInt_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_INT, !is_volatile, false); 
24.97 + case vmIntrinsics::_getLong_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_LONG, !is_volatile, false); 24.98 + case vmIntrinsics::_getFloat_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_FLOAT, !is_volatile, false); 24.99 + case vmIntrinsics::_getDouble_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_DOUBLE, !is_volatile, false); 24.100 + case vmIntrinsics::_getAddress_raw: return inline_unsafe_access( is_native_ptr, !is_store, T_ADDRESS, !is_volatile, false); 24.101 + 24.102 + case vmIntrinsics::_putByte_raw: return inline_unsafe_access( is_native_ptr, is_store, T_BYTE, !is_volatile, false); 24.103 + case vmIntrinsics::_putShort_raw: return inline_unsafe_access( is_native_ptr, is_store, T_SHORT, !is_volatile, false); 24.104 + case vmIntrinsics::_putChar_raw: return inline_unsafe_access( is_native_ptr, is_store, T_CHAR, !is_volatile, false); 24.105 + case vmIntrinsics::_putInt_raw: return inline_unsafe_access( is_native_ptr, is_store, T_INT, !is_volatile, false); 24.106 + case vmIntrinsics::_putLong_raw: return inline_unsafe_access( is_native_ptr, is_store, T_LONG, !is_volatile, false); 24.107 + case vmIntrinsics::_putFloat_raw: return inline_unsafe_access( is_native_ptr, is_store, T_FLOAT, !is_volatile, false); 24.108 + case vmIntrinsics::_putDouble_raw: return inline_unsafe_access( is_native_ptr, is_store, T_DOUBLE, !is_volatile, false); 24.109 + case vmIntrinsics::_putAddress_raw: return inline_unsafe_access( is_native_ptr, is_store, T_ADDRESS, !is_volatile, false); 24.110 + 24.111 + case vmIntrinsics::_getObjectVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_OBJECT, is_volatile, false); 24.112 + case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BOOLEAN, is_volatile, false); 24.113 + case vmIntrinsics::_getByteVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_BYTE, is_volatile, false); 24.114 + case 
vmIntrinsics::_getShortVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, is_volatile, false); 24.115 + case vmIntrinsics::_getCharVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, is_volatile, false); 24.116 + case vmIntrinsics::_getIntVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, is_volatile, false); 24.117 + case vmIntrinsics::_getLongVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, is_volatile, false); 24.118 + case vmIntrinsics::_getFloatVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_FLOAT, is_volatile, false); 24.119 + case vmIntrinsics::_getDoubleVolatile: return inline_unsafe_access(!is_native_ptr, !is_store, T_DOUBLE, is_volatile, false); 24.120 + 24.121 + case vmIntrinsics::_putObjectVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_OBJECT, is_volatile, false); 24.122 + case vmIntrinsics::_putBooleanVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BOOLEAN, is_volatile, false); 24.123 + case vmIntrinsics::_putByteVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_BYTE, is_volatile, false); 24.124 + case vmIntrinsics::_putShortVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, is_volatile, false); 24.125 + case vmIntrinsics::_putCharVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, is_volatile, false); 24.126 + case vmIntrinsics::_putIntVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, is_volatile, false); 24.127 + case vmIntrinsics::_putLongVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, is_volatile, false); 24.128 + case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile, false); 24.129 + case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile, false); 24.130 24.131 case 
vmIntrinsics::_prefetchRead: return inline_unsafe_prefetch(!is_native_ptr, !is_store, !is_static); 24.132 case vmIntrinsics::_prefetchWrite: return inline_unsafe_prefetch(!is_native_ptr, is_store, !is_static); 24.133 @@ -2554,8 +2554,9 @@ 24.134 return NULL; 24.135 } 24.136 24.137 -bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) { 24.138 +bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) { 24.139 if (callee()->is_static()) return false; // caller must have the capability! 24.140 + assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type"); 24.141 24.142 #ifndef PRODUCT 24.143 { 24.144 @@ -2629,16 +2630,45 @@ 24.145 val = is_store ? argument(3) : NULL; 24.146 } 24.147 24.148 + // Can base be NULL? Otherwise, always on-heap access. 24.149 + bool can_access_non_heap = TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop)); 24.150 + 24.151 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); 24.152 24.153 + // Try to categorize the address. 24.154 + Compile::AliasType* alias_type = C->alias_type(adr_type); 24.155 + assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); 24.156 + 24.157 + if (alias_type->adr_type() == TypeInstPtr::KLASS || 24.158 + alias_type->adr_type() == TypeAryPtr::RANGE) { 24.159 + return false; // not supported 24.160 + } 24.161 + 24.162 + bool mismatched = false; 24.163 + BasicType bt = alias_type->basic_type(); 24.164 + if (bt != T_ILLEGAL) { 24.165 + assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access"); 24.166 + if (bt == T_BYTE && adr_type->isa_aryptr()) { 24.167 + // Alias type doesn't differentiate between byte[] and boolean[]). 24.168 + // Use address type to get the element type. 
24.169 + bt = adr_type->is_aryptr()->elem()->array_element_basic_type(); 24.170 + } 24.171 + if (bt == T_ARRAY || bt == T_NARROWOOP) { 24.172 + // accessing an array field with getObject is not a mismatch 24.173 + bt = T_OBJECT; 24.174 + } 24.175 + if ((bt == T_OBJECT) != (type == T_OBJECT)) { 24.176 + // Don't intrinsify mismatched object accesses 24.177 + return false; 24.178 + } 24.179 + mismatched = (bt != type); 24.180 + } 24.181 + 24.182 + assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched"); 24.183 + 24.184 // First guess at the value type. 24.185 const Type *value_type = Type::get_const_basic_type(type); 24.186 24.187 - // Try to categorize the address. If it comes up as TypeJavaPtr::BOTTOM, 24.188 - // there was not enough information to nail it down. 24.189 - Compile::AliasType* alias_type = C->alias_type(adr_type); 24.190 - assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); 24.191 - 24.192 // We will need memory barriers unless we can determine a unique 24.193 // alias category for this reference. (Note: If for some reason 24.194 // the barriers get omitted and the unsafe reference begins to "pollute" 24.195 @@ -2701,7 +2731,7 @@ 24.196 MemNode::MemOrd mo = is_volatile ? MemNode::acquire : MemNode::unordered; 24.197 // To be valid, unsafe loads may depend on other conditions than 24.198 // the one that guards them: pin the Load node 24.199 - Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile); 24.200 + Node* p = make_load(control(), adr, value_type, type, adr_type, mo, LoadNode::Pinned, is_volatile, unaligned, mismatched); 24.201 // load value 24.202 switch (type) { 24.203 case T_BOOLEAN: 24.204 @@ -2747,12 +2777,12 @@ 24.205 24.206 MemNode::MemOrd mo = is_volatile ? 
MemNode::release : MemNode::unordered; 24.207 if (type != T_OBJECT ) { 24.208 - (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile); 24.209 + (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile, unaligned, mismatched); 24.210 } else { 24.211 // Possibly an oop being stored to Java heap or native memory 24.212 - if (!TypePtr::NULL_PTR->higher_equal(_gvn.type(heap_base_oop))) { 24.213 + if (!can_access_non_heap) { 24.214 // oop to Java heap. 24.215 - (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo); 24.216 + (void) store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched); 24.217 } else { 24.218 // We can't tell at compile time if we are storing in the Java heap or outside 24.219 // of it. So we need to emit code to conditionally do the proper type of 24.220 @@ -2764,11 +2794,11 @@ 24.221 __ if_then(heap_base_oop, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); { 24.222 // Sync IdealKit and graphKit. 24.223 sync_kit(ideal); 24.224 - Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo); 24.225 + Node* st = store_oop_to_unknown(control(), heap_base_oop, adr, adr_type, val, type, mo, mismatched); 24.226 // Update IdealKit memory. 24.227 __ sync_kit(this); 24.228 } __ else_(); { 24.229 - __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile); 24.230 + __ store(__ ctrl(), adr, val, type, alias_type->index(), mo, is_volatile, mismatched); 24.231 } __ end_if(); 24.232 // Final sync IdealKit and GraphKit. 24.233 final_sync(ideal); 24.234 @@ -2939,12 +2969,6 @@ 24.235 newval = argument(4); // type: oop, int, or long 24.236 } 24.237 24.238 - // Null check receiver. 24.239 - receiver = null_check(receiver); 24.240 - if (stopped()) { 24.241 - return true; 24.242 - } 24.243 - 24.244 // Build field offset expression. 
24.245 // We currently rely on the cookies produced by Unsafe.xxxFieldOffset 24.246 // to be plain byte offsets, which are also the same as those accepted 24.247 @@ -2955,11 +2979,18 @@ 24.248 Node* adr = make_unsafe_address(base, offset); 24.249 const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); 24.250 24.251 + Compile::AliasType* alias_type = C->alias_type(adr_type); 24.252 + BasicType bt = alias_type->basic_type(); 24.253 + if (bt != T_ILLEGAL && 24.254 + ((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) { 24.255 + // Don't intrinsify mismatched object accesses. 24.256 + return false; 24.257 + } 24.258 + 24.259 // For CAS, unlike inline_unsafe_access, there seems no point in 24.260 // trying to refine types. Just use the coarse types here. 24.261 + assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); 24.262 const Type *value_type = Type::get_const_basic_type(type); 24.263 - Compile::AliasType* alias_type = C->alias_type(adr_type); 24.264 - assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here"); 24.265 24.266 if (kind == LS_xchg && type == T_OBJECT) { 24.267 const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); 24.268 @@ -2968,6 +2999,12 @@ 24.269 } 24.270 } 24.271 24.272 + // Null check receiver. 24.273 + receiver = null_check(receiver); 24.274 + if (stopped()) { 24.275 + return true; 24.276 + } 24.277 + 24.278 int alias_idx = C->get_alias_index(adr_type); 24.279 24.280 // Memory-model-wise, a LoadStore acts like a little synchronized
25.1 --- a/src/share/vm/opto/memnode.cpp Tue Dec 20 15:36:08 2016 -0800 25.2 +++ b/src/share/vm/opto/memnode.cpp Wed Jan 04 19:44:02 2017 +0000 25.3 @@ -67,8 +67,15 @@ 25.4 dump_adr_type(this, _adr_type, st); 25.5 25.6 Compile* C = Compile::current(); 25.7 - if( C->alias_type(_adr_type)->is_volatile() ) 25.8 + if (C->alias_type(_adr_type)->is_volatile()) { 25.9 st->print(" Volatile!"); 25.10 + } 25.11 + if (_unaligned_access) { 25.12 + st->print(" unaligned"); 25.13 + } 25.14 + if (_mismatched_access) { 25.15 + st->print(" mismatched"); 25.16 + } 25.17 } 25.18 25.19 void MemNode::dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st) { 25.20 @@ -3322,6 +3329,9 @@ 25.21 // within the initialized memory. 25.22 intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) { 25.23 const int FAIL = 0; 25.24 + if (st->is_unaligned_access()) { 25.25 + return FAIL; 25.26 + } 25.27 if (st->req() != MemNode::ValueIn + 1) 25.28 return FAIL; // an inscrutable StoreNode (card mark?) 25.29 Node* ctl = st->in(MemNode::Control);
26.1 --- a/src/share/vm/opto/memnode.hpp Tue Dec 20 15:36:08 2016 -0800 26.2 +++ b/src/share/vm/opto/memnode.hpp Wed Jan 04 19:44:02 2017 +0000 26.3 @@ -39,11 +39,14 @@ 26.4 //------------------------------MemNode---------------------------------------- 26.5 // Load or Store, possibly throwing a NULL pointer exception 26.6 class MemNode : public Node { 26.7 +private: 26.8 + bool _unaligned_access; // Unaligned access from unsafe 26.9 + bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance 26.10 protected: 26.11 #ifdef ASSERT 26.12 const TypePtr* _adr_type; // What kind of memory is being addressed? 26.13 #endif 26.14 - virtual uint size_of() const; // Size is bigger (ASSERT only) 26.15 + virtual uint size_of() const; 26.16 public: 26.17 enum { Control, // When is it safe to do this load? 26.18 Memory, // Chunk of memory is being loaded from 26.19 @@ -57,17 +60,17 @@ 26.20 } MemOrd; 26.21 protected: 26.22 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) 26.23 - : Node(c0,c1,c2 ) { 26.24 + : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false) { 26.25 init_class_id(Class_Mem); 26.26 debug_only(_adr_type=at; adr_type();) 26.27 } 26.28 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) 26.29 - : Node(c0,c1,c2,c3) { 26.30 + : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) { 26.31 init_class_id(Class_Mem); 26.32 debug_only(_adr_type=at; adr_type();) 26.33 } 26.34 MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) 26.35 - : Node(c0,c1,c2,c3,c4) { 26.36 + : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) { 26.37 init_class_id(Class_Mem); 26.38 debug_only(_adr_type=at; adr_type();) 26.39 } 26.40 @@ -129,6 +132,11 @@ 26.41 // the given memory state? (The state may or may not be in(Memory).) 
26.42 Node* can_see_stored_value(Node* st, PhaseTransform* phase) const; 26.43 26.44 + void set_unaligned_access() { _unaligned_access = true; } 26.45 + bool is_unaligned_access() const { return _unaligned_access; } 26.46 + void set_mismatched_access() { _mismatched_access = true; } 26.47 + bool is_mismatched_access() const { return _mismatched_access; } 26.48 + 26.49 #ifndef PRODUCT 26.50 static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st); 26.51 virtual void dump_spec(outputStream *st) const;
27.1 --- a/src/share/vm/opto/type.hpp Tue Dec 20 15:36:08 2016 -0800 27.2 +++ b/src/share/vm/opto/type.hpp Wed Jan 04 19:44:02 2017 +0000 27.3 @@ -209,11 +209,11 @@ 27.4 static int cmp( const Type *const t1, const Type *const t2 ); 27.5 // Test for higher or equal in lattice 27.6 // Variant that drops the speculative part of the types 27.7 - int higher_equal(const Type *t) const { 27.8 + bool higher_equal(const Type *t) const { 27.9 return !cmp(meet(t),t->remove_speculative()); 27.10 } 27.11 // Variant that keeps the speculative part of the types 27.12 - int higher_equal_speculative(const Type *t) const { 27.13 + bool higher_equal_speculative(const Type *t) const { 27.14 return !cmp(meet_speculative(t),t); 27.15 } 27.16
28.1 --- a/src/share/vm/runtime/fieldDescriptor.hpp Tue Dec 20 15:36:08 2016 -0800 28.2 +++ b/src/share/vm/runtime/fieldDescriptor.hpp Wed Jan 04 19:44:02 2017 +0000 28.3 @@ -106,6 +106,7 @@ 28.4 bool is_field_access_watched() const { return access_flags().is_field_access_watched(); } 28.5 bool is_field_modification_watched() const 28.6 { return access_flags().is_field_modification_watched(); } 28.7 + bool has_initialized_final_update() const { return access_flags().has_field_initialized_final_update(); } 28.8 bool has_generic_signature() const { return access_flags().field_has_generic_signature(); } 28.9 28.10 void set_is_field_access_watched(const bool value) { 28.11 @@ -118,6 +119,11 @@ 28.12 update_klass_field_access_flag(); 28.13 } 28.14 28.15 + void set_has_initialized_final_update(const bool value) { 28.16 + _access_flags.set_has_field_initialized_final_update(value); 28.17 + update_klass_field_access_flag(); 28.18 + } 28.19 + 28.20 // Initialization 28.21 void reinitialize(InstanceKlass* ik, int index); 28.22
29.1 --- a/src/share/vm/runtime/os.cpp Tue Dec 20 15:36:08 2016 -0800 29.2 +++ b/src/share/vm/runtime/os.cpp Wed Jan 04 19:44:02 2017 +0000 29.3 @@ -1,5 +1,5 @@ 29.4 /* 29.5 - * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 29.6 + * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved. 29.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.8 * 29.9 * This code is free software; you can redistribute it and/or modify it 29.10 @@ -78,6 +78,7 @@ 29.11 uintptr_t os::_serialize_page_mask = 0; 29.12 long os::_rand_seed = 1; 29.13 int os::_processor_count = 0; 29.14 +int os::_initial_active_processor_count = 0; 29.15 size_t os::_page_sizes[os::page_sizes_max]; 29.16 29.17 #ifndef PRODUCT 29.18 @@ -322,6 +323,7 @@ 29.19 } 29.20 29.21 void os::init_before_ergo() { 29.22 + initialize_initial_active_processor_count(); 29.23 // We need to initialize large page support here because ergonomics takes some 29.24 // decisions depending on large page support and the calculated large page size. 29.25 large_page_init(); 29.26 @@ -835,7 +837,11 @@ 29.27 st->print("CPU:"); 29.28 st->print("total %d", os::processor_count()); 29.29 // It's not safe to query number of active processors after crash 29.30 - // st->print("(active %d)", os::active_processor_count()); 29.31 + // st->print("(active %d)", os::active_processor_count()); but we can 29.32 + // print the initial number of active processors. 29.33 + // We access the raw value here because the assert in the accessor will 29.34 + // fail if the crash occurs before initialization of this value. 
29.35 + st->print(" (initial active %d)", _initial_active_processor_count); 29.36 st->print(" %s", VM_Version::cpu_features()); 29.37 st->cr(); 29.38 pd_print_cpu_info(st); 29.39 @@ -1418,6 +1424,11 @@ 29.40 return result; 29.41 } 29.42 29.43 +void os::initialize_initial_active_processor_count() { 29.44 + assert(_initial_active_processor_count == 0, "Initial active processor count already set."); 29.45 + _initial_active_processor_count = active_processor_count(); 29.46 +} 29.47 + 29.48 void os::SuspendedThreadTask::run() { 29.49 assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this"); 29.50 internal_do_task();
30.1 --- a/src/share/vm/runtime/os.hpp Tue Dec 20 15:36:08 2016 -0800 30.2 +++ b/src/share/vm/runtime/os.hpp Wed Jan 04 19:44:02 2017 +0000 30.3 @@ -151,6 +151,7 @@ 30.4 30.5 static size_t page_size_for_region(size_t region_size, size_t min_pages, bool must_be_aligned); 30.6 30.7 + static void initialize_initial_active_processor_count(); 30.8 public: 30.9 static void init(void); // Called before command line parsing 30.10 static void init_before_ergo(void); // Called after command line parsing 30.11 @@ -238,6 +239,13 @@ 30.12 // Note that on some OSes this can change dynamically. 30.13 static int active_processor_count(); 30.14 30.15 + // At startup the number of active CPUs this process is allowed to run on. 30.16 + // This value does not change dynamically. May be different from active_processor_count(). 30.17 + static int initial_active_processor_count() { 30.18 + assert(_initial_active_processor_count > 0, "Initial active processor count not set yet."); 30.19 + return _initial_active_processor_count; 30.20 + } 30.21 + 30.22 // Bind processes to processors. 30.23 // This is a two step procedure: 30.24 // first you generate a distribution of processes to processors, 30.25 @@ -975,8 +983,9 @@ 30.26 30.27 30.28 protected: 30.29 - static long _rand_seed; // seed for random number generator 30.30 - static int _processor_count; // number of processors 30.31 + static long _rand_seed; // seed for random number generator 30.32 + static int _processor_count; // number of processors 30.33 + static int _initial_active_processor_count; // number of active processors during initialization. 30.34 30.35 static char* format_boot_path(const char* format_string, 30.36 const char* home,
31.1 --- a/src/share/vm/runtime/vm_version.cpp Tue Dec 20 15:36:08 2016 -0800 31.2 +++ b/src/share/vm/runtime/vm_version.cpp Wed Jan 04 19:44:02 2017 +0000 31.3 @@ -1,5 +1,5 @@ 31.4 /* 31.5 - * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved. 31.6 + * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved. 31.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 31.8 * 31.9 * This code is free software; you can redistribute it and/or modify it 31.10 @@ -296,7 +296,7 @@ 31.11 // processor after the first 8. For example, on a 72 cpu machine 31.12 // and a chosen fraction of 5/8 31.13 // use 8 + (72 - 8) * (5/8) == 48 worker threads. 31.14 - unsigned int ncpus = (unsigned int) os::active_processor_count(); 31.15 + unsigned int ncpus = (unsigned int) os::initial_active_processor_count(); 31.16 return (ncpus <= switch_pt) ? 31.17 ncpus : 31.18 (switch_pt + ((ncpus - switch_pt) * num) / den);
32.1 --- a/src/share/vm/services/attachListener.cpp Tue Dec 20 15:36:08 2016 -0800 32.2 +++ b/src/share/vm/services/attachListener.cpp Wed Jan 04 19:44:02 2017 +0000 32.3 @@ -1,5 +1,5 @@ 32.4 /* 32.5 - * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 32.6 + * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved. 32.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 32.8 * 32.9 * This code is free software; you can redistribute it and/or modify it 32.10 @@ -271,13 +271,17 @@ 32.11 // set a uintx global flag using value from AttachOperation 32.12 static jint set_uintx_flag(const char* name, AttachOperation* op, outputStream* out) { 32.13 uintx value; 32.14 - const char* arg1; 32.15 - if ((arg1 = op->arg(1)) != NULL) { 32.16 - int n = sscanf(arg1, UINTX_FORMAT, &value); 32.17 - if (n != 1) { 32.18 - out->print_cr("flag value must be an unsigned integer"); 32.19 - return JNI_ERR; 32.20 - } 32.21 + 32.22 + const char* arg1 = op->arg(1); 32.23 + if (arg1 == NULL) { 32.24 + out->print_cr("flag value must be specified"); 32.25 + return JNI_ERR; 32.26 + } 32.27 + 32.28 + int n = sscanf(arg1, UINTX_FORMAT, &value); 32.29 + if (n != 1) { 32.30 + out->print_cr("flag value must be an unsigned integer"); 32.31 + return JNI_ERR; 32.32 } 32.33 32.34 if (strncmp(name, "MaxHeapFreeRatio", 17) == 0) {
33.1 --- a/src/share/vm/utilities/accessFlags.hpp Tue Dec 20 15:36:08 2016 -0800 33.2 +++ b/src/share/vm/utilities/accessFlags.hpp Wed Jan 04 19:44:02 2017 +0000 33.3 @@ -76,11 +76,12 @@ 33.4 // These bits must not conflict with any other field-related access flags 33.5 // (e.g., ACC_ENUM). 33.6 // Note that the class-related ACC_ANNOTATION bit conflicts with these flags. 33.7 - JVM_ACC_FIELD_ACCESS_WATCHED = 0x00002000, // field access is watched by JVMTI 33.8 - JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000, // field modification is watched by JVMTI 33.9 - JVM_ACC_FIELD_INTERNAL = 0x00000400, // internal field, same as JVM_ACC_ABSTRACT 33.10 - JVM_ACC_FIELD_STABLE = 0x00000020, // @Stable field, same as JVM_ACC_SYNCHRONIZED 33.11 - JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature 33.12 + JVM_ACC_FIELD_ACCESS_WATCHED = 0x00002000, // field access is watched by JVMTI 33.13 + JVM_ACC_FIELD_MODIFICATION_WATCHED = 0x00008000, // field modification is watched by JVMTI 33.14 + JVM_ACC_FIELD_INTERNAL = 0x00000400, // internal field, same as JVM_ACC_ABSTRACT 33.15 + JVM_ACC_FIELD_STABLE = 0x00000020, // @Stable field, same as JVM_ACC_SYNCHRONIZED and JVM_ACC_SUPER 33.16 + JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE = 0x00000100, // (static) final field updated outside (class) initializer, same as JVM_ACC_NATIVE 33.17 + JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE = 0x00000800, // field has generic signature 33.18 33.19 JVM_ACC_FIELD_INTERNAL_FLAGS = JVM_ACC_FIELD_ACCESS_WATCHED | 33.20 JVM_ACC_FIELD_MODIFICATION_WATCHED | 33.21 @@ -150,6 +151,8 @@ 33.22 bool is_field_access_watched() const { return (_flags & JVM_ACC_FIELD_ACCESS_WATCHED) != 0; } 33.23 bool is_field_modification_watched() const 33.24 { return (_flags & JVM_ACC_FIELD_MODIFICATION_WATCHED) != 0; } 33.25 + bool has_field_initialized_final_update() const 33.26 + { return (_flags & JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE) != 0; } 33.27 bool on_stack() const { return (_flags & JVM_ACC_ON_STACK) 
!= 0; } 33.28 bool is_internal() const { return (_flags & JVM_ACC_FIELD_INTERNAL) != 0; } 33.29 bool is_stable() const { return (_flags & JVM_ACC_FIELD_STABLE) != 0; } 33.30 @@ -229,6 +232,15 @@ 33.31 atomic_clear_bits(JVM_ACC_FIELD_MODIFICATION_WATCHED); 33.32 } 33.33 } 33.34 + 33.35 + void set_has_field_initialized_final_update(const bool value) { 33.36 + if (value) { 33.37 + atomic_set_bits(JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE); 33.38 + } else { 33.39 + atomic_clear_bits(JVM_ACC_FIELD_INITIALIZED_FINAL_UPDATE); 33.40 + } 33.41 + } 33.42 + 33.43 void set_field_has_generic_signature() 33.44 { 33.45 atomic_set_bits(JVM_ACC_FIELD_HAS_GENERIC_SIGNATURE);
34.1 --- a/test/compiler/jsr292/NullConstantReceiver.java Tue Dec 20 15:36:08 2016 -0800 34.2 +++ b/test/compiler/jsr292/NullConstantReceiver.java Wed Jan 04 19:44:02 2017 +0000 34.3 @@ -1,5 +1,5 @@ 34.4 /* 34.5 - * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 34.6 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 34.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 34.8 * 34.9 * This code is free software; you can redistribute it and/or modify it 34.10 @@ -23,8 +23,11 @@ 34.11 34.12 /** 34.13 * @test 34.14 - * @bug 8059556 34.15 + * @bug 8059556 8158639 8164508 34.16 + * 34.17 * @run main/othervm -Xbatch NullConstantReceiver 34.18 + * @run main/othervm -Xbatch -XX:CompileCommand=exclude,*::run NullConstantReceiver 34.19 + * @run main/othervm -Xbatch -XX:CompileCommand=compileonly,*::run NullConstantReceiver 34.20 */ 34.21 34.22 import java.lang.invoke.MethodHandle;
35.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 35.2 +++ b/test/compiler/profiling/UnsafeAccess.java Wed Jan 04 19:44:02 2017 +0000 35.3 @@ -0,0 +1,88 @@ 35.4 +/* 35.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 35.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 35.7 + * 35.8 + * This code is free software; you can redistribute it and/or modify it 35.9 + * under the terms of the GNU General Public License version 2 only, as 35.10 + * published by the Free Software Foundation. 35.11 + * 35.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 35.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 35.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 35.15 + * version 2 for more details (a copy is included in the LICENSE file that 35.16 + * accompanied this code). 35.17 + * 35.18 + * You should have received a copy of the GNU General Public License version 35.19 + * 2 along with this work; if not, write to the Free Software Foundation, 35.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 35.21 + * 35.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 35.23 + * or visit www.oracle.com if you need additional information or have any 35.24 + * questions. 
35.25 + */ 35.26 +/* 35.27 + * @test 35.28 + * @bug 8134918 35.29 + * @modules java.base/jdk.internal.misc 35.30 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UseTypeSpeculation -Xbatch 35.31 + * -XX:CompileCommand=dontinline,UnsafeAccess::test* 35.32 + * UnsafeAccess 35.33 + */ 35.34 +import sun.misc.Unsafe; 35.35 + 35.36 +public class UnsafeAccess { 35.37 + private static final Unsafe U = Unsafe.getUnsafe(); 35.38 + 35.39 + static Class cls = Object.class; 35.40 + static long off = U.ARRAY_OBJECT_BASE_OFFSET; 35.41 + 35.42 + static Object testUnsafeAccess(Object o, boolean isObjArray) { 35.43 + if (o != null && cls.isInstance(o)) { // speculates "o" type to int[] 35.44 + return helperUnsafeAccess(o, isObjArray); 35.45 + } 35.46 + return null; 35.47 + } 35.48 + 35.49 + static Object helperUnsafeAccess(Object o, boolean isObjArray) { 35.50 + if (isObjArray) { 35.51 + U.putObject(o, off, new Object()); 35.52 + } 35.53 + return o; 35.54 + } 35.55 + 35.56 + static Object testUnsafeLoadStore(Object o, boolean isObjArray) { 35.57 + if (o != null && cls.isInstance(o)) { // speculates "o" type to int[] 35.58 + return helperUnsafeLoadStore(o, isObjArray); 35.59 + } 35.60 + return null; 35.61 + } 35.62 + 35.63 + static Object helperUnsafeLoadStore(Object o, boolean isObjArray) { 35.64 + if (isObjArray) { 35.65 + Object o1 = U.getObject(o, off); 35.66 + U.compareAndSwapObject(o, off, o1, new Object()); 35.67 + } 35.68 + return o; 35.69 + } 35.70 + 35.71 + public static void main(String[] args) { 35.72 + Object[] objArray = new Object[10]; 35.73 + int[] intArray = new int[10]; 35.74 + 35.75 + for (int i = 0; i < 20_000; i++) { 35.76 + helperUnsafeAccess(objArray, true); 35.77 + } 35.78 + for (int i = 0; i < 20_000; i++) { 35.79 + testUnsafeAccess(intArray, false); 35.80 + } 35.81 + 35.82 + for (int i = 0; i < 20_000; i++) { 35.83 + helperUnsafeLoadStore(objArray, true); 35.84 + } 35.85 + for (int i = 0; i < 20_000; i++) { 
35.86 + testUnsafeLoadStore(intArray, false); 35.87 + } 35.88 + 35.89 + System.out.println("TEST PASSED"); 35.90 + } 35.91 +}
36.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 36.2 +++ b/test/compiler/unsafe/OpaqueAccesses.java Wed Jan 04 19:44:02 2017 +0000 36.3 @@ -0,0 +1,181 @@ 36.4 +/* 36.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 36.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 36.7 + * 36.8 + * This code is free software; you can redistribute it and/or modify it 36.9 + * under the terms of the GNU General Public License version 2 only, as 36.10 + * published by the Free Software Foundation. Oracle designates this 36.11 + * particular file as subject to the "Classpath" exception as provided 36.12 + * by Oracle in the LICENSE file that accompanied this code. 36.13 + * 36.14 + * This code is distributed in the hope that it will be useful, but WITHOUT 36.15 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 36.16 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 36.17 + * version 2 for more details (a copy is included in the LICENSE file that 36.18 + * accompanied this code). 36.19 + * 36.20 + * You should have received a copy of the GNU General Public License version 36.21 + * 2 along with this work; if not, write to the Free Software Foundation, 36.22 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 36.23 + * 36.24 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 36.25 + * or visit www.oracle.com if you need additional information or have any 36.26 + * questions. 
36.27 + */ 36.28 + 36.29 +/* 36.30 + * @test 36.31 + * @bug 8155781 36.32 + * @modules java.base/jdk.internal.misc 36.33 + * 36.34 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions 36.35 + * -XX:-TieredCompilation -Xbatch 36.36 + * -XX:+UseCompressedOops -XX:+UseCompressedClassPointers 36.37 + * -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test* 36.38 + * compiler.unsafe.OpaqueAccesses 36.39 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions 36.40 + * -XX:-TieredCompilation -Xbatch 36.41 + * -XX:+UseCompressedOops -XX:-UseCompressedClassPointers 36.42 + * -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test* 36.43 + * compiler.unsafe.OpaqueAccesses 36.44 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions 36.45 + * -XX:-TieredCompilation -Xbatch 36.46 + * -XX:-UseCompressedOops -XX:+UseCompressedClassPointers 36.47 + * -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test* 36.48 + * compiler.unsafe.OpaqueAccesses 36.49 + * @run main/bootclasspath/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions 36.50 + * -XX:-TieredCompilation -Xbatch 36.51 + * -XX:-UseCompressedOops -XX:-UseCompressedClassPointers 36.52 + * -XX:CompileCommand=dontinline,compiler.unsafe.OpaqueAccesses::test* 36.53 + * compiler.unsafe.OpaqueAccesses 36.54 + */ 36.55 +package compiler.unsafe; 36.56 + 36.57 +import sun.misc.Unsafe; 36.58 + 36.59 +import java.lang.reflect.Field; 36.60 + 36.61 +public class OpaqueAccesses { 36.62 + private static final Unsafe UNSAFE = Unsafe.getUnsafe(); 36.63 + 36.64 + private static final Object INSTANCE = new OpaqueAccesses(); 36.65 + 36.66 + private static final Object[] ARRAY = new Object[10]; 36.67 + 36.68 + private static final long F_OFFSET; 36.69 + private static final long E_OFFSET; 36.70 + 36.71 + static { 36.72 + try { 36.73 + Field field = 
OpaqueAccesses.class.getDeclaredField("f"); 36.74 + F_OFFSET = UNSAFE.objectFieldOffset(field); 36.75 + 36.76 + E_OFFSET = UNSAFE.arrayBaseOffset(ARRAY.getClass()); 36.77 + } catch (NoSuchFieldException e) { 36.78 + throw new Error(e); 36.79 + } 36.80 + } 36.81 + 36.82 + private Object f = new Object(); 36.83 + private long l1, l2; 36.84 + 36.85 + static Object testFixedOffsetField(Object o) { 36.86 + return UNSAFE.getObject(o, F_OFFSET); 36.87 + } 36.88 + 36.89 + static int testFixedOffsetHeader0(Object o) { 36.90 + return UNSAFE.getInt(o, 0); 36.91 + } 36.92 + 36.93 + static int testFixedOffsetHeader4(Object o) { 36.94 + return UNSAFE.getInt(o, 4); 36.95 + } 36.96 + 36.97 + static int testFixedOffsetHeader8(Object o) { 36.98 + return UNSAFE.getInt(o, 8); 36.99 + } 36.100 + 36.101 + static int testFixedOffsetHeader12(Object o) { 36.102 + return UNSAFE.getInt(o, 12); 36.103 + } 36.104 + 36.105 + static int testFixedOffsetHeader16(Object o) { 36.106 + return UNSAFE.getInt(o, 16); 36.107 + } 36.108 + 36.109 + static Object testFixedBase(long off) { 36.110 + return UNSAFE.getObject(INSTANCE, off); 36.111 + } 36.112 + 36.113 + static Object testOpaque(Object o, long off) { 36.114 + return UNSAFE.getObject(o, off); 36.115 + } 36.116 + 36.117 + static int testFixedOffsetHeaderArray0(Object[] arr) { 36.118 + return UNSAFE.getInt(arr, 0); 36.119 + } 36.120 + 36.121 + static int testFixedOffsetHeaderArray4(Object[] arr) { 36.122 + return UNSAFE.getInt(arr, 4); 36.123 + } 36.124 + 36.125 + static int testFixedOffsetHeaderArray8(Object[] arr) { 36.126 + return UNSAFE.getInt(arr, 8); 36.127 + } 36.128 + 36.129 + static int testFixedOffsetHeaderArray12(Object[] arr) { 36.130 + return UNSAFE.getInt(arr, 12); 36.131 + } 36.132 + 36.133 + static int testFixedOffsetHeaderArray16(Object[] arr) { 36.134 + return UNSAFE.getInt(arr, 16); 36.135 + } 36.136 + 36.137 + static Object testFixedOffsetArray(Object[] arr) { 36.138 + return UNSAFE.getObject(arr, E_OFFSET); 36.139 + } 36.140 + 
36.141 + static Object testFixedBaseArray(long off) { 36.142 + return UNSAFE.getObject(ARRAY, off); 36.143 + } 36.144 + 36.145 + static Object testOpaqueArray(Object[] o, long off) { 36.146 + return UNSAFE.getObject(o, off); 36.147 + } 36.148 + 36.149 + static final long ADDR = UNSAFE.allocateMemory(10); 36.150 + static boolean flag; 36.151 + 36.152 + static int testMixedAccess() { 36.153 + flag = !flag; 36.154 + Object o = (flag ? INSTANCE : null); 36.155 + long off = (flag ? F_OFFSET : ADDR); 36.156 + return UNSAFE.getInt(o, off); 36.157 + } 36.158 + 36.159 + public static void main(String[] args) { 36.160 + for (int i = 0; i < 20_000; i++) { 36.161 + // Instance 36.162 + testFixedOffsetField(INSTANCE); 36.163 + testFixedOffsetHeader0(INSTANCE); 36.164 + testFixedOffsetHeader4(INSTANCE); 36.165 + testFixedOffsetHeader8(INSTANCE); 36.166 + testFixedOffsetHeader12(INSTANCE); 36.167 + testFixedOffsetHeader16(INSTANCE); 36.168 + testFixedBase(F_OFFSET); 36.169 + testOpaque(INSTANCE, F_OFFSET); 36.170 + testMixedAccess(); 36.171 + 36.172 + // Array 36.173 + testFixedOffsetHeaderArray0(ARRAY); 36.174 + testFixedOffsetHeaderArray4(ARRAY); 36.175 + testFixedOffsetHeaderArray8(ARRAY); 36.176 + testFixedOffsetHeaderArray12(ARRAY); 36.177 + testFixedOffsetHeaderArray16(ARRAY); 36.178 + testFixedOffsetArray(ARRAY); 36.179 + testFixedBaseArray(E_OFFSET); 36.180 + testOpaqueArray(ARRAY, E_OFFSET); 36.181 + } 36.182 + System.out.println("TEST PASSED"); 36.183 + } 36.184 +}
37.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 37.2 +++ b/test/runtime/Final/Bad.jasm Wed Jan 04 19:44:02 2017 +0000 37.3 @@ -0,0 +1,55 @@ 37.4 +/* 37.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 37.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 37.7 + * 37.8 + * This code is free software; you can redistribute it and/or modify it 37.9 + * under the terms of the GNU General Public License version 2 only, as 37.10 + * published by the Free Software Foundation. 37.11 + * 37.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 37.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 37.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 37.15 + * version 2 for more details (a copy is included in the LICENSE file that 37.16 + * accompanied this code). 37.17 + * 37.18 + * You should have received a copy of the GNU General Public License version 37.19 + * 2 along with this work; if not, write to the Free Software Foundation, 37.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 37.21 + * 37.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 37.23 + * or visit www.oracle.com if you need additional information or have any 37.24 + * questions. 37.25 + */ 37.26 + 37.27 +/* Recoded in jasm to provoke an ICCE assigning a non-static final field with putstatic. 
37.28 +class Bad { 37.29 + public static final int i; //rewritten 37.30 + //rewritten to: public final int i; 37.31 + static { i = 5; } // putstatic instruction 37.32 +} 37.33 +*/ 37.34 + 37.35 +super class Bad 37.36 + version 53:0 37.37 +{ 37.38 + 37.39 +// Remove 'static' keyword 37.40 +public final Field i:I; 37.41 + 37.42 +Method "<init>":"()V" 37.43 + stack 1 locals 1 37.44 +{ 37.45 + aload_0; 37.46 + invokespecial Method java/lang/Object."<init>":"()V"; 37.47 + return; 37.48 +} 37.49 + 37.50 +static Method "<clinit>":"()V" 37.51 + stack 1 locals 0 37.52 +{ 37.53 + iconst_5; 37.54 + putstatic Field i:"I"; 37.55 + return; 37.56 +} 37.57 + 37.58 +} // end Class Bad
38.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 38.2 +++ b/test/runtime/Final/PutfieldError.java Wed Jan 04 19:44:02 2017 +0000 38.3 @@ -0,0 +1,42 @@ 38.4 +/* 38.5 + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 38.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 38.7 + * 38.8 + * This code is free software; you can redistribute it and/or modify it 38.9 + * under the terms of the GNU General Public License version 2 only, as 38.10 + * published by the Free Software Foundation. 38.11 + * 38.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 38.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 38.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 38.15 + * version 2 for more details (a copy is included in the LICENSE file that 38.16 + * accompanied this code). 38.17 + * 38.18 + * You should have received a copy of the GNU General Public License version 38.19 + * 2 along with this work; if not, write to the Free Software Foundation, 38.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 38.21 + * 38.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 38.23 + * or visit www.oracle.com if you need additional information or have any 38.24 + * questions. 38.25 + */ 38.26 + 38.27 +/* 38.28 + * @test PutfieldError 38.29 + * @bug 8160551 38.30 + * @summary Throw ICCE rather than crashing for nonstatic final field in static initializer 38.31 + * @compile Bad.jasm 38.32 + * @run main PutfieldError 38.33 + */ 38.34 + 38.35 +public class PutfieldError { 38.36 + public static void main(java.lang.String[] unused) { 38.37 + try { 38.38 + Bad b = new Bad(); 38.39 + System.out.println("Bad.i = " + 5); 38.40 + throw new RuntimeException("ICCE NOT thrown as expected"); 38.41 + } catch (IncompatibleClassChangeError icce) { 38.42 + System.out.println("ICCE thrown as expected"); 38.43 + } 38.44 + } 38.45 +}