Thu, 29 Oct 2015 17:03:53 -0700
Merge
--- a/.hgtags  Fri Oct 16 12:15:09 2015 -0700
+++ b/.hgtags  Thu Oct 29 17:03:53 2015 -0700
@@ -766,4 +766,14 @@
 67df26e363fb7e722032fd286673642fc999957c jdk8u71-b01
 1a799d49de23d84f658ade1d3805a1924e7e1e84 jdk8u71-b02
 e06f49d82ef8128b3637937d383b6f0862650deb jdk8u71-b03
+7466029bf3cd3d5eea3055c4f790728263be4a2e jdk8u71-b04
+8a402d51763c083151d0cb434647bd6e1ba4353f jdk8u71-b05
+7dd34cca3538c9bef74a8a1976e14ca51e9857f9 jdk8u71-b06
+b8f426369187c32551f0a3d571d933908988c81c jdk8u72-b00
+c0205eddb31766ece562483595ec28a7506971e9 jdk8u72-b01
+15ef554f2f2e0a8d7c330191432fcd2126d19dab jdk8u72-b02
+bb98a4ba1556d0505461de98aa3dddf75278c09b jdk8u72-b03
+6c8ceb05ccf78f2f8f72f0870e3f6f3bd4895bb1 jdk8u72-b04
+a2969911663ab29c71a61aa3403e53243ad89923 jdk8u72-b05
+acf0d80cb84f14d787c34360abf2bc38b186999a jdk8u72-b06
 c0242ea4bde19d72be5149feda112a39e8c89b0a jdk8u75-b00
--- a/src/os/linux/vm/os_linux.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/os/linux/vm/os_linux.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -5922,9 +5922,11 @@
       status = pthread_mutex_unlock(_mutex);
       assert (status == 0, "invariant");
     } else {
+      // must capture correct index before unlocking
+      int index = _cur_index;
       status = pthread_mutex_unlock(_mutex);
       assert (status == 0, "invariant");
-      status = pthread_cond_signal (&_cond[_cur_index]);
+      status = pthread_cond_signal (&_cond[index]);
       assert (status == 0, "invariant");
     }
   } else {
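The fix above closes a classic condition-variable race: _cur_index is shared state guarded by _mutex, so reading it after pthread_mutex_unlock() can observe a value concurrently changed by a waiter and signal the wrong condition variable. A minimal standalone sketch of the pattern (plain pthreads, not HotSpot code; all names are stand-ins):

    #include <pthread.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  cond[2] = {PTHREAD_COND_INITIALIZER, PTHREAD_COND_INITIALIZER};
    static int cur_index = 0;  // flipped by waiting threads while they hold mtx

    void wake_waiter() {
      pthread_mutex_lock(&mtx);
      int index = cur_index;             // capture the index while still holding the lock
      pthread_mutex_unlock(&mtx);
      pthread_cond_signal(&cond[index]); // safe: uses the captured value
      // Reading cur_index here instead could race with a waiter flipping it
      // and signal a condvar nobody waits on: a lost wakeup.
    }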
--- a/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/os_cpu/linux_sparc/vm/vm_version_linux_sparc.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,6 +53,10 @@
   return cpuinfo_field_contains("cpu", "Niagara");
 }
 
+static bool detect_M_family() {
+  return cpuinfo_field_contains("cpu", "SPARC-M");
+}
+
 static bool detect_blkinit() {
   return cpuinfo_field_contains("cpucaps", "blkinit");
 }
@@ -66,6 +70,11 @@
     features = niagara1_m | T_family_m;
   }
 
+  if (detect_M_family()) {
+    NOT_PRODUCT(if (PrintMiscellaneous && Verbose) tty->print_cr("Detected Linux on M family");)
+    features = sun4v_m | generic_v9_m | M_family_m | T_family_m;
+  }
+
   if (detect_blkinit()) {
     features |= blk_init_instructions_m;
   }
--- a/src/share/vm/classfile/stackMapTable.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/classfile/stackMapTable.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -186,7 +186,6 @@
   u2 offset = _stream->get_u2(THREAD);
   if (offset >= _code_length ||
       _code_data[offset] != ClassVerifier::NEW_OFFSET) {
-    ResourceMark rm(THREAD);
     _verifier->class_format_error(
       "StackMapTable format error: bad offset for Uninitialized");
     return VerificationType::bogus_type();
--- a/src/share/vm/code/codeCache.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/codeCache.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -521,15 +521,17 @@
 
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
-  FOR_ALL_ALIVE_BLOBS(cb) {
-    if (cb->is_nmethod()) {
-      nmethod *nm = (nmethod*)cb;
-      assert(!nm->is_unloaded(), "Tautology");
-      if (needs_cache_clean()) {
-        nm->cleanup_inline_caches();
+  NOT_DEBUG(if (needs_cache_clean())) {
+    FOR_ALL_ALIVE_BLOBS(cb) {
+      if (cb->is_nmethod()) {
+        nmethod *nm = (nmethod*)cb;
+        assert(!nm->is_unloaded(), "Tautology");
+        DEBUG_ONLY(if (needs_cache_clean())) {
+          nm->cleanup_inline_caches();
+        }
+        DEBUG_ONLY(nm->verify());
+        DEBUG_ONLY(nm->verify_oop_relocations());
       }
-      DEBUG_ONLY(nm->verify());
-      DEBUG_ONLY(nm->verify_oop_relocations());
     }
   }
   set_needs_cache_clean(false);
@@ -734,27 +736,6 @@
   return number_of_marked_CodeBlobs;
 }
 
-void CodeCache::make_marked_nmethods_zombies() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
-  FOR_ALL_ALIVE_NMETHODS(nm) {
-    if (nm->is_marked_for_deoptimization()) {
-
-      // If the nmethod has already been made non-entrant and it can be converted
-      // then zombie it now. Otherwise make it non-entrant and it will eventually
-      // be zombied when it is no longer seen on the stack. Note that the nmethod
-      // might be "entrant" and not on the stack and so could be zombied immediately
-      // but we can't tell because we don't track it on stack until it becomes
-      // non-entrant.
-
-      if (nm->is_not_entrant() && nm->can_not_entrant_be_converted()) {
-        nm->make_zombie();
-      } else {
-        nm->make_not_entrant();
-      }
-    }
-  }
-}
-
 void CodeCache::make_marked_nmethods_not_entrant() {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_NMETHODS(nm) {
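The gc_epilogue() restructuring leans on HotSpot's build-variant macros: NOT_DEBUG(code) keeps its argument only in product builds, DEBUG_ONLY(code) only in debug (ASSERT) builds. A condensed sketch of the resulting behavior (the real macros live in utilities/macros.hpp):

    #ifdef ASSERT
      #define DEBUG_ONLY(code) code
      #define NOT_DEBUG(code)
    #else
      #define DEBUG_ONLY(code)
      #define NOT_DEBUG(code) code
    #endif

    // Product build: the needs_cache_clean() guard wraps the whole blob walk,
    // so the walk is skipped entirely when no cleaning is needed.
    // Debug build: the walk always runs (so nm->verify() and
    // nm->verify_oop_relocations() execute for every alive nmethod), and the
    // needs_cache_clean() guard moves inside, around cleanup_inline_caches() only.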
--- a/src/share/vm/code/codeCache.hpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/codeCache.hpp  Thu Oct 29 17:03:53 2015 -0700
@@ -179,7 +179,6 @@
 
   static void mark_all_nmethods_for_deoptimization();
   static int  mark_for_deoptimization(Method* dependee);
-  static void make_marked_nmethods_zombies();
   static void make_marked_nmethods_not_entrant();
 
   // tells how many nmethods have dependencies
--- a/src/share/vm/code/compiledIC.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/compiledIC.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -155,6 +155,14 @@
   return _ic_call->destination();
 }
 
+// Clears the IC stub if the compiled IC is in transition state
+void CompiledIC::clear_ic_stub() {
+  if (is_in_transition_state()) {
+    ICStub* stub = ICStub_from_destination_address(stub_address());
+    stub->clear();
+  }
+}
+
 
 //-----------------------------------------------------------------------------
 // High-level access to an inline cache. Guaranteed to be MT-safe.
@@ -279,6 +287,7 @@
   assert( is_c1_method ||
          !is_monomorphic ||
          is_optimized() ||
+         !caller->is_alive() ||
          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
 #endif // ASSERT
   return is_monomorphic;
@@ -313,7 +322,7 @@
 }
 
 
-void CompiledIC::set_to_clean() {
+void CompiledIC::set_to_clean(bool in_use) {
   assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call");
   if (TraceInlineCacheClearing || TraceICs) {
     tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
@@ -329,17 +338,14 @@
 
   // A zombie transition will always be safe, since the metadata has already been set to NULL, so
   // we only need to patch the destination
-  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();
+  bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
 
   if (safe_transition) {
     // Kill any leftover stub we might have too
-    if (is_in_transition_state()) {
-      ICStub* old_stub = ICStub_from_destination_address(stub_address());
-      old_stub->clear();
-    }
+    clear_ic_stub();
     if (is_optimized()) {
-      set_ic_destination(entry);
-    } else {
+      set_ic_destination(entry);
+    } else {
       set_ic_destination_and_value(entry, (void*)NULL);
     }
   } else {
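The new in_use parameter widens the conditions under which an inline cache may be patched outside a safepoint. A condensed, standalone restatement of the test (names are stand-ins for the members used above, not the HotSpot API):

    // Each disjunct makes patching safe on its own:
    //  - !in_use:       the owning nmethod is dead, so no thread can be
    //                   executing through this call site;
    //  - is_optimized:  only the destination needs patching, there is no
    //                   separate metadata word to keep consistent;
    //  - at_safepoint:  no Java thread is running at all.
    bool safe_transition(bool in_use, bool is_optimized, bool at_safepoint) {
      return !in_use || is_optimized || at_safepoint;
    }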
--- a/src/share/vm/code/compiledIC.hpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/compiledIC.hpp  Thu Oct 29 17:03:53 2015 -0700
@@ -228,8 +228,9 @@
   //
   // They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full.
   //
-  void set_to_clean();  // Can only be called during a safepoint operation
+  void set_to_clean(bool in_use = true);
   void set_to_monomorphic(CompiledICInfo& info);
+  void clear_ic_stub();
 
   // Returns true if successful and false otherwise. The call can fail if memory
   // allocation in the code cache fails.
--- a/src/share/vm/code/nmethod.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/nmethod.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -1148,9 +1148,20 @@
   }
 }
 
+// Clear ICStubs of all compiled ICs
+void nmethod::clear_ic_stubs() {
+  assert_locked_or_safepoint(CompiledIC_lock);
+  RelocIterator iter(this);
+  while(iter.next()) {
+    if (iter.type() == relocInfo::virtual_call_type) {
+      CompiledIC* ic = CompiledIC_at(&iter);
+      ic->clear_ic_stub();
+    }
+  }
+}
+
 
 void nmethod::cleanup_inline_caches() {
-
   assert_locked_or_safepoint(CompiledIC_lock);
 
   // If the method is not entrant or zombie then a JMP is plastered over the
@@ -1166,7 +1177,8 @@
     // In fact, why are we bothering to look at oops in a non-entrant method??
   }
 
-  // Find all calls in an nmethod, and clear the ones that points to zombie methods
+  // Find all calls in an nmethod and clear the ones that point to non-entrant,
+  // zombie and unloaded nmethods.
   ResourceMark rm;
   RelocIterator iter(this, low_boundary);
   while(iter.next()) {
@@ -1178,8 +1190,8 @@
       CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
       if( cb != NULL && cb->is_nmethod() ) {
         nmethod* nm = (nmethod*)cb;
-        // Clean inline caches pointing to both zombie and not_entrant methods
-        if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean();
+        // Clean inline caches pointing to zombie, non-entrant and unloaded methods
+        if (!nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
       }
       break;
     }
@@ -1188,7 +1200,7 @@
       CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
       if( cb != NULL && cb->is_nmethod() ) {
         nmethod* nm = (nmethod*)cb;
-        // Clean inline caches pointing to both zombie and not_entrant methods
+        // Clean inline caches pointing to zombie, non-entrant and unloaded methods
         if (!nm->is_in_use() || (nm->method()->code() != nm)) csc->set_to_clean();
       }
       break;
@@ -1279,7 +1291,7 @@
 // Tell if a non-entrant method can be converted to a zombie (i.e.,
 // there are no activations on the stack, not in use by the VM,
 // and not in use by the ServiceThread)
-bool nmethod::can_not_entrant_be_converted() {
+bool nmethod::can_convert_to_zombie() {
   assert(is_not_entrant(), "must be a non-entrant method");
 
   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
@@ -2695,7 +2707,7 @@
   // Hmm. OSR methods can be deopted but not marked as zombie or not_entrant
   // seems odd.
 
-  if( is_zombie() || is_not_entrant() )
+  if (is_zombie() || is_not_entrant() || is_unloaded())
     return;
 
   // Make sure all the entry points are correctly aligned for patching.
--- a/src/share/vm/code/nmethod.hpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/code/nmethod.hpp  Thu Oct 29 17:03:53 2015 -0700
@@ -577,6 +577,7 @@
 
   // Inline cache support
   void clear_inline_caches();
+  void clear_ic_stubs();
   void cleanup_inline_caches();
   bool inlinecache_check_contains(address addr) const {
     return (addr >= code_begin() && addr < verified_entry_point());
@@ -604,7 +605,7 @@
 
   // See comment at definition of _last_seen_on_stack
   void mark_as_seen_on_stack();
-  bool can_not_entrant_be_converted();
+  bool can_convert_to_zombie();
 
   // Evolution support. We make old (discarded) compiled methods point to new Method*s.
   void set_method(Method* method) { _method = method; }
--- a/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -117,7 +117,7 @@
 
 G1RootProcessor::G1RootProcessor(G1CollectedHeap* g1h) :
     _g1h(g1h),
-    _process_strong_tasks(new SubTasksDone(G1RP_PS_NumElements)),
+    _process_strong_tasks(G1RP_PS_NumElements),
     _srs(g1h),
     _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false),
     _n_workers_discovered_strong_classes(0) {}
@@ -160,7 +160,7 @@
   {
     // Now the CM ref_processor roots.
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CMRefRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_refProcessor_oops_do)) {
       // We need to treat the discovered reference lists of the
       // concurrent mark ref processor as roots and keep entries
      // (which are added by the marking threads) on them live
@@ -203,12 +203,12 @@
   // as implicitly live).
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SATBFiltering, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_filter_satb_buffers) && _g1h->mark_in_progress()) {
      JavaThread::satb_mark_queue_set().filter_thread_buffers();
    }
  }
 
-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks.all_tasks_completed();
 }
 
 void G1RootProcessor::process_strong_roots(OopClosure* oops,
@@ -218,7 +218,7 @@
   process_java_roots(oops, clds, clds, NULL, blobs, NULL, 0);
   process_vm_roots(oops, NULL, NULL, 0);
 
-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks.all_tasks_completed();
 }
 
 void G1RootProcessor::process_all_roots(OopClosure* oops,
@@ -228,11 +228,11 @@
   process_java_roots(oops, NULL, clds, clds, NULL, NULL, 0);
   process_vm_roots(oops, oops, NULL, 0);
 
-  if (!_process_strong_tasks->is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
+  if (!_process_strong_tasks.is_task_claimed(G1RP_PS_CodeCache_oops_do)) {
     CodeCache::blobs_do(blobs);
   }
 
-  _process_strong_tasks->all_tasks_completed();
+  _process_strong_tasks.all_tasks_completed();
 }
 
 void G1RootProcessor::process_java_roots(OopClosure* strong_roots,
@@ -248,7 +248,7 @@
   // let the thread process the weak CLDs and nmethods.
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::CLDGRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ClassLoaderDataGraph_oops_do)) {
       ClassLoaderDataGraph::roots_cld_do(strong_clds, weak_clds);
     }
   }
@@ -265,49 +265,49 @@
                                      uint worker_i) {
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::UniverseRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_Universe_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Universe_oops_do)) {
       Universe::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JNIRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_JNIHandles_oops_do)) {
       JNIHandles::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ObjectSynchronizerRoots, worker_i);
-    if (!_process_strong_tasks-> is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_ObjectSynchronizer_oops_do)) {
       ObjectSynchronizer::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::FlatProfilerRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
       FlatProfiler::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_Management_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
     }
   }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::JVMTIRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_jvmti_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_jvmti_oops_do)) {
      JvmtiExport::oops_do(strong_roots);
    }
  }
 
   {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i);
-    if (!_process_strong_tasks->is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
+    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) {
       SystemDictionary::roots_oops_do(strong_roots, weak_roots);
     }
   }
@@ -335,5 +335,5 @@
 }
 
 void G1RootProcessor::set_num_workers(int active_workers) {
-  _process_strong_tasks->set_n_threads(active_workers);
+  _process_strong_tasks.set_n_threads(active_workers);
 }
--- a/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp  Thu Oct 29 17:03:53 2015 -0700
@@ -45,7 +45,7 @@
 // worker thread call the process_roots methods.
 class G1RootProcessor : public StackObj {
   G1CollectedHeap* _g1h;
-  SubTasksDone* _process_strong_tasks;
+  SubTasksDone _process_strong_tasks;
   SharedHeap::StrongRootsScope _srs;
 
   // Used to implement the Thread work barrier.
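Holding SubTasksDone by value rather than through a pointer ties its lifetime to the enclosing G1RootProcessor (a StackObj), removing the separate heap allocation; notably, the old code shows no matching delete for the new SubTasksDone(...). An illustrative before/after sketch (types are stand-ins, not the HotSpot classes):

    struct SubTasksDone { explicit SubTasksDone(int n) { /* ... */ } };

    struct Before {
      SubTasksDone* _tasks;
      explicit Before(int n) : _tasks(new SubTasksDone(n)) {}  // heap-allocated; freed nowhere in the diff
    };

    struct After {
      SubTasksDone _tasks;                      // embedded by value
      explicit After(int n) : _tasks(n) {}      // destroyed automatically with the owner
    };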
--- a/src/share/vm/opto/block.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/block.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -393,7 +393,7 @@
   VectorSet visited(a);
 
   // Allocate stack with enough space to avoid frequent realloc
-  Node_Stack nstack(a, C->unique() >> 1);
+  Node_Stack nstack(a, C->live_nodes() >> 1);
   nstack.push(_root, 0);
   uint sum = 0; // Counter for blocks
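This and the following opto changes all swap C->unique() for C->live_nodes() when sizing traversal stacks: unique() is the highest node index ever allocated, while live_nodes() counts nodes currently in the graph, so after heavy optimization it can be far smaller. A self-contained analogy of why the latter is the tighter estimate (plain C++, not HotSpot code):

    #include <cstdio>
    #include <vector>

    struct Arena {
      int next_id = 0;  // analogous to Compile::unique(): peak id handed out
      int live = 0;     // analogous to Compile::live_nodes(): current count
      int  make() { ++live; return next_id++; }
      void kill() { --live; }
    };

    int main() {
      Arena a;
      for (int i = 0; i < 50000; i++) a.make();
      for (int i = 0; i < 38000; i++) a.kill();
      std::vector<int> worklist;
      worklist.reserve(a.live >> 1);  // a DFS stack only ever holds live nodes
      std::printf("unique=%d live=%d reserved=%zu\n",
                  a.next_id, a.live, worklist.capacity());
      return 0;
    }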
--- a/src/share/vm/opto/cfgnode.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/cfgnode.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -791,7 +791,7 @@
   Compile *C = igvn->C;
   Arena *a = Thread::current()->resource_area();
   Node_Array node_map = new Node_Array(a);
-  Node_Stack stack(a, C->unique() >> 4);
+  Node_Stack stack(a, C->live_nodes() >> 4);
   PhiNode *nphi = slice_memory(at);
   igvn->register_new_node_with_optimizer( nphi );
   node_map.map(_idx, nphi);
--- a/src/share/vm/opto/compile.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/compile.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -327,7 +327,7 @@
 // Use breadth-first pass that records state in a Unique_Node_List,
 // recursive traversal is slower.
 void Compile::identify_useful_nodes(Unique_Node_List &useful) {
-  int estimated_worklist_size = unique();
+  int estimated_worklist_size = live_nodes();
   useful.map( estimated_worklist_size, NULL );  // preallocate space
 
   // Initialize worklist
@@ -3212,8 +3212,8 @@
   Final_Reshape_Counts frc;
 
   // Visit everybody reachable!
-  // Allocate stack of size C->unique()/2 to avoid frequent realloc
-  Node_Stack nstack(unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  Node_Stack nstack(live_nodes() >> 1);
   final_graph_reshaping_walk(nstack, root(), frc);
 
   // Check for unreachable (from below) code (i.e., infinite loops).
--- a/src/share/vm/opto/domgraph.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/domgraph.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -505,8 +505,8 @@
 // Perform DFS search.  Setup 'vertex' as DFS to vertex mapping.  Setup
 // 'semi' as vertex to DFS mapping.  Set 'parent' to DFS parent.
 int NTarjan::DFS( NTarjan *ntarjan, VectorSet &visited, PhaseIdealLoop *pil, uint *dfsorder) {
-  // Allocate stack of size C->unique()/8 to avoid frequent realloc
-  GrowableArray <Node *> dfstack(pil->C->unique() >> 3);
+  // Allocate stack of size C->live_nodes()/8 to avoid frequent realloc
+  GrowableArray <Node *> dfstack(pil->C->live_nodes() >> 3);
   Node *b = pil->C->root();
   int dfsnum = 1;
   dfsorder[b->_idx] = dfsnum; // Cache parent's dfsnum for a later use
--- a/src/share/vm/opto/escape.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/escape.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -3183,7 +3183,7 @@
     // Note 2: MergeMem may already contains instance memory slices added
     // during find_inst_mem() call when memory nodes were processed above.
     igvn->hash_delete(nmm);
-    uint nslices = nmm->req();
+    uint nslices = MIN2(nmm->req(), new_index_start);
     for (uint i = Compile::AliasIdxRaw+1; i < nslices; i++) {
       Node* mem = nmm->in(i);
       Node* cur = NULL;
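The MIN2 clamp is the fix for bug 8134031, exercised by the new test at the end of this changeset: find_inst_mem() can append new memory slices to nmm while this loop runs, and iterating up to nmm->req() would visit and rewire those freshly added slices too. A self-contained illustration of the hazard (standard C++, hypothetical data):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> slices = {1, 2, 3};
      size_t new_index_start = slices.size();  // entries appended past here are new
      size_t nslices = std::min(slices.size(), new_index_start);  // freeze the bound
      for (size_t i = 0; i < nslices; i++) {
        slices.push_back(slices[i] * 10);      // grows the container mid-loop
      }
      // With the frozen bound only the original 3 entries are visited; looping
      // to slices.size() would keep chasing the appended elements.
      std::printf("visited=%zu total=%zu\n", nslices, slices.size());
      return 0;
    }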
--- a/src/share/vm/opto/gcm.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/gcm.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -118,8 +118,8 @@
 //------------------------------schedule_pinned_nodes--------------------------
 // Set the basic block for Nodes pinned into blocks
 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
-  // Allocate node stack of size C->unique()+8 to avoid frequent realloc
-  GrowableArray <Node *> spstack(C->unique() + 8);
+  // Allocate node stack of size C->live_nodes()+8 to avoid frequent realloc
+  GrowableArray <Node *> spstack(C->live_nodes() + 8);
   spstack.push(_root);
   while (spstack.is_nonempty()) {
     Node* node = spstack.pop();
@@ -1285,7 +1285,7 @@
   visited.Clear();
   Node_List stack(arena);
   // Pre-grow the list
-  stack.map((C->unique() >> 1) + 16, NULL);
+  stack.map((C->live_nodes() >> 1) + 16, NULL);
   if (!schedule_early(visited, stack)) {
     // Bailout without retry
     C->record_method_not_compilable("early schedule failed");
--- a/src/share/vm/opto/loopnode.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/loopnode.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -2230,7 +2230,7 @@
   // _nodes array holds the earliest legal controlling CFG node.
 
   // Allocate stack with enough space to avoid frequent realloc
-  int stack_size = (C->unique() >> 1) + 16; // (unique>>1)+16 from Java2D stats
+  int stack_size = (C->live_nodes() >> 1) + 16; // (live_nodes>>1)+16 from Java2D stats
   Node_Stack nstack( a, stack_size );
 
   visited.Clear();
@@ -2686,7 +2686,7 @@
     }
   }
   if (_dom_stk == NULL) {
-    uint init_size = C->unique() / 100; // Guess that 1/100 is a reasonable initial size.
+    uint init_size = C->live_nodes() / 100; // Guess that 1/100 is a reasonable initial size.
     if (init_size < 10) init_size = 10;
     _dom_stk = new GrowableArray<uint>(init_size);
   }
@@ -2776,8 +2776,8 @@
 // The sort is of size number-of-control-children, which generally limits
 // it to size 2 (i.e., I just choose between my 2 target loops).
 void PhaseIdealLoop::build_loop_tree() {
-  // Allocate stack of size C->unique()/2 to avoid frequent realloc
-  GrowableArray <Node *> bltstack(C->unique() >> 1);
+  // Allocate stack of size C->live_nodes()/2 to avoid frequent realloc
+  GrowableArray <Node *> bltstack(C->live_nodes() >> 1);
   Node *n = C->root();
   bltstack.push(n);
   int pre_order = 1;
@@ -3666,7 +3666,7 @@
 void PhaseIdealLoop::dump( ) const {
   ResourceMark rm;
   Arena* arena = Thread::current()->resource_area();
-  Node_Stack stack(arena, C->unique() >> 2);
+  Node_Stack stack(arena, C->live_nodes() >> 2);
   Node_List rpo_list;
   VectorSet visited(arena);
   visited.set(C->top()->_idx);
--- a/src/share/vm/opto/matcher.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/matcher.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -335,14 +335,14 @@
   grow_new_node_array(C->unique());
 
   // Reset node counter so MachNodes start with _idx at 0
-  int nodes = C->unique(); // save value
+  int live_nodes = C->live_nodes();
   C->set_unique(0);
   C->reset_dead_node_list();
 
   // Recursively match trees from old space into new space.
   // Correct leaves of new-space Nodes; they point to old-space.
   _visited.Clear();             // Clear visit bits for xform call
-  C->set_cached_top_node(xform( C->top(), nodes ));
+  C->set_cached_top_node(xform( C->top(), live_nodes));
   if (!C->failing()) {
     Node* xroot = xform( C->root(), 1 );
     if (xroot == NULL) {
@@ -995,7 +995,7 @@
 Node *Matcher::transform( Node *n ) { ShouldNotCallThis(); return n; }
 Node *Matcher::xform( Node *n, int max_stack ) {
   // Use one stack to keep both: child's node/state and parent's node/index
-  MStack mstack(max_stack * 2 * 2); // C->unique() * 2 * 2
+  MStack mstack(max_stack * 2 * 2); // usually: C->live_nodes() * 2 * 2
   mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
 
   while (mstack.is_nonempty()) {
@@ -2021,8 +2021,8 @@
 //------------------------------find_shared------------------------------------
 // Set bits if Node is shared or otherwise a root
 void Matcher::find_shared( Node *n ) {
-  // Allocate stack of size C->unique() * 2 to avoid frequent realloc
-  MStack mstack(C->unique() * 2);
+  // Allocate stack of size C->live_nodes() * 2 to avoid frequent realloc
+  MStack mstack(C->live_nodes() * 2);
   // Mark nodes as address_visited if they are inputs to an address expression
   VectorSet address_visited(Thread::current()->resource_area());
   mstack.push(n, Visit);     // Don't need to pre-visit root node
--- a/src/share/vm/opto/node.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/node.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -1749,7 +1749,7 @@
   uint depth = (uint)ABS(d);
   int direction = d;
   Compile* C = Compile::current();
-  GrowableArray <Node *> nstack(C->unique());
+  GrowableArray <Node *> nstack(C->live_nodes());
 
   nstack.append(s);
   int begin = 0;
--- a/src/share/vm/opto/phaseX.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/opto/phaseX.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -783,7 +783,7 @@
 //------------------------------PhaseIterGVN-----------------------------------
 // Initialize hash table to fresh and clean for +VerifyOpto
 PhaseIterGVN::PhaseIterGVN( PhaseIterGVN *igvn, const char *dummy ) : PhaseGVN(igvn,dummy), _worklist( ),
-                                                                      _stack(C->unique() >> 1),
+                                                                      _stack(C->live_nodes() >> 1),
                                                                       _delay_transform(false) {
 }
 
@@ -800,7 +800,11 @@
 // Initialize with previous PhaseGVN info from Parser
 PhaseIterGVN::PhaseIterGVN( PhaseGVN *gvn ) : PhaseGVN(gvn),
                                               _worklist(*C->for_igvn()),
-                                              _stack(C->unique() >> 1),
+// TODO: Before incremental inlining it was allocated only once and it was fine. Now that
+// the constructor is used in incremental inlining, this consumes too much memory:
+//                                            _stack(C->live_nodes() >> 1),
+// So, as a band-aid, we replace this by:
+                                              _stack(C->comp_arena(), 32),
                                               _delay_transform(false)
 {
   uint max;
@@ -1586,7 +1590,7 @@
   _nodes.map( n->_idx, new_node ); // Flag as having been cloned
 
   // Allocate stack of size _nodes.Size()/2 to avoid frequent realloc
-  GrowableArray <Node *> trstack(C->unique() >> 1);
+  GrowableArray <Node *> trstack(C->live_nodes() >> 1);
 
   trstack.push(new_node);   // Process children of cloned node
   while ( trstack.is_nonempty() ) {
--- a/src/share/vm/prims/jvmtiRedefineClasses.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -3751,7 +3751,7 @@
   // Deoptimize all activations depending on marked nmethods
   Deoptimization::deoptimize_dependents();
 
-  // Make the dependent methods not entrant (in VM_Deoptimize they are made zombies)
+  // Make the dependent methods not entrant
   CodeCache::make_marked_nmethods_not_entrant();
 
   // From now on we know that the dependency information is complete
--- a/src/share/vm/runtime/objectMonitor.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/runtime/objectMonitor.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -226,7 +226,8 @@
 //
 // * The monitor entry list operations avoid locks, but strictly speaking
 //   they're not lock-free. Enter is lock-free, exit is not.
-//   See http://j2se.east/~dice/PERSIST/040825-LockFreeQueues.html
+//   For a description of 'Methods and apparatus providing non-blocking access
+//   to a resource,' see U.S. Pat. No. 7844973.
 //
 // * The cxq can have multiple concurrent "pushers" but only one concurrent
 //   detaching thread. This mechanism is immune from the ABA corruption.
@@ -1955,7 +1956,8 @@
 // (duration) or we can fix the count at approximately the duration of
 // a context switch and vary the frequency. Of course we could also
 // vary both satisfying K == Frequency * Duration, where K is adaptive by monitor.
-// See http://j2se.east/~dice/PERSIST/040824-AdaptiveSpinning.html.
+// For a description of 'Adaptive spin-then-block mutual exclusion in
+// multi-threaded processing,' see U.S. Pat. No. 8046758.
 //
 // This implementation varies the duration "D", where D varies with
 // the success rate of recent spin attempts. (D is capped at approximately
--- a/src/share/vm/runtime/sweeper.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/runtime/sweeper.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -538,10 +538,14 @@
   } else if (nm->is_not_entrant()) {
     // If there are no current activations of this method on the
     // stack we can safely convert it to a zombie method
-    if (nm->can_not_entrant_be_converted()) {
+    if (nm->can_convert_to_zombie()) {
       if (PrintMethodFlushing && Verbose) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
       }
+      // Clear ICStubs to prevent back patching stubs of zombie or unloaded
+      // nmethods during the next safepoint (see ICStub::finalize).
+      MutexLocker cl(CompiledIC_lock);
+      nm->clear_ic_stubs();
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       _zombified_count++;
@@ -567,6 +571,12 @@
       release_nmethod(nm);
       _flushed_count++;
     } else {
+      {
+        // Clean ICs of unloaded nmethods as well because they may reference other
+        // unloaded nmethods that may be flushed earlier in the sweeper cycle.
+        MutexLocker cl(CompiledIC_lock);
+        nm->cleanup_inline_caches();
+      }
       // Code cache state change is tracked in make_zombie()
       nm->make_zombie();
       _zombified_count++;
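Note the two locking shapes above: in the first hunk the MutexLocker covers the rest of the enclosing block, while in the second the extra braces release CompiledIC_lock before make_zombie() runs. A minimal standalone sketch of that scoped-lock (RAII) pattern, using the standard library as a stand-in for HotSpot's MutexLocker:

    #include <mutex>

    std::mutex ic_lock;  // stand-in for HotSpot's CompiledIC_lock

    void sweep_step() {
      {
        std::lock_guard<std::mutex> cl(ic_lock);  // ~ MutexLocker cl(CompiledIC_lock)
        // ... clean inline caches while holding the lock ...
      }  // guard destroyed here: the lock is released at the closing brace
      // ... the state change (make_zombie() in the real code) runs unlocked ...
    }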
--- a/src/share/vm/runtime/vm_operations.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/runtime/vm_operations.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -106,8 +106,8 @@
   // Deoptimize all activations depending on marked nmethods
   Deoptimization::deoptimize_dependents();
 
-  // Make the dependent methods zombies
-  CodeCache::make_marked_nmethods_zombies();
+  // Make the dependent methods not entrant
+  CodeCache::make_marked_nmethods_not_entrant();
 }
 
--- a/src/share/vm/services/management.cpp  Fri Oct 16 12:15:09 2015 -0700
+++ b/src/share/vm/services/management.cpp  Thu Oct 29 17:03:53 2015 -0700
@@ -1107,6 +1107,8 @@
                                  bool with_locked_monitors,
                                  bool with_locked_synchronizers,
                                  TRAPS) {
+  // no need to actually perform thread dump if no TIDs are specified
+  if (num_threads == 0) return;
 
   // First get an array of threadObj handles.
   // A JavaThread may terminate before we get the stack trace.
--- /dev/null  Thu Jan 01 00:00:00 1970 +0000
+++ b/test/compiler/escapeAnalysis/TestEABadMergeMem.java  Thu Oct 29 17:03:53 2015 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8134031
+ * @summary Bad rewiring of memory edges when we split unique types during EA
+ * @run main/othervm -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:CompileCommand=dontinline,TestEABadMergeMem::m_notinlined TestEABadMergeMem
+ *
+ */
+
+public class TestEABadMergeMem {
+
+    static class Box {
+        int i;
+    }
+
+    static void m_notinlined() {
+    }
+
+    static float dummy1;
+    static float dummy2;
+
+    static int test(Box a, Box c, int i, int j, int k, boolean flag1, boolean flag2) {
+        Box b = new Box(); // non escaping
+        a.i = i;
+        b.i = j;
+        c.i = k;
+
+        m_notinlined();
+
+        boolean flag3 = false;
+        if (flag1) {
+            for (int ii = 0; ii < 100; ii++) {
+                if (flag2) {
+                    dummy1 = (float)ii;
+                } else {
+                    dummy2 = (float)ii;
+                }
+            }
+            flag3 = true;
+        }
+        // Memory Phi here with projection of not inlined call as one edge, MergeMem as other
+
+        if (flag3) { // will split through Phi during loopopts
+            int res = c.i + b.i;
+            m_notinlined(); // prevents split through phi during igvn
+            return res;
+        } else {
+            return 44 + 43;
+        }
+    }
+
+    static public void main(String[] args) {
+        for (int i = 0; i < 20000; i++) {
+            // m(2);
+            Box a = new Box();
+            Box c = new Box();
+            int res = test(a, c, 42, 43, 44, (i%2) == 0, (i%3) == 0);
+            if (res != 44 + 43) {
+                throw new RuntimeException("Bad result " + res);
+            }
+        }
+    }
+
+}