Wed, 04 Jun 2014 06:53:06 -0700
Merge
.hgtags | file | annotate | diff | comparison | revisions | |
make/hotspot_version | file | annotate | diff | comparison | revisions |
1.1 --- a/.hgtags Mon Jun 02 15:18:10 2014 -0700 1.2 +++ b/.hgtags Wed Jun 04 06:53:06 2014 -0700 1.3 @@ -480,3 +480,4 @@ 1.4 8c785f9bde6f603cbd13eecd2ee6acd699b376f8 jdk8u20-b15 1.5 50e5d560367b94275a91d5d579c32f1164eb5fa5 hs25.20-b16 1.6 c36ef639e6d3c2d238f4e4f8b2f5803a60de8be8 jdk8u20-b16 1.7 +ee8b934668694dba5dc0ac039f8d56e52499c0f9 hs25.20-b17
2.1 --- a/make/hotspot_version Mon Jun 02 15:18:10 2014 -0700 2.2 +++ b/make/hotspot_version Wed Jun 04 06:53:06 2014 -0700 2.3 @@ -35,7 +35,7 @@ 2.4 2.5 HS_MAJOR_VER=25 2.6 HS_MINOR_VER=20 2.7 -HS_BUILD_NUMBER=16 2.8 +HS_BUILD_NUMBER=17 2.9 2.10 JDK_MAJOR_VER=1 2.11 JDK_MINOR_VER=8
3.1 --- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Mon Jun 02 15:18:10 2014 -0700 3.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Wed Jun 04 06:53:06 2014 -0700 3.3 @@ -1221,10 +1221,8 @@ 3.4 bool is_obj = (type == T_ARRAY || type == T_OBJECT); 3.5 LIR_Opr offset = off.result(); 3.6 3.7 - if (data != dst) { 3.8 - __ move(data, dst); 3.9 - data = dst; 3.10 - } 3.11 + // Because we want a 2-arg form of xchg 3.12 + __ move(data, dst); 3.13 3.14 assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type"); 3.15 LIR_Address* addr; 3.16 @@ -1254,7 +1252,7 @@ 3.17 pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */, 3.18 true /* do_load */, false /* patch */, NULL); 3.19 } 3.20 - __ xchg(LIR_OprFact::address(addr), data, dst, tmp); 3.21 + __ xchg(LIR_OprFact::address(addr), dst, dst, tmp); 3.22 if (is_obj) { 3.23 // Seems to be a precise address 3.24 post_barrier(ptr, data);
4.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Jun 02 15:18:10 2014 -0700 4.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Wed Jun 04 06:53:06 2014 -0700 4.3 @@ -3653,9 +3653,9 @@ 4.4 const Register len_reg = I4; // cipher length 4.5 const Register keylen = I5; // reg for storing expanded key array length 4.6 4.7 - // save cipher len before save_frame, to return in the end 4.8 - __ mov(O4, L0); 4.9 __ save_frame(0); 4.10 + // save cipher len to return in the end 4.11 + __ mov(len_reg, L0); 4.12 4.13 // read expanded key length 4.14 __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0); 4.15 @@ -3778,9 +3778,9 @@ 4.16 // re-init intial vector for next block, 8-byte alignment is guaranteed 4.17 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 4.18 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 4.19 - __ restore(); 4.20 - __ retl(); 4.21 - __ delayed()->mov(L0, O0); 4.22 + __ mov(L0, I0); 4.23 + __ ret(); 4.24 + __ delayed()->restore(); 4.25 4.26 __ align(OptoLoopAlignment); 4.27 __ BIND(L_cbcenc192); 4.28 @@ -3869,9 +3869,9 @@ 4.29 // re-init intial vector for next block, 8-byte alignment is guaranteed 4.30 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 4.31 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 4.32 - __ restore(); 4.33 - __ retl(); 4.34 - __ delayed()->mov(L0, O0); 4.35 + __ mov(L0, I0); 4.36 + __ ret(); 4.37 + __ delayed()->restore(); 4.38 4.39 __ align(OptoLoopAlignment); 4.40 __ BIND(L_cbcenc256); 4.41 @@ -3962,9 +3962,9 @@ 4.42 // re-init intial vector for next block, 8-byte alignment is guaranteed 4.43 __ stf(FloatRegisterImpl::D, F60, rvec, 0); 4.44 __ stf(FloatRegisterImpl::D, F62, rvec, 8); 4.45 - __ restore(); 4.46 - __ retl(); 4.47 - __ delayed()->mov(L0, O0); 4.48 + __ mov(L0, I0); 4.49 + __ ret(); 4.50 + __ delayed()->restore(); 4.51 4.52 return start; 4.53 } 4.54 @@ -3992,9 +3992,9 @@ 4.55 const Register original_key = I5; // original key array only required during decryption 4.56 
const Register keylen = L6; // reg for storing expanded key array length 4.57 4.58 - // save cipher len before save_frame, to return in the end 4.59 - __ mov(O4, L0); 4.60 __ save_frame(0); //args are read from I* registers since we save the frame in the beginning 4.61 + // save cipher len to return in the end 4.62 + __ mov(len_reg, L7); 4.63 4.64 // load original key from SunJCE expanded decryption key 4.65 // Since we load original key buffer starting first element, 8-byte alignment is guaranteed 4.66 @@ -4568,10 +4568,9 @@ 4.67 // re-init intial vector for next block, 8-byte alignment is guaranteed 4.68 __ stx(L0, rvec, 0); 4.69 __ stx(L1, rvec, 8); 4.70 - __ restore(); 4.71 - __ mov(L0, O0); 4.72 - __ retl(); 4.73 - __ delayed()->nop(); 4.74 + __ mov(L7, I0); 4.75 + __ ret(); 4.76 + __ delayed()->restore(); 4.77 4.78 return start; 4.79 }
5.1 --- a/src/share/vm/c1/c1_LIR.cpp Mon Jun 02 15:18:10 2014 -0700 5.2 +++ b/src/share/vm/c1/c1_LIR.cpp Wed Jun 04 06:53:06 2014 -0700 5.3 @@ -1083,7 +1083,7 @@ 5.4 5.5 void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) { 5.6 masm->emit_arraycopy(this); 5.7 - masm->emit_code_stub(stub()); 5.8 + masm->append_code_stub(stub()); 5.9 } 5.10 5.11 void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) { 5.12 @@ -1100,20 +1100,20 @@ 5.13 5.14 void LIR_OpAllocObj::emit_code(LIR_Assembler* masm) { 5.15 masm->emit_alloc_obj(this); 5.16 - masm->emit_code_stub(stub()); 5.17 + masm->append_code_stub(stub()); 5.18 } 5.19 5.20 void LIR_OpBranch::emit_code(LIR_Assembler* masm) { 5.21 masm->emit_opBranch(this); 5.22 if (stub()) { 5.23 - masm->emit_code_stub(stub()); 5.24 + masm->append_code_stub(stub()); 5.25 } 5.26 } 5.27 5.28 void LIR_OpConvert::emit_code(LIR_Assembler* masm) { 5.29 masm->emit_opConvert(this); 5.30 if (stub() != NULL) { 5.31 - masm->emit_code_stub(stub()); 5.32 + masm->append_code_stub(stub()); 5.33 } 5.34 } 5.35 5.36 @@ -1123,13 +1123,13 @@ 5.37 5.38 void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) { 5.39 masm->emit_alloc_array(this); 5.40 - masm->emit_code_stub(stub()); 5.41 + masm->append_code_stub(stub()); 5.42 } 5.43 5.44 void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) { 5.45 masm->emit_opTypeCheck(this); 5.46 if (stub()) { 5.47 - masm->emit_code_stub(stub()); 5.48 + masm->append_code_stub(stub()); 5.49 } 5.50 } 5.51 5.52 @@ -1144,7 +1144,7 @@ 5.53 void LIR_OpLock::emit_code(LIR_Assembler* masm) { 5.54 masm->emit_lock(this); 5.55 if (stub()) { 5.56 - masm->emit_code_stub(stub()); 5.57 + masm->append_code_stub(stub()); 5.58 } 5.59 } 5.60
6.1 --- a/src/share/vm/c1/c1_LIR.hpp Mon Jun 02 15:18:10 2014 -0700 6.2 +++ b/src/share/vm/c1/c1_LIR.hpp Wed Jun 04 06:53:06 2014 -0700 6.3 @@ -1127,6 +1127,7 @@ 6.4 virtual void print_instr(outputStream* out) const = 0; 6.5 virtual void print_on(outputStream* st) const PRODUCT_RETURN; 6.6 6.7 + virtual bool is_patching() { return false; } 6.8 virtual LIR_OpCall* as_OpCall() { return NULL; } 6.9 virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; } 6.10 virtual LIR_OpLabel* as_OpLabel() { return NULL; } 6.11 @@ -1387,6 +1388,7 @@ 6.12 return (LIR_MoveKind)_flags; 6.13 } 6.14 6.15 + virtual bool is_patching() { return _patch != lir_patch_none; } 6.16 virtual void emit_code(LIR_Assembler* masm); 6.17 virtual LIR_Op1* as_Op1() { return this; } 6.18 virtual const char * name() const PRODUCT_RETURN0; 6.19 @@ -1619,6 +1621,7 @@ 6.20 int profiled_bci() const { return _profiled_bci; } 6.21 bool should_profile() const { return _should_profile; } 6.22 6.23 + virtual bool is_patching() { return _info_for_patch != NULL; } 6.24 virtual void emit_code(LIR_Assembler* masm); 6.25 virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; } 6.26 void print_instr(outputStream* out) const PRODUCT_RETURN;
7.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp Mon Jun 02 15:18:10 2014 -0700 7.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Wed Jun 04 06:53:06 2014 -0700 7.3 @@ -58,7 +58,7 @@ 7.4 _masm->nop(); 7.5 } 7.6 patch->install(_masm, patch_code, obj, info); 7.7 - append_patching_stub(patch); 7.8 + append_code_stub(patch); 7.9 7.10 #ifdef ASSERT 7.11 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci()); 7.12 @@ -131,11 +131,6 @@ 7.13 } 7.14 7.15 7.16 -void LIR_Assembler::append_patching_stub(PatchingStub* stub) { 7.17 - _slow_case_stubs->append(stub); 7.18 -} 7.19 - 7.20 - 7.21 void LIR_Assembler::check_codespace() { 7.22 CodeSection* cs = _masm->code_section(); 7.23 if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) { 7.24 @@ -144,7 +139,7 @@ 7.25 } 7.26 7.27 7.28 -void LIR_Assembler::emit_code_stub(CodeStub* stub) { 7.29 +void LIR_Assembler::append_code_stub(CodeStub* stub) { 7.30 _slow_case_stubs->append(stub); 7.31 } 7.32 7.33 @@ -435,7 +430,7 @@ 7.34 7.35 void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) { 7.36 ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo); 7.37 - emit_code_stub(stub); 7.38 + append_code_stub(stub); 7.39 } 7.40 7.41 void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) { 7.42 @@ -444,7 +439,7 @@ 7.43 7.44 void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) { 7.45 DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo); 7.46 - emit_code_stub(stub); 7.47 + append_code_stub(stub); 7.48 } 7.49 7.50 void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
8.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp Mon Jun 02 15:18:10 2014 -0700 8.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Wed Jun 04 06:53:06 2014 -0700 8.3 @@ -143,7 +143,7 @@ 8.4 // stubs 8.5 void emit_slow_case_stubs(); 8.6 void emit_static_call_stub(); 8.7 - void emit_code_stub(CodeStub* op); 8.8 + void append_code_stub(CodeStub* op); 8.9 void add_call_info_here(CodeEmitInfo* info) { add_call_info(code_offset(), info); } 8.10 8.11 // code patterns
9.1 --- a/src/share/vm/c1/c1_LinearScan.cpp Mon Jun 02 15:18:10 2014 -0700 9.2 +++ b/src/share/vm/c1/c1_LinearScan.cpp Wed Jun 04 06:53:06 2014 -0700 9.3 @@ -2382,16 +2382,6 @@ 9.4 int arg_count = frame_map()->oop_map_arg_count(); 9.5 OopMap* map = new OopMap(frame_size, arg_count); 9.6 9.7 - // Check if this is a patch site. 9.8 - bool is_patch_info = false; 9.9 - if (op->code() == lir_move) { 9.10 - assert(!is_call_site, "move must not be a call site"); 9.11 - assert(op->as_Op1() != NULL, "move must be LIR_Op1"); 9.12 - LIR_Op1* move = (LIR_Op1*)op; 9.13 - 9.14 - is_patch_info = move->patch_code() != lir_patch_none; 9.15 - } 9.16 - 9.17 // Iterate through active intervals 9.18 for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) { 9.19 int assigned_reg = interval->assigned_reg(); 9.20 @@ -2406,7 +2396,7 @@ 9.21 // moves, any intervals which end at this instruction are included 9.22 // in the oop map since we may safepoint while doing the patch 9.23 // before we've consumed the inputs. 9.24 - if (is_patch_info || op->id() < interval->current_to()) { 9.25 + if (op->is_patching() || op->id() < interval->current_to()) { 9.26 9.27 // caller-save registers must not be included into oop-maps at calls 9.28 assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
10.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Mon Jun 02 15:18:10 2014 -0700 10.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Jun 04 06:53:06 2014 -0700 10.3 @@ -819,7 +819,7 @@ 10.4 // false before we start remark. At this point we should also be 10.5 // in a STW phase. 10.6 assert(!concurrent_marking_in_progress(), "invariant"); 10.7 - assert(_finger == _heap_end, 10.8 + assert(out_of_regions(), 10.9 err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT, 10.10 p2i(_finger), p2i(_heap_end))); 10.11 update_g1_committed(true); 10.12 @@ -978,7 +978,9 @@ 10.13 if (concurrent()) { 10.14 ConcurrentGCThread::stsLeave(); 10.15 } 10.16 - _first_overflow_barrier_sync.enter(); 10.17 + 10.18 + bool barrier_aborted = !_first_overflow_barrier_sync.enter(); 10.19 + 10.20 if (concurrent()) { 10.21 ConcurrentGCThread::stsJoin(); 10.22 } 10.23 @@ -986,7 +988,17 @@ 10.24 // more work 10.25 10.26 if (verbose_low()) { 10.27 - gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id); 10.28 + if (barrier_aborted) { 10.29 + gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id); 10.30 + } else { 10.31 + gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id); 10.32 + } 10.33 + } 10.34 + 10.35 + if (barrier_aborted) { 10.36 + // If the barrier aborted we ignore the overflow condition and 10.37 + // just abort the whole marking phase as quickly as possible. 
10.38 + return; 10.39 } 10.40 10.41 // If we're executing the concurrent phase of marking, reset the marking 10.42 @@ -1026,14 +1038,20 @@ 10.43 if (concurrent()) { 10.44 ConcurrentGCThread::stsLeave(); 10.45 } 10.46 - _second_overflow_barrier_sync.enter(); 10.47 + 10.48 + bool barrier_aborted = !_second_overflow_barrier_sync.enter(); 10.49 + 10.50 if (concurrent()) { 10.51 ConcurrentGCThread::stsJoin(); 10.52 } 10.53 // at this point everything should be re-initialized and ready to go 10.54 10.55 if (verbose_low()) { 10.56 - gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id); 10.57 + if (barrier_aborted) { 10.58 + gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id); 10.59 + } else { 10.60 + gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id); 10.61 + } 10.62 } 10.63 } 10.64 10.65 @@ -3232,6 +3250,8 @@ 10.66 for (uint i = 0; i < _max_worker_id; ++i) { 10.67 _tasks[i]->clear_region_fields(); 10.68 } 10.69 + _first_overflow_barrier_sync.abort(); 10.70 + _second_overflow_barrier_sync.abort(); 10.71 _has_aborted = true; 10.72 10.73 SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
11.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp Mon Jun 02 15:18:10 2014 -0700 11.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp Wed Jun 04 06:53:06 2014 -0700 11.3 @@ -542,8 +542,12 @@ 11.4 // frequently. 11.5 HeapRegion* claim_region(uint worker_id); 11.6 11.7 - // It determines whether we've run out of regions to scan. 11.8 - bool out_of_regions() { return _finger == _heap_end; } 11.9 + // It determines whether we've run out of regions to scan. Note that 11.10 + // the finger can point past the heap end in case the heap was expanded 11.11 + // to satisfy an allocation without doing a GC. This is fine, because all 11.12 + // objects in those regions will be considered live anyway because of 11.13 + // SATB guarantees (i.e. their TAMS will be equal to bottom). 11.14 + bool out_of_regions() { return _finger >= _heap_end; } 11.15 11.16 // Returns the task with the given id 11.17 CMTask* task(int id) {
12.1 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Mon Jun 02 15:18:10 2014 -0700 12.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Wed Jun 04 06:53:06 2014 -0700 12.3 @@ -89,6 +89,10 @@ 12.4 while (!_should_terminate) { 12.5 // wait until started is set. 12.6 sleepBeforeNextCycle(); 12.7 + if (_should_terminate) { 12.8 + break; 12.9 + } 12.10 + 12.11 { 12.12 ResourceMark rm; 12.13 HandleMark hm; 12.14 @@ -303,11 +307,21 @@ 12.15 } 12.16 12.17 void ConcurrentMarkThread::stop() { 12.18 - // it is ok to take late safepoints here, if needed 12.19 - MutexLockerEx mu(Terminator_lock); 12.20 - _should_terminate = true; 12.21 - while (!_has_terminated) { 12.22 - Terminator_lock->wait(); 12.23 + { 12.24 + MutexLockerEx ml(Terminator_lock); 12.25 + _should_terminate = true; 12.26 + } 12.27 + 12.28 + { 12.29 + MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag); 12.30 + CGC_lock->notify_all(); 12.31 + } 12.32 + 12.33 + { 12.34 + MutexLockerEx ml(Terminator_lock); 12.35 + while (!_has_terminated) { 12.36 + Terminator_lock->wait(); 12.37 + } 12.38 } 12.39 } 12.40 12.41 @@ -327,11 +341,14 @@ 12.42 assert(!in_progress(), "should have been cleared"); 12.43 12.44 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag); 12.45 - while (!started()) { 12.46 + while (!started() && !_should_terminate) { 12.47 CGC_lock->wait(Mutex::_no_safepoint_check_flag); 12.48 } 12.49 - set_in_progress(); 12.50 - clear_started(); 12.51 + 12.52 + if (started()) { 12.53 + set_in_progress(); 12.54 + clear_started(); 12.55 + } 12.56 } 12.57 12.58 // Note: As is the case with CMS - this method, although exported
13.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Jun 02 15:18:10 2014 -0700 13.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jun 04 06:53:06 2014 -0700 13.3 @@ -435,6 +435,9 @@ 13.4 void G1CollectedHeap::stop_conc_gc_threads() { 13.5 _cg1r->stop(); 13.6 _cmThread->stop(); 13.7 + if (G1StringDedup::is_enabled()) { 13.8 + G1StringDedup::stop(); 13.9 + } 13.10 } 13.11 13.12 #ifdef ASSERT 13.13 @@ -2182,6 +2185,23 @@ 13.14 return JNI_OK; 13.15 } 13.16 13.17 +void G1CollectedHeap::stop() { 13.18 +#if 0 13.19 + // Stopping concurrent worker threads is currently disabled until 13.20 + // some bugs in concurrent mark have been resolved. Without fixing 13.21 + // those bugs first we risk hanging during VM exit when trying to 13.22 + // stop these threads. 13.23 + 13.24 + // Abort any ongoing concurrent root region scanning and stop all 13.25 + // concurrent threads. We do this to make sure these threads do 13.26 + // not continue to execute and access resources (e.g. gclog_or_tty) 13.27 + // that are destroyed during shutdown. 13.28 + _cm->root_regions()->abort(); 13.29 + _cm->root_regions()->wait_until_scan_finished(); 13.30 + stop_conc_gc_threads(); 13.31 +#endif 13.32 +} 13.33 + 13.34 size_t G1CollectedHeap::conservative_max_heap_alignment() { 13.35 return HeapRegion::max_region_size(); 13.36 }
14.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Jun 02 15:18:10 2014 -0700 14.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jun 04 06:53:06 2014 -0700 14.3 @@ -1082,6 +1082,8 @@ 14.4 // specified by the policy object. 14.5 jint initialize(); 14.6 14.7 + virtual void stop(); 14.8 + 14.9 // Return the (conservative) maximum heap alignment for any G1 heap 14.10 static size_t conservative_max_heap_alignment(); 14.11
15.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Mon Jun 02 15:18:10 2014 -0700 15.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Wed Jun 04 06:53:06 2014 -0700 15.3 @@ -95,7 +95,15 @@ 15.4 jbyte *const first = byte_for(mr.start()); 15.5 jbyte *const last = byte_after(mr.last()); 15.6 15.7 - memset(first, g1_young_gen, last - first); 15.8 + // Below we may use an explicit loop instead of memset() because on 15.9 + // certain platforms memset() can give concurrent readers phantom zeros. 15.10 + if (UseMemSetInBOT) { 15.11 + memset(first, g1_young_gen, last - first); 15.12 + } else { 15.13 + for (jbyte* i = first; i < last; i++) { 15.14 + *i = g1_young_gen; 15.15 + } 15.16 + } 15.17 } 15.18 15.19 #ifndef PRODUCT
16.1 --- a/src/share/vm/gc_implementation/g1/g1StringDedup.cpp Mon Jun 02 15:18:10 2014 -0700 16.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.cpp Wed Jun 04 06:53:06 2014 -0700 16.3 @@ -44,6 +44,11 @@ 16.4 } 16.5 } 16.6 16.7 +void G1StringDedup::stop() { 16.8 + assert(is_enabled(), "String deduplication not enabled"); 16.9 + G1StringDedupThread::stop(); 16.10 +} 16.11 + 16.12 bool G1StringDedup::is_candidate_from_mark(oop obj) { 16.13 if (java_lang_String::is_instance(obj)) { 16.14 bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
17.1 --- a/src/share/vm/gc_implementation/g1/g1StringDedup.hpp Mon Jun 02 15:18:10 2014 -0700 17.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedup.hpp Wed Jun 04 06:53:06 2014 -0700 17.3 @@ -110,8 +110,12 @@ 17.4 return _enabled; 17.5 } 17.6 17.7 + // Initialize string deduplication. 17.8 static void initialize(); 17.9 17.10 + // Stop the deduplication thread. 17.11 + static void stop(); 17.12 + 17.13 // Immediately deduplicates the given String object, bypassing the 17.14 // the deduplication queue. 17.15 static void deduplicate(oop java_string);
18.1 --- a/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp Mon Jun 02 15:18:10 2014 -0700 18.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedupQueue.cpp Wed Jun 04 06:53:06 2014 -0700 18.3 @@ -35,6 +35,7 @@ 18.4 18.5 G1StringDedupQueue::G1StringDedupQueue() : 18.6 _cursor(0), 18.7 + _cancel(false), 18.8 _empty(true), 18.9 _dropped(0) { 18.10 _nqueues = MAX2(ParallelGCThreads, (size_t)1); 18.11 @@ -55,11 +56,17 @@ 18.12 18.13 void G1StringDedupQueue::wait() { 18.14 MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag); 18.15 - while (_queue->_empty) { 18.16 + while (_queue->_empty && !_queue->_cancel) { 18.17 ml.wait(Mutex::_no_safepoint_check_flag); 18.18 } 18.19 } 18.20 18.21 +void G1StringDedupQueue::cancel_wait() { 18.22 + MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag); 18.23 + _queue->_cancel = true; 18.24 + ml.notify(); 18.25 +} 18.26 + 18.27 void G1StringDedupQueue::push(uint worker_id, oop java_string) { 18.28 assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint"); 18.29 assert(worker_id < _queue->_nqueues, "Invalid queue");
19.1 --- a/src/share/vm/gc_implementation/g1/g1StringDedupQueue.hpp Mon Jun 02 15:18:10 2014 -0700 19.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedupQueue.hpp Wed Jun 04 06:53:06 2014 -0700 19.3 @@ -65,6 +65,7 @@ 19.4 G1StringDedupWorkerQueue* _queues; 19.5 size_t _nqueues; 19.6 size_t _cursor; 19.7 + bool _cancel; 19.8 volatile bool _empty; 19.9 19.10 // Statistics counter, only used for logging. 19.11 @@ -81,6 +82,9 @@ 19.12 // Blocks and waits for the queue to become non-empty. 19.13 static void wait(); 19.14 19.15 + // Wakes up any thread blocked waiting for the queue to become non-empty. 19.16 + static void cancel_wait(); 19.17 + 19.18 // Pushes a deduplication candidate onto a specific GC worker queue. 19.19 static void push(uint worker_id, oop java_string); 19.20
20.1 --- a/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Mon Jun 02 15:18:10 2014 -0700 20.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedupThread.cpp Wed Jun 04 06:53:06 2014 -0700 20.3 @@ -73,6 +73,9 @@ 20.4 20.5 // Wait for the queue to become non-empty 20.6 G1StringDedupQueue::wait(); 20.7 + if (_should_terminate) { 20.8 + break; 20.9 + } 20.10 20.11 // Include this thread in safepoints 20.12 stsJoin(); 20.13 @@ -108,7 +111,23 @@ 20.14 stsLeave(); 20.15 } 20.16 20.17 - ShouldNotReachHere(); 20.18 + terminate(); 20.19 +} 20.20 + 20.21 +void G1StringDedupThread::stop() { 20.22 + { 20.23 + MonitorLockerEx ml(Terminator_lock); 20.24 + _thread->_should_terminate = true; 20.25 + } 20.26 + 20.27 + G1StringDedupQueue::cancel_wait(); 20.28 + 20.29 + { 20.30 + MonitorLockerEx ml(Terminator_lock); 20.31 + while (!_thread->_has_terminated) { 20.32 + ml.wait(); 20.33 + } 20.34 + } 20.35 } 20.36 20.37 void G1StringDedupThread::print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
21.1 --- a/src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp Mon Jun 02 15:18:10 2014 -0700 21.2 +++ b/src/share/vm/gc_implementation/g1/g1StringDedupThread.hpp Wed Jun 04 06:53:06 2014 -0700 21.3 @@ -47,6 +47,8 @@ 21.4 21.5 public: 21.6 static void create(); 21.7 + static void stop(); 21.8 + 21.9 static G1StringDedupThread* thread(); 21.10 21.11 virtual void run();
22.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp Mon Jun 02 15:18:10 2014 -0700 22.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp Wed Jun 04 06:53:06 2014 -0700 22.3 @@ -208,6 +208,9 @@ 22.4 // This is the correct place to place such initialization methods. 22.5 virtual void post_initialize() = 0; 22.6 22.7 + // Stop any ongoing concurrent work and prepare for exit. 22.8 + virtual void stop() {} 22.9 + 22.10 MemRegion reserved_region() const { return _reserved; } 22.11 address base() const { return (address)reserved_region().start(); } 22.12
23.1 --- a/src/share/vm/memory/allocation.cpp Mon Jun 02 15:18:10 2014 -0700 23.2 +++ b/src/share/vm/memory/allocation.cpp Wed Jun 04 06:53:06 2014 -0700 23.3 @@ -561,6 +561,7 @@ 23.4 _chunk = new (alloc_failmode, len) Chunk(len); 23.5 23.6 if (_chunk == NULL) { 23.7 + _chunk = k; // restore the previous value of _chunk 23.8 return NULL; 23.9 } 23.10 if (k) k->set_next(_chunk); // Append new chunk to end of linked list
24.1 --- a/src/share/vm/prims/jvmtiEnv.cpp Mon Jun 02 15:18:10 2014 -0700 24.2 +++ b/src/share/vm/prims/jvmtiEnv.cpp Wed Jun 04 06:53:06 2014 -0700 24.3 @@ -307,9 +307,9 @@ 24.4 !java_lang_Class::is_primitive(mirror)) { 24.5 Klass* k = java_lang_Class::as_Klass(mirror); 24.6 assert(k != NULL, "class for non-primitive mirror must exist"); 24.7 - *size_ptr = k->size() * wordSize; 24.8 + *size_ptr = (jlong)k->size() * wordSize; 24.9 } else { 24.10 - *size_ptr = mirror->size() * wordSize; 24.11 + *size_ptr = (jlong)mirror->size() * wordSize; 24.12 } 24.13 return JVMTI_ERROR_NONE; 24.14 } /* end GetObjectSize */
25.1 --- a/src/share/vm/runtime/java.cpp Mon Jun 02 15:18:10 2014 -0700 25.2 +++ b/src/share/vm/runtime/java.cpp Wed Jun 04 06:53:06 2014 -0700 25.3 @@ -497,6 +497,9 @@ 25.4 os::infinite_sleep(); 25.5 } 25.6 25.7 + // Stop any ongoing concurrent GC work 25.8 + Universe::heap()->stop(); 25.9 + 25.10 // Terminate watcher thread - must before disenrolling any periodic task 25.11 if (PeriodicTask::num_tasks() > 0) 25.12 WatcherThread::stop();
26.1 --- a/src/share/vm/utilities/workgroup.cpp Mon Jun 02 15:18:10 2014 -0700 26.2 +++ b/src/share/vm/utilities/workgroup.cpp Wed Jun 04 06:53:06 2014 -0700 26.3 @@ -378,21 +378,22 @@ 26.4 26.5 WorkGangBarrierSync::WorkGangBarrierSync() 26.6 : _monitor(Mutex::safepoint, "work gang barrier sync", true), 26.7 - _n_workers(0), _n_completed(0), _should_reset(false) { 26.8 + _n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) { 26.9 } 26.10 26.11 WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name) 26.12 : _monitor(Mutex::safepoint, name, true), 26.13 - _n_workers(n_workers), _n_completed(0), _should_reset(false) { 26.14 + _n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) { 26.15 } 26.16 26.17 void WorkGangBarrierSync::set_n_workers(uint n_workers) { 26.18 - _n_workers = n_workers; 26.19 - _n_completed = 0; 26.20 + _n_workers = n_workers; 26.21 + _n_completed = 0; 26.22 _should_reset = false; 26.23 + _aborted = false; 26.24 } 26.25 26.26 -void WorkGangBarrierSync::enter() { 26.27 +bool WorkGangBarrierSync::enter() { 26.28 MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag); 26.29 if (should_reset()) { 26.30 // The should_reset() was set and we are the first worker to enter 26.31 @@ -415,10 +416,17 @@ 26.32 set_should_reset(true); 26.33 monitor()->notify_all(); 26.34 } else { 26.35 - while (n_completed() != n_workers()) { 26.36 + while (n_completed() != n_workers() && !aborted()) { 26.37 monitor()->wait(/* no_safepoint_check */ true); 26.38 } 26.39 } 26.40 + return !aborted(); 26.41 +} 26.42 + 26.43 +void WorkGangBarrierSync::abort() { 26.44 + MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag); 26.45 + set_aborted(); 26.46 + monitor()->notify_all(); 26.47 } 26.48 26.49 // SubTasksDone functions.
27.1 --- a/src/share/vm/utilities/workgroup.hpp Mon Jun 02 15:18:10 2014 -0700 27.2 +++ b/src/share/vm/utilities/workgroup.hpp Wed Jun 04 06:53:06 2014 -0700 27.3 @@ -359,18 +359,20 @@ 27.4 class WorkGangBarrierSync : public StackObj { 27.5 protected: 27.6 Monitor _monitor; 27.7 - uint _n_workers; 27.8 - uint _n_completed; 27.9 + uint _n_workers; 27.10 + uint _n_completed; 27.11 bool _should_reset; 27.12 + bool _aborted; 27.13 27.14 Monitor* monitor() { return &_monitor; } 27.15 uint n_workers() { return _n_workers; } 27.16 uint n_completed() { return _n_completed; } 27.17 bool should_reset() { return _should_reset; } 27.18 + bool aborted() { return _aborted; } 27.19 27.20 void zero_completed() { _n_completed = 0; } 27.21 void inc_completed() { _n_completed++; } 27.22 - 27.23 + void set_aborted() { _aborted = true; } 27.24 void set_should_reset(bool v) { _should_reset = v; } 27.25 27.26 public: 27.27 @@ -383,8 +385,14 @@ 27.28 27.29 // Enter the barrier. A worker that enters the barrier will 27.30 // not be allowed to leave until all other threads have 27.31 - // also entered the barrier. 27.32 - void enter(); 27.33 + // also entered the barrier or the barrier is aborted. 27.34 + // Returns false if the barrier was aborted. 27.35 + bool enter(); 27.36 + 27.37 + // Aborts the barrier and wakes up any threads waiting for 27.38 + // the barrier to complete. The barrier will remain in the 27.39 + // aborted state until the next call to set_n_workers(). 27.40 + void abort(); 27.41 }; 27.42 27.43 // A class to manage claiming of subtasks within a group of tasks. The
28.1 --- a/test/TEST.groups Mon Jun 02 15:18:10 2014 -0700 28.2 +++ b/test/TEST.groups Wed Jun 04 06:53:06 2014 -0700 28.3 @@ -134,6 +134,8 @@ 28.4 gc/arguments/TestDynMaxHeapFreeRatio.java \ 28.5 runtime/InternalApi/ThreadCpuTimesDeadlock.java \ 28.6 serviceability/threads/TestFalseDeadLock.java \ 28.7 + serviceability/jvmti/GetObjectSizeOverflow.java \ 28.8 + serviceability/jvmti/TestRedefineWithUnresolvedClass.java \ 28.9 compiler/tiered/NonTieredLevelsTest.java \ 28.10 compiler/tiered/TieredLevelsTest.java \ 28.11 compiler/intrinsics/bmi/verifycode
29.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 29.2 +++ b/test/serviceability/jvmti/GetObjectSizeOverflow.java Wed Jun 04 06:53:06 2014 -0700 29.3 @@ -0,0 +1,64 @@ 29.4 +/* 29.5 + * Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved. 29.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.7 + * 29.8 + * This code is free software; you can redistribute it and/or modify it 29.9 + * under the terms of the GNU General Public License version 2 only, as 29.10 + * published by the Free Software Foundation. 29.11 + * 29.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 29.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 29.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 29.15 + * version 2 for more details (a copy is included in the LICENSE file that 29.16 + * accompanied this code). 29.17 + * 29.18 + * You should have received a copy of the GNU General Public License version 29.19 + * 2 along with this work; if not, write to the Free Software Foundation, 29.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 29.21 + * 29.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 29.23 + * or visit www.oracle.com if you need additional information or have any 29.24 + * questions. 
29.25 + */ 29.26 +import java.io.PrintWriter; 29.27 +import com.oracle.java.testlibrary.*; 29.28 + 29.29 +/* 29.30 + * Test to verify GetObjectSize does not overflow on a 600M element int[] 29.31 + * 29.32 + * @test 29.33 + * @bug 8027230 29.34 + * @library /testlibrary 29.35 + * @build GetObjectSizeOverflowAgent 29.36 + * @run main ClassFileInstaller GetObjectSizeOverflowAgent 29.37 + * @run main GetObjectSizeOverflow 29.38 + */ 29.39 +public class GetObjectSizeOverflow { 29.40 + public static void main(String[] args) throws Exception { 29.41 + 29.42 + if (!Platform.is64bit()) { 29.43 + System.out.println("Test needs a 4GB heap and can only be run as a 64bit process, skipping."); 29.44 + return; 29.45 + } 29.46 + 29.47 + PrintWriter pw = new PrintWriter("MANIFEST.MF"); 29.48 + pw.println("Premain-Class: GetObjectSizeOverflowAgent"); 29.49 + pw.close(); 29.50 + 29.51 + ProcessBuilder pb = new ProcessBuilder(); 29.52 + pb.command(new String[] { JDKToolFinder.getJDKTool("jar"), "cmf", "MANIFEST.MF", "agent.jar", "GetObjectSizeOverflowAgent.class"}); 29.53 + pb.start().waitFor(); 29.54 + 29.55 + ProcessBuilder pt = ProcessTools.createJavaProcessBuilder(true, "-Xmx4000m", "-javaagent:agent.jar", "GetObjectSizeOverflowAgent"); 29.56 + OutputAnalyzer output = new OutputAnalyzer(pt.start()); 29.57 + 29.58 + if (output.getStdout().contains("Could not reserve enough space") || output.getStderr().contains("java.lang.OutOfMemoryError")) { 29.59 + System.out.println("stdout: " + output.getStdout()); 29.60 + System.out.println("stderr: " + output.getStderr()); 29.61 + System.out.println("Test could not reserve or allocate enough space, skipping"); 29.62 + return; 29.63 + } 29.64 + 29.65 + output.stdoutShouldContain("GetObjectSizeOverflow passed"); 29.66 + } 29.67 +}
30.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 30.2 +++ b/test/serviceability/jvmti/GetObjectSizeOverflowAgent.java Wed Jun 04 06:53:06 2014 -0700 30.3 @@ -0,0 +1,43 @@ 30.4 +/* 30.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. 30.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 30.7 + * 30.8 + * This code is free software; you can redistribute it and/or modify it 30.9 + * under the terms of the GNU General Public License version 2 only, as 30.10 + * published by the Free Software Foundation. 30.11 + * 30.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 30.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 30.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 30.15 + * version 2 for more details (a copy is included in the LICENSE file that 30.16 + * accompanied this code). 30.17 + * 30.18 + * You should have received a copy of the GNU General Public License version 30.19 + * 2 along with this work; if not, write to the Free Software Foundation, 30.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 30.21 + * 30.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 30.23 + * or visit www.oracle.com if you need additional information or have any 30.24 + * questions. 
30.25 + */ 30.26 +import java.lang.instrument.*; 30.27 + 30.28 +public class GetObjectSizeOverflowAgent { 30.29 + 30.30 + static Instrumentation instrumentation; 30.31 + 30.32 + public static void premain(String agentArgs, Instrumentation instrumentation) { 30.33 + GetObjectSizeOverflowAgent.instrumentation = instrumentation; 30.34 + } 30.35 + 30.36 + public static void main(String[] args) throws Exception { 30.37 + int[] a = new int[600_000_000]; 30.38 + long size = instrumentation.getObjectSize(a); 30.39 + 30.40 + if (size < 2_400_000_000L) { 30.41 + throw new RuntimeException("Invalid size of array, expected >= 2400000000, got " + size); 30.42 + } 30.43 + 30.44 + System.out.println("GetObjectSizeOverflow passed"); 30.45 + } 30.46 +}