Wed, 17 Jun 2020 11:43:05 +0300
8220293: Deadlock in JFR string pool
Reviewed-by: rehn, egahlin
apetushkov@9858 | 1 | /* |
apetushkov@9858 | 2 | * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. |
apetushkov@9858 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
apetushkov@9858 | 4 | * |
apetushkov@9858 | 5 | * This code is free software; you can redistribute it and/or modify it |
apetushkov@9858 | 6 | * under the terms of the GNU General Public License version 2 only, as |
apetushkov@9858 | 7 | * published by the Free Software Foundation. |
apetushkov@9858 | 8 | * |
apetushkov@9858 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
apetushkov@9858 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
apetushkov@9858 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
apetushkov@9858 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
apetushkov@9858 | 13 | * accompanied this code). |
apetushkov@9858 | 14 | * |
apetushkov@9858 | 15 | * You should have received a copy of the GNU General Public License version |
apetushkov@9858 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
apetushkov@9858 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
apetushkov@9858 | 18 | * |
apetushkov@9858 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
apetushkov@9858 | 20 | * or visit www.oracle.com if you need additional information or have any |
apetushkov@9858 | 21 | * questions. |
apetushkov@9858 | 22 | * |
apetushkov@9858 | 23 | */ |
apetushkov@9858 | 24 | |
apetushkov@9858 | 25 | #ifndef SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP |
apetushkov@9858 | 26 | #define SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP |
apetushkov@9858 | 27 | |
apetushkov@9858 | 28 | #include "jfr/recorder/storage/jfrStorageUtils.hpp" |
apetushkov@9928 | 29 | #include "runtime/thread.inline.hpp" |
apetushkov@9858 | 30 | |
apetushkov@9858 | 31 | template <typename T> |
apetushkov@9858 | 32 | inline bool UnBufferedWriteToChunk<T>::write(T* t, const u1* data, size_t size) { |
apetushkov@9858 | 33 | _writer.write_unbuffered(data, size); |
apetushkov@9858 | 34 | _processed += size; |
apetushkov@9858 | 35 | return true; |
apetushkov@9858 | 36 | } |
apetushkov@9858 | 37 | |
apetushkov@9858 | 38 | template <typename T> |
apetushkov@9858 | 39 | inline bool DefaultDiscarder<T>::discard(T* t, const u1* data, size_t size) { |
apetushkov@9858 | 40 | _processed += size; |
apetushkov@9858 | 41 | return true; |
apetushkov@9858 | 42 | } |
apetushkov@9858 | 43 | |
// Flushes the unflushed span [concurrent_top, pos) of buffer t through the
// wrapped Operation, then publishes the advanced top.
// Returns the Operation's result (true when there was nothing to flush).
template <typename Operation>
inline bool ConcurrentWriteOp<Operation>::process(typename Operation::Type* t) {
  const u1* const current_top = t->concurrent_top();
  const size_t unflushed_size = t->pos() - current_top;
  if (unflushed_size == 0) {
    // Nothing to write, yet the (unchanged) top is still stored back.
    // NOTE(review): set_concurrent_top() presumably also releases whatever
    // lease concurrent_top() took on the buffer, which is why this call is
    // not skipped — confirm against the buffer implementation.
    t->set_concurrent_top(current_top);
    return true;
  }
  const bool result = _operation.write(t, current_top, unflushed_size);
  // Advance the published top past the flushed bytes (and release).
  t->set_concurrent_top(current_top + unflushed_size);
  return result;
}
apetushkov@9858 | 56 | |
apetushkov@9858 | 57 | template <typename Operation> |
apetushkov@9858 | 58 | inline bool ConcurrentWriteOpExcludeRetired<Operation>::process(typename Operation::Type* t) { |
apetushkov@9858 | 59 | if (t->retired()) { |
apetushkov@9858 | 60 | assert(t->empty(), "invariant"); |
apetushkov@9858 | 61 | return true; |
apetushkov@9858 | 62 | } |
apetushkov@9858 | 63 | return ConcurrentWriteOp<Operation>::process(t); |
apetushkov@9858 | 64 | } |
apetushkov@9858 | 65 | |
apetushkov@9858 | 66 | template <typename Operation> |
apetushkov@9858 | 67 | inline bool MutexedWriteOp<Operation>::process(typename Operation::Type* t) { |
apetushkov@9858 | 68 | assert(t != NULL, "invariant"); |
apetushkov@9858 | 69 | const u1* const current_top = t->top(); |
apetushkov@9858 | 70 | const size_t unflushed_size = t->pos() - current_top; |
apetushkov@9858 | 71 | if (unflushed_size == 0) { |
apetushkov@9858 | 72 | return true; |
apetushkov@9858 | 73 | } |
apetushkov@9858 | 74 | const bool result = _operation.write(t, current_top, unflushed_size); |
apetushkov@9858 | 75 | t->set_top(current_top + unflushed_size); |
apetushkov@9858 | 76 | return result; |
apetushkov@9858 | 77 | } |
apetushkov@9858 | 78 | |
// Acquires buffer t for the current thread, unless the buffer is — or
// becomes — retired, in which case it returns without acquiring.
// NOTE(review): re-checking retired() inside the spin loop appears to be
// the point of this helper (per the "Deadlock in JFR string pool"
// changeset): a buffer retired mid-spin would otherwise keep this thread
// spinning on try_acquire() indefinitely — confirm against JDK-8220293.
// Postcondition: t is either acquired by the current thread or retired.
template <typename Type>
static void retired_sensitive_acquire(Type* t) {
  assert(t != NULL, "invariant");
  if (t->retired()) {
    return;
  }
  Thread* const thread = Thread::current();
  while (!t->try_acquire(thread)) {
    if (t->retired()) {
      return;
    }
  }
}
apetushkov@9928 | 92 | |
// Like MutexedWriteOp::process(), but first takes exclusive ownership of
// the buffer (skipped when the buffer is retired — see
// retired_sensitive_acquire above).
template <typename Operation>
inline bool ExclusiveOp<Operation>::process(typename Operation::Type* t) {
  retired_sensitive_acquire(t);
  assert(t->acquired_by_self() || t->retired(), "invariant");
  // User is required to ensure proper release of the acquisition
  return MutexedWriteOp<Operation>::process(t);
}
apetushkov@9928 | 100 | |
// Discards the unflushed span of buffer t through the wrapped Operation's
// discard() and advances the appropriate top pointer. The _mode flag
// selects between the concurrent top (with its store-back/release
// semantics, mirroring ConcurrentWriteOp) and the plain mutexed top.
// Returns the Operation's result (true when the span was empty).
template <typename Operation>
inline bool DiscardOp<Operation>::process(typename Operation::Type* t) {
  assert(t != NULL, "invariant");
  const u1* const current_top = _mode == concurrent ? t->concurrent_top() : t->top();
  const size_t unflushed_size = t->pos() - current_top;
  if (unflushed_size == 0) {
    if (_mode == concurrent) {
      // Store the unchanged top back; NOTE(review): presumably this also
      // releases the lease taken by concurrent_top() — confirm.
      t->set_concurrent_top(current_top);
    }
    return true;
  }
  const bool result = _operation.discard(t, current_top, unflushed_size);
  if (_mode == concurrent) {
    t->set_concurrent_top(current_top + unflushed_size);
  } else {
    t->set_top(current_top + unflushed_size);
  }
  return result;
}
apetushkov@9858 | 120 | |
apetushkov@9858 | 121 | #endif // SHARE_VM_JFR_RECORDER_STORAGE_JFRSTORAGEUTILS_INLINE_HPP |