src/share/vm/memory/threadLocalAllocBuffer.hpp

author:      poonam
date:        Tue, 16 Feb 2016 21:42:29 +0000
changeset:   8308:6acf14e730dd
parent:      7470:060cdf93040c
child:       7535:7ae4e26cb1e0
permissions: -rw-r--r--

8072725: Provide more granular levels for GC verification
Summary: Add option VerifySubSet to selectively verify the memory sub-systems
Reviewed-by: kevinw, jmasa

/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_THREADLOCALALLOCBUFFER_HPP
#define SHARE_VM_MEMORY_THREADLOCALALLOCBUFFER_HPP

#include "gc_implementation/shared/gcUtil.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/perfData.hpp"

class GlobalTLABStats;

// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
// the threads for allocation.
// It is thread-private at any time, but may be multiplexed over
// time across multiple threads. The park()/unpark() pair is
// used to make it available for such multiplexing.
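//
// Illustrative use (a sketch, not code from this header): the allocation
// fast path bump-allocates from the current TLAB and falls back to the
// shared heap only when the request does not fit, roughly:
//
//   HeapWord* obj = thread->tlab().allocate(size);   // lock-free bump of _top
//   if (obj == NULL) {
//     // Does not fit: either keep this TLAB and allocate the object
//     // directly from the shared heap, or retire the TLAB and refill a
//     // fresh one (see refill_waste_limit() below).
//     obj = allocate_outside_tlab(thread, size);     // hypothetical helper
//   }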
class ThreadLocalAllocBuffer: public CHeapObj<mtThread> {
  friend class VMStructs;
 private:
  HeapWord* _start;                              // address of TLAB
  HeapWord* _top;                                // address after last allocation
  HeapWord* _pf_top;                             // allocation prefetch watermark
  HeapWord* _end;                                // allocation end (excluding alignment_reserve)
  size_t    _desired_size;                       // desired size   (including alignment_reserve)
  size_t    _refill_waste_limit;                 // hold onto tlab if free() is larger than this
  size_t    _allocated_before_last_gc;           // total bytes allocated up until the last gc

  static size_t   _max_size;                     // maximum size of any TLAB
  static unsigned _target_refills;               // expected number of refills between GCs

  unsigned  _number_of_refills;                  // number of TLAB refills since the last GC
  unsigned  _fast_refill_waste;                  // words wasted by refills done in generated (fast-path) code
  unsigned  _slow_refill_waste;                  // words wasted by refills done in the runtime (slow path)
  unsigned  _gc_waste;                           // words left unused in the TLAB at GC time
  unsigned  _slow_allocations;                   // allocations that bypassed the TLAB and went to the shared heap

  AdaptiveWeightedAverage _allocation_fraction;  // fraction of eden allocated in tlabs

  void accumulate_statistics();
  void initialize_statistics();

  void set_start(HeapWord* start)                { _start = start; }
  void set_end(HeapWord* end)                    { _end = end; }
  void set_top(HeapWord* top)                    { _top = top; }
  void set_pf_top(HeapWord* pf_top)              { _pf_top = pf_top; }
  void set_desired_size(size_t desired_size)     { _desired_size = desired_size; }
  void set_refill_waste_limit(size_t waste)      { _refill_waste_limit = waste; }

  size_t initial_refill_waste_limit()            { return desired_size() / TLABRefillWasteFraction; }

  static int    target_refills()                 { return _target_refills; }
  size_t initial_desired_size();

  size_t remaining() const { return end() == NULL ? 0 : pointer_delta(hard_end(), top()); }

  // Make parsable and release it.
  void reset();

  // Resize based on amount of allocation, etc.
  void resize();
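  // Rough sizing sketch (the precise policy lives in the implementation
  // files): the desired size is derived from the heap's TLAB-eligible
  // capacity, the weighted _allocation_fraction, the number of allocating
  // threads and _target_refills, then clamped to [min_size(), max_size()].
  // When ResizeTLAB is enabled, resize_all_tlabs() applies this per thread
  // after each GC.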

  void invariants() const { assert(top() >= start() && top() <= end(), "invalid tlab"); }

  void initialize(HeapWord* start, HeapWord* top, HeapWord* end);

  void print_stats(const char* tag);

  Thread* myThread();

  // statistics

  int number_of_refills() const { return _number_of_refills; }
  int fast_refill_waste() const { return _fast_refill_waste; }
  int slow_refill_waste() const { return _slow_refill_waste; }
  int gc_waste() const          { return _gc_waste; }
  int slow_allocations() const  { return _slow_allocations; }

  static GlobalTLABStats* _global_stats;
  static GlobalTLABStats* global_stats() { return _global_stats; }

 public:
  ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) {
    // do nothing.  tlabs must be inited by initialize() calls
  }

  static const size_t min_size()                 { return align_object_size(MinTLABSize / HeapWordSize) + alignment_reserve(); }
  static const size_t max_size()                 { assert(_max_size != 0, "max_size not set up"); return _max_size; }
  static void set_max_size(size_t max_size)      { _max_size = max_size; }

  HeapWord* start() const                        { return _start; }
  HeapWord* end() const                          { return _end; }
  HeapWord* hard_end() const                     { return _end + alignment_reserve(); }
  HeapWord* top() const                          { return _top; }
  HeapWord* pf_top() const                       { return _pf_top; }
  size_t desired_size() const                    { return _desired_size; }
  size_t used() const                            { return pointer_delta(top(), start()); }
  size_t used_bytes() const                      { return pointer_delta(top(), start(), 1); }
  size_t free() const                            { return pointer_delta(end(), top()); }
  // Don't discard tlab if remaining space is larger than this.
  size_t refill_waste_limit() const              { return _refill_waste_limit; }

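  // Slow-path policy (a sketch of how the fields above interact; default
  // flag values assumed): when an allocation does not fit, the TLAB is kept
  // as long as free() > refill_waste_limit() and the object is allocated
  // directly from the shared heap; otherwise the TLAB is retired and
  // refilled. The limit starts at desired_size() / TLABRefillWasteFraction
  // (e.g. a 4096-word TLAB starts with a 64-word limit at the default
  // fraction of 64) and grows by TLABWasteIncrement (default 4 words) on
  // each such outside-TLAB allocation, so a TLAB with a large unused tail
  // is eventually retired anyway.
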
  // Allocate size HeapWords. The memory is NOT initialized to zero.
  inline HeapWord* allocate(size_t size);

  // Reserve space at the end of TLAB
  static size_t end_reserve() {
    int reserve_size = typeArrayOopDesc::header_size(T_INT);
    return MAX2(reserve_size, VM_Version::reserve_for_allocation_prefetch());
  }
  static size_t alignment_reserve()              { return align_object_size(end_reserve()); }
  static size_t alignment_reserve_in_bytes()     { return alignment_reserve() * HeapWordSize; }
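  // Note on the reserve (informal): _end stops alignment_reserve() words
  // short of hard_end(), so there is always room to overwrite the unused
  // tail with a filler int[] when the TLAB is retired, and so allocation
  // prefetch that runs ahead of _top stays inside the buffer.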

  // Return tlab size or remaining space in eden such that the
  // space is large enough to hold obj_size and necessary fill space.
  // Otherwise return 0.
  inline size_t compute_size(size_t obj_size);

  // Record slow allocation
  inline void record_slow_allocation(size_t obj_size);

  // Initialization at startup
  static void startup_initialization();

  // Make an in-use tlab parsable, optionally also retiring it.
  void make_parsable(bool retire);

  // Retire in-use tlab before allocation of a new tlab
  void clear_before_allocation();

  // Accumulate statistics across all tlabs before gc
  static void accumulate_statistics_before_gc();

  // Resize tlabs for all threads
  static void resize_all_tlabs();
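  // Typical GC-time sequence (a sketch; the calls are made by the collector,
  // not by this class): before the collection, accumulate_statistics_before_gc()
  // gathers the per-thread counters and make_parsable(true) turns every live
  // TLAB into a heap-parsable region by filling its unused tail; after the
  // collection, resize_all_tlabs() recomputes desired sizes and threads
  // refill on their next allocation.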

  void fill(HeapWord* start, HeapWord* top, size_t new_size);
  void initialize();

  static size_t refill_waste_limit_increment()   { return TLABWasteIncrement; }

  // Code generation support
  static ByteSize start_offset()               { return byte_offset_of(ThreadLocalAllocBuffer, _start); }
  static ByteSize end_offset()                 { return byte_offset_of(ThreadLocalAllocBuffer, _end  ); }
  static ByteSize top_offset()                 { return byte_offset_of(ThreadLocalAllocBuffer, _top  ); }
  static ByteSize pf_top_offset()              { return byte_offset_of(ThreadLocalAllocBuffer, _pf_top  ); }
  static ByteSize size_offset()                { return byte_offset_of(ThreadLocalAllocBuffer, _desired_size ); }
  static ByteSize refill_waste_limit_offset()  { return byte_offset_of(ThreadLocalAllocBuffer, _refill_waste_limit ); }

  static ByteSize number_of_refills_offset()   { return byte_offset_of(ThreadLocalAllocBuffer, _number_of_refills ); }
  static ByteSize fast_refill_waste_offset()   { return byte_offset_of(ThreadLocalAllocBuffer, _fast_refill_waste ); }
  static ByteSize slow_allocations_offset()    { return byte_offset_of(ThreadLocalAllocBuffer, _slow_allocations ); }

  void verify();
};

class GlobalTLABStats: public CHeapObj<mtThread> {
 private:

  // Accumulate perfdata in private variables because
  // PerfData should be write-only for security reasons
  // (see perfData.hpp)
  unsigned _allocating_threads;
  unsigned _total_refills;
  unsigned _max_refills;
  size_t   _total_allocation;
  size_t   _total_gc_waste;
  size_t   _max_gc_waste;
  size_t   _total_slow_refill_waste;
  size_t   _max_slow_refill_waste;
  size_t   _total_fast_refill_waste;
  size_t   _max_fast_refill_waste;
  unsigned _total_slow_allocations;
  unsigned _max_slow_allocations;

  PerfVariable* _perf_allocating_threads;
  PerfVariable* _perf_total_refills;
  PerfVariable* _perf_max_refills;
  PerfVariable* _perf_allocation;
  PerfVariable* _perf_gc_waste;
  PerfVariable* _perf_max_gc_waste;
  PerfVariable* _perf_slow_refill_waste;
  PerfVariable* _perf_max_slow_refill_waste;
  PerfVariable* _perf_fast_refill_waste;
  PerfVariable* _perf_max_fast_refill_waste;
  PerfVariable* _perf_slow_allocations;
  PerfVariable* _perf_max_slow_allocations;

  AdaptiveWeightedAverage _allocating_threads_avg;

 public:
  GlobalTLABStats();

  // Initialize all counters
  void initialize();

  // Write all perf counters to the perf_counters
  void publish();

  void print();

  // Accessors
  unsigned allocating_threads_avg() {
    return MAX2((unsigned)(_allocating_threads_avg.average() + 0.5), 1U);
  }

  size_t allocation() {
    return _total_allocation;
  }

  // Update methods

  void update_allocating_threads() {
    _allocating_threads++;
  }
  void update_number_of_refills(unsigned value) {
    _total_refills += value;
    _max_refills    = MAX2(_max_refills, value);
  }
  void update_allocation(size_t value) {
    _total_allocation += value;
  }
  void update_gc_waste(size_t value) {
    _total_gc_waste += value;
    _max_gc_waste    = MAX2(_max_gc_waste, value);
  }
  void update_fast_refill_waste(size_t value) {
    _total_fast_refill_waste += value;
    _max_fast_refill_waste    = MAX2(_max_fast_refill_waste, value);
  }
  void update_slow_refill_waste(size_t value) {
    _total_slow_refill_waste += value;
    _max_slow_refill_waste    = MAX2(_max_slow_refill_waste, value);
  }
  void update_slow_allocations(unsigned value) {
    _total_slow_allocations += value;
    _max_slow_allocations    = MAX2(_max_slow_allocations, value);
  }
};
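
// How the two classes fit together (informal summary): each thread's
// ThreadLocalAllocBuffer accumulates its own counters and, around GC time,
// folds them into the shared GlobalTLABStats via the update_*() methods;
// publish() then copies the totals into the write-only PerfData counters so
// they can be observed with the usual jvmstat tooling, and print() reports
// them when TLAB statistics printing is enabled.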

#endif // SHARE_VM_MEMORY_THREADLOCALALLOCBUFFER_HPP
