src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp

author       zgu
date         Thu, 28 Jun 2012 17:03:16 -0400
changeset    3900:d2a62e0f25eb
parent       2790:edd9b016deb6
child        4153:b9a9ed0f8eeb
permissions  -rw-r--r--

6995781: Native Memory Tracking (Phase 1)
7151532: DCmd for hotspot native memory tracking
Summary: Implementation of native memory tracking phase 1, which tracks VM native memory usage, and related DCmd
Reviewed-by: acorn, coleenp, fparain

ysr@777 1 /*
johnc@2713 2 * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
stefank@2314 25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
stefank@2314 26 #define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
stefank@2314 27
stefank@2314 28 #include "memory/allocation.hpp"
stefank@2314 29 #include "memory/cardTableModRefBS.hpp"
stefank@2314 30 #include "runtime/thread.hpp"
stefank@2314 31 #include "utilities/globalDefinitions.hpp"
stefank@2314 32
ysr@777 33 // Forward decl
ysr@777 34 class ConcurrentG1RefineThread;
ysr@777 35 class G1RemSet;
ysr@777 36
zgu@3900 37 class ConcurrentG1Refine: public CHeapObj<mtGC> {
iveresov@1229 38 ConcurrentG1RefineThread** _threads;
iveresov@1229 39 int _n_threads;
iveresov@1546 40 int _n_worker_threads;
iveresov@1546 41 /*
iveresov@1546 42 * The value of the update buffer queue length falls into one of 3 zones:
iveresov@1546 43 * green, yellow, red. If the value is in [0, green), nothing is
iveresov@1546 44 * done; the buffers are left unprocessed to enable the caching effect of the
iveresov@1546 45 * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
iveresov@1546 46 * threads are gradually activated. In [yellow, red) all threads are
iveresov@1546 47 * running. If the length reaches the red zone (the maximum queue length), the mutators start
iveresov@1546 48 * processing the buffers.
iveresov@1546 49 *
tonyp@1717 50 * There are some interesting cases (when G1UseAdaptiveConcRefinement
tonyp@1717 51 * is turned off):
iveresov@1546 52 * 1) green = yellow = red = 0. In this case the mutator will process all
iveresov@1546 53 * buffers, except for those that are created by the deferred updates
iveresov@1546 54 * machinery during a collection.
iveresov@1546 55 * 2) green = 0 means no caching. This can be a good way to minimize the
iveresov@1546 56 * amount of time spent updating rsets during a collection.
iveresov@1546 57 */
iveresov@1546 58 int _green_zone;
iveresov@1546 59 int _yellow_zone;
iveresov@1546 60 int _red_zone;
iveresov@1546 61
iveresov@1546 62 int _thread_threshold_step;
iveresov@1546 63
iveresov@1546 64 // Reset the threshold step value based on the current zone boundaries.
iveresov@1546 65 void reset_threshold_step();
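  // Illustrative sketch (not part of this header): under the adaptive scheme
  // described above, worker thread i is woken once the update buffer queue
  // grows past a per-thread threshold derived from the zone boundaries and
  // the step, roughly
  //
  //   int activation_threshold(int worker_i) {   // hypothetical helper
  //     return _green_zone + (worker_i + 1) * _thread_threshold_step;
  //   }
  //
  // so successive workers come on line as the queue length climbs through
  // the yellow zone (the exact policy lives in the refinement thread code).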
johnc@1325 66
ysr@777 67 // The cache for card refinement.
johnc@1325 68 bool _use_cache;
johnc@1325 69 bool _def_use_cache;
ysr@777 70
johnc@1325 71 size_t _n_periods; // Used as clearing epoch
johnc@1325 72
johnc@1325 73 // An evicting cache of the number of times each card
johnc@1325 74 // is accessed. Reduces, but does not eliminate, the amount
johnc@1325 75 // of duplicated processing of dirty cards.
johnc@1325 76
johnc@1325 77 enum SomePrivateConstants {
johnc@1325 78 epoch_bits = 32,
johnc@1325 79 card_num_shift = epoch_bits,
johnc@1325 80 epoch_mask = AllBits,
johnc@1325 81 card_num_mask = AllBits,
johnc@1325 82
johnc@1325 83 // The initial cache size is approximately this fraction
johnc@1325 84 // of a maximal cache (i.e. the size needed for all cards
johnc@1325 85 // in the heap)
johnc@1325 86 InitialCacheFraction = 512
johnc@1325 87 };
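  // Worked example (illustration only): if a maximal cache would need about
  // 2^20 entries, the initial cache would start near
  // 2^20 / InitialCacheFraction = 2048 buckets, presumably snapped to one of
  // the prime sizes listed in _cc_cache_sizes below.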
johnc@1325 88
johnc@1325 89 const static julong card_num_mask_in_place =
johnc@1325 90 (julong) card_num_mask << card_num_shift;
johnc@1325 91
johnc@1325 92 typedef struct {
johnc@1325 93 julong _value; // | card_num | epoch |
johnc@1325 94 } CardEpochCacheEntry;
johnc@1325 95
johnc@1325 96 julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
johnc@2713 97 assert(0 <= card_num && card_num < _max_cards, "Bounds");
johnc@1325 98 assert(0 <= epoch && epoch <= _n_periods, "must be");
johnc@1325 99
johnc@1325 100 return ((julong) card_num << card_num_shift) | epoch;
johnc@1325 101 }
johnc@1325 102
johnc@1325 103 unsigned int extract_epoch(julong v) {
johnc@1325 104 return (v & epoch_mask);
johnc@1325 105 }
johnc@1325 106
johnc@1325 107 unsigned int extract_card_num(julong v) {
johnc@1325 108 return (v & card_num_mask_in_place) >> card_num_shift;
johnc@1325 109 }
johnc@1325 110
johnc@1325 111 typedef struct {
johnc@1325 112 unsigned char _count;
johnc@1325 113 unsigned char _evict_count;
johnc@1325 114 } CardCountCacheEntry;
johnc@1325 115
johnc@1325 116 CardCountCacheEntry* _card_counts;
johnc@1325 117 CardEpochCacheEntry* _card_epochs;
johnc@1325 118
johnc@1325 119 // The current number of buckets in the card count cache
johnc@2713 120 size_t _n_card_counts;
johnc@1325 121
johnc@2713 122 // The number of cards for the entire reserved heap
johnc@2713 123 size_t _max_cards;
johnc@2713 124
johnc@2713 125 // The max number of buckets for the card counts and epochs caches.
johnc@2713 126 // This is the maximum that the counts and epochs will grow to.
johnc@2713 127 // It is specified as a fraction or percentage of _max_cards using
johnc@2713 128 // G1MaxHotCardCountSizePercent.
johnc@2713 129 size_t _max_n_card_counts;
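  // A rough sketch of how this bound could be derived (the actual computation
  // is in concurrentG1Refine.cpp and may differ in detail):
  //
  //   _max_n_card_counts = (_max_cards * G1MaxHotCardCountSizePercent) / 100;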
johnc@1325 130
johnc@1325 131 // Possible sizes of the cache: odd primes that roughly double in size.
johnc@1325 132 // (See jvmtiTagMap.cpp).
johnc@2713 133 enum {
johnc@2713 134 MAX_CC_CACHE_INDEX = 15 // number of entries in the cache size array.
johnc@2713 135 };
johnc@2713 136
johnc@2713 137 static size_t _cc_cache_sizes[MAX_CC_CACHE_INDEX];
johnc@1325 138
johnc@1325 139 // The index in _cc_cache_sizes corresponding to the size of
johnc@1325 140 // _card_counts.
johnc@1325 141 int _cache_size_index;
johnc@1325 142
johnc@1325 143 bool _expand_card_counts;
johnc@1325 144
johnc@1325 145 const jbyte* _ct_bot;
johnc@1324 146
johnc@1324 147 jbyte** _hot_cache;
johnc@1324 148 int _hot_cache_size;
johnc@1324 149 int _n_hot;
johnc@1324 150 int _hot_cache_idx;
johnc@1324 151
johnc@1324 152 int _hot_cache_par_chunk_size;
johnc@1324 153 volatile int _hot_cache_par_claimed_idx;
ysr@777 154
johnc@1325 155 // Needed to work around 6817995
johnc@1325 156 CardTableModRefBS* _ct_bs;
johnc@1325 157 G1CollectedHeap* _g1h;
johnc@1325 158
johnc@2713 159 // Helper routine for expand_card_count_cache().
johnc@2713 160 // The arrays used to hold the card counts and the epochs must have
johnc@2713 161 // a 1:1 correspondence. Hence they are allocated and freed together.
johnc@2713 162 // Returns true if the allocations of both the counts and epochs
johnc@2713 163 // were successful; false otherwise.
johnc@2713 164 bool allocate_card_count_cache(size_t n,
johnc@2713 165 CardCountCacheEntry** counts,
johnc@2713 166 CardEpochCacheEntry** epochs);
johnc@2713 167
johnc@2713 168 // Expands the arrays that hold the card counts and epochs
johnc@2713 169 // to the cache size at index. Returns true if the expansion/
johnc@2713 170 // allocation was successful; false otherwise.
johnc@2713 171 bool expand_card_count_cache(int index);
johnc@1325 172
johnc@1325 173 // hash a given key (index of card_ptr) with the specified size
johnc@2713 174 static unsigned int hash(size_t key, size_t size) {
johnc@2790 175 return (unsigned int) (key % size);
johnc@1325 176 }
johnc@1325 177
johnc@1325 178 // hash a given key (index of card_ptr)
johnc@1325 179 unsigned int hash(size_t key) {
johnc@1325 180 return hash(key, _n_card_counts);
johnc@1325 181 }
johnc@1325 182
johnc@2790 183 unsigned int ptr_2_card_num(jbyte* card_ptr) {
johnc@2790 184 return (unsigned int) (card_ptr - _ct_bot);
johnc@1325 185 }
johnc@1325 186
johnc@2790 187 jbyte* card_num_2_ptr(unsigned int card_num) {
johnc@1325 188 return (jbyte*) (_ct_bot + card_num);
johnc@1325 189 }
johnc@1325 190
ysr@777 191 // Increments the count of this card; the updated count is returned via *count.
johnc@1325 192 jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);
ysr@777 193
johnc@1325 194 // Returns true if this card is in a young region
johnc@1325 195 bool is_young_card(jbyte* card_ptr);
johnc@1325 196
ysr@777 197 public:
ysr@777 198 ConcurrentG1Refine();
ysr@777 199 ~ConcurrentG1Refine();
ysr@777 200
ysr@777 201 void init(); // Accomplish some initialization that has to wait.
iveresov@1229 202 void stop();
ysr@777 203
iveresov@1546 204 void reinitialize_threads();
iveresov@1546 205
iveresov@1229 206 // Iterate over the conc refine threads
iveresov@1229 207 void threads_do(ThreadClosure *tc);
ysr@777 208
ysr@777 209 // If this is the first entry for the slot, writes into the cache and
ysr@777 210 // returns NULL. If it causes an eviction, returns the evicted pointer.
ysr@777 211 // Otherwise, it is a cache hit, and returns NULL.
johnc@1325 212 jbyte* cache_insert(jbyte* card_ptr, bool* defer);
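  // A minimal caller sketch (hypothetical names, not part of this class):
  // a non-NULL return value is the evicted card that should be refined now.
  //
  //   bool defer = false;
  //   jbyte* evicted = cg1r->cache_insert(card_ptr, &defer);
  //   if (evicted != NULL && !defer) {
  //     // refine 'evicted' instead of 'card_ptr'
  //   }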
ysr@777 213
ysr@777 214 // Process the cached entries.
johnc@2060 215 void clean_up_cache(int worker_i, G1RemSet* g1rs, DirtyCardQueue* into_cset_dcq);
ysr@777 216
johnc@1324 217 // Set up for parallel processing of the cards in the hot cache
johnc@1324 218 void clear_hot_cache_claimed_index() {
johnc@1324 219 _hot_cache_par_claimed_idx = 0;
johnc@1324 220 }
johnc@1324 221
ysr@777 222 // Discard entries in the hot cache.
ysr@777 223 void clear_hot_cache() {
ysr@777 224 _hot_cache_idx = 0; _n_hot = 0;
ysr@777 225 }
ysr@777 226
ysr@777 227 bool hot_cache_is_empty() { return _n_hot == 0; }
ysr@777 228
ysr@777 229 bool use_cache() { return _use_cache; }
ysr@777 230 void set_use_cache(bool b) {
ysr@777 231 if (b) _use_cache = _def_use_cache;
ysr@777 232 else _use_cache = false;
ysr@777 233 }
ysr@777 234
ysr@777 235 void clear_and_record_card_counts();
iveresov@1230 236
iveresov@1546 237 static int thread_num();
tonyp@1454 238
tonyp@1454 239 void print_worker_threads_on(outputStream* st) const;
iveresov@1546 240
iveresov@1546 241 void set_green_zone(int x) { _green_zone = x; }
iveresov@1546 242 void set_yellow_zone(int x) { _yellow_zone = x; }
iveresov@1546 243 void set_red_zone(int x) { _red_zone = x; }
iveresov@1546 244
iveresov@1546 245 int green_zone() const { return _green_zone; }
iveresov@1546 246 int yellow_zone() const { return _yellow_zone; }
iveresov@1546 247 int red_zone() const { return _red_zone; }
iveresov@1546 248
iveresov@1546 249 int total_thread_num() const { return _n_threads; }
iveresov@1546 250 int worker_thread_num() const { return _n_worker_threads; }
iveresov@1546 251
iveresov@1546 252 int thread_threshold_step() const { return _thread_threshold_step; }
ysr@777 253 };
stefank@2314 254
stefank@2314 255 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTG1REFINE_HPP
