src/share/vm/gc_implementation/g1/concurrentG1Refine.hpp

author:      jmasa
date:        Tue, 13 Apr 2010 13:52:10 -0700
changeset:   1822:0bfd3fb24150
parent:      1717:b81f3572f355
child:       1907:c18cbe5936b8
permissions: -rw-r--r--

6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
Summary: Ensure that a full GC which clears SoftReferences is performed before throwing an out-of-memory error.
Reviewed-by: ysr, jcoomes

ysr@777 1 /*
xdono@1014 2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
ysr@777 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
ysr@777 4 *
ysr@777 5 * This code is free software; you can redistribute it and/or modify it
ysr@777 6 * under the terms of the GNU General Public License version 2 only, as
ysr@777 7 * published by the Free Software Foundation.
ysr@777 8 *
ysr@777 9 * This code is distributed in the hope that it will be useful, but WITHOUT
ysr@777 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
ysr@777 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
ysr@777 12 * version 2 for more details (a copy is included in the LICENSE file that
ysr@777 13 * accompanied this code).
ysr@777 14 *
ysr@777 15 * You should have received a copy of the GNU General Public License version
ysr@777 16 * 2 along with this work; if not, write to the Free Software Foundation,
ysr@777 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
ysr@777 18 *
ysr@777 19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
ysr@777 20 * CA 95054 USA or visit www.sun.com if you need additional information or
ysr@777 21 * have any questions.
ysr@777 22 *
ysr@777 23 */
ysr@777 24
ysr@777 25 // Forward decl
ysr@777 26 class ConcurrentG1RefineThread;
ysr@777 27 class G1RemSet;
ysr@777 28
apetrusenko@984 29 class ConcurrentG1Refine: public CHeapObj {
iveresov@1229 30 ConcurrentG1RefineThread** _threads;
iveresov@1229 31 int _n_threads;
iveresov@1546 32 int _n_worker_threads;
iveresov@1546 33 /*
iveresov@1546 34 * The length of the update buffer queue falls into one of 3 zones:
iveresov@1546 35 * green, yellow, and red. If the length is in [0, green) nothing is
iveresov@1546 36 * done, the buffers are left unprocessed to enable the caching effect of the
iveresov@1546 37 * dirtied cards. In the yellow zone [green, yellow) the concurrent refinement
iveresov@1546 38 * threads are gradually activated. In [yellow, red) all threads are
iveresov@1546 39 * running. If the length becomes red (max queue length) the mutators start
iveresov@1546 40 * processing the buffers.
iveresov@1546 41 *
tonyp@1717 42 * There are some interesting cases (when G1UseAdaptiveConcRefinement
tonyp@1717 43 * is turned off):
iveresov@1546 44 * 1) green = yellow = red = 0. In this case the mutator processes all
iveresov@1546 45 * buffers, except for those created by the deferred update
iveresov@1546 46 * machinery during a collection.
iveresov@1546 47 * 2) green = 0. This means no caching; it can be a good way to minimize
iveresov@1546 48 * the amount of time spent updating remembered sets during a collection.
iveresov@1546 49 */
iveresov@1546 50 int _green_zone;
iveresov@1546 51 int _yellow_zone;
iveresov@1546 52 int _red_zone;
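  // For illustration only: a sketch (under assumptions, not the actual
  // implementation) of how the zone boundaries and the threshold step
  // below could be combined to give each refinement worker its own
  // activation threshold, so that workers come on line gradually as the
  // update buffer queue grows through the yellow zone:
  //
  //   int example_activation_threshold(ConcurrentG1Refine* cg1r, int worker_i) {
  //     // worker 0 activates at the green boundary; each later worker
  //     // waits for the queue to grow by one more threshold step
  //     int t = cg1r->green_zone() + worker_i * cg1r->thread_threshold_step();
  //     return MIN2(t, cg1r->yellow_zone());  // never beyond the yellow zone
  //   }
  //
  // The helper name above is hypothetical; only green_zone(), yellow_zone()
  // and thread_threshold_step() are real accessors of this class.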
iveresov@1546 53
iveresov@1546 54 int _thread_threshold_step;
iveresov@1546 55
iveresov@1546 56 // Reset the threshold step value based on the current zone boundaries.
iveresov@1546 57 void reset_threshold_step();
johnc@1325 58
ysr@777 59 // The cache for card refinement.
johnc@1325 60 bool _use_cache;
johnc@1325 61 bool _def_use_cache;
ysr@777 62
johnc@1325 63 size_t _n_periods; // Used as clearing epoch
johnc@1325 64
johnc@1325 65 // An evicting cache of the number of times each card
johnc@1325 66 // is accessed. Reduces, but does not eliminate, the amount
johnc@1325 67 // of duplicated processing of dirty cards.
johnc@1325 68
johnc@1325 69 enum SomePrivateConstants {
johnc@1325 70 epoch_bits = 32,
johnc@1325 71 card_num_shift = epoch_bits,
johnc@1325 72 epoch_mask = AllBits,
johnc@1325 73 card_num_mask = AllBits,
johnc@1325 74
johnc@1325 75 // The initial cache size is approximately 1/InitialCacheFraction
johnc@1325 76 // of the size of a maximal cache (i.e. the size needed for all
johnc@1325 77 // cards in the heap); see the worked example after this enum.
johnc@1325 78 InitialCacheFraction = 512
johnc@1325 79 };
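  // Worked example for the fraction above (illustrative arithmetic only,
  // assuming G1's 512 byte cards): a 1 GB reserved heap has
  // 1 GB / 512 B = 2M cards, so the initial cache would be sized for
  // roughly 2M / InitialCacheFraction = 4096 buckets, rounded up to the
  // next entry of _cc_cache_sizes below.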
johnc@1325 80
johnc@1325 81 const static julong card_num_mask_in_place =
johnc@1325 82 (julong) card_num_mask << card_num_shift;
johnc@1325 83
johnc@1325 84 typedef struct {
johnc@1325 85 julong _value; // | card_num | epoch |
johnc@1325 86 } CardEpochCacheEntry;
johnc@1325 87
johnc@1325 88 julong make_epoch_entry(unsigned int card_num, unsigned int epoch) {
johnc@1325 89 assert(0 <= card_num && card_num < _max_n_card_counts, "Bounds");
johnc@1325 90 assert(0 <= epoch && epoch <= _n_periods, "must be");
johnc@1325 91
johnc@1325 92 return ((julong) card_num << card_num_shift) | epoch;
johnc@1325 93 }
johnc@1325 94
johnc@1325 95 unsigned int extract_epoch(julong v) {
johnc@1325 96 return (v & epoch_mask);
johnc@1325 97 }
johnc@1325 98
johnc@1325 99 unsigned int extract_card_num(julong v) {
johnc@1325 100 return (v & card_num_mask_in_place) >> card_num_shift;
johnc@1325 101 }
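  // A worked example of the packing above (values chosen for
  // illustration): with epoch_bits = 32, a card number of 5 recorded in
  // epoch 3 is stored and recovered as
  //
  //   julong e = make_epoch_entry(5, 3);  // == 0x0000000500000003
  //   extract_card_num(e);                // == 5 (upper 32 bits)
  //   extract_epoch(e);                   // == 3 (lower 32 bits)
  //
  // Comparing the stored epoch with the current value of _n_periods is
  // what lets a cached count be recognized as stale and reset lazily.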
johnc@1325 102
johnc@1325 103 typedef struct {
johnc@1325 104 unsigned char _count;
johnc@1325 105 unsigned char _evict_count;
johnc@1325 106 } CardCountCacheEntry;
johnc@1325 107
johnc@1325 108 CardCountCacheEntry* _card_counts;
johnc@1325 109 CardEpochCacheEntry* _card_epochs;
johnc@1325 110
johnc@1325 111 // The current number of buckets in the card count cache
johnc@1325 112 unsigned _n_card_counts;
johnc@1325 113
johnc@1325 114 // The max number of buckets required for the number of
johnc@1325 115 // cards for the entire reserved heap
johnc@1325 116 unsigned _max_n_card_counts;
johnc@1325 117
johnc@1325 118 // Possible sizes of the cache: odd primes that roughly double in size.
johnc@1325 119 // (See jvmtiTagMap.cpp).
johnc@1325 120 static int _cc_cache_sizes[];
johnc@1325 121
johnc@1325 122 // The index in _cc_cache_sizes corresponding to the size of
johnc@1325 123 // _card_counts.
johnc@1325 124 int _cache_size_index;
johnc@1325 125
johnc@1325 126 bool _expand_card_counts;
johnc@1325 127
johnc@1325 128 const jbyte* _ct_bot; // "Bottom" of the card table; card numbers are offsets from here.
johnc@1324 129
johnc@1324 130 jbyte** _hot_cache;
johnc@1324 131 int _hot_cache_size;
johnc@1324 132 int _n_hot;
johnc@1324 133 int _hot_cache_idx;
johnc@1324 134
johnc@1324 135 int _hot_cache_par_chunk_size;
johnc@1324 136 volatile int _hot_cache_par_claimed_idx;
ysr@777 137
johnc@1325 138 // Needed to work around 6817995
johnc@1325 139 CardTableModRefBS* _ct_bs;
johnc@1325 140 G1CollectedHeap* _g1h;
johnc@1325 141
johnc@1325 142 // Expands the array that holds the card counts to the next size up
johnc@1325 143 void expand_card_count_cache();
johnc@1325 144
johnc@1325 145 // hash a given key (index of card_ptr) with the specified size
johnc@1325 146 static unsigned int hash(size_t key, int size) {
johnc@1325 147 return (unsigned int) key % size;
johnc@1325 148 }
johnc@1325 149
johnc@1325 150 // hash a given key (index of card_ptr)
johnc@1325 151 unsigned int hash(size_t key) {
johnc@1325 152 return hash(key, _n_card_counts);
johnc@1325 153 }
johnc@1325 154
johnc@1325 155 unsigned ptr_2_card_num(jbyte* card_ptr) {
johnc@1325 156 return (unsigned) (card_ptr - _ct_bot);
johnc@1325 157 }
johnc@1325 158
johnc@1325 159 jbyte* card_num_2_ptr(unsigned card_num) {
johnc@1325 160 return (jbyte*) (_ct_bot + card_num);
johnc@1325 161 }
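  // Putting the helpers above together (an illustrative sketch, not a
  // separate member of this class): a dirty card pointer is mapped to a
  // cache bucket as
  //
  //   unsigned card_num = ptr_2_card_num(card_ptr); // offset from _ct_bot
  //   unsigned bucket   = hash(card_num);           // card_num % _n_card_counts
  //
  // so the counts cache is a simple open hash over card numbers, and
  // distinct cards that collide in a bucket can evict each other's counts.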
johnc@1325 162
ysr@777 163 // Adds/increments the count for the given card. Returns the card to be refined (the given card, or one evicted from the cache); its count is returned via *count, and *defer indicates whether processing of the returned card should be deferred.
johnc@1325 164 jbyte* add_card_count(jbyte* card_ptr, int* count, bool* defer);
ysr@777 165
johnc@1325 166 // Returns true if this card is in a young region
johnc@1325 167 bool is_young_card(jbyte* card_ptr);
johnc@1325 168
ysr@777 169 public:
ysr@777 170 ConcurrentG1Refine();
ysr@777 171 ~ConcurrentG1Refine();
ysr@777 172
ysr@777 173 void init(); // Accomplish some initialization that has to wait.
iveresov@1229 174 void stop();
ysr@777 175
iveresov@1546 176 void reinitialize_threads();
iveresov@1546 177
iveresov@1229 178 // Iterate over the conc refine threads
iveresov@1229 179 void threads_do(ThreadClosure *tc);
ysr@777 180
ysr@777 181 // If this is the first entry for the slot, writes into the cache and
ysr@777 182 // returns NULL. If it causes an eviction, returns the evicted pointer.
ysr@777 183 // Otherwise, it's a cache hit, and returns NULL.
johnc@1325 184 jbyte* cache_insert(jbyte* card_ptr, bool* defer);
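  // An illustrative sketch of the intended calling pattern (the caller
  // shown here is simplified and hypothetical, not the actual refinement
  // code):
  //
  //   bool defer = false;
  //   jbyte* res = cg1r->cache_insert(card_ptr, &defer);
  //   if (res != NULL) {
  //     if (defer) {
  //       // hold the returned card until the next GC pause
  //     } else {
  //       // refine the returned card now (scan it and update rem sets)
  //     }
  //   }
  //   // res == NULL: the card stays cached; nothing to do yet.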
ysr@777 185
ysr@777 186 // Process the cached entries.
ysr@777 187 void clean_up_cache(int worker_i, G1RemSet* g1rs);
ysr@777 188
johnc@1324 189 // Set up for parallel processing of the cards in the hot cache
johnc@1324 190 void clear_hot_cache_claimed_index() {
johnc@1324 191 _hot_cache_par_claimed_idx = 0;
johnc@1324 192 }
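  // A sketch of the chunk-claiming protocol these indices support
  // (simplified and illustrative; the real draining code lives elsewhere):
  // each worker claims fixed-size chunks of the hot cache with a CAS on
  // the claimed index, so the cached cards can be processed by several
  // threads without locking.
  //
  //   int start;
  //   while ((start = _hot_cache_par_claimed_idx) < _n_hot) {
  //     int end = start + _hot_cache_par_chunk_size;
  //     if (Atomic::cmpxchg(end, &_hot_cache_par_claimed_idx, start) == start) {
  //       // this worker owns entries [start, MIN2(end, _n_hot))
  //     }
  //   }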
johnc@1324 193
ysr@777 194 // Discard entries in the hot cache.
ysr@777 195 void clear_hot_cache() {
ysr@777 196 _hot_cache_idx = 0; _n_hot = 0;
ysr@777 197 }
ysr@777 198
ysr@777 199 bool hot_cache_is_empty() { return _n_hot == 0; }
ysr@777 200
ysr@777 201 bool use_cache() { return _use_cache; }
ysr@777 202 void set_use_cache(bool b) {
ysr@777 203 if (b) _use_cache = _def_use_cache;
ysr@777 204 else _use_cache = false;
ysr@777 205 }
ysr@777 206
ysr@777 207 void clear_and_record_card_counts();
iveresov@1230 208
iveresov@1546 209 static int thread_num();
tonyp@1454 210
tonyp@1454 211 void print_worker_threads_on(outputStream* st) const;
iveresov@1546 212
iveresov@1546 213 void set_green_zone(int x) { _green_zone = x; }
iveresov@1546 214 void set_yellow_zone(int x) { _yellow_zone = x; }
iveresov@1546 215 void set_red_zone(int x) { _red_zone = x; }
iveresov@1546 216
iveresov@1546 217 int green_zone() const { return _green_zone; }
iveresov@1546 218 int yellow_zone() const { return _yellow_zone; }
iveresov@1546 219 int red_zone() const { return _red_zone; }
iveresov@1546 220
iveresov@1546 221 int total_thread_num() const { return _n_threads; }
iveresov@1546 222 int worker_thread_num() const { return _n_worker_threads; }
iveresov@1546 223
iveresov@1546 224 int thread_threshold_step() const { return _thread_threshold_step; }
ysr@777 225 };
