Mon, 09 Aug 2010 05:41:05 -0700
6966222: G1: simplify TaskQueue overflow handling
Reviewed-by: tonyp, ysr
ysr@777 | 1 | /* |
johnc@2060 | 2 | * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | // A G1RemSet provides ways of iterating over pointers into a selected |
ysr@777 | 26 | // collection set. |
ysr@777 | 27 | |
ysr@777 | 28 | class G1CollectedHeap; |
ysr@777 | 29 | class CardTableModRefBarrierSet; |
ysr@777 | 30 | class HRInto_G1RemSet; |
ysr@777 | 31 | class ConcurrentG1Refine; |
ysr@777 | 32 | |
apetrusenko@984 | 33 | class G1RemSet: public CHeapObj { |
ysr@777 | 34 | protected: |
ysr@777 | 35 | G1CollectedHeap* _g1; |
ysr@777 | 36 | unsigned _conc_refine_cards; |
ysr@777 | 37 | size_t n_workers(); |
ysr@777 | 38 | |
ysr@777 | 39 | public: |
ysr@777 | 40 | G1RemSet(G1CollectedHeap* g1) : |
iveresov@1229 | 41 | _g1(g1), _conc_refine_cards(0) |
ysr@777 | 42 | {} |
ysr@777 | 43 | |
ysr@777 | 44 | // Invoke "blk->do_oop" on all pointers into the CS in object in regions |
ysr@777 | 45 | // outside the CS (having invoked "blk->set_region" to set the "from" |
ysr@777 | 46 | // region correctly beforehand.) The "worker_i" param is for the |
ysr@777 | 47 | // parallel case where the number of the worker thread calling this |
ysr@777 | 48 | // function can be helpful in partitioning the work to be done. It |
ysr@777 | 49 | // should be the same as the "i" passed to the calling thread's |
ysr@777 | 50 | // work(i) function. In the sequential case this param will be ingored. |
ysr@777 | 51 | virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 52 | int worker_i) = 0; |
ysr@777 | 53 | |
ysr@777 | 54 | // Prepare for and cleanup after an oops_into_collection_set_do |
ysr@777 | 55 | // call. Must call each of these once before and after (in sequential |
ysr@777 | 56 | // code) any threads call oops into collection set do. (This offers an |
ysr@777 | 57 | // opportunity to sequential setup and teardown of structures needed by a |
ysr@777 | 58 | // parallel iteration over the CS's RS.) |
ysr@777 | 59 | virtual void prepare_for_oops_into_collection_set_do() = 0; |
ysr@777 | 60 | virtual void cleanup_after_oops_into_collection_set_do() = 0; |
ysr@777 | 61 | |
ysr@777 | 62 | // If "this" is of the given subtype, return "this", else "NULL". |
ysr@777 | 63 | virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; } |
ysr@777 | 64 | |
ysr@1280 | 65 | // Record, if necessary, the fact that *p (where "p" is in region "from", |
ysr@1280 | 66 | // and is, a fortiori, required to be non-NULL) has changed to its new value. |
ysr@777 | 67 | virtual void write_ref(HeapRegion* from, oop* p) = 0; |
ysr@1280 | 68 | virtual void write_ref(HeapRegion* from, narrowOop* p) = 0; |
ysr@777 | 69 | virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0; |
ysr@1280 | 70 | virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0; |
ysr@777 | 71 | |
ysr@777 | 72 | // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region |
ysr@777 | 73 | // or card, respectively, such that a region or card with a corresponding |
ysr@777 | 74 | // 0 bit contains no part of any live object. Eliminates any remembered |
ysr@777 | 75 | // set entries that correspond to dead heap ranges. |
ysr@777 | 76 | virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0; |
ysr@777 | 77 | // Like the above, but assumes is called in parallel: "worker_num" is the |
ysr@777 | 78 | // parallel thread id of the current thread, and "claim_val" is the |
ysr@777 | 79 | // value that should be used to claim heap regions. |
ysr@777 | 80 | virtual void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 81 | int worker_num, int claim_val) = 0; |
ysr@777 | 82 | |
ysr@777 | 83 | // Refine the card corresponding to "card_ptr". If "sts" is non-NULL, |
ysr@777 | 84 | // join and leave around parts that must be atomic wrt GC. (NULL means |
ysr@777 | 85 | // being done at a safepoint.) |
johnc@2060 | 86 | // With some implementations of this routine, when check_for_refs_into_cset |
johnc@2060 | 87 | // is true, a true result may be returned if the given card contains oops |
johnc@2060 | 88 | // that have references into the current collection set. |
johnc@2060 | 89 | virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i, |
johnc@2060 | 90 | bool check_for_refs_into_cset) { |
johnc@2060 | 91 | return false; |
johnc@2060 | 92 | } |
ysr@777 | 93 | |
ysr@777 | 94 | // Print any relevant summary info. |
ysr@777 | 95 | virtual void print_summary_info() {} |
ysr@777 | 96 | |
ysr@777 | 97 | // Prepare remebered set for verification. |
ysr@777 | 98 | virtual void prepare_for_verify() {}; |
ysr@777 | 99 | }; |
ysr@777 | 100 | |
ysr@777 | 101 | |
ysr@777 | 102 | // The simplest possible G1RemSet: iterates over all objects in non-CS |
ysr@777 | 103 | // regions, searching for pointers into the CS. |
// The simplest possible G1RemSet: iterates over all objects in non-CS
// regions, searching for pointers into the CS. It records nothing, so
// most of the interface below is implemented as a no-op.
class StupidG1RemSet: public G1RemSet {
public:
  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // No per-pause setup or teardown is needed for the brute-force scan.
  void prepare_for_oops_into_collection_set_do() {}
  void cleanup_after_oops_into_collection_set_do() {}

  // Nothing is necessary in the version below.
  void write_ref(HeapRegion* from, oop* p) {}
  void write_ref(HeapRegion* from, narrowOop* p) {}
  void par_write_ref(HeapRegion* from, oop* p, int tid) {}
  void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {}

  // No remembered-set entries are kept, so there is nothing to scrub.
  void scrub(BitMap* region_bm, BitMap* card_bm) {}
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val) {}

};
ysr@777 | 125 | |
ysr@777 | 126 | // A G1RemSet in which each heap region has a rem set that records the |
ysr@777 | 127 | // external heap references into it. Uses a mod ref bs to track updates, |
ysr@777 | 128 | // so that they can be used to update the individual region remsets. |
ysr@777 | 129 | |
// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it. Uses a mod ref bs to track updates,
// so that they can be used to update the individual region remsets.
class HRInto_G1RemSet: public G1RemSet {
protected:
  enum SomePrivateConstants {
    // Synchronization points for the parallel phases below.
    UpdateRStoMergeSync = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync = 2,
    LastSync = 3,

    // Task ids for the sequential-work SubTasksDone instance.
    SeqTask = 0,
    NumSeqTasks = 1
  };

  CardTableModRefBS* _ct_bs;     // the card-table barrier set
  SubTasksDone* _seq_task;       // claims tasks that must run exactly once
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;

  size_t* _cards_scanned;        // per-worker card-scan counts
  size_t _total_cards_scanned;   // sum over workers (see cardsScanned())

  // _traversal_in_progress is "true" iff a traversal is in progress.

  bool _traversal_in_progress;
  void set_traversal(bool b) { _traversal_in_progress = b; }

  // Used for caching the closure that is responsible for scanning
  // references into the collection set.
  OopsInHeapRegionClosure** _cset_rs_update_cl;

  // The routine that performs the actual work of refining a dirty
  // card.
  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                    bool check_for_refs_into_cset);

protected:
  // Non-virtual workhorses, templatified for narrow vs wide oop; the
  // public virtual write_ref/par_write_ref wrappers below delegate here.
  template <class T> void write_ref_nv(HeapRegion* from, T* p);
  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~HRInto_G1RemSet();

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i);
  // Dispatches to the narrow- or wide-oop instantiation of
  // scanNewRefsRS_work depending on UseCompressedOops.
  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) {
    if (UseCompressedOops) {
      scanNewRefsRS_work<narrowOop>(oc, worker_i);
    } else {
      scanNewRefsRS_work<oop>(oc, worker_i);
    }
  }
  void updateRS(DirtyCardQueue* into_cset_dcq, int worker_i);
  HeapRegion* calculateStartRegion(int i);

  // Checked downcast support (see G1RemSet::as_HRInto_G1RemSet).
  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  // [Below the virtual version calls a non-virtual protected
  // workhorse that is templatified for narrow vs wide oop.]
  inline void write_ref(HeapRegion* from, oop* p) {
    write_ref_nv(from, p);
  }
  inline void write_ref(HeapRegion* from, narrowOop* p) {
    write_ref_nv(from, p);
  }
  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }
  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
    par_write_ref_nv(from, p, tid);
  }

  bool self_forwarded(oop obj);

  void scrub(BitMap* region_bm, BitMap* card_bm);
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  // If check_for_refs_into_cset is true then a true result is returned
  // if the card contains oops that have references into the current
  // collection set.
  virtual bool concurrentRefineOneCard(jbyte* card_ptr, int worker_i,
                                       bool check_for_refs_into_cset);

  virtual void print_summary_info();
  virtual void prepare_for_verify();
};
ysr@777 | 235 | |
ysr@777 | 236 | #define G1_REM_SET_LOGGING 0 |
ysr@777 | 237 | |
ysr@777 | 238 | class CountNonCleanMemRegionClosure: public MemRegionClosure { |
ysr@777 | 239 | G1CollectedHeap* _g1; |
ysr@777 | 240 | int _n; |
ysr@777 | 241 | HeapWord* _start_first; |
ysr@777 | 242 | public: |
ysr@777 | 243 | CountNonCleanMemRegionClosure(G1CollectedHeap* g1) : |
ysr@777 | 244 | _g1(g1), _n(0), _start_first(NULL) |
ysr@777 | 245 | {} |
ysr@777 | 246 | void do_MemRegion(MemRegion mr); |
ysr@777 | 247 | int n() { return _n; }; |
ysr@777 | 248 | HeapWord* start_first() { return _start_first; } |
ysr@777 | 249 | }; |
apetrusenko@1061 | 250 | |
// Closure that forwards each visited oop to the rem set's par_write_ref
// machinery via do_oop_work (template body defined elsewhere — TODO confirm).
// Callers must invoke set_from() before applying the closure.
class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;        // region containing the refs being processed
  HRInto_G1RemSet* _rs;     // target rem set; guaranteed non-NULL by ctor
  int _worker_i;            // id of the worker using this closure (0 = seq)

  template <class T> void do_oop_work(T* p);

public:
  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i) {
    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
  }

  // Sets the "from" region for subsequent do_oop calls; must be non-NULL.
  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p) { do_oop_work(p); }

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};
johnc@2060 | 276 | |
// Closure that forwards each visited reference to do_oop_work; presumably
// updates the rem set immediately rather than via a queue (template body
// defined elsewhere — TODO confirm against the .cpp).
class UpdateRSetImmediate: public OopsInHeapRegionClosure {
private:
  G1RemSet* _g1_rem_set;   // rem set to update

  // Workhorse, templatified for narrow vs wide oop.
  template <class T> void do_oop_work(T* p);
public:
  UpdateRSetImmediate(G1RemSet* rs) :
    _g1_rem_set(rs) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop( oop* p) { do_oop_work(p); }
};