Wed, 30 Sep 2009 14:50:51 -0400
6890137: G1: revamp reachable object dump
Summary: Revamp the reachable object dump debugging facility.
Reviewed-by: jmasa, apetrusenko
ysr@777 | 1 | /* |
xdono@1014 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | // A G1RemSet provides ways of iterating over pointers into a selected |
ysr@777 | 26 | // collection set. |
ysr@777 | 27 | |
ysr@777 | 28 | class G1CollectedHeap; |
ysr@777 | 29 | class CardTableModRefBarrierSet; |
ysr@777 | 30 | class HRInto_G1RemSet; |
ysr@777 | 31 | class ConcurrentG1Refine; |
ysr@777 | 32 | |
apetrusenko@984 | 33 | class G1RemSet: public CHeapObj { |
ysr@777 | 34 | protected: |
ysr@777 | 35 |   G1CollectedHeap* _g1; |
ysr@777 | 36 |   unsigned _conc_refine_cards; |
ysr@777 | 37 |   size_t n_workers(); |
ysr@777 | 38 | |
ysr@777 | 39 | public: |
ysr@777 | 40 |   G1RemSet(G1CollectedHeap* g1) : |
iveresov@1229 | 41 |     _g1(g1), _conc_refine_cards(0) |
ysr@777 | 42 |   {} |
ysr@777 | 43 | |
ysr@777 | 44 |   // Invoke "blk->do_oop" on all pointers into the CS in objects in regions |
ysr@777 | 45 |   // outside the CS (having invoked "blk->set_region" to set the "from" |
ysr@777 | 46 |   // region correctly beforehand.) The "worker_i" param is for the |
ysr@777 | 47 |   // parallel case where the number of the worker thread calling this |
ysr@777 | 48 |   // function can be helpful in partitioning the work to be done. It |
ysr@777 | 49 |   // should be the same as the "i" passed to the calling thread's |
ysr@777 | 50 |   // work(i) function. In the sequential case this param will be ignored. |
ysr@777 | 51 |   virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 52 |                                            int worker_i) = 0; |
ysr@777 | 53 | |
ysr@777 | 54 |   // Prepare for and cleanup after an oops_into_collection_set_do |
ysr@777 | 55 |   // call. Must call each of these once before and after (in sequential |
ysr@777 | 56 |   // code) any threads call oops_into_collection_set_do. (This offers an |
ysr@777 | 57 |   // opportunity for sequential setup and teardown of structures needed by a |
ysr@777 | 58 |   // parallel iteration over the CS's RS.) |
ysr@777 | 59 |   virtual void prepare_for_oops_into_collection_set_do() = 0; |
ysr@777 | 60 |   virtual void cleanup_after_oops_into_collection_set_do() = 0; |
ysr@777 | 61 | |
ysr@777 | 62 |   // If "this" is of the given subtype, return "this", else "NULL". |
ysr@777 | 63 |   virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; } |
ysr@777 | 64 | |
ysr@1280 | 65 |   // Record, if necessary, the fact that *p (where "p" is in region "from", |
ysr@1280 | 66 |   // and is, a fortiori, required to be non-NULL) has changed to its new value. |
ysr@777 | 67 |   virtual void write_ref(HeapRegion* from, oop* p) = 0; |
ysr@1280 | 68 |   virtual void write_ref(HeapRegion* from, narrowOop* p) = 0; |
ysr@777 | 69 |   virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0; |
ysr@1280 | 70 |   virtual void par_write_ref(HeapRegion* from, narrowOop* p, int tid) = 0; |
ysr@777 | 71 | |
ysr@777 | 72 |   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region |
ysr@777 | 73 |   // or card, respectively, such that a region or card with a corresponding |
ysr@777 | 74 |   // 0 bit contains no part of any live object. Eliminates any remembered |
ysr@777 | 75 |   // set entries that correspond to dead heap ranges. |
ysr@777 | 76 |   virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0; |
ysr@777 | 77 |   // Like the above, but assumes it is called in parallel: "worker_num" is the |
ysr@777 | 78 |   // parallel thread id of the current thread, and "claim_val" is the |
ysr@777 | 79 |   // value that should be used to claim heap regions. |
ysr@777 | 80 |   virtual void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 81 |                          int worker_num, int claim_val) = 0; |
ysr@777 | 82 | |
ysr@777 | 83 |   // Refine the card corresponding to "card_ptr"; "worker_i" identifies |
ysr@777 | 84 |   // the worker thread doing the refinement. (NOTE(review): the former |
ysr@777 | 85 |   // mention of an "sts" param is stale -- this signature takes no such arg.) |
ysr@777 | 86 |   virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {} |
ysr@777 | 87 | |
ysr@777 | 88 |   // Print any relevant summary info. |
ysr@777 | 89 |   virtual void print_summary_info() {} |
ysr@777 | 90 | |
ysr@777 | 91 |   // Prepare remembered set for verification. |
ysr@777 | 92 |   virtual void prepare_for_verify() {}; |
ysr@777 | 93 | }; |
ysr@777 | 94 | |
ysr@777 | 95 | |
ysr@777 | 96 | // The simplest possible G1RemSet: iterates over all objects in non-CS |
ysr@777 | 97 | // regions, searching for pointers into the CS. |
ysr@777 | 98 | class StupidG1RemSet: public G1RemSet { |
ysr@777 | 99 | public: |
ysr@777 | 100 |   StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {} |
ysr@777 | 101 | |
ysr@777 | 102 |   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 103 |                                    int worker_i); |
ysr@777 | 104 | |
ysr@777 | 105 |   void prepare_for_oops_into_collection_set_do() {} |
ysr@777 | 106 |   void cleanup_after_oops_into_collection_set_do() {} |
ysr@777 | 107 | |
ysr@777 | 108 |   // Write barriers are no-ops here: this version rescans all non-CS objects. |
ysr@777 | 109 |   void write_ref(HeapRegion* from, oop* p) {} |
ysr@1280 | 110 |   void write_ref(HeapRegion* from, narrowOop* p) {} |
ysr@777 | 111 |   void par_write_ref(HeapRegion* from, oop* p, int tid) {} |
ysr@1280 | 112 |   void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {} |
ysr@777 | 113 | |
ysr@777 | 114 |   void scrub(BitMap* region_bm, BitMap* card_bm) {} |
ysr@777 | 115 |   void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 116 |                  int worker_num, int claim_val) {} |
ysr@777 | 117 | |
ysr@777 | 118 | }; |
ysr@777 | 119 | |
ysr@777 | 120 | // A G1RemSet in which each heap region has a rem set that records the |
ysr@777 | 121 | // external heap references into it. Uses a mod ref bs to track updates, |
ysr@777 | 122 | // so that they can be used to update the individual region remsets. |
ysr@777 | 123 | |
ysr@777 | 124 | class HRInto_G1RemSet: public G1RemSet { |
ysr@777 | 125 | protected: |
ysr@777 | 126 |   enum SomePrivateConstants { |
ysr@777 | 127 |     UpdateRStoMergeSync = 0, |
ysr@777 | 128 |     MergeRStoDoDirtySync = 1, |
ysr@777 | 129 |     DoDirtySync = 2, |
ysr@777 | 130 |     LastSync = 3, |
ysr@777 | 131 | |
ysr@777 | 132 |     SeqTask = 0, |
ysr@777 | 133 |     NumSeqTasks = 1 |
ysr@777 | 134 |   }; |
ysr@777 | 135 | |
ysr@777 | 136 |   CardTableModRefBS* _ct_bs; |
ysr@777 | 137 |   SubTasksDone* _seq_task; |
ysr@777 | 138 |   G1CollectorPolicy* _g1p; |
ysr@777 | 139 | |
ysr@777 | 140 |   ConcurrentG1Refine* _cg1r; |
ysr@777 | 141 | |
ysr@777 | 142 |   size_t* _cards_scanned; |
ysr@777 | 143 |   size_t _total_cards_scanned; |
ysr@777 | 144 | |
ysr@777 | 145 |   // _par_traversal_in_progress is "true" iff a parallel traversal is in |
ysr@777 | 146 |   // progress. If so, then cards added to remembered sets should also have |
ysr@777 | 147 |   // their references into the collection set summarized in "_new_refs". |
ysr@777 | 148 |   bool _par_traversal_in_progress; |
iveresov@1229 | 149 |   void set_par_traversal(bool b) { _par_traversal_in_progress = b; } |
ysr@1280 | 150 |   GrowableArray<OopOrNarrowOopStar>** _new_refs; |
ysr@1280 | 151 |   template <class T> void new_refs_iterate_work(OopClosure* cl); |
ysr@1280 | 152 |   void new_refs_iterate(OopClosure* cl) { |
ysr@1280 | 153 |     if (UseCompressedOops) { |
ysr@1280 | 154 |       new_refs_iterate_work<narrowOop>(cl); |
ysr@1280 | 155 |     } else { |
ysr@1280 | 156 |       new_refs_iterate_work<oop>(cl); |
ysr@1280 | 157 |     } |
ysr@1280 | 158 |   } |
ysr@1280 | 159 | |
johnc@1325 | 160 |   // The routine that performs the actual work of refining a dirty |
johnc@1325 | 161 |   // card. |
johnc@1325 | 162 |   void concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i); |
johnc@1325 | 163 | |
ysr@1280 | 164 | protected: // NOTE(review): redundant -- still in the protected section above |
ysr@1280 | 165 |   template <class T> void write_ref_nv(HeapRegion* from, T* p); |
ysr@1280 | 166 |   template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid); |
ysr@777 | 167 | |
ysr@777 | 168 | public: |
ysr@777 | 169 |   // This is called to reset dual hash tables after the gc pause |
ysr@777 | 170 |   // is finished and the initial hash table is no longer being |
ysr@777 | 171 |   // scanned. |
ysr@777 | 172 |   void cleanupHRRS(); |
ysr@777 | 173 | |
ysr@777 | 174 |   HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs); |
ysr@777 | 175 |   ~HRInto_G1RemSet(); |
ysr@777 | 176 | |
ysr@777 | 177 |   void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 178 |                                    int worker_i); |
ysr@777 | 179 | |
ysr@777 | 180 |   void prepare_for_oops_into_collection_set_do(); |
ysr@777 | 181 |   void cleanup_after_oops_into_collection_set_do(); |
ysr@777 | 182 |   void scanRS(OopsInHeapRegionClosure* oc, int worker_i); |
ysr@1280 | 183 |   template <class T> void scanNewRefsRS_work(OopsInHeapRegionClosure* oc, int worker_i); |
ysr@1280 | 184 |   void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i) { |
ysr@1280 | 185 |     if (UseCompressedOops) { |
ysr@1280 | 186 |       scanNewRefsRS_work<narrowOop>(oc, worker_i); |
ysr@1280 | 187 |     } else { |
ysr@1280 | 188 |       scanNewRefsRS_work<oop>(oc, worker_i); |
ysr@1280 | 189 |     } |
ysr@1280 | 190 |   } |
ysr@777 | 191 |   void updateRS(int worker_i); |
ysr@777 | 192 |   HeapRegion* calculateStartRegion(int i); |
ysr@777 | 193 | |
ysr@777 | 194 |   HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; } |
ysr@777 | 195 | |
ysr@777 | 196 |   CardTableModRefBS* ct_bs() { return _ct_bs; } |
ysr@777 | 197 |   size_t cardsScanned() { return _total_cards_scanned; } |
ysr@777 | 198 | |
ysr@777 | 199 |   // Record, if necessary, the fact that *p (where "p" is in region "from", |
ysr@777 | 200 |   // which is required to be non-NULL) has changed to a new non-NULL value. |
ysr@1280 | 201 |   // [Below the virtual version calls a non-virtual protected |
ysr@1280 | 202 |   // workhorse that is templatified for narrow vs wide oop.] |
ysr@1280 | 203 |   inline void write_ref(HeapRegion* from, oop* p) { |
ysr@1280 | 204 |     write_ref_nv(from, p); |
ysr@1280 | 205 |   } |
ysr@1280 | 206 |   inline void write_ref(HeapRegion* from, narrowOop* p) { |
ysr@1280 | 207 |     write_ref_nv(from, p); |
ysr@1280 | 208 |   } |
ysr@1280 | 209 |   inline void par_write_ref(HeapRegion* from, oop* p, int tid) { |
ysr@1280 | 210 |     par_write_ref_nv(from, p, tid); |
ysr@1280 | 211 |   } |
ysr@1280 | 212 |   inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) { |
ysr@1280 | 213 |     par_write_ref_nv(from, p, tid); |
ysr@1280 | 214 |   } |
ysr@777 | 215 | |
ysr@1280 | 216 |   bool self_forwarded(oop obj); |
ysr@777 | 217 | |
ysr@777 | 218 |   void scrub(BitMap* region_bm, BitMap* card_bm); |
ysr@777 | 219 |   void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 220 |                  int worker_num, int claim_val); |
ysr@777 | 221 | |
ysr@777 | 222 |   virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i); |
ysr@777 | 223 | |
ysr@777 | 224 |   virtual void print_summary_info(); |
ysr@777 | 225 |   virtual void prepare_for_verify(); |
ysr@777 | 226 | }; |
ysr@777 | 227 | |
ysr@777 | 228 | #define G1_REM_SET_LOGGING 0 |
ysr@777 | 229 | |
ysr@777 | 230 | class CountNonCleanMemRegionClosure: public MemRegionClosure { |
ysr@777 | 231 |   G1CollectedHeap* _g1; |
ysr@777 | 232 |   int _n; // presumably a count accumulated by do_MemRegion -- impl in .cpp, confirm there |
ysr@777 | 233 |   HeapWord* _start_first; // presumably the start of the first region seen -- confirm in .cpp |
ysr@777 | 234 | public: |
ysr@777 | 235 |   CountNonCleanMemRegionClosure(G1CollectedHeap* g1) : |
ysr@777 | 236 |     _g1(g1), _n(0), _start_first(NULL) |
ysr@777 | 237 |   {} |
ysr@777 | 238 |   void do_MemRegion(MemRegion mr); |
ysr@777 | 239 |   int n() { return _n; }; |
ysr@777 | 240 |   HeapWord* start_first() { return _start_first; } |
ysr@777 | 241 | }; |
apetrusenko@1061 | 242 | |
apetrusenko@1061 | 243 | class UpdateRSOopClosure: public OopClosure { |
apetrusenko@1061 | 244 |   HeapRegion* _from; // the "from" region; NULL until set_from() is called |
apetrusenko@1061 | 245 |   HRInto_G1RemSet* _rs; // never NULL (guaranteed in the constructor) |
apetrusenko@1061 | 246 |   int _worker_i; |
ysr@1280 | 247 | |
ysr@1280 | 248 |   template <class T> void do_oop_work(T* p); |
ysr@1280 | 249 | |
apetrusenko@1061 | 250 | public: |
apetrusenko@1061 | 251 |   UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) : |
apetrusenko@1061 | 252 |     _from(NULL), _rs(rs), _worker_i(worker_i) { |
apetrusenko@1061 | 253 |     guarantee(_rs != NULL, "Requires an HRIntoG1RemSet"); |
apetrusenko@1061 | 254 |   } |
apetrusenko@1061 | 255 | |
apetrusenko@1061 | 256 |   void set_from(HeapRegion* from) { |
apetrusenko@1061 | 257 |     assert(from != NULL, "from region must be non-NULL"); |
apetrusenko@1061 | 258 |     _from = from; |
apetrusenko@1061 | 259 |   } |
apetrusenko@1061 | 260 | |
ysr@1280 | 261 |   // Both widths dispatch to the templatified workhorse above. |
ysr@1280 | 261 |   virtual void do_oop(narrowOop* p) { do_oop_work(p); } |
ysr@1280 | 262 |   virtual void do_oop(oop* p) { do_oop_work(p); } |
apetrusenko@1061 | 263 | |
apetrusenko@1061 | 264 |   // Override: this closure is idempotent. |
apetrusenko@1061 | 265 |   // bool idempotent() { return true; } |
apetrusenko@1061 | 266 |   bool apply_to_weak_ref_discovered_field() { return true; } |
apetrusenko@1061 | 267 | }; |