Tue, 05 May 2009 22:15:35 -0700
6833576: G1: assert illegal index, growableArray.hpp:186
Summary: The code that calculates the heap region index for an object address incorrectly used signed arithmetic.
Reviewed-by: jcoomes, ysr
ysr@777 | 1 | /* |
xdono@1014 | 2 | * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
ysr@777 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
ysr@777 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
ysr@777 | 21 | * have any questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
ysr@777 | 25 | // A G1RemSet provides ways of iterating over pointers into a selected |
ysr@777 | 26 | // collection set. |
ysr@777 | 27 | |
ysr@777 | 28 | class G1CollectedHeap; |
ysr@777 | 29 | class CardTableModRefBarrierSet; |
ysr@777 | 30 | class HRInto_G1RemSet; |
ysr@777 | 31 | class ConcurrentG1Refine; |
ysr@777 | 32 | |
apetrusenko@984 | 33 | class G1RemSet: public CHeapObj { |
ysr@777 | 34 | protected: |
ysr@777 | 35 | G1CollectedHeap* _g1; |
ysr@777 | 36 | |
ysr@777 | 37 | unsigned _conc_refine_traversals; |
ysr@777 | 38 | unsigned _conc_refine_cards; |
ysr@777 | 39 | |
ysr@777 | 40 | size_t n_workers(); |
ysr@777 | 41 | |
ysr@777 | 42 | public: |
ysr@777 | 43 | G1RemSet(G1CollectedHeap* g1) : |
ysr@777 | 44 | _g1(g1), _conc_refine_traversals(0), _conc_refine_cards(0) |
ysr@777 | 45 | {} |
ysr@777 | 46 | |
ysr@777 | 47 | // Invoke "blk->do_oop" on all pointers into the CS in objects in regions |
ysr@777 | 48 | // outside the CS (having invoked "blk->set_region" to set the "from" |
ysr@777 | 49 | // region correctly beforehand.) The "worker_i" param is for the |
ysr@777 | 50 | // parallel case where the number of the worker thread calling this |
ysr@777 | 51 | // function can be helpful in partitioning the work to be done. It |
ysr@777 | 52 | // should be the same as the "i" passed to the calling thread's |
ysr@777 | 53 | // work(i) function. In the sequential case this param will be ignored. |
ysr@777 | 54 | virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 55 | int worker_i) = 0; |
ysr@777 | 56 | |
ysr@777 | 57 | // Prepare for and cleanup after an oops_into_collection_set_do |
ysr@777 | 58 | // call. Must call each of these once before and after (in sequential |
ysr@777 | 59 | // code) any threads call oops_into_collection_set_do. (This offers an |
ysr@777 | 60 | // opportunity for sequential setup and teardown of structures needed by a |
ysr@777 | 61 | // parallel iteration over the CS's RS.) |
ysr@777 | 62 | virtual void prepare_for_oops_into_collection_set_do() = 0; |
ysr@777 | 63 | virtual void cleanup_after_oops_into_collection_set_do() = 0; |
ysr@777 | 64 | |
ysr@777 | 65 | // If "this" is of the given subtype, return "this", else "NULL". |
ysr@777 | 66 | virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; } |
ysr@777 | 67 | |
ysr@777 | 68 | // Record, if necessary, the fact that *p (where "p" is in region "from") |
ysr@777 | 69 | // has changed to its new value. |
ysr@777 | 70 | virtual void write_ref(HeapRegion* from, oop* p) = 0; |
ysr@777 | 71 | virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0; |
ysr@777 | 72 | |
ysr@777 | 73 | // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region |
ysr@777 | 74 | // or card, respectively, such that a region or card with a corresponding |
ysr@777 | 75 | // 0 bit contains no part of any live object. Eliminates any remembered |
ysr@777 | 76 | // set entries that correspond to dead heap ranges. |
ysr@777 | 77 | virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0; |
ysr@777 | 78 | // Like the above, but assumes is called in parallel: "worker_num" is the |
ysr@777 | 79 | // parallel thread id of the current thread, and "claim_val" is the |
ysr@777 | 80 | // value that should be used to claim heap regions. |
ysr@777 | 81 | virtual void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 82 | int worker_num, int claim_val) = 0; |
ysr@777 | 83 | |
ysr@777 | 84 | // Do any "refinement" activity that might be appropriate to the given |
ysr@777 | 85 | // G1RemSet. If "refinement" has iterative "passes", do one pass. |
ysr@777 | 86 | // "cg1r" is the ConcurrentG1Refine instance driving the refinement. |
ysr@777 | 87 | // Default implementation does nothing. |
ysr@777 | 88 | virtual void concurrentRefinementPass(ConcurrentG1Refine* cg1r) {} |
ysr@777 | 89 | |
ysr@777 | 90 | // Refine the card corresponding to "card_ptr". "worker_i" is the |
ysr@777 | 91 | // parallel id of the calling worker thread (see |
ysr@777 | 92 | // oops_into_collection_set_do above). Default implementation does nothing. |
ysr@777 | 93 | virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {} |
ysr@777 | 94 | |
ysr@777 | 95 | unsigned conc_refine_cards() { return _conc_refine_cards; } |
ysr@777 | 96 | |
ysr@777 | 97 | // Print any relevant summary info. |
ysr@777 | 98 | virtual void print_summary_info() {} |
ysr@777 | 99 | |
ysr@777 | 100 | // Prepare remembered set for verification. |
ysr@777 | 101 | virtual void prepare_for_verify() {}; |
ysr@777 | 102 | }; |
ysr@777 | 103 | |
ysr@777 | 104 | |
ysr@777 | 105 | // The simplest possible G1RemSet: iterates over all objects in non-CS |
ysr@777 | 106 | // regions, searching for pointers into the CS. |
ysr@777 | 107 | class StupidG1RemSet: public G1RemSet { |
ysr@777 | 108 | public: |
ysr@777 | 109 | StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {} |
ysr@777 | 110 | |
ysr@777 | 111 | void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 112 | int worker_i); |
ysr@777 | 113 | |
ysr@777 | 114 | void prepare_for_oops_into_collection_set_do() {} // no setup needed |
ysr@777 | 115 | void cleanup_after_oops_into_collection_set_do() {} // no teardown needed |
ysr@777 | 116 | |
ysr@777 | 117 | // Nothing is necessary in the write-barrier versions below: this |
ysr@777 | 118 | void write_ref(HeapRegion* from, oop* p) {} // implementation keeps no |
ysr@777 | 119 | void par_write_ref(HeapRegion* from, oop* p, int tid) {} // per-region remembered sets. |
ysr@777 | 120 | |
ysr@777 | 121 | void scrub(BitMap* region_bm, BitMap* card_bm) {} |
ysr@777 | 122 | void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 123 | int worker_num, int claim_val) {} |
ysr@777 | 124 | |
ysr@777 | 125 | }; |
ysr@777 | 126 | |
ysr@777 | 127 | // A G1RemSet in which each heap region has a rem set that records the |
ysr@777 | 128 | // external heap references into it. Uses a mod ref bs to track updates, |
ysr@777 | 129 | // so that they can be used to update the individual region remsets. |
ysr@777 | 130 | |
ysr@777 | 131 | class HRInto_G1RemSet: public G1RemSet { |
ysr@777 | 132 | protected: |
ysr@777 | 133 | enum SomePrivateConstants { |
ysr@777 | 134 | UpdateRStoMergeSync = 0, |
ysr@777 | 135 | MergeRStoDoDirtySync = 1, |
ysr@777 | 136 | DoDirtySync = 2, |
ysr@777 | 137 | LastSync = 3, |
ysr@777 | 138 | |
ysr@777 | 139 | SeqTask = 0, |
ysr@777 | 140 | NumSeqTasks = 1 |
ysr@777 | 141 | }; |
ysr@777 | 142 | |
ysr@777 | 143 | CardTableModRefBS* _ct_bs; |
ysr@777 | 144 | SubTasksDone* _seq_task; |
ysr@777 | 145 | G1CollectorPolicy* _g1p; |
ysr@777 | 146 | |
ysr@777 | 147 | ConcurrentG1Refine* _cg1r; |
ysr@777 | 148 | |
ysr@777 | 149 | size_t* _cards_scanned; // card-scan counts (presumably one per worker; confirm in .cpp) |
ysr@777 | 150 | size_t _total_cards_scanned; |
ysr@777 | 151 | |
ysr@777 | 152 | // _par_traversal_in_progress is "true" iff a parallel traversal is in |
ysr@777 | 153 | // progress. If so, then cards added to remembered sets should also have |
ysr@777 | 154 | // their references into the collection set summarized in "_new_refs". |
ysr@777 | 155 | bool _par_traversal_in_progress; |
ysr@777 | 156 | void set_par_traversal(bool b); |
ysr@777 | 157 | GrowableArray<oop*>** _new_refs; |
iveresov@1051 | 158 | void new_refs_iterate(OopClosure* cl); |
ysr@777 | 159 | |
ysr@777 | 160 | public: |
ysr@777 | 161 | // This is called to reset dual hash tables after the gc pause |
ysr@777 | 162 | // is finished and the initial hash table is no longer being |
ysr@777 | 163 | // scanned. |
ysr@777 | 164 | void cleanupHRRS(); |
ysr@777 | 165 | |
ysr@777 | 166 | HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs); |
ysr@777 | 167 | ~HRInto_G1RemSet(); |
ysr@777 | 168 | |
ysr@777 | 169 | void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, |
ysr@777 | 170 | int worker_i); |
ysr@777 | 171 | |
ysr@777 | 172 | void prepare_for_oops_into_collection_set_do(); |
ysr@777 | 173 | void cleanup_after_oops_into_collection_set_do(); |
ysr@777 | 174 | void scanRS(OopsInHeapRegionClosure* oc, int worker_i); |
ysr@777 | 175 | void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i); |
ysr@777 | 176 | void updateRS(int worker_i); |
ysr@777 | 177 | HeapRegion* calculateStartRegion(int i); |
ysr@777 | 178 | |
ysr@777 | 179 | HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; } |
ysr@777 | 180 | |
ysr@777 | 181 | CardTableModRefBS* ct_bs() { return _ct_bs; } |
ysr@777 | 182 | size_t cardsScanned() { return _total_cards_scanned; } |
ysr@777 | 183 | |
ysr@777 | 184 | // Record, if necessary, the fact that *p (where "p" is in region "from", |
ysr@777 | 185 | // which is required to be non-NULL) has changed to a new non-NULL value. |
ysr@777 | 186 | inline void write_ref(HeapRegion* from, oop* p); |
ysr@777 | 187 | // The "_nv" version is the same; it exists just so that it is not virtual. |
ysr@777 | 188 | inline void write_ref_nv(HeapRegion* from, oop* p); |
ysr@777 | 189 | |
ysr@777 | 190 | inline bool self_forwarded(oop obj); |
ysr@777 | 191 | inline void par_write_ref(HeapRegion* from, oop* p, int tid); |
ysr@777 | 192 | |
ysr@777 | 193 | void scrub(BitMap* region_bm, BitMap* card_bm); |
ysr@777 | 194 | void scrub_par(BitMap* region_bm, BitMap* card_bm, |
ysr@777 | 195 | int worker_num, int claim_val); |
ysr@777 | 196 | |
ysr@777 | 197 | virtual void concurrentRefinementPass(ConcurrentG1Refine* t); |
ysr@777 | 198 | virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i); |
ysr@777 | 199 | |
ysr@777 | 200 | virtual void print_summary_info(); |
ysr@777 | 201 | virtual void prepare_for_verify(); |
ysr@777 | 202 | }; |
ysr@777 | 203 | |
ysr@777 | 204 | #define G1_REM_SET_LOGGING 0 |
ysr@777 | 205 | |
ysr@777 | 206 | class CountNonCleanMemRegionClosure: public MemRegionClosure { |
ysr@777 | 207 | G1CollectedHeap* _g1; |
ysr@777 | 208 | int _n; // running count; starts at 0 (see do_MemRegion for how it is accumulated) |
ysr@777 | 209 | HeapWord* _start_first; // NULL until set; presumably the start of the first non-clean region — confirm in do_MemRegion impl |
ysr@777 | 210 | public: |
ysr@777 | 211 | CountNonCleanMemRegionClosure(G1CollectedHeap* g1) : |
ysr@777 | 212 | _g1(g1), _n(0), _start_first(NULL) |
ysr@777 | 213 | {} |
ysr@777 | 214 | void do_MemRegion(MemRegion mr); |
ysr@777 | 215 | int n() { return _n; }; |
ysr@777 | 216 | HeapWord* start_first() { return _start_first; } |
ysr@777 | 217 | }; |
apetrusenko@1061 | 218 | |
apetrusenko@1061 | 219 | class UpdateRSOopClosure: public OopClosure { |
apetrusenko@1061 | 220 | HeapRegion* _from; // "from" region for rem set updates; must be set via set_from before use |
apetrusenko@1061 | 221 | HRInto_G1RemSet* _rs; // never NULL (guaranteed in the constructor) |
apetrusenko@1061 | 222 | int _worker_i; // worker id of the thread using this closure (0 for serial use) |
apetrusenko@1061 | 223 | public: |
apetrusenko@1061 | 224 | UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) : |
apetrusenko@1061 | 225 | _from(NULL), _rs(rs), _worker_i(worker_i) { |
apetrusenko@1061 | 226 | guarantee(_rs != NULL, "Requires an HRIntoG1RemSet"); |
apetrusenko@1061 | 227 | } |
apetrusenko@1061 | 228 | |
apetrusenko@1061 | 229 | void set_from(HeapRegion* from) { |
apetrusenko@1061 | 230 | assert(from != NULL, "from region must be non-NULL"); |
apetrusenko@1061 | 231 | _from = from; |
apetrusenko@1061 | 232 | } |
apetrusenko@1061 | 233 | |
apetrusenko@1061 | 234 | virtual void do_oop(narrowOop* p); |
apetrusenko@1061 | 235 | virtual void do_oop(oop* p); |
apetrusenko@1061 | 236 | |
apetrusenko@1061 | 237 | // Override: this closure is idempotent. |
apetrusenko@1061 | 238 | // bool idempotent() { return true; } |
apetrusenko@1061 | 239 | bool apply_to_weak_ref_discovered_field() { return true; } |
apetrusenko@1061 | 240 | }; |
apetrusenko@1061 | 241 |