/*
 * Copyright 2001-2009 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// A G1RemSet provides ways of iterating over pointers into a selected
// collection set.
// Forward declarations: these types are referenced by pointer only.
class G1CollectedHeap;
class CardTableModRefBarrierSet;
class HRInto_G1RemSet;
class ConcurrentG1Refine;

// Abstract base class for a G1 remembered set.  Provides the interface for
// iterating over pointers into a selected collection set (CS) and for
// maintaining the remembered-set data as the mutator and GC run.  Concrete
// behavior is supplied by the subclasses declared further below
// (StupidG1RemSet, HRInto_G1RemSet).
class G1RemSet: public CHeapObj {
protected:
  G1CollectedHeap* _g1;   // The heap this remembered set serves.

  // Statistics: number of concurrent-refinement traversals and cards
  // refined so far (see conc_refine_cards() below).
  unsigned _conc_refine_traversals;
  unsigned _conc_refine_cards;

  // Number of worker threads used for the parallel phases.
  size_t n_workers();

public:
  G1RemSet(G1CollectedHeap* g1) :
    _g1(g1), _conc_refine_traversals(0), _conc_refine_cards(0)
  {}

  // Invoke "blk->do_oop" on all pointers into the CS in objects in regions
  // outside the CS (having invoked "blk->set_region" to set the "from"
  // region correctly beforehand.)  The "worker_i" param is for the
  // parallel case where the number of the worker thread calling this
  // function can be helpful in partitioning the work to be done.  It
  // should be the same as the "i" passed to the calling thread's
  // work(i) function.  In the sequential case this param will be ignored.
  virtual void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                           int worker_i) = 0;

  // Prepare for and cleanup after an oops_into_collection_set_do
  // call.  Must call each of these once before and after (in sequential
  // code) any threads call oops_into_collection_set_do.  (This offers an
  // opportunity for sequential setup and teardown of structures needed by a
  // parallel iteration over the CS's RS.)
  virtual void prepare_for_oops_into_collection_set_do() = 0;
  virtual void cleanup_after_oops_into_collection_set_do() = 0;

  // If "this" is of the given subtype, return "this", else "NULL".
  // (Poor-man's checked downcast; HotSpot does not use RTTI here.)
  virtual HRInto_G1RemSet* as_HRInto_G1RemSet() { return NULL; }

  // Record, if necessary, the fact that *p (where "p" is in region "from")
  // has changed to its new value.
  virtual void write_ref(HeapRegion* from, oop* p) = 0;
  // As write_ref, but callable concurrently by several workers; "tid"
  // identifies the calling worker thread.
  virtual void par_write_ref(HeapRegion* from, oop* p, int tid) = 0;

  // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
  // or card, respectively, such that a region or card with a corresponding
  // 0 bit contains no part of any live object.  Eliminates any remembered
  // set entries that correspond to dead heap ranges.
  virtual void scrub(BitMap* region_bm, BitMap* card_bm) = 0;
  // Like the above, but assumes it is called in parallel: "worker_num" is the
  // parallel thread id of the current thread, and "claim_val" is the
  // value that should be used to claim heap regions.
  virtual void scrub_par(BitMap* region_bm, BitMap* card_bm,
                         int worker_num, int claim_val) = 0;

  // Do any "refinement" activity that might be appropriate to the given
  // G1RemSet.  If "refinement" has iterative "passes", do one pass.
  // Default implementation does nothing.
  // NOTE(review): an older version of this comment described a thread
  // parameter "t"; the current parameter is the refinement manager "cg1r".
  virtual void concurrentRefinementPass(ConcurrentG1Refine* cg1r) {}

  // Refine the card corresponding to "card_ptr".
  // NOTE(review): the original comment referred to an "sts" parameter used
  // to join/leave around parts that must be atomic wrt GC (NULL meaning
  // "at a safepoint"); no such parameter exists in this signature, so the
  // comment appears stale -- confirm against the implementation.
  virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i) {}

  // Number of cards refined concurrently so far (statistics accessor).
  unsigned conc_refine_cards() { return _conc_refine_cards; }

  // Print any relevant summary info.
  virtual void print_summary_info() {}

  // Prepare remembered set for verification.
  virtual void prepare_for_verify() {};
};


// The simplest possible G1RemSet: iterates over all objects in non-CS
// regions, searching for pointers into the CS.
// Baseline/debugging G1RemSet: keeps no per-region remembered-set data at
// all, so the write barriers and scrubbing are no-ops and
// oops_into_collection_set_do must do all the work by scanning.
class StupidG1RemSet: public G1RemSet {
public:
  StupidG1RemSet(G1CollectedHeap* g1) : G1RemSet(g1) {}

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  // No auxiliary structures to set up or tear down.
  void prepare_for_oops_into_collection_set_do() {}
  void cleanup_after_oops_into_collection_set_do() {}

  // Nothing is necessary in the version below.
  void write_ref(HeapRegion* from, oop* p) {}
  void par_write_ref(HeapRegion* from, oop* p, int tid) {}

  void scrub(BitMap* region_bm, BitMap* card_bm) {}
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val) {}

};

// A G1RemSet in which each heap region has a rem set that records the
// external heap references into it.  Uses a mod ref bs to track updates,
// so that they can be used to update the individual region remsets.
class HRInto_G1RemSet: public G1RemSet {
protected:
  // Synchronization/claim constants for the phases of a parallel
  // remembered-set operation, plus task ids for the sequential case.
  enum SomePrivateConstants {
    UpdateRStoMergeSync = 0,
    MergeRStoDoDirtySync = 1,
    DoDirtySync = 2,
    LastSync = 3,

    SeqTask = 0,
    NumSeqTasks = 1
  };

  CardTableModRefBS* _ct_bs;   // The heap's card-table barrier set.
  SubTasksDone* _seq_task;     // Task-claim bookkeeping (sequential case).
  G1CollectorPolicy* _g1p;

  ConcurrentG1Refine* _cg1r;   // Concurrent refinement manager.

  // Per-worker counts of cards scanned, and their total (see
  // cardsScanned() below).
  size_t* _cards_scanned;
  size_t _total_cards_scanned;

  // _par_traversal_in_progress is "true" iff a parallel traversal is in
  // progress.  If so, then cards added to remembered sets should also have
  // their references into the collection summarized in "_new_refs".
  bool _par_traversal_in_progress;
  void set_par_traversal(bool b);
  // NOTE(review): the template argument of GrowableArray appears to have
  // been lost in extraction (likely "GrowableArray<oop*>**" given that the
  // write_ref family below traffics in oop*) -- confirm against the
  // original source before relying on this declaration.
  GrowableArray** _new_refs;
  void new_refs_iterate(OopClosure* cl);

public:
  // This is called to reset dual hash tables after the gc pause
  // is finished and the initial hash table is no longer being
  // scanned.
  void cleanupHRRS();

  HRInto_G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs);
  ~HRInto_G1RemSet();

  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
                                   int worker_i);

  void prepare_for_oops_into_collection_set_do();
  void cleanup_after_oops_into_collection_set_do();
  void scanRS(OopsInHeapRegionClosure* oc, int worker_i);
  void scanNewRefsRS(OopsInHeapRegionClosure* oc, int worker_i);
  void updateRS(int worker_i);
  HeapRegion* calculateStartRegion(int i);

  // Checked-downcast override: this IS an HRInto_G1RemSet.
  HRInto_G1RemSet* as_HRInto_G1RemSet() { return this; }

  CardTableModRefBS* ct_bs() { return _ct_bs; }
  size_t cardsScanned() { return _total_cards_scanned; }

  // Record, if necessary, the fact that *p (where "p" is in region "from",
  // which is required to be non-NULL) has changed to a new non-NULL value.
  inline void write_ref(HeapRegion* from, oop* p);
  // The "_nv" version is the same; it exists just so that it is not virtual.
  inline void write_ref_nv(HeapRegion* from, oop* p);

  // Whether "obj" is forwarded to itself.
  inline bool self_forwarded(oop obj);
  inline void par_write_ref(HeapRegion* from, oop* p, int tid);

  void scrub(BitMap* region_bm, BitMap* card_bm);
  void scrub_par(BitMap* region_bm, BitMap* card_bm,
                 int worker_num, int claim_val);

  virtual void concurrentRefinementPass(ConcurrentG1Refine* t);
  virtual void concurrentRefineOneCard(jbyte* card_ptr, int worker_i);

  virtual void print_summary_info();
  virtual void prepare_for_verify();
};

// Compile-time flag for G1 remembered-set logging (0 here, i.e. the
// logging code is compiled out / disabled).
#define G1_REM_SET_LOGGING 0

// MemRegion closure that, judging by its state and accessors, counts the
// non-clean regions it is applied to (_n) and remembers the start of the
// first one (_start_first); see do_MemRegion in the .cpp for the actual
// predicate.
class CountNonCleanMemRegionClosure: public MemRegionClosure {
  G1CollectedHeap* _g1;
  int _n;                  // Count accumulated by do_MemRegion.
  HeapWord* _start_first;  // Recorded by do_MemRegion; NULL until then.
public:
  CountNonCleanMemRegionClosure(G1CollectedHeap* g1) :
    _g1(g1), _n(0), _start_first(NULL)
  {}
  void do_MemRegion(MemRegion mr);
  int n() { return _n; };
  HeapWord* start_first() { return _start_first; }
};

// OopClosure used while updating remembered sets.  Requires a non-NULL
// HRInto_G1RemSet (enforced by the guarantee in the constructor) and a
// non-NULL source region set via set_from() before each use.
class UpdateRSOopClosure: public OopClosure {
  HeapRegion* _from;      // Region the visited references come from.
  HRInto_G1RemSet* _rs;   // Remembered set to update; never NULL.
  int _worker_i;          // Id of the worker running this closure.
public:
  UpdateRSOopClosure(HRInto_G1RemSet* rs, int worker_i = 0) :
    _from(NULL), _rs(rs), _worker_i(worker_i) {
    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
  }

  void set_from(HeapRegion* from) {
    assert(from != NULL, "from region must be non-NULL");
    _from = from;
  }

  virtual void do_oop(narrowOop* p);
  virtual void do_oop(oop* p);

  // Override: this closure is idempotent.
  // bool idempotent() { return true; }
  bool apply_to_weak_ref_discovered_field() { return true; }
};