aoqi@0: /* aoqi@0: * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. aoqi@0: * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. aoqi@0: * aoqi@0: * This code is free software; you can redistribute it and/or modify it aoqi@0: * under the terms of the GNU General Public License version 2 only, as aoqi@0: * published by the Free Software Foundation. aoqi@0: * aoqi@0: * This code is distributed in the hope that it will be useful, but WITHOUT aoqi@0: * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or aoqi@0: * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License aoqi@0: * version 2 for more details (a copy is included in the LICENSE file that aoqi@0: * accompanied this code). aoqi@0: * aoqi@0: * You should have received a copy of the GNU General Public License version aoqi@0: * 2 along with this work; if not, write to the Free Software Foundation, aoqi@0: * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. aoqi@0: * aoqi@0: * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA aoqi@0: * or visit www.oracle.com if you need additional information or have any aoqi@0: * questions. aoqi@0: * aoqi@0: */ aoqi@0: aoqi@0: #ifndef SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP aoqi@0: #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP aoqi@0: brutisso@6904: #include "gc_implementation/shared/gcTrace.hpp" aoqi@0: #include "memory/referencePolicy.hpp" aoqi@0: #include "memory/referenceProcessorStats.hpp" aoqi@0: #include "memory/referenceType.hpp" aoqi@0: #include "oops/instanceRefKlass.hpp" aoqi@0: aoqi@0: class GCTimer; aoqi@0: aoqi@0: // ReferenceProcessor class encapsulates the per-"collector" processing aoqi@0: // of java.lang.Reference objects for GC. 
The interface is useful for supporting aoqi@0: // a generational abstraction, in particular when there are multiple aoqi@0: // generations that are being independently collected -- possibly aoqi@0: // concurrently and/or incrementally. Note, however, that the aoqi@0: // ReferenceProcessor class abstracts away from a generational setting aoqi@0: // by using only a heap interval (called "span" below), thus allowing aoqi@0: // its use in a straightforward manner in a general, non-generational aoqi@0: // setting. aoqi@0: // aoqi@0: // The basic idea is that each ReferenceProcessor object concerns aoqi@0: // itself with ("weak") reference processing in a specific "span" aoqi@0: // of the heap of interest to a specific collector. Currently, aoqi@0: // the span is a convex interval of the heap, but, efficiency aoqi@0: // apart, there seems to be no reason it couldn't be extended aoqi@0: // (with appropriate modifications) to any "non-convex interval". aoqi@0: aoqi@0: // forward references aoqi@0: class ReferencePolicy; aoqi@0: class AbstractRefProcTaskExecutor; aoqi@0: aoqi@0: // List of discovered references. aoqi@0: class DiscoveredList { aoqi@0: public: aoqi@0: DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { } aoqi@0: oop head() const { aoqi@0: return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) : aoqi@0: _oop_head; aoqi@0: } aoqi@0: HeapWord* adr_head() { aoqi@0: return UseCompressedOops ? (HeapWord*)&_compressed_head : aoqi@0: (HeapWord*)&_oop_head; aoqi@0: } aoqi@0: void set_head(oop o) { aoqi@0: if (UseCompressedOops) { aoqi@0: // Must compress the head ptr. 
aoqi@0: _compressed_head = oopDesc::encode_heap_oop(o); aoqi@0: } else { aoqi@0: _oop_head = o; aoqi@0: } aoqi@0: } aoqi@0: bool is_empty() const { return head() == NULL; } aoqi@0: size_t length() { return _len; } aoqi@0: void set_length(size_t len) { _len = len; } aoqi@0: void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); } aoqi@0: void dec_length(size_t dec) { _len -= dec; } aoqi@0: private: aoqi@0: // Set value depending on UseCompressedOops. This could be a template class aoqi@0: // but then we have to fix all the instantiations and declarations that use this class. aoqi@0: oop _oop_head; aoqi@0: narrowOop _compressed_head; aoqi@0: size_t _len; aoqi@0: }; aoqi@0: aoqi@0: // Iterator for the list of discovered references. aoqi@0: class DiscoveredListIterator { aoqi@0: private: aoqi@0: DiscoveredList& _refs_list; aoqi@0: HeapWord* _prev_next; aoqi@0: oop _prev; aoqi@0: oop _ref; aoqi@0: HeapWord* _discovered_addr; aoqi@0: oop _next; aoqi@0: HeapWord* _referent_addr; aoqi@0: oop _referent; aoqi@0: OopClosure* _keep_alive; aoqi@0: BoolObjectClosure* _is_alive; aoqi@0: aoqi@0: DEBUG_ONLY( aoqi@0: oop _first_seen; // cyclic linked list check aoqi@0: ) aoqi@0: aoqi@0: NOT_PRODUCT( aoqi@0: size_t _processed; aoqi@0: size_t _removed; aoqi@0: ) aoqi@0: aoqi@0: public: aoqi@0: inline DiscoveredListIterator(DiscoveredList& refs_list, aoqi@0: OopClosure* keep_alive, aoqi@0: BoolObjectClosure* is_alive): aoqi@0: _refs_list(refs_list), aoqi@0: _prev_next(refs_list.adr_head()), aoqi@0: _prev(NULL), aoqi@0: _ref(refs_list.head()), aoqi@0: #ifdef ASSERT aoqi@0: _first_seen(refs_list.head()), aoqi@0: #endif aoqi@0: #ifndef PRODUCT aoqi@0: _processed(0), aoqi@0: _removed(0), aoqi@0: #endif aoqi@0: _next(NULL), aoqi@0: _keep_alive(keep_alive), aoqi@0: _is_alive(is_alive) aoqi@0: { } aoqi@0: aoqi@0: // End Of List. aoqi@0: inline bool has_next() const { return _ref != NULL; } aoqi@0: aoqi@0: // Get oop to the Reference object. 
aoqi@0: inline oop obj() const { return _ref; } aoqi@0: aoqi@0: // Get oop to the referent object. aoqi@0: inline oop referent() const { return _referent; } aoqi@0: aoqi@0: // Returns true if referent is alive. aoqi@0: inline bool is_referent_alive() const { aoqi@0: return _is_alive->do_object_b(_referent); aoqi@0: } aoqi@0: aoqi@0: // Loads data for the current reference. aoqi@0: // The "allow_null_referent" argument tells us to allow for the possibility aoqi@0: // of a NULL referent in the discovered Reference object. This typically aoqi@0: // happens in the case of concurrent collectors that may have done the aoqi@0: // discovery concurrently, or interleaved, with mutator execution. aoqi@0: void load_ptrs(DEBUG_ONLY(bool allow_null_referent)); aoqi@0: aoqi@0: // Move to the next discovered reference. aoqi@0: inline void next() { aoqi@0: _prev_next = _discovered_addr; aoqi@0: _prev = _ref; aoqi@0: move_to_next(); aoqi@0: } aoqi@0: aoqi@0: // Remove the current reference from the list aoqi@0: void remove(); aoqi@0: aoqi@0: // Make the Reference object active again. aoqi@0: void make_active(); aoqi@0: aoqi@0: // Make the referent alive. aoqi@0: inline void make_referent_alive() { aoqi@0: if (UseCompressedOops) { aoqi@0: _keep_alive->do_oop((narrowOop*)_referent_addr); aoqi@0: } else { aoqi@0: _keep_alive->do_oop((oop*)_referent_addr); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // Update the discovered field. aoqi@0: inline void update_discovered() { aoqi@0: // First _prev_next ref actually points into DiscoveredList (gross). aoqi@0: if (UseCompressedOops) { aoqi@0: if (!oopDesc::is_null(*(narrowOop*)_prev_next)) { aoqi@0: _keep_alive->do_oop((narrowOop*)_prev_next); aoqi@0: } aoqi@0: } else { aoqi@0: if (!oopDesc::is_null(*(oop*)_prev_next)) { aoqi@0: _keep_alive->do_oop((oop*)_prev_next); aoqi@0: } aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: // NULL out referent pointer. 
aoqi@0: void clear_referent(); aoqi@0: aoqi@0: // Statistics aoqi@0: NOT_PRODUCT( aoqi@0: inline size_t processed() const { return _processed; } aoqi@0: inline size_t removed() const { return _removed; } aoqi@0: ) aoqi@0: aoqi@0: inline void move_to_next() { aoqi@0: if (_ref == _next) { aoqi@0: // End of the list. aoqi@0: _ref = NULL; aoqi@0: } else { aoqi@0: _ref = _next; aoqi@0: } aoqi@0: assert(_ref != _first_seen, "cyclic ref_list found"); aoqi@0: NOT_PRODUCT(_processed++); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: class ReferenceProcessor : public CHeapObj { aoqi@0: aoqi@0: private: aoqi@0: size_t total_count(DiscoveredList lists[]); aoqi@0: aoqi@0: protected: aoqi@0: // Compatibility with pre-4965777 JDK's aoqi@0: static bool _pending_list_uses_discovered_field; aoqi@0: aoqi@0: // The SoftReference master timestamp clock aoqi@0: static jlong _soft_ref_timestamp_clock; aoqi@0: aoqi@0: MemRegion _span; // (right-open) interval of heap aoqi@0: // subject to wkref discovery aoqi@0: aoqi@0: bool _discovering_refs; // true when discovery enabled aoqi@0: bool _discovery_is_atomic; // if discovery is atomic wrt aoqi@0: // other collectors in configuration aoqi@0: bool _discovery_is_mt; // true if reference discovery is MT. aoqi@0: aoqi@0: bool _enqueuing_is_done; // true if all weak references enqueued aoqi@0: bool _processing_is_mt; // true during phases when aoqi@0: // reference processing is MT. aoqi@0: uint _next_id; // round-robin mod _num_q counter in aoqi@0: // support of work distribution aoqi@0: aoqi@0: // For collectors that do not keep GC liveness information aoqi@0: // in the object header, this field holds a closure that aoqi@0: // helps the reference processor determine the reachability aoqi@0: // of an oop. It is currently initialized to NULL for all aoqi@0: // collectors except for CMS and G1. aoqi@0: BoolObjectClosure* _is_alive_non_header; aoqi@0: aoqi@0: // Soft ref clearing policies aoqi@0: // . 
the default policy aoqi@0: static ReferencePolicy* _default_soft_ref_policy; aoqi@0: // . the "clear all" policy aoqi@0: static ReferencePolicy* _always_clear_soft_ref_policy; aoqi@0: // . the current policy below is either one of the above aoqi@0: ReferencePolicy* _current_soft_ref_policy; aoqi@0: aoqi@0: // The discovered ref lists themselves aoqi@0: aoqi@0: // The active MT'ness degree of the queues below aoqi@0: uint _num_q; aoqi@0: // The maximum MT'ness degree of the queues below aoqi@0: uint _max_num_q; aoqi@0: aoqi@0: // Master array of discovered oops aoqi@0: DiscoveredList* _discovered_refs; aoqi@0: aoqi@0: // Arrays of lists of oops, one per thread (pointers into master array above) aoqi@0: DiscoveredList* _discoveredSoftRefs; aoqi@0: DiscoveredList* _discoveredWeakRefs; aoqi@0: DiscoveredList* _discoveredFinalRefs; aoqi@0: DiscoveredList* _discoveredPhantomRefs; jmasa@7469: DiscoveredList* _discoveredCleanerRefs; aoqi@0: aoqi@0: public: jmasa@7469: static int number_of_subclasses_of_ref() { return (REF_CLEANER - REF_OTHER); } aoqi@0: aoqi@0: uint num_q() { return _num_q; } aoqi@0: uint max_num_q() { return _max_num_q; } aoqi@0: void set_active_mt_degree(uint v) { _num_q = v; } aoqi@0: aoqi@0: DiscoveredList* discovered_refs() { return _discovered_refs; } aoqi@0: aoqi@0: ReferencePolicy* setup_policy(bool always_clear) { aoqi@0: _current_soft_ref_policy = always_clear ? aoqi@0: _always_clear_soft_ref_policy : _default_soft_ref_policy; aoqi@0: _current_soft_ref_policy->setup(); // snapshot the policy threshold aoqi@0: return _current_soft_ref_policy; aoqi@0: } aoqi@0: aoqi@0: // Process references with a certain reachability level. 
aoqi@0: size_t process_discovered_reflist(DiscoveredList refs_lists[], aoqi@0: ReferencePolicy* policy, aoqi@0: bool clear_referent, aoqi@0: BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc, aoqi@0: AbstractRefProcTaskExecutor* task_executor); aoqi@0: aoqi@0: void process_phaseJNI(BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc); aoqi@0: aoqi@0: // Work methods used by the method process_discovered_reflist aoqi@0: // Phase1: keep alive all those referents that are otherwise aoqi@0: // dead but which must be kept alive by policy (and their closure). aoqi@0: void process_phase1(DiscoveredList& refs_list, aoqi@0: ReferencePolicy* policy, aoqi@0: BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc); aoqi@0: // Phase2: remove all those references whose referents are aoqi@0: // reachable. aoqi@0: inline void process_phase2(DiscoveredList& refs_list, aoqi@0: BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc) { aoqi@0: if (discovery_is_atomic()) { aoqi@0: // complete_gc is ignored in this case for this phase aoqi@0: pp2_work(refs_list, is_alive, keep_alive); aoqi@0: } else { aoqi@0: assert(complete_gc != NULL, "Error"); aoqi@0: pp2_work_concurrent_discovery(refs_list, is_alive, aoqi@0: keep_alive, complete_gc); aoqi@0: } aoqi@0: } aoqi@0: // Work methods in support of process_phase2 aoqi@0: void pp2_work(DiscoveredList& refs_list, aoqi@0: BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive); aoqi@0: void pp2_work_concurrent_discovery( aoqi@0: DiscoveredList& refs_list, aoqi@0: BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc); aoqi@0: // Phase3: process the referents by either clearing them aoqi@0: // or keeping them alive (and their closure) aoqi@0: void process_phase3(DiscoveredList& refs_list, aoqi@0: bool clear_referent, aoqi@0: 
BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc); aoqi@0: aoqi@0: // Enqueue references with a certain reachability level aoqi@0: void enqueue_discovered_reflist(DiscoveredList& refs_list, HeapWord* pending_list_addr); aoqi@0: aoqi@0: // "Preclean" all the discovered reference lists aoqi@0: // by removing references with strongly reachable referents. aoqi@0: // The first argument is a predicate on an oop that indicates aoqi@0: // its (strong) reachability and the second is a closure that aoqi@0: // may be used to incrementalize or abort the precleaning process. aoqi@0: // The caller is responsible for taking care of potential aoqi@0: // interference with concurrent operations on these lists aoqi@0: // (or predicates involved) by other threads. Currently aoqi@0: // only used by the CMS collector. aoqi@0: void preclean_discovered_references(BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc, aoqi@0: YieldClosure* yield, brutisso@6904: GCTimer* gc_timer, brutisso@6904: GCId gc_id); aoqi@0: aoqi@0: // Delete entries in the discovered lists that have aoqi@0: // either a null referent or are not active. Such aoqi@0: // Reference objects can result from the clearing aoqi@0: // or enqueueing of Reference objects concurrent aoqi@0: // with their discovery by a (concurrent) collector. aoqi@0: // For a definition of "active" see java.lang.ref.Reference; aoqi@0: // Refs are born active, become inactive when enqueued, aoqi@0: // and never become active again. The state of being aoqi@0: // active is encoded as follows: A Ref is active aoqi@0: // if and only if its "next" field is NULL. aoqi@0: void clean_up_discovered_references(); aoqi@0: void clean_up_discovered_reflist(DiscoveredList& refs_list); aoqi@0: aoqi@0: // Returns the name of the discovered reference list aoqi@0: // occupying the i / _num_q slot. 
aoqi@0: const char* list_name(uint i); aoqi@0: aoqi@0: void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor); aoqi@0: aoqi@0: protected: aoqi@0: // "Preclean" the given discovered reference list aoqi@0: // by removing references with strongly reachable referents. aoqi@0: // Currently used in support of CMS only. aoqi@0: void preclean_discovered_reflist(DiscoveredList& refs_list, aoqi@0: BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc, aoqi@0: YieldClosure* yield); aoqi@0: aoqi@0: // round-robin mod _num_q (not: _not_ mode _max_num_q) aoqi@0: uint next_id() { aoqi@0: uint id = _next_id; aoqi@0: if (++_next_id == _num_q) { aoqi@0: _next_id = 0; aoqi@0: } aoqi@0: return id; aoqi@0: } aoqi@0: DiscoveredList* get_discovered_list(ReferenceType rt); aoqi@0: inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj, aoqi@0: HeapWord* discovered_addr); aoqi@0: void verify_ok_to_handle_reflists() PRODUCT_RETURN; aoqi@0: aoqi@0: void clear_discovered_references(DiscoveredList& refs_list); aoqi@0: void abandon_partial_discovered_list(DiscoveredList& refs_list); aoqi@0: aoqi@0: // Calculate the number of jni handles. aoqi@0: unsigned int count_jni_refs(); aoqi@0: aoqi@0: // Balances reference queues. aoqi@0: void balance_queues(DiscoveredList ref_lists[]); aoqi@0: aoqi@0: // Update (advance) the soft ref master clock field. aoqi@0: void update_soft_ref_master_clock(); aoqi@0: aoqi@0: public: aoqi@0: // Default parameters give you a vanilla reference processor. 
aoqi@0: ReferenceProcessor(MemRegion span, aoqi@0: bool mt_processing = false, uint mt_processing_degree = 1, aoqi@0: bool mt_discovery = false, uint mt_discovery_degree = 1, aoqi@0: bool atomic_discovery = true, aoqi@0: BoolObjectClosure* is_alive_non_header = NULL); aoqi@0: aoqi@0: // RefDiscoveryPolicy values aoqi@0: enum DiscoveryPolicy { aoqi@0: ReferenceBasedDiscovery = 0, aoqi@0: ReferentBasedDiscovery = 1, aoqi@0: DiscoveryPolicyMin = ReferenceBasedDiscovery, aoqi@0: DiscoveryPolicyMax = ReferentBasedDiscovery aoqi@0: }; aoqi@0: aoqi@0: static void init_statics(); aoqi@0: aoqi@0: public: aoqi@0: // get and set "is_alive_non_header" field aoqi@0: BoolObjectClosure* is_alive_non_header() { aoqi@0: return _is_alive_non_header; aoqi@0: } aoqi@0: void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) { aoqi@0: _is_alive_non_header = is_alive_non_header; aoqi@0: } aoqi@0: aoqi@0: // get and set span aoqi@0: MemRegion span() { return _span; } aoqi@0: void set_span(MemRegion span) { _span = span; } aoqi@0: aoqi@0: // start and stop weak ref discovery aoqi@0: void enable_discovery(bool verify_disabled, bool check_no_refs); aoqi@0: void disable_discovery() { _discovering_refs = false; } aoqi@0: bool discovery_enabled() { return _discovering_refs; } aoqi@0: aoqi@0: // whether discovery is atomic wrt other collectors aoqi@0: bool discovery_is_atomic() const { return _discovery_is_atomic; } aoqi@0: void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; } aoqi@0: aoqi@0: // whether the JDK in which we are embedded is a pre-4965777 JDK, aoqi@0: // and thus whether or not it uses the discovered field to chain aoqi@0: // the entries in the pending list. 
aoqi@0: static bool pending_list_uses_discovered_field() { aoqi@0: return _pending_list_uses_discovered_field; aoqi@0: } aoqi@0: aoqi@0: // whether discovery is done by multiple threads same-old-timeously aoqi@0: bool discovery_is_mt() const { return _discovery_is_mt; } aoqi@0: void set_mt_discovery(bool mt) { _discovery_is_mt = mt; } aoqi@0: aoqi@0: // Whether we are in a phase when _processing_ is MT. aoqi@0: bool processing_is_mt() const { return _processing_is_mt; } aoqi@0: void set_mt_processing(bool mt) { _processing_is_mt = mt; } aoqi@0: aoqi@0: // whether all enqueuing of weak references is complete aoqi@0: bool enqueuing_is_done() { return _enqueuing_is_done; } aoqi@0: void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; } aoqi@0: aoqi@0: // iterate over oops aoqi@0: void weak_oops_do(OopClosure* f); // weak roots aoqi@0: aoqi@0: // Balance each of the discovered lists. aoqi@0: void balance_all_queues(); aoqi@0: void verify_list(DiscoveredList& ref_list); aoqi@0: aoqi@0: // Discover a Reference object, using appropriate discovery criteria aoqi@0: bool discover_reference(oop obj, ReferenceType rt); aoqi@0: aoqi@0: // Process references found during GC (called by the garbage collector) aoqi@0: ReferenceProcessorStats aoqi@0: process_discovered_references(BoolObjectClosure* is_alive, aoqi@0: OopClosure* keep_alive, aoqi@0: VoidClosure* complete_gc, aoqi@0: AbstractRefProcTaskExecutor* task_executor, brutisso@6904: GCTimer *gc_timer, brutisso@6904: GCId gc_id); aoqi@0: aoqi@0: // Enqueue references at end of GC (called by the garbage collector) aoqi@0: bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL); aoqi@0: aoqi@0: // If a discovery is in process that is being superceded, abandon it: all aoqi@0: // the discovered lists will be empty, and all the objects on them will aoqi@0: // have NULL discovered fields. Must be called only at a safepoint. 
aoqi@0: void abandon_partial_discovery(); aoqi@0: aoqi@0: // debugging aoqi@0: void verify_no_references_recorded() PRODUCT_RETURN; aoqi@0: void verify_referent(oop obj) PRODUCT_RETURN; aoqi@0: aoqi@0: // clear the discovered lists (unlinking each entry). aoqi@0: void clear_discovered_references() PRODUCT_RETURN; aoqi@0: }; aoqi@0: aoqi@0: // A utility class to disable reference discovery in aoqi@0: // the scope which contains it, for given ReferenceProcessor. aoqi@0: class NoRefDiscovery: StackObj { aoqi@0: private: aoqi@0: ReferenceProcessor* _rp; aoqi@0: bool _was_discovering_refs; aoqi@0: public: aoqi@0: NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) { aoqi@0: _was_discovering_refs = _rp->discovery_enabled(); aoqi@0: if (_was_discovering_refs) { aoqi@0: _rp->disable_discovery(); aoqi@0: } aoqi@0: } aoqi@0: aoqi@0: ~NoRefDiscovery() { aoqi@0: if (_was_discovering_refs) { aoqi@0: _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/); aoqi@0: } aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: // A utility class to temporarily mutate the span of the aoqi@0: // given ReferenceProcessor in the scope that contains it. aoqi@0: class ReferenceProcessorSpanMutator: StackObj { aoqi@0: private: aoqi@0: ReferenceProcessor* _rp; aoqi@0: MemRegion _saved_span; aoqi@0: aoqi@0: public: aoqi@0: ReferenceProcessorSpanMutator(ReferenceProcessor* rp, aoqi@0: MemRegion span): aoqi@0: _rp(rp) { aoqi@0: _saved_span = _rp->span(); aoqi@0: _rp->set_span(span); aoqi@0: } aoqi@0: aoqi@0: ~ReferenceProcessorSpanMutator() { aoqi@0: _rp->set_span(_saved_span); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: // A utility class to temporarily change the MT'ness of aoqi@0: // reference discovery for the given ReferenceProcessor aoqi@0: // in the scope that contains it. 
aoqi@0: class ReferenceProcessorMTDiscoveryMutator: StackObj { aoqi@0: private: aoqi@0: ReferenceProcessor* _rp; aoqi@0: bool _saved_mt; aoqi@0: aoqi@0: public: aoqi@0: ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp, aoqi@0: bool mt): aoqi@0: _rp(rp) { aoqi@0: _saved_mt = _rp->discovery_is_mt(); aoqi@0: _rp->set_mt_discovery(mt); aoqi@0: } aoqi@0: aoqi@0: ~ReferenceProcessorMTDiscoveryMutator() { aoqi@0: _rp->set_mt_discovery(_saved_mt); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: // A utility class to temporarily change the disposition aoqi@0: // of the "is_alive_non_header" closure field of the aoqi@0: // given ReferenceProcessor in the scope that contains it. aoqi@0: class ReferenceProcessorIsAliveMutator: StackObj { aoqi@0: private: aoqi@0: ReferenceProcessor* _rp; aoqi@0: BoolObjectClosure* _saved_cl; aoqi@0: aoqi@0: public: aoqi@0: ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp, aoqi@0: BoolObjectClosure* cl): aoqi@0: _rp(rp) { aoqi@0: _saved_cl = _rp->is_alive_non_header(); aoqi@0: _rp->set_is_alive_non_header(cl); aoqi@0: } aoqi@0: aoqi@0: ~ReferenceProcessorIsAliveMutator() { aoqi@0: _rp->set_is_alive_non_header(_saved_cl); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: // A utility class to temporarily change the disposition aoqi@0: // of the "discovery_is_atomic" field of the aoqi@0: // given ReferenceProcessor in the scope that contains it. 
aoqi@0: class ReferenceProcessorAtomicMutator: StackObj { aoqi@0: private: aoqi@0: ReferenceProcessor* _rp; aoqi@0: bool _saved_atomic_discovery; aoqi@0: aoqi@0: public: aoqi@0: ReferenceProcessorAtomicMutator(ReferenceProcessor* rp, aoqi@0: bool atomic): aoqi@0: _rp(rp) { aoqi@0: _saved_atomic_discovery = _rp->discovery_is_atomic(); aoqi@0: _rp->set_atomic_discovery(atomic); aoqi@0: } aoqi@0: aoqi@0: ~ReferenceProcessorAtomicMutator() { aoqi@0: _rp->set_atomic_discovery(_saved_atomic_discovery); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: // A utility class to temporarily change the MT processing aoqi@0: // disposition of the given ReferenceProcessor instance aoqi@0: // in the scope that contains it. aoqi@0: class ReferenceProcessorMTProcMutator: StackObj { aoqi@0: private: aoqi@0: ReferenceProcessor* _rp; aoqi@0: bool _saved_mt; aoqi@0: aoqi@0: public: aoqi@0: ReferenceProcessorMTProcMutator(ReferenceProcessor* rp, aoqi@0: bool mt): aoqi@0: _rp(rp) { aoqi@0: _saved_mt = _rp->processing_is_mt(); aoqi@0: _rp->set_mt_processing(mt); aoqi@0: } aoqi@0: aoqi@0: ~ReferenceProcessorMTProcMutator() { aoqi@0: _rp->set_mt_processing(_saved_mt); aoqi@0: } aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: // This class is an interface used to implement task execution for the aoqi@0: // reference processing. aoqi@0: class AbstractRefProcTaskExecutor { aoqi@0: public: aoqi@0: aoqi@0: // Abstract tasks to execute. aoqi@0: class ProcessTask; aoqi@0: class EnqueueTask; aoqi@0: aoqi@0: // Executes a task using worker threads. aoqi@0: virtual void execute(ProcessTask& task) = 0; aoqi@0: virtual void execute(EnqueueTask& task) = 0; aoqi@0: aoqi@0: // Switch to single threaded mode. aoqi@0: virtual void set_single_threaded_mode() { }; aoqi@0: }; aoqi@0: aoqi@0: // Abstract reference processing task to execute. 
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  // Subclasses capture the processor, the array of per-queue discovered
  // lists to operate on, and whether the keep_alive closure marks oops.
  ProcessTask(ReferenceProcessor& ref_processor,
              DiscoveredList refs_lists[],
              bool marks_oops_alive)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _marks_oops_alive(marks_oops_alive)
  { }

public:
  // Invoked per worker by the executor; work_id identifies the worker's
  // slot (presumably an index into _refs_lists -- confirm in subclasses).
  virtual void work(unsigned int work_id, BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  // Returns true if a task marks some oops as alive.
  bool marks_oops_alive() const
  { return _marks_oops_alive; }

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;        // array of discovered-reference lists
  const bool          _marks_oops_alive;  // see marks_oops_alive()
};

// Abstract reference enqueueing task to execute.
class AbstractRefProcTaskExecutor::EnqueueTask {
protected:
  // Subclasses capture the processor, the lists to enqueue, the address
  // of the pending-list head, and the number of queues (lists).
  EnqueueTask(ReferenceProcessor& ref_processor,
              DiscoveredList refs_lists[],
              HeapWord* pending_list_addr,
              int n_queues)
    : _ref_processor(ref_processor),
      _refs_lists(refs_lists),
      _pending_list_addr(pending_list_addr),
      _n_queues(n_queues)
  { }

public:
  // Invoked per worker by the executor.
  virtual void work(unsigned int work_id) = 0;

protected:
  ReferenceProcessor& _ref_processor;
  DiscoveredList*     _refs_lists;         // array of discovered-reference lists
  HeapWord*           _pending_list_addr;  // address of the pending-list head
  int                 _n_queues;           // number of lists in _refs_lists
};

#endif // SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP