/*
 * Copyright 2002-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains only thread-local data.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST live on the C heap. This can lead to memory leaks, though, as they
// are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to
// drain/flush/deallocate!
//
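// Illustrative sketch only (assumed calling pattern, pieced together from the
// static interface declared below rather than from this file's callers): the
// per-scavenge lifecycle is expected to look roughly like
//
//   PSPromotionManager::pre_scavenge();    // prepare/reset the managers
//   ... each GC worker drives its own manager: push, drain, steal ...
//   PSPromotionManager::post_scavenge();   // flush LABs, sanity-check stacks
//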

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

#define PS_PM_STATS 0

class PSPromotionManager : public CHeapObj {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PSPromotionManager**  _manager_array;
  static OopStarTaskQueueSet*  _stack_array_depth;
  static OopTaskQueueSet*      _stack_array_breadth;
  static PSOldGen*             _old_gen;
  static MutableSpace*         _young_space;

#if PS_PM_STATS
  uint                         _total_pushes;
  uint                         _masked_pushes;

  uint                         _overflow_pushes;
  uint                         _max_overflow_length;

  uint                         _arrays_chunked;
  uint                         _array_chunks_processed;

  uint                         _total_steals;
  uint                         _masked_steals;

  void print_stats(uint i);
  static void print_stats();
#endif // PS_PM_STATS

  PSYoungPromotionLAB          _young_lab;
  PSOldPromotionLAB            _old_lab;
  bool                         _young_gen_is_full;
  bool                         _old_gen_is_full;
  PrefetchQueue                _prefetch_queue;

  OopStarTaskQueue             _claimed_stack_depth;
  GrowableArray<StarTask>*     _overflow_stack_depth;
  OopTaskQueue                 _claimed_stack_breadth;
  GrowableArray<oop>*          _overflow_stack_breadth;

  bool                         _depth_first;
  bool                         _totally_drain;
  uint                         _target_stack_size;

  uint                         _array_chunk_size;
  uint                         _min_array_size_for_chunking;

  // Accessors
  static PSOldGen*     old_gen()     { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);
  template <class T> inline void claim_or_forward_internal_breadth(T* p);

  GrowableArray<StarTask>* overflow_stack_depth()   { return _overflow_stack_depth; }
  GrowableArray<oop>*      overflow_stack_breadth() { return _overflow_stack_breadth; }

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to the
  // from-space image of the array, and the length field of the from-space
  // image indicates how many entries of the array we still need to scan;
  // this is basically how ParNew does partial array scanning too). To be
  // able to distinguish between reference locations and partially-scanned
  // array oops we simply mask the latter oops with PS_CHUNKED_ARRAY_OOP_MASK
  // (defined below). The next three methods do the masking, unmasking, and
  // checking whether the oop is masked or not. Notice that the signatures of
  // the mask and unmask methods look a bit strange, as they accept and
  // return different types (oop and oop*). This is because of the difference
  // in types between what the task queue holds (oop*) and oops to
  // partially-scanned arrays (oop). We do all the necessary casting in the
  // mask / unmask methods to avoid sprinkling the rest of the code with more
  // casts.
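
  // Illustrative sketch only (hedged; this round trip is implied by the
  // asserts in the helpers below, not spelled out anywhere else): masking
  // just tags the low bits of the from-space array oop, so
  //
  //   oop* masked = mask_chunked_array_oop(obj);      // tag the array oop
  //   oop  again  = unmask_chunked_array_oop(masked); // strip the tag
  //   assert(again == obj, "mask/unmask must be a lossless round trip");
  //
  // which is why the mask value has to stay clear of bits that can be set in
  // a real (aligned) oop or in a compressed-oop queue entry.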

  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK 0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like a wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop* chunk = (oop*)p;  // cast p to oop* (uses StarTask's conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }

  template <class T> void process_array_chunk_work(oop obj,
                                                   int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    assert(depth_first(), "pre-condition");

#if PS_PM_STATS
    ++_total_pushes;
#endif // PS_PM_STATS

    if (!claimed_stack_depth()->push(p)) {
      overflow_stack_depth()->push(p);
#if PS_PM_STATS
      ++_overflow_pushes;
      uint stack_length = (uint) overflow_stack_depth()->length();
      if (stack_length > _max_overflow_length) {
        _max_overflow_length = stack_length;
      }
#endif // PS_PM_STATS
    }
  }

  void push_breadth(oop o) {
    assert(!depth_first(), "pre-condition");

#if PS_PM_STATS
    ++_total_pushes;
#endif // PS_PM_STATS

    if (!claimed_stack_breadth()->push(o)) {
      overflow_stack_breadth()->push(o);
#if PS_PM_STATS
      ++_overflow_pushes;
      uint stack_length = (uint) overflow_stack_breadth()->length();
      if (stack_length > _max_overflow_length) {
        _max_overflow_length = stack_length;
      }
#endif // PS_PM_STATS
    }
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
  static OopTaskQueueSet*     stack_array_breadth() { return _stack_array_breadth; }

 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static void post_scavenge();

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    assert(stack_array_depth() != NULL, "invariant");
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  static bool steal_breadth(int queue_num, int* seed, Task& t) {
    assert(stack_array_breadth() != NULL, "invariant");
    return stack_array_breadth()->steal(queue_num, seed, t);
  }
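
  // Illustrative sketch only (assumed usage; the real loop lives in the GC
  // task code, not in this header): a worker whose own queues run dry is
  // expected to steal and process entries from its peers roughly like
  //
  //   StarTask p;
  //   while (PSPromotionManager::steal_depth(queue_num, &seed, p)) {
  //     pm->process_popped_location_depth(p);
  //     pm->drain_stacks_depth(true);
  //   }
  //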

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }
  OopTaskQueue* claimed_stack_breadth() {
    return &_claimed_stack_breadth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o, bool depth_first);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    if (depth_first()) {
      drain_stacks_depth(totally_drain);
    } else {
      drain_stacks_breadth(totally_drain);
    }
  }
 public:
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);
  void drain_stacks_breadth(bool totally_drain);

  bool claimed_stack_empty() {
    if (depth_first()) {
      return claimed_stack_depth()->size() <= 0;
    } else {
      return claimed_stack_breadth()->size() <= 0;
    }
  }
  bool overflow_stack_empty() {
    if (depth_first()) {
      return overflow_stack_depth()->length() <= 0;
    } else {
      return overflow_stack_breadth()->length() <= 0;
    }
  }
  bool stacks_empty() {
    return claimed_stack_empty() && overflow_stack_empty();
  }
  bool depth_first() {
    return _depth_first;
  }

  inline void process_popped_location_depth(StarTask p);

  inline void flush_prefetch_queue();
  template <class T> inline void claim_or_forward_depth(T* p);
  template <class T> inline void claim_or_forward_breadth(T* p);

#if PS_PM_STATS
  void increment_steals(oop* p = NULL) {
    _total_steals += 1;
    if (p != NULL && is_oop_masked(p)) {
      _masked_steals += 1;
    }
  }
#endif // PS_PM_STATS
};
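
//
// Illustrative sketch only (assumed caller shape; claim_or_forward_depth and
// drain_stacks are declared above, but their call sites are not part of this
// file): a GC worker scanning roots in depth-first mode is expected to do
// roughly
//
//   PSPromotionManager* pm =
//       PSPromotionManager::gc_thread_promotion_manager(which);
//   pm->claim_or_forward_depth(p);   // for every reference location p
//   ...
//   pm->drain_stacks(true);          // fully drain before termination
//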