/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP

#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "memory/allocation.hpp"
#include "utilities/taskqueue.hpp"

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be allocated on the C heap. This can lead to memory leaks, though,
// as they are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to
// drain/flush/deallocate!
//
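// Typical use (illustrative sketch only; the actual call sites live in
// PSScavenge and the scavenge task code -- `which` and `p` below are
// assumed to be the GC worker id and a root reference location):
//
//   PSPromotionManager* pm =
//       PSPromotionManager::gc_thread_promotion_manager(which);
//   // Each claimed reference location is either forwarded immediately
//   // or pushed on the thread-local depth-first stack.
//   pm->claim_or_forward_depth(p);
//   // Process pushed locations until the local stack is empty.
//   pm->drain_stacks(true);
//   assert(pm->stacks_empty(), "post-condition");
//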
// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager : public CHeapObj {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PSPromotionManager**         _manager_array;
  static OopStarTaskQueueSet*         _stack_array_depth;
  static PSOldGen*                    _old_gen;
  static MutableSpace*                _young_space;

#if TASKQUEUE_STATS
  size_t                              _masked_pushes;
  size_t                              _masked_steals;
  size_t                              _arrays_chunked;
  size_t                              _array_chunks_processed;

  void print_taskqueue_stats(uint i) const;
  void print_local_stats(uint i) const;
  static void print_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                 _young_lab;
  PSOldPromotionLAB                   _old_lab;
  bool                                _young_gen_is_full;
  bool                                _old_gen_is_full;

  OopStarTaskQueue                    _claimed_stack_depth;
  OverflowTaskQueue<oop>              _claimed_stack_breadth;

  bool                                _totally_drain;
  uint                                _target_stack_size;

  uint                                _array_chunk_size;
  uint                                _min_array_size_for_chunking;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array, and the length field of the
  // from-space image indicates how many entries of the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK (defined below). The
  // next three methods do the masking, unmasking, and checking whether
  // the oop is masked or not. Notice that the signature of the mask
  // and unmask methods looks a bit strange, as they accept and return
  // different types (oop and oop*). This is because of the difference
  // in types between what the task queue holds (oop*) and oops to
  // partially-scanned arrays (oop). We do all the necessary casting
  // in the mask / unmask methods to avoid sprinkling the rest of the
  // code with more casts.
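  // Worked example (hypothetical address; PS_CHUNKED_ARRAY_OOP_MASK is
  // 0x2 as defined below). Heap oops are word-aligned, so the two low
  // bits of a real oop are always zero; bit 0 is already used by the
  // task queue to tag narrow oops (COMPRESSED_OOP_MASK), leaving bit 1
  // free to tag chunked-array entries:
  //
  //   oop  obj    = (oop)  0x00007f64a1c0;  // from-space array image
  //   oop* masked = (oop*) 0x00007f64a1c2;  // obj | 0x2, as pushed on the queue
  //   // is_oop_masked(masked) is true because bit 1 is set; unmasking
  //   // clears the tag: 0x00007f64a1c2 & ~0x2 == 0x00007f64a1c0 == obj.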
  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
           PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop (uses conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }

  template <class T> void process_array_chunk_work(oop obj,
                                                   int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    claimed_stack_depth()->push(p);
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static void post_scavenge();

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
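// Illustrative sketch of the steal loop a scavenge task might run on top
// of the API above once its own stack drains. The `terminator`, `which`,
// and `pm` names are assumptions for exposition; the real loop lives in
// the scavenge task code:
//
//   StarTask task;
//   int seed = 17;  // per-thread seed for choosing a victim queue
//   while (true) {
//     while (PSPromotionManager::steal_depth(which, &seed, task)) {
//       pm->process_popped_location_depth(task);
//       pm->drain_stacks_cond_depth();  // drain if the stack grew too large
//     }
//     if (terminator->offer_termination()) break;  // all queues empty
//   }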