/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP

#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/taskqueue.hpp"

//
// PSPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap ("cheap"). If a
// promotion manager is going to be used by more than one thread, the stacks
// MUST be allocated on the C heap. This can lead to memory leaks, though, as
// they are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
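
//
// Typical per-scavenge flow (a minimal sketch, inferred only from the static
// entry points declared in this header; the actual driver lives in psScavenge
// and the parallel GC task code, so the exact call sites may differ):
//
//   PSPromotionManager::pre_scavenge();
//
//   // on each GC worker thread i, while processing roots and stolen tasks:
//   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(i);
//   pm->claim_or_forward_depth(p);   // p is an oop* / narrowOop* reference location
//   pm->drain_stacks(true);          // copy and scan until the claimed stack is empty
//
//   // once all workers are done (the bool result signals whether promotion succeeded):
//   bool ok = PSPromotionManager::post_scavenge(gc_tracer);
//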

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PaddedEnd<PSPromotionManager>* _manager_array;
  static OopStarTaskQueueSet*           _stack_array_depth;
  static PSOldGen*                      _old_gen;
  static MutableSpace*                  _young_space;

#if TASKQUEUE_STATS
  size_t                                _masked_pushes;
  size_t                                _masked_steals;
  size_t                                _arrays_chunked;
  size_t                                _array_chunks_processed;

  void print_taskqueue_stats(uint i) const;
  void print_local_stats(uint i) const;
  static void print_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                   _young_lab;
  PSOldPromotionLAB                     _old_lab;
  bool                                  _young_gen_is_full;
  bool                                  _old_gen_is_full;

  OopStarTaskQueue                      _claimed_stack_depth;
  OverflowTaskQueue<oop, mtGC>          _claimed_stack_breadth;

  bool                                  _totally_drain;
  uint                                  _target_stack_size;

  uint                                  _array_chunk_size;
  uint                                  _min_array_size_for_chunking;

  PromotionFailedInfo                   _promotion_failed_info;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array, and the length field of the
  // from-space image indicates how many entries in the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we mask the latter
  // oops with PS_CHUNKED_ARRAY_OOP_MASK (0x2). The next three methods
  // do the masking, unmasking, and checking whether an oop is masked
  // or not. Notice that the signatures of the mask and unmask methods
  // look a bit strange, as they accept and return different types
  // (oop and oop*). This is because of the difference in types between
  // what the task queue holds (oop*) and oops to partially-scanned
  // arrays (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.
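
  //
  // Worked example (addresses are hypothetical, for illustration only): with
  // PS_CHUNKED_ARRAY_OOP_MASK == 0x2 (defined just below), a from-space array
  // oop at 0x00001000 is pushed as the tagged task entry 0x00001002:
  //
  //   oop  obj    = ...;                               // say, (oop) 0x00001000
  //   oop* masked = mask_chunked_array_oop(obj);       // (oop*) 0x00001002
  //   is_oop_masked(masked);                           // true, bit 0x2 is set
  //   oop  again  = unmask_chunked_array_oop(masked);  // (oop) 0x00001000 again
  //
  // Genuine reference locations are pointer-aligned, so their low bits are
  // clear and an ordinary oop* / narrowOop* task entry can never appear masked.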

  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like a wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop* chunk = (oop*)p;  // cast p to oop* (uses StarTask's conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }

  template <class T> void process_array_chunk_work(oop obj,
                                                   int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    claimed_stack_depth()->push(p);
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }

 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static bool post_scavenge(YoungGCTracer& gc_tracer);

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    return stack_array_depth()->steal(queue_num, seed, t);
  }

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP