src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp

/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP

#include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/copyFailedInfo.hpp"
#include "memory/allocation.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/taskqueue.hpp"

//
// PSPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be on the C heap. This can lead to memory leaks, though, as they
// are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
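// A minimal usage sketch (illustrative only; the calls are the ones
// declared below, but the driver code and the `which` / `gc_tracer`
// locals are hypothetical):
//
//   PSPromotionManager::pre_scavenge();
//   // in each GC worker thread, where `which` is the worker id:
//   PSPromotionManager* pm =
//     PSPromotionManager::gc_thread_promotion_manager(which);
//   // ... push roots via claim_or_forward_depth(), then:
//   pm->drain_stacks(true);
//   assert(pm->stacks_empty(), "post-condition");
//   // after all workers have finished:
//   bool no_failures = PSPromotionManager::post_scavenge(gc_tracer);
//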

// Move to some global location
#define HAS_BEEN_MOVED 0x1501d01d
// End move to some global location

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PaddedEnd<PSPromotionManager>* _manager_array;
  static OopStarTaskQueueSet*           _stack_array_depth;
  static PSOldGen*                      _old_gen;
  static MutableSpace*                  _young_space;

#if TASKQUEUE_STATS
  size_t _masked_pushes;
  size_t _masked_steals;
  size_t _arrays_chunked;
  size_t _array_chunks_processed;

  void print_taskqueue_stats(uint i) const;
  void print_local_stats(uint i) const;
  static void print_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB          _young_lab;
  PSOldPromotionLAB            _old_lab;
  bool                         _young_gen_is_full;
  bool                         _old_gen_is_full;

  OopStarTaskQueue             _claimed_stack_depth;
  OverflowTaskQueue<oop, mtGC> _claimed_stack_breadth;

  bool                         _totally_drain;
  uint                         _target_stack_size;

  uint                         _array_chunk_size;
  uint                         _min_array_size_for_chunking;

  PromotionFailedInfo          _promotion_failed_info;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(int index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);
  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array and the length on the
  // from-space image indicates how many entries on the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK. The next three methods
  // do the masking, unmasking, and checking whether the oop is masked
  // or not. Notice that the signature of the mask and unmask methods
  // looks a bit strange, as they accept and return different types
  // (oop and oop*). This is because of the difference in types between
  // what the task queue holds (oop*) and oops to partially-scanned
  // arrays (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.

  // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) can't conflict with COMPRESSED_OOP_MASK
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated like a wide oop*
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop* (uses conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }
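
  // Round-trip sketch of the masking scheme above (illustrative only;
  // `holder` and `task` are hypothetical locals):
  //
  //   oop  holder = ...;                        // from-space array image
  //   oop* masked = mask_chunked_array_oop(holder);
  //   push_depth(masked);                       // tagged entry on the queue
  //   StarTask task = ...;                      // entry popped back off
  //   if (is_oop_masked(task)) {
  //     oop obj = unmask_chunked_array_oop(task);  // recover the array oop
  //     process_array_chunk(obj);                  // scan the next chunk
  //   }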

  template <class T> void process_array_chunk_work(oop obj,
                                                   int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p) {
    claimed_stack_depth()->push(p);
  }

 protected:
  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static bool post_scavenge(YoungGCTracer& gc_tracer);

  static PSPromotionManager* gc_thread_promotion_manager(int index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, int* seed, StarTask& t) {
    return stack_array_depth()->steal(queue_num, seed, t);
  }
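
  // Typical steal-loop sketch for a GC worker task (illustrative only;
  // `which`, `seed`, `pm` and `terminator` are hypothetical locals of
  // the caller):
  //
  //   StarTask p;
  //   while (true) {
  //     if (PSPromotionManager::steal_depth(which, &seed, p)) {
  //       pm->process_popped_location_depth(p);
  //       pm->drain_stacks_depth(true);
  //     } else if (terminator->offer_termination()) {
  //       break;
  //     }
  //   }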

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
 public:
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)
};

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
