Wed, 26 Mar 2014 14:15:02 +0100
8035667: EventMetaspaceSummary doesn't report committed Metaspace memory
Reviewed-by: jmasa, stefank
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "memory/sharedHeap.hpp"
#include "oops/arrayOop.hpp"
#include "oops/oop.inline.hpp"

ParGCAllocBuffer::ParGCAllocBuffer(size_t desired_plab_sz_) :
  _word_sz(desired_plab_sz_), _bottom(NULL), _top(NULL),
  _end(NULL), _hard_end(NULL),
  _retained(false), _retained_filler(),
  _allocated(0), _wasted(0)
{
  assert(min_size() > AlignmentReserve, "Inconsistency!");
  // arrayOopDesc::header_size depends on command line initialization.
  FillerHeaderSize = align_object_size(arrayOopDesc::header_size(T_INT));
  AlignmentReserve = oopDesc::header_size() > MinObjAlignment ? FillerHeaderSize : 0;
}

size_t ParGCAllocBuffer::FillerHeaderSize;

// If the minimum object size is greater than MinObjAlignment, we can
// end up with a shard at the end of the buffer that's smaller than
// the smallest object. We can't allow that because the buffer must
// look like it's full of objects when we retire it, so we make
// sure we have enough space for a filler int array object.
size_t ParGCAllocBuffer::AlignmentReserve;
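
// Note (added): with typical 64-bit defaults, oopDesc::header_size() is
// two heap words (mark word plus klass pointer) while MinObjAlignment is
// one heap word, so AlignmentReserve ends up equal to FillerHeaderSize,
// the header size of an int[] filler object rounded up to the object
// alignment. These concrete values are an assumption about platform
// defaults, not something this file checks.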

void ParGCAllocBuffer::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // If the buffer had been retained, shorten the previous filler object.
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    // Wasted space book-keeping, otherwise (normally) done in invalidate()
    _wasted += _retained_filler.word_size();
    _retained = false;
  }
  assert(!end_of_gc || !_retained, "At this point, end_of_gc ==> !_retained.");
  if (_top < _hard_end) {
    CollectedHeap::fill_with_object(_top, _hard_end);
    if (!retain) {
      invalidate();
    } else {
      // Is there wasted space we'd like to retain for the next GC?
      if (pointer_delta(_end, _top) > FillerHeaderSize) {
        _retained = true;
        _retained_filler = MemRegion(_top, FillerHeaderSize);
        _top = _top + FillerHeaderSize;
      } else {
        invalidate();
      }
    }
  }
}
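
// Illustrative walk-through (hypothetical numbers, added for clarity):
// retiring at GC end with retain == true when _top is 1000 words into a
// 1024-word buffer formats the trailing words as a dummy object, so the
// buffer parses as a sequence of valid objects. Since the slack up to
// _end exceeds FillerHeaderSize, the buffer is retained: _top is bumped
// just past a FillerHeaderSize-word filler header, allocation resumes
// there in the next GC, and the following retire() call shortens that
// filler back to cover only the retained header region.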

void ParGCAllocBuffer::flush_stats(PLABStats* stats) {
  assert(ResizePLAB, "Wasted work");
  stats->add_allocated(_allocated);
  stats->add_wasted(_wasted);
  stats->add_unused(pointer_delta(_end, _top));
}

// Compute desired plab size and latch result for later
// use. This should be called once at the end of parallel
// scavenge; it clears the sensor accumulators.
void PLABStats::adjust_desired_plab_sz(uint no_of_gc_workers) {
  assert(ResizePLAB, "Not set");

  assert(is_object_aligned(max_size()) && min_size() <= max_size(),
         "PLAB clipping computation may be incorrect");

  if (_allocated == 0) {
    assert(_unused == 0,
           err_msg("Inconsistency in PLAB stats: "
                   "_allocated: "SIZE_FORMAT", "
                   "_wasted: "SIZE_FORMAT", "
                   "_unused: "SIZE_FORMAT", "
                   "_used : "SIZE_FORMAT,
                   _allocated, _wasted, _unused, _used));

    _allocated = 1;
  }
  double wasted_frac = (double)_unused / (double)_allocated;
  size_t target_refills = (size_t)((wasted_frac * TargetSurvivorRatio) /
                                   TargetPLABWastePct);
  if (target_refills == 0) {
    target_refills = 1;
  }
  _used = _allocated - _wasted - _unused;
  size_t plab_sz = _used / (target_refills * no_of_gc_workers);
  if (PrintPLAB) gclog_or_tty->print(" (plab_sz = "SIZE_FORMAT" ", plab_sz);
  // Take historical weighted average
  _filter.sample(plab_sz);
  // Clip from above and below, and align to object boundary
  plab_sz = MAX2(min_size(), (size_t)_filter.average());
  plab_sz = MIN2(max_size(), plab_sz);
  plab_sz = align_object_size(plab_sz);
  // Latch the result
  if (PrintPLAB) gclog_or_tty->print(" desired_plab_sz = "SIZE_FORMAT") ", plab_sz);
  _desired_plab_sz = plab_sz;
  // Now clear the accumulators for next round:
  // note this needs to be fixed in the case where we
  // are retaining across scavenges. FIX ME !!! XXX
  _allocated = 0;
  _wasted    = 0;
  _unused    = 0;
}
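
// Worked example of the sizing formula above (hypothetical inputs, added
// for clarity; TargetSurvivorRatio = 50 and TargetPLABWastePct = 10 are
// assumed to be at their usual defaults):
//   _allocated = 100000 words, _wasted = 500, _unused = 2000, 4 workers
//   wasted_frac    = 2000 / 100000 = 0.02
//   target_refills = (0.02 * 50) / 10 = 0.1 -> truncates to 0 -> clamped to 1
//   _used          = 100000 - 500 - 2000 = 97500
//   plab_sz        = 97500 / (1 * 4) = 24375 words
// which is then smoothed by _filter, clipped to [min_size(), max_size()],
// and aligned before being latched into _desired_plab_sz.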

#ifndef PRODUCT
void ParGCAllocBuffer::print() {
  gclog_or_tty->print("parGCAllocBuffer: _bottom: %p _top: %p _end: %p _hard_end: %p "
                      "_retained: %c _retained_filler: [%p,%p)\n",
                      _bottom, _top, _end, _hard_end,
                      "FT"[_retained], _retained_filler.start(), _retained_filler.end());
}
#endif // !PRODUCT

const size_t ParGCAllocBufferWithBOT::ChunkSizeInWords =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment(),
     ((size_t)Generation::GenGrain)/HeapWordSize);
const size_t ParGCAllocBufferWithBOT::ChunkSizeInBytes =
MIN2(CardTableModRefBS::par_chunk_heapword_alignment() * HeapWordSize,
     (size_t)Generation::GenGrain);
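
// Note (added): these two constants express the same chunk size, once in
// heap words and once in bytes; since GenGrain is a multiple of
// HeapWordSize, ChunkSizeInBytes == ChunkSizeInWords * HeapWordSize.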

ParGCAllocBufferWithBOT::ParGCAllocBufferWithBOT(size_t word_sz,
                                                 BlockOffsetSharedArray* bsa) :
  ParGCAllocBuffer(word_sz),
  _bsa(bsa),
  _bt(bsa, MemRegion(_bottom, _hard_end)),
  _true_end(_hard_end)
{}

// The buffer comes with its own BOT, with a shared (obviously) underlying
// BlockOffsetSharedArray. We manipulate this BOT in the normal way
// as we would for any contiguous space. However, on occasion we
// need to do some buffer surgery at the extremities before we
// start using the body of the buffer for allocations. Such surgery
// (as explained elsewhere) is to prevent allocation on a card that
// is in the process of being walked concurrently by another GC thread.
// When such surgery happens at a point that is far removed (to the
// right of the current allocation point, top), we use the "contig"
// parameter below to directly manipulate the shared array without
// modifying the _next_threshold state in the BOT.
void ParGCAllocBufferWithBOT::fill_region_with_block(MemRegion mr,
                                                     bool contig) {
  CollectedHeap::fill_with_object(mr);
  if (contig) {
    _bt.alloc_block(mr.start(), mr.end());
  } else {
    _bt.BlockOffsetArray::alloc_block(mr.start(), mr.end());
  }
}
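
// Note (added): the virtual _bt.alloc_block(...) call above records the
// block via the contiguous-space BOT and advances its allocation
// threshold; the qualified _bt.BlockOffsetArray::alloc_block(...) form
// dispatches statically to the base class, updating the shared offset
// array while leaving the threshold state untouched. Callers pass
// contig == false when the threshold must be left alone, e.g. for
// surgery beyond the current allocation point.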

HeapWord* ParGCAllocBufferWithBOT::allocate_slow(size_t word_sz) {
  HeapWord* res = NULL;
  if (_true_end > _hard_end) {
    assert((HeapWord*)align_size_down(intptr_t(_hard_end),
                                      ChunkSizeInBytes) == _hard_end,
           "or else _true_end should be equal to _hard_end");
    assert(_retained, "or else _true_end should be equal to _hard_end");
    assert(_retained_filler.end() <= _top, "INVARIANT");
    CollectedHeap::fill_with_object(_retained_filler);
    if (_top < _hard_end) {
      fill_region_with_block(MemRegion(_top, _hard_end), true);
    }
    HeapWord* next_hard_end = MIN2(_true_end, _hard_end + ChunkSizeInWords);
    _retained_filler = MemRegion(_hard_end, FillerHeaderSize);
    _bt.alloc_block(_retained_filler.start(), _retained_filler.word_size());
    _top      = _retained_filler.end();
    _hard_end = next_hard_end;
    _end      = _hard_end - AlignmentReserve;
    res = ParGCAllocBuffer::allocate(word_sz);
    if (res != NULL) {
      _bt.alloc_block(res, word_sz);
    }
  }
  return res;
}
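
// Note (added): allocate_slow() is the refill path for a retained buffer
// whose usable window ends at _hard_end but whose real extent runs to
// _true_end. It closes out the current chunk with filler blocks, advances
// _hard_end by at most ChunkSizeInWords, seeds a fresh retained filler at
// the old boundary, and then retries the allocation in the widened window.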

void
ParGCAllocBufferWithBOT::undo_allocation(HeapWord* obj, size_t word_sz) {
  ParGCAllocBuffer::undo_allocation(obj, word_sz);
  // This may back us up beyond the previous threshold, so reset.
  _bt.set_region(MemRegion(_top, _hard_end));
  _bt.initialize_threshold();
}
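
// Note (added): this undo path is typically taken when a parallel
// scavenge thread copies an object into its PLAB and then loses the
// forwarding race to another thread. Rolling _top back may land below
// the BOT's previously recorded threshold, hence the region reset and
// threshold recomputation above.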

void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
  assert(!retain || end_of_gc, "Can only retain at GC end.");
  if (_retained) {
    // We're about to make the retained_filler into a block.
    _bt.BlockOffsetArray::alloc_block(_retained_filler.start(),
                                      _retained_filler.end());
  }
  // Reset _hard_end to _true_end (and update _end)
  if (retain && _hard_end != NULL) {
    assert(_hard_end <= _true_end, "Invariant.");
    _hard_end = _true_end;
    _end      = MAX2(_top, _hard_end - AlignmentReserve);
    assert(_end <= _hard_end, "Invariant.");
  }
  _true_end = _hard_end;
  HeapWord* pre_top = _top;

  ParGCAllocBuffer::retire(end_of_gc, retain);
  // Now any old _retained_filler is cut back to size, the free part is
  // filled with a filler object, and top is past the header of that
  // object.

  if (retain && _top < _end) {
    assert(end_of_gc && retain, "Or else retain should be false.");
    // If the lab does not start on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support. So we fill
    // the first card with a garbage object.
    size_t first_card_index = _bsa->index_for(pre_top);
    HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
    if (first_card_start < pre_top) {
      HeapWord* second_card_start =
        _bsa->inc_by_region_size(first_card_start);

      // Ensure enough room to fill with the smallest block
      second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);

      // If the end is already in the first card, don't go beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_hard_end < second_card_start ||
          pointer_delta(_hard_end, second_card_start) < AlignmentReserve) {
        second_card_start = _hard_end;
      }
      if (pre_top < second_card_start) {
        MemRegion first_card_suffix(pre_top, second_card_start);
        fill_region_with_block(first_card_suffix, true);
      }
      pre_top = second_card_start;
      _top    = pre_top;
      _end    = MAX2(_top, _hard_end - AlignmentReserve);
    }

    // If the lab does not end on a card boundary, we don't want to
    // allocate onto that card, since that might lead to concurrent
    // allocation and card scanning, which we don't support. So we fill
    // the last card with a garbage object.
    size_t last_card_index = _bsa->index_for(_hard_end);
    HeapWord* last_card_start = _bsa->address_for_index(last_card_index);
    if (last_card_start < _hard_end) {

      // Ensure enough room to fill with the smallest block
      last_card_start = MIN2(last_card_start, _hard_end - AlignmentReserve);

      // If the top is already in the last card, don't go back beyond it!
      // Or if the remainder is too small for a filler object, gobble it up.
      if (_top > last_card_start ||
          pointer_delta(last_card_start, _top) < AlignmentReserve) {
        last_card_start = _top;
      }
      if (last_card_start < _hard_end) {
        MemRegion last_card_prefix(last_card_start, _hard_end);
        fill_region_with_block(last_card_prefix, false);
      }
      _hard_end = last_card_start;
      _end      = MAX2(_top, _hard_end - AlignmentReserve);
      _true_end = _hard_end;
      assert(_end <= _hard_end, "Invariant.");
    }

    // At this point:
    // 1) we had a filler object from the original top to hard_end.
    // 2) We've filled in any partial cards at the front and back.
    if (pre_top < _hard_end) {
      // Now we can reset the _bt to do allocation in the given area.
      MemRegion new_filler(pre_top, _hard_end);
      fill_region_with_block(new_filler, false);
      _top = pre_top + ParGCAllocBuffer::FillerHeaderSize;
      // If there's no space left, don't retain.
      if (_top >= _end) {
        _retained = false;
        invalidate();
        return;
      }
      _retained_filler = MemRegion(pre_top, _top);
      _bt.set_region(MemRegion(_top, _hard_end));
      _bt.initialize_threshold();
      assert(_bt.threshold() > _top, "initialize_threshold failed!");

      // There may be other reasons for queries into the middle of the
      // filler object. When such queries are done in parallel with
      // allocation, bad things can happen, if the query involves object
      // iteration. So we ensure that such queries do not involve object
      // iteration, by putting another filler object on the boundaries of
      // such queries. One such is the object spanning a parallel card
      // chunk boundary.

      // "chunk_boundary" is the address of the first chunk boundary less
      // than "hard_end".
      HeapWord* chunk_boundary =
        (HeapWord*)align_size_down(intptr_t(_hard_end - 1), ChunkSizeInBytes);
      assert(chunk_boundary < _hard_end, "Or else above did not work.");
      assert(pointer_delta(_true_end, chunk_boundary) >= AlignmentReserve,
             "Consequence of last card handling above.");

      if (_top <= chunk_boundary) {
        assert(_true_end == _hard_end, "Invariant.");
        while (_top <= chunk_boundary) {
          assert(pointer_delta(_hard_end, chunk_boundary) >= AlignmentReserve,
                 "Consequence of last card handling above.");
          _bt.BlockOffsetArray::alloc_block(chunk_boundary, _hard_end);
          CollectedHeap::fill_with_object(chunk_boundary, _hard_end);
          _hard_end = chunk_boundary;
          chunk_boundary -= ChunkSizeInWords;
        }
        _end = _hard_end - AlignmentReserve;
        assert(_top <= _end, "Invariant.");
        // Now reset the initial filler chunk so it doesn't overlap with
        // the one(s) inserted above.
        MemRegion new_filler(pre_top, _hard_end);
        fill_region_with_block(new_filler, false);
      }
    } else {
      _retained = false;
      invalidate();
    }
  } else {
    assert(!end_of_gc ||
           (!_retained && _true_end == _hard_end), "Checking.");
  }
  assert(_end <= _hard_end, "Invariant.");
  assert(_top < _end || _top == _hard_end, "Invariant");
}