Thu, 20 Sep 2012 09:52:56 -0700
7190666: G1: assert(_unused == 0) failed: Inconsistency in PLAB stats
Summary: Reset the fields in ParGCAllocBuffer that are used to accumulate values for the ResizePLAB sensors in PLABStats to zero after flushing the values to the PLABStats fields. Flush the PLABStats values only when retiring the final allocation buffers prior to disposing of a G1ParScanThreadState object, rather than when retiring every allocation buffer.
Reviewed-by: jwilhelm, jmasa, ysr
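
Before the file body, a minimal self-contained sketch of the flush-and-reset pattern the summary describes. The class names mirror the HotSpot ones, but the fields and methods are simplified stand-ins, not the real interfaces:

// A minimal sketch of the fix: accumulate per-buffer values locally,
// flush to the shared stats only on final retirement, and reset the
// local fields after flushing so nothing is double-counted.
#include <cassert>
#include <cstddef>

struct PLABStats {
  size_t _allocated;  // total words allocated in PLABs
  size_t _wasted;     // words lost when non-final buffers were retired
  size_t _unused;     // words left over in the final buffers
  PLABStats() : _allocated(0), _wasted(0), _unused(0) {}
};

class ParGCAllocBuffer {
  size_t _allocated;  // accumulated across this thread's buffers
  size_t _wasted;
  size_t _unused;
public:
  ParGCAllocBuffer() : _allocated(0), _wasted(0), _unused(0) {}

  // Retiring an intermediate buffer only accumulates locally ...
  void retire_intermediate(size_t allocated, size_t wasted) {
    _allocated += allocated;
    _wasted    += wasted;
  }

  // ... while retiring the final buffer records the leftover space and
  // flushes everything to the shared stats, then resets the local fields
  // so a later flush cannot double-count them.
  void retire_final(size_t allocated, size_t unused, PLABStats* stats) {
    _allocated += allocated;
    _unused    += unused;
    stats->_allocated += _allocated;
    stats->_wasted    += _wasted;
    stats->_unused    += _unused;
    _allocated = _wasted = _unused = 0;
  }

  // The invariant the original assert checked: no stale leftover
  // counts may linger in the buffer after a flush.
  void verify_reset() const { assert(_unused == 0); }
};

int main() {
  PLABStats stats;
  ParGCAllocBuffer buf;
  buf.retire_intermediate(256, 8);    // mid-GC buffer retirements
  buf.retire_final(128, 16, &stats);  // final retirement flushes once
  buf.verify_reset();                 // holds because the fields were reset
  assert(stats._allocated == 384 && stats._wasted == 8 && stats._unused == 16);
  return 0;
}
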
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#ifndef SERIALGC
#include "gc_implementation/shared/mutableSpace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
#endif

MutableSpace::MutableSpace(size_t alignment): ImmutableSpace(), _top(NULL), _alignment(alignment) {
  assert(MutableSpace::alignment() >= 0 &&
         MutableSpace::alignment() % os::vm_page_size() == 0,
         "Space should be aligned");
  _mangler = new MutableSpaceMangler(this);
}

MutableSpace::~MutableSpace() {
  delete _mangler;
}

void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
  if (!mr.is_empty()) {
    size_t page_size = UseLargePages ? alignment() : os::vm_page_size();
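    // Shrink the region inward to whole pages; partial pages at the
    // edges are left untouched.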
    HeapWord *start = (HeapWord*)round_to((intptr_t) mr.start(), page_size);
    HeapWord *end = (HeapWord*)round_down((intptr_t) mr.end(), page_size);
    if (end > start) {
      size_t size = pointer_delta(end, start, sizeof(char));
      if (clear_space) {
        // Prefer page reallocation to migration.
        os::free_memory((char*)start, size, page_size);
      }
      os::numa_make_global((char*)start, size);
    }
  }
}

void MutableSpace::pretouch_pages(MemRegion mr) {
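  // Touch one byte on every page: the read-then-write-back forces the OS
  // to commit physical backing for the whole region up front.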
  for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
    char t = *p; *p = t;
  }
}

void MutableSpace::initialize(MemRegion mr,
                              bool clear_space,
                              bool mangle_space,
                              bool setup_pages) {

  assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
         "invalid space boundaries");

  if (setup_pages && (UseNUMA || AlwaysPreTouch)) {
    // The space may move left and right or expand/shrink.
    // We'd like to enforce the desired page placement.
    MemRegion head, tail;
    if (last_setup_region().is_empty()) {
      // If it's the first initialization don't limit the amount of work.
      head = mr;
      tail = MemRegion(mr.end(), mr.end());
    } else {
      // Is there an intersection with the address space?
      MemRegion intersection = last_setup_region().intersection(mr);
      if (intersection.is_empty()) {
        intersection = MemRegion(mr.end(), mr.end());
      }
      // All the sizes below are in words.
      size_t head_size = 0, tail_size = 0;
      if (mr.start() <= intersection.start()) {
        head_size = pointer_delta(intersection.start(), mr.start());
      }
      if (intersection.end() <= mr.end()) {
        tail_size = pointer_delta(mr.end(), intersection.end());
      }
      // Limit the amount of page manipulation if necessary.
      if (NUMASpaceResizeRate > 0 && !AlwaysPreTouch) {
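        // NUMASpaceResizeRate is a byte budget; convert it to words and
        // split it between the head and tail regions in proportion to
        // their sizes, so no single resize touches too many pages.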
        const size_t change_size = head_size + tail_size;
        const float setup_rate_words = NUMASpaceResizeRate >> LogBytesPerWord;
        head_size = MIN2((size_t)(setup_rate_words * head_size / change_size),
                         head_size);
        tail_size = MIN2((size_t)(setup_rate_words * tail_size / change_size),
                         tail_size);
      }
      head = MemRegion(intersection.start() - head_size, intersection.start());
      tail = MemRegion(intersection.end(), intersection.end() + tail_size);
    }
    assert(mr.contains(head) && mr.contains(tail), "Sanity");

    if (UseNUMA) {
      numa_setup_pages(head, clear_space);
      numa_setup_pages(tail, clear_space);
    }

    if (AlwaysPreTouch) {
      pretouch_pages(head);
      pretouch_pages(tail);
    }

    // Remember where we stopped so that we can continue later.
    set_last_setup_region(MemRegion(head.start(), tail.end()));
  }

  set_bottom(mr.start());
  set_end(mr.end());

  if (clear_space) {
    clear(mangle_space);
  }
}

void MutableSpace::clear(bool mangle_space) {
  set_top(bottom());
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

#ifndef PRODUCT
void MutableSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void MutableSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void MutableSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}

void MutableSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}

void MutableSpace::mangle_region(MemRegion mr) {
  SpaceMangler::mangle_region(mr);
}

void MutableSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}

void MutableSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
#endif

// This version requires locking.
HeapWord* MutableSpace::allocate(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end(), obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
           "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
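// Multiple threads may race to bump top; a loser simply retries, so the
// loop terminates either with a successful allocation or with NULL once
// the remaining space is too small.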
HeapWord* MutableSpace::cas_allocate(size_t size) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the value another thread installed in top; retry
      if (result != obj) {
        continue; // another thread beat us to the allocation, try again
      }
      assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
             "checking alignment");
      return obj;
    } else {
      return NULL;
    }
  } while (true);
}

// Try to deallocate previous allocation. Returns true upon success.
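// Only the most recent allocation can be rolled back, and only if no other
// thread has allocated since: the CAS succeeds only while top still equals
// obj + size.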
bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
  HeapWord* expected_top = obj + size;
  return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top;
}

void MutableSpace::oop_iterate(ExtendedOopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(cl);
  }
}

void MutableSpace::oop_iterate_no_header(OopClosure* cl) {
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate_no_header(cl);
  }
}

void MutableSpace::object_iterate(ObjectClosure* cl) {
  HeapWord* p = bottom();
  while (p < top()) {
    cl->do_object(oop(p));
    p += oop(p)->size();
  }
}

void MutableSpace::print_short() const { print_short_on(tty); }
void MutableSpace::print_short_on( outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K,
            (int) ((double) used_in_bytes() * 100 / capacity_in_bytes()));
}

void MutableSpace::print() const { print_on(tty); }
void MutableSpace::print_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void MutableSpace::verify() {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
}