Thu, 27 May 2010 19:08:38 -0700
6941466: Oracle rebranding changes for Hotspot repositories
Summary: Change all the Sun copyrights to Oracle copyright
Reviewed-by: ohair

/*
 * Copyright (c) 2006, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_mutableNUMASpace.cpp.incl"

MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
  _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true);
  _page_size = os::vm_page_size();
  _adaptation_cycles = 0;
  _samples_count = 0;
  update_layout(true);
}

MutableNUMASpace::~MutableNUMASpace() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    delete lgrp_spaces()->at(i);
  }
  delete lgrp_spaces();
}

#ifndef PRODUCT
void MutableNUMASpace::mangle_unused_area() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_unused_area_complete() {
  // This method should do nothing.
  // It can be called on a numa space during a full compaction.
}
void MutableNUMASpace::mangle_region(MemRegion mr) {
  // This method should do nothing because numa spaces are not mangled.
}
void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
  assert(false, "Do not mangle MutableNUMASpace's");
}
void MutableNUMASpace::set_top_for_allocations() {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
  // This method should do nothing.
}
void MutableNUMASpace::check_mangled_unused_area_complete() {
  // This method should do nothing.
}
#endif  // NOT_PRODUCT

// There may be unallocated holes in the middle chunks
// that should be filled with dead objects to ensure parsability.
void MutableNUMASpace::ensure_parsability() {
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    if (s->top() < top()) { // For all spaces preceding the one containing top()
      if (s->free_in_words() > 0) {
        size_t area_touched_words = pointer_delta(s->end(), s->top());
        CollectedHeap::fill_with_object(s->top(), area_touched_words);
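        // Note (presumably): in product builds without ZapUnusedHeapArea the
        // fill above writes only the filler object's header, so only those
        // words can have been first-touched; area_touched_words is therefore
        // shrunk below before the invalid region is computed from it.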
#ifndef ASSERT
        if (!ZapUnusedHeapArea) {
          area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
                                    area_touched_words);
        }
#endif
        if (!os::numa_has_static_binding()) {
          MemRegion invalid;
          HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size());
          HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words),
                                                       os::vm_page_size());
          if (crossing_start != crossing_end) {
            // If the object header crossed a small page boundary, we mark the
            // area as invalid, rounding it to page_size().
            HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
            HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()),
                                 s->end());
            invalid = MemRegion(start, end);
          }

          ls->add_invalid_region(invalid);
        }
      }
    } else {
      if (!os::numa_has_static_binding()) {
#ifdef ASSERT
        MemRegion invalid(s->top(), s->end());
        ls->add_invalid_region(invalid);
#else
        if (ZapUnusedHeapArea) {
          MemRegion invalid(s->top(), s->end());
          ls->add_invalid_region(invalid);
        } else {
          return;
        }
#endif
      } else {
        return;
      }
    }
  }
}

size_t MutableNUMASpace::used_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->used_in_words();
  }
  return s;
}

size_t MutableNUMASpace::free_in_words() const {
  size_t s = 0;
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    s += lgrp_spaces()->at(i)->space()->free_in_words();
  }
  return s;
}

size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    // This case can occur after the topology of the system has
    // changed. Threads can change their location; the new home
    // group will be determined during the first allocation
    // attempt. For now we can safely assume that all spaces
    // have equal size because the whole space will be reinitialized.
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  // That's the normal case, where we know the locality group of the thread.
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
}

size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
  // Please see the comments for tlab_capacity().
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return free_in_bytes() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->free_in_bytes();
}

size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
  guarantee(thr != NULL, "No thread");
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1) {
    if (lgrp_spaces()->length() > 0) {
      return capacity_in_words() / lgrp_spaces()->length();
    } else {
      assert(false, "There should be at least one locality group");
      return 0;
    }
  }
  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  if (i == -1) {
    return 0;
  }
  return lgrp_spaces()->at(i)->space()->capacity_in_words();
}

// Check if the NUMA topology has changed. Add and remove spaces if needed.
// The update can be forced by setting the force parameter equal to true.
bool MutableNUMASpace::update_layout(bool force) {
  // Check if the topology has changed.
  bool changed = os::numa_topology_changed();
  if (force || changed) {
    // Compute lgrp intersection. Add/remove spaces.
    int lgrp_limit = (int)os::numa_get_groups_num();
    int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit);
    int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
    assert(lgrp_num > 0, "There should be at least one locality group");
    // Add new spaces for the new nodes
    for (int i = 0; i < lgrp_num; i++) {
      bool found = false;
      for (int j = 0; j < lgrp_spaces()->length(); j++) {
        if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
          found = true;
          break;
        }
      }
      if (!found) {
        lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
      }
    }

    // Remove spaces for the removed nodes.
    for (int i = 0; i < lgrp_spaces()->length();) {
      bool found = false;
      for (int j = 0; j < lgrp_num; j++) {
        if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
          found = true;
          break;
        }
      }
      if (!found) {
        delete lgrp_spaces()->at(i);
        lgrp_spaces()->remove_at(i);
      } else {
        i++;
      }
    }

    FREE_C_HEAP_ARRAY(int, lgrp_ids);

    if (changed) {
      for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
        thread->set_lgrp_id(-1);
      }
    }
    return true;
  }
  return false;
}

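// Note: the constructor calls update_layout(true) to build the initial
// per-lgrp space list, while update() calls update_layout(false), so the
// layout is rebuilt only when os::numa_topology_changed() reports a change.
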
// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    // First we tell the OS which page size we want in the given range. The underlying
    // large page can be broken down if we require small pages.
    os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
    // Then we uncommit the pages in the range.
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
    // And make them local/first-touch biased.
    os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
  }
}

// Free all pages in the region.
void MutableNUMASpace::free_region(MemRegion mr) {
  HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
  HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
  if (end > start) {
    MemRegion aligned_region(start, end);
    assert((intptr_t)aligned_region.start()     % page_size() == 0 &&
           (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
    assert(region().contains(aligned_region), "Sanity");
    os::free_memory((char*)aligned_region.start(), aligned_region.byte_size());
  }
}

// Update space layout. Perform adaptation.
void MutableNUMASpace::update() {
  if (update_layout(false)) {
    // If the topology has changed, make all chunks zero-sized
    // and clear the alloc-rate statistics.
    // In the future we may want to handle this more gracefully in order
    // to avoid the reallocation of the pages as much as possible.
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      MutableSpace *s = ls->space();
      s->set_end(s->bottom());
      s->set_top(s->bottom());
      ls->clear_alloc_rate();
    }
    // A NUMA space is never mangled
    initialize(region(),
               SpaceDecorator::Clear,
               SpaceDecorator::DontMangle);
  } else {
    bool should_initialize = false;
    if (!os::numa_has_static_binding()) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
          should_initialize = true;
          break;
        }
      }
    }

    if (should_initialize ||
        (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
      // A NUMA space is never mangled
      initialize(region(),
                 SpaceDecorator::Clear,
                 SpaceDecorator::DontMangle);
    }
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }

  scan_pages(NUMAPageScanRate);
}

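// Note on the adaptation handshake: accumulate_statistics() increments
// samples_count() after each sampling round, and initialize() records the
// count it adapted to via set_adaptation_cycles(). update() therefore
// re-runs initialize() whenever adaptation_cycles() < samples_count(),
// i.e. whenever fresh samples have arrived since the last adaptation.
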
// Scan pages. Free pages that have a smaller size or the wrong placement.
void MutableNUMASpace::scan_pages(size_t page_count)
{
  size_t pages_per_chunk = page_count / lgrp_spaces()->length();
  if (pages_per_chunk > 0) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      LGRPSpace *ls = lgrp_spaces()->at(i);
      ls->scan_pages(page_size(), pages_per_chunk);
    }
  }
}

// Accumulate statistics about the allocation rate of each lgrp.
void MutableNUMASpace::accumulate_statistics() {
  if (UseAdaptiveNUMAChunkSizing) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->sample();
    }
    increment_samples_count();
  }

  if (NUMAStats) {
    for (int i = 0; i < lgrp_spaces()->length(); i++) {
      lgrp_spaces()->at(i)->accumulate_statistics(page_size());
    }
  }
}

// Get the current size of a chunk.
// This function computes the size of the chunk based on the
// difference between chunk ends. This allows it to work correctly in
// case the whole space is resized and during the process of adaptive
// chunk resizing.
size_t MutableNUMASpace::current_chunk_size(int i) {
  HeapWord *cur_end, *prev_end;
  if (i == 0) {
    prev_end = bottom();
  } else {
    prev_end = lgrp_spaces()->at(i - 1)->space()->end();
  }
  if (i == lgrp_spaces()->length() - 1) {
    cur_end = end();
  } else {
    cur_end = lgrp_spaces()->at(i)->space()->end();
  }
  if (cur_end > prev_end) {
    return pointer_delta(cur_end, prev_end, sizeof(char));
  }
  return 0;
}

// Return the default chunk size by equally dividing the space.
// page_size() aligned.
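// Example (illustrative numbers): with base_space_size() == 1022 pages and
// 4 lgrps, each chunk gets 1022 / 4 == 255 pages, i.e. 255 * page_size()
// bytes; the truncating integer division keeps the result page_size() aligned.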
size_t MutableNUMASpace::default_chunk_size() {
  return base_space_size() / lgrp_spaces()->length() * page_size();
}

// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on a sequence of i's from 0 to
// lgrp_spaces()->length().
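// Example (illustrative numbers): suppose 3 lgrps with average allocation
// rates in ratio 2:1:1 and pages_available == 400 at i == 0. The rates of
// chunks i..2 sum to 4 parts, so chunk 0 is sized at about 2/4 * 400 == 200
// pages worth of bytes, before the limit clamping at the end of the function.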
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
  size_t pages_available = base_space_size();
  for (int j = 0; j < i; j++) {
    pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
  }
  pages_available -= lgrp_spaces()->length() - i - 1;
  assert(pages_available > 0, "No pages left");
  float alloc_rate = 0;
  for (int j = i; j < lgrp_spaces()->length(); j++) {
    alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
  }
  size_t chunk_size = 0;
  if (alloc_rate > 0) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
  }
  chunk_size = MAX2(chunk_size, page_size());

  if (limit > 0) {
    limit = round_down(limit, page_size());
    if (chunk_size > current_chunk_size(i)) {
      size_t upper_bound = pages_available * page_size();
      if (upper_bound > limit &&
          current_chunk_size(i) < upper_bound - limit) {
        // The resulting upper bound should not exceed the available
        // amount of memory (pages_available * page_size()).
        upper_bound = current_chunk_size(i) + limit;
      }
      chunk_size = MIN2(chunk_size, upper_bound);
    } else {
      size_t lower_bound = page_size();
      if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
        lower_bound = current_chunk_size(i) - limit;
      }
      chunk_size = MAX2(chunk_size, lower_bound);
    }
  }
  assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
  return chunk_size;
}

// Return the bottom_region and the top_region. Align them to page_size() boundary.
// |------------------new_region---------------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
                                    MemRegion* bottom_region, MemRegion *top_region) {
  // Is there bottom?
  if (new_region.start() < intersection.start()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
      if (new_region.contains(p)
          && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(p, intersection.end());
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *bottom_region = MemRegion(new_region.start(), intersection.start());
  } else {
    *bottom_region = MemRegion();
  }

  // Is there top?
  if (intersection.end() < new_region.end()) { // Yes
    // Try to coalesce small pages into a large one.
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
      if (new_region.contains(p)
          && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
        if (intersection.contains(p)) {
          intersection = MemRegion(intersection.start(), p);
        } else {
          intersection = MemRegion(p, p);
        }
      }
    }
    *top_region = MemRegion(intersection.end(), new_region.end());
  } else {
    *top_region = MemRegion();
  }
}

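// Note: initialize() passes the tails selected above to free_region() or
// bias_region(). Snapping the intersection to alignment() boundaries here
// (presumably) keeps large pages wholly inside one tail, so that
// os::realign_memory() can later rebuild them as large pages.
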
// Try to merge the invalid region with the bottom or top region by decreasing
// the intersection area. Return a non-empty invalid_region, aligned to the
// page_size() boundary, only if it lies inside the intersection and could
// not be merged away.
// |------------------new_region---------------------------------|
// |----------------|-------invalid---|--------------------------|
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
                                     MemRegion *invalid_region) {
  if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
    *intersection = MemRegion(invalid_region->end(), intersection->end());
    *invalid_region = MemRegion();
  } else
  if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
    *intersection = MemRegion(intersection->start(), invalid_region->start());
    *invalid_region = MemRegion();
  } else
  if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
    *intersection = MemRegion(new_region.start(), new_region.start());
    *invalid_region = MemRegion();
  } else
  if (intersection->contains(invalid_region)) {
    // That's the only case in which we have to make an additional bias_region() call.
    HeapWord* start = invalid_region->start();
    HeapWord* end = invalid_region->end();
    if (UseLargePages && page_size() >= alignment()) {
      HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
      if (new_region.contains(p)) {
        start = p;
      }
      p = (HeapWord*)round_to((intptr_t) end, alignment());
      if (new_region.contains(end)) {
        end = p;
      }
    }
    if (intersection->start() > start) {
      *intersection = MemRegion(start, intersection->end());
    }
    if (intersection->end() < end) {
      *intersection = MemRegion(intersection->start(), end);
    }
    *invalid_region = MemRegion(start, end);
  }
}

void MutableNUMASpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space,
                                  bool setup_pages) {
  assert(clear_space, "Reallocation will destroy data!");
  assert(lgrp_spaces()->length() > 0, "There should be at least one space");

  MemRegion old_region = region(), new_region;
  set_bottom(mr.start());
  set_end(mr.end());
  // Must always clear the space
  clear(SpaceDecorator::DontMangle);

  // Compute chunk sizes
  size_t prev_page_size = page_size();
  set_page_size(UseLargePages ? alignment() : os::vm_page_size());
  HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
  HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
  size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();

  // Try small pages if the chunk size is too small
  if (base_space_size_pages / lgrp_spaces()->length() == 0
      && page_size() > (size_t)os::vm_page_size()) {
    set_page_size(os::vm_page_size());
    rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
    rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
    base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
  }
  guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
  set_base_space_size(base_space_size_pages);

  // Handle space resize
  MemRegion top_region, bottom_region;
  if (!old_region.equals(region())) {
    new_region = MemRegion(rounded_bottom, rounded_end);
    MemRegion intersection = new_region.intersection(old_region);
    if (intersection.start() == NULL ||
        intersection.end() == NULL ||
        prev_page_size > page_size()) { // If the page size got smaller we have to change
                                        // the page size preference for the whole space.
      intersection = MemRegion(new_region.start(), new_region.start());
    }
    select_tails(new_region, intersection, &bottom_region, &top_region);
    bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
    bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
  }

  // Check if the space layout has changed significantly.
  // This happens when the space has been resized so that either head or tail
  // chunk became less than a page.
  bool layout_valid = UseAdaptiveNUMAChunkSizing &&
                      current_chunk_size(0) > page_size() &&
                      current_chunk_size(lgrp_spaces()->length() - 1) > page_size();

  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    old_region = s->region();

    size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
    if (i < lgrp_spaces()->length() - 1) {
      if (!UseAdaptiveNUMAChunkSizing ||
          (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
          samples_count() < AdaptiveSizePolicyReadyThreshold) {
        // No adaptation. Divide the space equally.
        chunk_byte_size = default_chunk_size();
      } else
      if (!layout_valid || NUMASpaceResizeRate == 0) {
        // Fast adaptation. If no space resize rate is set, resize
        // the chunks instantly.
        chunk_byte_size = adaptive_chunk_size(i, 0);
      } else {
        // Slow adaptation. Resize the chunks moving no more than
        // NUMASpaceResizeRate bytes per collection.
        size_t limit = NUMASpaceResizeRate /
                       (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
605 }
607 assert(chunk_byte_size >= page_size(), "Chunk size too small");
608 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
609 }
611 if (i == 0) { // Bottom chunk
612 if (i != lgrp_spaces()->length() - 1) {
613 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
614 } else {
615 new_region = MemRegion(bottom(), end());
616 }
617 } else
618 if (i < lgrp_spaces()->length() - 1) { // Middle chunks
619 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
620 new_region = MemRegion(ps->end(),
621 ps->end() + (chunk_byte_size >> LogHeapWordSize));
622 } else { // Top chunk
623 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
624 new_region = MemRegion(ps->end(), end());
625 }
626 guarantee(region().contains(new_region), "Region invariant");
629 // The general case:
630 // |---------------------|--invalid---|--------------------------|
631 // |------------------new_region---------------------------------|
632 // |----bottom_region--|---intersection---|------top_region------|
633 // |----old_region----|
634 // The intersection part has all pages in place we don't need to migrate them.
635 // Pages for the top and bottom part should be freed and then reallocated.
637 MemRegion intersection = old_region.intersection(new_region);
639 if (intersection.start() == NULL || intersection.end() == NULL) {
640 intersection = MemRegion(new_region.start(), new_region.start());
641 }
643 if (!os::numa_has_static_binding()) {
644 MemRegion invalid_region = ls->invalid_region().intersection(new_region);
645 // Invalid region is a range of memory that could've possibly
646 // been allocated on the other node. That's relevant only on Solaris where
647 // there is no static memory binding.
648 if (!invalid_region.is_empty()) {
649 merge_regions(new_region, &intersection, &invalid_region);
650 free_region(invalid_region);
651 ls->set_invalid_region(MemRegion());
652 }
653 }
655 select_tails(new_region, intersection, &bottom_region, &top_region);
657 if (!os::numa_has_static_binding()) {
658 // If that's a system with the first-touch policy then it's enough
659 // to free the pages.
660 free_region(bottom_region);
661 free_region(top_region);
662 } else {
663 // In a system with static binding we have to change the bias whenever
664 // we reshape the heap.
665 bias_region(bottom_region, ls->lgrp_id());
666 bias_region(top_region, ls->lgrp_id());
667 }
669 // Clear space (set top = bottom) but never mangle.
670 s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
672 set_adaptation_cycles(samples_count());
673 }
674 }
// Set the top of the whole space.
// Mark the holes in chunks below the top() as invalid.
void MutableNUMASpace::set_top(HeapWord* value) {
  bool found_top = false;
  for (int i = 0; i < lgrp_spaces()->length();) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    MutableSpace *s = ls->space();
    HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());

    if (s->contains(value)) {
      // Check if setting the chunk's top to the given value would create a
      // hole smaller than a minimal object. If this is the last chunk, we
      // don't care.
      if (i < lgrp_spaces()->length() - 1) {
        size_t remainder = pointer_delta(s->end(), value);
        const size_t min_fill_size = CollectedHeap::min_fill_size();
        if (remainder < min_fill_size && remainder > 0) {
          // Add a minimum size filler object; it will cross the chunk boundary.
          CollectedHeap::fill_with_object(value, min_fill_size);
          value += min_fill_size;
          assert(!s->contains(value), "Should be in the next chunk");
          // Restart the loop from the same chunk, since the value has moved
          // to the next one.
          continue;
        }
      }

      if (!os::numa_has_static_binding() && top < value && top < s->end()) {
        ls->add_invalid_region(MemRegion(top, value));
      }
      s->set_top(value);
      found_top = true;
    } else {
      if (found_top) {
        s->set_top(s->bottom());
      } else {
        if (!os::numa_has_static_binding() && top < s->end()) {
          ls->add_invalid_region(MemRegion(top, s->end()));
        }
        s->set_top(s->end());
      }
    }
    i++;
  }
  MutableSpace::set_top(value);
}

void MutableNUMASpace::clear(bool mangle_space) {
  MutableSpace::set_top(bottom());
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    // Never mangle NUMA spaces because the mangling will
    // bind the memory to a possibly unwanted lgroup.
    lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
  }
}

/*
  Linux supports static memory binding, so most of the logic dealing with
  possibly invalid page allocation is effectively disabled. Besides, there
  is no notion of a home node in Linux: a thread may migrate freely,
  although the scheduler is rather reluctant to move threads between nodes.
  We therefore check for the current node on every allocation; with high
  probability a thread stays on the same node for some time, allowing local
  access to recently allocated objects.
 */

HeapWord* MutableNUMASpace::allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);

  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }

  LGRPSpace* ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->allocate(size);

  if (p != NULL) {
    size_t remainder = s->free_in_words();
    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
      s->set_top(s->top() - size);
      p = NULL;
    }
  }
  if (p != NULL) {
    if (top() < s->top()) { // Keep _top updated.
      MutableSpace::set_top(s->top());
    }
  }
  // Make the page allocation happen here if there is no static binding.
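  // (Writing one word per page while this thread runs on its home node
  // commits the pages, so the first-touch policy presumably places them
  // locally; the zeroed words lie inside the newly allocated object, which
  // the caller will initialize anyway.)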
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

// This version is lock-free.
HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
  Thread* thr = Thread::current();
  int lgrp_id = thr->lgrp_id();
  if (lgrp_id == -1 || !os::numa_has_group_homing()) {
    lgrp_id = os::numa_get_group_id();
    thr->set_lgrp_id(lgrp_id);
  }

  int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
  // It is possible that a new CPU has been hotplugged and
  // we haven't reshaped the space accordingly.
  if (i == -1) {
    i = os::random() % lgrp_spaces()->length();
  }
  LGRPSpace *ls = lgrp_spaces()->at(i);
  MutableSpace *s = ls->space();
  HeapWord *p = s->cas_allocate(size);
  if (p != NULL) {
    size_t remainder = pointer_delta(s->end(), p + size);
    if (remainder < (size_t)oopDesc::header_size() && remainder > 0) {
      if (s->cas_deallocate(p, size)) {
        // We were the last to allocate and created a fragment less than
        // a minimal object.
        p = NULL;
      } else {
        guarantee(false, "Deallocation should always succeed");
      }
    }
  }
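  // Publish the new top: retry the CAS until either our chunk top is visible
  // or another thread has advanced _top past it, so _top only moves forward.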
  if (p != NULL) {
    HeapWord* cur_top, *cur_chunk_top = p + size;
    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
        break;
      }
    }
  }

  // Make the page allocation happen here if there is no static binding.
  if (p != NULL && !os::numa_has_static_binding()) {
    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
      *(int*)i = 0;
    }
  }
  if (p == NULL) {
    ls->set_allocation_failed();
  }
  return p;
}

void MutableNUMASpace::print_short_on(outputStream* st) const {
  MutableSpace::print_short_on(st);
  st->print(" (");
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
    lgrp_spaces()->at(i)->space()->print_short_on(st);
    if (i < lgrp_spaces()->length() - 1) {
      st->print(", ");
    }
  }
  st->print(")");
}

void MutableNUMASpace::print_on(outputStream* st) const {
  MutableSpace::print_on(st);
  for (int i = 0; i < lgrp_spaces()->length(); i++) {
    LGRPSpace *ls = lgrp_spaces()->at(i);
    st->print("    lgrp %d", ls->lgrp_id());
    ls->space()->print_on(st);
    if (NUMAStats) {
      for (int i = 0; i < lgrp_spaces()->length(); i++) {
        lgrp_spaces()->at(i)->accumulate_statistics(page_size());
      }
      st->print("    local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
                ls->space_stats()->_local_space / K,
                ls->space_stats()->_remote_space / K,
                ls->space_stats()->_unbiased_space / K,
                ls->space_stats()->_uncommited_space / K,
                ls->space_stats()->_large_pages,
                ls->space_stats()->_small_pages);
    }
  }
}

void MutableNUMASpace::verify(bool allow_dirty) {
  // This can be called after setting an arbitrary value to the space's top,
  // so an object can cross the chunk boundary. We ensure the parsability
  // of the space and just walk the objects in a linear fashion.
  ensure_parsability();
  MutableSpace::verify(allow_dirty);
}

// Scan pages and gather stats about page placement and size.
void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
  clear_space_stats();
  char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* end = (char*)round_down((intptr_t) space()->end(), page_size);
  if (start < end) {
    for (char *p = start; p < end;) {
      os::page_info info;
      if (os::get_page_info(p, &info)) {
        if (info.size > 0) {
          if (info.size > (size_t)os::vm_page_size()) {
            space_stats()->_large_pages++;
          } else {
            space_stats()->_small_pages++;
          }
          if (info.lgrp_id == lgrp_id()) {
            space_stats()->_local_space += info.size;
          } else {
            space_stats()->_remote_space += info.size;
          }
          p += info.size;
        } else {
          p += os::vm_page_size();
          space_stats()->_uncommited_space += os::vm_page_size();
        }
      } else {
        return;
      }
    }
  }
  space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
                                   pointer_delta(space()->end(), end, sizeof(char));
}

// Scan page_count pages and verify that they have the right size and placement.
// If invalid pages are found, they are freed in the hope that subsequent
// reallocation will be more successful.
void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
{
  char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
  char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);

  if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
    set_last_page_scanned(range_start);
  }

  char *scan_start = last_page_scanned();
  char* scan_end = MIN2(scan_start + page_size * page_count, range_end);

  os::page_info page_expected, page_found;
  page_expected.size = page_size;
  page_expected.lgrp_id = lgrp_id();

  char *s = scan_start;
  while (s < scan_end) {
    char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
    if (e == NULL) {
      break;
    }
    if (e != scan_end) {
      if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
          && page_expected.size != 0) {
        os::free_memory(s, pointer_delta(e, s, sizeof(char)));
      }
      page_expected = page_found;
    }
    s = e;
  }

  set_last_page_scanned(scan_end);
}