src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp

author:      brutisso
date:        Mon, 27 Jan 2014 13:14:53 +0100
changeset:   6376:cfd4aac53239
parent:      6198:55fb97c4c58d
child:       6680:78bbf4d43a14
permissions: -rw-r--r--

8030177: G1: Enable TLAB resizing
Reviewed-by: tschatzl, stefank, jmasa

duke@435 1
duke@435 2 /*
mikael@6198 3 * Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
duke@435 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 5 *
duke@435 6 * This code is free software; you can redistribute it and/or modify it
duke@435 7 * under the terms of the GNU General Public License version 2 only, as
duke@435 8 * published by the Free Software Foundation.
duke@435 9 *
duke@435 10 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 13 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 14 * accompanied this code).
duke@435 15 *
duke@435 16 * You should have received a copy of the GNU General Public License version
duke@435 17 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 19 *
trims@1907 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 21 * or visit www.oracle.com if you need additional information or have any
trims@1907 22 * questions.
duke@435 23 *
duke@435 24 */
duke@435 25
stefank@2314 26 #include "precompiled.hpp"
stefank@2314 27 #include "gc_implementation/shared/mutableNUMASpace.hpp"
stefank@2314 28 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 29 #include "memory/sharedHeap.hpp"
stefank@2314 30 #include "oops/oop.inline.hpp"
stefank@4299 31 #include "runtime/thread.inline.hpp"
duke@435 32
iveresov@970 33 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
zgu@3900 34 _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
duke@435 35 _page_size = os::vm_page_size();
duke@435 36 _adaptation_cycles = 0;
duke@435 37 _samples_count = 0;
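// Force an initial topology scan so that one LGRPSpace per locality group
// exists from the start.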
duke@435 38 update_layout(true);
duke@435 39 }
duke@435 40
duke@435 41 MutableNUMASpace::~MutableNUMASpace() {
duke@435 42 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 43 delete lgrp_spaces()->at(i);
duke@435 44 }
duke@435 45 delete lgrp_spaces();
duke@435 46 }
duke@435 47
jmasa@698 48 #ifndef PRODUCT
duke@435 49 void MutableNUMASpace::mangle_unused_area() {
jmasa@698 50 // This method should do nothing.
jmasa@698 51 // It can be called on a numa space during a full compaction.
duke@435 52 }
jmasa@698 53 void MutableNUMASpace::mangle_unused_area_complete() {
jmasa@698 54 // This method should do nothing.
jmasa@698 55 // It can be called on a numa space during a full compaction.
jmasa@698 56 }
jmasa@698 57 void MutableNUMASpace::mangle_region(MemRegion mr) {
jmasa@698 58 // This method should do nothing because numa spaces are not mangled.
jmasa@698 59 }
jmasa@698 60 void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
jmasa@698 61 assert(false, "Do not mangle MutableNUMASpace's");
jmasa@698 62 }
jmasa@698 63 void MutableNUMASpace::set_top_for_allocations() {
jmasa@698 64 // This method should do nothing.
jmasa@698 65 }
jmasa@698 66 void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
jmasa@698 67 // This method should do nothing.
jmasa@698 68 }
jmasa@698 69 void MutableNUMASpace::check_mangled_unused_area_complete() {
jmasa@698 70 // This method should do nothing.
jmasa@698 71 }
jmasa@698 72 #endif // NOT_PRODUCT
duke@435 73
duke@435 74 // There may be unallocated holes in the middle chunks
duke@435 75 // that should be filled with dead objects to ensure parseability.
duke@435 76 void MutableNUMASpace::ensure_parsability() {
duke@435 77 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 78 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 79 MutableSpace *s = ls->space();
twisti@1040 80 if (s->top() < top()) { // For all spaces preceding the one containing top()
duke@435 81 if (s->free_in_words() > 0) {
brutisso@3668 82 intptr_t cur_top = (intptr_t)s->top();
brutisso@3668 83 size_t words_left_to_fill = pointer_delta(s->end(), s->top());
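// Fill the unused tail of this chunk with dummy objects, at most
// filler_array_max_size() words per filler, so that heap walkers see a
// fully parsable sequence of objects.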
brutisso@3668 84 while (words_left_to_fill > 0) {
brutisso@3668 85 size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
brutisso@3668 86 assert(words_to_fill >= CollectedHeap::min_fill_size(),
brutisso@3668 87 err_msg("Remaining size (" SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
brutisso@3668 88 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
brutisso@3668 89 CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
brutisso@3668 90 if (!os::numa_has_static_binding()) {
brutisso@3668 91 size_t touched_words = words_to_fill;
duke@435 92 #ifndef ASSERT
brutisso@3668 93 if (!ZapUnusedHeapArea) {
brutisso@3668 94 touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
brutisso@3668 95 touched_words);
brutisso@3668 96 }
duke@435 97 #endif
brutisso@3668 98 MemRegion invalid;
brutisso@3668 99 HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
brutisso@3668 100 HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
brutisso@3668 101 if (crossing_start != crossing_end) {
brutisso@3668 102 // If the object header crossed a small page boundary we mark the area
brutisso@3668 103 // as invalid, rounding it to page_size().
brutisso@3668 104 HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
brutisso@3668 105 HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
brutisso@3668 106 invalid = MemRegion(start, end);
brutisso@3668 107 }
brutisso@3668 108
brutisso@3668 109 ls->add_invalid_region(invalid);
iveresov@576 110 }
brutisso@3668 111 cur_top = cur_top + (words_to_fill * HeapWordSize);
brutisso@3668 112 words_left_to_fill -= words_to_fill;
duke@435 113 }
duke@435 114 }
duke@435 115 } else {
iveresov@576 116 if (!os::numa_has_static_binding()) {
duke@435 117 #ifdef ASSERT
duke@435 118 MemRegion invalid(s->top(), s->end());
duke@435 119 ls->add_invalid_region(invalid);
iveresov@576 120 #else
iveresov@576 121 if (ZapUnusedHeapArea) {
iveresov@576 122 MemRegion invalid(s->top(), s->end());
iveresov@576 123 ls->add_invalid_region(invalid);
iveresov@579 124 } else {
iveresov@579 125 return;
iveresov@579 126 }
duke@435 127 #endif
iveresov@579 128 } else {
iveresov@579 129 return;
iveresov@576 130 }
duke@435 131 }
duke@435 132 }
duke@435 133 }
duke@435 134
duke@435 135 size_t MutableNUMASpace::used_in_words() const {
duke@435 136 size_t s = 0;
duke@435 137 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 138 s += lgrp_spaces()->at(i)->space()->used_in_words();
duke@435 139 }
duke@435 140 return s;
duke@435 141 }
duke@435 142
duke@435 143 size_t MutableNUMASpace::free_in_words() const {
duke@435 144 size_t s = 0;
duke@435 145 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 146 s += lgrp_spaces()->at(i)->space()->free_in_words();
duke@435 147 }
duke@435 148 return s;
duke@435 149 }
duke@435 150
duke@435 151
duke@435 152 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
duke@435 153 guarantee(thr != NULL, "No thread");
duke@435 154 int lgrp_id = thr->lgrp_id();
iveresov@703 155 if (lgrp_id == -1) {
iveresov@703 156 // This case can occur after the topology of the system has
iveresov@703 157 // changed. Thread can change their location, the new home
iveresov@703 158 // group will be determined during the first allocation
iveresov@703 159 // attempt. For now we can safely assume that all spaces
iveresov@703 160 // have equal size because the whole space will be reinitialized.
iveresov@703 161 if (lgrp_spaces()->length() > 0) {
iveresov@703 162 return capacity_in_bytes() / lgrp_spaces()->length();
iveresov@703 163 } else {
iveresov@703 164 assert(false, "There should be at least one locality group");
iveresov@703 165 return 0;
iveresov@703 166 }
iveresov@703 167 }
iveresov@703 168 // That's the normal case, where we know the locality group of the thread.
duke@435 169 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 170 if (i == -1) {
duke@435 171 return 0;
duke@435 172 }
duke@435 173 return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
duke@435 174 }
duke@435 175
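// tlab_used() was added with this changeset (8030177: G1: Enable TLAB resizing).
// It reports the used bytes of the calling thread's locality-group chunk, and
// falls back to an even share of the whole space when the thread has no lgrp yet.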
brutisso@6376 176 size_t MutableNUMASpace::tlab_used(Thread *thr) const {
brutisso@6376 177 // Please see the comments for tlab_capacity().
brutisso@6376 178 guarantee(thr != NULL, "No thread");
brutisso@6376 179 int lgrp_id = thr->lgrp_id();
brutisso@6376 180 if (lgrp_id == -1) {
brutisso@6376 181 if (lgrp_spaces()->length() > 0) {
brutisso@6376 182 return (used_in_bytes()) / lgrp_spaces()->length();
brutisso@6376 183 } else {
brutisso@6376 184 assert(false, "There should be at least one locality group");
brutisso@6376 185 return 0;
brutisso@6376 186 }
brutisso@6376 187 }
brutisso@6376 188 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
brutisso@6376 189 if (i == -1) {
brutisso@6376 190 return 0;
brutisso@6376 191 }
brutisso@6376 192 return lgrp_spaces()->at(i)->space()->used_in_bytes();
brutisso@6376 193 }
brutisso@6376 194
brutisso@6376 195
duke@435 196 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
iveresov@703 197 // Please see the comments for tlab_capacity().
duke@435 198 guarantee(thr != NULL, "No thread");
duke@435 199 int lgrp_id = thr->lgrp_id();
iveresov@703 200 if (lgrp_id == -1) {
iveresov@703 201 if (lgrp_spaces()->length() > 0) {
iveresov@703 202 return free_in_bytes() / lgrp_spaces()->length();
iveresov@703 203 } else {
iveresov@703 204 assert(false, "There should be at least one locality group");
iveresov@703 205 return 0;
iveresov@703 206 }
iveresov@703 207 }
duke@435 208 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 209 if (i == -1) {
duke@435 210 return 0;
duke@435 211 }
duke@435 212 return lgrp_spaces()->at(i)->space()->free_in_bytes();
duke@435 213 }
duke@435 214
iveresov@808 215
iveresov@808 216 size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
iveresov@808 217 guarantee(thr != NULL, "No thread");
iveresov@808 218 int lgrp_id = thr->lgrp_id();
iveresov@808 219 if (lgrp_id == -1) {
iveresov@808 220 if (lgrp_spaces()->length() > 0) {
iveresov@808 221 return capacity_in_words() / lgrp_spaces()->length();
iveresov@808 222 } else {
iveresov@808 223 assert(false, "There should be at least one locality group");
iveresov@808 224 return 0;
iveresov@808 225 }
iveresov@808 226 }
iveresov@808 227 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
iveresov@808 228 if (i == -1) {
iveresov@808 229 return 0;
iveresov@808 230 }
iveresov@808 231 return lgrp_spaces()->at(i)->space()->capacity_in_words();
iveresov@808 232 }
iveresov@808 233
duke@435 234 // Check if the NUMA topology has changed. Add and remove spaces if needed.
duke@435 235 // The update can be forced by setting the force parameter equal to true.
duke@435 236 bool MutableNUMASpace::update_layout(bool force) {
duke@435 237 // Check if the topology had changed.
duke@435 238 bool changed = os::numa_topology_changed();
duke@435 239 if (force || changed) {
duke@435 240 // Compute lgrp intersection. Add/remove spaces.
duke@435 241 int lgrp_limit = (int)os::numa_get_groups_num();
zgu@3900 242 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
duke@435 243 int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
duke@435 244 assert(lgrp_num > 0, "There should be at least one locality group");
duke@435 245 // Add new spaces for the new nodes
duke@435 246 for (int i = 0; i < lgrp_num; i++) {
duke@435 247 bool found = false;
duke@435 248 for (int j = 0; j < lgrp_spaces()->length(); j++) {
duke@435 249 if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
duke@435 250 found = true;
duke@435 251 break;
duke@435 252 }
duke@435 253 }
duke@435 254 if (!found) {
iveresov@970 255 lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
duke@435 256 }
duke@435 257 }
duke@435 258
duke@435 259 // Remove spaces for the removed nodes.
duke@435 260 for (int i = 0; i < lgrp_spaces()->length();) {
duke@435 261 bool found = false;
duke@435 262 for (int j = 0; j < lgrp_num; j++) {
duke@435 263 if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
duke@435 264 found = true;
duke@435 265 break;
duke@435 266 }
duke@435 267 }
duke@435 268 if (!found) {
duke@435 269 delete lgrp_spaces()->at(i);
duke@435 270 lgrp_spaces()->remove_at(i);
duke@435 271 } else {
duke@435 272 i++;
duke@435 273 }
duke@435 274 }
duke@435 275
zgu@3900 276 FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC);
duke@435 277
duke@435 278 if (changed) {
duke@435 279 for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
duke@435 280 thread->set_lgrp_id(-1);
duke@435 281 }
duke@435 282 }
duke@435 283 return true;
duke@435 284 }
duke@435 285 return false;
duke@435 286 }
duke@435 287
duke@435 288 // Bias region towards the first-touching lgrp. Set the right page sizes.
iveresov@576 289 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
duke@435 290 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
duke@435 291 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
duke@435 292 if (end > start) {
duke@435 293 MemRegion aligned_region(start, end);
duke@435 294 assert((intptr_t)aligned_region.start() % page_size() == 0 &&
duke@435 295 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
duke@435 296 assert(region().contains(aligned_region), "Sanity");
iveresov@576 297 // First we tell the OS which page size we want in the given range. The underlying
iveresov@576 298 // large page can be broken down if we require small pages.
iveresov@576 299 os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
iveresov@576 300 // Then we uncommit the pages in the range.
iveresov@3363 301 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
iveresov@576 302 // And make them local/first-touch biased.
iveresov@576 303 os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
duke@435 304 }
duke@435 305 }
duke@435 306
duke@435 307 // Free all pages in the region.
duke@435 308 void MutableNUMASpace::free_region(MemRegion mr) {
duke@435 309 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
duke@435 310 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
duke@435 311 if (end > start) {
duke@435 312 MemRegion aligned_region(start, end);
duke@435 313 assert((intptr_t)aligned_region.start() % page_size() == 0 &&
duke@435 314 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
duke@435 315 assert(region().contains(aligned_region), "Sanity");
iveresov@3363 316 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
duke@435 317 }
duke@435 318 }
duke@435 319
duke@435 320 // Update space layout. Perform adaptation.
duke@435 321 void MutableNUMASpace::update() {
duke@435 322 if (update_layout(false)) {
duke@435 323 // If the topology has changed, make all chunks zero-sized.
iveresov@703 324 // And clear the alloc-rate statistics.
iveresov@703 325 // In the future we may want to handle this more gracefully in order
iveresov@703 326 // to avoid the reallocation of the pages as much as possible.
duke@435 327 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@703 328 LGRPSpace *ls = lgrp_spaces()->at(i);
iveresov@703 329 MutableSpace *s = ls->space();
duke@435 330 s->set_end(s->bottom());
duke@435 331 s->set_top(s->bottom());
iveresov@703 332 ls->clear_alloc_rate();
duke@435 333 }
jmasa@698 334 // A NUMA space is never mangled
jmasa@698 335 initialize(region(),
jmasa@698 336 SpaceDecorator::Clear,
jmasa@698 337 SpaceDecorator::DontMangle);
duke@435 338 } else {
duke@435 339 bool should_initialize = false;
iveresov@576 340 if (!os::numa_has_static_binding()) {
iveresov@576 341 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@576 342 if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
iveresov@576 343 should_initialize = true;
iveresov@576 344 break;
iveresov@576 345 }
duke@435 346 }
duke@435 347 }
duke@435 348
duke@435 349 if (should_initialize ||
duke@435 350 (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
jmasa@698 351 // A NUMA space is never mangled
jmasa@698 352 initialize(region(),
jmasa@698 353 SpaceDecorator::Clear,
jmasa@698 354 SpaceDecorator::DontMangle);
duke@435 355 }
duke@435 356 }
duke@435 357
duke@435 358 if (NUMAStats) {
duke@435 359 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 360 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
duke@435 361 }
duke@435 362 }
duke@435 363
duke@435 364 scan_pages(NUMAPageScanRate);
duke@435 365 }
duke@435 366
duke@435 367 // Scan pages. Free pages that have smaller size or wrong placement.
duke@435 368 void MutableNUMASpace::scan_pages(size_t page_count)
duke@435 369 {
duke@435 370 size_t pages_per_chunk = page_count / lgrp_spaces()->length();
duke@435 371 if (pages_per_chunk > 0) {
duke@435 372 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 373 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 374 ls->scan_pages(page_size(), pages_per_chunk);
duke@435 375 }
duke@435 376 }
duke@435 377 }
duke@435 378
duke@435 379 // Accumulate statistics about the allocation rate of each lgrp.
duke@435 380 void MutableNUMASpace::accumulate_statistics() {
duke@435 381 if (UseAdaptiveNUMAChunkSizing) {
duke@435 382 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 383 lgrp_spaces()->at(i)->sample();
duke@435 384 }
duke@435 385 increment_samples_count();
duke@435 386 }
duke@435 387
duke@435 388 if (NUMAStats) {
duke@435 389 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 390 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
duke@435 391 }
duke@435 392 }
duke@435 393 }
duke@435 394
duke@435 395 // Get the current size of a chunk.
duke@435 396 // This function computes the size of the chunk based on the
duke@435 397 // difference between chunk ends. This allows it to work correctly in
duke@435 398 // case the whole space is resized and during the process of adaptive
duke@435 399 // chunk resizing.
duke@435 400 size_t MutableNUMASpace::current_chunk_size(int i) {
duke@435 401 HeapWord *cur_end, *prev_end;
duke@435 402 if (i == 0) {
duke@435 403 prev_end = bottom();
duke@435 404 } else {
duke@435 405 prev_end = lgrp_spaces()->at(i - 1)->space()->end();
duke@435 406 }
duke@435 407 if (i == lgrp_spaces()->length() - 1) {
duke@435 408 cur_end = end();
duke@435 409 } else {
duke@435 410 cur_end = lgrp_spaces()->at(i)->space()->end();
duke@435 411 }
duke@435 412 if (cur_end > prev_end) {
duke@435 413 return pointer_delta(cur_end, prev_end, sizeof(char));
duke@435 414 }
duke@435 415 return 0;
duke@435 416 }
duke@435 417
duke@435 418 // Return the default chunk size by equally dividing the space.
duke@435 419 // page_size() aligned.
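// base_space_size() is a page count, so the integer division hands out
// whole pages per locality group before converting the result to bytes.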
duke@435 420 size_t MutableNUMASpace::default_chunk_size() {
duke@435 421 return base_space_size() / lgrp_spaces()->length() * page_size();
duke@435 422 }
duke@435 423
duke@435 424 // Produce a new chunk size. page_size() aligned.
iveresov@826 425 // This function is expected to be called on a sequence of i's from 0 to
iveresov@826 426 // lgrp_spaces()->length().
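// For example (illustrative numbers only): with three locality groups whose
// average allocation rates relate as 2:1:1 and 400 pages available at i == 0,
// the first chunk gets roughly 2/(2+1+1) * 400 = 200 pages; subsequent calls
// divide what remains among the remaining groups in the same proportional way.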
duke@435 427 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
duke@435 428 size_t pages_available = base_space_size();
duke@435 429 for (int j = 0; j < i; j++) {
duke@435 430 pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
duke@435 431 }
duke@435 432 pages_available -= lgrp_spaces()->length() - i - 1;
duke@435 433 assert(pages_available > 0, "No pages left");
duke@435 434 float alloc_rate = 0;
duke@435 435 for (int j = i; j < lgrp_spaces()->length(); j++) {
duke@435 436 alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
duke@435 437 }
duke@435 438 size_t chunk_size = 0;
duke@435 439 if (alloc_rate > 0) {
duke@435 440 LGRPSpace *ls = lgrp_spaces()->at(i);
iveresov@826 441 chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
duke@435 442 }
duke@435 443 chunk_size = MAX2(chunk_size, page_size());
duke@435 444
duke@435 445 if (limit > 0) {
duke@435 446 limit = round_down(limit, page_size());
duke@435 447 if (chunk_size > current_chunk_size(i)) {
iveresov@897 448 size_t upper_bound = pages_available * page_size();
iveresov@897 449 if (upper_bound > limit &&
iveresov@897 450 current_chunk_size(i) < upper_bound - limit) {
iveresov@897 451 // The resulting upper bound should not exceed the available
iveresov@897 452 // amount of memory (pages_available * page_size()).
iveresov@897 453 upper_bound = current_chunk_size(i) + limit;
iveresov@897 454 }
iveresov@897 455 chunk_size = MIN2(chunk_size, upper_bound);
duke@435 456 } else {
iveresov@897 457 size_t lower_bound = page_size();
iveresov@897 458 if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
iveresov@897 459 lower_bound = current_chunk_size(i) - limit;
iveresov@897 460 }
iveresov@897 461 chunk_size = MAX2(chunk_size, lower_bound);
duke@435 462 }
duke@435 463 }
duke@435 464 assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
duke@435 465 return chunk_size;
duke@435 466 }
duke@435 467
duke@435 468
duke@435 469 // Return the bottom_region and the top_region. Align them to page_size() boundary.
duke@435 470 // |------------------new_region---------------------------------|
duke@435 471 // |----bottom_region--|---intersection---|------top_region------|
duke@435 472 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
duke@435 473 MemRegion* bottom_region, MemRegion *top_region) {
duke@435 474 // Is there bottom?
duke@435 475 if (new_region.start() < intersection.start()) { // Yes
duke@435 476 // Try to coalesce small pages into a large one.
iveresov@970 477 if (UseLargePages && page_size() >= alignment()) {
iveresov@970 478 HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
duke@435 479 if (new_region.contains(p)
iveresov@970 480 && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
duke@435 481 if (intersection.contains(p)) {
duke@435 482 intersection = MemRegion(p, intersection.end());
duke@435 483 } else {
duke@435 484 intersection = MemRegion(p, p);
duke@435 485 }
duke@435 486 }
duke@435 487 }
duke@435 488 *bottom_region = MemRegion(new_region.start(), intersection.start());
duke@435 489 } else {
duke@435 490 *bottom_region = MemRegion();
duke@435 491 }
duke@435 492
duke@435 493 // Is there top?
duke@435 494 if (intersection.end() < new_region.end()) { // Yes
duke@435 495 // Try to coalesce small pages into a large one.
iveresov@970 496 if (UseLargePages && page_size() >= alignment()) {
iveresov@970 497 HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
duke@435 498 if (new_region.contains(p)
iveresov@970 499 && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
duke@435 500 if (intersection.contains(p)) {
duke@435 501 intersection = MemRegion(intersection.start(), p);
duke@435 502 } else {
duke@435 503 intersection = MemRegion(p, p);
duke@435 504 }
duke@435 505 }
duke@435 506 }
duke@435 507 *top_region = MemRegion(intersection.end(), new_region.end());
duke@435 508 } else {
duke@435 509 *top_region = MemRegion();
duke@435 510 }
duke@435 511 }
duke@435 512
duke@435 513 // Try to merge the invalid region with the bottom or top region by decreasing
duke@435 514 // the intersection area. The invalid_region is returned aligned to the
duke@435 515 // page_size() boundary; it remains non-empty only if it lies strictly
duke@435 516 // inside the intersection.
duke@435 517 // |------------------new_region---------------------------------|
duke@435 518 // |----------------|-------invalid---|--------------------------|
duke@435 519 // |----bottom_region--|---intersection---|------top_region------|
duke@435 520 void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
duke@435 521 MemRegion *invalid_region) {
duke@435 522 if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
duke@435 523 *intersection = MemRegion(invalid_region->end(), intersection->end());
duke@435 524 *invalid_region = MemRegion();
duke@435 525 } else
duke@435 526 if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
duke@435 527 *intersection = MemRegion(intersection->start(), invalid_region->start());
duke@435 528 *invalid_region = MemRegion();
duke@435 529 } else
duke@435 530 if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
duke@435 531 *intersection = MemRegion(new_region.start(), new_region.start());
duke@435 532 *invalid_region = MemRegion();
duke@435 533 } else
duke@435 534 if (intersection->contains(invalid_region)) {
duke@435 535 // That's the only case we have to make an additional bias_region() call.
duke@435 536 HeapWord* start = invalid_region->start();
duke@435 537 HeapWord* end = invalid_region->end();
iveresov@970 538 if (UseLargePages && page_size() >= alignment()) {
iveresov@970 539 HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
duke@435 540 if (new_region.contains(p)) {
duke@435 541 start = p;
duke@435 542 }
iveresov@970 543 p = (HeapWord*)round_to((intptr_t) end, alignment());
duke@435 544 if (new_region.contains(end)) {
duke@435 545 end = p;
duke@435 546 }
duke@435 547 }
duke@435 548 if (intersection->start() > start) {
duke@435 549 *intersection = MemRegion(start, intersection->end());
duke@435 550 }
duke@435 551 if (intersection->end() < end) {
duke@435 552 *intersection = MemRegion(intersection->start(), end);
duke@435 553 }
duke@435 554 *invalid_region = MemRegion(start, end);
duke@435 555 }
duke@435 556 }
duke@435 557
jmasa@698 558 void MutableNUMASpace::initialize(MemRegion mr,
jmasa@698 559 bool clear_space,
iveresov@970 560 bool mangle_space,
iveresov@970 561 bool setup_pages) {
duke@435 562 assert(clear_space, "Reallocation will destroy data!");
duke@435 563 assert(lgrp_spaces()->length() > 0, "There should be at least one space");
duke@435 564
duke@435 565 MemRegion old_region = region(), new_region;
duke@435 566 set_bottom(mr.start());
duke@435 567 set_end(mr.end());
jmasa@698 568 // Must always clear the space
jmasa@698 569 clear(SpaceDecorator::DontMangle);
duke@435 570
duke@435 571 // Compute chunk sizes
duke@435 572 size_t prev_page_size = page_size();
iveresov@970 573 set_page_size(UseLargePages ? alignment() : os::vm_page_size());
duke@435 574 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
duke@435 575 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
duke@435 576 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
duke@435 577
duke@435 578 // Try small pages if the chunk size is too small
duke@435 579 if (base_space_size_pages / lgrp_spaces()->length() == 0
duke@435 580 && page_size() > (size_t)os::vm_page_size()) {
duke@435 581 set_page_size(os::vm_page_size());
duke@435 582 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
duke@435 583 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
duke@435 584 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
duke@435 585 }
duke@435 586 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
duke@435 587 set_base_space_size(base_space_size_pages);
duke@435 588
duke@435 589 // Handle space resize
duke@435 590 MemRegion top_region, bottom_region;
duke@435 591 if (!old_region.equals(region())) {
duke@435 592 new_region = MemRegion(rounded_bottom, rounded_end);
duke@435 593 MemRegion intersection = new_region.intersection(old_region);
duke@435 594 if (intersection.start() == NULL ||
duke@435 595 intersection.end() == NULL ||
duke@435 596 prev_page_size > page_size()) { // If the page size got smaller we have to change
duke@435 597 // the page size preference for the whole space.
duke@435 598 intersection = MemRegion(new_region.start(), new_region.start());
duke@435 599 }
duke@435 600 select_tails(new_region, intersection, &bottom_region, &top_region);
iveresov@576 601 bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
iveresov@576 602 bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
duke@435 603 }
duke@435 604
duke@435 605 // Check if the space layout has changed significantly.
duke@435 606 // This happens when the space has been resized so that either head or tail
duke@435 607 // chunk became less than a page.
duke@435 608 bool layout_valid = UseAdaptiveNUMAChunkSizing &&
duke@435 609 current_chunk_size(0) > page_size() &&
duke@435 610 current_chunk_size(lgrp_spaces()->length() - 1) > page_size();
duke@435 611
duke@435 612
duke@435 613 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 614 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 615 MutableSpace *s = ls->space();
duke@435 616 old_region = s->region();
duke@435 617
duke@435 618 size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
duke@435 619 if (i < lgrp_spaces()->length() - 1) {
duke@435 620 if (!UseAdaptiveNUMAChunkSizing ||
duke@435 621 (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
duke@435 622 samples_count() < AdaptiveSizePolicyReadyThreshold) {
duke@435 623 // No adaptation. Divide the space equally.
duke@435 624 chunk_byte_size = default_chunk_size();
duke@435 625 } else
duke@435 626 if (!layout_valid || NUMASpaceResizeRate == 0) {
duke@435 627 // Fast adaptation. If no space resize rate is set, resize
duke@435 628 // the chunks instantly.
duke@435 629 chunk_byte_size = adaptive_chunk_size(i, 0);
duke@435 630 } else {
duke@435 631 // Slow adaptation. Resize the chunks moving no more than
duke@435 632 // NUMASpaceResizeRate bytes per collection.
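// The per-chunk limit is weighted by (i + 1); summed over all chunks this
// adds up to at most NUMASpaceResizeRate (limit * n * (n + 1) / 2), which
// bounds the total resize movement per collection.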
duke@435 633 size_t limit = NUMASpaceResizeRate /
duke@435 634 (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
duke@435 635 chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
duke@435 636 }
duke@435 637
duke@435 638 assert(chunk_byte_size >= page_size(), "Chunk size too small");
duke@435 639 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
duke@435 640 }
duke@435 641
duke@435 642 if (i == 0) { // Bottom chunk
duke@435 643 if (i != lgrp_spaces()->length() - 1) {
duke@435 644 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
duke@435 645 } else {
duke@435 646 new_region = MemRegion(bottom(), end());
duke@435 647 }
duke@435 648 } else
duke@435 649 if (i < lgrp_spaces()->length() - 1) { // Middle chunks
duke@435 650 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
duke@435 651 new_region = MemRegion(ps->end(),
duke@435 652 ps->end() + (chunk_byte_size >> LogHeapWordSize));
duke@435 653 } else { // Top chunk
duke@435 654 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
duke@435 655 new_region = MemRegion(ps->end(), end());
duke@435 656 }
duke@435 657 guarantee(region().contains(new_region), "Region invariant");
duke@435 658
duke@435 659
duke@435 660 // The general case:
duke@435 661 // |---------------------|--invalid---|--------------------------|
duke@435 662 // |------------------new_region---------------------------------|
duke@435 663 // |----bottom_region--|---intersection---|------top_region------|
duke@435 664 // |----old_region----|
duke@435 665 // The intersection part has all pages in place; we don't need to migrate them.
duke@435 666 // Pages for the top and bottom part should be freed and then reallocated.
duke@435 667
duke@435 668 MemRegion intersection = old_region.intersection(new_region);
duke@435 669
duke@435 670 if (intersection.start() == NULL || intersection.end() == NULL) {
duke@435 671 intersection = MemRegion(new_region.start(), new_region.start());
duke@435 672 }
duke@435 673
iveresov@576 674 if (!os::numa_has_static_binding()) {
iveresov@576 675 MemRegion invalid_region = ls->invalid_region().intersection(new_region);
iveresov@576 676 // The invalid region is a range of memory that could possibly have
iveresov@576 677 // been allocated on another node. That's relevant only on Solaris, where
iveresov@576 678 // there is no static memory binding.
iveresov@576 679 if (!invalid_region.is_empty()) {
iveresov@576 680 merge_regions(new_region, &intersection, &invalid_region);
iveresov@576 681 free_region(invalid_region);
iveresov@576 682 ls->set_invalid_region(MemRegion());
iveresov@576 683 }
duke@435 684 }
iveresov@576 685
duke@435 686 select_tails(new_region, intersection, &bottom_region, &top_region);
iveresov@576 687
iveresov@576 688 if (!os::numa_has_static_binding()) {
iveresov@576 689 // If that's a system with the first-touch policy then it's enough
iveresov@576 690 // to free the pages.
iveresov@576 691 free_region(bottom_region);
iveresov@576 692 free_region(top_region);
iveresov@576 693 } else {
iveresov@576 694 // In a system with static binding we have to change the bias whenever
iveresov@576 695 // we reshape the heap.
iveresov@576 696 bias_region(bottom_region, ls->lgrp_id());
iveresov@576 697 bias_region(top_region, ls->lgrp_id());
iveresov@576 698 }
duke@435 699
jmasa@698 700 // Clear space (set top = bottom) but never mangle.
iveresov@970 701 s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
duke@435 702
duke@435 703 set_adaptation_cycles(samples_count());
duke@435 704 }
duke@435 705 }
duke@435 706
duke@435 707 // Set the top of the whole space.
duke@435 708 // Mark the holes in chunks below the top() as invalid.
duke@435 709 void MutableNUMASpace::set_top(HeapWord* value) {
duke@435 710 bool found_top = false;
iveresov@625 711 for (int i = 0; i < lgrp_spaces()->length();) {
duke@435 712 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 713 MutableSpace *s = ls->space();
duke@435 714 HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
duke@435 715
duke@435 716 if (s->contains(value)) {
iveresov@625 717 // Check if setting the chunk's top to a given value would create a hole less than
iveresov@625 718 // a minimal object; assuming that's not the last chunk in which case we don't care.
iveresov@625 719 if (i < lgrp_spaces()->length() - 1) {
iveresov@625 720 size_t remainder = pointer_delta(s->end(), value);
jcoomes@916 721 const size_t min_fill_size = CollectedHeap::min_fill_size();
jcoomes@916 722 if (remainder < min_fill_size && remainder > 0) {
jcoomes@916 723 // Add a minimum size filler object; it will cross the chunk boundary.
jcoomes@916 724 CollectedHeap::fill_with_object(value, min_fill_size);
jcoomes@916 725 value += min_fill_size;
iveresov@625 726 assert(!s->contains(value), "Should be in the next chunk");
iveresov@625 727 // Restart the loop from the same chunk, since the value has moved
iveresov@625 728 // to the next one.
iveresov@625 729 continue;
iveresov@625 730 }
iveresov@625 731 }
iveresov@625 732
iveresov@576 733 if (!os::numa_has_static_binding() && top < value && top < s->end()) {
duke@435 734 ls->add_invalid_region(MemRegion(top, value));
duke@435 735 }
duke@435 736 s->set_top(value);
duke@435 737 found_top = true;
duke@435 738 } else {
duke@435 739 if (found_top) {
duke@435 740 s->set_top(s->bottom());
duke@435 741 } else {
iveresov@576 742 if (!os::numa_has_static_binding() && top < s->end()) {
iveresov@576 743 ls->add_invalid_region(MemRegion(top, s->end()));
iveresov@576 744 }
iveresov@576 745 s->set_top(s->end());
duke@435 746 }
duke@435 747 }
iveresov@625 748 i++;
duke@435 749 }
duke@435 750 MutableSpace::set_top(value);
duke@435 751 }
duke@435 752
jmasa@698 753 void MutableNUMASpace::clear(bool mangle_space) {
duke@435 754 MutableSpace::set_top(bottom());
duke@435 755 for (int i = 0; i < lgrp_spaces()->length(); i++) {
jmasa@698 756 // Never mangle NUMA spaces because the mangling will
jmasa@698 757 // bind the memory to a possibly unwanted lgroup.
jmasa@698 758 lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
duke@435 759 }
duke@435 760 }
duke@435 761
iveresov@576 762 /*
iveresov@576 763 Linux supports static memory binding, so most of the logic dealing
iveresov@576 764 with possibly invalid page allocations is effectively disabled.
iveresov@576 765 Besides, there is no notion of a home node in Linux: a thread is
iveresov@576 766 allowed to migrate freely, although the scheduler is rather reluctant
iveresov@576 767 to move threads between nodes. We check for the current node on
iveresov@576 768 every allocation, and with high probability a thread stays on the
iveresov@576 769 same node for some time, allowing local access to recently allocated
iveresov@576 770 objects.
iveresov@576 771 */
iveresov@576 772
duke@435 773 HeapWord* MutableNUMASpace::allocate(size_t size) {
iveresov@576 774 Thread* thr = Thread::current();
iveresov@576 775 int lgrp_id = thr->lgrp_id();
iveresov@576 776 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
duke@435 777 lgrp_id = os::numa_get_group_id();
iveresov@576 778 thr->set_lgrp_id(lgrp_id);
duke@435 779 }
duke@435 780
duke@435 781 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 782
duke@435 783 // It is possible that a new CPU has been hotplugged and
duke@435 784 // we haven't reshaped the space accordingly.
duke@435 785 if (i == -1) {
duke@435 786 i = os::random() % lgrp_spaces()->length();
duke@435 787 }
duke@435 788
iveresov@808 789 LGRPSpace* ls = lgrp_spaces()->at(i);
iveresov@808 790 MutableSpace *s = ls->space();
duke@435 791 HeapWord *p = s->allocate(size);
duke@435 792
iveresov@579 793 if (p != NULL) {
iveresov@579 794 size_t remainder = s->free_in_words();
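// If this allocation would leave a tail smaller than the minimum filler
// object at the end of the chunk, retract top and fail the allocation;
// such a gap could not be filled to keep the space parsable.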
kvn@1926 795 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
iveresov@579 796 s->set_top(s->top() - size);
iveresov@579 797 p = NULL;
iveresov@579 798 }
duke@435 799 }
duke@435 800 if (p != NULL) {
duke@435 801 if (top() < s->top()) { // Keep _top updated.
duke@435 802 MutableSpace::set_top(s->top());
duke@435 803 }
duke@435 804 }
iveresov@576 805 // Make the page allocation happen here if there is no static binding.
iveresov@576 806 if (p != NULL && !os::numa_has_static_binding()) {
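// Touch one word per small page so that the OS first-touch policy binds
// the backing pages to the node of the allocating thread.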
duke@435 807 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
duke@435 808 *(int*)i = 0;
duke@435 809 }
duke@435 810 }
iveresov@808 811 if (p == NULL) {
iveresov@808 812 ls->set_allocation_failed();
iveresov@808 813 }
duke@435 814 return p;
duke@435 815 }
duke@435 816
duke@435 817 // This version is lock-free.
duke@435 818 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
iveresov@576 819 Thread* thr = Thread::current();
iveresov@576 820 int lgrp_id = thr->lgrp_id();
iveresov@576 821 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
duke@435 822 lgrp_id = os::numa_get_group_id();
iveresov@576 823 thr->set_lgrp_id(lgrp_id);
duke@435 824 }
duke@435 825
duke@435 826 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 827 // It is possible that a new CPU has been hotplugged and
duke@435 828 // we haven't reshaped the space accordingly.
duke@435 829 if (i == -1) {
duke@435 830 i = os::random() % lgrp_spaces()->length();
duke@435 831 }
iveresov@808 832 LGRPSpace *ls = lgrp_spaces()->at(i);
iveresov@808 833 MutableSpace *s = ls->space();
duke@435 834 HeapWord *p = s->cas_allocate(size);
iveresov@579 835 if (p != NULL) {
iveresov@625 836 size_t remainder = pointer_delta(s->end(), p + size);
kvn@1926 837 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
iveresov@579 838 if (s->cas_deallocate(p, size)) {
iveresov@579 839 // We were the last to allocate and created a fragment less than
iveresov@579 840 // a minimal object.
iveresov@579 841 p = NULL;
iveresov@625 842 } else {
iveresov@625 843 guarantee(false, "Deallocation should always succeed");
iveresov@579 844 }
duke@435 845 }
duke@435 846 }
duke@435 847 if (p != NULL) {
duke@435 848 HeapWord* cur_top, *cur_chunk_top = p + size;
duke@435 849 while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
duke@435 850 if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
duke@435 851 break;
duke@435 852 }
duke@435 853 }
duke@435 854 }
duke@435 855
iveresov@576 856 // Make the page allocation happen here if there is no static binding.
iveresov@576 857 if (p != NULL && !os::numa_has_static_binding() ) {
duke@435 858 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
duke@435 859 *(int*)i = 0;
duke@435 860 }
duke@435 861 }
iveresov@808 862 if (p == NULL) {
iveresov@808 863 ls->set_allocation_failed();
iveresov@808 864 }
duke@435 865 return p;
duke@435 866 }
duke@435 867
duke@435 868 void MutableNUMASpace::print_short_on(outputStream* st) const {
duke@435 869 MutableSpace::print_short_on(st);
duke@435 870 st->print(" (");
duke@435 871 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 872 st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
duke@435 873 lgrp_spaces()->at(i)->space()->print_short_on(st);
duke@435 874 if (i < lgrp_spaces()->length() - 1) {
duke@435 875 st->print(", ");
duke@435 876 }
duke@435 877 }
duke@435 878 st->print(")");
duke@435 879 }
duke@435 880
duke@435 881 void MutableNUMASpace::print_on(outputStream* st) const {
duke@435 882 MutableSpace::print_on(st);
duke@435 883 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 884 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 885 st->print(" lgrp %d", ls->lgrp_id());
duke@435 886 ls->space()->print_on(st);
duke@435 887 if (NUMAStats) {
iveresov@579 888 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@579 889 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
iveresov@579 890 }
duke@435 891 st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
duke@435 892 ls->space_stats()->_local_space / K,
duke@435 893 ls->space_stats()->_remote_space / K,
duke@435 894 ls->space_stats()->_unbiased_space / K,
duke@435 895 ls->space_stats()->_uncommited_space / K,
duke@435 896 ls->space_stats()->_large_pages,
duke@435 897 ls->space_stats()->_small_pages);
duke@435 898 }
duke@435 899 }
duke@435 900 }
duke@435 901
brutisso@3711 902 void MutableNUMASpace::verify() {
iveresov@625 903 // This can be called after setting an arbitrary value to the space's top,
iveresov@625 904 // so an object can cross the chunk boundary. We ensure the parsability
iveresov@625 905 // of the space and just walk the objects in a linear fashion.
iveresov@625 906 ensure_parsability();
brutisso@3711 907 MutableSpace::verify();
duke@435 908 }
duke@435 909
duke@435 910 // Scan pages and gather stats about page placement and size.
duke@435 911 void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
duke@435 912 clear_space_stats();
duke@435 913 char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
duke@435 914 char* end = (char*)round_down((intptr_t) space()->end(), page_size);
duke@435 915 if (start < end) {
duke@435 916 for (char *p = start; p < end;) {
duke@435 917 os::page_info info;
duke@435 918 if (os::get_page_info(p, &info)) {
duke@435 919 if (info.size > 0) {
duke@435 920 if (info.size > (size_t)os::vm_page_size()) {
duke@435 921 space_stats()->_large_pages++;
duke@435 922 } else {
duke@435 923 space_stats()->_small_pages++;
duke@435 924 }
duke@435 925 if (info.lgrp_id == lgrp_id()) {
duke@435 926 space_stats()->_local_space += info.size;
duke@435 927 } else {
duke@435 928 space_stats()->_remote_space += info.size;
duke@435 929 }
duke@435 930 p += info.size;
duke@435 931 } else {
duke@435 932 p += os::vm_page_size();
duke@435 933 space_stats()->_uncommited_space += os::vm_page_size();
duke@435 934 }
duke@435 935 } else {
duke@435 936 return;
duke@435 937 }
duke@435 938 }
duke@435 939 }
duke@435 940 space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
duke@435 941 pointer_delta(space()->end(), end, sizeof(char));
duke@435 942
duke@435 943 }
duke@435 944
duke@435 945 // Scan page_count pages and verify if they have the right size and right placement.
duke@435 946 // If invalid pages are found they are freed in hope that subsequent reallocation
duke@435 947 // will be more successful.
duke@435 948 void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
duke@435 949 {
duke@435 950 char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
duke@435 951 char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);
duke@435 952
duke@435 953 if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
duke@435 954 set_last_page_scanned(range_start);
duke@435 955 }
duke@435 956
duke@435 957 char *scan_start = last_page_scanned();
duke@435 958 char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
duke@435 959
duke@435 960 os::page_info page_expected, page_found;
duke@435 961 page_expected.size = page_size;
duke@435 962 page_expected.lgrp_id = lgrp_id();
duke@435 963
duke@435 964 char *s = scan_start;
duke@435 965 while (s < scan_end) {
duke@435 966 char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
duke@435 967 if (e == NULL) {
duke@435 968 break;
duke@435 969 }
duke@435 970 if (e != scan_end) {
stefank@4739 971 assert(e < scan_end, err_msg("e: " PTR_FORMAT " scan_end: " PTR_FORMAT, e, scan_end));
stefank@4739 972
duke@435 973 if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
duke@435 974 && page_expected.size != 0) {
iveresov@3363 975 os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
duke@435 976 }
duke@435 977 page_expected = page_found;
duke@435 978 }
duke@435 979 s = e;
duke@435 980 }
duke@435 981
duke@435 982 set_last_page_scanned(scan_end);
duke@435 983 }
