src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp

author:      jmasa
date:        Thu, 04 Oct 2012 10:40:23 -0700
changeset:   4131:097d78aaf2b5
parent:      3900:d2a62e0f25eb
child:       4299:f34d701e952e
permissions: -rw-r--r--

7198873: NPG: VM Does not unload classes with UseConcMarkSweepGC
Reviewed-by: johnc, mgerdin, jwilhelm

duke@435 1
duke@435 2 /*
brutisso@3668 3 * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 5 *
duke@435 6 * This code is free software; you can redistribute it and/or modify it
duke@435 7 * under the terms of the GNU General Public License version 2 only, as
duke@435 8 * published by the Free Software Foundation.
duke@435 9 *
duke@435 10 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 13 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 14 * accompanied this code).
duke@435 15 *
duke@435 16 * You should have received a copy of the GNU General Public License version
duke@435 17 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 19 *
trims@1907 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 21 * or visit www.oracle.com if you need additional information or have any
trims@1907 22 * questions.
duke@435 23 *
duke@435 24 */
duke@435 25
stefank@2314 26 #include "precompiled.hpp"
stefank@2314 27 #include "gc_implementation/shared/mutableNUMASpace.hpp"
stefank@2314 28 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 29 #include "memory/sharedHeap.hpp"
stefank@2314 30 #include "oops/oop.inline.hpp"
stefank@2314 31 #ifdef TARGET_OS_FAMILY_linux
stefank@2314 32 # include "thread_linux.inline.hpp"
stefank@2314 33 #endif
stefank@2314 34 #ifdef TARGET_OS_FAMILY_solaris
stefank@2314 35 # include "thread_solaris.inline.hpp"
stefank@2314 36 #endif
stefank@2314 37 #ifdef TARGET_OS_FAMILY_windows
stefank@2314 38 # include "thread_windows.inline.hpp"
stefank@2314 39 #endif
never@3156 40 #ifdef TARGET_OS_FAMILY_bsd
never@3156 41 # include "thread_bsd.inline.hpp"
never@3156 42 #endif
duke@435 43
duke@435 44
iveresov@970 45 MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) {
zgu@3900 46 _lgrp_spaces = new (ResourceObj::C_HEAP, mtGC) GrowableArray<LGRPSpace*>(0, true);
duke@435 47 _page_size = os::vm_page_size();
duke@435 48 _adaptation_cycles = 0;
duke@435 49 _samples_count = 0;
duke@435 50 update_layout(true);
duke@435 51 }
duke@435 52
duke@435 53 MutableNUMASpace::~MutableNUMASpace() {
duke@435 54 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 55 delete lgrp_spaces()->at(i);
duke@435 56 }
duke@435 57 delete lgrp_spaces();
duke@435 58 }
duke@435 59
jmasa@698 60 #ifndef PRODUCT
duke@435 61 void MutableNUMASpace::mangle_unused_area() {
jmasa@698 62 // This method should do nothing.
jmasa@698 63 // It can be called on a numa space during a full compaction.
duke@435 64 }
jmasa@698 65 void MutableNUMASpace::mangle_unused_area_complete() {
jmasa@698 66 // This method should do nothing.
jmasa@698 67 // It can be called on a numa space during a full compaction.
jmasa@698 68 }
jmasa@698 69 void MutableNUMASpace::mangle_region(MemRegion mr) {
jmasa@698 70 // This method should do nothing because numa spaces are not mangled.
jmasa@698 71 }
jmasa@698 72 void MutableNUMASpace::set_top_for_allocations(HeapWord* v) {
jmasa@698 73 assert(false, "Do not mangle MutableNUMASpace's");
jmasa@698 74 }
jmasa@698 75 void MutableNUMASpace::set_top_for_allocations() {
jmasa@698 76 // This method should do nothing.
jmasa@698 77 }
jmasa@698 78 void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) {
jmasa@698 79 // This method should do nothing.
jmasa@698 80 }
jmasa@698 81 void MutableNUMASpace::check_mangled_unused_area_complete() {
jmasa@698 82 // This method should do nothing.
jmasa@698 83 }
jmasa@698 84 #endif // NOT_PRODUCT
duke@435 85
duke@435 86 // There may be unallocated holes in the middle chunks
duke@435 87 // that should be filled with dead objects to ensure parseability.
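// Note (added for clarity): the filling below is done in pieces of at most
// CollectedHeap::filler_array_max_size() words, since fill_with_object()
// places a single filler (dead) object at a time and that is its maximum size.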
duke@435 88 void MutableNUMASpace::ensure_parsability() {
duke@435 89 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 90 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 91 MutableSpace *s = ls->space();
twisti@1040 92 if (s->top() < top()) { // For all spaces preceding the one containing top()
duke@435 93 if (s->free_in_words() > 0) {
brutisso@3668 94 intptr_t cur_top = (intptr_t)s->top();
brutisso@3668 95 size_t words_left_to_fill = pointer_delta(s->end(), s->top());
brutisso@3668 96 while (words_left_to_fill > 0) {
brutisso@3668 97 size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
brutisso@3668 98 assert(words_to_fill >= CollectedHeap::min_fill_size(),
brutisso@3668 99 err_msg("Remaining size ("SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
brutisso@3668 100 words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
brutisso@3668 101 CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
brutisso@3668 102 if (!os::numa_has_static_binding()) {
brutisso@3668 103 size_t touched_words = words_to_fill;
duke@435 104 #ifndef ASSERT
brutisso@3668 105 if (!ZapUnusedHeapArea) {
brutisso@3668 106 touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
brutisso@3668 107 touched_words);
brutisso@3668 108 }
duke@435 109 #endif
brutisso@3668 110 MemRegion invalid;
brutisso@3668 111 HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
brutisso@3668 112 HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
brutisso@3668 113 if (crossing_start != crossing_end) {
brutisso@3668 114 // If the object header crossed a small page boundary we mark the area
brutisso@3668 115 // as invalid, rounding it to page_size().
brutisso@3668 116 HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
brutisso@3668 117 HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
brutisso@3668 118 invalid = MemRegion(start, end);
brutisso@3668 119 }
brutisso@3668 120
brutisso@3668 121 ls->add_invalid_region(invalid);
iveresov@576 122 }
brutisso@3668 123 cur_top = cur_top + (words_to_fill * HeapWordSize);
brutisso@3668 124 words_left_to_fill -= words_to_fill;
duke@435 125 }
duke@435 126 }
duke@435 127 } else {
iveresov@576 128 if (!os::numa_has_static_binding()) {
duke@435 129 #ifdef ASSERT
duke@435 130 MemRegion invalid(s->top(), s->end());
duke@435 131 ls->add_invalid_region(invalid);
iveresov@576 132 #else
iveresov@576 133 if (ZapUnusedHeapArea) {
iveresov@576 134 MemRegion invalid(s->top(), s->end());
iveresov@576 135 ls->add_invalid_region(invalid);
iveresov@579 136 } else {
iveresov@579 137 return;
iveresov@579 138 }
duke@435 139 #endif
iveresov@579 140 } else {
iveresov@579 141 return;
iveresov@576 142 }
duke@435 143 }
duke@435 144 }
duke@435 145 }
duke@435 146
duke@435 147 size_t MutableNUMASpace::used_in_words() const {
duke@435 148 size_t s = 0;
duke@435 149 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 150 s += lgrp_spaces()->at(i)->space()->used_in_words();
duke@435 151 }
duke@435 152 return s;
duke@435 153 }
duke@435 154
duke@435 155 size_t MutableNUMASpace::free_in_words() const {
duke@435 156 size_t s = 0;
duke@435 157 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 158 s += lgrp_spaces()->at(i)->space()->free_in_words();
duke@435 159 }
duke@435 160 return s;
duke@435 161 }
duke@435 162
duke@435 163
duke@435 164 size_t MutableNUMASpace::tlab_capacity(Thread *thr) const {
duke@435 165 guarantee(thr != NULL, "No thread");
duke@435 166 int lgrp_id = thr->lgrp_id();
iveresov@703 167 if (lgrp_id == -1) {
iveresov@703 168 // This case can occur after the topology of the system has
iveresov@703 169 // changed. Threads can change their location; the new home
iveresov@703 170 // group will be determined during the first allocation
iveresov@703 171 // attempt. For now we can safely assume that all spaces
iveresov@703 172 // have equal size because the whole space will be reinitialized.
iveresov@703 173 if (lgrp_spaces()->length() > 0) {
iveresov@703 174 return capacity_in_bytes() / lgrp_spaces()->length();
iveresov@703 175 } else {
iveresov@703 176 assert(false, "There should be at least one locality group");
iveresov@703 177 return 0;
iveresov@703 178 }
iveresov@703 179 }
iveresov@703 180 // That's the normal case, where we know the locality group of the thread.
duke@435 181 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 182 if (i == -1) {
duke@435 183 return 0;
duke@435 184 }
duke@435 185 return lgrp_spaces()->at(i)->space()->capacity_in_bytes();
duke@435 186 }
duke@435 187
duke@435 188 size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
iveresov@703 189 // Please see the comments for tlab_capacity().
duke@435 190 guarantee(thr != NULL, "No thread");
duke@435 191 int lgrp_id = thr->lgrp_id();
iveresov@703 192 if (lgrp_id == -1) {
iveresov@703 193 if (lgrp_spaces()->length() > 0) {
iveresov@703 194 return free_in_bytes() / lgrp_spaces()->length();
iveresov@703 195 } else {
iveresov@703 196 assert(false, "There should be at least one locality group");
iveresov@703 197 return 0;
iveresov@703 198 }
iveresov@703 199 }
duke@435 200 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 201 if (i == -1) {
duke@435 202 return 0;
duke@435 203 }
duke@435 204 return lgrp_spaces()->at(i)->space()->free_in_bytes();
duke@435 205 }
duke@435 206
iveresov@808 207
iveresov@808 208 size_t MutableNUMASpace::capacity_in_words(Thread* thr) const {
iveresov@808 209 guarantee(thr != NULL, "No thread");
iveresov@808 210 int lgrp_id = thr->lgrp_id();
iveresov@808 211 if (lgrp_id == -1) {
iveresov@808 212 if (lgrp_spaces()->length() > 0) {
iveresov@808 213 return capacity_in_words() / lgrp_spaces()->length();
iveresov@808 214 } else {
iveresov@808 215 assert(false, "There should be at least one locality group");
iveresov@808 216 return 0;
iveresov@808 217 }
iveresov@808 218 }
iveresov@808 219 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
iveresov@808 220 if (i == -1) {
iveresov@808 221 return 0;
iveresov@808 222 }
iveresov@808 223 return lgrp_spaces()->at(i)->space()->capacity_in_words();
iveresov@808 224 }
iveresov@808 225
duke@435 226 // Check if the NUMA topology has changed. Add and remove spaces if needed.
duke@435 227 // The update can be forced by setting the force parameter equal to true.
duke@435 228 bool MutableNUMASpace::update_layout(bool force) {
duke@435 229 // Check if the topology has changed.
duke@435 230 bool changed = os::numa_topology_changed();
duke@435 231 if (force || changed) {
duke@435 232 // Compute lgrp intersection. Add/remove spaces.
duke@435 233 int lgrp_limit = (int)os::numa_get_groups_num();
zgu@3900 234 int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit, mtGC);
duke@435 235 int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
duke@435 236 assert(lgrp_num > 0, "There should be at least one locality group");
duke@435 237 // Add new spaces for the new nodes
duke@435 238 for (int i = 0; i < lgrp_num; i++) {
duke@435 239 bool found = false;
duke@435 240 for (int j = 0; j < lgrp_spaces()->length(); j++) {
duke@435 241 if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) {
duke@435 242 found = true;
duke@435 243 break;
duke@435 244 }
duke@435 245 }
duke@435 246 if (!found) {
iveresov@970 247 lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment()));
duke@435 248 }
duke@435 249 }
duke@435 250
duke@435 251 // Remove spaces for the removed nodes.
duke@435 252 for (int i = 0; i < lgrp_spaces()->length();) {
duke@435 253 bool found = false;
duke@435 254 for (int j = 0; j < lgrp_num; j++) {
duke@435 255 if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) {
duke@435 256 found = true;
duke@435 257 break;
duke@435 258 }
duke@435 259 }
duke@435 260 if (!found) {
duke@435 261 delete lgrp_spaces()->at(i);
duke@435 262 lgrp_spaces()->remove_at(i);
duke@435 263 } else {
duke@435 264 i++;
duke@435 265 }
duke@435 266 }
duke@435 267
zgu@3900 268 FREE_C_HEAP_ARRAY(int, lgrp_ids, mtGC);
duke@435 269
duke@435 270 if (changed) {
duke@435 271 for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
duke@435 272 thread->set_lgrp_id(-1);
duke@435 273 }
duke@435 274 }
duke@435 275 return true;
duke@435 276 }
duke@435 277 return false;
duke@435 278 }
duke@435 279
duke@435 280 // Bias region towards the first-touching lgrp. Set the right page sizes.
iveresov@576 281 void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) {
duke@435 282 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
duke@435 283 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
duke@435 284 if (end > start) {
duke@435 285 MemRegion aligned_region(start, end);
duke@435 286 assert((intptr_t)aligned_region.start() % page_size() == 0 &&
duke@435 287 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
duke@435 288 assert(region().contains(aligned_region), "Sanity");
iveresov@576 289 // First we tell the OS which page size we want in the given range. The underlying
iveresov@576 290 // large page can be broken down if we require small pages.
iveresov@576 291 os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
iveresov@576 292 // Then we uncommit the pages in the range.
iveresov@3363 293 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
iveresov@576 294 // And make them local/first-touch biased.
iveresov@576 295 os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id);
duke@435 296 }
duke@435 297 }
duke@435 298
duke@435 299 // Free all pages in the region.
duke@435 300 void MutableNUMASpace::free_region(MemRegion mr) {
duke@435 301 HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size());
duke@435 302 HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size());
duke@435 303 if (end > start) {
duke@435 304 MemRegion aligned_region(start, end);
duke@435 305 assert((intptr_t)aligned_region.start() % page_size() == 0 &&
duke@435 306 (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
duke@435 307 assert(region().contains(aligned_region), "Sanity");
iveresov@3363 308 os::free_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size());
duke@435 309 }
duke@435 310 }
duke@435 311
duke@435 312 // Update space layout. Perform adaptation.
duke@435 313 void MutableNUMASpace::update() {
duke@435 314 if (update_layout(false)) {
duke@435 315 // If the topology has changed, make all chunks zero-sized.
iveresov@703 316 // And clear the alloc-rate statistics.
iveresov@703 317 // In the future we may want to handle this more gracefully in order
iveresov@703 318 // to avoid the reallocation of the pages as much as possible.
duke@435 319 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@703 320 LGRPSpace *ls = lgrp_spaces()->at(i);
iveresov@703 321 MutableSpace *s = ls->space();
duke@435 322 s->set_end(s->bottom());
duke@435 323 s->set_top(s->bottom());
iveresov@703 324 ls->clear_alloc_rate();
duke@435 325 }
jmasa@698 326 // A NUMA space is never mangled
jmasa@698 327 initialize(region(),
jmasa@698 328 SpaceDecorator::Clear,
jmasa@698 329 SpaceDecorator::DontMangle);
duke@435 330 } else {
duke@435 331 bool should_initialize = false;
iveresov@576 332 if (!os::numa_has_static_binding()) {
iveresov@576 333 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@576 334 if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) {
iveresov@576 335 should_initialize = true;
iveresov@576 336 break;
iveresov@576 337 }
duke@435 338 }
duke@435 339 }
duke@435 340
duke@435 341 if (should_initialize ||
duke@435 342 (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) {
jmasa@698 343 // A NUMA space is never mangled
jmasa@698 344 initialize(region(),
jmasa@698 345 SpaceDecorator::Clear,
jmasa@698 346 SpaceDecorator::DontMangle);
duke@435 347 }
duke@435 348 }
duke@435 349
duke@435 350 if (NUMAStats) {
duke@435 351 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 352 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
duke@435 353 }
duke@435 354 }
duke@435 355
duke@435 356 scan_pages(NUMAPageScanRate);
duke@435 357 }
duke@435 358
duke@435 359 // Scan pages. Free pages that have the wrong size or placement.
duke@435 360 void MutableNUMASpace::scan_pages(size_t page_count)
duke@435 361 {
duke@435 362 size_t pages_per_chunk = page_count / lgrp_spaces()->length();
duke@435 363 if (pages_per_chunk > 0) {
duke@435 364 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 365 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 366 ls->scan_pages(page_size(), pages_per_chunk);
duke@435 367 }
duke@435 368 }
duke@435 369 }
duke@435 370
duke@435 371 // Accumulate statistics about the allocation rate of each lgrp.
duke@435 372 void MutableNUMASpace::accumulate_statistics() {
duke@435 373 if (UseAdaptiveNUMAChunkSizing) {
duke@435 374 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 375 lgrp_spaces()->at(i)->sample();
duke@435 376 }
duke@435 377 increment_samples_count();
duke@435 378 }
duke@435 379
duke@435 380 if (NUMAStats) {
duke@435 381 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 382 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
duke@435 383 }
duke@435 384 }
duke@435 385 }
duke@435 386
duke@435 387 // Get the current size of a chunk.
duke@435 388 // This function computes the size of the chunk based on the
duke@435 389 // difference between chunk ends. This allows it to work correctly in
duke@435 390 // case the whole space is resized and during the process of adaptive
duke@435 391 // chunk resizing.
duke@435 392 size_t MutableNUMASpace::current_chunk_size(int i) {
duke@435 393 HeapWord *cur_end, *prev_end;
duke@435 394 if (i == 0) {
duke@435 395 prev_end = bottom();
duke@435 396 } else {
duke@435 397 prev_end = lgrp_spaces()->at(i - 1)->space()->end();
duke@435 398 }
duke@435 399 if (i == lgrp_spaces()->length() - 1) {
duke@435 400 cur_end = end();
duke@435 401 } else {
duke@435 402 cur_end = lgrp_spaces()->at(i)->space()->end();
duke@435 403 }
duke@435 404 if (cur_end > prev_end) {
duke@435 405 return pointer_delta(cur_end, prev_end, sizeof(char));
duke@435 406 }
duke@435 407 return 0;
duke@435 408 }
duke@435 409
duke@435 410 // Return the default chunk size by equally dividing the space.
duke@435 411 // page_size() aligned.
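// (base_space_size() is measured in pages, so the integer division below
// yields a whole number of pages per lgrp and the result stays
// page_size()-aligned.)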
duke@435 412 size_t MutableNUMASpace::default_chunk_size() {
duke@435 413 return base_space_size() / lgrp_spaces()->length() * page_size();
duke@435 414 }
duke@435 415
duke@435 416 // Produce a new chunk size. page_size() aligned.
iveresov@826 417 // This function is expected to be called on a sequence of i's from 0 to
iveresov@826 418 // lgrp_spaces()->length().
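// Roughly: chunk i receives a share of the pages not already claimed by
// chunks 0..i-1, proportional to its average allocation rate relative to
// the remaining lgrps, and the change is then clamped by 'limit' below.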
duke@435 419 size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
duke@435 420 size_t pages_available = base_space_size();
duke@435 421 for (int j = 0; j < i; j++) {
duke@435 422 pages_available -= round_down(current_chunk_size(j), page_size()) / page_size();
duke@435 423 }
duke@435 424 pages_available -= lgrp_spaces()->length() - i - 1;
duke@435 425 assert(pages_available > 0, "No pages left");
duke@435 426 float alloc_rate = 0;
duke@435 427 for (int j = i; j < lgrp_spaces()->length(); j++) {
duke@435 428 alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average();
duke@435 429 }
duke@435 430 size_t chunk_size = 0;
duke@435 431 if (alloc_rate > 0) {
duke@435 432 LGRPSpace *ls = lgrp_spaces()->at(i);
iveresov@826 433 chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size();
duke@435 434 }
duke@435 435 chunk_size = MAX2(chunk_size, page_size());
duke@435 436
duke@435 437 if (limit > 0) {
duke@435 438 limit = round_down(limit, page_size());
duke@435 439 if (chunk_size > current_chunk_size(i)) {
iveresov@897 440 size_t upper_bound = pages_available * page_size();
iveresov@897 441 if (upper_bound > limit &&
iveresov@897 442 current_chunk_size(i) < upper_bound - limit) {
iveresov@897 443 // The resulting upper bound should not exceed the available
iveresov@897 444 // amount of memory (pages_available * page_size()).
iveresov@897 445 upper_bound = current_chunk_size(i) + limit;
iveresov@897 446 }
iveresov@897 447 chunk_size = MIN2(chunk_size, upper_bound);
duke@435 448 } else {
iveresov@897 449 size_t lower_bound = page_size();
iveresov@897 450 if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow.
iveresov@897 451 lower_bound = current_chunk_size(i) - limit;
iveresov@897 452 }
iveresov@897 453 chunk_size = MAX2(chunk_size, lower_bound);
duke@435 454 }
duke@435 455 }
duke@435 456 assert(chunk_size <= pages_available * page_size(), "Chunk size out of range");
duke@435 457 return chunk_size;
duke@435 458 }
duke@435 459
duke@435 460
duke@435 461 // Return the bottom_region and the top_region. Align them to page_size() boundary.
duke@435 462 // |------------------new_region---------------------------------|
duke@435 463 // |----bottom_region--|---intersection---|------top_region------|
duke@435 464 void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
duke@435 465 MemRegion* bottom_region, MemRegion *top_region) {
duke@435 466 // Is there bottom?
duke@435 467 if (new_region.start() < intersection.start()) { // Yes
duke@435 468 // Try to coalesce small pages into a large one.
iveresov@970 469 if (UseLargePages && page_size() >= alignment()) {
iveresov@970 470 HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment());
duke@435 471 if (new_region.contains(p)
iveresov@970 472 && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
duke@435 473 if (intersection.contains(p)) {
duke@435 474 intersection = MemRegion(p, intersection.end());
duke@435 475 } else {
duke@435 476 intersection = MemRegion(p, p);
duke@435 477 }
duke@435 478 }
duke@435 479 }
duke@435 480 *bottom_region = MemRegion(new_region.start(), intersection.start());
duke@435 481 } else {
duke@435 482 *bottom_region = MemRegion();
duke@435 483 }
duke@435 484
duke@435 485 // Is there top?
duke@435 486 if (intersection.end() < new_region.end()) { // Yes
duke@435 487 // Try to coalesce small pages into a large one.
iveresov@970 488 if (UseLargePages && page_size() >= alignment()) {
iveresov@970 489 HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment());
duke@435 490 if (new_region.contains(p)
iveresov@970 491 && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
duke@435 492 if (intersection.contains(p)) {
duke@435 493 intersection = MemRegion(intersection.start(), p);
duke@435 494 } else {
duke@435 495 intersection = MemRegion(p, p);
duke@435 496 }
duke@435 497 }
duke@435 498 }
duke@435 499 *top_region = MemRegion(intersection.end(), new_region.end());
duke@435 500 } else {
duke@435 501 *top_region = MemRegion();
duke@435 502 }
duke@435 503 }
duke@435 504
duke@435 505 // Try to merge the invalid region with the bottom or top region by decreasing
duke@435 506 // the intersection area. The invalid_region is cleared when it can be merged
duke@435 507 // into the bottom or top tail; only when it lies strictly inside the
duke@435 508 // intersection is a non-empty, page_size()-aligned invalid_region returned.
duke@435 509 // |------------------new_region---------------------------------|
duke@435 510 // |----------------|-------invalid---|--------------------------|
duke@435 511 // |----bottom_region--|---intersection---|------top_region------|
duke@435 512 void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection,
duke@435 513 MemRegion *invalid_region) {
duke@435 514 if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) {
duke@435 515 *intersection = MemRegion(invalid_region->end(), intersection->end());
duke@435 516 *invalid_region = MemRegion();
duke@435 517 } else
duke@435 518 if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) {
duke@435 519 *intersection = MemRegion(intersection->start(), invalid_region->start());
duke@435 520 *invalid_region = MemRegion();
duke@435 521 } else
duke@435 522 if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) {
duke@435 523 *intersection = MemRegion(new_region.start(), new_region.start());
duke@435 524 *invalid_region = MemRegion();
duke@435 525 } else
duke@435 526 if (intersection->contains(invalid_region)) {
duke@435 527 // That's the only case we have to make an additional bias_region() call.
duke@435 528 HeapWord* start = invalid_region->start();
duke@435 529 HeapWord* end = invalid_region->end();
iveresov@970 530 if (UseLargePages && page_size() >= alignment()) {
iveresov@970 531 HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment());
duke@435 532 if (new_region.contains(p)) {
duke@435 533 start = p;
duke@435 534 }
iveresov@970 535 p = (HeapWord*)round_to((intptr_t) end, alignment());
duke@435 536 if (new_region.contains(end)) {
duke@435 537 end = p;
duke@435 538 }
duke@435 539 }
duke@435 540 if (intersection->start() > start) {
duke@435 541 *intersection = MemRegion(start, intersection->end());
duke@435 542 }
duke@435 543 if (intersection->end() < end) {
duke@435 544 *intersection = MemRegion(intersection->start(), end);
duke@435 545 }
duke@435 546 *invalid_region = MemRegion(start, end);
duke@435 547 }
duke@435 548 }
duke@435 549
jmasa@698 550 void MutableNUMASpace::initialize(MemRegion mr,
jmasa@698 551 bool clear_space,
iveresov@970 552 bool mangle_space,
iveresov@970 553 bool setup_pages) {
duke@435 554 assert(clear_space, "Reallocation will destroy data!");
duke@435 555 assert(lgrp_spaces()->length() > 0, "There should be at least one space");
duke@435 556
duke@435 557 MemRegion old_region = region(), new_region;
duke@435 558 set_bottom(mr.start());
duke@435 559 set_end(mr.end());
jmasa@698 560 // Must always clear the space
jmasa@698 561 clear(SpaceDecorator::DontMangle);
duke@435 562
duke@435 563 // Compute chunk sizes
duke@435 564 size_t prev_page_size = page_size();
iveresov@970 565 set_page_size(UseLargePages ? alignment() : os::vm_page_size());
duke@435 566 HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
duke@435 567 HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
duke@435 568 size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
duke@435 569
duke@435 570 // Try small pages if the chunk size is too small
duke@435 571 if (base_space_size_pages / lgrp_spaces()->length() == 0
duke@435 572 && page_size() > (size_t)os::vm_page_size()) {
duke@435 573 set_page_size(os::vm_page_size());
duke@435 574 rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size());
duke@435 575 rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size());
duke@435 576 base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
duke@435 577 }
duke@435 578 guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
duke@435 579 set_base_space_size(base_space_size_pages);
duke@435 580
duke@435 581 // Handle space resize
duke@435 582 MemRegion top_region, bottom_region;
duke@435 583 if (!old_region.equals(region())) {
duke@435 584 new_region = MemRegion(rounded_bottom, rounded_end);
duke@435 585 MemRegion intersection = new_region.intersection(old_region);
duke@435 586 if (intersection.start() == NULL ||
duke@435 587 intersection.end() == NULL ||
duke@435 588 prev_page_size > page_size()) { // If the page size got smaller we have to change
duke@435 589 // the page size preference for the whole space.
duke@435 590 intersection = MemRegion(new_region.start(), new_region.start());
duke@435 591 }
duke@435 592 select_tails(new_region, intersection, &bottom_region, &top_region);
iveresov@576 593 bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id());
iveresov@576 594 bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id());
duke@435 595 }
duke@435 596
duke@435 597 // Check if the space layout has changed significantly.
duke@435 598 // This happens when the space has been resized so that either the head or
duke@435 599 // the tail chunk has become smaller than a page.
duke@435 600 bool layout_valid = UseAdaptiveNUMAChunkSizing &&
duke@435 601 current_chunk_size(0) > page_size() &&
duke@435 602 current_chunk_size(lgrp_spaces()->length() - 1) > page_size();
duke@435 603
duke@435 604
duke@435 605 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 606 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 607 MutableSpace *s = ls->space();
duke@435 608 old_region = s->region();
duke@435 609
duke@435 610 size_t chunk_byte_size = 0, old_chunk_byte_size = 0;
duke@435 611 if (i < lgrp_spaces()->length() - 1) {
duke@435 612 if (!UseAdaptiveNUMAChunkSizing ||
duke@435 613 (UseAdaptiveNUMAChunkSizing && NUMAChunkResizeWeight == 0) ||
duke@435 614 samples_count() < AdaptiveSizePolicyReadyThreshold) {
duke@435 615 // No adaptation. Divide the space equally.
duke@435 616 chunk_byte_size = default_chunk_size();
duke@435 617 } else
duke@435 618 if (!layout_valid || NUMASpaceResizeRate == 0) {
duke@435 619 // Fast adaptation. If no space resize rate is set, resize
duke@435 620 // the chunks instantly.
duke@435 621 chunk_byte_size = adaptive_chunk_size(i, 0);
duke@435 622 } else {
duke@435 623 // Slow adaptation. Resize the chunks moving no more than
duke@435 624 // NUMASpaceResizeRate bytes per collection.
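// The budget is weighted by chunk index: limit is NUMASpaceResizeRate
// divided by the triangular number n*(n+1)/2, and chunk i may grow or
// shrink by at most limit * (i + 1) bytes, so (up to page rounding) the
// total movement per collection stays within NUMASpaceResizeRate.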
duke@435 625 size_t limit = NUMASpaceResizeRate /
duke@435 626 (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2);
duke@435 627 chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size()));
duke@435 628 }
duke@435 629
duke@435 630 assert(chunk_byte_size >= page_size(), "Chunk size too small");
duke@435 631 assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check");
duke@435 632 }
duke@435 633
duke@435 634 if (i == 0) { // Bottom chunk
duke@435 635 if (i != lgrp_spaces()->length() - 1) {
duke@435 636 new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
duke@435 637 } else {
duke@435 638 new_region = MemRegion(bottom(), end());
duke@435 639 }
duke@435 640 } else
duke@435 641 if (i < lgrp_spaces()->length() - 1) { // Middle chunks
duke@435 642 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
duke@435 643 new_region = MemRegion(ps->end(),
duke@435 644 ps->end() + (chunk_byte_size >> LogHeapWordSize));
duke@435 645 } else { // Top chunk
duke@435 646 MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
duke@435 647 new_region = MemRegion(ps->end(), end());
duke@435 648 }
duke@435 649 guarantee(region().contains(new_region), "Region invariant");
duke@435 650
duke@435 651
duke@435 652 // The general case:
duke@435 653 // |---------------------|--invalid---|--------------------------|
duke@435 654 // |------------------new_region---------------------------------|
duke@435 655 // |----bottom_region--|---intersection---|------top_region------|
duke@435 656 // |----old_region----|
duke@435 657 // The intersection part has all pages in place; we don't need to migrate them.
duke@435 658 // Pages for the top and bottom part should be freed and then reallocated.
duke@435 659
duke@435 660 MemRegion intersection = old_region.intersection(new_region);
duke@435 661
duke@435 662 if (intersection.start() == NULL || intersection.end() == NULL) {
duke@435 663 intersection = MemRegion(new_region.start(), new_region.start());
duke@435 664 }
duke@435 665
iveresov@576 666 if (!os::numa_has_static_binding()) {
iveresov@576 667 MemRegion invalid_region = ls->invalid_region().intersection(new_region);
iveresov@576 668 // The invalid region is a range of memory that could possibly have
iveresov@576 669 // been allocated on another node. This is relevant only on Solaris, where
iveresov@576 670 // there is no static memory binding.
iveresov@576 671 if (!invalid_region.is_empty()) {
iveresov@576 672 merge_regions(new_region, &intersection, &invalid_region);
iveresov@576 673 free_region(invalid_region);
iveresov@576 674 ls->set_invalid_region(MemRegion());
iveresov@576 675 }
duke@435 676 }
iveresov@576 677
duke@435 678 select_tails(new_region, intersection, &bottom_region, &top_region);
iveresov@576 679
iveresov@576 680 if (!os::numa_has_static_binding()) {
iveresov@576 681 // If that's a system with the first-touch policy then it's enough
iveresov@576 682 // to free the pages.
iveresov@576 683 free_region(bottom_region);
iveresov@576 684 free_region(top_region);
iveresov@576 685 } else {
iveresov@576 686 // In a system with static binding we have to change the bias whenever
iveresov@576 687 // we reshape the heap.
iveresov@576 688 bias_region(bottom_region, ls->lgrp_id());
iveresov@576 689 bias_region(top_region, ls->lgrp_id());
iveresov@576 690 }
duke@435 691
jmasa@698 692 // Clear space (set top = bottom) but never mangle.
iveresov@970 693 s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
duke@435 694
duke@435 695 set_adaptation_cycles(samples_count());
duke@435 696 }
duke@435 697 }
duke@435 698
duke@435 699 // Set the top of the whole space.
duke@435 700 // Mark the holes in chunks below the top() as invalid.
duke@435 701 void MutableNUMASpace::set_top(HeapWord* value) {
duke@435 702 bool found_top = false;
iveresov@625 703 for (int i = 0; i < lgrp_spaces()->length();) {
duke@435 704 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 705 MutableSpace *s = ls->space();
duke@435 706 HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom());
duke@435 707
duke@435 708 if (s->contains(value)) {
iveresov@625 709 // Check if setting the chunk's top to the given value would create a hole smaller
iveresov@625 710 // than a minimal object; for the last chunk this doesn't matter, so it is skipped.
iveresov@625 711 if (i < lgrp_spaces()->length() - 1) {
iveresov@625 712 size_t remainder = pointer_delta(s->end(), value);
jcoomes@916 713 const size_t min_fill_size = CollectedHeap::min_fill_size();
jcoomes@916 714 if (remainder < min_fill_size && remainder > 0) {
jcoomes@916 715 // Add a minimum size filler object; it will cross the chunk boundary.
jcoomes@916 716 CollectedHeap::fill_with_object(value, min_fill_size);
jcoomes@916 717 value += min_fill_size;
iveresov@625 718 assert(!s->contains(value), "Should be in the next chunk");
iveresov@625 719 // Restart the loop from the same chunk, since the value has moved
iveresov@625 720 // to the next one.
iveresov@625 721 continue;
iveresov@625 722 }
iveresov@625 723 }
iveresov@625 724
iveresov@576 725 if (!os::numa_has_static_binding() && top < value && top < s->end()) {
duke@435 726 ls->add_invalid_region(MemRegion(top, value));
duke@435 727 }
duke@435 728 s->set_top(value);
duke@435 729 found_top = true;
duke@435 730 } else {
duke@435 731 if (found_top) {
duke@435 732 s->set_top(s->bottom());
duke@435 733 } else {
iveresov@576 734 if (!os::numa_has_static_binding() && top < s->end()) {
iveresov@576 735 ls->add_invalid_region(MemRegion(top, s->end()));
iveresov@576 736 }
iveresov@576 737 s->set_top(s->end());
duke@435 738 }
duke@435 739 }
iveresov@625 740 i++;
duke@435 741 }
duke@435 742 MutableSpace::set_top(value);
duke@435 743 }
duke@435 744
jmasa@698 745 void MutableNUMASpace::clear(bool mangle_space) {
duke@435 746 MutableSpace::set_top(bottom());
duke@435 747 for (int i = 0; i < lgrp_spaces()->length(); i++) {
jmasa@698 748 // Never mangle NUMA spaces because the mangling will
jmasa@698 749 // bind the memory to a possibly unwanted lgroup.
jmasa@698 750 lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle);
duke@435 751 }
duke@435 752 }
duke@435 753
iveresov@576 754 /*
iveresov@576 755   Linux supports static memory binding, so most of the logic dealing
iveresov@576 756   with possible invalid page allocation is effectively disabled. Besides,
iveresov@576 757   there is no notion of a home node in Linux. A thread is allowed to
iveresov@576 758   migrate freely, although the scheduler is rather reluctant to move
iveresov@576 759   threads between nodes. We check for the current node on every
iveresov@576 760   allocation, and with high probability a thread stays on the same
iveresov@576 761   node for some time, allowing local access to recently allocated
iveresov@576 762   objects.
iveresov@576 763  */
iveresov@576 764
duke@435 765 HeapWord* MutableNUMASpace::allocate(size_t size) {
iveresov@576 766 Thread* thr = Thread::current();
iveresov@576 767 int lgrp_id = thr->lgrp_id();
iveresov@576 768 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
duke@435 769 lgrp_id = os::numa_get_group_id();
iveresov@576 770 thr->set_lgrp_id(lgrp_id);
duke@435 771 }
duke@435 772
duke@435 773 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 774
duke@435 775 // It is possible that a new CPU has been hotplugged and
duke@435 776 // we haven't reshaped the space accordingly.
duke@435 777 if (i == -1) {
duke@435 778 i = os::random() % lgrp_spaces()->length();
duke@435 779 }
duke@435 780
iveresov@808 781 LGRPSpace* ls = lgrp_spaces()->at(i);
iveresov@808 782 MutableSpace *s = ls->space();
duke@435 783 HeapWord *p = s->allocate(size);
duke@435 784
iveresov@579 785 if (p != NULL) {
iveresov@579 786 size_t remainder = s->free_in_words();
kvn@1926 787 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
iveresov@579 788 s->set_top(s->top() - size);
iveresov@579 789 p = NULL;
iveresov@579 790 }
duke@435 791 }
duke@435 792 if (p != NULL) {
duke@435 793 if (top() < s->top()) { // Keep _top updated.
duke@435 794 MutableSpace::set_top(s->top());
duke@435 795 }
duke@435 796 }
iveresov@576 797 // Make the page allocation happen here if there is no static binding.
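// Touching one word per small page forces the OS to back the pages with
// physical memory on the node the thread is currently running on
// (first-touch placement).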
iveresov@576 798 if (p != NULL && !os::numa_has_static_binding()) {
duke@435 799 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
duke@435 800 *(int*)i = 0;
duke@435 801 }
duke@435 802 }
iveresov@808 803 if (p == NULL) {
iveresov@808 804 ls->set_allocation_failed();
iveresov@808 805 }
duke@435 806 return p;
duke@435 807 }
duke@435 808
duke@435 809 // This version is lock-free.
duke@435 810 HeapWord* MutableNUMASpace::cas_allocate(size_t size) {
iveresov@576 811 Thread* thr = Thread::current();
iveresov@576 812 int lgrp_id = thr->lgrp_id();
iveresov@576 813 if (lgrp_id == -1 || !os::numa_has_group_homing()) {
duke@435 814 lgrp_id = os::numa_get_group_id();
iveresov@576 815 thr->set_lgrp_id(lgrp_id);
duke@435 816 }
duke@435 817
duke@435 818 int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals);
duke@435 819 // It is possible that a new CPU has been hotplugged and
duke@435 820 // we haven't reshaped the space accordingly.
duke@435 821 if (i == -1) {
duke@435 822 i = os::random() % lgrp_spaces()->length();
duke@435 823 }
iveresov@808 824 LGRPSpace *ls = lgrp_spaces()->at(i);
iveresov@808 825 MutableSpace *s = ls->space();
duke@435 826 HeapWord *p = s->cas_allocate(size);
iveresov@579 827 if (p != NULL) {
iveresov@625 828 size_t remainder = pointer_delta(s->end(), p + size);
kvn@1926 829 if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
iveresov@579 830 if (s->cas_deallocate(p, size)) {
iveresov@579 831 // We were the last to allocate and created a fragment less than
iveresov@579 832 // a minimal object.
iveresov@579 833 p = NULL;
iveresov@625 834 } else {
iveresov@625 835 guarantee(false, "Deallocation should always succeed");
iveresov@579 836 }
duke@435 837 }
duke@435 838 }
duke@435 839 if (p != NULL) {
duke@435 840 HeapWord* cur_top, *cur_chunk_top = p + size;
duke@435 841 while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
duke@435 842 if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
duke@435 843 break;
duke@435 844 }
duke@435 845 }
duke@435 846 }
duke@435 847
iveresov@576 848 // Make the page allocation happen here if there is no static binding.
iveresov@576 849 if (p != NULL && !os::numa_has_static_binding() ) {
duke@435 850 for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
duke@435 851 *(int*)i = 0;
duke@435 852 }
duke@435 853 }
iveresov@808 854 if (p == NULL) {
iveresov@808 855 ls->set_allocation_failed();
iveresov@808 856 }
duke@435 857 return p;
duke@435 858 }
duke@435 859
duke@435 860 void MutableNUMASpace::print_short_on(outputStream* st) const {
duke@435 861 MutableSpace::print_short_on(st);
duke@435 862 st->print(" (");
duke@435 863 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 864 st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id());
duke@435 865 lgrp_spaces()->at(i)->space()->print_short_on(st);
duke@435 866 if (i < lgrp_spaces()->length() - 1) {
duke@435 867 st->print(", ");
duke@435 868 }
duke@435 869 }
duke@435 870 st->print(")");
duke@435 871 }
duke@435 872
duke@435 873 void MutableNUMASpace::print_on(outputStream* st) const {
duke@435 874 MutableSpace::print_on(st);
duke@435 875 for (int i = 0; i < lgrp_spaces()->length(); i++) {
duke@435 876 LGRPSpace *ls = lgrp_spaces()->at(i);
duke@435 877 st->print(" lgrp %d", ls->lgrp_id());
duke@435 878 ls->space()->print_on(st);
duke@435 879 if (NUMAStats) {
iveresov@579 880 for (int i = 0; i < lgrp_spaces()->length(); i++) {
iveresov@579 881 lgrp_spaces()->at(i)->accumulate_statistics(page_size());
iveresov@579 882 }
duke@435 883 st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n",
duke@435 884 ls->space_stats()->_local_space / K,
duke@435 885 ls->space_stats()->_remote_space / K,
duke@435 886 ls->space_stats()->_unbiased_space / K,
duke@435 887 ls->space_stats()->_uncommited_space / K,
duke@435 888 ls->space_stats()->_large_pages,
duke@435 889 ls->space_stats()->_small_pages);
duke@435 890 }
duke@435 891 }
duke@435 892 }
duke@435 893
brutisso@3711 894 void MutableNUMASpace::verify() {
iveresov@625 895 // This can be called after setting an arbitrary value to the space's top,
iveresov@625 896 // so an object can cross the chunk boundary. We ensure the parsability
iveresov@625 897 // of the space and just walk the objects in a linear fashion.
iveresov@625 898 ensure_parsability();
brutisso@3711 899 MutableSpace::verify();
duke@435 900 }
duke@435 901
duke@435 902 // Scan pages and gather stats about page placement and size.
duke@435 903 void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) {
duke@435 904 clear_space_stats();
duke@435 905 char *start = (char*)round_to((intptr_t) space()->bottom(), page_size);
duke@435 906 char* end = (char*)round_down((intptr_t) space()->end(), page_size);
duke@435 907 if (start < end) {
duke@435 908 for (char *p = start; p < end;) {
duke@435 909 os::page_info info;
duke@435 910 if (os::get_page_info(p, &info)) {
duke@435 911 if (info.size > 0) {
duke@435 912 if (info.size > (size_t)os::vm_page_size()) {
duke@435 913 space_stats()->_large_pages++;
duke@435 914 } else {
duke@435 915 space_stats()->_small_pages++;
duke@435 916 }
duke@435 917 if (info.lgrp_id == lgrp_id()) {
duke@435 918 space_stats()->_local_space += info.size;
duke@435 919 } else {
duke@435 920 space_stats()->_remote_space += info.size;
duke@435 921 }
duke@435 922 p += info.size;
duke@435 923 } else {
duke@435 924 p += os::vm_page_size();
duke@435 925 space_stats()->_uncommited_space += os::vm_page_size();
duke@435 926 }
duke@435 927 } else {
duke@435 928 return;
duke@435 929 }
duke@435 930 }
duke@435 931 }
duke@435 932 space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) +
duke@435 933 pointer_delta(space()->end(), end, sizeof(char));
duke@435 934
duke@435 935 }
duke@435 936
duke@435 937 // Scan page_count pages and verify that they have the right size and placement.
duke@435 938 // If invalid pages are found, they are freed in the hope that subsequent
duke@435 939 // reallocation will be more successful.
duke@435 940 void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count)
duke@435 941 {
duke@435 942 char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size);
duke@435 943 char* range_end = (char*)round_down((intptr_t) space()->end(), page_size);
duke@435 944
duke@435 945 if (range_start > last_page_scanned() || last_page_scanned() >= range_end) {
duke@435 946 set_last_page_scanned(range_start);
duke@435 947 }
duke@435 948
duke@435 949 char *scan_start = last_page_scanned();
duke@435 950 char* scan_end = MIN2(scan_start + page_size * page_count, range_end);
duke@435 951
duke@435 952 os::page_info page_expected, page_found;
duke@435 953 page_expected.size = page_size;
duke@435 954 page_expected.lgrp_id = lgrp_id();
duke@435 955
duke@435 956 char *s = scan_start;
duke@435 957 while (s < scan_end) {
duke@435 958 char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found);
duke@435 959 if (e == NULL) {
duke@435 960 break;
duke@435 961 }
duke@435 962 if (e != scan_end) {
duke@435 963 if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id())
duke@435 964 && page_expected.size != 0) {
iveresov@3363 965 os::free_memory(s, pointer_delta(e, s, sizeof(char)), page_size);
duke@435 966 }
duke@435 967 page_expected = page_found;
duke@435 968 }
duke@435 969 s = e;
duke@435 970 }
duke@435 971
duke@435 972 set_last_page_scanned(scan_end);
duke@435 973 }
