Sun, 25 Sep 2011 16:03:29 -0700
7089790: integrate bsd-port changes
Reviewed-by: kvn, twisti, jrose
Contributed-by: Kurt Miller <kurt@intricatesoftware.com>, Greg Lewis <glewis@eyesbeyond.com>, Jung-uk Kim <jkim@freebsd.org>, Christos Zoulas <christos@zoulas.com>, Landon Fuller <landonf@plausible.coop>, The FreeBSD Foundation <board@freebsdfoundation.org>, Michael Franz <mvfranz@gmail.com>, Roger Hoover <rhoover@apple.com>, Alexander Strange <astrange@apple.com>
duke@435 | 1 | |
duke@435 | 2 | /* |
stefank@2314 | 3 | * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 4 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 5 | * |
duke@435 | 6 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 7 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 8 | * published by the Free Software Foundation. |
duke@435 | 9 | * |
duke@435 | 10 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 13 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 14 | * accompanied this code). |
duke@435 | 15 | * |
duke@435 | 16 | * You should have received a copy of the GNU General Public License version |
duke@435 | 17 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 18 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 19 | * |
trims@1907 | 20 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 21 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 22 | * questions. |
duke@435 | 23 | * |
duke@435 | 24 | */ |
duke@435 | 25 | |
stefank@2314 | 26 | #include "precompiled.hpp" |
stefank@2314 | 27 | #include "gc_implementation/shared/mutableNUMASpace.hpp" |
stefank@2314 | 28 | #include "gc_implementation/shared/spaceDecorator.hpp" |
stefank@2314 | 29 | #include "memory/sharedHeap.hpp" |
stefank@2314 | 30 | #include "oops/oop.inline.hpp" |
stefank@2314 | 31 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 32 | # include "thread_linux.inline.hpp" |
stefank@2314 | 33 | #endif |
stefank@2314 | 34 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 35 | # include "thread_solaris.inline.hpp" |
stefank@2314 | 36 | #endif |
stefank@2314 | 37 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 38 | # include "thread_windows.inline.hpp" |
stefank@2314 | 39 | #endif |
never@3156 | 40 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 41 | # include "thread_bsd.inline.hpp" |
never@3156 | 42 | #endif |
duke@435 | 43 | |
duke@435 | 44 | |
iveresov@970 | 45 | MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment) { |
duke@435 | 46 | _lgrp_spaces = new (ResourceObj::C_HEAP) GrowableArray<LGRPSpace*>(0, true); |
duke@435 | 47 | _page_size = os::vm_page_size(); |
duke@435 | 48 | _adaptation_cycles = 0; |
duke@435 | 49 | _samples_count = 0; |
duke@435 | 50 | update_layout(true); |
duke@435 | 51 | } |
duke@435 | 52 | |
duke@435 | 53 | MutableNUMASpace::~MutableNUMASpace() { |
duke@435 | 54 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 55 | delete lgrp_spaces()->at(i); |
duke@435 | 56 | } |
duke@435 | 57 | delete lgrp_spaces(); |
duke@435 | 58 | } |
duke@435 | 59 | |
jmasa@698 | 60 | #ifndef PRODUCT |
duke@435 | 61 | void MutableNUMASpace::mangle_unused_area() { |
jmasa@698 | 62 | // This method should do nothing. |
jmasa@698 | 63 | // It can be called on a numa space during a full compaction. |
duke@435 | 64 | } |
jmasa@698 | 65 | void MutableNUMASpace::mangle_unused_area_complete() { |
jmasa@698 | 66 | // This method should do nothing. |
jmasa@698 | 67 | // It can be called on a numa space during a full compaction. |
jmasa@698 | 68 | } |
jmasa@698 | 69 | void MutableNUMASpace::mangle_region(MemRegion mr) { |
jmasa@698 | 70 | // This method should do nothing because numa spaces are not mangled. |
jmasa@698 | 71 | } |
jmasa@698 | 72 | void MutableNUMASpace::set_top_for_allocations(HeapWord* v) { |
jmasa@698 | 73 | assert(false, "Do not mangle MutableNUMASpace's"); |
jmasa@698 | 74 | } |
jmasa@698 | 75 | void MutableNUMASpace::set_top_for_allocations() { |
jmasa@698 | 76 | // This method should do nothing. |
jmasa@698 | 77 | } |
jmasa@698 | 78 | void MutableNUMASpace::check_mangled_unused_area(HeapWord* limit) { |
jmasa@698 | 79 | // This method should do nothing. |
jmasa@698 | 80 | } |
jmasa@698 | 81 | void MutableNUMASpace::check_mangled_unused_area_complete() { |
jmasa@698 | 82 | // This method should do nothing. |
jmasa@698 | 83 | } |
jmasa@698 | 84 | #endif // NOT_PRODUCT |
duke@435 | 85 | |
duke@435 | 86 | // There may be unallocated holes in the middle chunks |
duke@435 | 87 | // that should be filled with dead objects to ensure parseability. |
duke@435 | 88 | void MutableNUMASpace::ensure_parsability() { |
duke@435 | 89 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 90 | LGRPSpace *ls = lgrp_spaces()->at(i); |
duke@435 | 91 | MutableSpace *s = ls->space(); |
twisti@1040 | 92 | if (s->top() < top()) { // For all spaces preceding the one containing top() |
duke@435 | 93 | if (s->free_in_words() > 0) { |
iveresov@579 | 94 | size_t area_touched_words = pointer_delta(s->end(), s->top()); |
jcoomes@916 | 95 | CollectedHeap::fill_with_object(s->top(), area_touched_words); |
duke@435 | 96 | #ifndef ASSERT |
duke@435 | 97 | if (!ZapUnusedHeapArea) { |
duke@435 | 98 | area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)), |
duke@435 | 99 | area_touched_words); |
duke@435 | 100 | } |
duke@435 | 101 | #endif |
iveresov@576 | 102 | if (!os::numa_has_static_binding()) { |
iveresov@576 | 103 | MemRegion invalid; |
iveresov@576 | 104 | HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size()); |
iveresov@576 | 105 | HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), |
iveresov@576 | 106 | os::vm_page_size()); |
iveresov@576 | 107 | if (crossing_start != crossing_end) { |
iveresov@576 | 108 | // If the object header crossed a small page boundary, we mark the area |
iveresov@576 | 109 | // as invalid, rounding it to page_size(). |
iveresov@576 | 110 | HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); |
iveresov@576 | 111 | HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()), |
iveresov@576 | 112 | s->end()); |
iveresov@576 | 113 | invalid = MemRegion(start, end); |
iveresov@576 | 114 | } |
iveresov@576 | 115 | |
iveresov@576 | 116 | ls->add_invalid_region(invalid); |
duke@435 | 117 | } |
duke@435 | 118 | } |
duke@435 | 119 | } else { |
iveresov@576 | 120 | if (!os::numa_has_static_binding()) { |
duke@435 | 121 | #ifdef ASSERT |
duke@435 | 122 | MemRegion invalid(s->top(), s->end()); |
duke@435 | 123 | ls->add_invalid_region(invalid); |
iveresov@576 | 124 | #else |
iveresov@576 | 125 | if (ZapUnusedHeapArea) { |
iveresov@576 | 126 | MemRegion invalid(s->top(), s->end()); |
iveresov@576 | 127 | ls->add_invalid_region(invalid); |
iveresov@579 | 128 | } else { |
iveresov@579 | 129 | return; |
iveresov@579 | 130 | } |
duke@435 | 131 | #endif |
iveresov@579 | 132 | } else { |
iveresov@579 | 133 | return; |
iveresov@576 | 134 | } |
duke@435 | 135 | } |
duke@435 | 136 | } |
duke@435 | 137 | } |
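//
// A sketch of the invariant ensure_parsability() maintains (the three-chunk
// layout below is an invented example): every chunk preceding the one that
// holds the global top() gets a dead filler object over [s->top(), s->end()),
// so a linear heap walk can step over the unused tails:
//
//   |====chunk 0====|====chunk 1====|====chunk 2====|
//   |#used#|~filler~|#####used######|used|   free   |
//                                        ^ global top()
//
//   // conceptually, for each preceding chunk s with free space:
//   CollectedHeap::fill_with_object(s->top(), pointer_delta(s->end(), s->top()));
//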
duke@435 | 138 | |
duke@435 | 139 | size_t MutableNUMASpace::used_in_words() const { |
duke@435 | 140 | size_t s = 0; |
duke@435 | 141 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 142 | s += lgrp_spaces()->at(i)->space()->used_in_words(); |
duke@435 | 143 | } |
duke@435 | 144 | return s; |
duke@435 | 145 | } |
duke@435 | 146 | |
duke@435 | 147 | size_t MutableNUMASpace::free_in_words() const { |
duke@435 | 148 | size_t s = 0; |
duke@435 | 149 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 150 | s += lgrp_spaces()->at(i)->space()->free_in_words(); |
duke@435 | 151 | } |
duke@435 | 152 | return s; |
duke@435 | 153 | } |
duke@435 | 154 | |
duke@435 | 155 | |
duke@435 | 156 | size_t MutableNUMASpace::tlab_capacity(Thread *thr) const { |
duke@435 | 157 | guarantee(thr != NULL, "No thread"); |
duke@435 | 158 | int lgrp_id = thr->lgrp_id(); |
iveresov@703 | 159 | if (lgrp_id == -1) { |
iveresov@703 | 160 | // This case can occur after the topology of the system has |
iveresov@703 | 161 | // changed. Threads can change their location; the new home |
iveresov@703 | 162 | // group will be determined during the first allocation |
iveresov@703 | 163 | // attempt. For now we can safely assume that all spaces |
iveresov@703 | 164 | // have equal size because the whole space will be reinitialized. |
iveresov@703 | 165 | if (lgrp_spaces()->length() > 0) { |
iveresov@703 | 166 | return capacity_in_bytes() / lgrp_spaces()->length(); |
iveresov@703 | 167 | } else { |
iveresov@703 | 168 | assert(false, "There should be at least one locality group"); |
iveresov@703 | 169 | return 0; |
iveresov@703 | 170 | } |
iveresov@703 | 171 | } |
iveresov@703 | 172 | // That's the normal case, where we know the locality group of the thread. |
duke@435 | 173 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
duke@435 | 174 | if (i == -1) { |
duke@435 | 175 | return 0; |
duke@435 | 176 | } |
duke@435 | 177 | return lgrp_spaces()->at(i)->space()->capacity_in_bytes(); |
duke@435 | 178 | } |
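//
// A worked instance of the lgrp_id == -1 fallback above (numbers invented):
// with capacity_in_bytes() == 64M and 4 locality groups, an unhomed thread is
// quoted 64M / 4 == 16M, which matches the per-group chunk size once the
// space is reinitialized with equal chunks after the topology change.
//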
duke@435 | 179 | |
duke@435 | 180 | size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const { |
iveresov@703 | 181 | // Please see the comments for tlab_capacity(). |
duke@435 | 182 | guarantee(thr != NULL, "No thread"); |
duke@435 | 183 | int lgrp_id = thr->lgrp_id(); |
iveresov@703 | 184 | if (lgrp_id == -1) { |
iveresov@703 | 185 | if (lgrp_spaces()->length() > 0) { |
iveresov@703 | 186 | return free_in_bytes() / lgrp_spaces()->length(); |
iveresov@703 | 187 | } else { |
iveresov@703 | 188 | assert(false, "There should be at least one locality group"); |
iveresov@703 | 189 | return 0; |
iveresov@703 | 190 | } |
iveresov@703 | 191 | } |
duke@435 | 192 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
duke@435 | 193 | if (i == -1) { |
duke@435 | 194 | return 0; |
duke@435 | 195 | } |
duke@435 | 196 | return lgrp_spaces()->at(i)->space()->free_in_bytes(); |
duke@435 | 197 | } |
duke@435 | 198 | |
iveresov@808 | 199 | |
iveresov@808 | 200 | size_t MutableNUMASpace::capacity_in_words(Thread* thr) const { |
iveresov@808 | 201 | guarantee(thr != NULL, "No thread"); |
iveresov@808 | 202 | int lgrp_id = thr->lgrp_id(); |
iveresov@808 | 203 | if (lgrp_id == -1) { |
iveresov@808 | 204 | if (lgrp_spaces()->length() > 0) { |
iveresov@808 | 205 | return capacity_in_words() / lgrp_spaces()->length(); |
iveresov@808 | 206 | } else { |
iveresov@808 | 207 | assert(false, "There should be at least one locality group"); |
iveresov@808 | 208 | return 0; |
iveresov@808 | 209 | } |
iveresov@808 | 210 | } |
iveresov@808 | 211 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
iveresov@808 | 212 | if (i == -1) { |
iveresov@808 | 213 | return 0; |
iveresov@808 | 214 | } |
iveresov@808 | 215 | return lgrp_spaces()->at(i)->space()->capacity_in_words(); |
iveresov@808 | 216 | } |
iveresov@808 | 217 | |
duke@435 | 218 | // Check if the NUMA topology has changed. Add and remove spaces if needed. |
duke@435 | 219 | // The update can be forced by setting the force parameter equal to true. |
duke@435 | 220 | bool MutableNUMASpace::update_layout(bool force) { |
duke@435 | 221 | // Check if the topology has changed. |
duke@435 | 222 | bool changed = os::numa_topology_changed(); |
duke@435 | 223 | if (force || changed) { |
duke@435 | 224 | // Compute lgrp intersection. Add/remove spaces. |
duke@435 | 225 | int lgrp_limit = (int)os::numa_get_groups_num(); |
duke@435 | 226 | int *lgrp_ids = NEW_C_HEAP_ARRAY(int, lgrp_limit); |
duke@435 | 227 | int lgrp_num = (int)os::numa_get_leaf_groups(lgrp_ids, lgrp_limit); |
duke@435 | 228 | assert(lgrp_num > 0, "There should be at least one locality group"); |
duke@435 | 229 | // Add new spaces for the new nodes |
duke@435 | 230 | for (int i = 0; i < lgrp_num; i++) { |
duke@435 | 231 | bool found = false; |
duke@435 | 232 | for (int j = 0; j < lgrp_spaces()->length(); j++) { |
duke@435 | 233 | if (lgrp_spaces()->at(j)->lgrp_id() == lgrp_ids[i]) { |
duke@435 | 234 | found = true; |
duke@435 | 235 | break; |
duke@435 | 236 | } |
duke@435 | 237 | } |
duke@435 | 238 | if (!found) { |
iveresov@970 | 239 | lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment())); |
duke@435 | 240 | } |
duke@435 | 241 | } |
duke@435 | 242 | |
duke@435 | 243 | // Remove spaces for the removed nodes. |
duke@435 | 244 | for (int i = 0; i < lgrp_spaces()->length();) { |
duke@435 | 245 | bool found = false; |
duke@435 | 246 | for (int j = 0; j < lgrp_num; j++) { |
duke@435 | 247 | if (lgrp_spaces()->at(i)->lgrp_id() == lgrp_ids[j]) { |
duke@435 | 248 | found = true; |
duke@435 | 249 | break; |
duke@435 | 250 | } |
duke@435 | 251 | } |
duke@435 | 252 | if (!found) { |
duke@435 | 253 | delete lgrp_spaces()->at(i); |
duke@435 | 254 | lgrp_spaces()->remove_at(i); |
duke@435 | 255 | } else { |
duke@435 | 256 | i++; |
duke@435 | 257 | } |
duke@435 | 258 | } |
duke@435 | 259 | |
duke@435 | 260 | FREE_C_HEAP_ARRAY(int, lgrp_ids); |
duke@435 | 261 | |
duke@435 | 262 | if (changed) { |
duke@435 | 263 | for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) { |
duke@435 | 264 | thread->set_lgrp_id(-1); |
duke@435 | 265 | } |
duke@435 | 266 | } |
duke@435 | 267 | return true; |
duke@435 | 268 | } |
duke@435 | 269 | return false; |
duke@435 | 270 | } |
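//
// A worked example of the reconciliation above (lgrp ids invented): if the
// current spaces cover groups {0, 1, 2} and os::numa_get_leaf_groups() now
// reports {0, 2, 3}, the first pass appends a new LGRPSpace for group 3, the
// second pass deletes the space for group 1, and every JavaThread has its
// lgrp_id reset to -1 so its home group is re-resolved on its next allocation.
//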
duke@435 | 271 | |
duke@435 | 272 | // Bias region towards the first-touching lgrp. Set the right page sizes. |
iveresov@576 | 273 | void MutableNUMASpace::bias_region(MemRegion mr, int lgrp_id) { |
duke@435 | 274 | HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size()); |
duke@435 | 275 | HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size()); |
duke@435 | 276 | if (end > start) { |
duke@435 | 277 | MemRegion aligned_region(start, end); |
duke@435 | 278 | assert((intptr_t)aligned_region.start() % page_size() == 0 && |
duke@435 | 279 | (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment"); |
duke@435 | 280 | assert(region().contains(aligned_region), "Sanity"); |
iveresov@576 | 281 | // First we tell the OS which page size we want in the given range. The underlying |
iveresov@576 | 282 | // large page can be broken down if we require small pages. |
iveresov@576 | 283 | os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), page_size()); |
iveresov@576 | 284 | // Then we uncommit the pages in the range. |
duke@435 | 285 | os::free_memory((char*)aligned_region.start(), aligned_region.byte_size()); |
iveresov@576 | 286 | // And make them local/first-touch biased. |
iveresov@576 | 287 | os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), lgrp_id); |
duke@435 | 288 | } |
duke@435 | 289 | } |
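//
// A sketch of the inward alignment step above (addresses invented, 4K
// page_size()): mr = [0x1000800, 0x1009800) shrinks to aligned_region =
// [0x1001000, 0x1009000), so realign_memory/free_memory/numa_make_local only
// ever operate on whole pages and never touch a page shared with a
// neighboring chunk.
//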
duke@435 | 290 | |
duke@435 | 291 | // Free all pages in the region. |
duke@435 | 292 | void MutableNUMASpace::free_region(MemRegion mr) { |
duke@435 | 293 | HeapWord *start = (HeapWord*)round_to((intptr_t)mr.start(), page_size()); |
duke@435 | 294 | HeapWord *end = (HeapWord*)round_down((intptr_t)mr.end(), page_size()); |
duke@435 | 295 | if (end > start) { |
duke@435 | 296 | MemRegion aligned_region(start, end); |
duke@435 | 297 | assert((intptr_t)aligned_region.start() % page_size() == 0 && |
duke@435 | 298 | (intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment"); |
duke@435 | 299 | assert(region().contains(aligned_region), "Sanity"); |
duke@435 | 300 | os::free_memory((char*)aligned_region.start(), aligned_region.byte_size()); |
duke@435 | 301 | } |
duke@435 | 302 | } |
duke@435 | 303 | |
duke@435 | 304 | // Update space layout. Perform adaptation. |
duke@435 | 305 | void MutableNUMASpace::update() { |
duke@435 | 306 | if (update_layout(false)) { |
duke@435 | 307 | // If the topology has changed, make all chunks zero-sized |
iveresov@703 | 308 | // and clear the alloc-rate statistics. |
iveresov@703 | 309 | // In the future we may want to handle this more gracefully in order |
iveresov@703 | 310 | // to avoid the reallocation of the pages as much as possible. |
duke@435 | 311 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
iveresov@703 | 312 | LGRPSpace *ls = lgrp_spaces()->at(i); |
iveresov@703 | 313 | MutableSpace *s = ls->space(); |
duke@435 | 314 | s->set_end(s->bottom()); |
duke@435 | 315 | s->set_top(s->bottom()); |
iveresov@703 | 316 | ls->clear_alloc_rate(); |
duke@435 | 317 | } |
jmasa@698 | 318 | // A NUMA space is never mangled |
jmasa@698 | 319 | initialize(region(), |
jmasa@698 | 320 | SpaceDecorator::Clear, |
jmasa@698 | 321 | SpaceDecorator::DontMangle); |
duke@435 | 322 | } else { |
duke@435 | 323 | bool should_initialize = false; |
iveresov@576 | 324 | if (!os::numa_has_static_binding()) { |
iveresov@576 | 325 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
iveresov@576 | 326 | if (!lgrp_spaces()->at(i)->invalid_region().is_empty()) { |
iveresov@576 | 327 | should_initialize = true; |
iveresov@576 | 328 | break; |
iveresov@576 | 329 | } |
duke@435 | 330 | } |
duke@435 | 331 | } |
duke@435 | 332 | |
duke@435 | 333 | if (should_initialize || |
duke@435 | 334 | (UseAdaptiveNUMAChunkSizing && adaptation_cycles() < samples_count())) { |
jmasa@698 | 335 | // A NUMA space is never mangled |
jmasa@698 | 336 | initialize(region(), |
jmasa@698 | 337 | SpaceDecorator::Clear, |
jmasa@698 | 338 | SpaceDecorator::DontMangle); |
duke@435 | 339 | } |
duke@435 | 340 | } |
duke@435 | 341 | |
duke@435 | 342 | if (NUMAStats) { |
duke@435 | 343 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 344 | lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
duke@435 | 345 | } |
duke@435 | 346 | } |
duke@435 | 347 | |
duke@435 | 348 | scan_pages(NUMAPageScanRate); |
duke@435 | 349 | } |
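//
// The re-layout trigger above is sample-driven: accumulate_statistics() bumps
// samples_count() once per sampling interval, and initialize() records that
// value via set_adaptation_cycles(). A concrete trace (numbers invented):
//
//   samples_count() == 7, adaptation_cycles() == 5
//     => 5 < 7, so with UseAdaptiveNUMAChunkSizing the space is re-laid out;
//   after initialize(): adaptation_cycles() == 7, and update() stays a no-op
//   until new samples arrive or an invalid region appears.
//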
duke@435 | 350 | |
duke@435 | 351 | // Scan pages. Free pages that have smaller size or wrong placement. |
duke@435 | 352 | void MutableNUMASpace::scan_pages(size_t page_count) |
duke@435 | 353 | { |
duke@435 | 354 | size_t pages_per_chunk = page_count / lgrp_spaces()->length(); |
duke@435 | 355 | if (pages_per_chunk > 0) { |
duke@435 | 356 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 357 | LGRPSpace *ls = lgrp_spaces()->at(i); |
duke@435 | 358 | ls->scan_pages(page_size(), pages_per_chunk); |
duke@435 | 359 | } |
duke@435 | 360 | } |
duke@435 | 361 | } |
duke@435 | 362 | |
duke@435 | 363 | // Accumulate statistics about the allocation rate of each lgrp. |
duke@435 | 364 | void MutableNUMASpace::accumulate_statistics() { |
duke@435 | 365 | if (UseAdaptiveNUMAChunkSizing) { |
duke@435 | 366 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 367 | lgrp_spaces()->at(i)->sample(); |
duke@435 | 368 | } |
duke@435 | 369 | increment_samples_count(); |
duke@435 | 370 | } |
duke@435 | 371 | |
duke@435 | 372 | if (NUMAStats) { |
duke@435 | 373 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 374 | lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
duke@435 | 375 | } |
duke@435 | 376 | } |
duke@435 | 377 | } |
duke@435 | 378 | |
duke@435 | 379 | // Get the current size of a chunk. |
duke@435 | 380 | // This function computes the size of the chunk based on the |
duke@435 | 381 | // difference between chunk ends. This allows it to work correctly in |
duke@435 | 382 | // case the whole space is resized and during the process of adaptive |
duke@435 | 383 | // chunk resizing. |
duke@435 | 384 | size_t MutableNUMASpace::current_chunk_size(int i) { |
duke@435 | 385 | HeapWord *cur_end, *prev_end; |
duke@435 | 386 | if (i == 0) { |
duke@435 | 387 | prev_end = bottom(); |
duke@435 | 388 | } else { |
duke@435 | 389 | prev_end = lgrp_spaces()->at(i - 1)->space()->end(); |
duke@435 | 390 | } |
duke@435 | 391 | if (i == lgrp_spaces()->length() - 1) { |
duke@435 | 392 | cur_end = end(); |
duke@435 | 393 | } else { |
duke@435 | 394 | cur_end = lgrp_spaces()->at(i)->space()->end(); |
duke@435 | 395 | } |
duke@435 | 396 | if (cur_end > prev_end) { |
duke@435 | 397 | return pointer_delta(cur_end, prev_end, sizeof(char)); |
duke@435 | 398 | } |
duke@435 | 399 | return 0; |
duke@435 | 400 | } |
duke@435 | 401 | |
duke@435 | 402 | // Return the default chunk size by equally dividing the space. |
duke@435 | 403 | // page_size() aligned. |
duke@435 | 404 | size_t MutableNUMASpace::default_chunk_size() { |
duke@435 | 405 | return base_space_size() / lgrp_spaces()->length() * page_size(); |
duke@435 | 406 | } |
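//
// E.g. (numbers invented): base_space_size() == 1024 pages and 3 locality
// groups give 1024 / 3 * page_size() == 341 pages per chunk; the integer
// division keeps the result page_size() aligned, and the remainder pages are
// absorbed by the top chunk, which always extends to end().
//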
duke@435 | 407 | |
duke@435 | 408 | // Produce a new chunk size. page_size() aligned. |
iveresov@826 | 409 | // This function is expected to be called on a sequence of i's from 0 to |
iveresov@826 | 410 | // lgrp_spaces()->length(). |
duke@435 | 411 | size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) { |
duke@435 | 412 | size_t pages_available = base_space_size(); |
duke@435 | 413 | for (int j = 0; j < i; j++) { |
duke@435 | 414 | pages_available -= round_down(current_chunk_size(j), page_size()) / page_size(); |
duke@435 | 415 | } |
duke@435 | 416 | pages_available -= lgrp_spaces()->length() - i - 1; |
duke@435 | 417 | assert(pages_available > 0, "No pages left"); |
duke@435 | 418 | float alloc_rate = 0; |
duke@435 | 419 | for (int j = i; j < lgrp_spaces()->length(); j++) { |
duke@435 | 420 | alloc_rate += lgrp_spaces()->at(j)->alloc_rate()->average(); |
duke@435 | 421 | } |
duke@435 | 422 | size_t chunk_size = 0; |
duke@435 | 423 | if (alloc_rate > 0) { |
duke@435 | 424 | LGRPSpace *ls = lgrp_spaces()->at(i); |
iveresov@826 | 425 | chunk_size = (size_t)(ls->alloc_rate()->average() / alloc_rate * pages_available) * page_size(); |
duke@435 | 426 | } |
duke@435 | 427 | chunk_size = MAX2(chunk_size, page_size()); |
duke@435 | 428 | |
duke@435 | 429 | if (limit > 0) { |
duke@435 | 430 | limit = round_down(limit, page_size()); |
duke@435 | 431 | if (chunk_size > current_chunk_size(i)) { |
iveresov@897 | 432 | size_t upper_bound = pages_available * page_size(); |
iveresov@897 | 433 | if (upper_bound > limit && |
iveresov@897 | 434 | current_chunk_size(i) < upper_bound - limit) { |
iveresov@897 | 435 | // The resulting upper bound should not exceed the available |
iveresov@897 | 436 | // amount of memory (pages_available * page_size()). |
iveresov@897 | 437 | upper_bound = current_chunk_size(i) + limit; |
iveresov@897 | 438 | } |
iveresov@897 | 439 | chunk_size = MIN2(chunk_size, upper_bound); |
duke@435 | 440 | } else { |
iveresov@897 | 441 | size_t lower_bound = page_size(); |
iveresov@897 | 442 | if (current_chunk_size(i) > limit) { // lower_bound shouldn't underflow. |
iveresov@897 | 443 | lower_bound = current_chunk_size(i) - limit; |
iveresov@897 | 444 | } |
iveresov@897 | 445 | chunk_size = MAX2(chunk_size, lower_bound); |
duke@435 | 446 | } |
duke@435 | 447 | } |
duke@435 | 448 | assert(chunk_size <= pages_available * page_size(), "Chunk size out of range"); |
duke@435 | 449 | return chunk_size; |
duke@435 | 450 | } |
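//
// A worked example (all numbers invented): base_space_size() == 122 pages,
// 3 locality groups with average alloc rates 2:1:1, limit == 0. At i == 0,
// pages_available = 122 - 2 (one page reserved for each remaining chunk), so
// chunk 0 gets (2/4) * 120 == 60 pages * page_size(). At i == 1 those 60
// pages are subtracted first, and the 1:1 tail splits what remains. With a
// nonzero limit the result is additionally clamped to stay within +/- limit
// of current_chunk_size(i).
//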
duke@435 | 451 | |
duke@435 | 452 | |
duke@435 | 453 | // Return the bottom_region and the top_region. Align them to page_size() boundary. |
duke@435 | 454 | // |------------------new_region---------------------------------| |
duke@435 | 455 | // |----bottom_region--|---intersection---|------top_region------| |
duke@435 | 456 | void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection, |
duke@435 | 457 | MemRegion* bottom_region, MemRegion *top_region) { |
duke@435 | 458 | // Is there bottom? |
duke@435 | 459 | if (new_region.start() < intersection.start()) { // Yes |
duke@435 | 460 | // Try to coalesce small pages into a large one. |
iveresov@970 | 461 | if (UseLargePages && page_size() >= alignment()) { |
iveresov@970 | 462 | HeapWord* p = (HeapWord*)round_to((intptr_t) intersection.start(), alignment()); |
duke@435 | 463 | if (new_region.contains(p) |
iveresov@970 | 464 | && pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) { |
duke@435 | 465 | if (intersection.contains(p)) { |
duke@435 | 466 | intersection = MemRegion(p, intersection.end()); |
duke@435 | 467 | } else { |
duke@435 | 468 | intersection = MemRegion(p, p); |
duke@435 | 469 | } |
duke@435 | 470 | } |
duke@435 | 471 | } |
duke@435 | 472 | *bottom_region = MemRegion(new_region.start(), intersection.start()); |
duke@435 | 473 | } else { |
duke@435 | 474 | *bottom_region = MemRegion(); |
duke@435 | 475 | } |
duke@435 | 476 | |
duke@435 | 477 | // Is there top? |
duke@435 | 478 | if (intersection.end() < new_region.end()) { // Yes |
duke@435 | 479 | // Try to coalesce small pages into a large one. |
iveresov@970 | 480 | if (UseLargePages && page_size() >= alignment()) { |
iveresov@970 | 481 | HeapWord* p = (HeapWord*)round_down((intptr_t) intersection.end(), alignment()); |
duke@435 | 482 | if (new_region.contains(p) |
iveresov@970 | 483 | && pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) { |
duke@435 | 484 | if (intersection.contains(p)) { |
duke@435 | 485 | intersection = MemRegion(intersection.start(), p); |
duke@435 | 486 | } else { |
duke@435 | 487 | intersection = MemRegion(p, p); |
duke@435 | 488 | } |
duke@435 | 489 | } |
duke@435 | 490 | } |
duke@435 | 491 | *top_region = MemRegion(intersection.end(), new_region.end()); |
duke@435 | 492 | } else { |
duke@435 | 493 | *top_region = MemRegion(); |
duke@435 | 494 | } |
duke@435 | 495 | } |
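//
// A sketch of the large-page coalescing above (addresses invented, 2M
// alignment()): with new_region = [0x0, 0x600000) and intersection =
// [0x1ff000, 0x400000), round_to() moves the intersection start up to
// 0x200000, growing bottom_region to [0x0, 0x200000). The bottom tail can
// then be re-biased in whole 2M pages instead of splintering the large page
// that straddles 0x1ff000.
//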
duke@435 | 496 | |
duke@435 | 497 | // Try to merge the invalid region with the bottom or top region by decreasing |
duke@435 | 498 | // the intersection area. If the merge succeeds, the invalid_region is |
duke@435 | 499 | // returned empty; if the invalid region lies strictly inside the |
duke@435 | 500 | // intersection, it is returned non-empty, aligned to the page_size() boundary. |
duke@435 | 501 | // |------------------new_region---------------------------------| |
duke@435 | 502 | // |----------------|-------invalid---|--------------------------| |
duke@435 | 503 | // |----bottom_region--|---intersection---|------top_region------| |
duke@435 | 504 | void MutableNUMASpace::merge_regions(MemRegion new_region, MemRegion* intersection, |
duke@435 | 505 | MemRegion *invalid_region) { |
duke@435 | 506 | if (intersection->start() >= invalid_region->start() && intersection->contains(invalid_region->end())) { |
duke@435 | 507 | *intersection = MemRegion(invalid_region->end(), intersection->end()); |
duke@435 | 508 | *invalid_region = MemRegion(); |
duke@435 | 509 | } else |
duke@435 | 510 | if (intersection->end() <= invalid_region->end() && intersection->contains(invalid_region->start())) { |
duke@435 | 511 | *intersection = MemRegion(intersection->start(), invalid_region->start()); |
duke@435 | 512 | *invalid_region = MemRegion(); |
duke@435 | 513 | } else |
duke@435 | 514 | if (intersection->equals(*invalid_region) || invalid_region->contains(*intersection)) { |
duke@435 | 515 | *intersection = MemRegion(new_region.start(), new_region.start()); |
duke@435 | 516 | *invalid_region = MemRegion(); |
duke@435 | 517 | } else |
duke@435 | 518 | if (intersection->contains(*invalid_region)) { |
duke@435 | 519 | // That's the only case we have to make an additional bias_region() call. |
duke@435 | 520 | HeapWord* start = invalid_region->start(); |
duke@435 | 521 | HeapWord* end = invalid_region->end(); |
iveresov@970 | 522 | if (UseLargePages && page_size() >= alignment()) { |
iveresov@970 | 523 | HeapWord *p = (HeapWord*)round_down((intptr_t) start, alignment()); |
duke@435 | 524 | if (new_region.contains(p)) { |
duke@435 | 525 | start = p; |
duke@435 | 526 | } |
iveresov@970 | 527 | p = (HeapWord*)round_to((intptr_t) end, alignment()); |
duke@435 | 528 | if (new_region.contains(p)) { |
duke@435 | 529 | end = p; |
duke@435 | 530 | } |
duke@435 | 531 | } |
duke@435 | 532 | if (intersection->start() > start) { |
duke@435 | 533 | *intersection = MemRegion(start, intersection->end()); |
duke@435 | 534 | } |
duke@435 | 535 | if (intersection->end() < end) { |
duke@435 | 536 | *intersection = MemRegion(intersection->start(), end); |
duke@435 | 537 | } |
duke@435 | 538 | *invalid_region = MemRegion(start, end); |
duke@435 | 539 | } |
duke@435 | 540 | } |
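//
// A concrete instance of the first case above (addresses invented):
// intersection = [0x2000, 0x6000), invalid = [0x1000, 0x3000). The invalid
// range hangs off the bottom edge, so the intersection shrinks to
// [0x3000, 0x6000) and the invalid part is absorbed into bottom_region,
// which the caller subsequently frees or re-biases wholesale.
//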
duke@435 | 541 | |
jmasa@698 | 542 | void MutableNUMASpace::initialize(MemRegion mr, |
jmasa@698 | 543 | bool clear_space, |
iveresov@970 | 544 | bool mangle_space, |
iveresov@970 | 545 | bool setup_pages) { |
duke@435 | 546 | assert(clear_space, "Reallocation will destroy data!"); |
duke@435 | 547 | assert(lgrp_spaces()->length() > 0, "There should be at least one space"); |
duke@435 | 548 | |
duke@435 | 549 | MemRegion old_region = region(), new_region; |
duke@435 | 550 | set_bottom(mr.start()); |
duke@435 | 551 | set_end(mr.end()); |
jmasa@698 | 552 | // Must always clear the space |
jmasa@698 | 553 | clear(SpaceDecorator::DontMangle); |
duke@435 | 554 | |
duke@435 | 555 | // Compute chunk sizes |
duke@435 | 556 | size_t prev_page_size = page_size(); |
iveresov@970 | 557 | set_page_size(UseLargePages ? alignment() : os::vm_page_size()); |
duke@435 | 558 | HeapWord* rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size()); |
duke@435 | 559 | HeapWord* rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size()); |
duke@435 | 560 | size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); |
duke@435 | 561 | |
duke@435 | 562 | // Try small pages if the chunk size is too small |
duke@435 | 563 | if (base_space_size_pages / lgrp_spaces()->length() == 0 |
duke@435 | 564 | && page_size() > (size_t)os::vm_page_size()) { |
duke@435 | 565 | set_page_size(os::vm_page_size()); |
duke@435 | 566 | rounded_bottom = (HeapWord*)round_to((intptr_t) bottom(), page_size()); |
duke@435 | 567 | rounded_end = (HeapWord*)round_down((intptr_t) end(), page_size()); |
duke@435 | 568 | base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size(); |
duke@435 | 569 | } |
duke@435 | 570 | guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small"); |
duke@435 | 571 | set_base_space_size(base_space_size_pages); |
duke@435 | 572 | |
duke@435 | 573 | // Handle space resize |
duke@435 | 574 | MemRegion top_region, bottom_region; |
duke@435 | 575 | if (!old_region.equals(region())) { |
duke@435 | 576 | new_region = MemRegion(rounded_bottom, rounded_end); |
duke@435 | 577 | MemRegion intersection = new_region.intersection(old_region); |
duke@435 | 578 | if (intersection.start() == NULL || |
duke@435 | 579 | intersection.end() == NULL || |
duke@435 | 580 | prev_page_size > page_size()) { // If the page size got smaller we have to change |
duke@435 | 581 | // the page size preference for the whole space. |
duke@435 | 582 | intersection = MemRegion(new_region.start(), new_region.start()); |
duke@435 | 583 | } |
duke@435 | 584 | select_tails(new_region, intersection, &bottom_region, &top_region); |
iveresov@576 | 585 | bias_region(bottom_region, lgrp_spaces()->at(0)->lgrp_id()); |
iveresov@576 | 586 | bias_region(top_region, lgrp_spaces()->at(lgrp_spaces()->length() - 1)->lgrp_id()); |
duke@435 | 587 | } |
duke@435 | 588 | |
duke@435 | 589 | // Check if the space layout has changed significantly. |
duke@435 | 590 | // This happens when the space has been resized so that either head or tail |
duke@435 | 591 | // chunk became less than a page. |
duke@435 | 592 | bool layout_valid = UseAdaptiveNUMAChunkSizing && |
duke@435 | 593 | current_chunk_size(0) > page_size() && |
duke@435 | 594 | current_chunk_size(lgrp_spaces()->length() - 1) > page_size(); |
duke@435 | 595 | |
duke@435 | 596 | |
duke@435 | 597 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 598 | LGRPSpace *ls = lgrp_spaces()->at(i); |
duke@435 | 599 | MutableSpace *s = ls->space(); |
duke@435 | 600 | old_region = s->region(); |
duke@435 | 601 | |
duke@435 | 602 | size_t chunk_byte_size = 0, old_chunk_byte_size = 0; |
duke@435 | 603 | if (i < lgrp_spaces()->length() - 1) { |
duke@435 | 604 | if (!UseAdaptiveNUMAChunkSizing || |
duke@435 | 605 | NUMAChunkResizeWeight == 0 || |
duke@435 | 606 | samples_count() < AdaptiveSizePolicyReadyThreshold) { |
duke@435 | 607 | // No adaptation. Divide the space equally. |
duke@435 | 608 | chunk_byte_size = default_chunk_size(); |
duke@435 | 609 | } else |
duke@435 | 610 | if (!layout_valid || NUMASpaceResizeRate == 0) { |
duke@435 | 611 | // Fast adaptation. If no space resize rate is set, resize |
duke@435 | 612 | // the chunks instantly. |
duke@435 | 613 | chunk_byte_size = adaptive_chunk_size(i, 0); |
duke@435 | 614 | } else { |
duke@435 | 615 | // Slow adaptation. Resize the chunks moving no more than |
duke@435 | 616 | // NUMASpaceResizeRate bytes per collection. |
duke@435 | 617 | size_t limit = NUMASpaceResizeRate / |
duke@435 | 618 | (lgrp_spaces()->length() * (lgrp_spaces()->length() + 1) / 2); |
duke@435 | 619 | chunk_byte_size = adaptive_chunk_size(i, MAX2(limit * (i + 1), page_size())); |
duke@435 | 620 | } |
duke@435 | 621 | |
duke@435 | 622 | assert(chunk_byte_size >= page_size(), "Chunk size too small"); |
duke@435 | 623 | assert(chunk_byte_size <= capacity_in_bytes(), "Sanity check"); |
duke@435 | 624 | } |
duke@435 | 625 | |
duke@435 | 626 | if (i == 0) { // Bottom chunk |
duke@435 | 627 | if (i != lgrp_spaces()->length() - 1) { |
duke@435 | 628 | new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize)); |
duke@435 | 629 | } else { |
duke@435 | 630 | new_region = MemRegion(bottom(), end()); |
duke@435 | 631 | } |
duke@435 | 632 | } else |
duke@435 | 633 | if (i < lgrp_spaces()->length() - 1) { // Middle chunks |
duke@435 | 634 | MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); |
duke@435 | 635 | new_region = MemRegion(ps->end(), |
duke@435 | 636 | ps->end() + (chunk_byte_size >> LogHeapWordSize)); |
duke@435 | 637 | } else { // Top chunk |
duke@435 | 638 | MutableSpace *ps = lgrp_spaces()->at(i - 1)->space(); |
duke@435 | 639 | new_region = MemRegion(ps->end(), end()); |
duke@435 | 640 | } |
duke@435 | 641 | guarantee(region().contains(new_region), "Region invariant"); |
duke@435 | 642 | |
duke@435 | 643 | |
duke@435 | 644 | // The general case: |
duke@435 | 645 | // |---------------------|--invalid---|--------------------------| |
duke@435 | 646 | // |------------------new_region---------------------------------| |
duke@435 | 647 | // |----bottom_region--|---intersection---|------top_region------| |
duke@435 | 648 | // |----old_region----| |
duke@435 | 649 | // The intersection part has all pages in place; we don't need to migrate them. |
duke@435 | 650 | // Pages for the top and bottom part should be freed and then reallocated. |
duke@435 | 651 | |
duke@435 | 652 | MemRegion intersection = old_region.intersection(new_region); |
duke@435 | 653 | |
duke@435 | 654 | if (intersection.start() == NULL || intersection.end() == NULL) { |
duke@435 | 655 | intersection = MemRegion(new_region.start(), new_region.start()); |
duke@435 | 656 | } |
duke@435 | 657 | |
iveresov@576 | 658 | if (!os::numa_has_static_binding()) { |
iveresov@576 | 659 | MemRegion invalid_region = ls->invalid_region().intersection(new_region); |
iveresov@576 | 660 | // Invalid region is a range of memory that could've possibly |
iveresov@576 | 661 | // been allocated on the other node. That's relevant only on Solaris where |
iveresov@576 | 662 | // there is no static memory binding. |
iveresov@576 | 663 | if (!invalid_region.is_empty()) { |
iveresov@576 | 664 | merge_regions(new_region, &intersection, &invalid_region); |
iveresov@576 | 665 | free_region(invalid_region); |
iveresov@576 | 666 | ls->set_invalid_region(MemRegion()); |
iveresov@576 | 667 | } |
duke@435 | 668 | } |
iveresov@576 | 669 | |
duke@435 | 670 | select_tails(new_region, intersection, &bottom_region, &top_region); |
iveresov@576 | 671 | |
iveresov@576 | 672 | if (!os::numa_has_static_binding()) { |
iveresov@576 | 673 | // If that's a system with the first-touch policy then it's enough |
iveresov@576 | 674 | // to free the pages. |
iveresov@576 | 675 | free_region(bottom_region); |
iveresov@576 | 676 | free_region(top_region); |
iveresov@576 | 677 | } else { |
iveresov@576 | 678 | // In a system with static binding we have to change the bias whenever |
iveresov@576 | 679 | // we reshape the heap. |
iveresov@576 | 680 | bias_region(bottom_region, ls->lgrp_id()); |
iveresov@576 | 681 | bias_region(top_region, ls->lgrp_id()); |
iveresov@576 | 682 | } |
duke@435 | 683 | |
jmasa@698 | 684 | // Clear space (set top = bottom) but never mangle. |
iveresov@970 | 685 | s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages); |
duke@435 | 686 | |
duke@435 | 687 | set_adaptation_cycles(samples_count()); |
duke@435 | 688 | } |
duke@435 | 689 | } |
duke@435 | 690 | |
duke@435 | 691 | // Set the top of the whole space. |
duke@435 | 692 | // Mark the holes in chunks below top() as invalid. |
duke@435 | 693 | void MutableNUMASpace::set_top(HeapWord* value) { |
duke@435 | 694 | bool found_top = false; |
iveresov@625 | 695 | for (int i = 0; i < lgrp_spaces()->length();) { |
duke@435 | 696 | LGRPSpace *ls = lgrp_spaces()->at(i); |
duke@435 | 697 | MutableSpace *s = ls->space(); |
duke@435 | 698 | HeapWord *top = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); |
duke@435 | 699 | |
duke@435 | 700 | if (s->contains(value)) { |
iveresov@625 | 701 | // Check if setting the chunk's top to a given value would create a hole less than |
iveresov@625 | 702 | // a minimal object (unless this is the last chunk, in which case we don't care). |
iveresov@625 | 703 | if (i < lgrp_spaces()->length() - 1) { |
iveresov@625 | 704 | size_t remainder = pointer_delta(s->end(), value); |
jcoomes@916 | 705 | const size_t min_fill_size = CollectedHeap::min_fill_size(); |
jcoomes@916 | 706 | if (remainder < min_fill_size && remainder > 0) { |
jcoomes@916 | 707 | // Add a minimum size filler object; it will cross the chunk boundary. |
jcoomes@916 | 708 | CollectedHeap::fill_with_object(value, min_fill_size); |
jcoomes@916 | 709 | value += min_fill_size; |
iveresov@625 | 710 | assert(!s->contains(value), "Should be in the next chunk"); |
iveresov@625 | 711 | // Restart the loop from the same chunk, since the value has moved |
iveresov@625 | 712 | // to the next one. |
iveresov@625 | 713 | continue; |
iveresov@625 | 714 | } |
iveresov@625 | 715 | } |
iveresov@625 | 716 | |
iveresov@576 | 717 | if (!os::numa_has_static_binding() && top < value && top < s->end()) { |
duke@435 | 718 | ls->add_invalid_region(MemRegion(top, value)); |
duke@435 | 719 | } |
duke@435 | 720 | s->set_top(value); |
duke@435 | 721 | found_top = true; |
duke@435 | 722 | } else { |
duke@435 | 723 | if (found_top) { |
duke@435 | 724 | s->set_top(s->bottom()); |
duke@435 | 725 | } else { |
iveresov@576 | 726 | if (!os::numa_has_static_binding() && top < s->end()) { |
iveresov@576 | 727 | ls->add_invalid_region(MemRegion(top, s->end())); |
iveresov@576 | 728 | } |
iveresov@576 | 729 | s->set_top(s->end()); |
duke@435 | 730 | } |
duke@435 | 731 | } |
iveresov@625 | 732 | i++; |
duke@435 | 733 | } |
duke@435 | 734 | MutableSpace::set_top(value); |
duke@435 | 735 | } |
duke@435 | 736 | |
jmasa@698 | 737 | void MutableNUMASpace::clear(bool mangle_space) { |
duke@435 | 738 | MutableSpace::set_top(bottom()); |
duke@435 | 739 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
jmasa@698 | 740 | // Never mangle NUMA spaces because the mangling will |
jmasa@698 | 741 | // bind the memory to a possibly unwanted lgroup. |
jmasa@698 | 742 | lgrp_spaces()->at(i)->space()->clear(SpaceDecorator::DontMangle); |
duke@435 | 743 | } |
duke@435 | 744 | } |
duke@435 | 745 | |
iveresov@576 | 746 | /* |
iveresov@576 | 747 | Linux supports static memory binding, therefore most of the logic |
iveresov@576 | 748 | dealing with possible invalid page allocation is effectively |
iveresov@576 | 749 | disabled. Besides, there is no notion of a home node in Linux: a |
iveresov@576 | 750 | thread is allowed to migrate freely, although the scheduler is rather |
iveresov@576 | 751 | reluctant to move threads between nodes. We check for the current |
iveresov@576 | 752 | node on every allocation, and with high probability a thread stays on |
iveresov@576 | 753 | the same node for some time, allowing local access to recently |
iveresov@576 | 754 | allocated objects. |
iveresov@576 | 755 | */ |
iveresov@576 | 756 | |
duke@435 | 757 | HeapWord* MutableNUMASpace::allocate(size_t size) { |
iveresov@576 | 758 | Thread* thr = Thread::current(); |
iveresov@576 | 759 | int lgrp_id = thr->lgrp_id(); |
iveresov@576 | 760 | if (lgrp_id == -1 || !os::numa_has_group_homing()) { |
duke@435 | 761 | lgrp_id = os::numa_get_group_id(); |
iveresov@576 | 762 | thr->set_lgrp_id(lgrp_id); |
duke@435 | 763 | } |
duke@435 | 764 | |
duke@435 | 765 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
duke@435 | 766 | |
duke@435 | 767 | // It is possible that a new CPU has been hotplugged and |
duke@435 | 768 | // we haven't reshaped the space accordingly. |
duke@435 | 769 | if (i == -1) { |
duke@435 | 770 | i = os::random() % lgrp_spaces()->length(); |
duke@435 | 771 | } |
duke@435 | 772 | |
iveresov@808 | 773 | LGRPSpace* ls = lgrp_spaces()->at(i); |
iveresov@808 | 774 | MutableSpace *s = ls->space(); |
duke@435 | 775 | HeapWord *p = s->allocate(size); |
duke@435 | 776 | |
iveresov@579 | 777 | if (p != NULL) { |
iveresov@579 | 778 | size_t remainder = s->free_in_words(); |
kvn@1926 | 779 | if (remainder < CollectedHeap::min_fill_size() && remainder > 0) { |
iveresov@579 | 780 | s->set_top(s->top() - size); |
iveresov@579 | 781 | p = NULL; |
iveresov@579 | 782 | } |
duke@435 | 783 | } |
duke@435 | 784 | if (p != NULL) { |
duke@435 | 785 | if (top() < s->top()) { // Keep _top updated. |
duke@435 | 786 | MutableSpace::set_top(s->top()); |
duke@435 | 787 | } |
duke@435 | 788 | } |
iveresov@576 | 789 | // Make the page allocation happen here if there is no static binding. |
iveresov@576 | 790 | if (p != NULL && !os::numa_has_static_binding()) { |
duke@435 | 791 | for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { |
duke@435 | 792 | *(int*)i = 0; |
duke@435 | 793 | } |
duke@435 | 794 | } |
iveresov@808 | 795 | if (p == NULL) { |
iveresov@808 | 796 | ls->set_allocation_failed(); |
iveresov@808 | 797 | } |
duke@435 | 798 | return p; |
duke@435 | 799 | } |
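//
// The zeroing loop above is a first-touch trick rather than initialization:
// writing one int per small page forces the OS to commit each page while the
// allocating thread is (with high probability) still on its home node. A
// stand-alone sketch of the same idiom (first_touch is a hypothetical helper,
// assuming a first-touch OS placement policy):
//
//   static void first_touch(char* base, size_t bytes, size_t page_bytes) {
//     for (char* p = base; p < base + bytes; p += page_bytes) {
//       *(volatile int*)p = 0;  // fault the page in on this thread's node
//     }
//   }
//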
duke@435 | 800 | |
duke@435 | 801 | // This version is lock-free. |
duke@435 | 802 | HeapWord* MutableNUMASpace::cas_allocate(size_t size) { |
iveresov@576 | 803 | Thread* thr = Thread::current(); |
iveresov@576 | 804 | int lgrp_id = thr->lgrp_id(); |
iveresov@576 | 805 | if (lgrp_id == -1 || !os::numa_has_group_homing()) { |
duke@435 | 806 | lgrp_id = os::numa_get_group_id(); |
iveresov@576 | 807 | thr->set_lgrp_id(lgrp_id); |
duke@435 | 808 | } |
duke@435 | 809 | |
duke@435 | 810 | int i = lgrp_spaces()->find(&lgrp_id, LGRPSpace::equals); |
duke@435 | 811 | // It is possible that a new CPU has been hotplugged and |
duke@435 | 812 | // we haven't reshaped the space accordingly. |
duke@435 | 813 | if (i == -1) { |
duke@435 | 814 | i = os::random() % lgrp_spaces()->length(); |
duke@435 | 815 | } |
iveresov@808 | 816 | LGRPSpace *ls = lgrp_spaces()->at(i); |
iveresov@808 | 817 | MutableSpace *s = ls->space(); |
duke@435 | 818 | HeapWord *p = s->cas_allocate(size); |
iveresov@579 | 819 | if (p != NULL) { |
iveresov@625 | 820 | size_t remainder = pointer_delta(s->end(), p + size); |
kvn@1926 | 821 | if (remainder < CollectedHeap::min_fill_size() && remainder > 0) { |
iveresov@579 | 822 | if (s->cas_deallocate(p, size)) { |
iveresov@579 | 823 | // We were the last to allocate and created a fragment less than |
iveresov@579 | 824 | // a minimal object. |
iveresov@579 | 825 | p = NULL; |
iveresov@625 | 826 | } else { |
iveresov@625 | 827 | guarantee(false, "Deallocation should always succeed"); |
iveresov@579 | 828 | } |
duke@435 | 829 | } |
duke@435 | 830 | } |
duke@435 | 831 | if (p != NULL) { |
duke@435 | 832 | HeapWord* cur_top, *cur_chunk_top = p + size; |
duke@435 | 833 | while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated. |
duke@435 | 834 | if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) { |
duke@435 | 835 | break; |
duke@435 | 836 | } |
duke@435 | 837 | } |
duke@435 | 838 | } |
duke@435 | 839 | |
iveresov@576 | 840 | // Make the page allocation happen here if there is no static binding. |
iveresov@576 | 841 | if (p != NULL && !os::numa_has_static_binding()) { |
duke@435 | 842 | for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) { |
duke@435 | 843 | *(int*)i = 0; |
duke@435 | 844 | } |
duke@435 | 845 | } |
iveresov@808 | 846 | if (p == NULL) { |
iveresov@808 | 847 | ls->set_allocation_failed(); |
iveresov@808 | 848 | } |
duke@435 | 849 | return p; |
duke@435 | 850 | } |
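//
// Example of the fragment check above (numbers invented, assuming
// CollectedHeap::min_fill_size() == 2 words): a chunk has 5 words left and a
// thread cas-allocates 4 of them. The single remaining word could never hold
// a filler object, so cas_deallocate() rolls the allocation back, p becomes
// NULL, and the allocation fails over to the caller's retry path, keeping
// the space parseable.
//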
duke@435 | 851 | |
duke@435 | 852 | void MutableNUMASpace::print_short_on(outputStream* st) const { |
duke@435 | 853 | MutableSpace::print_short_on(st); |
duke@435 | 854 | st->print(" ("); |
duke@435 | 855 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 856 | st->print("lgrp %d: ", lgrp_spaces()->at(i)->lgrp_id()); |
duke@435 | 857 | lgrp_spaces()->at(i)->space()->print_short_on(st); |
duke@435 | 858 | if (i < lgrp_spaces()->length() - 1) { |
duke@435 | 859 | st->print(", "); |
duke@435 | 860 | } |
duke@435 | 861 | } |
duke@435 | 862 | st->print(")"); |
duke@435 | 863 | } |
duke@435 | 864 | |
duke@435 | 865 | void MutableNUMASpace::print_on(outputStream* st) const { |
duke@435 | 866 | MutableSpace::print_on(st); |
duke@435 | 867 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
duke@435 | 868 | LGRPSpace *ls = lgrp_spaces()->at(i); |
duke@435 | 869 | st->print(" lgrp %d", ls->lgrp_id()); |
duke@435 | 870 | ls->space()->print_on(st); |
duke@435 | 871 | if (NUMAStats) { |
iveresov@579 | 872 | for (int i = 0; i < lgrp_spaces()->length(); i++) { |
iveresov@579 | 873 | lgrp_spaces()->at(i)->accumulate_statistics(page_size()); |
iveresov@579 | 874 | } |
duke@435 | 875 | st->print(" local/remote/unbiased/uncommitted: %dK/%dK/%dK/%dK, large/small pages: %d/%d\n", |
duke@435 | 876 | ls->space_stats()->_local_space / K, |
duke@435 | 877 | ls->space_stats()->_remote_space / K, |
duke@435 | 878 | ls->space_stats()->_unbiased_space / K, |
duke@435 | 879 | ls->space_stats()->_uncommited_space / K, |
duke@435 | 880 | ls->space_stats()->_large_pages, |
duke@435 | 881 | ls->space_stats()->_small_pages); |
duke@435 | 882 | } |
duke@435 | 883 | } |
duke@435 | 884 | } |
duke@435 | 885 | |
iveresov@625 | 886 | void MutableNUMASpace::verify(bool allow_dirty) { |
iveresov@625 | 887 | // This can be called after setting an arbitrary value to the space's top, |
iveresov@625 | 888 | // so an object can cross the chunk boundary. We ensure the parsability |
iveresov@625 | 889 | // of the space and just walk the objects in a linear fashion. |
iveresov@625 | 890 | ensure_parsability(); |
iveresov@625 | 891 | MutableSpace::verify(allow_dirty); |
duke@435 | 892 | } |
duke@435 | 893 | |
duke@435 | 894 | // Scan pages and gather stats about page placement and size. |
duke@435 | 895 | void MutableNUMASpace::LGRPSpace::accumulate_statistics(size_t page_size) { |
duke@435 | 896 | clear_space_stats(); |
duke@435 | 897 | char *start = (char*)round_to((intptr_t) space()->bottom(), page_size); |
duke@435 | 898 | char* end = (char*)round_down((intptr_t) space()->end(), page_size); |
duke@435 | 899 | if (start < end) { |
duke@435 | 900 | for (char *p = start; p < end;) { |
duke@435 | 901 | os::page_info info; |
duke@435 | 902 | if (os::get_page_info(p, &info)) { |
duke@435 | 903 | if (info.size > 0) { |
duke@435 | 904 | if (info.size > (size_t)os::vm_page_size()) { |
duke@435 | 905 | space_stats()->_large_pages++; |
duke@435 | 906 | } else { |
duke@435 | 907 | space_stats()->_small_pages++; |
duke@435 | 908 | } |
duke@435 | 909 | if (info.lgrp_id == lgrp_id()) { |
duke@435 | 910 | space_stats()->_local_space += info.size; |
duke@435 | 911 | } else { |
duke@435 | 912 | space_stats()->_remote_space += info.size; |
duke@435 | 913 | } |
duke@435 | 914 | p += info.size; |
duke@435 | 915 | } else { |
duke@435 | 916 | p += os::vm_page_size(); |
duke@435 | 917 | space_stats()->_uncommited_space += os::vm_page_size(); |
duke@435 | 918 | } |
duke@435 | 919 | } else { |
duke@435 | 920 | return; |
duke@435 | 921 | } |
duke@435 | 922 | } |
duke@435 | 923 | } |
duke@435 | 924 | space_stats()->_unbiased_space = pointer_delta(start, space()->bottom(), sizeof(char)) + |
duke@435 | 925 | pointer_delta(space()->end(), end, sizeof(char)); |
duke@435 | 926 | |
duke@435 | 927 | } |
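//
// E.g. (layout invented, 4K small pages): a chunk whose page-aligned middle
// holds one committed page on the home node and one on a remote node, with
// the rest uncommitted, scores _local_space == 4K, _remote_space == 4K, and
// _uncommited_space == the rest, while the unaligned head and tail of the
// chunk are counted as _unbiased_space.
//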
duke@435 | 928 | |
duke@435 | 929 | // Scan page_count pages and verify if they have the right size and right placement. |
duke@435 | 930 | // If invalid pages are found they are freed in hope that subsequent reallocation |
duke@435 | 931 | // will be more successful. |
duke@435 | 932 | void MutableNUMASpace::LGRPSpace::scan_pages(size_t page_size, size_t page_count) |
duke@435 | 933 | { |
duke@435 | 934 | char* range_start = (char*)round_to((intptr_t) space()->bottom(), page_size); |
duke@435 | 935 | char* range_end = (char*)round_down((intptr_t) space()->end(), page_size); |
duke@435 | 936 | |
duke@435 | 937 | if (range_start > last_page_scanned() || last_page_scanned() >= range_end) { |
duke@435 | 938 | set_last_page_scanned(range_start); |
duke@435 | 939 | } |
duke@435 | 940 | |
duke@435 | 941 | char *scan_start = last_page_scanned(); |
duke@435 | 942 | char* scan_end = MIN2(scan_start + page_size * page_count, range_end); |
duke@435 | 943 | |
duke@435 | 944 | os::page_info page_expected, page_found; |
duke@435 | 945 | page_expected.size = page_size; |
duke@435 | 946 | page_expected.lgrp_id = lgrp_id(); |
duke@435 | 947 | |
duke@435 | 948 | char *s = scan_start; |
duke@435 | 949 | while (s < scan_end) { |
duke@435 | 950 | char *e = os::scan_pages(s, (char*)scan_end, &page_expected, &page_found); |
duke@435 | 951 | if (e == NULL) { |
duke@435 | 952 | break; |
duke@435 | 953 | } |
duke@435 | 954 | if (e != scan_end) { |
duke@435 | 955 | if ((page_expected.size != page_size || page_expected.lgrp_id != lgrp_id()) |
duke@435 | 956 | && page_expected.size != 0) { |
duke@435 | 957 | os::free_memory(s, pointer_delta(e, s, sizeof(char))); |
duke@435 | 958 | } |
duke@435 | 959 | page_expected = page_found; |
duke@435 | 960 | } |
duke@435 | 961 | s = e; |
duke@435 | 962 | } |
duke@435 | 963 | |
duke@435 | 964 | set_last_page_scanned(scan_end); |
duke@435 | 965 | } |
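//
// The last_page_scanned() cursor makes this an incremental sweep: with
// NUMAPageScanRate == 256 and 4 locality groups, each update() call asks for
// at most 256 / 4 == 64 pages per chunk, resumes where the previous call
// stopped, and wraps back to the chunk bottom once the cursor runs past
// range_end.
//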