Sat, 27 Sep 2008 00:33:13 -0700
6740923: NUMA allocator: Ensure the progress of adaptive chunk resizing
Summary: Treat a chunk where the allocation has failed as fully used.
Reviewed-by: ysr
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_mutableSpace.cpp.incl" |
duke@435 | 27 | |
jmasa@698 | 28 | MutableSpace::MutableSpace(): ImmutableSpace(), _top(NULL) { |
jmasa@698 | 29 | _mangler = new MutableSpaceMangler(this); |
jmasa@698 | 30 | } |
jmasa@698 | 31 | |
jmasa@698 | 32 | MutableSpace::~MutableSpace() { |
jmasa@698 | 33 | delete _mangler; |
jmasa@698 | 34 | } |
jmasa@698 | 35 | |
jmasa@698 | 36 | void MutableSpace::initialize(MemRegion mr, |
jmasa@698 | 37 | bool clear_space, |
jmasa@698 | 38 | bool mangle_space) { |
duke@435 | 39 | HeapWord* bottom = mr.start(); |
duke@435 | 40 | HeapWord* end = mr.end(); |
duke@435 | 41 | |
duke@435 | 42 | assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), |
duke@435 | 43 | "invalid space boundaries"); |
duke@435 | 44 | set_bottom(bottom); |
duke@435 | 45 | set_end(end); |
duke@435 | 46 | |
jmasa@698 | 47 | if (clear_space) { |
jmasa@698 | 48 | clear(mangle_space); |
jmasa@698 | 49 | } |
duke@435 | 50 | } |
duke@435 | 51 | |
jmasa@698 | 52 | void MutableSpace::clear(bool mangle_space) { |
duke@435 | 53 | set_top(bottom()); |
jmasa@698 | 54 | if (ZapUnusedHeapArea && mangle_space) { |
jmasa@698 | 55 | mangle_unused_area(); |
jmasa@698 | 56 | } |
duke@435 | 57 | } |
duke@435 | 58 | |
jmasa@698 | 59 | #ifndef PRODUCT |
jmasa@698 | 60 | void MutableSpace::check_mangled_unused_area(HeapWord* limit) { |
jmasa@698 | 61 | mangler()->check_mangled_unused_area(limit); |
jmasa@698 | 62 | } |
jmasa@698 | 63 | |
jmasa@698 | 64 | void MutableSpace::check_mangled_unused_area_complete() { |
jmasa@698 | 65 | mangler()->check_mangled_unused_area_complete(); |
jmasa@698 | 66 | } |
jmasa@698 | 67 | |
jmasa@698 | 68 | // Mangle only the unused space that has not previously |
jmasa@698 | 69 | // been mangled and that has not been allocated since being |
jmasa@698 | 70 | // mangled. |
jmasa@698 | 71 | void MutableSpace::mangle_unused_area() { |
jmasa@698 | 72 | mangler()->mangle_unused_area(); |
jmasa@698 | 73 | } |
jmasa@698 | 74 | |
jmasa@698 | 75 | void MutableSpace::mangle_unused_area_complete() { |
jmasa@698 | 76 | mangler()->mangle_unused_area_complete(); |
jmasa@698 | 77 | } |
jmasa@698 | 78 | |
jmasa@698 | 79 | void MutableSpace::mangle_region(MemRegion mr) { |
jmasa@698 | 80 | SpaceMangler::mangle_region(mr); |
jmasa@698 | 81 | } |
jmasa@698 | 82 | |
jmasa@698 | 83 | void MutableSpace::set_top_for_allocations(HeapWord* v) { |
jmasa@698 | 84 | mangler()->set_top_for_allocations(v); |
jmasa@698 | 85 | } |
jmasa@698 | 86 | |
jmasa@698 | 87 | void MutableSpace::set_top_for_allocations() { |
jmasa@698 | 88 | mangler()->set_top_for_allocations(top()); |
jmasa@698 | 89 | } |
jmasa@698 | 90 | #endif |
jmasa@698 | 91 | |
duke@435 | 92 | // This version requires locking. */ |
duke@435 | 93 | HeapWord* MutableSpace::allocate(size_t size) { |
duke@435 | 94 | assert(Heap_lock->owned_by_self() || |
duke@435 | 95 | (SafepointSynchronize::is_at_safepoint() && |
duke@435 | 96 | Thread::current()->is_VM_thread()), |
duke@435 | 97 | "not locked"); |
duke@435 | 98 | HeapWord* obj = top(); |
duke@435 | 99 | if (pointer_delta(end(), obj) >= size) { |
duke@435 | 100 | HeapWord* new_top = obj + size; |
duke@435 | 101 | set_top(new_top); |
duke@435 | 102 | assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top), |
duke@435 | 103 | "checking alignment"); |
duke@435 | 104 | return obj; |
duke@435 | 105 | } else { |
duke@435 | 106 | return NULL; |
duke@435 | 107 | } |
duke@435 | 108 | } |
duke@435 | 109 | |
duke@435 | 110 | // This version is lock-free. |
duke@435 | 111 | HeapWord* MutableSpace::cas_allocate(size_t size) { |
duke@435 | 112 | do { |
duke@435 | 113 | HeapWord* obj = top(); |
duke@435 | 114 | if (pointer_delta(end(), obj) >= size) { |
duke@435 | 115 | HeapWord* new_top = obj + size; |
duke@435 | 116 | HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj); |
duke@435 | 117 | // result can be one of two: |
duke@435 | 118 | // the old top value: the exchange succeeded |
duke@435 | 119 | // otherwise: the new value of the top is returned. |
duke@435 | 120 | if (result != obj) { |
duke@435 | 121 | continue; // another thread beat us to the allocation, try again |
duke@435 | 122 | } |
duke@435 | 123 | assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top), |
duke@435 | 124 | "checking alignment"); |
duke@435 | 125 | return obj; |
duke@435 | 126 | } else { |
duke@435 | 127 | return NULL; |
duke@435 | 128 | } |
duke@435 | 129 | } while (true); |
duke@435 | 130 | } |
duke@435 | 131 | |
duke@435 | 132 | // Try to deallocate previous allocation. Returns true upon success. |
duke@435 | 133 | bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) { |
duke@435 | 134 | HeapWord* expected_top = obj + size; |
duke@435 | 135 | return (HeapWord*)Atomic::cmpxchg_ptr(obj, top_addr(), expected_top) == expected_top; |
duke@435 | 136 | } |
duke@435 | 137 | |
duke@435 | 138 | void MutableSpace::oop_iterate(OopClosure* cl) { |
duke@435 | 139 | HeapWord* obj_addr = bottom(); |
duke@435 | 140 | HeapWord* t = top(); |
duke@435 | 141 | // Could call objects iterate, but this is easier. |
duke@435 | 142 | while (obj_addr < t) { |
duke@435 | 143 | obj_addr += oop(obj_addr)->oop_iterate(cl); |
duke@435 | 144 | } |
duke@435 | 145 | } |
duke@435 | 146 | |
duke@435 | 147 | void MutableSpace::object_iterate(ObjectClosure* cl) { |
duke@435 | 148 | HeapWord* p = bottom(); |
duke@435 | 149 | while (p < top()) { |
duke@435 | 150 | cl->do_object(oop(p)); |
duke@435 | 151 | p += oop(p)->size(); |
duke@435 | 152 | } |
duke@435 | 153 | } |
duke@435 | 154 | |
duke@435 | 155 | void MutableSpace::print_short() const { print_short_on(tty); } |
duke@435 | 156 | void MutableSpace::print_short_on( outputStream* st) const { |
duke@435 | 157 | st->print(" space " SIZE_FORMAT "K, %d%% used", capacity_in_bytes() / K, |
duke@435 | 158 | (int) ((double) used_in_bytes() * 100 / capacity_in_bytes())); |
duke@435 | 159 | } |
duke@435 | 160 | |
duke@435 | 161 | void MutableSpace::print() const { print_on(tty); } |
duke@435 | 162 | void MutableSpace::print_on(outputStream* st) const { |
duke@435 | 163 | MutableSpace::print_short_on(st); |
duke@435 | 164 | st->print_cr(" [" INTPTR_FORMAT "," INTPTR_FORMAT "," INTPTR_FORMAT ")", |
duke@435 | 165 | bottom(), top(), end()); |
duke@435 | 166 | } |
duke@435 | 167 | |
iveresov@625 | 168 | void MutableSpace::verify(bool allow_dirty) { |
duke@435 | 169 | HeapWord* p = bottom(); |
duke@435 | 170 | HeapWord* t = top(); |
duke@435 | 171 | HeapWord* prev_p = NULL; |
duke@435 | 172 | while (p < t) { |
duke@435 | 173 | oop(p)->verify(); |
duke@435 | 174 | prev_p = p; |
duke@435 | 175 | p += oop(p)->size(); |
duke@435 | 176 | } |
duke@435 | 177 | guarantee(p == top(), "end of last object must match end of space"); |
duke@435 | 178 | } |