Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
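The core of this change is visible below in MetaspaceObj::operator new, which routes metadata allocations to a Metaspace tied to a class loader instead of the GC-managed PermGen. As a rough sketch of the new allocation pattern (MyMetadata and its factory are hypothetical; MetaspaceObj, ClassLoaderData, Metaspace, BytesPerWord, and the TRAPS/THREAD macros are the real HotSpot types and macros used in this file):

    // Hypothetical metadata class; only the allocation pattern is the point.
    class MyMetadata : public MetaspaceObj {
      int _flags;
     public:
      MyMetadata() : _flags(0) {}
      static MyMetadata* allocate(ClassLoaderData* loader_data, TRAPS) {
        // Assumes sizeof(MyMetadata) is word-aligned; real code rounds up.
        size_t word_size = sizeof(MyMetadata) / BytesPerWord;
        // The placement operator new below forwards to
        // Metaspace::allocate(loader_data, word_size, read_only, ...);
        // real callers guard against a NULL result via the CHECK macros.
        return new (loader_data, word_size, /* read_only */ false, THREAD) MyMetadata();
      }
    };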
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "memory/allocation.hpp" |
stefank@2314 | 27 | #include "memory/allocation.inline.hpp" |
coleenp@4037 | 28 | #include "memory/genCollectedHeap.hpp" |
coleenp@4037 | 29 | #include "memory/metaspaceShared.hpp" |
stefank@2314 | 30 | #include "memory/resourceArea.hpp" |
coleenp@4037 | 31 | #include "memory/universe.hpp" |
zgu@3900 | 32 | #include "runtime/atomic.hpp" |
stefank@2314 | 33 | #include "runtime/os.hpp" |
stefank@2314 | 34 | #include "runtime/task.hpp" |
stefank@2314 | 35 | #include "runtime/threadCritical.hpp" |
zgu@3900 | 36 | #include "services/memTracker.hpp" |
stefank@2314 | 37 | #include "utilities/ostream.hpp" |
zgu@3900 | 38 | |
stefank@2314 | 39 | #ifdef TARGET_OS_FAMILY_linux |
stefank@2314 | 40 | # include "os_linux.inline.hpp" |
stefank@2314 | 41 | #endif |
stefank@2314 | 42 | #ifdef TARGET_OS_FAMILY_solaris |
stefank@2314 | 43 | # include "os_solaris.inline.hpp" |
stefank@2314 | 44 | #endif |
stefank@2314 | 45 | #ifdef TARGET_OS_FAMILY_windows |
stefank@2314 | 46 | # include "os_windows.inline.hpp" |
stefank@2314 | 47 | #endif |
never@3156 | 48 | #ifdef TARGET_OS_FAMILY_bsd |
never@3156 | 49 | # include "os_bsd.inline.hpp" |
never@3156 | 50 | #endif |
duke@435 | 51 | |
duke@435 | 52 | void* StackObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; |
duke@435 | 53 | void StackObj::operator delete(void* p) { ShouldNotCallThis(); }; |
duke@435 | 54 | void* _ValueObj::operator new(size_t size) { ShouldNotCallThis(); return 0; }; |
duke@435 | 55 | void _ValueObj::operator delete(void* p) { ShouldNotCallThis(); }; |
duke@435 | 56 | |
coleenp@4037 | 57 | void* MetaspaceObj::operator new(size_t size, ClassLoaderData* loader_data, |
coleenp@4037 | 58 | size_t word_size, bool read_only, TRAPS) { |
coleenp@4037 | 59 | // Klass has its own operator new |
coleenp@4037 | 60 | return Metaspace::allocate(loader_data, word_size, read_only, |
coleenp@4037 | 61 | Metaspace::NonClassType, CHECK_NULL); |
coleenp@4037 | 62 | } |
coleenp@4037 | 63 | |
coleenp@4037 | 64 | bool MetaspaceObj::is_shared() const { |
coleenp@4037 | 65 | return MetaspaceShared::is_in_shared_space(this); |
coleenp@4037 | 66 | } |
coleenp@4037 | 67 | |
coleenp@4037 | 68 | bool MetaspaceObj::is_metadata() const { |
coleenp@4037 | 69 | // Avoid ClassLoaderDataGraph::contains((address)this); it has lock inversion problems |
coleenp@4037 | 70 | return !Universe::heap()->is_in_reserved(this); |
coleenp@4037 | 71 | } |
coleenp@4037 | 72 | |
coleenp@4037 | 73 | void MetaspaceObj::print_address_on(outputStream* st) const { |
coleenp@4037 | 74 | st->print(" {"INTPTR_FORMAT"}", this); |
coleenp@4037 | 75 | } |
coleenp@4037 | 76 | |
coleenp@4037 | 77 | |
zgu@3900 | 78 | void* ResourceObj::operator new(size_t size, allocation_type type, MEMFLAGS flags) { |
duke@435 | 79 | address res; |
duke@435 | 80 | switch (type) { |
duke@435 | 81 | case C_HEAP: |
zgu@3900 | 82 | res = (address)AllocateHeap(size, flags, CALLER_PC); |
kvn@2040 | 83 | DEBUG_ONLY(set_allocation_type(res, C_HEAP);) |
duke@435 | 84 | break; |
duke@435 | 85 | case RESOURCE_AREA: |
kvn@2043 | 86 | // new(size) sets allocation type RESOURCE_AREA. |
duke@435 | 87 | res = (address)operator new(size); |
duke@435 | 88 | break; |
duke@435 | 89 | default: |
duke@435 | 90 | ShouldNotReachHere(); |
duke@435 | 91 | } |
duke@435 | 92 | return res; |
duke@435 | 93 | } |
duke@435 | 94 | |
duke@435 | 95 | void ResourceObj::operator delete(void* p) { |
duke@435 | 96 | assert(((ResourceObj *)p)->allocated_on_C_heap(), |
duke@435 | 97 | "delete only allowed for C_HEAP objects"); |
kvn@2357 | 98 | DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;) |
duke@435 | 99 | FreeHeap(p); |
duke@435 | 100 | } |
duke@435 | 101 | |
kvn@2040 | 102 | #ifdef ASSERT |
kvn@2040 | 103 | void ResourceObj::set_allocation_type(address res, allocation_type type) { |
kvn@2040 | 104 | // Set allocation type in the resource object |
kvn@2040 | 105 | uintptr_t allocation = (uintptr_t)res; |
kvn@2043 | 106 | assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least"); |
kvn@2040 | 107 | assert(type <= allocation_mask, "incorrect allocation type"); |
kvn@2357 | 108 | ResourceObj* resobj = (ResourceObj *)res; |
kvn@2357 | 109 | resobj->_allocation_t[0] = ~(allocation + type); |
kvn@2357 | 110 | if (type != STACK_OR_EMBEDDED) { |
kvn@2357 | 111 | // Called from operator new() and CollectionSetChooser(), |
kvn@2357 | 112 | // set verification value. |
kvn@2357 | 113 | resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type; |
kvn@2357 | 114 | } |
kvn@2040 | 115 | } |
kvn@2040 | 116 | |
kvn@2043 | 117 | ResourceObj::allocation_type ResourceObj::get_allocation_type() const { |
kvn@2357 | 118 | assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object"); |
kvn@2357 | 119 | return (allocation_type)((~_allocation_t[0]) & allocation_mask); |
kvn@2357 | 120 | } |
kvn@2357 | 121 | |
kvn@2357 | 122 | bool ResourceObj::is_type_set() const { |
kvn@2357 | 123 | allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask); |
kvn@2357 | 124 | return get_allocation_type() == type && |
kvn@2357 | 125 | (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]); |
kvn@2040 | 126 | } |
kvn@2040 | 127 | |
kvn@2043 | 128 | ResourceObj::ResourceObj() { // default constructor |
kvn@2357 | 129 | if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) { |
kvn@2357 | 130 | // Operator new() is not called for allocations |
kvn@2357 | 131 | // on stack and for embedded objects. |
kvn@2040 | 132 | set_allocation_type((address)this, STACK_OR_EMBEDDED); |
kvn@2357 | 133 | } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED |
kvn@2357 | 134 | // For some reason we got a value which resembles |
kvn@2357 | 135 | // an embedded or stack object (operator new() does not |
kvn@2357 | 136 | // set such type). Keep it since it is a valid value |
kvn@2357 | 137 | // (even if it was garbage). |
kvn@2357 | 138 | // Ignore garbage in other fields. |
kvn@2357 | 139 | } else if (is_type_set()) { |
kvn@2357 | 140 | // Operator new() was called and type was set. |
kvn@2357 | 141 | assert(!allocated_on_stack(), |
kvn@2357 | 142 | err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", |
kvn@2357 | 143 | this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); |
kvn@2040 | 144 | } else { |
kvn@2357 | 145 | // Operator new() was not called. |
kvn@2357 | 146 | // Assume that it is embedded or stack object. |
kvn@2357 | 147 | set_allocation_type((address)this, STACK_OR_EMBEDDED); |
kvn@2040 | 148 | } |
kvn@2357 | 149 | _allocation_t[1] = 0; // Zap verification value |
kvn@2040 | 150 | } |
kvn@2040 | 151 | |
kvn@2043 | 152 | ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor |
kvn@2040 | 153 | // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream. |
kvn@2357 | 154 | // Note: garbage may resemble a valid value. |
kvn@2357 | 155 | assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(), |
kvn@2357 | 156 | err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", |
kvn@2357 | 157 | this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); |
kvn@2040 | 158 | set_allocation_type((address)this, STACK_OR_EMBEDDED); |
kvn@2357 | 159 | _allocation_t[1] = 0; // Zap verification value |
kvn@2040 | 160 | } |
kvn@2040 | 161 | |
kvn@2040 | 162 | ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment |
kvn@2040 | 163 | // Used in InlineTree::ok_to_inline() for WarmCallInfo. |
kvn@2357 | 164 | assert(allocated_on_stack(), |
kvn@2357 | 165 | err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", |
kvn@2357 | 166 | this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); |
kvn@2357 | 167 | // Keep current _allocation_t value; |
kvn@2040 | 168 | return *this; |
kvn@2040 | 169 | } |
kvn@2040 | 170 | |
kvn@2040 | 171 | ResourceObj::~ResourceObj() { |
kvn@2043 | 172 | // allocated_on_C_heap() also checks that the encoded (in _allocation_t[0]) address == this. |
kvn@2357 | 173 | if (!allocated_on_C_heap()) { // ResourceObj::operator delete() will zap _allocation_t for C_HEAP. |
kvn@2357 | 174 | _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type |
kvn@2040 | 175 | } |
kvn@2040 | 176 | } |
kvn@2040 | 177 | #endif // ASSERT |
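To make the debug-only tagging above concrete: set_allocation_type() stores the bit-inverted sum of the object's own aligned address and a small type tag, so get_allocation_type() can both validate the word and recover the tag. A standalone sketch of just that arithmetic (plain C++ with illustrative names, assuming a 2-bit tag and pointer-aligned objects):

    #include <cassert>
    #include <cstdint>

    // The object's aligned address plus a 2-bit allocation type is stored
    // bit-inverted, so stack garbage rarely masquerades as a valid tag.
    const uintptr_t allocation_mask = 0x3;  // low bits free due to alignment
    enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA = 1, C_HEAP = 2 };

    uintptr_t encode(void* obj, allocation_type type) {
      uintptr_t addr = (uintptr_t)obj;
      assert((addr & allocation_mask) == 0 && "address must be aligned");
      return ~(addr + type);                     // matches _allocation_t[0] above
    }

    // Matches the assert in get_allocation_type(): recovers the address.
    bool is_valid(uintptr_t word, void* obj) {
      return ~(word | allocation_mask) == (uintptr_t)obj;
    }

    allocation_type decode(uintptr_t word) {
      return (allocation_type)((~word) & allocation_mask);
    }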
kvn@2040 | 178 | |
kvn@2040 | 179 | |
duke@435 | 180 | void trace_heap_malloc(size_t size, const char* name, void* p) { |
duke@435 | 181 | // A lock is not needed here - tty uses a lock internally |
kvn@2557 | 182 | tty->print_cr("Heap malloc " INTPTR_FORMAT " " SIZE_FORMAT " %s", p, size, name == NULL ? "" : name); |
duke@435 | 183 | } |
duke@435 | 184 | |
duke@435 | 185 | |
duke@435 | 186 | void trace_heap_free(void* p) { |
duke@435 | 187 | // A lock is not needed here - tty uses a lock internally |
duke@435 | 188 | tty->print_cr("Heap free " INTPTR_FORMAT, p); |
duke@435 | 189 | } |
duke@435 | 190 | |
duke@435 | 191 | bool warn_new_operator = false; // see vm_main |
duke@435 | 192 | |
duke@435 | 193 | //-------------------------------------------------------------------------------------- |
duke@435 | 194 | // ChunkPool implementation |
duke@435 | 195 | |
duke@435 | 196 | // MT-safe pool of chunks to reduce malloc/free thrashing |
duke@435 | 197 | // NB: not using Mutex because pools are used before Threads are initialized |
zgu@3900 | 198 | class ChunkPool: public CHeapObj<mtInternal> { |
duke@435 | 199 | Chunk* _first; // first cached Chunk; its first word points to next chunk |
duke@435 | 200 | size_t _num_chunks; // number of unused chunks in pool |
duke@435 | 201 | size_t _num_used; // number of chunks currently checked out |
duke@435 | 202 | const size_t _size; // size of each chunk (must be uniform) |
duke@435 | 203 | |
duke@435 | 204 | // Our three static pools |
duke@435 | 205 | static ChunkPool* _large_pool; |
duke@435 | 206 | static ChunkPool* _medium_pool; |
duke@435 | 207 | static ChunkPool* _small_pool; |
duke@435 | 208 | |
duke@435 | 209 | // return first element or null |
duke@435 | 210 | void* get_first() { |
duke@435 | 211 | Chunk* c = _first; |
duke@435 | 212 | if (_first) { |
duke@435 | 213 | _first = _first->next(); |
duke@435 | 214 | _num_chunks--; |
duke@435 | 215 | } |
duke@435 | 216 | return c; |
duke@435 | 217 | } |
duke@435 | 218 | |
duke@435 | 219 | public: |
duke@435 | 220 | // All chunks in a ChunkPool have the same size |
duke@435 | 221 | ChunkPool(size_t size) : _size(size) { _first = NULL; _num_chunks = _num_used = 0; } |
duke@435 | 222 | |
duke@435 | 223 | // Allocate a new chunk from the pool (might expand the pool) |
zgu@3900 | 224 | _NOINLINE_ void* allocate(size_t bytes) { |
duke@435 | 225 | assert(bytes == _size, "bad size"); |
duke@435 | 226 | void* p = NULL; |
zgu@3900 | 227 | // NMT bookkeeping in os::malloc can take a VM lock, and no VM lock may be |
zgu@3900 | 228 | // taken inside ThreadCritical, so the malloc is done outside the critical section |
duke@435 | 229 | { ThreadCritical tc; |
duke@435 | 230 | _num_used++; |
duke@435 | 231 | p = get_first(); |
duke@435 | 232 | } |
zgu@3900 | 233 | if (p == NULL) p = os::malloc(bytes, mtChunk, CURRENT_PC); |
duke@435 | 234 | if (p == NULL) |
duke@435 | 235 | vm_exit_out_of_memory(bytes, "ChunkPool::allocate"); |
duke@435 | 236 | |
duke@435 | 237 | return p; |
duke@435 | 238 | } |
duke@435 | 239 | |
duke@435 | 240 | // Return a chunk to the pool |
duke@435 | 241 | void free(Chunk* chunk) { |
duke@435 | 242 | assert(chunk->length() + Chunk::aligned_overhead_size() == _size, "bad size"); |
duke@435 | 243 | ThreadCritical tc; |
duke@435 | 244 | _num_used--; |
duke@435 | 245 | |
duke@435 | 246 | // Add chunk to list |
duke@435 | 247 | chunk->set_next(_first); |
duke@435 | 248 | _first = chunk; |
duke@435 | 249 | _num_chunks++; |
duke@435 | 250 | } |
duke@435 | 251 | |
duke@435 | 252 | // Prune the pool |
duke@435 | 253 | void free_all_but(size_t n) { |
zgu@3900 | 254 | Chunk* cur = NULL; |
zgu@3900 | 255 | Chunk* next; |
zgu@3900 | 256 | { |
duke@435 | 257 | // if we have more than n chunks, free all but the first n |
duke@435 | 258 | ThreadCritical tc; |
duke@435 | 259 | if (_num_chunks > n) { |
duke@435 | 260 | // free chunks at end of queue, for better locality |
zgu@3900 | 261 | cur = _first; |
duke@435 | 262 | for (size_t i = 0; i < (n - 1) && cur != NULL; i++) cur = cur->next(); |
duke@435 | 263 | |
duke@435 | 264 | if (cur != NULL) { |
zgu@3900 | 265 | next = cur->next(); |
duke@435 | 266 | cur->set_next(NULL); |
duke@435 | 267 | cur = next; |
duke@435 | 268 | |
zgu@3900 | 269 | _num_chunks = n; |
zgu@3900 | 270 | } |
zgu@3900 | 271 | } |
zgu@3900 | 272 | } |
zgu@3900 | 273 | |
zgu@3900 | 274 | // Free all remaining chunks, outside of ThreadCritical |
zgu@3900 | 275 | // to avoid deadlock with NMT |
duke@435 | 276 | while(cur != NULL) { |
duke@435 | 277 | next = cur->next(); |
zgu@3900 | 278 | os::free(cur, mtChunk); |
duke@435 | 279 | cur = next; |
duke@435 | 280 | } |
duke@435 | 281 | } |
duke@435 | 282 | |
duke@435 | 283 | // Accessors to the preallocated pools |
duke@435 | 284 | static ChunkPool* large_pool() { assert(_large_pool != NULL, "must be initialized"); return _large_pool; } |
duke@435 | 285 | static ChunkPool* medium_pool() { assert(_medium_pool != NULL, "must be initialized"); return _medium_pool; } |
duke@435 | 286 | static ChunkPool* small_pool() { assert(_small_pool != NULL, "must be initialized"); return _small_pool; } |
duke@435 | 287 | |
duke@435 | 288 | static void initialize() { |
duke@435 | 289 | _large_pool = new ChunkPool(Chunk::size + Chunk::aligned_overhead_size()); |
duke@435 | 290 | _medium_pool = new ChunkPool(Chunk::medium_size + Chunk::aligned_overhead_size()); |
duke@435 | 291 | _small_pool = new ChunkPool(Chunk::init_size + Chunk::aligned_overhead_size()); |
duke@435 | 292 | } |
bobv@2036 | 293 | |
bobv@2036 | 294 | static void clean() { |
bobv@2036 | 295 | enum { BlocksToKeep = 5 }; |
bobv@2036 | 296 | _small_pool->free_all_but(BlocksToKeep); |
bobv@2036 | 297 | _medium_pool->free_all_but(BlocksToKeep); |
bobv@2036 | 298 | _large_pool->free_all_but(BlocksToKeep); |
bobv@2036 | 299 | } |
duke@435 | 300 | }; |
duke@435 | 301 | |
duke@435 | 302 | ChunkPool* ChunkPool::_large_pool = NULL; |
duke@435 | 303 | ChunkPool* ChunkPool::_medium_pool = NULL; |
duke@435 | 304 | ChunkPool* ChunkPool::_small_pool = NULL; |
duke@435 | 305 | |
duke@435 | 306 | void chunkpool_init() { |
duke@435 | 307 | ChunkPool::initialize(); |
duke@435 | 308 | } |
duke@435 | 309 | |
bobv@2036 | 310 | void |
bobv@2036 | 311 | Chunk::clean_chunk_pool() { |
bobv@2036 | 312 | ChunkPool::clean(); |
bobv@2036 | 313 | } |
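ChunkPool above is essentially an intrusive free list of uniform-size blocks guarded by ThreadCritical, with the actual os::malloc/os::free kept outside the lock so NMT's own locking cannot deadlock against it. A minimal standalone analog of that shape (generic C++ with std::mutex standing in for ThreadCritical; illustrative, not HotSpot code):

    #include <cstdlib>
    #include <mutex>

    // Uniform-size blocks cached on an intrusive free list; the lock only
    // covers list surgery, never malloc/free itself. Requires size >= sizeof(Node).
    class TinyPool {
      struct Node { Node* next; };
      Node* _free = nullptr;
      std::mutex _lock;            // stands in for ThreadCritical
      const size_t _size;
     public:
      explicit TinyPool(size_t size) : _size(size) {}
      void* allocate() {
        {
          std::lock_guard<std::mutex> g(_lock);
          if (_free != nullptr) { Node* n = _free; _free = n->next; return n; }
        }
        return std::malloc(_size); // slow path, outside the lock
      }
      void release(void* p) {
        std::lock_guard<std::mutex> g(_lock);
        Node* n = static_cast<Node*>(p);
        n->next = _free;
        _free = n;
      }
      ~TinyPool() {
        while (_free != nullptr) { Node* n = _free->next; std::free(_free); _free = n; }
      }
    };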
bobv@2036 | 314 | |
duke@435 | 315 | |
duke@435 | 316 | //-------------------------------------------------------------------------------------- |
duke@435 | 317 | // ChunkPoolCleaner implementation |
bobv@2036 | 318 | // |
duke@435 | 319 | |
duke@435 | 320 | class ChunkPoolCleaner : public PeriodicTask { |
bobv@2036 | 321 | enum { CleaningInterval = 5000 }; // cleaning interval in ms |
duke@435 | 322 | |
duke@435 | 323 | public: |
duke@435 | 324 | ChunkPoolCleaner() : PeriodicTask(CleaningInterval) {} |
duke@435 | 325 | void task() { |
bobv@2036 | 326 | ChunkPool::clean(); |
duke@435 | 327 | } |
duke@435 | 328 | }; |
duke@435 | 329 | |
duke@435 | 330 | //-------------------------------------------------------------------------------------- |
duke@435 | 331 | // Chunk implementation |
duke@435 | 332 | |
duke@435 | 333 | void* Chunk::operator new(size_t requested_size, size_t length) { |
duke@435 | 334 | // requested_size is equal to sizeof(Chunk), but for the arena |
duke@435 | 335 | // allocations to come out aligned as expected, the size must be |
duke@435 | 336 | // rounded up to the expected arena alignment. |
duke@435 | 337 | // If sizeof(Chunk) is not already aligned, align it here. |
duke@435 | 338 | assert(ARENA_ALIGN(requested_size) == aligned_overhead_size(), "Bad alignment"); |
duke@435 | 339 | size_t bytes = ARENA_ALIGN(requested_size) + length; |
duke@435 | 340 | switch (length) { |
duke@435 | 341 | case Chunk::size: return ChunkPool::large_pool()->allocate(bytes); |
duke@435 | 342 | case Chunk::medium_size: return ChunkPool::medium_pool()->allocate(bytes); |
duke@435 | 343 | case Chunk::init_size: return ChunkPool::small_pool()->allocate(bytes); |
duke@435 | 344 | default: { |
zgu@3900 | 345 | void *p = os::malloc(bytes, mtChunk, CALLER_PC); |
duke@435 | 346 | if (p == NULL) |
duke@435 | 347 | vm_exit_out_of_memory(bytes, "Chunk::new"); |
duke@435 | 348 | return p; |
duke@435 | 349 | } |
duke@435 | 350 | } |
duke@435 | 351 | } |
duke@435 | 352 | |
duke@435 | 353 | void Chunk::operator delete(void* p) { |
duke@435 | 354 | Chunk* c = (Chunk*)p; |
duke@435 | 355 | switch (c->length()) { |
duke@435 | 356 | case Chunk::size: ChunkPool::large_pool()->free(c); break; |
duke@435 | 357 | case Chunk::medium_size: ChunkPool::medium_pool()->free(c); break; |
duke@435 | 358 | case Chunk::init_size: ChunkPool::small_pool()->free(c); break; |
zgu@3900 | 359 | default: os::free(c, mtChunk); |
duke@435 | 360 | } |
duke@435 | 361 | } |
duke@435 | 362 | |
duke@435 | 363 | Chunk::Chunk(size_t length) : _len(length) { |
duke@435 | 364 | _next = NULL; // Chain on the linked list |
duke@435 | 365 | } |
duke@435 | 366 | |
duke@435 | 367 | |
duke@435 | 368 | void Chunk::chop() { |
duke@435 | 369 | Chunk *k = this; |
duke@435 | 370 | while( k ) { |
duke@435 | 371 | Chunk *tmp = k->next(); |
duke@435 | 372 | // clear out this chunk (to detect allocation bugs) |
duke@435 | 373 | if (ZapResourceArea) memset(k->bottom(), badResourceValue, k->length()); |
duke@435 | 374 | delete k; // Free chunk (was malloc'd) |
duke@435 | 375 | k = tmp; |
duke@435 | 376 | } |
duke@435 | 377 | } |
duke@435 | 378 | |
duke@435 | 379 | void Chunk::next_chop() { |
duke@435 | 380 | _next->chop(); |
duke@435 | 381 | _next = NULL; |
duke@435 | 382 | } |
duke@435 | 383 | |
duke@435 | 384 | |
duke@435 | 385 | void Chunk::start_chunk_pool_cleaner_task() { |
duke@435 | 386 | #ifdef ASSERT |
duke@435 | 387 | static bool task_created = false; |
duke@435 | 388 | assert(!task_created, "should not start chunk pool cleaner twice"); |
duke@435 | 389 | task_created = true; |
duke@435 | 390 | #endif |
duke@435 | 391 | ChunkPoolCleaner* cleaner = new ChunkPoolCleaner(); |
duke@435 | 392 | cleaner->enroll(); |
duke@435 | 393 | } |
duke@435 | 394 | |
duke@435 | 395 | //------------------------------Arena------------------------------------------ |
zgu@3900 | 396 | NOT_PRODUCT(volatile jint Arena::_instance_count = 0;) |
duke@435 | 397 | |
duke@435 | 398 | Arena::Arena(size_t init_size) { |
duke@435 | 399 | size_t round_size = (sizeof (char *)) - 1; |
duke@435 | 400 | init_size = (init_size+round_size) & ~round_size; |
duke@435 | 401 | _first = _chunk = new (init_size) Chunk(init_size); |
duke@435 | 402 | _hwm = _chunk->bottom(); // Save the cached hwm, max |
duke@435 | 403 | _max = _chunk->top(); |
duke@435 | 404 | set_size_in_bytes(init_size); |
zgu@3900 | 405 | NOT_PRODUCT(Atomic::inc(&_instance_count);) |
duke@435 | 406 | } |
duke@435 | 407 | |
duke@435 | 408 | Arena::Arena() { |
duke@435 | 409 | _first = _chunk = new (Chunk::init_size) Chunk(Chunk::init_size); |
duke@435 | 410 | _hwm = _chunk->bottom(); // Save the cached hwm, max |
duke@435 | 411 | _max = _chunk->top(); |
duke@435 | 412 | set_size_in_bytes(Chunk::init_size); |
zgu@3900 | 413 | NOT_PRODUCT(Atomic::inc(&_instance_count);) |
duke@435 | 414 | } |
duke@435 | 415 | |
duke@435 | 416 | Arena::Arena(Arena *a) : _chunk(a->_chunk), _hwm(a->_hwm), _max(a->_max), _first(a->_first) { |
duke@435 | 417 | set_size_in_bytes(a->size_in_bytes()); |
zgu@3900 | 418 | NOT_PRODUCT(Atomic::inc(&_instance_count);) |
duke@435 | 419 | } |
duke@435 | 420 | |
zgu@3900 | 421 | |
duke@435 | 422 | Arena *Arena::move_contents(Arena *copy) { |
duke@435 | 423 | copy->destruct_contents(); |
duke@435 | 424 | copy->_chunk = _chunk; |
duke@435 | 425 | copy->_hwm = _hwm; |
duke@435 | 426 | copy->_max = _max; |
duke@435 | 427 | copy->_first = _first; |
duke@435 | 428 | copy->set_size_in_bytes(size_in_bytes()); |
duke@435 | 429 | // Destroy original arena |
duke@435 | 430 | reset(); |
duke@435 | 431 | return copy; // Return Arena with contents |
duke@435 | 432 | } |
duke@435 | 433 | |
duke@435 | 434 | Arena::~Arena() { |
duke@435 | 435 | destruct_contents(); |
zgu@3900 | 436 | NOT_PRODUCT(Atomic::dec(&_instance_count);) |
zgu@3900 | 437 | } |
zgu@3900 | 438 | |
zgu@3900 | 439 | void* Arena::operator new(size_t size) { |
zgu@3900 | 440 | assert(false, "Use dynamic memory type binding"); |
zgu@3900 | 441 | return NULL; |
zgu@3900 | 442 | } |
zgu@3900 | 443 | |
zgu@3900 | 444 | void* Arena::operator new (size_t size, const std::nothrow_t& nothrow_constant) { |
zgu@3900 | 445 | assert(false, "Use dynamic memory type binding"); |
zgu@3900 | 446 | return NULL; |
zgu@3900 | 447 | } |
zgu@3900 | 448 | |
zgu@3900 | 449 | // dynamic memory type binding |
zgu@3900 | 450 | void* Arena::operator new(size_t size, MEMFLAGS flags) { |
zgu@3900 | 451 | #ifdef ASSERT |
zgu@3900 | 452 | void* p = (void*)AllocateHeap(size, flags|otArena, CALLER_PC); |
zgu@3900 | 453 | if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); |
zgu@3900 | 454 | return p; |
zgu@3900 | 455 | #else |
zgu@3900 | 456 | return (void *) AllocateHeap(size, flags|otArena, CALLER_PC); |
zgu@3900 | 457 | #endif |
zgu@3900 | 458 | } |
zgu@3900 | 459 | |
zgu@3900 | 460 | void* Arena::operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) { |
zgu@3900 | 461 | #ifdef ASSERT |
zgu@3900 | 462 | void* p = os::malloc(size, flags|otArena, CALLER_PC); |
zgu@3900 | 463 | if (PrintMallocFree) trace_heap_malloc(size, "Arena-new", p); |
zgu@3900 | 464 | return p; |
zgu@3900 | 465 | #else |
zgu@3900 | 466 | return os::malloc(size, flags|otArena, CALLER_PC); |
zgu@3900 | 467 | #endif |
zgu@3900 | 468 | } |
zgu@3900 | 469 | |
zgu@3900 | 470 | void Arena::operator delete(void* p) { |
zgu@3900 | 471 | FreeHeap(p); |
duke@435 | 472 | } |
duke@435 | 473 | |
duke@435 | 474 | // Destroy this arena's contents and reset to empty |
duke@435 | 475 | void Arena::destruct_contents() { |
duke@435 | 476 | if (UseMallocOnly && _first != NULL) { |
duke@435 | 477 | char* end = _first->next() ? _first->top() : _hwm; |
duke@435 | 478 | free_malloced_objects(_first, _first->bottom(), end, _hwm); |
duke@435 | 479 | } |
duke@435 | 480 | _first->chop(); |
duke@435 | 481 | reset(); |
duke@435 | 482 | } |
duke@435 | 483 | |
zgu@3900 | 484 | // This is a high-traffic method, but many calls actually don't |
zgu@3900 | 485 | // change the size |
zgu@3900 | 486 | void Arena::set_size_in_bytes(size_t size) { |
zgu@3900 | 487 | if (_size_in_bytes != size) { |
zgu@3900 | 488 | _size_in_bytes = size; |
zgu@3900 | 489 | MemTracker::record_arena_size((address)this, size); |
zgu@3900 | 490 | } |
zgu@3900 | 491 | } |
duke@435 | 492 | |
duke@435 | 493 | // Total of all Chunks in arena |
duke@435 | 494 | size_t Arena::used() const { |
duke@435 | 495 | size_t sum = _chunk->length() - (_max-_hwm); // Size used in the current Chunk |
duke@435 | 496 | register Chunk *k = _first; |
duke@435 | 497 | while( k != _chunk) { // Whilst have Chunks in a row |
duke@435 | 498 | sum += k->length(); // Total size of this Chunk |
duke@435 | 499 | k = k->next(); // Bump along to next Chunk |
duke@435 | 500 | } |
duke@435 | 501 | return sum; // Return total consumed space. |
duke@435 | 502 | } |
duke@435 | 503 | |
kamg@2589 | 504 | void Arena::signal_out_of_memory(size_t sz, const char* whence) const { |
kamg@2589 | 505 | vm_exit_out_of_memory(sz, whence); |
kamg@2589 | 506 | } |
duke@435 | 507 | |
duke@435 | 508 | // Grow a new Chunk |
duke@435 | 509 | void* Arena::grow( size_t x ) { |
duke@435 | 510 | // Get minimal required size. Either real big, or even bigger for giant objs |
duke@435 | 511 | size_t len = MAX2(x, (size_t) Chunk::size); |
duke@435 | 512 | |
duke@435 | 513 | Chunk *k = _chunk; // Get filled-up chunk address |
duke@435 | 514 | _chunk = new (len) Chunk(len); |
duke@435 | 515 | |
kamg@2589 | 516 | if (_chunk == NULL) { |
kamg@2589 | 517 | signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); |
kamg@2589 | 518 | } |
duke@435 | 519 | if (k) k->set_next(_chunk); // Append new chunk to end of linked list |
duke@435 | 520 | else _first = _chunk; |
duke@435 | 521 | _hwm = _chunk->bottom(); // Save the cached hwm, max |
duke@435 | 522 | _max = _chunk->top(); |
duke@435 | 523 | set_size_in_bytes(size_in_bytes() + len); |
duke@435 | 524 | void* result = _hwm; |
duke@435 | 525 | _hwm += x; |
duke@435 | 526 | return result; |
duke@435 | 527 | } |
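Amalloc-style requests (declared in allocation.hpp) bump _hwm within the current chunk and fall into grow() above only on overflow, which chains a fresh Chunk and retries. A self-contained miniature of that bump-pointer scheme (illustrative only; it prepends chunks and ignores alignment, unlike the real Arena):

    #include <cstddef>
    #include <cstdlib>

    // Miniature bump-pointer arena in the spirit of Arena::grow above.
    struct MiniChunk {
      MiniChunk* next;
      char* bottom() { return (char*)(this + 1); }
    };

    class MiniArena {
      MiniChunk* _first = nullptr;
      char* _hwm = nullptr;        // next free byte
      char* _max = nullptr;        // end of current chunk
      static const size_t chunk_payload = 4096;

      void* grow(size_t x) {
        size_t len = x > chunk_payload ? x : chunk_payload;
        MiniChunk* c = (MiniChunk*)std::malloc(sizeof(MiniChunk) + len);
        if (c == nullptr) return nullptr;  // the real Arena calls vm_exit_out_of_memory
        c->next = _first;                  // real Arena appends; prepending is fine here
        _first = c;
        _hwm = c->bottom();
        _max = _hwm + len;
        void* result = _hwm;
        _hwm += x;
        return result;
      }
     public:
      void* alloc(size_t x) {
        if ((size_t)(_max - _hwm) < x) return grow(x);
        char* old = _hwm;                  // common case: bump the high-water mark
        _hwm += x;
        return old;
      }
      ~MiniArena() {
        while (_first != nullptr) { MiniChunk* n = _first->next; std::free(_first); _first = n; }
      }
    };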
duke@435 | 528 | |
duke@435 | 529 | |
duke@435 | 530 | |
duke@435 | 531 | // Reallocate storage in Arena. |
duke@435 | 532 | void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size) { |
duke@435 | 533 | assert(new_size >= 0, "bad size"); |
duke@435 | 534 | if (new_size == 0) return NULL; |
duke@435 | 535 | #ifdef ASSERT |
duke@435 | 536 | if (UseMallocOnly) { |
duke@435 | 537 | // always allocate a new object (otherwise we'll free this one twice) |
duke@435 | 538 | char* copy = (char*)Amalloc(new_size); |
duke@435 | 539 | size_t n = MIN2(old_size, new_size); |
duke@435 | 540 | if (n > 0) memcpy(copy, old_ptr, n); |
duke@435 | 541 | Afree(old_ptr,old_size); // Mostly done to keep stats accurate |
duke@435 | 542 | return copy; |
duke@435 | 543 | } |
duke@435 | 544 | #endif |
duke@435 | 545 | char *c_old = (char*)old_ptr; // Handy name |
duke@435 | 546 | // Stupid fast special case |
duke@435 | 547 | if( new_size <= old_size ) { // Shrink in-place |
duke@435 | 548 | if( c_old+old_size == _hwm) // Attempt to free the excess bytes |
duke@435 | 549 | _hwm = c_old+new_size; // Adjust hwm |
duke@435 | 550 | return c_old; |
duke@435 | 551 | } |
duke@435 | 552 | |
duke@435 | 553 | // make sure that new_size is legal |
duke@435 | 554 | size_t corrected_new_size = ARENA_ALIGN(new_size); |
duke@435 | 555 | |
duke@435 | 556 | // See if we can resize in-place |
duke@435 | 557 | if( (c_old+old_size == _hwm) && // Adjusting recent thing |
duke@435 | 558 | (c_old+corrected_new_size <= _max) ) { // Still fits where it sits |
duke@435 | 559 | _hwm = c_old+corrected_new_size; // Adjust hwm |
duke@435 | 560 | return c_old; // Return old pointer |
duke@435 | 561 | } |
duke@435 | 562 | |
duke@435 | 563 | // Oops, got to relocate guts |
duke@435 | 564 | void *new_ptr = Amalloc(new_size); |
duke@435 | 565 | memcpy( new_ptr, c_old, old_size ); |
duke@435 | 566 | Afree(c_old,old_size); // Mostly done to keep stats accurate |
duke@435 | 567 | return new_ptr; |
duke@435 | 568 | } |
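The three Arealloc cases above (shrink in place, extend in place when the block is the most recent allocation, or relocate-and-copy) matter to callers because pointer identity is only preserved in the first two. A hedged usage sketch against the Arena API declared in memory/allocation.hpp ('arena' is assumed to exist; with UseMallocOnly, every call reallocates instead):

    // Illustrates Arealloc's pointer-identity behavior; not HotSpot code.
    void arealloc_example(Arena* arena) {
      char* buf = (char*)arena->Amalloc(64);
      // Most recent allocation: extending typically succeeds in place.
      char* grown = (char*)arena->Arealloc(buf, 64, 128);
      // Shrinking always succeeds in place.
      char* shrunk = (char*)arena->Arealloc(grown, 128, 32);
      // After an unrelated allocation, extending must relocate-and-copy,
      // so the returned pointer may differ and stale copies must not be used.
      (void)arena->Amalloc(16);
      char* moved = (char*)arena->Arealloc(shrunk, 32, 256);
      (void)moved;
    }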
duke@435 | 569 | |
duke@435 | 570 | |
duke@435 | 571 | // Determine if pointer belongs to this Arena or not. |
duke@435 | 572 | bool Arena::contains( const void *ptr ) const { |
duke@435 | 573 | #ifdef ASSERT |
duke@435 | 574 | if (UseMallocOnly) { |
duke@435 | 575 | // really slow, but not easy to make fast |
duke@435 | 576 | if (_chunk == NULL) return false; |
duke@435 | 577 | char** bottom = (char**)_chunk->bottom(); |
duke@435 | 578 | for (char** p = (char**)_hwm - 1; p >= bottom; p--) { |
duke@435 | 579 | if (*p == ptr) return true; |
duke@435 | 580 | } |
duke@435 | 581 | for (Chunk *c = _first; c != NULL; c = c->next()) { |
duke@435 | 582 | if (c == _chunk) continue; // current chunk has been processed |
duke@435 | 583 | char** bottom = (char**)c->bottom(); |
duke@435 | 584 | for (char** p = (char**)c->top() - 1; p >= bottom; p--) { |
duke@435 | 585 | if (*p == ptr) return true; |
duke@435 | 586 | } |
duke@435 | 587 | } |
duke@435 | 588 | return false; |
duke@435 | 589 | } |
duke@435 | 590 | #endif |
duke@435 | 591 | if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm ) |
duke@435 | 592 | return true; // Check for in this chunk |
duke@435 | 593 | for (Chunk *c = _first; c; c = c->next()) { |
duke@435 | 594 | if (c == _chunk) continue; // current chunk has been processed |
duke@435 | 595 | if ((void*)c->bottom() <= ptr && ptr < (void*)c->top()) { |
duke@435 | 596 | return true; // Check for every chunk in Arena |
duke@435 | 597 | } |
duke@435 | 598 | } |
duke@435 | 599 | return false; // Not in any Chunk, so not in Arena |
duke@435 | 600 | } |
duke@435 | 601 | |
duke@435 | 602 | |
duke@435 | 603 | #ifdef ASSERT |
duke@435 | 604 | void* Arena::malloc(size_t size) { |
duke@435 | 605 | assert(UseMallocOnly, "shouldn't call"); |
duke@435 | 606 | // use malloc, but save pointer in res. area for later freeing |
duke@435 | 607 | char** save = (char**)internal_malloc_4(sizeof(char*)); |
zgu@3900 | 608 | return (*save = (char*)os::malloc(size, mtChunk)); |
duke@435 | 609 | } |
duke@435 | 610 | |
duke@435 | 611 | // for debugging with UseMallocOnly |
duke@435 | 612 | void* Arena::internal_malloc_4(size_t x) { |
duke@435 | 613 | assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); |
kamg@2589 | 614 | check_for_overflow(x, "Arena::internal_malloc_4"); |
duke@435 | 615 | if (_hwm + x > _max) { |
duke@435 | 616 | return grow(x); |
duke@435 | 617 | } else { |
duke@435 | 618 | char *old = _hwm; |
duke@435 | 619 | _hwm += x; |
duke@435 | 620 | return old; |
duke@435 | 621 | } |
duke@435 | 622 | } |
duke@435 | 623 | #endif |
duke@435 | 624 | |
duke@435 | 625 | |
duke@435 | 626 | //-------------------------------------------------------------------------------------- |
duke@435 | 627 | // Non-product code |
duke@435 | 628 | |
duke@435 | 629 | #ifndef PRODUCT |
duke@435 | 630 | // The global operator new should never be called since it will usually indicate |
duke@435 | 631 | // a memory leak. Use CHeapObj as the base class of such objects to make it explicit |
duke@435 | 632 | // that they're allocated on the C heap. |
duke@435 | 633 | // Commented out in product version to avoid conflicts with third-party C++ native code. |
duke@435 | 634 | // %% note this is causing a problem on solaris debug build. the global |
duke@435 | 635 | // new is being called from jdk source and causing data corruption. |
duke@435 | 636 | // src/share/native/sun/awt/font/fontmanager/textcache/hsMemory.cpp::hsSoftNew |
duke@435 | 637 | // define CATCH_OPERATOR_NEW_USAGE if you want to use this. |
duke@435 | 638 | #ifdef CATCH_OPERATOR_NEW_USAGE |
duke@435 | 639 | void* operator new(size_t size){ |
duke@435 | 640 | static bool warned = false; |
duke@435 | 641 | if (!warned && warn_new_operator) |
duke@435 | 642 | warning("should not call global (default) operator new"); |
duke@435 | 643 | warned = true; |
duke@435 | 644 | return (void *) AllocateHeap(size, "global operator new"); |
duke@435 | 645 | } |
duke@435 | 646 | #endif |
duke@435 | 647 | |
duke@435 | 648 | void AllocatedObj::print() const { print_on(tty); } |
duke@435 | 649 | void AllocatedObj::print_value() const { print_value_on(tty); } |
duke@435 | 650 | |
duke@435 | 651 | void AllocatedObj::print_on(outputStream* st) const { |
duke@435 | 652 | st->print_cr("AllocatedObj(" INTPTR_FORMAT ")", this); |
duke@435 | 653 | } |
duke@435 | 654 | |
duke@435 | 655 | void AllocatedObj::print_value_on(outputStream* st) const { |
duke@435 | 656 | st->print("AllocatedObj(" INTPTR_FORMAT ")", this); |
duke@435 | 657 | } |
duke@435 | 658 | |
kvn@2557 | 659 | julong Arena::_bytes_allocated = 0; |
kvn@2557 | 660 | |
kvn@2557 | 661 | void Arena::inc_bytes_allocated(size_t x) { inc_stat_counter(&_bytes_allocated, x); } |
duke@435 | 662 | |
duke@435 | 663 | AllocStats::AllocStats() { |
kvn@2557 | 664 | start_mallocs = os::num_mallocs; |
kvn@2557 | 665 | start_frees = os::num_frees; |
duke@435 | 666 | start_malloc_bytes = os::alloc_bytes; |
kvn@2557 | 667 | start_mfree_bytes = os::free_bytes; |
kvn@2557 | 668 | start_res_bytes = Arena::_bytes_allocated; |
duke@435 | 669 | } |
duke@435 | 670 | |
kvn@2557 | 671 | julong AllocStats::num_mallocs() { return os::num_mallocs - start_mallocs; } |
kvn@2557 | 672 | julong AllocStats::alloc_bytes() { return os::alloc_bytes - start_malloc_bytes; } |
kvn@2557 | 673 | julong AllocStats::num_frees() { return os::num_frees - start_frees; } |
kvn@2557 | 674 | julong AllocStats::free_bytes() { return os::free_bytes - start_mfree_bytes; } |
kvn@2557 | 675 | julong AllocStats::resource_bytes() { return Arena::_bytes_allocated - start_res_bytes; } |
duke@435 | 676 | void AllocStats::print() { |
kvn@2557 | 677 | tty->print_cr(UINT64_FORMAT " mallocs (" UINT64_FORMAT "MB), " |
kvn@2557 | 678 | UINT64_FORMAT" frees (" UINT64_FORMAT "MB), " UINT64_FORMAT "MB resrc", |
kvn@2557 | 679 | num_mallocs(), alloc_bytes()/M, num_frees(), free_bytes()/M, resource_bytes()/M); |
duke@435 | 680 | } |
duke@435 | 681 | |
duke@435 | 682 | |
duke@435 | 683 | // debugging code |
duke@435 | 684 | inline void Arena::free_all(char** start, char** end) { |
duke@435 | 685 | for (char** p = start; p < end; p++) if (*p) os::free(*p); |
duke@435 | 686 | } |
duke@435 | 687 | |
duke@435 | 688 | void Arena::free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) { |
duke@435 | 689 | assert(UseMallocOnly, "should not call"); |
duke@435 | 690 | // free all objects malloced since resource mark was created; resource area |
duke@435 | 691 | // contains their addresses |
duke@435 | 692 | if (chunk->next()) { |
duke@435 | 693 | // this chunk is full, and some others too |
duke@435 | 694 | for (Chunk* c = chunk->next(); c != NULL; c = c->next()) { |
duke@435 | 695 | char* top = c->top(); |
duke@435 | 696 | if (c->next() == NULL) { |
duke@435 | 697 | top = hwm2; // last chunk is only used up to hwm2 |
duke@435 | 698 | assert(c->contains(hwm2), "bad hwm2"); |
duke@435 | 699 | } |
duke@435 | 700 | free_all((char**)c->bottom(), (char**)top); |
duke@435 | 701 | } |
duke@435 | 702 | assert(chunk->contains(hwm), "bad hwm"); |
duke@435 | 703 | assert(chunk->contains(max), "bad max"); |
duke@435 | 704 | free_all((char**)hwm, (char**)max); |
duke@435 | 705 | } else { |
duke@435 | 706 | // this chunk was partially used |
duke@435 | 707 | assert(chunk->contains(hwm), "bad hwm"); |
duke@435 | 708 | assert(chunk->contains(hwm2), "bad hwm2"); |
duke@435 | 709 | free_all((char**)hwm, (char**)hwm2); |
duke@435 | 710 | } |
duke@435 | 711 | } |
duke@435 | 712 | |
duke@435 | 713 | |
duke@435 | 714 | ReallocMark::ReallocMark() { |
duke@435 | 715 | #ifdef ASSERT |
duke@435 | 716 | Thread *thread = ThreadLocalStorage::get_thread_slow(); |
duke@435 | 717 | _nesting = thread->resource_area()->nesting(); |
duke@435 | 718 | #endif |
duke@435 | 719 | } |
duke@435 | 720 | |
duke@435 | 721 | void ReallocMark::check() { |
duke@435 | 722 | #ifdef ASSERT |
duke@435 | 723 | if (_nesting != Thread::current()->resource_area()->nesting()) { |
duke@435 | 724 | fatal("allocation bug: array could grow within nested ResourceMark"); |
duke@435 | 725 | } |
duke@435 | 726 | #endif |
duke@435 | 727 | } |
duke@435 | 728 | |
duke@435 | 729 | #endif // Non-product |