Wed, 27 Aug 2014 08:19:12 -0400
8046598: Scalable Native memory tracking development
Summary: Enhance scalability of native memory tracking
Reviewed-by: coleenp, ctornqvi, gtriantafill
zgu@7074 | 1 | /* |
zgu@7074 | 2 | * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. |
zgu@7074 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@7074 | 4 | * |
zgu@7074 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@7074 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@7074 | 7 | * published by the Free Software Foundation. |
zgu@7074 | 8 | * |
zgu@7074 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@7074 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@7074 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@7074 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@7074 | 13 | * accompanied this code). |
zgu@7074 | 14 | * |
zgu@7074 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@7074 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@7074 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@7074 | 18 | * |
zgu@7074 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@7074 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@7074 | 21 | * questions. |
zgu@7074 | 22 | * |
zgu@7074 | 23 | */ |
zgu@7074 | 24 | #include "precompiled.hpp" |
zgu@7074 | 25 | |
zgu@7074 | 26 | #include "runtime/atomic.hpp" |
zgu@7074 | 27 | #include "runtime/atomic.inline.hpp" |
zgu@7074 | 28 | #include "services/mallocSiteTable.hpp" |
zgu@7074 | 29 | #include "services/mallocTracker.hpp" |
zgu@7074 | 30 | #include "services/mallocTracker.inline.hpp" |
zgu@7074 | 31 | #include "services/memTracker.hpp" |
zgu@7074 | 32 | |
// Raw backing storage for the process-wide MallocMemorySnapshot.  Declared as
// a size_t array (sized via CALC_OBJ_SIZE_IN_TYPE) so it needs no dynamic
// initialization; MallocMemorySummary::initialize() placement-news the
// snapshot object into this area.
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
zgu@7074 | 34 | |
zgu@7074 | 35 | // Total malloc'd memory amount |
zgu@7074 | 36 | size_t MallocMemorySnapshot::total() const { |
zgu@7074 | 37 | size_t amount = 0; |
zgu@7074 | 38 | for (int index = 0; index < mt_number_of_types; index ++) { |
zgu@7074 | 39 | amount += _malloc[index].malloc_size(); |
zgu@7074 | 40 | } |
zgu@7074 | 41 | amount += _tracking_header.size() + total_arena(); |
zgu@7074 | 42 | return amount; |
zgu@7074 | 43 | } |
zgu@7074 | 44 | |
zgu@7074 | 45 | // Total malloc'd memory used by arenas |
zgu@7074 | 46 | size_t MallocMemorySnapshot::total_arena() const { |
zgu@7074 | 47 | size_t amount = 0; |
zgu@7074 | 48 | for (int index = 0; index < mt_number_of_types; index ++) { |
zgu@7074 | 49 | amount += _malloc[index].arena_size(); |
zgu@7074 | 50 | } |
zgu@7074 | 51 | return amount; |
zgu@7074 | 52 | } |
zgu@7074 | 53 | |
zgu@7074 | 54 | |
zgu@7074 | 55 | void MallocMemorySnapshot::reset() { |
zgu@7074 | 56 | _tracking_header.reset(); |
zgu@7074 | 57 | for (int index = 0; index < mt_number_of_types; index ++) { |
zgu@7074 | 58 | _malloc[index].reset(); |
zgu@7074 | 59 | } |
zgu@7074 | 60 | } |
zgu@7074 | 61 | |
// Make adjustment by subtracting chunks used by arenas
// from total chunks to get total free chunk size
void MallocMemorySnapshot::make_adjustment() {
  size_t arena_size = total_arena();
  // Arena memory is carved out of mtChunk-tagged chunks; record it as freed
  // against the chunk bucket so mtChunk reflects free chunks only.
  int chunk_idx = NMTUtil::flag_to_index(mtChunk);
  _malloc[chunk_idx].record_free(arena_size);
}
zgu@7074 | 69 | |
// One-time setup of the summary snapshot.  The snapshot lives in the
// statically allocated _snapshot array, so no heap allocation is needed
// (important: this can run before the heap/tracking machinery is usable).
void MallocMemorySummary::initialize() {
  assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check");
  // Uses placement new operator to initialize static area.
  ::new ((void*)_snapshot)MallocMemorySnapshot();
}
zgu@7074 | 76 | |
// Undo this allocation's bookkeeping when the block is freed.
void MallocHeader::release() const {
  // Tracking already shutdown, no housekeeping is needed anymore
  if (MemTracker::tracking_level() <= NMT_minimal) return;

  // Roll back the summary counters, including the cost of the tracking
  // header itself.
  MallocMemorySummary::record_free(size(), flags());
  MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader));
  // Under detail tracking, also credit the allocation back to its call site.
  if (tracking_level() == NMT_detail) {
    MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx);
  }
}
zgu@7074 | 87 | |
zgu@7074 | 88 | bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size, |
zgu@7074 | 89 | size_t* bucket_idx, size_t* pos_idx) const { |
zgu@7074 | 90 | bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx); |
zgu@7074 | 91 | |
zgu@7074 | 92 | // Something went wrong, could be OOM or overflow malloc site table. |
zgu@7074 | 93 | // We want to keep tracking data under OOM circumstance, so transition to |
zgu@7074 | 94 | // summary tracking. |
zgu@7074 | 95 | if (!ret) { |
zgu@7074 | 96 | MemTracker::transition_to(NMT_summary); |
zgu@7074 | 97 | } |
zgu@7074 | 98 | return ret; |
zgu@7074 | 99 | } |
zgu@7074 | 100 | |
// Look up the allocation call stack recorded for this block; forwards to the
// malloc site table using the indices stored in the header.
bool MallocHeader::get_stack(NativeCallStack& stack) const {
  return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx);
}
zgu@7074 | 104 | |
zgu@7074 | 105 | bool MallocTracker::initialize(NMT_TrackingLevel level) { |
zgu@7074 | 106 | if (level >= NMT_summary) { |
zgu@7074 | 107 | MallocMemorySummary::initialize(); |
zgu@7074 | 108 | } |
zgu@7074 | 109 | |
zgu@7074 | 110 | if (level == NMT_detail) { |
zgu@7074 | 111 | return MallocSiteTable::initialize(); |
zgu@7074 | 112 | } |
zgu@7074 | 113 | return true; |
zgu@7074 | 114 | } |
zgu@7074 | 115 | |
// Move malloc tracking between levels (off is not a valid endpoint here).
// Returns false only if entering detail level fails to allocate the
// malloc site table.
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
  assert(from != NMT_off, "Can not transition from off state");
  assert(to != NMT_off, "Can not transition to off state");
  // Leaving minimal: counters were not updated while minimal (see
  // MallocHeader::release), so presumably stale — start from a clean
  // snapshot.  TODO(review): confirm against MemTracker's transition rules.
  if (from == NMT_minimal) {
    MallocMemorySummary::reset();
  }

  // The malloc site table exists only at detail level: build it on the way
  // in, tear it down on the way out.
  if (to == NMT_detail) {
    assert(from == NMT_minimal || from == NMT_summary, "Just check");
    return MallocSiteTable::initialize();
  } else if (from == NMT_detail) {
    assert(to == NMT_minimal || to == NMT_summary, "Just check");
    MallocSiteTable::shutdown();
  }
  return true;
}
zgu@7074 | 132 | |
zgu@7074 | 133 | // Record a malloc memory allocation |
zgu@7074 | 134 | void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, |
zgu@7074 | 135 | const NativeCallStack& stack, NMT_TrackingLevel level) { |
zgu@7074 | 136 | void* memblock; // the address for user data |
zgu@7074 | 137 | MallocHeader* header = NULL; |
zgu@7074 | 138 | |
zgu@7074 | 139 | if (malloc_base == NULL) { |
zgu@7074 | 140 | return NULL; |
zgu@7074 | 141 | } |
zgu@7074 | 142 | |
zgu@7074 | 143 | // Check malloc size, size has to <= MAX_MALLOC_SIZE. This is only possible on 32-bit |
zgu@7074 | 144 | // systems, when malloc size >= 1GB, but is is safe to assume it won't happen. |
zgu@7074 | 145 | if (size > MAX_MALLOC_SIZE) { |
zgu@7074 | 146 | fatal("Should not use malloc for big memory block, use virtual memory instead"); |
zgu@7074 | 147 | } |
zgu@7074 | 148 | // Uses placement global new operator to initialize malloc header |
zgu@7074 | 149 | switch(level) { |
zgu@7074 | 150 | case NMT_off: |
zgu@7074 | 151 | return malloc_base; |
zgu@7074 | 152 | case NMT_minimal: { |
zgu@7074 | 153 | MallocHeader* hdr = ::new (malloc_base) MallocHeader(); |
zgu@7074 | 154 | break; |
zgu@7074 | 155 | } |
zgu@7074 | 156 | case NMT_summary: { |
zgu@7074 | 157 | header = ::new (malloc_base) MallocHeader(size, flags); |
zgu@7074 | 158 | break; |
zgu@7074 | 159 | } |
zgu@7074 | 160 | case NMT_detail: { |
zgu@7074 | 161 | header = ::new (malloc_base) MallocHeader(size, flags, stack); |
zgu@7074 | 162 | break; |
zgu@7074 | 163 | } |
zgu@7074 | 164 | default: |
zgu@7074 | 165 | ShouldNotReachHere(); |
zgu@7074 | 166 | } |
zgu@7074 | 167 | memblock = (void*)((char*)malloc_base + sizeof(MallocHeader)); |
zgu@7074 | 168 | |
zgu@7074 | 169 | // The alignment check: 8 bytes alignment for 32 bit systems. |
zgu@7074 | 170 | // 16 bytes alignment for 64-bit systems. |
zgu@7074 | 171 | assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check"); |
zgu@7074 | 172 | |
zgu@7074 | 173 | // Sanity check |
zgu@7074 | 174 | assert(get_memory_tracking_level(memblock) == level, |
zgu@7074 | 175 | "Wrong tracking level"); |
zgu@7074 | 176 | |
zgu@7074 | 177 | #ifdef ASSERT |
zgu@7074 | 178 | if (level > NMT_minimal) { |
zgu@7074 | 179 | // Read back |
zgu@7074 | 180 | assert(get_size(memblock) == size, "Wrong size"); |
zgu@7074 | 181 | assert(get_flags(memblock) == flags, "Wrong flags"); |
zgu@7074 | 182 | } |
zgu@7074 | 183 | #endif |
zgu@7074 | 184 | |
zgu@7074 | 185 | return memblock; |
zgu@7074 | 186 | } |
zgu@7074 | 187 | |
zgu@7074 | 188 | void* MallocTracker::record_free(void* memblock) { |
zgu@7074 | 189 | // Never turned on |
zgu@7074 | 190 | if (MemTracker::tracking_level() == NMT_off || |
zgu@7074 | 191 | memblock == NULL) { |
zgu@7074 | 192 | return memblock; |
zgu@7074 | 193 | } |
zgu@7074 | 194 | MallocHeader* header = malloc_header(memblock); |
zgu@7074 | 195 | header->release(); |
zgu@7074 | 196 | |
zgu@7074 | 197 | return (void*)header; |
zgu@7074 | 198 | } |
zgu@7074 | 199 | |
zgu@7074 | 200 |