Wed, 03 Jul 2019 20:42:37 +0800
Merge
zgu@7074 | 1 | /* |
zgu@9053 | 2 | * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved. |
zgu@7074 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
zgu@7074 | 4 | * |
zgu@7074 | 5 | * This code is free software; you can redistribute it and/or modify it |
zgu@7074 | 6 | * under the terms of the GNU General Public License version 2 only, as |
zgu@7074 | 7 | * published by the Free Software Foundation. |
zgu@7074 | 8 | * |
zgu@7074 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
zgu@7074 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
zgu@7074 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
zgu@7074 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
zgu@7074 | 13 | * accompanied this code). |
zgu@7074 | 14 | * |
zgu@7074 | 15 | * You should have received a copy of the GNU General Public License version |
zgu@7074 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
zgu@7074 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
zgu@7074 | 18 | * |
zgu@7074 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
zgu@7074 | 20 | * or visit www.oracle.com if you need additional information or have any |
zgu@7074 | 21 | * questions. |
zgu@7074 | 22 | * |
zgu@7074 | 23 | */ |
zgu@7074 | 24 | #include "precompiled.hpp" |
zgu@7074 | 25 | |
zgu@7074 | 26 | #include "runtime/atomic.hpp" |
zgu@7074 | 27 | #include "runtime/atomic.inline.hpp" |
zgu@7074 | 28 | #include "services/mallocSiteTable.hpp" |
zgu@7074 | 29 | #include "services/mallocTracker.hpp" |
zgu@7074 | 30 | #include "services/mallocTracker.inline.hpp" |
zgu@7074 | 31 | #include "services/memTracker.hpp" |
zgu@7074 | 32 | |
// Raw static storage backing the MallocMemorySummary snapshot singleton.
// The MallocMemorySnapshot object is constructed into this buffer with
// placement new by MallocMemorySummary::initialize().
size_t MallocMemorySummary::_snapshot[CALC_OBJ_SIZE_IN_TYPE(MallocMemorySnapshot, size_t)];
zgu@7074 | 34 | |
zgu@7074 | 35 | // Total malloc'd memory amount |
zgu@7074 | 36 | size_t MallocMemorySnapshot::total() const { |
zgu@7074 | 37 | size_t amount = 0; |
zgu@7074 | 38 | for (int index = 0; index < mt_number_of_types; index ++) { |
zgu@7074 | 39 | amount += _malloc[index].malloc_size(); |
zgu@7074 | 40 | } |
zgu@7074 | 41 | amount += _tracking_header.size() + total_arena(); |
zgu@7074 | 42 | return amount; |
zgu@7074 | 43 | } |
zgu@7074 | 44 | |
zgu@7074 | 45 | // Total malloc'd memory used by arenas |
zgu@7074 | 46 | size_t MallocMemorySnapshot::total_arena() const { |
zgu@7074 | 47 | size_t amount = 0; |
zgu@7074 | 48 | for (int index = 0; index < mt_number_of_types; index ++) { |
zgu@7074 | 49 | amount += _malloc[index].arena_size(); |
zgu@7074 | 50 | } |
zgu@7074 | 51 | return amount; |
zgu@7074 | 52 | } |
zgu@7074 | 53 | |
zgu@7074 | 54 | // Make adjustment by subtracting chunks used by arenas |
zgu@7074 | 55 | // from total chunks to get total free chunck size |
zgu@7074 | 56 | void MallocMemorySnapshot::make_adjustment() { |
zgu@7074 | 57 | size_t arena_size = total_arena(); |
zgu@7074 | 58 | int chunk_idx = NMTUtil::flag_to_index(mtChunk); |
zgu@7074 | 59 | _malloc[chunk_idx].record_free(arena_size); |
zgu@7074 | 60 | } |
zgu@7074 | 61 | |
zgu@7074 | 62 | |
zgu@7074 | 63 | void MallocMemorySummary::initialize() { |
zgu@7074 | 64 | assert(sizeof(_snapshot) >= sizeof(MallocMemorySnapshot), "Sanity Check"); |
zgu@7074 | 65 | // Uses placement new operator to initialize static area. |
zgu@7074 | 66 | ::new ((void*)_snapshot)MallocMemorySnapshot(); |
zgu@7074 | 67 | } |
zgu@7074 | 68 | |
zgu@7074 | 69 | void MallocHeader::release() const { |
zgu@7074 | 70 | // Tracking already shutdown, no housekeeping is needed anymore |
zgu@7074 | 71 | if (MemTracker::tracking_level() <= NMT_minimal) return; |
zgu@7074 | 72 | |
zgu@7074 | 73 | MallocMemorySummary::record_free(size(), flags()); |
zgu@7074 | 74 | MallocMemorySummary::record_free_malloc_header(sizeof(MallocHeader)); |
ctornqvi@7344 | 75 | if (MemTracker::tracking_level() == NMT_detail) { |
zgu@7074 | 76 | MallocSiteTable::deallocation_at(size(), _bucket_idx, _pos_idx); |
zgu@7074 | 77 | } |
zgu@7074 | 78 | } |
zgu@7074 | 79 | |
zgu@7074 | 80 | bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size, |
zgu@9053 | 81 | size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const { |
zgu@9053 | 82 | bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx, flags); |
zgu@7074 | 83 | |
zgu@7074 | 84 | // Something went wrong, could be OOM or overflow malloc site table. |
zgu@7074 | 85 | // We want to keep tracking data under OOM circumstance, so transition to |
zgu@7074 | 86 | // summary tracking. |
zgu@7074 | 87 | if (!ret) { |
zgu@7074 | 88 | MemTracker::transition_to(NMT_summary); |
zgu@7074 | 89 | } |
zgu@7074 | 90 | return ret; |
zgu@7074 | 91 | } |
zgu@7074 | 92 | |
zgu@7074 | 93 | bool MallocHeader::get_stack(NativeCallStack& stack) const { |
zgu@7074 | 94 | return MallocSiteTable::access_stack(stack, _bucket_idx, _pos_idx); |
zgu@7074 | 95 | } |
zgu@7074 | 96 | |
zgu@7074 | 97 | bool MallocTracker::initialize(NMT_TrackingLevel level) { |
zgu@7074 | 98 | if (level >= NMT_summary) { |
zgu@7074 | 99 | MallocMemorySummary::initialize(); |
zgu@7074 | 100 | } |
zgu@7074 | 101 | |
zgu@7074 | 102 | if (level == NMT_detail) { |
zgu@7074 | 103 | return MallocSiteTable::initialize(); |
zgu@7074 | 104 | } |
zgu@7074 | 105 | return true; |
zgu@7074 | 106 | } |
zgu@7074 | 107 | |
zgu@7074 | 108 | bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { |
zgu@7074 | 109 | assert(from != NMT_off, "Can not transition from off state"); |
zgu@7074 | 110 | assert(to != NMT_off, "Can not transition to off state"); |
coleenp@7267 | 111 | assert (from != NMT_minimal, "cannot transition from minimal state"); |
zgu@7074 | 112 | |
coleenp@7267 | 113 | if (from == NMT_detail) { |
zgu@7074 | 114 | assert(to == NMT_minimal || to == NMT_summary, "Just check"); |
zgu@7074 | 115 | MallocSiteTable::shutdown(); |
zgu@7074 | 116 | } |
zgu@7074 | 117 | return true; |
zgu@7074 | 118 | } |
zgu@7074 | 119 | |
zgu@7074 | 120 | // Record a malloc memory allocation |
zgu@7074 | 121 | void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, |
zgu@7074 | 122 | const NativeCallStack& stack, NMT_TrackingLevel level) { |
zgu@7074 | 123 | void* memblock; // the address for user data |
zgu@7074 | 124 | MallocHeader* header = NULL; |
zgu@7074 | 125 | |
zgu@7074 | 126 | if (malloc_base == NULL) { |
zgu@7074 | 127 | return NULL; |
zgu@7074 | 128 | } |
zgu@7074 | 129 | |
zgu@7074 | 130 | // Uses placement global new operator to initialize malloc header |
ctornqvi@7344 | 131 | |
ctornqvi@7344 | 132 | if (level == NMT_off) { |
ctornqvi@7344 | 133 | return malloc_base; |
zgu@7074 | 134 | } |
ctornqvi@7344 | 135 | |
ctornqvi@7344 | 136 | header = ::new (malloc_base)MallocHeader(size, flags, stack, level); |
zgu@7074 | 137 | memblock = (void*)((char*)malloc_base + sizeof(MallocHeader)); |
zgu@7074 | 138 | |
zgu@7074 | 139 | // The alignment check: 8 bytes alignment for 32 bit systems. |
zgu@7074 | 140 | // 16 bytes alignment for 64-bit systems. |
zgu@7074 | 141 | assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check"); |
zgu@7074 | 142 | |
zgu@7074 | 143 | #ifdef ASSERT |
zgu@7074 | 144 | if (level > NMT_minimal) { |
zgu@7074 | 145 | // Read back |
zgu@7074 | 146 | assert(get_size(memblock) == size, "Wrong size"); |
zgu@7074 | 147 | assert(get_flags(memblock) == flags, "Wrong flags"); |
zgu@7074 | 148 | } |
zgu@7074 | 149 | #endif |
zgu@7074 | 150 | |
zgu@7074 | 151 | return memblock; |
zgu@7074 | 152 | } |
zgu@7074 | 153 | |
zgu@7074 | 154 | void* MallocTracker::record_free(void* memblock) { |
zgu@7074 | 155 | // Never turned on |
zgu@7074 | 156 | if (MemTracker::tracking_level() == NMT_off || |
zgu@7074 | 157 | memblock == NULL) { |
zgu@7074 | 158 | return memblock; |
zgu@7074 | 159 | } |
zgu@7074 | 160 | MallocHeader* header = malloc_header(memblock); |
zgu@7074 | 161 | header->release(); |
zgu@7074 | 162 | |
zgu@7074 | 163 | return (void*)header; |
zgu@7074 | 164 | } |
zgu@7074 | 165 | |
zgu@7074 | 166 |