src/share/vm/services/memTrackWorker.cpp

Sun, 03 Feb 2013 22:28:08 +0400

author
dsamersoff
date
Sun, 03 Feb 2013 22:28:08 +0400
changeset 4520
8f696cf1a0fb
parent 4512
4102b59539ce
child 4927
35f8765422b9
permissions
-rw-r--r--

8002048: Protocol to discovery of manageable Java processes on a network
Summary: Introduce a protocol to discover manageable Java instances across a network subnet, JDP
Reviewed-by: sla, dfuchs

zgu@3900 1 /*
zgu@3900 2 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
zgu@3900 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
zgu@3900 4 *
zgu@3900 5 * This code is free software; you can redistribute it and/or modify it
zgu@3900 6 * under the terms of the GNU General Public License version 2 only, as
zgu@3900 7 * published by the Free Software Foundation.
zgu@3900 8 *
zgu@3900 9 * This code is distributed in the hope that it will be useful, but WITHOUT
zgu@3900 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
zgu@3900 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
zgu@3900 12 * version 2 for more details (a copy is included in the LICENSE file that
zgu@3900 13 * accompanied this code).
zgu@3900 14 *
zgu@3900 15 * You should have received a copy of the GNU General Public License version
zgu@3900 16 * 2 along with this work; if not, write to the Free Software Foundation,
zgu@3900 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
zgu@3900 18 *
zgu@3900 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
zgu@3900 20 * or visit www.oracle.com if you need additional information or have any
zgu@3900 21 * questions.
zgu@3900 22 *
zgu@3900 23 */
zgu@3900 24
zgu@3900 25 #include "precompiled.hpp"
zgu@3900 26 #include "runtime/threadCritical.hpp"
zgu@3900 27 #include "services/memTracker.hpp"
zgu@3900 28 #include "services/memTrackWorker.hpp"
zgu@3900 29 #include "utilities/decoder.hpp"
zgu@3900 30 #include "utilities/vmError.hpp"
zgu@3900 31
zgu@4400 32
zgu@4400 33 void GenerationData::reset() {
zgu@4400 34 _number_of_classes = 0;
zgu@4400 35 while (_recorder_list != NULL) {
zgu@4400 36 MemRecorder* tmp = _recorder_list;
zgu@4400 37 _recorder_list = _recorder_list->next();
zgu@4400 38 MemTracker::release_thread_recorder(tmp);
zgu@4400 39 }
zgu@4400 40 }
zgu@4400 41
zgu@3900 42 MemTrackWorker::MemTrackWorker() {
zgu@3900 43 // create thread uses cgc thread type for now. We should revisit
zgu@3900 44 // the option, or create new thread type.
zgu@3900 45 _has_error = !os::create_thread(this, os::cgc_thread);
zgu@3900 46 set_name("MemTrackWorker", 0);
zgu@3900 47
zgu@3900 48 // initial generation circuit buffer
zgu@3900 49 if (!has_error()) {
zgu@3900 50 _head = _tail = 0;
zgu@3900 51 for(int index = 0; index < MAX_GENERATIONS; index ++) {
zgu@4400 52 ::new ((void*)&_gen[index]) GenerationData();
zgu@3900 53 }
zgu@3900 54 }
zgu@3900 55 NOT_PRODUCT(_sync_point_count = 0;)
zgu@3900 56 NOT_PRODUCT(_merge_count = 0;)
zgu@3900 57 NOT_PRODUCT(_last_gen_in_use = 0;)
zgu@3900 58 }
zgu@3900 59
zgu@3900 60 MemTrackWorker::~MemTrackWorker() {
zgu@3900 61 for (int index = 0; index < MAX_GENERATIONS; index ++) {
zgu@4400 62 _gen[index].reset();
zgu@3900 63 }
zgu@3900 64 }
zgu@3900 65
// The plain (throwing) operator new is deliberately unusable: the worker
// is created during NMT setup where allocation failure must be reported
// via has_error(), not thrown. Callers must use the nothrow variant below.
void* MemTrackWorker::operator new(size_t size) {
  assert(false, "use nothrow version");
  return NULL;
}
zgu@3900 70
// Nothrow allocation path; memory is tagged mtNMT so the worker's own
// footprint is accounted to native memory tracking itself.
void* MemTrackWorker::operator new(size_t size, const std::nothrow_t& nothrow_constant) {
  return allocate(size, false, mtNMT);
}
zgu@3900 74
// Start the thread created in the constructor; run() below is its entry.
void MemTrackWorker::start() {
  os::start_thread(this);
}
zgu@3900 78
/*
 * Native memory tracking worker thread loop:
 * 1. merge one generation of memory recorders to staging area
 * 2. promote staging data to memory snapshot
 *
 * This thread can run through safepoint.
 */

void MemTrackWorker::run() {
  assert(MemTracker::is_on(), "native memory tracking is off");
  this->initialize_thread_local_storage();
  this->record_stack_base_and_size();
  // Snapshot is created by MemTracker before the worker is started.
  MemSnapshot* snapshot = MemTracker::get_snapshot();
  assert(snapshot != NULL, "Worker should not be started");
  MemRecorder* rec;
  // Generation currently being merged; published to MemTracker so the
  // current processing progress can be observed externally.
  unsigned long processing_generation = 0;
  bool worker_idle = false;

  while (!MemTracker::shutdown_in_progress()) {
    NOT_PRODUCT(_last_gen_in_use = generations_in_use();)
    {
      // take a recorder from earliest generation in buffer
      ThreadCritical tc;
      rec = _gen[_head].next_recorder();
    }
    if (rec != NULL) {
      // Crossed into a new generation (or just woke from idle):
      // publish the generation number before merging its data.
      if (rec->get_generation() != processing_generation || worker_idle) {
        processing_generation = rec->get_generation();
        worker_idle = false;
        MemTracker::set_current_processing_generation(processing_generation);
      }

      // merge the recorder into staging area
      if (!snapshot->merge(rec)) {
        // merge failure means out of memory; stop tracking entirely
        MemTracker::shutdown(MemTracker::NMT_out_of_memory);
      } else {
        NOT_PRODUCT(_merge_count ++;)
      }
      // recorder is recycled whether or not the merge succeeded
      MemTracker::release_thread_recorder(rec);
    } else {
      // no more recorder to merge, promote staging area
      // to snapshot
      if (_head != _tail) {
        long number_of_classes;
        {
          ThreadCritical tc;
          // re-check under the lock: a recorder may have arrived, or
          // _head may have caught up with _tail in the meantime
          if (_gen[_head].has_more_recorder() || _head == _tail) {
            continue;
          }
          number_of_classes = _gen[_head].number_of_classes();
          _gen[_head].reset();

          // done with this generation, increment _head pointer
          _head = (_head + 1) % MAX_GENERATIONS;
        }
        // promote this generation data to snapshot
        if (!snapshot->promote(number_of_classes)) {
          // failed to promote, means out of memory
          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
        }
      } else {
        // worker thread is idle
        worker_idle = true;
        MemTracker::report_worker_idle();
        // block (up to 1s) waiting for new data
        snapshot->wait(1000);
        ThreadCritical tc;
        // check if more data arrived
        if (!_gen[_head].has_more_recorder()) {
          _gen[_head].add_recorders(MemTracker::get_pending_recorders());
        }
      }
    }
  }
  assert(MemTracker::shutdown_in_progress(), "just check");

  // transits to final shutdown
  MemTracker::final_shutdown();
}
zgu@3900 157
// at synchronization point, where 'safepoint visible' Java threads are blocked
// at a safepoint, and the rest of threads are blocked on ThreadCritical lock.
// The caller MemTracker::sync() already takes ThreadCritical before calling this
// method.
//
// Following tasks are performed:
//   1. add all recorders in pending queue to current generation
//   2. increase generation

void MemTrackWorker::at_sync_point(MemRecorder* rec, int number_of_classes) {
  NOT_PRODUCT(_sync_point_count ++;)
  // sanity: the pending recorder list must not be circular
  assert(count_recorder(rec) <= MemRecorder::_instance_count,
    "pending queue has infinite loop");

  bool out_of_generation_buffer = false;
  // check shutdown state inside ThreadCritical
  if (MemTracker::shutdown_in_progress()) return;

  // record the class count captured for this generation
  _gen[_tail].set_number_of_classes(number_of_classes);
  // append the recorders to the end of the generation
  _gen[_tail].add_recorders(rec);
  assert(count_recorder(_gen[_tail].peek()) <= MemRecorder::_instance_count,
    "after add to current generation has infinite loop");
  // we have collected all recorders for this generation. If there is data,
  // we need to increment _tail to start a new generation.
  // (also advance when _head == _tail so the worker has a generation to drain)
  if (_gen[_tail].has_more_recorder() || _head == _tail) {
    _tail = (_tail + 1) % MAX_GENERATIONS;
    // the circular buffer is exhausted when _tail wraps onto _head
    out_of_generation_buffer = (_tail == _head);
  }

  if (out_of_generation_buffer) {
    // the worker could not keep up; shut tracking down rather than lose data
    MemTracker::shutdown(MemTracker::NMT_out_of_generation);
  }
}
zgu@3900 192
zgu@3900 193 #ifndef PRODUCT
zgu@3900 194 int MemTrackWorker::count_recorder(const MemRecorder* head) {
zgu@3900 195 int count = 0;
zgu@3900 196 while(head != NULL) {
zgu@3900 197 count ++;
zgu@3900 198 head = head->next();
zgu@3900 199 }
zgu@3900 200 return count;
zgu@3900 201 }
zgu@3900 202
zgu@3900 203 int MemTrackWorker::count_pending_recorders() const {
zgu@3900 204 int count = 0;
zgu@3900 205 for (int index = 0; index < MAX_GENERATIONS; index ++) {
zgu@4400 206 MemRecorder* head = _gen[index].peek();
zgu@3900 207 if (head != NULL) {
zgu@3900 208 count += count_recorder(head);
zgu@3900 209 }
zgu@3900 210 }
zgu@3900 211 return count;
zgu@3900 212 }
zgu@3900 213 #endif

mercurial