Wed, 10 Apr 2013 08:55:50 -0400
8010151: nsk/regression/b6653214 fails "assert(snapshot != NULL) failed: Worker should not be started"
Summary: Fixed a race condition when shutting down NMT while the worker thread is being started, and also fixed a few mis-declared volatile pointers.
Reviewed-by: dholmes, dlong
1 /*
2 * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
27 #include "runtime/atomic.hpp"
28 #include "services/memBaseline.hpp"
29 #include "services/memRecorder.hpp"
30 #include "services/memPtr.hpp"
31 #include "services/memTracker.hpp"
33 MemPointer* SequencedRecordIterator::next_record() {
34 MemPointerRecord* itr_cur = (MemPointerRecord*)_itr.current();
35 if (itr_cur == NULL) {
36 return itr_cur;
37 }
39 MemPointerRecord* itr_next = (MemPointerRecord*)_itr.next();
41 // don't collapse virtual memory records
42 while (itr_next != NULL && !itr_cur->is_vm_pointer() &&
43 !itr_next->is_vm_pointer() &&
44 same_kind(itr_cur, itr_next)) {
45 itr_cur = itr_next;
46 itr_next = (MemPointerRecord*)_itr.next();
47 }
49 return itr_cur;
50 }
// Count of live MemRecorder instances; incremented in the constructor
// and decremented in the destructor via Atomic ops.
volatile jint MemRecorder::_instance_count = 0;
// Construct a recorder for the current NMT sequence generation.
// The backing pointer array is allocated with std::nothrow; on
// allocation failure _pointer_records stays NULL and the self-recording
// below is skipped.
MemRecorder::MemRecorder() {
  assert(MemTracker::is_on(), "Native memory tracking is off");
  Atomic::inc(&_instance_count);
  // Stamp this recorder with the current generation; record() asserts
  // that all records it holds come from the same generation.
  set_generation();

  if (MemTracker::track_callsite()) {
    // With call-site tracking each record also carries a pc, so use the
    // extended record type.
    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecordEx,
                                DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
  } else {
    _pointer_records = new (std::nothrow)FixedSizeMemPointerArray<SeqMemPointerRecord,
                                DEFAULT_RECORDER_PTR_ARRAY_SIZE>();
  }
  _next = NULL;

  if (_pointer_records != NULL) {
    // record itself: account this recorder object and its backing array
    // with NMT (tagged otNMTRecorder) so NMT tracks its own footprint
    record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
        sizeof(MemRecorder), CALLER_PC);
    record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
        _pointer_records->instance_size(),CURRENT_PC);
  }
}
79 MemRecorder::~MemRecorder() {
80 if (_pointer_records != NULL) {
81 if (MemTracker::is_on()) {
82 MemTracker::record_free((address)_pointer_records, mtNMT);
83 MemTracker::record_free((address)this, mtNMT);
84 }
85 delete _pointer_records;
86 }
87 // delete all linked recorders
88 while (_next != NULL) {
89 MemRecorder* tmp = _next;
90 _next = _next->next();
91 tmp->set_next(NULL);
92 delete tmp;
93 }
94 Atomic::dec(&_instance_count);
95 }
97 // Sorting order:
98 // 1. memory block address
99 // 2. mem pointer record tags
100 // 3. sequence number
101 int MemRecorder::sort_record_fn(const void* e1, const void* e2) {
102 const MemPointerRecord* p1 = (const MemPointerRecord*)e1;
103 const MemPointerRecord* p2 = (const MemPointerRecord*)e2;
104 int delta = UNSIGNED_COMPARE(p1->addr(), p2->addr());
105 if (delta == 0) {
106 int df = UNSIGNED_COMPARE((p1->flags() & MemPointerRecord::tag_masks),
107 (p2->flags() & MemPointerRecord::tag_masks));
108 if (df == 0) {
109 assert(p1->seq() != p2->seq(), "dup seq");
110 return p1->seq() - p2->seq();
111 } else {
112 return df;
113 }
114 } else {
115 return delta;
116 }
117 }
// Append one memory activity record to this recorder's array.
//   p     - address of the memory block
//   flags - memory type plus operation tag (malloc/free/vm/arena)
//   pc    - caller pc, used only when call-site tracking is enabled
// Returns the result of append(); presumably false when the fixed-size
// array is full — confirm against MemPointerArray.
bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
#ifdef ASSERT
  // Validate that the tag bits form a well-formed record: vm records must
  // carry some tag; malloc records must be a malloc, free or arena tag.
  if (MemPointerRecord::is_virtual_memory_record(flags)) {
    assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
  } else {
    assert((flags & MemPointerRecord::tag_masks) == MemPointerRecord::malloc_tag() ||
           (flags & MemPointerRecord::tag_masks) == MemPointerRecord::free_tag() ||
           IS_ARENA_OBJ(flags),
           "bad malloc record");
  }
  // a recorder should only hold records within the same generation
  unsigned long cur_generation = SequenceGenerator::current_generation();
  assert(cur_generation == _generation,
         "this thread did not enter sync point");
#endif

  if (MemTracker::track_callsite()) {
    SeqMemPointerRecordEx ap(p, flags, size, pc);
    debug_only(check_dup_seq(ap.seq());)
    return _pointer_records->append(&ap);
  } else {
    SeqMemPointerRecord ap(p, flags, size);
    debug_only(check_dup_seq(ap.seq());)
    return _pointer_records->append(&ap);
  }
}
// iterator for alloc pointers
// Sorts the recorded pointers (by address, tag, then sequence number via
// sort_record_fn) and returns a sequenced iterator over them.
SequencedRecordIterator MemRecorder::pointer_itr() {
  assert(_pointer_records != NULL, "just check");
  _pointer_records->sort((FN_SORT)sort_record_fn);
  return SequencedRecordIterator(_pointer_records);
}
// Stamp this recorder with the current sequence generation; record()
// asserts against this to keep one recorder within a single generation.
void MemRecorder::set_generation() {
  _generation = SequenceGenerator::current_generation();
}
#ifdef ASSERT

// Debug-only sanity check: assert that no record already stored in this
// recorder carries the given sequence number.
void MemRecorder::check_dup_seq(jint seq) const {
  MemPointerArrayIteratorImpl itr(_pointer_records);
  for (MemPointerRecord* rec = (MemPointerRecord*)itr.current();
       rec != NULL;
       rec = (MemPointerRecord*)itr.next()) {
    assert(rec->seq() != seq, "dup seq");
  }
}

#endif