Mon, 12 Aug 2019 18:30:40 +0300
8223147: JFR Backport
8199712: Flight Recorder
8203346: JFR: Inconsistent signature of jfr_add_string_constant
8195817: JFR.stop should require name of recording
8195818: JFR.start should increase autogenerated name by one
8195819: Remove recording=x from jcmd JFR.check output
8203921: JFR thread sampling is missing fixes from JDK-8194552
8203929: Limit amount of data for JFR.dump
8203664: JFR start failure after AppCDS archive created with JFR StartFlightRecording
8003209: JFR events for network utilization
8207392: [PPC64] Implement JFR profiling
8202835: jfr/event/os/TestSystemProcess.java fails on missing events
Summary: Backport JFR from JDK11. Initial integration
Reviewed-by: neugens
/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP

#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp"
#include "oops/oop.psgc.inline.hpp"
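
// manager_array: per-worker promotion managers, indexed by GC worker id.
// Note that the bounds check below allows index == ParallelGCThreads; the
// extra slot is presumably reserved for the VM thread's promotion manager.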
inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
  return &_manager_array[index];
}
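
// claim_or_forward_internal_depth: if *p is already forwarded, just update
// the reference in place (dirtying the card if the forwardee is still
// young); otherwise defer the copy by pushing the location onto the
// depth-first work queue.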
template <class T>
inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
  if (p != NULL) { // XXX: error if p != NULL here
    oop o = oopDesc::load_decode_heap_oop_not_null(p);
    if (o->is_forwarded()) {
      o = o->forwardee();
      // Card mark
      if (PSScavenge::is_obj_in_young(o)) {
        PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
      }
      oopDesc::encode_store_heap_oop_not_null(p, o);
    } else {
      push_depth(p);
    }
  }
}

template <class T>
inline void PSPromotionManager::claim_or_forward_depth(T* p) {
  assert(PSScavenge::should_scavenge(p, true), "revisiting object?");
  assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap,
         "Sanity");
  assert(Universe::heap()->is_in(p), "pointer outside heap");

  claim_or_forward_internal_depth(p);
}
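
// promotion_trace_event: report a promotion event for new_obj, either
// "promotion in new PLAB" (lab != NULL) or "promotion outside PLAB"
// (direct allocation). A NULL new_obj means the allocation failed, so no
// event is reported.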
inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
                                                      size_t obj_size,
                                                      uint age, bool tenured,
                                                      const PSPromotionLAB* lab) {
  // Skip if memory allocation failed
  if (new_obj != NULL) {
    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();

    if (lab != NULL) {
      // Promotion of object through newly allocated PLAB
      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        size_t lab_size = lab->capacity();
        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
                                                      age, tenured, lab_size);
      }
    } else {
      // Promotion of object directly to heap
      if (gc_tracer->should_report_promotion_outside_plab_event()) {
        size_t obj_bytes = obj_size * HeapWordSize;
        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
                                                       age, tenured);
      }
    }
  }
}

//
// This method is pretty bulky. It would be nice to split it up
// into smaller submethods, but we need to be careful not to hurt
// performance.
//
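// Rough shape of the protocol, as implemented below:
//   1. Read the mark word once; an unmarked mark means not yet forwarded.
//   2. Copy the object into a young or old PLAB (flushing and refilling
//      the PLAB, or allocating directly when the object is large).
//   3. CAS the forwarding pointer into the old copy's mark word. The CAS
//      winner owns its copy; a loser unallocates (or fills) its copy and
//      adopts the winner's forwardee.
//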
template<bool promote_immediately>
oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(PSScavenge::should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }
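
    // At this point new_obj == NULL means the object will be tenured:
    // it was too old, to-space allocation failed, or promote_immediately
    // was requested.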

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (Universe::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif // #ifndef PRODUCT
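
      // (promotion_should_fail() is a test hook used to exercise the
      // promotion-failure path.)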
      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion-failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different from the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }
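
      // Large objArrays are not pushed whole: the oop is "masked" and
      // pushed so the array can be scanned in chunks later (see
      // process_popped_location_depth() and process_array_chunk()).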
      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        new_obj->push_contents(this);
      }
    } else {
      // We lost, someone else "owns" this object
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

#ifndef PRODUCT
  // This code must come after the CAS test, or it will print incorrect
  // information.
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                           PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
                           new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
  }
#endif

  return new_obj;
}
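

// process_popped_location_depth: dispatch one entry popped from the
// depth-first queue. A masked entry denotes a chunked large array;
// anything else is a (narrow or wide) oop location that is copied and
// pushed through the safe barrier.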
inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  if (is_oop_masked(p)) {
    assert(PSChunkLargeArrays, "invariant");
    oop const old = unmask_chunked_array_oop(p);
    process_array_chunk(old);
  } else {
    if (p.is_narrow()) {
      assert(UseCompressedOops, "Error");
      PSScavenge::copy_and_push_safe_barrier<narrowOop, /*promote_immediately=*/false>(this, p);
    } else {
      PSScavenge::copy_and_push_safe_barrier<oop, /*promote_immediately=*/false>(this, p);
    }
  }
}
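
// Statistics-only hook: counts steals of masked (chunked array) entries.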
#if TASKQUEUE_STATS
void PSPromotionManager::record_steal(StarTask& p) {
  if (is_oop_masked(p)) {
    ++_masked_steals;
  }
}
#endif // TASKQUEUE_STATS

#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP