src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp


8229401: Fix JFR code cache test failures
8223689: Add JFR Thread Sampling Support
8223690: Add JFR BiasedLock Event Support
8223691: Add JFR G1 Region Type Change Event Support
8223692: Add JFR G1 Heap Summary Event Support
Summary: Backport JFR from JDK11, additional fixes
Reviewed-by: neugens, apetushkov
Contributed-by: denghui.ddh@alibaba-inc.com

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP

#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/memRegion.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/macros.hpp"

class DirtyCardQueueSet;
class G1SATBCardTableLoggingModRefBS;

// This barrier is specialized to use a logging barrier to support
// snapshot-at-the-beginning marking.
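//
// Illustrative sketch of how this barrier set is used (the real callers are
// the interpreter and the compilers through the generic BarrierSet interface;
// the variable names below are made up for the example):
//
//   oop* field = ...;                              // field about to be overwritten
//   bs->write_ref_field_pre_work(field, new_val);  // pre-barrier: enqueue the old value
//   *field = new_val;                              // the actual store
//   bs->write_ref_field(field, new_val);           // post-barrier (card marking/logging)
//
// The enqueued old values preserve the snapshot-at-the-beginning invariant:
// an object reachable when marking started is either marked or recorded in a
// SATB buffer, even if the mutator unlinks it while marking is in progress.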

class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
protected:
  enum G1CardValues {
    g1_young_gen = CT_MR_BS_last_reserved << 1
  };
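  // g1_young_gen extends the card values inherited from CardTableModRefBS:
  // cards covering young regions are pre-marked with this value (see
  // g1_mark_as_young() below) so the post-barrier can recognize stores into
  // the young generation and skip dirtying and enqueueing for them.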

public:
  static int g1_young_card_val() { return g1_young_gen; }

  // Add "pre_val" to a set of objects that may have been disconnected from the
  // pre-marking object graph.
  static void enqueue(oop pre_val);

  G1SATBCardTableModRefBS(MemRegion whole_heap,
                          int max_covered_regions);

  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::G1SATBCT || CardTableModRefBS::is_a(bsn);
  }

  virtual bool has_write_ref_pre_barrier() { return true; }

  // This notes that we don't need to access any BarrierSet data
  // structures, so this can be called from a static context.
  template <class T> static void write_ref_field_pre_static(T* field, oop newVal) {
    T heap_oop = oopDesc::load_heap_oop(field);
    if (!oopDesc::is_null(heap_oop)) {
      enqueue(oopDesc::decode_heap_oop(heap_oop));
    }
  }

  // We export this to make it available in cases where the static
  // type of the barrier set is known. Note that it is non-virtual.
  template <class T> inline void inline_write_ref_field_pre(T* field, oop newVal) {
    write_ref_field_pre_static(field, newVal);
  }

  // These are the more general virtual versions.
  virtual void write_ref_field_pre_work(oop* field, oop new_val) {
    inline_write_ref_field_pre(field, new_val);
  }
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
    inline_write_ref_field_pre(field, new_val);
  }
  virtual void write_ref_field_pre_work(void* field, oop new_val) {
    guarantee(false, "Not needed");
  }

  template <class T> void write_ref_array_pre_work(T* dst, int count);
  virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized);
  virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized);
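  // The array variants apply the same pre-barrier to "count" elements about
  // to be overwritten starting at "dst"; when "dest_uninitialized" is true
  // the destination holds no previous references, so nothing is enqueued.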

  /*
     Claimed and deferred bits are used together in G1 during the evacuation
     pause. These bits can have the following state transitions:
     1. The claimed bit can be set over any other card state, except that
        the "dirty -> dirty and claimed" transition is checked for in
        G1 code and is not used.
     2. The deferred bit can be set only if the previous state of the card
        was either clean or claimed. mark_card_deferred() is wait-free.
        We do not care whether the operation is successful, because if it
        is not, it will only result in a duplicate entry in the update
        buffer due to the "cache-miss". So it's not worth spinning.
   */

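  // Illustrative reading of the checks below (assuming the encoding in
  // CardTableModRefBS, where a clean card has all bits set and the claimed
  // and deferred states occupy dedicated low-order bits): masking a card
  // value with (clean_card_mask_val() | claimed_card_val()) keeps the claimed
  // bit together with the bits that identify a clean card, so the comparison
  // holds only for cards that are claimed and not clean. is_card_deferred()
  // reads the same way with deferred_card_val().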
  bool is_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
  }

  void set_card_claimed(size_t card_index) {
    jbyte val = _byte_map[card_index];
    if (val == clean_card_val()) {
      val = (jbyte)claimed_card_val();
    } else {
      val |= (jbyte)claimed_card_val();
    }
    _byte_map[card_index] = val;
  }

  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
  void g1_mark_as_young(const MemRegion& mr);

  bool mark_card_deferred(size_t card_index);

  bool is_card_deferred(size_t card_index) {
    jbyte val = _byte_map[card_index];
    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
  }
};

class G1SATBCardTableLoggingModRefBSChangedListener : public G1MappingChangedListener {
private:
  G1SATBCardTableLoggingModRefBS* _card_table;
public:
  G1SATBCardTableLoggingModRefBSChangedListener() : _card_table(NULL) { }

  void set_card_table(G1SATBCardTableLoggingModRefBS* card_table) { _card_table = card_table; }

  virtual void on_commit(uint start_idx, size_t num_regions, bool zero_filled);
};

// Adds card-table logging to the post-barrier.
// Usual invariant: all dirty cards are logged in the DirtyCardQueueSet.
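//
// Sketch of the logging post-barrier (the actual steps live in the
// implementation of write_ref_field_work(), not in this header): after a
// reference store, the card covering the updated field is looked up; if it is
// neither a young card nor already dirty, it is dirtied and its address is
// pushed onto the current thread's dirty card queue, which drains into _dcqs
// for the concurrent refinement threads.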
class G1SATBCardTableLoggingModRefBS: public G1SATBCardTableModRefBS {
  friend class G1SATBCardTableLoggingModRefBSChangedListener;
private:
  G1SATBCardTableLoggingModRefBSChangedListener _listener;
  DirtyCardQueueSet& _dcqs;
public:
  static size_t compute_size(size_t mem_region_size_in_words) {
    size_t number_of_slots = (mem_region_size_in_words / card_size_in_words);
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
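  // Worked example (illustrative, assuming the usual 512-byte cards, i.e.
  // card_size_in_words == 64 on a 64-bit VM): a 1 GB range spans 2^27 words,
  // which maps to 2^21 one-byte card table slots, so compute_size() returns
  // 2 MB rounded up to the reservation granularity.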

  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                 int max_covered_regions);

  virtual void initialize() { }
  virtual void initialize(G1RegionToSpaceMapper* mapper);

  virtual void resize_covered_region(MemRegion new_region) { ShouldNotReachHere(); }

  bool is_a(BarrierSet::Name bsn) {
    return bsn == BarrierSet::G1SATBCTLogging ||
           G1SATBCardTableModRefBS::is_a(bsn);
  }

  void write_ref_field_work(void* field, oop new_val, bool release = false);

  // Can be called from static contexts.
  static void write_ref_field_static(void* field, oop new_val);

  // NB: if you do a whole-heap invalidation, the "usual invariant" defined
  // above no longer applies.
  void invalidate(MemRegion mr, bool whole_heap = false);

  void write_region_work(MemRegion mr)    { invalidate(mr); }
  void write_ref_array_work(MemRegion mr) { invalidate(mr); }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1SATBCARDTABLEMODREFBS_HPP
