Tue, 12 Dec 2017 10:30:27 +0800
#6345 sync is controlled by UseSyncLevel instead of Use3A2000
Reviewed-by: fujie
1 /*
2 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_MEMORY_BARRIERSET_HPP
26 #define SHARE_VM_MEMORY_BARRIERSET_HPP
28 #include "memory/memRegion.hpp"
29 #include "oops/oopsHierarchy.hpp"
30 #include "runtime/orderAccess.hpp"
32 // This class provides the interface between a barrier implementation and
33 // the rest of the system.
35 class BarrierSet: public CHeapObj<mtGC> {
36 friend class VMStructs;
37 public:
38 enum Name {
39 ModRef,
40 CardTableModRef,
41 CardTableExtension,
42 G1SATBCT,
43 G1SATBCTLogging,
44 Other,
45 Uninit
46 };
48 enum Flags {
49 None = 0,
50 TargetUninitialized = 1
51 };
52 protected:
53 int _max_covered_regions;
54 Name _kind;
56 public:
58 BarrierSet() { _kind = Uninit; }
59 // To get around prohibition on RTTI.
60 BarrierSet::Name kind() { return _kind; }
61 virtual bool is_a(BarrierSet::Name bsn) = 0;
63 // These operations indicate what kind of barriers the BarrierSet has.
64 virtual bool has_read_ref_barrier() = 0;
65 virtual bool has_read_prim_barrier() = 0;
66 virtual bool has_write_ref_barrier() = 0;
67 virtual bool has_write_ref_pre_barrier() = 0;
68 virtual bool has_write_prim_barrier() = 0;
70 // These functions indicate whether a particular access of the given
71 // kinds requires a barrier.
72 virtual bool read_ref_needs_barrier(void* field) = 0;
73 virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
74 virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
75 juint val1, juint val2) = 0;
77 // The first four operations provide a direct implementation of the
78 // barrier set. An interpreter loop, for example, could call these
79 // directly, as appropriate.
81 // Invoke the barrier, if any, necessary when reading the given ref field.
82 virtual void read_ref_field(void* field) = 0;
84 // Invoke the barrier, if any, necessary when reading the given primitive
85 // "field" of "bytes" bytes in "obj".
86 virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;
88 // Invoke the barrier, if any, necessary when writing "new_val" into the
89 // ref field at "offset" in "obj".
90 // (For efficiency reasons, this operation is specialized for certain
91 // barrier types. Semantically, it should be thought of as a call to the
92 // virtual "_work" function below, which must implement the barrier.)
93 // First the pre-write versions...
94 template <class T> inline void write_ref_field_pre(T* field, oop new_val);
95 private:
96 // Keep this private so as to catch violations at build time.
97 virtual void write_ref_field_pre_work( void* field, oop new_val) { guarantee(false, "Not needed"); };
98 protected:
99 virtual void write_ref_field_pre_work( oop* field, oop new_val) {
100 #ifdef MIPS64
101 if (UseSyncLevel >= 2000) OrderAccess::fence();
102 #endif
103 };
104 virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {
105 #ifdef MIPS64
106 if (UseSyncLevel >= 2000) OrderAccess::fence();
107 #endif
108 };
109 public:
111 // ...then the post-write version.
112 inline void write_ref_field(void* field, oop new_val, bool release = false);
113 protected:
114 virtual void write_ref_field_work(void* field, oop new_val, bool release = false) = 0;
115 public:
117 // Invoke the barrier, if any, necessary when writing the "bytes"-byte
118 // value(s) "val1" (and "val2") into the primitive "field".
119 virtual void write_prim_field(HeapWord* field, size_t bytes,
120 juint val1, juint val2) = 0;
122 // Operations on arrays, or general regions (e.g., for "clone") may be
123 // optimized by some barriers.
125 // The first six operations tell whether such an optimization exists for
126 // the particular barrier.
127 virtual bool has_read_ref_array_opt() = 0;
128 virtual bool has_read_prim_array_opt() = 0;
129 virtual bool has_write_ref_array_pre_opt() { return true; }
130 virtual bool has_write_ref_array_opt() = 0;
131 virtual bool has_write_prim_array_opt() = 0;
133 virtual bool has_read_region_opt() = 0;
134 virtual bool has_write_region_opt() = 0;
136 // These operations should assert false unless the correponding operation
137 // above returns true. Otherwise, they should perform an appropriate
138 // barrier for an array whose elements are all in the given memory region.
139 virtual void read_ref_array(MemRegion mr) = 0;
140 virtual void read_prim_array(MemRegion mr) = 0;
142 // Below length is the # array elements being written
143 virtual void write_ref_array_pre(oop* dst, int length,
144 bool dest_uninitialized = false) {
145 #ifdef MIPS64
146 if (UseSyncLevel >= 2000) OrderAccess::fence();
147 #endif
148 }
149 virtual void write_ref_array_pre(narrowOop* dst, int length,
150 bool dest_uninitialized = false) {
151 #ifdef MIPS64
152 if (UseSyncLevel >= 2000) OrderAccess::fence();
153 #endif
154 }
155 // Below count is the # array elements being written, starting
156 // at the address "start", which may not necessarily be HeapWord-aligned
157 inline void write_ref_array(HeapWord* start, size_t count);
159 // Static versions, suitable for calling from generated code;
160 // count is # array elements being written, starting with "start",
161 // which may not necessarily be HeapWord-aligned.
162 static void static_write_ref_array_pre(HeapWord* start, size_t count);
163 static void static_write_ref_array_post(HeapWord* start, size_t count);
165 protected:
166 virtual void write_ref_array_work(MemRegion mr) = 0;
167 public:
168 virtual void write_prim_array(MemRegion mr) = 0;
170 virtual void read_region(MemRegion mr) = 0;
172 // (For efficiency reasons, this operation is specialized for certain
173 // barrier types. Semantically, it should be thought of as a call to the
174 // virtual "_work" function below, which must implement the barrier.)
175 inline void write_region(MemRegion mr);
176 protected:
177 virtual void write_region_work(MemRegion mr) = 0;
178 public:
180 // Some barrier sets create tables whose elements correspond to parts of
181 // the heap; the CardTableModRefBS is an example. Such barrier sets will
182 // normally reserve space for such tables, and commit parts of the table
183 // "covering" parts of the heap that are committed. The constructor is
184 // passed the maximum number of independently committable subregions to
185 // be covered, and the "resize_covoered_region" function allows the
186 // sub-parts of the heap to inform the barrier set of changes of their
187 // sizes.
188 BarrierSet(int max_covered_regions) :
189 _max_covered_regions(max_covered_regions) {}
191 // Inform the BarrierSet that the the covered heap region that starts
192 // with "base" has been changed to have the given size (possibly from 0,
193 // for initialization.)
194 virtual void resize_covered_region(MemRegion new_region) = 0;
196 // If the barrier set imposes any alignment restrictions on boundaries
197 // within the heap, this function tells whether they are met.
198 virtual bool is_aligned(HeapWord* addr) = 0;
200 // Print a description of the memory for the barrier set
201 virtual void print_on(outputStream* st) const = 0;
202 };
204 #endif // SHARE_VM_MEMORY_BARRIERSET_HPP