/*
 * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BARRIERSET_HPP
#define SHARE_VM_MEMORY_BARRIERSET_HPP

#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"

// This class provides the interface between a barrier implementation and
// the rest of the system.

class BarrierSet: public CHeapObj<mtGC> {
  friend class VMStructs;
public:
  enum Name {
    ModRef,
    CardTableModRef,
    CardTableExtension,
    G1SATBCT,
    G1SATBCTLogging,
    Other,
    Uninit
  };

  enum Flags {
    None                = 0,
    TargetUninitialized = 1
  };
protected:
  int _max_covered_regions;
  Name _kind;

public:

  BarrierSet() { _kind = Uninit; }
  // To get around prohibition on RTTI.
  BarrierSet::Name kind() { return _kind; }
  virtual bool is_a(BarrierSet::Name bsn) = 0;

  // These operations indicate what kind of barriers the BarrierSet has.
  virtual bool has_read_ref_barrier() = 0;
  virtual bool has_read_prim_barrier() = 0;
  virtual bool has_write_ref_barrier() = 0;
  virtual bool has_write_ref_pre_barrier() = 0;
  virtual bool has_write_prim_barrier() = 0;

  // These functions indicate whether a particular access of the given
  // kinds requires a barrier.
  virtual bool read_ref_needs_barrier(void* field) = 0;
  virtual bool read_prim_needs_barrier(HeapWord* field, size_t bytes) = 0;
  virtual bool write_prim_needs_barrier(HeapWord* field, size_t bytes,
                                        juint val1, juint val2) = 0;

  // The first four operations provide a direct implementation of the
  // barrier set.  An interpreter loop, for example, could call these
  // directly, as appropriate.
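
  // For illustration only (the local names below are hypothetical), a
  // runtime path storing "new_val" into the oop field at "field_addr"
  // would typically bracket the store with the pre- and post-write
  // barriers declared below:
  //
  //   BarrierSet* bs = Universe::heap()->barrier_set();
  //   bs->write_ref_field_pre(field_addr, new_val);  // e.g. SATB enqueue of the old value
  //   *field_addr = new_val;                         // (or the compressed-oop equivalent)
  //   bs->write_ref_field(field_addr, new_val);      // e.g. dirty the covering card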

  // Invoke the barrier, if any, necessary when reading the given ref field.
  virtual void read_ref_field(void* field) = 0;

  // Invoke the barrier, if any, necessary when reading the given primitive
  // "field" of "bytes" bytes in "obj".
  virtual void read_prim_field(HeapWord* field, size_t bytes) = 0;

  // Invoke the barrier, if any, necessary when writing "new_val" into the
  // ref field at "offset" in "obj".
  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  // First the pre-write versions...
  template <class T> inline void write_ref_field_pre(T* field, oop new_val);
private:
  // Keep this private so as to catch violations at build time.
  virtual void write_ref_field_pre_work(     void* field, oop new_val) { guarantee(false, "Not needed"); };
protected:
  virtual void write_ref_field_pre_work(      oop* field, oop new_val) {};
  virtual void write_ref_field_pre_work(narrowOop* field, oop new_val) {};
public:

  // ...then the post-write version.
  inline void write_ref_field(void* field, oop new_val);
protected:
  virtual void write_ref_field_work(void* field, oop new_val) = 0;
public:

  // Invoke the barrier, if any, necessary when writing the "bytes"-byte
  // value(s) "val1" (and "val2") into the primitive "field".
  virtual void write_prim_field(HeapWord* field, size_t bytes,
                                juint val1, juint val2) = 0;

  // Operations on arrays, or general regions (e.g., for "clone") may be
  // optimized by some barriers.

  // The first six operations tell whether such an optimization exists for
  // the particular barrier.
  virtual bool has_read_ref_array_opt() = 0;
  virtual bool has_read_prim_array_opt() = 0;
  virtual bool has_write_ref_array_pre_opt() { return true; }
  virtual bool has_write_ref_array_opt() = 0;
  virtual bool has_write_prim_array_opt() = 0;

  virtual bool has_read_region_opt() = 0;
  virtual bool has_write_region_opt() = 0;

  // These operations should assert false unless the corresponding operation
  // above returns true.  Otherwise, they should perform an appropriate
  // barrier for an array whose elements are all in the given memory region.
  virtual void read_ref_array(MemRegion mr) = 0;
  virtual void read_prim_array(MemRegion mr) = 0;

  // Below length is the # array elements being written
  virtual void write_ref_array_pre(oop* dst, int length,
                                   bool dest_uninitialized = false) {}
  virtual void write_ref_array_pre(narrowOop* dst, int length,
                                   bool dest_uninitialized = false) {}
  // Below count is the # array elements being written, starting
  // at the address "start", which may not necessarily be HeapWord-aligned
  inline void write_ref_array(HeapWord* start, size_t count);
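
  // As a sketch only (the caller and locals below are hypothetical), an
  // oop-array store of "length" elements into "dst" would typically be
  // bracketed by the pre- and post-barriers above:
  //
  //   bs->write_ref_array_pre(dst, length);         // pre-barrier, if the collector needs one
  //   ... copy or store the "length" elements ...
  //   bs->write_ref_array((HeapWord*)dst, length);  // post-barrier over the written range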

  // Static versions, suitable for calling from generated code;
  // count is # array elements being written, starting with "start",
  // which may not necessarily be HeapWord-aligned.
  static void static_write_ref_array_pre(HeapWord* start, size_t count);
  static void static_write_ref_array_post(HeapWord* start, size_t count);

protected:
  virtual void write_ref_array_work(MemRegion mr) = 0;
public:
  virtual void write_prim_array(MemRegion mr) = 0;

  virtual void read_region(MemRegion mr) = 0;

  // (For efficiency reasons, this operation is specialized for certain
  // barrier types.  Semantically, it should be thought of as a call to the
  // virtual "_work" function below, which must implement the barrier.)
  inline void write_region(MemRegion mr);
protected:
  virtual void write_region_work(MemRegion mr) = 0;
public:

  // Some barrier sets create tables whose elements correspond to parts of
  // the heap; the CardTableModRefBS is an example.  Such barrier sets will
  // normally reserve space for such tables, and commit parts of the table
  // "covering" parts of the heap that are committed.  The constructor is
  // passed the maximum number of independently committable subregions to
  // be covered, and the "resize_covered_region" function allows the
  // sub-parts of the heap to inform the barrier set of changes of their
  // sizes.
  BarrierSet(int max_covered_regions) :
    _max_covered_regions(max_covered_regions) {}

  // Inform the BarrierSet that the covered heap region that starts
  // with "base" has been changed to have the given size (possibly from 0,
  // for initialization.)
  virtual void resize_covered_region(MemRegion new_region) = 0;

  // If the barrier set imposes any alignment restrictions on boundaries
  // within the heap, this function tells whether they are met.
  virtual bool is_aligned(HeapWord* addr) = 0;

  // Print a description of the memory for the barrier set
  virtual void print_on(outputStream* st) const = 0;
};

#endif // SHARE_VM_MEMORY_BARRIERSET_HPP