/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP
#define SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP

#include "memory/barrierSet.hpp"
#include "memory/cardTableModRefBS.hpp"

// Inline functions of BarrierSet, which de-virtualize certain
// performance-critical calls when the barrier is the most common
// card-table kind.

template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
  if (kind() == CardTableModRef) {
    ((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
  } else {
    write_ref_field_pre_work(field, new_val);
  }
}

void BarrierSet::write_ref_field(void* field, oop new_val) {
  if (kind() == CardTableModRef) {
    ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
  } else {
    write_ref_field_work(field, new_val);
  }
}

// count is the number of array elements being written
void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
  assert(count <= (size_t)max_intx, "count too large");
  HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
  // In the case of compressed oops, start and end may potentially be misaligned;
  // so we need to conservatively align the first downward (this is not
  // strictly necessary for current uses, but a case of good hygiene and,
  // if you will, aesthetics) and the second upward (this is essential for
  // current uses) to a HeapWord boundary, so we mark all cards overlapping
  // this write. If this evolves in the future to calling a
  // logging barrier of narrow oop granularity, like the pre-barrier for G1
  // (mentioned here merely by way of example), we will need to change this
  // interface, so it is "exactly precise" (if I may be allowed the adverbial
  // redundancy for emphasis) and does not include narrow oop slots not
  // included in the original write interval.
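  // Illustrative example (the concrete numbers are an assumption for
  // exposition, not part of the original comment): with 8-byte HeapWords and
  // 4-byte narrow oops, a write of count == 3 elements starting at 0x1004
  // gives end == 0x1010; aligning start down and end up yields the region
  // [0x1000, 0x1010), so every card overlapping the written slots is marked,
  // at the cost of also covering the untouched narrow-oop slot at 0x1000.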
  HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
  HeapWord* aligned_end   = (HeapWord*)align_size_up  ((uintptr_t)end,   HeapWordSize);
  // If compressed oops were not being used, these should already be aligned
  assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
         "Expected heap word alignment of start and end");
#if 0
  warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT "," INTPTR_FORMAT ")\t",
          start, count, aligned_start, aligned_end);
#endif
  write_ref_array_work(MemRegion(aligned_start, aligned_end));
}


void BarrierSet::write_region(MemRegion mr) {
  if (kind() == CardTableModRef) {
    ((CardTableModRefBS*)this)->inline_write_region(mr);
  } else {
    write_region_work(mr);
  }
}

#endif // SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP
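// Usage sketch (illustrative only; the surrounding calls and names are
// assumptions, not declarations made by this header): a store of an oop into
// a heap field is typically bracketed by the pre- and post-write barriers
// inlined above, roughly as follows:
//
//   BarrierSet* bs = Universe::heap()->barrier_set();
//   bs->write_ref_field_pre(field_addr, new_val);   // e.g. G1 SATB logging
//   *field_addr = new_val;                          // the actual oop store
//   bs->write_ref_field(field_addr, new_val);       // e.g. card dirtying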