src/share/vm/memory/barrierSet.inline.hpp

Fri, 20 Sep 2013 10:53:28 +0200

author
stefank
date
Fri, 20 Sep 2013 10:53:28 +0200
changeset 5769
2c022e432e10
parent 2314
f95d63e2154a
child 6493
3205e78d8193
permissions
-rw-r--r--

8024974: Incorrect use of GC_locker::is_active()
Summary: SymbolTable and StringTable can make calls to GC_locker::is_active() outside a safepoint. This isn't safe because the GC_locker active state (lock count) is only updated at a safepoint and only remains valid as long as _needs_gc is true. However, outside a safepoint, _needs_gc can change to false at any time, which makes it impossible to do a correct call to is_active() in that context. In this case these calls can just be removed since the input argument to basic_add() should never be on the heap and so there's no need to check the GC_locker state. This change also adjusts the assert() in is_active() to make sure all calls to this function are always done under a safepoint.
Reviewed-by: brutisso, dcubed
Contributed-by: per.liden@oracle.com

duke@435 1 /*
stefank@2314 2 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP
stefank@2314 26 #define SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP
stefank@2314 27
stefank@2314 28 #include "memory/barrierSet.hpp"
stefank@2314 29 #include "memory/cardTableModRefBS.hpp"
stefank@2314 30
duke@435 31 // Inline functions of BarrierSet, which de-virtualize certain
ysr@1280 32 // performance-critical calls when the barrier is the most common
duke@435 33 // card-table kind.
duke@435 34
ysr@1280 35 template <class T> void BarrierSet::write_ref_field_pre(T* field, oop new_val) {
ysr@777 36 if (kind() == CardTableModRef) {
ysr@777 37 ((CardTableModRefBS*)this)->inline_write_ref_field_pre(field, new_val);
ysr@777 38 } else {
ysr@777 39 write_ref_field_pre_work(field, new_val);
ysr@777 40 }
ysr@777 41 }
ysr@777 42
coleenp@548 43 void BarrierSet::write_ref_field(void* field, oop new_val) {
duke@435 44 if (kind() == CardTableModRef) {
duke@435 45 ((CardTableModRefBS*)this)->inline_write_ref_field(field, new_val);
duke@435 46 } else {
duke@435 47 write_ref_field_work(field, new_val);
duke@435 48 }
duke@435 49 }
duke@435 50
ysr@1526 51 // count is number of array elements being written
ysr@1526 52 void BarrierSet::write_ref_array(HeapWord* start, size_t count) {
ysr@1526 53 assert(count <= (size_t)max_intx, "count too large");
ysr@1526 54 HeapWord* end = (HeapWord*)((char*)start + (count*heapOopSize));
ysr@1526 55 // In the case of compressed oops, start and end may potentially be misaligned;
ysr@1526 56 // so we need to conservatively align the first downward (this is not
ysr@1526 57 // strictly necessary for current uses, but a case of good hygiene and,
ysr@1526 58 // if you will, aesthetics) and the second upward (this is essential for
ysr@1526 59 // current uses) to a HeapWord boundary, so we mark all cards overlapping
ysr@1680 60 // this write. If this evolves in the future to calling a
ysr@1526 61 // logging barrier of narrow oop granularity, like the pre-barrier for G1
ysr@1526 62 // (mentioned here merely by way of example), we will need to change this
ysr@1680 63 // interface, so it is "exactly precise" (if i may be allowed the adverbial
ysr@1680 64 // redundancy for emphasis) and does not include narrow oop slots not
ysr@1680 65 // included in the original write interval.
ysr@1526 66 HeapWord* aligned_start = (HeapWord*)align_size_down((uintptr_t)start, HeapWordSize);
ysr@1526 67 HeapWord* aligned_end = (HeapWord*)align_size_up ((uintptr_t)end, HeapWordSize);
ysr@1526 68 // If compressed oops were not being used, these should already be aligned
ysr@1526 69 assert(UseCompressedOops || (aligned_start == start && aligned_end == end),
ysr@1526 70 "Expected heap word alignment of start and end");
ysr@1526 71 #if 0
ysr@1526 72 warning("Post:\t" INTPTR_FORMAT "[" SIZE_FORMAT "] : [" INTPTR_FORMAT","INTPTR_FORMAT")\t",
ysr@1526 73 start, count, aligned_start, aligned_end);
ysr@1526 74 #endif
ysr@1526 75 write_ref_array_work(MemRegion(aligned_start, aligned_end));
ysr@1526 76 }
ysr@1526 77
ysr@1526 78
duke@435 79 void BarrierSet::write_region(MemRegion mr) {
duke@435 80 if (kind() == CardTableModRef) {
duke@435 81 ((CardTableModRefBS*)this)->inline_write_region(mr);
duke@435 82 } else {
duke@435 83 write_region_work(mr);
duke@435 84 }
duke@435 85 }
stefank@2314 86
stefank@2314 87 #endif // SHARE_VM_MEMORY_BARRIERSET_INLINE_HPP

mercurial