Thu, 20 Nov 2008 16:56:09 -0800
6684579: SoftReference processing can be made more efficient
Summary: For current soft-ref clearing policies, we can decide at marking time if a soft-reference will definitely not be cleared, postponing the decision of whether it will definitely be cleared to the final reference processing phase. This can be especially beneficial in the case of concurrent collectors where the marking is usually concurrent but reference processing is usually not.
Reviewed-by: jmasa
duke@435 | 1 | /* |
xdono@631 | 2 | * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
// Assembly code for platforms that need it.
//
// Forward declarations of hand-written assembly copy stubs.  On platforms
// that provide them, the pd_* implementations included at the bottom of
// class Copy (below) forward to these.  All of them copy `count` units
// (of the unit type named in the function) from `from` to `to`.
//
//   conjoint_*  : source and destination may overlap
//   disjoint_*  : source and destination must not overlap
//   aligned_*   : both operands aligned to MinObjAlignment
//   arrayof_*   : arraycopy-style, operands aligned as array elements
//   *_atomic    : each unit is read and written atomically
extern "C" {
  void _Copy_conjoint_words(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_disjoint_words(HeapWord* from, HeapWord* to, size_t count);

  void _Copy_conjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count);

  void _Copy_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count);

  void _Copy_conjoint_bytes(void* from, void* to, size_t count);

  void _Copy_conjoint_bytes_atomic  (void* from, void* to, size_t count);
  void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count);
  void _Copy_conjoint_jints_atomic  (jint* from, jint* to, size_t count);
  void _Copy_conjoint_jlongs_atomic (jlong* from, jlong* to, size_t count);
  void _Copy_conjoint_oops_atomic   (oop* from, oop* to, size_t count);

  void _Copy_arrayof_conjoint_bytes  (HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jints  (HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_jlongs (HeapWord* from, HeapWord* to, size_t count);
  void _Copy_arrayof_conjoint_oops   (HeapWord* from, HeapWord* to, size_t count);
}
duke@435 | 50 | |
// Copy is an all-static collection of memory copy, fill, and zero
// primitives.  Public methods validate their arguments in debug builds
// (via the private assert_* helpers, which call basic_fatal on failure)
// and then delegate to platform-dependent pd_* implementations included
// at the bottom of the class.
class Copy : AllStatic {
 public:
  // Block copy methods have four attributes. We don't define all possibilities.
  // alignment: aligned according to minimum Java object alignment (MinObjAlignment)
  // arrayof: arraycopy operation with both operands aligned on the same
  //          boundary as the first element of an array of the copy unit.
  //          This is currently a HeapWord boundary on all platforms, except
  //          for long and double arrays, which are aligned on an 8-byte
  //          boundary on all platforms.
  //          arraycopy operations are implicitly atomic on each array element.
  // overlap: disjoint or conjoint.
  // copy unit: bytes or words (i.e., HeapWords) or oops (i.e., pointers).
  // atomicity: atomic or non-atomic on the copy unit.
  //
  // Names are constructed thusly:
  //
  //     [ 'aligned_' | 'arrayof_' ]
  //     ('conjoint_' | 'disjoint_')
  //     ('words' | 'bytes' | 'jshorts' | 'jints' | 'jlongs' | 'oops')
  //     [ '_atomic' ]
  //
  // Except in the arrayof case, whatever the alignment is, we assume we can copy
  // whole alignment units.  E.g., if MinObjAlignment is 2x word alignment, an odd
  // count may copy an extra word.  In the arrayof case, we are allowed to copy
  // only the number of copy units specified.

  // HeapWords

  // Word-aligned words,    conjoint, not atomic on each word
  static void conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogHeapWordSize);
    pd_conjoint_words(from, to, count);
  }

  // Word-aligned words,    disjoint, not atomic on each word
  static void disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogHeapWordSize);
    assert_disjoint(from, to, count);
    pd_disjoint_words(from, to, count);
  }

  // Word-aligned words,    disjoint, atomic on each word
  static void disjoint_words_atomic(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogHeapWordSize);
    assert_disjoint(from, to, count);
    pd_disjoint_words_atomic(from, to, count);
  }

  // Object-aligned words,  conjoint, not atomic on each word
  static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_aligned(from, to);
    assert_non_zero(count);
    pd_aligned_conjoint_words(from, to, count);
  }

  // Object-aligned words,  disjoint, not atomic on each word
  static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_aligned(from, to);
    assert_disjoint(from, to, count);
    assert_non_zero(count);
    pd_aligned_disjoint_words(from, to, count);
  }

  // bytes, jshorts, jints, jlongs, oops

  // bytes,                 conjoint, not atomic on each byte (not that it matters)
  static void conjoint_bytes(void* from, void* to, size_t count) {
    assert_non_zero(count);
    pd_conjoint_bytes(from, to, count);
  }

  // bytes,                 conjoint, atomic on each byte (not that it matters)
  // NOTE(review): delegates to the same pd_conjoint_bytes as the non-atomic
  // variant — byte copies are presumably atomic per byte on all supported
  // platforms, so no separate pd entry point is needed.
  static void conjoint_bytes_atomic(void* from, void* to, size_t count) {
    assert_non_zero(count);
    pd_conjoint_bytes(from, to, count);
  }

  // jshorts,               conjoint, atomic on each jshort
  static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerShort);
    assert_non_zero(count);
    pd_conjoint_jshorts_atomic(from, to, count);
  }

  // jints,                 conjoint, atomic on each jint
  static void conjoint_jints_atomic(jint* from, jint* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerInt);
    assert_non_zero(count);
    pd_conjoint_jints_atomic(from, to, count);
  }

  // jlongs,                conjoint, atomic on each jlong
  static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerLong);
    assert_non_zero(count);
    pd_conjoint_jlongs_atomic(from, to, count);
  }

  // oops,                  conjoint, atomic on each oop
  static void conjoint_oops_atomic(oop* from, oop* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerHeapOop);
    assert_non_zero(count);
    pd_conjoint_oops_atomic(from, to, count);
  }

  // overloaded for UseCompressedOops: narrow oops are jint-sized, so an
  // atomic jint copy gives the required per-oop atomicity.
  static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) {
    assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong");
    assert_params_ok(from, to, LogBytesPerInt);
    assert_non_zero(count);
    pd_conjoint_jints_atomic((jint*)from, (jint*)to, count);
  }

  // Copy a span of memory.  If the span is an integral number of aligned
  // longs, words, or ints, copy those units atomically.
  // The largest atomic transfer unit is 8 bytes, or the largest power
  // of two which divides all of from, to, and size, whichever is smaller.
  static void conjoint_memory_atomic(void* from, void* to, size_t size);

  // bytes,                 conjoint array, atomic on each byte (not that it matters)
  static void arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) {
    assert_non_zero(count);
    pd_arrayof_conjoint_bytes(from, to, count);
  }

  // jshorts,               conjoint array, atomic on each jshort
  static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerShort);
    assert_non_zero(count);
    pd_arrayof_conjoint_jshorts(from, to, count);
  }

  // jints,                 conjoint array, atomic on each jint
  static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerInt);
    assert_non_zero(count);
    pd_arrayof_conjoint_jints(from, to, count);
  }

  // jlongs,                conjoint array, atomic on each jlong
  static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerLong);
    assert_non_zero(count);
    pd_arrayof_conjoint_jlongs(from, to, count);
  }

  // oops,                  conjoint array, atomic on each oop
  static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) {
    assert_params_ok(from, to, LogBytesPerHeapOop);
    assert_non_zero(count);
    pd_arrayof_conjoint_oops(from, to, count);
  }

  // Known overlap methods

  // Copy word-aligned words from higher to lower addresses, not atomic on each word.
  // Forward (ascending-address) copy; safe when `to` is at or below `from`,
  // even if the regions overlap.  `byte_count` is in bytes so its word
  // alignment can be checked.
  inline static void conjoint_words_to_lower(HeapWord* from, HeapWord* to, size_t byte_count) {
    // byte_count is in bytes to check its alignment
    assert_params_ok(from, to, LogHeapWordSize);
    assert_byte_count_ok(byte_count, HeapWordSize);

    size_t count = (size_t)round_to(byte_count, HeapWordSize) >> LogHeapWordSize;
    assert(to <= from || from + count <= to, "do not overwrite source data");

    while (count-- > 0) {
      *to++ = *from++;
    }
  }

  // Copy word-aligned words from lower to higher addresses, not atomic on each word.
  // Backward (descending-address) copy; safe when `to` is at or above `from`,
  // even if the regions overlap.  `byte_count` is in bytes so its word
  // alignment can be checked.
  inline static void conjoint_words_to_higher(HeapWord* from, HeapWord* to, size_t byte_count) {
    // byte_count is in bytes to check its alignment
    assert_params_ok(from, to, LogHeapWordSize);
    assert_byte_count_ok(byte_count, HeapWordSize);

    size_t count = (size_t)round_to(byte_count, HeapWordSize) >> LogHeapWordSize;
    assert(from <= to || to + count <= from, "do not overwrite source data");

    // Start at the last word of each region and walk downward.
    from += count - 1;
    to   += count - 1;
    while (count-- > 0) {
      *to-- = *from--;
    }
  }

  // Fill methods

  // Fill word-aligned words, not atomic on each word
  // set_words
  static void fill_to_words(HeapWord* to, size_t count, juint value = 0) {
    assert_params_ok(to, LogHeapWordSize);
    pd_fill_to_words(to, count, value);
  }

  // Fill object-aligned words, not atomic on each word
  static void fill_to_aligned_words(HeapWord* to, size_t count, juint value = 0) {
    assert_params_aligned(to);
    pd_fill_to_aligned_words(to, count, value);
  }

  // Fill bytes
  static void fill_to_bytes(void* to, size_t count, jubyte value = 0) {
    pd_fill_to_bytes(to, count, value);
  }

  // Fill a span of memory.  If the span is an integral number of aligned
  // longs, words, or ints, store to those units atomically.
  // The largest atomic transfer unit is 8 bytes, or the largest power
  // of two which divides both to and size, whichever is smaller.
  static void fill_to_memory_atomic(void* to, size_t size, jubyte value = 0);

  // Zero-fill methods

  // Zero word-aligned words, not atomic on each word
  static void zero_to_words(HeapWord* to, size_t count) {
    assert_params_ok(to, LogHeapWordSize);
    pd_zero_to_words(to, count);
  }

  // Zero bytes
  static void zero_to_bytes(void* to, size_t count) {
    pd_zero_to_bytes(to, count);
  }

 private:
  // Returns true iff the [from, from+count) and [to, to+count) word
  // ranges do not overlap.
  static bool params_disjoint(HeapWord* from, HeapWord* to, size_t count) {
    if (from < to) {
      return pointer_delta(to, from) >= count;
    }
    return pointer_delta(from, to) >= count;
  }

  // These methods raise a fatal if they detect a problem.
  // All are no-ops in product (non-ASSERT) builds.

  // Fatal unless the two word ranges are non-overlapping.
  static void assert_disjoint(HeapWord* from, HeapWord* to, size_t count) {
#ifdef ASSERT
    if (!params_disjoint(from, to, count))
      basic_fatal("source and dest overlap");
#endif
  }

  // Fatal unless both pointers are aligned to 2^log_align bytes.
  static void assert_params_ok(void* from, void* to, intptr_t log_align) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)from, right_n_bits(log_align)) != 0)
      basic_fatal("not aligned");
    if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0)
      basic_fatal("not aligned");
#endif
  }

  // Fatal unless the single pointer is aligned to 2^log_align bytes.
  static void assert_params_ok(HeapWord* to, intptr_t log_align) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)to, right_n_bits(log_align)) != 0)
      basic_fatal("not word aligned");
#endif
  }

  // Fatal unless both pointers have minimum Java object alignment.
  static void assert_params_aligned(HeapWord* from, HeapWord* to) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)from, MinObjAlignmentInBytes-1) != 0)
      basic_fatal("not object aligned");
    if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
      basic_fatal("not object aligned");
#endif
  }

  // Fatal unless the single pointer has minimum Java object alignment.
  static void assert_params_aligned(HeapWord* to) {
#ifdef ASSERT
    if (mask_bits((uintptr_t)to, MinObjAlignmentInBytes-1) != 0)
      basic_fatal("not object aligned");
#endif
  }

  // Fatal if the unit count is zero.
  static void assert_non_zero(size_t count) {
#ifdef ASSERT
    if (count == 0) {
      basic_fatal("count must be non-zero");
    }
#endif
  }

  // Fatal unless byte_count is a whole multiple of unit_size.
  static void assert_byte_count_ok(size_t byte_count, size_t unit_size) {
#ifdef ASSERT
    if ((size_t)round_to(byte_count, unit_size) != byte_count) {
      basic_fatal("byte count must be aligned");
    }
#endif
  }

  // Platform dependent implementations of the above methods.
#include "incls/_copy_pd.hpp.incl"
};