1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,279 @@ 1.4 +/* 1.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 1.25 + * 1.26 + */ 1.27 + 1.28 +#ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP 1.29 +#define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP 1.30 + 1.31 +#include "gc_interface/allocTracer.hpp" 1.32 +#include "gc_interface/collectedHeap.hpp" 1.33 +#include "memory/threadLocalAllocBuffer.inline.hpp" 1.34 +#include "memory/universe.hpp" 1.35 +#include "oops/arrayOop.hpp" 1.36 +#include "prims/jvmtiExport.hpp" 1.37 +#include "runtime/sharedRuntime.hpp" 1.38 +#include "runtime/thread.inline.hpp" 1.39 +#include "services/lowMemoryDetector.hpp" 1.40 +#include "utilities/copy.hpp" 1.41 + 1.42 +// Inline allocation implementations. 
// Initialize the object's mark word and install its klass pointer.
// Must run after the storage has been allocated and (for arrays) after
// the length field has been set — see post_allocation_setup_array().
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj) {
  post_allocation_setup_no_klass_install(klass, obj);
  post_allocation_install_obj_klass(klass, oop(obj));
}

// Set only the mark word of the newly allocated object, leaving the klass
// field untouched.  With biased locking enabled the klass' prototype header
// is used (it may encode a bias pattern); otherwise — or while the klass is
// still NULL during VM bootstrap — the default prototype mark is installed.
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
                                                           HeapWord* objPtr) {
  oop obj = (oop)objPtr;

  assert(obj != NULL, "NULL object pointer");
  if (UseBiasedLocking && (klass() != NULL)) {
    obj->set_mark(klass->prototype_header());
  } else {
    // May be bootstrapping
    obj->set_mark(markOopDesc::prototype());
  }
}

// Install the klass pointer into the object header.  A NULL klass is
// tolerated only before the universe is fully initialized (bootstrap).
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
                                                   oop obj) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  assert(!Universe::is_fully_initialized() || obj->klass() != NULL,
         "missing klass");
}

// Support for jvmti and dtrace
// Post-allocation notification hooks: low-memory detection, the JVMTI
// VMObjectAlloc event, and the DTrace object-alloc probe.  Each hook is a
// no-op when its feature is disabled.  `size` is in HeapWords (as passed by
// the callers below).
inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
  // support low memory notifications (no-op if not enabled)
  LowMemoryDetector::detect_low_memory_for_collected_pools();

  // support for JVMTI VMObjectAlloc event (no-op if not enabled)
  JvmtiExport::vm_object_alloc_event_collector(obj);

  if (DTraceAllocProbes) {
    // support for Dtrace object alloc event (no-op most of the time)
    // Guard: during bootstrap the klass or its name may not exist yet.
    if (klass() != NULL && klass()->name() != NULL) {
      SharedRuntime::dtrace_object_alloc(obj, size);
    }
  }
}

// Finish setup of a newly allocated non-array object: header + klass
// install, then JVMTI/DTrace notification.
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj,
                                              int size) {
  post_allocation_setup_common(klass, obj);
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj, size);
}

// Finish setup of a newly allocated array object.  Ordering is critical:
// the length field must be written before the klass field becomes visible.
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                HeapWord* obj,
                                                int length) {
  // Set array length before setting the _klass field
  // in post_allocation_setup_common() because the klass field
  // indicates that the object is parsable by concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj)->set_length(length);
  post_allocation_setup_common(klass, obj);
  oop new_obj = (oop)obj;
  assert(new_obj->is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  post_allocation_notify(klass, new_obj, new_obj->size());
}

// Allocate `size` HeapWords of raw (uninitialized) storage: first from the
// current thread's TLAB (when UseTLAB), then from the shared heap.  On
// failure throws OutOfMemoryError — "Java heap space" or "GC overhead limit
// exceeded" depending on what mem_allocate() reported — and returns NULL
// via the TRAPS machinery.
HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    // Fast path: bump-pointer allocation in the thread-local buffer.
    result = allocate_from_tlab(klass, THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  // Slow path: allocate directly from the heap (may trigger GC).
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    // Outside-TLAB allocations are accounted and traced here; TLAB
    // allocations are accounted when the TLAB is retired.
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);

    return result;
  }

  // Allocation failed: raise the appropriate OutOfMemoryError.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}

// Like common_mem_allocate_noinit(), but also initializes the allocated
// storage (klass gap cleared, body filled) via init_obj().
HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}

// TLAB allocation: try the inline bump-pointer fast path; on failure defer
// to allocate_from_tlab_slow() (defined elsewhere), which may refill the
// TLAB.  Returns NULL if no TLAB allocation was possible.
HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
  assert(UseTLAB, "should use UseTLAB");

  HeapWord* obj = thread->tlab().allocate(size);
  if (obj != NULL) {
    return obj;
  }
  // Otherwise...
  return allocate_from_tlab_slow(klass, thread, size);
}

// Initialize freshly allocated storage: clear the klass gap and fill the
// words past the header with Copy::fill_to_aligned_words' default fill
// value (presumably zero — confirm against utilities/copy.hpp).
void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  assert(obj != NULL, "cannot initialize NULL object");
  const size_t hs = oopDesc::header_size();
  assert(size >= hs, "unexpected object size");
  ((oop)obj)->set_klass_gap(0);
  Copy::fill_to_aligned_words(obj + hs, size - hs);
}

// Allocate and fully initialize a non-array Java object of `size` HeapWords.
// Throws OutOfMemoryError (returning NULL through TRAPS) on failure.
oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Allocate and fully initialize a Java array: `size` is the total object
// size in HeapWords, `length` the element count stored in the header.
oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}

// Array allocation WITHOUT body zeroing: uses the noinit allocator and only
// clears the klass gap by hand, leaving the element storage uninitialized.
// Callers must overwrite every element before it can be observed.
oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  // Debug sanity check skips header_size()+1 words — presumably the extra
  // word covers the array length field; verify against arrayOop layout.
  const size_t hs = oopDesc::header_size()+1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
#endif
  return (oop)obj;
}

// Iterate all oops in the heap with `cl`, skipping object headers, by
// wrapping the closure in a NoHeaderExtendedOopClosure adapter.
inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
  NoHeaderExtendedOopClosure no_header_cl(cl);
  oop_iterate(&no_header_cl);
}

#ifndef PRODUCT
// Test support for -XX:+PromotionFailureALot: deterministically injects
// promotion failures every PromotionFailureALotInterval collections, once
// `count` reaches PromotionFailureALotCount.  Debug builds only.

// Returns true when a promotion failure should be injected, resetting
// `count` when it fires.
inline bool
CollectedHeap::promotion_should_fail(volatile size_t* count) {
  // Access to count is not atomic; the value does not have to be exact.
  if (PromotionFailureALot) {
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
    if (elapsed_gcs >= PromotionFailureALotInterval) {
      // Test for unsigned arithmetic wrap-around.
      if (++*count >= PromotionFailureALotCount) {
        *count = 0;
        return true;
      }
    }
  }
  return false;
}

// Convenience overload using the heap-wide failure counter.
inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}

// Re-arm the injection window: record the current collection count and
// clear the failure counter.
inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
  if (PromotionFailureALot) {
    _promotion_failure_alot_gc_number = total_collections();
    *count = 0;
  }
}

// Convenience overload using the heap-wide failure counter.
inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
#endif  // #ifndef PRODUCT

#endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP