// Merge residue (Tue, 08 Aug 2017 15:57:29 +0800) — kept as a comment;
// bare text here would be a syntax error in a C++ header.
1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
26 #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
28 #include "gc_interface/allocTracer.hpp"
29 #include "gc_interface/collectedHeap.hpp"
30 #include "memory/threadLocalAllocBuffer.inline.hpp"
31 #include "memory/universe.hpp"
32 #include "oops/arrayOop.hpp"
33 #include "prims/jvmtiExport.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/thread.inline.hpp"
36 #include "services/lowMemoryDetector.hpp"
37 #include "utilities/copy.hpp"
39 // Inline allocation implementations.
// Initializes the header of a newly allocated object in two steps:
// first the mark word (while the klass field is still unset), then the
// klass pointer.  The klass must go in last because a set klass field
// indicates the object is parsable by concurrent GC (see the ordering
// comment in post_allocation_setup_array below).
void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
                                                 HeapWord* obj) {
  post_allocation_setup_no_klass_install(klass, obj);
  post_allocation_install_obj_klass(klass, oop(obj));
}
47 void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
48 HeapWord* objPtr) {
49 oop obj = (oop)objPtr;
51 assert(obj != NULL, "NULL object pointer");
52 if (UseBiasedLocking && (klass() != NULL)) {
53 obj->set_mark(klass->prototype_header());
54 } else {
55 // May be bootstrapping
56 obj->set_mark(markOopDesc::prototype());
57 }
58 }
// Stores the klass pointer into the object's header.  A NULL klass is
// tolerated only before the Universe is fully initialized (VM bootstrap).
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
                                                      oop obj) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  // Once the VM is up, every allocated object must end up with a klass.
  assert(!Universe::is_fully_initialized() || obj->klass() != NULL,
         "missing klass");
}
72 // Support for jvmti and dtrace
73 inline void post_allocation_notify(KlassHandle klass, oop obj, int size) {
74 // support low memory notifications (no-op if not enabled)
75 LowMemoryDetector::detect_low_memory_for_collected_pools();
77 // support for JVMTI VMObjectAlloc event (no-op if not enabled)
78 JvmtiExport::vm_object_alloc_event_collector(obj);
80 if (DTraceAllocProbes) {
81 // support for Dtrace object alloc event (no-op most of the time)
82 if (klass() != NULL && klass()->name() != NULL) {
83 SharedRuntime::dtrace_object_alloc(obj, size);
84 }
85 }
86 }
// Completes header setup for a newly allocated non-array object and
// posts the allocation notifications (low-memory, JVMTI, dtrace).
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
                                              HeapWord* obj,
                                              int size) {
  post_allocation_setup_common(klass, obj);
  // During bootstrap the klass may not yet identify arrays reliably.
  assert(Universe::is_bootstrapping() ||
         !((oop)obj)->is_array(), "must not be an array");
  // notify jvmti and dtrace
  post_allocation_notify(klass, (oop)obj, size);
}
// Completes header setup for a newly allocated array and posts the
// allocation notifications.
void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
                                                HeapWord* obj,
                                                int length) {
  // Set array length before setting the _klass field
  // in post_allocation_setup_common() because the klass field
  // indicates that the object is parsable by concurrent GC.
  assert(length >= 0, "length should be non-negative");
  ((arrayOop)obj)->set_length(length);
  post_allocation_setup_common(klass, obj);
  oop new_obj = (oop)obj;
  assert(new_obj->is_array(), "must be an array");
  // notify jvmti and dtrace (must be after length is set for dtrace)
  post_allocation_notify(klass, new_obj, new_obj->size());
}
// Allocates (without initializing the body) 'size' HeapWords for an
// instance of 'klass'.  Tries the current thread's TLAB first, then the
// shared heap.  On failure, reports the condition and throws the
// appropriate OutOfMemoryError through the TRAPS mechanism; the THROW
// macros return NULL to the caller.
HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {

  // Clear unhandled oops for memory allocation.  Memory allocation might
  // not take out a lock if from tlab, so clear here.
  CHECK_UNHANDLED_OOPS_ONLY(THREAD->clear_unhandled_oops();)

  if (HAS_PENDING_EXCEPTION) {
    NOT_PRODUCT(guarantee(false, "Should not allocate with exception pending"));
    return NULL;  // caller does a CHECK_0 too
  }

  HeapWord* result = NULL;
  if (UseTLAB) {
    // Fast path: allocate from the thread-local allocation buffer.
    result = allocate_from_tlab(klass, THREAD, size);
    if (result != NULL) {
      assert(!HAS_PENDING_EXCEPTION,
             "Unexpected exception, will result in uninitialized storage");
      return result;
    }
  }
  // Slow path: allocate directly from the shared heap.
  bool gc_overhead_limit_was_exceeded = false;
  result = Universe::heap()->mem_allocate(size,
                                          &gc_overhead_limit_was_exceeded);
  if (result != NULL) {
    NOT_PRODUCT(Universe::heap()->
      check_for_non_bad_heap_word_value(result, size));
    assert(!HAS_PENDING_EXCEPTION,
           "Unexpected exception, will result in uninitialized storage");
    // Book-keeping: per-thread allocated-bytes counter and event tracing.
    THREAD->incr_allocated_bytes(size * HeapWordSize);

    AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);

    return result;
  }

  // Allocation failed even after mem_allocate's own recovery attempts:
  // report and throw the matching OutOfMemoryError.
  if (!gc_overhead_limit_was_exceeded) {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("Java heap space");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "Java heap space");
    }

    THROW_OOP_0(Universe::out_of_memory_error_java_heap());
  } else {
    // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
    report_java_out_of_memory("GC overhead limit exceeded");

    if (JvmtiExport::should_post_resource_exhausted()) {
      JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR | JVMTI_RESOURCE_EXHAUSTED_JAVA_HEAP,
        "GC overhead limit exceeded");
    }

    THROW_OOP_0(Universe::out_of_memory_error_gc_overhead_limit());
  }
}
// Allocates 'size' HeapWords and initializes the new object's body (see
// init_obj).  CHECK_NULL propagates a pending OutOfMemoryError to the
// caller before init_obj can touch a NULL result.
HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) {
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  init_obj(obj, size);
  return obj;
}
180 HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
181 assert(UseTLAB, "should use UseTLAB");
183 HeapWord* obj = thread->tlab().allocate(size);
184 if (obj != NULL) {
185 return obj;
186 }
187 // Otherwise...
188 return allocate_from_tlab_slow(klass, thread, size);
189 }
191 void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
192 assert(obj != NULL, "cannot initialize NULL object");
193 const size_t hs = oopDesc::header_size();
194 assert(size >= hs, "unexpected object size");
195 ((oop)obj)->set_klass_gap(0);
196 Copy::fill_to_aligned_words(obj + hs, size - hs);
197 }
// Allocates and fully initializes a non-array Java object of the given
// klass and size (in HeapWords).  Throws OutOfMemoryError via the TRAPS
// mechanism on allocation failure.
oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_obj(klass, obj, size);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}
// Allocates and fully initializes a Java array of the given klass,
// total size (in HeapWords) and element count.  Throws OutOfMemoryError
// via the TRAPS mechanism on allocation failure.
oop CollectedHeap::array_allocate(KlassHandle klass,
                                  int size,
                                  int length,
                                  TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  post_allocation_setup_array(klass, obj, length);
  NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  return (oop)obj;
}
// Allocates a Java array WITHOUT clearing its body: uses
// common_mem_allocate_noinit (no init_obj), so only the header and the
// klass gap are initialized here.
oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
                                         int size,
                                         int length,
                                         TRAPS) {
  debug_only(check_for_valid_allocation_state());
  assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  assert(size >= 0, "int won't convert to size_t");
  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  ((oop)obj)->set_klass_gap(0);
  post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT
  // Start checking one word past the header -- presumably to skip the
  // word holding the array length, which IS initialized; TODO confirm.
  const size_t hs = oopDesc::header_size()+1;
  Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
#endif
  return (oop)obj;
}
239 inline void CollectedHeap::oop_iterate_no_header(OopClosure* cl) {
240 NoHeaderExtendedOopClosure no_header_cl(cl);
241 oop_iterate(&no_header_cl);
242 }
244 #ifndef PRODUCT
246 inline bool
247 CollectedHeap::promotion_should_fail(volatile size_t* count) {
248 // Access to count is not atomic; the value does not have to be exact.
249 if (PromotionFailureALot) {
250 const size_t gc_num = total_collections();
251 const size_t elapsed_gcs = gc_num - _promotion_failure_alot_gc_number;
252 if (elapsed_gcs >= PromotionFailureALotInterval) {
253 // Test for unsigned arithmetic wrap-around.
254 if (++*count >= PromotionFailureALotCount) {
255 *count = 0;
256 return true;
257 }
258 }
259 }
260 return false;
261 }
// Convenience overload using the heap's own failure-injection counter.
inline bool CollectedHeap::promotion_should_fail() {
  return promotion_should_fail(&_promotion_failure_alot_count);
}
267 inline void CollectedHeap::reset_promotion_should_fail(volatile size_t* count) {
268 if (PromotionFailureALot) {
269 _promotion_failure_alot_gc_number = total_collections();
270 *count = 0;
271 }
272 }
// Convenience overload using the heap's own failure-injection counter.
inline void CollectedHeap::reset_promotion_should_fail() {
  reset_promotion_should_fail(&_promotion_failure_alot_count);
}
277 #endif // #ifndef PRODUCT
279 #endif // SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP