/*
 * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Fast-path TLAB allocation: bump-pointer allocation of `size` heap words
// from this thread's local buffer.  Returns the start of the allocated
// space, or NULL when the buffer's remaining room is smaller than `size`
// (the caller is then expected to take a slow path).
inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
  invariants();
  HeapWord* obj = top();
  // pointer_delta(end(), obj) is the free space left in the buffer
  // (in HeapWords); test it before touching top so a too-small buffer
  // is left completely unchanged.
  if (pointer_delta(end(), obj) >= size) {
    // successful thread-local allocation

    // Debug builds poison the handed-out words with badHeapWordVal so
    // reads of uninitialized allocation are easier to spot.
    DEBUG_ONLY(Copy::fill_to_words(obj, size, badHeapWordVal));
    // This addition is safe because we know that top is
    // at least size below end, so the add can't wrap.
    set_top(obj + size);

    invariants();
    return obj;
  }
  return NULL;
}

// Computes the size (in heap words) of a new TLAB that must hold an object
// of `obj_size` words plus the buffer's alignment/filler reserve.  Returns
// 0 to signal failure when even that minimum would not fit in the space
// the heap is currently willing to hand out.
inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {
  const size_t aligned_obj_size = align_object_size(obj_size);

  // Compute the size for the new TLAB.
  // The "last" tlab may be smaller to reduce fragmentation.
  // unsafe_max_tlab_alloc is just a hint.
  const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(myThread()) /
                                                  HeapWordSize;
  // Cap the new TLAB at what the heap says is available, but don't ask for
  // more than the desired size plus the triggering object.
  size_t new_tlab_size = MIN2(available_size, desired_size() + aligned_obj_size);

  // Make sure there's enough room for object and filler int[].
  const size_t obj_plus_filler_size = aligned_obj_size + alignment_reserve();
  if (new_tlab_size < obj_plus_filler_size) {
    // If there isn't enough room for the allocation, return failure.
    if (PrintTLAB && Verbose) {
      gclog_or_tty->print_cr("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ")"
                    " returns failure",
                    obj_size);
    }
    return 0;
  }
  if (PrintTLAB && Verbose) {
    gclog_or_tty->print_cr("ThreadLocalAllocBuffer::compute_size(" SIZE_FORMAT ")"
                  " returns " SIZE_FORMAT,
                  obj_size, new_tlab_size);
  }
  return new_tlab_size;
}


// Bookkeeping for an allocation that bypassed this TLAB and was satisfied
// from the shared heap: raises the refill-waste limit and counts the slow
// allocation.  Optionally logs the event under -XX:+PrintTLAB -XX:+Verbose.
void ThreadLocalAllocBuffer::record_slow_allocation(size_t obj_size) {
  // Raise size required to bypass TLAB next time. Why? Else there's
  // a risk that a thread that repeatedly allocates objects of one
  // size will get stuck on this slow path.

  set_refill_waste_limit(refill_waste_limit() + refill_waste_limit_increment());

  _slow_allocations++;

  if (PrintTLAB && Verbose) {
    Thread* thrd = myThread();
    gclog_or_tty->print("TLAB: %s thread: "INTPTR_FORMAT" [id: %2d]"
                        " obj: "SIZE_FORMAT
                        " free: "SIZE_FORMAT
                        " waste: "SIZE_FORMAT"\n",
                        "slow", thrd, thrd->osthread()->thread_id(),
                        obj_size, free(), refill_waste_limit());
  }
}