--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jan 12 13:06:00 2011 -0500
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Wed Jan 12 16:34:25 2011 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,10 +63,12 @@
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
-                                                size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+                                                size_t word_size,
+                                                bool with_heap_lock) {
+  assert_not_at_safepoint();
+  assert(with_heap_lock == Heap_lock->owned_by_self(),
+         "with_heap_lock and Heap_lock->owned_by_self() should be a tautology");
   assert(cur_alloc_region != NULL, "pre-condition of the method");
-  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
   assert(cur_alloc_region->is_young(),
          "we only support young current alloc regions");
   assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
@@ -76,20 +78,24 @@
   assert(!cur_alloc_region->is_empty(),
          err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
                  cur_alloc_region->bottom(), cur_alloc_region->end()));
-  // This allocate method does BOT updates and we don't need them in
-  // the young generation. This will be fixed in the near future by
-  // CR 6994297.
-  HeapWord* result = cur_alloc_region->allocate(word_size);
+  HeapWord* result = cur_alloc_region->par_allocate_no_bot_updates(word_size);
   if (result != NULL) {
     assert(is_in(result), "result should be in the heap");
-    Heap_lock->unlock();
 
+    if (with_heap_lock) {
+      Heap_lock->unlock();
+    }
+    assert_heap_not_locked();
     // Do the dirtying after we release the Heap_lock.
     dirty_young_block(result, word_size);
     return result;
   }
 
-  assert_heap_locked();
+  if (with_heap_lock) {
+    assert_heap_locked();
+  } else {
+    assert_heap_not_locked();
+  }
   return NULL;
 }
 
@@ -97,31 +103,27 @@
 // assumptions of this method (and other related ones).
 inline HeapWord*
 G1CollectedHeap::attempt_allocation(size_t word_size) {
-  assert_heap_locked_and_not_at_safepoint();
+  assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "attempt_allocation() should not be called "
          "for humongous allocation requests");
 
   HeapRegion* cur_alloc_region = _cur_alloc_region;
   if (cur_alloc_region != NULL) {
     HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
-                                                      word_size);
+                                                      word_size,
+                                                      false /* with_heap_lock */);
+    assert_heap_not_locked();
     if (result != NULL) {
-      assert_heap_not_locked();
       return result;
     }
-
-    assert_heap_locked();
-
-    // Since we couldn't successfully allocate into it, retire the
-    // current alloc region.
-    retire_cur_alloc_region(cur_alloc_region);
   }
 
-  // Try to get a new region and allocate out of it
-  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                           false, /* at_safepoint */
-                                                           true,  /* do_dirtying */
-                                                           false  /* can_expand */);
+  // Our attempt to allocate lock-free failed as the current
+  // allocation region is either NULL or full. So, we'll now take the
+  // Heap_lock and retry.
+  Heap_lock->lock();
+
+  HeapWord* result = attempt_allocation_locked(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
@@ -145,6 +147,45 @@
   _cur_alloc_region = NULL;
 }
 
+inline HeapWord*
+G1CollectedHeap::attempt_allocation_locked(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation_locked() "
+         "should not be called for humongous allocation requests");
+
+  // First, reread the current alloc region and retry the allocation
+  // in case somebody replaced it while we were waiting to get the
+  // Heap_lock.
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    HeapWord* result = allocate_from_cur_alloc_region(
+                                                  cur_alloc_region, word_size,
+                                                  true /* with_heap_lock */);
+    if (result != NULL) {
+      assert_heap_not_locked();
+      return result;
+    }
+
+    // We failed to allocate out of the current alloc region, so let's
+    // retire it before getting a new one.
+    retire_cur_alloc_region(cur_alloc_region);
+  }
+
+  assert_heap_locked();
+  // Try to get a new region and allocate out of it
+  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
+                                                           false, /* at_safepoint */
+                                                           true,  /* do_dirtying */
+                                                           false  /* can_expand */);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
 // It dirties the cards that cover the block so that so that the post
 // write barrier never queues anything when updating objects on this
 // block. It is assumed (and in fact we assert) that the block
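
The restructuring above follows a common double-checked allocation pattern: an unsynchronized CAS-based fast path into the current allocation region, and a locked slow path that rereads the region (in case another thread replaced it while we waited on the Heap_lock) before retiring it and installing a fresh one. The sketch below illustrates that pattern in isolation; it is not HotSpot code. Region, Heap, par_allocate(), retire() and the std::atomic/std::mutex primitives are illustrative stand-ins for HeapRegion, G1CollectedHeap, par_allocate_no_bot_updates(), retire_cur_alloc_region() and HotSpot's Atomic/Heap_lock machinery.

#include <atomic>
#include <cstddef>
#include <mutex>

static const size_t REGION_BYTES = 1024 * 1024;  // illustrative region size

struct Region {
  char*              _bottom;
  char*              _end;
  std::atomic<char*> _top;

  explicit Region(char* bottom)
    : _bottom(bottom), _end(bottom + REGION_BYTES), _top(bottom) { }

  // Analogue of par_allocate_no_bot_updates(): several threads may race
  // to bump _top, so the bump is done with a CAS loop.
  char* par_allocate(size_t bytes) {
    char* old_top = _top.load();
    do {
      if ((size_t)(_end - old_top) < bytes) {
        return NULL;  // region is full
      }
      // On failure, compare_exchange_weak reloads old_top and we retry.
    } while (!_top.compare_exchange_weak(old_top, old_top + bytes));
    return old_top;
  }
};

struct Heap {
  std::mutex           _heap_lock;         // plays the role of Heap_lock
  std::atomic<Region*> _cur_alloc_region;

  Heap() : _cur_alloc_region((Region*) NULL) { }

  // Fast path, mirroring attempt_allocation(): no lock taken.
  char* attempt_allocation(size_t bytes) {
    Region* r = _cur_alloc_region.load();
    if (r != NULL) {
      char* result = r->par_allocate(bytes);
      if (result != NULL) {
        return result;
      }
    }
    // The current region was NULL or full; take the lock and retry.
    std::lock_guard<std::mutex> guard(_heap_lock);
    return attempt_allocation_locked(bytes);
  }

  // Slow path, called with _heap_lock held, mirroring
  // attempt_allocation_locked().
  char* attempt_allocation_locked(size_t bytes) {
    // Reread the current region: another thread may have replaced it
    // while we were waiting for the lock.
    Region* r = _cur_alloc_region.load();
    if (r != NULL) {
      char* result = r->par_allocate(bytes);
      if (result != NULL) {
        return result;
      }
      retire(r);  // full: retire it before replacing it
    }
    Region* fresh = new Region(new char[REGION_BYTES]);
    _cur_alloc_region.store(fresh);
    return fresh->par_allocate(bytes);
  }

  void retire(Region* r) {
    // In the sketch the retired region is simply dropped (and leaked);
    // the real code does young-list and accounting bookkeeping here.
    (void) r;
  }
};

The point of this shape is that the common case (the current region has room) costs one load and one CAS with no lock traffic; the Heap_lock is only taken when a region is NULL or full, and the reread under the lock prevents two threads that both saw a full region from each retiring and replacing it.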