Mon, 08 Dec 2014 18:57:33 +0100
8067655: Clean up G1 remembered set oop iteration
Summary: Pass on the static type G1ParPushHeapRSClosure to allow oop_iterate devirtualization
Reviewed-by: jmasa, kbarrett
ysr@777 | 1 | /* |
tschatzl@7445 | 2 | * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. |
ysr@777 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
ysr@777 | 4 | * |
ysr@777 | 5 | * This code is free software; you can redistribute it and/or modify it |
ysr@777 | 6 | * under the terms of the GNU General Public License version 2 only, as |
ysr@777 | 7 | * published by the Free Software Foundation. |
ysr@777 | 8 | * |
ysr@777 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
ysr@777 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
ysr@777 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
ysr@777 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
ysr@777 | 13 | * accompanied this code). |
ysr@777 | 14 | * |
ysr@777 | 15 | * You should have received a copy of the GNU General Public License version |
ysr@777 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
ysr@777 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
ysr@777 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
ysr@777 | 22 | * |
ysr@777 | 23 | */ |
ysr@777 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP |
stefank@2314 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "gc_implementation/g1/ptrQueue.hpp" |
stefank@2314 | 29 | #include "memory/allocation.hpp" |
stefank@2314 | 30 | |
ysr@777 | 31 | class FreeIdSet; |
ysr@777 | 32 | |
ysr@777 | 33 | // A closure class for processing card table entries. Note that we don't |
ysr@777 | 34 | // require these closure objects to be stack-allocated. |
ysr@777 | 34 | // (Deriving from CHeapObj<mtGC> allows them to live on the C heap, with |
ysr@777 | 34 | // the allocation accounted to the GC memory tag.) |
zgu@3900 | 35 | class CardTableEntryClosure: public CHeapObj<mtGC> { |
ysr@777 | 36 | public: |
ysr@777 | 37 | // Process the card whose card table entry is "card_ptr". If returns |
ysr@777 | 38 | // "false", terminate the iteration early. |
ysr@777 | 38 | // "worker_i" presumably identifies the worker thread doing the |
ysr@777 | 38 | // processing (see DirtyCardQueueSet::num_par_ids); defaults to 0 for |
ysr@777 | 38 | // serial callers -- confirm at call sites. |
vkempik@6552 | 39 | virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i = 0) = 0; |
ysr@777 | 40 | };
ysr@777 | 41 | |
ysr@777 | 42 | // A ptrQueue whose elements are addresses of card table entries (jbyte*), |
ysr@777 | 42 | // i.e. "dirty cards" -- the type consumed by CardTableEntryClosure::do_card_ptr. |
ysr@777 | 42 | // (Not oops; the "pointers to object heads" wording from ptrQueue does not |
ysr@777 | 42 | // apply here.) |
ysr@777 | 43 | class DirtyCardQueue: public PtrQueue { |
ysr@777 | 44 | public: |
ysr@777 | 45 | DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) : |
tonyp@2197 | 46 | // Dirty card queues are always active, so we create them with their |
tonyp@2197 | 47 | // active field set to true. |
tonyp@2197 | 48 | PtrQueue(qset_, perm, true /* active */) { } |
tonyp@2197 | 49 | |
tschatzl@7445 | 50 | // Flush before destroying; queue may be used to capture pending work while |
tschatzl@7445 | 51 | // doing something else, with auto-flush on completion. |
tschatzl@7445 | 52 | ~DirtyCardQueue() { if (!is_permanent()) flush(); } |
tschatzl@7445 | 53 | |
tschatzl@7445 | 54 | // Process queue entries and release resources. |
tschatzl@7445 | 55 | void flush() { flush_impl(); } |
tschatzl@7445 | 56 | |
ysr@777 | 57 | // Apply the closure to all elements, and reset the index to make the |
ysr@777 | 58 | // buffer empty. If a closure application returns "false", return |
ysr@777 | 59 | // "false" immediately, halting the iteration. If "consume" is true, |
ysr@777 | 60 | // deletes processed entries from logs. |
ysr@777 | 61 | bool apply_closure(CardTableEntryClosure* cl, |
ysr@777 | 62 | bool consume = true, |
vkempik@6552 | 63 | uint worker_i = 0); |
ysr@777 | 64 | |
ysr@777 | 65 | // Apply the closure to all elements of "buf", down to "index" |
ysr@777 | 66 | // (inclusive.) If returns "false", then a closure application returned |
ysr@777 | 67 | // "false", and we return immediately. If "consume" is true, entries are |
ysr@777 | 68 | // set to NULL as they are processed, so they will not be processed again |
ysr@777 | 69 | // later. |
ysr@777 | 70 | static bool apply_closure_to_buffer(CardTableEntryClosure* cl, |
ysr@777 | 71 | void** buf, size_t index, size_t sz, |
ysr@777 | 72 | bool consume = true, |
vkempik@6552 | 73 | uint worker_i = 0); |
ysr@777 | 74 | // Raw access to the current buffer and index (fields inherited from |
ysr@777 | 74 | // PtrQueue). |
ysr@777 | 74 | void **get_buf() { return _buf;} |
ysr@777 | 75 | void set_buf(void **buf) {_buf = buf;} |
ysr@777 | 76 | size_t get_index() { return _index;} |
ysr@777 | 77 | // Detach the current buffer without flushing or deallocating it. |
ysr@777 | 77 | // NOTE(review): any previously attached buffer is simply dropped here, |
ysr@777 | 77 | // so the caller appears to take over its ownership -- confirm at call |
ysr@777 | 77 | // sites (cf. get_buf()/set_buf() above). |
ysr@777 | 77 | void reinitialize() { _buf = 0; _sz = 0; _index = 0;} |
ysr@777 | 78 | }; |
ysr@777 | 79 | |
ysr@777 | 80 | |
ysr@777 | 81 | |
ysr@777 | 82 | // The set of all DirtyCardQueues. Maintains the global list of completed |
ysr@777 | 82 | // buffers and the operations for processing, iterating over, clearing and |
ysr@777 | 82 | // abandoning them, plus a single shared queue for use outside the |
ysr@777 | 82 | // per-thread queues. |
ysr@777 | 82 | class DirtyCardQueueSet: public PtrQueueSet { |
tschatzl@6930 | 83 | // The closure used in mut_process_buffer(). |
tschatzl@6930 | 84 | CardTableEntryClosure* _mut_process_closure; |
ysr@777 | 85 | |
ysr@777 | 86 | // A single queue shared by all threads; see shared_dirty_card_queue(). |
ysr@777 | 86 | DirtyCardQueue _shared_dirty_card_queue; |
ysr@777 | 87 | |
ysr@777 | 88 | // Override. |
ysr@777 | 89 | bool mut_process_buffer(void** buf); |
ysr@777 | 90 | |
ysr@777 | 91 | // Protected by the _cbl_mon. |
ysr@777 | 92 | // NOTE(review): presumably the pool of parallel ids that mutator/collector |
ysr@777 | 92 | // threads claim while doing card processing (cf. num_par_ids()) -- confirm. |
ysr@777 | 92 | FreeIdSet* _free_ids; |
ysr@777 | 93 | |
ysr@777 | 94 | // The number of completed buffers processed by mutator and rs thread, |
ysr@777 | 95 | // respectively. |
ysr@777 | 96 | jint _processed_buffers_mut; |
ysr@777 | 97 | jint _processed_buffers_rs_thread; |
ysr@777 | 98 | |
tschatzl@6930 | 99 | // Current buffer node used for parallel iteration. |
tschatzl@6930 | 100 | BufferNode* volatile _cur_par_buffer_node; |
ysr@777 | 101 | public: |
iveresov@1546 | 102 | DirtyCardQueueSet(bool notify_when_complete = true); |
ysr@777 | 103 | |
ysr@777 | 103 | // Set up the queue set; "cl" becomes the closure used by |
ysr@777 | 103 | // mut_process_buffer(). The remaining parameters configure the |
ysr@777 | 103 | // underlying PtrQueueSet (locks, thresholds, free-list owner). |
tschatzl@6930 | 104 | void initialize(CardTableEntryClosure* cl, Monitor* cbl_mon, Mutex* fl_lock, |
iveresov@1546 | 105 | int process_completed_threshold, |
iveresov@1546 | 106 | int max_completed_queue, |
iveresov@1546 | 107 | Mutex* lock, PtrQueueSet* fl_owner = NULL); |
ysr@777 | 108 | |
ysr@777 | 109 | // The number of parallel ids that can be claimed to allow collector or |
ysr@777 | 110 | // mutator threads to do card-processing work. |
vkempik@6552 | 111 | static uint num_par_ids(); |
ysr@777 | 112 | |
ysr@777 | 113 | static void handle_zero_index_for_thread(JavaThread* t); |
ysr@777 | 114 | |
tschatzl@6930 | 115 | // Apply the given closure to all entries in all currently-active buffers. |
tschatzl@6930 | 116 | // This should only be applied at a safepoint. (Currently must not be called |
tschatzl@6930 | 117 | // in parallel; this should change in the future.) If "consume" is true, |
tschatzl@6930 | 118 | // processed entries are discarded. |
tschatzl@6930 | 119 | void iterate_closure_all_threads(CardTableEntryClosure* cl, |
tschatzl@6930 | 120 | bool consume = true, |
vkempik@6552 | 121 | uint worker_i = 0); |
ysr@777 | 122 | |
ysr@777 | 123 | // If there exists some completed buffer, pop it, then apply the |
johnc@2060 | 124 | // specified closure to all its elements, nulling out those elements |
johnc@2060 | 125 | // processed. If all elements are processed, returns "true". If no |
johnc@2060 | 126 | // completed buffers exist, returns false. If a completed buffer exists, |
johnc@2060 | 127 | // but is only partially completed before a "yield" happens, the |
johnc@2060 | 128 | // partially completed buffer (with its processed elements set to NULL) |
johnc@2060 | 129 | // is returned to the completed buffer set, and this call returns false. |
johnc@2060 | 130 | bool apply_closure_to_completed_buffer(CardTableEntryClosure* cl, |
vkempik@6552 | 131 | uint worker_i = 0, |
johnc@2060 | 132 | int stop_at = 0, |
johnc@2060 | 133 | bool during_pause = false); |
johnc@2060 | 134 | |
johnc@2060 | 135 | // Helper routine for the above. |
johnc@2060 | 136 | bool apply_closure_to_completed_buffer_helper(CardTableEntryClosure* cl, |
vkempik@6552 | 137 | uint worker_i, |
iveresov@1546 | 138 | BufferNode* nd); |
ysr@777 | 139 | |
iveresov@1546 | 139 | // Pop a buffer node from the completed-buffer list. NOTE(review): |
iveresov@1546 | 139 | // "stop_at" presumably bounds how far the list may be drained, as in |
iveresov@1546 | 139 | // apply_closure_to_completed_buffer() -- confirm in the implementation. |
iveresov@1546 | 140 | BufferNode* get_completed_buffer(int stop_at); |
johnc@1525 | 141 | |
ysr@777 | 142 | // Applies the current closure to all completed buffers, |
ysr@777 | 143 | // non-consumptively. |
tschatzl@6930 | 144 | void apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl); |
tschatzl@6930 | 145 | |
tschatzl@6930 | 145 | // Reset the parallel iteration cursor to the head of the completed-buffer |
tschatzl@6930 | 145 | // list; call before par_apply_closure_to_all_completed_buffers(). |
tschatzl@6930 | 146 | void reset_for_par_iteration() { _cur_par_buffer_node = _completed_buffers_head; } |
tschatzl@6930 | 147 | // Applies the current closure to all completed buffers, non-consumptively. |
tschatzl@6930 | 148 | // Parallel version. |
tschatzl@6930 | 149 | void par_apply_closure_to_all_completed_buffers(CardTableEntryClosure* cl); |
ysr@777 | 150 | |
ysr@777 | 150 | // Accessor for the single queue shared by all threads. |
ysr@777 | 151 | DirtyCardQueue* shared_dirty_card_queue() { |
ysr@777 | 152 | return &_shared_dirty_card_queue; |
ysr@777 | 153 | } |
ysr@777 | 154 | |
johnc@2060 | 155 | // Deallocate any completed log buffers |
johnc@2060 | 156 | void clear(); |
johnc@2060 | 157 | |
ysr@777 | 158 | // If a full collection is happening, reset partial logs, and ignore |
ysr@777 | 159 | // completed ones: the full collection will make them all irrelevant. |
ysr@777 | 160 | void abandon_logs(); |
ysr@777 | 161 | |
ysr@777 | 162 | // If any threads have partial logs, add them to the global list of logs. |
ysr@777 | 163 | void concatenate_logs(); |
ysr@777 | 163 | // Reset the completed-buffer count to zero. This only clears the |
ysr@777 | 163 | // counter; it does not free or detach any buffers. |
ysr@777 | 164 | void clear_n_completed_buffers() { _n_completed_buffers = 0;} |
ysr@777 | 165 | |
ysr@777 | 165 | // Statistics: completed buffers processed by mutator threads. |
ysr@777 | 166 | jint processed_buffers_mut() { |
ysr@777 | 167 | return _processed_buffers_mut; |
ysr@777 | 168 | } |
ysr@777 | 168 | // Statistics: completed buffers processed by the remembered-set thread. |
ysr@777 | 169 | jint processed_buffers_rs_thread() { |
ysr@777 | 170 | return _processed_buffers_rs_thread; |
ysr@777 | 171 | } |
ysr@777 | 172 | |
ysr@777 | 173 | }; |
stefank@2314 | 174 | |
stefank@2314 | 175 | #endif // SHARE_VM_GC_IMPLEMENTATION_G1_DIRTYCARDQUEUE_HPP |