Tue, 13 Apr 2010 13:52:10 -0700
6858496: Clear all SoftReferences before an out-of-memory due to GC overhead limit.
Summary: Ensure that a full GC which clears SoftReferences is performed before throwing an out-of-memory error.
Reviewed-by: ysr, jcoomes
1 /*
2 * Copyright 2001-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
// Forward declaration; defined elsewhere.  Used below to hand out
// parallel worker ids (see DirtyCardQueueSet::_free_ids).
class FreeIdSet;

// A closure class for processing card table entries.  Note that we don't
// require these closure objects to be stack-allocated.
class CardTableEntryClosure: public CHeapObj {
public:
  // Process the card whose card table entry is "card_ptr".  If returns
  // "false", terminate the iteration early.  "worker_i" identifies the
  // parallel worker applying the closure (0 in the serial case).
  //
  // NOTE(review): this polymorphic base has no virtual destructor, so
  // deleting a derived closure through a CardTableEntryClosure* would be
  // undefined behavior; presumably closures are never deleted that way --
  // TODO confirm against users of this class.
  virtual bool do_card_ptr(jbyte* card_ptr, int worker_i = 0) = 0;
};
// A PtrQueue whose elements are dirty card table entry addresses
// (jbyte*), as consumed by CardTableEntryClosure above.  (The previous
// comment said the elements were "oops"; that appears to have been
// copied from PtrQueue -- the closures here take jbyte* card pointers.)
class DirtyCardQueue: public PtrQueue {
public:
  // Construct a queue belonging to "qset_".  "perm" marks a permanent
  // queue not tied to a thread's lifetime (see PtrQueue).
  DirtyCardQueue(PtrQueueSet* qset_, bool perm = false) :
    PtrQueue(qset_, perm)
  {
    // Dirty card queues are always active.
    _active = true;
  }

  // Apply the closure to all elements, and reset the index to make the
  // buffer empty.  If a closure application returns "false", return
  // "false" immediately, halting the iteration.  If "consume" is true,
  // deletes processed entries from logs.
  bool apply_closure(CardTableEntryClosure* cl,
                     bool consume = true,
                     size_t worker_i = 0);

  // Apply the closure to all elements of "buf", down to "index"
  // (inclusive.)  If returns "false", then a closure application returned
  // "false", and we return immediately.  If "consume" is true, entries are
  // set to NULL as they are processed, so they will not be processed again
  // later.
  //
  // NOTE(review): "worker_i" is declared int here but size_t in
  // apply_closure above; harmless for small worker ids, but the types
  // should be unified.
  static bool apply_closure_to_buffer(CardTableEntryClosure* cl,
                                      void** buf, size_t index, size_t sz,
                                      bool consume = true,
                                      int worker_i = 0);

  // Raw access to the current buffer, used when transferring ownership of
  // the storage (e.g. when moving a buffer into the completed-buffer set).
  void **get_buf() { return _buf;}
  void set_buf(void **buf) {_buf = buf;}
  size_t get_index() { return _index;}

  // Drop the current buffer entirely (does not free it); leaves the queue
  // empty with no storage attached.
  void reinitialize() { _buf = 0; _sz = 0; _index = 0;}
};
// The set of all DirtyCardQueues, plus a shared global queue.  Completed
// buffers are handed to a single registered CardTableEntryClosure for
// processing, either by the refinement (rs) thread or by mutator threads.
class DirtyCardQueueSet: public PtrQueueSet {
  // The single registered closure; see set_closure below.
  CardTableEntryClosure* _closure;

  // A queue shared by threads/contexts that do not have a private one.
  DirtyCardQueue _shared_dirty_card_queue;

  // Override.
  bool mut_process_buffer(void** buf);

  // Protected by the _cbl_mon.
  // Pool of claimable parallel worker ids for card processing
  // (see num_par_ids below) -- presumably allocated in initialize();
  // TODO confirm in the implementation file.
  FreeIdSet* _free_ids;

  // The number of completed buffers processed by mutator and rs thread,
  // respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

public:
  DirtyCardQueueSet(bool notify_when_complete = true);

  // Set up monitors/locks and processing thresholds; must be called
  // before the set is used.  "fl_owner" optionally designates another
  // set that owns the shared buffer free list.
  void initialize(Monitor* cbl_mon, Mutex* fl_lock,
                  int process_completed_threshold,
                  int max_completed_queue,
                  Mutex* lock, PtrQueueSet* fl_owner = NULL);

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static size_t num_par_ids();

  // Called when thread "t" fills its queue; arranges for the buffer to
  // be enqueued or processed (exact policy lives in the implementation
  // file -- TODO confirm).
  static void handle_zero_index_for_thread(JavaThread* t);

  // Register "blk" as "the closure" for all queues.  Only one such closure
  // is allowed.  The "apply_closure_to_completed_buffer" method will apply
  // this closure to a completed buffer, and "iterate_closure_all_threads"
  // applies it to partially-filled buffers (the latter should only be done
  // with the world stopped).
  void set_closure(CardTableEntryClosure* closure);

  // If there is a registered closure for buffers, apply it to all entries
  // in all currently-active buffers.  This should only be applied at a
  // safepoint.  (Currently must not be called in parallel; this should
  // change in the future.)  If "consume" is true, processed entries are
  // discarded.
  void iterate_closure_all_threads(bool consume = true,
                                   size_t worker_i = 0);

  // If there exists some completed buffer, pop it, then apply the
  // registered closure to all its elements, nulling out those elements
  // processed.  If all elements are processed, returns "true".  If no
  // completed buffers exist, returns false.  If a completed buffer exists,
  // but is only partially completed before a "yield" happens, the
  // partially completed buffer (with its processed elements set to NULL)
  // is returned to the completed buffer set, and this call returns false.
  bool apply_closure_to_completed_buffer(int worker_i = 0,
                                         int stop_at = 0,
                                         bool during_pause = false);

  // Helper for the above: processes the single completed buffer "nd" with
  // worker "worker_i".
  bool apply_closure_to_completed_buffer_helper(int worker_i,
                                                BufferNode* nd);

  // Pop a completed buffer, or return NULL if fewer than "stop_at"
  // completed buffers remain.
  BufferNode* get_completed_buffer(int stop_at);

  // Applies the current closure to all completed buffers,
  // non-consumptively.
  void apply_closure_to_all_completed_buffers();

  DirtyCardQueue* shared_dirty_card_queue() {
    return &_shared_dirty_card_queue;
  }

  // If a full collection is happening, reset partial logs, and ignore
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();

  // Reset the completed-buffer count (inherited from PtrQueueSet).
  void clear_n_completed_buffers() { _n_completed_buffers = 0;}

  // Statistics accessors for the counters above.
  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }
};