src/share/vm/gc_implementation/g1/concurrentMark.cpp

changeset 3175:4dfb2df418f2
parent    3173:5cc33133bc6d
child     3182:65a8ff39a6da
comparing 3174:f0ecbe78fc7b with 3175:4dfb2df418f2
@@ -816,14 +816,14 @@
 
   // For each region note start of marking.
   NoteStartOfMarkHRClosure startcl;
   g1h->heap_region_iterate(&startcl);
 
-  // Start weak-reference discovery.
-  ReferenceProcessor* rp = g1h->ref_processor();
-  rp->verify_no_references_recorded();
-  rp->enable_discovery(); // enable ("weak") refs discovery
+  // Start Concurrent Marking weak-reference discovery.
+  ReferenceProcessor* rp = g1h->ref_processor_cm();
+  // enable ("weak") refs discovery
+  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   // This is the start of the marking cycle, we're expected all
   // threads to have SATB queues with active set to false.
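In the hunk above, the explicit verify_no_references_recorded() call is dropped and the checks move into enable_discovery() as flags, while the marking cycle now uses the dedicated concurrent-marking reference processor (ref_processor_cm()). The standalone sketch below only illustrates that calling shape; its class and member names are invented for the example and are not the HotSpot ReferenceProcessor API.

#include <cassert>
#include <vector>

class DiscoverySketch {
  bool               _enabled = false;
  std::vector<void*> _discovered;   // stand-in for the discovered reference lists
public:
  // Enabling discovery can verify its own preconditions, so the caller
  // no longer needs a separate verification call first.
  void enable_discovery(bool verify_disabled, bool verify_no_refs) {
    if (verify_disabled) {
      assert(!_enabled && "discovery must be off before a new marking cycle");
    }
    if (verify_no_refs) {
      assert(_discovered.empty() && "no references may be pending");
    }
    _enabled = true;
  }
  void disable_discovery() { _enabled = false; }
};

int main() {
  DiscoverySketch rp;
  rp.enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  rp.disable_discovery();
  return 0;
}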
@@ -1131,10 +1131,11 @@
 
 void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
   // world is stopped at this checkpoint
   assert(SafepointSynchronize::is_at_safepoint(),
          "world should be stopped");
+
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
 
   // If a full collection has happened, we shouldn't do this.
   if (has_aborted()) {
     g1h->set_marking_complete(); // So bitmap clearing isn't confused
@@ -1834,10 +1835,14 @@
                                g1h->capacity());
   }
 
   size_t cleaned_up_bytes = start_used_bytes - g1h->used();
   g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
+
+  // Clean up will have freed any regions completely full of garbage.
+  // Update the soft reference policy with the new heap occupancy.
+  Universe::update_heap_info_at_gc();
 
   // We need to make this be a "collection" so any collection pause that
   // races with it goes around and waits for completeCleanup to finish.
   g1h->increment_total_collections();
 
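The lines added here take a fresh heap-occupancy snapshot right after cleanup so that the soft reference clearing policy sees the space cleanup just reclaimed. As a rough illustration of why that matters, the standalone sketch below models an LRU-style policy that keeps soft references alive in proportion to free heap, in the spirit of HotSpot's SoftRefLRUPolicyMSPerMB; the constant value and function names are assumptions made for the example, not the real policy code.

#include <cstdint>
#include <iostream>

// Assumed constant for the example: milliseconds of lifetime earned
// per megabyte of free heap (analogous in spirit to SoftRefLRUPolicyMSPerMB).
const uint64_t kMsPerFreeMB = 1000;

// Clear a soft reference only if it has been idle longer than the
// allowance earned by the current free heap.
bool should_clear_soft_ref(uint64_t ms_since_last_access, uint64_t free_heap_bytes) {
  uint64_t free_mb     = free_heap_bytes >> 20;
  uint64_t max_keep_ms = free_mb * kMsPerFreeMB;
  return ms_since_last_access > max_keep_ms;
}

int main() {
  // A stale occupancy snapshot (almost no free heap) would clear the ref;
  // the post-cleanup snapshot (plenty of free heap) keeps it alive.
  std::cout << should_clear_soft_ref(5000, 2ull << 20) << "\n";    // 1: clear
  std::cout << should_clear_soft_ref(5000, 512ull << 20) << "\n";  // 0: keep
  return 0;
}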
@@ -2070,21 +2075,23 @@
                         true /* do_termination */);
     } while (_task->has_aborted() && !_cm->has_overflown());
   }
 };
 
-// Implementation of AbstractRefProcTaskExecutor for G1
-class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+// Implementation of AbstractRefProcTaskExecutor for parallel
+// reference processing at the end of G1 concurrent marking
+
+class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
 private:
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   CMBitMap* _bitmap;
   WorkGang* _workers;
   int _active_workers;
 
 public:
-  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
-                        ConcurrentMark* cm,
-                        CMBitMap* bitmap,
-                        WorkGang* workers,
-                        int n_workers) :
+  G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
+                          ConcurrentMark* cm,
+                          CMBitMap* bitmap,
+                          WorkGang* workers,
+                          int n_workers) :
     _g1h(g1h), _cm(cm), _bitmap(bitmap),
@@ -2094,19 +2101,19 @@
   // Executes the given task using concurrent marking worker threads.
   virtual void execute(ProcessTask& task);
   virtual void execute(EnqueueTask& task);
 };
 
-class G1RefProcTaskProxy: public AbstractGangTask {
+class G1CMRefProcTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
   ProcessTask& _proc_task;
   G1CollectedHeap* _g1h;
   ConcurrentMark* _cm;
   CMBitMap* _bitmap;
 
 public:
-  G1RefProcTaskProxy(ProcessTask& proc_task,
-                     G1CollectedHeap* g1h,
-                     ConcurrentMark* cm,
-                     CMBitMap* bitmap) :
+  G1CMRefProcTaskProxy(ProcessTask& proc_task,
+                       G1CollectedHeap* g1h,
+                       ConcurrentMark* cm,
+                       CMBitMap* bitmap) :
     AbstractGangTask("Process reference objects in parallel"),
     _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
@@ -2120,42 +2127,42 @@
 
     _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
   }
 };
 
-void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
+void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
+  G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
 
   // We need to reset the phase for each task execution so that
   // the termination protocol of CMTask::do_marking_step works.
   _cm->set_phase(_active_workers, false /* concurrent */);
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&proc_task_proxy);
   _g1h->set_par_threads(0);
 }
 
-class G1RefEnqueueTaskProxy: public AbstractGangTask {
+class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
   typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
   EnqueueTask& _enq_task;
 
 public:
-  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
+  G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
     AbstractGangTask("Enqueue reference objects in parallel"),
     _enq_task(enq_task)
   { }
 
   virtual void work(int i) {
     _enq_task.work(i);
   }
 };
 
-void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
   assert(_workers != NULL, "Need parallel worker threads.");
 
-  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
+  G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
 
   _g1h->set_par_threads(_active_workers);
   _workers->run_task(&enq_task_proxy);
   _g1h->set_par_threads(0);
 }
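The renamed classes above implement one pattern: the reference-processing task exposes work(worker_id), a gang-task proxy forwards each worker's invocation to it, and the executor points the shared work gang at the proxy while the parallel worker count is set and then cleared. The standalone sketch below reproduces that shape with std::thread standing in for a HotSpot WorkGang; every name in it is illustrative.

#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Stand-in for AbstractRefProcTaskExecutor::ProcessTask.
struct ProcessTaskSketch {
  virtual void work(int worker_id) = 0;
  virtual ~ProcessTaskSketch() = default;
};

// Stand-in for a work gang: runs a callable once per worker, in parallel.
class WorkGangSketch {
  int _n_workers;
public:
  explicit WorkGangSketch(int n) : _n_workers(n) {}
  void run_task(const std::function<void(int)>& body) {
    std::vector<std::thread> gang;
    for (int i = 0; i < _n_workers; i++) {
      gang.emplace_back(body, i);
    }
    for (auto& t : gang) {
      t.join();
    }
  }
};

// Stand-in for the executor: wrap the task in a proxy and hand it to the gang.
class RefProcExecutorSketch {
  WorkGangSketch* _workers;
public:
  explicit RefProcExecutorSketch(WorkGangSketch* workers) : _workers(workers) {}
  void execute(ProcessTaskSketch& proc_task) {
    // The lambda plays the role of the gang-task proxy's work(i).
    _workers->run_task([&proc_task](int i) { proc_task.work(i); });
  }
};

// A trivial concrete task, just to make the sketch runnable.
struct PrintTask : public ProcessTaskSketch {
  void work(int worker_id) override {
    std::printf("worker %d processing its share of discovered refs\n", worker_id);
  }
};

int main() {
  WorkGangSketch gang(4);
  RefProcExecutorSketch executor(&gang);
  PrintTask task;
  executor.execute(task);
  return 0;
}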
@@ -2176,11 +2183,11 @@
     if (verbose) {
       gclog_or_tty->put(' ');
     }
     TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
 
-    ReferenceProcessor* rp = g1h->ref_processor();
+    ReferenceProcessor* rp = g1h->ref_processor_cm();
 
     // See the comment in G1CollectedHeap::ref_processing_init()
     // about how reference processing currently works in G1.
 
     // Process weak references.
@@ -2194,12 +2201,12 @@
     // We use the work gang from the G1CollectedHeap and we utilize all
     // the worker threads.
     int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
     active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
 
-    G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
-                                            g1h->workers(), active_workers);
+    G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
+                                              g1h->workers(), active_workers);
 
     if (rp->processing_is_mt()) {
       // Set the degree of MT here. If the discovery is done MT, there
       // may have been a different number of threads doing the discovery
       // and a different number of discovered lists may have Ref objects.
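The worker count chosen above is clamped to the number of marking tasks and forced to be at least one; a standalone one-liner equivalent to that arithmetic, with invented names, looks like this:

#include <algorithm>

// MAX2(MIN2(active_workers, max_task_num), 1) from the hunk above.
int clamp_active_workers(int gang_workers, int max_task_num) {
  return std::max(std::min(gang_workers, max_task_num), 1);
}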
@@ -2236,11 +2243,11 @@
     } else {
       rp->enqueue_discovered_references();
     }
 
     rp->verify_no_references_recorded();
-    assert(!rp->discovery_enabled(), "should have been disabled");
+    assert(!rp->discovery_enabled(), "Post condition");
   }
 
   // Now clean up stale oops in StringTable
   StringTable::unlink(&g1_is_alive);
   // Clean up unreferenced symbols in symbol table.
@@ -3340,11 +3347,11 @@
                                CMTask* task)
   : _g1h(g1h), _cm(cm), _task(task) {
   assert(_ref_processor == NULL, "should be initialized to NULL");
 
   if (G1UseConcMarkReferenceProcessing) {
-    _ref_processor = g1h->ref_processor();
+    _ref_processor = g1h->ref_processor_cm();
     assert(_ref_processor != NULL, "should not be NULL");
   }
 }
 
 void CMTask::setup_for_region(HeapRegion* hr) {
