src/share/vm/gc_implementation/g1/concurrentMark.cpp

changeset 2494: 234761c55641
parent    2472: 0fa27f37d4d4
child     2495: 81668b1f4877
comparison of 2492:a672e43650cc and 2494:234761c55641
@@ -1053,11 +1053,16 @@
       the_task->record_start_time();
       if (!_cm->has_aborted()) {
         do {
           double start_vtime_sec = os::elapsedVTime();
           double start_time_sec = os::elapsedTime();
-          the_task->do_marking_step(10.0);
+          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
+
+          the_task->do_marking_step(mark_step_duration_ms,
+                                    true /* do_stealing */,
+                                    true /* do_termination */);
+
           double end_time_sec = os::elapsedTime();
           double end_vtime_sec = os::elapsedVTime();
           double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
           double elapsed_time_sec = end_time_sec - start_time_sec;
           _cm->clear_has_overflown();
@@ -1109,11 +1114,12 @@
   // a safepoint is indeed in progress as a younger generation
   // stop-the-world GC happens even as we mark in this generation.
 
   _restart_for_overflow = false;
 
-  set_phase(MAX2((size_t) 1, parallel_marking_threads()), true);
+  size_t active_workers = MAX2((size_t) 1, parallel_marking_threads());
+  set_phase(active_workers, true /* concurrent */);
 
   CMConcurrentMarkingTask markingTask(this, cmThread());
   if (parallel_marking_threads() > 0)
     _parallel_workers->run_task(&markingTask);
   else
@@ -1174,10 +1180,16 @@
       Universe::heap()->prepare_for_verify();
       Universe::heap()->verify(/* allow_dirty */ true,
                                /* silent */ false,
                                /* use_prev_marking */ false);
     }
+    assert(!restart_for_overflow(), "sanity");
+  }
+
+  // Reset the marking state if marking completed
+  if (!restart_for_overflow()) {
+    set_non_marking_state();
   }
 
 #if VERIFY_OBJS_PROCESSED
   _scan_obj_cl.objs_processed = 0;
   ThreadLocalObjQueue::objs_enqueued = 0;
@@ -1851,10 +1863,12 @@
     }
   }
   assert(local_free_list.is_empty(), "post-condition");
 }
 
+// Support closures for reference processing in G1
+
 bool G1CMIsAliveClosure::do_object_b(oop obj) {
   HeapWord* addr = (HeapWord*)obj;
   return addr != NULL &&
          (!_g1->is_in_g1_reserved(addr) || !_g1->is_obj_ill(obj));
 }
@@ -1871,15 +1885,21 @@
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    oop thisOop = oopDesc::load_decode_heap_oop(p);
-    HeapWord* addr = (HeapWord*)thisOop;
-    if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(thisOop)) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    HeapWord* addr = (HeapWord*)obj;
+
+    if (_cm->verbose_high())
+      gclog_or_tty->print_cr("\t[0] we're looking at location "
+                             "*"PTR_FORMAT" = "PTR_FORMAT,
+                             p, (void*) obj);
+
+    if (_g1->is_in_g1_reserved(addr) && _g1->is_obj_ill(obj)) {
       _bitMap->mark(addr);
-      _cm->mark_stack_push(thisOop);
+      _cm->mark_stack_push(obj);
     }
   }
 };
 
 class G1CMDrainMarkingStackClosure: public VoidClosure {
@@ -1897,10 +1917,203 @@
   void do_void() {
     _markStack->drain((OopClosure*)_oopClosure, _bitMap, false);
   }
 };
 
+// 'Keep Alive' closure used by parallel reference processing.
+// An instance of this closure is used in the parallel reference processing
+// code rather than an instance of G1CMKeepAliveClosure. We could have used
+// the G1CMKeepAliveClosure as it is MT-safe. Also reference objects are
+// placed on to discovered ref lists once so we can mark and push with no
+// need to check whether the object has already been marked. Using the
+// G1CMKeepAliveClosure would mean, however, having all the worker threads
+// operating on the global mark stack. This means that an individual
+// worker would be doing lock-free pushes while it processes its own
+// discovered ref list followed by drain call. If the discovered ref lists
+// are unbalanced then this could cause interference with the other
+// workers. Using a CMTask (and its embedded local data structures)
+// avoids that potential interference.
+class G1CMParKeepAliveAndDrainClosure: public OopClosure {
+  ConcurrentMark* _cm;
+  CMTask* _task;
+  CMBitMap* _bitMap;
+  int _ref_counter_limit;
+  int _ref_counter;
+public:
+  G1CMParKeepAliveAndDrainClosure(ConcurrentMark* cm,
+                                  CMTask* task,
+                                  CMBitMap* bitMap) :
+    _cm(cm), _task(task), _bitMap(bitMap),
+    _ref_counter_limit(G1RefProcDrainInterval)
+  {
+    assert(_ref_counter_limit > 0, "sanity");
+    _ref_counter = _ref_counter_limit;
+  }
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(      oop* p) { do_oop_work(p); }
+
+  template <class T> void do_oop_work(T* p) {
+    if (!_cm->has_overflown()) {
+      oop obj = oopDesc::load_decode_heap_oop(p);
+      if (_cm->verbose_high())
+        gclog_or_tty->print_cr("\t[%d] we're looking at location "
+                               "*"PTR_FORMAT" = "PTR_FORMAT,
+                               _task->task_id(), p, (void*) obj);
+
+      _task->deal_with_reference(obj);
+      _ref_counter--;
+
+      if (_ref_counter == 0) {
+        // We have dealt with _ref_counter_limit references, pushing them and objects
+        // reachable from them on to the local stack (and possibly the global stack).
+        // Call do_marking_step() to process these entries. We call the routine in a
+        // loop, which we'll exit if there's nothing more to do (i.e. we're done
+        // with the entries that we've pushed as a result of the deal_with_reference
+        // calls above) or we overflow.
+        // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
+        // while there may still be some work to do. (See the comment at the
+        // beginning of CMTask::do_marking_step() for those conditions - one of which
+        // is reaching the specified time target.) It is only when
+        // CMTask::do_marking_step() returns without setting the has_aborted() flag
+        // that the marking has completed.
+        do {
+          double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
+          _task->do_marking_step(mark_step_duration_ms,
+                                 false /* do_stealing */,
+                                 false /* do_termination */);
+        } while (_task->has_aborted() && !_cm->has_overflown());
+        _ref_counter = _ref_counter_limit;
+      }
+    } else {
+      if (_cm->verbose_high())
+        gclog_or_tty->print_cr("\t[%d] CM Overflow", _task->task_id());
+    }
+  }
+};
+
+class G1CMParDrainMarkingStackClosure: public VoidClosure {
+  ConcurrentMark* _cm;
+  CMTask* _task;
+public:
+  G1CMParDrainMarkingStackClosure(ConcurrentMark* cm, CMTask* task) :
+    _cm(cm), _task(task)
+  {}
+
+  void do_void() {
+    do {
+      if (_cm->verbose_high())
+        gclog_or_tty->print_cr("\t[%d] Drain: Calling do marking_step", _task->task_id());
+
+      // We call CMTask::do_marking_step() to completely drain the local and
+      // global marking stacks. The routine is called in a loop, which we'll
+      // exit if there's nothing more to do (i.e. we've completely drained the
+      // entries that were pushed as a result of applying the
+      // G1CMParKeepAliveAndDrainClosure to the entries on the discovered ref
+      // lists above) or we overflow the global marking stack.
+      // Note: CMTask::do_marking_step() can set the CMTask::has_aborted() flag
+      // while there may still be some work to do. (See the comment at the
+      // beginning of CMTask::do_marking_step() for those conditions - one of which
+      // is reaching the specified time target.) It is only when
+      // CMTask::do_marking_step() returns without setting the has_aborted() flag
+      // that the marking has completed.
+
+      _task->do_marking_step(1000000000.0 /* something very large */,
+                             true /* do_stealing */,
+                             true /* do_termination */);
+    } while (_task->has_aborted() && !_cm->has_overflown());
+  }
+};
+
+// Implementation of AbstractRefProcTaskExecutor for G1
+class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
+private:
+  G1CollectedHeap* _g1h;
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+  WorkGang* _workers;
+  int _active_workers;
+
+public:
+  G1RefProcTaskExecutor(G1CollectedHeap* g1h,
+                        ConcurrentMark* cm,
+                        CMBitMap* bitmap,
+                        WorkGang* workers,
+                        int n_workers) :
+    _g1h(g1h), _cm(cm), _bitmap(bitmap),
+    _workers(workers), _active_workers(n_workers)
+  { }
+
+  // Executes the given task using concurrent marking worker threads.
+  virtual void execute(ProcessTask& task);
+  virtual void execute(EnqueueTask& task);
+};
+
+class G1RefProcTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
+  ProcessTask& _proc_task;
+  G1CollectedHeap* _g1h;
+  ConcurrentMark* _cm;
+  CMBitMap* _bitmap;
+
+public:
+  G1RefProcTaskProxy(ProcessTask& proc_task,
+                     G1CollectedHeap* g1h,
+                     ConcurrentMark* cm,
+                     CMBitMap* bitmap) :
+    AbstractGangTask("Process reference objects in parallel"),
+    _proc_task(proc_task), _g1h(g1h), _cm(cm), _bitmap(bitmap)
+  {}
+
+  virtual void work(int i) {
+    CMTask* marking_task = _cm->task(i);
+    G1CMIsAliveClosure g1_is_alive(_g1h);
+    G1CMParKeepAliveAndDrainClosure g1_par_keep_alive(_cm, marking_task, _bitmap);
+    G1CMParDrainMarkingStackClosure g1_par_drain(_cm, marking_task);
+
+    _proc_task.work(i, g1_is_alive, g1_par_keep_alive, g1_par_drain);
+  }
+};
+
+void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
+
+  // We need to reset the phase for each task execution so that
+  // the termination protocol of CMTask::do_marking_step works.
+  _cm->set_phase(_active_workers, false /* concurrent */);
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&proc_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
+class G1RefEnqueueTaskProxy: public AbstractGangTask {
+  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
+  EnqueueTask& _enq_task;
+
+public:
+  G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
+    AbstractGangTask("Enqueue reference objects in parallel"),
+    _enq_task(enq_task)
+  { }
+
+  virtual void work(int i) {
+    _enq_task.work(i);
+  }
+};
+
+void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
+  assert(_workers != NULL, "Need parallel worker threads.");
+
+  G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
+
+  _g1h->set_par_threads(_active_workers);
+  _workers->run_task(&enq_task_proxy);
+  _g1h->set_par_threads(0);
+}
+
 void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   ResourceMark rm;
   HandleMark hm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ReferenceProcessor* rp = g1h->ref_processor();
@@ -1915,22 +2128,56 @@
   G1CMIsAliveClosure g1_is_alive(g1h);
   G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
   G1CMDrainMarkingStackClosure
     g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
 
-  // XXXYYY Also: copy the parallel ref processing code from CMS.
-  rp->process_discovered_references(&g1_is_alive,
-                                    &g1_keep_alive,
-                                    &g1_drain_mark_stack,
-                                    NULL);
+  // We use the work gang from the G1CollectedHeap and we utilize all
+  // the worker threads.
+  int active_workers = MAX2(MIN2(g1h->workers()->total_workers(), (int)_max_task_num), 1);
+
+  G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
+                                          g1h->workers(), active_workers);
+
+  if (rp->processing_is_mt()) {
+    // Set the degree of MT here. If the discovery is done MT, there
+    // may have been a different number of threads doing the discovery
+    // and a different number of discovered lists may have Ref objects.
+    // That is OK as long as the Reference lists are balanced (see
+    // balance_all_queues() and balance_queues()).
+    rp->set_mt_degree(active_workers);
+
+    rp->process_discovered_references(&g1_is_alive,
+                                      &g1_keep_alive,
+                                      &g1_drain_mark_stack,
+                                      &par_task_executor);
+
+    // The work routines of the parallel keep_alive and drain_marking_stack
+    // will set the has_overflown flag if we overflow the global marking
+    // stack.
+  } else {
+    rp->process_discovered_references(&g1_is_alive,
+                                      &g1_keep_alive,
+                                      &g1_drain_mark_stack,
+                                      NULL);
+
+  }
+
   assert(_markStack.overflow() || _markStack.isEmpty(),
          "mark stack should be empty (unless it overflowed)");
   if (_markStack.overflow()) {
+    // Should have been done already when we tried to push an
+    // entry on to the global mark stack. But let's do it again.
     set_has_overflown();
   }
 
-  rp->enqueue_discovered_references();
+  if (rp->processing_is_mt()) {
+    assert(rp->num_q() == active_workers, "why not");
+    rp->enqueue_discovered_references(&par_task_executor);
+  } else {
+    rp->enqueue_discovered_references();
+  }
+
   rp->verify_no_references_recorded();
   assert(!rp->discovery_enabled(), "should have been disabled");
 
   // Now clean up stale oops in SymbolTable and StringTable
   SymbolTable::unlink(&g1_is_alive);
@@ -1953,11 +2200,13 @@
     // only proceed if we're supposed to be active.
     if ((size_t)worker_i < _cm->active_tasks()) {
       CMTask* task = _cm->task(worker_i);
       task->record_start_time();
       do {
-        task->do_marking_step(1000000000.0 /* something very large */);
+        task->do_marking_step(1000000000.0 /* something very large */,
+                              true /* do_stealing */,
+                              true /* do_termination */);
       } while (task->has_aborted() && !_cm->has_overflown());
       // If we overflow, then we do not want to restart. We instead
       // want to abort remark and do concurrent marking again.
       task->record_end_time();
     }
@@ -1976,11 +2225,11 @@
 
   if (G1CollectedHeap::use_parallel_gc_threads()) {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
     int active_workers = ParallelGCThreads;
-    set_phase(active_workers, false);
+    set_phase(active_workers, false /* concurrent */);
 
     CMRemarkTask remarkTask(this);
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
@@ -1990,11 +2239,11 @@
     g1h->set_par_threads(0);
   } else {
     G1CollectedHeap::StrongRootsScope srs(g1h);
     // this is remark, so we'll use up all available threads
     int active_workers = 1;
-    set_phase(active_workers, false);
+    set_phase(active_workers, false /* concurrent */);
 
     CMRemarkTask remarkTask(this);
     // We will start all available threads, even if we decide that the
     // active_workers will be fewer. The extra ones will just bail out
     // immediately.
@@ -2002,13 +2251,10 @@
   }
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
   guarantee(satb_mq_set.completed_buffers_num() == 0, "invariant");
 
   print_stats();
-
-  if (!restart_for_overflow())
-    set_non_marking_state();
 
 #if VERIFY_OBJS_PROCESSED
   if (_scan_obj_cl.objs_processed != ThreadLocalObjQueue::objs_enqueued) {
     gclog_or_tty->print_cr("Processed = %d, enqueued = %d.",
                            _scan_obj_cl.objs_processed,
@@ -3122,11 +3368,11 @@
       push(obj);
     } else {
       // do nothing
     }
 #else // _CHECK_BOTH_FINGERS_
     // we will only check the global finger
 
     if (objAddr < global_finger) {
       // see long comment above
 
       if (_cm->verbose_high())
@@ -3247,11 +3493,11 @@
   // (5) We check whether we've reached our time quota. If we have,
   // then we abort.
   double elapsed_time_ms = curr_time_ms - _start_time_ms;
   if (elapsed_time_ms > _time_target_ms) {
     set_has_aborted();
-    _has_aborted_timed_out = true;
+    _has_timed_out = true;
     statsOnly( ++_aborted_timed_out );
     return;
   }
 
   // (6) Finally, we check whether there are enough completed SATB
@@ -3752,11 +3998,13 @@
   place, it was natural to piggy-back all the other conditions on it
   too and not constantly check them throughout the code.
 
  *****************************************************************************/
 
-void CMTask::do_marking_step(double time_target_ms) {
+void CMTask::do_marking_step(double time_target_ms,
+                             bool do_stealing,
+                             bool do_termination) {
   assert(time_target_ms >= 1.0, "minimum granularity is 1ms");
   assert(concurrent() == _cm->concurrent(), "they should be the same");
 
   assert(concurrent() || _cm->region_stack_empty(),
          "the region stack should have been cleared before remark");
@@ -3792,11 +4040,11 @@
   _refs_reached = 0;
   recalculate_limits();
 
   // clear all flags
   clear_has_aborted();
-  _has_aborted_timed_out = false;
+  _has_timed_out = false;
   _draining_satb_buffers = false;
 
   ++_calls;
 
   if (_cm->verbose_low())
@@ -3968,11 +4216,11 @@
   // local queue and global stack.
   drain_local_queue(false);
   drain_global_stack(false);
 
   // Attempt at work stealing from other task's queues.
-  if (!has_aborted()) {
+  if (do_stealing && !has_aborted()) {
     // We have not aborted. This means that we have finished all that
     // we could. Let's try to do some stealing...
 
     // We cannot check whether the global stack is empty, since other
     // tasks might be pushing objects to it concurrently. We also cannot
@@ -4009,11 +4257,11 @@
       }
     }
 
   // We still haven't aborted. Now, let's try to get into the
   // termination protocol.
-  if (!has_aborted()) {
+  if (do_termination && !has_aborted()) {
     // We cannot check whether the global stack is empty, since other
     // tasks might be concurrently pushing objects on it. We also cannot
     // check if the region stack is empty because if a thread is aborting
     // it can push a partially done region back.
     // Separated the asserts so that we know which one fires.
@@ -4085,11 +4333,11 @@
   if (has_aborted()) {
     // The task was aborted for some reason.
 
     statsOnly( ++_aborted );
 
-    if (_has_aborted_timed_out) {
+    if (_has_timed_out) {
       double diff_ms = elapsed_time_ms - _time_target_ms;
       // Keep statistics of how well we did with respect to hitting
       // our target only if we actually timed out (if we aborted for
       // other reasons, then the results might get skewed).
       _marking_step_diffs_ms.add(diff_ms);
