604 { |
604 { |
605 _modUnionTable.allocate(_span); |
605 _modUnionTable.allocate(_span); |
606 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?"); |
606 assert(_modUnionTable.covers(_span), "_modUnionTable inconsistency?"); |
607 } |
607 } |
608 |
608 |
609 if (!_markStack.allocate(CMSMarkStackSize)) { |
609 if (!_markStack.allocate(MarkStackSize)) { |
610 warning("Failed to allocate CMS Marking Stack"); |
610 warning("Failed to allocate CMS Marking Stack"); |
611 return; |
611 return; |
612 } |
612 } |
613 if (!_revisitStack.allocate(CMSRevisitStackSize)) { |
613 if (!_revisitStack.allocate(CMSRevisitStackSize)) { |
614 warning("Failed to allocate CMS Revisit Stack"); |
614 warning("Failed to allocate CMS Revisit Stack"); |
615 return; |
615 return; |
616 } |
616 } |
617 |
617 |
618 // Support for multi-threaded concurrent phases |
618 // Support for multi-threaded concurrent phases |
619 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) { |
619 if (ParallelGCThreads > 0 && CMSConcurrentMTEnabled) { |
620 if (FLAG_IS_DEFAULT(ParallelCMSThreads)) { |
620 if (FLAG_IS_DEFAULT(ConcGCThreads)) { |
621 // just for now |
621 // just for now |
622 FLAG_SET_DEFAULT(ParallelCMSThreads, (ParallelGCThreads + 3)/4); |
622 FLAG_SET_DEFAULT(ConcGCThreads, (ParallelGCThreads + 3)/4); |
623 } |
623 } |
624 if (ParallelCMSThreads > 1) { |
624 if (ConcGCThreads > 1) { |
625 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads", |
625 _conc_workers = new YieldingFlexibleWorkGang("Parallel CMS Threads", |
626 ParallelCMSThreads, true); |
626 ConcGCThreads, true); |
627 if (_conc_workers == NULL) { |
627 if (_conc_workers == NULL) { |
628 warning("GC/CMS: _conc_workers allocation failure: " |
628 warning("GC/CMS: _conc_workers allocation failure: " |
629 "forcing -CMSConcurrentMTEnabled"); |
629 "forcing -CMSConcurrentMTEnabled"); |
630 CMSConcurrentMTEnabled = false; |
630 CMSConcurrentMTEnabled = false; |
631 } |
631 } |
632 } else { |
632 } else { |
633 CMSConcurrentMTEnabled = false; |
633 CMSConcurrentMTEnabled = false; |
634 } |
634 } |
635 } |
635 } |
636 if (!CMSConcurrentMTEnabled) { |
636 if (!CMSConcurrentMTEnabled) { |
637 ParallelCMSThreads = 0; |
637 ConcGCThreads = 0; |
638 } else { |
638 } else { |
639 // Turn off CMSCleanOnEnter optimization temporarily for |
639 // Turn off CMSCleanOnEnter optimization temporarily for |
640 // the MT case where it's not fixed yet; see 6178663. |
640 // the MT case where it's not fixed yet; see 6178663. |
641 CMSCleanOnEnter = false; |
641 CMSCleanOnEnter = false; |
642 } |
642 } |
643 assert((_conc_workers != NULL) == (ParallelCMSThreads > 1), |
643 assert((_conc_workers != NULL) == (ConcGCThreads > 1), |
644 "Inconsistency"); |
644 "Inconsistency"); |
645 |
645 |
646 // Parallel task queues; these are shared for the |
646 // Parallel task queues; these are shared for the |
647 // concurrent and stop-world phases of CMS, but |
647 // concurrent and stop-world phases of CMS, but |
648 // are not shared with parallel scavenge (ParNew). |
648 // are not shared with parallel scavenge (ParNew). |
649 { |
649 { |
650 uint i; |
650 uint i; |
651 uint num_queues = (uint) MAX2(ParallelGCThreads, ParallelCMSThreads); |
651 uint num_queues = (uint) MAX2(ParallelGCThreads, ConcGCThreads); |
652 |
652 |
653 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled |
653 if ((CMSParallelRemarkEnabled || CMSConcurrentMTEnabled |
654 || ParallelRefProcEnabled) |
654 || ParallelRefProcEnabled) |
655 && num_queues > 0) { |
655 && num_queues > 0) { |
656 _task_queues = new OopTaskQueueSet(num_queues); |
656 _task_queues = new OopTaskQueueSet(num_queues); |
3655 verify_work_stacks_empty(); |
3655 verify_work_stacks_empty(); |
3656 verify_overflow_empty(); |
3656 verify_overflow_empty(); |
3657 assert(_revisitStack.isEmpty(), "tabula rasa"); |
3657 assert(_revisitStack.isEmpty(), "tabula rasa"); |
3658 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) |
3658 DEBUG_ONLY(RememberKlassesChecker cmx(should_unload_classes());) |
3659 bool result = false; |
3659 bool result = false; |
3660 if (CMSConcurrentMTEnabled && ParallelCMSThreads > 0) { |
3660 if (CMSConcurrentMTEnabled && ConcGCThreads > 0) { |
3661 result = do_marking_mt(asynch); |
3661 result = do_marking_mt(asynch); |
3662 } else { |
3662 } else { |
3663 result = do_marking_st(asynch); |
3663 result = do_marking_st(asynch); |
3664 } |
3664 } |
3665 return result; |
3665 return result; |
4172 _bit_map_lock->lock_without_safepoint_check(); |
4172 _bit_map_lock->lock_without_safepoint_check(); |
4173 _collector->startTimer(); |
4173 _collector->startTimer(); |
4174 } |
4174 } |
4175 |
4175 |
4176 bool CMSCollector::do_marking_mt(bool asynch) { |
4176 bool CMSCollector::do_marking_mt(bool asynch) { |
4177 assert(ParallelCMSThreads > 0 && conc_workers() != NULL, "precondition"); |
4177 assert(ConcGCThreads > 0 && conc_workers() != NULL, "precondition"); |
4178 // In the future this would be determined ergonomically, based |
4178 // In the future this would be determined ergonomically, based |
4179 // on #cpu's, # active mutator threads (and load), and mutation rate. |
4179 // on #cpu's, # active mutator threads (and load), and mutation rate. |
4180 int num_workers = ParallelCMSThreads; |
4180 int num_workers = ConcGCThreads; |
4181 |
4181 |
4182 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); |
4182 CompactibleFreeListSpace* cms_space = _cmsGen->cmsSpace(); |
4183 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); |
4183 CompactibleFreeListSpace* perm_space = _permGen->cmsSpace(); |
4184 |
4184 |
4185 CMSConcMarkingTask tsk(this, cms_space, perm_space, |
4185 CMSConcMarkingTask tsk(this, cms_space, perm_space, |
6427 // lock-ranks involved in order to be able to print the |
6427 // lock-ranks involved in order to be able to print the |
6428 // messages below. (Or defer the printing to the caller. |
6428 // messages below. (Or defer the printing to the caller. |
6429 // For now we take the expedient path of just disabling the |
6429 // For now we take the expedient path of just disabling the |
6430 // messages for the problematic case.) |
6430 // messages for the problematic case.) |
6431 void CMSMarkStack::expand() { |
6431 void CMSMarkStack::expand() { |
6432 assert(_capacity <= CMSMarkStackSizeMax, "stack bigger than permitted"); |
6432 assert(_capacity <= MarkStackSizeMax, "stack bigger than permitted"); |
6433 if (_capacity == CMSMarkStackSizeMax) { |
6433 if (_capacity == MarkStackSizeMax) { |
6434 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { |
6434 if (_hit_limit++ == 0 && !CMSConcurrentMTEnabled && PrintGCDetails) { |
6435 // We print a warning message only once per CMS cycle. |
6435 // We print a warning message only once per CMS cycle. |
6436 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); |
6436 gclog_or_tty->print_cr(" (benign) Hit CMSMarkStack max size limit"); |
6437 } |
6437 } |
6438 return; |
6438 return; |
6439 } |
6439 } |
6440 // Double capacity if possible |
6440 // Double capacity if possible |
6441 size_t new_capacity = MIN2(_capacity*2, CMSMarkStackSizeMax); |
6441 size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax); |
6442 // Do not give up existing stack until we have managed to |
6442 // Do not give up existing stack until we have managed to |
6443 // get the double capacity that we desired. |
6443 // get the double capacity that we desired. |
6444 ReservedSpace rs(ReservedSpace::allocation_align_size_up( |
6444 ReservedSpace rs(ReservedSpace::allocation_align_size_up( |
6445 new_capacity * sizeof(oop))); |
6445 new_capacity * sizeof(oop))); |
6446 if (rs.is_reserved()) { |
6446 if (rs.is_reserved()) { |