src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

changeset 2454:b158bed62ef5
parent    2453:2250ee17e258
child     2455:2e0b0c4671e4
@@ -608,10 +608,43 @@
   // for it to complete, updates to _summary_bytes_used might get
   // lost. This will be resolved in the near future when the operation
   // of the free region list is revamped as part of CR 6977804.
   wait_for_cleanup_complete();

+  // Other threads might still be trying to allocate using CASes out
+  // of the region we are retiring, as they can do so without holding
+  // the Heap_lock. So we first have to make sure that no one else can
+  // allocate in it by doing a maximal allocation. Even if our CAS
+  // attempt fails a few times, we'll succeed sooner or later given
+  // that a failed CAS attempt means that the region is getting close
+  // to being full (someone else succeeded in allocating into it).
+  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;
+
+  // This is the minimum free chunk we can turn into a dummy
+  // object. If the free space falls below this, then no one can
+  // allocate in this region anyway (all allocation requests will be
+  // of a size larger than this) so we won't have to perform the dummy
+  // allocation.
+  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();
+
+  while (free_word_size >= min_word_size_to_fill) {
+    HeapWord* dummy =
+      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
+    if (dummy != NULL) {
+      // If the allocation was successful we should fill in the space.
+      CollectedHeap::fill_with_object(dummy, free_word_size);
+      break;
+    }
+
+    free_word_size = cur_alloc_region->free() / HeapWordSize;
+    // It's also possible that someone else beats us to the
+    // allocation and they fill up the region. In that case, we can
+    // just get out of the loop.
+  }
+  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
+         "sanity");
+
   retire_cur_alloc_region_common(cur_alloc_region);
   assert(_cur_alloc_region == NULL, "post-condition");
 }

 // See the comment in the .hpp file about the locking protocol and
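The block added above retires a region that other threads may still be CAS-allocating into: it repeatedly tries to claim all remaining free space in one maximal allocation and, on success, formats that space as a dummy object so no further allocation can succeed. Below is a minimal, self-contained sketch of the same pattern in portable C++. The Region class, par_allocate(), retire() and the word-based bookkeeping are illustrative stand-ins, not HotSpot code; only the overall CAS-retirement idea mirrors the hunk above.

#include <atomic>
#include <cassert>
#include <cstddef>
#include <thread>
#include <vector>

struct Region {
  static constexpr size_t kCapacity = 1024;   // region size in words
  std::atomic<size_t> top{0};                 // index of the next free word

  size_t free_words() const { return kCapacity - top.load(); }

  // CAS-based bump allocation, usable by concurrent threads
  // (a par_allocate_no_bot_updates analogue).
  bool par_allocate(size_t words, size_t* offset_out) {
    size_t cur = top.load();
    while (cur + words <= kCapacity) {
      if (top.compare_exchange_weak(cur, cur + words)) {
        *offset_out = cur;
        return true;
      }
      // cur has been refreshed by the failed CAS; try again.
    }
    return false;   // not enough space left for this request
  }

  // Retire the region: repeatedly try to claim *all* remaining space.
  // A failed attempt means someone else allocated, so the free space
  // only shrinks and the loop terminates.
  void retire(size_t min_fill_words) {
    size_t free = free_words();
    while (free >= min_fill_words) {
      size_t dummy_offset;
      if (par_allocate(free, &dummy_offset)) {
        // G1 would now format this span as a dummy object
        // (CollectedHeap::fill_with_object); the sketch just claims it.
        break;
      }
      free = free_words();   // someone beat us to it; re-read and retry
    }
    assert(free_words() < min_fill_words && "region not fully closed");
  }
};

int main() {
  Region r;
  // A few threads CAS-allocate small chunks while the main thread
  // retires the region underneath them.
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) {
    workers.emplace_back([&r] {
      size_t offset;
      while (r.par_allocate(8, &offset)) { /* keep allocating */ }
    });
  }
  r.retire(2 /* min_fill_words */);
  for (std::thread& t : workers) t.join();
  assert(r.free_words() < 2);
  return 0;
}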
@@ -659,31 +692,33 @@
     // We need to ensure that the stores to _cur_alloc_region and,
     // subsequently, to top do not float above the setting of the
     // young type.
     OrderAccess::storestore();

-    // Now allocate out of the new current alloc region. We could
-    // have re-used allocate_from_cur_alloc_region() but its
-    // operation is slightly different to what we need here. First,
-    // allocate_from_cur_alloc_region() is only called outside a
-    // safepoint and will always unlock the Heap_lock if it returns
-    // a non-NULL result. Second, it assumes that the current alloc
-    // region is what's already assigned in _cur_alloc_region. What
-    // we want here is to actually do the allocation first before we
-    // assign the new region to _cur_alloc_region. This ordering is
-    // not currently important, but it will be essential when we
-    // change the code to support CAS allocation in the future (see
-    // CR 6994297).
-    //
-    // This allocate method does BOT updates and we don't need them in
-    // the young generation. This will be fixed in the near future by
-    // CR 6994297.
-    HeapWord* result = new_cur_alloc_region->allocate(word_size);
+    // Now, perform the allocation out of the region we just
+    // allocated. Note that no one else can access that region at
+    // this point (as _cur_alloc_region has not been updated yet),
+    // so we can just go ahead and do the allocation without any
+    // atomics (and we expect this allocation attempt to
+    // succeed). Given that other threads can attempt an allocation
+    // with a CAS and without needing the Heap_lock, if we assigned
+    // the new region to _cur_alloc_region before first allocating
+    // into it other threads might have filled up the new region
+    // before we got a chance to do the allocation ourselves. In
+    // that case, we would have needed to retire the region, grab a
+    // new one, and go through all this again. Allocating out of the
+    // new region before assigning it to _cur_alloc_region avoids
+    // all this.
+    HeapWord* result =
+      new_cur_alloc_region->allocate_no_bot_updates(word_size);
     assert(result != NULL, "we just allocate out of an empty region "
            "so allocation should have been successful");
     assert(is_in(result), "result should be in the heap");

+    // Now make sure that the store to _cur_alloc_region does not
+    // float above the store to top.
+    OrderAccess::storestore();
    _cur_alloc_region = new_cur_alloc_region;

     if (!at_safepoint) {
       Heap_lock->unlock();
     }
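The replacement hunk above allocates into the new region first and only then publishes it through _cur_alloc_region, with OrderAccess::storestore() keeping the publishing store from floating above the store to top. Here is a minimal sketch of that publish-after-initialize pattern using C++11 atomics; the Buffer and g_current names are illustrative, and a release/acquire pair stands in for HotSpot's storestore fence plus the reader-side ordering.

#include <atomic>
#include <cassert>
#include <thread>

struct Buffer {
  int top = 0;            // "bump pointer" set up before publication
};

std::atomic<Buffer*> g_current{nullptr};   // _cur_alloc_region analogue

void publisher() {
  Buffer* b = new Buffer();
  b->top = 42;                             // allocate/initialize first...
  // ...then publish. The release store keeps the write to b->top from
  // being reordered after the store to g_current (storestore ordering).
  g_current.store(b, std::memory_order_release);
}

void consumer() {
  Buffer* b = g_current.load(std::memory_order_acquire);
  if (b != nullptr) {
    // If we can see the pointer, we must also see the initialized field.
    assert(b->top == 42);
  }
}

int main() {
  std::thread t1(publisher);
  std::thread t2(consumer);
  t1.join();
  t2.join();
  delete g_current.load();
  return 0;
}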
@@ -715,10 +750,13 @@
   // exit the loop, either one of the allocation attempts was
   // successful, or we succeeded in doing the VM op but it was
   // unable to allocate after the collection.
   for (int try_count = 1; /* we'll return or break */; try_count += 1) {
     bool succeeded = true;
+
+    // Every time we go round the loop we should be holding the Heap_lock.
+    assert_heap_locked();

     {
       // We may have concurrent cleanup working at the time. Wait for
       // it to complete. In the future we would probably want to make
       // the concurrent cleanup truly concurrent by decoupling it from
@@ -732,11 +770,12 @@
       // allocate again, just in case. When we make cleanup truly
       // concurrent with allocation, we should remove this allocation
       // attempt as it's redundant (we only reach here after an
       // allocation attempt has been unsuccessful).
       wait_for_cleanup_complete();
-      HeapWord* result = attempt_allocation(word_size);
+
+      HeapWord* result = attempt_allocation_locked(word_size);
       if (result != NULL) {
         assert_heap_not_locked();
         return result;
       }
     }
@@ -746,11 +785,10 @@
       // allocate a new region only if we can expand the young gen.

       if (g1_policy()->can_expand_young_list()) {
         // Yes, we are allowed to expand the young gen. Let's try to
         // allocate a new current alloc region.
-
         HeapWord* result =
           replace_cur_alloc_region_and_allocate(word_size,
                                                 false, /* at_safepoint */
                                                 true,  /* do_dirtying */
                                                 true   /* can_expand */);
@@ -769,24 +807,27 @@
       // initiated by the last thread exiting the critical section; so
       // we retry the allocation sequence from the beginning of the loop,
       // rather than causing more, now probably unnecessary, GC attempts.
       JavaThread* jthr = JavaThread::current();
       assert(jthr != NULL, "sanity");
-      if (!jthr->in_critical()) {
-        MutexUnlocker mul(Heap_lock);
-        GC_locker::stall_until_clear();
-
-        // We'll then fall off the end of the ("if GC locker active")
-        // if-statement and retry the allocation further down in the
-        // loop.
-      } else {
+      if (jthr->in_critical()) {
         if (CheckJNICalls) {
           fatal("Possible deadlock due to allocating while"
                 " in jni critical section");
         }
+        // We are returning NULL so the protocol is that we're still
+        // holding the Heap_lock.
+        assert_heap_locked();
         return NULL;
       }
+
+      Heap_lock->unlock();
+      GC_locker::stall_until_clear();
+
+      // No need to relock the Heap_lock. We'll fall off to the code
+      // below the else-statement which assumes that we are not
+      // holding the Heap_lock.
     } else {
       // We are not locked out. So, let's try to do a GC. The VM op
       // will retry the allocation before it completes.

       // Read the GC count while holding the Heap_lock
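The GC-locker handling above follows a simple rule: a thread that is itself inside a JNI critical section must not block waiting for the GC locker to clear (that could deadlock), so it returns NULL while still holding the Heap_lock; any other thread drops the Heap_lock and stalls until the critical sections have cleared. A minimal sketch of that pattern with a mutex and condition variable follows; heap_lock, stall_for_gc_locker() and the critical-section bookkeeping are illustrative stand-ins, not the GC_locker API.

#include <condition_variable>
#include <mutex>

std::mutex heap_lock;                     // Heap_lock analogue
std::mutex gc_locker_mutex;               // guards the critical-section count
std::condition_variable gc_locker_cv;
int critical_count = 0;                   // threads currently "in critical"
thread_local bool in_critical = false;    // is *this* thread in critical?

void enter_critical() {
  std::lock_guard<std::mutex> g(gc_locker_mutex);
  critical_count++;
  in_critical = true;
}

void leave_critical() {
  {
    std::lock_guard<std::mutex> g(gc_locker_mutex);
    critical_count--;
  }
  in_critical = false;
  gc_locker_cv.notify_all();
}

// Block until no thread is inside a critical section
// (a stall_until_clear analogue).
void stall_until_clear() {
  std::unique_lock<std::mutex> lk(gc_locker_mutex);
  gc_locker_cv.wait(lk, [] { return critical_count == 0; });
}

// Called with heap_lock held. Returns false with heap_lock still held if
// stalling is not allowed (this thread is inside a critical section),
// or true with heap_lock released after the stall.
bool stall_for_gc_locker() {
  if (in_critical) {
    // Blocking here could deadlock: the locker only clears once this
    // thread leaves its critical section. Report failure instead.
    return false;
  }
  heap_lock.unlock();
  stall_until_clear();
  return true;
}

int main() {
  heap_lock.lock();
  enter_critical();
  bool stalled = stall_for_gc_locker();   // false: we are "in critical"
  if (!stalled) heap_lock.unlock();       // caller still owns the lock
  leave_critical();

  heap_lock.lock();
  stalled = stall_for_gc_locker();        // true: lock released, no stall needed
  return stalled ? 0 : 1;
}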
@@ -803,15 +844,14 @@
         // Allocations that take place on VM operations do not do any
         // card dirtying and we have to do it here.
         dirty_young_block(result, word_size);
         return result;
       }
-
-      Heap_lock->lock();
-    }
-
-    assert_heap_locked();
+    }
+
+    // Both paths that get us here from above unlock the Heap_lock.
+    assert_heap_not_locked();

     // We can reach here when we were unsuccessful in doing a GC,
     // because another thread beat us to it, or because we were locked
     // out of GC due to the GC locker. In either case a new alloc
     // region might be available so we will retry the allocation.
@@ -946,14 +986,12 @@

   if (!isHumongous(word_size)) {
     if (!expect_null_cur_alloc_region) {
       HeapRegion* cur_alloc_region = _cur_alloc_region;
       if (cur_alloc_region != NULL) {
-        // This allocate method does BOT updates and we don't need them in
-        // the young generation. This will be fixed in the near future by
-        // CR 6994297.
-        HeapWord* result = cur_alloc_region->allocate(word_size);
+        // We are at a safepoint so no reason to use the MT-safe version.
+        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
         if (result != NULL) {
           assert(is_in(result), "result should be in the heap");

           // We will not do any dirtying here. This is guaranteed to be
           // called during a safepoint and the thread that scheduled the
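The new comment above ("We are at a safepoint so no reason to use the MT-safe version") is the flip side of the CAS path: with all Java threads stopped, a plain bump of the region's top is enough. A tiny sketch of that non-MT-safe variant is shown below; SafepointRegion and its members are illustrative names, and the concurrent CAS counterpart appears in the earlier Region sketch.

#include <cstddef>

struct SafepointRegion {
  size_t top = 0;        // next free word
  size_t capacity = 0;   // region size in words

  // Non-atomic counterpart of the CAS allocation path: no CAS, no
  // fences, just a read-modify-write of top. Safe only when the caller
  // knows no other thread can allocate concurrently (e.g. at a safepoint).
  bool allocate(size_t words, size_t* offset_out) {
    if (top + words > capacity) {
      return false;
    }
    *offset_out = top;
    top += words;
    return true;
  }
};

int main() {
  SafepointRegion r;
  r.capacity = 128;
  size_t offset;
  return r.allocate(16, &offset) ? 0 : 1;
}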
@@ -981,31 +1019,29 @@

 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");

-  Heap_lock->lock();
-
-  // First attempt: try allocating out of the current alloc region or
-  // after replacing the current alloc region.
+  // First attempt: Try allocating out of the current alloc region
+  // using a CAS. If that fails, take the Heap_lock and retry the
+  // allocation, potentially replacing the current alloc region.
   HeapWord* result = attempt_allocation(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
   }

-  assert_heap_locked();
-
-  // Second attempt: go into the even slower path where we might
-  // try to schedule a collection.
+  // Second attempt: Go to the slower path where we might try to
+  // schedule a collection.
   result = attempt_allocation_slow(word_size);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
   }

   assert_heap_locked();
+  // Need to unlock the Heap_lock before returning.
   Heap_lock->unlock();
   return NULL;
 }

 HeapWord*
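allocate_new_tlab() above now follows a two-level protocol: a first, lock-free attempt that CASes into the current alloc region, and a second, Heap_lock-protected attempt that may install a replacement region. A compact sketch of that fast-path/slow-path split follows; TwoLevelAllocator, Chunk and their members are illustrative, and the real code's dirtying, humongous handling and GC scheduling are omitted.

#include <atomic>
#include <cstddef>
#include <mutex>

struct Chunk {
  std::atomic<size_t> top{0};
  const size_t capacity;
  explicit Chunk(size_t cap) : capacity(cap) {}

  // Lock-free bump allocation via CAS (same pattern as the earlier sketch).
  bool cas_allocate(size_t words, size_t* offset_out) {
    size_t cur = top.load();
    do {
      if (cur + words > capacity) return false;
    } while (!top.compare_exchange_weak(cur, cur + words));
    *offset_out = cur;
    return true;
  }
};

class TwoLevelAllocator {
  std::mutex _lock;                 // Heap_lock analogue
  std::atomic<Chunk*> _current;     // _cur_alloc_region analogue
 public:
  TwoLevelAllocator() : _current(new Chunk(1024)) {}

  bool allocate(size_t words, size_t* offset_out) {
    // First attempt: CAS into the current chunk without taking any lock.
    Chunk* c = _current.load(std::memory_order_acquire);
    if (c->cas_allocate(words, offset_out)) {
      return true;
    }

    // Second attempt: take the lock, allocate into a fresh chunk first,
    // and only then publish it (cf. the ordering discussed earlier).
    std::lock_guard<std::mutex> g(_lock);
    Chunk* fresh = new Chunk(1024);
    if (!fresh->cas_allocate(words, offset_out)) {
      delete fresh;
      return false;                 // request larger than a whole chunk
    }
    _current.store(fresh, std::memory_order_release);
    // The previous chunk is intentionally leaked; reclamation is out of
    // scope for this sketch.
    return true;
  }
};

int main() {
  TwoLevelAllocator heap;
  size_t offset;
  bool ok = heap.allocate(16, &offset);   // fast path
  return ok ? 0 : 1;
}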
@@ -1020,31 +1056,33 @@
   // Loop until the allocation is satisfied,
   // or unsatisfied after GC.
   for (int try_count = 1; /* we'll return */; try_count += 1) {
     unsigned int gc_count_before;
     {
-      Heap_lock->lock();
-
       if (!isHumongous(word_size)) {
-        // First attempt: try allocating out of the current alloc
-        // region or after replacing the current alloc region.
+        // First attempt: Try allocating out of the current alloc region
+        // using a CAS. If that fails, take the Heap_lock and retry the
+        // allocation, potentially replacing the current alloc region.
         HeapWord* result = attempt_allocation(word_size);
         if (result != NULL) {
           assert_heap_not_locked();
           return result;
         }

         assert_heap_locked();

-        // Second attempt: go into the even slower path where we might
-        // try to schedule a collection.
+        // Second attempt: Go to the slower path where we might try to
+        // schedule a collection.
         result = attempt_allocation_slow(word_size);
         if (result != NULL) {
           assert_heap_not_locked();
           return result;
         }
       } else {
+        // attempt_allocation_humongous() requires the Heap_lock to be held.
+        Heap_lock->lock();
+
         HeapWord* result = attempt_allocation_humongous(word_size,
                                                         false /* at_safepoint */);
         if (result != NULL) {
           assert_heap_not_locked();
           return result;
@@ -1052,11 +1090,12 @@
         }

       assert_heap_locked();
       // Read the gc count while the heap lock is held.
       gc_count_before = SharedHeap::heap()->total_collections();
-      // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
+
+      // Release the Heap_lock before attempting the collection.
       Heap_lock->unlock();
     }

     // Create the garbage collection operation...
     VM_G1CollectForAllocation op(gc_count_before, word_size);
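The loop above samples the collection count while the Heap_lock is held, releases the lock, and then submits a VM_G1CollectForAllocation carrying that count; if another thread completes a GC in the meantime, the operation is skipped and the loop simply retries the allocation. A minimal sketch of that "collect only if the count has not moved" handshake follows; collect_for_allocation() and the counter are illustrative stand-ins for the VM operation machinery.

#include <mutex>

std::mutex heap_lock;                 // Heap_lock analogue
unsigned int total_collections = 0;   // incremented by every completed GC

// VM_G1CollectForAllocation analogue: collect only if no GC has
// happened since the caller sampled gc_count_before.
bool collect_for_allocation(unsigned int gc_count_before) {
  std::lock_guard<std::mutex> g(heap_lock);   // stand-in for the safepoint
  if (total_collections != gc_count_before) {
    return false;       // someone else already collected; skip this GC
  }
  total_collections++;  // "do" the collection
  return true;
}

unsigned int sample_gc_count() {
  std::lock_guard<std::mutex> g(heap_lock);
  return total_collections;
}

int main() {
  // Caller side: sample the count while holding the lock, release it,
  // then issue the request with the sampled count.
  unsigned int gc_count_before = sample_gc_count();
  bool did_gc = collect_for_allocation(gc_count_before);

  // A second request with the now-stale count is skipped.
  bool skipped = !collect_for_allocation(gc_count_before);
  return (did_gc && skipped) ? 0 : 1;
}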
