  // for it to complete, updates to _summary_bytes_used might get
  // lost. This will be resolved in the near future when the operation
  // of the free region list is revamped as part of CR 6977804.
  wait_for_cleanup_complete();

  // Other threads might still be trying to allocate using CASes out
  // of the region we are retiring, as they can do so without holding
  // the Heap_lock. So we first have to make sure that no one else can
  // allocate in it by doing a maximal allocation. Even if our CAS
  // attempt fails a few times, we'll succeed sooner or later given
  // that a failed CAS attempt means that the region is getting close
  // to being full (someone else succeeded in allocating into it).
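
  // A minimal sketch (hypothetical, for illustration only) of the kind of
  // CAS-based bump-the-pointer allocation the comment above assumes; the
  // real logic lives in HeapRegion and its Space superclasses:
  //
  //   HeapWord* par_allocate_sketch(size_t word_size) {
  //     do {
  //       HeapWord* obj = top();
  //       if (pointer_delta(end(), obj) < word_size) {
  //         return NULL;  // not enough space left in the region
  //       }
  //       HeapWord* new_top = obj + word_size;
  //       // A failed CAS means another thread advanced top; retry.
  //       if (Atomic::cmpxchg_ptr(new_top, top_addr(), obj) == obj) {
  //         return obj;
  //       }
  //     } while (true);
  //   }
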
  size_t free_word_size = cur_alloc_region->free() / HeapWordSize;

  // This is the minimum free chunk we can turn into a dummy
  // object. If the free space falls below this, then no one can
  // allocate in this region anyway (all allocation requests will be
  // of a size larger than this) so we won't have to perform the dummy
  // allocation.
  size_t min_word_size_to_fill = CollectedHeap::min_fill_size();

  while (free_word_size >= min_word_size_to_fill) {
    HeapWord* dummy =
      cur_alloc_region->par_allocate_no_bot_updates(free_word_size);
    if (dummy != NULL) {
      // If the allocation was successful we should fill in the space.
      CollectedHeap::fill_with_object(dummy, free_word_size);
      break;
    }

    // It's also possible that someone else beats us to the
    // allocation and fills up the region. In that case we can just
    // get out of the loop.
    free_word_size = cur_alloc_region->free() / HeapWordSize;
  }
  assert(cur_alloc_region->free() / HeapWordSize < min_word_size_to_fill,
         "sanity");

  retire_cur_alloc_region_common(cur_alloc_region);
  assert(_cur_alloc_region == NULL, "post-condition");
}

// See the comment in the .hpp file about the locking protocol and
// ...

      // We need to ensure that the stores to _cur_alloc_region and,
      // subsequently, to top do not float above the setting of the
      // young type.
      OrderAccess::storestore();
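      // (storestore() is a store-store barrier: it orders the stores
      // above before the stores below, but implies nothing about loads.)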

      // Now, perform the allocation out of the region we just
      // allocated. Note that no one else can access that region at
      // this point (as _cur_alloc_region has not been updated yet),
      // so we can just go ahead and do the allocation without any
      // atomics (and we expect this allocation attempt to
      // succeed). Given that other threads can attempt an allocation
      // with a CAS and without needing the Heap_lock, if we assigned
      // the new region to _cur_alloc_region before first allocating
      // into it, other threads might have filled up the new region
      // before we got a chance to do the allocation ourselves. In
      // that case, we would have needed to retire the region, grab a
      // new one, and go through all this again. Allocating out of the
      // new region before assigning it to _cur_alloc_region avoids
      // all this.
      HeapWord* result =
        new_cur_alloc_region->allocate_no_bot_updates(word_size);
      assert(result != NULL, "we just allocated out of an empty region "
             "so allocation should have been successful");
      assert(is_in(result), "result should be in the heap");

      // Now make sure that the store to _cur_alloc_region does not
      // float above the store to top.
      OrderAccess::storestore();
      _cur_alloc_region = new_cur_alloc_region;

      if (!at_safepoint) {
        Heap_lock->unlock();
      }
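
      // Taken together, the two storestore() barriers enforce the
      // publication order set_young() -> top -> _cur_alloc_region, so a
      // thread that observes the new _cur_alloc_region also observes a
      // fully initialized, young-typed region.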

// ...

  // exit the loop, either one of the allocation attempts was
  // successful, or we succeeded in doing the VM op but it was
  // unable to allocate after the collection.
  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
    bool succeeded = true;

    // Every time we go round the loop we should be holding the Heap_lock.
    assert_heap_locked();

    {
      // We may have concurrent cleanup working at the time. Wait for
      // it to complete. In the future we would probably want to make
      // the concurrent cleanup truly concurrent by decoupling it from
      // ...
      // allocate again, just in case. When we make cleanup truly
      // concurrent with allocation, we should remove this allocation
      // attempt as it's redundant (we only reach here after an
      // allocation attempt has been unsuccessful).
      wait_for_cleanup_complete();

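      // We already hold the Heap_lock here (asserted above), hence the
      // _locked variant of the allocation attempt.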
      HeapWord* result = attempt_allocation_locked(word_size);
      if (result != NULL) {
        assert_heap_not_locked();
        return result;
      }
    }

    // ...
    // allocate a new region only if we can expand the young gen.

    if (g1_policy()->can_expand_young_list()) {
      // Yes, we are allowed to expand the young gen. Let's try to
      // allocate a new current alloc region.
      HeapWord* result =
        replace_cur_alloc_region_and_allocate(word_size,
                                              false, /* at_safepoint */
                                              true,  /* do_dirtying */
                                              true   /* can_expand */);
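
      // Passing can_expand == true allows the callee to allocate the new
      // region beyond the current young gen target, which is safe here
      // because can_expand_young_list() was checked just above.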

// ...

      // initiated by the last thread exiting the critical section; so
      // we retry the allocation sequence from the beginning of the loop,
      // rather than causing more, now probably unnecessary, GC attempts.
      JavaThread* jthr = JavaThread::current();
      assert(jthr != NULL, "sanity");
      if (jthr->in_critical()) {
        if (CheckJNICalls) {
          fatal("Possible deadlock due to allocating while"
                " in jni critical section");
        }
        // We are returning NULL so the protocol is that we're still
        // holding the Heap_lock.
        assert_heap_locked();
        return NULL;
      }

      Heap_lock->unlock();
      GC_locker::stall_until_clear();
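      // stall_until_clear() blocks until all JNI critical sections have
      // been exited, giving the GC-locker-induced GC (mentioned above) a
      // chance to run before we retry.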

      // No need to relock the Heap_lock. We'll fall off to the code
      // below the else-statement, which assumes that we are not
      // holding the Heap_lock.
    } else {
      // We are not locked out. So, let's try to do a GC. The VM op
      // will retry the allocation before it completes.

      // Read the GC count while holding the Heap_lock.

      // ...

        // Allocations that take place during VM operations do not do
        // any card dirtying and we have to do it here.
        dirty_young_block(result, word_size);
        return result;
      }
    }

    // Both paths that get us here from above unlock the Heap_lock.
    assert_heap_not_locked();

    // We can reach here when we were unsuccessful in doing a GC,
    // because another thread beat us to it, or because we were locked
    // out of GC due to the GC locker. In either case a new alloc
    // region might be available so we will retry the allocation.

// ...

  if (!isHumongous(word_size)) {
    if (!expect_null_cur_alloc_region) {
      HeapRegion* cur_alloc_region = _cur_alloc_region;
      if (cur_alloc_region != NULL) {
        // We are at a safepoint, so there is no reason to use the MT-safe version.
        HeapWord* result = cur_alloc_region->allocate_no_bot_updates(word_size);
        if (result != NULL) {
          assert(is_in(result), "result should be in the heap");

          // We will not do any dirtying here. This is guaranteed to be
          // called during a safepoint and the thread that scheduled the

// ...

HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");

  // First attempt: Try allocating out of the current alloc region
  // using a CAS. If that fails, take the Heap_lock and retry the
  // allocation, potentially replacing the current alloc region.
  HeapWord* result = attempt_allocation(word_size);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  // Second attempt: Go to the slower path where we might try to
  // schedule a collection.
  result = attempt_allocation_slow(word_size);
  if (result != NULL) {
    assert_heap_not_locked();
    return result;
  }

  assert_heap_locked();
  // Need to unlock the Heap_lock before returning.
  Heap_lock->unlock();
  return NULL;
}
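
// Note the locking protocol the asserts above encode: a successful
// attempt_allocation() or attempt_allocation_slow() call returns with
// the Heap_lock released, while a NULL result leaves the caller still
// holding it, hence the explicit unlock before returning NULL.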

HeapWord*
// ...

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    unsigned int gc_count_before;
    {
      if (!isHumongous(word_size)) {
        // First attempt: Try allocating out of the current alloc region
        // using a CAS. If that fails, take the Heap_lock and retry the
        // allocation, potentially replacing the current alloc region.
        HeapWord* result = attempt_allocation(word_size);
        if (result != NULL) {
          assert_heap_not_locked();
          return result;
        }

        assert_heap_locked();

        // Second attempt: Go to the slower path where we might try to
        // schedule a collection.
        result = attempt_allocation_slow(word_size);
        if (result != NULL) {
          assert_heap_not_locked();
          return result;
        }
      } else {
        // attempt_allocation_humongous() requires the Heap_lock to be held.
        Heap_lock->lock();

        HeapWord* result = attempt_allocation_humongous(word_size,
                                                        false /* at_safepoint */);
        if (result != NULL) {
          assert_heap_not_locked();
          return result;

// ...

  const size_t total_reserved = max_byte_size + pgs->max_size();
  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);

  ReservedSpace heap_rs(max_byte_size + pgs->max_size(),
                        HeapRegion::GrainBytes,
                        UseLargePages, addr);

  if (UseCompressedOops) {
    if (addr != NULL && !heap_rs.is_reserved()) {
      // Failed to reserve at specified address - the requested memory
      // region is taken already, for example, by 'java' launcher.
      // Try again to reserve the heap higher.
      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
      ReservedSpace heap_rs0(total_reserved, HeapRegion::GrainBytes,
                             UseLargePages, addr);
      if (addr != NULL && !heap_rs0.is_reserved()) {
        // Failed to reserve at specified address again - give up.
        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
        assert(addr == NULL, "");
        ReservedSpace heap_rs1(total_reserved, HeapRegion::GrainBytes,
                               UseLargePages, addr);
        heap_rs = heap_rs1;
      } else {
        heap_rs = heap_rs0;
      }
    }
  }
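
  // The three preferred_heap_base() calls implement a fallback ladder for
  // compressed oops: first try a base that allows unscaled (32-bit) oops,
  // then one that allows zero-based oops, and finally give up on a
  // preferred base altogether (HeapBasedNarrowOop yields NULL, as the
  // assert above checks) and let the OS place the heap.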

// ...

  HeapRegion* _hr;
  size_t _prev_marked_bytes;
  size_t _next_marked_bytes;
  OopsInHeapRegionClosure *_cl;
public:
  RemoveSelfPointerClosure(G1CollectedHeap* g1, HeapRegion* hr,
                           OopsInHeapRegionClosure* cl) :
    _g1(g1), _hr(hr), _cm(_g1->concurrent_mark()), _prev_marked_bytes(0),
    _next_marked_bytes(0), _cl(cl) {}
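
  // The closure is tied to a single HeapRegion (_hr) so that do_object()
  // below can assert containment and refine that region's BOT entries as
  // it walks the objects.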

  size_t prev_marked_bytes() { return _prev_marked_bytes; }
  size_t next_marked_bytes() { return _next_marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into the middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
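    // Refine the BOT entries covering this object; the region's BOT was
    // reset before the iteration started (see the reset_bot() call in
    // the caller further down).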
    _hr->update_bot_for_object(obj_addr, obj_size);
    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.
      assert(!_g1->is_obj_dead(obj), "We should not be preserving dead objs.");
      _cm->markPrev(obj);
      assert(_cm->isPrevMarked(obj), "Should be marked!");
      _prev_marked_bytes += (obj_size * HeapWordSize);
      if (_g1->mark_in_progress() && !_g1->is_obj_ill(obj)) {
        _cm->markAndGrayObjectIfNecessary(obj);
      }
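      // Restore the prototype markword, clearing the self-forwarding
      // state installed when the evacuation of this object failed (any
      // preserved mark is presumably restored separately).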
      obj->set_mark(markOopDesc::prototype());
      // While we were processing RSet buffers during the

// ...

    cl = &immediate_update;
  }
  HeapRegion* cur = g1_policy()->collection_set();
  while (cur != NULL) {
    assert(g1_policy()->assertMarkedBytesDataOK(), "Should be!");
    assert(!cur->isHumongous(), "sanity");

    if (cur->evacuation_failed()) {
      assert(cur->in_collection_set(), "bad CS");
      RemoveSelfPointerClosure rspc(_g1h, cur, cl);

      cur->reset_bot();
      cl->set_region(cur);
      cur->object_iterate(&rspc);

      // A number of manipulations to make the TAMS be the current top,
      // and the marked bytes be the ones observed in the iteration.

// ...

  while (_evac_failure_scan_stack->length() > 0) {
    oop obj = _evac_failure_scan_stack->pop();
    _evac_failure_closure->set_region(heap_region_containing(obj));
    obj->oop_iterate_backwards(_evac_failure_closure);
  }
}

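// handle_evacuation_failure_par() is the multi-threaded counterpart of
// the self-forwarding logic above: a worker that fails to copy an object
// forwards the object to itself, presumably via an atomic operation so
// that workers racing on the same object agree on a single winner.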
oop
G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
                                               oop old) {