src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp

changeset 7208:7baf47cb97cb
parent    7106:66d359ee9681
child     7535:7ae4e26cb1e0
child     7828:cbc7c4c9e11c
diff from 7207:152cf4afc11f to 7208:7baf47cb97cb
                              BitMap* region_bm, BitMap* card_bm) {
   _other_regions.scrub(ctbs, region_bm, card_bm);
 }
 
 // Code roots support
+//
+// The code root set is protected by two separate locking schemes
+// When at safepoint the per-hrrs lock must be held during modifications
+// except when doing a full gc.
+// When not at safepoint the CodeCache_lock must be held during modifications.
+// When concurrent readers access the contains() function
+// (during the evacuation phase) no removals are allowed.
 
 void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
+  assert(nm != NULL, "sanity");
+  // Optimistic unlocked contains-check
+  if (!_code_roots.contains(nm)) {
+    MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
+    add_strong_code_root_locked(nm);
+  }
+}
+
+void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
   assert(nm != NULL, "sanity");
   _code_roots.add(nm);
 }
 
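A note on the hunk above: the new comment and the reworked add_strong_code_root() describe a check-then-lock scheme. Callers probe the set without any lock and only take the per-region lock (_m) when the nmethod is not yet present, and removals are excluded while such unlocked readers run. The following stand-alone sketch illustrates that shape with standard C++ primitives. It is not the HotSpot implementation: TinyCodeRootSet, its fixed slot array, and kCapacity are invented for the example, and the real G1CodeRootSet grows and stores its entries differently.

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    // Illustrative only: a tiny set whose contains() may be called without a
    // lock, while add() serializes writers on a mutex. Slots only ever change
    // from null to a published pointer, so unlocked readers stay safe here.
    class TinyCodeRootSet {
     public:
      TinyCodeRootSet() {
        for (std::size_t i = 0; i < kCapacity; i++) {
          _slots[i].store(nullptr, std::memory_order_relaxed);
        }
      }

      // Unlocked, optimistic membership probe (the "fast path").
      bool contains(const void* nm) const {
        for (std::size_t i = 0; i < kCapacity; i++) {
          if (_slots[i].load(std::memory_order_acquire) == nm) {
            return true;
          }
        }
        return false;
      }

      // Check-then-lock add: skip the lock when the entry is already present;
      // otherwise take the lock, re-check, and publish into a free slot.
      bool add(const void* nm) {
        if (contains(nm)) {
          return true;                  // fast path, no lock taken
        }
        std::lock_guard<std::mutex> guard(_m);
        if (contains(nm)) {
          return true;                  // another writer got there first
        }
        for (std::size_t i = 0; i < kCapacity; i++) {
          if (_slots[i].load(std::memory_order_relaxed) == nullptr) {
            _slots[i].store(nm, std::memory_order_release);
            return true;
          }
        }
        return false;                   // out of space; a real set would grow
      }

     private:
      static const std::size_t kCapacity = 64;
      std::mutex _m;
      std::atomic<const void*> _slots[kCapacity];
    };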
 void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
   assert(nm != NULL, "sanity");
   assert_locked_or_safepoint(CodeCache_lock);
 
-  _code_roots.remove_lock_free(nm);
+  MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
+  _code_roots.remove(nm);
 
   // Check that there were no duplicates
   guarantee(!_code_roots.contains(nm), "duplicate entry found");
 }
 
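A note on the hunk above: remove_strong_code_root() now takes the per-region lock only when the caller does not already hold CodeCache_lock; passing NULL to MutexLockerEx means "no lock", so a modification already serialized by CodeCache_lock (the not-at-safepoint scheme from the comment) is not double-locked. A rough equivalent with standard C++ primitives is sketched below; already_serialized stands in for the CodeCache_lock->owned_by_self() test, and RegionCodeRoots is an invented placeholder, not a HotSpot type.

    #include <mutex>
    #include <unordered_set>

    // Illustrative only: conditional locking on removal. When the caller is
    // already serialized by an outer lock, this set's own mutex is skipped,
    // mirroring MutexLockerEx(maybe_null_lock, ...) in the HotSpot code.
    class RegionCodeRoots {
     public:
      void remove(const void* nm, bool already_serialized) {
        std::unique_lock<std::mutex> guard(_m, std::defer_lock);
        if (!already_serialized) {
          guard.lock();                 // only lock when we have to
        }
        _roots.erase(nm);
      }

     private:
      std::mutex _m;
      std::unordered_set<const void*> _roots;
    };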
-class NMethodMigrationOopClosure : public OopClosure {
-  G1CollectedHeap* _g1h;
-  HeapRegion* _from;
-  nmethod* _nm;
-
-  uint _num_self_forwarded;
-
-  template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-      if (_from->is_in(obj)) {
-        // Reference still points into the source region.
-        // Since roots are immediately evacuated this means that
-        // we must have self forwarded the object
-        assert(obj->is_forwarded(),
-               err_msg("code roots should be immediately evacuated. "
-                       "Ref: "PTR_FORMAT", "
-                       "Obj: "PTR_FORMAT", "
-                       "Region: "HR_FORMAT,
-                       p, (void*) obj, HR_FORMAT_PARAMS(_from)));
-        assert(obj->forwardee() == obj,
-               err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
-
-        // The object has been self forwarded.
-        // Note, if we're during an initial mark pause, there is
-        // no need to explicitly mark object. It will be marked
-        // during the regular evacuation failure handling code.
-        _num_self_forwarded++;
-      } else {
-        // The reference points into a promotion or to-space region
-        HeapRegion* to = _g1h->heap_region_containing(obj);
-        to->rem_set()->add_strong_code_root(_nm);
-      }
-    }
-  }
-
- public:
-  NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
-    _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
-
-  void do_oop(narrowOop* p) { do_oop_work(p); }
-  void do_oop(oop* p) { do_oop_work(p); }
-
-  uint retain() { return _num_self_forwarded > 0; }
-};
-
-void HeapRegionRemSet::migrate_strong_code_roots() {
-  assert(hr()->in_collection_set(), "only collection set regions");
-  assert(!hr()->isHumongous(),
-         err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
-                 HR_FORMAT_PARAMS(hr())));
-
-  ResourceMark rm;
-
-  // List of code blobs to retain for this region
-  GrowableArray<nmethod*> to_be_retained(10);
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  while (!_code_roots.is_empty()) {
-    nmethod *nm = _code_roots.pop();
-    if (nm != NULL) {
-      NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
-      nm->oops_do(&oop_cl);
-      if (oop_cl.retain()) {
-        to_be_retained.push(nm);
-      }
-    }
-  }
-
-  // Now push any code roots we need to retain
-  assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
-         "Retained nmethod list must be empty or "
-         "evacuation of this region failed");
-
-  while (to_be_retained.is_nonempty()) {
-    nmethod* nm = to_be_retained.pop();
-    assert(nm != NULL, "sanity");
-    add_strong_code_root(nm);
-  }
-}
-
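The block removed above implemented code root migration after evacuation: every nmethod in the region's set was drained, its embedded references were walked with an OopClosure, the nmethod was re-registered with the remembered sets of the regions its referents were copied to, and it was kept locally only when at least one referent was self-forwarded (i.e. evacuation of this region had failed). Purely as a schematic of that drain-inspect-retain loop, one could write something like the following; Entry, Inspection, and migrate are invented placeholders, not HotSpot classes, and the inspection callback hides the per-reference work the real closure performed.

    #include <vector>

    struct Entry;                       // opaque stand-in for an nmethod

    struct Inspection {
      bool retain_locally;              // e.g. a self-forwarded object was seen
    };

    // Drain the local set, inspect each entry (which may also re-register it
    // with other owners as a side effect), and re-add only the retained ones.
    template <typename InspectFn>
    void migrate(std::vector<Entry*>& local_set, InspectFn inspect) {
      std::vector<Entry*> retained;

      while (!local_set.empty()) {
        Entry* e = local_set.back();
        local_set.pop_back();
        Inspection result = inspect(e);
        if (result.retain_locally) {
          retained.push_back(e);
        }
      }

      for (Entry* e : retained) {
        local_set.push_back(e);
      }
    }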
 void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
   _code_roots.nmethods_do(blk);
+}
+
+void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
+  _code_roots.clean(hr);
 }
 
 size_t HeapRegionRemSet::strong_code_roots_mem_size() {
   return _code_roots.mem_size();
 }
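A note on the hunk above: strong_code_roots_do() follows HotSpot's closure idiom, in which the set owns the iteration and applies the caller-supplied closure to each code root via nmethods_do(blk). A generic sketch of that shape is below; NmethodClosure, RootSet, and do_nmethod are placeholder names chosen for the example, not the real CodeBlobClosure or G1CodeRootSet interfaces.

    #include <vector>

    struct Nmethod;                     // opaque stand-in

    // Closure-style iteration: the container drives the loop and hands each
    // element to a caller-supplied object, mirroring nmethods_do(blk).
    class NmethodClosure {
     public:
      virtual ~NmethodClosure() {}
      virtual void do_nmethod(Nmethod* nm) = 0;
    };

    class RootSet {
     public:
      void add(Nmethod* nm) { _roots.push_back(nm); }

      void nmethods_do(NmethodClosure* cl) const {
        for (Nmethod* nm : _roots) {
          cl->do_nmethod(nm);
        }
      }

     private:
      std::vector<Nmethod*> _roots;
    };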
