@@ -43,30 +43,27 @@
 #include "services/memTracker.hpp"
 #include "utilities/vmError.hpp"
 
 PSYoungGen* ParallelScavengeHeap::_young_gen = NULL;
 PSOldGen* ParallelScavengeHeap::_old_gen = NULL;
-PSPermGen* ParallelScavengeHeap::_perm_gen = NULL;
 PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = NULL;
 PSGCAdaptivePolicyCounters* ParallelScavengeHeap::_gc_policy_counters = NULL;
 ParallelScavengeHeap* ParallelScavengeHeap::_psh = NULL;
 GCTaskManager* ParallelScavengeHeap::_gc_task_manager = NULL;
 
 static void trace_gen_sizes(const char* const str,
-                            size_t pg_min, size_t pg_max,
                             size_t og_min, size_t og_max,
                             size_t yg_min, size_t yg_max)
 {
   if (TracePageSizes) {
     tty->print_cr("%s: " SIZE_FORMAT "," SIZE_FORMAT " "
                   SIZE_FORMAT "," SIZE_FORMAT " "
-                  SIZE_FORMAT "," SIZE_FORMAT " "
                   SIZE_FORMAT,
-                  str, pg_min / K, pg_max / K,
+                  str,
                   og_min / K, og_max / K,
                   yg_min / K, yg_max / K,
-                  (pg_max + og_max + yg_max) / K);
+                  (og_max + yg_max) / K);
   }
 }
 
 jint ParallelScavengeHeap::initialize() {
   CollectedHeap::pre_initialize();
@@ -77,29 +74,19 @@
 
   size_t yg_min_size = _collector_policy->min_young_gen_size();
   size_t yg_max_size = _collector_policy->max_young_gen_size();
   size_t og_min_size = _collector_policy->min_old_gen_size();
   size_t og_max_size = _collector_policy->max_old_gen_size();
-  // Why isn't there a min_perm_gen_size()?
-  size_t pg_min_size = _collector_policy->perm_gen_size();
-  size_t pg_max_size = _collector_policy->max_perm_gen_size();
 
   trace_gen_sizes("ps heap raw",
-                  pg_min_size, pg_max_size,
                   og_min_size, og_max_size,
                   yg_min_size, yg_max_size);
 
-  // The ReservedSpace ctor used below requires that the page size for the perm
-  // gen is <= the page size for the rest of the heap (young + old gens).
   const size_t og_page_sz = os::page_size_for_region(yg_min_size + og_min_size,
                                                      yg_max_size + og_max_size,
                                                      8);
-  const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
-                                                          pg_max_size, 16),
-                                 og_page_sz);
 
-  const size_t pg_align = set_alignment(_perm_gen_alignment, pg_page_sz);
   const size_t og_align = set_alignment(_old_gen_alignment, og_page_sz);
   const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
 
   // Update sizes to reflect the selected page size(s).
   //
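The hunk above rounds every generation size to an alignment derived from the page size chosen by os::page_size_for_region. For readers unfamiliar with the align_size_up / align_size_down helpers it relies on, here is a minimal standalone sketch of the power-of-two arithmetic they perform (the names mirror the HotSpot helpers, but this is an illustration, not the HotSpot source):

#include <cassert>
#include <cstddef>

// Minimal sketches of the alignment helpers used above; both assume
// 'alignment' is a power of two, which page sizes always are.
static size_t align_size_up(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power of two expected");
  return (size + alignment - 1) & ~(alignment - 1);   // round up
}

static size_t align_size_down(size_t size, size_t alignment) {
  assert((alignment & (alignment - 1)) == 0 && "power of two expected");
  return size & ~(alignment - 1);                      // round down
}

// For example, with a 2 MB (0x200000) large-page alignment:
//   align_size_up(0x500001, 0x200000)   == 0x600000
//   align_size_down(0x500000, 0x200000) == 0x400000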
@@ -119,59 +106,24 @@
   og_max_size = MAX2(og_max_size, og_min_size);
   size_t og_cur_size =
     align_size_down(_collector_policy->old_gen_size(), og_align);
   og_cur_size = MAX2(og_cur_size, og_min_size);
 
-  pg_min_size = align_size_up(pg_min_size, pg_align);
-  pg_max_size = align_size_up(pg_max_size, pg_align);
-  size_t pg_cur_size = pg_min_size;
-
   trace_gen_sizes("ps heap rnd",
-                  pg_min_size, pg_max_size,
                   og_min_size, og_max_size,
                   yg_min_size, yg_max_size);
 
-  const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
-  char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
+  const size_t heap_size = og_max_size + yg_max_size;
 
-  // The main part of the heap (old gen + young gen) can often use a larger page
-  // size than is needed or wanted for the perm gen.  Use the "compound
-  // alignment" ReservedSpace ctor to avoid having to use the same page size for
-  // all gens.
-
-  ReservedHeapSpace heap_rs(pg_max_size, pg_align, og_max_size + yg_max_size,
-                            og_align, addr);
-
-  if (UseCompressedOops) {
-    if (addr != NULL && !heap_rs.is_reserved()) {
-      // Failed to reserve at specified address - the requested memory
-      // region is taken already, for example, by 'java' launcher.
-      // Try again to reserve heap higher.
-      addr = Universe::preferred_heap_base(total_reserved, Universe::ZeroBasedNarrowOop);
-      ReservedHeapSpace heap_rs0(pg_max_size, pg_align, og_max_size + yg_max_size,
-                                 og_align, addr);
-      if (addr != NULL && !heap_rs0.is_reserved()) {
-        // Failed to reserve at specified address again - give up.
-        addr = Universe::preferred_heap_base(total_reserved, Universe::HeapBasedNarrowOop);
-        assert(addr == NULL, "");
-        ReservedHeapSpace heap_rs1(pg_max_size, pg_align, og_max_size + yg_max_size,
-                                   og_align, addr);
-        heap_rs = heap_rs1;
-      } else {
-        heap_rs = heap_rs0;
-      }
-    }
-  }
+  ReservedSpace heap_rs = Universe::reserve_heap(heap_size, og_align);
 
   MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtJavaHeap);
 
-  os::trace_page_sizes("ps perm", pg_min_size, pg_max_size, pg_page_sz,
-                       heap_rs.base(), pg_max_size);
   os::trace_page_sizes("ps main", og_min_size + yg_min_size,
                        og_max_size + yg_max_size, og_page_sz,
-                       heap_rs.base() + pg_max_size,
-                       heap_rs.size() - pg_max_size);
+                       heap_rs.base(),
+                       heap_rs.size());
   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
       "Could not reserve enough space for object heap");
     return JNI_ENOMEM;
   }
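The deleted block above hand-rolled a three-step fallback for compressed oops: try the base address preferred for unscaled narrow oops, then a zero-based base, then give up on a preferred base entirely (heap-based encoding). The replacement line delegates that policy to Universe::reserve_heap. A self-contained sketch of the retry-ladder pattern; note that try_reserve_at and the candidate addresses are stand-ins for illustration, not HotSpot APIs:

#include <cstddef>
#include <cstdlib>

// Stand-in for an OS reservation attempt at a preferred base address.
// A real implementation would use mmap/VirtualAlloc; malloc here just
// keeps the sketch self-contained (the preferred address is ignored).
static char* try_reserve_at(char* preferred, size_t size) {
  (void)preferred;
  return static_cast<char*>(std::malloc(size));
}

// The retry ladder the removed code implemented by hand: prefer bases
// that make narrow-oop decoding cheaper, and weaken the preference
// when the address range is already taken (e.g. by the launcher).
static char* reserve_heap_with_fallback(size_t size) {
  char* const candidates[] = {
    reinterpret_cast<char*>(0x40000000),   // example low base so the heap ends below 4 GB (unscaled)
    reinterpret_cast<char*>(0x400000000),  // example base keeping the heap below 32 GB (zero-based)
    NULL                                   // no preference: heap-based encoding
  };
  for (char* addr : candidates) {
    char* base = try_reserve_at(addr, size);
    if (base != NULL) {
      return base;  // first mode that reserves successfully wins
    }
  }
  return NULL;      // caller reports JNI_ENOMEM, as initialize() does above
}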
@@ -191,26 +143,21 @@
   // Initial young gen size is 4 Mb
   //
   // XXX - what about flag_parser.young_gen_size()?
   const size_t init_young_size = align_size_up(4 * M, yg_align);
   yg_cur_size = MAX2(MIN2(init_young_size, yg_max_size), yg_cur_size);
-
-  // Split the reserved space into perm gen and the main heap (everything else).
-  // The main heap uses a different alignment.
-  ReservedSpace perm_rs = heap_rs.first_part(pg_max_size);
-  ReservedSpace main_rs = heap_rs.last_part(pg_max_size, og_align);
 
   // Make up the generations
   // Calculate the maximum size that a generation can grow.  This
   // includes growth into the other generation.  Note that the
   // parameter _max_gen_size is kept as the maximum
   // size of the generation as the boundaries currently stand.
   // _max_gen_size is still used as that value.
   double max_gc_pause_sec = ((double) MaxGCPauseMillis)/1000.0;
   double max_gc_minor_pause_sec = ((double) MaxGCMinorPauseMillis)/1000.0;
 
-  _gens = new AdjoiningGenerations(main_rs,
+  _gens = new AdjoiningGenerations(heap_rs,
                                    og_cur_size,
                                    og_min_size,
                                    og_max_size,
                                    yg_cur_size,
                                    yg_min_size,
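As the comment block above says, each generation may grow into its neighbor: AdjoiningGenerations lays the old and young gens out back-to-back in a single reservation (now the whole heap_rs, since no perm gen is carved off first) and lets the boundary between them move. A toy sketch of that movable-boundary idea; AdjoiningGens and its methods are invented names for illustration, not the HotSpot class:

#include <cstddef>

// Toy model of two adjoining generations sharing one reservation.
// Growing the old gen moves the shared boundary up, consuming room
// that the young gen could otherwise expand into.
struct AdjoiningGens {
  char* low;       // bottom of the old gen
  char* boundary;  // old/young boundary -- movable
  char* high;      // top of the young gen

  AdjoiningGens(char* base, size_t old_size, size_t total)
    : low(base), boundary(base + old_size), high(base + total) {}

  size_t old_gen_capacity() const   { return static_cast<size_t>(boundary - low); }
  size_t young_gen_capacity() const { return static_cast<size_t>(high - boundary); }

  // Grow the old gen by 'bytes', refusing to push the young gen
  // below its configured minimum size.
  bool grow_old_gen(size_t bytes, size_t young_min) {
    size_t young_room = young_gen_capacity();
    if (young_room < bytes || young_room - bytes < young_min) {
      return false;
    }
    boundary += bytes;
    return true;
  }
};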
@@ -589,172 +524,18 @@
   }
 
   // Fourth level allocation failure.  We're running out of memory.
   // More complete mark sweep and allocate in young generation.
   if (result == NULL) {
-    invoke_full_gc(true);
+    do_full_collection(true);
     result = young_gen()->allocate(size);
   }
 
   // Fifth level allocation failure.
   // After more complete mark sweep, allocate in old generation.
   if (result == NULL) {
     result = old_gen()->allocate(size);
-  }
-
-  return result;
-}
-
-//
-// This is the policy loop for allocating in the permanent generation.
-// If the initial allocation fails, we create a vm operation which will
-// cause a collection.
-HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
-  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
-  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-
-  HeapWord* result;
-
-  uint loop_count = 0;
-  uint gc_count = 0;
-  uint full_gc_count = 0;
-
-  do {
-    // We don't want to have multiple collections for a single filled generation.
-    // To prevent this, each thread tracks the total_collections() value, and if
-    // the count has changed, does not do a new collection.
-    //
-    // The collection count must be read only while holding the heap lock. VM
-    // operations also hold the heap lock during collections. There is a lock
-    // contention case where thread A blocks waiting on the Heap_lock, while
-    // thread B is holding it doing a collection. When thread A gets the lock,
-    // the collection count has already changed. To prevent duplicate collections,
-    // the policy MUST attempt allocations during the same period it reads the
-    // total_collections() value!
-    {
-      MutexLocker ml(Heap_lock);
-      gc_count = Universe::heap()->total_collections();
-      full_gc_count = Universe::heap()->total_full_collections();
-
-      result = perm_gen()->allocate_permanent(size);
-
-      if (result != NULL) {
-        return result;
-      }
-
-      if (GC_locker::is_active_and_needs_gc()) {
-        // If this thread is not in a jni critical section, we stall
-        // the requestor until the critical section has cleared and
-        // GC is allowed. When the critical section clears, a GC is
-        // initiated by the last thread exiting the critical section; so
-        // we retry the allocation sequence from the beginning of the loop,
-        // rather than causing more, now probably unnecessary, GC attempts.
-        JavaThread* jthr = JavaThread::current();
-        if (!jthr->in_critical()) {
-          MutexUnlocker mul(Heap_lock);
-          GC_locker::stall_until_clear();
-          continue;
-        } else {
-          if (CheckJNICalls) {
-            fatal("Possible deadlock due to allocating while"
-                  " in jni critical section");
-          }
-          return NULL;
-        }
-      }
-    }
-
-    if (result == NULL) {
-
-      // Exit the loop if the gc time limit has been exceeded.
-      // The allocation must have failed above (result must be NULL),
-      // and the most recent collection must have exceeded the
-      // gc time limit.  Exit the loop so that an out-of-memory
-      // will be thrown (returning a NULL will do that), but
-      // clear gc_overhead_limit_exceeded so that the next collection
-      // will succeed if the application decides to handle the
-      // out-of-memory and tries to go on.
-      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
-      if (limit_exceeded) {
-        size_policy()->set_gc_overhead_limit_exceeded(false);
-        if (PrintGCDetails && Verbose) {
-          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
-            " return NULL because gc_overhead_limit_exceeded is set");
-        }
-        assert(result == NULL, "Allocation did not fail");
-        return NULL;
-      }
-
-      // Generate a VM operation
-      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
-      VMThread::execute(&op);
-
-      // Did the VM operation execute? If so, return the result directly.
-      // This prevents us from looping until time out on requests that can
-      // not be satisfied.
-      if (op.prologue_succeeded()) {
-        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
-               "result not in heap");
-        // If GC was locked out during VM operation then retry allocation
-        // and/or stall as necessary.
-        if (op.gc_locked()) {
-          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
-          continue;  // retry and/or stall as necessary
-        }
-        // If a NULL result is being returned, an out-of-memory
-        // will be thrown now.  Clear the gc_overhead_limit_exceeded
-        // flag to avoid the following situation:
-        //   gc_overhead_limit_exceeded is set during a collection;
-        //   the collection fails to return enough space and an OOM is thrown;
-        //   a subsequent GC prematurely throws an out-of-memory because
-        //   the gc_overhead_limit_exceeded counts did not start
-        //   again from 0.
-        if (op.result() == NULL) {
-          size_policy()->reset_gc_overhead_limit_count();
-        }
-        return op.result();
-      }
-    }
-
-    // The policy object will prevent us from looping forever. If the
-    // time spent in gc crosses a threshold, we will bail out.
-    loop_count++;
-    if ((QueuedAllocationWarningCount > 0) &&
-        (loop_count % QueuedAllocationWarningCount == 0)) {
-      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
-              " size=%d", loop_count, size);
-    }
-  } while (result == NULL);
-
-  return result;
-}
-
-//
-// This is the policy code for permanent allocations which have failed
-// and require a collection.  Note that just as in failed_mem_allocate,
-// we do not set collection policy, only where & when to allocate and
-// collect.
-HeapWord* ParallelScavengeHeap::failed_permanent_mem_allocate(size_t size) {
-  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
-  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
-  assert(!Universe::heap()->is_gc_active(), "not reentrant");
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  assert(size > perm_gen()->free_in_words(), "Allocation should fail");
-
-  // We assume (and assert!) that an allocation at this point will fail
-  // unless we collect.
-
-  // First level allocation failure.  Mark-sweep and allocate in perm gen.
-  GCCauseSetter gccs(this, GCCause::_allocation_failure);
-  invoke_full_gc(false);
-  HeapWord* result = perm_gen()->allocate_permanent(size);
-
-  // Second level allocation failure.  We're running out of memory.
-  if (result == NULL) {
-    invoke_full_gc(true);
-    result = perm_gen()->allocate_permanent(size);
   }
 
   return result;
 }
 
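The removed permanent_mem_allocate above is the clearest statement of the allocation policy loop this collector still uses for ordinary allocations: read total_collections() and attempt the allocation under one hold of the Heap_lock, then hand the failed request plus the observed counts to a VM operation whose prologue skips the collection if another thread already collected in the meantime. A condensed, self-contained sketch of that dedupe protocol, using std::mutex and a toy heap in place of the HotSpot types:

#include <cstddef>
#include <mutex>

// Toy stand-ins for the HotSpot pieces: a bump-pointer "heap" whose
// collection simply empties it, guarded by a lock playing Heap_lock.
struct ToyHeap {
  std::mutex    heap_lock;
  unsigned      total_collections = 0;
  unsigned char storage[4096];
  size_t        used = 0;

  void* allocate(size_t size) {           // NULL when full, like the real gens
    if (used + size > sizeof(storage)) return NULL;
    void* p = storage + used;
    used += size;
    return p;
  }
  void collect() {                        // toy "GC": free everything
    used = 0;
    ++total_collections;
  }
};

// The dedupe rule from the removed comment block: the collection count
// must be read, and the allocation attempted, under the same hold of
// the lock, so a collection that ran while this thread was blocked on
// the lock is detected and not repeated.
void* mem_allocate_with_retry(ToyHeap& heap, size_t size, unsigned max_tries = 3) {
  for (unsigned i = 0; i < max_tries; ++i) {
    unsigned observed_count;
    {
      std::lock_guard<std::mutex> ml(heap.heap_lock);
      observed_count = heap.total_collections;
      if (void* result = heap.allocate(size)) {
        return result;                    // fast path: no GC needed
      }
    }
    // Equivalent of the VM operation's prologue: collect only if no
    // other thread has collected since the count was observed.
    std::lock_guard<std::mutex> ml(heap.heap_lock);
    if (heap.total_collections == observed_count) {
      heap.collect();
    }
    if (void* result = heap.allocate(size)) {
      return result;
    }
    // Otherwise retry; the real loop also honors GC_locker stalls and
    // the gc-overhead limit before giving up.
  }
  return NULL;                            // caller throws OutOfMemoryError
}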
@@ -810,48 +591,19 @@
 
   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
   VMThread::execute(&op);
 }
 
-// This interface assumes that it's being called by the
-// vm thread. It collects the heap assuming that the
-// heap lock is already held and that we are executing in
-// the context of the vm thread.
-void ParallelScavengeHeap::collect_as_vm_thread(GCCause::Cause cause) {
-  assert(Thread::current()->is_VM_thread(), "Precondition#1");
-  assert(Heap_lock->is_locked(), "Precondition#2");
-  GCCauseSetter gcs(this, cause);
-  switch (cause) {
-    case GCCause::_heap_inspection:
-    case GCCause::_heap_dump: {
-      HandleMark hm;
-      invoke_full_gc(false);
-      break;
-    }
-    default: // XXX FIX ME
-      ShouldNotReachHere();
-  }
-}
-
-
-void ParallelScavengeHeap::oop_iterate(OopClosure* cl) {
+void ParallelScavengeHeap::oop_iterate(ExtendedOopClosure* cl) {
   Unimplemented();
 }
 
 void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {
   young_gen()->object_iterate(cl);
   old_gen()->object_iterate(cl);
-  perm_gen()->object_iterate(cl);
 }
 
-
-void ParallelScavengeHeap::permanent_oop_iterate(OopClosure* cl) {
-  Unimplemented();
-}
-
-void ParallelScavengeHeap::permanent_object_iterate(ObjectClosure* cl) {
-  perm_gen()->object_iterate(cl);
-}
 
 HeapWord* ParallelScavengeHeap::block_start(const void* addr) const {
   if (young_gen()->is_in_reserved(addr)) {
     assert(young_gen()->is_in(addr),
            "addr should be in allocated part of young gen");