1799 } else { |
1799 } else { |
1800 warning("Unable to open cgroup memory limit file %s (%s)", lim_file, strerror(errno)); |
1800 warning("Unable to open cgroup memory limit file %s (%s)", lim_file, strerror(errno)); |
1801 } |
1801 } |
1802 } |
1802 } |
1803 |
1803 |
|
1804 // Convert Fraction to Percentage values |
|
1805 if (FLAG_IS_DEFAULT(MaxRAMPercentage) && |
|
1806 !FLAG_IS_DEFAULT(MaxRAMFraction)) |
|
1807 MaxRAMPercentage = 100.0 / MaxRAMFraction; |
|
1808 |
|
1809 if (FLAG_IS_DEFAULT(MinRAMPercentage) && |
|
1810 !FLAG_IS_DEFAULT(MinRAMFraction)) |
|
1811 MinRAMPercentage = 100.0 / MinRAMFraction; |
|
1812 |
|
1813 if (FLAG_IS_DEFAULT(InitialRAMPercentage) && |
|
1814 !FLAG_IS_DEFAULT(InitialRAMFraction)) |
|
1815 InitialRAMPercentage = 100.0 / InitialRAMFraction; |
|
1816 |
1804 // If the maximum heap size has not been set with -Xmx, |
1817 // If the maximum heap size has not been set with -Xmx, |
1805 // then set it as fraction of the size of physical memory, |
1818 // then set it as fraction of the size of physical memory, |
1806 // respecting the maximum and minimum sizes of the heap. |
1819 // respecting the maximum and minimum sizes of the heap. |
1807 if (FLAG_IS_DEFAULT(MaxHeapSize)) { |
1820 if (FLAG_IS_DEFAULT(MaxHeapSize)) { |
1808 julong reasonable_max = phys_mem / MaxRAMFraction; |
1821 julong reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100); |
1809 |
1822 const julong reasonable_min = (julong)((phys_mem * MinRAMPercentage) / 100); |
1810 if (phys_mem <= MaxHeapSize * MinRAMFraction) { |
1823 if (reasonable_min < MaxHeapSize) { |
1811 // Small physical memory, so use a minimum fraction of it for the heap |
1824 // Small physical memory, so use a minimum fraction of it for the heap |
1812 reasonable_max = phys_mem / MinRAMFraction; |
1825 reasonable_max = reasonable_min; |
1813 } else { |
1826 } else { |
1814 // Not-small physical memory, so require a heap at least |
1827 // Not-small physical memory, so require a heap at least |
1815 // as large as MaxHeapSize |
1828 // as large as MaxHeapSize |
1816 reasonable_max = MAX2(reasonable_max, (julong)MaxHeapSize); |
1829 reasonable_max = MAX2(reasonable_max, (julong)MaxHeapSize); |
1817 } |
1830 } |
|
1831 |
1818 if (!FLAG_IS_DEFAULT(ErgoHeapSizeLimit) && ErgoHeapSizeLimit != 0) { |
1832 if (!FLAG_IS_DEFAULT(ErgoHeapSizeLimit) && ErgoHeapSizeLimit != 0) { |
1819 // Limit the heap size to ErgoHeapSizeLimit |
1833 // Limit the heap size to ErgoHeapSizeLimit |
1820 reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit); |
1834 reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit); |
1821 } |
1835 } |
1822 if (UseCompressedOops) { |
1836 if (UseCompressedOops) { |
1877 // Cannot use gclog_or_tty yet. |
1891 // Cannot use gclog_or_tty yet. |
1878 tty->print_cr(" Minimum heap size " SIZE_FORMAT, min_heap_size()); |
1892 tty->print_cr(" Minimum heap size " SIZE_FORMAT, min_heap_size()); |
1879 } |
1893 } |
1880 } |
1894 } |
1881 } |
1895 } |
|
1896 } |
|
1897 |
|
1898 // This option inspects the machine and attempts to set various |
|
1899 // parameters to be optimal for long-running, memory allocation |
|
1900 // intensive jobs. It is intended for machines with large |
|
1901 // amounts of cpu and memory. |
|
1902 jint Arguments::set_aggressive_heap_flags() { |
|
1903 // initHeapSize is needed since _initial_heap_size is 4 bytes on a 32 bit |
|
1904 // VM, but we may not be able to represent the total physical memory |
|
1905 // available (like having 8gb of memory on a box but using a 32bit VM). |
|
1906 // Thus, we need to make sure we're using a julong for intermediate |
|
1907 // calculations. |
|
1908 julong initHeapSize; |
|
1909 julong total_memory = os::physical_memory(); |
|
1910 |
|
1911 if (total_memory < (julong) 256 * M) { |
|
1912 jio_fprintf(defaultStream::error_stream(), |
|
1913 "You need at least 256mb of memory to use -XX:+AggressiveHeap\n"); |
|
1914 vm_exit(1); |
|
1915 } |
|
1916 |
|
1917 // The heap size is half of available memory, or (at most) |
|
1918 // all of possible memory less 160mb (leaving room for the OS |
|
1919 // when using ISM). This is the maximum; because adaptive sizing |
|
1920 // is turned on below, the actual space used may be smaller. |
|
1921 |
|
1922 initHeapSize = MIN2(total_memory / (julong) 2, |
|
1923 total_memory - (julong) 160 * M); |
|
1924 |
|
1925 initHeapSize = limit_by_allocatable_memory(initHeapSize); |
|
1926 |
|
1927 if (FLAG_IS_DEFAULT(MaxHeapSize)) { |
|
1928 FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize); |
|
1929 FLAG_SET_CMDLINE(uintx, InitialHeapSize, initHeapSize); |
|
1930 // Currently the minimum size and the initial heap sizes are the same. |
|
1931 set_min_heap_size(initHeapSize); |
|
1932 } |
|
1933 if (FLAG_IS_DEFAULT(NewSize)) { |
|
1934 // Make the young generation 3/8ths of the total heap. |
|
1935 FLAG_SET_CMDLINE(uintx, NewSize, |
|
1936 ((julong) MaxHeapSize / (julong) 8) * (julong) 3); |
|
1937 FLAG_SET_CMDLINE(uintx, MaxNewSize, NewSize); |
|
1938 } |
|
1939 |
|
1940 #ifndef _ALLBSD_SOURCE // UseLargePages is not yet supported on BSD. |
|
1941 FLAG_SET_DEFAULT(UseLargePages, true); |
|
1942 #endif |
|
1943 |
|
1944 // Increase some data structure sizes for efficiency |
|
1945 FLAG_SET_CMDLINE(uintx, BaseFootPrintEstimate, MaxHeapSize); |
|
1946 FLAG_SET_CMDLINE(bool, ResizeTLAB, false); |
|
1947 FLAG_SET_CMDLINE(uintx, TLABSize, 256 * K); |
|
1948 |
|
1949 // See the OldPLABSize comment below, but replace 'after promotion' |
|
1950 // with 'after copying'. YoungPLABSize is the size of the survivor |
|
1951 // space per-gc-thread buffers. The default is 4kw. |
|
1952 FLAG_SET_CMDLINE(uintx, YoungPLABSize, 256 * K); // Note: this is in words |
|
1953 |
|
1954 // OldPLABSize is the size of the buffers in the old gen that |
|
1955 // UseParallelGC uses to promote live data that doesn't fit in the |
|
1956 // survivor spaces. At any given time, there's one for each gc thread. |
|
1957 // The default size is 1kw. These buffers are rarely used, since the |
|
1958 // survivor spaces are usually big enough. For specjbb, however, there |
|
1959 // are occasions when there's lots of live data in the young gen |
|
1960 // and we end up promoting some of it. We don't have a definite |
|
1961 // explanation for why bumping OldPLABSize helps, but the theory |
|
1962 // is that a bigger PLAB results in retaining something like the |
|
1963 // original allocation order after promotion, which improves mutator |
|
1964 // locality. A minor effect may be that larger PLABs reduce the |
|
1965 // number of PLAB allocation events during gc. The value of 8kw |
|
1966 // was arrived at by experimenting with specjbb. |
|
1967 FLAG_SET_CMDLINE(uintx, OldPLABSize, 8 * K); // Note: this is in words |
|
1968 |
|
1969 // Enable parallel GC and adaptive generation sizing |
|
1970 FLAG_SET_CMDLINE(bool, UseParallelGC, true); |
|
1971 |
|
1972 // Encourage steady state memory management |
|
1973 FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100); |
|
1974 |
|
1975 // This appears to improve mutator locality |
|
1976 FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false); |
|
1977 |
|
1978 // Get around early Solaris scheduling bug |
|
1979 // (affinity vs other jobs on system) |
|
1980 // but disallow DR and offlining (5008695). |
|
1981 FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true); |
|
1982 |
|
1983 return JNI_OK; |
1882 } |
1984 } |
1883 |
1985 |
1884 // This must be called after ergonomics because we want bytecode rewriting |
1986 // This must be called after ergonomics because we want bytecode rewriting |
1885 // if the server compiler is used, or if UseSharedSpaces is disabled. |
1987 // if the server compiler is used, or if UseSharedSpaces is disabled. |
1886 void Arguments::set_bytecode_flags() { |
1988 void Arguments::set_bytecode_flags() { |
3115 _vfprintf_hook = CAST_TO_FN_PTR(vfprintf_hook_t, option->extraInfo); |
3225 _vfprintf_hook = CAST_TO_FN_PTR(vfprintf_hook_t, option->extraInfo); |
3116 } else if (match_option(option, "exit", &tail)) { |
3226 } else if (match_option(option, "exit", &tail)) { |
3117 _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo); |
3227 _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo); |
3118 } else if (match_option(option, "abort", &tail)) { |
3228 } else if (match_option(option, "abort", &tail)) { |
3119 _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo); |
3229 _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo); |
3120 // -XX:+AggressiveHeap |
|
3121 } else if (match_option(option, "-XX:+AggressiveHeap", &tail)) { |
|
3122 |
|
3123 // This option inspects the machine and attempts to set various |
|
3124 // parameters to be optimal for long-running, memory allocation |
|
3125 // intensive jobs. It is intended for machines with large |
|
3126 // amounts of cpu and memory. |
|
3127 |
|
3128 // initHeapSize is needed since _initial_heap_size is 4 bytes on a 32 bit |
|
3129 // VM, but we may not be able to represent the total physical memory |
|
3130 // available (like having 8gb of memory on a box but using a 32bit VM). |
|
3131 // Thus, we need to make sure we're using a julong for intermediate |
|
3132 // calculations. |
|
3133 julong initHeapSize; |
|
3134 julong total_memory = os::physical_memory(); |
|
3135 |
|
3136 if (total_memory < (julong)256*M) { |
|
3137 jio_fprintf(defaultStream::error_stream(), |
|
3138 "You need at least 256mb of memory to use -XX:+AggressiveHeap\n"); |
|
3139 vm_exit(1); |
|
3140 } |
|
3141 |
|
3142 // The heap size is half of available memory, or (at most) |
|
3143 // all of possible memory less 160mb (leaving room for the OS |
|
3144 // when using ISM). This is the maximum; because adaptive sizing |
|
3145 // is turned on below, the actual space used may be smaller. |
|
3146 |
|
3147 initHeapSize = MIN2(total_memory / (julong)2, |
|
3148 total_memory - (julong)160*M); |
|
3149 |
|
3150 initHeapSize = limit_by_allocatable_memory(initHeapSize); |
|
3151 |
|
3152 if (FLAG_IS_DEFAULT(MaxHeapSize)) { |
|
3153 FLAG_SET_CMDLINE(uintx, MaxHeapSize, initHeapSize); |
|
3154 FLAG_SET_CMDLINE(uintx, InitialHeapSize, initHeapSize); |
|
3155 // Currently the minimum size and the initial heap sizes are the same. |
|
3156 set_min_heap_size(initHeapSize); |
|
3157 } |
|
3158 if (FLAG_IS_DEFAULT(NewSize)) { |
|
3159 // Make the young generation 3/8ths of the total heap. |
|
3160 FLAG_SET_CMDLINE(uintx, NewSize, |
|
3161 ((julong)MaxHeapSize / (julong)8) * (julong)3); |
|
3162 FLAG_SET_CMDLINE(uintx, MaxNewSize, NewSize); |
|
3163 } |
|
3164 |
|
3165 #ifndef _ALLBSD_SOURCE // UseLargePages is not yet supported on BSD. |
|
3166 FLAG_SET_DEFAULT(UseLargePages, true); |
|
3167 #endif |
|
3168 |
|
3169 // Increase some data structure sizes for efficiency |
|
3170 FLAG_SET_CMDLINE(uintx, BaseFootPrintEstimate, MaxHeapSize); |
|
3171 FLAG_SET_CMDLINE(bool, ResizeTLAB, false); |
|
3172 FLAG_SET_CMDLINE(uintx, TLABSize, 256*K); |
|
3173 |
|
3174 // See the OldPLABSize comment below, but replace 'after promotion' |
|
3175 // with 'after copying'. YoungPLABSize is the size of the survivor |
|
3176 // space per-gc-thread buffers. The default is 4kw. |
|
3177 FLAG_SET_CMDLINE(uintx, YoungPLABSize, 256*K); // Note: this is in words |
|
3178 |
|
3179 // OldPLABSize is the size of the buffers in the old gen that |
|
3180 // UseParallelGC uses to promote live data that doesn't fit in the |
|
3181 // survivor spaces. At any given time, there's one for each gc thread. |
|
3182 // The default size is 1kw. These buffers are rarely used, since the |
|
3183 // survivor spaces are usually big enough. For specjbb, however, there |
|
3184 // are occasions when there's lots of live data in the young gen |
|
3185 // and we end up promoting some of it. We don't have a definite |
|
3186 // explanation for why bumping OldPLABSize helps, but the theory |
|
3187 // is that a bigger PLAB results in retaining something like the |
|
3188 // original allocation order after promotion, which improves mutator |
|
3189 // locality. A minor effect may be that larger PLABs reduce the |
|
3190 // number of PLAB allocation events during gc. The value of 8kw |
|
3191 // was arrived at by experimenting with specjbb. |
|
3192 FLAG_SET_CMDLINE(uintx, OldPLABSize, 8*K); // Note: this is in words |
|
3193 |
|
3194 // Enable parallel GC and adaptive generation sizing |
|
3195 FLAG_SET_CMDLINE(bool, UseParallelGC, true); |
|
3196 |
|
3197 // Encourage steady state memory management |
|
3198 FLAG_SET_CMDLINE(uintx, ThresholdTolerance, 100); |
|
3199 |
|
3200 // This appears to improve mutator locality |
|
3201 FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false); |
|
3202 |
|
3203 // Get around early Solaris scheduling bug |
|
3204 // (affinity vs other jobs on system) |
|
3205 // but disallow DR and offlining (5008695). |
|
3206 FLAG_SET_CMDLINE(bool, BindGCTaskThreadsToCPUs, true); |
|
3207 |
|
3208 } else if (match_option(option, "-XX:+NeverTenure", &tail)) { |
3230 } else if (match_option(option, "-XX:+NeverTenure", &tail)) { |
3209 // The last option must always win. |
3231 // The last option must always win. |
3210 FLAG_SET_CMDLINE(bool, AlwaysTenure, false); |
3232 FLAG_SET_CMDLINE(bool, AlwaysTenure, false); |
3211 FLAG_SET_CMDLINE(bool, NeverTenure, true); |
3233 FLAG_SET_CMDLINE(bool, NeverTenure, true); |
3212 } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) { |
3234 } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) { |