src/share/vm/runtime/sweeper.cpp

author:      never
date:        Mon, 12 Jul 2010 22:27:18 -0700
changeset:   2001 8d5934a77f10
parent:      1999 2a47bd84841f
child:       2138 d5d065957597
permissions: -rw-r--r--

6968385: malformed xml in sweeper logging
Reviewed-by: kvn

duke@435 1 /*
never@1999 2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
duke@435 25 # include "incls/_precompiled.incl"
duke@435 26 # include "incls/_sweeper.cpp.incl"
duke@435 27
duke@435 28 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed
never@1970 29 nmethod* NMethodSweeper::_current = NULL; // Current nmethod
never@1999 30 int NMethodSweeper::_seen = 0; // No. of nmethods we have processed so far in the current pass of the CodeCache
never@1999 31
never@1999 32 volatile int NMethodSweeper::_invocations = 0; // No. of invocations left until this pass is completed
never@1999 33 volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
duke@435 34
duke@435 35 jint NMethodSweeper::_locked_seen = 0;
duke@435 36 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
duke@435 37 bool NMethodSweeper::_rescan = false;
never@1893 38 bool NMethodSweeper::_do_sweep = false;
kvn@1637 39 bool NMethodSweeper::_was_full = false;
kvn@1637 40 jint NMethodSweeper::_advise_to_sweep = 0;
kvn@1637 41 jlong NMethodSweeper::_last_was_full = 0;
kvn@1637 42 uint NMethodSweeper::_highest_marked = 0;
kvn@1637 43 long NMethodSweeper::_was_full_traversal = 0;
duke@435 44
jrose@1424 45 class MarkActivationClosure: public CodeBlobClosure {
jrose@1424 46 public:
jrose@1424 47 virtual void do_code_blob(CodeBlob* cb) {
jrose@1424 48 // If we see an activation belonging to a non_entrant nmethod, we mark it.
jrose@1424 49 if (cb->is_nmethod() && ((nmethod*)cb)->is_not_entrant()) {
jrose@1424 50 ((nmethod*)cb)->mark_as_seen_on_stack();
jrose@1424 51 }
jrose@1424 52 }
jrose@1424 53 };
jrose@1424 54 static MarkActivationClosure mark_activation_closure;
jrose@1424 55
never@1893 56 void NMethodSweeper::scan_stacks() {
duke@435 57 assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
duke@435 58 if (!MethodFlushing) return;
never@1893 59 _do_sweep = true;
duke@435 60
duke@435 61 // No need to synchronize access, since this is always executed at a
duke@435 62 // safepoint. If we aren't in the middle of a scan and a rescan
never@1893 63 // hasn't been requested then just return. But if UseCodeCacheFlushing is on and
never@1893 64 // code cache flushing is in progress, don't skip sweeping, so we can make progress
never@1893 65 // clearing space in the code cache.
never@1893 66 if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
never@1893 67 _do_sweep = false;
never@1893 68 return;
never@1893 69 }
duke@435 70
duke@435 71 // Make sure the CompiledIC_lock is unlocked, since we might update some
duke@435 72 // inline caches. If it is locked, just bail out and try again later.
duke@435 73 if (CompiledIC_lock->is_locked() || Patching_lock->is_locked()) return;
duke@435 74
duke@435 75 // Check for restart
duke@435 76 assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
duke@435 77 if (_current == NULL) {
duke@435 78 _seen = 0;
duke@435 79 _invocations = NmethodSweepFraction;
never@1893 80 _current = CodeCache::first_nmethod();
duke@435 81 _traversals += 1;
duke@435 82 if (PrintMethodFlushing) {
duke@435 83 tty->print_cr("### Sweep: stack traversal %d", _traversals);
duke@435 84 }
jrose@1424 85 Threads::nmethods_do(&mark_activation_closure);
duke@435 86
duke@435 87 // reset the flags since we started a scan from the beginning.
duke@435 88 _rescan = false;
duke@435 89 _locked_seen = 0;
duke@435 90 _not_entrant_seen_on_stack = 0;
duke@435 91 }
duke@435 92
kvn@1637 93 if (UseCodeCacheFlushing) {
kvn@1637 94 if (!CodeCache::needs_flushing()) {
never@1893 95 // scan_stacks() runs during a safepoint, no race with setters
kvn@1637 96 _advise_to_sweep = 0;
kvn@1637 97 }
kvn@1637 98
kvn@1637 99 if (was_full()) {
kvn@1637 100 // There was some progress so attempt to restart the compiler
kvn@1637 101 jlong now = os::javaTimeMillis();
kvn@1637 102 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
kvn@1637 103 jlong curr_interval = now - _last_was_full;
kvn@1637 104 if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
kvn@1637 105 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
kvn@1637 106 set_was_full(false);
kvn@1637 107
kvn@1637 108 // Update the _last_was_full time so we can tell how fast the
kvn@1637 109 // code cache is filling up
kvn@1637 110 _last_was_full = os::javaTimeMillis();
kvn@1637 111
never@1999 112 log_sweep("restart_compiler");
kvn@1637 113 }
kvn@1637 114 }
kvn@1637 115 }
duke@435 116 }
duke@435 117
never@1893 118 void NMethodSweeper::possibly_sweep() {
never@1999 119 assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
never@1893 120 if ((!MethodFlushing) || (!_do_sweep)) return;
never@1893 121
never@1893 122 if (_invocations > 0) {
never@1893 123 // Only one thread at a time will sweep
never@1893 124 jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
never@1893 125 if (old != 0) {
never@1893 126 return;
never@1893 127 }
never@1999 128 if (_invocations > 0) {
never@1999 129 sweep_code_cache();
never@1999 130 _invocations--;
never@1999 131 }
never@1999 132 _sweep_started = 0;
never@1893 133 }
never@1893 134 }
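//
// Note: possibly_sweep() above relies on Atomic::cmpxchg so that at most one
// compiler thread sweeps at a time, and it re-checks _invocations after winning
// the race in case another thread already finished the pass. A minimal,
// self-contained sketch of the same gating pattern using standard C++11 atomics
// (the names below are illustrative only, not HotSpot code):
//
//   #include <atomic>
//
//   static std::atomic<int> sweep_started{0};
//   static std::atomic<int> invocations_left{0};
//
//   void possibly_sweep_sketch() {
//     if (invocations_left.load() <= 0) return;
//     int expected = 0;
//     // Only the thread that flips 0 -> 1 proceeds; everyone else returns at once.
//     if (!sweep_started.compare_exchange_strong(expected, 1)) return;
//     if (invocations_left.load() > 0) {      // re-check after winning the race
//       // ... sweep one fragment of the code cache ...
//       invocations_left.fetch_sub(1);
//     }
//     sweep_started.store(0);                 // open the gate for the next caller
//   }
//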
never@1893 135
never@1893 136 void NMethodSweeper::sweep_code_cache() {
never@1893 137 #ifdef ASSERT
never@1893 138 jlong sweep_start;
never@1999 139 if (PrintMethodFlushing) {
never@1893 140 sweep_start = os::javaTimeMillis();
never@1893 141 }
never@1893 142 #endif
never@1893 143 if (PrintMethodFlushing && Verbose) {
never@1999 144 tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
never@1893 145 }
never@1893 146
never@1999 147 // We want to visit all nmethods after NmethodSweepFraction
never@1999 148 // invocations so divide the remaining number of nmethods by the
never@1999 149 // remaining number of invocations. This is only an estimate since
never@1999 150 // the number of nmethods changes during the sweep so the final
never@1999 151 // stage must iterate until there are no more nmethods.
never@1999 152 int todo = (CodeCache::nof_nmethods() - _seen) / _invocations;
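// For example, with 3000 nmethods in the code cache, _seen == 1000 and 4
// invocations remaining, todo == (3000 - 1000) / 4 == 500 nmethods in this
// fragment of the sweep (hypothetical numbers, for illustration only).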
never@1893 153
never@1893 154 assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
never@1893 155 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1893 156
never@1893 157 {
never@1893 158 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1893 159
never@1999 160 // The last invocation iterates until there are no more nmethods
never@1999 161 for (int i = 0; (i < todo || _invocations == 1) && _current != NULL; i++) {
never@1893 162
never@1999 163 // Since we will give up the CodeCache_lock, always skip ahead
never@1999 164 // to the next nmethod. Other blobs can be deleted by other
never@1999 165 // threads but nmethods are only reclaimed by the sweeper.
never@1970 166 nmethod* next = CodeCache::next_nmethod(_current);
never@1893 167
never@1893 168 // Now ready to process nmethod and give up CodeCache_lock
never@1893 169 {
never@1893 170 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
never@1970 171 process_nmethod(_current);
never@1893 172 }
never@1893 173 _seen++;
never@1893 174 _current = next;
never@1893 175 }
never@1893 176 }
never@1893 177
never@1999 178 assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
never@1999 179
never@1893 180 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
never@1893 181 // We've completed a scan without making progress, but there were
never@1893 182 // nmethods we were unable to process either because they were
never@1893 183 // locked or were still on the stack. We don't have to aggressively
never@1893 184 // clean them up so just stop scanning. We could scan once more
never@1893 185 // but that complicates the control logic and it's unlikely to
never@1893 186 // matter much.
never@1893 187 if (PrintMethodFlushing) {
never@1893 188 tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
never@1893 189 }
never@1893 190 }
never@1893 191
never@1893 192 #ifdef ASSERT
never@1893 193 if (PrintMethodFlushing) {
never@1893 194 jlong sweep_end = os::javaTimeMillis();
never@1893 195 tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
never@1893 196 }
never@1893 197 #endif
never@1999 198
never@1999 199 if (_invocations == 1) {
never@1999 200 log_sweep("finished");
never@1999 201 }
never@1893 202 }
never@1893 203
duke@435 204
duke@435 205 void NMethodSweeper::process_nmethod(nmethod *nm) {
never@1893 206 assert(!CodeCache_lock->owned_by_self(), "just checking");
never@1893 207
duke@435 208 // Skip methods that are currently referenced by the VM
duke@435 209 if (nm->is_locked_by_vm()) {
duke@435 210 // But still remember to clean-up inline caches for alive nmethods
duke@435 211 if (nm->is_alive()) {
duke@435 212 // Clean up all inline caches that point to zombie/not-entrant methods
never@1893 213 MutexLocker cl(CompiledIC_lock);
duke@435 214 nm->cleanup_inline_caches();
duke@435 215 } else {
duke@435 216 _locked_seen++;
duke@435 217 }
duke@435 218 return;
duke@435 219 }
duke@435 220
duke@435 221 if (nm->is_zombie()) {
duke@435 222 // If this is the first time we see this nmethod, we mark it. Otherwise,
duke@435 223 // we reclaim it. When we have seen a zombie method twice, we know that
never@1999 224 // there are no inline caches that refer to it.
duke@435 225 if (nm->is_marked_for_reclamation()) {
duke@435 226 assert(!nm->is_locked_by_vm(), "must not flush locked nmethods");
ysr@1376 227 if (PrintMethodFlushing && Verbose) {
kvn@1637 228 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
ysr@1376 229 }
never@1893 230 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
duke@435 231 nm->flush();
duke@435 232 } else {
ysr@1376 233 if (PrintMethodFlushing && Verbose) {
kvn@1637 234 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
ysr@1376 235 }
duke@435 236 nm->mark_for_reclamation();
duke@435 237 _rescan = true;
duke@435 238 }
duke@435 239 } else if (nm->is_not_entrant()) {
duke@435 240 // If there are no current activations of this method on the
duke@435 241 // stack we can safely convert it to a zombie method
duke@435 242 if (nm->can_not_entrant_be_converted()) {
ysr@1376 243 if (PrintMethodFlushing && Verbose) {
kvn@1637 244 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
ysr@1376 245 }
duke@435 246 nm->make_zombie();
duke@435 247 _rescan = true;
duke@435 248 } else {
duke@435 249 // Still alive, clean up its inline caches
never@1893 250 MutexLocker cl(CompiledIC_lock);
duke@435 251 nm->cleanup_inline_caches();
duke@435 252 // We couldn't transition this nmethod so don't immediately
duke@435 253 // request a rescan. If this method stays on the stack for a
never@1893 254 // long time we don't want to keep rescanning the code cache.
duke@435 255 _not_entrant_seen_on_stack++;
duke@435 256 }
duke@435 257 } else if (nm->is_unloaded()) {
duke@435 258 // Unloaded code, just make it a zombie
ysr@1376 259 if (PrintMethodFlushing && Verbose)
kvn@1637 260 tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
ysr@1376 261 if (nm->is_osr_method()) {
duke@435 262 // No inline caches will ever point to osr methods, so we can just remove it
never@1893 263 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
duke@435 264 nm->flush();
duke@435 265 } else {
duke@435 266 nm->make_zombie();
duke@435 267 _rescan = true;
duke@435 268 }
duke@435 269 } else {
duke@435 270 assert(nm->is_alive(), "should be alive");
kvn@1637 271
kvn@1637 272 if (UseCodeCacheFlushing) {
kvn@1637 273 if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
kvn@1637 274 (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
kvn@1637 275 CodeCache::needs_flushing()) {
kvn@1637 276 // This method has not been called since the forced cleanup happened
kvn@1637 277 nm->make_not_entrant();
kvn@1637 278 }
kvn@1637 279 }
kvn@1637 280
duke@435 281 // Clean up all inline caches that point to zombie/not-entrant methods
never@1893 282 MutexLocker cl(CompiledIC_lock);
duke@435 283 nm->cleanup_inline_caches();
duke@435 284 }
duke@435 285 }
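//
// To summarize the transitions driven above: a not_entrant nmethod with no
// activations left on any stack is made a zombie; an unloaded nmethod is made a
// zombie as well (or flushed immediately if it is an OSR method, since no inline
// cache can point to it); a zombie seen for the first time is marked for
// reclamation, giving inline caches one more pass to stop referring to it; a
// zombie that is already marked is flushed from the code cache. Locked and
// still-on-stack nmethods only get their inline caches cleaned and are revisited
// on a later traversal.
//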
kvn@1637 286
kvn@1637 287 // Code cache unloading: when compilers notice the code cache is getting full,
kvn@1637 288 // they will call a vm op that comes here. This code attempts to speculatively
kvn@1637 289 // unload the oldest half of the nmethods (based on the compile job id) by
kvn@1637 290 // saving the old code in a list in the CodeCache. Then
never@1893 291 // execution resumes. If a method so marked is not called by the second sweeper
never@1893 292 // stack traversal after the current one, the nmethod will be made not-entrant and
kvn@1637 293 // removed by normal sweeping. If the method is called, the methodOop's
kvn@1637 294 // _code field is restored and the methodOop/nmethod
kvn@1637 295 // go back to their normal state.
kvn@1637 296 void NMethodSweeper::handle_full_code_cache(bool is_full) {
kvn@1637 297 // Only the first one to notice can advise us to start early cleaning
kvn@1637 298 if (!is_full){
kvn@1637 299 jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
kvn@1637 300 if (old != 0) {
kvn@1637 301 return;
kvn@1637 302 }
kvn@1637 303 }
kvn@1637 304
kvn@1637 305 if (is_full) {
kvn@1637 306 // Since code cache is full, immediately stop new compiles
kvn@1637 307 bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
kvn@1637 308 if (!did_set) {
kvn@1637 309 // only the first to notice can start the cleaning,
kvn@1637 310 // others will go back and block
kvn@1637 311 return;
kvn@1637 312 }
kvn@1637 313 set_was_full(true);
kvn@1637 314
kvn@1637 315 // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
kvn@1637 316 jlong now = os::javaTimeMillis();
kvn@1637 317 jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
kvn@1637 318 jlong curr_interval = now - _last_was_full;
kvn@1637 319 if (curr_interval < max_interval) {
kvn@1637 320 _rescan = true;
never@1999 321 log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
never@1999 322 curr_interval/1000);
kvn@1637 323 return;
kvn@1637 324 }
kvn@1637 325 }
kvn@1637 326
kvn@1637 327 VM_HandleFullCodeCache op(is_full);
kvn@1637 328 VMThread::execute(&op);
kvn@1637 329
kvn@1637 330 // rescan again as soon as possible
kvn@1637 331 _rescan = true;
kvn@1637 332 }
kvn@1637 333
kvn@1637 334 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
kvn@1637 335 // If there was a race in detecting a full code cache, only run
kvn@1637 336 // one vm op for it or keep the compiler shut off
kvn@1637 337
kvn@1637 338 debug_only(jlong start = os::javaTimeMillis();)
kvn@1637 339
kvn@1637 340 if ((!was_full()) && (is_full)) {
kvn@1637 341 if (!CodeCache::needs_flushing()) {
never@1999 342 log_sweep("restart_compiler");
kvn@1637 343 CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
kvn@1637 344 return;
kvn@1637 345 }
kvn@1637 346 }
kvn@1637 347
kvn@1637 348 // Traverse the code cache trying to dump the oldest nmethods
kvn@1637 349 uint curr_max_comp_id = CompileBroker::get_compilation_id();
kvn@1637 350 uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
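// flush_target is the midpoint between _highest_marked and the newest compile id;
// e.g. _highest_marked == 1000 and curr_max_comp_id == 2000 give flush_target ==
// 1500, so only the older half (compile id < 1500) is considered for disconnection
// (hypothetical numbers, for illustration only).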
never@1999 351 log_sweep("start_cleaning");
kvn@1637 352
kvn@1637 353 nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
kvn@1637 354 jint disconnected = 0;
kvn@1637 355 jint made_not_entrant = 0;
kvn@1637 356 while ((nm != NULL)){
kvn@1637 357 uint curr_comp_id = nm->compile_id();
kvn@1637 358
kvn@1637 359 // OSR methods cannot be flushed like this. Also, don't flush native methods
kvn@1637 360 // since they are part of the JDK in most cases
kvn@1637 361 if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
kvn@1637 362 (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
kvn@1637 363
kvn@1637 364 if ((nm->method()->code() == nm)) {
kvn@1637 365 // This method has not been previously considered for
kvn@1637 366 // unloading or it was restored already
kvn@1637 367 CodeCache::speculatively_disconnect(nm);
kvn@1637 368 disconnected++;
kvn@1637 369 } else if (nm->is_speculatively_disconnected()) {
kvn@1637 370 // This method was previously considered for preemptive unloading and has not been called since then
kvn@1637 371 nm->method()->invocation_counter()->decay();
kvn@1637 372 nm->method()->backedge_counter()->decay();
kvn@1637 373 nm->make_not_entrant();
kvn@1637 374 made_not_entrant++;
kvn@1637 375 }
kvn@1637 376
kvn@1637 377 if (curr_comp_id > _highest_marked) {
kvn@1637 378 _highest_marked = curr_comp_id;
kvn@1637 379 }
kvn@1637 380 }
kvn@1637 381 nm = CodeCache::alive_nmethod(CodeCache::next(nm));
kvn@1637 382 }
kvn@1637 383
never@1999 384 log_sweep("stop_cleaning",
never@1999 385 "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
never@1999 386 disconnected, made_not_entrant);
kvn@1637 387
never@1893 388 // Shut off the compiler. The sweeper will start over with a new stack scan and
never@1893 389 // traversal cycle and turn it back on if it clears enough space.
kvn@1637 390 if (was_full()) {
kvn@1637 391 _last_was_full = os::javaTimeMillis();
kvn@1637 392 CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
kvn@1637 393 }
kvn@1637 394
kvn@1637 395 // After two more traversals the sweeper will get rid of unrestored nmethods
kvn@1637 396 _was_full_traversal = _traversals;
kvn@1637 397 #ifdef ASSERT
kvn@1637 398 jlong end = os::javaTimeMillis();
kvn@1637 399 if (PrintMethodFlushing && Verbose) {
kvn@1637 400 tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
kvn@1637 401 }
kvn@1637 402 #endif
kvn@1637 403 }
never@1999 404
never@1999 405
never@1999 406 // Print out some state information about the current sweep and the
never@1999 407 // state of the code cache, if requested.
never@1999 408 void NMethodSweeper::log_sweep(const char* msg, const char* format, ...) {
never@1999 409 if (PrintMethodFlushing) {
never@1999 410 ttyLocker ttyl;
never@1999 411 tty->print("### sweeper: %s ", msg);
never@1999 412 if (format != NULL) {
never@1999 413 va_list ap;
never@1999 414 va_start(ap, format);
never@1999 415 tty->vprint(format, ap);
never@1999 416 va_end(ap);
never@1999 417 }
never@1999 418 tty->print_cr(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
never@1999 419 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
never@1999 420 CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
never@1999 421 }
never@1999 422
never@1999 423 if (LogCompilation && (xtty != NULL)) {
never@1999 424 ttyLocker ttyl;
never@2001 425 xtty->begin_elem("sweeper state='%s' traversals='" INTX_FORMAT "' ", msg, (intx)traversal_count());
never@1999 426 if (format != NULL) {
never@1999 427 va_list ap;
never@1999 428 va_start(ap, format);
never@1999 429 xtty->vprint(format, ap);
never@1999 430 va_end(ap);
never@1999 431 }
never@1999 432 xtty->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
never@1999 433 " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
never@1999 434 CodeCache::nof_blobs(), CodeCache::nof_nmethods(), CodeCache::nof_adapters(), CodeCache::unallocated_capacity());
never@1999 435 xtty->stamp();
never@1999 436 xtty->end_elem();
never@1999 437 }
never@1999 438 }
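//
// Note: log_sweep() above forwards its optional attribute string to both tty and
// xtty using the usual va_list pattern (va_start / vprint / va_end). A minimal,
// self-contained sketch of that forwarding technique in standard C++ (log_attrs
// below is a hypothetical helper for illustration, not part of HotSpot):
//
//   #include <cstdarg>
//   #include <cstdio>
//
//   static void log_attrs(const char* msg, const char* format, ...) {
//     std::printf("### sweeper: %s ", msg);
//     if (format != NULL) {
//       va_list ap;
//       va_start(ap, format);
//       std::vprintf(format, ap);   // consume the caller's arguments exactly once
//       va_end(ap);
//     }
//     std::printf("\n");
//   }
//
//   // Usage: log_attrs("disable_compiler", "flushing_interval='%u'", 12u);
//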
