1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Apr 11 09:56:35 2008 -0400 1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Sun Apr 13 17:43:42 2008 -0400 1.3 @@ -1226,7 +1226,7 @@ 1.4 return NULL; 1.5 } 1.6 1.7 -oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size, oop* ref) { 1.8 +oop ConcurrentMarkSweepGeneration::promote(oop obj, size_t obj_size) { 1.9 assert(obj_size == (size_t)obj->size(), "bad obj_size passed in"); 1.10 // allocate, copy and if necessary update promoinfo -- 1.11 // delegate to underlying space. 1.12 @@ -1238,7 +1238,7 @@ 1.13 } 1.14 #endif // #ifndef PRODUCT 1.15 1.16 - oop res = _cmsSpace->promote(obj, obj_size, ref); 1.17 + oop res = _cmsSpace->promote(obj, obj_size); 1.18 if (res == NULL) { 1.19 // expand and retry 1.20 size_t s = _cmsSpace->expansionSpaceRequired(obj_size); // HeapWords 1.21 @@ -1249,7 +1249,7 @@ 1.22 assert(next_gen() == NULL, "assumption, based upon which no attempt " 1.23 "is made to pass on a possibly failing " 1.24 "promotion to next generation"); 1.25 - res = _cmsSpace->promote(obj, obj_size, ref); 1.26 + res = _cmsSpace->promote(obj, obj_size); 1.27 } 1.28 if (res != NULL) { 1.29 // See comment in allocate() about when objects should 1.30 @@ -3922,13 +3922,15 @@ 1.31 } 1.32 1.33 class Par_ConcMarkingClosure: public OopClosure { 1.34 + private: 1.35 CMSCollector* _collector; 1.36 MemRegion _span; 1.37 CMSBitMap* _bit_map; 1.38 CMSMarkStack* _overflow_stack; 1.39 CMSMarkStack* _revisit_stack; // XXXXXX Check proper use 1.40 OopTaskQueue* _work_queue; 1.41 - 1.42 + protected: 1.43 + DO_OOP_WORK_DEFN 1.44 public: 1.45 Par_ConcMarkingClosure(CMSCollector* collector, OopTaskQueue* work_queue, 1.46 CMSBitMap* bit_map, CMSMarkStack* overflow_stack): 1.47 @@ -3937,8 +3939,8 @@ 1.48 _work_queue(work_queue), 1.49 _bit_map(bit_map), 1.50 _overflow_stack(overflow_stack) { } // need to initialize revisit stack etc. 
1.51 - 1.52 - void do_oop(oop* p); 1.53 + virtual void do_oop(oop* p); 1.54 + virtual void do_oop(narrowOop* p); 1.55 void trim_queue(size_t max); 1.56 void handle_stack_overflow(HeapWord* lost); 1.57 }; 1.58 @@ -3947,11 +3949,9 @@ 1.59 // the salient assumption here is that stolen oops must 1.60 // always be initialized, so we do not need to check for 1.61 // uninitialized objects before scanning here. 1.62 -void Par_ConcMarkingClosure::do_oop(oop* p) { 1.63 - oop this_oop = *p; 1.64 - assert(this_oop->is_oop_or_null(), 1.65 - "expected an oop or NULL"); 1.66 - HeapWord* addr = (HeapWord*)this_oop; 1.67 +void Par_ConcMarkingClosure::do_oop(oop obj) { 1.68 + assert(obj->is_oop_or_null(), "expected an oop or NULL"); 1.69 + HeapWord* addr = (HeapWord*)obj; 1.70 // Check if oop points into the CMS generation 1.71 // and is not marked 1.72 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 1.73 @@ -3970,7 +3970,7 @@ 1.74 } 1.75 ) 1.76 if (simulate_overflow || 1.77 - !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) { 1.78 + !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { 1.79 // stack overflow 1.80 if (PrintCMSStatistics != 0) { 1.81 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 1.82 @@ -3987,6 +3987,9 @@ 1.83 } 1.84 } 1.85 1.86 +void Par_ConcMarkingClosure::do_oop(oop* p) { Par_ConcMarkingClosure::do_oop_work(p); } 1.87 +void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); } 1.88 + 1.89 void Par_ConcMarkingClosure::trim_queue(size_t max) { 1.90 while (_work_queue->size() > max) { 1.91 oop new_oop; 1.92 @@ -4086,8 +4089,8 @@ 1.93 // 1.94 // Tony 2006.06.29 1.95 for (unsigned i = 0; i < CMSCoordinatorYieldSleepCount && 1.96 - ConcurrentMarkSweepThread::should_yield() && 1.97 - !CMSCollector::foregroundGCIsActive(); ++i) { 1.98 + ConcurrentMarkSweepThread::should_yield() && 1.99 + !CMSCollector::foregroundGCIsActive(); ++i) { 1.100 os::sleep(Thread::current(), 1, false); 
1.101 ConcurrentMarkSweepThread::acknowledge_yield_request(); 1.102 } 1.103 @@ -6048,8 +6051,8 @@ 1.104 1.105 // See the comment in coordinator_yield() 1.106 for (unsigned i = 0; i < CMSYieldSleepCount && 1.107 - ConcurrentMarkSweepThread::should_yield() && 1.108 - !CMSCollector::foregroundGCIsActive(); ++i) { 1.109 + ConcurrentMarkSweepThread::should_yield() && 1.110 + !CMSCollector::foregroundGCIsActive(); ++i) { 1.111 os::sleep(Thread::current(), 1, false); 1.112 ConcurrentMarkSweepThread::acknowledge_yield_request(); 1.113 } 1.114 @@ -6362,18 +6365,18 @@ 1.115 assert(_bitMap->covers(_span), "_bitMap/_span mismatch"); 1.116 } 1.117 1.118 -void MarkRefsIntoClosure::do_oop(oop* p) { 1.119 +void MarkRefsIntoClosure::do_oop(oop obj) { 1.120 // if p points into _span, then mark corresponding bit in _markBitMap 1.121 - oop thisOop = *p; 1.122 - if (thisOop != NULL) { 1.123 - assert(thisOop->is_oop(), "expected an oop"); 1.124 - HeapWord* addr = (HeapWord*)thisOop; 1.125 - if (_span.contains(addr)) { 1.126 - // this should be made more efficient 1.127 - _bitMap->mark(addr); 1.128 - } 1.129 - } 1.130 -} 1.131 + assert(obj->is_oop(), "expected an oop"); 1.132 + HeapWord* addr = (HeapWord*)obj; 1.133 + if (_span.contains(addr)) { 1.134 + // this should be made more efficient 1.135 + _bitMap->mark(addr); 1.136 + } 1.137 +} 1.138 + 1.139 +void MarkRefsIntoClosure::do_oop(oop* p) { MarkRefsIntoClosure::do_oop_work(p); } 1.140 +void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); } 1.141 1.142 // A variant of the above, used for CMS marking verification. 
1.143 MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure( 1.144 @@ -6387,22 +6390,22 @@ 1.145 assert(_verification_bm->covers(_span), "_verification_bm/_span mismatch"); 1.146 } 1.147 1.148 -void MarkRefsIntoVerifyClosure::do_oop(oop* p) { 1.149 +void MarkRefsIntoVerifyClosure::do_oop(oop obj) { 1.150 // if p points into _span, then mark corresponding bit in _markBitMap 1.151 - oop this_oop = *p; 1.152 - if (this_oop != NULL) { 1.153 - assert(this_oop->is_oop(), "expected an oop"); 1.154 - HeapWord* addr = (HeapWord*)this_oop; 1.155 - if (_span.contains(addr)) { 1.156 - _verification_bm->mark(addr); 1.157 - if (!_cms_bm->isMarked(addr)) { 1.158 - oop(addr)->print(); 1.159 - gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); 1.160 - fatal("... aborting"); 1.161 - } 1.162 - } 1.163 - } 1.164 -} 1.165 + assert(obj->is_oop(), "expected an oop"); 1.166 + HeapWord* addr = (HeapWord*)obj; 1.167 + if (_span.contains(addr)) { 1.168 + _verification_bm->mark(addr); 1.169 + if (!_cms_bm->isMarked(addr)) { 1.170 + oop(addr)->print(); 1.171 + gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", addr); 1.172 + fatal("... aborting"); 1.173 + } 1.174 + } 1.175 +} 1.176 + 1.177 +void MarkRefsIntoVerifyClosure::do_oop(oop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } 1.178 +void MarkRefsIntoVerifyClosure::do_oop(narrowOop* p) { MarkRefsIntoVerifyClosure::do_oop_work(p); } 1.179 1.180 ////////////////////////////////////////////////// 1.181 // MarkRefsIntoAndScanClosure 1.182 @@ -6438,13 +6441,13 @@ 1.183 // The marks are made in the marking bit map and the marking stack is 1.184 // used for keeping the (newly) grey objects during the scan. 1.185 // The parallel version (Par_...) appears further below. 
1.186 -void MarkRefsIntoAndScanClosure::do_oop(oop* p) { 1.187 - oop this_oop = *p; 1.188 - if (this_oop != NULL) { 1.189 - assert(this_oop->is_oop(), "expected an oop"); 1.190 - HeapWord* addr = (HeapWord*)this_oop; 1.191 - assert(_mark_stack->isEmpty(), "post-condition (eager drainage)"); 1.192 - assert(_collector->overflow_list_is_empty(), "should be empty"); 1.193 +void MarkRefsIntoAndScanClosure::do_oop(oop obj) { 1.194 + if (obj != NULL) { 1.195 + assert(obj->is_oop(), "expected an oop"); 1.196 + HeapWord* addr = (HeapWord*)obj; 1.197 + assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)"); 1.198 + assert(_collector->overflow_list_is_empty(), 1.199 + "overflow list should be empty"); 1.200 if (_span.contains(addr) && 1.201 !_bit_map->isMarked(addr)) { 1.202 // mark bit map (object is now grey) 1.203 @@ -6452,7 +6455,7 @@ 1.204 // push on marking stack (stack should be empty), and drain the 1.205 // stack by applying this closure to the oops in the oops popped 1.206 // from the stack (i.e. 
blacken the grey objects) 1.207 - bool res = _mark_stack->push(this_oop); 1.208 + bool res = _mark_stack->push(obj); 1.209 assert(res, "Should have space to push on empty stack"); 1.210 do { 1.211 oop new_oop = _mark_stack->pop(); 1.212 @@ -6488,6 +6491,9 @@ 1.213 } 1.214 } 1.215 1.216 +void MarkRefsIntoAndScanClosure::do_oop(oop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } 1.217 +void MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { MarkRefsIntoAndScanClosure::do_oop_work(p); } 1.218 + 1.219 void MarkRefsIntoAndScanClosure::do_yield_work() { 1.220 assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(), 1.221 "CMS thread should hold CMS token"); 1.222 @@ -6506,9 +6512,11 @@ 1.223 _collector->icms_wait(); 1.224 1.225 // See the comment in coordinator_yield() 1.226 - for (unsigned i = 0; i < CMSYieldSleepCount && 1.227 - ConcurrentMarkSweepThread::should_yield() && 1.228 - !CMSCollector::foregroundGCIsActive(); ++i) { 1.229 + for (unsigned i = 0; 1.230 + i < CMSYieldSleepCount && 1.231 + ConcurrentMarkSweepThread::should_yield() && 1.232 + !CMSCollector::foregroundGCIsActive(); 1.233 + ++i) { 1.234 os::sleep(Thread::current(), 1, false); 1.235 ConcurrentMarkSweepThread::acknowledge_yield_request(); 1.236 } 1.237 @@ -6545,13 +6553,12 @@ 1.238 // the scan phase whence they are also available for stealing by parallel 1.239 // threads. Since the marking bit map is shared, updates are 1.240 // synchronized (via CAS). 1.241 -void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { 1.242 - oop this_oop = *p; 1.243 - if (this_oop != NULL) { 1.244 +void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) { 1.245 + if (obj != NULL) { 1.246 // Ignore mark word because this could be an already marked oop 1.247 // that may be chained at the end of the overflow list. 
1.248 - assert(this_oop->is_oop(true /* ignore mark word */), "expected an oop"); 1.249 - HeapWord* addr = (HeapWord*)this_oop; 1.250 + assert(obj->is_oop(true /* ignore mark word */), "expected an oop"); 1.251 + HeapWord* addr = (HeapWord*)obj; 1.252 if (_span.contains(addr) && 1.253 !_bit_map->isMarked(addr)) { 1.254 // mark bit map (object will become grey): 1.255 @@ -6565,7 +6572,7 @@ 1.256 // queue to an appropriate length by applying this closure to 1.257 // the oops in the oops popped from the stack (i.e. blacken the 1.258 // grey objects) 1.259 - bool res = _work_queue->push(this_oop); 1.260 + bool res = _work_queue->push(obj); 1.261 assert(res, "Low water mark should be less than capacity?"); 1.262 trim_queue(_low_water_mark); 1.263 } // Else, another thread claimed the object 1.264 @@ -6573,6 +6580,9 @@ 1.265 } 1.266 } 1.267 1.268 +void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } 1.269 +void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); } 1.270 + 1.271 // This closure is used to rescan the marked objects on the dirty cards 1.272 // in the mod union table and the card table proper. 
1.273 size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m( 1.274 @@ -6675,8 +6685,8 @@ 1.275 1.276 // See the comment in coordinator_yield() 1.277 for (unsigned i = 0; i < CMSYieldSleepCount && 1.278 - ConcurrentMarkSweepThread::should_yield() && 1.279 - !CMSCollector::foregroundGCIsActive(); ++i) { 1.280 + ConcurrentMarkSweepThread::should_yield() && 1.281 + !CMSCollector::foregroundGCIsActive(); ++i) { 1.282 os::sleep(Thread::current(), 1, false); 1.283 ConcurrentMarkSweepThread::acknowledge_yield_request(); 1.284 } 1.285 @@ -6928,13 +6938,13 @@ 1.286 assert(_markStack->isEmpty(), 1.287 "should drain stack to limit stack usage"); 1.288 // convert ptr to an oop preparatory to scanning 1.289 - oop this_oop = oop(ptr); 1.290 + oop obj = oop(ptr); 1.291 // Ignore mark word in verification below, since we 1.292 // may be running concurrent with mutators. 1.293 - assert(this_oop->is_oop(true), "should be an oop"); 1.294 + assert(obj->is_oop(true), "should be an oop"); 1.295 assert(_finger <= ptr, "_finger runneth ahead"); 1.296 // advance the finger to right end of this object 1.297 - _finger = ptr + this_oop->size(); 1.298 + _finger = ptr + obj->size(); 1.299 assert(_finger > ptr, "we just incremented it above"); 1.300 // On large heaps, it may take us some time to get through 1.301 // the marking phase (especially if running iCMS). 
During 1.302 @@ -6980,7 +6990,7 @@ 1.303 _span, _bitMap, _markStack, 1.304 _revisitStack, 1.305 _finger, this); 1.306 - bool res = _markStack->push(this_oop); 1.307 + bool res = _markStack->push(obj); 1.308 assert(res, "Empty non-zero size stack should have space for single push"); 1.309 while (!_markStack->isEmpty()) { 1.310 oop new_oop = _markStack->pop(); 1.311 @@ -7052,13 +7062,13 @@ 1.312 assert(_work_queue->size() == 0, 1.313 "should drain stack to limit stack usage"); 1.314 // convert ptr to an oop preparatory to scanning 1.315 - oop this_oop = oop(ptr); 1.316 + oop obj = oop(ptr); 1.317 // Ignore mark word in verification below, since we 1.318 // may be running concurrent with mutators. 1.319 - assert(this_oop->is_oop(true), "should be an oop"); 1.320 + assert(obj->is_oop(true), "should be an oop"); 1.321 assert(_finger <= ptr, "_finger runneth ahead"); 1.322 // advance the finger to right end of this object 1.323 - _finger = ptr + this_oop->size(); 1.324 + _finger = ptr + obj->size(); 1.325 assert(_finger > ptr, "we just incremented it above"); 1.326 // On large heaps, it may take us some time to get through 1.327 // the marking phase (especially if running iCMS). 
During 1.328 @@ -7106,7 +7116,7 @@ 1.329 _revisit_stack, 1.330 _finger, 1.331 gfa, this); 1.332 - bool res = _work_queue->push(this_oop); // overflow could occur here 1.333 + bool res = _work_queue->push(obj); // overflow could occur here 1.334 assert(res, "Will hold once we use workqueues"); 1.335 while (true) { 1.336 oop new_oop; 1.337 @@ -7176,15 +7186,15 @@ 1.338 assert(_mark_stack->isEmpty(), 1.339 "should drain stack to limit stack usage"); 1.340 // convert addr to an oop preparatory to scanning 1.341 - oop this_oop = oop(addr); 1.342 - assert(this_oop->is_oop(), "should be an oop"); 1.343 + oop obj = oop(addr); 1.344 + assert(obj->is_oop(), "should be an oop"); 1.345 assert(_finger <= addr, "_finger runneth ahead"); 1.346 // advance the finger to right end of this object 1.347 - _finger = addr + this_oop->size(); 1.348 + _finger = addr + obj->size(); 1.349 assert(_finger > addr, "we just incremented it above"); 1.350 // Note: the finger doesn't advance while we drain 1.351 // the stack below. 
1.352 - bool res = _mark_stack->push(this_oop); 1.353 + bool res = _mark_stack->push(obj); 1.354 assert(res, "Empty non-zero size stack should have space for single push"); 1.355 while (!_mark_stack->isEmpty()) { 1.356 oop new_oop = _mark_stack->pop(); 1.357 @@ -7207,6 +7217,8 @@ 1.358 _mark_stack(mark_stack) 1.359 { } 1.360 1.361 +void PushAndMarkVerifyClosure::do_oop(oop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } 1.362 +void PushAndMarkVerifyClosure::do_oop(narrowOop* p) { PushAndMarkVerifyClosure::do_oop_work(p); } 1.363 1.364 // Upon stack overflow, we discard (part of) the stack, 1.365 // remembering the least address amongst those discarded 1.366 @@ -7219,20 +7231,20 @@ 1.367 _mark_stack->expand(); // expand the stack if possible 1.368 } 1.369 1.370 -void PushAndMarkVerifyClosure::do_oop(oop* p) { 1.371 - oop this_oop = *p; 1.372 - assert(this_oop->is_oop_or_null(), "expected an oop or NULL"); 1.373 - HeapWord* addr = (HeapWord*)this_oop; 1.374 +void PushAndMarkVerifyClosure::do_oop(oop obj) { 1.375 + assert(obj->is_oop_or_null(), "expected an oop or NULL"); 1.376 + HeapWord* addr = (HeapWord*)obj; 1.377 if (_span.contains(addr) && !_verification_bm->isMarked(addr)) { 1.378 // Oop lies in _span and isn't yet grey or black 1.379 _verification_bm->mark(addr); // now grey 1.380 if (!_cms_bm->isMarked(addr)) { 1.381 oop(addr)->print(); 1.382 - gclog_or_tty->print_cr(" ("INTPTR_FORMAT" should have been marked)", addr); 1.383 + gclog_or_tty->print_cr(" (" INTPTR_FORMAT " should have been marked)", 1.384 + addr); 1.385 fatal("... 
aborting"); 1.386 } 1.387 1.388 - if (!_mark_stack->push(this_oop)) { // stack overflow 1.389 + if (!_mark_stack->push(obj)) { // stack overflow 1.390 if (PrintCMSStatistics != 0) { 1.391 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 1.392 SIZE_FORMAT, _mark_stack->capacity()); 1.393 @@ -7285,7 +7297,6 @@ 1.394 _should_remember_klasses(collector->should_unload_classes()) 1.395 { } 1.396 1.397 - 1.398 void CMSCollector::lower_restart_addr(HeapWord* low) { 1.399 assert(_span.contains(low), "Out of bounds addr"); 1.400 if (_restart_addr == NULL) { 1.401 @@ -7321,12 +7332,10 @@ 1.402 _overflow_stack->expand(); // expand the stack if possible 1.403 } 1.404 1.405 - 1.406 -void PushOrMarkClosure::do_oop(oop* p) { 1.407 - oop thisOop = *p; 1.408 +void PushOrMarkClosure::do_oop(oop obj) { 1.409 // Ignore mark word because we are running concurrent with mutators. 1.410 - assert(thisOop->is_oop_or_null(true), "expected an oop or NULL"); 1.411 - HeapWord* addr = (HeapWord*)thisOop; 1.412 + assert(obj->is_oop_or_null(true), "expected an oop or NULL"); 1.413 + HeapWord* addr = (HeapWord*)obj; 1.414 if (_span.contains(addr) && !_bitMap->isMarked(addr)) { 1.415 // Oop lies in _span and isn't yet grey or black 1.416 _bitMap->mark(addr); // now grey 1.417 @@ -7342,7 +7351,7 @@ 1.418 simulate_overflow = true; 1.419 } 1.420 ) 1.421 - if (simulate_overflow || !_markStack->push(thisOop)) { // stack overflow 1.422 + if (simulate_overflow || !_markStack->push(obj)) { // stack overflow 1.423 if (PrintCMSStatistics != 0) { 1.424 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 1.425 SIZE_FORMAT, _markStack->capacity()); 1.426 @@ -7358,11 +7367,13 @@ 1.427 } 1.428 } 1.429 1.430 -void Par_PushOrMarkClosure::do_oop(oop* p) { 1.431 - oop this_oop = *p; 1.432 +void PushOrMarkClosure::do_oop(oop* p) { PushOrMarkClosure::do_oop_work(p); } 1.433 +void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); } 1.434 + 1.435 +void 
Par_PushOrMarkClosure::do_oop(oop obj) { 1.436 // Ignore mark word because we are running concurrent with mutators. 1.437 - assert(this_oop->is_oop_or_null(true), "expected an oop or NULL"); 1.438 - HeapWord* addr = (HeapWord*)this_oop; 1.439 + assert(obj->is_oop_or_null(true), "expected an oop or NULL"); 1.440 + HeapWord* addr = (HeapWord*)obj; 1.441 if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) { 1.442 // Oop lies in _span and isn't yet grey or black 1.443 // We read the global_finger (volatile read) strictly after marking oop 1.444 @@ -7391,7 +7402,7 @@ 1.445 } 1.446 ) 1.447 if (simulate_overflow || 1.448 - !(_work_queue->push(this_oop) || _overflow_stack->par_push(this_oop))) { 1.449 + !(_work_queue->push(obj) || _overflow_stack->par_push(obj))) { 1.450 // stack overflow 1.451 if (PrintCMSStatistics != 0) { 1.452 gclog_or_tty->print_cr("CMS marking stack overflow (benign) at " 1.453 @@ -7408,6 +7419,8 @@ 1.454 } 1.455 } 1.456 1.457 +void Par_PushOrMarkClosure::do_oop(oop* p) { Par_PushOrMarkClosure::do_oop_work(p); } 1.458 +void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); } 1.459 1.460 PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector, 1.461 MemRegion span, 1.462 @@ -7432,16 +7445,11 @@ 1.463 1.464 // Grey object rescan during pre-cleaning and second checkpoint phases -- 1.465 // the non-parallel version (the parallel version appears further below.) 1.466 -void PushAndMarkClosure::do_oop(oop* p) { 1.467 - oop this_oop = *p; 1.468 - // Ignore mark word verification. If during concurrent precleaning 1.469 - // the object monitor may be locked. If during the checkpoint 1.470 - // phases, the object may already have been reached by a different 1.471 - // path and may be at the end of the global overflow list (so 1.472 - // the mark word may be NULL). 
1.473 - assert(this_oop->is_oop_or_null(true/* ignore mark word */), 1.474 +void PushAndMarkClosure::do_oop(oop obj) { 1.475 + // Ignore mark word: monitor may be locked (precleaning), or the oop may be chained on the overflow list (remark) 1.476 + assert(obj->is_oop_or_null(true /* ignore mark word */), 1.477 "expected an oop or NULL"); 1.478 - HeapWord* addr = (HeapWord*)this_oop; 1.479 + HeapWord* addr = (HeapWord*)obj; 1.480 // Check if oop points into the CMS generation 1.481 // and is not marked 1.482 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 1.483 @@ -7456,7 +7464,7 @@ 1.484 simulate_overflow = true; 1.485 } 1.486 ) 1.487 - if (simulate_overflow || !_mark_stack->push(this_oop)) { 1.488 + if (simulate_overflow || !_mark_stack->push(obj)) { 1.489 if (_concurrent_precleaning) { 1.490 // During precleaning we can just dirty the appropriate card 1.491 // in the mod union table, thus ensuring that the object remains 1.492 @@ -7468,7 +7476,7 @@ 1.493 } else { 1.494 // During the remark phase, we need to remember this oop 1.495 // in the overflow list. 1.496 - _collector->push_on_overflow_list(obj); 1.497 + _collector->push_on_overflow_list(obj); 1.498 _collector->_ser_pmc_remark_ovflw++; 1.499 } 1.500 } 1.501 @@ -7492,10 +7500,12 @@ 1.502 assert(_ref_processor != NULL, "_ref_processor shouldn't be NULL"); 1.503 } 1.504 1.505 +void PushAndMarkClosure::do_oop(oop* p) { PushAndMarkClosure::do_oop_work(p); } 1.506 +void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); } 1.507 + 1.508 // Grey object rescan during second checkpoint phase -- 1.509 // the parallel version. 
1.510 -void Par_PushAndMarkClosure::do_oop(oop* p) { 1.511 - oop this_oop = *p; 1.512 +void Par_PushAndMarkClosure::do_oop(oop obj) { 1.513 // In the assert below, we ignore the mark word because 1.514 // this oop may point to an already visited object that is 1.515 // on the overflow stack (in which case the mark word has 1.516 @@ -7507,9 +7517,9 @@ 1.517 // value, by the time we get to examined this failing assert in 1.518 // the debugger, is_oop_or_null(false) may subsequently start 1.519 // to hold. 1.520 - assert(this_oop->is_oop_or_null(true), 1.521 + assert(obj->is_oop_or_null(true), 1.522 "expected an oop or NULL"); 1.523 - HeapWord* addr = (HeapWord*)this_oop; 1.524 + HeapWord* addr = (HeapWord*)obj; 1.525 // Check if oop points into the CMS generation 1.526 // and is not marked 1.527 if (_span.contains(addr) && !_bit_map->isMarked(addr)) { 1.528 @@ -7527,14 +7537,17 @@ 1.529 simulate_overflow = true; 1.530 } 1.531 ) 1.532 - if (simulate_overflow || !_work_queue->push(this_oop)) { 1.533 - _collector->par_push_on_overflow_list(this_oop); 1.534 + if (simulate_overflow || !_work_queue->push(obj)) { 1.535 + _collector->par_push_on_overflow_list(obj); 1.536 _collector->_par_pmc_remark_ovflw++; // imprecise OK: no need to CAS 1.537 } 1.538 } // Else, some other thread got there first 1.539 } 1.540 } 1.541 1.542 +void Par_PushAndMarkClosure::do_oop(oop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 1.543 +void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); } 1.544 + 1.545 void PushAndMarkClosure::remember_klass(Klass* k) { 1.546 if (!_revisit_stack->push(oop(k))) { 1.547 fatal("Revisit stack overflowed in PushAndMarkClosure"); 1.548 @@ -8228,9 +8241,8 @@ 1.549 } 1.550 1.551 // CMSKeepAliveClosure: the serial version 1.552 -void CMSKeepAliveClosure::do_oop(oop* p) { 1.553 - oop this_oop = *p; 1.554 - HeapWord* addr = (HeapWord*)this_oop; 1.555 +void CMSKeepAliveClosure::do_oop(oop obj) { 1.556 + HeapWord* addr = 
(HeapWord*)obj; 1.557 if (_span.contains(addr) && 1.558 !_bit_map->isMarked(addr)) { 1.559 _bit_map->mark(addr); 1.560 @@ -8242,26 +8254,28 @@ 1.561 simulate_overflow = true; 1.562 } 1.563 ) 1.564 - if (simulate_overflow || !_mark_stack->push(this_oop)) { 1.565 - _collector->push_on_overflow_list(this_oop); 1.566 + if (simulate_overflow || !_mark_stack->push(obj)) { 1.567 + _collector->push_on_overflow_list(obj); 1.568 _collector->_ser_kac_ovflw++; 1.569 } 1.570 } 1.571 } 1.572 1.573 +void CMSKeepAliveClosure::do_oop(oop* p) { CMSKeepAliveClosure::do_oop_work(p); } 1.574 +void CMSKeepAliveClosure::do_oop(narrowOop* p) { CMSKeepAliveClosure::do_oop_work(p); } 1.575 + 1.576 // CMSParKeepAliveClosure: a parallel version of the above. 1.577 // The work queues are private to each closure (thread), 1.578 // but (may be) available for stealing by other threads. 1.579 -void CMSParKeepAliveClosure::do_oop(oop* p) { 1.580 - oop this_oop = *p; 1.581 - HeapWord* addr = (HeapWord*)this_oop; 1.582 +void CMSParKeepAliveClosure::do_oop(oop obj) { 1.583 + HeapWord* addr = (HeapWord*)obj; 1.584 if (_span.contains(addr) && 1.585 !_bit_map->isMarked(addr)) { 1.586 // In general, during recursive tracing, several threads 1.587 // may be concurrently getting here; the first one to 1.588 // "tag" it, claims it. 
1.589 if (_bit_map->par_mark(addr)) { 1.590 - bool res = _work_queue->push(this_oop); 1.591 + bool res = _work_queue->push(obj); 1.592 assert(res, "Low water mark should be much less than capacity"); 1.593 // Do a recursive trim in the hope that this will keep 1.594 // stack usage lower, but leave some oops for potential stealers 1.595 @@ -8270,6 +8284,9 @@ 1.596 } 1.597 } 1.598 1.599 +void CMSParKeepAliveClosure::do_oop(oop* p) { CMSParKeepAliveClosure::do_oop_work(p); } 1.600 +void CMSParKeepAliveClosure::do_oop(narrowOop* p) { CMSParKeepAliveClosure::do_oop_work(p); } 1.601 + 1.602 void CMSParKeepAliveClosure::trim_queue(uint max) { 1.603 while (_work_queue->size() > max) { 1.604 oop new_oop; 1.605 @@ -8285,9 +8302,8 @@ 1.606 } 1.607 } 1.608 1.609 -void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { 1.610 - oop this_oop = *p; 1.611 - HeapWord* addr = (HeapWord*)this_oop; 1.612 +void CMSInnerParMarkAndPushClosure::do_oop(oop obj) { 1.613 + HeapWord* addr = (HeapWord*)obj; 1.614 if (_span.contains(addr) && 1.615 !_bit_map->isMarked(addr)) { 1.616 if (_bit_map->par_mark(addr)) { 1.617 @@ -8299,14 +8315,17 @@ 1.618 simulate_overflow = true; 1.619 } 1.620 ) 1.621 - if (simulate_overflow || !_work_queue->push(this_oop)) { 1.622 - _collector->par_push_on_overflow_list(this_oop); 1.623 + if (simulate_overflow || !_work_queue->push(obj)) { 1.624 + _collector->par_push_on_overflow_list(obj); 1.625 _collector->_par_kac_ovflw++; 1.626 } 1.627 } // Else another thread got there already 1.628 } 1.629 } 1.630 1.631 +void CMSInnerParMarkAndPushClosure::do_oop(oop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } 1.632 +void CMSInnerParMarkAndPushClosure::do_oop(narrowOop* p) { CMSInnerParMarkAndPushClosure::do_oop_work(p); } 1.633 + 1.634 ////////////////////////////////////////////////////////////////// 1.635 // CMSExpansionCause ///////////////////////////// 1.636 ////////////////////////////////////////////////////////////////// 1.637 @@ -8337,12 +8356,12 @@ 1.638 
while (!_mark_stack->isEmpty() || 1.639 // if stack is empty, check the overflow list 1.640 _collector->take_from_overflow_list(num, _mark_stack)) { 1.641 - oop this_oop = _mark_stack->pop(); 1.642 - HeapWord* addr = (HeapWord*)this_oop; 1.643 + oop obj = _mark_stack->pop(); 1.644 + HeapWord* addr = (HeapWord*)obj; 1.645 assert(_span.contains(addr), "Should be within span"); 1.646 assert(_bit_map->isMarked(addr), "Should be marked"); 1.647 - assert(this_oop->is_oop(), "Should be an oop"); 1.648 - this_oop->oop_iterate(_keep_alive); 1.649 + assert(obj->is_oop(), "Should be an oop"); 1.650 + obj->oop_iterate(_keep_alive); 1.651 } 1.652 } 1.653