--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Oct 05 13:37:08 2012 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Sat Oct 06 01:17:44 2012 -0700
@@ -399,9 +399,9 @@
                                     // last claimed region
 
   // marking tasks
-  uint                    _max_task_num; // maximum task number
+  uint                    _max_worker_id;// maximum worker id
   uint                    _active_tasks; // task num currently active
-  CMTask**                _tasks;        // task queue array (max_task_num len)
+  CMTask**                _tasks;        // task queue array (max_worker_id len)
   CMTaskQueueSet*         _task_queues;  // task queue set
   ParallelTaskTerminator  _terminator;   // for termination
 
@@ -492,10 +492,10 @@
   ParallelTaskTerminator* terminator() { return &_terminator; }
 
   // It claims the next available region to be scanned by a marking
-  // task. It might return NULL if the next region is empty or we have
-  // run out of regions. In the latter case, out_of_regions()
+  // task/thread. It might return NULL if the next region is empty or
+  // we have run out of regions. In the latter case, out_of_regions()
   // determines whether we've really run out of regions or the task
-  // should call claim_region() again.  This might seem a bit
+  // should call claim_region() again. This might seem a bit
   // awkward. Originally, the code was written so that claim_region()
   // either successfully returned with a non-empty region or there
   // were no more regions to be claimed. The problem with this was
@@ -505,7 +505,7 @@
   // method. So, this way, each task will spend very little time in
   // claim_region() and is allowed to call the regular clock method
   // frequently.
-  HeapRegion* claim_region(int task);
+  HeapRegion* claim_region(uint worker_id);
 
   // It determines whether we've run out of regions to scan.
   bool        out_of_regions() { return _finger == _heap_end; }
@@ -537,8 +537,8 @@
   bool has_aborted() { return _has_aborted; }
 
   // Methods to enter the two overflow sync barriers
-  void enter_first_sync_barrier(int task_num);
-  void enter_second_sync_barrier(int task_num);
+  void enter_first_sync_barrier(uint worker_id);
+  void enter_second_sync_barrier(uint worker_id);
 
   ForceOverflowSettings* force_overflow_conc() {
     return &_force_overflow_conc;
@@ -626,14 +626,14 @@
 
   double all_task_accum_vtime() {
     double ret = 0.0;
-    for (int i = 0; i < (int)_max_task_num; ++i)
+    for (uint i = 0; i < _max_worker_id; ++i)
       ret += _accum_task_vtime[i];
     return ret;
   }
 
   // Attempts to steal an object from the task queues of other tasks
-  bool try_stealing(int task_num, int* hash_seed, oop& obj) {
-    return _task_queues->steal(task_num, hash_seed, obj);
+  bool try_stealing(uint worker_id, int* hash_seed, oop& obj) {
+    return _task_queues->steal(worker_id, hash_seed, obj);
   }
 
   ConcurrentMark(ReservedSpace rs, uint max_regions);
@@ -823,7 +823,7 @@
 
   // Returns the card bitmap for a given task or worker id.
   BitMap* count_card_bitmap_for(uint worker_id) {
-    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
     assert(_count_card_bitmaps != NULL, "uninitialized");
     BitMap* task_card_bm = &_count_card_bitmaps[worker_id];
     assert(task_card_bm->size() == _card_bm.size(), "size mismatch");
@@ -833,7 +833,7 @@
   // Returns the array containing the marked bytes for each region,
   // for the given worker or task id.
   size_t* count_marked_bytes_array_for(uint worker_id) {
-    assert(0 <= worker_id && worker_id < _max_task_num, "oob");
+    assert(0 <= worker_id && worker_id < _max_worker_id, "oob");
     assert(_count_marked_bytes != NULL, "uninitialized");
     size_t* marked_bytes_array = _count_marked_bytes[worker_id];
     assert(marked_bytes_array != NULL, "uninitialized");
@@ -939,7 +939,7 @@
     global_stack_transfer_size = 16
   };
 
-  int                         _task_id;
+  uint                        _worker_id;
   G1CollectedHeap*            _g1h;
   ConcurrentMark*             _cm;
   CMBitMap*                   _nextMarkBitMap;
@@ -1115,8 +1115,8 @@
     _elapsed_time_ms = os::elapsedTime() * 1000.0 - _elapsed_time_ms;
   }
 
-  // returns the task ID
-  int task_id() { return _task_id; }
+  // returns the worker ID associated with this task.
+  uint worker_id() { return _worker_id; }
 
   // From TerminatorTerminator. It determines whether this task should
   // exit the termination protocol after it's entered it.
@@ -1170,7 +1170,7 @@
     _finger = new_finger;
   }
 
-  CMTask(int task_num, ConcurrentMark *cm,
+  CMTask(uint worker_id, ConcurrentMark *cm,
          size_t* marked_bytes, BitMap* card_bm,
          CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);