1.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Fri Apr 11 09:56:35 2008 -0400
1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp Sun Apr 13 17:43:42 2008 -0400
1.3 @@ -42,8 +42,6 @@
1.4 class PSOldGen;
1.5 class ParCompactionManager;
1.6
1.7 -#define PS_CHUNKED_ARRAY_OOP_MASK 1
1.8 -
1.9 #define PS_PM_STATS 0
1.10
1.11 class PSPromotionManager : public CHeapObj {
1.12 @@ -80,7 +78,7 @@
1.13 PrefetchQueue _prefetch_queue;
1.14
1.15 OopStarTaskQueue _claimed_stack_depth;
1.16 - GrowableArray<oop*>* _overflow_stack_depth;
1.17 + GrowableArray<StarTask>* _overflow_stack_depth;
1.18 OopTaskQueue _claimed_stack_breadth;
1.19 GrowableArray<oop>* _overflow_stack_breadth;
1.20
1.21 @@ -92,13 +90,15 @@
1.22 uint _min_array_size_for_chunking;
1.23
1.24 // Accessors
1.25 - static PSOldGen* old_gen() { return _old_gen; }
1.26 - static MutableSpace* young_space() { return _young_space; }
1.27 + static PSOldGen* old_gen() { return _old_gen; }
1.28 + static MutableSpace* young_space() { return _young_space; }
1.29
1.30 inline static PSPromotionManager* manager_array(int index);
1.31 + template <class T> inline void claim_or_forward_internal_depth(T* p);
1.32 + template <class T> inline void claim_or_forward_internal_breadth(T* p);
1.33
1.34 - GrowableArray<oop*>* overflow_stack_depth() { return _overflow_stack_depth; }
1.35 - GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
1.36 + GrowableArray<StarTask>* overflow_stack_depth() { return _overflow_stack_depth; }
1.37 + GrowableArray<oop>* overflow_stack_breadth() { return _overflow_stack_breadth; }
1.38
1.39 // On the task queues we push reference locations as well as
1.40 // partially-scanned arrays (in the latter case, we push an oop to
1.41 @@ -116,27 +116,37 @@
1.42 // (oop). We do all the necessary casting in the mask / unmask
1.43 // methods to avoid sprinkling the rest of the code with more casts.
1.44
1.45 - bool is_oop_masked(oop* p) {
1.46 - return ((intptr_t) p & PS_CHUNKED_ARRAY_OOP_MASK) == PS_CHUNKED_ARRAY_OOP_MASK;
1.47 + // These are added to the taskqueue so PS_CHUNKED_ARRAY_OOP_MASK (or any
1.48 + // future masks) can't conflict with COMPRESSED_OOP_MASK
1.49 +#define PS_CHUNKED_ARRAY_OOP_MASK 0x2
1.50 +
1.51 + bool is_oop_masked(StarTask p) {
1.52 + // If something is marked chunked it's always treated like wide oop*
1.53 + return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
1.54 + PS_CHUNKED_ARRAY_OOP_MASK;
1.55 }
1.56
1.57 oop* mask_chunked_array_oop(oop obj) {
1.58 assert(!is_oop_masked((oop*) obj), "invariant");
1.59 - oop* ret = (oop*) ((intptr_t) obj | PS_CHUNKED_ARRAY_OOP_MASK);
1.60 + oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
1.61 assert(is_oop_masked(ret), "invariant");
1.62 return ret;
1.63 }
1.64
1.65 - oop unmask_chunked_array_oop(oop* p) {
1.66 + oop unmask_chunked_array_oop(StarTask p) {
1.67 assert(is_oop_masked(p), "invariant");
1.68 - oop ret = oop((intptr_t) p & ~PS_CHUNKED_ARRAY_OOP_MASK);
1.69 + assert(!p.is_narrow(), "chunked array oops cannot be narrow");
1.70 + oop *chunk = (oop*)p; // cast p to oop (uses conversion operator)
1.71 + oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
1.72 assert(!is_oop_masked((oop*) ret), "invariant");
1.73 return ret;
1.74 }
1.75
1.76 + template <class T> void process_array_chunk_work(oop obj,
1.77 + int start, int end);
1.78 void process_array_chunk(oop old);
1.79
1.80 - void push_depth(oop* p) {
1.81 + template <class T> void push_depth(T* p) {
1.82 assert(depth_first(), "pre-condition");
1.83
1.84 #if PS_PM_STATS
1.85 @@ -175,7 +185,7 @@
1.86 }
1.87
1.88 protected:
1.89 - static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
1.90 + static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
1.91 static OopTaskQueueSet* stack_array_breadth() { return _stack_array_breadth; }
1.92
1.93 public:
1.94
@@ -227,6 +237,7 @@
1.95 drain_stacks_breadth(totally_drain);
1.96 }
1.97 }
1.98 + public:
1.99 void drain_stacks_cond_depth() {
1.100 if (claimed_stack_depth()->size() > _target_stack_size) {
1.101 drain_stacks_depth(false);
1.102 @@ -256,15 +267,11 @@
1.103 return _depth_first;
1.104 }
1.105
1.106 - inline void process_popped_location_depth(oop* p);
1.107 + inline void process_popped_location_depth(StarTask p);
1.108
1.109 inline void flush_prefetch_queue();
1.110 -
1.111 - inline void claim_or_forward_depth(oop* p);
1.112 - inline void claim_or_forward_internal_depth(oop* p);
1.113 -
1.114 - inline void claim_or_forward_breadth(oop* p);
1.115 - inline void claim_or_forward_internal_breadth(oop* p);
1.116 + template <class T> inline void claim_or_forward_depth(T* p);
1.117 + template <class T> inline void claim_or_forward_breadth(T* p);
1.118
1.119 #if PS_PM_STATS
1.120 void increment_steals(oop* p = NULL) {