src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp

changeset 6938:a2328cbebb23
parent 6937:b0c374311c4e
child 7118:227a9e5e4b4a
--- a/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Jul 21 09:41:04 2014 +0200
+++ b/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Jul 21 09:41:06 2014 +0200
@@ -39,7 +39,7 @@
 class outputStream;
 
 class G1ParScanThreadState : public StackObj {
-protected:
+ private:
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
@@ -98,14 +98,10 @@
     }
   }
 
-public:
+ public:
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
-  ~G1ParScanThreadState() {
-    retire_alloc_buffers();
-    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
-  }
+  ~G1ParScanThreadState();
 
-  RefToScanQueue*   refs()            { return _refs;             }
   ageTable*         age_table()       { return &_age_table;       }
 
   G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
@@ -116,6 +112,8 @@
   size_t undo_waste() const                      { return _undo_waste; }
 
 #ifdef ASSERT
+  bool queue_is_empty() const { return _refs->is_empty(); }
+
   bool verify_ref(narrowOop* ref) const;
   bool verify_ref(oop* ref) const;
   bool verify_task(StarTask ref) const;
@@ -123,56 +121,24 @@
 
   template <class T> void push_on_queue(T* ref) {
     assert(verify_ref(ref), "sanity");
-    refs()->push(ref);
+    _refs->push(ref);
   }
 
   template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
 
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = NULL;
-    size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
-    if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-      G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
-      add_to_alloc_buffer_waste(alloc_buf->words_remaining());
-      alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+ private:
 
-      HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
-      if (buf == NULL) return NULL; // Let caller handle allocation failure.
-      // Otherwise.
-      alloc_buf->set_word_size(gclab_word_size);
-      alloc_buf->set_buf(buf);
+  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
+  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
+  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
 
-      obj = alloc_buf->allocate(word_sz);
-      assert(obj != NULL, "buffer was definitely big enough...");
-    } else {
-      obj = _g1h->par_allocate_during_gc(purpose, word_sz);
-    }
-    return obj;
-  }
-
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
-    HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
-    if (obj != NULL) return obj;
-    return allocate_slow(purpose, word_sz);
-  }
-
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
-    if (alloc_buffer(purpose)->contains(obj)) {
-      assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
-             "should contain whole object");
-      alloc_buffer(purpose)->undo_allocation(obj, word_sz);
-    } else {
-      CollectedHeap::fill_with_object(obj, word_sz);
-      add_to_undo_waste(word_sz);
-    }
-  }
+ public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
     _evac_failure_cl = evac_failure_cl;
   }
-  OopsInHeapRegionClosure* evac_failure_closure() {
-    return _evac_failure_cl;
-  }
+
+  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
 
   int* hash_seed() { return &_hash_seed; }
   uint queue_num() { return _queue_num; }
@@ -201,10 +167,8 @@
     return os::elapsedTime() - _start;
   }
 
-  static void
-    print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
-  void
-    print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+  static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
+  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
 
   size_t* surviving_young_words() {
     // We add on to hide entry 0 which accumulates surviving words for
@@ -213,15 +177,7 @@
   }
 
  private:
-  void retire_alloc_buffers() {
-    for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-      size_t waste = _alloc_buffers[ap]->words_remaining();
-      add_to_alloc_buffer_waste(waste);
-      _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                                 true /* end_of_gc */,
-                                                 false /* retain */);
-    }
-  }
+  void retire_alloc_buffers();
 
   #define G1_PARTIAL_ARRAY_MASK 0x2
 
@@ -254,39 +210,18 @@
   inline void do_oop_partial_array(oop* p);
 
   // This method is applied to the fields of the objects that have just been copied.
-  template <class T> void do_oop_evac(T* p, HeapRegion* from) {
-    assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-           "Reference should not be NULL here as such are never pushed to the task queue.");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
 
-    // Although we never intentionally push references outside of the collection
-    // set, due to (benign) races in the claim mechanism during RSet scanning more
-    // than one thread might claim the same card. So the same card may be
-    // processed multiple times. So redo this check.
-    if (_g1h->in_cset_fast_test(obj)) {
-      oop forwardee;
-      if (obj->is_forwarded()) {
-        forwardee = obj->forwardee();
-      } else {
-        forwardee = copy_to_survivor_space(obj);
-      }
-      assert(forwardee != NULL, "forwardee should not be NULL");
-      oopDesc::encode_store_heap_oop(p, forwardee);
-    }
+  template <class T> inline void deal_with_reference(T* ref_to_scan);
 
-    assert(obj != NULL, "Must be");
-    update_rs(from, p, queue_num());
-  }
-public:
+  inline void dispatch_reference(StarTask ref);
+ public:
 
   oop copy_to_survivor_space(oop const obj);
 
-  template <class T> inline void deal_with_reference(T* ref_to_scan);
+  void trim_queue();
 
-  inline void deal_with_reference(StarTask ref);
-
-public:
-  void trim_queue();
+  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
 };
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
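
The bodies deleted from the header are not re-added in this hunk; presumably the destructor and retire_alloc_buffers() move to g1ParScanThreadState.cpp and the allocation helpers to g1ParScanThreadState.inline.hpp, but those files are not shown here, so the target locations are an assumption. A minimal sketch of how the relocated allocation helpers could look, reconstructed from the deleted lines above:

// Hypothetical sketch: out-of-line definitions reconstructed from the bodies
// deleted above; the file placement (g1ParScanThreadState.inline.hpp) is an
// assumption and is not confirmed by this changeset's hunk.
inline HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
  HeapWord* obj = NULL;
  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    // The request is small relative to the PLAB: retire the current buffer
    // (recording the unused space as waste) and try to refill it.
    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
    alloc_buf->retire(false /* end_of_gc */, false /* retain */);

    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
    if (buf == NULL) {
      return NULL; // Let the caller handle allocation failure.
    }
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
  } else {
    // The request is large relative to the PLAB: allocate it directly.
    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
  }
  return obj;
}

inline HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
  // Fast path: bump-pointer allocation in the current PLAB.
  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
  if (obj != NULL) {
    return obj;
  }
  return allocate_slow(purpose, word_sz);
}

inline void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
  if (alloc_buffer(purpose)->contains(obj)) {
    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
           "should contain whole object");
    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
  } else {
    // Allocation was outside the PLAB: fill the hole with a dummy object
    // and account for it as undo waste.
    CollectedHeap::fill_with_object(obj, word_sz);
    add_to_undo_waste(word_sz);
  }
}

Keeping allocate() as an inline definition preserves the PLAB fast path for evacuation, while the header now exposes only declarations, so includers no longer recompile when these implementations change.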
