src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp

author      tschatzl
date        Fri, 10 Oct 2014 15:51:58 +0200
changeset   7257:e7d0505c8a30
parent      7218:6948da6d7c13
child       7645:f2e3f0e1f97d
permissions -rw-r--r--

8059758: Footprint regressions with JDK-8038423
Summary: The changes in JDK-8038423 always initialize (zero out) the virtual memory used for auxiliary data structures. This causes a footprint regression for G1 in startup benchmarks: the benchmarks themselves never touch that memory, so without the explicit initialization the operating system would never actually commit those pages. The fix is to skip initialization entirely when the requested initialization value matches the default contents of freshly committed memory (zero).
Reviewed-by: jwilhelm, brutisso
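
To illustrate the idea behind the fix (a minimal sketch, not the actual HotSpot code; commit_pages and commit_and_initialize are hypothetical names): anonymous memory freshly committed from the operating system reads as zero and is not backed by physical memory until first written, so an explicit clear is only needed when the requested fill value is non-zero.

#include <sys/mman.h>
#include <cstring>
#include <cstddef>

// Hypothetical helper, for illustration only: commit 'size' bytes of
// anonymous virtual memory. Anonymous mmap'ed pages read as zero and
// consume no physical memory until first written.
static void* commit_pages(size_t size) {
  return mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}

// Sketch of the fix: only touch (and thereby physically back) the pages
// when the requested fill value differs from the zero contents that
// freshly committed memory already has.
static void* commit_and_initialize(size_t size, unsigned char fill_value) {
  void* mem = commit_pages(size);
  if (mem != MAP_FAILED && fill_value != 0) {
    memset(mem, fill_value, size);  // forces the OS to commit every page
  }
  return mem;
}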

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP

#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1OopClosures.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"

class HeapRegion;
class outputStream;

class G1ParScanThreadState : public StackObj {
 private:
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;
  DirtyCardQueue   _dcq;
  G1SATBCardTableModRefBS* _ct_bs;
  G1RemSet* _g1_rem;

  G1ParGCAllocator*   _g1_par_allocator;

  ageTable            _age_table;

  G1ParScanClosure    _scanner;

  size_t           _alloc_buffer_waste;
  size_t           _undo_waste;

  OopsInHeapRegionClosure*      _evac_failure_cl;

  int  _hash_seed;
  uint _queue_num;

  size_t _term_attempts;

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. Base is what we get back from the malloc call.
  size_t* _surviving_young_words_base;
  // This points into the array, as we use the first few entries for padding.
  size_t* _surviving_young_words;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
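  // PADDING_ELEM_NUM is the number of size_t slots in one cache line. The
  // survivor-words array is over-allocated by this amount and the
  // _surviving_young_words pointer advanced past it, so the per-thread
  // counters start on their own cache line and do not false-share with
  // adjacent heap data.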

  void   add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }

 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
  ~G1ParScanThreadState();

  ageTable*         age_table()       { return &_age_table;       }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    _refs->push(ref);
  }

  template <class T> void update_rs(HeapRegion* from, T* p, int tid) {
    // If the new value of the field points into the same region or
    // to the to-space, we don't need to include it in the RSet updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer, do it.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }
 public:

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }

  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }

  int* hash_seed() { return &_hash_seed; }
  uint queue_num() { return _queue_num; }

  size_t term_attempts() const  { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }

  void start_term_time() {
    note_term_attempt();
    _start_term = os::elapsedTime();
  }
  void end_term_time() {
    _term_time += (os::elapsedTime() - _start_term);
  }
  double term_time() const { return _term_time; }

  double elapsed_time() const {
    return os::elapsedTime() - _start;
  }

  static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;

  size_t* surviving_young_words() {
    // We add one to hide entry 0 which accumulates surviving words for
    // age -1 regions (i.e. non-young ones)
    return _surviving_young_words;
  }

 private:
  #define G1_PARTIAL_ARRAY_MASK 0x2
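  // Object addresses are at least HeapWordSize-aligned, so the low bits of
  // an oop are always zero. Bit 1 (0x2) is set on a queue entry to tag it
  // as a partial object array task rather than a regular oop* reference.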

  inline bool has_partial_array_mask(oop* ref) const {
    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  }

  // We never encode partial array oops as narrowOop*, so return false immediately.
  // This allows the compiler to create optimized code when popping references from
  // the work queue.
  inline bool has_partial_array_mask(narrowOop* ref) const {
    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
    return false;
  }

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oops, to allow the
  // specialization of has_partial_array_mask() for narrowOops above.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }

  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);

  template <class T> inline void deal_with_reference(T* ref_to_scan);

  inline void dispatch_reference(StarTask ref);
 public:
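
  // Copy the given object into to-space (a survivor or old region,
  // depending on the object's age) and return its new location.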
  oop copy_to_survivor_space(oop const obj);

  void trim_queue();

  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP
