8229401: Fix JFR code cache test failures

Mon, 19 Aug 2019 10:11:31 +0200

author
neugens
date
Mon, 19 Aug 2019 10:11:31 +0200
changeset 9861
a248d0be1309
parent 9860
6c8e5745df03
child 9862
f162232da105

8229401: Fix JFR code cache test failures
8223689: Add JFR Thread Sampling Support
8223690: Add JFR BiasedLock Event Support
8223691: Add JFR G1 Region Type Change Event Support
8223692: Add JFR G1 Heap Summary Event Support
Summary: Backport JFR from JDK11, additional fixes
Reviewed-by: neugens, apetushkov
Contributed-by: denghui.ddh@alibaba-inc.com

src/share/vm/code/codeBlob.cpp file | annotate | diff | comparison | revisions
src/share/vm/code/codeBlob.hpp file | annotate | diff | comparison | revisions
src/share/vm/code/codeCache.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1HeapRegionTraceType.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/heapRegion.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/heapRegion.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/heapRegionTracer.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/heapRegionTracer.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/heapRegionType.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/heapRegionType.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcHeapSummary.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTraceSend.cpp file | annotate | diff | comparison | revisions
src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp file | annotate | diff | comparison | revisions
src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp file | annotate | diff | comparison | revisions
src/share/vm/jfr/support/jfrThreadExtension.hpp file | annotate | diff | comparison | revisions
src/share/vm/prims/whitebox.cpp file | annotate | diff | comparison | revisions
src/share/vm/prims/whitebox.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/biasedLocking.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/globals.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/globals.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/thread.hpp file | annotate | diff | comparison | revisions
     1.1 --- a/src/share/vm/code/codeBlob.cpp	Wed May 16 15:25:51 2018 +0200
     1.2 +++ b/src/share/vm/code/codeBlob.cpp	Mon Aug 19 10:11:31 2019 +0200
     1.3 @@ -58,7 +58,7 @@
     1.4  #include "c1/c1_Runtime1.hpp"
     1.5  #endif
     1.6  
     1.7 -unsigned int align_code_offset(int offset) {
     1.8 +unsigned int CodeBlob::align_code_offset(int offset) {
     1.9    // align the size to CodeEntryAlignment
    1.10    return
    1.11      ((offset + (int)CodeHeap::header_size() + (CodeEntryAlignment-1)) & ~(CodeEntryAlignment-1))
     2.1 --- a/src/share/vm/code/codeBlob.hpp	Wed May 16 15:25:51 2018 +0200
     2.2 +++ b/src/share/vm/code/codeBlob.hpp	Mon Aug 19 10:11:31 2019 +0200
     2.3 @@ -30,6 +30,15 @@
     2.4  #include "runtime/frame.hpp"
     2.5  #include "runtime/handles.hpp"
     2.6  
     2.7 +// CodeBlob Types
     2.8 +// Used in the CodeCache to assign CodeBlobs to different CodeHeaps
     2.9 +struct CodeBlobType {
    2.10 +  enum {
    2.11 +    All                 = 0,    // All types (No code cache segmentation)
    2.12 +    NumTypes            = 1     // Number of CodeBlobTypes
    2.13 +  };
    2.14 +};
    2.15 +
    2.16  // CodeBlob - superclass for all entries in the CodeCache.
    2.17  //
    2.18  // Suptypes are:
    2.19 @@ -71,6 +80,7 @@
    2.20   public:
    2.21    // Returns the space needed for CodeBlob
    2.22    static unsigned int allocation_size(CodeBuffer* cb, int header_size);
    2.23 +  static unsigned int align_code_offset(int offset);
    2.24  
    2.25    // Creation
    2.26    // a) simple CodeBlob
    2.27 @@ -205,6 +215,7 @@
    2.28    friend class AdapterBlob;
    2.29    friend class VtableBlob;
    2.30    friend class MethodHandlesAdapterBlob;
    2.31 +  friend class WhiteBox;
    2.32  
    2.33   private:
    2.34    // Creation support
     3.1 --- a/src/share/vm/code/codeCache.cpp	Wed May 16 15:25:51 2018 +0200
     3.2 +++ b/src/share/vm/code/codeCache.cpp	Mon Aug 19 10:11:31 2019 +0200
     3.3 @@ -189,6 +189,12 @@
     3.4      if (cb != NULL) break;
     3.5      if (!_heap->expand_by(CodeCacheExpansionSize)) {
     3.6        // Expansion failed
     3.7 +      if (CodeCache_lock->owned_by_self()) {
     3.8 +        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     3.9 +        report_codemem_full();
    3.10 +      } else {
    3.11 +        report_codemem_full();
    3.12 +      }
    3.13        return NULL;
    3.14      }
    3.15      if (PrintCodeCacheExtension) {
    3.16 @@ -780,6 +786,7 @@
    3.17    _codemem_full_count++;
    3.18    EventCodeCacheFull event;
    3.19    if (event.should_commit()) {
    3.20 +    event.set_codeBlobType((u1)CodeBlobType::All);
    3.21      event.set_startAddress((u8)low_bound());
    3.22      event.set_commitedTopAddress((u8)high());
    3.23      event.set_reservedTopAddress((u8)high_bound());
     4.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 16 15:25:51 2018 +0200
     4.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Aug 19 10:11:31 2019 +0200
     4.3 @@ -3572,6 +3572,28 @@
     4.4  }
     4.5  #endif // PRODUCT
     4.6  
     4.7 +G1HeapSummary G1CollectedHeap::create_g1_heap_summary() {
     4.8 +
     4.9 +  size_t eden_used_bytes = _young_list->eden_used_bytes();
    4.10 +  size_t survivor_used_bytes = _young_list->survivor_used_bytes();
    4.11 +  size_t heap_used = Heap_lock->owned_by_self() ? used() : used_unlocked();
    4.12 +
    4.13 +  size_t eden_capacity_bytes =
    4.14 +    (g1_policy()->young_list_target_length() * HeapRegion::GrainBytes) - survivor_used_bytes;
    4.15 +
    4.16 +  VirtualSpaceSummary heap_summary = create_heap_space_summary();
    4.17 +  return G1HeapSummary(heap_summary, heap_used, eden_used_bytes,
    4.18 +                       eden_capacity_bytes, survivor_used_bytes, num_regions());
    4.19 +}
    4.20 +
    4.21 +void G1CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
    4.22 +  const G1HeapSummary& heap_summary = create_g1_heap_summary();
    4.23 +  gc_tracer->report_gc_heap_summary(when, heap_summary);
    4.24 +
    4.25 +  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
    4.26 +  gc_tracer->report_metaspace_summary(when, metaspace_summary);
    4.27 +}
    4.28 +
    4.29  G1CollectedHeap* G1CollectedHeap::heap() {
    4.30    assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
    4.31           "not a garbage-first heap");
     5.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 16 15:25:51 2018 +0200
     5.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Aug 19 10:11:31 2019 +0200
     5.3 @@ -375,6 +375,8 @@
     5.4                                                           size_t size,
     5.5                                                           size_t translation_factor);
     5.6  
     5.7 +  void trace_heap(GCWhen::Type when, GCTracer* tracer);
     5.8 +
     5.9    double verify(bool guard, const char* msg);
    5.10    void verify_before_gc();
    5.11    void verify_after_gc();
    5.12 @@ -1621,6 +1623,8 @@
    5.13    bool is_obj_dead_cond(const oop obj,
    5.14                          const VerifyOption vo) const;
    5.15  
    5.16 +  G1HeapSummary create_g1_heap_summary();
    5.17 +
    5.18    // Printing
    5.19  
    5.20    virtual void print_on(outputStream* st) const;
     6.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.2 +++ b/src/share/vm/gc_implementation/g1/g1HeapRegionTraceType.hpp	Mon Aug 19 10:11:31 2019 +0200
     6.3 @@ -0,0 +1,56 @@
     6.4 +/*
     6.5 + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
     6.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.7 + *
     6.8 + * This code is free software; you can redistribute it and/or modify it
     6.9 + * under the terms of the GNU General Public License version 2 only, as
    6.10 + * published by the Free Software Foundation.
    6.11 + *
    6.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    6.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    6.15 + * version 2 for more details (a copy is included in the LICENSE file that
    6.16 + * accompanied this code).
    6.17 + *
    6.18 + * You should have received a copy of the GNU General Public License version
    6.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    6.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    6.21 + *
    6.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.23 + * or visit www.oracle.com if you need additional information or have any
    6.24 + * questions.
    6.25 + *
    6.26 + */
    6.27 +
    6.28 +#ifndef SHARE_GC_G1_G1HEAPREGIONTRACETYPE_HPP
    6.29 +#define SHARE_GC_G1_G1HEAPREGIONTRACETYPE_HPP
    6.30 +
    6.31 +#include "memory/allocation.hpp"
    6.32 +#include "utilities/debug.hpp"
    6.33 +
    6.34 +class G1HeapRegionTraceType : AllStatic {
    6.35 + public:
    6.36 +  enum Type {
    6.37 +    Free,
    6.38 +    Eden,
    6.39 +    Survivor,
    6.40 +    StartsHumongous,
    6.41 +    ContinuesHumongous,
    6.42 +    Old,
    6.43 +    G1HeapRegionTypeEndSentinel
    6.44 +  };
    6.45 +
    6.46 +  static const char* to_string(G1HeapRegionTraceType::Type type) {
    6.47 +    switch (type) {
    6.48 +      case Free:               return "Free";
    6.49 +      case Eden:               return "Eden";
    6.50 +      case Survivor:           return "Survivor";
    6.51 +      case StartsHumongous:    return "Starts Humongous";
    6.52 +      case ContinuesHumongous: return "Continues Humongous";
    6.53 +      case Old:                return "Old";
    6.54 +      default: ShouldNotReachHere(); return NULL;
    6.55 +    }
    6.56 +  }
    6.57 +};
    6.58 +
    6.59 +#endif // SHARE_GC_G1_G1HEAPREGIONTRACETYPE_HPP
     7.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Wed May 16 15:25:51 2018 +0200
     7.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Aug 19 10:11:31 2019 +0200
     7.3 @@ -37,6 +37,7 @@
     7.4  #include "memory/space.inline.hpp"
     7.5  #include "oops/oop.inline.hpp"
     7.6  #include "runtime/orderAccess.inline.hpp"
     7.7 +#include "gc_implementation/g1/heapRegionTracer.hpp"
     7.8  
     7.9  PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    7.10  
    7.11 @@ -211,6 +212,31 @@
    7.12    _gc_efficiency = (double) reclaimable_bytes() / region_elapsed_time_ms;
    7.13  }
    7.14  
    7.15 +void HeapRegion::set_free() {
    7.16 +  report_region_type_change(G1HeapRegionTraceType::Free);
    7.17 +  _type.set_free();
    7.18 +}
    7.19 +
    7.20 +void HeapRegion::set_eden() {
    7.21 +  report_region_type_change(G1HeapRegionTraceType::Eden);
    7.22 +  _type.set_eden();
    7.23 +}
    7.24 +
    7.25 +void HeapRegion::set_eden_pre_gc() {
    7.26 +  report_region_type_change(G1HeapRegionTraceType::Eden);
    7.27 +  _type.set_eden_pre_gc();
    7.28 +}
    7.29 +
    7.30 +void HeapRegion::set_survivor() {
    7.31 +  report_region_type_change(G1HeapRegionTraceType::Survivor);
    7.32 +  _type.set_survivor();
    7.33 +}
    7.34 +
    7.35 +void HeapRegion::set_old() {
    7.36 +  report_region_type_change(G1HeapRegionTraceType::Old);
    7.37 +  _type.set_old();
    7.38 +}
    7.39 +
    7.40  void HeapRegion::set_startsHumongous(HeapWord* new_top, HeapWord* new_end) {
    7.41    assert(!isHumongous(), "sanity / pre-condition");
    7.42    assert(end() == _orig_end,
    7.43 @@ -218,6 +244,7 @@
    7.44    assert(top() == bottom(), "should be empty");
    7.45    assert(bottom() <= new_top && new_top <= new_end, "pre-condition");
    7.46  
    7.47 +  report_region_type_change(G1HeapRegionTraceType::StartsHumongous);
    7.48    _type.set_starts_humongous();
    7.49    _humongous_start_region = this;
    7.50  
    7.51 @@ -232,6 +259,7 @@
    7.52    assert(top() == bottom(), "should be empty");
    7.53    assert(first_hr->startsHumongous(), "pre-condition");
    7.54  
    7.55 +  report_region_type_change(G1HeapRegionTraceType::ContinuesHumongous);
    7.56    _type.set_continues_humongous();
    7.57    _humongous_start_region = first_hr;
    7.58  }
    7.59 @@ -303,6 +331,14 @@
    7.60    record_timestamp();
    7.61  }
    7.62  
    7.63 +void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
    7.64 +  HeapRegionTracer::send_region_type_change(_hrm_index,
    7.65 +                                            get_trace_type(),
    7.66 +                                            to,
    7.67 +                                            (uintptr_t)bottom(),
    7.68 +                                            used());
    7.69 +}
    7.70 +
    7.71  CompactibleSpace* HeapRegion::next_compaction_space() const {
    7.72    return G1CollectedHeap::heap()->next_compaction_region(this);
    7.73  }
     8.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Wed May 16 15:25:51 2018 +0200
     8.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Aug 19 10:11:31 2019 +0200
     8.3 @@ -35,6 +35,7 @@
     8.4  #include "memory/space.inline.hpp"
     8.5  #include "memory/watermark.hpp"
     8.6  #include "utilities/macros.hpp"
     8.7 +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
     8.8  
     8.9  // A HeapRegion is the smallest piece of a G1CollectedHeap that
    8.10  // can be collected independently.
    8.11 @@ -211,6 +212,8 @@
    8.12  
    8.13    G1BlockOffsetArrayContigSpace* offsets() { return &_offsets; }
    8.14  
    8.15 +  void report_region_type_change(G1HeapRegionTraceType::Type to);
    8.16 +
    8.17   protected:
    8.18    // The index of this region in the heap region sequence.
    8.19    uint  _hrm_index;
    8.20 @@ -405,6 +408,7 @@
    8.21  
    8.22    const char* get_type_str() const { return _type.get_str(); }
    8.23    const char* get_short_type_str() const { return _type.get_short_str(); }
    8.24 +  G1HeapRegionTraceType::Type get_trace_type() { return _type.get_trace_type(); }
    8.25  
    8.26    bool is_free() const { return _type.is_free(); }
    8.27  
    8.28 @@ -667,13 +671,13 @@
    8.29      }
    8.30    }
    8.31  
    8.32 -  void set_free() { _type.set_free(); }
    8.33 +  void set_free();
    8.34  
    8.35 -  void set_eden()        { _type.set_eden();        }
    8.36 -  void set_eden_pre_gc() { _type.set_eden_pre_gc(); }
    8.37 -  void set_survivor()    { _type.set_survivor();    }
    8.38 +  void set_eden();
    8.39 +  void set_eden_pre_gc();
    8.40 +  void set_survivor();
    8.41  
    8.42 -  void set_old() { _type.set_old(); }
    8.43 +  void set_old();
    8.44  
    8.45    // Determine if an object has been allocated since the last
    8.46    // mark performed by the collector. This returns true iff the object
     9.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionTracer.cpp	Mon Aug 19 10:11:31 2019 +0200
     9.3 @@ -0,0 +1,43 @@
     9.4 +/*
     9.5 + * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
     9.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.7 + *
     9.8 + * This code is free software; you can redistribute it and/or modify it
     9.9 + * under the terms of the GNU General Public License version 2 only, as
    9.10 + * published by the Free Software Foundation.
    9.11 + *
    9.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    9.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    9.15 + * version 2 for more details (a copy is included in the LICENSE file that
    9.16 + * accompanied this code).
    9.17 + *
    9.18 + * You should have received a copy of the GNU General Public License version
    9.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    9.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 + *
    9.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    9.23 + * or visit www.oracle.com if you need additional information or have any
    9.24 + * questions.
    9.25 + *
    9.26 + */
    9.27 +
    9.28 +#include "precompiled.hpp"
    9.29 +#include "gc_implementation/g1/heapRegionTracer.hpp"
    9.30 +#include "jfr/jfrEvents.hpp"
    9.31 +
    9.32 +void HeapRegionTracer::send_region_type_change(uint index,
    9.33 +                                               G1HeapRegionTraceType::Type from,
    9.34 +                                               G1HeapRegionTraceType::Type to,
    9.35 +                                               uintptr_t start,
    9.36 +                                               size_t used) {
    9.37 +  EventG1HeapRegionTypeChange e;
    9.38 +  if (e.should_commit()) {
    9.39 +    e.set_index(index);
    9.40 +    e.set_from(from);
    9.41 +    e.set_to(to);
    9.42 +    e.set_start(start);
    9.43 +    e.set_used(used);
    9.44 +    e.commit();
    9.45 +  }
    9.46 +}
    10.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionTracer.hpp	Mon Aug 19 10:11:31 2019 +0200
    10.3 @@ -0,0 +1,40 @@
    10.4 +/*
    10.5 + * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
    10.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.7 + *
    10.8 + * This code is free software; you can redistribute it and/or modify it
    10.9 + * under the terms of the GNU General Public License version 2 only, as
   10.10 + * published by the Free Software Foundation.
   10.11 + *
   10.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   10.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   10.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   10.15 + * version 2 for more details (a copy is included in the LICENSE file that
   10.16 + * accompanied this code).
   10.17 + *
   10.18 + * You should have received a copy of the GNU General Public License version
   10.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   10.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   10.21 + *
   10.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   10.23 + * or visit www.oracle.com if you need additional information or have any
   10.24 + * questions.
   10.25 + *
   10.26 + */
   10.27 +
   10.28 +#ifndef SHARE_GC_G1_HEAPREGIONTRACER_HPP
   10.29 +#define SHARE_GC_G1_HEAPREGIONTRACER_HPP
   10.30 +
   10.31 +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
   10.32 +#include "memory/allocation.hpp"
   10.33 +
   10.34 +class HeapRegionTracer : AllStatic {
   10.35 +  public:
   10.36 +    static void send_region_type_change(uint index,
   10.37 +                                        G1HeapRegionTraceType::Type from,
   10.38 +                                        G1HeapRegionTraceType::Type to,
   10.39 +                                        uintptr_t start,
   10.40 +                                        size_t used);
   10.41 +};
   10.42 +
   10.43 +#endif // SHARE_GC_G1_HEAPREGIONTRACER_HPP
    11.1 --- a/src/share/vm/gc_implementation/g1/heapRegionType.cpp	Wed May 16 15:25:51 2018 +0200
    11.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionType.cpp	Mon Aug 19 10:11:31 2019 +0200
    11.3 @@ -67,3 +67,18 @@
    11.4    // keep some compilers happy
    11.5    return NULL;
    11.6  }
    11.7 +
    11.8 +G1HeapRegionTraceType::Type HeapRegionType::get_trace_type() {
    11.9 +  hrt_assert_is_valid(_tag);
   11.10 +  switch (_tag) {
   11.11 +    case FreeTag:               return G1HeapRegionTraceType::Free;
   11.12 +    case EdenTag:               return G1HeapRegionTraceType::Eden;
   11.13 +    case SurvTag:               return G1HeapRegionTraceType::Survivor;
   11.14 +    case HumStartsTag:          return G1HeapRegionTraceType::StartsHumongous;
   11.15 +    case HumContTag:            return G1HeapRegionTraceType::ContinuesHumongous;
   11.16 +    case OldTag:                return G1HeapRegionTraceType::Old;
   11.17 +    default:
   11.18 +      ShouldNotReachHere();
   11.19 +      return G1HeapRegionTraceType::Free; // keep some compilers happy
   11.20 +  }
   11.21 +}
    12.1 --- a/src/share/vm/gc_implementation/g1/heapRegionType.hpp	Wed May 16 15:25:51 2018 +0200
    12.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionType.hpp	Mon Aug 19 10:11:31 2019 +0200
    12.3 @@ -26,6 +26,7 @@
    12.4  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONTYPE_HPP
    12.5  
    12.6  #include "memory/allocation.hpp"
    12.7 +#include "gc_implementation/g1/g1HeapRegionTraceType.hpp"
    12.8  
    12.9  #define hrt_assert_is_valid(tag) \
   12.10    assert(is_valid((tag)), err_msg("invalid HR type: %u", (uint) (tag)))
   12.11 @@ -127,6 +128,7 @@
   12.12  
   12.13    const char* get_str() const;
   12.14    const char* get_short_str() const;
   12.15 +  G1HeapRegionTraceType::Type get_trace_type();
   12.16  
   12.17    HeapRegionType() : _tag(FreeTag) { hrt_assert_is_valid(_tag); }
   12.18  };
    13.1 --- a/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp	Wed May 16 15:25:51 2018 +0200
    13.2 +++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp	Mon Aug 19 10:11:31 2019 +0200
    13.3 @@ -78,11 +78,13 @@
    13.4  
    13.5  class GCHeapSummary;
    13.6  class PSHeapSummary;
    13.7 +class G1HeapSummary;
    13.8  
    13.9  class GCHeapSummaryVisitor {
   13.10   public:
   13.11    virtual void visit(const GCHeapSummary* heap_summary) const = 0;
   13.12    virtual void visit(const PSHeapSummary* heap_summary) const {}
   13.13 +  virtual void visit(const G1HeapSummary* heap_summary) const {}
   13.14  };
   13.15  
   13.16  class GCHeapSummary : public StackObj {
   13.17 @@ -125,6 +127,24 @@
   13.18     }
   13.19  };
   13.20  
   13.21 +class G1HeapSummary : public GCHeapSummary {
   13.22 +  size_t  _edenUsed;
   13.23 +  size_t  _edenCapacity;
   13.24 +  size_t  _survivorUsed;
   13.25 +  uint    _numberOfRegions;
   13.26 + public:
   13.27 +   G1HeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, size_t edenUsed, size_t edenCapacity, size_t survivorUsed, uint numberOfRegions) :
   13.28 +      GCHeapSummary(heap_space, heap_used), _edenUsed(edenUsed), _edenCapacity(edenCapacity), _survivorUsed(survivorUsed), _numberOfRegions(numberOfRegions) { }
   13.29 +   const size_t edenUsed() const { return _edenUsed; }
   13.30 +   const size_t edenCapacity() const { return _edenCapacity; }
   13.31 +   const size_t survivorUsed() const { return _survivorUsed; }
   13.32 +   const uint   numberOfRegions() const { return _numberOfRegions; }
   13.33 +
   13.34 +   virtual void accept(GCHeapSummaryVisitor* visitor) const {
   13.35 +     visitor->visit(this);
   13.36 +   }
   13.37 +};
   13.38 +
   13.39  class MetaspaceSummary : public StackObj {
   13.40    size_t _capacity_until_GC;
   13.41    MetaspaceSizes _meta_space;
    14.1 --- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Wed May 16 15:25:51 2018 +0200
    14.2 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Mon Aug 19 10:11:31 2019 +0200
    14.3 @@ -349,20 +349,20 @@
    14.4      }
    14.5    }
    14.6  
    14.7 -//  void visit(const G1HeapSummary* g1_heap_summary) const {
    14.8 -//    visit((GCHeapSummary*)g1_heap_summary);
    14.9 -//
   14.10 -//    EventG1HeapSummary e;
   14.11 -//    if (e.should_commit()) {
   14.12 -//      e.set_gcId(_shared_gc_info.gc_id().id());
   14.13 -//      e.set_when((u1)_when);
   14.14 -//      e.set_edenUsedSize(g1_heap_summary->edenUsed());
   14.15 -//      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
   14.16 -//      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
   14.17 -//      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
   14.18 -//      e.commit();
   14.19 -//    }
   14.20 -//  }
   14.21 +  void visit(const G1HeapSummary* g1_heap_summary) const {
   14.22 +    visit((GCHeapSummary*)g1_heap_summary);
   14.23 +
   14.24 +    EventG1HeapSummary e;
   14.25 +    if (e.should_commit()) {
   14.26 +      e.set_gcId(_gc_id.id());
   14.27 +      e.set_when((u1)_when);
   14.28 +      e.set_edenUsedSize(g1_heap_summary->edenUsed());
   14.29 +      e.set_edenTotalSize(g1_heap_summary->edenCapacity());
   14.30 +      e.set_survivorUsedSize(g1_heap_summary->survivorUsed());
   14.31 +      e.set_numberOfRegions(g1_heap_summary->numberOfRegions());
   14.32 +      e.commit();
   14.33 +    }
   14.34 +  }
   14.35  
   14.36    void visit(const PSHeapSummary* ps_heap_summary) const {
   14.37      visit((GCHeapSummary*)ps_heap_summary);
    15.1 --- a/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp	Wed May 16 15:25:51 2018 +0200
    15.2 +++ b/src/share/vm/jfr/periodic/sampling/jfrThreadSampler.cpp	Mon Aug 19 10:11:31 2019 +0200
    15.3 @@ -321,7 +321,8 @@
    15.4    volatile bool _disenrolled;
    15.5    static Monitor* _transition_block_lock;
    15.6  
    15.7 -//  JavaThread* next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current);
    15.8 +  int find_index_of_JavaThread(JavaThread** t_list, uint length, JavaThread *target);
    15.9 +  JavaThread* next_thread(JavaThread** t_list, uint length, JavaThread* first_sampled, JavaThread* current);
   15.10    void task_stacktrace(JfrSampleType type, JavaThread** last_thread);
   15.11    JfrThreadSampler(size_t interval_java, size_t interval_native, u4 max_frames);
   15.12    ~JfrThreadSampler();
   15.13 @@ -344,7 +345,7 @@
   15.14  Monitor* JfrThreadSampler::_transition_block_lock = new Monitor(Mutex::leaf, "Trace block", true);
   15.15  
   15.16  static void clear_transition_block(JavaThread* jt) {
   15.17 -//  jt->clear_trace_flag();
   15.18 +  jt->clear_trace_flag();
   15.19    JfrThreadLocal* const tl = jt->jfr_thread_local();
   15.20    if (tl->is_trace_block()) {
   15.21      MutexLockerEx ml(JfrThreadSampler::transition_block(), Mutex::_no_safepoint_check_flag);
   15.22 @@ -359,7 +360,7 @@
   15.23    }
   15.24  
   15.25    bool ret = false;
   15.26 -//  thread->set_trace_flag();
   15.27 +  thread->set_trace_flag();
   15.28    if (!UseMembar) {
   15.29      os::serialize_thread_states();
   15.30    }
   15.31 @@ -398,37 +399,61 @@
   15.32    JfrThreadLocal* const tl = thread->jfr_thread_local();
   15.33    tl->set_trace_block();
   15.34    {
   15.35 -//    MutexLockerEx ml(transition_block(), Mutex::_no_safepoint_check_flag);
   15.36 -//    while (thread->is_trace_suspend()) {
   15.37 -//      transition_block()->wait(true);
   15.38 -//    }
   15.39 -//    tl->clear_trace_block();
   15.40 +    MutexLockerEx ml(transition_block(), Mutex::_no_safepoint_check_flag);
   15.41 +    while (thread->is_trace_suspend()) {
   15.42 +      transition_block()->wait(true);
   15.43 +    }
   15.44 +    tl->clear_trace_block();
   15.45    }
   15.46  }
   15.47  
   15.48 -//JavaThread* JfrThreadSampler::next_thread(ThreadsList* t_list, JavaThread* first_sampled, JavaThread* current) {
   15.49 -//  assert(t_list != NULL, "invariant");
   15.50 -//  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
   15.51 -//  assert(_cur_index >= -1 && (uint)_cur_index + 1 <= t_list->length(), "invariant");
   15.52 -//  assert((current == NULL && -1 == _cur_index) || (t_list->find_index_of_JavaThread(current) == _cur_index), "invariant");
   15.53 -//  if ((uint)_cur_index + 1 == t_list->length()) {
   15.54 -//    // wrap
   15.55 -//    _cur_index = 0;
   15.56 -//  } else {
   15.57 -//    _cur_index++;
   15.58 -//  }
   15.59 -//  assert(_cur_index >= 0 && (uint)_cur_index < t_list->length(), "invariant");
   15.60 -//  JavaThread* const next = t_list->thread_at(_cur_index);
   15.61 -//  return next != first_sampled ? next : NULL;
   15.62 -//}
   15.63 +int JfrThreadSampler::find_index_of_JavaThread(JavaThread** t_list, uint length, JavaThread *target) {
   15.64 +  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
   15.65 +  if (target == NULL) {
   15.66 +    return -1;
   15.67 +  }
   15.68 +  for (uint i = 0; i < length; i++) {
   15.69 +    if (target == t_list[i]) {
   15.70 +      return (int)i;
   15.71 +    }
   15.72 +  }
   15.73 +  return -1;
   15.74 +}
   15.75 +
   15.76 +JavaThread* JfrThreadSampler::next_thread(JavaThread** t_list, uint length, JavaThread* first_sampled, JavaThread* current) {
   15.77 +  assert(Threads_lock->owned_by_self(), "Holding the thread table lock.");
   15.78 +  if (current == NULL) {
   15.79 +    _cur_index = 0;
   15.80 +    return t_list[_cur_index];
   15.81 +  }
   15.82 +
   15.83 +  if (_cur_index == -1 || t_list[_cur_index] != current) {
   15.84 +    // 'current' is not at '_cur_index' so find it:
   15.85 +    _cur_index = find_index_of_JavaThread(t_list, length, current);
   15.86 +    assert(_cur_index != -1, "current JavaThread should be findable.");
   15.87 +  }
   15.88 +  _cur_index++;
   15.89 +
   15.90 +  JavaThread* next = NULL;
   15.91 +  // wrap
   15.92 +  if ((uint)_cur_index >= length) {
   15.93 +    _cur_index = 0;
   15.94 +  }
   15.95 +  next = t_list[_cur_index];
   15.96 +
   15.97 +  // sample wrap
   15.98 +  if (next == first_sampled) {
   15.99 +    return NULL;
  15.100 +  }
  15.101 +  return next;
  15.102 +}
  15.103  
  15.104  void JfrThreadSampler::start_thread() {
  15.105 -  // XXX TODO implement sampling
  15.106 -//  if (os::create_thread(this, os::os_thread)) {
  15.107 -//    os::start_thread(this);
  15.108 -//  } else {
  15.109 -//    if (true) tty->print_cr("Failed to create thread for thread sampling");
  15.110 -//  }
  15.111 +  if (os::create_thread(this, os::os_thread)) {
  15.112 +    os::start_thread(this);
  15.113 +  } else {
  15.114 +    tty->print_cr("Failed to create thread for thread sampling");
  15.115 +  }
  15.116  }
  15.117  
  15.118  void JfrThreadSampler::enroll() {
  15.119 @@ -510,28 +535,33 @@
  15.120      elapsedTimer sample_time;
  15.121      sample_time.start();
  15.122      {
  15.123 -//      MonitorLockerEx tlock(Threads_lock, Mutex::_allow_vm_block_flag);
  15.124 -//      ThreadsListHandle tlh;
  15.125 -//      // Resolve a sample session relative start position index into the thread list array.
  15.126 -//      // In cases where the last sampled thread is NULL or not-NULL but stale, find_index() returns -1.
  15.127 -//      _cur_index = tlh.list()->find_index_of_JavaThread(*last_thread);
  15.128 -//      JavaThread* current = _cur_index != -1 ? *last_thread : NULL;
  15.129 -//
  15.130 -//      while (num_sample_attempts < sample_limit) {
  15.131 -//        current = next_thread(tlh.list(), start, current);
  15.132 -//        if (current == NULL) {
  15.133 -//          break;
  15.134 -//        }
  15.135 -//        if (start == NULL) {
  15.136 -//          start = current;  // remember the thread where we started to attempt sampling
  15.137 -//        }
  15.138 -//        if (current->is_Compiler_thread()) {
  15.139 -//          continue;
  15.140 -//        }
  15.141 -//        sample_task.do_sample_thread(current, _frames, _max_frames, type);
  15.142 -//        num_sample_attempts++;
  15.143 -//      }
  15.144 -//      *last_thread = current;  // remember the thread we last attempted to sample
  15.145 +      MonitorLockerEx tlock(Threads_lock, Mutex::_allow_vm_block_flag);
  15.146 +      int max_threads = Threads::number_of_threads();
  15.147 +      assert(max_threads > 0, "Threads list is empty");
  15.148 +      uint index = 0;
  15.149 +      JavaThread** threads_list = NEW_C_HEAP_ARRAY(JavaThread *, max_threads, mtInternal);
  15.150 +      for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
  15.151 +        threads_list[index++] = tp;
  15.152 +      }
  15.153 +      JavaThread* current = Threads::includes(*last_thread) ? *last_thread : NULL;
  15.154 +      JavaThread* start = NULL;
  15.155 +
  15.156 +      while (num_sample_attempts < sample_limit) {
  15.157 +        current = next_thread(threads_list, index, start, current);
  15.158 +        if (current == NULL) {
  15.159 +          break;
  15.160 +        }
  15.161 +        if (start == NULL) {
  15.162 +          start = current;  // remember the thread where we started to attempt sampling
  15.163 +        }
  15.164 +        if (current->is_Compiler_thread()) {
  15.165 +          continue;
  15.166 +        }
  15.167 +        sample_task.do_sample_thread(current, _frames, _max_frames, type);
  15.168 +        num_sample_attempts++;
  15.169 +      }
  15.170 +      *last_thread = current;  // remember the thread we last attempted to sample
  15.171 +      FREE_C_HEAP_ARRAY(JavaThread *, threads_list, mtInternal);
  15.172      }
  15.173      sample_time.stop();
  15.174      if (LogJFR && Verbose) tty->print_cr("JFR thread sampling done in %3.7f secs with %d java %d native samples",
    16.1 --- a/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Wed May 16 15:25:51 2018 +0200
    16.2 +++ b/src/share/vm/jfr/recorder/checkpoint/types/jfrType.cpp	Mon Aug 19 10:11:31 2019 +0200
    16.3 @@ -188,13 +188,12 @@
    16.4  }
    16.5  
    16.6  void G1HeapRegionTypeConstant::serialize(JfrCheckpointWriter& writer) {
    16.7 -  // XXX TODO?
    16.8 -//  static const u4 nof_entries = G1HeapRegionTraceType::G1HeapRegionTypeEndSentinel;
    16.9 -//  writer.write_count(nof_entries);
   16.10 -//  for (u4 i = 0; i < nof_entries; ++i) {
   16.11 -//    writer.write_key(i);
   16.12 -//    writer.write(G1HeapRegionTraceType::to_string((G1HeapRegionTraceType::Type)i));
   16.13 -//  }
   16.14 +  static const u4 nof_entries = G1HeapRegionTraceType::G1HeapRegionTypeEndSentinel;
   16.15 +  writer.write_count(nof_entries);
   16.16 +  for (u4 i = 0; i < nof_entries; ++i) {
   16.17 +    writer.write_key(i);
   16.18 +    writer.write(G1HeapRegionTraceType::to_string((G1HeapRegionTraceType::Type)i));
   16.19 +  }
   16.20  }
   16.21  
   16.22  void GCThresholdUpdaterConstant::serialize(JfrCheckpointWriter& writer) {
   16.23 @@ -279,13 +278,10 @@
   16.24  }
   16.25  
   16.26  void CodeBlobTypeConstant::serialize(JfrCheckpointWriter& writer) {
   16.27 -  // XXX no code blob types. need to send any stub value?
   16.28 -//  static const u4 nof_entries = CodeBlobType::NumTypes;
   16.29 -//  writer.write_count(nof_entries);
   16.30 -//  for (u4 i = 0; i < nof_entries; ++i) {
   16.31 -//    writer.write_key(i);
   16.32 -//    writer.write(CodeCache::get_code_heap_name(i));
   16.33 -//  }
   16.34 +  static const u4 nof_entries = CodeBlobType::NumTypes;
   16.35 +  writer.write_count(nof_entries);
   16.36 +  writer.write_key((u4)CodeBlobType::All);
   16.37 +  writer.write("CodeCache");
   16.38  };
   16.39  
   16.40  void VMOperationTypeConstant::serialize(JfrCheckpointWriter& writer) {
    17.1 --- a/src/share/vm/jfr/support/jfrThreadExtension.hpp	Wed May 16 15:25:51 2018 +0200
    17.2 +++ b/src/share/vm/jfr/support/jfrThreadExtension.hpp	Mon Aug 19 10:11:31 2019 +0200
    17.3 @@ -46,7 +46,11 @@
    17.4  #define THREAD_LOCAL_WRITER_OFFSET_JFR \
    17.5    JfrThreadLocal::java_event_writer_offset() + THREAD_LOCAL_OFFSET_JFR
    17.6  
    17.7 -// XXX consider implementing thread suspend tracing
    17.8 -#define SUSPEND_THREAD_CONDITIONAL(thread) if (false/*(thread)->is_trace_suspend()*/) JfrThreadSampling::on_javathread_suspend(thread)
    17.9 +#define DEFINE_TRACE_SUSPEND_FLAG_METHODS \
   17.10 +  void set_trace_flag() { set_suspend_flag(_trace_flag); } \
   17.11 +  void clear_trace_flag() { clear_suspend_flag(_trace_flag); } \
   17.12 +  bool is_trace_suspend() { return (_suspend_flags & _trace_flag) != 0; }
   17.13 +
   17.14 +#define SUSPEND_THREAD_CONDITIONAL(thread) if ((thread)->is_trace_suspend()) JfrThreadSampling::on_javathread_suspend(thread)
   17.15  
   17.16  #endif // SHARE_VM_JFR_SUPPORT_JFRTHREADEXTENSION_HPP
    18.1 --- a/src/share/vm/prims/whitebox.cpp	Wed May 16 15:25:51 2018 +0200
    18.2 +++ b/src/share/vm/prims/whitebox.cpp	Mon Aug 19 10:11:31 2019 +0200
    18.3 @@ -40,6 +40,7 @@
    18.4  #include "runtime/interfaceSupport.hpp"
    18.5  #include "runtime/os.hpp"
    18.6  #include "utilities/array.hpp"
    18.7 +#include "utilities/align.hpp"
    18.8  #include "utilities/debug.hpp"
    18.9  #include "utilities/macros.hpp"
   18.10  #include "utilities/exceptions.hpp"
   18.11 @@ -653,13 +654,13 @@
   18.12  WB_END
   18.13  
   18.14  template <typename T>
   18.15 -static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*)) {
   18.16 +static bool GetVMFlag(JavaThread* thread, JNIEnv* env, jstring name, T* value, bool (*TAt)(const char*, T*, bool, bool)) {
   18.17    if (name == NULL) {
   18.18      return false;
   18.19    }
   18.20    ThreadToNativeFromVM ttnfv(thread);   // can't be in VM when we call JNI
   18.21    const char* flag_name = env->GetStringUTFChars(name, NULL);
   18.22 -  bool result = (*TAt)(flag_name, value);
   18.23 +  bool result = (*TAt)(flag_name, value, true, true);
   18.24    env->ReleaseStringUTFChars(name, flag_name);
   18.25    return result;
   18.26  }
   18.27 @@ -851,6 +852,47 @@
   18.28    return features_string;
   18.29  WB_END
   18.30  
   18.31 +int WhiteBox::get_blob_type(const CodeBlob* code) {
   18.32 +  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
   18.33 +  return CodeBlobType::All;;
   18.34 +}
   18.35 +
   18.36 +struct CodeBlobStub {
   18.37 +  CodeBlobStub(const CodeBlob* blob) :
   18.38 +      name(os::strdup(blob->name())),
   18.39 +      size(blob->size()),
   18.40 +      blob_type(WhiteBox::get_blob_type(blob)),
   18.41 +      address((jlong) blob) { }
   18.42 +  ~CodeBlobStub() { os::free((void*) name); }
   18.43 +  const char* const name;
   18.44 +  const jint        size;
   18.45 +  const jint        blob_type;
   18.46 +  const jlong       address;
   18.47 +};
   18.48 +
   18.49 +static jobjectArray codeBlob2objectArray(JavaThread* thread, JNIEnv* env, CodeBlobStub* cb) {
   18.50 +  jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string());
   18.51 +  CHECK_JNI_EXCEPTION_(env, NULL);
   18.52 +  jobjectArray result = env->NewObjectArray(4, clazz, NULL);
   18.53 +
   18.54 +  jstring name = env->NewStringUTF(cb->name);
   18.55 +  CHECK_JNI_EXCEPTION_(env, NULL);
   18.56 +  env->SetObjectArrayElement(result, 0, name);
   18.57 +
   18.58 +  jobject obj = integerBox(thread, env, cb->size);
   18.59 +  CHECK_JNI_EXCEPTION_(env, NULL);
   18.60 +  env->SetObjectArrayElement(result, 1, obj);
   18.61 +
   18.62 +  obj = integerBox(thread, env, cb->blob_type);
   18.63 +  CHECK_JNI_EXCEPTION_(env, NULL);
   18.64 +  env->SetObjectArrayElement(result, 2, obj);
   18.65 +
   18.66 +  obj = longBox(thread, env, cb->address);
   18.67 +  CHECK_JNI_EXCEPTION_(env, NULL);
   18.68 +  env->SetObjectArrayElement(result, 3, obj);
   18.69 +
   18.70 +  return result;
   18.71 +}
   18.72  
   18.73  WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jboolean is_osr))
   18.74    ResourceMark rm(THREAD);
   18.75 @@ -888,6 +930,47 @@
   18.76    return result;
   18.77  WB_END
   18.78  
   18.79 +CodeBlob* WhiteBox::allocate_code_blob(int size, int blob_type) {
   18.80 +  guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled");
   18.81 +  BufferBlob* blob;
   18.82 +  int full_size = CodeBlob::align_code_offset(sizeof(BufferBlob));
   18.83 +  if (full_size < size) {
   18.84 +    full_size += align_up(size - full_size, oopSize);
   18.85 +  }
   18.86 +  {
   18.87 +    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   18.88 +    blob = (BufferBlob*) CodeCache::allocate(full_size);
   18.89 +    ::new (blob) BufferBlob("WB::DummyBlob", full_size);
   18.90 +  }
   18.91 +  // Track memory usage statistic after releasing CodeCache_lock
   18.92 +  MemoryService::track_code_cache_memory_usage();
   18.93 +  return blob;
   18.94 +}
   18.95 +
   18.96 +WB_ENTRY(jlong, WB_AllocateCodeBlob(JNIEnv* env, jobject o, jint size, jint blob_type))
   18.97 +  if (size < 0) {
   18.98 +    THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
   18.99 +      err_msg("WB_AllocateCodeBlob: size is negative: " INT32_FORMAT, size));
  18.100 +  }
  18.101 +  return (jlong) WhiteBox::allocate_code_blob(size, blob_type);
  18.102 +WB_END
  18.103 +
  18.104 +WB_ENTRY(void, WB_FreeCodeBlob(JNIEnv* env, jobject o, jlong addr))
  18.105 +  if (addr == 0) {
  18.106 +    return;
  18.107 +  }
  18.108 +  BufferBlob::free((BufferBlob*) addr);
  18.109 +WB_END
  18.110 +
  18.111 +WB_ENTRY(jobjectArray, WB_GetCodeBlob(JNIEnv* env, jobject o, jlong addr))
  18.112 +  if (addr == 0) {
  18.113 +    THROW_MSG_NULL(vmSymbols::java_lang_NullPointerException(),
  18.114 +      "WB_GetCodeBlob: addr is null");
  18.115 +  }
  18.116 +  ThreadToNativeFromVM ttn(thread);
  18.117 +  CodeBlobStub stub((CodeBlob*) addr);
  18.118 +  return codeBlob2objectArray(thread, env, &stub);
  18.119 +WB_END
  18.120  
  18.121  int WhiteBox::array_bytes_to_length(size_t bytes) {
  18.122    return Array<u1>::bytes_to_length(bytes);
  18.123 @@ -1167,6 +1250,9 @@
  18.124    {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
  18.125    {CC"youngGC",  CC"()V",                             (void*)&WB_YoungGC },
  18.126    {CC"readReservedMemory", CC"()V",                   (void*)&WB_ReadReservedMemory },
  18.127 +  {CC"allocateCodeBlob",   CC"(II)J",                 (void*)&WB_AllocateCodeBlob   },
  18.128 +  {CC"freeCodeBlob",       CC"(J)V",                  (void*)&WB_FreeCodeBlob       },
  18.129 +  {CC"getCodeBlob",        CC"(J)[Ljava/lang/Object;",(void*)&WB_GetCodeBlob        },
  18.130    {CC"allocateMetaspace",
  18.131       CC"(Ljava/lang/ClassLoader;J)J",                 (void*)&WB_AllocateMetaspace },
  18.132    {CC"freeMetaspace",
    19.1 --- a/src/share/vm/prims/whitebox.hpp	Wed May 16 15:25:51 2018 +0200
    19.2 +++ b/src/share/vm/prims/whitebox.hpp	Mon Aug 19 10:11:31 2019 +0200
    19.3 @@ -64,7 +64,8 @@
    19.4      Symbol* signature_symbol);
    19.5    static const char* lookup_jstring(const char* field_name, oop object);
    19.6    static bool lookup_bool(const char* field_name, oop object);
    19.7 -
    19.8 +  static int get_blob_type(const CodeBlob* code);
    19.9 +  static CodeBlob* allocate_code_blob(int size, int blob_type);
   19.10    static int array_bytes_to_length(size_t bytes);
   19.11    static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
   19.12      JNINativeMethod* method_array, int method_count);
    20.1 --- a/src/share/vm/runtime/biasedLocking.cpp	Wed May 16 15:25:51 2018 +0200
    20.2 +++ b/src/share/vm/runtime/biasedLocking.cpp	Mon Aug 19 10:11:31 2019 +0200
    20.3 @@ -31,6 +31,8 @@
    20.4  #include "runtime/vframe.hpp"
    20.5  #include "runtime/vmThread.hpp"
    20.6  #include "runtime/vm_operations.hpp"
    20.7 +#include "jfr/support/jfrThreadId.hpp"
    20.8 +#include "jfr/jfrEvents.hpp"
    20.9  
   20.10  static bool _biased_locking_enabled = false;
   20.11  BiasedLockingCounters BiasedLocking::_counters;
   20.12 @@ -142,8 +144,9 @@
   20.13    return info;
   20.14  }
   20.15  
   20.16 -
   20.17 -static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
   20.18 +// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
   20.19 +// AND it is a living thread. Otherwise it will not be updated (i.e., the caller is responsible for initialization).
   20.20 +static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
   20.21    markOop mark = obj->mark();
   20.22    if (!mark->has_bias_pattern()) {
   20.23      if (TraceBiasedLocking) {
   20.24 @@ -253,6 +256,11 @@
   20.25      }
   20.26    }
   20.27  
   20.28 +  // If requested, return information on which thread held the bias
   20.29 +  if (biased_locker != NULL) {
   20.30 +    *biased_locker = biased_thread;
   20.31 +  }
   20.32 +
   20.33    return BiasedLocking::BIAS_REVOKED;
   20.34  }
   20.35  
   20.36 @@ -373,7 +381,7 @@
   20.37  
   20.38      // At this point we're done. All we have to do is potentially
   20.39      // adjust the header of the given object to revoke its bias.
   20.40 -    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
   20.41 +    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
   20.42    } else {
   20.43      if (TraceBiasedLocking) {
   20.44        ResourceMark rm;
   20.45 @@ -395,14 +403,14 @@
   20.46          oop owner = mon_info->owner();
   20.47          markOop mark = owner->mark();
   20.48          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
   20.49 -          revoke_bias(owner, false, true, requesting_thread);
   20.50 +          revoke_bias(owner, false, true, requesting_thread, NULL);
   20.51          }
   20.52        }
   20.53      }
   20.54  
   20.55      // Must force the bias of the passed object to be forcibly revoked
   20.56      // as well to ensure guarantees to callers
   20.57 -    revoke_bias(o, false, true, requesting_thread);
   20.58 +    revoke_bias(o, false, true, requesting_thread, NULL);
   20.59    }
   20.60  
   20.61    if (TraceBiasedLocking) {
   20.62 @@ -445,19 +453,22 @@
   20.63    GrowableArray<Handle>* _objs;
   20.64    JavaThread* _requesting_thread;
   20.65    BiasedLocking::Condition _status_code;
   20.66 +  traceid _biased_locker_id;
   20.67  
   20.68  public:
   20.69    VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
   20.70      : _obj(obj)
   20.71      , _objs(NULL)
   20.72      , _requesting_thread(requesting_thread)
   20.73 -    , _status_code(BiasedLocking::NOT_BIASED) {}
   20.74 +    , _status_code(BiasedLocking::NOT_BIASED)
   20.75 +    , _biased_locker_id(0) {}
   20.76  
   20.77    VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
   20.78      : _obj(NULL)
   20.79      , _objs(objs)
   20.80      , _requesting_thread(requesting_thread)
   20.81 -    , _status_code(BiasedLocking::NOT_BIASED) {}
   20.82 +    , _status_code(BiasedLocking::NOT_BIASED)
   20.83 +    , _biased_locker_id(0) {}
   20.84  
   20.85    virtual VMOp_Type type() const { return VMOp_RevokeBias; }
   20.86  
   20.87 @@ -486,7 +497,11 @@
   20.88        if (TraceBiasedLocking) {
   20.89          tty->print_cr("Revoking bias with potentially per-thread safepoint:");
   20.90        }
   20.91 -      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
   20.92 +      JavaThread* biased_locker = NULL;
   20.93 +      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
   20.94 +      if (biased_locker != NULL) {
   20.95 +        _biased_locker_id = JFR_THREAD_ID(biased_locker);
   20.96 +      }
   20.97        clean_up_cached_monitor_info();
   20.98        return;
   20.99      } else {
  20.100 @@ -500,6 +515,10 @@
  20.101    BiasedLocking::Condition status_code() const {
  20.102      return _status_code;
  20.103    }
  20.104 +
  20.105 +  traceid biased_locker() const {
  20.106 +    return _biased_locker_id;
  20.107 +  }
  20.108  };
  20.109  
  20.110  
  20.111 @@ -609,23 +628,44 @@
  20.112        if (TraceBiasedLocking) {
  20.113          tty->print_cr("Revoking bias by walking my own stack:");
  20.114        }
  20.115 -      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
  20.116 +      EventBiasedLockSelfRevocation event;
  20.117 +      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
  20.118        ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
  20.119        assert(cond == BIAS_REVOKED, "why not?");
  20.120 +      if (event.should_commit()) {
  20.121 +        event.set_lockClass(k);
  20.122 +        event.commit();
  20.123 +      }
  20.124        return cond;
  20.125      } else {
  20.126 +      EventBiasedLockRevocation event;
  20.127        VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
  20.128        VMThread::execute(&revoke);
  20.129 +      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
  20.130 +        event.set_lockClass(k);
  20.131 +        // Subtract 1 to match the id of events committed inside the safepoint
  20.132 +        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
  20.133 +        event.set_previousOwner(revoke.biased_locker());
  20.134 +        event.commit();
  20.135 +      }
  20.136        return revoke.status_code();
  20.137      }
  20.138    }
  20.139  
  20.140    assert((heuristics == HR_BULK_REVOKE) ||
  20.141           (heuristics == HR_BULK_REBIAS), "?");
  20.142 +  EventBiasedLockClassRevocation event;
  20.143    VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
  20.144                                  (heuristics == HR_BULK_REBIAS),
  20.145                                  attempt_rebias);
  20.146    VMThread::execute(&bulk_revoke);
  20.147 +  if (event.should_commit()) {
  20.148 +    event.set_revokedClass(obj->klass());
  20.149 +    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
  20.150 +    // Subtract 1 to match the id of events committed inside the safepoint
  20.151 +    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
  20.152 +    event.commit();
  20.153 +  }
  20.154    return bulk_revoke.status_code();
  20.155  }
  20.156  
  20.157 @@ -645,7 +685,7 @@
  20.158    oop obj = h_obj();
  20.159    HeuristicsResult heuristics = update_heuristics(obj, false);
  20.160    if (heuristics == HR_SINGLE_REVOKE) {
  20.161 -    revoke_bias(obj, false, false, NULL);
  20.162 +    revoke_bias(obj, false, false, NULL, NULL);
  20.163    } else if ((heuristics == HR_BULK_REBIAS) ||
  20.164               (heuristics == HR_BULK_REVOKE)) {
  20.165      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  20.166 @@ -661,7 +701,7 @@
  20.167      oop obj = (objs->at(i))();
  20.168      HeuristicsResult heuristics = update_heuristics(obj, false);
  20.169      if (heuristics == HR_SINGLE_REVOKE) {
  20.170 -      revoke_bias(obj, false, false, NULL);
  20.171 +      revoke_bias(obj, false, false, NULL, NULL);
  20.172      } else if ((heuristics == HR_BULK_REBIAS) ||
  20.173                 (heuristics == HR_BULK_REVOKE)) {
  20.174        bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    21.1 --- a/src/share/vm/runtime/globals.cpp	Wed May 16 15:25:51 2018 +0200
    21.2 +++ b/src/share/vm/runtime/globals.cpp	Mon Aug 19 10:11:31 2019 +0200
    21.3 @@ -616,8 +616,8 @@
    21.4    e.commit();
    21.5  }
    21.6  
    21.7 -bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value) {
    21.8 -  Flag* result = Flag::find_flag(name, len);
    21.9 +bool CommandLineFlags::boolAt(const char* name, size_t len, bool* value, bool allow_locked, bool return_flag) {
   21.10 +  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
   21.11    if (result == NULL) return false;
   21.12    if (!result->is_bool()) return false;
   21.13    *value = result->get_bool();
   21.14 @@ -644,8 +644,8 @@
   21.15    faddr->set_origin(origin);
   21.16  }
   21.17  
   21.18 -bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value) {
   21.19 -  Flag* result = Flag::find_flag(name, len);
   21.20 +bool CommandLineFlags::intxAt(const char* name, size_t len, intx* value, bool allow_locked, bool return_flag) {
   21.21 +  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
   21.22    if (result == NULL) return false;
   21.23    if (!result->is_intx()) return false;
   21.24    *value = result->get_intx();
   21.25 @@ -672,8 +672,8 @@
   21.26    faddr->set_origin(origin);
   21.27  }
   21.28  
   21.29 -bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value) {
   21.30 -  Flag* result = Flag::find_flag(name, len);
   21.31 +bool CommandLineFlags::uintxAt(const char* name, size_t len, uintx* value, bool allow_locked, bool return_flag) {
   21.32 +  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
   21.33    if (result == NULL) return false;
   21.34    if (!result->is_uintx()) return false;
   21.35    *value = result->get_uintx();
   21.36 @@ -700,8 +700,8 @@
   21.37    faddr->set_origin(origin);
   21.38  }
   21.39  
   21.40 -bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value) {
   21.41 -  Flag* result = Flag::find_flag(name, len);
   21.42 +bool CommandLineFlags::uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked, bool return_flag) {
   21.43 +  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
   21.44    if (result == NULL) return false;
   21.45    if (!result->is_uint64_t()) return false;
   21.46    *value = result->get_uint64_t();
   21.47 @@ -728,8 +728,8 @@
   21.48    faddr->set_origin(origin);
   21.49  }
   21.50  
   21.51 -bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value) {
   21.52 -  Flag* result = Flag::find_flag(name, len);
   21.53 +bool CommandLineFlags::doubleAt(const char* name, size_t len, double* value, bool allow_locked, bool return_flag) {
   21.54 +  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
   21.55    if (result == NULL) return false;
   21.56    if (!result->is_double()) return false;
   21.57    *value = result->get_double();
   21.58 @@ -756,8 +756,8 @@
   21.59    faddr->set_origin(origin);
   21.60  }
   21.61  
   21.62 -bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value) {
   21.63 -  Flag* result = Flag::find_flag(name, len);
   21.64 +bool CommandLineFlags::ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked, bool return_flag) {
   21.65 +  Flag* result = Flag::find_flag(name, len, allow_locked, return_flag);
   21.66    if (result == NULL) return false;
   21.67    if (!result->is_ccstr()) return false;
   21.68    *value = result->get_ccstr();
    22.1 --- a/src/share/vm/runtime/globals.hpp	Wed May 16 15:25:51 2018 +0200
    22.2 +++ b/src/share/vm/runtime/globals.hpp	Mon Aug 19 10:11:31 2019 +0200
    22.3 @@ -369,33 +369,33 @@
    22.4  
    22.5  class CommandLineFlags {
    22.6   public:
    22.7 -  static bool boolAt(const char* name, size_t len, bool* value);
    22.8 -  static bool boolAt(const char* name, bool* value)      { return boolAt(name, strlen(name), value); }
    22.9 +  static bool boolAt(const char* name, size_t len, bool* value, bool allow_locked = false, bool return_flag = false);
   22.10 +  static bool boolAt(const char* name, bool* value, bool allow_locked = false, bool return_flag = false)   { return boolAt(name, strlen(name), value, allow_locked, return_flag); }
   22.11    static bool boolAtPut(const char* name, size_t len, bool* value, Flag::Flags origin);
   22.12    static bool boolAtPut(const char* name, bool* value, Flag::Flags origin)   { return boolAtPut(name, strlen(name), value, origin); }
   22.13  
   22.14 -  static bool intxAt(const char* name, size_t len, intx* value);
   22.15 -  static bool intxAt(const char* name, intx* value)      { return intxAt(name, strlen(name), value); }
   22.16 +  static bool intxAt(const char* name, size_t len, intx* value, bool allow_locked = false, bool return_flag = false);
   22.17 +  static bool intxAt(const char* name, intx* value, bool allow_locked = false, bool return_flag = false)      { return intxAt(name, strlen(name), value, allow_locked, return_flag); }
   22.18    static bool intxAtPut(const char* name, size_t len, intx* value, Flag::Flags origin);
   22.19    static bool intxAtPut(const char* name, intx* value, Flag::Flags origin)   { return intxAtPut(name, strlen(name), value, origin); }
   22.20  
   22.21 -  static bool uintxAt(const char* name, size_t len, uintx* value);
   22.22 -  static bool uintxAt(const char* name, uintx* value)    { return uintxAt(name, strlen(name), value); }
   22.23 +  static bool uintxAt(const char* name, size_t len, uintx* value, bool allow_locked = false, bool return_flag = false);
   22.24 +  static bool uintxAt(const char* name, uintx* value, bool allow_locked = false, bool return_flag = false)    { return uintxAt(name, strlen(name), value, allow_locked, return_flag); }
   22.25    static bool uintxAtPut(const char* name, size_t len, uintx* value, Flag::Flags origin);
   22.26    static bool uintxAtPut(const char* name, uintx* value, Flag::Flags origin) { return uintxAtPut(name, strlen(name), value, origin); }
   22.27  
   22.28 -  static bool uint64_tAt(const char* name, size_t len, uint64_t* value);
   22.29 -  static bool uint64_tAt(const char* name, uint64_t* value) { return uint64_tAt(name, strlen(name), value); }
   22.30 +  static bool uint64_tAt(const char* name, size_t len, uint64_t* value, bool allow_locked = false, bool return_flag = false);
   22.31 +  static bool uint64_tAt(const char* name, uint64_t* value, bool allow_locked = false, bool return_flag = false) { return uint64_tAt(name, strlen(name), value, allow_locked, return_flag); }
   22.32    static bool uint64_tAtPut(const char* name, size_t len, uint64_t* value, Flag::Flags origin);
   22.33    static bool uint64_tAtPut(const char* name, uint64_t* value, Flag::Flags origin) { return uint64_tAtPut(name, strlen(name), value, origin); }
   22.34  
   22.35 -  static bool doubleAt(const char* name, size_t len, double* value);
   22.36 -  static bool doubleAt(const char* name, double* value)    { return doubleAt(name, strlen(name), value); }
   22.37 +  static bool doubleAt(const char* name, size_t len, double* value, bool allow_locked = false, bool return_flag = false);
   22.38 +  static bool doubleAt(const char* name, double* value, bool allow_locked = false, bool return_flag = false)    { return doubleAt(name, strlen(name), value, allow_locked, return_flag); }
   22.39    static bool doubleAtPut(const char* name, size_t len, double* value, Flag::Flags origin);
   22.40    static bool doubleAtPut(const char* name, double* value, Flag::Flags origin) { return doubleAtPut(name, strlen(name), value, origin); }
   22.41  
   22.42 -  static bool ccstrAt(const char* name, size_t len, ccstr* value);
   22.43 -  static bool ccstrAt(const char* name, ccstr* value)    { return ccstrAt(name, strlen(name), value); }
   22.44 +  static bool ccstrAt(const char* name, size_t len, ccstr* value, bool allow_locked = false, bool return_flag = false);
   22.45 +  static bool ccstrAt(const char* name, ccstr* value, bool allow_locked = false, bool return_flag = false)    { return ccstrAt(name, strlen(name), value, allow_locked, return_flag); }
   22.46    // Contract:  Flag will make private copy of the incoming value.
   22.47    // Outgoing value is always malloc-ed, and caller MUST call free.
   22.48    static bool ccstrAtPut(const char* name, size_t len, ccstr* value, Flag::Flags origin);
    23.1 --- a/src/share/vm/runtime/thread.hpp	Wed May 16 15:25:51 2018 +0200
    23.2 +++ b/src/share/vm/runtime/thread.hpp	Mon Aug 19 10:11:31 2019 +0200
    23.3 @@ -196,7 +196,9 @@
    23.4      _deopt_suspend          = 0x10000000U, // thread needs to self suspend for deopt
    23.5  
    23.6      _has_async_exception    = 0x00000001U, // there is a pending async exception
    23.7 -    _critical_native_unlock = 0x00000002U  // Must call back to unlock JNI critical lock
    23.8 +    _critical_native_unlock = 0x00000002U, // Must call back to unlock JNI critical lock
    23.9 +
   23.10 +    JFR_ONLY(_trace_flag    = 0x00000004U)  // call jfr tracing
   23.11    };
   23.12  
   23.13    // various suspension related flags - atomically updated
   23.14 @@ -443,6 +445,7 @@
   23.15    inline jlong cooked_allocated_bytes();
   23.16  
   23.17    JFR_ONLY(DEFINE_THREAD_LOCAL_ACCESSOR_JFR;)
   23.18 +  JFR_ONLY(DEFINE_TRACE_SUSPEND_FLAG_METHODS)
   23.19  
   23.20    const ThreadExt& ext() const          { return _ext; }
   23.21    ThreadExt& ext()                      { return _ext; }

mercurial