8238589: Necessary code cleanup in JFR for JDK8u

author     neugens
date       Fri, 14 Feb 2020 17:13:16 +0100
changeset  9892:9a4141de094d
parent     9891:4904bded9702
child      9893:be5266057dda

8238589: Necessary code cleanup in JFR for JDK8u
Reviewed-by: shade, apetushkov

src/os/linux/vm/perfMemory_linux.cpp
src/share/vm/classfile/systemDictionary.cpp
src/share/vm/compiler/compileBroker.cpp
src/share/vm/gc_implementation/shared/gcTraceSend.cpp
src/share/vm/opto/superword.hpp
src/share/vm/runtime/biasedLocking.cpp
src/share/vm/runtime/mutexLocker.cpp
src/share/vm/runtime/mutexLocker.hpp
src/share/vm/runtime/synchronizer.cpp
     1.1 --- a/src/os/linux/vm/perfMemory_linux.cpp	Tue Feb 04 11:16:27 2020 +0800
     1.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp	Fri Feb 14 17:13:16 2020 +0100
     1.3 @@ -34,7 +34,6 @@
     1.4  #include "utilities/exceptions.hpp"
     1.5  
     1.6  // put OS-includes here
     1.7 -#include <dirent.h>
     1.8  # include <sys/types.h>
     1.9  # include <sys/mman.h>
    1.10  # include <errno.h>
     2.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Tue Feb 04 11:16:27 2020 +0800
     2.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Feb 14 17:13:16 2020 +0100
     2.3 @@ -617,7 +617,7 @@
     2.4                                      (ClassLoaderData*)NULL);
     2.5      event.commit();
     2.6    }
     2.7 -#endif // INCLUDE_JFR
     2.8 +#endif
     2.9  }
    2.10  
    2.11  Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
    2.12 @@ -1337,24 +1337,23 @@
    2.13      if (!k.is_null()) {
    2.14        k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh));
    2.15      }
    2.16 +
    2.17  #if INCLUDE_JFR
    2.18 -    else {
    2.19 +    if (k.is_null() && (class_name == jfr_event_handler_proxy)) {
    2.20        assert(jfr_event_handler_proxy != NULL, "invariant");
    2.21 -      if (class_name == jfr_event_handler_proxy) {
    2.22 -        // EventHandlerProxy class is generated dynamically in
    2.23 -        // EventHandlerProxyCreator::makeEventHandlerProxyClass
    2.24 -        // method, so we generate a Java call from here.
    2.25 -        //
    2.26 -        // EventHandlerProxy class will finally be defined in
    2.27 -        // SystemDictionary::resolve_from_stream method, down
    2.28 -        // the call stack. Bootstrap classloader is parallel-capable,
    2.29 -        // so no concurrency issues are expected.
    2.30 -        CLEAR_PENDING_EXCEPTION;
    2.31 -        k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
    2.32 -        assert(!k.is_null(), "invariant");
    2.33 -      }
    2.34 +      // EventHandlerProxy class is generated dynamically in
    2.35 +      // EventHandlerProxyCreator::makeEventHandlerProxyClass
    2.36 +      // method, so we generate a Java call from here.
    2.37 +      //
    2.38 +      // EventHandlerProxy class will finally be defined in
    2.39 +      // SystemDictionary::resolve_from_stream method, down
    2.40 +      // the call stack. Bootstrap classloader is parallel-capable,
    2.41 +      // so no concurrency issues are expected.
    2.42 +      CLEAR_PENDING_EXCEPTION;
    2.43 +      k = JfrUpcalls::load_event_handler_proxy_class(THREAD);
    2.44 +      assert(!k.is_null(), "invariant");
    2.45      }
    2.46 -#endif // INCLUDE_JFR
    2.47 +#endif
    2.48  
    2.49      return k;
    2.50    } else {
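Note on the systemDictionary.cpp hunk above: the JFR-only branch used to be an `else` of the preceding lookup, with `#if INCLUDE_JFR` wrapping the whole `else` block. The cleanup flattens it into a self-contained `if (k.is_null() && ...)` statement, so the non-JFR control flow reads the same whether or not JFR is compiled in. The following stand-alone sketch (hypothetical names, not code from the patch) shows the pattern:

#include <cstdio>

#define INCLUDE_FEATURE 1   // stand-in for INCLUDE_JFR; always defined to 0 or 1

static bool lookup(bool found) { return found; }

static bool resolve(bool found, bool special_case) {
  bool k = lookup(found);
#if INCLUDE_FEATURE
  // Self-contained guarded statement: building with the feature off removes
  // this block without disturbing the surrounding control flow, unlike an
  // `else` branch spliced in by the preprocessor.
  if (!k && special_case) {
    k = true;  // e.g. generate the class via an upcall, as the real hunk does
  }
#endif
  return k;
}

int main() {
  std::printf("%d %d\n", resolve(false, true), resolve(false, false));
  return 0;
}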
     3.1 --- a/src/share/vm/compiler/compileBroker.cpp	Tue Feb 04 11:16:27 2020 +0800
     3.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Fri Feb 14 17:13:16 2020 +0100
     3.3 @@ -2023,7 +2023,7 @@
     3.4      compilable = ci_env.compilable();
     3.5  
     3.6      if (ci_env.failing()) {
     3.7 -      const char *failure_reason = ci_env.failure_reason();
     3.8 +      const char* failure_reason = ci_env.failure_reason();
     3.9        const char* retry_message = ci_env.retry_message();
    3.10        task->set_failure_reason(failure_reason);
    3.11        if (_compilation_log != NULL) {
     4.1 --- a/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Tue Feb 04 11:16:27 2020 +0800
     4.2 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Fri Feb 14 17:13:16 2020 +0100
     4.3 @@ -233,82 +233,6 @@
     4.4    }
     4.5  }
     4.6  
     4.7 -// XXX
     4.8 -//static JfrStructG1EvacuationStatistics
     4.9 -//create_g1_evacstats(unsigned gcid, const G1EvacSummary& summary) {
    4.10 -//  JfrStructG1EvacuationStatistics s;
    4.11 -//  s.set_gcId(gcid);
    4.12 -//  s.set_allocated(summary.allocated() * HeapWordSize);
    4.13 -//  s.set_wasted(summary.wasted() * HeapWordSize);
    4.14 -//  s.set_used(summary.used() * HeapWordSize);
    4.15 -//  s.set_undoWaste(summary.undo_wasted() * HeapWordSize);
    4.16 -//  s.set_regionEndWaste(summary.region_end_waste() * HeapWordSize);
    4.17 -//  s.set_regionsRefilled(summary.regions_filled());
    4.18 -//  s.set_directAllocated(summary.direct_allocated() * HeapWordSize);
    4.19 -//  s.set_failureUsed(summary.failure_used() * HeapWordSize);
    4.20 -//  s.set_failureWaste(summary.failure_waste() * HeapWordSize);
    4.21 -//  return s;
    4.22 -//}
    4.23 -//
    4.24 -//void G1NewTracer::send_young_evacuation_statistics(const G1EvacSummary& summary) const {
    4.25 -//  EventG1EvacuationYoungStatistics surv_evt;
    4.26 -//  if (surv_evt.should_commit()) {
    4.27 -//    surv_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
    4.28 -//    surv_evt.commit();
    4.29 -//  }
    4.30 -//}
    4.31 -//
    4.32 -//void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) const {
    4.33 -//  EventG1EvacuationOldStatistics old_evt;
    4.34 -//  if (old_evt.should_commit()) {
    4.35 -//    old_evt.set_statistics(create_g1_evacstats(_shared_gc_info.gc_id().id(), summary));
    4.36 -//    old_evt.commit();
    4.37 -//  }
    4.38 -//}
    4.39 -//
    4.40 -//void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
    4.41 -//                                             size_t target_occupancy,
    4.42 -//                                             size_t current_occupancy,
    4.43 -//                                             size_t last_allocation_size,
    4.44 -//                                             double last_allocation_duration,
    4.45 -//                                             double last_marking_length) {
    4.46 -//  EventG1BasicIHOP evt;
    4.47 -//  if (evt.should_commit()) {
    4.48 -//    evt.set_gcId(_shared_gc_info.gc_id().id());
    4.49 -//    evt.set_threshold(threshold);
    4.50 -//    evt.set_targetOccupancy(target_occupancy);
    4.51 -//    evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
    4.52 -//    evt.set_currentOccupancy(current_occupancy);
    4.53 -//    evt.set_recentMutatorAllocationSize(last_allocation_size);
    4.54 -//    evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
    4.55 -//    evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
    4.56 -//    evt.set_lastMarkingDuration(last_marking_length * MILLIUNITS);
    4.57 -//    evt.commit();
    4.58 -//  }
    4.59 -//}
    4.60 -//
    4.61 -//void G1NewTracer::send_adaptive_ihop_statistics(size_t threshold,
    4.62 -//                                                size_t internal_target_occupancy,
    4.63 -//                                                size_t current_occupancy,
    4.64 -//                                                size_t additional_buffer_size,
    4.65 -//                                                double predicted_allocation_rate,
    4.66 -//                                                double predicted_marking_length,
    4.67 -//                                                bool prediction_active) {
    4.68 -//  EventG1AdaptiveIHOP evt;
    4.69 -//  if (evt.should_commit()) {
    4.70 -//    evt.set_gcId(_shared_gc_info.gc_id().id());
    4.71 -//    evt.set_threshold(threshold);
    4.72 -//    evt.set_thresholdPercentage(internal_target_occupancy > 0 ? ((double)threshold / internal_target_occupancy) : 0.0);
    4.73 -//    evt.set_ihopTargetOccupancy(internal_target_occupancy);
    4.74 -//    evt.set_currentOccupancy(current_occupancy);
    4.75 -//    evt.set_additionalBufferSize(additional_buffer_size);
    4.76 -//    evt.set_predictedAllocationRate(predicted_allocation_rate);
    4.77 -//    evt.set_predictedMarkingDuration(predicted_marking_length * MILLIUNITS);
    4.78 -//    evt.set_predictionActive(prediction_active);
    4.79 -//    evt.commit();
    4.80 -//  }
    4.81 -//}
    4.82 -
    4.83  #endif // INCLUDE_ALL_GCS
    4.84  
    4.85  static JfrStructVirtualSpace to_struct(const VirtualSpaceSummary& summary) {
     5.1 --- a/src/share/vm/opto/superword.hpp	Tue Feb 04 11:16:27 2020 +0800
     5.2 +++ b/src/share/vm/opto/superword.hpp	Fri Feb 14 17:13:16 2020 +0100
     5.3 @@ -201,32 +201,6 @@
     5.4    static const SWNodeInfo initial;
     5.5  };
     5.6  
     5.7 -
     5.8 -// JVMCI: OrderedPair is moved up to deal with compilation issues on Windows
     5.9 -//------------------------------OrderedPair---------------------------
    5.10 -// Ordered pair of Node*.
    5.11 -class OrderedPair VALUE_OBJ_CLASS_SPEC {
    5.12 - protected:
    5.13 -  Node* _p1;
    5.14 -  Node* _p2;
    5.15 - public:
    5.16 -  OrderedPair() : _p1(NULL), _p2(NULL) {}
    5.17 -  OrderedPair(Node* p1, Node* p2) {
    5.18 -    if (p1->_idx < p2->_idx) {
    5.19 -      _p1 = p1; _p2 = p2;
    5.20 -    } else {
    5.21 -      _p1 = p2; _p2 = p1;
    5.22 -    }
    5.23 -  }
    5.24 -
    5.25 -  bool operator==(const OrderedPair &rhs) {
    5.26 -    return _p1 == rhs._p1 && _p2 == rhs._p2;
    5.27 -  }
    5.28 -  void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }
    5.29 -
    5.30 -  static const OrderedPair initial;
    5.31 -};
    5.32 -
    5.33  // -----------------------------SuperWord---------------------------------
    5.34  // Transforms scalar operations into packed (superword) operations.
    5.35  class SuperWord : public ResourceObj {
    5.36 @@ -450,6 +424,7 @@
    5.37  };
    5.38  
    5.39  
    5.40 +
    5.41  //------------------------------SWPointer---------------------------
    5.42  // Information about an address for dependence checking and vector alignment
    5.43  class SWPointer VALUE_OBJ_CLASS_SPEC {
    5.44 @@ -531,4 +506,29 @@
    5.45    void print();
    5.46  };
    5.47  
    5.48 +
    5.49 +//------------------------------OrderedPair---------------------------
    5.50 +// Ordered pair of Node*.
    5.51 +class OrderedPair VALUE_OBJ_CLASS_SPEC {
    5.52 + protected:
    5.53 +  Node* _p1;
    5.54 +  Node* _p2;
    5.55 + public:
    5.56 +  OrderedPair() : _p1(NULL), _p2(NULL) {}
    5.57 +  OrderedPair(Node* p1, Node* p2) {
    5.58 +    if (p1->_idx < p2->_idx) {
    5.59 +      _p1 = p1; _p2 = p2;
    5.60 +    } else {
    5.61 +      _p1 = p2; _p2 = p1;
    5.62 +    }
    5.63 +  }
    5.64 +
    5.65 +  bool operator==(const OrderedPair &rhs) {
    5.66 +    return _p1 == rhs._p1 && _p2 == rhs._p2;
    5.67 +  }
    5.68 +  void print() { tty->print("  (%d, %d)", _p1->_idx, _p2->_idx); }
    5.69 +
    5.70 +  static const OrderedPair initial;
    5.71 +};
    5.72 +
    5.73  #endif // SHARE_VM_OPTO_SUPERWORD_HPP
     6.1 --- a/src/share/vm/runtime/biasedLocking.cpp	Tue Feb 04 11:16:27 2020 +0800
     6.2 +++ b/src/share/vm/runtime/biasedLocking.cpp	Fri Feb 14 17:13:16 2020 +0100
     6.3 @@ -256,10 +256,12 @@
     6.4      }
     6.5    }
     6.6  
     6.7 +#if INCLUDE_JFR
     6.8    // If requested, return information on which thread held the bias
     6.9    if (biased_locker != NULL) {
    6.10      *biased_locker = biased_thread;
    6.11    }
    6.12 +#endif // INCLUDE_JFR
    6.13  
    6.14    return BiasedLocking::BIAS_REVOKED;
    6.15  }
    6.16 @@ -497,11 +499,15 @@
    6.17        if (TraceBiasedLocking) {
    6.18          tty->print_cr("Revoking bias with potentially per-thread safepoint:");
    6.19        }
    6.20 +
    6.21        JavaThread* biased_locker = NULL;
    6.22        _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
    6.23 +#if INCLUDE_JFR
    6.24        if (biased_locker != NULL) {
    6.25          _biased_locker_id = JFR_THREAD_ID(biased_locker);
    6.26        }
    6.27 +#endif // INCLUDE_JFR
    6.28 +
    6.29        clean_up_cached_monitor_info();
    6.30        return;
    6.31      } else {
    6.32 @@ -516,9 +522,11 @@
    6.33      return _status_code;
    6.34    }
    6.35  
    6.36 +#if INCLUDE_JFR
    6.37    traceid biased_locker() const {
    6.38      return _biased_locker_id;
    6.39    }
    6.40 +#endif // INCLUDE_JFR
    6.41  };
    6.42  
    6.43  
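Note on the biasedLocking.cpp hunks above: the out-parameter that reports which thread held the bias, and the `biased_locker()` accessor that exposes it as a JFR trace id, are only consumed by JFR event code, so both are now guarded by `#if INCLUDE_JFR`. A minimal sketch of that guard pattern, with assumed names (only the 64-bit `traceid` mirrors the JFR typedef), is:

#include <cstdint>

#define INCLUDE_FEATURE 1            // stand-in for INCLUDE_JFR

typedef uint64_t traceid;            // JFR-style 64-bit trace id (assumed here)

class RevokeOperation {              // hypothetical stand-in for the VM operation
#if INCLUDE_FEATURE
  traceid _biased_locker_id;
#endif
 public:
  RevokeOperation()
#if INCLUDE_FEATURE
    : _biased_locker_id(0)
#endif
  {}

  void record_biased_locker(traceid id) {
#if INCLUDE_FEATURE
    _biased_locker_id = id;          // only stored when JFR is compiled in
#else
    (void)id;                        // signature stays stable in feature-off builds
#endif
  }

#if INCLUDE_FEATURE
  traceid biased_locker() const { return _biased_locker_id; }
#endif
};

int main() {
  RevokeOperation op;
  op.record_biased_locker(42);
#if INCLUDE_FEATURE
  return op.biased_locker() == 42 ? 0 : 1;
#else
  return 0;
#endif
}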
     7.1 --- a/src/share/vm/runtime/mutexLocker.cpp	Tue Feb 04 11:16:27 2020 +0800
     7.2 +++ b/src/share/vm/runtime/mutexLocker.cpp	Fri Feb 14 17:13:16 2020 +0100
     7.3 @@ -284,7 +284,7 @@
     7.4    def(CompileThread_lock           , Monitor, nonleaf+5,   false );
     7.5    def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
     7.6  
     7.7 -#ifdef INCLUDE_JFR
     7.8 +#if INCLUDE_JFR
     7.9    def(JfrMsg_lock                  , Monitor, leaf,        true);
    7.10    def(JfrBuffer_lock               , Mutex,   leaf,        true);
    7.11    def(JfrThreadGroups_lock         , Mutex,   leaf,        true);
     8.1 --- a/src/share/vm/runtime/mutexLocker.hpp	Tue Feb 04 11:16:27 2020 +0800
     8.2 +++ b/src/share/vm/runtime/mutexLocker.hpp	Fri Feb 14 17:13:16 2020 +0100
     8.3 @@ -142,7 +142,7 @@
     8.4  extern Monitor* Service_lock;                    // a lock used for service thread operation
     8.5  extern Monitor* PeriodicTask_lock;               // protects the periodic task structure
     8.6  
     8.7 -#ifdef INCLUDE_JFR
     8.8 +#if INCLUDE_JFR
     8.9  extern Mutex*   JfrStacktrace_lock;              // used to guard access to the JFR stacktrace table
    8.10  extern Monitor* JfrMsg_lock;                     // protects JFR messaging
    8.11  extern Mutex*   JfrBuffer_lock;                  // protects JFR buffer operations
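Note on the mutexLocker hunks above: HotSpot feature macros such as INCLUDE_JFR are conventionally always defined, to either 0 or 1 (utilities/macros.hpp defaults them and the build can set them to 0), so `#ifdef INCLUDE_JFR` is taken even when JFR is disabled; only `#if INCLUDE_JFR` honors the 0 value. A small stand-alone illustration with a dummy macro:

#include <cstdio>

#define INCLUDE_FEATURE 0   // a feature that is "disabled" HotSpot-style

int main() {
#ifdef INCLUDE_FEATURE
  std::puts("#ifdef: still taken, because the macro is defined (to 0)");
#endif
#if INCLUDE_FEATURE
  std::puts("#if: not compiled when the macro expands to 0");
#endif
  return 0;
}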
     9.1 --- a/src/share/vm/runtime/synchronizer.cpp	Tue Feb 04 11:16:27 2020 +0800
     9.2 +++ b/src/share/vm/runtime/synchronizer.cpp	Fri Feb 14 17:13:16 2020 +0100
     9.3 @@ -1185,8 +1185,6 @@
     9.4    assert(event->should_commit(), "invariant");
     9.5    event->set_monitorClass(obj->klass());
     9.6    event->set_address((uintptr_t)(void*)obj);
     9.7 -  // XXX no such counters. implement?
     9.8 -//  event->set_cause((u1)cause);
     9.9    event->commit();
    9.10  }
    9.11  
