src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp

changeset 6904:0982ec23da03
parent    6719:8e20ef014b08
child     6992:2c6ef90f030a
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jun 25 15:51:15 2014 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Jun 19 13:31:14 2014 +0200
@@ -978,7 +978,7 @@
   // at each young gen gc.  Do the update unconditionally (even though a
   // promotion failure does not swap spaces) because an unknown number of minor
   // collections will have swapped the spaces an unknown number of times.
-  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   ParallelScavengeHeap* heap = gc_heap();
   _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   _space_info[to_space_id].set_space(heap->young_gen()->to_space());
@@ -1021,7 +1021,7 @@

 void PSParallelCompact::post_compact()
 {
-  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   for (unsigned int id = old_space_id; id < last_space_id; ++id) {
     // Clear the marking bitmap, summary data and split info.
@@ -1847,7 +1847,7 @@
 void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                       bool maximum_compaction)
 {
-  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
   // trace("2");

 #ifdef  ASSERT
@@ -2056,7 +2056,7 @@

     gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
     TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
-    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
+    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer.gc_id());
     TraceCollectorStats tcs(counters());
     TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);

@@ -2351,7 +2351,7 @@
                                       bool maximum_heap_compaction,
                                       ParallelOldTracer *gc_tracer) {
   // Recursively traverse all live objects and mark them
-  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   ParallelScavengeHeap* heap = gc_heap();
   uint parallel_gc_threads = heap->gc_task_manager()->workers();
@@ -2366,7 +2366,7 @@
   ClassLoaderDataGraph::clear_claimed_marks();

   {
-    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

     ParallelScavengeHeap::ParStrongRootsScope psrs;

@@ -2395,24 +2395,24 @@

   // Process reference objects found during marking
   {
-    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

     ReferenceProcessorStats stats;
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
-        &task_executor, &_gc_timer);
+        &task_executor, &_gc_timer, _gc_tracer.gc_id());
     } else {
       stats = ref_processor()->process_discovered_references(
         is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
-        &_gc_timer);
+        &_gc_timer, _gc_tracer.gc_id());
     }

     gc_tracer->report_gc_reference_stats(stats);
   }

-  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
+  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   // This is the point where the entire marking should have completed.
   assert(cm->marking_stacks_empty(), "Marking should have completed");
@@ -2451,7 +2451,7 @@

 void PSParallelCompact::adjust_roots() {
   // Adjust the pointers to reflect the new locations
-  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   // Need new claim bits when tracing through and adjusting pointers.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2487,7 +2487,7 @@
 void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
                                                       uint parallel_gc_threads)
 {
-  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   // Find the threads that are active
   unsigned int which = 0;
@@ -2561,7 +2561,7 @@

 void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
                                                     uint parallel_gc_threads) {
-  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   ParallelCompactData& sd = PSParallelCompact::summary_data();

@@ -2643,7 +2643,7 @@
                                      GCTaskQueue* q,
                                      ParallelTaskTerminator* terminator_ptr,
                                      uint parallel_gc_threads) {
-  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   // Once a thread has drained it's stack, it should try to steal regions from
   // other threads.
@@ -2691,7 +2691,7 @@

 void PSParallelCompact::compact() {
   // trace("5");
-  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
+  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

   ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
@@ -2708,7 +2708,7 @@
   enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);

   {
-    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());

     gc_task_manager()->execute_and_wait(q);

@@ -2722,7 +2722,7 @@

   {
     // Update the deferred objects, if any.  Any compaction manager can be used.
-    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
+    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());
     ParCompactionManager* cm = ParCompactionManager::manager_array(0);
     for (unsigned int id = old_space_id; id < last_space_id; ++id) {
       update_deferred_objects(cm, SpaceId(id));
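Every hunk above makes the same mechanical change: each timed phase of the parallel old (full) GC now passes the current collection's id, _gc_tracer.gc_id(), into its GCTraceTime scope, and the two process_discovered_references() calls receive the same id, so per-phase log lines and timer events can be correlated with one GC. The sketch below shows the constructor shape these call sites imply; the exact declaration lives in gcTraceTime.hpp and is an assumption inferred from the diff, not part of this changeset.

// Assumed shape of the updated GCTraceTime constructor (gcTraceTime.hpp),
// inferred from the call sites in this diff; not part of the changeset itself.
class GCTraceTime {
 public:
  GCTraceTime(const char* title,   // phase name, e.g. "marking phase"
              bool doit,           // whether to emit the log line at all
              bool print_cr,       // whether to end the line with a newline
              GCTimer* timer,      // may be NULL, as in the "Full GC" scope above
              GCId gc_id);         // new: id of the collection being traced
  ~GCTraceTime();                  // reports elapsed time when the scope ends
};

// Typical use, matching the call sites in the diff:
//   GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer, _gc_tracer.gc_id());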
