src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp

changeset 1
2d8a650513c2
parent 0
f90c822e73f8
child 25
873fd82b133d
     1.1 --- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Fri Apr 29 00:06:10 2016 +0800
     1.3 @@ -22,6 +22,12 @@
     1.4   *
     1.5   */
     1.6  
     1.7 +/*
     1.8 + * This file has been modified by Loongson Technology in 2015. These
     1.9 + * modifications are Copyright (c) 2015 Loongson Technology, and are made
    1.10 + * available on the same license terms set forth above.
    1.11 + */
    1.12 +
    1.13  #include "precompiled.hpp"
    1.14  #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
    1.15  #include "gc_implementation/parallelScavenge/gcTaskThread.hpp"
    1.16 @@ -280,6 +286,41 @@
    1.17    return result;
    1.18  }
    1.19  
     1.20 +// Dequeue one task, preferring one whose NUMA node matches numa_id.
    1.21 +GCTask* GCTaskQueue::numa_dequeue(int numa_id) {
    1.22 +  if (TraceGCTaskQueue) {
    1.23 +    tty->print_cr("[" INTPTR_FORMAT "]"
    1.24 +                  " GCTaskQueue::dequeue(%u)", this, numa_id);
    1.25 +    print("before:");
    1.26 +  }
    1.27 +  assert(!is_empty(), "shouldn't dequeue from empty list");
    1.28 +  // Look down to the next barrier for a task with this affinity.
    1.29 +  GCTask* result = NULL;
    1.30 +  for (GCTask* element = remove_end();
    1.31 +       element != NULL;
    1.32 +       element = element->newer()) {
    1.33 +    if (element->is_barrier_task()) {
    1.34 +      // Don't consider barrier tasks, nor past them.
    1.35 +      result = NULL;
    1.36 +      break;
    1.37 +    }
    1.38 +    if (element->task_numa_id() == numa_id) {
    1.39 +      result = remove(element);
    1.40 +      break;
    1.41 +    }
    1.42 +  }
    1.43 +  // If we didn't find anything with affinity, just take the next task.
    1.44 +  if (result == NULL) {
    1.45 +    result = remove();
    1.46 +  }
    1.47 +
    1.48 +  if (TraceGCTaskQueue) {
    1.49 +    tty->print_cr("    return: " INTPTR_FORMAT, result);
    1.50 +    print("after:");
    1.51 +  }
    1.52 +  return result;
    1.53 +}
    1.54 +
    1.55  GCTask* GCTaskQueue::remove() {
    1.56    // Dequeue from remove end.
    1.57    GCTask* result = remove_end();
    1.58 @@ -411,10 +452,11 @@
    1.59      //     Distribute the workers among the available processors,
    1.60      //     unless we were told not to, or if the os doesn't want to.
    1.61      uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
    1.62 -    if (!BindGCTaskThreadsToCPUs ||
    1.63 +    if (BindGCTaskThreadsToCPUs ||
    1.64          !os::distribute_processes(workers(), processor_assignment)) {
    1.65        for (uint a = 0; a < workers(); a += 1) {
    1.66 -        processor_assignment[a] = sentinel_worker();
     1.67 +        /* 2014/7/7 Liao: Bind GC thread [a] to processor [a]. */
    1.68 +        processor_assignment[a] = a;
    1.69        }
    1.70      }
    1.71      _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
    1.72 @@ -659,7 +701,13 @@
    1.73      if (UseGCTaskAffinity) {
    1.74        result = queue()->dequeue(which);
    1.75      } else {
    1.76 -      result = queue()->dequeue();
     1.77 +      /* 2014/7/7 Liao: On NUMA machines, the GC thread fetches a task NUMA-aware. */
    1.78 +      if(UseNUMAThreadRoots) {
    1.79 +        result = queue()->numa_dequeue(os::numa_get_group_id());
    1.80 +      }
    1.81 +      else {
    1.82 +        result = queue()->dequeue();
    1.83 +      }
    1.84      }
    1.85      if (result->is_barrier_task()) {
    1.86        assert(which != sentinel_worker(),

mercurial