[Code Reorganization] Removed GC-related modifications made by Loongson, for example, UseOldNUMA.

author      aoqi <aoqi@loongson.cn>
date        Fri, 24 Jun 2016 17:12:13 +0800
changeset   25:873fd82b133d
parent      24:d2be62fdfa50
child       26:ed5b982c0b0e

src/os/linux/vm/os_linux.cpp
src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp
src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp
src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp
src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp
src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp
src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp
src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp
src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp
src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp
src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp
src/share/vm/gc_implementation/shared/mutableSpace.cpp
src/share/vm/gc_implementation/shared/mutableSpace.hpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/globals.hpp
src/share/vm/runtime/os.hpp
src/share/vm/utilities/taskqueue.hpp
     1.1 --- a/src/os/linux/vm/os_linux.cpp	Wed Jun 22 14:26:49 2016 +0800
     1.2 +++ b/src/os/linux/vm/os_linux.cpp	Fri Jun 24 17:12:13 2016 +0800
     1.3 @@ -251,7 +251,7 @@
     1.4  #define SYS_gettid 186
     1.5  #elif __sparc__
     1.6  #define SYS_gettid 143
     1.7 -#elif __mips__     
     1.8 +#elif __mips__
     1.9  #define SYS_gettid 4222
    1.10  #else
    1.11  #error define gettid for the arch
    1.12 @@ -2841,16 +2841,6 @@
    1.13    return 0;
    1.14  }
    1.15  
    1.16 -int os::numa_get_cpu_id() {
    1.17 -  int cpu_id = Linux::sched_getcpu();
    1.18 -  if(cpu_id != -1)
    1.19 -    return cpu_id;
    1.20 -  else {
    1.21 -    tty->print_cr("cpu_id got a unacceptable value");
    1.22 -    return 0;
    1.23 -  }
    1.24 -}
    1.25 -
    1.26  size_t os::numa_get_leaf_groups(int *ids, size_t size) {
    1.27    for (size_t i = 0; i < size; i++) {
    1.28      ids[i] = i;
    1.29 @@ -4925,20 +4915,6 @@
    1.30      }
    1.31    }
    1.32  
    1.33 -/*Liao:2013/11/18 UseOldNUMA: Let OldGen support NUMA*/
    1.34 -  if(UseNUMA == true && UseOldNUMA == true) {
    1.35 -    UseOldNUMA = true;
    1.36 -  } else {
    1.37 -    UseOldNUMA = false;
    1.38 -  }
    1.39 -
    1.40 -  if(UseOldNUMA == false) {
    1.41 -    UseNUMAGC = false;
    1.42 -    UseNUMAThreadRoots = false;
    1.43 -    UseNUMASteal = false;
    1.44 -    BindGCTaskThreadsToCPUs = false;
    1.45 -  }
    1.46 -
    1.47    if (MaxFDLimit) {
    1.48      // set the number of file descriptors to max. print out error
    1.49      // if getrlimit/setrlimit fails but continue regardless.
    1.50 @@ -5028,24 +5004,8 @@
    1.51  }
    1.52  
    1.53  bool os::bind_to_processor(uint processor_id) {
    1.54 -  /* 2014/7/7 implemented by Liao */
    1.55 -  if(BindGCTaskThreadsToCPUs) {
    1.56 -    cpu_set_t mask;
    1.57 -    cpu_set_t get;
    1.58 -    CPU_ZERO(&mask);
    1.59 -    CPU_SET(processor_id, &mask);
    1.60 -
    1.61 -    if(sched_setaffinity(0, sizeof(mask), &mask) == -1) {
    1.62 -      tty->print_cr("Can't bind to processor_id = %d!", processor_id);
    1.63 -      return false;
    1.64 -    }
    1.65 -    else {
    1.66 -      return true;
    1.67 -    }
    1.68 -  }
    1.69 -  else {
    1.70 -    return false;
    1.71 -  }
    1.72 +  // Not yet implemented.
    1.73 +  return false;
    1.74  }
    1.75  
    1.76  ///
     2.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Wed Jun 22 14:26:49 2016 +0800
     2.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Fri Jun 24 17:12:13 2016 +0800
     2.3 @@ -22,12 +22,6 @@
     2.4   *
     2.5   */
     2.6  
     2.7 -/*
     2.8 - * This file has been modified by Loongson Technology in 2015. These
     2.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    2.10 - * available on the same license terms set forth above.
    2.11 - */
    2.12 -
    2.13  #include "precompiled.hpp"
    2.14  #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
    2.15  #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
    2.16 @@ -207,13 +201,7 @@
    2.17          worker_end_card = end_card;
    2.18      }
    2.19  
    2.20 -    /* 2014/2/12/ Liao: In UseOldNUMA, when FullGC occurs, all live objects in old-gen are 
    2.21 -     * compacted to the low end of old-gen. If the size of live objects is larger than lgrp0, 
    2.22 -     * and the last object in lgrp0 spans both lgrp0 and lgrp1, then the slice_end is greater 
    2.23 -     * than sp_top. (The lgrp0 is full, so the sp_top equals to sp_end.) 
    2.24 -     * This situtation is reasonable in UseOldNUMA.*/
    2.25 -    if(!UseOldNUMA)
    2.26 -      assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    2.27 +    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    2.28      assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    2.29      assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    2.30      // Note that worker_start_card >= worker_end_card is legal, and happens when
    2.31 @@ -331,213 +319,6 @@
    2.32    }
    2.33  }
    2.34  
    2.35 -void CardTableExtension::scavenge_contents_parallel_oldnuma(ObjectStartArray* start_array,
    2.36 -                                                    MutableSpace* sp_below,
    2.37 -                                                    MutableSpace* sp,
    2.38 -                                                    HeapWord* space_top,
    2.39 -                                                    PSPromotionManager* pm,
    2.40 -                                                    uint stripe_number,
    2.41 -                                                    uint stripe_total) {
    2.42 -  int ssize = 128; // Naked constant!  Work unit = 64k.
    2.43 -  int dirty_card_count = 0;
    2.44 -
    2.45 -  // It is a waste to get here if empty.
    2.46 -  assert(sp->bottom() < sp->top(), "Should not be called if empty");
    2.47 -  oop* sp_top = (oop*)space_top;
    2.48 -  jbyte* start_card = byte_for(sp->bottom());
    2.49 -  jbyte* end_card   = byte_for(sp_top - 1) + 1;
    2.50 -  oop* last_scanned = NULL; // Prevent scanning objects more than once
    2.51 -  // The width of the stripe ssize*stripe_total must be
    2.52 -  // consistent with the number of stripes so that the complete slice
    2.53 -  // is covered.
    2.54 -  size_t slice_width = ssize * stripe_total;
    2.55 -  for (jbyte* slice = start_card; slice < end_card; slice += slice_width) {
    2.56 -    jbyte* worker_start_card = slice + stripe_number * ssize;
    2.57 -    if (worker_start_card >= end_card)
    2.58 -      return; // We're done.
    2.59 -
    2.60 -    jbyte* worker_end_card = worker_start_card + ssize;
    2.61 -    if (worker_end_card > end_card)
    2.62 -      worker_end_card = end_card;
    2.63 -
    2.64 -    // We do not want to scan objects more than once. In order to accomplish
    2.65 -    // this, we assert that any object with an object head inside our 'slice'
    2.66 -    // belongs to us. We may need to extend the range of scanned cards if the
    2.67 -    // last object continues into the next 'slice'.
    2.68 -    //
    2.69 -    // Note! ending cards are exclusive!
    2.70 -    HeapWord* slice_start = addr_for(worker_start_card);
    2.71 -    HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card));
    2.72 -
    2.73 -#ifdef ASSERT
    2.74 -    if (GCWorkerDelayMillis > 0) {
    2.75 -      // Delay 1 worker so that it proceeds after all the work
    2.76 -      // has been completed.
    2.77 -      if (stripe_number < 2) {
    2.78 -        os::sleep(Thread::current(), GCWorkerDelayMillis, false);
    2.79 -      }
    2.80 -    }
    2.81 -#endif
    2.82 -
    2.83 -    // If there are not objects starting within the chunk, skip it.
    2.84 -    if (!start_array->object_starts_in_range(slice_start, slice_end)) {
    2.85 -      continue;
    2.86 -    }
    2.87 -    // Update our beginning addr
    2.88 -    HeapWord* first_object;
    2.89 -    if(sp_below->top() == sp->bottom())
    2.90 -      first_object = start_array->object_start(slice_start);
    2.91 -    else
    2.92 -      first_object = start_array->object_start_oldnuma(slice_start, sp->bottom());
    2.93 -    debug_only(oop* first_object_within_slice = (oop*) first_object;)
    2.94 -    if (first_object < slice_start) {
    2.95 -      last_scanned = (oop*)(first_object + oop(first_object)->size());
    2.96 -      debug_only(first_object_within_slice = last_scanned;)
    2.97 -      worker_start_card = byte_for(last_scanned);
    2.98 -    }
    2.99 -
   2.100 -    // Update the ending addr
   2.101 -    if (slice_end < (HeapWord*)sp_top) {
   2.102 -      // The subtraction is important! An object may start precisely at slice_end.
   2.103 -      HeapWord* last_object;
   2.104 -      if(sp_below->top() == sp->bottom())
   2.105 -        last_object = start_array->object_start(slice_end - 1);
   2.106 -      else
   2.107 -        last_object = start_array->object_start_oldnuma(slice_end - 1, sp->bottom());
   2.108 -      slice_end = last_object + oop(last_object)->size();
   2.109 -      // worker_end_card is exclusive, so bump it one past the end of last_object's
   2.110 -      // covered span.
   2.111 -      worker_end_card = byte_for(slice_end) + 1;
   2.112 -
   2.113 -      if (worker_end_card > end_card)
   2.114 -        worker_end_card = end_card;
   2.115 -    }
   2.116 -
   2.117 -    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
   2.118 -    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
   2.119 -    // Note that worker_start_card >= worker_end_card is legal, and happens when
   2.120 -    // an object spans an entire slice.
   2.121 -    assert(worker_start_card <= end_card, "worker start card beyond end card");
   2.122 -    assert(worker_end_card <= end_card, "worker end card beyond end card");
   2.123 -
   2.124 -    jbyte* current_card = worker_start_card;
   2.125 -    while (current_card < worker_end_card) {
   2.126 -      // Find an unclean card.
   2.127 -      while (current_card < worker_end_card && card_is_clean(*current_card)) {
   2.128 -        current_card++;
   2.129 -      }
   2.130 -      jbyte* first_unclean_card = current_card;
   2.131 -
   2.132 -      // Find the end of a run of contiguous unclean cards
   2.133 -      while (current_card < worker_end_card && !card_is_clean(*current_card)) {
   2.134 -        while (current_card < worker_end_card && !card_is_clean(*current_card)) {
   2.135 -          current_card++;
   2.136 -        }
   2.137 -
   2.138 -        if (current_card < worker_end_card) {
   2.139 -          // Some objects may be large enough to span several cards. If such
   2.140 -          // an object has more than one dirty card, separated by a clean card,
   2.141 -          // we will attempt to scan it twice. The test against "last_scanned"
   2.142 -          // prevents the redundant object scan, but it does not prevent newly
   2.143 -          // marked cards from being cleaned.
   2.144 -          HeapWord* last_object_in_dirty_region;
   2.145 -          if(sp_below->top() == sp->bottom())
   2.146 -            last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1);
   2.147 -          else
   2.148 -            last_object_in_dirty_region = start_array->object_start_oldnuma(addr_for(current_card)-1, sp->bottom());
   2.149 -          size_t size_of_last_object = oop(last_object_in_dirty_region)->size();
   2.150 -          HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object;
   2.151 -          jbyte* ending_card_of_last_object = byte_for(end_of_last_object);
   2.152 -          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
   2.153 -          if (ending_card_of_last_object > current_card) {
   2.154 -            // This means the object spans the next complete card.
   2.155 -            // We need to bump the current_card to ending_card_of_last_object
   2.156 -            current_card = ending_card_of_last_object;
   2.157 -          }
   2.158 -        }
   2.159 -      }
   2.160 -      jbyte* following_clean_card = current_card;
   2.161 -
   2.162 -      if (first_unclean_card < worker_end_card) {
   2.163 -        oop* p;
   2.164 -        if(sp_below->top() == sp->bottom())
   2.165 -          p = (oop*) start_array->object_start(addr_for(first_unclean_card));
   2.166 -        else
   2.167 -          p = (oop*) start_array->object_start_oldnuma(addr_for(first_unclean_card), sp->bottom());
   2.168 -        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
   2.169 -        // "p" should always be >= "last_scanned" because newly GC dirtied
   2.170 -        // cards are no longer scanned again (see comment at end
   2.171 -        // of loop on the increment of "current_card").  Test that
   2.172 -        // hypothesis before removing this code.
   2.173 -        // If this code is removed, deal with the first time through
   2.174 -        // the loop when the last_scanned is the object starting in
   2.175 -        // the previous slice.
   2.176 -        assert((p >= last_scanned) ||
   2.177 -               (last_scanned == first_object_within_slice),
   2.178 -               "Should no longer be possible");
   2.179 -        if (p < last_scanned) {
   2.180 -          // Avoid scanning more than once; this can happen because
   2.181 -          // newgen cards set by GC may a different set than the
   2.182 -          // originally dirty set
   2.183 -          p = last_scanned;
   2.184 -        }
   2.185 -        oop* to = (oop*)addr_for(following_clean_card);
   2.186 -
   2.187 -        // Test slice_end first!
   2.188 -        if ((HeapWord*)to > slice_end) {
   2.189 -          to = (oop*)slice_end;
   2.190 -        } else if (to > sp_top) {
   2.191 -          to = sp_top;
   2.192 -        }
   2.193 -
   2.194 -        // we know which cards to scan, now clear them
   2.195 -        if (first_unclean_card <= worker_start_card+1)
   2.196 -          first_unclean_card = worker_start_card+1;
   2.197 -        if (following_clean_card >= worker_end_card-1)
   2.198 -          following_clean_card = worker_end_card-1;
   2.199 -
   2.200 -        while (first_unclean_card < following_clean_card) {
   2.201 -          *first_unclean_card++ = clean_card;
   2.202 -        }
   2.203 -
   2.204 -        const int interval = PrefetchScanIntervalInBytes;
   2.205 -        // scan all objects in the range
   2.206 -        if (interval != 0) {
   2.207 -          while (p < to) {
   2.208 -            Prefetch::write(p, interval);
   2.209 -            oop m = oop(p);
   2.210 -            assert(m->is_oop_or_null(), "check for header");
   2.211 -            m->push_contents(pm);
   2.212 -            p += m->size();
   2.213 -          }
   2.214 -          pm->drain_stacks_cond_depth();
   2.215 -        } else {
   2.216 -          while (p < to) {
   2.217 -            oop m = oop(p);
   2.218 -            assert(m->is_oop_or_null(), "check for header");
   2.219 -            m->push_contents(pm);
   2.220 -            p += m->size();
   2.221 -          }
   2.222 -          pm->drain_stacks_cond_depth();
   2.223 -        }
   2.224 -        last_scanned = p;
   2.225 -      }
   2.226 -      // "current_card" is still the "following_clean_card" or
   2.227 -      // the current_card is >= the worker_end_card so the
   2.228 -      // loop will not execute again.
   2.229 -      assert((current_card == following_clean_card) ||
   2.230 -             (current_card >= worker_end_card),
   2.231 -        "current_card should only be incremented if it still equals "
   2.232 -        "following_clean_card");
   2.233 -      // Increment current_card so that it is not processed again.
   2.234 -      // It may now be dirty because a old-to-young pointer was
   2.235 -      // found on it an updated.  If it is now dirty, it cannot be
   2.236 -      // be safely cleaned in the next iteration.
   2.237 -      current_card++;
   2.238 -    }
   2.239 -  }
   2.240 -}
   2.241 -
   2.242  // This should be called before a scavenge.
   2.243  void CardTableExtension::verify_all_young_refs_imprecise() {
   2.244    CheckForUnmarkedObjects check;
     3.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Wed Jun 22 14:26:49 2016 +0800
     3.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp	Fri Jun 24 17:12:13 2016 +0800
     3.3 @@ -22,12 +22,6 @@
     3.4   *
     3.5   */
     3.6  
     3.7 -/*
     3.8 - * This file has been modified by Loongson Technology in 2015. These
     3.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    3.10 - * available on the same license terms set forth above.
    3.11 - */
    3.12 -
    3.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_CARDTABLEEXTENSION_HPP
    3.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_CARDTABLEEXTENSION_HPP
    3.15  
    3.16 @@ -73,15 +67,6 @@
    3.17                                    uint stripe_number,
    3.18                                    uint stripe_total);
    3.19  
    3.20 -  void scavenge_contents_parallel_oldnuma(ObjectStartArray* start_array,
    3.21 -                                          MutableSpace* sp_below,
    3.22 -                                          MutableSpace* sp,
    3.23 -                                          HeapWord* space_top,
    3.24 -                                          PSPromotionManager* pm,
    3.25 -                                          uint stripe_number,
    3.26 -                                          uint stripe_total);
    3.27 -
    3.28 -
    3.29    // Verification
    3.30    static void verify_all_young_refs_imprecise();
    3.31    static void verify_all_young_refs_precise();
     4.1 --- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Wed Jun 22 14:26:49 2016 +0800
     4.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Fri Jun 24 17:12:13 2016 +0800
     4.3 @@ -22,12 +22,6 @@
     4.4   *
     4.5   */
     4.6  
     4.7 -/*
     4.8 - * This file has been modified by Loongson Technology in 2015. These
     4.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    4.10 - * available on the same license terms set forth above.
    4.11 - */
    4.12 -
    4.13  #include "precompiled.hpp"
    4.14  #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
    4.15  #include "gc_implementation/parallelScavenge/gcTaskThread.hpp"
    4.16 @@ -286,41 +280,6 @@
    4.17    return result;
    4.18  }
    4.19  
    4.20 -// Dequeue one task, preferring one with numa_aware.
    4.21 -GCTask* GCTaskQueue::numa_dequeue(int numa_id) {
    4.22 -  if (TraceGCTaskQueue) {
    4.23 -    tty->print_cr("[" INTPTR_FORMAT "]"
    4.24 -                  " GCTaskQueue::dequeue(%u)", this, numa_id);
    4.25 -    print("before:");
    4.26 -  }
    4.27 -  assert(!is_empty(), "shouldn't dequeue from empty list");
    4.28 -  // Look down to the next barrier for a task with this affinity.
    4.29 -  GCTask* result = NULL;
    4.30 -  for (GCTask* element = remove_end();
    4.31 -       element != NULL;
    4.32 -       element = element->newer()) {
    4.33 -    if (element->is_barrier_task()) {
    4.34 -      // Don't consider barrier tasks, nor past them.
    4.35 -      result = NULL;
    4.36 -      break;
    4.37 -    }
    4.38 -    if (element->task_numa_id() == numa_id) {
    4.39 -      result = remove(element);
    4.40 -      break;
    4.41 -    }
    4.42 -  }
    4.43 -  // If we didn't find anything with affinity, just take the next task.
    4.44 -  if (result == NULL) {
    4.45 -    result = remove();
    4.46 -  }
    4.47 -
    4.48 -  if (TraceGCTaskQueue) {
    4.49 -    tty->print_cr("    return: " INTPTR_FORMAT, result);
    4.50 -    print("after:");
    4.51 -  }
    4.52 -  return result;
    4.53 -}
    4.54 -
    4.55  GCTask* GCTaskQueue::remove() {
    4.56    // Dequeue from remove end.
    4.57    GCTask* result = remove_end();
    4.58 @@ -452,11 +411,10 @@
    4.59      //     Distribute the workers among the available processors,
    4.60      //     unless we were told not to, or if the os doesn't want to.
    4.61      uint* processor_assignment = NEW_C_HEAP_ARRAY(uint, workers(), mtGC);
    4.62 -    if (BindGCTaskThreadsToCPUs ||
    4.63 +    if (!BindGCTaskThreadsToCPUs ||
    4.64          !os::distribute_processes(workers(), processor_assignment)) {
    4.65        for (uint a = 0; a < workers(); a += 1) {
    4.66 -        /*2014/7/7 Liao: Bind GCThread [a] to processor [a] */
    4.67 -        processor_assignment[a] = a;
    4.68 +        processor_assignment[a] = sentinel_worker();
    4.69        }
    4.70      }
    4.71      _thread = NEW_C_HEAP_ARRAY(GCTaskThread*, workers(), mtGC);
    4.72 @@ -701,13 +659,7 @@
    4.73      if (UseGCTaskAffinity) {
    4.74        result = queue()->dequeue(which);
    4.75      } else {
    4.76 -      /* 2014/7/7 Liao: GCThread get task on numa machines with numa_aware. */
    4.77 -      if(UseNUMAThreadRoots) {
    4.78 -        result = queue()->numa_dequeue(os::numa_get_group_id());
    4.79 -      }
    4.80 -      else {
    4.81 -        result = queue()->dequeue();
    4.82 -      }
    4.83 +      result = queue()->dequeue();
    4.84      }
    4.85      if (result->is_barrier_task()) {
    4.86        assert(which != sentinel_worker(),
     5.1 --- a/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Wed Jun 22 14:26:49 2016 +0800
     5.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.hpp	Fri Jun 24 17:12:13 2016 +0800
     5.3 @@ -22,12 +22,6 @@
     5.4   *
     5.5   */
     5.6  
     5.7 -/*
     5.8 - * This file has been modified by Loongson Technology in 2015. These
     5.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    5.10 - * available on the same license terms set forth above.
    5.11 - */
    5.12 -
    5.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GCTASKMANAGER_HPP
    5.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_GCTASKMANAGER_HPP
    5.15  
    5.16 @@ -80,7 +74,6 @@
    5.17    // Instance state.
    5.18    const Kind::kind _kind;               // For runtime type checking.
    5.19    const uint       _affinity;           // Which worker should run task.
    5.20 -  int              _numa_id;            //Which numa node should run task.
    5.21    GCTask*          _newer;              // Tasks are on doubly-linked ...
    5.22    GCTask*          _older;              // ... lists.
    5.23  public:
    5.24 @@ -95,12 +88,6 @@
    5.25    uint affinity() const {
    5.26      return _affinity;
    5.27    }
    5.28 -  uint set_task_numa_id(int id) {
    5.29 -    _numa_id = id;
    5.30 -  }
    5.31 -  int task_numa_id() {
    5.32 -    return _numa_id;
    5.33 -  }
    5.34    GCTask* newer() const {
    5.35      return _newer;
    5.36    }
    5.37 @@ -186,8 +173,6 @@
    5.38    GCTask* dequeue();
    5.39    //     Dequeue one task, preferring one with affinity.
    5.40    GCTask* dequeue(uint affinity);
    5.41 -  //     Dequeue one task, preferring on with numa_aware.
    5.42 -  GCTask* numa_dequeue(int numa_id);
    5.43  protected:
    5.44    // Constructor. Clients use factory, but there might be subclasses.
    5.45    GCTaskQueue(bool on_c_heap);
    5.46 @@ -276,10 +261,6 @@
    5.47      guarantee(own_lock(), "don't own the lock");
    5.48      return unsynchronized_queue()->dequeue(affinity);
    5.49    }
    5.50 -  GCTask* numa_dequeue(int numa_id) {
    5.51 -    guarantee(own_lock(), "don't own the lock");
    5.52 -    return unsynchronized_queue()->numa_dequeue(numa_id);
    5.53 -  }
    5.54    uint length() const {
    5.55      guarantee(own_lock(), "don't own the lock");
    5.56      return unsynchronized_queue()->length();
     6.1 --- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp	Wed Jun 22 14:26:49 2016 +0800
     6.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp	Fri Jun 24 17:12:13 2016 +0800
     6.3 @@ -22,12 +22,6 @@
     6.4   *
     6.5   */
     6.6  
     6.7 -/*
     6.8 - * This file has been modified by Loongson Technology in 2015. These
     6.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    6.10 - * available on the same license terms set forth above.
    6.11 - */
    6.12 -
    6.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP
    6.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_OBJECTSTARTARRAY_HPP
    6.15  
    6.16 @@ -160,32 +154,6 @@
    6.17      return scroll_forward;
    6.18    }
    6.19  
    6.20 -  HeapWord* object_start_oldnuma(HeapWord* addr, HeapWord* bottom) const {
    6.21 -    assert(_covered_region.contains(addr), "Must be in covered region");
    6.22 -    jbyte* block = block_for_addr(addr);
    6.23 -    HeapWord* scroll_forward;
    6.24 -    if(bottom == addr)
    6.25 -      return bottom;
    6.26 -    else
    6.27 -      scroll_forward = offset_addr_for_block(block--);
    6.28 -    while (scroll_forward > addr) {
    6.29 -      scroll_forward = offset_addr_for_block(block--);
    6.30 -    }
    6.31 -
    6.32 -    HeapWord* next = scroll_forward;
    6.33 -    while (next <= addr) {
    6.34 -      if(oop(next)->is_oop_or_null()) {
    6.35 -        scroll_forward = next;
    6.36 -        next += oop(next)->size();
    6.37 -      }
    6.38 -      else
    6.39 -        next++;
    6.40 -    }
    6.41 -    assert(scroll_forward <= addr, "wrong order for current and arg");
    6.42 -    assert(addr <= next, "wrong order for arg and next");
    6.43 -    return scroll_forward;
    6.44 -  }
    6.45 -
    6.46    bool is_block_allocated(HeapWord* addr) {
    6.47      assert(_covered_region.contains(addr), "Must be in covered region");
    6.48      jbyte* block = block_for_addr(addr);
     7.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Wed Jun 22 14:26:49 2016 +0800
     7.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.cpp	Fri Jun 24 17:12:13 2016 +0800
     7.3 @@ -22,18 +22,11 @@
     7.4   *
     7.5   */
     7.6  
     7.7 -/*
     7.8 - * This file has been modified by Loongson Technology in 2015. These
     7.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    7.10 - * available on the same license terms set forth above.
    7.11 - */
    7.12 -
    7.13  #include "precompiled.hpp"
    7.14  #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
    7.15  #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
    7.16  #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
    7.17  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
    7.18 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
    7.19  #include "gc_implementation/shared/spaceDecorator.hpp"
    7.20  #include "memory/cardTableModRefBS.hpp"
    7.21  #include "memory/gcLocker.inline.hpp"
    7.22 @@ -135,12 +128,8 @@
    7.23    //
    7.24    // ObjectSpace stuff
    7.25    //
    7.26 -  if(UseOldNUMA) {
    7.27 -    _object_space = new MutableNUMASpace(virtual_space()->alignment());
    7.28 -  }
    7.29 -  else {
    7.30 -    _object_space = new MutableSpace(virtual_space()->alignment());
    7.31 -  }
    7.32 +
    7.33 +  _object_space = new MutableSpace(virtual_space()->alignment());
    7.34  
    7.35    if (_object_space == NULL)
    7.36      vm_exit_during_initialization("Could not allocate an old gen space");
    7.37 @@ -225,12 +214,12 @@
    7.38    return allocate_noexpand(word_size);
    7.39  }
    7.40  
    7.41 -HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size, int node) {
    7.42 +HeapWord* PSOldGen::expand_and_cas_allocate(size_t word_size) {
    7.43    expand(word_size*HeapWordSize);
    7.44    if (GCExpandToAllocateDelayMillis > 0) {
    7.45      os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
    7.46    }
    7.47 -  return cas_allocate_noexpand(word_size, node);
    7.48 +  return cas_allocate_noexpand(word_size);
    7.49  }
    7.50  
    7.51  void PSOldGen::expand(size_t bytes) {
    7.52 @@ -413,16 +402,10 @@
    7.53    Universe::heap()->barrier_set()->resize_covered_region(new_memregion);
    7.54  
    7.55    // ALWAYS do this last!!
    7.56 -  if(UseOldNUMA) {
    7.57 -    HeapWord* const virtual_space_high = (HeapWord*) virtual_space()->high();
    7.58 -    object_space()->set_end(virtual_space_high);                          
    7.59 -  }
    7.60 -  else {
    7.61 -    object_space()->initialize(new_memregion,
    7.62 -                               SpaceDecorator::DontClear,
    7.63 -                               SpaceDecorator::DontMangle);
    7.64 -  }
    7.65 - 
    7.66 +  object_space()->initialize(new_memregion,
    7.67 +                             SpaceDecorator::DontClear,
    7.68 +                             SpaceDecorator::DontMangle);
    7.69 +
    7.70    assert(new_word_size == heap_word_size(object_space()->capacity_in_bytes()),
    7.71      "Sanity");
    7.72  }
     8.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Wed Jun 22 14:26:49 2016 +0800
     8.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp	Fri Jun 24 17:12:13 2016 +0800
     8.3 @@ -22,12 +22,6 @@
     8.4   *
     8.5   */
     8.6  
     8.7 -/*
     8.8 - * This file has been modified by Loongson Technology in 2015. These
     8.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    8.10 - * available on the same license terms set forth above.
    8.11 - */
    8.12 -
    8.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSOLDGEN_HPP
    8.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSOLDGEN_HPP
    8.15  
    8.16 @@ -35,7 +29,6 @@
    8.17  #include "gc_implementation/parallelScavenge/psGenerationCounters.hpp"
    8.18  #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
    8.19  #include "gc_implementation/shared/mutableSpace.hpp"
    8.20 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
    8.21  #include "gc_implementation/shared/spaceCounters.hpp"
    8.22  #include "runtime/safepoint.hpp"
    8.23  
    8.24 @@ -80,15 +73,9 @@
    8.25    // Support for MT garbage collection. CAS allocation is lower overhead than grabbing
    8.26    // and releasing the heap lock, which is held during gc's anyway. This method is not
    8.27    // safe for use at the same time as allocate_noexpand()!
    8.28 -  HeapWord* cas_allocate_noexpand(size_t word_size, int node) {
    8.29 +  HeapWord* cas_allocate_noexpand(size_t word_size) {
    8.30      assert(SafepointSynchronize::is_at_safepoint(), "Must only be called at safepoint");
    8.31 -    HeapWord* res;
    8.32 -    if(UseOldNUMA) {
    8.33 -      res = object_space()->cas_allocate_oldnuma(word_size, node);
    8.34 -    }
    8.35 -    else {
    8.36 -      res = object_space()->cas_allocate(word_size);
    8.37 -    } 
    8.38 +    HeapWord* res = object_space()->cas_allocate(word_size);
    8.39      if (res != NULL) {
    8.40        _start_array.allocate_block(res);
    8.41      }
    8.42 @@ -96,13 +83,13 @@
    8.43    }
    8.44  
    8.45    // Support for MT garbage collection. See above comment.
    8.46 -  HeapWord* cas_allocate(size_t word_size, int node) {
    8.47 -    HeapWord* res = cas_allocate_noexpand(word_size, node);
    8.48 -    return (res == NULL) ? expand_and_cas_allocate(word_size, node) : res;
    8.49 +  HeapWord* cas_allocate(size_t word_size) {
    8.50 +    HeapWord* res = cas_allocate_noexpand(word_size);
    8.51 +    return (res == NULL) ? expand_and_cas_allocate(word_size) : res;
    8.52    }
    8.53  
    8.54    HeapWord* expand_and_allocate(size_t word_size);
    8.55 -  HeapWord* expand_and_cas_allocate(size_t word_size, int node);
    8.56 +  HeapWord* expand_and_cas_allocate(size_t word_size);
    8.57    void expand(size_t bytes);
    8.58    bool expand_by(size_t bytes);
    8.59    bool expand_to_reserved();
    8.60 @@ -160,22 +147,6 @@
    8.61    size_t capacity_in_bytes() const        { return object_space()->capacity_in_bytes(); }
    8.62    size_t used_in_bytes() const            { return object_space()->used_in_bytes(); }
    8.63    size_t free_in_bytes() const            { return object_space()->free_in_bytes(); }
    8.64 -  size_t free_in_bytes_numa() const {
    8.65 -    size_t min_size, free_bytes;
    8.66 -    MutableNUMASpace* s = (MutableNUMASpace*) object_space();
    8.67 -    int i = s->lgrp_spaces()->length();
    8.68 -    int j;
    8.69 -    MutableNUMASpace::LGRPSpace *ls;
    8.70 -    MutableSpace* sp;
    8.71 -    for(j = 0; j < i; j++) {
    8.72 -      ls = s->lgrp_spaces()->at(j);
    8.73 -      sp = ls->space();
    8.74 -      free_bytes = sp->free_in_bytes();
    8.75 -      if(j == 0) min_size = free_bytes;
    8.76 -      if(free_bytes < min_size) min_size = free_bytes;
    8.77 -    }
    8.78 -    return min_size;
    8.79 -  }
    8.80  
    8.81    size_t capacity_in_words() const        { return object_space()->capacity_in_words(); }
    8.82    size_t used_in_words() const            { return object_space()->used_in_words(); }
     9.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Jun 22 14:26:49 2016 +0800
     9.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jun 24 17:12:13 2016 +0800
     9.3 @@ -22,12 +22,6 @@
     9.4   *
     9.5   */
     9.6  
     9.7 -/*
     9.8 - * This file has been modified by Loongson Technology in 2015. These
     9.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
    9.10 - * available on the same license terms set forth above.
    9.11 - */
    9.12 -
    9.13  #include "precompiled.hpp"
    9.14  #include "classfile/symbolTable.hpp"
    9.15  #include "classfile/systemDictionary.hpp"
    9.16 @@ -2105,11 +2099,7 @@
    9.17      // Let the size policy know we're done
    9.18      size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);
    9.19  
    9.20 -    /* 2014/2/12/ Liao: In UseOldNUMA, the size of old-gen should not be changed as the young-gen,
    9.21 -     * when minorGC happens, the eden size will be emptied, so we can change the size of the eden
    9.22 -     * size and then bind to each NUMA group, but in fullGC, the old-gen should not be emptied, so
    9.23 -     * we let the old-gen should not be changed here. */
    9.24 -    if (UseAdaptiveSizePolicy && !UseOldNUMA) {
    9.25 +    if (UseAdaptiveSizePolicy) {
    9.26        if (PrintAdaptiveSizePolicy) {
    9.27          gclog_or_tty->print("AdaptiveSizeStart: ");
    9.28          gclog_or_tty->stamp();
    10.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Wed Jun 22 14:26:49 2016 +0800
    10.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionLAB.cpp	Fri Jun 24 17:12:13 2016 +0800
    10.3 @@ -22,17 +22,10 @@
    10.4   *
    10.5   */
    10.6  
    10.7 -/*
    10.8 - * This file has been modified by Loongson Technology in 2015. These
    10.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   10.10 - * available on the same license terms set forth above.
   10.11 - */
   10.12 -
   10.13  #include "precompiled.hpp"
   10.14  #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
   10.15  #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
   10.16  #include "gc_implementation/shared/mutableSpace.hpp"
   10.17 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
   10.18  #include "oops/oop.inline.hpp"
   10.19  
   10.20  size_t PSPromotionLAB::filler_header_size;
   10.21 @@ -164,28 +157,8 @@
   10.22    PSOldGen* old_gen = heap->old_gen();
   10.23    MemRegion used = old_gen->object_space()->used_region();
   10.24  
   10.25 -  /* 2014/2/12/ Liao: In UseOldNUMA, the new lab may be allocated out of the current used_region. 
   10.26 -   * For example, a new plab should be allocated in lgrp2, while the top of current used_region 
   10.27 -   * is in lgrp1. The original checking will return invalid, while this situation is reasonable. 
   10.28 -   * So we should check whether the lab is in one of the lgrps. */
   10.29 -  if(UseOldNUMA) {
   10.30 -    MutableSpace* sp;
   10.31 -    MutableNUMASpace::LGRPSpace *ls;
   10.32 -    MutableNUMASpace* s = (MutableNUMASpace*) old_gen->object_space();
   10.33 -    int i, j;
   10.34 -    i = s->lgrp_spaces()->length();
   10.35 -    for(j = 0; j < i; j++) {
   10.36 -      ls = s->lgrp_spaces()->at(j);
   10.37 -      sp = ls->space();
   10.38 -      used = sp ->used_region(); 
   10.39 -      if (used.contains(lab)) 
   10.40 -        return true;
   10.41 -    }
   10.42 -  }
   10.43 -  else {
   10.44 -    if (used.contains(lab)) {
   10.45 -      return true;
   10.46 -    }
   10.47 +  if (used.contains(lab)) {
   10.48 +    return true;
   10.49    }
   10.50  
   10.51    return false;
    11.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Wed Jun 22 14:26:49 2016 +0800
    11.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Jun 24 17:12:13 2016 +0800
    11.3 @@ -22,12 +22,6 @@
    11.4   *
    11.5   */
    11.6  
    11.7 -/*
    11.8 - * This file has been modified by Loongson Technology in 2015. These
    11.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   11.10 - * available on the same license terms set forth above.
   11.11 - */
   11.12 -
   11.13  #include "precompiled.hpp"
   11.14  #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
   11.15  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
   11.16 @@ -35,7 +29,6 @@
   11.17  #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
   11.18  #include "gc_implementation/shared/gcTrace.hpp"
   11.19  #include "gc_implementation/shared/mutableSpace.hpp"
   11.20 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
   11.21  #include "memory/allocation.inline.hpp"
   11.22  #include "memory/memRegion.hpp"
   11.23  #include "memory/padded.inline.hpp"
   11.24 @@ -48,7 +41,6 @@
   11.25  OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
   11.26  PSOldGen*                      PSPromotionManager::_old_gen = NULL;
   11.27  MutableSpace*                  PSPromotionManager::_young_space = NULL;
   11.28 -int node_ex = 0;
   11.29  
   11.30  void PSPromotionManager::initialize() {
   11.31    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   11.32 @@ -165,16 +157,7 @@
   11.33    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   11.34  
   11.35    // We set the old lab's start array.
   11.36 -  if(UseOldNUMA) {
   11.37 -    MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
   11.38 -    int i;
   11.39 -    for(i = 0; i < s->lgrp_spaces()->length(); i++) {
   11.40 -      _old_lab_oldnuma[i].set_start_array(old_gen()->start_array());
   11.41 -    }
   11.42 -  }
   11.43 -  else {
   11.44 -    _old_lab.set_start_array(old_gen()->start_array());
   11.45 -  }
   11.46 +  _old_lab.set_start_array(old_gen()->start_array());
   11.47  
   11.48    uint queue_size;
   11.49    claimed_stack_depth()->initialize();
   11.50 @@ -210,21 +193,7 @@
   11.51    _young_gen_is_full = false;
   11.52  
   11.53    lab_base = old_gen()->object_space()->top();
   11.54 -  if(UseOldNUMA) {
   11.55 -    MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
   11.56 -    int i;
   11.57 -    MutableNUMASpace::LGRPSpace *ls;
   11.58 -    MutableSpace *sp;
   11.59 -    for(i = 0; i < s->lgrp_spaces()->length(); i++) {
   11.60 -      ls = s->lgrp_spaces()->at(i);
   11.61 -      sp = ls->space();
   11.62 -      lab_base = sp->top();
   11.63 -      _old_lab_oldnuma[i].initialize(MemRegion(lab_base, (size_t)0));
   11.64 -    }
   11.65 -  }
   11.66 -  else {
   11.67 -    _old_lab.initialize(MemRegion(lab_base, (size_t)0));
   11.68 -  }
   11.69 +  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
   11.70    _old_gen_is_full = false;
   11.71  
   11.72    _promotion_failed_info.reset();
   11.73 @@ -232,8 +201,8 @@
   11.74    TASKQUEUE_STATS_ONLY(reset_stats());
   11.75  }
   11.76  
   11.77 +
   11.78  void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
   11.79 -  Thread* thr = Thread::current();
   11.80    totally_drain = totally_drain || _totally_drain;
   11.81  
   11.82  #ifdef ASSERT
   11.83 @@ -278,20 +247,9 @@
   11.84    if (!_young_lab.is_flushed())
   11.85      _young_lab.flush();
   11.86  
   11.87 -  if(UseOldNUMA) {
   11.88 -    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   11.89 -    MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
   11.90 -    int i;
   11.91 -    for(i = 0; i < s->lgrp_spaces()->length(); i++) {
   11.92 -      if (!_old_lab_oldnuma[i].is_flushed())
   11.93 -        _old_lab_oldnuma[i].flush();
   11.94 -    }
   11.95 -  }
   11.96 -  else {
   11.97 -    assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
   11.98 -    if (!_old_lab.is_flushed())
   11.99 -      _old_lab.flush();
  11.100 -  }
  11.101 +  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  11.102 +  if (!_old_lab.is_flushed())
  11.103 +    _old_lab.flush();
  11.104  
  11.105    // Let PSScavenge know if we overflowed
  11.106    if (_young_gen_is_full) {
  11.107 @@ -384,87 +342,3 @@
  11.108  
  11.109    return obj;
  11.110  }
  11.111 -
  11.112 -//Used to recognise the location of oop o, in eden_space or old_space, or others?
  11.113 -int PSPromotionManager::get_oop_location(oop o) {
  11.114 -  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  11.115 -  MutableSpace* sp_eden = heap->young_gen()->eden_space();
  11.116 -  MutableSpace* sp_old = heap->old_gen()->object_space();
  11.117 -  HeapWord* temp_o = (HeapWord*) o;
  11.118 -  if(temp_o > sp_eden->bottom() && temp_o < sp_eden->end())
  11.119 -    return 1;
  11.120 -  else if (temp_o > sp_old->bottom() && temp_o < sp_old->end())
  11.121 -    return 2;
  11.122 -  else
  11.123 -    return (-1);
  11.124 -}
  11.125 -
  11.126 -// Used to get numa_node_id of Oop o, location = 1 means Oop o is in eden space, 
  11.127 -// location = 2 means Oops o is in old space
  11.128 -
  11.129 -int PSPromotionManager::get_oop_node_id(oop o, int location) {
  11.130 -  HeapWord* temp_o = (HeapWord*) o;
  11.131 -  ParallelScavengeHeap* heap = (ParallelScavengeHeap*) Universe::heap();
  11.132 -  MutableNUMASpace* sp_eden = (MutableNUMASpace*) heap->young_gen()->eden_space();
  11.133 -  MutableNUMASpace* sp_old = (MutableNUMASpace*) heap->old_gen()->object_space();
  11.134 -  MutableNUMASpace::LGRPSpace *ls;
  11.135 -  MutableSpace* sp_temp;
  11.136 -  int i = sp_eden->lgrp_spaces()->length();
  11.137 -  int j;
  11.138 -
  11.139 -  if(location == 1) {
  11.140 -    for(j = 0; j < i; j++) {
  11.141 -      ls = sp_eden->lgrp_spaces()->at(j);
  11.142 -      sp_temp = ls->space();
  11.143 -      if(temp_o > sp_temp->bottom() && temp_o < sp_temp->end())
  11.144 -        return j;
  11.145 -    }
  11.146 -  }
  11.147 -  else if(location == 2){
  11.148 -    for(j = 0; j < i; j++) {
  11.149 -      ls = sp_old->lgrp_spaces()->at(j);
  11.150 -      sp_temp = ls->space();
  11.151 -      if(temp_o > sp_temp->bottom() && temp_o < sp_temp->end())
  11.152 -        return j;
  11.153 -    }
  11.154 -  }
  11.155 -  else {
  11.156 -    tty->print_cr("Warning:Should not reach here!!!");
  11.157 -  }
  11.158 -}
  11.159 -
  11.160 -extern int task_tag[16];
  11.161 -extern int each_total_num[16];
  11.162 -extern int each_eden_total_num[3][16];
  11.163 -extern int each_eden_aligned_num[3][16];
  11.164 -
  11.165 -void PSPromotionManager::stastic_scavenge(oop o) {
  11.166 -  // oop o is in eden
  11.167 -  if(get_oop_location(o) == 1) {
  11.168 -    each_total_num[os::numa_get_cpu_id()]++;
  11.169 -
  11.170 -    // Used to stastic which Task handle this oop
  11.171 -    if(task_tag[os::numa_get_cpu_id()] == 1) {
  11.172 -      each_eden_total_num[0][os::numa_get_cpu_id()]++;
  11.173 -    }
  11.174 -    else if(task_tag[os::numa_get_cpu_id()] == 2) {
  11.175 -      each_eden_total_num[1][os::numa_get_cpu_id()]++;
  11.176 -    }
  11.177 -    else if(task_tag[os::numa_get_cpu_id()] == 3) {
  11.178 -      each_eden_total_num[2][os::numa_get_cpu_id()]++;
  11.179 -    }
  11.180 -
  11.181 -    // Used to stastic the propotion of oop on the same node of GC Thread
  11.182 -    if(get_oop_node_id(o, 1) == os::numa_get_group_id()) {
  11.183 -      if(task_tag[os::numa_get_cpu_id()] == 1) {
  11.184 -        each_eden_aligned_num[0][os::numa_get_cpu_id()]++;
  11.185 -      }
  11.186 -      else if(task_tag[os::numa_get_cpu_id()] == 2) {
  11.187 -        each_eden_aligned_num[1][os::numa_get_cpu_id()]++;
  11.188 -      }
  11.189 -      else if(task_tag[os::numa_get_cpu_id()] == 3) {
  11.190 -        each_eden_aligned_num[2][os::numa_get_cpu_id()]++;
  11.191 -      }
  11.192 -    }
  11.193 -  }
  11.194 -}
    12.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Wed Jun 22 14:26:49 2016 +0800
    12.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Jun 24 17:12:13 2016 +0800
    12.3 @@ -22,12 +22,6 @@
    12.4   *
    12.5   */
    12.6  
    12.7 -/*
    12.8 - * This file has been modified by Loongson Technology in 2015. These
    12.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   12.10 - * available on the same license terms set forth above.
   12.11 - */
   12.12 -
   12.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
   12.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
   12.15  
   12.16 @@ -83,7 +77,6 @@
   12.17  
   12.18    PSYoungPromotionLAB                 _young_lab;
   12.19    PSOldPromotionLAB                   _old_lab;
   12.20 -  PSOldPromotionLAB                   _old_lab_oldnuma[4];
   12.21    bool                                _young_gen_is_full;
   12.22    bool                                _old_gen_is_full;
   12.23  
   12.24 @@ -183,11 +176,6 @@
   12.25    bool old_gen_is_full()               { return _old_gen_is_full; }
   12.26    void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }
   12.27  
   12.28 -  //Stastic methods
   12.29 -  int get_oop_location(oop o);
   12.30 -  int get_oop_node_id(oop o, int location);
   12.31 -  void stastic_scavenge(oop o);
   12.32 -
   12.33    // Promotion methods
   12.34    template<bool promote_immediately> oop copy_to_survivor_space(oop o);
   12.35    oop oop_promotion_failed(oop obj, markOop obj_mark);
    13.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Wed Jun 22 14:26:49 2016 +0800
    13.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Jun 24 17:12:13 2016 +0800
    13.3 @@ -22,19 +22,12 @@
    13.4   *
    13.5   */
    13.6  
    13.7 -/*
    13.8 - * This file has been modified by Loongson Technology in 2015. These
    13.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   13.10 - * available on the same license terms set forth above.
   13.11 - */
   13.12 -
   13.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
   13.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
   13.15  
   13.16  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
   13.17  #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
   13.18  #include "gc_implementation/parallelScavenge/psScavenge.hpp"
   13.19 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
   13.20  #include "oops/oop.psgc.inline.hpp"
   13.21  
   13.22  inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   13.23 @@ -75,11 +68,6 @@
   13.24  // into smaller submethods, but we need to be careful not to hurt
   13.25  // performance.
   13.26  //
   13.27 -
   13.28 -extern int node_ex;
   13.29 -extern int   each_gc_copy_fre[16];
   13.30 -extern float each_gc_copy_time[16];
   13.31 -
   13.32  template<bool promote_immediately>
   13.33  oop PSPromotionManager::copy_to_survivor_space(oop o) {
   13.34    assert(PSScavenge::should_scavenge(&o), "Sanity");
   13.35 @@ -95,10 +83,6 @@
   13.36    if (!test_mark->is_marked()) {
   13.37      bool new_obj_is_tenured = false;
   13.38      size_t new_obj_size = o->size();
   13.39 -    
   13.40 -    if(UseStasticScavenge) {
   13.41 -      stastic_scavenge(o);
   13.42 -    }
   13.43  
   13.44      if (!promote_immediately) {
   13.45        // Find the objects age, MT safe.
   13.46 @@ -138,154 +122,53 @@
   13.47        }
   13.48  #endif  // #ifndef PRODUCT
   13.49  
   13.50 -      if(UseOldNUMA) { 
   13.51 -/* 2014/7/7 Liao: Copy objects to the same node of current GC thread */
   13.52 -        if(UseNUMAGC) {
   13.53 -          new_obj = (oop) _old_lab_oldnuma[os::numa_get_group_id()].allocate(new_obj_size);
   13.54 -          new_obj_is_tenured = true;
   13.55 +      new_obj = (oop) _old_lab.allocate(new_obj_size);
   13.56 +      new_obj_is_tenured = true;
   13.57  
   13.58 -          if (new_obj == NULL) {
   13.59 -            if (!_old_gen_is_full) {
   13.60 -              // Do we allocate directly, or flush and refill?
   13.61 -              if (new_obj_size > (OldPLABSize / 2)) {
   13.62 -                // Allocate this object directly
   13.63 -                new_obj = (oop)old_gen()->cas_allocate(new_obj_size, os::numa_get_group_id());
   13.64 -              } else {
   13.65 -                // Flush and fill
   13.66 -                _old_lab_oldnuma[os::numa_get_group_id()].flush();
   13.67 +      if (new_obj == NULL) {
   13.68 +        if (!_old_gen_is_full) {
   13.69 +          // Do we allocate directly, or flush and refill?
   13.70 +          if (new_obj_size > (OldPLABSize / 2)) {
   13.71 +            // Allocate this object directly
   13.72 +            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
   13.73 +          } else {
   13.74 +            // Flush and fill
   13.75 +            _old_lab.flush();
   13.76  
   13.77 -                HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, os::numa_get_group_id());
   13.78 -                if(lab_base != NULL) {
   13.79 -                  _old_lab_oldnuma[os::numa_get_group_id()].initialize(MemRegion(lab_base, OldPLABSize));
   13.80 -                  // Try the old lab allocation again.
   13.81 -                  new_obj = (oop) _old_lab_oldnuma[os::numa_get_group_id()].allocate(new_obj_size);
   13.82 -                }
   13.83 +            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
   13.84 +            if(lab_base != NULL) {
   13.85 +#ifdef ASSERT
   13.86 +              // Delay the initialization of the promotion lab (plab).
   13.87 +              // This exposes uninitialized plabs to card table processing.
   13.88 +              if (GCWorkerDelayMillis > 0) {
   13.89 +                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
   13.90                }
   13.91 -            }
   13.92 -
   13.93 -            // This is the promotion failed test, and code handling.
   13.94 -            // The code belongs here for two reasons. It is slightly
   13.95 -            // different than the code below, and cannot share the
   13.96 -            // CAS testing code. Keeping the code here also minimizes
   13.97 -            // the impact on the common case fast path code.
   13.98 -
   13.99 -            if (new_obj == NULL) {
  13.100 -              _old_gen_is_full = true;
  13.101 -              return oop_promotion_failed(o, test_mark);
  13.102 +#endif
  13.103 +              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
  13.104 +              // Try the old lab allocation again.
  13.105 +              new_obj = (oop) _old_lab.allocate(new_obj_size);
  13.106              }
  13.107            }
  13.108          }
  13.109 -        else {
  13.110 -          ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  13.111 -          MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
  13.112 -          int i = s->lgrp_spaces()->length();
  13.113 -          int node;
  13.114 -          if(i > 1) {
  13.115 -            node = node_ex % (i - 1) + 1;
  13.116 -            node_ex++;
  13.117 -          }
  13.118 -          else
  13.119 -           node = 0;
  13.120  
  13.121 -          new_obj = (oop) _old_lab_oldnuma[node].allocate(new_obj_size);
  13.122 -          new_obj_is_tenured = true;
  13.123 -
  13.124 -          if (new_obj == NULL) {
  13.125 -            if (!_old_gen_is_full) {
  13.126 -              // Do we allocate directly, or flush and refill?
  13.127 -              if (new_obj_size > (OldPLABSize / 2)) {
  13.128 -                // Allocate this object directly
  13.129 -                new_obj = (oop)old_gen()->cas_allocate(new_obj_size, node);
  13.130 -              } else {
  13.131 -                // Flush and fill
  13.132 -                _old_lab_oldnuma[node].flush();
  13.133 -
  13.134 -                HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, node);
  13.135 -                if(lab_base != NULL) {
  13.136 -                  _old_lab_oldnuma[node].initialize(MemRegion(lab_base, OldPLABSize));
  13.137 -                  // Try the old lab allocation again.
  13.138 -                  new_obj = (oop) _old_lab_oldnuma[node].allocate(new_obj_size);
  13.139 -                }
  13.140 -              }
  13.141 -            }
  13.142 -
  13.143 -            // This is the promotion failed test, and code handling.
  13.144 -            // The code belongs here for two reasons. It is slightly
  13.145 -            // different than the code below, and cannot share the
  13.146 -            // CAS testing code. Keeping the code here also minimizes
  13.147 -            // the impact on the common case fast path code.
  13.148 -
  13.149 -            if (new_obj == NULL) {
  13.150 -              _old_gen_is_full = true;
  13.151 -              return oop_promotion_failed(o, test_mark);
  13.152 -            }
  13.153 -          }
  13.154 -        }
  13.155 -      }
  13.156 -      else {
  13.157 -        new_obj = (oop) _old_lab.allocate(new_obj_size);
  13.158 -        new_obj_is_tenured = true;
  13.159 +        // This is the promotion failed test, and code handling.
  13.160 +        // The code belongs here for two reasons. It is slightly
  13.161 +        // different than the code below, and cannot share the
  13.162 +        // CAS testing code. Keeping the code here also minimizes
  13.163 +        // the impact on the common case fast path code.
  13.164  
  13.165          if (new_obj == NULL) {
  13.166 -          if (!_old_gen_is_full) {
  13.167 -            // Do we allocate directly, or flush and refill?
  13.168 -            if (new_obj_size > (OldPLABSize / 2)) {
  13.169 -              // Allocate this object directly
  13.170 -              new_obj = (oop)old_gen()->cas_allocate(new_obj_size, 0);
  13.171 -            } else {
  13.172 -              // Flush and fill
  13.173 -              _old_lab.flush();
  13.174 -
  13.175 -              HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize, 0);
  13.176 -              if(lab_base != NULL) {
  13.177 -#ifdef ASSERT
  13.178 -                // Delay the initialization of the promotion lab (plab).
  13.179 -                // This exposes uninitialized plabs to card table processing.
  13.180 -                if (GCWorkerDelayMillis > 0) {
  13.181 -                  os::sleep(Thread::current(), GCWorkerDelayMillis, false);
  13.182 -                }
  13.183 -#endif
  13.184 -                _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
  13.185 -                // Try the old lab allocation again.
  13.186 -                new_obj = (oop) _old_lab.allocate(new_obj_size);
  13.187 -              }
  13.188 -            }
  13.189 -          }
  13.190 -
  13.191 -          // This is the promotion failed test, and code handling.
  13.192 -          // The code belongs here for two reasons. It is slightly
  13.193 -          // different than the code below, and cannot share the
  13.194 -          // CAS testing code. Keeping the code here also minimizes
  13.195 -          // the impact on the common case fast path code.
  13.196 -
  13.197 -          if (new_obj == NULL) {
  13.198 -            _old_gen_is_full = true;
  13.199 -            return oop_promotion_failed(o, test_mark);
  13.200 -          }
  13.201 +          _old_gen_is_full = true;
  13.202 +          return oop_promotion_failed(o, test_mark);
  13.203          }
  13.204        }
  13.205      }
  13.206  
  13.207      assert(new_obj != NULL, "allocation should have succeeded");
  13.208  
  13.209 -    TimeStamp before_copy, after_copy;
  13.210 -
  13.211 -    if(UseStasticCopy) {
  13.212 -      before_copy.update();
  13.213 -    }
  13.214 -   
  13.215      // Copy obj
  13.216      Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
  13.217  
  13.218 -    if(UseStasticCopy) {
  13.219 -      after_copy.update();
  13.220 -    }
  13.221 -
  13.222 -    if(UseStasticCopy) {
  13.223 -      each_gc_copy_time[os::numa_get_cpu_id()] += after_copy.ticks() - before_copy.ticks();
  13.224 -      each_gc_copy_fre[os::numa_get_cpu_id()]++;
  13.225 -    }
  13.226 -
  13.227      // Now we have to CAS in the header.
  13.228      if (o->cas_forward_to(new_obj, test_mark)) {
  13.229        // We won any races, we "own" this object.
  13.230 @@ -322,20 +205,8 @@
  13.231        // deallocate it, so we have to test.  If the deallocation fails,
  13.232        // overwrite with a filler object.
  13.233        if (new_obj_is_tenured) {
  13.234 -        if(UseOldNUMA) {
  13.235 -          ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  13.236 -          MutableNUMASpace* s = (MutableNUMASpace*) heap->old_gen()->object_space();
  13.237 -          int i;
  13.238 -          for(i = 0; i < s->lgrp_spaces()->length(); i++) {
  13.239 -            if (!_old_lab_oldnuma[i].unallocate_object((HeapWord*) new_obj, new_obj_size)) {
  13.240 -              CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
  13.241 -            }
  13.242 -          }
  13.243 -        }
  13.244 -        else {
  13.245 -          if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
  13.246 -            CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
  13.247 -          }
  13.248 +        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
  13.249 +          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
  13.250          }
  13.251        } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
  13.252          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
  13.253 @@ -362,6 +233,7 @@
  13.254    return new_obj;
  13.255  }
  13.256  
  13.257 +
  13.258  inline void PSPromotionManager::process_popped_location_depth(StarTask p) {
  13.259    if (is_oop_masked(p)) {
  13.260      assert(PSChunkLargeArrays, "invariant");
    14.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Wed Jun 22 14:26:49 2016 +0800
    14.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Jun 24 17:12:13 2016 +0800
    14.3 @@ -22,12 +22,6 @@
    14.4   *
    14.5   */
    14.6  
    14.7 -/*
    14.8 - * This file has been modified by Loongson Technology in 2015. These
    14.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   14.10 - * available on the same license terms set forth above.
   14.11 - */
   14.12 -
   14.13  #include "precompiled.hpp"
   14.14  #include "classfile/symbolTable.hpp"
   14.15  #include "code/codeCache.hpp"
   14.16 @@ -44,7 +38,6 @@
   14.17  #include "gc_implementation/shared/gcTrace.hpp"
   14.18  #include "gc_implementation/shared/gcTraceTime.hpp"
   14.19  #include "gc_implementation/shared/isGCActiveMark.hpp"
   14.20 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
   14.21  #include "gc_implementation/shared/spaceDecorator.hpp"
   14.22  #include "gc_interface/gcCause.hpp"
   14.23  #include "memory/collectorPolicy.hpp"
   14.24 @@ -237,15 +230,8 @@
   14.25    IsGCActiveMark mark;
   14.26  
   14.27    const bool scavenge_done = PSScavenge::invoke_no_policy();
   14.28 -  bool need_full_gc;
   14.29 -  if(UseOldNUMA) {
   14.30 -    need_full_gc = !scavenge_done ||
   14.31 -      policy->should_full_GC(heap->old_gen()->free_in_bytes_numa());
   14.32 -  }
   14.33 -  else {
   14.34 -    need_full_gc = !scavenge_done ||
   14.35 -      policy->should_full_GC(heap->old_gen()->free_in_bytes());
   14.36 -  }
   14.37 +  const bool need_full_gc = !scavenge_done ||
   14.38 +    policy->should_full_GC(heap->old_gen()->free_in_bytes());
   14.39    bool full_gc_done = false;
   14.40  
   14.41    if (UsePerfData) {
   14.42 @@ -269,30 +255,6 @@
   14.43    return full_gc_done;
   14.44  }
   14.45  
   14.46 -/* 2014/7/7 Liao: Add these variables to stastic detail information during GC. */
   14.47 -/* Used for objects copy stastic. */
   14.48 -float each_gc_copy_time[16];
   14.49 -int   each_gc_copy_fre[16];
   14.50 -
   14.51 -/* Used for GC details stastic. */
   14.52 -float total_gc_time = 0;
   14.53 -int   total_gc_fre  = 0;
   14.54 -
   14.55 -/* Used to statstic ThreadRoots optimization. */
   14.56 -int task_tag[16];
   14.57 -//Used to stastic each cpu
   14.58 -int each_total_num[16];
   14.59 -int each_eden_total_num[3][16];
   14.60 -int each_eden_aligned_num[3][16];
   14.61 -//Used to stastic every GC
   14.62 -int every_total_num;
   14.63 -int every_eden_total_num[3];
   14.64 -int every_eden_aligned_num[3];
   14.65 -//Used to stastic all the time
   14.66 -int all_total_num;
   14.67 -int all_eden_total_num[3];
   14.68 -int all_eden_aligned_num[3];
   14.69 -
   14.70  // This method contains no policy. You should probably
   14.71  // be calling invoke() instead.
   14.72  bool PSScavenge::invoke_no_policy() {
   14.73 @@ -346,29 +308,6 @@
   14.74      heap->record_gen_tops_before_GC();
   14.75    }
   14.76  
   14.77 -  if(UseStasticCopy) {
   14.78 -    for(uint i = 0; i < ParallelGCThreads; i++) {
   14.79 -      each_gc_copy_time[i] = 0;
   14.80 -      each_gc_copy_fre[i] = 0;
   14.81 -    }
   14.82 -  }
   14.83 -
   14.84 -  if(UseStasticScavenge) {
   14.85 -    for(int j = 0; j < 3; j++) {
   14.86 -      for(uint i = 0; i < ParallelGCThreads; i++) {
   14.87 -        task_tag[i] = 0;
   14.88 -
   14.89 -        each_total_num[i] = 0;
   14.90 -        each_eden_total_num[j][i] = 0;
   14.91 -        each_eden_aligned_num[j][i] = 0;
   14.92 -
   14.93 -        every_total_num = 0;
   14.94 -        every_eden_total_num[j]  = 0;
   14.95 -        every_eden_aligned_num[j] = 0;
   14.96 -      }
   14.97 -    }
   14.98 -  }
   14.99 -  
  14.100    heap->print_heap_before_gc();
  14.101    heap->trace_heap_before_gc(&_gc_tracer);
  14.102  
  14.103 @@ -463,34 +402,12 @@
  14.104  
  14.105        GCTaskQueue* q = GCTaskQueue::create();
  14.106  
  14.107 -      if(UseOldNUMA) {
  14.108 -        MutableSpace* sp;
  14.109 -        MutableNUMASpace::LGRPSpace *ls;
  14.110 -        MutableNUMASpace* s = (MutableNUMASpace*) old_gen->object_space();
  14.111 -        int i, j;
  14.112 -        i = s->lgrp_spaces()->length();
  14.113 -        HeapWord** gen_top = (HeapWord**) malloc (i * sizeof(HeapWord));
  14.114 -        for(j = 0; j < i; j++) {
  14.115 -          ls = s->lgrp_spaces()->at(j);
  14.116 -          sp = ls->space();
  14.117 -          *(gen_top + j) = sp->top();
  14.118 -        }
  14.119 -
  14.120 -        if (!old_gen->object_space()->is_empty()) {
  14.121 -          uint stripe_total = active_workers;
  14.122 -          for(uint i=0; i < stripe_total; i++) {
  14.123 -            q->enqueue(new OldToYoungRootsTask_OldNUMA(old_gen, gen_top, i, stripe_total));
  14.124 -          }
  14.125 -        }
  14.126 -      }
  14.127 -      else {
  14.128 -        if (!old_gen->object_space()->is_empty()) {
  14.129 -          // There are only old-to-young pointers if there are objects
  14.130 -          // in the old gen.
  14.131 -          uint stripe_total = active_workers;
  14.132 -          for(uint i=0; i < stripe_total; i++) {
  14.133 -            q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
  14.134 -          }
  14.135 +      if (!old_gen->object_space()->is_empty()) {
  14.136 +        // There are only old-to-young pointers if there are objects
  14.137 +        // in the old gen.
  14.138 +        uint stripe_total = active_workers;
  14.139 +        for(uint i=0; i < stripe_total; i++) {
  14.140 +          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
  14.141          }
  14.142        }
  14.143  
  14.144 @@ -781,56 +698,6 @@
  14.145                    scavenge_exit.ticks());
  14.146      gc_task_manager()->print_task_time_stamps();
  14.147    }
  14.148 -   
  14.149 -  if(PrintGCDetails) {
  14.150 -    float young_gc_time;
  14.151 -    total_gc_fre++; 
  14.152 -    young_gc_time = ((float)(scavenge_exit.ticks() - scavenge_entry.ticks()))/1e9;
  14.153 -    total_gc_time = total_gc_time + ((float)(scavenge_exit.ticks() - scavenge_entry.ticks()))/1e9;
  14.154 -    tty->print_cr("total_gc_fre = %d, young_gc_time = %f, total_gc_time = %f", total_gc_fre, young_gc_time, total_gc_time);
  14.155 -  }
  14.156 -  
  14.157 -  if(UseStasticCopy) {
  14.158 -    for(uint i = 0; i < ParallelGCThreads; i++) {
  14.159 -      tty->print_cr("each_gc_copy_time[%d] = %f", i, each_gc_copy_time[i]/each_gc_copy_fre[i]);
  14.160 -    }
  14.161 -    tty->print_cr("");
  14.162 -    for(uint i = 0; i < ParallelGCThreads; i++) {
  14.163 -      tty->print_cr("each_gc_copy_fre[%d] = %d", i, each_gc_copy_fre[i]);
  14.164 -    }
  14.165 -  }
  14.166 -
  14.167 -  if(UseStasticScavenge) {
  14.168 -    for(int i = 0; i < 3; i++) {
  14.169 -      for(uint j = 0; j < ParallelGCThreads; j++) {
  14.170 -        every_eden_total_num[i] += each_eden_total_num[i][j];
  14.171 -        every_eden_aligned_num[i] += each_eden_aligned_num[i][j];
  14.172 -      }
  14.173 -    }
  14.174 -
  14.175 -    for(uint i = 0; i < ParallelGCThreads; i++) {
  14.176 -      every_total_num += each_total_num[i];
  14.177 -    }
  14.178 - 
  14.179 -    all_total_num += every_total_num;
  14.180 -
  14.181 -    for(int i = 0; i < 3; i++) {
  14.182 -      all_eden_total_num[i] +=  every_eden_total_num[i];
  14.183 -      all_eden_aligned_num[i] += every_eden_aligned_num[i];
  14.184 -    }
  14.185 -
  14.186 -    tty->print_cr("============= Every GCDetails: =============");
  14.187 -    tty->print_cr("ThreadRootTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[0]/(float)every_total_num, (float)every_eden_aligned_num[0]/(float)every_eden_total_num[0]);
  14.188 -    tty->print_cr("OldToYoungRootTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[1]/(float)every_total_num, (float)every_eden_aligned_num[1]/(float)every_eden_total_num[1]);
  14.189 -    tty->print_cr("StealTask: prop of all = %f, prop of aligned = %f", (float)every_eden_total_num[2]/(float)every_total_num, (float)every_eden_aligned_num[2]/(float)every_eden_total_num[2]);
  14.190 -    tty->print_cr("");
  14.191 -
  14.192 -    tty->print_cr("============= Total GCDetails: =============");
  14.193 -    tty->print_cr("ThreadRootTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[0]/(float)all_total_num, (float)all_eden_aligned_num[0]/(float)all_eden_total_num[0]);
  14.194 -    tty->print_cr("OldToYoungRootTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[1]/(float)all_total_num, (float)all_eden_aligned_num[1]/(float)all_eden_total_num[1]);
  14.195 -    tty->print_cr("StealTask: prop of all = %f, prop of aligned = %f", (float)all_eden_total_num[2]/(float)all_total_num, (float)all_eden_aligned_num[2]/(float)all_eden_total_num[2]);
  14.196 -    tty->print_cr("");
  14.197 -  }
  14.198  
  14.199  #ifdef TRACESPINNING
  14.200    ParallelTaskTerminator::print_termination_counts();
    15.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Wed Jun 22 14:26:49 2016 +0800
    15.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.cpp	Fri Jun 24 17:12:13 2016 +0800
    15.3 @@ -22,12 +22,6 @@
    15.4   *
    15.5   */
    15.6  
    15.7 -/*
    15.8 - * This file has been modified by Loongson Technology in 2015. These
    15.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   15.10 - * available on the same license terms set forth above.
   15.11 - */
   15.12 -
   15.13  #include "precompiled.hpp"
   15.14  #include "classfile/systemDictionary.hpp"
   15.15  #include "code/codeCache.hpp"
   15.16 @@ -38,7 +32,6 @@
   15.17  #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
   15.18  #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
   15.19  #include "gc_implementation/parallelScavenge/psTasks.hpp"
   15.20 -#include "gc_implementation/shared/mutableNUMASpace.hpp"
   15.21  #include "memory/iterator.hpp"
   15.22  #include "memory/universe.hpp"
   15.23  #include "oops/oop.inline.hpp"
   15.24 @@ -123,9 +116,8 @@
   15.25  //
   15.26  // ThreadRootsTask
   15.27  //
   15.28 -extern int task_tag[16];
   15.29 +
   15.30  void ThreadRootsTask::do_it(GCTaskManager* manager, uint which) {
   15.31 -  task_tag[which] = 1;
   15.32    assert(Universe::heap()->is_gc_active(), "called outside gc");
   15.33  
   15.34    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   15.35 @@ -148,12 +140,9 @@
   15.36  //
   15.37  
   15.38  StealTask::StealTask(ParallelTaskTerminator* t) :
   15.39 -  _terminator(t) {set_task_numa_id(-1);}
   15.40 +  _terminator(t) {}
   15.41  
   15.42  void StealTask::do_it(GCTaskManager* manager, uint which) {
   15.43 -
   15.44 -  task_tag[which] = 3;
   15.45 -
   15.46    assert(Universe::heap()->is_gc_active(), "called outside gc");
   15.47  
   15.48    PSPromotionManager* pm =
   15.49 @@ -183,7 +172,6 @@
   15.50  //
   15.51  
   15.52  void OldToYoungRootsTask::do_it(GCTaskManager* manager, uint which) {
   15.53 -  task_tag[which] = 2;
   15.54    // There are not old-to-young pointers if the old gen is empty.
   15.55    assert(!_gen->object_space()->is_empty(),
   15.56      "Should not be called is there is no work");
   15.57 @@ -209,56 +197,3 @@
   15.58      pm->drain_stacks(false);
   15.59    }
   15.60  }
   15.61 -
   15.62 -//
   15.63 -// OldToYoungRootsTask_OldNUMA
   15.64 -//
   15.65 -
   15.66 -void OldToYoungRootsTask_OldNUMA::do_it(GCTaskManager* manager, uint which) {
   15.67 -  assert(_gen != NULL, "Sanity");
   15.68 -  assert(_stripe_number < ParallelGCThreads, "Sanity");
   15.69 -
   15.70 -  {
   15.71 -    PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(which);
   15.72 -
   15.73 -    assert(Universe::heap()->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   15.74 -    CardTableExtension* card_table = (CardTableExtension *)Universe::heap()->barrier_set();
   15.75 -    // FIX ME! Assert that card_table is the type we believe it to be.
   15.76 -
   15.77 -    MutableNUMASpace* s = (MutableNUMASpace*) _gen->object_space();
   15.78 -    int i = s->lgrp_spaces()->length();
   15.79 -    int j;
   15.80 -    for(j = 0; j < i; j++) {
   15.81 -      MutableNUMASpace::LGRPSpace *ls = s->lgrp_spaces()->at(j);
   15.82 -      MutableSpace* sp = ls->space();
   15.83 -      /* 2014/2/12/ Liao:[cardTableExtension.cpp] assert(sp->bottom() < sp->top(), "Should not be called if empty");
   15.84 -       * The situation sp->bottom() = sp->top() indicates that the lgrp is empty,
   15.85 -       * so no need of the following operations. */
   15.86 -      if(sp->bottom() != sp->top()) {
   15.87 -        if(j == 0) {
   15.88 -          card_table->scavenge_contents_parallel(_gen->start_array(),
   15.89 -                                                 sp,
   15.90 -                                                 *(_gen_top + j),
   15.91 -                                                 pm,
   15.92 -                                                 _stripe_number,
   15.93 -                                                 _stripe_total);
   15.94 -
   15.95 -        }
   15.96 -        else {
   15.97 -          MutableNUMASpace::LGRPSpace *ls_below = s->lgrp_spaces()->at(j-1);
   15.98 -          MutableSpace* sp_below = ls_below->space();
   15.99 -          card_table->scavenge_contents_parallel_oldnuma(_gen->start_array(),
  15.100 -                                                         sp_below,
  15.101 -                                                         sp,
  15.102 -                                                         *(_gen_top + j),
  15.103 -                                                         pm,
  15.104 -                                                         _stripe_number,
  15.105 -                                                         _stripe_total);
  15.106 -        }
  15.107 -      }
  15.108 -    }
  15.109 -    // Do the real work
  15.110 -    pm->drain_stacks(false);
  15.111 -  }
  15.112 -}
  15.113 -
    16.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Wed Jun 22 14:26:49 2016 +0800
    16.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psTasks.hpp	Fri Jun 24 17:12:13 2016 +0800
    16.3 @@ -22,12 +22,6 @@
    16.4   *
    16.5   */
    16.6  
    16.7 -/*
    16.8 - * This file has been modified by Loongson Technology in 2015. These
    16.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   16.10 - * available on the same license terms set forth above.
   16.11 - */
   16.12 -
   16.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSTASKS_HPP
   16.14  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSTASKS_HPP
   16.15  
   16.16 @@ -73,7 +67,7 @@
   16.17   private:
   16.18    RootType _root_type;
   16.19   public:
   16.20 -  ScavengeRootsTask(RootType value) : _root_type(value) {set_task_numa_id(-1);}
   16.21 +  ScavengeRootsTask(RootType value) : _root_type(value) {}
   16.22  
   16.23    char* name() { return (char *)"scavenge-roots-task"; }
   16.24  
   16.25 @@ -92,8 +86,8 @@
   16.26    JavaThread* _java_thread;
   16.27    VMThread* _vm_thread;
   16.28   public:
   16.29 -  ThreadRootsTask(JavaThread* root) : _java_thread(root), _vm_thread(NULL) {set_task_numa_id(root->lgrp_id());}
   16.30 -  ThreadRootsTask(VMThread* root) : _java_thread(NULL), _vm_thread(root) {set_task_numa_id(root->lgrp_id());}
   16.31 +  ThreadRootsTask(JavaThread* root) : _java_thread(root), _vm_thread(NULL) {}
   16.32 +  ThreadRootsTask(VMThread* root) : _java_thread(NULL), _vm_thread(root) {}
   16.33  
   16.34    char* name() { return (char *)"thread-roots-task"; }
   16.35  
   16.36 @@ -179,34 +173,11 @@
   16.37      _gen(gen),
   16.38      _gen_top(gen_top),
   16.39      _stripe_number(stripe_number),
   16.40 -    _stripe_total(stripe_total) {set_task_numa_id(-1);}
   16.41 +    _stripe_total(stripe_total) { }
   16.42  
   16.43    char* name() { return (char *)"old-to-young-roots-task"; }
   16.44  
   16.45    virtual void do_it(GCTaskManager* manager, uint which);
   16.46  };
   16.47  
   16.48 -class OldToYoungRootsTask_OldNUMA : public GCTask {
   16.49 - private:
   16.50 -  PSOldGen* _gen;
   16.51 -  HeapWord** _gen_top;
   16.52 -  uint _stripe_number;
   16.53 -  uint _stripe_total;
   16.54 -
   16.55 - public:
   16.56 -  OldToYoungRootsTask_OldNUMA(PSOldGen *gen, 
   16.57 -                              HeapWord** gen_top, 
   16.58 -                              uint stripe_number,
   16.59 -                              uint stripe_total) :
   16.60 -     _gen(gen), 
   16.61 -     _gen_top(gen_top), 
   16.62 -     _stripe_number(stripe_number),
   16.63 -     _stripe_total(stripe_total) {set_task_numa_id(-1);}
   16.64 -
   16.65 -  char* name() { return (char *)"OldNUMA-old-to-young-roots-task"; }
   16.66 -
   16.67 -  virtual void do_it(GCTaskManager* manager, uint which);
   16.68 -};
   16.69 -
   16.70 -
   16.71  #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSTASKS_HPP
    17.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Wed Jun 22 14:26:49 2016 +0800
    17.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp	Fri Jun 24 17:12:13 2016 +0800
    17.3 @@ -23,12 +23,6 @@
    17.4   *
    17.5   */
    17.6  
    17.7 -/*
    17.8 - * This file has been modified by Loongson Technology in 2015. These
    17.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   17.10 - * available on the same license terms set forth above.
   17.11 - */
   17.12 -
   17.13  #include "precompiled.hpp"
   17.14  #include "gc_implementation/shared/mutableNUMASpace.hpp"
   17.15  #include "gc_implementation/shared/spaceDecorator.hpp"
   17.16 @@ -873,43 +867,6 @@
   17.17    return p;
   17.18  }
   17.19  
   17.20 -HeapWord* MutableNUMASpace::cas_allocate_oldnuma(size_t size, int node) {
   17.21 -  LGRPSpace *ls = lgrp_spaces()->at(node);
   17.22 -  MutableSpace *s = ls->space();
   17.23 -  HeapWord *p = s->cas_allocate(size);
   17.24 -  if (p != NULL) {
   17.25 -    size_t remainder = pointer_delta(s->end(), p + size);
   17.26 -    if (remainder < CollectedHeap::min_fill_size() && remainder > 0) {
   17.27 -      if (s->cas_deallocate(p, size)) {
   17.28 -        // We were the last to allocate and created a fragment less than
   17.29 -        // a minimal object.
   17.30 -        p = NULL;
   17.31 -      } else {
   17.32 -        guarantee(false, "Deallocation should always succeed");
   17.33 -      }
   17.34 -    }
   17.35 -  }
   17.36 -  if (p != NULL) {
   17.37 -    HeapWord* cur_top, *cur_chunk_top = p + size;
   17.38 -    while ((cur_top = top()) < cur_chunk_top) { // Keep _top updated.
   17.39 -      if (Atomic::cmpxchg_ptr(cur_chunk_top, top_addr(), cur_top) == cur_top) {
   17.40 -        break;
   17.41 -      }
   17.42 -    }
   17.43 -  }
   17.44 -
   17.45 -  // Make the page allocation happen here if there is no static binding.
   17.46 -  if (p != NULL && !os::numa_has_static_binding() ) {
   17.47 -    for (HeapWord *i = p; i < p + size; i += os::vm_page_size() >> LogHeapWordSize) {
   17.48 -      *(int*)i = 0;
   17.49 -    }
   17.50 -  }
   17.51 -  if (p == NULL) {
   17.52 -    ls->set_allocation_failed();
   17.53 -  }
   17.54 -  return p;
   17.55 -}
   17.56 -
   17.57  void MutableNUMASpace::print_short_on(outputStream* st) const {
   17.58    MutableSpace::print_short_on(st);
   17.59    st->print(" (");
    18.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Wed Jun 22 14:26:49 2016 +0800
    18.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.hpp	Fri Jun 24 17:12:13 2016 +0800
    18.3 @@ -22,12 +22,6 @@
    18.4   *
    18.5   */
    18.6  
    18.7 -/*
    18.8 - * This file has been modified by Loongson Technology in 2015. These
    18.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   18.10 - * available on the same license terms set forth above.
   18.11 - */
   18.12 -
   18.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLENUMASPACE_HPP
   18.14  #define SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLENUMASPACE_HPP
   18.15  
   18.16 @@ -69,14 +63,6 @@
   18.17  
   18.18  class MutableNUMASpace : public MutableSpace {
   18.19    friend class VMStructs;
   18.20 -  friend class CardTableExtension;
   18.21 -  friend class OldToYoungRootsTask;
   18.22 -  friend class OldToYoungRootsTask_OldNUMA;
   18.23 -  friend class PSScavenge;
   18.24 -  friend class PSOldGen;
   18.25 -  friend class PSYoungGen;
   18.26 -  friend class PSPromotionManager;
   18.27 -  friend class PSOldPromotionLAB;
   18.28  
   18.29    class LGRPSpace : public CHeapObj<mtGC> {
   18.30      int _lgrp_id;
   18.31 @@ -237,9 +223,6 @@
   18.32    // Allocation (return NULL if full)
   18.33    virtual HeapWord* allocate(size_t word_size);
   18.34    virtual HeapWord* cas_allocate(size_t word_size);
   18.35 -  
   18.36 -  // Allocation for Old NUMA (return NULL if full)
   18.37 -  virtual HeapWord* cas_allocate_oldnuma(size_t word_size, int node);
   18.38  
   18.39    // Debugging
   18.40    virtual void print_on(outputStream* st) const;
    19.1 --- a/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Wed Jun 22 14:26:49 2016 +0800
    19.2 +++ b/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Fri Jun 24 17:12:13 2016 +0800
    19.3 @@ -22,12 +22,6 @@
    19.4   *
    19.5   */
    19.6  
    19.7 -/*
    19.8 - * This file has been modified by Loongson Technology in 2015. These
    19.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   19.10 - * available on the same license terms set forth above.
   19.11 - */
   19.12 -
   19.13  #include "precompiled.hpp"
   19.14  #include "utilities/macros.hpp"
   19.15  #if INCLUDE_ALL_GCS
   19.16 @@ -219,8 +213,6 @@
   19.17    } while (true);
   19.18  }
   19.19  
   19.20 -HeapWord* MutableSpace::cas_allocate_oldnuma(size_t size, int node) { }
   19.21 -
   19.22  // Try to deallocate previous allocation. Returns true upon success.
   19.23  bool MutableSpace::cas_deallocate(HeapWord *obj, size_t size) {
   19.24    HeapWord* expected_top = obj + size;
    20.1 --- a/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Wed Jun 22 14:26:49 2016 +0800
    20.2 +++ b/src/share/vm/gc_implementation/shared/mutableSpace.hpp	Fri Jun 24 17:12:13 2016 +0800
    20.3 @@ -22,12 +22,6 @@
    20.4   *
    20.5   */
    20.6  
    20.7 -/*
    20.8 - * This file has been modified by Loongson Technology in 2015. These
    20.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   20.10 - * available on the same license terms set forth above.
   20.11 - */
   20.12 -
   20.13  #ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
   20.14  #define SHARE_VM_GC_IMPLEMENTATION_SHARED_MUTABLESPACE_HPP
   20.15  
   20.16 @@ -136,10 +130,6 @@
   20.17    // Allocation (return NULL if full)
   20.18    virtual HeapWord* allocate(size_t word_size);
   20.19    virtual HeapWord* cas_allocate(size_t word_size);
   20.20 -
   20.21 -  // Allocation for Old NUMA (return NULL if full)
   20.22 -  virtual HeapWord* cas_allocate_oldnuma(size_t word_size, int node);
   20.23 -
   20.24    // Optional deallocation. Used in NUMA-allocator.
   20.25    bool cas_deallocate(HeapWord *obj, size_t size);
   20.26  
    21.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Jun 22 14:26:49 2016 +0800
    21.2 +++ b/src/share/vm/runtime/arguments.cpp	Fri Jun 24 17:12:13 2016 +0800
    21.3 @@ -22,12 +22,6 @@
    21.4   *
    21.5   */
    21.6  
    21.7 -/*
    21.8 - * This file has been modified by Loongson Technology in 2015. These
    21.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   21.10 - * available on the same license terms set forth above.
   21.11 - */
   21.12 -
   21.13  #include "precompiled.hpp"
   21.14  #include "classfile/javaAssertions.hpp"
   21.15  #include "classfile/symbolTable.hpp"
   21.16 @@ -1660,32 +1654,6 @@
   21.17  }
   21.18  
   21.19  void Arguments::set_heap_size() {
   21.20 -#ifdef MIPS64
   21.21 -  /* 2013/10/24 Jin: Force -Xmx2.5G */
   21.22 -  if(InitialHeapSize < MaxHeapSize) {
   21.23 -    InitialHeapSize = MaxHeapSize;
   21.24 -    set_min_heap_size(MaxHeapSize);
   21.25 -    return;
   21.26 -  }
   21.27 -  /* 2014/3/14 Liao: The default InitialHeapSize and MaxHeapSize is set 3g in loongsonJDK,
   21.28 -   * if we only set -Xmx<3G(e.g. -Xmx1g), or -Xms>3g(e.g. -Xms4g), the heapsize is conflicated,
   21.29 -   * so we must handle this situation. Using set_min_heap_size aims to set the NewSize reasonable.*/
   21.30 -  if(!FLAG_IS_DEFAULT(InitialHeapSize) && InitialHeapSize > MaxHeapSize) {
   21.31 -    MaxHeapSize = InitialHeapSize;
   21.32 -    set_min_heap_size(MaxHeapSize);
   21.33 -    return;
   21.34 -  }
   21.35 -  if(!FLAG_IS_DEFAULT(MaxHeapSize) && InitialHeapSize > MaxHeapSize)  {
   21.36 -    InitialHeapSize = MaxHeapSize;
   21.37 -    set_min_heap_size(MaxHeapSize);
   21.38 -    return;
   21.39 -  }
   21.40 -  if (FLAG_IS_DEFAULT(InitialHeapSize) && FLAG_IS_DEFAULT(MaxHeapSize)) {
   21.41 -    set_min_heap_size(MaxHeapSize);
   21.42 -    return;
   21.43 -  }
   21.44 -#endif
   21.45 -
   21.46    if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
   21.47      // Deprecated flag
   21.48      FLAG_SET_CMDLINE(uintx, MaxRAMFraction, DefaultMaxRAMFraction);
    22.1 --- a/src/share/vm/runtime/globals.hpp	Wed Jun 22 14:26:49 2016 +0800
    22.2 +++ b/src/share/vm/runtime/globals.hpp	Fri Jun 24 17:12:13 2016 +0800
    22.3 @@ -22,12 +22,6 @@
    22.4   *
    22.5   */
    22.6  
    22.7 -/*
    22.8 - * This file has been modified by Loongson Technology in 2015. These
    22.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   22.10 - * available on the same license terms set forth above.
   22.11 - */
   22.12 -
   22.13  #ifndef SHARE_VM_RUNTIME_GLOBALS_HPP
   22.14  #define SHARE_VM_RUNTIME_GLOBALS_HPP
   22.15  
   22.16 @@ -575,28 +569,9 @@
   22.17    develop(bool, TracePageSizes, false,                                      \
   22.18            "Trace page size selection and usage")                            \
   22.19                                                                              \
   22.20 -  /* 2013/12/18 Jin: disable UseNUMA until OS is fixed*/                    \
   22.21 -  product(bool, UseNUMA, MIPS64_ONLY(false) NOT_MIPS64(false),              \
   22.22 +  product(bool, UseNUMA, false,                                             \
   22.23            "Use NUMA if available")                                          \
   22.24                                                                              \
   22.25 -  product(bool, UseOldNUMA, MIPS64_ONLY(true) NOT_MIPS64(false),            \
   22.26 -          "Use Old NUMA if available")                                      \
   22.27 -                                                                            \
   22.28 -  product(bool, UseNUMAThreadRoots, MIPS64_ONLY(true) NOT_MIPS64(false),    \
   22.29 -          "Use NUMAThreadRoots if available")                               \
   22.30 -                                                                            \
   22.31 -  product(bool, UseNUMASteal, MIPS64_ONLY(true) NOT_MIPS64(false),          \
   22.32 -          "Use NUMASteal if available")                                     \
   22.33 -                                                                            \
   22.34 -  product(bool, UseNUMAGC, MIPS64_ONLY(true) NOT_MIPS64(false),             \
   22.35 -          "Use NUMAGC if available")                                        \
   22.36 -                                                                            \
   22.37 -  product(bool, UseStasticScavenge, false,                                  \
   22.38 -          "Use StasticScavenge if available")                               \
   22.39 -                                                                            \
   22.40 -  product(bool, UseStasticCopy, false,                                      \
   22.41 -          "Use StasticCopy if available")                                   \
   22.42 -                                                                            \
   22.43    product(bool, UseNUMAInterleaving, false,                                 \
   22.44            "Interleave memory across NUMA nodes if available")               \
   22.45                                                                              \
   22.46 @@ -2294,7 +2269,7 @@
   22.47    diagnostic(uintx, CPUForCMSThread, 0,                                     \
   22.48            "When BindCMSThreadToCPU is true, the CPU to bind CMS thread to") \
   22.49                                                                              \
   22.50 -  product(bool, BindGCTaskThreadsToCPUs, MIPS64_ONLY(true) NOT_MIPS64(false), \
   22.51 +  product(bool, BindGCTaskThreadsToCPUs, false,                             \
   22.52            "Bind GCTaskThreads to CPUs if possible")                         \
   22.53                                                                              \
   22.54    product(bool, UseGCTaskAffinity, false,                                   \
   22.55 @@ -3140,10 +3115,10 @@
   22.56            "Number of times to spin wait before inflation")                  \
   22.57                                                                              \
   22.58    /* gc parameters */                                                       \
   22.59 -  product(uintx, InitialHeapSize, ScaleForWordSize(MIPS64_ONLY(2500) NOT_MIPS64(0) *M),                 \
   22.60 +  product(uintx, InitialHeapSize, 0,                                        \
   22.61            "Initial heap size (in bytes); zero means use ergonomics")        \
   22.62                                                                              \
   22.63 -  product(uintx, MaxHeapSize, ScaleForWordSize(MIPS64_ONLY(2500) NOT_MIPS64(96) *M),                     \
   22.64 +  product(uintx, MaxHeapSize, ScaleForWordSize(96*M),                       \
   22.65            "Maximum heap size (in bytes)")                                   \
   22.66                                                                              \
   22.67    product(uintx, OldSize, ScaleForWordSize(4*M),                            \
    23.1 --- a/src/share/vm/runtime/os.hpp	Wed Jun 22 14:26:49 2016 +0800
    23.2 +++ b/src/share/vm/runtime/os.hpp	Fri Jun 24 17:12:13 2016 +0800
    23.3 @@ -342,7 +342,6 @@
    23.4    static size_t numa_get_leaf_groups(int *ids, size_t size);
    23.5    static bool   numa_topology_changed();
    23.6    static int    numa_get_group_id();
    23.7 -  static int    numa_get_cpu_id();
    23.8  
    23.9    // Page manipulation
   23.10    struct page_info {
    24.1 --- a/src/share/vm/utilities/taskqueue.hpp	Wed Jun 22 14:26:49 2016 +0800
    24.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Fri Jun 24 17:12:13 2016 +0800
    24.3 @@ -22,12 +22,6 @@
    24.4   *
    24.5   */
    24.6  
    24.7 -/*
    24.8 - * This file has been modified by Loongson Technology in 2015. These
    24.9 - * modifications are Copyright (c) 2015 Loongson Technology, and are made
   24.10 - * available on the same license terms set forth above.
   24.11 - */
   24.12 -
   24.13  #ifndef SHARE_VM_UTILITIES_TASKQUEUE_HPP
   24.14  #define SHARE_VM_UTILITIES_TASKQUEUE_HPP
   24.15  
   24.16 @@ -594,34 +588,15 @@
   24.17  template<class T, MEMFLAGS F> bool
   24.18  GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, int* seed, E& t) {
   24.19    if (_n > 2) {
   24.20 -    if(UseNUMASteal) {
   24.21 -      uint i = 10;
   24.22 -      uint k = queue_num;
   24.23 -      while ((k == queue_num || (k - queue_num) > 3 || (queue_num - k) > 3) && i > 0) {
   24.24 -        i--;
   24.25 -        k = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
   24.26 -      }
   24.27 -      if(i > 0) {
   24.28 -        return _queues[k]->pop_global(t);
   24.29 -      }
   24.30 -      else {
   24.31 -         while (k == queue_num) { 
   24.32 -           k = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
   24.33 -         }
   24.34 -         return _queues[k]->pop_global(t);
   24.35 -      }
   24.36 -    }
   24.37 -    else{
   24.38 -      uint k1 = queue_num;
   24.39 -      while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
   24.40 -      uint k2 = queue_num;
   24.41 -      while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
   24.42 -      // Sample both and try the larger.
   24.43 -      uint sz1 = _queues[k1]->size();
   24.44 -      uint sz2 = _queues[k2]->size();
   24.45 -      if (sz2 > sz1) return _queues[k2]->pop_global(t);
   24.46 -      else return _queues[k1]->pop_global(t);
   24.47 -    }
   24.48 +    uint k1 = queue_num;
   24.49 +    while (k1 == queue_num) k1 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
   24.50 +    uint k2 = queue_num;
   24.51 +    while (k2 == queue_num || k2 == k1) k2 = TaskQueueSetSuper::randomParkAndMiller(seed) % _n;
   24.52 +    // Sample both and try the larger.
   24.53 +    uint sz1 = _queues[k1]->size();
   24.54 +    uint sz2 = _queues[k2]->size();
   24.55 +    if (sz2 > sz1) return _queues[k2]->pop_global(t);
   24.56 +    else return _queues[k1]->pop_global(t);
   24.57    } else if (_n == 2) {
   24.58      // Just try the other one.
   24.59      uint k = (queue_num + 1) % 2;