sched/numa: Don't scale the imbalance
author		Peter Zijlstra <a.p.zijlstra@chello.nl>
		Thu, 10 May 2012 22:26:27 +0000 (00:26 +0200)
committer	Ingo Molnar <mingo@kernel.org>
		Mon, 14 May 2012 13:05:26 +0000 (15:05 +0200)
It's far too easy to get a ridiculously large imbalance_pct when you
scale it like that. Use a fixed 125% for now.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/n/tip-zsriaft1dv7hhboyrpvqjy6s@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c

index 24922b7ff5675dd2120b115fc2d87341012792f7..6883d998dc38db9c8d07f320ac05bcd49d04b056 100644
@@ -6261,11 +6261,6 @@ static int *sched_domains_numa_distance;
 static struct cpumask ***sched_domains_numa_masks;
 static int sched_domains_curr_level;
 
-static inline unsigned long numa_scale(unsigned long x, int level)
-{
-       return x * sched_domains_numa_distance[level] / sched_domains_numa_scale;
-}
-
 static inline int sd_local_flags(int level)
 {
        if (sched_domains_numa_distance[level] > REMOTE_DISTANCE)
@@ -6286,7 +6281,7 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu)
                .min_interval           = sd_weight,
                .max_interval           = 2*sd_weight,
                .busy_factor            = 32,
-               .imbalance_pct          = 100 + numa_scale(25, level),
+               .imbalance_pct          = 125,
                .cache_nice_tries       = 2,
                .busy_idx               = 3,
                .idle_idx               = 2,
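
For illustration only, here is a minimal userspace sketch of the arithmetic
behind the removed numa_scale() helper. The distance table {10, 20, 40, 80}
and the scale of 10 (the local distance) are hypothetical; real SLIT values
are machine-dependent:

	#include <stdio.h>

	/* Hypothetical per-level NUMA distances and scale, for illustration. */
	static int numa_distance_example[] = { 10, 20, 40, 80 };
	static int numa_scale_example = 10;

	/* Mirrors the removed numa_scale() helper from the diff above. */
	static unsigned long numa_scale(unsigned long x, int level)
	{
		return x * numa_distance_example[level] / numa_scale_example;
	}

	int main(void)
	{
		for (int level = 0; level < 4; level++)
			printf("level %d: imbalance_pct = %lu\n",
			       level, 100 + numa_scale(25, level));
		return 0;
	}

On such a table the scaled formula yields imbalance_pct values of 125, 150,
200 and 300 across the levels, whereas the patch pins every NUMA level at a
fixed 125.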