sched/topology: Rewrite get_group()
author     Peter Zijlstra <peterz@infradead.org>
           Wed, 3 May 2017 12:18:06 +0000 (14:18 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 15 May 2017 08:15:32 +0000 (10:15 +0200)
We want to attain:

  sg_cpus() & sg_mask() == sg_mask()

For this to hold we must initialize sg_mask() to sg_cpus() for the
!overlap case (it's currently cpumask_setall()).
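
As a sketch only (not part of the patch), the invariant amounts to a
subset check with the cpumask helpers used in this file: sg_mask() must
be a subset of sg_cpus(), i.e. "sg_cpus() & sg_mask() == sg_mask()".
sg_mask_consistent() below is a hypothetical helper for illustration:

  /* Sketch: the invariant the patch establishes for every sched_group. */
  static inline bool sg_mask_consistent(struct sched_group *sg)
  {
          /* mask & cpus == mask  <=>  mask is a subset of cpus */
          return cpumask_subset(sched_group_mask(sg), sched_group_cpus(sg));
  }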

Since the code makes my head hurt bad, rewrite it into a simpler form,
inspired by the now-fixed overlap code.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/topology.c

index b2790830e18400759b91e73e1599834db49f65a1..dea1950b42a5ed3221b1a43ed157b56b65f35425 100644 (file)
@@ -833,23 +833,34 @@ fail:
  * [*] in other words, the first group of each domain is its child domain.
  */
 
-static int get_group(int cpu, struct sd_data *sdd, struct sched_group **sg)
+static struct sched_group *get_group(int cpu, struct sd_data *sdd)
 {
        struct sched_domain *sd = *per_cpu_ptr(sdd->sd, cpu);
        struct sched_domain *child = sd->child;
+       struct sched_group *sg;
 
        if (child)
                cpu = cpumask_first(sched_domain_span(child));
 
-       if (sg) {
-               *sg = *per_cpu_ptr(sdd->sg, cpu);
-               (*sg)->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+       sg = *per_cpu_ptr(sdd->sg, cpu);
+       sg->sgc = *per_cpu_ptr(sdd->sgc, cpu);
+
+       /* For claim_allocations: */
+       atomic_inc(&sg->ref);
+       atomic_inc(&sg->sgc->ref);
 
-               /* For claim_allocations: */
-               atomic_set(&(*sg)->sgc->ref, 1);
+       if (child) {
+               cpumask_copy(sched_group_cpus(sg), sched_domain_span(child));
+               cpumask_copy(sched_group_mask(sg), sched_group_cpus(sg));
+       } else {
+               cpumask_set_cpu(cpu, sched_group_cpus(sg));
+               cpumask_set_cpu(cpu, sched_group_mask(sg));
        }
 
-       return cpu;
+       sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sched_group_cpus(sg));
+       sg->sgc->min_capacity = SCHED_CAPACITY_SCALE;
+
+       return sg;
 }
 
 /*
@@ -868,34 +879,20 @@ build_sched_groups(struct sched_domain *sd, int cpu)
        struct cpumask *covered;
        int i;
 
-       get_group(cpu, sdd, &sd->groups);
-       atomic_inc(&sd->groups->ref);
-
-       if (cpu != cpumask_first(span))
-               return 0;
-
        lockdep_assert_held(&sched_domains_mutex);
        covered = sched_domains_tmpmask;
 
        cpumask_clear(covered);
 
-       for_each_cpu(i, span) {
+       for_each_cpu_wrap(i, span, cpu) {
                struct sched_group *sg;
-               int group, j;
 
                if (cpumask_test_cpu(i, covered))
                        continue;
 
-               group = get_group(i, sdd, &sg);
-               cpumask_setall(sched_group_mask(sg));
+               sg = get_group(i, sdd);
 
-               for_each_cpu(j, span) {
-                       if (get_group(j, sdd, NULL) != group)
-                               continue;
-
-                       cpumask_set_cpu(j, covered);
-                       cpumask_set_cpu(j, sched_group_cpus(sg));
-               }
+               cpumask_or(covered, covered, sched_group_cpus(sg));
 
                if (!first)
                        first = sg;
@@ -904,6 +901,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
                last = sg;
        }
        last->next = first;
+       sd->groups = first;
 
        return 0;
 }
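
Note on the second hunk (a sketch, not part of the patch): for_each_cpu_wrap()
walks the span starting at the calling CPU, so the first group built covers
that CPU, which is what makes the new "sd->groups = first" assignment after
the loop correct. A minimal userspace mock of that wrap-around visit order
(visit_wrapped() is a hypothetical stand-in, not the kernel iterator):

  #include <stdio.h>

  /* Visit the set bits of @mask starting at @start, wrapping around once. */
  static void visit_wrapped(unsigned long mask, int nbits, int start)
  {
          for (int off = 0; off < nbits; off++) {
                  int i = (start + off) % nbits;

                  if (mask & (1UL << i))
                          printf("%d ", i);
          }
          printf("\n");
  }

  int main(void)
  {
          /* Span {0,1,2,3}: building groups from CPU 2 visits 2 3 0 1. */
          visit_wrapped(0xfUL, 4, 2);
          return 0;
  }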