sched/topology: Add sched_group_capacity debugging
author	Peter Zijlstra <peterz@infradead.org>
	Wed, 26 Apr 2017 15:35:35 +0000 (17:35 +0200)
committer	Ingo Molnar <mingo@kernel.org>
	Mon, 15 May 2017 08:15:30 +0000 (10:15 +0200)
Add sgc::id to make it easier to spot domain construction issues.

Take the opportunity to slightly rework the group printing, because
adding yet another "(id: %d)" string would make the whole thing very
hard to read. The individual groups were also hard to tell apart, so
add explicit visual grouping, which allows replacing all the
"(%s: %d)" format strings with shorter "%s=%d" variants.

Then fix up some inconsistencies in the surrounding domain prints.
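
Before this patch, the same topology printed roughly like this
(reconstructed from the removed format strings):

  [] CPU0 attaching sched-domain:
  []  domain 0: span 0,4 level DIE
  []   groups: 0 4
  []   domain 1: span 0-1,3-5,7 level NUMA
  []    groups: 0,4 (mask: 0,4) (cpu_capacity: 2048) 1,5 (mask: 1,5) (cpu_capacity: 2048) 3,7 (mask: 3,7) (cpu_capacity: 2048)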

The end result looks like:

  [] CPU0 attaching sched-domain(s):
  []  domain-0: span=0,4 level=DIE
  []   groups: 0:{ span=0 }, 4:{ span=4 }
  []   domain-1: span=0-1,3-5,7 level=NUMA
  []    groups: 0:{ span=0,4 mask=0,4 cap=2048 }, 1:{ span=1,5 mask=1,5 cap=2048 }, 3:{ span=3,7 mask=3,7 cap=2048 }
  []    domain-2: span=0-7 level=NUMA
  []     groups: 0:{ span=0-1,3-5,7 mask=0,4 cap=6144 }, 2:{ span=1-3,5-7 mask=2,6 cap=6144 }

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/sched.h
kernel/sched/topology.c

index 6e1eae717a2421290428d23808eb0ecc356481b1..4312b2adfb028aa821edbbd2457668e57a5cb854 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1023,6 +1023,10 @@ struct sched_group_capacity {
        unsigned long next_update;
        int imbalance; /* XXX unrelated to capacity but shared group state */
 
+#ifdef CONFIG_SCHED_DEBUG
+       int id;
+#endif
+
        unsigned long cpumask[0]; /* iteration mask */
 };
 
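A side note on the struct layout above: cpumask[0] is a flexible-array-style
tail, so the new debug-only id has to sit in front of it, and the mask
storage is added to the allocation size. A minimal userspace sketch of the
same pattern (sgc_like and sgc_like_alloc() are illustrative names, not
kernel code):

  #include <stdlib.h>

  struct sgc_like {
          unsigned long capacity;
  #ifdef DEBUG                      /* stands in for CONFIG_SCHED_DEBUG */
          int id;                   /* stable tag for debug output */
  #endif
          unsigned long cpumask[];  /* flexible tail, must stay last */
  };

  static struct sgc_like *sgc_like_alloc(size_t mask_bytes)
  {
          /* one allocation covers the struct plus the mask bits */
          return calloc(1, sizeof(struct sgc_like) + mask_bytes);
  }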
index a4b868c76f3cf054819ce9fdee3f452961d97d0f..12af4b1579289e28bc59ab5b147d1f45cf991374 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -35,7 +35,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
        cpumask_clear(groupmask);
 
-       printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
+       printk(KERN_DEBUG "%*s domain-%d: ", level, "", level);
 
        if (!(sd->flags & SD_LOAD_BALANCE)) {
                printk("does not load-balance\n");
@@ -45,7 +45,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                return -1;
        }
 
-       printk(KERN_CONT "span %*pbl level %s\n",
+       printk(KERN_CONT "span=%*pbl level=%s\n",
               cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
        if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
@@ -80,18 +80,17 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
                cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-               printk(KERN_CONT " %*pbl",
-                      cpumask_pr_args(sched_group_cpus(group)));
+               printk(KERN_CONT " %d:{ span=%*pbl",
+                               group->sgc->id,
+                               cpumask_pr_args(sched_group_cpus(group)));
 
                if ((sd->flags & SD_OVERLAP) && !cpumask_full(sched_group_mask(group))) {
-                       printk(KERN_CONT " (mask: %*pbl)",
+                       printk(KERN_CONT " mask=%*pbl",
                                cpumask_pr_args(sched_group_mask(group)));
                }
 
-               if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
-                       printk(KERN_CONT " (cpu_capacity: %lu)",
-                               group->sgc->capacity);
-               }
+               if (group->sgc->capacity != SCHED_CAPACITY_SCALE)
+                       printk(KERN_CONT " cap=%lu", group->sgc->capacity);
 
                if (group == sd->groups && sd->child &&
                    !cpumask_equal(sched_domain_span(sd->child),
@@ -99,6 +98,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                        printk(KERN_ERR "ERROR: domain->groups does not match domain->child\n");
                }
 
+               printk(KERN_CONT " }");
+
                group = group->next;
 
                if (group != sd->groups)
@@ -129,7 +130,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
                return;
        }
 
-       printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+       printk(KERN_DEBUG "CPU%d attaching sched-domain(s):\n", cpu);
 
        for (;;) {
                if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask))
@@ -1356,6 +1357,10 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
                        if (!sgc)
                                return -ENOMEM;
 
+#ifdef CONFIG_SCHED_DEBUG
+                       sgc->id = j;
+#endif
+
                        *per_cpu_ptr(sdd->sgc, j) = sgc;
                }
        }
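
Since __sdt_alloc() stamps each sched_group_capacity with the CPU whose
per-CPU slot it was allocated for, the number in front of every "{ ... }"
group in the debug output is that CPU, and two groups printing the same id
means they share one sgc. A hypothetical helper, not part of this patch,
sketching the kind of check the id enables:

  /* Illustrative only: walk a domain's circular group list and report
   * any two groups that share a single sched_group_capacity -- the
   * sort of construction issue sgc->id makes visible. */
  static void check_group_ring(struct sched_group *first)
  {
          struct sched_group *a, *b;

          for (a = first; ; a = a->next) {
                  for (b = a->next; b != first; b = b->next) {
                          if (a->sgc == b->sgc)
                                  printk(KERN_WARNING "groups share sgc id=%d\n",
                                         a->sgc->id);
                  }
                  if (a->next == first)
                          break;
          }
  }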