* find_idlest_cpu - find the idlest cpu among the cpus in group.
*/
static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
+ cpumask_t *tmp)
{
- cpumask_t tmp;
unsigned long load, min_load = ULONG_MAX;
int idlest = -1;
int i;
/* Traverse only the allowed CPUs */
- cpus_and(tmp, group->cpumask, p->cpus_allowed);
+ cpus_and(*tmp, group->cpumask, p->cpus_allowed);
- for_each_cpu_mask(i, tmp) {
+ for_each_cpu_mask(i, *tmp) {
load = weighted_cpuload(i);
if (load < min_load || (load == min_load && i == this_cpu)) {
}
while (sd) {
- cpumask_t span;
+ cpumask_t span, tmpmask;
struct sched_group *group;
int new_cpu, weight;
continue;
}
- new_cpu = find_idlest_cpu(group, t, cpu);
+ new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
if (new_cpu == -1 || new_cpu == cpu) {
/* Now try balancing at a lower domain level of cpu */
sd = sd->child;
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
unsigned long *imbalance, enum cpu_idle_type idle,
- int *sd_idle, cpumask_t *cpus, int *balance)
+ int *sd_idle, const cpumask_t *cpus, int *balance)
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
*/
static struct rq *
find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
- unsigned long imbalance, cpumask_t *cpus)
+ unsigned long imbalance, const cpumask_t *cpus)
{
struct rq *busiest = NULL, *rq;
unsigned long max_load = 0;
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance)
+ int *balance, cpumask_t *cpus)
{
int ld_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
struct sched_group *group;
unsigned long imbalance;
struct rq *busiest;
- cpumask_t cpus = CPU_MASK_ALL;
unsigned long flags;
+ cpus_setall(*cpus);
+
/*
* When power savings policy is enabled for the parent domain, idle
* sibling can pick up load irrespective of busy siblings. In this case,
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
- &cpus, balance);
+ cpus, balance);
if (*balance == 0)
goto out_balanced;
goto out_balanced;
}
- busiest = find_busiest_queue(group, idle, imbalance, &cpus);
+ busiest = find_busiest_queue(group, idle, imbalance, cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
/* All tasks on this runqueue were pinned by CPU affinity */
if (unlikely(all_pinned)) {
- cpu_clear(cpu_of(busiest), cpus);
- if (!cpus_empty(cpus))
+ cpu_clear(cpu_of(busiest), *cpus);
+ if (!cpus_empty(*cpus))
goto redo;
goto out_balanced;
}
* this_rq is locked.
*/
static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
+load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd,
+ cpumask_t *cpus)
{
struct sched_group *group;
struct rq *busiest = NULL;
int ld_moved = 0;
int sd_idle = 0;
int all_pinned = 0;
- cpumask_t cpus = CPU_MASK_ALL;
+
+ cpus_setall(*cpus);
/*
* When power savings policy is enabled for the parent domain, idle
schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
redo:
group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
- &sd_idle, &cpus, NULL);
+ &sd_idle, cpus, NULL);
if (!group) {
schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
goto out_balanced;
}
- busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
- &cpus);
+ busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
goto out_balanced;
spin_unlock(&busiest->lock);
if (unlikely(all_pinned)) {
- cpu_clear(cpu_of(busiest), cpus);
- if (!cpus_empty(cpus))
+ cpu_clear(cpu_of(busiest), *cpus);
+ if (!cpus_empty(*cpus))
goto redo;
}
}
struct sched_domain *sd;
int pulled_task = -1;
unsigned long next_balance = jiffies + HZ;
+ cpumask_t tmpmask;
for_each_domain(this_cpu, sd) {
unsigned long interval;
if (sd->flags & SD_BALANCE_NEWIDLE)
/* If we've pulled tasks over stop searching: */
- pulled_task = load_balance_newidle(this_cpu,
- this_rq, sd);
+ pulled_task = load_balance_newidle(this_cpu, this_rq,
+ sd, &tmpmask);
interval = msecs_to_jiffies(sd->balance_interval);
if (time_after(next_balance, sd->last_balance + interval))
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
+ cpumask_t tmp;
for_each_domain(cpu, sd) {
if (!(sd->flags & SD_LOAD_BALANCE))
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance)) {
+ if (load_balance(cpu, rq, sd, idle, &balance, &tmp)) {
/*
* We've pulled tasks over so either we're no
* longer idle, or one of our SMT siblings is
cpuset_cpus_allowed(p, &cpus_allowed);
cpus_and(new_mask, new_mask, cpus_allowed);
again:
- retval = set_cpus_allowed(p, new_mask);
+ retval = set_cpus_allowed_ptr(p, &new_mask);
if (!retval) {
cpuset_cpus_allowed(p, &cpus_allowed);
*/
static void migrate_nr_uninterruptible(struct rq *rq_src)
{
- struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
+ struct rq *rq_dest = cpu_rq(any_online_cpu(*CPU_MASK_ALL_PTR));
unsigned long flags;
local_irq_save(flags);
#ifdef CONFIG_SCHED_DEBUG
-static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level)
+static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
+ cpumask_t *groupmask)
{
struct sched_group *group = sd->groups;
- cpumask_t groupmask;
char str[256];
cpulist_scnprintf(str, sizeof(str), sd->span);
- cpus_clear(groupmask);
+ cpus_clear(*groupmask);
printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
break;
}
- if (cpus_intersects(groupmask, group->cpumask)) {
+ if (cpus_intersects(*groupmask, group->cpumask)) {
printk(KERN_CONT "\n");
printk(KERN_ERR "ERROR: repeated CPUs\n");
break;
}
- cpus_or(groupmask, groupmask, group->cpumask);
+ cpus_or(*groupmask, *groupmask, group->cpumask);
cpulist_scnprintf(str, sizeof(str), group->cpumask);
printk(KERN_CONT " %s", str);
} while (group != sd->groups);
printk(KERN_CONT "\n");
- if (!cpus_equal(sd->span, groupmask))
+ if (!cpus_equal(sd->span, *groupmask))
printk(KERN_ERR "ERROR: groups don't span domain->span\n");
- if (sd->parent && !cpus_subset(groupmask, sd->parent->span))
+ if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
printk(KERN_ERR "ERROR: parent span is not a superset "
"of domain->span\n");
return 0;
static void sched_domain_debug(struct sched_domain *sd, int cpu)
{
+ cpumask_t *groupmask;
int level = 0;
if (!sd) {
printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
+ groupmask = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+ if (!groupmask) {
+ printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
+ return;
+ }
+
for (;;) {
- if (sched_domain_debug_one(sd, cpu, level))
+ if (sched_domain_debug_one(sd, cpu, level, groupmask))
break;
level++;
sd = sd->parent;
if (!sd)
break;
}
+ kfree(groupmask);
}
#else
# define sched_domain_debug(sd, cpu) do { } while (0)
* and ->cpu_power to 0.
*/
static void
-init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
+init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
int (*group_fn)(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg))
+ struct sched_group **sg,
+ cpumask_t *tmpmask),
+ cpumask_t *covered, cpumask_t *tmpmask)
{
struct sched_group *first = NULL, *last = NULL;
- cpumask_t covered = CPU_MASK_NONE;
int i;
- for_each_cpu_mask(i, span) {
+ cpus_clear(*covered);
+
+ for_each_cpu_mask(i, *span) {
struct sched_group *sg;
- int group = group_fn(i, cpu_map, &sg);
+ int group = group_fn(i, cpu_map, &sg, tmpmask);
int j;
- if (cpu_isset(i, covered))
+ if (cpu_isset(i, *covered))
continue;
- sg->cpumask = CPU_MASK_NONE;
+ cpus_clear(sg->cpumask);
sg->__cpu_power = 0;
- for_each_cpu_mask(j, span) {
- if (group_fn(j, cpu_map, NULL) != group)
+ for_each_cpu_mask(j, *span) {
+ if (group_fn(j, cpu_map, NULL, tmpmask) != group)
continue;
- cpu_set(j, covered);
+ cpu_set(j, *covered);
cpu_set(j, sg->cpumask);
}
if (!first)
static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
static int
-cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *unused)
{
if (sg)
*sg = &per_cpu(sched_group_cpus, cpu);
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
static int
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *mask)
{
int group;
- cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
- cpus_and(mask, mask, *cpu_map);
- group = first_cpu(mask);
+
+ *mask = per_cpu(cpu_sibling_map, cpu);
+ cpus_and(*mask, *mask, *cpu_map);
+ group = first_cpu(*mask);
if (sg)
*sg = &per_cpu(sched_group_core, group);
return group;
}
#elif defined(CONFIG_SCHED_MC)
static int
-cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_core_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *unused)
{
if (sg)
*sg = &per_cpu(sched_group_core, cpu);
static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
static int
-cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg)
+cpu_to_phys_group(int cpu, const cpumask_t *cpu_map, struct sched_group **sg,
+ cpumask_t *mask)
{
int group;
#ifdef CONFIG_SCHED_MC
- cpumask_t mask = cpu_coregroup_map(cpu);
- cpus_and(mask, mask, *cpu_map);
- group = first_cpu(mask);
+ *mask = cpu_coregroup_map(cpu);
+ cpus_and(*mask, *mask, *cpu_map);
+ group = first_cpu(*mask);
#elif defined(CONFIG_SCHED_SMT)
- cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
- cpus_and(mask, mask, *cpu_map);
- group = first_cpu(mask);
+ *mask = per_cpu(cpu_sibling_map, cpu);
+ cpus_and(*mask, *mask, *cpu_map);
+ group = first_cpu(*mask);
#else
group = cpu;
#endif
static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
- struct sched_group **sg)
+ struct sched_group **sg, cpumask_t *nodemask)
{
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
int group;
- cpus_and(nodemask, nodemask, *cpu_map);
- group = first_cpu(nodemask);
+ *nodemask = node_to_cpumask(cpu_to_node(cpu));
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ group = first_cpu(*nodemask);
if (sg)
*sg = &per_cpu(sched_group_allnodes, group);
#ifdef CONFIG_NUMA
/* Free memory allocated for various sched_group structures */
-static void free_sched_groups(const cpumask_t *cpu_map)
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
{
int cpu, i;
continue;
for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
struct sched_group *oldsg, *sg = sched_group_nodes[i];
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
+ *nodemask = node_to_cpumask(i);
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ if (cpus_empty(*nodemask))
continue;
if (sg == NULL)
}
}
#else
-static void free_sched_groups(const cpumask_t *cpu_map)
+static void free_sched_groups(const cpumask_t *cpu_map, cpumask_t *nodemask)
{
}
#endif
} while (group != child->groups);
}
+/*
+ * Initializers for sched domains
+ * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
+ */
+
+#define SD_INIT(sd, type) sd_init_##type(sd)
+#define SD_INIT_FUNC(type) \
+static noinline void sd_init_##type(struct sched_domain *sd) \
+{ \
+ memset(sd, 0, sizeof(*sd)); \
+ *sd = SD_##type##_INIT; \
+}
+
+SD_INIT_FUNC(CPU)
+#ifdef CONFIG_NUMA
+ SD_INIT_FUNC(ALLNODES)
+ SD_INIT_FUNC(NODE)
+#endif
+#ifdef CONFIG_SCHED_SMT
+ SD_INIT_FUNC(SIBLING)
+#endif
+#ifdef CONFIG_SCHED_MC
+ SD_INIT_FUNC(MC)
+#endif
+
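For readers unfamiliar with the trick: each SD_INIT_FUNC(type) instantiation above expands to its own noinline helper, and SD_INIT(sd, type) becomes a plain call to it. An illustrative expansion for the CPU case (not part of the patch, shown only to make the macro mechanics explicit):

/* what SD_INIT_FUNC(CPU) generates */
static noinline void sd_init_CPU(struct sched_domain *sd)
{
	memset(sd, 0, sizeof(*sd));
	*sd = SD_CPU_INIT;	/* initializer template from include/linux/topology.h */
}

/* and what SD_INIT(sd, CPU) becomes at the call site */
sd_init_CPU(sd);

Because the helpers are noinline, each large SD_*_INIT compound literal is materialized in its helper's own stack frame, one at a time, instead of all of them accumulating in build_sched_domains().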
+/*
+ * To minimize stack usage, kmalloc room for cpumasks and share the
+ * space as the usage in build_sched_domains() dictates. Used only
+ * if the amount of space is significant.
+ */
+struct allmasks {
+ cpumask_t tmpmask; /* make this one first */
+ union {
+ cpumask_t nodemask;
+ cpumask_t this_sibling_map;
+ cpumask_t this_core_map;
+ };
+ cpumask_t send_covered;
+
+#ifdef CONFIG_NUMA
+ cpumask_t domainspan;
+ cpumask_t covered;
+ cpumask_t notcovered;
+#endif
+};
+
+#if NR_CPUS > 128
+#define SCHED_CPUMASK_ALLOC 1
+#define SCHED_CPUMASK_FREE(v) kfree(v)
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks *v
+#else
+#define SCHED_CPUMASK_ALLOC 0
+#define SCHED_CPUMASK_FREE(v)
+#define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v
+#endif
+
+#define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \
+ ((unsigned long)(a) + offsetof(struct allmasks, v))
+
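Taken together, these macros give build_sched_domains() one shared scratch area whose storage comes from kmalloc() on large-NR_CPUS configurations and from the stack otherwise. A minimal sketch of the intended usage (the function name and the condensed flow are illustrative only; the real sequence appears in the hunks below):

static int cpumask_scratch_example(const cpumask_t *cpu_map)
{
	SCHED_CPUMASK_DECLARE(allmasks);	/* pointer, or on-stack struct plus pointer */
	cpumask_t *tmpmask;

#if SCHED_CPUMASK_ALLOC
	/* NR_CPUS > 128: the masks would be too big for the stack */
	allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
	if (!allmasks)
		return -ENOMEM;
#endif
	tmpmask = (cpumask_t *)allmasks;	/* valid because tmpmask is the first member */
	cpus_clear(*tmpmask);			/* scratch mask handed down to the group_fn helpers */

	{
		/* each SCHED_CPUMASK_VAR(v, a) is a pointer to field v inside *a */
		SCHED_CPUMASK_VAR(nodemask, allmasks);

		*nodemask = node_to_cpumask(0);
		cpus_and(*nodemask, *nodemask, *cpu_map);
	}

	SCHED_CPUMASK_FREE((void *)allmasks);	/* kfree() or no-op, matching the declare */
	return 0;
}

The union inside struct allmasks is safe because nodemask, this_sibling_map and this_core_map are never live at the same time in build_sched_domains(), so they can share a single cpumask_t worth of space.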
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
{
int i;
struct root_domain *rd;
+ SCHED_CPUMASK_DECLARE(allmasks);
+ cpumask_t *tmpmask;
#ifdef CONFIG_NUMA
struct sched_group **sched_group_nodes = NULL;
int sd_allnodes = 0;
printk(KERN_WARNING "Can not alloc sched group node list\n");
return -ENOMEM;
}
- sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif
rd = alloc_rootdomain();
if (!rd) {
printk(KERN_WARNING "Cannot alloc root domain\n");
+#ifdef CONFIG_NUMA
+ kfree(sched_group_nodes);
+#endif
return -ENOMEM;
}
+#if SCHED_CPUMASK_ALLOC
+ /* get space for all scratch cpumask variables */
+ allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL);
+ if (!allmasks) {
+ printk(KERN_WARNING "Cannot alloc cpumask array\n");
+ kfree(rd);
+#ifdef CONFIG_NUMA
+ kfree(sched_group_nodes);
+#endif
+ return -ENOMEM;
+ }
+#endif
+ tmpmask = (cpumask_t *)allmasks;
+
+#ifdef CONFIG_NUMA
+ sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
+#endif
+
/*
* Set up domains for cpus specified by the cpu_map.
*/
for_each_cpu_mask(i, *cpu_map) {
struct sched_domain *sd = NULL, *p;
- cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
- cpus_and(nodemask, nodemask, *cpu_map);
+ *nodemask = node_to_cpumask(cpu_to_node(i));
+ cpus_and(*nodemask, *nodemask, *cpu_map);
#ifdef CONFIG_NUMA
if (cpus_weight(*cpu_map) >
- SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
+ SD_NODES_PER_DOMAIN*cpus_weight(*nodemask)) {
sd = &per_cpu(allnodes_domains, i);
- *sd = SD_ALLNODES_INIT;
+ SD_INIT(sd, ALLNODES);
sd->span = *cpu_map;
- cpu_to_allnodes_group(i, cpu_map, &sd->groups);
+ cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
p = sd;
sd_allnodes = 1;
} else
p = NULL;
sd = &per_cpu(node_domains, i);
- *sd = SD_NODE_INIT;
+ SD_INIT(sd, NODE);
sd->span = sched_domain_node_span(cpu_to_node(i));
sd->parent = p;
if (p)
p = sd;
sd = &per_cpu(phys_domains, i);
- *sd = SD_CPU_INIT;
- sd->span = nodemask;
+ SD_INIT(sd, CPU);
+ sd->span = *nodemask;
sd->parent = p;
if (p)
p->child = sd;
- cpu_to_phys_group(i, cpu_map, &sd->groups);
+ cpu_to_phys_group(i, cpu_map, &sd->groups, tmpmask);
#ifdef CONFIG_SCHED_MC
p = sd;
sd = &per_cpu(core_domains, i);
- *sd = SD_MC_INIT;
+ SD_INIT(sd, MC);
sd->span = cpu_coregroup_map(i);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
- cpu_to_core_group(i, cpu_map, &sd->groups);
+ cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
#endif
#ifdef CONFIG_SCHED_SMT
p = sd;
sd = &per_cpu(cpu_domains, i);
- *sd = SD_SIBLING_INIT;
+ SD_INIT(sd, SIBLING);
sd->span = per_cpu(cpu_sibling_map, i);
cpus_and(sd->span, sd->span, *cpu_map);
sd->parent = p;
p->child = sd;
- cpu_to_cpu_group(i, cpu_map, &sd->groups);
+ cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
#endif
}
#ifdef CONFIG_SCHED_SMT
/* Set up CPU (sibling) groups */
for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
- cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
- if (i != first_cpu(this_sibling_map))
+ SCHED_CPUMASK_VAR(this_sibling_map, allmasks);
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+ *this_sibling_map = per_cpu(cpu_sibling_map, i);
+ cpus_and(*this_sibling_map, *this_sibling_map, *cpu_map);
+ if (i != first_cpu(*this_sibling_map))
continue;
init_sched_build_groups(this_sibling_map, cpu_map,
- &cpu_to_cpu_group);
+ &cpu_to_cpu_group,
+ send_covered, tmpmask);
}
#endif
#ifdef CONFIG_SCHED_MC
/* Set up multi-core groups */
for_each_cpu_mask(i, *cpu_map) {
- cpumask_t this_core_map = cpu_coregroup_map(i);
- cpus_and(this_core_map, this_core_map, *cpu_map);
- if (i != first_cpu(this_core_map))
+ SCHED_CPUMASK_VAR(this_core_map, allmasks);
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+ *this_core_map = cpu_coregroup_map(i);
+ cpus_and(*this_core_map, *this_core_map, *cpu_map);
+ if (i != first_cpu(*this_core_map))
continue;
+
init_sched_build_groups(this_core_map, cpu_map,
- &cpu_to_core_group);
+ &cpu_to_core_group,
+ send_covered, tmpmask);
}
#endif
/* Set up physical groups */
for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
+ *nodemask = node_to_cpumask(i);
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ if (cpus_empty(*nodemask))
continue;
- init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
+ init_sched_build_groups(nodemask, cpu_map,
+ &cpu_to_phys_group,
+ send_covered, tmpmask);
}
#ifdef CONFIG_NUMA
/* Set up node groups */
- if (sd_allnodes)
- init_sched_build_groups(*cpu_map, cpu_map,
- &cpu_to_allnodes_group);
+ if (sd_allnodes) {
+ SCHED_CPUMASK_VAR(send_covered, allmasks);
+
+ init_sched_build_groups(cpu_map, cpu_map,
+ &cpu_to_allnodes_group,
+ send_covered, tmpmask);
+ }
for (i = 0; i < MAX_NUMNODES; i++) {
/* Set up node groups */
struct sched_group *sg, *prev;
- cpumask_t nodemask = node_to_cpumask(i);
- cpumask_t domainspan;
- cpumask_t covered = CPU_MASK_NONE;
+ SCHED_CPUMASK_VAR(nodemask, allmasks);
+ SCHED_CPUMASK_VAR(domainspan, allmasks);
+ SCHED_CPUMASK_VAR(covered, allmasks);
int j;
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask)) {
+ *nodemask = node_to_cpumask(i);
+ cpus_clear(*covered);
+
+ cpus_and(*nodemask, *nodemask, *cpu_map);
+ if (cpus_empty(*nodemask)) {
sched_group_nodes[i] = NULL;
continue;
}
- domainspan = sched_domain_node_span(i);
- cpus_and(domainspan, domainspan, *cpu_map);
+ *domainspan = sched_domain_node_span(i);
+ cpus_and(*domainspan, *domainspan, *cpu_map);
sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
if (!sg) {
goto error;
}
sched_group_nodes[i] = sg;
- for_each_cpu_mask(j, nodemask) {
+ for_each_cpu_mask(j, *nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j);
sd->groups = sg;
}
sg->__cpu_power = 0;
- sg->cpumask = nodemask;
+ sg->cpumask = *nodemask;
sg->next = sg;
- cpus_or(covered, covered, nodemask);
+ cpus_or(*covered, *covered, *nodemask);
prev = sg;
for (j = 0; j < MAX_NUMNODES; j++) {
- cpumask_t tmp, notcovered;
+ SCHED_CPUMASK_VAR(notcovered, allmasks);
int n = (i + j) % MAX_NUMNODES;
node_to_cpumask_ptr(pnodemask, n);
- cpus_complement(notcovered, covered);
- cpus_and(tmp, notcovered, *cpu_map);
- cpus_and(tmp, tmp, domainspan);
- if (cpus_empty(tmp))
+ cpus_complement(*notcovered, *covered);
+ cpus_and(*tmpmask, *notcovered, *cpu_map);
+ cpus_and(*tmpmask, *tmpmask, *domainspan);
+ if (cpus_empty(*tmpmask))
break;
- cpus_and(tmp, tmp, *pnodemask);
- if (cpus_empty(tmp))
+ cpus_and(*tmpmask, *tmpmask, *pnodemask);
+ if (cpus_empty(*tmpmask))
continue;
sg = kmalloc_node(sizeof(struct sched_group),
goto error;
}
sg->__cpu_power = 0;
- sg->cpumask = tmp;
+ sg->cpumask = *tmpmask;
sg->next = prev->next;
- cpus_or(covered, covered, tmp);
+ cpus_or(*covered, *covered, *tmpmask);
prev->next = sg;
prev = sg;
}
if (sd_allnodes) {
struct sched_group *sg;
- cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
+ cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg,
+ tmpmask);
init_numa_sched_groups_power(sg);
}
#endif
cpu_attach_domain(sd, rd, i);
}
+ SCHED_CPUMASK_FREE((void *)allmasks);
return 0;
#ifdef CONFIG_NUMA
error:
- free_sched_groups(cpu_map);
+ free_sched_groups(cpu_map, tmpmask);
+ SCHED_CPUMASK_FREE((void *)allmasks);
return -ENOMEM;
#endif
}
return err;
}
-static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
+static void arch_destroy_sched_domains(const cpumask_t *cpu_map,
+ cpumask_t *tmpmask)
{
- free_sched_groups(cpu_map);
+ free_sched_groups(cpu_map, tmpmask);
}
/*
*/
static void detach_destroy_domains(const cpumask_t *cpu_map)
{
+ cpumask_t tmpmask;
int i;
unregister_sched_domain_sysctl();
for_each_cpu_mask(i, *cpu_map)
cpu_attach_domain(NULL, &def_root_domain, i);
synchronize_sched();
- arch_destroy_sched_domains(cpu_map);
+ arch_destroy_sched_domains(cpu_map, &tmpmask);
}
/*
hotcpu_notifier(update_sched_domains, 0);
/* Move init over to a non-isolated CPU */
- if (set_cpus_allowed(current, non_isolated_cpus) < 0)
+ if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)
BUG();
sched_init_granularity();
}
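The set_cpus_allowed_ptr() conversions above follow the same rule as every other change in this patch: pass the cpumask by reference so no NR_CPUS-sized object is copied at the call boundary. For comparison, the two prototypes as they exist during this transition (listed here only to illustrate the difference):

/* old interface: the whole cpumask_t is passed, and copied, by value */
int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask);

/* new interface: only a pointer crosses the call */
int set_cpus_allowed_ptr(struct task_struct *p, const cpumask_t *new_mask);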