sched/topology: drop the unused 'sd' parameter from arch_scale_cpu_capacity()
and topology_get_cpu_scale(). No implementation ever reads it, so remove it
and update every call site.

diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
@@ ... @@ static void update_cpu_capacity(unsigned int cpu)
topology_set_cpu_scale(cpu, cpu_capacity(cpu) / middle_capacity);
pr_info("CPU%u: update cpu_capacity %lu\n",
- cpu, topology_get_cpu_scale(NULL, cpu));
+ cpu, topology_get_cpu_scale(cpu));
}
#else
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
@@ ... @@ static ssize_t cpu_capacity_show(struct device *dev,
{
struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%lu\n", topology_get_cpu_scale(NULL, cpu->dev.id));
+ return sprintf(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id));
}
static void update_topology_flags_workfn(struct work_struct *work);
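Note: cpu_capacity_show() backs the per-CPU sysfs attribute, so the new
one-argument form is directly observable from userspace. A minimal sketch,
assuming sysfs is mounted and cpu0 exists:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpu_capacity", "r");
		unsigned long cap;

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &cap) == 1)
			printf("cpu0 capacity: %lu\n", cap); /* 1024 on the biggest CPUs */
		fclose(f);
		return 0;
	}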
@@ ... @@ void topology_normalize_cpu_scale(void)
/ capacity_scale;
topology_set_cpu_scale(cpu, capacity);
pr_debug("cpu_capacity: CPU%d cpu_capacity=%lu\n",
- cpu, topology_get_cpu_scale(NULL, cpu));
+ cpu, topology_get_cpu_scale(cpu));
}
}
@@ ... @@ static int init_cpu_capacity_callback(struct notifier_block *nb,
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
for_each_cpu(cpu, policy->related_cpus) {
- raw_capacity[cpu] = topology_get_cpu_scale(NULL, cpu) *
+ raw_capacity[cpu] = topology_get_cpu_scale(cpu) *
policy->cpuinfo.max_freq / 1000UL;
capacity_scale = max(raw_capacity[cpu], capacity_scale);
}
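Note: with made-up numbers, a little CPU whose DT capacity-dmips-mhz parsing
left its scale at 512, on a cpufreq policy with cpuinfo.max_freq = 1500000
(kHz), yields:

	raw_capacity[cpu] = 512 * 1500000 / 1000 = 768000

topology_normalize_cpu_scale() later rescales these raw values so the
biggest CPU in the system lands exactly on SCHED_CAPACITY_SCALE (1024).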
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
@@ ... @@
struct sched_domain;
static inline
-unsigned long topology_get_cpu_scale(struct sched_domain *sd, int cpu)
+unsigned long topology_get_cpu_scale(int cpu)
{
return per_cpu(cpu_scale, cpu);
}
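Note: arm and arm64 alias the generic hook to this helper, which is why the
two signatures must change in lockstep. A sketch of the aliasing (from
memory, not part of this patch):

	/* e.g. in the architecture's asm/topology.h */
	#define arch_scale_cpu_capacity topology_get_cpu_scale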
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
@@ ... @@ static inline unsigned long em_pd_energy(struct em_perf_domain *pd,
* like schedutil.
*/
cpu = cpumask_first(to_cpumask(pd->cpus));
- scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+ scale_cpu = arch_scale_cpu_capacity(cpu);
cs = &pd->table[pd->nr_cap_states - 1];
freq = map_util_freq(max_util, cs->frequency, scale_cpu);
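Note: scale_cpu feeds map_util_freq(), which (as defined in
include/linux/sched/cpufreq.h at the time) adds 25% headroom:

	/*
	 * map_util_freq(util, freq, cap) = (freq + freq/4) * util / cap
	 * Made-up numbers: max_util = 512, top cap state at 2000000 kHz,
	 * scale_cpu = 1024:
	 *	freq = 2500000 * 512 / 1024 = 1250000 kHz
	 */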
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
@@ ... @@
# define SD_INIT_NAME(type)
#endif
-#ifndef arch_scale_cpu_capacity
-static __always_inline
-unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
-{
- return SCHED_CAPACITY_SCALE;
-}
-#endif
-
#else /* CONFIG_SMP */
struct sched_domain_attr;
@@ ... @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
return true;
}
+#endif /* !CONFIG_SMP */
+
#ifndef arch_scale_cpu_capacity
static __always_inline
-unsigned long arch_scale_cpu_capacity(void __always_unused *sd, int cpu)
+unsigned long arch_scale_cpu_capacity(int cpu)
{
return SCHED_CAPACITY_SCALE;
}
#endif
-#endif /* !CONFIG_SMP */
-
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
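Note: the two hunks above merge the SMP and !SMP fallbacks into one. My
reconstruction of the header after the patch, abridged:

	#ifdef CONFIG_SMP
	/* ... SMP-only topology declarations ... */
	#else /* CONFIG_SMP */
	/* ... UP stubs ... */
	#endif /* !CONFIG_SMP */

	#ifndef arch_scale_cpu_capacity
	static __always_inline
	unsigned long arch_scale_cpu_capacity(int cpu)
	{
		return SCHED_CAPACITY_SCALE;
	}
	#endif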
diff --git a/kernel/power/energy_model.c b/kernel/power/energy_model.c
@@ ... @@ static struct em_perf_domain *em_create_pd(cpumask_t *span,
* All CPUs of a domain must have the same micro-architecture
* since they all share the same table.
*/
- cap = arch_scale_cpu_capacity(NULL, cpu);
+ cap = arch_scale_cpu_capacity(cpu);
if (prev_cap && prev_cap != cap) {
pr_err("CPUs of %*pbl must have the same capacity\n",
cpumask_pr_args(span));
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
@@ ... @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
unsigned long util = cpu_util_cfs(rq);
- unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+ unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->max = max;
sg_cpu->bw_dl = cpu_bw_dl(rq);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
@@ ... @@ static void update_curr_dl(struct rq *rq)
&curr->dl);
} else {
unsigned long scale_freq = arch_scale_freq_capacity(cpu);
- unsigned long scale_cpu = arch_scale_cpu_capacity(NULL, cpu);
+ unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
scaled_delta_exec = cap_scale(delta_exec, scale_freq);
scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
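Note: cap_scale() is a multiply-and-shift by SCHED_CAPACITY_SHIFT (10),
defined alongside the PELT code. Worked example with made-up numbers for
this else-branch (deadline accounting without GRUB reclaiming):

	delta_exec = 1000000 ns, scale_freq = 512, scale_cpu = 512:
	after frequency scaling: 1000000 * 512 >> 10 = 500000 ns
	after capacity  scaling:  500000 * 512 >> 10 = 250000 ns

so only the computation actually delivered is charged against the task's
reserved runtime.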
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
@@ ... @@ void post_init_entity_util_avg(struct task_struct *p)
struct sched_entity *se = &p->se;
struct cfs_rq *cfs_rq = cfs_rq_of(se);
struct sched_avg *sa = &se->avg;
- long cpu_scale = arch_scale_cpu_capacity(NULL, cpu_of(rq_of(cfs_rq)));
+ long cpu_scale = arch_scale_cpu_capacity(cpu_of(rq_of(cfs_rq)));
long cap = (long)(cpu_scale - cfs_rq->avg.util_avg) / 2;
if (cap > 0) {
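Note: worked example for the initial utilization clamp, with made-up
numbers:

	cpu_scale = 1024, cfs_rq->avg.util_avg = 400
	cap = (1024 - 400) / 2 = 312

so a freshly forked task starts with at most half of the CPU's spare
capacity and cannot instantly saturate the utilization signal.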
@@ ... @@
static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
{
struct rq *rq = cpu_rq(cpu);
- unsigned long max = arch_scale_cpu_capacity(sd, cpu);
+ unsigned long max = arch_scale_cpu_capacity(cpu);
unsigned long used, free;
unsigned long irq;
@@ ... @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
unsigned long capacity = scale_rt_capacity(sd, cpu);
struct sched_group *sdg = sd->groups;
- cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(sd, cpu);
+ cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
if (!capacity)
capacity = 1;
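Note: the informal relationship maintained here (my paraphrase, not kernel
documentation):

	cpu_capacity_orig = arch_scale_cpu_capacity(cpu)   /* hardware max */
	cpu_capacity      = cpu_capacity_orig minus rt/dl/irq pressure

e.g. orig = 1024 with roughly 20% of time going to RT and IRQ work leaves
CFS a usable capacity of about 819.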
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
@@ ... @@ int update_irq_load_avg(struct rq *rq, u64 running)
* reflect the real amount of computation
*/
running = cap_scale(running, arch_scale_freq_capacity(cpu_of(rq)));
- running = cap_scale(running, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+ running = cap_scale(running, arch_scale_cpu_capacity(cpu_of(rq)));
/*
* We know the time that has been used by interrupt since last update
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
@@ ... @@ static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
* Scale the elapsed time to reflect the real amount of
* computation
*/
- delta = cap_scale(delta, arch_scale_cpu_capacity(NULL, cpu_of(rq)));
+ delta = cap_scale(delta, arch_scale_cpu_capacity(cpu_of(rq)));
delta = cap_scale(delta, arch_scale_freq_capacity(cpu_of(rq)));
rq->clock_pelt += delta;
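Note: worked example with made-up numbers for the scale-invariant PELT
clock. Over a 2000000 ns wall-clock delta on a little CPU (scale_cpu = 512)
running at half frequency (scale_freq = 512):

	delta = 2000000 * 512 >> 10 = 1000000
	delta = 1000000 * 512 >> 10 =  500000

rq->clock_pelt advances at a quarter of wall time, making load and
utilization sums comparable across CPU types and frequencies.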
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
@@ ... @@
static inline unsigned long schedutil_energy_util(int cpu, unsigned long cfs)
{
- unsigned long max = arch_scale_cpu_capacity(NULL, cpu);
+ unsigned long max = arch_scale_cpu_capacity(cpu);
return schedutil_freq_util(cpu, cfs, max, ENERGY_UTIL);
}
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
@@ ... @@ static struct sched_domain_topology_level *asym_cpu_capacity_level()
unsigned long cap;
/* Is there any asymmetry? */
- cap = arch_scale_cpu_capacity(NULL, cpumask_first(cpu_map));
+ cap = arch_scale_cpu_capacity(cpumask_first(cpu_map));
for_each_cpu(i, cpu_map) {
- if (arch_scale_cpu_capacity(NULL, i) != cap) {
+ if (arch_scale_cpu_capacity(i) != cap) {
asym = true;
break;
}
@@ ... @@ static struct sched_domain_topology_level *asym_cpu_capacity_level()
* to everyone.
*/
for_each_cpu(i, cpu_map) {
- unsigned long max_capacity = arch_scale_cpu_capacity(NULL, i);
+ unsigned long max_capacity = arch_scale_cpu_capacity(i);
int tl_id = 0;
for_each_sd_topology(tl) {
for_each_cpu_and(j, tl->mask(i), cpu_map) {
unsigned long capacity;
- capacity = arch_scale_cpu_capacity(NULL, j);
+ capacity = arch_scale_cpu_capacity(j);
if (capacity <= max_capacity)
continue;
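Note: on a hypothetical big.LITTLE system with CPUs 0-3 at capacity 512 and
CPUs 4-7 at 1024, the first loop notices 512 != 1024 and sets asym = true;
this second loop then finds the lowest topology level at which a
1024-capacity CPU is visible from every CPU:

	capacities: {512, 512, 512, 512, 1024, 1024, 1024, 1024}
	MC  level mask(0) = 0-3 -> no 1024 visible -> not the asym level
	DIE level mask(0) = 0-7 -> 1024 visible    -> asym_tl = DIE

and the returned level is where SD_ASYM_CPUCAPACITY gets set.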