	for_each_online_cpu(j) {
		cpu_freq = per_cpu(cpu_last_req_freq, j);
-		if ((cluster == per_cpu(physical_cluster, j)) &&
-				(max_freq < cpu_freq))
+		if (cluster == per_cpu(physical_cluster, j) &&
+		    max_freq < cpu_freq)
			max_freq = cpu_freq;
	}
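
For context: this loop picks the highest frequency last requested by any online CPU sitting on the given physical cluster, since with the switcher enabled one cluster clock serves several logical CPUs and has to satisfy the fastest outstanding request. A minimal userspace sketch of the same selection, with plain arrays and made-up frequencies standing in for the driver's per-CPU variables (illustration only, not the driver code):

#include <stdio.h>

#define NR_CPUS 4

/* Illustrative stand-ins for the per-CPU data; the values are made up. */
static unsigned int cpu_last_req_freq[NR_CPUS] = { 500000, 900000, 1200000, 800000 };
static unsigned int physical_cluster[NR_CPUS]  = { 0, 0, 1, 1 };

/* Highest frequency requested by any CPU currently on @cluster. */
static unsigned int find_cluster_maxfreq(unsigned int cluster)
{
	unsigned int j, cpu_freq, max_freq = 0;

	for (j = 0; j < NR_CPUS; j++) {
		cpu_freq = cpu_last_req_freq[j];

		if (cluster == physical_cluster[j] && max_freq < cpu_freq)
			max_freq = cpu_freq;
	}

	return max_freq;
}

int main(void)
{
	printf("cluster 0: %u kHz, cluster 1: %u kHz\n",
	       find_cluster_maxfreq(0), find_cluster_maxfreq(1));
	return 0;
}
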
	freqs_new = freq_table[cur_cluster][index].frequency;
	if (is_bL_switching_enabled()) {
-		if ((actual_cluster == A15_CLUSTER) &&
-				(freqs_new < clk_big_min)) {
+		if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
			new_cluster = A7_CLUSTER;
-		} else if ((actual_cluster == A7_CLUSTER) &&
-				(freqs_new > clk_little_max)) {
+		else if (actual_cluster == A7_CLUSTER &&
+			 freqs_new > clk_little_max)
			new_cluster = A15_CLUSTER;
-		}
	}
	ret = ve_spc_cpufreq_set_rate(cpu, actual_cluster, new_cluster,
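
This hunk only reshapes the switcher's cluster-selection rule: a request below the big cluster's minimum operating point is handed to the A7 cluster, and one above the little cluster's virtualized maximum goes to the A15 cluster. A standalone sketch of that decision follows; the threshold values are invented here, whereas the driver computes clk_big_min and clk_little_max from its per-cluster frequency tables:

#include <stdio.h>

enum cluster { A15_CLUSTER, A7_CLUSTER };

/* Invented thresholds in kHz, purely for illustration. */
static const unsigned int clk_big_min = 600000;
static const unsigned int clk_little_max = 800000;

/* Decide which physical cluster should serve a new frequency request. */
static enum cluster pick_cluster(enum cluster actual_cluster, unsigned int freqs_new)
{
	enum cluster new_cluster = actual_cluster;

	if (actual_cluster == A15_CLUSTER && freqs_new < clk_big_min)
		new_cluster = A7_CLUSTER;
	else if (actual_cluster == A7_CLUSTER && freqs_new > clk_little_max)
		new_cluster = A15_CLUSTER;

	return new_cluster;
}

int main(void)
{
	/* A slow request on the big cluster migrates down, and vice versa. */
	printf("%s\n", pick_cluster(A15_CLUSTER, 400000) == A7_CLUSTER ? "A15 -> A7" : "stay");
	printf("%s\n", pick_cluster(A7_CLUSTER, 1000000) == A15_CLUSTER ? "A7 -> A15" : "stay");
	return 0;
}

Dropping the braces is consistent with kernel coding style here, since both branches of the if/else reduce to single statements.
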
static inline u32 get_table_min(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
-	uint32_t min_freq = ~0;
+	u32 min_freq = ~0;
+
	cpufreq_for_each_entry(pos, table)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;
static inline u32 get_table_max(struct cpufreq_frequency_table *table)
{
	struct cpufreq_frequency_table *pos;
-	uint32_t max_freq = 0;
+	u32 max_freq = 0;
+
	cpufreq_for_each_entry(pos, table)
		if (pos->frequency > max_freq)
			max_freq = pos->frequency;
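
Both helpers walk a frequency table that cpufreq terminates with a CPUFREQ_TABLE_END sentinel, which is why initializing the running minimum to ~0 (the largest u32) is safe. A self-contained sketch of the same scan over a sentinel-terminated array, with made-up entries (get_table_max is the mirror image, starting its running maximum at 0):

#include <stdio.h>
#include <stdint.h>

#define TABLE_END (~0u)	/* stand-in for cpufreq's end-of-table sentinel */

struct freq_entry {
	uint32_t frequency;	/* kHz; TABLE_END terminates the table */
};

static uint32_t get_table_min(const struct freq_entry *table)
{
	const struct freq_entry *pos;
	uint32_t min_freq = ~0;

	for (pos = table; pos->frequency != TABLE_END; pos++)
		if (pos->frequency < min_freq)
			min_freq = pos->frequency;

	return min_freq;
}

int main(void)
{
	const struct freq_entry table[] = {
		{ 1000000 }, { 700000 }, { 350000 }, { TABLE_END },
	};

	printf("min: %u kHz\n", (unsigned int)get_table_min(table));	/* 350000 */
	return 0;
}
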
	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
		for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END;
-				j++) {
-			table[k].frequency = VIRT_FREQ(i,
-					freq_table[i][j].frequency);
-			k++;
+		     j++, k++) {
+			table[k].frequency =
+				VIRT_FREQ(i, freq_table[i][j].frequency);
		}
	}
	return 0;
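
Folding the k++ into the for header and re-wrapping the assignment does not change what the loop builds: one merged table holding both clusters' frequencies. Walking the clusters in reverse index order puts the little cluster first, and because its rates are scaled down by VIRT_FREQ the merged list comes out in increasing order, as the comment promises. A small sketch under the assumption that VIRT_FREQ halves A7 frequencies (an illustrative reading of the driver's virtual-frequency convention, with made-up tables):

#include <stdio.h>

#define MAX_CLUSTERS	2
#define A15_CLUSTER	0
#define A7_CLUSTER	1

/* Assumed convention: little-cluster rates appear halved in the virtual table. */
#define VIRT_FREQ(cluster, freq) ((cluster) == A7_CLUSTER ? (freq) >> 1 : (freq))

int main(void)
{
	/* Made-up per-cluster tables, 0-terminated instead of CPUFREQ_TABLE_END. */
	unsigned int freq_table[MAX_CLUSTERS][4] = {
		[A15_CLUSTER] = { 500000, 800000, 1200000, 0 },
		[A7_CLUSTER]  = { 350000, 600000, 1000000, 0 },
	};
	unsigned int table[8];
	int i, j, k = 0;

	/* Add in reverse order to get freqs in increasing order */
	for (i = MAX_CLUSTERS - 1; i >= 0; i--)
		for (j = 0; freq_table[i][j] != 0; j++, k++)
			table[k] = VIRT_FREQ(i, freq_table[i][j]);

	for (i = 0; i < k; i++)
		printf("%u\n", table[i]);	/* 175000 ... 1200000, low to high */

	return 0;
}
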
	dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d, cluster: %d\n",
-			__func__, cpu_dev->id, cluster);
+		__func__, cpu_dev->id, cluster);
	ret = PTR_ERR(clk[cluster]);
	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table[cluster]);
out:
	dev_err(cpu_dev, "%s: Failed to get data for cluster: %d\n", __func__,
-			cluster);
+		cluster);
	return ret;
}
	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
-				policy->cpu);
+		       policy->cpu);
		return -ENODEV;
	}
	dev_pm_opp_of_register_em(policy->cpus);
	if (is_bL_switching_enabled())
-		per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu);
+		per_cpu(cpu_last_req_freq, policy->cpu) =
+			clk_get_cpu_rate(policy->cpu);
	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
	return 0;
	cpu_dev = get_cpu_device(policy->cpu);
	if (!cpu_dev) {
		pr_err("%s: failed to get cpu%d device\n", __func__,
-				policy->cpu);
+		       policy->cpu);
		return -ENODEV;
	}