* when per cpu controls are enforced
* @acpi_perf_data: Stores ACPI perf information read from _PSS
* @valid_pss_table: Set to true for valid ACPI _PSS entries found
+ * @epp_saved: Last saved HWP energy performance preference
+ * (EPP) or energy performance bias (EPB)
+ * @epp_policy: Last saved policy used to set EPP/EPB
*
* This structure stores per CPU instance data for all CPUs.
*/
bool valid_pss_table;
#endif
unsigned int iowait_boost;
+ s16 epp_saved;
+ s16 epp_policy;
};
static struct cpudata **all_cpu_data;
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
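+/*
+ * The energy performance bias (EPB) lives in the low four bits of
+ * MSR_IA32_ENERGY_PERF_BIAS: 0 requests maximum performance and 15
+ * requests maximum energy savings.
+ */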
+static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
+{
+ u64 epb;
+ int ret;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return -ENXIO;
+
+ ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
+ if (ret)
+ return (s16)ret;
+
+ return (s16)(epb & 0x0f);
+}
+
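+/*
+ * Return the effective energy performance preference: the EPP field of
+ * the HWP request when the CPU supports it, or the EPB value otherwise.
+ * Negative values indicate an error.
+ */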
+static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
+{
+ s16 epp;
+
+ if (static_cpu_has(X86_FEATURE_HWP_EPP))
+ epp = (hwp_req_data >> 24) & 0xff;
+ else
+ /* When EPP is not present, HWP uses the EPB setting */
+ epp = intel_pstate_get_epb(cpu_data);
+
+ return epp;
+}
+
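+/*
+ * Update the EPB field of MSR_IA32_ENERGY_PERF_BIAS with a
+ * read-modify-write, leaving the other bits of the MSR untouched.
+ */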
+static void intel_pstate_set_epb(int cpu, s16 pref)
+{
+ u64 epb;
+
+ if (!static_cpu_has(X86_FEATURE_EPB))
+ return;
+
+ if (rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb))
+ return;
+
+ epb = (epb & ~0x0f) | pref;
+ wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
+}
+
static void intel_pstate_hwp_set(const struct cpumask *cpumask)
{
int min, hw_min, max, hw_max, cpu, range, adj_range;
for_each_cpu(cpu, cpumask) {
int max_perf_pct, min_perf_pct;
+ struct cpudata *cpu_data = all_cpu_data[cpu];
+ s16 epp;
if (per_cpu_limits)
perf_limits = all_cpu_data[cpu]->perf_limits;
value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
+
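+ /*
+ * Skip the EPP/EPB update if the current policy has already
+ * been programmed for this CPU.
+ */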
+ if (cpu_data->epp_policy == cpu_data->policy)
+ goto skip_epp;
+
+ cpu_data->epp_policy = cpu_data->policy;
+
+ if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
+ epp = intel_pstate_get_epp(cpu_data, value);
+ cpu_data->epp_saved = epp;
+ /* If the EPP read failed, don't try to write it back */
+ if (epp < 0)
+ goto skip_epp;
+
+ epp = 0;
+ } else {
+ /* Skip setting EPP when the saved value is invalid */
+ if (cpu_data->epp_saved < 0)
+ goto skip_epp;
+
+ /*
+ * There is no need to restore EPP when it is not zero.
+ * A non-zero value means one of the following:
+ * - the policy has not changed
+ * - the user has changed it manually
+ * - reading the EPB returned an error
+ */
+ epp = intel_pstate_get_epp(cpu_data, value);
+ if (epp)
+ goto skip_epp;
+
+ epp = cpu_data->epp_saved;
+ }
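+ /* EPP occupies bits 31:24 of MSR_HWP_REQUEST */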
+ if (static_cpu_has(X86_FEATURE_HWP_EPP)) {
+ value &= ~GENMASK_ULL(31, 24);
+ value |= (u64)epp << 24;
+ } else {
+ intel_pstate_set_epb(cpu, epp);
+ }
+skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
}
return 0;
}
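+/*
+ * Invalidate the cached policy and saved EPP on resume so that
+ * intel_pstate_hwp_set_policy() reprograms EPP/EPB, which may have
+ * been reset by firmware during suspend.
+ */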
+static int intel_pstate_resume(struct cpufreq_policy *policy)
+{
+ if (!hwp_active)
+ return 0;
+
+ all_cpu_data[policy->cpu]->epp_policy = 0;
+ all_cpu_data[policy->cpu]->epp_saved = -EINVAL;
+
+ return intel_pstate_hwp_set_policy(policy);
+}
+
static void intel_pstate_hwp_set_online_cpus(void)
{
get_online_cpus();
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
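+ /* No policy programmed yet, and no valid saved EPP */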
+ cpudata->epp_policy = 0;
+ cpudata->epp_saved = -EINVAL;
}
static int atom_get_min_pstate(void)
.flags = CPUFREQ_CONST_LOOPS,
.verify = intel_pstate_verify_policy,
.setpolicy = intel_pstate_set_policy,
- .resume = intel_pstate_hwp_set_policy,
+ .resume = intel_pstate_resume,
.get = intel_pstate_get,
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,