#include <linux/slab.h>
#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
#include <mach/map.h>
#include <mach/regs-clock.h>
static struct cpufreq_freqs freqs;
static unsigned int memtype;
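+/*
+ * locking_frequency is the initial (bootloader-set) frequency to which
+ * exynos4_target() is restricted while frequency_locked is true, i.e.
+ * across system suspend/resume. cpufreq_lock serialises exynos4_target()
+ * against the PM notifier below.
+ */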
+static unsigned int locking_frequency;
+static bool frequency_locked;
+static DEFINE_MUTEX(cpufreq_lock);
+
enum exynos4_memory_type {
DDR2 = 4,
LPDDR2,
{
unsigned int index, old_index;
unsigned int arm_volt, int_volt;
+ int err = -EINVAL;
freqs.old = exynos4_getspeed(policy->cpu);
+ mutex_lock(&cpufreq_lock);
+
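+	/*
+	 * While the PM notifier has locked the frequency for suspend/resume,
+	 * accept only locking_frequency; -EAGAIN tells callers to retry later.
+	 */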
+ if (frequency_locked && target_freq != locking_frequency) {
+ err = -EAGAIN;
+ goto out;
+ }
+
if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
freqs.old, relation, &old_index))
- return -EINVAL;
+ goto out;
if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
target_freq, relation, &index))
- return -EINVAL;
+ goto out;
+
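+	/* Both table lookups succeeded; bailing out early from here on
+	 * (e.g. no frequency change needed) is not an error. */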
+ err = 0;
freqs.new = exynos4_freq_table[index].frequency;
freqs.cpu = policy->cpu;
if (freqs.new == freqs.old)
- return 0;
+ goto out;
/* get the voltage value */
arm_volt = exynos4_volt_table[index].arm_volt;
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- return 0;
+out:
+ mutex_unlock(&cpufreq_lock);
+ return err;
}
#ifdef CONFIG_PM
+/*
+ * These suspend/resume callbacks are invoked as syscore_ops, which runs
+ * too late in the suspend sequence to adjust regulator voltages, so the
+ * real work is done in the PM notifier below instead.
+ */
static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
{
return 0;
}
#endif
+/**
+ * exynos4_cpufreq_pm_notifier - block CPUFREQ activity across suspend/resume
+ * @notifier: the notifier block registered with register_pm_notifier()
+ * @pm_event: the PM event (only PM_SUSPEND_PREPARE and PM_POST_SUSPEND are handled)
+ * @v: unused
+ *
+ * While frequency_locked == true, target() ignores every frequency but
+ * locking_frequency. The locking_frequency value is the initial frequency,
+ * which is set by the bootloader. In order to eliminate possible
+ * inconsistency in clock values, we save and restore frequencies during
+ * suspend and resume and block CPUFREQ activity. Note that the standard
+ * suspend/resume callbacks cannot be used here because they run too late
+ * (as syscore_ops) for regulator operations.
+ */
+static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ static unsigned int saved_frequency;
+ unsigned int temp;
+
+ mutex_lock(&cpufreq_lock);
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ if (frequency_locked)
+ goto out;
+ frequency_locked = true;
+
+ if (locking_frequency) {
+ saved_frequency = exynos4_getspeed(0);
+
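+			/*
+			 * exynos4_target() takes cpufreq_lock itself, so drop
+			 * the lock around the call to avoid deadlocking.
+			 */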
+ mutex_unlock(&cpufreq_lock);
+ exynos4_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+ }
+
+ break;
+ case PM_POST_SUSPEND:
+
+ if (saved_frequency) {
+ /*
+ * While frequency_locked, only locking_frequency
+ * is valid for target(). In order to use
+ * saved_frequency while keeping frequency_locked,
+ * we temporarily overwrite locking_frequency.
+ */
+ temp = locking_frequency;
+ locking_frequency = saved_frequency;
+
+ mutex_unlock(&cpufreq_lock);
+ exynos4_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+
+ locking_frequency = temp;
+ }
+
+ frequency_locked = false;
+ break;
+ }
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos4_cpufreq_nb = {
+ .notifier_call = exynos4_cpufreq_pm_notifier,
+};
+
static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int ret;
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
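+	/* Remember the bootloader-set boot frequency; suspend locks to it */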
+ locking_frequency = exynos4_getspeed(0);
+
moutcore = clk_get(NULL, "moutcore");
if (IS_ERR(moutcore))
goto out;
printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
}
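+	/* Lock the frequency while the system suspends/resumes */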
+ register_pm_notifier(&exynos4_cpufreq_nb);
+
return cpufreq_register_driver(&exynos4_driver);
out: