/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A5XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+ /* Select countable 6 on RBBM counter 0 to track the busy status for devfreq */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
/* Increase VFD cache access so LRZ and other data gets evicted less */
gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
return a5xx_gpu->cur_ring;
}
+static int a5xx_gpu_busy(struct msm_gpu *gpu, uint64_t *value)
+{
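+ /* RBBM counter 0 is programmed to countable 6 (busy cycles) at hw_init */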
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_RBBM_0_LO,
+ REG_A5XX_RBBM_PERFCTR_RBBM_0_HI);
+
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
#ifdef CONFIG_DEBUG_FS
.show = a5xx_show,
#endif
+ .gpu_busy = a5xx_gpu_busy,
},
.get_timestamp = a5xx_get_timestamp,
};
#include "msm_fence.h"
#include <linux/string_helpers.h>
+#include <linux/pm_opp.h>
+#include <linux/devfreq.h>
/*
* Power Management:
*/
+static int msm_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ struct dev_pm_opp *opp;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+
+ if (IS_ERR(opp))
+ return PTR_ERR(opp);
+
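+ /* devfreq_recommended_opp() rounded *freq to the nearest supported OPP */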
+ clk_set_rate(gpu->core_clk, *freq);
+ dev_pm_opp_put(opp);
+
+ return 0;
+}
+
+static int msm_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *status)
+{
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+ u64 cycles;
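+ /* Frequency from the previous sample period, in MHz (i.e. cycles per usec) */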
+ u32 freq = ((u32) status->current_frequency) / 1000000;
+ ktime_t time;
+
+ status->current_frequency = (unsigned long) clk_get_rate(gpu->core_clk);
+ gpu->funcs->gpu_busy(gpu, &cycles);
+
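+ /* Busy cycles divided by cycles-per-usec gives busy time in usecs */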
+ status->busy_time = ((u32) (cycles - gpu->devfreq.busy_cycles)) / freq;
+
+ gpu->devfreq.busy_cycles = cycles;
+
+ time = ktime_get();
+ status->total_time = ktime_us_delta(time, gpu->devfreq.time);
+ gpu->devfreq.time = time;
+
+ return 0;
+}
+
+static int msm_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
+{
+ struct msm_gpu *gpu = platform_get_drvdata(to_platform_device(dev));
+
+ *freq = (unsigned long) clk_get_rate(gpu->core_clk);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile msm_devfreq_profile = {
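+ /* Sample the GPU load every 10ms */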
+ .polling_ms = 10,
+ .target = msm_devfreq_target,
+ .get_dev_status = msm_devfreq_get_dev_status,
+ .get_cur_freq = msm_devfreq_get_cur_freq,
+};
+
+static void msm_devfreq_init(struct msm_gpu *gpu)
+{
+ /* We need a target-specific gpu_busy() callback to do devfreq */
+ if (!gpu->funcs->gpu_busy)
+ return;
+
+ msm_devfreq_profile.initial_freq = gpu->fast_rate;
+
+ /*
+ * Don't set freq_table or max_state; let devfreq build the table from
+ * the registered OPPs
+ */
+
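+ /* Scale between the registered OPPs based on load via simple_ondemand */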
+ gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ &msm_devfreq_profile, "simple_ondemand", NULL);
+
+ if (IS_ERR(gpu->devfreq.devfreq)) {
+ dev_err(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
+ gpu->devfreq.devfreq = NULL;
+ }
+}
+
static int enable_pwrrail(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
if (ret)
return ret;
+ if (gpu->devfreq.devfreq) {
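+ /* Start a fresh sample window across the power cycle */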
+ gpu->devfreq.busy_cycles = 0;
+ gpu->devfreq.time = ktime_get();
+
+ devfreq_resume_device(gpu->devfreq.devfreq);
+ }
+
gpu->needs_hw_init = true;
return 0;
DBG("%s", gpu->name);
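+ /* Stop devfreq polling before the clocks are disabled */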
+ if (gpu->devfreq.devfreq)
+ devfreq_suspend_device(gpu->devfreq.devfreq);
+
ret = disable_axi(gpu);
if (ret)
return ret;
gpu->pdev = pdev;
platform_set_drvdata(pdev, gpu);
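+ /* The devfreq callbacks look up the GPU via drvdata, so set it first */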
+ msm_devfreq_init(gpu);
+
gpu->aspace = msm_gpu_create_address_space(gpu, pdev,
config->va_start, config->va_end);