if (gpu) {
int ret;
- mutex_lock(&dev->struct_mutex);
- gpu->funcs->pm_resume(gpu);
- mutex_unlock(&dev->struct_mutex);
- disable_irq(gpu->irq);
-
- ret = gpu->funcs->hw_init(gpu);
+ pm_runtime_get_sync(&pdev->dev);
+ ret = msm_gpu_hw_init(gpu);
+ pm_runtime_put_sync(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
- } else {
- enable_irq(gpu->irq);
- /* give inactive pm a chance to kick in: */
- msm_gpu_retire(gpu);
}
}
{}
};
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
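
These callbacks just forward to the existing per-GPU pm_suspend()/pm_resume() hooks; the power-down policy itself now lives in the runtime-PM core and is driven by the device's usage count. A minimal consumer-side sketch of that pattern (hypothetical helper, not part of this patch; it assumes <linux/pm_runtime.h> is included and that runtime PM plus autosuspend have been enabled on the device, as the init path later in the patch does):

/* Illustration only, not part of the patch. */
static int example_touch_hw(struct msm_gpu *gpu)
{
	int ret;

	/* take a reference; the PM core calls adreno_resume() if the GPU is off */
	ret = pm_runtime_get_sync(&gpu->pdev->dev);
	if (ret < 0) {
		/* get_sync bumps the usage count even on failure; balance it */
		pm_runtime_put_noidle(&gpu->pdev->dev);
		return ret;
	}

	/* ... poke the hardware ... */

	/* drop the reference; adreno_suspend() runs once the autosuspend delay expires */
	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);

	return 0;
}
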
+
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
},
};
struct drm_device *dev = gpu->dev;
int ret;
+ // XXX pm-runtime: we *need* the device to actually be powered off after this,
+ // so maybe keeping the direct ->pm_suspend()/->pm_resume() calls here is better?
+
gpu->funcs->pm_suspend(gpu);
/* reset ringbuffer: */
gpu->funcs->pm_resume(gpu);
- disable_irq(gpu->irq);
- ret = gpu->funcs->hw_init(gpu);
+ ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
- enable_irq(gpu->irq);
}
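
The XXX note above appears to be about the guarantee runtime PM would (not) give here: a put is reference counted and, with autosuspend enabled, deferred by the autosuspend delay, so it would not necessarily cut power to a hung GPU, while recovery needs a real power cycle. Roughly (a sketch of the concern, not code from the patch):

	/* A runtime-PM "power cycle" is not guaranteed to actually cut power: */
	pm_runtime_put_sync(&gpu->pdev->dev);	/* may defer or skip the suspend */
	pm_runtime_get_sync(&gpu->pdev->dev);

	/* ...whereas the direct calls above force the suspend/resume cycle: */
	gpu->funcs->pm_suspend(gpu);
	gpu->funcs->pm_resume(gpu);
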
void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
if (ret)
return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (gpu->active_cnt++ > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt <= 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = enable_pwrrail(gpu);
if (ret)
if (ret)
return ret;
+ gpu->needs_hw_init = true;
+
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (--gpu->active_cnt > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt < 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = disable_axi(gpu);
if (ret)
return 0;
}
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
-{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
- struct drm_device *dev = gpu->dev;
-
- if (gpu->inactive)
- return;
-
- DBG("%s: inactive!\n", gpu->name);
- mutex_lock(&dev->struct_mutex);
- if (!(msm_gpu_active(gpu) || gpu->inactive)) {
- disable_axi(gpu);
- disable_clk(gpu);
- gpu->inactive = true;
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-static void inactive_handler(unsigned long data)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
- struct msm_drm_private *priv = gpu->dev->dev_private;
+ int ret;
- queue_work(priv->wq, &gpu->inactive_work);
-}
+ if (!gpu->needs_hw_init)
+ return 0;
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- del_timer(&gpu->inactive_timer);
- if (gpu->inactive) {
- enable_clk(gpu);
- enable_axi(gpu);
- gpu->inactive = false;
- }
-}
+ disable_irq(gpu->irq);
+ ret = gpu->funcs->hw_init(gpu);
+ if (!ret)
+ gpu->needs_hw_init = false;
+ enable_irq(gpu->irq);
-static void inactive_start(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- mod_timer(&gpu->inactive_timer,
- round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+ return ret;
}
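
With the inactivity machinery removed, pm_resume() only flags the GPU as needing re-initialization; the actual hw_init() is deferred to msm_gpu_hw_init(), which callers run while the GPU is powered. The expected calling convention, condensed (hypothetical caller, mirroring the load, recover and submit paths elsewhere in this patch):

	pm_runtime_get_sync(&gpu->pdev->dev);	/* powers up; pm_resume() sets needs_hw_init */
	ret = msm_gpu_hw_init(gpu);		/* re-inits the hardware only when flagged */
	pm_runtime_put_sync(&gpu->pdev->dev);
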
/*
/* retire completed submits, plus the one that hung: */
retire_submits(gpu);
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->recover(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
/* replay the remaining submits after the one that hung: */
list_for_each_entry(submit, &gpu->submit_list, node) {
{
unsigned long flags;
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
drm_gem_object_unreference(&msm_obj->base);
}
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
msm_gem_submit_free(submit);
}
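
Each in-flight submit now holds its own runtime-PM reference: msm_gpu_submit() (further down) takes it, and it is dropped here once the job retires. pm_runtime_mark_last_busy() rearms the autosuspend timer, so the GPU suspends roughly DRM_MSM_INACTIVE_PERIOD ms after the last retirement; this is what replaces the removed inactive_timer/inactive_work path. Shown side by side for illustration only (names as in this patch):

	/* submit path: one reference per queued job */
	pm_runtime_get_sync(&gpu->pdev->dev);

	/* retire path: release it and rearm the autosuspend timer */
	pm_runtime_mark_last_busy(&gpu->pdev->dev);
	pm_runtime_put_autosuspend(&gpu->pdev->dev);
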
mutex_lock(&dev->struct_mutex);
retire_submits(gpu);
mutex_unlock(&dev->struct_mutex);
-
- if (!msm_gpu_active(gpu))
- inactive_start(gpu);
}
/* call from irq handler to schedule work to retire bo's */
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- inactive_cancel(gpu);
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
+ msm_gpu_hw_init(gpu);
list_add_tail(&submit->node, &gpu->submit_list);
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->inactive = true;
gpu->fctx = msm_fence_context_alloc(drm, name);
if (IS_ERR(gpu->fctx)) {
ret = PTR_ERR(gpu->fctx);
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
- INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
INIT_LIST_HEAD(&gpu->submit_list);
- setup_timer(&gpu->inactive_timer, inactive_handler,
- (unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
goto fail;
}
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
bs_init(gpu);
return 0;
struct msm_gpu {
const char *name;
struct drm_device *dev;
+ struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
/* performance counters (hw & sw): */
/* fencing: */
struct msm_fence_context *fctx;
- /* is gpu powered/active? */
- int active_cnt;
- bool inactive;
+ /* does gpu need hw_init? */
+ bool needs_hw_init;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
- struct timer_list inactive_timer;
- struct work_struct inactive_work;
+
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,