drm/msm/dpu: use msm wq for vblank events
author     Jeykumar Sankaran <jsanka@codeaurora.org>
           Fri, 14 Dec 2018 23:57:52 +0000 (15:57 -0800)
committer  Sean Paul <seanpaul@chromium.org>
           Fri, 1 Feb 2019 15:35:55 +0000 (10:35 -0500)
DPU was using one kthread per display to dispatch async commits and
vblank requests. Since the cleanup to route all display commits through
the common msm thread has already happened, the display threads are now
only used to service vblank requests. A single worker is sufficient for
this without any performance hit, so queue the requests on the msm
workqueue instead. A later patch in this series removes the display
threads altogether.
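
For reference, the conversion boils down to swapping the kthread_work
API for the regular workqueue API. Below is a minimal sketch of the
resulting pattern; the structure and function names are simplified
stand-ins, not the exact driver code:

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>

/* Simplified stand-in for struct msm_vblank_ctrl. */
struct vblank_ctrl {
	struct work_struct work;	/* was: struct kthread_work work */
	struct list_head event_list;
	spinlock_t lock;
};

static void vblank_ctrl_worker(struct work_struct *work)
{
	struct vblank_ctrl *vbl_ctrl = container_of(work,
						struct vblank_ctrl, work);

	/* drain event_list and enable/disable vblanks here */
}

/* init: was kthread_init_work(&vbl_ctrl->work, vblank_ctrl_worker) */
static void vblank_ctrl_init(struct vblank_ctrl *vbl_ctrl)
{
	INIT_LIST_HEAD(&vbl_ctrl->event_list);
	spin_lock_init(&vbl_ctrl->lock);
	INIT_WORK(&vbl_ctrl->work, vblank_ctrl_worker);
}

/* queue: was kthread_queue_work(&disp_thread[crtc_id].worker, ...) */
static void vblank_ctrl_queue(struct workqueue_struct *wq,
			      struct vblank_ctrl *vbl_ctrl)
{
	queue_work(wq, &vbl_ctrl->work);
}

/* teardown: was kthread_flush_work(&vbl_ctrl->work) */
static void vblank_ctrl_fini(struct vblank_ctrl *vbl_ctrl)
{
	flush_work(&vbl_ctrl->work);
}

With this, any workqueue (priv->wq in the driver) can service the
requests, so no dedicated kthread per display is needed.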

changes in v2:
- switch to system wq before removing disp threads (Sean Paul)
changes in v3:
- none
changes in v4:
- use msm wq for vblank events
changes in v5:
- none

Signed-off-by: Jeykumar Sankaran <jsanka@codeaurora.org>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h

index 5f859653442de924fe5e996c0a100ead32d53e05..6c311f92c11dfd0b028700d9e5d8778b2b979884 100644 (file)
@@ -213,7 +213,7 @@ struct vblank_event {
        bool enable;
 };
 
-static void vblank_ctrl_worker(struct kthread_work *work)
+static void vblank_ctrl_worker(struct work_struct *work)
 {
        struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
                                                struct msm_vblank_ctrl, work);
@@ -261,8 +261,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
        list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
        spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
 
-       kthread_queue_work(&priv->disp_thread[crtc_id].worker,
-                       &vbl_ctrl->work);
+       queue_work(priv->wq, &vbl_ctrl->work);
 
        return 0;
 }
@@ -282,7 +281,7 @@ static int msm_drm_uninit(struct device *dev)
         * work before drm_irq_uninstall() to avoid work re-enabling an
         * irq after uninstall has disabled it.
         */
-       kthread_flush_work(&vbl_ctrl->work);
+       flush_work(&vbl_ctrl->work);
        list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
                list_del(&vbl_ev->node);
                kfree(vbl_ev);
@@ -489,7 +488,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
 
        INIT_LIST_HEAD(&priv->inactive_list);
        INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
-       kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+       INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
        spin_lock_init(&priv->vblank_ctrl.lock);
 
        drm_mode_config_init(ddev);
index 197ed319a97b26def60b033f3e82c4151401bcf7..e2689c2e580e9a2912d24cd53475b0f1f24bf93b 100644 (file)
@@ -78,7 +78,7 @@ enum msm_mdp_plane_property {
 };
 
 struct msm_vblank_ctrl {
-       struct kthread_work work;
+       struct work_struct work;
        struct list_head event_list;
        spinlock_t lock;
 };