drm/virtio: move mapping teardown to virtio_gpu_cleanup_object()
authorGerd Hoffmann <kraxel@redhat.com>
Fri, 7 Feb 2020 07:46:37 +0000 (08:46 +0100)
committerGerd Hoffmann <kraxel@redhat.com>
Mon, 10 Feb 2020 11:54:33 +0000 (12:54 +0100)
Stop sending DETACH_BACKING commands; that will happen anyway when
releasing resources via UNREF.  Handle guest-side cleanup in
virtio_gpu_cleanup_object(), called when the host has finished processing
the UNREF command.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20200207074638.26386-4-kraxel@redhat.com
Reviewed-by: Chia-I Wu <olvaffe@gmail.com>
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/virtio/virtgpu_vq.c

index c3a33ba3257164936598ec801a323be147d6ebec..cab18c23064bbcb418a269c2d4b43d39bfd366d4 100644 (file)
@@ -282,8 +282,6 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
                             struct virtio_gpu_object *obj,
                             struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-                             struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
index 28a161af7503bc145b5a586dd62a10dc562598f3..bce2b3d843fe4297a205bf03ce5b1d20ae9f1085 100644 (file)
@@ -23,6 +23,7 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
 #include "virtgpu_drv.h"
@@ -65,6 +66,17 @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
+       if (bo->pages) {
+               if (bo->mapped) {
+                       dma_unmap_sg(vgdev->vdev->dev.parent,
+                                    bo->pages->sgl, bo->mapped,
+                                    DMA_TO_DEVICE);
+                       bo->mapped = 0;
+               }
+               sg_free_table(bo->pages);
+               bo->pages = NULL;
+               drm_gem_shmem_unpin(&bo->base.base);
+       }
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
        drm_gem_shmem_free_object(&bo->base.base);
 }
@@ -74,8 +86,6 @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
        struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-       if (bo->pages)
-               virtio_gpu_object_detach(vgdev, bo);
        if (bo->created) {
                virtio_gpu_cmd_unref_resource(vgdev, bo);
                /* completion handler calls virtio_gpu_cleanup_object() */
index 755a3e26ef013aad6a2253b01ff88cf10baf2a0d..ae0b0a9ac3b5d596fee645de3c70cd9c8cad9dc2 100644 (file)
@@ -548,22 +548,6 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-                                                 uint32_t resource_id,
-                                                 struct virtio_gpu_fence *fence)
-{
-       struct virtio_gpu_resource_detach_backing *cmd_p;
-       struct virtio_gpu_vbuffer *vbuf;
-
-       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-       memset(cmd_p, 0, sizeof(*cmd_p));
-
-       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-       cmd_p->resource_id = cpu_to_le32(resource_id);
-
-       virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
-}
-
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
                                uint32_t scanout_id, uint32_t resource_id,
                                uint32_t width, uint32_t height,
@@ -1158,36 +1142,6 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
        return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-                             struct virtio_gpu_object *obj)
-{
-       bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-       if (WARN_ON_ONCE(!obj->pages))
-               return;
-
-       if (use_dma_api && obj->mapped) {
-               struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-               /* detach backing and wait for the host process it ... */
-               virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-               dma_fence_wait(&fence->f, true);
-               dma_fence_put(&fence->f);
-
-               /* ... then tear down iommu mappings */
-               dma_unmap_sg(vgdev->vdev->dev.parent,
-                            obj->pages->sgl, obj->mapped,
-                            DMA_TO_DEVICE);
-               obj->mapped = 0;
-       } else {
-               virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-       }
-
-       sg_free_table(obj->pages);
-       obj->pages = NULL;
-
-       drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
                            struct virtio_gpu_output *output)
 {