virtio-gpu: add & use virtio_gpu_queue_fenced_ctrl_buffer
author     Gerd Hoffmann <kraxel@redhat.com>   Wed, 19 Aug 2015 21:44:15 +0000 (23:44 +0200)
committer  Gerd Hoffmann <kraxel@redhat.com>   Fri, 16 Oct 2015 08:44:00 +0000 (10:44 +0200)
Add helper function to handle the submission of fenced control requests.
Make sure we initialize the fence while holding the virtqueue lock, so
requests can't be reordered.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
drivers/gpu/drm/virtio/virtgpu_fence.c
drivers/gpu/drm/virtio/virtgpu_vq.c

diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 1da632631dac808e8273fe3aa77a5426950f9156..793ad9f631fd357ecbfeb85813076779067d1ef9 100644
@@ -81,7 +81,7 @@ int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
        unsigned long irq_flags;
 
-       *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_KERNEL);
+       *fence = kmalloc(sizeof(struct virtio_gpu_fence), GFP_ATOMIC);
        if ((*fence) == NULL)
                return -ENOMEM;
 
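
Why GFP_ATOMIC: virtio_gpu_fence_emit() is now called from the new
virtio_gpu_queue_fenced_ctrl_buffer() helper (below) with ctrlq.qlock
held, and a sleeping GFP_KERNEL allocation is not allowed under a
spinlock.  A minimal sketch of the new calling context, simplified from
the helper added in virtgpu_vq.c:

	/* sketch only -- see virtio_gpu_queue_fenced_ctrl_buffer() below */
	spin_lock(&vgdev->ctrlq.qlock);
	virtio_gpu_fence_emit(vgdev, hdr, fence);    /* kmalloc(GFP_ATOMIC) */
	virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
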
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5b9bc242890fea4720278f7acd1e549a18aaf961..ee25e9a4ae03bc62d7d5b4b43a4fe498019b1b3a 100644
@@ -347,6 +347,38 @@ static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
        return rc;
 }
 
+static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
+                                              struct virtio_gpu_vbuffer *vbuf,
+                                              struct virtio_gpu_ctrl_hdr *hdr,
+                                              struct virtio_gpu_fence **fence)
+{
+       struct virtqueue *vq = vgdev->ctrlq.vq;
+       int rc;
+
+again:
+       spin_lock(&vgdev->ctrlq.qlock);
+
+       /*
+        * Make sure we have enough space in the virtqueue.  If not,
+        * wait here until we do.
+        *
+        * Without that, virtio_gpu_queue_ctrl_buffer_locked might have
+        * to wait for free space, which can result in fence ids being
+        * submitted out-of-order.
+        */
+       if (vq->num_free < 3) {
+               spin_unlock(&vgdev->ctrlq.qlock);
+               wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
+               goto again;
+       }
+
+       if (fence)
+               virtio_gpu_fence_emit(vgdev, hdr, fence);
+       rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
+       spin_unlock(&vgdev->ctrlq.qlock);
+       return rc;
+}
+
 static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
                                   struct virtio_gpu_vbuffer *vbuf)
 {
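
A note on the "vq->num_free < 3" threshold: a single control request can
need up to three descriptors -- command buffer, optional data buffer and
response buffer -- so three is a safe per-submission upper bound (an
inference from the vbuffer layout, not spelled out in the patch).
Waiters sleeping in wait_event() above are woken from the control-queue
completion path; sketched below, assuming the existing dequeue worker in
this file:

	/* Sketch of the wake-up side, based on virtio_gpu_dequeue_ctrl_func():
	 * once used buffers have been reclaimed from the ctrl virtqueue there
	 * is ring space again, so waiters retry the num_free check above.
	 */
	wake_up(&vgdev->ctrlq.ack_queue);
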
@@ -499,9 +531,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
        cmd_p->r.x = x;
        cmd_p->r.y = y;
 
-       if (fence)
-               virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
-       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 static void
@@ -524,9 +554,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
        vbuf->data_buf = ents;
        vbuf->data_size = sizeof(*ents) * nents;
 
-       if (fence)
-               virtio_gpu_fence_emit(vgdev, &cmd_p->hdr, fence);
-       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
 }
 
 static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
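
With the helper in place, fenced and unfenced submissions share one
path: the converted callers above just forward their fence argument, and
the helper skips virtio_gpu_fence_emit() when it is NULL.  A
hypothetical usage sketch (vbuf/cmd_p setup via the usual
virtio_gpu_alloc_cmd() pattern in this file is assumed):

	struct virtio_gpu_fence *fence = NULL;

	/* fenced: the fence id is assigned under ctrlq.qlock, so ids
	 * reach the ring in order */
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, &fence);

	/* unfenced: no fence is emitted, the buffer is just queued */
	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, NULL);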