--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ ... @@
 int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj,
			      struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj);
 int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
 int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ ... @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
+#include <linux/dma-mapping.h>
 #include <linux/moduleparam.h>
 
 #include "virtgpu_drv.h"
@@ ... @@ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
 {
 	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
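+	/*
+	 * Backing storage teardown now happens at object cleanup time:
+	 * unmap the scatterlist if the DMA API mapped it, free the sg
+	 * table, then unpin the shmem pages.
+	 */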
+	if (bo->pages) {
+		if (bo->mapped) {
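+			/* bo->mapped is only non-zero when the dma api
+			 * was used to map the sg table */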
+			dma_unmap_sg(vgdev->vdev->dev.parent,
+				     bo->pages->sgl, bo->mapped,
+				     DMA_TO_DEVICE);
+			bo->mapped = 0;
+		}
+		sg_free_table(bo->pages);
+		bo->pages = NULL;
+		drm_gem_shmem_unpin(&bo->base.base);
+	}
+
 	virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 	drm_gem_shmem_free_object(&bo->base.base);
 }
@@ ... @@ static void virtio_gpu_free_object(struct drm_gem_object *obj)
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
 	struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
 
-	if (bo->pages)
-		virtio_gpu_object_detach(vgdev, bo);
-
 	if (bo->created) {
 		virtio_gpu_cmd_unref_resource(vgdev, bo);
 		/* completion handler calls virtio_gpu_cleanup_object() */
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ ... @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
 	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
 }
 
-static void
-virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
-				      uint32_t resource_id,
-				      struct virtio_gpu_fence *fence)
-{
-	struct virtio_gpu_resource_detach_backing *cmd_p;
-	struct virtio_gpu_vbuffer *vbuf;
-
-	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
-	memset(cmd_p, 0, sizeof(*cmd_p));
-
-	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
-
-	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
-}
-
 void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
 				uint32_t scanout_id, uint32_t resource_id,
 				uint32_t width, uint32_t height,
@@ ... @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
 	return 0;
 }
 
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
-			      struct virtio_gpu_object *obj)
-{
-	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
-	if (WARN_ON_ONCE(!obj->pages))
-		return;
-
-	if (use_dma_api && obj->mapped) {
-		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
-		/* detach backing and wait for the host process it ... */
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
-		dma_fence_wait(&fence->f, true);
-		dma_fence_put(&fence->f);
-
-		/* ... then tear down iommu mappings */
-		dma_unmap_sg(vgdev->vdev->dev.parent,
-			     obj->pages->sgl, obj->mapped,
-			     DMA_TO_DEVICE);
-		obj->mapped = 0;
-	} else {
-		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
-	}
-
-	sg_free_table(obj->pages);
-	obj->pages = NULL;
-
-	drm_gem_shmem_unpin(&obj->base.base);
-}
-
 void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
 			    struct virtio_gpu_output *output)
 {