--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_probe_helper.h>
+#include <drm/virtgpu_drm.h>
#define DRIVER_NAME "virtio_gpu"
#define DRIVER_DESC "virtio GPU"
@@ ... @@
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
#include "virtgpu_drv.h"
-static void convert_to_hw_box(struct virtio_gpu_box *dst,
- const struct drm_virtgpu_3d_box *src)
-{
- dst->x = cpu_to_le32(src->x);
- dst->y = cpu_to_le32(src->y);
- dst->z = cpu_to_le32(src->z);
- dst->w = cpu_to_le32(src->w);
- dst->h = cpu_to_le32(src->h);
- dst->d = cpu_to_le32(src->d);
-}
-
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ ... @@ static int virtio_gpu_transfer_from_host_ioctl(...)
struct virtio_gpu_fence *fence;
int ret;
u32 offset = args->offset;
- struct virtio_gpu_box box;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
if (ret != 0)
goto err_put_free;
- convert_to_hw_box(&box, &args->box);
-
fence = virtio_gpu_fence_alloc(vgdev);
if (!fence) {
ret = -ENOMEM;
goto err_unlock;
}
virtio_gpu_cmd_transfer_from_host_3d
(vgdev, vfpriv->ctx_id, offset, args->level,
- &box, objs, fence);
+ &args->box, objs, fence);
dma_fence_put(&fence->f);
return 0;
@@ ... @@ static int virtio_gpu_transfer_to_host_ioctl(...)
struct drm_virtgpu_3d_transfer_to_host *args = data;
struct virtio_gpu_object_array *objs;
struct virtio_gpu_fence *fence;
- struct virtio_gpu_box box;
int ret;
u32 offset = args->offset;
if (objs == NULL)
return -ENOENT;
- convert_to_hw_box(&box, &args->box);
if (!vgdev->has_virgl_3d) {
virtio_gpu_cmd_transfer_to_host_2d
(vgdev, offset,
- box.w, box.h, box.x, box.y,
+ args->box.w, args->box.h, args->box.x, args->box.y,
objs, NULL);
} else {
ret = virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_3d
(vgdev,
vfpriv ? vfpriv->ctx_id : 0, offset,
- args->level, &box, objs, fence);
+ args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
return 0;
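Note on the hunk above: keeping args->box in CPU byte order until it reaches the command layer also appears to fix a big-endian bug in the 2D path. The old code handed the already-byteswapped box.w/box.h/box.x/box.y to virtio_gpu_cmd_transfer_to_host_2d, which converts its arguments itself, so on big-endian hosts the values would cross the wire double-swapped, i.e. in CPU order. A standalone userspace sketch of the effect (cpu_to_le32_sketch is a hypothetical stand-in for the kernel's cpu_to_le32()):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's cpu_to_le32(): a byte swap on
 * big-endian hosts, a no-op on little-endian ones. */
static uint32_t cpu_to_le32_sketch(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

int main(void)
{
	uint32_t w = 640;

	/* Old 2D path: the box was converted once in the ioctl handler and
	 * again inside the command helper; on big-endian the two swaps
	 * cancel out and the device sees a CPU-order value. */
	uint32_t old_wire = cpu_to_le32_sketch(cpu_to_le32_sketch(w));

	/* New 2D path: a single conversion inside the command helper. */
	uint32_t new_wire = cpu_to_le32_sketch(w);

	printf("old wire value: 0x%08x, new wire value: 0x%08x\n",
	       old_wire, new_wire);
	return 0;
}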
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \
+ MAX_INLINE_CMD_SIZE \
+ MAX_INLINE_RESP_SIZE)
+static void convert_to_hw_box(struct virtio_gpu_box *dst,
+ const struct drm_virtgpu_3d_box *src)
+{
+ dst->x = cpu_to_le32(src->x);
+ dst->y = cpu_to_le32(src->y);
+ dst->z = cpu_to_le32(src->z);
+ dst->w = cpu_to_le32(src->w);
+ dst->h = cpu_to_le32(src->h);
+ dst->d = cpu_to_le32(src->d);
+}
+
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
struct drm_device *dev = vq->vdev->priv;
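The helper moved into this file bridges two types: struct drm_virtgpu_3d_box (__u32 fields in CPU order, from include/uapi/drm/virtgpu_drm.h) and struct virtio_gpu_box (__le32 fields, the virtio wire format, from include/uapi/linux/virtio_gpu.h). A standalone sketch, with simplified stand-in types and a hypothetical cpu_to_le32_sketch helper, showing that the converted box is little-endian on any host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the uapi types; the real definitions live in
 * include/uapi/drm/virtgpu_drm.h and include/uapi/linux/virtio_gpu.h. */
struct drm_virtgpu_3d_box_sketch { uint32_t x, y, z, w, h, d; }; /* CPU order */
struct virtio_gpu_box_sketch     { uint32_t x, y, z, w, h, d; }; /* wire: LE */

static uint32_t cpu_to_le32_sketch(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}

static void convert_to_hw_box_sketch(struct virtio_gpu_box_sketch *dst,
				     const struct drm_virtgpu_3d_box_sketch *src)
{
	dst->x = cpu_to_le32_sketch(src->x);
	dst->y = cpu_to_le32_sketch(src->y);
	dst->z = cpu_to_le32_sketch(src->z);
	dst->w = cpu_to_le32_sketch(src->w);
	dst->h = cpu_to_le32_sketch(src->h);
	dst->d = cpu_to_le32_sketch(src->d);
}

int main(void)
{
	struct drm_virtgpu_3d_box_sketch src = { 0, 0, 0, 640, 480, 1 };
	struct virtio_gpu_box_sketch dst;
	unsigned char bytes[sizeof(dst)];

	convert_to_hw_box_sketch(&dst, &src);
	memcpy(bytes, &dst, sizeof(dst));

	/* dst.w occupies bytes 12..15; little-endian means 640 (0x280) is
	 * laid out as 0x80 0x02 0x00 0x00 regardless of the host order. */
	printf("w on the wire: %02x %02x %02x %02x\n",
	       bytes[12], bytes[13], bytes[14], bytes[15]);
	return 0;
}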
@@ ... @@
void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->box = *box;
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
@@ ... @@
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
uint32_t ctx_id,
uint64_t offset, uint32_t level,
- struct virtio_gpu_box *box,
+ struct drm_virtgpu_3d_box *box,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence)
{
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- cmd_p->box = *box;
+ convert_to_hw_box(&cmd_p->box, box);
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
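In both hunks above, cmd_p points at the VIRTIO_GPU_CMD_TRANSFER_{TO,FROM}_HOST_3D wire command. A simplified sketch of its layout follows; the authoritative definition is struct virtio_gpu_transfer_host_3d in include/uapi/linux/virtio_gpu.h, and the exact field order here is recalled from memory, so treat it as illustrative:

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the wire command both functions fill. All
 * multi-byte fields are little-endian on the wire (__le32/__le64 in the
 * real header; plain stdint types here). */
struct virtio_gpu_ctrl_hdr_sketch {
	uint32_t type;		/* VIRTIO_GPU_CMD_TRANSFER_{TO,FROM}_HOST_3D */
	uint32_t flags;
	uint64_t fence_id;
	uint32_t ctx_id;	/* cpu_to_le32(ctx_id) in the hunks above */
	uint32_t padding;
};

struct virtio_gpu_box_sketch {
	uint32_t x, y, z;
	uint32_t w, h, d;	/* filled by convert_to_hw_box() */
};

struct virtio_gpu_transfer_host_3d_sketch {
	struct virtio_gpu_ctrl_hdr_sketch hdr;
	struct virtio_gpu_box_sketch box;
	uint64_t offset;	/* cpu_to_le64(offset) */
	uint32_t resource_id;	/* cpu_to_le32(bo->hw_res_handle) */
	uint32_t level;		/* cpu_to_le32(level) */
	uint32_t stride;
	uint32_t layer_stride;
};

int main(void)
{
	printf("wire command size: %zu bytes\n",
	       sizeof(struct virtio_gpu_transfer_host_3d_sketch));
	return 0;
}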