void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id);
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
-				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
-			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence);
void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj);
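
With the resource_id parameter dropped from both prototypes, the id is read from virtio_gpu_object->hw_res_handle instead, so callers must have the handle populated (via virtio_gpu_resource_id_get()) before issuing either command. A minimal sketch of the resulting call order, with error handling elided:

	virtio_gpu_resource_id_get(vgdev, &obj->hw_res_handle);
	virtio_gpu_cmd_create_resource(vgdev, obj, format, width, height);
	ret = virtio_gpu_object_attach(vgdev, obj, NULL);

The hunks below update each caller to exactly this pattern.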
		return PTR_ERR(obj);

	virtio_gpu_resource_id_get(vgdev, &obj->hw_res_handle);
-	virtio_gpu_cmd_create_resource(vgdev, obj, obj->hw_res_handle, format,
+	virtio_gpu_cmd_create_resource(vgdev, obj, format,
				       mode_cmd.width, mode_cmd.height);

	ret = virtio_gpu_object_kmap(obj);
	}

	/* attach the object to the resource */
-	ret = virtio_gpu_object_attach(vgdev, obj, obj->hw_res_handle, NULL);
+	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
	if (ret)
		goto err_obj_attach;
	format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888);
	obj = gem_to_virtio_gpu_obj(gobj);
	virtio_gpu_resource_id_get(vgdev, &obj->hw_res_handle);
-	virtio_gpu_cmd_create_resource(vgdev, obj, obj->hw_res_handle, format,
+	virtio_gpu_cmd_create_resource(vgdev, obj, format,
				       args->width, args->height);

	/* attach the object to the resource */
-	ret = virtio_gpu_object_attach(vgdev, obj, obj->hw_res_handle, NULL);
+	ret = virtio_gpu_object_attach(vgdev, obj, NULL);
	if (ret)
		goto fail;
	virtio_gpu_resource_id_get(vgdev, &qobj->hw_res_handle);
	if (!vgdev->has_virgl_3d) {
-		virtio_gpu_cmd_create_resource(vgdev, qobj, qobj->hw_res_handle, rc->format,
+		virtio_gpu_cmd_create_resource(vgdev, qobj, rc->format,
					       rc->width, rc->height);

-		ret = virtio_gpu_object_attach(vgdev, qobj, qobj->hw_res_handle, NULL);
+		ret = virtio_gpu_object_attach(vgdev, qobj, NULL);
	} else {
		/* use a gem reference since unref list undoes them */
		drm_gem_object_get(&qobj->gem_base);

		rc_3d.flags = cpu_to_le32(rc->flags);
		virtio_gpu_cmd_resource_create_3d(vgdev, qobj, &rc_3d, NULL);
-		ret = virtio_gpu_object_attach(vgdev, qobj, qobj->hw_res_handle, &fence);
+		ret = virtio_gpu_object_attach(vgdev, qobj, &fence);
		if (ret) {
			ttm_eu_backoff_reservation(&ticket, &validate_list);
			goto fail_unref;
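
Note the asymmetry in the ioctl path above: the 2D branch attaches with a NULL fence, while the 3D branch passes &fence, which the surrounding code (not shown) uses to fence the reserved buffers via ttm_eu_fence_buffer_objects() once the attach command completes.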
	} else if (new_mem->placement & TTM_PL_FLAG_TT) {
		if (bo->hw_res_handle) {
-			virtio_gpu_object_attach(vgdev, bo, bo->hw_res_handle,
-						 NULL);
+			virtio_gpu_object_attach(vgdev, bo, NULL);
		}
	}
}
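
This hunk is the move-notify path: when TTM moves a buffer back into TTM_PL_FLAG_TT placement, its backing pages are re-attached to the existing host resource. The bo->hw_res_handle guard skips buffers whose host resource has not been created yet, and the same field now doubles as the attach argument, which is what made the explicit parameter redundant.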
/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
-				    uint32_t resource_id,
				    uint32_t format,
				    uint32_t width,
				    uint32_t height)

	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
-	cmd_p->resource_id = cpu_to_le32(resource_id);
+	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(format);
	cmd_p->width = cpu_to_le32(width);
	cmd_p->height = cpu_to_le32(height);
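
For reference, the command being filled in is the fixed little-endian wire format from include/uapi/linux/virtio_gpu.h, which is why every field goes through cpu_to_le32(); only the source of resource_id changes:

	struct virtio_gpu_resource_create_2d {
		struct virtio_gpu_ctrl_hdr hdr;
		__le32 resource_id;
		__le32 format;
		__le32 width;
		__le32 height;
	};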
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
-			     uint32_t resource_id,
			     struct virtio_gpu_fence **fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

		ents[si].padding = 0;
	}

-	virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
+	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
-	obj->hw_res_handle = resource_id;
	return 0;
}
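
The last removed line is the semantic heart of the change: virtio_gpu_object_attach() no longer assigns hw_res_handle as a side effect. The handle is now set once by the id allocator before any command references it, and every consumer reads it from the object. A plausible shape for that allocator, assuming an IDA on the device (this helper is not part of the diff, and it mirrors the call sites above in ignoring allocation failure):

	static void virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
					       uint32_t *resid)
	{
		/* assumed: ids come from a device-wide IDA, id 0 reserved */
		*resid = ida_simple_get(&vgdev->resource_ida, 1, 0, GFP_KERNEL);
	}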