Give the code that moves a BO into place an operation context to work with: replace the interruptible and no_wait_gpu booleans of ttm_bo_validate() with a struct ttm_operation_ctx pointer that carries them.
v2: rebased
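A minimal sketch of the new calling convention, for reference (the helper name my_driver_validate_interruptible() is hypothetical, purely for illustration):

	#include <drm/ttm/ttm_bo_api.h>

	static int my_driver_validate_interruptible(struct ttm_buffer_object *bo,
						    struct ttm_placement *placement)
	{
		/* Equivalent to the old ttm_bo_validate(bo, placement, true, false):
		 * sleep interruptibly, don't return early while the GPU is busy.
		 */
		struct ttm_operation_ctx ctx = { true, false };

		return ttm_bo_validate(bo, placement, &ctx);
	}

The context lives on the caller's stack, so additional operation parameters can later be added to struct ttm_operation_ctx without changing the function signature at every call site again.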
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Dieter Nützel <Dieter@nuetzel-hh.de>
Tested-by: Michel Dänzer <michel.daenzer@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { true, false };
u64 initial_bytes_moved, bytes_moved;
uint32_t domain;
int r;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
p->bytes_moved += bytes_moved;
struct amdgpu_bo *validated)
{
uint32_t domain = validated->allowed_domains;
+ struct ttm_operation_ctx ctx = { true, false };
int r;
if (!p->evictable)
bo->tbo.mem.mem_type == TTM_PL_VRAM &&
bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
bytes_moved = atomic64_read(&adev->num_bytes_moved) -
initial_bytes_moved;
p->bytes_moved += bytes_moved;
static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
struct list_head *validated)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_bo_list_entry *lobj;
int r;
lobj->user_pages) {
amdgpu_ttm_placement_from_domain(bo,
AMDGPU_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
- false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
struct amdgpu_bo_va_mapping **map)
{
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va_mapping *mapping;
int r;
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
- r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
- false);
+ r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
}
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct amdgpu_device *adev = dev->dev_private;
struct drm_amdgpu_gem_userptr *args = data;
struct drm_gem_object *gobj;
goto free_pages;
amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
goto free_pages;
int amdgpu_bo_validate(struct amdgpu_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
uint32_t domain;
int r;
retry:
amdgpu_ttm_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
goto retry;
u64 *gpu_addr)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p pin failed\n", bo);
goto error;
int amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (!bo->pin_count) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r)) {
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
goto error;
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo;
unsigned long offset, size;
int r;
abo->placement.num_busy_placement = 1;
abo->placement.busy_placement = &abo->placements[1];
- r = ttm_bo_validate(bo, &abo->placement, false, false);
+ r = ttm_bo_validate(bo, &abo->placement, &ctx);
if (unlikely(r != 0))
return r;
*/
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
+ struct ttm_operation_ctx tctx = { false, false };
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint32_t cmd;
}
amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
}
return r;
static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
bool direct, struct dma_fence **fence)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
amdgpu_uvd_force_into_uvd_segment(bo);
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto err;
int lo, int hi, unsigned size, int32_t index)
{
int64_t offset = ((uint64_t)size) * ((int64_t)index);
+ struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo_va_mapping *mapping;
unsigned i, fpfn, lpfn;
struct amdgpu_bo *bo;
bo->placements[i].lpfn = bo->placements[i].fpfn ?
min(bo->placements[i].fpfn, lpfn) : lpfn;
}
- return ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
bool direct, struct dma_fence **fence)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct ttm_validate_buffer tv;
struct ww_acquire_ctx ticket;
struct list_head head;
if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto err;
int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
ast_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
int ast_bo_unpin(struct ast_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
}
int ast_bo_push_sysram(struct ast_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
bochs_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
int bochs_bo_unpin(struct bochs_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
cirrus_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
int cirrus_bo_push_sysram(struct cirrus_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
hibmc_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
int hibmc_bo_unpin(struct hibmc_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("validate failed for unpin: %d\n", ret);
return ret;
int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (bo->pin_count) {
mgag200_ttm_placement(bo, pl_flag);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret)
return ret;
int mgag200_bo_unpin(struct mgag200_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
}
int mgag200_bo_push_sysram(struct mgag200_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int i, ret;
if (!bo->pin_count) {
DRM_ERROR("unpin bad %p\n", bo);
for (i = 0; i < bo->placement.num_placement ; i++)
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
- ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+ ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
if (ret) {
DRM_ERROR("pushing to VRAM failed\n");
return ret;
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
bool no_wait_gpu)
{
+ struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
- interruptible, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
if (ret)
return ret;
int ret;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qobj = NULL;
+ struct ttm_operation_ctx ctx = { true, false };
if (update_area->left >= update_area->right ||
update_area->top >= update_area->bottom)
if (!qobj->pin_count) {
qxl_ttm_placement_from_domain(qobj, qobj->type, false);
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
- true, false);
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out;
}
static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
int r;
return 0;
}
qxl_ttm_placement_from_domain(bo, domain, true);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct drm_device *ddev = bo->gem_base.dev;
int r, i;
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r != 0))
dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
return r;
static int qxl_release_validate_bo(struct qxl_bo *bo)
{
+ struct ttm_operation_ctx ctx = { true, false };
int ret;
if (!bo->pin_count) {
qxl_ttm_placement_from_domain(bo, bo->type, false);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
return ret;
}
int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_userptr *args = data;
struct drm_gem_object *gobj;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
up_read(&current->mm->mmap_sem);
if (r)
unsigned long end)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+ struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
/* notification is exclusive, but interval is inclusive */
DRM_ERROR("(%ld) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
DRM_ERROR("(%ld) failed to validate user bo\n", r);
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
u64 *gpu_addr)
{
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
int radeon_bo_unpin(struct radeon_bo *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
int r, i;
if (!bo->pin_count) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (likely(r == 0)) {
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
bo->rdev->vram_pin_size -= radeon_bo_size(bo);
struct ww_acquire_ctx *ticket,
struct list_head *head, int ring)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_bo_list *lobj;
struct list_head duplicates;
int r;
radeon_uvd_force_into_uvd_segment(bo, allowed);
initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
initial_bytes_moved;
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct radeon_device *rdev;
struct radeon_bo *rbo;
unsigned long offset, size, lpfn;
(!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
rbo->placements[i].lpfn = lpfn;
}
- r = ttm_bo_validate(bo, &rbo->placement, false, false);
+ r = ttm_bo_validate(bo, &rbo->placement, &ctx);
if (unlikely(r == -ENOMEM)) {
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
- return ttm_bo_validate(bo, &rbo->placement, false, false);
+ return ttm_bo_validate(bo, &rbo->placement, &ctx);
} else if (unlikely(r != 0)) {
return r;
}
static int radeon_vm_clear_bo(struct radeon_device *rdev,
struct radeon_bo *bo)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct radeon_ib ib;
unsigned entries;
uint64_t addr;
if (r)
return r;
- r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
goto error_unreserve;
EXPORT_SYMBOL(ttm_bo_mem_compat);
int ttm_bo_validate(struct ttm_buffer_object *bo,
- struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu)
+ struct ttm_placement *placement,
+ struct ttm_operation_ctx *ctx)
{
int ret;
uint32_t new_flags;
* Check whether we need to move buffer.
*/
if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
- ret = ttm_bo_move_buffer(bo, placement, interruptible,
- no_wait_gpu);
+ ret = ttm_bo_move_buffer(bo, placement, ctx->interruptible,
+ ctx->no_wait_gpu);
if (ret)
return ret;
} else {
WARN_ON(!locked);
}
- if (likely(!ret))
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ if (likely(!ret)) {
+ struct ttm_operation_ctx ctx = { interruptible, false };
+
+ ret = ttm_bo_validate(bo, placement, &ctx);
+ }
if (unlikely(ret)) {
if (!resv)
static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
struct list_head *head)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct ttm_validate_buffer *buf;
struct ttm_buffer_object *bo;
struct virtio_gpu_object *qobj;
list_for_each_entry(buf, head, head) {
bo = buf->bo;
qobj = container_of(bo, struct virtio_gpu_object, tbo);
- ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+ ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
if (ret) {
ttm_eu_backoff_reservation(ticket, head);
return ret;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_from_host *args = data;
+ struct ttm_operation_ctx ctx = { true, false };
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
struct virtio_gpu_fence *fence;
if (ret)
goto out;
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
- true, false);
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out_unres;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct drm_virtgpu_3d_transfer_to_host *args = data;
+ struct ttm_operation_ctx ctx = { true, false };
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
struct virtio_gpu_fence *fence;
if (ret)
goto out;
- ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
- true, false);
+ ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
if (unlikely(ret))
goto out_unres;
*/
static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
{
+ struct ttm_operation_ctx ctx = { false, false };
struct vmw_private *dev_priv = res->dev_priv;
struct vmw_cotable *vcotbl = vmw_cotable(res);
struct vmw_dma_buffer *buf, *old_buf = res->backup;
}
/* Unpin new buffer, and switch backup buffers. */
- ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+ ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed validating new COTable backup buffer.\n");
goto out_wait;
struct ttm_placement *placement,
bool interruptible)
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
ret = ttm_bo_mem_compat(placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
- ret = ttm_bo_validate(bo, placement, interruptible, false);
+ ret = ttm_bo_validate(bo, placement, &ctx);
if (!ret)
vmw_bo_pin_reserved(buf, true);
struct vmw_dma_buffer *buf,
bool interruptible)
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
uint32_t new_flags;
goto out_unreserve;
}
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
- false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0) || ret == -ERESTARTSYS)
goto out_unreserve;
- ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
out_unreserve:
if (!ret)
struct vmw_dma_buffer *buf,
bool interruptible)
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
struct ttm_placement placement;
struct ttm_place place;
if (bo->mem.mem_type == TTM_PL_VRAM &&
bo->mem.start < bo->num_pages &&
bo->mem.start > 0 &&
- buf->pin_count == 0)
- (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+ buf->pin_count == 0) {
+ ctx.interruptible = false;
+ (void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+ }
if (buf->pin_count > 0)
ret = ttm_bo_mem_compat(&placement, &bo->mem,
&new_flags) == true ? 0 : -EINVAL;
else
- ret = ttm_bo_validate(bo, &placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &placement, &ctx);
/* For some reason we didn't end up at the start of vram */
WARN_ON(ret == 0 && bo->offset != 0);
*/
void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
{
+ struct ttm_operation_ctx ctx = { false, true };
struct ttm_place pl;
struct ttm_placement placement;
struct ttm_buffer_object *bo = &vbo->base;
placement.num_placement = 1;
placement.placement = &pl;
- ret = ttm_bo_validate(bo, &placement, false, true);
+ ret = ttm_bo_validate(bo, &placement, &ctx);
BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
}
{
struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
base);
+ struct ttm_operation_ctx ctx = { interruptible, false };
int ret;
if (vbo->pin_count > 0)
return 0;
if (validate_as_mob)
- return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
- false);
+ return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
/**
* Put BO in VRAM if there is space, otherwise as a GMR.
* used as a GMR, this will return -ENOMEM.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
- false);
+ ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;
* previous contents.
*/
- ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+ ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
return ret;
}
bool interruptible,
struct ttm_validate_buffer *val_buf)
{
+ struct ttm_operation_ctx ctx = { true, false };
struct list_head val_list;
bool backup_dirty = false;
int ret;
backup_dirty = res->backup_dirty;
ret = ttm_bo_validate(&res->backup->base,
res->func->backup_placement,
- true, false);
+ &ctx);
if (unlikely(ret != 0))
goto out_no_validate;
*/
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
+ struct ttm_operation_ctx ctx = { interruptible, false };
struct vmw_private *dev_priv = res->dev_priv;
int ret;
ret = ttm_bo_validate
(&vbo->base,
res->func->backup_placement,
- interruptible, false);
+ &ctx);
if (ret) {
ttm_bo_unreserve(&vbo->base);
goto out_no_validate;
size_t size,
struct list_head *list)
{
+ struct ttm_operation_ctx ctx = { false, true };
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
- ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
struct ttm_buffer_object *bo;
};
+/**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+ bool interruptible;
+ bool no_wait_gpu;
+};
+
/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
*
* @bo: The buffer object.
* @placement: Proposed placement for the buffer object.
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @ctx: Validation parameters.
*
* Changes placement and caching policy of the buffer object
* according to the proposed placement.
*/
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
- bool interruptible,
- bool no_wait_gpu);
+ struct ttm_operation_ctx *ctx);
/**
* ttm_bo_unref