amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates);
+ false, &ctx->duplicates, true);
if (!ret)
ctx->reserved = true;
	else {
		pr_err("Failed to reserve buffers in ttm\n");
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}
ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
- false, &ctx->duplicates);
+ false, &ctx->duplicates, true);
if (!ret)
ctx->reserved = true;
	else
		pr_err("Failed to reserve buffers in ttm.\n");

	if (ret) {
		kfree(ctx->vm_pd);
		ctx->vm_pd = NULL;
	}
/* Reserve all BOs and page tables for validation */
- ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
+ ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
+ true);
WARN(!list_empty(&duplicates), "Duplicates should be empty");
if (ret)
goto out_free;
}
ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
- false, &duplicate_save);
+ false, &duplicate_save, true);
if (ret) {
pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
goto ttm_reserve_fail;
}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates);
+ &duplicates, true);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
if (r)
goto error_unref;
return 0;
ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
- !no_intr, NULL);
+ !no_intr, NULL, true);
if (ret)
return ret;
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
if (r)
goto error_free;
u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+ r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
if (unlikely(r != 0)) {
return r;
}
reservation_object_assert_held(bo->resv);
- if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
- BUG_ON(!list_empty(&bo->lru));
+ if (!list_empty(&bo->lru))
+ return;
- man = &bdev->man[bo->mem.mem_type];
- list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+ return;
- if (bo->ttm && !(bo->ttm->page_flags &
- (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
- list_add_tail(&bo->swap,
- &bdev->glob->swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
- }
+ man = &bdev->man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru[bo->priority]);
+ kref_get(&bo->list_kref);
+
+ if (bo->ttm && !(bo->ttm->page_flags &
+ (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+ list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+ kref_get(&bo->list_kref);
}
}
EXPORT_SYMBOL(ttm_bo_add_to_lru);
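With the old BUG_ON() gone, ttm_bo_add_to_lru() now simply returns when the BO is already on an LRU list, so it is safe to call unconditionally. Callers that also want "most recently used" semantics pair it with ttm_bo_move_to_lru_tail(), as the ttm_eu_fence_buffer_objects() and ttm_bo_unreserve() hunks further down do. A minimal sketch of that add-or-bump pattern (the helper name is invented for illustration, not part of this patch):

/* Hypothetical helper: put a reserved BO back on, or bump it within, the LRU. */
static void example_bo_restore_lru(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->glob->lru_lock);
	if (list_empty(&bo->lru))
		ttm_bo_add_to_lru(bo);		/* was taken off the LRU */
	else
		ttm_bo_move_to_lru_tail(bo, NULL); /* still on it: move to tail */
	spin_unlock(&bo->bdev->glob->lru_lock);
}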
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups)
+ struct list_head *dups, bool del_lru)
{
struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
list_add(&entry->head, list);
}
- if (ticket)
- ww_acquire_done(ticket);
- spin_lock(&glob->lru_lock);
- ttm_eu_del_from_lru_locked(list);
- spin_unlock(&glob->lru_lock);
+ if (del_lru) {
+ spin_lock(&glob->lru_lock);
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ }
return 0;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
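Every call site converted by this patch passes del_lru = true, so behavior is unchanged for now; the new parameter exists so that a caller can keep its BOs on the LRU while they are reserved. A hedged sketch of what such a caller could look like (the function name and the validation step are placeholders):

/* Illustrative only: reserve a list of BOs without removing them from the LRU. */
static int example_reserve_keep_on_lru(struct ww_acquire_ctx *ticket,
				       struct list_head *list)
{
	struct list_head duplicates;
	int r;

	INIT_LIST_HEAD(&duplicates);

	/* del_lru == false: the BOs remain visible to eviction walkers */
	r = ttm_eu_reserve_buffers(ticket, list, true, &duplicates, false);
	if (r)
		return r;

	/* ... validate or otherwise use the buffers here ... */

	ttm_eu_backoff_reservation(ticket, list);
	return 0;
}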
reservation_object_add_shared_fence(bo->resv, fence);
else
reservation_object_add_excl_fence(bo->resv, fence);
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
+ else
+ ttm_bo_move_to_lru_tail(bo, NULL);
reservation_object_unlock(bo->resv);
}
spin_unlock(&glob->lru_lock);
struct virtio_gpu_object *qobj;
int ret;
- ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
if (ret != 0)
return ret;
val_buf->bo = &res->backup->base;
val_buf->num_shared = 0;
list_add_tail(&val_buf->head, &val_list);
- ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
+ ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
+ true);
if (unlikely(ret != 0))
goto out_no_reserve;
bool intr)
{
return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
- NULL);
+ NULL, true);
}
/**
{
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
spin_lock(&bo->bdev->glob->lru_lock);
- ttm_bo_add_to_lru(bo);
+ if (list_empty(&bo->lru))
+ ttm_bo_add_to_lru(bo);
+ else
+ ttm_bo_move_to_lru_tail(bo, NULL);
spin_unlock(&bo->bdev->glob->lru_lock);
}
reservation_object_unlock(bo->resv);
* @list: thread private list of ttm_validate_buffer structs.
* @intr: should the wait be interruptible
* @dups: [out] optional list of duplicates.
+ * @del_lru: true if BOs should be removed from the LRU.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups);
+ struct list_head *dups, bool del_lru);
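For the common case a converted driver path looks like the call sites above: reserve with del_lru = true, submit work, then fence the buffers, which puts them back on (or bumps them within) the LRU. A compact usage sketch (example_submit and my_fence are placeholder names, not part of this patch):

/* Placeholder submission path showing the updated prototype in use. */
static int example_submit(struct ww_acquire_ctx *ticket,
			  struct list_head *validated,
			  struct dma_fence *my_fence)
{
	int r;

	r = ttm_eu_reserve_buffers(ticket, validated, true, NULL, true);
	if (r)
		return r;

	/* ... build and submit the command stream ... */

	ttm_eu_fence_buffer_objects(ticket, validated, my_fence);
	return 0;
}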
/**
* function ttm_eu_fence_buffer_objects.