}
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
- &duplicates, true);
+ &duplicates, false);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
list_add(&csa_tv.head, &list);
amdgpu_vm_get_pd_bo(vm, &list, &pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false);
if (r) {
DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
return r;
amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false);
if (r) {
dev_err(adev->dev, "leaking bo va because "
"we fail to reserve bo (%d)\n", r);
amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
- r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false);
if (r)
goto error_unref;
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
int r;
- r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
+ r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(adev->dev, "%p reserve failed\n", bo);
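The last hunk switches amdgpu_bo_reserve() from ttm_bo_reserve() to __ttm_bo_reserve(). In older TTM code, ttm_bo_reserve() was roughly a wrapper that took the reservation and then dropped the BO from the LRU, while __ttm_bo_reserve() only takes the lock. A simplified sketch, assuming those older inline helpers (checks such as the kref warning are omitted):

	/* Simplified sketch of the assumed wrapper: reserve, then remove the BO
	 * from the LRU on success. __ttm_bo_reserve() performs only the first step. */
	static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
					 bool interruptible, bool no_wait,
					 struct ww_acquire_ctx *ticket)
	{
		int ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);

		if (likely(ret == 0))
			ttm_bo_del_sub_from_lru(bo);
		return ret;
	}

Calling __ttm_bo_reserve() directly therefore keeps the BO on the LRU while it is reserved, consistent with the false (keep-on-LRU) value passed to ttm_eu_reserve_buffers() in the hunks above.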