*
* @adev: amdgpu_device pointer
* @vm: requested vm
+ * @fence: optional resulting fence (unchanged if no work needed to be done
+ * or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
* Returns 0 for success.
*
* PTs have to be reserved and mutex must be locked!
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm)
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence)
{
struct amdgpu_bo_va_mapping *mapping;
- struct dma_fence *fence = NULL;
+ struct dma_fence *f = NULL;
int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
- 0, 0, &fence);
- amdgpu_vm_free_mapping(adev, vm, mapping, fence);
+ 0, 0, &f);
+ amdgpu_vm_free_mapping(adev, vm, mapping, f);
if (r) {
- dma_fence_put(fence);
+ dma_fence_put(f);
return r;
}
+ }
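+
+	/*
+	 * Hand the last fence back to the caller: release the fence the
+	 * caller may already hold and replace it with ours. Otherwise
+	 * drop our reference; dma_fence_put() safely ignores NULL.
+	 */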
+ if (fence && f) {
+ dma_fence_put(*fence);
+ *fence = f;
+ } else {
+ dma_fence_put(f);
}
- dma_fence_put(fence);
+
return 0;
}
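
A minimal sketch of how a caller might consume the new out-parameter
(illustrative only, not part of this patch; adev and vm are assumed to be
in scope and the VM reservation held, as the kerneldoc requires). Callers
that do not need the fence can still pass NULL. The wait shown here is one
possible policy; a real caller might instead schedule against the fence.

	struct dma_fence *fence = NULL;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, &fence);
	if (r)
		return r;	/* fence is left untouched on error */

	if (fence) {
		/* e.g. block until the page-table updates have completed */
		r = dma_fence_wait(fence, false);
		dma_fence_put(fence);
		if (r)
			return r;
	}
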
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
- struct amdgpu_vm *vm);
+ struct amdgpu_vm *vm,
+ struct dma_fence **fence);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,