void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring);
+void radeon_vm_flush(struct radeon_device *rdev,
+                     struct radeon_vm *vm,
+                     int ring);
void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence);
                r = radeon_ib_schedule(rdev, &parser->ib, NULL);
        }
-       if (!r) {
-               radeon_vm_fence(rdev, vm, parser->ib.fence);
-       }
-
out:
        radeon_vm_add_to_lru(rdev, vm);
        mutex_unlock(&vm->mutex);
        return r;
}
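With the fence handling moved into radeon_ib_schedule(), the CS path above no longer needs the `if (!r)` check: on success the scheduler flushes and fences the VM itself, on failure the VM is left untouched. A condensed sketch of the resulting tail of the CS function, not the literal code (the grab-id and page-table update steps are not part of this hunk and are only hinted at):

    mutex_lock(&vm->mutex);
    /* ... reserve a VM id and write the page table updates (not shown) ... */
    r = radeon_ib_schedule(rdev, &parser->ib, NULL);
    /* no radeon_vm_fence() call here any more: on success the scheduler
     * has already flushed and fenced the VM on our behalf */
    radeon_vm_add_to_lru(rdev, vm);
    mutex_unlock(&vm->mutex);
    return r;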
-       /* if we can't remember our last VM flush then flush now! */
-       /* XXX figure out why we have to flush for every IB */
-       if (ib->vm /*&& !ib->vm->last_flush*/) {
-               radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
-       }
+       if (ib->vm)
+               radeon_vm_flush(rdev, ib->vm, ib->ring);
+
        if (const_ib) {
                radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
                radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
        if (const_ib) {
                const_ib->fence = radeon_fence_ref(ib->fence);
        }
-       /* we just flushed the VM, remember that */
-       if (ib->vm && !ib->vm->last_flush) {
-               ib->vm->last_flush = radeon_fence_ref(ib->fence);
-       }
+
+       if (ib->vm)
+               radeon_vm_fence(rdev, ib->vm, ib->fence);
+
        radeon_ring_unlock_commit(rdev, ring);
        return 0;
}
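Read together, the two radeon_ib_schedule() hunks bracket the IBs with the VM work: the flush is emitted on the ring before any IB executes, and radeon_vm_fence() runs only once the IB's fence exists (the execution of the main IB and the fence emit sit between the hunks and are not shown here). A condensed sketch of that ordering, with the elided steps marked:

    if (ib->vm)
        radeon_vm_flush(rdev, ib->vm, ib->ring);    /* before the IBs run */

    if (const_ib)
        radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
    /* ... execute the main IB and emit ib->fence (elided above) ... */

    if (ib->vm)
        radeon_vm_fence(rdev, ib->vm, ib->fence);   /* fence now exists */

    radeon_ring_unlock_commit(rdev, ring);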
        return NULL;
}
+/**
+ * radeon_vm_flush - hardware flush the vm
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm we want to flush
+ * @ring: ring to use for flush
+ *
+ * Flush the vm (cayman+).
+ *
+ * Global and local mutex must be locked!
+ */
+void radeon_vm_flush(struct radeon_device *rdev,
+                     struct radeon_vm *vm,
+                     int ring)
+{
+       /* if we can't remember our last VM flush then flush now! */
+       /* XXX figure out why we have to flush all the time */
+       if (!vm->last_flush || true)
+               radeon_ring_vm_flush(rdev, ring, vm);
+}
+
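radeon_vm_flush() still flushes unconditionally for now (the `|| true` keeps the XXX behaviour of the code it replaces); the new kernel-doc mainly adds the locking contract. A hypothetical caller sketch, where the lock names are an assumption (only vm->mutex is visible in the CS hunk above):

    mutex_lock(&rdev->vm_manager.lock);   /* "global" mutex (assumed name) */
    mutex_lock(&vm->mutex);               /* "local" per-VM mutex */

    radeon_vm_flush(rdev, vm, ring);      /* emits the VM flush on @ring */

    mutex_unlock(&vm->mutex);
    mutex_unlock(&rdev->vm_manager.lock);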
/**
* radeon_vm_fence - remember fence for vm
*
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
{
-       radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
-       rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
-
        radeon_fence_unref(&vm->fence);
        vm->fence = radeon_fence_ref(fence);
+       radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
+       rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
+
        radeon_fence_unref(&vm->last_id_use);
        vm->last_id_use = radeon_fence_ref(fence);
+
+       /* we just flushed the VM, remember that */
+       if (!vm->last_flush)
+               vm->last_flush = radeon_fence_ref(fence);
}
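The two helpers are meant to pair up around an IB: radeon_vm_flush() emits the flush beforehand, and radeon_vm_fence() afterwards records the IB's fence, taking an extra reference as last_flush the first time around. Once the XXX in radeon_vm_flush() is understood, that reference is what would let redundant flushes be skipped, i.e. the check the commented-out condition in the removed code was already hinting at:

    /* what the flush would look like without the "|| true": skip the
     * hardware flush while a flush fence is still remembered for this VM */
    if (!vm->last_flush)
        radeon_ring_vm_flush(rdev, ring, vm);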
/**