{
        base->vm = vm;
        base->bo = bo;
-       INIT_LIST_HEAD(&base->bo_list);
+       base->next = NULL;
        INIT_LIST_HEAD(&base->vm_status);

        if (!bo)
                return;
-       list_add_tail(&base->bo_list, &bo->va);
+       base->next = bo->vm_bo;
+       bo->vm_bo = base;

        if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
                return;

        if (!parent)
                return NULL;

-       return list_first_entry(&parent->va, struct amdgpu_vm_pt, base.bo_list);
+       return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
}
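
The two hunks above touch amdgpu_vm_bo_base_init() and amdgpu_vm_pt_parent(): a new mapping is now prepended to the BO's single linked list (base->next = bo->vm_bo; bo->vm_bo = base), and the parent page table is recovered from the head of that list. The structure changes this relies on are not part of this excerpt; the sketch below shows what they presumably look like, with the member names (next, vm_bo) taken from the code above and everything else an assumption:

/* Sketch only; the real definitions live in amdgpu_vm.h and amdgpu_object.h,
 * and unrelated members are omitted. */
struct amdgpu_vm;
struct amdgpu_bo;

struct amdgpu_vm_bo_base {
        struct amdgpu_vm         *vm;   /* VM this mapping belongs to */
        struct amdgpu_bo         *bo;   /* mapped BO, may be NULL */
        struct amdgpu_vm_bo_base *next; /* next mapping of the same BO,
                                         * replaces the old bo_list entry */
        /* vm_status list head and the remaining members unchanged */
};

/* struct amdgpu_bo presumably replaces "struct list_head va" with the head
 * of this chain:
 *
 *        struct amdgpu_vm_bo_base *vm_bo;
 */

Since a page table BO is only ever mapped by the VM that owns it, its list has exactly one entry, which is why amdgpu_vm_pt_parent() can feed parent->vm_bo straight into container_of().
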
        for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {

                if (entry->base.bo) {
-                       list_del(&entry->base.bo_list);
+                       entry->base.bo->vm_bo = NULL;
                        list_del(&entry->base.vm_status);
                        amdgpu_bo_unref(&entry->base.bo->shadow);
                        amdgpu_bo_unref(&entry->base.bo);

struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
                                       struct amdgpu_bo *bo)
{
-       struct amdgpu_bo_va *bo_va;
+       struct amdgpu_vm_bo_base *base;

-       list_for_each_entry(bo_va, &bo->va, base.bo_list) {
-               if (bo_va->base.vm == vm) {
-                       return bo_va;
-               }
+       for (base = bo->vm_bo; base; base = base->next) {
+               if (base->vm != vm)
+                       continue;
+
+               return container_of(base, struct amdgpu_bo_va, base);
        }

        return NULL;
}
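
To make the new lookup pattern concrete, here is a self-contained user-space sketch of the same idiom amdgpu_vm_bo_find() now uses: walk the per-BO single linked list of embedded bases and recover the enclosing object with container_of(). All names here (vm, vm_bo_base, bo_va, bo_find) are stand-ins, not driver code:

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's container_of(): recover the struct
 * that embeds "member" from a pointer to that member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct vm { int id; };                  /* stand-in for amdgpu_vm */

struct vm_bo_base {                     /* stand-in for amdgpu_vm_bo_base */
        struct vm *vm;
        struct vm_bo_base *next;
};

struct bo_va {                          /* stand-in for amdgpu_bo_va */
        int payload;
        struct vm_bo_base base;         /* embedded base, as in the driver */
};

/* Same shape as the new amdgpu_vm_bo_find(): walk the per-BO list and
 * return the enclosing bo_va that belongs to the requested VM. */
static struct bo_va *bo_find(struct vm_bo_base *head, struct vm *vm)
{
        struct vm_bo_base *base;

        for (base = head; base; base = base->next) {
                if (base->vm != vm)
                        continue;

                return container_of(base, struct bo_va, base);
        }
        return NULL;
}

int main(void)
{
        struct vm vm_a = { 1 }, vm_b = { 2 };
        struct bo_va va_a = { .payload = 10, .base = { .vm = &vm_a } };
        struct bo_va va_b = { .payload = 20, .base = { .vm = &vm_b } };
        struct vm_bo_base *head = NULL;

        /* Prepend both mappings, mirroring amdgpu_vm_bo_base_init(). */
        va_a.base.next = head; head = &va_a.base;
        va_b.base.next = head; head = &va_b.base;

        printf("%d\n", bo_find(head, &vm_a)->payload);  /* prints 10 */
        return 0;
}
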
        struct amdgpu_bo_va_mapping *mapping, *next;
        struct amdgpu_bo *bo = bo_va->base.bo;
        struct amdgpu_vm *vm = bo_va->base.vm;
+       struct amdgpu_vm_bo_base **base;

-       if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv)
-               vm->bulk_moveable = false;
+       if (bo) {
+               if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+                       vm->bulk_moveable = false;

-       list_del(&bo_va->base.bo_list);
+               for (base = &bo_va->base.bo->vm_bo; *base;
+                    base = &(*base)->next) {
+                       if (*base != &bo_va->base)
+                               continue;
+
+                       *base = bo_va->base.next;
+                       break;
+               }
+       }

        spin_lock(&vm->invalidated_lock);
        list_del(&bo_va->base.vm_status);
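
The removal above uses the classic pointer-to-pointer idiom: base iterates over the address of each next field, starting at &bo->vm_bo, so unlinking any element, including the list head, is a single store and needs no prev pointer. A standalone sketch of that idiom (stand-in struct node and unlink(), not driver code):

#include <assert.h>
#include <stddef.h>

struct node {
        int id;
        struct node *next;
};

/* Same idiom as the unlink loop added to amdgpu_vm_bo_rmv(): walk the
 * address of each next pointer (starting with the list head itself), so
 * removing "victim" is one assignment and the head is not a special case. */
static void unlink(struct node **head, struct node *victim)
{
        struct node **indirect;

        for (indirect = head; *indirect; indirect = &(*indirect)->next) {
                if (*indirect != victim)
                        continue;

                *indirect = victim->next;
                break;
        }
}

int main(void)
{
        struct node c = { 3, NULL };
        struct node b = { 2, &c };
        struct node a = { 1, &b };
        struct node *head = &a;

        unlink(&head, &b);              /* unlink from the middle */
        assert(head == &a && a.next == &c);

        unlink(&head, &a);              /* unlinking the head also just works */
        assert(head == &c);
        return 0;
}
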
        if (bo->parent && bo->parent->shadow == bo)
                bo = bo->parent;

-       list_for_each_entry(bo_base, &bo->va, bo_list) {
+       for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
                struct amdgpu_vm *vm = bo_base->vm;

                if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {