drm/amdgpu: Enable XGMI mapping for peer device
author: shaoyunl <shaoyun.liu@amd.com>
Fri, 22 Feb 2019 21:20:38 +0000 (16:20 -0500)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 19 Mar 2019 20:36:48 +0000 (15:36 -0500)
Adjust the vram base offset for XGMI mapping when updating the PT entry so
that the address falls into the correct XGMI aperture for the peer device.

Signed-off-by: shaoyunl <shaoyun.liu@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 1b5a2b29c95b5b890bd363dcb45fd33694422253..67d638fef1fb191fc22e62d07b436a1b6ada3928 100644 (file)
@@ -1876,6 +1876,7 @@ error_free:
  * @vm: requested vm
  * @mapping: mapped range and flags to use for the update
  * @flags: HW flags for the mapping
+ * @bo_adev: amdgpu_device pointer that bo actually been allocated
  * @nodes: array of drm_mm_nodes with the MC addresses
  * @fence: optional resulting fence
  *
@@ -1891,6 +1892,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                                      struct amdgpu_vm *vm,
                                      struct amdgpu_bo_va_mapping *mapping,
                                      uint64_t flags,
+                                     struct amdgpu_device *bo_adev,
                                      struct drm_mm_node *nodes,
                                      struct dma_fence **fence)
 {
@@ -1965,7 +1967,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
                        }
 
                } else if (flags & AMDGPU_PTE_VALID) {
-                       addr += adev->vm_manager.vram_base_offset;
+                       addr += bo_adev->vm_manager.vram_base_offset;
                        addr += pfn << PAGE_SHIFT;
                }
 
@@ -2012,6 +2014,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        struct drm_mm_node *nodes;
        struct dma_fence *exclusive, **last_update;
        uint64_t flags;
+       struct amdgpu_device *bo_adev = adev;
        int r;
 
        if (clear || !bo) {
@@ -2030,10 +2033,12 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
                exclusive = reservation_object_get_excl(bo->tbo.resv);
        }
 
-       if (bo)
+       if (bo) {
                flags = amdgpu_ttm_tt_pte_flags(adev, bo->tbo.ttm, mem);
-       else
+               bo_adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       } else {
                flags = 0x0;
+       }
 
        if (clear || (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv))
                last_update = &vm->last_update;
@@ -2050,7 +2055,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
 
        list_for_each_entry(mapping, &bo_va->invalids, list) {
                r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
-                                              mapping, flags, nodes,
+                                              mapping, flags, bo_adev, nodes,
                                               last_update);
                if (r)
                        return r;