drm/amdgpu: distinguish between allocated GART space and GMC addr
Author: Christian König <christian.koenig@amd.com>
Mon, 27 Aug 2018 11:51:27 +0000 (13:51 +0200)
Committer: Alex Deucher <alexander.deucher@amd.com>
Wed, 29 Aug 2018 17:35:37 +0000 (12:35 -0500)
Most of the time we only need to know if the BO has a valid GMC addr.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

index 5ddd4e87480bb010dd92dca2a620ec0d8e8e51a6..b5f20b42439e19c050731ec2fdc1de8f265c2c4f 100644 (file)
@@ -1362,8 +1362,6 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
-                    !amdgpu_gtt_mgr_has_gart_addr(&bo->tbo.mem));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
        WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
index 5cadf4f1ee2ce4225431a34889ae91855a590f96..d9f3201c9e5c26d1dd870e734183d791578c4e50 100644 (file)
@@ -345,7 +345,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
 {
        uint64_t addr = 0;
 
-       if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
+       if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
                addr = mm_node->start << PAGE_SHIFT;
                addr += bo->bdev->man[mem->mem_type].gpu_offset;
        }
@@ -433,8 +433,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                /* Map only what needs to be accessed. Map src to window 0 and
                 * dst to window 1
                 */
-               if (src->mem->mem_type == TTM_PL_TT &&
-                   !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
+               if (src->mem->start == AMDGPU_BO_INVALID_OFFSET) {
                        r = amdgpu_map_buffer(src->bo, src->mem,
                                        PFN_UP(cur_size + src_page_offset),
                                        src_node_start, 0, ring,
@@ -447,8 +446,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
                        from += src_page_offset;
                }
 
-               if (dst->mem->mem_type == TTM_PL_TT &&
-                   !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
+               if (dst->mem->start == AMDGPU_BO_INVALID_OFFSET) {
                        r = amdgpu_map_buffer(dst->bo, dst->mem,
                                        PFN_UP(cur_size + dst_page_offset),
                                        dst_node_start, 1, ring,
@@ -1086,11 +1084,10 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
        uint64_t flags;
        int r;
 
-       if (bo->mem.mem_type != TTM_PL_TT ||
-           amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
+       if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
                return 0;
 
-       /* allocate GTT space */
+       /* allocate GART space */
        tmp = bo->mem;
        tmp.mm_node = NULL;
        placement.num_placement = 1;