drm/amdgpu: use the AGP aperture for system memory access v2
author     Christian König <christian.koenig@amd.com>
           Mon, 27 Aug 2018 16:19:48 +0000 (18:19 +0200)
committer  Alex Deucher <alexander.deucher@amd.com>
           Tue, 11 Sep 2018 03:41:39 +0000 (22:41 -0500)
Start to use the old AGP aperture for system memory access.

v2: move the AGP handling into amdgpu_ttm_alloc_gart

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c

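Background for the diff below: the AGP aperture is a linear window in the GPU's address space that maps directly onto system bus addresses, so a page whose DMA address falls inside the window can be reached by the GPU without first building a GART mapping. A minimal sketch of the translation, assuming (as the helper below does) that the window covers bus addresses [0, agp_size) and starts at agp_start on the GPU side; example_agp_translate is a hypothetical name, not driver code:

	/* Sketch only: translate one page's bus address through the AGP
	 * aperture, or report failure so the caller falls back to GART.
	 */
	static uint64_t example_agp_translate(struct amdgpu_gmc *gmc,
					      dma_addr_t addr)
	{
		/* The window only reaches bus addresses below agp_size. */
		if (addr + PAGE_SIZE >= gmc->agp_size)
			return AMDGPU_BO_INVALID_OFFSET;

		/* GPU address = aperture base + bus address. */
		return gmc->agp_start + addr;
	}
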
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 86887c1496f89523e8058e2c8e24e2035c691f63..6acdeebabfc08e10969a2393785532289153e986 100644
@@ -79,6 +79,29 @@ uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo)
        return pd_addr;
 }
 
+/**
+ * amdgpu_gmc_agp_addr - return the address in the AGP address space
+ *
+ * @bo: TTM BO which needs the address, must be in GTT domain
+ *
+ * Tries to figure out how to access the BO through the AGP aperture. Returns
+ * AMDGPU_BO_INVALID_OFFSET if that is not possible.
+ */
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
+{
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct ttm_dma_tt *ttm;
+
+       if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+               return AMDGPU_BO_INVALID_OFFSET;
+
+       ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
+       if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+               return AMDGPU_BO_INVALID_OFFSET;
+
+       return adev->gmc.agp_start + ttm->dma_address[0];
+}
+
 /**
  * amdgpu_gmc_vram_location - try to find VRAM location
  *
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index baedd6d6266df6c842af0679a030af8340b737b5..17ffc35d13661720ee1aa7059638fb0cb3fdf47d 100644
@@ -165,6 +165,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
 void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
                               uint64_t *addr, uint64_t *flags);
 uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
 void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
                              u64 base);
 void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index d9f3201c9e5c26d1dd870e734183d791578c4e50..8a158ee922f73a7c82df42ff351c7d0b72a5797c 100644
@@ -1081,41 +1081,49 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
        struct ttm_mem_reg tmp;
        struct ttm_placement placement;
        struct ttm_place placements;
-       uint64_t flags;
+       uint64_t addr, flags;
        int r;
 
        if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
                return 0;
 
-       /* allocate GART space */
-       tmp = bo->mem;
-       tmp.mm_node = NULL;
-       placement.num_placement = 1;
-       placement.placement = &placements;
-       placement.num_busy_placement = 1;
-       placement.busy_placement = &placements;
-       placements.fpfn = 0;
-       placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
-       placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
-               TTM_PL_FLAG_TT;
+       addr = amdgpu_gmc_agp_addr(bo);
+       if (addr != AMDGPU_BO_INVALID_OFFSET) {
+               bo->mem.start = addr >> PAGE_SHIFT;
+       } else {
 
-       r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
-       if (unlikely(r))
-               return r;
+               /* allocate GART space */
+               tmp = bo->mem;
+               tmp.mm_node = NULL;
+               placement.num_placement = 1;
+               placement.placement = &placements;
+               placement.num_busy_placement = 1;
+               placement.busy_placement = &placements;
+               placements.fpfn = 0;
+               placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+               placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+                       TTM_PL_FLAG_TT;
+
+               r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+               if (unlikely(r))
+                       return r;
 
-       /* compute PTE flags for this buffer object */
-       flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+               /* compute PTE flags for this buffer object */
+               flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
 
-       /* Bind pages */
-       gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start;
-       r = amdgpu_ttm_gart_bind(adev, bo, flags);
-       if (unlikely(r)) {
-               ttm_bo_mem_put(bo, &tmp);
-               return r;
+               /* Bind pages */
+               gtt->offset = ((u64)tmp.start << PAGE_SHIFT) -
+                       adev->gmc.gart_start;
+               r = amdgpu_ttm_gart_bind(adev, bo, flags);
+               if (unlikely(r)) {
+                       ttm_bo_mem_put(bo, &tmp);
+                       return r;
+               }
+
+               ttm_bo_mem_put(bo, &bo->mem);
+               bo->mem = tmp;
        }
 
-       ttm_bo_mem_put(bo, &bo->mem);
-       bo->mem = tmp;
        bo->offset = (bo->mem.start << PAGE_SHIFT) +
                bo->bdev->man[bo->mem.mem_type].gpu_offset;
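
Callers are unaffected by the new fast path: anything that needs a GPU-visible address for a GTT BO keeps calling amdgpu_ttm_alloc_gart(), and whether the address comes from the AGP aperture or from a bound GART page stays transparent. A hypothetical usage sketch (abo is an assumed struct amdgpu_bo pointer):

	/* Sketch only: make sure the BO has a GPU-visible address. */
	r = amdgpu_ttm_alloc_gart(&abo->tbo);
	if (unlikely(r))
		return r;

	/* abo->tbo.offset now holds the GPU address, no matter which
	 * path amdgpu_ttm_alloc_gart() took.
	 */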