return pd_addr;
}
+/**
+ * amdgpu_gmc_agp_addr - return the address in the AGP address space
+ *
+ * @bo: TTM BO which needs the address, must be in GTT domain
+ *
+ * Tries to figure out how to access the BO through the AGP aperture. Returns
+ * AMDGPU_BO_INVALID_OFFSET if that is not possible.
+ */
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+ struct ttm_dma_tt *ttm;
+
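+ /* only single page, non-cached BOs can use the AGP aperture */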
+ if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached)
+ return AMDGPU_BO_INVALID_OFFSET;
+
+ ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm);
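+ /* the page must lie inside the range of system memory the AGP window covers */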
+ if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size)
+ return AMDGPU_BO_INVALID_OFFSET;
+
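+ /* translate the page's DMA address into the GPU's AGP address range */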
+ return adev->gmc.agp_start + ttm->dma_address[0];
+}
+
/**
* amdgpu_gmc_vram_location - try to find VRAM location
*
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
u64 base);
void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
struct ttm_mem_reg tmp;
struct ttm_placement placement;
struct ttm_place placements;
- uint64_t flags;
+ uint64_t addr, flags;
int r;
if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
return 0;
- /* allocate GART space */
- tmp = bo->mem;
- tmp.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
- TTM_PL_FLAG_TT;
+ addr = amdgpu_gmc_agp_addr(bo);
+ if (addr != AMDGPU_BO_INVALID_OFFSET) {
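+ /* the page is directly reachable through the AGP aperture, no GART binding needed */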
+ bo->mem.start = addr >> PAGE_SHIFT;
+ } else {
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ tmp = bo->mem;
+ tmp.mm_node = NULL;
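+ /* set up a placement that is restricted to the GART aperture */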
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
+ TTM_PL_FLAG_TT;
+
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
- /* Bind pages */
- gtt->offset = ((u64)tmp.start << PAGE_SHIFT) - adev->gmc.gart_start;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_bo_mem_put(bo, &tmp);
- return r;
+ /* Bind pages */
+ gtt->offset = ((u64)tmp.start << PAGE_SHIFT) -
+ adev->gmc.gart_start;
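+ /* update the GART table with the BO's pages */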
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_bo_mem_put(bo, &tmp);
+ return r;
+ }
+
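+ /* release the old placement and adopt the freshly allocated GART node */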
+ ttm_bo_mem_put(bo, &bo->mem);
+ bo->mem = tmp;
}
- ttm_bo_mem_put(bo, &bo->mem);
- bo->mem = tmp;
bo->offset = (bo->mem.start << PAGE_SHIFT) +
bo->bdev->man[bo->mem.mem_type].gpu_offset;