Correctly sign extend the GMC addresses to 48 bits.
v2: sign extending turned out to be easier than expected.
v3: clean up the defines and move them into amdgpu_gmc.h as well.
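
For illustration, here is a minimal userspace sketch of the extend/mask round trip.
The constants mirror the defines this patch adds to amdgpu_gmc.h; gmc_sign_extend()
and the sample address are hypothetical stand-ins for illustration, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors of the defines added to amdgpu_gmc.h by this patch. */
    #define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
    #define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL
    #define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL

    /* Same logic as amdgpu_gmc_sign_extend(): addresses at or above the hole
     * start live in the upper half of the 48bit space and need bits 47..63
     * set to be canonical.
     */
    static uint64_t gmc_sign_extend(uint64_t addr)
    {
    	if (addr >= AMDGPU_GMC_HOLE_START)
    		addr |= AMDGPU_GMC_HOLE_END;
    	return addr;
    }

    int main(void)
    {
    	/* hypothetical top-of-VA address, e.g. where the CSA ends up */
    	uint64_t high_va = 0x0000fffffff00000ULL;
    	uint64_t canonical = gmc_sign_extend(high_va);

    	/* prints fffffffffff00000: the sign extended, canonical form */
    	printf("%llx\n", (unsigned long long)canonical);
    	/* prints fffffff00000: masking strips the upper 16 bits again,
    	 * recovering the linear value the hardware is programmed with
    	 */
    	printf("%llx\n", (unsigned long long)(canonical & AMDGPU_GMC_HOLE_MASK));
    	return 0;
    }
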
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Junwei Zhang <Jerry.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
.num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe,
.gpuvm_size = min(adev->vm_manager.max_pfn
<< AMDGPU_GPU_PAGE_SHIFT,
- AMDGPU_VA_HOLE_START),
+ AMDGPU_GMC_HOLE_START),
.drm_render_minor = adev->ddev->render->index
};
if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
continue;
- va_start = chunk_ib->va_start & AMDGPU_VA_HOLE_MASK;
+ va_start = chunk_ib->va_start & AMDGPU_GMC_HOLE_MASK;
r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
if (r) {
DRM_ERROR("IB va_start is invalid\n");
return -EINVAL;
}
- if (args->va_address >= AMDGPU_VA_HOLE_START &&
- args->va_address < AMDGPU_VA_HOLE_END) {
+ if (args->va_address >= AMDGPU_GMC_HOLE_START &&
+ args->va_address < AMDGPU_GMC_HOLE_END) {
dev_dbg(&dev->pdev->dev,
"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
- args->va_address, AMDGPU_VA_HOLE_START,
- AMDGPU_VA_HOLE_END);
+ args->va_address, AMDGPU_GMC_HOLE_START,
+ AMDGPU_GMC_HOLE_END);
return -EINVAL;
}
- args->va_address &= AMDGPU_VA_HOLE_MASK;
+ args->va_address &= AMDGPU_GMC_HOLE_MASK;
if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
#include "amdgpu_irq.h"
+/* VA hole for 48bit addresses on Vega10 */
+#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
+#define AMDGPU_GMC_HOLE_END 0xffff800000000000ULL
+
+/*
+ * Hardware is programmed as if the hole doesn't exist with start and end
+ * address values.
+ *
+ * This mask is used to remove the upper 16 bits of the VA and so come up
+ * with the linear addr value.
+ */
+#define AMDGPU_GMC_HOLE_MASK 0x0000ffffffffffffULL
+
struct firmware;
/*
return (gmc->real_vram_size == gmc->visible_vram_size);
}
+/**
+ * amdgpu_gmc_sign_extend - sign extend the given gmc address
+ *
+ * @addr: address to extend
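+ *
+ * Returns the address sign extended to a canonical 48bit value, i.e. with
+ * bits 47..63 set when @addr lies in the upper half of the address space.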
+ */
+static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
+{
+ if (addr >= AMDGPU_GMC_HOLE_START)
+ addr |= AMDGPU_GMC_HOLE_END;
+
+ return addr;
+}
+
void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
uint64_t *addr, uint64_t *flags);
uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
dev_info.virtual_address_offset = AMDGPU_VA_RESERVED_SIZE;
dev_info.virtual_address_max =
- min(vm_size, AMDGPU_VA_HOLE_START);
+ min(vm_size, AMDGPU_GMC_HOLE_START);
- if (vm_size > AMDGPU_VA_HOLE_START) {
- dev_info.high_va_offset = AMDGPU_VA_HOLE_END;
- dev_info.high_va_max = AMDGPU_VA_HOLE_END | vm_size;
+ if (vm_size > AMDGPU_GMC_HOLE_START) {
+ dev_info.high_va_offset = AMDGPU_GMC_HOLE_END;
+ dev_info.high_va_max = AMDGPU_GMC_HOLE_END | vm_size;
}
dev_info.virtual_address_alignment = max((int)PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE);
dev_info.pte_fragment_size = (1 << adev->vm_manager.fragment_size) * AMDGPU_GPU_PAGE_SIZE;
WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
- return bo->tbo.offset;
+ return amdgpu_gmc_sign_extend(bo->tbo.offset);
}
/**
uint64_t addr = adev->vm_manager.max_pfn << AMDGPU_GPU_PAGE_SHIFT;
addr -= AMDGPU_VA_RESERVED_SIZE;
-
- if (addr >= AMDGPU_VA_HOLE_START)
- addr |= AMDGPU_VA_HOLE_END;
+ addr = amdgpu_gmc_sign_extend(addr);
return addr;
}
int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
struct amdgpu_bo_va **bo_va)
{
- uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_VA_HOLE_MASK;
+ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
struct ww_acquire_ctx ticket;
struct list_head list;
struct amdgpu_bo_list_entry pd;
if (level == adev->vm_manager.root_level) {
ats_entries = amdgpu_vm_level_shift(adev, level);
ats_entries += AMDGPU_GPU_PAGE_SHIFT;
- ats_entries = AMDGPU_VA_HOLE_START >> ats_entries;
+ ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
ats_entries = min(ats_entries, entries);
entries -= ats_entries;
} else {
eaddr = saddr + size - 1;
if (vm->pte_support_ats)
- ats = saddr < AMDGPU_VA_HOLE_START;
+ ats = saddr < AMDGPU_GMC_HOLE_START;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- if (vm->pte_support_ats && mapping->start < AMDGPU_VA_HOLE_START)
+ if (vm->pte_support_ats &&
+ mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
/* hardcode that limit for now */
#define AMDGPU_VA_RESERVED_SIZE (1ULL << 20)
-/* VA hole for 48bit addresses on Vega10 */
-#define AMDGPU_VA_HOLE_START 0x0000800000000000ULL
-#define AMDGPU_VA_HOLE_END 0xffff800000000000ULL
-
-/*
- * Hardware is programmed as if the hole doesn't exists with start and end
- * address values.
- *
- * This mask is used to remove the upper 16bits of the VA and so come up with
- * the linear addr value.
- */
-#define AMDGPU_VA_HOLE_MASK 0x0000ffffffffffffULL
-
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1