When we need to find the mapping, we need sysvm access anyway. So fold the
sysvm handling into amdgpu_cs_find_mapping() and drop the separate
amdgpu_cs_sysvm_access_required() helper.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Leo Liu <leo.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
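
For reference, a rough before/after sketch of how callers change (variable
names taken from the hunks below; the surrounding error handling is
illustrative only, not part of this patch):

Before:

	m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj);
	if (!aobj)
		return -EINVAL;
	...
	r = amdgpu_cs_sysvm_access_required(parser);
	if (r)
		return r;

After:

	/* the lookup now also makes the BO sysvm accessible and returns an
	 * error code; the mapping comes back through the new pointer arg */
	r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj, &m);
	if (r)
		return r;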
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_mn;
+struct amdgpu_bo_va_mapping;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **mapping);
#include "amdgpu_object.h"
#endif
uint64_t offset;
uint8_t *kptr;
- m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
- &aobj);
- if (!aobj) {
+ r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
+ &aobj, &m);
+ if (r) {
DRM_ERROR("IB va_start is invalid\n");
- return -EINVAL;
+ return r;
}
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
- * virtual memory address. Returns allocation structure when found, NULL
- * otherwise.
+ * virtual memory address. Returns 0 on success and fills in the BO and
+ * mapping, negative error code otherwise.
 */
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
- uint64_t addr, struct amdgpu_bo **bo)
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+ uint64_t addr, struct amdgpu_bo **bo,
+ struct amdgpu_bo_va_mapping **map)
{
struct amdgpu_bo_va_mapping *mapping;
unsigned i;
+ int r;
if (!parser->bo_list)
- return NULL;
+ return 0;
addr /= AMDGPU_GPU_PAGE_SIZE;
continue;
*bo = lobj->bo_va->base.bo;
- return mapping;
+ *map = mapping;
+ goto found;
}
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
continue;
*bo = lobj->bo_va->base.bo;
- return mapping;
+ *map = mapping;
+ goto found;
}
}
- return NULL;
-}
+ return -EINVAL;
-/**
- * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
- *
- * @parser: command submission parser context
- *
- * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
- */
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
-{
- unsigned i;
- int r;
+found:
+ r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
+ if (unlikely(r))
+ return r;
- if (!parser->bo_list)
+ if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
return 0;
- for (i = 0; i < parser->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
-
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
- if (unlikely(r))
- return r;
-
- if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
- continue;
-
- bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
- if (unlikely(r))
- return r;
- }
-
- return 0;
+ (*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
}
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r = 0;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
if (!ctx->parser->adev->uvd.address_64_bit) {
uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
int r;
- mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
- return -EINVAL;
+ return r;
}
start = amdgpu_bo_gpu_offset(bo);
return -EINVAL;
}
- r = amdgpu_cs_sysvm_access_required(parser);
- if (r)
- return r;
-
ctx.parser = parser;
ctx.buf_sizes = buf_sizes;
ctx.ib_idx = ib_idx;
struct amdgpu_bo_va_mapping *mapping;
struct amdgpu_bo *bo;
uint64_t addr;
+ int r;
if (index == 0xffffffff)
index = 0;
((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
addr += ((uint64_t)size) * ((uint64_t)index);
- mapping = amdgpu_cs_find_mapping(p, addr, &bo);
- if (mapping == NULL) {
+ r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+ if (r) {
DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
addr, lo, hi, size, index);
- return -EINVAL;
+ return r;
}
if ((addr + (uint64_t)size) >
p->job->vm = NULL;
ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
- r = amdgpu_cs_sysvm_access_required(p);
- if (r)
- return r;
-
while (idx < ib->length_dw) {
uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);