drm/amdgpu: bind GTT on demand
author Christian König <christian.koenig@amd.com>
Mon, 5 Sep 2016 15:00:57 +0000 (17:00 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Wed, 14 Sep 2016 19:10:30 +0000 (15:10 -0400)
Most of the time we don't really need the GTT table any more, so bind
it only on demand.
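
For illustration only (not part of the patch): a minimal sketch of the
caller-side pattern this introduces. A GTT placement no longer implies a
GART binding, so code that needs a GPU address has to bind on demand
first. The helper name example_get_gpu_addr is hypothetical; it just
combines amdgpu_ttm_bind() and amdgpu_bo_gpu_offset() the way the
amdgpu_cs_parser_bos() and amdgpu_bo_pin_restricted() hunks below do,
with reservation locking and pin bookkeeping left out.

static int example_get_gpu_addr(struct amdgpu_bo *bo, uint64_t *gpu_addr)
{
	int r;

	/* No-op if the ttm_tt is already on the bound list; otherwise
	 * compute the PTE flags and bind the pages into the GART.
	 */
	r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
	if (unlikely(r))
		return r;

	/* Safe now: amdgpu_bo_gpu_offset() warns on an unbound TT BO. */
	*gpu_addr = amdgpu_bo_gpu_offset(bo);
	return 0;
}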

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Acked-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

index 717c3b4e1d5467a81a02d5e2cb052f296494e339..2e8f469159c5b5254d69b43a057739ac75ee1d7e 100644 (file)
@@ -2521,6 +2521,7 @@ static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 struct amdgpu_bo_va_mapping *
 amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
                       uint64_t addr, struct amdgpu_bo **bo);
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
 
 #include "amdgpu_object.h"
 #endif
index 61b7e25535bfea6f6718013bd3cab2522c27a486..f7dccb10d9657b79731e8c96888b19d79e568859 100644 (file)
@@ -639,8 +639,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
                }
        }
 
-       if (p->uf_entry.robj)
-               p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+       if (!r && p->uf_entry.robj) {
+               struct amdgpu_bo *uf = p->uf_entry.robj;
+
+               r = amdgpu_ttm_bind(uf->tbo.ttm, &uf->tbo.mem);
+               p->job->uf_addr += amdgpu_bo_gpu_offset(uf);
+       }
 
 error_validate:
        if (r) {
@@ -1163,3 +1167,29 @@ amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 
        return NULL;
 }
+
+/**
+ * amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
+ *
+ * @parser: command submission parser context
+ *
+ * Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
+ */
+int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
+{
+       unsigned i;
+       int r;
+
+       if (!parser->bo_list)
+               return 0;
+
+       for (i = 0; i < parser->bo_list->num_entries; i++) {
+               struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
+
+               r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+               if (unlikely(r))
+                       return r;
+       }
+
+       return 0;
+}
index 162411bd8145550192f1ad52865b6bbeb48b6750..5a6216c9c0073972ea99546e96177e479d5dc1bc 100644 (file)
@@ -675,6 +675,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
                dev_err(bo->adev->dev, "%p pin failed\n", bo);
                goto error;
        }
+       r = amdgpu_ttm_bind(bo->tbo.ttm, &bo->tbo.mem);
+       if (unlikely(r)) {
+               dev_err(bo->adev->dev, "%p bind failed\n", bo);
+               goto error;
+       }
 
        bo->pin_count = 1;
        if (gpu_addr != NULL)
@@ -947,6 +952,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct fence *fence,
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
        WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+       WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_TT &&
+                    !amdgpu_ttm_is_bound(bo->tbo.ttm));
        WARN_ON_ONCE(!ww_mutex_is_locked(&bo->tbo.resv->lock) &&
                     !bo->pin_count);
 
index a4914b78851788458158751dcdec7a100fd9e223..73298fadd7f391a31836c74a9a5fbed1ab1d7e0c 100644 (file)
@@ -256,8 +256,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
        new_start = new_mem->start << PAGE_SHIFT;
 
        switch (old_mem->mem_type) {
-       case TTM_PL_VRAM:
        case TTM_PL_TT:
+               r = amdgpu_ttm_bind(bo->ttm, old_mem);
+               if (r)
+                       return r;
+
+       case TTM_PL_VRAM:
                old_start += bo->bdev->man[old_mem->mem_type].gpu_offset;
                break;
        default:
@@ -265,8 +269,12 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
                return -EINVAL;
        }
        switch (new_mem->mem_type) {
-       case TTM_PL_VRAM:
        case TTM_PL_TT:
+               r = amdgpu_ttm_bind(bo->ttm, new_mem);
+               if (r)
+                       return r;
+
+       case TTM_PL_VRAM:
                new_start += bo->bdev->man[new_mem->mem_type].gpu_offset;
                break;
        default:
@@ -638,7 +646,6 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
                                   struct ttm_mem_reg *bo_mem)
 {
        struct amdgpu_ttm_tt *gtt = (void*)ttm;
-       uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        int r;
 
        if (gtt->userptr) {
@@ -659,6 +666,26 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
            bo_mem->mem_type == AMDGPU_PL_OA)
                return -EINVAL;
 
+       return 0;
+}
+
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+       return gtt && !list_empty(&gtt->list);
+}
+
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
+{
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       uint32_t flags;
+       int r;
+
+       if (!ttm || amdgpu_ttm_is_bound(ttm))
+               return 0;
+
+       flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
        r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
                ttm->pages, gtt->ttm.dma_address, flags);
 
index 72f6bfc15d8fbbfc6bb12df2d6f1cc5495d777e0..214bae965fc6e4f5ec862d5ac6d6de212b90c39c 100644 (file)
@@ -77,4 +77,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
                        struct fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
+bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
+int amdgpu_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
+
 #endif
index a71efeb5083bbf37c608595e0f69eeccd27f3b81..25dd58a65905d8e4a2e5680842ba27fe36709f0e 100644 (file)
@@ -882,6 +882,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
                return -EINVAL;
        }
 
+       r = amdgpu_cs_sysvm_access_required(parser);
+       if (r)
+               return r;
+
        ctx.parser = parser;
        ctx.buf_sizes = buf_sizes;
        ctx.ib_idx = ib_idx;
index 9b71d6c2a968b7c82a16bd5c6ce28f21a64d9801..b0c670294e2f94077947697a21dd1b41dd8598f5 100644 (file)
@@ -634,7 +634,11 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
        uint32_t allocated = 0;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
-       int i, r = 0, idx = 0;
+       int i, r, idx = 0;
+
+       r = amdgpu_cs_sysvm_access_required(p);
+       if (r)
+               return r;
 
        while (idx < ib->length_dw) {
                uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
index bf56f18144377e0626b913f9e5ec3daec245ffc3..bd5af328154f92525cea46ac8912f808d6200555 100644 (file)
@@ -1163,7 +1163,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
        }
 
        flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
-       gtt_flags = (adev == bo_va->bo->adev) ? flags : 0;
+       gtt_flags = (amdgpu_ttm_is_bound(bo_va->bo->tbo.ttm) &&
+               adev == bo_va->bo->adev) ? flags : 0;
 
        spin_lock(&vm->status_lock);
        if (!list_empty(&bo_va->vm_status))