drm/ttm: Make LRU removal optional v2
author		Christian König <christian.koenig@amd.com>
		Fri, 10 May 2019 12:15:08 +0000 (14:15 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
		Fri, 31 May 2019 15:39:34 +0000 (10:39 -0500)
We have already been doing this for DMA-buf imports and for
amdgpu VM BOs for quite a while now.

If this doesn't run into any problems, we will probably stop
removing BOs from the LRU altogether.

v2: drop BUG_ON from ttm_bo_add_to_lru
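
For reference, a minimal caller sketch (hypothetical driver code, not
part of this patch) of the new del_lru parameter: pass false to keep
the reserved BOs on the LRU instead of removing them.

	#include <drm/ttm/ttm_execbuf_util.h>
	#include <linux/dma-fence.h>

	/* Hypothetical helper: reserve the ttm_validate_buffer entries on
	 * @list for a submission, keep them on the LRU (del_lru = false)
	 * and fence them afterwards. */
	static int example_submit(struct list_head *list,
				  struct dma_fence *fence)
	{
		struct ww_acquire_ctx ticket;
		LIST_HEAD(duplicates);
		int r;

		r = ttm_eu_reserve_buffers(&ticket, list, true /* intr */,
					   &duplicates, false /* del_lru */);
		if (r)
			return r;

		/* ... validate BOs and build the command submission ... */

		/* BOs still on the LRU are moved to its tail when fenced. */
		ttm_eu_fence_buffer_objects(&ticket, list, fence);
		return 0;
	}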

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
14 files changed:
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_validation.h
include/drm/ttm/ttm_bo_driver.h
include/drm/ttm/ttm_execbuf_util.h

index e304271612958a6cc872e6710c93a3363b779b0d..81e0e758cc548362313df15f0ed508e4b20b5cf5 100644 (file)
@@ -585,7 +585,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem,
        amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]);
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-                                    false, &ctx->duplicates);
+                                    false, &ctx->duplicates, true);
        if (!ret)
                ctx->reserved = true;
        else {
@@ -658,7 +658,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem,
        }
 
        ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list,
-                                    false, &ctx->duplicates);
+                                    false, &ctx->duplicates, true);
        if (!ret)
                ctx->reserved = true;
        else
@@ -1808,7 +1808,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
        }
 
        /* Reserve all BOs and page tables for validation */
-       ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates);
+       ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates,
+                                    true);
        WARN(!list_empty(&duplicates), "Duplicates should be empty");
        if (ret)
                goto out_free;
@@ -2014,7 +2015,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
        }
 
        ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list,
-                                    false, &duplicate_save);
+                                    false, &duplicate_save, true);
        if (ret) {
                pr_debug("Memory eviction: TTM Reserve Failed. Try again\n");
                goto ttm_reserve_fail;
index d72cc583ebd15c13a07b4b7d55211c67a30258c8..fff558cf385b8ec4b016372bb44e5f8cb4183c6d 100644 (file)
@@ -648,7 +648,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
        }
 
        r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
-                                  &duplicates);
+                                  &duplicates, true);
        if (unlikely(r != 0)) {
                if (r != -ERESTARTSYS)
                        DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
index 54dd02a898b9c6101bd4d01bb575c88c829caafb..06f83cac0d3abacc2f08681c9445a836ffe268f7 100644 (file)
@@ -79,7 +79,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        list_add(&csa_tv.head, &list);
        amdgpu_vm_get_pd_bo(vm, &list, &pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
        if (r) {
                DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
                return r;
index 7b840367004c3be5f40f1191eb70574a2af488e4..d513a5ad03dd3121ec88f8b57f61060b11cd14e8 100644 (file)
@@ -171,7 +171,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
+       r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, true);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
                        "we fail to reserve bo (%d)\n", r);
@@ -608,7 +608,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
        amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, true);
        if (r)
                goto error_unref;
 
index 30f85f0130cbb172ef9292097acb3c56c0adcbb7..49f9a9385393a3c51de9c07f4e881427196da844 100644 (file)
@@ -256,7 +256,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
                return 0;
 
        ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
-                                    !no_intr, NULL);
+                                    !no_intr, NULL, true);
        if (ret)
                return ret;
 
index 44617dec8183373c5bd278502d54337f93b4ad77..7411e69e2712c9a670f4f2bb2f58f9979d850137 100644 (file)
@@ -559,7 +559,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
        if (!vm_bos)
                return;
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true);
        if (r)
                goto error_free;
 
index 833e909706a9c2ef6845abd7358d495acc728603..36683de0300baf39d5104f8f27d7385b0316d0b7 100644 (file)
@@ -539,7 +539,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
        INIT_LIST_HEAD(&duplicates);
-       r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
+       r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true);
        if (unlikely(r != 0)) {
                return r;
        }
index 2845fceb2fbd8f3c2eebe188e2f8d97403da534e..06bbcd2679b29c534bccc9adf8affb1c973cdd33 100644 (file)
@@ -173,19 +173,20 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
 
        reservation_object_assert_held(bo->resv);
 
-       if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
-               BUG_ON(!list_empty(&bo->lru));
+       if (!list_empty(&bo->lru))
+               return;
 
-               man = &bdev->man[bo->mem.mem_type];
-               list_add_tail(&bo->lru, &man->lru[bo->priority]);
-               kref_get(&bo->list_kref);
+       if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)
+               return;
 
-               if (bo->ttm && !(bo->ttm->page_flags &
-                                (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
-                       list_add_tail(&bo->swap,
-                                     &bdev->glob->swap_lru[bo->priority]);
-                       kref_get(&bo->list_kref);
-               }
+       man = &bdev->man[bo->mem.mem_type];
+       list_add_tail(&bo->lru, &man->lru[bo->priority]);
+       kref_get(&bo->list_kref);
+
+       if (bo->ttm && !(bo->ttm->page_flags &
+                        (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+               list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+               kref_get(&bo->list_kref);
        }
 }
 EXPORT_SYMBOL(ttm_bo_add_to_lru);
index 0075eb9a0b52f6f22f2c98d8aa739030d195b4f1..957ec375a4ba54b5399c89aa5dda91ae822e9abd 100644 (file)
@@ -69,7 +69,8 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
 
-               ttm_bo_add_to_lru(bo);
+               if (list_empty(&bo->lru))
+                       ttm_bo_add_to_lru(bo);
                reservation_object_unlock(bo->resv);
        }
        spin_unlock(&glob->lru_lock);
@@ -93,7 +94,7 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                           struct list_head *list, bool intr,
-                          struct list_head *dups)
+                          struct list_head *dups, bool del_lru)
 {
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
@@ -172,11 +173,11 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                list_add(&entry->head, list);
        }
 
-       if (ticket)
-               ww_acquire_done(ticket);
-       spin_lock(&glob->lru_lock);
-       ttm_eu_del_from_lru_locked(list);
-       spin_unlock(&glob->lru_lock);
+       if (del_lru) {
+               spin_lock(&glob->lru_lock);
+               ttm_eu_del_from_lru_locked(list);
+               spin_unlock(&glob->lru_lock);
+       }
        return 0;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -203,7 +204,10 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
                        reservation_object_add_shared_fence(bo->resv, fence);
                else
                        reservation_object_add_excl_fence(bo->resv, fence);
-               ttm_bo_add_to_lru(bo);
+               if (list_empty(&bo->lru))
+                       ttm_bo_add_to_lru(bo);
+               else
+                       ttm_bo_move_to_lru_tail(bo, NULL);
                reservation_object_unlock(bo->resv);
        }
        spin_unlock(&glob->lru_lock);
index b7f9dfe61d1c1d3a57b2a9d26736063af9fd0247..fe0bd6274db612ed6200b351b9d7d632f56c664f 100644 (file)
@@ -63,7 +63,7 @@ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
        struct virtio_gpu_object *qobj;
        int ret;
 
-       ret = ttm_eu_reserve_buffers(ticket, head, true, NULL);
+       ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true);
        if (ret != 0)
                return ret;
 
index 711f8fd0dd454fcbde3f79643d874cf73c5527ae..1d38a8b2f2ec8be2839e9b297ab1f1bbf5eb9fdd 100644 (file)
@@ -464,7 +464,8 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
+       ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
+                                    true);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
index 523f6ac5c335efe30d49b4802c9f325b61b18514..1d2322ad6fd5947f8daf3d8ab32d6a00e63e38b6 100644 (file)
@@ -169,7 +169,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx,
                          bool intr)
 {
        return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr,
-                                     NULL);
+                                     NULL, true);
 }
 
 /**
index 129dabbc002d4d5ceb0ebb5d0884ccf884765661..9f54cf9c60df2aaa4c2396a6cd7af0aa944aadce 100644 (file)
@@ -769,7 +769,10 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                spin_lock(&bo->bdev->glob->lru_lock);
-               ttm_bo_add_to_lru(bo);
+               if (list_empty(&bo->lru))
+                       ttm_bo_add_to_lru(bo);
+               else
+                       ttm_bo_move_to_lru_tail(bo, NULL);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }
        reservation_object_unlock(bo->resv);
index 621615fa7728f1553afd7167f5398ae41a892a37..7e46cc678e7e7ab48b8dd9538a3864dfae6bbf3a 100644 (file)
@@ -70,6 +70,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * @list:    thread private list of ttm_validate_buffer structs.
  * @intr:    should the wait be interruptible
  * @dups:    [out] optional list of duplicates.
+ * @del_lru: true if BOs should be removed from the LRU.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -98,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                                  struct list_head *list, bool intr,
-                                 struct list_head *dups);
+                                 struct list_head *dups, bool del_lru);
 
 /**
  * function ttm_eu_fence_buffer_objects.