From d0cef9fa4411eb17dd350cced3336ca58f465ff1 Mon Sep 17 00:00:00 2001
From: Roger He
Date: Thu, 21 Dec 2017 17:42:50 +0800
Subject: [PATCH] drm/ttm: use an operation ctx for ttm_tt_populate in ttm_bo_driver (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

forward the operation context to ttm_tt_populate as well,
and the ultimate goal is swapout enablement for reserved BOs.

v2: squash in fix for vboxvideo

Reviewed-by: Christian König
Signed-off-by: Roger He
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c     |  7 ++++---
 drivers/gpu/drm/ast/ast_ttm.c               |  5 +++--
 drivers/gpu/drm/cirrus/cirrus_ttm.c         |  5 +++--
 drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c |  5 +++--
 drivers/gpu/drm/mgag200/mgag200_ttm.c       |  5 +++--
 drivers/gpu/drm/nouveau/nouveau_bo.c        |  8 ++++----
 drivers/gpu/drm/qxl/qxl_ttm.c               |  5 +++--
 drivers/gpu/drm/radeon/radeon_ttm.c         |  9 +++++----
 drivers/gpu/drm/ttm/ttm_agp_backend.c       |  4 ++--
 drivers/gpu/drm/ttm/ttm_bo_util.c           | 11 ++++++++---
 drivers/gpu/drm/ttm/ttm_bo_vm.c             |  7 ++++++-
 drivers/gpu/drm/ttm/ttm_page_alloc.c        | 13 +++++--------
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c    | 11 ++++-------
 drivers/gpu/drm/ttm/ttm_tt.c                |  6 +++++-
 drivers/gpu/drm/virtio/virtgpu_object.c     |  6 +++++-
 drivers/gpu/drm/virtio/virtgpu_ttm.c        |  5 +++--
 drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c      | 13 +++++--------
 drivers/gpu/drm/vmwgfx/vmwgfx_mob.c         | 13 +++++++++++--
 drivers/staging/vboxvideo/vbox_ttm.c        |  5 +++--
 include/drm/ttm/ttm_bo_driver.h             |  5 +++--
 include/drm/ttm/ttm_page_alloc.h            | 11 +++++++----
 21 files changed, 95 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index f1b7d987bd57..044f5b57e197 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -990,7 +990,8 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
 	return &gtt->ttm.ttm;
 }
 
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1018,11 +1019,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, adev->dev);
+		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
 	}
 #endif
 
-	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
 }
 
 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 572d2d27ac20..7b784d91e258 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -216,9 +216,10 @@ static struct ttm_tt *ast_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+static int ast_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 5a08224edf41..a8e31ea07382 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -216,9 +216,10 @@ static struct ttm_tt *cirrus_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index ab4ee5953615..8516e005643f 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -223,9 +223,10 @@ static struct ttm_tt *hibmc_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 2d61869152d1..c97009bb77dd 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -216,9 +216,10 @@ static struct ttm_tt *mgag200_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index b0084bd35415..a7df632b88cf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1547,7 +1547,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 }
 
 static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_dma_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
@@ -1572,17 +1572,17 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
 
 #if IS_ENABLED(CONFIG_AGP)
 	if (drm->agp.bridge) {
-		return ttm_agp_tt_populate(ttm);
+		return ttm_agp_tt_populate(ttm, ctx);
 	}
 #endif
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev);
+		return ttm_dma_populate((void *)ttm, dev, ctx);
 	}
 #endif
 
-	r = ttm_pool_populate(ttm);
+	r = ttm_pool_populate(ttm, ctx);
 	if (r) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 145175b851c5..59cd74c3f3af 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -291,14 +291,15 @@ static struct ttm_backend_func qxl_backend_func = {
 	.destroy = &qxl_ttm_backend_destroy,
 };
 
-static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	int r;
 
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	r = ttm_pool_populate(ttm);
+	r = ttm_pool_populate(ttm, ctx);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 7b915aaab937..afc5eedddd8f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -721,7 +721,8 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
 	return (struct radeon_ttm_tt *)ttm;
 }
 
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
 	struct radeon_device *rdev;
@@ -750,17 +751,17 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
 	rdev = radeon_get_rdev(ttm->bdev);
 #if IS_ENABLED(CONFIG_AGP)
 	if (rdev->flags & RADEON_IS_AGP) {
-		return ttm_agp_tt_populate(ttm);
+		return ttm_agp_tt_populate(ttm, ctx);
 	}
 #endif
 
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, rdev->dev);
+		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
 	}
 #endif
 
-	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
+	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
 }
 
 static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 028ab6007873..3e795a099d06 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -133,12 +133,12 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 }
 EXPORT_SYMBOL(ttm_agp_tt_create);
 
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 EXPORT_SYMBOL(ttm_agp_tt_populate);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 6e353df4e4bc..b7eb507aaf40 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -376,7 +376,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * TTM might be null for moves within the same region.
 	 */
 	if (ttm && ttm->state == tt_unpopulated) {
-		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
 		if (ret)
 			goto out1;
 	}
@@ -545,14 +545,19 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 			   unsigned long num_pages,
 			   struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+	struct ttm_mem_reg *mem = &bo->mem;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	struct ttm_tt *ttm = bo->ttm;
+	pgprot_t prot;
 	int ret;
 
 	BUG_ON(!ttm);
 
 	if (ttm->state == tt_unpopulated) {
-		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 292d157d1a61..8e68e70b02b9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -226,12 +226,17 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
 						cvma.vm_page_prot);
 	} else {
+		struct ttm_operation_ctx ctx = {
+			.interruptible = false,
+			.no_wait_gpu = false
+		};
+
 		ttm = bo->ttm;
 		cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
 						cvma.vm_page_prot);
 
 		/* Allocate all page at once, most common usage */
-		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+		if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
 			retval = VM_FAULT_OOM;
 			goto out_io_unlock;
 		}
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 8f93ff30f78b..f1a3d55ead83 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1058,13 +1058,9 @@ void ttm_page_alloc_fini(void)
 	_manager = NULL;
 }
 
-int ttm_pool_populate(struct ttm_tt *ttm)
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 	unsigned i;
 	int ret;
 
@@ -1080,7 +1076,7 @@ int ttm_pool_populate(struct ttm_tt *ttm)
 
 	for (i = 0; i < ttm->num_pages; ++i) {
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						PAGE_SIZE, &ctx);
+						PAGE_SIZE, ctx);
 		if (unlikely(ret != 0)) {
 			ttm_pool_unpopulate(ttm);
 			return -ENOMEM;
@@ -1117,12 +1113,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm)
 }
 EXPORT_SYMBOL(ttm_pool_unpopulate);
 
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+					struct ttm_operation_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
-	r = ttm_pool_populate(&tt->ttm);
+	r = ttm_pool_populate(&tt->ttm, ctx);
 	if (r)
 		return r;
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 8aac86a16e22..3ac53918881e 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -923,14 +923,11 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
  * On success pages list will hold count number of correctly
  * cached pages. On failure will hold the negative return value (-ENOMEM, etc).
  */
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+			struct ttm_operation_ctx *ctx)
 {
 	struct ttm_tt *ttm = &ttm_dma->ttm;
 	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	struct ttm_operation_ctx ctx = {
-		.interruptible = false,
-		.no_wait_gpu = false
-	};
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	enum pool_type type;
@@ -966,7 +963,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 			break;
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size, &ctx);
+						pool->size, ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
@@ -1002,7 +999,7 @@ skip_huge:
 		}
 
 		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
-						pool->size, &ctx);
+						pool->size, ctx);
 		if (unlikely(ret != 0)) {
 			ttm_dma_unpopulate(ttm_dma, dev);
 			return -ENOMEM;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 8ebc8d3560c3..b48d7a011cb1 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -263,6 +263,10 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
 
 int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	int ret = 0;
 
 	if (!ttm)
@@ -271,7 +275,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	if (ttm->state == tt_bound)
 		return 0;
 
-	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+	ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 6f66b7347cd0..0b90cdb3d9fe 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -124,13 +124,17 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
 	int ret;
 	struct page **pages = bo->tbo.ttm->pages;
 	int nr_pages = bo->tbo.num_pages;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 
 	/* wtf swapping */
 	if (bo->pages)
 		return 0;
 
 	if (bo->tbo.ttm->state == tt_unpopulated)
-		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+		bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
 	bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!bo->pages)
 		goto out;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 43483e97adbb..36655b709eb2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -324,12 +324,13 @@ static struct ttm_backend_func virtio_gpu_backend_func = {
 	.destroy = &virtio_gpu_ttm_backend_destroy,
 };
 
-static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx)
 {
 	if (ttm->state != tt_unpopulated)
 		return 0;
 
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index cb386eb234ac..22231bc9e845 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -635,16 +635,12 @@ static void vmw_ttm_destroy(struct ttm_tt *ttm)
 }
 
 
-static int vmw_ttm_populate(struct ttm_tt *ttm)
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
 	struct vmw_ttm_tt *vmw_tt =
 		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-	struct ttm_operation_ctx ctx = {
-		.interruptible = true,
-		.no_wait_gpu = false
-	};
 	int ret;
 
 	if (ttm->state != tt_unpopulated)
@@ -653,15 +649,16 @@ static int vmw_ttm_populate(struct ttm_tt *ttm)
 	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
 		size_t size =
 			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-		ret = ttm_mem_global_alloc(glob, size, &ctx);
+		ret = ttm_mem_global_alloc(glob, size, ctx);
 		if (unlikely(ret != 0))
 			return ret;
 
-		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+				       ctx);
 		if (unlikely(ret != 0))
 			ttm_mem_global_free(glob, size);
 	} else
-		ret = ttm_pool_populate(ttm);
+		ret = ttm_pool_populate(ttm, ctx);
 
 	return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index b17f08fc50d3..736ca47e28ea 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -240,6 +240,10 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 	unsigned long offset;
 	unsigned long bo_size;
 	struct vmw_otable *otables = batch->otables;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
 	SVGAOTableType i;
 	int ret;
 
@@ -264,7 +268,7 @@ static int vmw_otable_batch_setup(struct vmw_private *dev_priv,
 
 	ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 	ret = vmw_bo_map_dma(batch->otable_bo);
@@ -430,6 +434,11 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 			       struct vmw_mob *mob)
 {
 	int ret;
+	struct ttm_operation_ctx ctx = {
+		.interruptible = false,
+		.no_wait_gpu = false
+	};
+
 	BUG_ON(mob->pt_bo != NULL);
 
 	ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
@@ -442,7 +451,7 @@ static int vmw_mob_pt_populate(struct vmw_private *dev_priv,
 
 	ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
 	BUG_ON(ret != 0);
-	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+	ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
 	if (unlikely(ret != 0))
 		goto out_unreserve;
 	ret = vmw_bo_map_dma(mob->pt_bo);
diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c
index 231c89e0699c..55f14c979524 100644
--- a/drivers/staging/vboxvideo/vbox_ttm.c
+++ b/drivers/staging/vboxvideo/vbox_ttm.c
@@ -213,9 +213,10 @@ static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
 	return tt;
 }
 
-static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
+				struct ttm_operation_ctx *ctx)
 {
-	return ttm_pool_populate(ttm);
+	return ttm_pool_populate(ttm, ctx);
 }
 
 static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 934fecfa7b07..84860ece77eb 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -352,7 +352,8 @@ struct ttm_bo_driver {
 	 * Returns:
 	 * -ENOMEM: Out of memory.
 	 */
-	int (*ttm_tt_populate)(struct ttm_tt *ttm);
+	int (*ttm_tt_populate)(struct ttm_tt *ttm,
+			struct ttm_operation_ctx *ctx);
 
 	/**
 	 * ttm_tt_unpopulate
@@ -1077,7 +1078,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
 				 struct agp_bridge_data *bridge,
 				 unsigned long size, uint32_t page_flags,
 				 struct page *dummy_read_page);
-int ttm_agp_tt_populate(struct ttm_tt *ttm);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
 #endif
 
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 593811362a91..4d9b019d253c 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -47,7 +47,7 @@ void ttm_page_alloc_fini(void);
  *
  * Add backing pages to all of @ttm
  */
-int ttm_pool_populate(struct ttm_tt *ttm);
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_pool_unpopulate:
@@ -61,7 +61,8 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm);
 /**
  * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
  */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+				struct ttm_operation_ctx *ctx);
 
 /**
  * Unpopulates and DMA unmaps pages as part of a
@@ -89,7 +90,8 @@ void ttm_dma_page_alloc_fini(void);
  */
 int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
 
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+			struct ttm_operation_ctx *ctx);
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
 
 #else
@@ -106,7 +108,8 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
 	return 0;
 }
 static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
-				struct device *dev)
+				struct device *dev,
+				struct ttm_operation_ctx *ctx)
 {
 	return -ENOMEM;
 }
-- 
2.30.2
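
For reference, a minimal sketch of what a driver-side implementation of the
reworked hook looks like after this change. "foo" is a placeholder driver
name (not part of the patch); the TTM helpers used are the ones converted
above, so the shape mirrors the ast/cirrus/vboxvideo hunks:

/* Hypothetical driver glue illustrating the post-patch hook signature.
 * ttm_pool_populate() now takes the caller's ttm_operation_ctx instead of
 * building a fixed one internally, so the hook simply forwards it.
 */
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

static int foo_ttm_tt_populate(struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	/* Forward the caller's context so the pool allocator can honour
	 * interruptible/no_wait_gpu at allocation time. */
	return ttm_pool_populate(ttm, ctx);
}

static void foo_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver foo_bo_driver = {
	.ttm_tt_populate   = foo_ttm_tt_populate,
	.ttm_tt_unpopulate = foo_ttm_tt_unpopulate,
	/* other hooks omitted for brevity */
};

Threading the context through here, rather than hard-coding it inside the
page allocators, is the groundwork the commit message refers to: once every
populate path sees the caller's ctx, swapout of reserved BOs can be enabled
in a follow-up without changing these signatures again.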