drm/amdgpu: Move to gtt before cpu accesses dma buf.
authorSamuel Li <Samuel.Li@amd.com>
Fri, 8 Dec 2017 21:18:59 +0000 (16:18 -0500)
committerAlex Deucher <alexander.deucher@amd.com>
Mon, 19 Feb 2018 19:17:41 +0000 (14:17 -0500)
To improve CPU read performance. Currently this is implemented for APUs only.

v2: Adapt to change https://lists.freedesktop.org/archives/amd-gfx/2017-October/015174.html
v3: Adapt to change "forward begin_cpu_access callback to drivers"
v4: Instead of v3, reuse drm_gem dmabuf_ops here. Also some minor fixes as suggested.
v5: only set dma_buf ops when it is valid (Samuel)

Signed-off-by: Samuel Li <Samuel.Li@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c

index 0059a060958a902a55fa5ebb49b47f9b6cf4cea5..2ca14d6dd6f155f263362bddd6e4b20c06e73187 100644 (file)
@@ -418,6 +418,8 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+                                           struct dma_buf *dma_buf);
 int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
 void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
index d96f9ac9e5fdebeb1f9151bfccbb7a6e52d1b1f8..0bb34db265eccd275e94c36f06c41d950abac527 100644 (file)
@@ -870,7 +870,7 @@ static struct drm_driver kms_driver = {
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = amdgpu_gem_prime_export,
-       .gem_prime_import = drm_gem_prime_import,
+       .gem_prime_import = amdgpu_gem_prime_import,
        .gem_prime_pin = amdgpu_gem_prime_pin,
        .gem_prime_unpin = amdgpu_gem_prime_unpin,
        .gem_prime_res_obj = amdgpu_gem_prime_res_obj,
index ae9c106979d7de51ce3ec1027593d80d7a897e34..8afec21dc45dc9a81fbea2b3064d6e11ab88e796 100644 (file)
@@ -26,6 +26,7 @@
 #include <drm/drmP.h>
 
 #include "amdgpu.h"
+#include "amdgpu_display.h"
 #include <drm/amdgpu_drm.h>
 #include <linux/dma-buf.h>
 
@@ -164,6 +165,50 @@ struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *obj)
        return bo->tbo.resv;
 }
 
+static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
+                                      enum dma_data_direction direction)
+{
+       struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
+       struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+       struct ttm_operation_ctx ctx = { true, false };
+       u32 domain = amdgpu_display_framebuffer_domains(adev);
+       int ret;
+       bool reads = (direction == DMA_BIDIRECTIONAL ||
+                     direction == DMA_FROM_DEVICE);
+
+       if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
+               return 0;
+
+       /* move to gtt */
+       ret = amdgpu_bo_reserve(bo, false);
+       if (unlikely(ret != 0))
+               return ret;
+
+       if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
+               amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+               ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+       }
+
+       amdgpu_bo_unreserve(bo);
+       return ret;
+}
+
+static const struct dma_buf_ops amdgpu_dmabuf_ops = {
+       .attach = drm_gem_map_attach,
+       .detach = drm_gem_map_detach,
+       .map_dma_buf = drm_gem_map_dma_buf,
+       .unmap_dma_buf = drm_gem_unmap_dma_buf,
+       .release = drm_gem_dmabuf_release,
+       .begin_cpu_access = amdgpu_gem_begin_cpu_access,
+       .map = drm_gem_dmabuf_kmap,
+       .map_atomic = drm_gem_dmabuf_kmap_atomic,
+       .unmap = drm_gem_dmabuf_kunmap,
+       .unmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+       .mmap = drm_gem_dmabuf_mmap,
+       .vmap = drm_gem_dmabuf_vmap,
+       .vunmap = drm_gem_dmabuf_vunmap,
+};
+
 struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                                        struct drm_gem_object *gobj,
                                        int flags)
@@ -176,7 +221,30 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
                return ERR_PTR(-EPERM);
 
        buf = drm_gem_prime_export(dev, gobj, flags);
-       if (!IS_ERR(buf))
+       if (!IS_ERR(buf)) {
                buf->file->f_mapping = dev->anon_inode->i_mapping;
+               buf->ops = &amdgpu_dmabuf_ops;
+       }
+
        return buf;
 }
+
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+                                           struct dma_buf *dma_buf)
+{
+       struct drm_gem_object *obj;
+
+       if (dma_buf->ops == &amdgpu_dmabuf_ops) {
+               obj = dma_buf->priv;
+               if (obj->dev == dev) {
+                       /*
+                        * Importing dmabuf exported from our own gem increases
+                        * refcount on gem itself instead of f_count of dmabuf.
+                        */
+                       drm_gem_object_get(obj);
+                       return obj;
+               }
+       }
+
+       return drm_gem_prime_import(dev, dma_buf);
+}