drm/i915: Implement dma_buf_ops->kmap
author		Chris Wilson <chris@chris-wilson.co.uk>		Wed, 3 May 2017 20:25:17 +0000 (21:25 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>		Wed, 3 May 2017 22:15:02 +0000 (23:15 +0100)
Since kmap allows us to block, we can pin the pages and use our normal
page lookup routine, which keeps the implementation simple or, as some
might say, quick and dirty.

Testcase: igt/drv_selftest/dmabuf
Testcase: igt/prime_rw
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170503202517.16797-1-chris@chris-wilson.co.uk
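For reference, synchronisation is deliberately left to the importer (note the
comment in i915_gem_dmabuf_kmap below), so a caller is expected to bracket its
CPU access with dma_buf_begin_cpu_access()/dma_buf_end_cpu_access() around the
kmap/kunmap pair. A minimal importer-side sketch of that pattern follows; the
helper name copy_page_from_dmabuf and its error handling are illustrative
assumptions, not part of this patch:

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>
#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Illustrative importer-side helper (not part of this patch): copy one
 * page out of an exported dma-buf via the kmap interface. Synchronisation
 * is the caller's responsibility, hence the begin/end_cpu_access bracket
 * around the kmap/kunmap pair.
 */
static int copy_page_from_dmabuf(struct dma_buf *dmabuf,
				 unsigned long page_num, void *dst)
{
	void *vaddr;
	int err, ret;

	err = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (err)
		return err;

	vaddr = dma_buf_kmap(dmabuf, page_num);
	if (!vaddr) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(dst, vaddr, PAGE_SIZE);
	dma_buf_kunmap(dmabuf, page_num, vaddr);

out:
	ret = dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
	return err ?: ret;
}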
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index f225bf680b6de190ce1cbb4aa0e67a5b3d5f603a..6176e589cf09f9b287cf25700a27a170255d6fe8 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -122,12 +122,36 @@ static void i915_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf, unsigned long
 }
 static void *i915_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
 {
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
+       struct page *page;
+
+       if (page_num >= obj->base.size >> PAGE_SHIFT)
+               return NULL;
+
+       if (!i915_gem_object_has_struct_page(obj))
+               return NULL;
+
+       if (i915_gem_object_pin_pages(obj))
+               return NULL;
+
+       /* Synchronisation is left to the caller (via .begin_cpu_access()) */
+       page = i915_gem_object_get_page(obj, page_num);
+       if (IS_ERR(page))
+               goto err_unpin;
+
+       return kmap(page);
+
+err_unpin:
+       i915_gem_object_unpin_pages(obj);
        return NULL;
 }
 
 static void i915_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num, void *addr)
 {
+       struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 
+       kunmap(virt_to_page(addr));
+       i915_gem_object_unpin_pages(obj);
 }
 
 static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
index 817bef74bbcbc437c96485e2da07b83f81cd6765..d15cc9d3a5cd1960f55a74ac0424c3e77f503f25 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c
@@ -271,6 +271,105 @@ err_obj:
        return err;
 }
 
+static int igt_dmabuf_export_kmap(void *arg)
+{
+       struct drm_i915_private *i915 = arg;
+       struct drm_i915_gem_object *obj;
+       struct dma_buf *dmabuf;
+       void *ptr;
+       int err;
+
+       obj = i915_gem_object_create(i915, 2*PAGE_SIZE);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
+
+       dmabuf = i915_gem_prime_export(&i915->drm, &obj->base, 0);
+       i915_gem_object_put(obj);
+       if (IS_ERR(dmabuf)) {
+               err = PTR_ERR(dmabuf);
+               pr_err("i915_gem_prime_export failed with err=%d\n", err);
+               return err;
+       }
+
+       ptr = dma_buf_kmap(dmabuf, 0);
+       if (!ptr) {
+               pr_err("dma_buf_kmap failed\n");
+               err = -ENOMEM;
+               goto err;
+       }
+
+       if (memchr_inv(ptr, 0, PAGE_SIZE)) {
+               dma_buf_kunmap(dmabuf, 0, ptr);
+               pr_err("Exported page[0] not initialiased to zero!\n");
+               err = -EINVAL;
+               goto err;
+       }
+
+       memset(ptr, 0xc5, PAGE_SIZE);
+       dma_buf_kunmap(dmabuf, 0, ptr);
+
+       ptr = i915_gem_object_pin_map(obj, I915_MAP_WB);
+       if (IS_ERR(ptr)) {
+               err = PTR_ERR(ptr);
+               pr_err("i915_gem_object_pin_map failed with err=%d\n", err);
+               goto err;
+       }
+       memset(ptr + PAGE_SIZE, 0xaa, PAGE_SIZE);
+       i915_gem_object_unpin_map(obj);
+
+       ptr = dma_buf_kmap(dmabuf, 1);
+       if (!ptr) {
+               pr_err("dma_buf_kmap failed\n");
+               err = -ENOMEM;
+               goto err;
+       }
+
+       if (memchr_inv(ptr, 0xaa, PAGE_SIZE)) {
+               dma_buf_kunmap(dmabuf, 1, ptr);
+               pr_err("Exported page[1] not set to 0xaa!\n");
+               err = -EINVAL;
+               goto err;
+       }
+
+       memset(ptr, 0xc5, PAGE_SIZE);
+       dma_buf_kunmap(dmabuf, 1, ptr);
+
+       ptr = dma_buf_kmap(dmabuf, 0);
+       if (!ptr) {
+               pr_err("dma_buf_kmap failed\n");
+               err = -ENOMEM;
+               goto err;
+       }
+       if (memchr_inv(ptr, 0xc5, PAGE_SIZE)) {
+               dma_buf_kunmap(dmabuf, 0, ptr);
+               pr_err("Exported page[0] did not retain 0xc5!\n");
+               err = -EINVAL;
+               goto err;
+       }
+       dma_buf_kunmap(dmabuf, 0, ptr);
+
+       ptr = dma_buf_kmap(dmabuf, 2);
+       if (ptr) {
+               pr_err("Erroneously kmapped beyond the end of the object!\n");
+               dma_buf_kunmap(dmabuf, 2, ptr);
+               err = -EINVAL;
+               goto err;
+       }
+
+       ptr = dma_buf_kmap(dmabuf, -1);
+       if (ptr) {
+               pr_err("Erroneously kmapped before the start of the object!\n");
+               dma_buf_kunmap(dmabuf, -1, ptr);
+               err = -EINVAL;
+               goto err;
+       }
+
+       err = 0;
+err:
+       dma_buf_put(dmabuf);
+       return err;
+}
+
 int i915_gem_dmabuf_mock_selftests(void)
 {
        static const struct i915_subtest tests[] = {
@@ -279,6 +378,7 @@ int i915_gem_dmabuf_mock_selftests(void)
                SUBTEST(igt_dmabuf_import),
                SUBTEST(igt_dmabuf_import_ownership),
                SUBTEST(igt_dmabuf_export_vmap),
+               SUBTEST(igt_dmabuf_export_kmap),
        };
        struct drm_i915_private *i915;
        int err;