drm/i915: Pin fence for iomap
Author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 9 Oct 2017 08:43:55 +0000 (09:43 +0100)
Committer: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 9 Oct 2017 16:07:29 +0000 (17:07 +0100)
Acquire the fence register for the iomap in i915_vma_pin_iomap() on
behalf of the caller.

We probably want the caller to specify whether the fence should be
pinned for their usage, but at the moment all callers do want the
associated fence, or none, so take it on their behalf.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009084401.29090-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/i915_vma.h
drivers/gpu/drm/i915/selftests/i915_gem_object.c

index 2bad88cc927b489785d323f7206a74e5569eb441..595209a2f1599fe5e5b8f034163348d2116c5e13 100644 (file)
@@ -280,13 +280,16 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
        void __iomem *ptr;
+       int err;
 
        /* Access through the GTT requires the device to be awake. */
        assert_rpm_wakelock_held(vma->vm->i915);
 
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
-       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
-               return IO_ERR_PTR(-ENODEV);
+       if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) {
+               err = -ENODEV;
+               goto err;
+       }
 
        GEM_BUG_ON(!i915_vma_is_ggtt(vma));
        GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
@@ -296,14 +299,38 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
                ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
-               if (ptr == NULL)
-                       return IO_ERR_PTR(-ENOMEM);
+               if (ptr == NULL) {
+                       err = -ENOMEM;
+                       goto err;
+               }
 
                vma->iomap = ptr;
        }
 
        __i915_vma_pin(vma);
+
+       err = i915_vma_get_fence(vma);
+       if (err)
+               goto err_unpin;
+
+       i915_vma_pin_fence(vma);
+
        return ptr;
+
+err_unpin:
+       __i915_vma_unpin(vma);
+err:
+       return IO_ERR_PTR(err);
+}
+
+void i915_vma_unpin_iomap(struct i915_vma *vma)
+{
+       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
+
+       GEM_BUG_ON(vma->iomap == NULL);
+
+       i915_vma_unpin_fence(vma);
+       i915_vma_unpin(vma);
 }
 
 void i915_vma_unpin_and_release(struct i915_vma **p_vma)
index c59ba76613a34fc49289bbbb032917621fb49111..864d84ab916dc0069274cd9de247025f24dc1fe1 100644 (file)
@@ -322,12 +322,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
  * Callers must hold the struct_mutex. This function is only valid to be
  * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
  */
-static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
-{
-       lockdep_assert_held(&vma->obj->base.dev->struct_mutex);
-       GEM_BUG_ON(vma->iomap == NULL);
-       i915_vma_unpin(vma);
-}
+void i915_vma_unpin_iomap(struct i915_vma *vma);
 
 static inline struct page *i915_vma_first_page(struct i915_vma *vma)
 {
index 8f011c447e4103464253d1cd1490065703f01bfb..1b8774a42e4872e307367600f69bf81a0d3cb682 100644 (file)
@@ -251,14 +251,6 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj,
                        return PTR_ERR(io);
                }
 
-               err = i915_vma_get_fence(vma);
-               if (err) {
-                       pr_err("Failed to get fence for partial view: offset=%lu\n",
-                              page);
-                       i915_vma_unpin_iomap(vma);
-                       return err;
-               }
-
                iowrite32(page, io + n * PAGE_SIZE/sizeof(*io));
                i915_vma_unpin_iomap(vma);