drm/i915/selftests: Reorder request allocation vs vma pinning
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 4 Dec 2018 14:15:18 +0000 (14:15 +0000)
committer Chris Wilson <chris@chris-wilson.co.uk>
Tue, 4 Dec 2018 17:53:19 +0000 (17:53 +0000)
Impose the requirement that all vma are pinned for a request prior to
the request's allocation. This simplifies request construction and
should facilitate unravelling the lock interdependencies later.
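
For illustration, after this change gpu_write() follows roughly the shape
below (a condensed, hypothetical sketch rather than the literal diff; the
helper name sketch_submit is made up and the batch vma handling is elided).
igt_spinner_create_request() and hang_create_request() use the same pin-then-
allocate ordering but return the unsubmitted request to the caller on success:

	/* Hypothetical sketch: pin every vma before allocating the request. */
	static int sketch_submit(struct intel_engine_cs *engine,
				 struct i915_gem_context *ctx,
				 struct i915_vma *vma)
	{
		struct i915_request *rq;
		int err;

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			return err;

		rq = i915_request_alloc(engine, ctx);	/* request allocated last */
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto unpin;
		}

		err = i915_vma_move_to_active(vma, rq, 0);
		if (!err)
			err = engine->emit_bb_start(rq, vma->node.start,
						    PAGE_SIZE, 0);
		if (err)
			i915_request_skip(rq, err);	/* cancel the payload */
		i915_request_add(rq);			/* submit even when skipped */
	unpin:
		i915_vma_unpin(vma);
		return err;
	}

With the vma pinned up front, a failure in i915_request_alloc() or later only
has to unwind the pins; any failure after allocation is reported by skipping
the request rather than trying to unwind it.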

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20181204141522.13640-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/igt_spinner.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c

index 26c065c8d2c0a7e3b550dc96fa94ee611daae296..a0c7cbc212bad4db3d1c050e6b85f9ed6a95ceef 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -972,7 +972,6 @@ static int gpu_write(struct i915_vma *vma,
 {
        struct i915_request *rq;
        struct i915_vma *batch;
-       int flags = 0;
        int err;
 
        GEM_BUG_ON(!intel_engine_can_store_dword(engine));
@@ -981,14 +980,14 @@ static int gpu_write(struct i915_vma *vma,
        if (err)
                return err;
 
-       rq = i915_request_alloc(engine, ctx);
-       if (IS_ERR(rq))
-               return PTR_ERR(rq);
-
        batch = gpu_write_dw(vma, dword * sizeof(u32), value);
-       if (IS_ERR(batch)) {
-               err = PTR_ERR(batch);
-               goto err_request;
+       if (IS_ERR(batch))
+               return PTR_ERR(batch);
+
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
+               goto err_batch;
        }
 
        err = i915_vma_move_to_active(batch, rq, 0);
@@ -996,21 +995,21 @@ static int gpu_write(struct i915_vma *vma,
                goto err_request;
 
        i915_gem_object_set_active_reference(batch->obj);
-       i915_vma_unpin(batch);
-       i915_vma_close(batch);
 
-       err = engine->emit_bb_start(rq,
-                                   batch->node.start, batch->node.size,
-                                   flags);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        if (err)
                goto err_request;
 
-       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = engine->emit_bb_start(rq,
+                                   batch->node.start, batch->node.size,
+                                   0);
+err_request:
        if (err)
                i915_request_skip(rq, err);
-
-err_request:
        i915_request_add(rq);
+err_batch:
+       i915_vma_unpin(batch);
+       i915_vma_close(batch);
 
        return err;
 }
index 8cd34f6e6859d6882b0c2ac11b78d8566a8996c8..0e70df0230b83c1c9a976b8ae1937c3aef344868 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -68,48 +68,65 @@ static u64 hws_address(const struct i915_vma *hws,
        return hws->node.start + seqno_offset(rq->fence.context);
 }
 
-static int emit_recurse_batch(struct igt_spinner *spin,
-                             struct i915_request *rq,
-                             u32 arbitration_command)
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
 {
-       struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
+       int err;
+
+       err = i915_vma_move_to_active(vma, rq, flags);
+       if (err)
+               return err;
+
+       if (!i915_gem_object_has_active_reference(vma->obj)) {
+               i915_gem_object_get(vma->obj);
+               i915_gem_object_set_active_reference(vma->obj);
+       }
+
+       return 0;
+}
+
+struct i915_request *
+igt_spinner_create_request(struct igt_spinner *spin,
+                          struct i915_gem_context *ctx,
+                          struct intel_engine_cs *engine,
+                          u32 arbitration_command)
+{
+       struct i915_address_space *vm = &ctx->ppgtt->vm;
+       struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        u32 *batch;
        int err;
 
        vma = i915_vma_instance(spin->obj, vm, NULL);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
 
        hws = i915_vma_instance(spin->hws, vm, NULL);
        if (IS_ERR(hws))
-               return PTR_ERR(hws);
+               return ERR_CAST(hws);
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;
 
-       err = i915_vma_move_to_active(vma, rq, 0);
-       if (err)
+       rq = i915_request_alloc(engine, ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto unpin_hws;
-
-       if (!i915_gem_object_has_active_reference(vma->obj)) {
-               i915_gem_object_get(vma->obj);
-               i915_gem_object_set_active_reference(vma->obj);
        }
 
-       err = i915_vma_move_to_active(hws, rq, 0);
+       err = move_to_active(vma, rq, 0);
        if (err)
-               goto unpin_hws;
+               goto cancel_rq;
 
-       if (!i915_gem_object_has_active_reference(hws->obj)) {
-               i915_gem_object_get(hws->obj);
-               i915_gem_object_set_active_reference(hws->obj);
-       }
+       err = move_to_active(hws, rq, 0);
+       if (err)
+               goto cancel_rq;
 
        batch = spin->batch;
 
@@ -127,35 +144,18 @@ static int emit_recurse_batch(struct igt_spinner *spin,
 
        i915_gem_chipset_flush(spin->i915);
 
-       err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
+       err = engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+cancel_rq:
+       if (err) {
+               i915_request_skip(rq, err);
+               i915_request_add(rq);
+       }
 unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
-       return err;
-}
-
-struct i915_request *
-igt_spinner_create_request(struct igt_spinner *spin,
-                          struct i915_gem_context *ctx,
-                          struct intel_engine_cs *engine,
-                          u32 arbitration_command)
-{
-       struct i915_request *rq;
-       int err;
-
-       rq = i915_request_alloc(engine, ctx);
-       if (IS_ERR(rq))
-               return rq;
-
-       err = emit_recurse_batch(spin, rq, arbitration_command);
-       if (err) {
-               i915_request_add(rq);
-               return ERR_PTR(err);
-       }
-
-       return rq;
+       return err ? ERR_PTR(err) : rq;
 }
 
 static u32
index 40efbed611de8d80e8734e223d91fd5604e1d4f3..60a4bd9405be2e26f9dad7b8ad621b9fae9be101 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -103,52 +103,87 @@ static u64 hws_address(const struct i915_vma *hws,
        return hws->node.start + offset_in_page(sizeof(u32)*rq->fence.context);
 }
 
-static int emit_recurse_batch(struct hang *h,
-                             struct i915_request *rq)
+static int move_to_active(struct i915_vma *vma,
+                         struct i915_request *rq,
+                         unsigned int flags)
+{
+       int err;
+
+       err = i915_vma_move_to_active(vma, rq, flags);
+       if (err)
+               return err;
+
+       if (!i915_gem_object_has_active_reference(vma->obj)) {
+               i915_gem_object_get(vma->obj);
+               i915_gem_object_set_active_reference(vma->obj);
+       }
+
+       return 0;
+}
+
+static struct i915_request *
+hang_create_request(struct hang *h, struct intel_engine_cs *engine)
 {
        struct drm_i915_private *i915 = h->i915;
        struct i915_address_space *vm =
-               rq->gem_context->ppgtt ?
-               &rq->gem_context->ppgtt->vm :
-               &i915->ggtt.vm;
+               h->ctx->ppgtt ? &h->ctx->ppgtt->vm : &i915->ggtt.vm;
+       struct i915_request *rq = NULL;
        struct i915_vma *hws, *vma;
        unsigned int flags;
        u32 *batch;
        int err;
 
+       if (i915_gem_object_is_active(h->obj)) {
+               struct drm_i915_gem_object *obj;
+               void *vaddr;
+
+               obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+               if (IS_ERR(obj))
+                       return ERR_CAST(obj);
+
+               vaddr = i915_gem_object_pin_map(obj,
+                                               i915_coherent_map_type(h->i915));
+               if (IS_ERR(vaddr)) {
+                       i915_gem_object_put(obj);
+                       return ERR_CAST(vaddr);
+               }
+
+               i915_gem_object_unpin_map(h->obj);
+               i915_gem_object_put(h->obj);
+
+               h->obj = obj;
+               h->batch = vaddr;
+       }
+
        vma = i915_vma_instance(h->obj, vm, NULL);
        if (IS_ERR(vma))
-               return PTR_ERR(vma);
+               return ERR_CAST(vma);
 
        hws = i915_vma_instance(h->hws, vm, NULL);
        if (IS_ERR(hws))
-               return PTR_ERR(hws);
+               return ERR_CAST(hws);
 
        err = i915_vma_pin(vma, 0, 0, PIN_USER);
        if (err)
-               return err;
+               return ERR_PTR(err);
 
        err = i915_vma_pin(hws, 0, 0, PIN_USER);
        if (err)
                goto unpin_vma;
 
-       err = i915_vma_move_to_active(vma, rq, 0);
-       if (err)
+       rq = i915_request_alloc(engine, h->ctx);
+       if (IS_ERR(rq)) {
+               err = PTR_ERR(rq);
                goto unpin_hws;
-
-       if (!i915_gem_object_has_active_reference(vma->obj)) {
-               i915_gem_object_get(vma->obj);
-               i915_gem_object_set_active_reference(vma->obj);
        }
 
-       err = i915_vma_move_to_active(hws, rq, 0);
+       err = move_to_active(vma, rq, 0);
        if (err)
-               goto unpin_hws;
+               goto cancel_rq;
 
-       if (!i915_gem_object_has_active_reference(hws->obj)) {
-               i915_gem_object_get(hws->obj);
-               i915_gem_object_set_active_reference(hws->obj);
-       }
+       err = move_to_active(hws, rq, 0);
+       if (err)
+               goto cancel_rq;
 
        batch = h->batch;
        if (INTEL_GEN(i915) >= 8) {
@@ -213,52 +248,16 @@ static int emit_recurse_batch(struct hang *h,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+cancel_rq:
+       if (err) {
+               i915_request_skip(rq, err);
+               i915_request_add(rq);
+       }
 unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
-       return err;
-}
-
-static struct i915_request *
-hang_create_request(struct hang *h, struct intel_engine_cs *engine)
-{
-       struct i915_request *rq;
-       int err;
-
-       if (i915_gem_object_is_active(h->obj)) {
-               struct drm_i915_gem_object *obj;
-               void *vaddr;
-
-               obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
-               if (IS_ERR(obj))
-                       return ERR_CAST(obj);
-
-               vaddr = i915_gem_object_pin_map(obj,
-                                               i915_coherent_map_type(h->i915));
-               if (IS_ERR(vaddr)) {
-                       i915_gem_object_put(obj);
-                       return ERR_CAST(vaddr);
-               }
-
-               i915_gem_object_unpin_map(h->obj);
-               i915_gem_object_put(h->obj);
-
-               h->obj = obj;
-               h->batch = vaddr;
-       }
-
-       rq = i915_request_alloc(engine, h->ctx);
-       if (IS_ERR(rq))
-               return rq;
-
-       err = emit_recurse_batch(h, rq);
-       if (err) {
-               i915_request_add(rq);
-               return ERR_PTR(err);
-       }
-
-       return rq;
+       return err ? ERR_PTR(err) : rq;
 }
 
 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)