drm/i915: Remove duplicate members from submit_request
authorNick Hoath <nicholas.hoath@intel.com>
Thu, 15 Jan 2015 13:10:37 +0000 (13:10 +0000)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Tue, 27 Jan 2015 08:50:52 +0000 (09:50 +0100)
The GEM request and the execlist queue item each carried their own copies of
the tail, context and ring (engine) members. Use the copies held in the
request and remove the duplicates from the execlist queue item.

Issue: VIZ-4274

v1: Rebase
v2: Fixed build issues. Kept separate postfix & tail pointers as these are
used in different ways. Reinserted the missing full tail pointer update.

Signed-off-by: Nick Hoath <nicholas.hoath@intel.com>
Reviewed-by: Thomas Daniel <thomas.daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index f56cf627f8b6d164efad6040db93f4f85e5a6768..064efec2c7cb4d5d3b391d9c4d3d37af4bd39160 100644 (file)
@@ -1968,11 +1968,11 @@ static int i915_execlists(struct seq_file *m, void *data)
                if (head_req) {
                        struct drm_i915_gem_object *ctx_obj;
 
-                       ctx_obj = head_req->ctx->engine[ring_id].state;
+                       ctx_obj = head_req->request->ctx->engine[ring_id].state;
                        seq_printf(m, "\tHead request id: %u\n",
                                   intel_execlists_ctx_id(ctx_obj));
                        seq_printf(m, "\tHead request tail: %u\n",
-                                  head_req->tail);
+                                  head_req->request->tail);
                }
 
                seq_putc(m, '\n');
index 600a2250fe03bd77266e88724a1f49885b69e576..81d102abb9b59b5989ec941828db3cbf78fd8ec0 100644 (file)
@@ -2089,7 +2089,14 @@ struct drm_i915_gem_request {
        /** Position in the ringbuffer of the start of the request */
        u32 head;
 
-       /** Position in the ringbuffer of the end of the request */
+       /**
+        * Position in the ringbuffer of the start of the postfix.
+        * This is required to calculate the maximum available ringbuffer
+        * space without overwriting the postfix.
+        */
+        u32 postfix;
+
+       /** Position in the ringbuffer of the end of the whole request */
        u32 tail;
 
        /** Context related to this request */
index 6c403654e33a121c0d6c3028e1e3e467fef84b04..e6768d3392434ecdb98d25b3e0ed7dc511d933e3 100644 (file)
@@ -2453,7 +2453,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
        request_ring_position = intel_ring_get_tail(ringbuf);
 
        if (i915.enable_execlists) {
-               ret = ring->emit_request(ringbuf);
+               ret = ring->emit_request(ringbuf, request);
                if (ret)
                        return ret;
        } else {
@@ -2463,7 +2463,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
        }
 
        request->head = request_start;
-       request->tail = request_ring_position;
+       request->postfix = request_ring_position;
 
        /* Whilst this request exists, batch_obj will be on the
         * active_list, and so will hold the active reference. Only when this
@@ -2657,7 +2657,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                execlist_link);
                list_del(&submit_req->execlist_link);
                intel_runtime_pm_put(dev_priv);
-               i915_gem_context_unreference(submit_req->ctx);
+               i915_gem_context_unreference(submit_req->request->ctx);
                kfree(submit_req);
        }
 
@@ -2783,7 +2783,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                 * of tail of the request to update the last known position
                 * of the GPU head.
                 */
-               ringbuf->last_retired_head = request->tail;
+               ringbuf->last_retired_head = request->postfix;
 
                i915_gem_free_request(request);
        }
index be5c9908659b20ce0f6a5eacc18bfadb545d8052..48ddbf44c8629f34eb570a94e3e9d8572ae1511f 100644 (file)
@@ -1052,7 +1052,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
                        erq = &error->ring[i].requests[count++];
                        erq->seqno = request->seqno;
                        erq->jiffies = request->emitted_jiffies;
-                       erq->tail = request->tail;
+                       erq->tail = request->postfix;
                }
        }
 }
index 7992af808404ff1808c1addb1a7f21292aa0377e..5b6e3698ead3862b5c56b892d53300042c6ffb12 100644 (file)
@@ -417,7 +417,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
                                 execlist_link) {
                if (!req0) {
                        req0 = cursor;
-               } else if (req0->ctx == cursor->ctx) {
+               } else if (req0->request->ctx == cursor->request->ctx) {
                        /* Same ctx: ignore first request, as second request
                         * will update tail past first request's workload */
                        cursor->elsp_submitted = req0->elsp_submitted;
@@ -433,9 +433,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
 
        WARN_ON(req1 && req1->elsp_submitted);
 
-       execlists_submit_contexts(ring, req0->ctx, req0->tail,
-                                 req1 ? req1->ctx : NULL,
-                                 req1 ? req1->tail : 0);
+       execlists_submit_contexts(ring, req0->request->ctx, req0->request->tail,
+                                 req1 ? req1->request->ctx : NULL,
+                                 req1 ? req1->request->tail : 0);
 
        req0->elsp_submitted++;
        if (req1)
@@ -455,7 +455,7 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
 
        if (head_req != NULL) {
                struct drm_i915_gem_object *ctx_obj =
-                               head_req->ctx->engine[ring->id].state;
+                               head_req->request->ctx->engine[ring->id].state;
                if (intel_execlists_ctx_id(ctx_obj) == request_id) {
                        WARN(head_req->elsp_submitted == 0,
                             "Never submitted head request\n");
@@ -545,15 +545,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;
-       req->ctx = to;
-       i915_gem_context_reference(req->ctx);
 
        if (to != ring->default_context)
                intel_lr_context_pin(ring, to);
 
-       req->ring = ring;
-       req->tail = tail;
-
        if (!request) {
                /*
                 * If there isn't a request associated with this submission,
@@ -563,11 +558,13 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                request = kzalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;
-               request->ctx = to;
                request->ring = ring;
        }
+       request->ctx = to;
+       request->tail = tail;
        req->request = request;
        i915_gem_request_reference(request);
+       i915_gem_context_reference(req->request->ctx);
 
        intel_runtime_pm_get(dev_priv);
 
@@ -584,7 +581,7 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                                           struct intel_ctx_submit_request,
                                           execlist_link);
 
-               if (to == tail_req->ctx) {
+               if (to == tail_req->request->ctx) {
                        WARN(tail_req->elsp_submitted != 0,
                                "More than 2 already-submitted reqs queued\n");
                        list_del(&tail_req->execlist_link);
@@ -774,14 +771,14 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
        spin_unlock_irqrestore(&ring->execlist_lock, flags);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-               struct intel_context *ctx = req->ctx;
+               struct intel_context *ctx = req->request->ctx;
                struct drm_i915_gem_object *ctx_obj =
                                ctx->engine[ring->id].state;
 
                if (ctx_obj && (ctx != ring->default_context))
                        intel_lr_context_unpin(ring, ctx);
                intel_runtime_pm_put(dev_priv);
-               i915_gem_context_unreference(req->ctx);
+               i915_gem_context_unreference(ctx);
                i915_gem_request_unreference(req->request);
                list_del(&req->execlist_link);
                kfree(req);
index 7a82bc9b03a79ca0d7bf00f9a3bdfffb1eb84d6e..376c307f68358b67609b66b80c9ff54e57b717d0 100644 (file)
@@ -105,10 +105,6 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
  * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
  */
 struct intel_ctx_submit_request {
-       struct intel_context *ctx;
-       struct intel_engine_cs *ring;
-       u32 tail;
-
        struct list_head execlist_link;
 
        int elsp_submitted;
index 23020d67329b042a3cac7a30efd4092087a2e06a..b117717639fec8ef47bf1af54d618d9a3215ee22 100644 (file)
@@ -1949,7 +1949,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
                return 0;
 
        list_for_each_entry(request, &ring->request_list, list) {
-               if (__intel_ring_space(request->tail, ringbuf->tail,
+               if (__intel_ring_space(request->postfix, ringbuf->tail,
                                       ringbuf->size) >= n) {
                        break;
                }
index 6dbb6f4620074dd5b909a620c243690600c3ce04..32aa3f36a7963a07153bbdfeb043daaba7b85a4e 100644 (file)
@@ -239,7 +239,8 @@ struct  intel_engine_cs {
        struct list_head execlist_retired_req_list;
        u8 next_context_status_buffer;
        u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-       int             (*emit_request)(struct intel_ringbuffer *ringbuf);
+       int             (*emit_request)(struct intel_ringbuffer *ringbuf,
+                                       struct drm_i915_gem_request *request);
        int             (*emit_flush)(struct intel_ringbuffer *ringbuf,
                                      u32 invalidate_domains,
                                      u32 flush_domains);