{
struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
assert_spin_locked(&ring->execlist_lock);
			/* Same ctx: ignore first request, as second request
			 * will update tail past first request's workload */
cursor->elsp_submitted = req0->elsp_submitted;
list_del(&req0->execlist_link);
- queue_work(dev_priv->wq, &req0->work);
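+			/* Freeing is deferred: park the request on the
+			 * retired list, to be reaped later by
+			 * intel_execlists_retire_requests(). */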
+ list_add_tail(&req0->execlist_link,
+ &ring->execlist_retired_req_list);
req0 = cursor;
} else {
req1 = cursor;
static bool execlists_check_remove_request(struct intel_engine_cs *ring,
u32 request_id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
struct intel_ctx_submit_request *head_req;
assert_spin_locked(&ring->execlist_lock);
if (--head_req->elsp_submitted <= 0) {
list_del(&head_req->execlist_link);
- queue_work(dev_priv->wq, &head_req->work);
+ list_add_tail(&head_req->execlist_link,
+ &ring->execlist_retired_req_list);
return true;
}
}
	I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
		   ((u32)ring->next_context_status_buffer & 0x07) << 8);
}
-static void execlists_free_request_task(struct work_struct *work)
-{
- struct intel_ctx_submit_request *req =
- container_of(work, struct intel_ctx_submit_request, work);
- struct drm_device *dev = req->ring->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- intel_runtime_pm_put(dev_priv);
-
- mutex_lock(&dev->struct_mutex);
- i915_gem_context_unreference(req->ctx);
- mutex_unlock(&dev->struct_mutex);
-
- kfree(req);
-}
-
static int execlists_context_queue(struct intel_engine_cs *ring,
struct intel_context *to,
u32 tail)
i915_gem_context_reference(req->ctx);
req->ring = ring;
req->tail = tail;
- INIT_WORK(&req->work, execlists_free_request_task);
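+	/* Dropped again in intel_execlists_retire_requests() */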
intel_runtime_pm_get(dev_priv);
WARN(tail_req->elsp_submitted != 0,
"More than 2 already-submitted reqs queued\n");
list_del(&tail_req->execlist_link);
- queue_work(dev_priv->wq, &tail_req->work);
+ list_add_tail(&tail_req->execlist_link,
+ &ring->execlist_retired_req_list);
}
}
return 0;
}
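+/**
+ * intel_execlists_retire_requests() - reap the retired execlist requests
+ * @ring: the engine whose retired request list is drained
+ *
+ * Splices the retired list off the engine under the execlist lock, then
+ * releases the context and runtime-pm references taken at queue time and
+ * frees each request. Must be called with struct_mutex held.
+ */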
+void intel_execlists_retire_requests(struct intel_engine_cs *ring)
+{
+ struct intel_ctx_submit_request *req, *tmp;
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ unsigned long flags;
+ struct list_head retired_list;
+
+ WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+ if (list_empty(&ring->execlist_retired_req_list))
+ return;
+
+ INIT_LIST_HEAD(&retired_list);
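+	/* Take the whole retired list private while holding the lock,
+	 * so the entries can be freed without it. */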
+ spin_lock_irqsave(&ring->execlist_lock, flags);
+ list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+ spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
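+	/* Drop the queue-time references and free each request */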
+ list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+ intel_runtime_pm_put(dev_priv);
+ i915_gem_context_unreference(req->ctx);
+ list_del(&req->execlist_link);
+ kfree(req);
+ }
+}
+
void intel_logical_ring_stop(struct intel_engine_cs *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
init_waitqueue_head(&ring->irq_queue);
INIT_LIST_HEAD(&ring->execlist_queue);
+ INIT_LIST_HEAD(&ring->execlist_retired_req_list);
spin_lock_init(&ring->execlist_lock);
ring->next_context_status_buffer = 0;