* Note this requires that we are always called in request
* completion order.
*/
+ list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
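
For context on what the retire hunk above feeds: a retired request drops off its ring's list, and its postfix becomes last_retired_head, which the space accounting later folds into its head estimate. The following is a minimal user-space sketch of that bookkeeping, not the driver's code; the head and space fields, the -1 sentinel and the helper names are assumptions made for illustration (only postfix, last_retired_head, tail, size and intel_ring_update_space() appear in the patch itself).

#include <assert.h>

/* Sketch only: a user-space model of the ring-space bookkeeping. */
struct ring {
	unsigned int head;     /* last position the GPU is known to have read past */
	unsigned int tail;     /* next position the CPU will write to */
	unsigned int size;     /* total size of the circular buffer */
	unsigned int space;    /* cached free space between tail and head */
	int last_retired_head; /* postfix of the most recently retired request, -1 if none */
};

/* Free bytes between tail and head, wrapping around the ring. */
static unsigned int ring_space(unsigned int head, unsigned int tail, unsigned int size)
{
	int space = (int)head - (int)tail;
	if (space <= 0)
		space += size;
	return space;
}

/* Fold the most recent retirement into the head estimate, then recompute space. */
static void ring_update_space(struct ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}
	ring->space = ring_space(ring->head, ring->tail, ring->size);
}

int main(void)
{
	struct ring r = { .head = 0, .tail = 3072, .size = 4096, .last_retired_head = -1 };

	ring_update_space(&r);
	assert(r.space == 1024);

	/* A request whose commands started at offset 2048 retires: its postfix
	 * becomes the new head estimate and the free space grows. */
	r.last_retired_head = 2048;
	ring_update_space(&r);
	assert(r.space == 3072);
	return 0;
}
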
request->previous_seqno = engine->last_submitted_seqno;
smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
list_add_tail(&request->link, &engine->request_list);
+ list_add_tail(&request->ring_link, &ring->request_list);
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
* position of the head.
*/
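
The comment above is making a correctness argument: the recorded position can only trail wherever the GPU has actually read up to, so a free-space figure derived from it is an under-estimate and never unsafe. A small sketch of that inequality; every number and helper below is invented for illustration.

#include <assert.h>

/* Free bytes between tail and a head position, wrapping around the ring. */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space;
}

int main(void)
{
	int size = 4096, tail = 1024;
	int postfix = 2048;     /* recorded start of the retired request */
	int actual_head = 2560; /* wherever the GPU has really read up to */

	/* The recorded position trails the real head, so the space computed
	 * from it can never exceed the space that is genuinely free. */
	assert(ring_space(postfix, tail, size) <= ring_space(actual_head, tail, size));
	return 0;
}
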
/** engine->request_list entry for this request */
struct list_head link;
+ /** ring->request_list entry for this request */
+ struct list_head ring_link;
+
struct drm_i915_file_private *file_priv;
/** file_priv list entry for this request */
struct list_head client_list;
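
With the new member, a request sits on two intrusive lists at once: the engine-wide request_list via link and its own ring's request_list via ring_link, at the cost of two pointers per membership and no extra allocation. Below is a minimal user-space sketch of that pattern; only the member names link and ring_link come from the patch, and the list helpers are stripped-down stand-ins for the kernel's <linux/list.h>.

#include <stddef.h>
#include <stdio.h>

/* Stripped-down stand-ins for the kernel's list helpers (sketch only). */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* One request, two list memberships: the engine's timeline and its ring. */
struct request {
	int seqno;
	struct list_head link;      /* engine->request_list */
	struct list_head ring_link; /* ring->request_list */
};

int main(void)
{
	struct list_head engine_requests = LIST_HEAD_INIT(engine_requests);
	struct list_head ring_requests   = LIST_HEAD_INIT(ring_requests);
	struct request rq = { .seqno = 1 };

	/* Mirrors the two list_add_tail() calls in the submission hunk above. */
	list_add_tail(&rq.link, &engine_requests);
	list_add_tail(&rq.ring_link, &ring_requests);

	/* Either list leads back to the same request via container_of(),
	 * which is what list_for_each_entry() does under the hood. */
	struct request *a = container_of(engine_requests.next, struct request, link);
	struct request *b = container_of(ring_requests.next, struct request, ring_link);
	printf("%d %d\n", a->seqno, b->seqno); /* prints: 1 1 */
	return 0;
}

Because the nodes are embedded in the request itself, dropping it from either list in the retire path is O(1) and needs no lookup, which is why the retire hunk above is a single list_del().
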
ring->engine = engine;
list_add(&ring->link, &engine->buffers);
+ INIT_LIST_HEAD(&ring->request_list);
+
ring->size = size;
/* Workaround an erratum on the i830 which causes a hang if
* the TAIL pointer points to within the last 2 cachelines
* of the buffer.
*/
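
The workaround comment above refers to an i830-era erratum: the chip can hang if the ring's TAIL pointer lands within the last two cachelines of the buffer. The usual mitigation, and roughly what the driver does, is to advertise a slightly smaller usable ring size so a legal tail can never end up there. A sketch of that adjustment follows; the function name, the erratum flag and the CACHELINE_BYTES value are assumptions for illustration, not the driver's definitions.

#include <assert.h>

#define CACHELINE_BYTES 64u

/* Sketch: trim the usable size of the ring so a legal tail can never fall
 * in the last two cachelines. Names here are illustrative. */
static unsigned int usable_ring_size(unsigned int size, int has_tail_erratum)
{
	if (has_tail_erratum)
		return size - 2 * CACHELINE_BYTES;
	return size;
}

int main(void)
{
	/* A 4 KiB ring on an affected part loses the last two cachelines. */
	assert(usable_ring_size(4096, 1) == 4096 - 128);
	assert(usable_ring_size(4096, 0) == 4096);
	return 0;
}
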
static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
{
struct intel_ring *ring = req->ring;
- struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *target;
intel_ring_update_space(ring);
if (ring->space >= bytes)
	return 0;

GEM_BUG_ON(!req->reserved_space);
- list_for_each_entry(target, &engine->request_list, link) {
+ list_for_each_entry(target, &ring->request_list, ring_link) {
unsigned space;
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- if (target->ring != ring)
- continue;
-
/* Would completion of this request free enough space? */
space = __intel_ring_space(target->postfix, ring->tail,
ring->size);
if (space >= bytes)
	break;
}
- if (WARN_ON(&target->link == &engine->request_list))
+ if (WARN_ON(&target->ring_link == &ring->request_list))
return -ENOSPC;
return i915_wait_request(target);
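
This is the payoff of the per-ring list: every entry on ring->request_list belongs to this ring and is stored in submission order, so wait_for_space() becomes a plain front-to-back scan for the first request whose retirement would free at least bytes, with -ENOSPC if even the last one is not enough. The sketch below models that selection in user space; the circular-space arithmetic mirrors the __intel_ring_space() call above, while the array representation and the function names are purely illustrative.

#include <stdio.h>

/* Circular distance from tail forward to head: the space that would be free
 * if the GPU head were at 'head' (sketch of the arithmetic behind the
 * __intel_ring_space() call above). */
static int ring_space(int head, int tail, int size)
{
	int space = head - tail;
	if (space <= 0)
		space += size;
	return space;
}

/* postfix[] holds the start position of each still-pending request on this
 * ring, oldest first, the order ring->request_list now guarantees. Returns
 * the index of the first request whose retirement frees enough space, or -1
 * for the -ENOSPC case. */
static int pick_request_to_wait_on(const int *postfix, int count,
				   int tail, int size, int bytes)
{
	for (int i = 0; i < count; i++)
		if (ring_space(postfix[i], tail, size) >= bytes)
			return i;
	return -1;
}

int main(void)
{
	const int postfix[] = { 512, 1536, 2560 };

	/* 4 KiB ring, tail at 3072, need 2048 bytes: retiring up to the second
	 * request (postfix 1536) is the first point at which enough space
	 * opens up, so index 1 is printed. */
	printf("%d\n", pick_request_to_wait_on(postfix, 3, 3072, 4096, 2048));
	return 0;
}

Note that the WARN_ON after the loop relies on list_for_each_entry() leaving target pointing at the list head itself when no entry satisfied the break condition, which is why the sentinel comparison changes from &engine->request_list to &ring->request_list along with the list being walked.
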