drm/i915: Rename ring->outstanding_lazy_request
author Chris Wilson <chris@chris-wilson.co.uk>
Wed, 4 Sep 2013 09:45:51 +0000 (10:45 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 5 Sep 2013 10:03:12 +0000 (12:03 +0200)
Prior to preallocating a request for lazy emission, rename the existing
field to make way (and to differentiate the seqno from the request struct).
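
For context, a minimal sketch of the intended separation: the renamed field
keeps holding only the lazily assigned seqno, leaving the old name free for a
pointer to a preallocated request struct in a follow-up patch. The request
type and pointer member below are illustrative assumptions, not part of this
patch; only outstanding_lazy_seqno is introduced here.

	/* Illustrative only, not kernel code: why the seqno gets its own name. */
	struct example_request;	/* stand-in for the request struct */

	struct example_ring {
		/* renamed in this patch: plainly just a sequence number */
		u32 outstanding_lazy_seqno;
		/* hypothetical follow-up: a preallocated request, distinct
		 * from the seqno, could now reuse the freed-up name */
		struct example_request *preallocated_lazy_request;
	};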

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index fdeecae058e12a3ca3b87d2e26af08ad70087f6d..858e78886637f93e194d17b2e14fd48d3e3002ec 100644 (file)
@@ -964,7 +964,7 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
        BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 
        ret = 0;
-       if (seqno == ring->outstanding_lazy_request)
+       if (seqno == ring->outstanding_lazy_seqno)
                ret = i915_add_request(ring, NULL);
 
        return ret;
@@ -2094,7 +2094,7 @@ int __i915_add_request(struct intel_ring_buffer *ring,
        }
 
        trace_i915_gem_request_add(ring, request->seqno);
-       ring->outstanding_lazy_request = 0;
+       ring->outstanding_lazy_seqno = 0;
 
        if (!dev_priv->ums.mm_suspended) {
                i915_queue_hangcheck(ring->dev);
index 460ee1026fcad63249e83af59da0353b40b04460..a83ff1863a5ed4a08c546aff8d86bdc4887f6e3c 100644 (file)
@@ -593,7 +593,7 @@ update_mboxes(struct intel_ring_buffer *ring,
 #define MBOX_UPDATE_DWORDS 4
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, mmio_offset);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_NOOP);
 }
 
@@ -629,7 +629,7 @@ gen6_add_request(struct intel_ring_buffer *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
@@ -723,7 +723,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
@@ -742,7 +742,7 @@ pc_render_add_request(struct intel_ring_buffer *ring)
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);
 
@@ -963,7 +963,7 @@ i9xx_add_request(struct intel_ring_buffer *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring, ring->outstanding_lazy_request);
+       intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        intel_ring_advance(ring);
 
@@ -1475,7 +1475,7 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
        int ret;
 
        /* We need to add any requests required to flush the objects and ring */
-       if (ring->outstanding_lazy_request) {
+       if (ring->outstanding_lazy_seqno) {
                ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
@@ -1495,10 +1495,10 @@ int intel_ring_idle(struct intel_ring_buffer *ring)
 static int
 intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
 {
-       if (ring->outstanding_lazy_request)
+       if (ring->outstanding_lazy_seqno)
                return 0;
 
-       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
+       return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
 }
 
 static int __intel_ring_begin(struct intel_ring_buffer *ring,
@@ -1545,7 +1545,7 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
-       BUG_ON(ring->outstanding_lazy_request);
+       BUG_ON(ring->outstanding_lazy_seqno);
 
        if (INTEL_INFO(ring->dev)->gen >= 6) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
index 68b1ca974d594dc483827fdc0dba724563a07f8d..c6aa2b3c8c268d061b467d6d12569c22d753424d 100644 (file)
@@ -140,7 +140,7 @@ struct  intel_ring_buffer {
        /**
         * Do we have some not yet emitted requests outstanding?
         */
-       u32 outstanding_lazy_request;
+       u32 outstanding_lazy_seqno;
        bool gpu_caches_dirty;
        bool fbc_dirty;
 
@@ -258,8 +258,8 @@ static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
 
 static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
 {
-       BUG_ON(ring->outstanding_lazy_request == 0);
-       return ring->outstanding_lazy_request;
+       BUG_ON(ring->outstanding_lazy_seqno == 0);
+       return ring->outstanding_lazy_seqno;
 }
 
 static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)