struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-int __must_check i915_gem_flush_ring(struct intel_ring_buffer *ring,
-				     uint32_t invalidate_domains,
-				     uint32_t flush_domains);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
	 * is that the flush _must_ happen before the next request, no matter
	 * what.
	 */
-	if (ring->gpu_caches_dirty) {
-		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
-		if (ret)
-			return ret;
-
-		ring->gpu_caches_dirty = false;
-	}
+	ret = intel_ring_flush_all_caches(ring);
+	if (ret)
+		return ret;
	if (request == NULL) {
		request = kmalloc(sizeof(*request), GFP_KERNEL);
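
For illustration only, a minimal sketch of the ordering the comment above demands, assuming a hypothetical emit_request_sketch() stand-in for the rest of i915_add_request(); only the placement of intel_ring_flush_all_caches() mirrors this patch:

static int emit_request_sketch(struct intel_ring_buffer *ring);	/* hypothetical */

static int add_request_sketch(struct intel_ring_buffer *ring)
{
	int ret;

	/* Any writes the previous batch left in the GPU caches must land
	 * in memory before the new request is emitted; the helper is a
	 * no-op when ring->gpu_caches_dirty is already clear.
	 */
	ret = intel_ring_flush_all_caches(ring);
	if (ret)
		return ret;

	return emit_request_sketch(ring);
}
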
	return ret;
}
-int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
-{
-	int ret;
-
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
-		return 0;
-
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
-
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
	if (list_empty(&ring->active_list))
	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
-	ret = i915_gem_flush_ring(ring,
-				  I915_GEM_GPU_DOMAINS,
-				  ring->gpu_caches_dirty ? I915_GEM_GPU_DOMAINS : 0);
-	if (ret)
-		return ret;
-
-	ring->gpu_caches_dirty = false;
-	return 0;
+	return intel_ring_invalidate_all_caches(ring);
}
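
The execbuffer hunk above is the other half of the contract. As a hedged sketch, not the driver's actual submission code (dispatch_batch_sketch() is a hypothetical stand-in): read caches are always invalidated before a batch runs, dirty writes are flushed by the same ring->flush() call, and the execbuffer path elsewhere (not shown in these hunks) marks ring->gpu_caches_dirty once the batch has been emitted, so the flush before the next request does real work:

static int dispatch_batch_sketch(struct intel_ring_buffer *ring,
				 uint32_t offset);	/* hypothetical */

static int submit_batch_sketch(struct intel_ring_buffer *ring, uint32_t offset)
{
	int ret;

	/* The batch may read buffers the CPU has just written, so always
	 * invalidate; dirty GPU caches are flushed in the same call. */
	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;

	ret = dispatch_batch_sketch(ring, offset);
	if (ret)
		return ret;

	/* Remember that the batch may have dirtied the GPU caches so the
	 * next intel_ring_flush_all_caches() is not a no-op. */
	ring->gpu_caches_dirty = true;
	return 0;
}
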
static bool
	return intel_init_ring_buffer(dev, ring);
}
+
+int
+intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+{
+	int ret;
+
+	if (!ring->gpu_caches_dirty)
+		return 0;
+
+	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
+
+int
+intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+{
+	uint32_t flush_domains;
+	int ret;
+
+	flush_domains = 0;
+	if (ring->gpu_caches_dirty)
+		flush_domains = I915_GEM_GPU_DOMAINS;
+
+	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+	if (ret)
+		return ret;
+
+	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
+
+	ring->gpu_caches_dirty = false;
+	return 0;
+}
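
As a rough summary of what the two new helpers ask of the backend, a schematic (not a real implementation) of a ring->flush() callback honouring the two domain masks; real backends emit hardware-specific commands such as MI_FLUSH or PIPE_CONTROL where the comments stand:

static int sketch_flush(struct intel_ring_buffer *ring,
			uint32_t invalidate_domains, uint32_t flush_domains)
{
	if (flush_domains & I915_GEM_GPU_DOMAINS) {
		/* drain outstanding GPU writes to memory so that the CPU,
		 * or a later batch, observes them */
	}

	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
		/* drop stale read caches/TLB entries so the next batch
		 * picks up data written since the last invalidate */
	}

	return 0;	/* or a negative errno if emitting commands failed */
}
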
void intel_ring_advance(struct intel_ring_buffer *ring);
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
+int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);