drm_clflush_virt_range(vaddr, args->size);
i915_gem_chipset_flush(to_i915(obj->base.dev));
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
return 0;
}
user_data += page_length;
offset += page_length;
}
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
mutex_lock(&i915->drm.struct_mutex);
out_unpin:
offset = 0;
}
- intel_fb_obj_flush(obj, false, ORIGIN_CPU);
+ intel_fb_obj_flush(obj, ORIGIN_CPU);
i915_gem_obj_finish_shmem_access(obj);
return ret;
}
if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
- intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
+ intel_fb_obj_flush(obj, write_origin(obj, I915_GEM_DOMAIN_GTT));
obj->base.write_domain = 0;
}
/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
__i915_gem_object_flush_for_display(obj);
- intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
+ intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
struct drm_i915_gem_object *obj =
container_of(active, typeof(*obj), frontbuffer_write);
- intel_fb_obj_flush(obj, true, ORIGIN_CS);
+ intel_fb_obj_flush(obj, ORIGIN_CS);
}
void i915_gem_object_init(struct drm_i915_gem_object *obj,
}
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire,
enum fb_op_origin origin,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- if (retire) {
+ if (origin == ORIGIN_CS) {
spin_lock(&dev_priv->fb_tracking.lock);
/* Filter out new bits since rendering started. */
frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
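For context, the masking shown above is what the old retire flag (now the origin == ORIGIN_CS test) guards: a flush driven by command-stream retirement may only clear bits that were already busy when rendering started, so frontbuffer writes that land mid-render keep their pending flush. The following is a minimal standalone sketch of that masking, illustrative userspace C rather than driver code, with made-up bit values:

#include <assert.h>

int main(void)
{
	unsigned int busy_bits = 0;
	unsigned int frontbuffer_bits;

	/* Rendering starts with planes 0 and 1 marked busy. */
	busy_bits |= 0x1 | 0x2;

	/* A later CPU write also dirties plane 2. */
	frontbuffer_bits = 0x1 | 0x2 | 0x4;

	/*
	 * On render completion (ORIGIN_CS) only the bits that were busy
	 * when rendering started are flushed; plane 2 keeps its pending
	 * flush for a later non-CS origin.
	 */
	frontbuffer_bits &= busy_bits;
	assert(frontbuffer_bits == (0x1 | 0x2));

	return 0;
}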
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire,
enum fb_op_origin origin,
unsigned int frontbuffer_bits);
/**
* intel_fb_obj_flush - flush frontbuffer object
* @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
* @origin: which operation caused the flush
*
* This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
+ * completed and frontbuffer caching can be started again.
*/
static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
- bool retire,
enum fb_op_origin origin)
{
unsigned int frontbuffer_bits;
if (!frontbuffer_bits)
return;
- __intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
+ __intel_fb_obj_flush(obj, origin, frontbuffer_bits);
}
#endif /* __INTEL_FRONTBUFFER_H__ */
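With this change callers pass only the write origin; whether the retirement path is taken is derived from ORIGIN_CS inside __intel_fb_obj_flush. A sketch of a caller using the new two-argument signature follows; the helper name and surrounding function are hypothetical, and it assumes the usual i915 includes are available:

#include "intel_frontbuffer.h"	/* assumed i915-internal include */

/* Hypothetical helper: flush frontbuffer state after a direct CPU write. */
static void example_finish_cpu_write(struct drm_i915_gem_object *obj)
{
	/* No retire flag any more; only the origin of the write is passed. */
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}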