* accurate mappable working set.
*/
unsigned int fault_mappable:1;
- unsigned int pin_display:1;
/*
* Is the object to be mapped as read-only to the GPU
unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+ unsigned int pin_display;
+
struct sg_table *pages;
int pages_pin_count;
struct get_page {
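
The hunk above widens pin_display from a one-bit flag into a full unsigned int counter. As a rough illustration (toy type and names invented here, not the driver's struct drm_i915_gem_object), the extra width matters once the same object is pinned for display more than once:

#include <assert.h>
#include <stdio.h>

/* Hypothetical toy type, not the driver's object. */
struct toy_object {
	unsigned int pin_display_flag:1;	/* old scheme: a single bit */
	unsigned int pin_display;		/* new scheme: a counter */
};

int main(void)
{
	struct toy_object obj = { 0, 0 };

	/* Two independent users pin the same object for display. */
	obj.pin_display_flag = 1;  obj.pin_display++;
	obj.pin_display_flag = 1;  obj.pin_display++;

	/* The first user unpins: the bit already looks "done", the counter does not. */
	obj.pin_display_flag = 0;  obj.pin_display--;

	printf("flag=%u counter=%u\n",
	       (unsigned int)obj.pin_display_flag, obj.pin_display);
	assert(obj.pin_display == 1);	/* the second pin is still accounted for */
	return 0;
}
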
return ret;
}
-static bool is_pin_display(struct drm_i915_gem_object *obj)
-{
- struct i915_vma *vma;
-
- vma = i915_gem_obj_to_ggtt(obj);
- if (!vma)
- return false;
-
- /* There are 2 sources that pin objects:
- * 1. The display engine (scanouts, sprites, cursors);
- * 2. Reservations for execbuffer;
- *
- * We can ignore reservations as we hold the struct_mutex and
- * are only called outside of the reservation path.
- */
- return vma->pin_count;
-}
-
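
The removed is_pin_display() helper recomputed a yes/no answer from the GGTT vma's pin_count after the fact. With pin_display kept as a counter that is bumped in pin_to_display_plane() and dropped again in unpin_from_display_plane(), that inference is no longer needed. A small sketch of the two approaches, using hypothetical toy types in place of the real ones:

#include <assert.h>
#include <stdbool.h>

/* Toy stand-ins; names are hypothetical, not the driver's. */
struct toy_vma { unsigned int pin_count; };
struct toy_obj { struct toy_vma *ggtt_vma; unsigned int pin_display; };

/* Old approach: derive "pinned for display?" from the GGTT vma's pin count,
 * as the removed helper did. */
static bool recompute_pin_display(const struct toy_obj *obj)
{
	if (!obj->ggtt_vma)
		return false;
	return obj->ggtt_vma->pin_count != 0;
}

int main(void)
{
	struct toy_vma vma = { .pin_count = 2 };	/* e.g. two display users */
	struct toy_obj obj = { .ggtt_vma = &vma, .pin_display = 2 };

	/* One display user unpins. */
	vma.pin_count--;
	obj.pin_display--;

	/* The recomputed boolean only says "still pinned"; the explicit
	 * counter says how many display pins remain to be balanced. */
	assert(recompute_pin_display(&obj) == true);
	assert(obj.pin_display == 1);
	return 0;
}
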
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
const struct i915_ggtt_view *view)
{
u32 old_read_domains, old_write_domain;
- bool was_pin_display;
int ret;
if (pipelined != i915_gem_request_get_ring(obj->last_read_req)) {
/* Mark the pin_display early so that we account for the
* display coherency whilst setting up the cache domains.
*/
- was_pin_display = obj->pin_display;
- obj->pin_display = true;
+ obj->pin_display++;
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
return 0;
err_unpin_display:
- WARN_ON(was_pin_display != is_pin_display(obj));
- obj->pin_display = was_pin_display;
+ obj->pin_display--;
return ret;
}
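
The pin path above follows the usual "count early, unwind on error" shape: pin_display is incremented before the cache-domain setup, and the err_unpin_display label drops it again on any failure so the counter stays balanced. A self-contained sketch of that pattern (stub names such as toy_pin_to_display_plane() and set_cache_level_stub() are invented for illustration):

#include <errno.h>
#include <stdio.h>

static unsigned int pin_display;

static int set_cache_level_stub(int fail)
{
	return fail ? -ENOSPC : 0;	/* stand-in for the real setup steps */
}

static int toy_pin_to_display_plane(int fail)
{
	int ret;

	pin_display++;			/* account for the display pin early */

	ret = set_cache_level_stub(fail);
	if (ret)
		goto err_unpin_display;

	return 0;

err_unpin_display:
	pin_display--;			/* unwind so the count stays balanced */
	return ret;
}

int main(void)
{
	toy_pin_to_display_plane(1);	/* failure leaves the count at 0 */
	printf("after failed pin: %u\n", pin_display);
	toy_pin_to_display_plane(0);	/* success leaves one reference */
	printf("after successful pin: %u\n", pin_display);
	return 0;
}
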
i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
+ if (WARN_ON(obj->pin_display == 0))
+ return;
+
i915_gem_object_ggtt_unpin_view(obj, view);
- obj->pin_display = is_pin_display(obj);
+ obj->pin_display--;
}
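
On the unpin side, the new WARN_ON(obj->pin_display == 0) guard refuses to decrement past zero, catching an unpin that has no matching pin. A toy userspace equivalent of that guard (hypothetical names again, with fprintf standing in for the kernel's WARN):

#include <stdio.h>

static unsigned int pin_display;

static void toy_unpin_from_display_plane(void)
{
	if (pin_display == 0) {
		/* Unbalanced unpin: complain and bail out instead of underflowing. */
		fprintf(stderr, "WARN: unpin without a matching pin\n");
		return;
	}
	pin_display--;
}

int main(void)
{
	pin_display = 1;			/* one earlier successful pin */
	toy_unpin_from_display_plane();		/* balances it */
	toy_unpin_from_display_plane();		/* guard fires, counter stays at 0 */
	printf("pin_display = %u\n", pin_display);
	return 0;
}
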
int