#define __I915_BO_ACTIVE(bo) \
((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
+ /**
+ * Have we taken a reference on the object for incomplete GPU
+ * activity?
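+ * If so, the extra reference is released when the object next becomes
+ * idle (all outstanding requests retired).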
+ */
+#define I915_BO_ACTIVE_REF (I915_BO_ACTIVE_SHIFT + I915_NUM_ENGINES)
+
/**
* This is set if the object has been written to since last bound
* to the GTT
return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
}
+static inline bool
+i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
+{
+ return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
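+/* Record that @obj holds an extra reference covering its outstanding GPU
+ * activity; the reference is dropped again once the object becomes idle.
+ */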
+static inline void
+i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __set_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+static inline void
+i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+ __clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
+}
+
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
+
static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
list_move_tail(&obj->global_list,
&request->i915->mm.bound_list);
- i915_gem_object_put(obj);
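+ /* The object is now idle; if a reference was deferred on its behalf,
+ * drop it now.
+ */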
+ if (i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_clear_active_reference(obj);
+ i915_gem_object_put(obj);
+ }
}
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
if (vma->vm->file == fpriv)
i915_vma_close(vma);
+
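+ /* If the object is still busy on the GPU when its handle is closed,
+ * take the deferred reference now so that it cannot be freed until
+ * the outstanding activity is retired.
+ */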
+ if (i915_gem_object_is_active(obj) &&
+ !i915_gem_object_has_active_reference(obj)) {
+ i915_gem_object_set_active_reference(obj);
+ i915_gem_object_get(obj);
+ }
mutex_unlock(&obj->base.dev->struct_mutex);
}
intel_runtime_pm_put(dev_priv);
}
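+/* Release the caller's reference to @obj unless the object is still active
+ * on the GPU, in which case ownership of that reference is transferred to
+ * I915_BO_ACTIVE_REF and it is dropped when the object retires.
+ */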
+void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
+{
+ lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+ GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
+ if (i915_gem_object_is_active(obj))
+ i915_gem_object_set_active_reference(obj);
+ else
+ i915_gem_object_put(obj);
+}
+
int i915_gem_suspend(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
list_for_each_entry_safe(obj, next,
&pool->cache_list[n],
batch_pool_link)
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
INIT_LIST_HEAD(&pool->cache_list[n]);
}
if (ce->ring)
intel_ring_free(ce->ring);
- i915_vma_put(ce->state);
+ __i915_gem_object_release_unless_active(ce->state->obj);
}
put_pid(ctx->pid);
* add the active reference first and queue for it to be dropped
* *last*.
*/
- if (!i915_gem_object_is_active(obj))
- i915_gem_object_get(obj);
i915_gem_object_set_active(obj, idx);
i915_gem_active_set(&obj->last_read[idx], req);
void i915_vma_unpin_and_release(struct i915_vma **p_vma)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(p_vma);
if (!vma)
return;
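+ /* Grab the object first; the vma may be freed once it is closed. */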
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ __i915_gem_object_release_unless_active(obj);
}
i915_vma_move_to_active(so.vma, req, 0);
err_unpin:
i915_vma_unpin(so.vma);
+ i915_vma_close(so.vma);
err_obj:
- i915_gem_object_put(obj);
+ __i915_gem_object_release_unless_active(obj);
return ret;
}
static void cleanup_status_page(struct intel_engine_cs *engine)
{
struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
vma = fetch_and_zero(&engine->status_page.vma);
if (!vma)
return;
+ obj = vma->obj;
+
i915_vma_unpin(vma);
- i915_gem_object_unpin_map(vma->obj);
- i915_vma_put(vma);
+ i915_vma_close(vma);
+
+ i915_gem_object_unpin_map(obj);
+ __i915_gem_object_release_unless_active(obj);
}
static int init_status_page(struct intel_engine_cs *engine)
void
intel_ring_free(struct intel_ring *ring)
{
- i915_vma_put(ring->vma);
+ struct drm_i915_gem_object *obj = ring->vma->obj;
+
+ i915_vma_close(ring->vma);
+ __i915_gem_object_release_unless_active(obj);
+
kfree(ring);
}