void intel_context_unpin(struct intel_context *ce)
{
- if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
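+ /* Only the final unpin tears down the pinned backing state */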
+ if (!atomic_dec_and_test(&ce->pin_count))
return;
- /* We may be called from inside intel_context_pin() to evict another */
- intel_context_get(ce);
- mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);
-
- if (likely(atomic_dec_and_test(&ce->pin_count))) {
- CE_TRACE(ce, "retire\n");
-
- ce->ops->unpin(ce);
-
- intel_context_active_release(ce);
- }
+ CE_TRACE(ce, "unpin\n");
+ ce->ops->unpin(ce);
- mutex_unlock(&ce->pin_mutex);
+ /*
+ * Once released, we may asynchronously drop the active reference.
+ * As that may be the only reference keeping the context alive,
+ * take an extra now so that it is not freed before we finish
+ * dereferencing it.
+ */
+ intel_context_get(ce);
+ intel_context_active_release(ce);
intel_context_put(ce);
}
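The comment block above is the heart of this hunk: intel_context_active_release() may drop what is otherwise the last reference keeping the context alive, so the function borrows an extra reference across it. A rough userspace C analogue of the same pattern is sketched below; it is illustrative only, and every type and helper in it (struct obj, obj_get(), obj_put(), obj_active_release(), obj_unpin()) is hypothetical rather than i915 API.

/*
 * Illustrative userspace sketch only -- not i915 code. The point it shows:
 * the release call may drop the last lifetime reference, so the caller
 * borrows one across it.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;	/* lifetime references, cf. ce->ref */
	atomic_int pin_count;	/* pin/usage counter, cf. ce->pin_count */
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);
}

static void obj_put(struct obj *o)
{
	/* Dropping the last reference frees the object */
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		printf("object freed\n");
		free(o);
	}
}

/* May drop a reference on the caller's behalf, cf. intel_context_active_release() */
static void obj_active_release(struct obj *o)
{
	obj_put(o);
}

static void obj_unpin(struct obj *o)
{
	/* Fast path: other pins remain, nothing to tear down */
	if (atomic_fetch_sub(&o->pin_count, 1) != 1)
		return;

	/*
	 * Last unpin: the release below may drop the final lifetime
	 * reference, so take a temporary one to keep the object valid
	 * until we have finished dereferencing it.
	 */
	obj_get(o);
	obj_active_release(o);
	obj_put(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (!o)
		return 1;

	atomic_init(&o->refcount, 1);	/* the "active" reference */
	atomic_init(&o->pin_count, 1);

	obj_unpin(o);	/* last unpin releases the active ref and frees */
	return 0;
}

The remaining hunks rework how i915_active_acquire_preallocate_barrier() builds and publishes its preallocated barrier nodes: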
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
intel_engine_mask_t tmp, mask = engine->mask;
+ struct llist_node *pos = NULL, *next, *last = NULL;
struct intel_gt *gt = engine->gt;
- struct llist_node *pos, *next;
int err;
GEM_BUG_ON(i915_active_is_idle(ref));
- GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+
+ /* Wait until the previous preallocation is completed */
+ while (!llist_empty(&ref->preallocated_barriers))
+ cond_resched();
/*
* Preallocate a node for each physical engine supporting the target
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
- llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
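+ /*
+  * Chain the new node onto the list we are building privately; the
+  * whole chain is published to ref->preallocated_barriers in a single
+  * llist_add_batch() below, so the llist never holds a partial batch.
+  */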
+ next = barrier_to_ll(node);
+ next->next = pos;
+ pos = next;
+ if (!last)
+ last = next;
intel_engine_pm_get(engine);
}
+ GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
+ llist_add_batch(pos, last, &ref->preallocated_barriers);
+
return 0;
unwind:
- llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
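+ /* Walk the private chain, undoing the references taken above */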
+ while (pos) {
struct active_node *node = barrier_from_ll(pos);
+ pos = pos->next;
+
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));