 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 	struct intel_timeline *tl, *tn;
+	unsigned long active_count = 0;
 	bool interruptible;
 	LIST_HEAD(free);

 	spin_lock(&timelines->lock);
 	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
-		if (!mutex_trylock(&tl->mutex))
+		if (!mutex_trylock(&tl->mutex)) {
+			active_count++; /* report busy to caller, try again? */
 			continue;
+		}

 		intel_timeline_get(tl);
 		GEM_BUG_ON(!atomic_read(&tl->active_count));

 		list_safe_reset_next(tl, tn, link);
 		if (atomic_dec_and_test(&tl->active_count))
 			list_del(&tl->link);
+		else
+			active_count += !!rcu_access_pointer(tl->last_request.fence);

 		mutex_unlock(&tl->mutex);
 	}
 	spin_unlock(&timelines->lock);

 	list_for_each_entry_safe(tl, tn, &free, link)
 		__intel_timeline_free(&tl->kref);

-	return list_empty(&timelines->active_list) ? 0 : timeout;
+	return active_count ? timeout : 0;
 }

 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout)
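For context, a minimal self-contained userspace sketch of the pattern the patch introduces: walk the active list, skip-but-count timelines whose mutex is busy, unlink the ones that have fully retired, and report "still active" to the caller as unconsumed timeout (0 means idle). Everything here (struct timeline, retire_timeout, wait_for_idle, the pthread/stdatomic locking) is an illustrative stand-in, not the i915 API; the real code pins and drops a timeline reference with atomic_dec_and_test and checks tl->last_request under RCU rather than doing a plain load, and consumes the timeout in an elided dma_fence wait.

/* Illustrative analogue only -- not i915 code. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

struct timeline {
	pthread_mutex_t mutex;
	atomic_int active_count;	/* requests still outstanding */
	struct timeline *next;
};

static struct timeline *active_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 0 once every timeline has retired, else the unconsumed timeout. */
static long retire_timeout(long timeout)
{
	unsigned long active = 0;
	struct timeline **prev = &active_list, *tl;

	pthread_mutex_lock(&list_lock);
	while ((tl = *prev)) {
		if (pthread_mutex_trylock(&tl->mutex)) {
			active++;	/* busy: report it, caller retries */
			prev = &tl->next;
			continue;
		}

		if (atomic_load(&tl->active_count) == 0) {
			*prev = tl->next;	/* fully retired: unlink */
		} else {
			active++;	/* work still queued on this timeline */
			prev = &tl->next;
		}

		pthread_mutex_unlock(&tl->mutex);
	}
	pthread_mutex_unlock(&list_lock);

	return active ? timeout : 0;
}

/* Caller side: poll until retire_timeout() reports idle (returns 0). */
static long wait_for_idle(long timeout)
{
	while ((timeout = retire_timeout(timeout)) > 0)
		sched_yield();
	return timeout;
}

int main(void)
{
	struct timeline tl = { .mutex = PTHREAD_MUTEX_INITIALIZER, .next = NULL };

	atomic_init(&tl.active_count, 0);	/* already idle */
	active_list = &tl;

	printf("wait_for_idle -> %ld\n", wait_for_idle(1000));	/* prints 0 */
	return 0;
}

The behavioural change mirrored here is the return value: the old list_empty() check could report idle while a busy (trylocked) timeline still carried requests, whereas counting such timelines in active_count keeps the caller polling until everything has genuinely retired.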