cfs_rq->throttled = 1;
cfs_rq->throttled_clock = rq_clock(rq);
raw_spin_lock(&cfs_b->lock);
- list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+ /*
+ * Add to the _head_ of the list, so that an already-started
+ * distribute_cfs_runtime will not see us
+ */
+ list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
if (!cfs_b->timer_active)
__start_cfs_bandwidth(cfs_b, false);
raw_spin_unlock(&cfs_b->lock);
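The comment above relies on a property of head insertion during list traversal: an iterator that has already advanced past the head can never reach a node linked in before its current position. A minimal userspace sketch of that property, using a plain singly-linked list as a stand-in for the kernel's RCU list (names and types here are illustrative, not kernel API):

/*
 * Userspace sketch only: a plain list standing in for list_add_rcu()
 * and list_for_each_entry_rcu(). Node names are invented.
 */
#include <stdio.h>

struct node { int id; struct node *next; };

static struct node head = { .id = -1, .next = NULL };	/* list head sentinel */

static void add_head(struct node *n)	/* analogue of list_add_rcu() */
{
	n->next = head.next;
	head.next = n;
}

int main(void)
{
	struct node a = { .id = 1, .next = NULL };
	struct node b = { .id = 2, .next = NULL };
	struct node *iter;

	add_head(&a);
	iter = head.next;	/* traversal already under way: at 'a' */
	add_head(&b);		/* linked in _before_ the iterator */

	for (; iter; iter = iter->next)
		printf("iterator sees node %d\n", iter->id);	/* only node 1 */
	return 0;
}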
static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
		u64 remaining, u64 expires)
{
struct cfs_rq *cfs_rq;
- u64 runtime = remaining;
+ u64 runtime;
+ u64 starting_runtime = remaining;
rcu_read_lock();
	list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
				throttled_list) {
		/* each throttled cfs_rq is topped up out of 'remaining' and unthrottled */
}
rcu_read_unlock();
- return remaining;
+ return starting_runtime - remaining;
}
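With this change distribute_cfs_runtime() reports how much it handed out rather than how much is left, so the caller can deduct that amount from the shared pool once it retakes cfs_b->lock. A hedged userspace sketch of that accounting, with a plain u64 'pool' standing in for cfs_b->runtime and an invented distribute() helper standing in for the real function:

/*
 * Userspace sketch only: 'pool' and distribute() are illustrative
 * stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
#define min(a, b) ((a) < (b) ? (a) : (b))

static u64 pool = 100;			/* stands in for cfs_b->runtime */

/* hands out up to 'grant' units; returns the amount actually consumed */
static u64 distribute(u64 grant)
{
	return grant / 2;		/* arbitrary partial consumption */
}

int main(void)
{
	u64 grant = pool;		/* snapshot taken before dropping the lock */
	u64 consumed = distribute(grant);

	pool = 10;			/* simulate racing groups draining the pool */

	/* lock retaken: clamp so a racing drain cannot underflow the pool */
	pool -= min(consumed, pool);
	printf("pool now %llu\n", (unsigned long long)pool);
	return 0;
}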
/* account preceding periods in which throttling occurred */
cfs_b->nr_throttled += overrun;
- /*
- * There are throttled entities so we must first use the new bandwidth
- * to unthrottle them before making it generally available. This
- * ensures that all existing debts will be paid before a new cfs_rq is
- * allowed to run.
- */
- runtime = cfs_b->runtime;
runtime_expires = cfs_b->runtime_expires;
- cfs_b->runtime = 0;
/*
- * This check is repeated as we are holding onto the new bandwidth
- * while we unthrottle. This can potentially race with an unthrottled
- * group trying to acquire new bandwidth from the global pool.
+ * This check is repeated as we are holding onto the new bandwidth while
+ * we unthrottle. This can potentially race with an unthrottled group
+ * trying to acquire new bandwidth from the global pool. This can result
+ * in us over-using our runtime if it is all used during this loop, but
+ * only by limited amounts in that extreme case.
*/
- while (throttled && runtime > 0) {
+ while (throttled && cfs_b->runtime > 0) {
+ runtime = cfs_b->runtime;
raw_spin_unlock(&cfs_b->lock);
/* we can't nest cfs_b->lock while distributing bandwidth */
		runtime = distribute_cfs_runtime(cfs_b, runtime,
						 runtime_expires);
raw_spin_lock(&cfs_b->lock);
throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+
+ cfs_b->runtime -= min(runtime, cfs_b->runtime);
}
- /* return (any) remaining runtime */
- cfs_b->runtime = runtime;
	/*
	 * While we are ensured activity in the period following an
	 * unthrottle, this also covers the case in which the new bandwidth is
	 * insufficient to cover the existing bandwidth deficit.  (Forcing the
	 * timer to remain active while there are any throttled entities.)
	 */
	cfs_b->idle = 0;
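To see why the reworked loop above cannot spin indefinitely, note that each pass re-reads cfs_b->runtime and deducts what was actually distributed, so the pool shrinks monotonically even if every group throttles again. A simplified single-threaded sketch (illustrative names only, not kernel code):

/*
 * Userspace sketch only: single-threaded, with invented stand-ins for
 * cfs_b->runtime and the throttled-list check.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	u64 pool = 100;		/* cfs_b->runtime */
	int throttled = 1;	/* !list_empty(&cfs_b->throttled_cfs_rq) */
	int passes = 0;

	while (throttled && pool > 0) {
		u64 grant = pool;	/* re-read the shared pool each pass */
		u64 consumed = grant;	/* worst case: all of it is handed out */

		passes++;
		/* some groups may throttle again, but the pool still shrinks */
		pool -= min(consumed, pool);
	}
	printf("loop ended after %d pass(es), pool=%llu\n",
	       passes, (unsigned long long)pool);
	return 0;
}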
return;
}
- if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
+ if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
runtime = cfs_b->runtime;
- cfs_b->runtime = 0;
- }
+
expires = cfs_b->runtime_expires;
	raw_spin_unlock(&cfs_b->lock);

	if (!runtime)
		return;

	runtime = distribute_cfs_runtime(cfs_b, runtime, expires);
raw_spin_lock(&cfs_b->lock);
if (expires == cfs_b->runtime_expires)
- cfs_b->runtime = runtime;
+		cfs_b->runtime -= min(runtime, cfs_b->runtime);
raw_spin_unlock(&cfs_b->lock);
}
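The final hunk keeps the slack timer honest about periods: the deduction is only applied if cfs_b->runtime_expires is unchanged, i.e. no period refresh swapped in a fresh pool while the lock was dropped. A small userspace sketch of that guard, with invented names standing in for the kernel structures:

/*
 * Userspace sketch only: 'pool', 'runtime_expires' and return_slack()
 * are illustrative stand-ins, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t u64;
#define min(a, b) ((a) < (b) ? (a) : (b))

static u64 pool = 50;			/* cfs_b->runtime */
static u64 runtime_expires = 7;		/* cfs_b->runtime_expires */

static void return_slack(u64 consumed, u64 expires_snapshot)
{
	/* in the real code the lock is retaken around this check */
	if (expires_snapshot == runtime_expires)
		pool -= min(consumed, pool);
	/* otherwise a period refresh replaced the pool; deduction is stale */
}

int main(void)
{
	u64 expires = runtime_expires;	/* snapshot before dropping the lock */
	u64 consumed = 30;		/* what distribute_cfs_runtime() handed out */

	runtime_expires = 8;		/* a period refresh races in */
	return_slack(consumed, expires);
	printf("pool=%llu (left untouched: stale snapshot)\n",
	       (unsigned long long)pool);
	return 0;
}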