void freeze_workqueues_begin(void)
{
	struct worker_pool *pool;
+	struct workqueue_struct *wq;
+	struct pool_workqueue *pwq;
	int id;

	spin_lock_irq(&workqueue_lock);

	WARN_ON_ONCE(workqueue_freezing);
	workqueue_freezing = true;

+	/* set FREEZING */
	for_each_pool(pool, id) {
-		struct workqueue_struct *wq;
-
		spin_lock(&pool->lock);
-
		WARN_ON_ONCE(pool->flags & POOL_FREEZING);
		pool->flags |= POOL_FREEZING;
+		spin_unlock(&pool->lock);
+	}

-		list_for_each_entry(wq, &workqueues, list) {
-			struct pool_workqueue *pwq = get_pwq(pool->cpu, wq);
+	/* suppress further executions by setting max_active to zero */
+	list_for_each_entry(wq, &workqueues, list) {
+		if (!(wq->flags & WQ_FREEZABLE))
+			continue;

-			if (pwq && pwq->pool == pool &&
-			    (wq->flags & WQ_FREEZABLE))
-				pwq->max_active = 0;
+		for_each_pwq(pwq, wq) {
+			spin_lock(&pwq->pool->lock);
+			pwq->max_active = 0;
+			spin_unlock(&pwq->pool->lock);
		}
	}
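+
+	/*
+	 * With max_active at zero, newly queued work goes onto each
+	 * pwq's delayed list instead of a pool worklist, so nr_active
+	 * can only fall from here on; freeze_workqueues_busy() relies
+	 * on that for its lockless peek.
+	 */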

	spin_unlock_irq(&workqueue_lock);
}

/**
 * freeze_workqueues_busy - are freezable workqueues still busy?
 *
 * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
 * %true if some freezable workqueues are still busy.  %false if freezing
 * is complete.
 */
bool freeze_workqueues_busy(void)
{
-	unsigned int cpu;
	bool busy = false;
+	struct workqueue_struct *wq;
+	struct pool_workqueue *pwq;

	spin_lock_irq(&workqueue_lock);

	WARN_ON_ONCE(!workqueue_freezing);

-	for_each_wq_cpu(cpu) {
-		struct workqueue_struct *wq;
+	list_for_each_entry(wq, &workqueues, list) {
+		if (!(wq->flags & WQ_FREEZABLE))
+			continue;
		/*
		 * nr_active is monotonically decreasing. It's safe
		 * to peek without lock.
		 */
-		list_for_each_entry(wq, &workqueues, list) {
-			struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
-			if (!pwq || !(wq->flags & WQ_FREEZABLE))
-				continue;
-
+		for_each_pwq(pwq, wq) {
			WARN_ON_ONCE(pwq->nr_active < 0);
			if (pwq->nr_active) {
				busy = true;
				goto out_unlock;
			}
		}
	}
out_unlock:
	spin_unlock_irq(&workqueue_lock);
	return busy;
}

/**
 * thaw_workqueues - thaw workqueues
 *
 * Thaw workqueues.  Normal queueing is restored and all collected
 * frozen works are transferred to their respective pool worklists.
 *
 * CONTEXT:
 * Grabs and releases workqueue_lock and pool->lock's.
 */
void thaw_workqueues(void)
{
-	unsigned int cpu;
+	struct workqueue_struct *wq;
+	struct pool_workqueue *pwq;
+	struct worker_pool *pool;
+	int id;

	spin_lock_irq(&workqueue_lock);

	if (!workqueue_freezing)
		goto out_unlock;

-	for_each_wq_cpu(cpu) {
-		struct worker_pool *pool;
-		struct workqueue_struct *wq;
-
-		for_each_std_worker_pool(pool, cpu) {
-			spin_lock(&pool->lock);
-
-			WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
-			pool->flags &= ~POOL_FREEZING;
-
-			list_for_each_entry(wq, &workqueues, list) {
-				struct pool_workqueue *pwq = get_pwq(cpu, wq);
-
-				if (!pwq || pwq->pool != pool ||
-				    !(wq->flags & WQ_FREEZABLE))
-					continue;
-
-				/* restore max_active and repopulate worklist */
-				pwq_set_max_active(pwq, wq->saved_max_active);
-			}
+	/* clear FREEZING */
+	for_each_pool(pool, id) {
+		spin_lock(&pool->lock);
+		WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+		pool->flags &= ~POOL_FREEZING;
+		spin_unlock(&pool->lock);
+	}

-			wake_up_worker(pool);
+	/* restore max_active and repopulate worklist */
+	list_for_each_entry(wq, &workqueues, list) {
+		if (!(wq->flags & WQ_FREEZABLE))
+			continue;

-			spin_unlock(&pool->lock);
+		for_each_pwq(pwq, wq) {
+			spin_lock(&pwq->pool->lock);
+			pwq_set_max_active(pwq, wq->saved_max_active);
+			spin_unlock(&pwq->pool->lock);
		}
	}
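
+	/*
+	 * Pools may have gone completely idle while frozen, and
+	 * pwq_set_max_active() only refills the worklists without
+	 * waking anyone, so each pool needs a nudge below.
+	 */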
+	/* kick workers */
+	for_each_pool(pool, id) {
+		spin_lock(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock(&pool->lock);
+	}
+
	workqueue_freezing = false;
out_unlock:
	spin_unlock_irq(&workqueue_lock);
}
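
For orientation, here is a sketch of the intended caller side. In this era of the kernel, the PM freezer (try_to_freeze_tasks() and thaw_processes() in kernel/power/process.c) drives these helpers roughly as below; the helper name and timeout are illustrative assumptions, not part of this patch:

#include <linux/delay.h>	/* msleep() */
#include <linux/errno.h>	/* -EBUSY */
#include <linux/jiffies.h>	/* jiffies, HZ, time_after() */
#include <linux/workqueue.h>	/* freeze_workqueues_*(), thaw_workqueues() */

/* Hypothetical helper, for illustration only. */
static int freeze_wq_sketch(void)
{
	unsigned long end = jiffies + 20 * HZ;	/* arbitrary timeout */

	/* clamp every freezable pwq's max_active to zero */
	freeze_workqueues_begin();

	/* poll the lockless nr_active peek until in-flight works drain */
	while (freeze_workqueues_busy()) {
		if (time_after(jiffies, end)) {
			thaw_workqueues();	/* roll back on timeout */
			return -EBUSY;
		}
		msleep(10);
	}

	return 0;	/* frozen; thaw_workqueues() undoes this later */
}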