/*
* Worker for allocating per cpu stat for tgs. This is scheduled on the
- * system_nrt_wq once there are some groups on the alloc_list waiting for
+ * system_wq once there are some groups on the alloc_list waiting for
* allocation.
*/
static void tg_stats_alloc_fn(struct work_struct *work)
stats_cpu = alloc_percpu(struct tg_stats_cpu);
if (!stats_cpu) {
/* allocation failed, try again after some time */
- queue_delayed_work(system_nrt_wq, dwork,
- msecs_to_jiffies(10));
+ schedule_delayed_work(dwork, msecs_to_jiffies(10));
return;
}
}
*/
spin_lock_irqsave(&tg_stats_alloc_lock, flags);
list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
- queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
+ schedule_delayed_work(&tg_stats_alloc_work, 0);
spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
}
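
For readers following the conversion, here is a self-contained sketch of the retry pattern in the hunk above: a delayed work item that attempts a per-cpu allocation and, on failure, requeues itself on the default workqueue via schedule_delayed_work(). All demo_* names are illustrative, not part of the patch.

#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_stats { u64 nr_events; };
static struct demo_stats __percpu *demo_stats_cpu;

static void demo_stats_alloc_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct demo_stats __percpu *stats;

        stats = alloc_percpu(struct demo_stats);
        if (!stats) {
                /* allocation failed, try again on system_wq in 10ms */
                schedule_delayed_work(dwork, msecs_to_jiffies(10));
                return;
        }
        demo_stats_cpu = stats;
}
static DECLARE_DELAYED_WORK(demo_stats_alloc_work, demo_stats_alloc_fn);

schedule_delayed_work(dwork, delay) is simply queue_delayed_work(system_wq, dwork, delay), which is why the two forms are interchangeable throughout this patch.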
intv = disk_events_poll_jiffies(disk);
set_timer_slack(&ev->dwork.timer, intv / 4);
if (check_now)
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
else if (intv)
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
out_unlock:
spin_unlock_irqrestore(&ev->lock, flags);
}
spin_lock_irq(&ev->lock);
ev->clearing |= mask;
if (!ev->block)
- mod_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ mod_delayed_work(system_freezable_wq, &ev->dwork, 0);
spin_unlock_irq(&ev->lock);
}
/* unconditionally schedule event check and wait for it to finish */
disk_block_events(disk);
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
+ queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
flush_delayed_work(&ev->dwork);
__disk_unblock_events(disk, false);
intv = disk_events_poll_jiffies(disk);
if (!ev->block && intv)
- queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
+ queue_delayed_work(system_freezable_wq, &ev->dwork, intv);
spin_unlock_irq(&ev->lock);
}
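
The disk-events paths above combine two idioms worth spelling out: queueing with a zero delay forces the check to run as soon as possible, and flushing afterwards makes the caller wait for it to finish. A hedged sketch with hypothetical demo_* names:

#include <linux/workqueue.h>

static void demo_event_check_fn(struct work_struct *work)
{
        /* poll the device and update cached event state */
}
static DECLARE_DELAYED_WORK(demo_event_check, demo_event_check_fn);

/* schedule an immediate check and wait for it to finish */
static void demo_check_events_now(void)
{
        queue_delayed_work(system_freezable_wq, &demo_event_check, 0);
        flush_delayed_work(&demo_event_check);
}

The disk_clear_events() hunk instead uses mod_delayed_work(), which differs from queue_delayed_work() in that it also pulls the timer in when the work is already pending rather than leaving the old expiry in place.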
if (repoll)
- queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
+ schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
void drm_kms_helper_poll_disable(struct drm_device *dev)
}
if (poll)
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
+ schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
/* kill timer and schedule immediate execution, this doesn't block */
cancel_delayed_work(&dev->mode_config.output_poll_work);
if (drm_kms_helper_poll)
- queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
+ schedule_delayed_work(&dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
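
The hotplug helper above cancels the pending poll timer and requeues the same work with no delay; since nothing is flushed, the caller returns without blocking. A minimal sketch of the idiom, using hypothetical names:

#include <linux/workqueue.h>

static void demo_output_poll_fn(struct work_struct *work)
{
        /* probe connectors, send uevents on changes, requeue */
}
static DECLARE_DELAYED_WORK(demo_output_poll, demo_output_poll_fn);

/* kill the pending timer and run the poll as soon as possible */
static void demo_hpd_irq_event(void)
{
        cancel_delayed_work(&demo_output_poll);
        schedule_delayed_work(&demo_output_poll, 0);
}

mod_delayed_work(system_wq, &demo_output_poll, 0), already used in the disk_clear_events() hunk above, collapses the cancel/queue pair into a single call.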
/* schedule work only once, otherwise mark for reschedule */
static void wiiext_schedule(struct wiimote_ext *ext)
{
- queue_work(system_nrt_wq, &ext->worker);
+ schedule_work(&ext->worker);
}
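
The comment on wiiext_schedule() relies on a guarantee of the workqueue API: queueing a work item that is already pending is a no-op, so repeated calls schedule at most one execution. Sketch (demo_* names are illustrative):

#include <linux/printk.h>
#include <linux/workqueue.h>

static void demo_worker_fn(struct work_struct *work)
{
        /* handle the event; re-check driver state if needed */
}
static DECLARE_WORK(demo_worker, demo_worker_fn);

static void demo_schedule(void)
{
        /*
         * Returns false if demo_worker was already queued; either
         * way exactly one run is scheduled.
         */
        if (!schedule_work(&demo_worker))
                pr_debug("demo_worker already pending\n");
}

The "mark for reschedule" half of the comment presumably refers to driver-side state that the worker re-checks, not to anything the workqueue itself tracks.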
/*
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
- msecs_to_jiffies(host->clkgate_delay));
+ schedule_delayed_work(&host->clk_gate_work,
+ msecs_to_jiffies(host->clkgate_delay));
spin_unlock_irqrestore(&host->clk_lock, flags);
}
/* In theory, this can happen: if we don't get any buffers in
* then we will *never* try to fill again. */
if (still_empty)
- queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
+ schedule_delayed_work(&vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
}
/* Out of packets? */
/* Make sure we have some buffers: if oom use wq. */
if (!try_fill_recv(vi, GFP_KERNEL))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
virtnet_napi_enable(vi);
return 0;
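
All of the virtio-net refill sites follow one shape: try to post receive buffers, and if the allocation fails (OOM), defer another attempt to process context via the delayed refill work, either immediately or after half a second. A condensed sketch, with demo_try_fill() standing in for try_fill_recv():

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/workqueue.h>

static void demo_refill_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_refill, demo_refill_fn);

/* post receive buffers; returns false if allocation failed */
static bool demo_try_fill(gfp_t gfp)
{
        return true;
}

static void demo_refill_fn(struct work_struct *work)
{
        if (!demo_try_fill(GFP_KERNEL))
                /* still out of memory: back off for half a second */
                schedule_delayed_work(&demo_refill, HZ / 2);
}

In atomic context (the NAPI poll path) the driver uses GFP_ATOMIC and requeues with a zero delay instead of HZ/2.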
{
struct virtnet_info *vi = vdev->priv;
- queue_work(system_nrt_wq, &vi->config_work);
+ schedule_work(&vi->config_work);
}
static int init_vqs(struct virtnet_info *vi)
otherwise get link status from config. */
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
netif_carrier_off(dev);
- queue_work(system_nrt_wq, &vi->config_work);
+ schedule_work(&vi->config_work);
} else {
vi->status = VIRTIO_NET_S_LINK_UP;
netif_carrier_on(dev);
netif_device_attach(vi->dev);
if (!try_fill_recv(vi, GFP_KERNEL))
- queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+ schedule_delayed_work(&vi->refill, 0);
mutex_lock(&vi->config_lock);
vi->config_enable = true;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_freezable_wq;
-static inline struct workqueue_struct *__system_nrt_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_wq(void)
{
return system_wq;
}
-static inline struct workqueue_struct *__system_nrt_freezable_wq(void)
+static inline struct workqueue_struct * __deprecated __system_nrt_freezable_wq(void)
{
return system_freezable_wq;
}
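
These wrappers exist only so the old identifiers keep compiling while warning their remaining users; since system_wq itself is non-reentrant after this series, the *_nrt_* variants are plain aliases. The header presumably pairs the wrappers with defines along these lines (sketch):

/*
 * Old names resolve to the deprecated wrappers, so any remaining
 * reference to system_nrt_wq or system_nrt_freezable_wq still
 * builds but emits a compile-time deprecation warning.
 */
#define system_nrt_wq                   __system_nrt_wq()
#define system_nrt_freezable_wq         __system_nrt_freezable_wq()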
rcu_batch_queue(&sp->batch_queue, head);
if (!sp->running) {
sp->running = true;
- queue_delayed_work(system_nrt_wq, &sp->work, 0);
+ schedule_delayed_work(&sp->work, 0);
}
spin_unlock_irqrestore(&sp->queue_lock, flags);
}
}
if (pending)
- queue_delayed_work(system_nrt_wq, &sp->work, SRCU_INTERVAL);
+ schedule_delayed_work(&sp->work, SRCU_INTERVAL);
}
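
The SRCU hunks are the classic single-runner state machine: callbacks go onto a queue under a lock, the enqueue path kicks the work only when the machine is idle, and the work re-arms itself while anything is still pending. A reduced sketch with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define DEMO_INTERVAL   1       /* jiffies between passes */

static LIST_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);
static bool demo_running;

static void demo_run_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_run_work, demo_run_fn);

static void demo_enqueue(struct list_head *item)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        list_add_tail(item, &demo_queue);
        if (!demo_running) {
                /* machine is idle: start it immediately */
                demo_running = true;
                schedule_delayed_work(&demo_run_work, 0);
        }
        spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_run_fn(struct work_struct *work)
{
        bool pending;

        /* ... process one batch from demo_queue ... */

        spin_lock_irq(&demo_lock);
        pending = !list_empty(&demo_queue);
        if (!pending)
                demo_running = false;
        spin_unlock_irq(&demo_lock);

        if (pending)
                /* more work arrived meanwhile: re-arm ourselves */
                schedule_delayed_work(&demo_run_work, DEMO_INTERVAL);
}

Because the running flag is flipped under the same lock as the queue, an enqueue racing with the final pass either sees it still set (the work will re-arm) or sees it clear and starts the machine itself.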
/*
if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
kdebug("IMMEDIATE");
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
} else if (gc_at < key_gc_next_run) {
kdebug("DEFERRED");
key_gc_next_run = gc_at;
void key_schedule_gc_links(void)
{
set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
}
/*
set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
kdebug("schedule");
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
kdebug("sleep");
wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
}
if (gc_state & KEY_GC_REAP_AGAIN)
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
kleave(" [end %x]", gc_state);
return;
key_check(key);
if (atomic_dec_and_test(&key->usage))
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
}
}
EXPORT_SYMBOL(key_put);
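
key_put() illustrates the last of the conversions: dropping the final reference frees nothing in the caller's context, it just schedules the garbage-collection work, and plain system_wq now provides the non-reentrancy the old system_nrt_wq was used for. A minimal sketch of that deferred-cleanup pattern (hypothetical names):

#include <linux/atomic.h>
#include <linux/workqueue.h>

struct demo_obj {
        atomic_t usage;
        /* ... */
};

static void demo_gc_fn(struct work_struct *work)
{
        /* walk a global list and reap objects whose usage hit zero */
}
static DECLARE_WORK(demo_gc_work, demo_gc_fn);

static void demo_obj_put(struct demo_obj *obj)
{
        /* last reference gone: let the GC worker reap it later */
        if (atomic_dec_and_test(&obj->usage))
                schedule_work(&demo_gc_work);
}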