	if (netif_msg_tx_err(ag))
		pr_info("%s: tx timeout\n", ag->dev->name);
-	schedule_delayed_work(&ag->restart_work, 1);
+	schedule_work(&ag->restart_work);
}
static void ag71xx_restart_work_func(struct work_struct *work)
{
-	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work.work);
+	struct ag71xx *ag = container_of(work, struct ag71xx, restart_work);
	rtnl_lock();
	ag71xx_hw_disable(ag);
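/*
 * Editorial sketch (not part of the patch): why the container_of() line
 * changes.  A struct delayed_work embeds a struct work_struct, and the
 * handler is always called with a pointer to that embedded member, so the
 * delayed variant has to go through ".work" (or to_delayed_work()).  With
 * a plain work_struct, container_of() on the member itself is enough.
 * "struct foo" and the handler names below are hypothetical, for
 * illustration only.
 */
#include <linux/workqueue.h>

struct foo {
	struct work_struct plain_work;
	struct delayed_work slow_work;
};

static void foo_plain_fn(struct work_struct *work)
{
	/* plain work: the callback argument is the member itself */
	struct foo *f = container_of(work, struct foo, plain_work);

	(void)f;
}

static void foo_slow_fn(struct work_struct *work)
{
	/* delayed work: the callback still receives the embedded work_struct */
	struct foo *f = container_of(to_delayed_work(work), struct foo, slow_work);

	(void)f;
}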
		if (!flush && !ag71xx_desc_empty(desc)) {
			if (pdata->is_ar724x &&
			    ag71xx_check_dma_stuck(ag, ring->buf[i].timestamp))
-				schedule_delayed_work(&ag->restart_work, HZ / 2);
+				schedule_work(&ag->restart_work);
			break;
		}
	netdev_completed_queue(ag->dev, sent, bytes_compl);
	if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
		netif_wake_queue(ag->dev);
-	cancel_delayed_work(&ag->restart_work);
	return sent;
}
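/*
 * Editorial sketch (not part of the patch): the scheduling difference in
 * this hunk.  schedule_delayed_work() arms a timer and queues the handler
 * only after the given number of jiffies (HZ / 2 is half a second), which
 * is why the old completion path could still cancel it once the TX ring
 * made progress; schedule_work() queues the item for immediate execution,
 * which appears to be why the cancel is dropped here.  "bar_dev" and the
 * function names are hypothetical.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct bar_dev {
	struct work_struct restart_now;
	struct delayed_work restart_later;
};

static void bar_schedule_restart(struct bar_dev *bd, bool delayed)
{
	if (delayed)
		/* runs no earlier than half a second from now */
		schedule_delayed_work(&bd->restart_later, HZ / 2);
	else
		/* handed to the system workqueue right away */
		schedule_work(&bd->restart_now);
}

static void bar_tx_recovered(struct bar_dev *bd)
{
	/* only the delayed variant has a pending timer worth cancelling */
	cancel_delayed_work(&bd->restart_later);
}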
	dev->netdev_ops = &ag71xx_netdev_ops;
	dev->ethtool_ops = &ag71xx_ethtool_ops;
-	INIT_DELAYED_WORK(&ag->restart_work, ag71xx_restart_work_func);
+	INIT_WORK(&ag->restart_work, ag71xx_restart_work_func);
	init_timer(&ag->oom_timer);
	ag->oom_timer.data = (unsigned long) dev;
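/*
 * Editorial sketch (not part of the patch): the full life cycle of the
 * plain work item that INIT_WORK() sets up above.  The hunks shown here do
 * not include teardown; drivers normally cancel or flush a possibly
 * pending item with cancel_work_sync() before the structure embedding it
 * is freed.  "baz_dev" and the function names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct baz_dev {
	struct work_struct restart_work;
};

static void baz_restart_work_func(struct work_struct *work)
{
	struct baz_dev *bz = container_of(work, struct baz_dev, restart_work);

	/* recovery action would go here */
	(void)bz;
}

static struct baz_dev *baz_probe(void)
{
	struct baz_dev *bz = kzalloc(sizeof(*bz), GFP_KERNEL);

	if (!bz)
		return NULL;

	/* bind the handler once; unlike INIT_DELAYED_WORK() there is no timer */
	INIT_WORK(&bz->restart_work, baz_restart_work_func);
	return bz;
}

static void baz_remove(struct baz_dev *bz)
{
	/* wait out any queued or running instance before freeing */
	cancel_work_sync(&bz->restart_work);
	kfree(bz);
}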