#define HAVE_NETIF_QUEUE
-extern void __netif_schedule(struct net_device *dev);
+extern void __netif_schedule(struct netdev_queue *txq);
-static inline void netif_schedule(struct net_device *dev)
+static inline void netif_schedule_queue(struct netdev_queue *txq)
{
+ struct net_device *dev = txq->dev;
+
if (!test_bit(__LINK_STATE_XOFF, &dev->state))
- __netif_schedule(dev);
+ __netif_schedule(txq);
+}
+
+static inline void netif_schedule(struct net_device *dev)
+{
+ netif_schedule_queue(&dev->tx_queue);
}
/**
}
#endif
if (test_and_clear_bit(__LINK_STATE_XOFF, &dev->state))
- __netif_schedule(dev);
+ __netif_schedule(&dev->tx_queue);
}
/**
#endif
if (test_and_clear_bit(__LINK_STATE_XOFF,
&dev->egress_subqueue[queue_index].state))
- __netif_schedule(dev);
+ __netif_schedule(&dev->tx_queue);
#endif
}
}
-void __netif_schedule(struct net_device *dev)
+void __netif_schedule(struct netdev_queue *txq)
{
+ struct net_device *dev = txq->dev;
+
if (!test_and_set_bit(__LINK_STATE_SCHED, &dev->state)) {
- struct netdev_queue *txq = &dev->tx_queue;
- unsigned long flags;
struct softnet_data *sd;
+ unsigned long flags;
local_irq_save(flags);
sd = &__get_cpu_var(softnet_data);
qdisc_run(dev);
spin_unlock(&txq->lock);
} else {
- netif_schedule(dev);
+ netif_schedule_queue(txq);
}
}
}
spin_unlock_bh(&txq->lock);
/* we just requeued the all the frames that were in the removed
- * queue, and since we might miss a softirq we do netif_schedule.
+ * queue, and since we might miss a softirq we call netif_schedule_queue().
* ieee80211_wake_queue is not used here as this queue is not
* necessarily stopped */
- netif_schedule(local->mdev);
+ netif_schedule_queue(txq);
spin_lock_bh(&sta->lock);
*state = HT_AGG_STATE_IDLE;
sta->ampdu_mlme.addba_req_num[tid] = 0;
{
struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog,
timer);
- struct net_device *dev = qdisc_dev(wd->qdisc);
+ struct netdev_queue *txq = wd->qdisc->dev_queue;
wd->qdisc->flags &= ~TCQ_F_THROTTLED;
smp_wmb();
- netif_schedule(dev);
+ netif_schedule_queue(txq);
return HRTIMER_NORESTART;
}
}
sch->flags &= ~TCQ_F_THROTTLED;
- netif_schedule(qdisc_dev(sch));
+ netif_schedule_queue(sch->dev_queue);
return HRTIMER_NORESTART;
}
return q->q.qlen;
}
-static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
+static inline int dev_requeue_skb(struct sk_buff *skb,
struct netdev_queue *dev_queue,
struct Qdisc *q)
{
else
q->ops->requeue(skb, q);
- netif_schedule(dev);
+ netif_schedule_queue(dev_queue);
return 0;
}
* some time.
*/
__get_cpu_var(netdev_rx_stat).cpu_collision++;
- ret = dev_requeue_skb(skb, dev, dev_queue, q);
+ ret = dev_requeue_skb(skb, dev_queue, q);
}
return ret;
printk(KERN_WARNING "BUG %s code %d qlen %d\n",
dev->name, ret, q->q.qlen);
- ret = dev_requeue_skb(skb, dev, txq, q);
+ ret = dev_requeue_skb(skb, txq, q);
break;
}
* 2. we've been doing it for too long.
*/
if (need_resched() || jiffies != start_time) {
- netif_schedule(dev);
+ netif_schedule_queue(&dev->tx_queue);
break;
}
}