struct device *d = tp_to_dev(tp);
dma_addr_t mapping;
u32 opts[2], len;
+ bool stop_queue;
int frags;

if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) {
@@ ... @@

txd->opts2 = cpu_to_le32(opts[1]);

- netdev_sent_queue(dev, skb->len);
-
skb_tx_timestamp(skb);

/* Force memory writes to complete before releasing descriptor */
@@ ... @@

tp->cur_tx += frags + 1;

- RTL_W8(tp, TxPoll, NPQ);
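+ /* Stop the queue if the ring has no room for another maximally
+  * fragmented skb; done before the BQL update so a stopped queue
+  * forces the doorbell below even when xmit_more is set.
+  */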
+ stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS);
+ if (unlikely(stop_queue))
+         netif_stop_queue(dev);

- mmiowb();
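+ /* __netdev_sent_queue() returns true when the hardware needs to be
+  * kicked: this skb ends a batch (no xmit_more) or the queue was just
+  * stopped.
+  */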
+ if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) {
+         RTL_W8(tp, TxPoll, NPQ);
+         mmiowb();
+ }

- if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) {
-         /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
-          * not miss a ring update when it notices a stopped queue.
-          */
-         smp_wmb();
-         netif_stop_queue(dev);
+ if (unlikely(stop_queue)) {
          /* Sync with rtl_tx:
           * - publish queue status and cur_tx ring index (write barrier)
           * - refresh dirty_tx ring index (read barrier).