#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
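For reference, the TXQ_UPDATE write carries two fields: the low byte adds new pending descriptors, and the DEC_SENT field at bits 23:16 (also capped at 255 by the new mask) releases descriptors the hardware has finished with, while TXQ_STATUS reports the sent count in bits 29:16. A minimal user-space sketch of how these fields pack and unpack, where txq_update_val and txq_sent_desc are illustrative helpers, not driver functions:

#include <stdint.h>
#include <stdio.h>

#define MVNETA_TXQ_DEC_SENT_SHIFT	16
#define MVNETA_TXQ_DEC_SENT_MASK	0xff
#define MVNETA_TXQ_SENT_DESC_SHIFT	16
#define MVNETA_TXQ_SENT_DESC_MASK	0x3fff0000

/* Compose a TXQ_UPDATE write: the low byte adds pending descriptors,
 * the DEC_SENT field releases descriptors the hardware has sent. */
static uint32_t txq_update_val(unsigned int pend, unsigned int sent)
{
	return (pend & MVNETA_TXQ_DEC_SENT_MASK) |
	       ((sent & MVNETA_TXQ_DEC_SENT_MASK) << MVNETA_TXQ_DEC_SENT_SHIFT);
}

/* Extract the sent-descriptor counter from a TXQ_STATUS read. */
static unsigned int txq_sent_desc(uint32_t status)
{
	return (status & MVNETA_TXQ_SENT_DESC_MASK) >> MVNETA_TXQ_SENT_DESC_SHIFT;
}

int main(void)
{
	printf("update word: 0x%08x\n", txq_update_val(3, 5));
	printf("sent count:  %u\n", txq_sent_desc(0x002a0000));
	return 0;
}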
* descriptor ring
*/
int count;
+ int pending;
int tx_stop_threshold;
int tx_wake_threshold;
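The new pending counter tracks descriptors that have been placed in the ring but not yet announced to the hardware, while count keeps everything in flight. A small illustrative model of the invariants the rest of the patch maintains (txq_model and txq_check are made up for illustration, not driver code):

#include <assert.h>

#define MVNETA_TXQ_DEC_SENT_MASK	0xff

/* Illustrative model only, not driver code. */
struct txq_model {
	int count;	/* descriptors in the ring, not yet freed */
	int pending;	/* subset not yet announced via TXQ_UPDATE */
};

static void txq_check(const struct txq_model *q)
{
	/* pending is always a subset of count, and one TXQ_UPDATE
	 * write can only announce up to 255 descriptors. */
	assert(q->pending >= 0 && q->pending <= q->count);
	assert(q->pending <= MVNETA_TXQ_DEC_SENT_MASK);
}

int main(void)
{
	struct txq_model q = { .count = 10, .pending = 4 };

	txq_check(&q);
	return 0;
}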
/* Only 255 descriptors can be added at once; assume the caller
 * keeps pend_desc plus txq->pending below 256.
 */
- val = pend_desc;
+ val = pend_desc + txq->pending;
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ txq->pending = 0;
}
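With this change, a flush announces both the caller's new descriptors and anything batched earlier in a single register write, then clears the batch. A user-space sketch of that behaviour, where reg_write stands in for mvreg_write and is purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for mvreg_write(); just logs the access. */
static void reg_write(uint32_t reg, uint32_t val)
{
	printf("reg 0x%04x <- %u\n", reg, val);
}

/* Mirrors the patched mvneta_txq_pend_desc_add(): announce the new
 * descriptors plus everything batched so far, then reset the batch. */
static void pend_desc_add(int *pending, int pend_desc, uint32_t update_reg)
{
	reg_write(update_reg, (uint32_t)(pend_desc + *pending));
	*pending = 0;
}

int main(void)
{
	int pending = 3;

	pend_desc_add(&pending, 2, 0x3c60);	/* announces 5 */
	return 0;
}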
/* Get pointer to the next TX descriptor to be processed (sent) by HW */
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
-
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+ else
+ txq->pending += frags;
+
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
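Taken together, the transmit path now defers the TXQ_UPDATE write while the stack promises more packets: descriptors are flushed immediately only when xmit_more is clear, when the queue has just been stopped, or when batching another packet would overflow the 8-bit update field. A compilable sketch of that decision, with should_flush as a hypothetical helper rather than a driver function:

#include <stdbool.h>
#include <stdio.h>

#define MVNETA_TXQ_DEC_SENT_MASK	0xff

/* Hypothetical helper mirroring the flush condition in mvneta_tx(). */
static bool should_flush(bool xmit_more, bool queue_stopped,
			 int pending, int frags)
{
	return !xmit_more || queue_stopped ||
	       pending + frags > MVNETA_TXQ_DEC_SENT_MASK;
}

int main(void)
{
	/* Batched: more packets coming, queue running, no overflow. */
	printf("%d\n", should_flush(true, false, 10, 3));	/* 0 */
	/* Flushed: adding 200 more would exceed 255. */
	printf("%d\n", should_flush(true, false, 100, 200));	/* 1 */
	return 0;
}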