 /* Handle tx done - called in softirq context. The <cause_tx_done> argument
  * must be a valid cause according to MVNETA_TXQ_INTR_MASK_ALL.
  */
-static u32 mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done,
-			      int *tx_todo)
+static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done)
 {
 	struct mvneta_tx_queue *txq;
-	u32 tx_done = 0;
 	struct netdev_queue *nq;
 
-	*tx_todo = 0;
 	while (cause_tx_done) {
 		txq = mvneta_tx_done_policy(pp, cause_tx_done);
 
 		nq = netdev_get_tx_queue(pp->dev, txq->id);
 		__netif_tx_lock(nq, smp_processor_id());
 
-		if (txq->count) {
-			tx_done += mvneta_txq_done(pp, txq);
-			*tx_todo += txq->count;
-		}
+		if (txq->count)
+			mvneta_txq_done(pp, txq);
 
 		__netif_tx_unlock(nq);
 		cause_tx_done &= ~((1 << txq->id));
 	}
-
-	return tx_done;
 }
 
 /* Compute crc8 of the specified address, using a unique algorithm ,
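
The loop above drains the <cause_tx_done> bitmask one queue at a time:
mvneta_tx_done_policy() maps the pending cause bits to a tx queue, completed
descriptors are reclaimed under that queue's netdev tx lock, and the queue's
bit is then cleared so the loop ends once every flagged queue has been
serviced. Below is a minimal standalone sketch of that bitmask walk; the
txq_stub type and the lowest-set-bit policy are illustrative assumptions, not
the driver's real mvneta_tx_queue or mvneta_tx_done_policy() behaviour.

#include <stdio.h>

#define NUM_TXQS 8

/* Hypothetical stand-in for struct mvneta_tx_queue. */
struct txq_stub {
	int id;
	int count;	/* completed descriptors waiting to be reclaimed */
};

/* Map a nonzero cause bitmask to a queue. Assumption: lowest set bit
 * wins; the real done-policy helper may choose differently.
 */
static struct txq_stub *tx_done_policy(struct txq_stub *txqs, unsigned int cause)
{
	return &txqs[__builtin_ctz(cause)];
}

int main(void)
{
	struct txq_stub txqs[NUM_TXQS];
	unsigned int cause_tx_done = 0x15;	/* queues 0, 2 and 4 flagged */

	for (int i = 0; i < NUM_TXQS; i++)
		txqs[i] = (struct txq_stub){ .id = i, .count = 4 * i };

	/* Same shape as the while loop in mvneta_tx_done_gbe(): pick a
	 * queue, service it, clear its bit, stop when the mask is empty.
	 */
	while (cause_tx_done) {
		struct txq_stub *txq = tx_done_policy(txqs, cause_tx_done);

		if (txq->count)
			printf("txq %d: reclaiming %d descriptors\n",
			       txq->id, txq->count);

		cause_tx_done &= ~(1u << txq->id);
	}
	return 0;
}
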
 	/* Release Tx descriptors */
 	if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) {
-		int tx_todo = 0;
-
-		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL), &tx_todo);
+		mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL));
 		cause_rx_tx &= ~MVNETA_TX_INTR_MASK_ALL;
 	}
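
As this second hunk shows, the caller (presumably mvneta_poll(), given
cause_rx_tx) already ignored the function's return value and never read
tx_todo after passing it in; dropping both removes dead bookkeeping and makes
mvneta_tx_done_gbe() a plain side-effecting call that only reclaims completed
tx descriptors.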