 		dev->stats.tx_packets++;
 		/* Free the sk buffer associated with this TxBD */
-		dev_kfree_skb_irq(ugeth->
+		dev_kfree_skb(ugeth->
 				  tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]);
 		ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
 		ugeth->skb_dirtytx[txQ] =
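
The change from dev_kfree_skb_irq() to dev_kfree_skb() is safe because, with this patch, ucc_geth_tx() runs from the NAPI poll routine (softirq context) instead of the hard interrupt handler; the _irq variant is only needed when buffers are released in hard-IRQ context or with interrupts disabled. A minimal sketch of that context rule, roughly what the kernel's dev_kfree_skb_any() helper does (illustrative helper, not driver code):

#include <linux/hardirq.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Pick the free routine that is legal in the current context:
 * dev_kfree_skb() must not be called from hard-IRQ context or with IRQs
 * disabled; dev_kfree_skb_irq() defers the free to the NET_TX softirq. */
static void free_skb_any_context(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);		/* deferred, hard-IRQ safe */
	else
		dev_kfree_skb(skb);		/* immediate free */
}
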
 	for (i = 0; i < ug_info->numQueuesRx; i++)
 		howmany += ucc_geth_rx(ugeth, i, budget - howmany);
+	/* Tx event processing */
+	spin_lock(&ugeth->lock);
+	for (i = 0; i < ug_info->numQueuesTx; i++)
+		ucc_geth_tx(ugeth->ndev, i);
+	spin_unlock(&ugeth->lock);
+
 	if (howmany < budget) {
 		napi_complete(napi);
-		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS);
+		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
 	}
 	return howmany;
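
Taken together, the poll routine now reclaims transmitted buffers from every TX queue under ugeth->lock (presumably shared with the transmit path) and re-enables the RX and TX event interrupts only once the remaining work fits under the budget. A sketch of how ucc_geth_poll() reads with this hunk applied; the signature, the container_of() line and the howmany initialization are reconstructed around the quoted lines and should be treated as assumptions:

static int ucc_geth_poll(struct napi_struct *napi, int budget)
{
	struct ucc_geth_private *ugeth =
		container_of(napi, struct ucc_geth_private, napi);
	struct ucc_geth_info *ug_info = ugeth->ug_info;
	int howmany = 0;
	int i;

	/* RX: bounded by the NAPI budget */
	for (i = 0; i < ug_info->numQueuesRx; i++)
		howmany += ucc_geth_rx(ugeth, i, budget - howmany);

	/* TX completion: reclaim sent buffers from every TX queue */
	spin_lock(&ugeth->lock);
	for (i = 0; i < ug_info->numQueuesTx; i++)
		ucc_geth_tx(ugeth->ndev, i);
	spin_unlock(&ugeth->lock);

	if (howmany < budget) {
		/* all done: leave polling mode, unmask RX and TX events */
		napi_complete(napi);
		setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
	}

	return howmany;
}
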
 	struct ucc_geth_info *ug_info;
 	register u32 ucce;
 	register u32 uccm;
-	register u32 tx_mask;
-	u8 i;
 	ugeth_vdbg("%s: IN", __func__);
 	out_be32(uccf->p_ucce, ucce);
 	/* check for receive events that require processing */
-	if (ucce & UCCE_RX_EVENTS) {
+	if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
 		if (napi_schedule_prep(&ugeth->napi)) {
-			uccm &= ~UCCE_RX_EVENTS;
+			uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
 			out_be32(uccf->p_uccm, uccm);
 			__napi_schedule(&ugeth->napi);
 		}
 	}
-	/* Tx event processing */
-	if (ucce & UCCE_TX_EVENTS) {
-		spin_lock(&ugeth->lock);
-		tx_mask = UCC_GETH_UCCE_TXB0;
-		for (i = 0; i < ug_info->numQueuesTx; i++) {
-			if (ucce & tx_mask)
-				ucc_geth_tx(dev, i);
-			ucce &= ~tx_mask;
-			tx_mask <<= 1;
-		}
-		spin_unlock(&ugeth->lock);
-	}
-
 	/* Errors and other events */
 	if (ucce & UCCE_OTHER) {
 		if (ucce & UCC_GETH_UCCE_BSY)
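
With the TX loop removed, the hard interrupt handler only acknowledges the pending events, masks the RX/TX sources and schedules NAPI; error events are still handled inline. A condensed sketch of the handler after this hunk, where everything outside the quoted lines (signature, locals, the BSY accounting) is reconstructed and therefore an assumption:

static irqreturn_t ucc_geth_irq_handler(int irq, void *info)
{
	struct net_device *dev = info;
	struct ucc_geth_private *ugeth = netdev_priv(dev);
	struct ucc_fast_private *uccf = ugeth->uccf;
	register u32 ucce;
	register u32 uccm;

	ugeth_vdbg("%s: IN", __func__);

	/* read the enabled events and acknowledge them */
	ucce = in_be32(uccf->p_ucce);
	uccm = in_be32(uccf->p_uccm);
	ucce &= uccm;
	out_be32(uccf->p_ucce, ucce);

	/* RX and TX work are both deferred to ucc_geth_poll() */
	if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) {
		if (napi_schedule_prep(&ugeth->napi)) {
			uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS);
			out_be32(uccf->p_uccm, uccm);
			__napi_schedule(&ugeth->napi);
		}
	}

	/* errors and other events stay in hard-IRQ context */
	if (ucce & UCCE_OTHER) {
		if (ucce & UCC_GETH_UCCE_BSY)
			dev->stats.rx_errors++;	/* assumed accounting */
		/* ... */
	}

	return IRQ_HANDLED;
}
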
 	dev->netdev_ops = &ucc_geth_netdev_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 	INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
-	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT);
+	netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
 	dev->mtu = 1500;
 	ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
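
The last argument of netif_napi_add() is the NAPI weight, i.e. the largest budget a single ucc_geth_poll() call may be given; this hunk replaces UCC_GETH_DEV_WEIGHT with a plain 64, the conventional weight used by most Ethernet drivers. A small sketch of the register/enable/disable lifecycle that this registration plugs into (the example_* names and bodies are illustrative, not ucc_geth code):

#include <linux/netdevice.h>

struct example_priv {
	struct napi_struct napi;
};

/* probe time: attach the poll routine with a per-call budget of 64 */
static void example_setup_napi(struct net_device *dev,
			       int (*poll)(struct napi_struct *, int))
{
	struct example_priv *priv = netdev_priv(dev);

	netif_napi_add(dev, &priv->napi, poll, 64);
}

/* ndo_open: allow the ISR to schedule the poller */
static int example_open(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_enable(&priv->napi);
	/* ... bring up the hardware, request the interrupt ... */
	return 0;
}

/* ndo_stop: napi_disable() waits for an in-flight poll to finish */
static int example_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	napi_disable(&priv->napi);
	/* ... quiesce the hardware, free the interrupt ... */
	return 0;
}
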