int len;
int i;
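+ /* (re)initialize the ring: every descriptor starts out empty and
+  * CPU-owned; any skb left over from a previous run is reported and
+  * cleared in the loop below
+  */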
len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
memset(re->tx, 0, len);
for (i = 0; i < NUM_TX_DESC; i++) {
+ struct raeth_tx_info *txi;
struct ramips_tx_dma *txd;
+
txd = &re->tx[i];
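+ /* preset the routing bits (queue 3, port 1) and TX_DMA_DONE, which
+  * marks the slot as already completed, i.e. owned by the CPU
+  */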
txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;
- if (re->tx_skb[i] != NULL) {
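+ /* each tx_info entry keeps a pointer to its descriptor so the
+  * xmit and housekeeping paths can use tx_info alone
+  */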
+ txi = &re->tx_info[i];
+ txi->tx_desc = txd;
+ if (txi->tx_skb != NULL) {
netdev_warn(re->netdev,
"dirty skb for TX desc %d\n", i);
- re->tx_skb[i] = NULL;
+ txi->tx_skb = NULL;
}
}
dma_unmap_single(&re->netdev->dev, re->rx_dma[i],
MAX_RX_LENGTH, DMA_FROM_DEVICE);
- for (i = 0; i < NUM_TX_DESC; i++)
- if (re->tx_skb[i]) {
- dev_kfree_skb_any(re->tx_skb[i]);
- re->tx_skb[i] = NULL;
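+ /* drop any skbs still attached to the ring before the descriptor
+  * memory itself is released
+  */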
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ struct raeth_tx_info *txi;
+
+ txi = &re->tx_info[i];
+ if (txi->tx_skb) {
+ dev_kfree_skb_any(txi->tx_skb);
+ txi->tx_skb = NULL;
}
+ }
}
#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT3883)
dma_free_coherent(&re->netdev->dev, len, re->tx,
re->tx_desc_dma);
}
+
+ kfree(re->tx_info);
}
static int
int err = -ENOMEM;
int i;
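+ /*
+  * Rough sketch of the structure this patch introduces, inferred from
+  * its use in these hunks (the actual definition lives outside them):
+  *
+  *   struct raeth_tx_info {
+  *           struct ramips_tx_dma *tx_desc;
+  *           struct sk_buff *tx_skb;
+  *   };
+  */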
+ re->tx_info = kzalloc(NUM_TX_DESC * sizeof(struct raeth_tx_info),
+ GFP_ATOMIC);
+ if (!re->tx_info)
+ return err;
+
/* allocate tx ring */
len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
re->tx = dma_alloc_coherent(&re->netdev->dev, len,
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct raeth_priv *re = netdev_priv(dev);
+ struct raeth_tx_info *txi, *txi_next;
+ struct ramips_tx_dma *txd, *txd_next;
unsigned long tx;
unsigned int tx_next;
dma_addr_t mapped_addr;
tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
tx_next = (tx + 1) % NUM_TX_DESC;
- if ((re->tx_skb[tx]) || (re->tx_skb[tx_next]) ||
- !(re->tx[tx].txd2 & TX_DMA_DONE) ||
- !(re->tx[tx_next].txd2 & TX_DMA_DONE))
+ txi = &re->tx_info[tx];
+ txd = txi->tx_desc;
+ txi_next = &re->tx_info[tx_next];
+ txd_next = txi_next->tx_desc;
+
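+ /* both the current and the next slot must be idle: no pending
+  * skb and TX_DMA_DONE set by the hardware
+  */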
+ if ((txi->tx_skb) || (txi_next->tx_skb) ||
+ !(txd->txd2 & TX_DMA_DONE) ||
+ !(txd_next->txd2 & TX_DMA_DONE))
goto out;
- re->tx[tx].txd1 = (unsigned int) mapped_addr;
- re->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
- re->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
+ txi->tx_skb = skb;
+
+ txd->txd1 = (unsigned int) mapped_addr;
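+ /* make sure the buffer address is visible before txd2 is
+  * rewritten: clearing TX_DMA_DONE below hands the descriptor
+  * over to the DMA engine
+  */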
+ wmb();
+ txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- re->tx_skb[tx] = skb;
- wmb();
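+ /* publish the new CPU TX index so the engine picks up the
+  * queued packet
+  */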
ramips_fe_wr(tx_next, RAMIPS_TX_CTX_IDX0);
spin_unlock(&re->page_lock);
return NETDEV_TX_OK;
struct raeth_priv *re = netdev_priv(dev);
spin_lock(&re->page_lock);
- while ((re->tx[re->skb_free_idx].txd2 & TX_DMA_DONE) &&
- (re->tx_skb[re->skb_free_idx])) {
- dev_kfree_skb_irq(re->tx_skb[re->skb_free_idx]);
- re->tx_skb[re->skb_free_idx] = 0;
+ while (1) {
+ struct raeth_tx_info *txi;
+ struct ramips_tx_dma *txd;
+
+ txi = &re->tx_info[re->skb_free_idx];
+ txd = txi->tx_desc;
+
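+ /* stop at the first descriptor that the hardware still owns
+  * (TX_DMA_DONE clear) or that has no skb to reclaim
+  */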
+ if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
+ break;
+
+ dev_kfree_skb_irq(txi->tx_skb);
+ txi->tx_skb = NULL;
re->skb_free_idx++;
if (re->skb_free_idx >= NUM_TX_DESC)
re->skb_free_idx = 0;