 #define AG71XX_TX_MTU_LEN	1540
 #define AG71XX_RX_PKT_SIZE \
 	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-#define AG71XX_RX_BUF_SIZE (AG71XX_RX_PKT_SIZE + NET_SKB_PAD + NET_IP_ALIGN)
 #define AG71XX_TX_RING_SIZE_DEFAULT	32
 #define AG71XX_RX_RING_SIZE_DEFAULT	128
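For reference, the arithmetic behind these limits: ETH_FRAME_LEN (1514) + ETH_FCS_LEN (4) + VLAN_HLEN (4) gives a 1522-byte worst-case frame, and each rx buffer adds NET_SKB_PAD and NET_IP_ALIGN of headroom on top of that. The standalone sketch below plugs in the common default values for those two padding constants; both are arch- and config-dependent, so the resulting number is illustrative rather than authoritative.

#include <stdio.h>

/* Common defaults, for illustration only; the real values come from
 * the kernel headers and vary with architecture and configuration. */
#define ETH_FRAME_LEN	1514
#define ETH_FCS_LEN	4
#define VLAN_HLEN	4
#define NET_SKB_PAD	32
#define NET_IP_ALIGN	2

#define AG71XX_RX_PKT_SIZE \
	(ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)

int main(void)
{
	unsigned int rx_buf_size = AG71XX_RX_PKT_SIZE +
				   NET_SKB_PAD + NET_IP_ALIGN;

	printf("rx_buf_size = %u\n", rx_buf_size); /* 1556 with these values */
	return 0;
}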
 	int duplex;
 	unsigned int max_frame_len;
+	unsigned int rx_buf_size;
 	struct work_struct restart_work;
 	struct delayed_work link_work;
 	for (i = 0; i < ring->size; i++)
 		if (ring->buf[i].rx_buf) {
 			dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
-					 AG71XX_RX_BUF_SIZE, DMA_FROM_DEVICE);
+					 ag->rx_buf_size, DMA_FROM_DEVICE);
 			kfree(ring->buf[i].rx_buf);
 		}
 }
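One constraint worth spelling out: the DMA API requires dma_unmap_single() to be called with the same size that was passed to dma_map_single(), so the unmap sites must switch to ag->rx_buf_size in lockstep with the map site in the next hunk. A minimal sketch of that pairing, assuming a generic struct device *dev and buffer pointer data (kernel context, not runnable standalone):

dma_addr_t dma_addr;

/* Map with the per-device size ... */
dma_addr = dma_map_single(dev, data, ag->rx_buf_size, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dma_addr))
	return -ENOMEM;

/* ... hardware fills the buffer ... */

/* ... and unmap with exactly the same size. */
dma_unmap_single(dev, dma_addr, ag->rx_buf_size, DMA_FROM_DEVICE);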
 {
 	void *data;

-	data = kmalloc(AG71XX_RX_BUF_SIZE +
+	data = kmalloc(ag->rx_buf_size +
 		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 		       GFP_ATOMIC);
 	if (!data)
 		return false;

 	buf->rx_buf = data;
-	buf->dma_addr = dma_map_single(&ag->dev->dev, data,
-				       AG71XX_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	buf->dma_addr = dma_map_single(&ag->dev->dev, data, ag->rx_buf_size,
+				       DMA_FROM_DEVICE);
 	buf->desc->data = (u32) buf->dma_addr + offset;
 	return true;
 }
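The extra SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) in the allocation is the layout build_skb() expects: the skb_shared_info sits at the tail of the same buffer instead of being allocated separately. A hedged sketch of how the rx completion side can consume such a buffer; the names mirror the patch, but the body is illustrative, not the driver's exact receive path:

struct sk_buff *skb;

/* Wrap the raw buffer; the second argument is the total area size,
 * and build_skb() carves skb_shared_info out of its tail. */
skb = build_skb(ring->buf[i].rx_buf, ag->rx_buf_size +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
if (!skb) {
	kfree(ring->buf[i].rx_buf);
	break;			/* placeholder error handling */
}

skb_reserve(skb, offset);	/* advance to where the hardware wrote the frame */
skb_put(skb, pktlen);		/* pktlen already excludes the FCS */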
 	struct ag71xx *ag = netdev_priv(dev);
 	int ret;

+	ag->rx_buf_size = AG71XX_RX_PKT_SIZE + NET_SKB_PAD + NET_IP_ALIGN;
+
 	ret = ag71xx_rings_init(ag);
 	if (ret)
 		goto err;
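Computing the size in the open path, rather than at compile time, is what the new field enables: a later change could size rx buffers from the interface MTU instead of the fixed AG71XX_RX_PKT_SIZE budget. The line below is a hypothetical illustration of that, not part of this patch (dev->mtu and ETH_HLEN are standard kernel symbols; the formula itself is only a sketch):

/* Hypothetical follow-up, NOT in this patch: derive the buffer size
 * from the configured MTU. With the default MTU of 1500 this yields
 * the same 1522-byte frame budget as AG71XX_RX_PKT_SIZE. */
ag->rx_buf_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN +
		  NET_SKB_PAD + NET_IP_ALIGN;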
 		pktlen -= ETH_FCS_LEN;

 		dma_unmap_single(&dev->dev, ring->buf[i].dma_addr,
-				 AG71XX_RX_BUF_SIZE, DMA_FROM_DEVICE);
+				 ag->rx_buf_size, DMA_FROM_DEVICE);

 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += pktlen;