u32 cfg2;
u32 ifctl;
u32 fifo5;
+ u32 fifo3;
if (!ag->link) {
ag71xx_hw_stop(ag);
}
if (pdata->is_ar91xx)
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x00780fff);
+ fifo3 = 0x00780fff;
else if (pdata->is_ar724x)
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, pdata->fifo_cfg3);
+ fifo3 = pdata->fifo_cfg3;
else
- ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x008001ff);
+ fifo3 = 0x008001ff;
+
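+ /* when TX descriptor splitting is in use, fold the split size into
+  * the upper 16 bits of FIFO_CFG3 before the single register write below
+  */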
+ if (ag->tx_ring.desc_split) {
+ fifo3 &= 0xffff;
+ fifo3 |= ((2048 - ag->tx_ring.desc_split) / 4) << 16;
+ }
+
+ ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, fifo3);
if (pdata->set_speed)
pdata->set_speed(ag->speed);
return 0;
}
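+
+/* Fill one or more TX descriptors for a single buffer. Each descriptor
+ * covers at most ring->desc_split bytes (the whole buffer when splitting
+ * is disabled); all but the last fragment are chained with DESC_MORE, and
+ * the first descriptor keeps DESC_EMPTY set so the hardware does not start
+ * transmitting a partially built chain. Returns the number of descriptors
+ * used, or -1 if a busy descriptor is hit (ring full).
+ */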
+static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
+{
+ int i;
+ struct ag71xx_desc *desc;
+ int ndesc = 0;
+ int split = ring->desc_split;
+
+ if (!split)
+ split = len;
+
+ while (len > 0) {
+ unsigned int cur_len = len;
+
+ i = (ring->curr + ndesc) % ring->size;
+ desc = ring->buf[i].desc;
+
+ if (!ag71xx_desc_empty(desc))
+ return -1;
+
+ if (cur_len > split) {
+ cur_len = split;
+
+ /* very short (<= 4 byte) DMA segments can hang TX, so make
+  * sure the next segment stays longer than 4 bytes
+  */
+ if (len <= split + 4)
+ cur_len -= 4;
+ }
+
+ desc->data = addr;
+ addr += cur_len;
+ len -= cur_len;
+
+ if (len > 0)
+ cur_len |= DESC_MORE;
+
+ /* prevent early tx attempt of this descriptor */
+ if (!ndesc)
+ cur_len |= DESC_EMPTY;
+
+ desc->ctrl = cur_len;
+ ndesc++;
+ }
+
+ return ndesc;
+}
+
static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct ag71xx_ring *ring = &ag->tx_ring;
struct ag71xx_desc *desc;
dma_addr_t dma_addr;
- int i;
-
- i = ring->curr % ring->size;
- desc = ring->buf[i].desc;
-
- if (!ag71xx_desc_empty(desc))
- goto err_drop;
+ int i, n, ring_min;
if (ag71xx_has_ar8216(ag))
ag71xx_add_ar8216_header(ag, skb);
- if (skb->len <= 0) {
+ if (skb->len <= 4) {
DBG("%s: packet len is too small\n", ag->dev->name);
goto err_drop;
}
dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- netdev_sent_queue(dev, skb->len);
+ i = ring->curr % ring->size;
+ desc = ring->buf[i].desc;
+
+ /* setup descriptor fields */
+ n = ag71xx_fill_dma_desc(ring, (u32) dma_addr, skb->len & ag->desc_pktlen_mask);
+ if (n < 0)
+ goto err_drop_unmap;
+
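+ /* the skb and its length are tracked on the last descriptor of the
+  * chain, so completion handling frees it only once the whole chain
+  * has been sent
+  */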
+ i = (ring->curr + n - 1) % ring->size;
ring->buf[i].len = skb->len;
ring->buf[i].skb = skb;
ring->buf[i].timestamp = jiffies;
- /* setup descriptor fields */
- desc->data = (u32) dma_addr;
- desc->ctrl = skb->len & ag->desc_pktlen_mask;
+ netdev_sent_queue(dev, skb->len);
+
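+ /* all fragments are set up; clearing DESC_EMPTY on the first
+  * descriptor releases the chain for transmission
+  */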
+ desc->ctrl &= ~DESC_EMPTY;
+ ring->curr += n;
/* flush descriptor */
wmb();
- ring->curr++;
- if (ring->curr == (ring->dirty + ring->size)) {
- DBG("%s: tx queue full\n", ag->dev->name);
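+ /* leave headroom for the next packet: with splitting enabled a
+  * packet may occupy up to AG71XX_TX_RING_DS_PER_PKT descriptors
+  */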
+ ring_min = 2;
+ if (ring->desc_split)
+ ring_min *= AG71XX_TX_RING_DS_PER_PKT;
+
+ if (ring->curr - ring->dirty >= ring->size - ring_min) {
+ DBG("%s: tx queue full\n", dev->name);
netif_stop_queue(dev);
}
return NETDEV_TX_OK;
+err_drop_unmap:
+ dma_unmap_single(&dev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
err_drop:
dev->stats.tx_dropped++;
unsigned int i = ring->dirty % ring->size;
struct ag71xx_desc *desc = ring->buf[i].desc;
struct sk_buff *skb = ring->buf[i].skb;
- int len = ring->buf[i].len;
if (!ag71xx_desc_empty(desc)) {
if (pdata->is_ar7240 &&
ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);
- bytes_compl += len;
- ag->dev->stats.tx_bytes += len;
- ag->dev->stats.tx_packets++;
- dev_kfree_skb_any(skb);
- ring->buf[i].skb = NULL;
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ ring->buf[i].skb = NULL;
+ bytes_compl += ring->buf[i].len;
+ sent++;
+ }
ring->dirty++;
- sent++;
}
DBG("%s: %d packets sent out\n", ag->dev->name, sent);
+ ag->dev->stats.tx_bytes += bytes_compl;
+ ag->dev->stats.tx_packets += sent;
+
if (!sent)
return 0;
ag->max_frame_len = pdata->max_frame_len;
ag->desc_pktlen_mask = pdata->desc_pktlen_mask;
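+
+ /* MACs other than AR724x/AR91xx get TX descriptor splitting; scale the
+  * ring so it still holds the same number of (multi-descriptor) packets
+  */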
+ if (!pdata->is_ar724x && !pdata->is_ar91xx) {
+ ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
+ ag->tx_ring.size *= AG71XX_TX_RING_DS_PER_PKT;
+ }
+
ag->stop_desc = dma_alloc_coherent(NULL,
sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);