} else {
temp = MDIO_READ_COMMAND;
}
+
temp |= ((location & 0x1f) << MDIO_REG_OFFSET);
temp |= (phy_id & 0x1f);
}
if (cycles == 5000) {
- printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name,
- phy_id);
+ printk(KERN_ERR "%s #%i: MII transaction failed\n", bus->name, phy_id);
return -1;
}
return ret;
}
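For reference, the two OR operations in the hunk above pack the 5-bit register number and the 5-bit PHY address into a single MDIO command word. A minimal standalone sketch of that packing; MDIO_REG_OFFSET's value here (8) is an assumption for illustration, not taken from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define MDIO_REG_OFFSET 8	/* assumed bit position of the register field */

static uint32_t mdio_cmd(uint32_t base, int phy_id, int location)
{
	uint32_t temp = base;

	temp |= ((location & 0x1f) << MDIO_REG_OFFSET);	/* 5-bit register number */
	temp |= (phy_id & 0x1f);			/* 5-bit PHY address */
	return temp;
}

int main(void)
{
	/* e.g. PHY 1, register 2 */
	printf("0x%08x\n", (unsigned int)mdio_cmd(0, 1, 2));
	return 0;
}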
-static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location,
- u16 val)
+static int cns3xxx_mdio_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
unsigned long flags;
int ret;
if ((err = mdiobus_register(mdio_bus)))
mdiobus_free(mdio_bus);
+
return err;
}
/* put the new buffer on RX-free queue */
rx_ring->buff_tab[i] = buf;
rx_ring->phys_tab[i] = phys;
+
if (i == RX_DESCS - 1) {
+ desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU | END_OF_RING;
i = 0;
- desc->config0 = END_OF_RING | FIRST_SEGMENT |
- LAST_SEGMENT | RX_SEGMENT_MRU;
desc = &(rx_ring)->desc[i];
} else {
- desc->config0 = FIRST_SEGMENT | LAST_SEGMENT |
- RX_SEGMENT_MRU;
+ desc->config0 = FIRST_SEGMENT | LAST_SEGMENT | RX_SEGMENT_MRU;
i++;
desc++;
}
return;
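The branch above handles the ring wrap: the last RX descriptor also carries END_OF_RING so the DMA engine returns to descriptor 0, and the software index is reset to match. A small standalone sketch of that wrap, with made-up flag values (the real ones live in the driver headers):

#include <stdio.h>

#define RX_DESCS	32
#define FIRST_SEGMENT	(1u << 0)	/* assumed values, for illustration only */
#define LAST_SEGMENT	(1u << 1)
#define END_OF_RING	(1u << 2)

static unsigned int refill_flags(int i)
{
	unsigned int config0 = FIRST_SEGMENT | LAST_SEGMENT;

	if (i == RX_DESCS - 1)
		config0 |= END_OF_RING;	/* tell the DMA engine to wrap to desc 0 */
	return config0;
}

int main(void)
{
	int i = RX_DESCS - 1;

	printf("flags at %d: 0x%x\n", i, refill_flags(i));
	i = (i == RX_DESCS - 1) ? 0 : i + 1;	/* software index mirrors the wrap */
	printf("next index: %d\n", i);
	return 0;
}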
tx_ring->stopped = stop;
+
for (i = 0; i < 4; i++) {
struct port *port = switch_port_tab[i];
struct net_device *dev;
continue;
dev = port->netdev;
+
if (stop)
netif_stop_queue(dev);
else
index = tx_ring->free_index;
desc = &(tx_ring)->desc[index];
+
for (i = 0; i < num_used; i++) {
if (desc->cown) {
skb = tx_ring->buff_tab[index];
break;
}
}
+
tx_ring->free_index = index;
tx_ring->num_used -= i;
eth_check_num_used(tx_ring);
break;
/* process received frame */
- dma_unmap_single(sw->dev, rx_ring->phys_tab[i],
- RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+ dma_unmap_single(sw->dev, rx_ring->phys_tab[i], RX_SEGMENT_MRU, DMA_FROM_DEVICE);
skb = build_skb(rx_ring->buff_tab[i], RX_SEGMENT_ALLOC_SIZE);
if (!skb)
tx_ring->phys_tab[index] = phys;
config0 |= len;
+
if (index == TX_DESCS - 1)
config0 |= END_OF_RING;
+
if (index == index_last)
config0 |= LAST_SEGMENT;
eth_schedule_poll(sw);
spin_lock_bh(&tx_lock);
+
if ((tx_ring->num_used + nr_desc + 1) >= TX_DESCS) {
spin_unlock_bh(&tx_lock);
return NETDEV_TX_BUSY;
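The check above is the usual ring back-pressure test: a frame needing nr_desc descriptors is refused with NETDEV_TX_BUSY unless it fits with one slot to spare, and the lock is dropped before returning. A standalone sketch of just the occupancy test, with an arbitrary TX_DESCS:

#include <stdbool.h>
#include <stdio.h>

#define TX_DESCS 32	/* arbitrary example ring size */

static bool tx_ring_has_room(int num_used, int nr_desc)
{
	/* refuse the frame when it would not fit with one descriptor spare */
	return (num_used + nr_desc + 1) < TX_DESCS;
}

int main(void)
{
	printf("%d\n", tx_ring_has_room(30, 2));	/* 0: would overflow, report busy */
	printf("%d\n", tx_ring_has_room(10, 2));	/* 1: fits */
	return 0;
}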
__raw_writel(TS_SUSPEND | FS_SUSPEND, &sw->regs->dma_auto_poll_cfg);
__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
__raw_writel(CLR_FS_STATE | QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
-
__raw_writel(QUEUE_THRESHOLD, &sw->regs->dma_ring_ctrl);
rx_ring->desc = dmam_alloc_coherent(sw->dev, RX_POOL_ALLOC_SIZE,
/* Setup RX buffers */
memset(rx_ring->desc, 0, RX_POOL_ALLOC_SIZE);
+
for (i = 0; i < RX_DESCS; i++) {
struct rx_desc *desc = &(rx_ring)->desc[i];
void *buf;
return -ENOMEM;
desc->sdl = RX_SEGMENT_MRU;
+
if (i == (RX_DESCS - 1))
desc->eor = 1;
+
desc->fsd = 1;
desc->lsd = 1;
desc->sdp = dma_map_single(sw->dev, buf + SKB_HEAD_ALIGN,
RX_SEGMENT_MRU, DMA_FROM_DEVICE);
+
if (dma_mapping_error(sw->dev, desc->sdp))
return -EIO;
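The mapping above follows the standard kernel idiom: every dma_map_single() result must pass dma_mapping_error() before the address is handed to the hardware. A generic kernel-context sketch of that pattern (dev/buf/len are placeholders, not the driver's own names):

#include <linux/dma-mapping.h>

static int map_rx_buffer(struct device *dev, void *buf, size_t len,
			 dma_addr_t *phys)
{
	*phys = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *phys))
		return -EIO;	/* address unusable, do not write it into a descriptor */
	return 0;
}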
/* Setup TX buffers */
memset(tx_ring->desc, 0, TX_POOL_ALLOC_SIZE);
+
for (i = 0; i < TX_DESCS; i++) {
struct tx_desc *desc = &(tx_ring)->desc[i];
tx_ring->buff_tab[i] = 0;
if (i == (TX_DESCS - 1))
desc->eor = 1;
+
desc->cown = 1;
}
__raw_writel(tx_ring->phys_addr, &sw->regs->ts_desc_ptr0);
if (!buf)
continue;
- dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU,
- DMA_FROM_DEVICE);
+ dma_unmap_single(sw->dev, desc->sdp, RX_SEGMENT_MRU, DMA_FROM_DEVICE);
skb_free_frag(buf);
}
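The teardown above pairs with the page-fragment buffers used on the RX path: memory from netdev_alloc_frag() is either wrapped into an skb by build_skb() on receive, or released with skb_free_frag() when the ring is torn down. A kernel-context sketch of that life cycle, with ALLOC_SIZE and HEADROOM standing in for the driver's RX_SEGMENT_ALLOC_SIZE and SKB_HEAD_ALIGN:

#include <linux/skbuff.h>

#define ALLOC_SIZE	2048	/* illustrative stand-in for RX_SEGMENT_ALLOC_SIZE */
#define HEADROOM	64	/* illustrative stand-in for SKB_HEAD_ALIGN */

static struct sk_buff *frag_to_skb(void)
{
	void *buf = netdev_alloc_frag(ALLOC_SIZE);
	struct sk_buff *skb;

	if (!buf)
		return NULL;

	skb = build_skb(buf, ALLOC_SIZE);	/* wraps the fragment, no copy */
	if (!skb) {
		skb_free_frag(buf);		/* not consumed, free it ourselves */
		return NULL;
	}
	skb_reserve(skb, HEADROOM);		/* leave room matching the DMA offset */
	return skb;
}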