}
if (ring->rx_dma) {
- dma_free_coherent(&priv->netdev->dev,
+ dma_free_coherent(priv->dev,
ring->rx_ring_size * sizeof(*ring->rx_dma),
ring->rx_dma,
ring->rx_phys);
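All of the hunks in this patch make the same substitution: the DMA API calls stop going through the struct device embedded in the net_device (&netdev->dev), which carries no DMA configuration, and use priv->dev instead, the device that was actually probed and therefore has the dma_mask and DMA ops the mapping code needs. A minimal sketch of where priv->dev would come from, assuming a platform driver and a hypothetical fe_probe() (the fe_priv layout below shows only the two members the hunks rely on):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Illustrative layout, not the driver's full struct. */
struct fe_priv {
        struct net_device *netdev;
        struct device *dev;     /* DMA-capable device saved at probe */
};

/* Hypothetical probe; the point is the last two assignments. */
static int fe_probe(struct platform_device *pdev)
{
        struct net_device *netdev;
        struct fe_priv *priv;

        netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
        if (!netdev)
                return -ENOMEM;

        SET_NETDEV_DEV(netdev, &pdev->dev);
        priv = netdev_priv(netdev);
        priv->netdev = netdev;
        priv->dev = &pdev->dev; /* the device every DMA call must use */

        return 0;
}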
static int fe_alloc_rx(struct fe_priv *priv)
{
- struct net_device *netdev = priv->netdev;
struct fe_rx_ring *ring = &priv->rx_ring;
int i, pad;
[...]
goto no_rx_mem;
}
- ring->rx_dma = dma_alloc_coherent(&netdev->dev,
+ ring->rx_dma = dma_alloc_coherent(priv->dev,
ring->rx_ring_size * sizeof(*ring->rx_dma),
&ring->rx_phys,
GFP_ATOMIC | __GFP_ZERO);
[...]
else
pad = NET_IP_ALIGN;
for (i = 0; i < ring->rx_ring_size; i++) {
- dma_addr_t dma_addr = dma_map_single(&netdev->dev,
+ dma_addr_t dma_addr = dma_map_single(priv->dev,
ring->rx_data[i] + NET_SKB_PAD + pad,
ring->rx_buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr)))
+ if (unlikely(dma_mapping_error(priv->dev, dma_addr)))
goto no_rx_mem;
ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
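The device named here has to match on all three sides of a mapping's lifetime: dma_map_single(), dma_mapping_error(), and the eventual dma_unmap_single() in the receive path below. Note also that the mapping starts NET_SKB_PAD + pad bytes into the buffer, matching the skb_reserve() offset used when the frame is handed up. A condensed sketch of that pairing rule, as a hypothetical helper (fe_map_rx_buf is not in the driver):

#include <linux/dma-mapping.h>

/* Illustrative only: map and error-check against the same struct
 * device that the later unmap will use - priv->dev throughout. */
static int fe_map_rx_buf(struct fe_priv *priv, void *buf, size_t len,
                         dma_addr_t *addr)
{
        *addr = dma_map_single(priv->dev, buf, len, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(priv->dev, *addr)))
                return -ENOMEM; /* nothing mapped, nothing to unmap */
        return 0;
}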
static void fe_clean_tx(struct fe_priv *priv)
{
int i;
- struct device *dev = &priv->netdev->dev;
+ struct device *dev = priv->dev;
struct fe_tx_ring *ring = &priv->tx_ring;
if (ring->tx_buf) {
[...]
if (!ring->tx_buf)
goto no_tx_mem;
- ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev,
+ ring->tx_dma = dma_alloc_coherent(priv->dev,
ring->tx_ring_size * sizeof(*ring->tx_dma),
&ring->tx_phys,
GFP_ATOMIC | __GFP_ZERO);
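Coherent memory allocated against priv->dev must also be freed against priv->dev. A sketch of the matching tx teardown, assumed to mirror the rx free hunk at the top of the patch (the tx branch itself is not quoted from the driver):

if (ring->tx_dma) {
        dma_free_coherent(priv->dev,
                          ring->tx_ring_size * sizeof(*ring->tx_dma),
                          ring->tx_dma,
                          ring->tx_phys);
        ring->tx_dma = NULL;
}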
{
struct fe_priv *priv = netdev_priv(dev);
struct fe_map_state st = {
- .dev = &dev->dev,
+ .dev = priv->dev,
.ring_idx = ring->tx_next_idx,
};
struct sk_buff *head = skb;
[...]
j = ring->tx_next_idx;
for (i = 0; i < tx_num; i++) {
/* unmap dma */
- fe_txd_unmap(&dev->dev, &ring->tx_buf[j]);
+ fe_txd_unmap(priv->dev, &ring->tx_buf[j]);
ring->tx_dma[j].txd2 = TX_DMA_DESP2_DEF;
j = NEXT_TX_DESP_IDX(j);
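The xmit path bundles the DMA device and the current descriptor index into a fe_map_state, so the per-fragment mapping helpers and the error-unwind loop above agree on one struct device. Only the two members visible in the hunk are sketched; the real struct almost certainly carries more:

struct fe_map_state {
        struct device *dev;     /* DMA device; now priv->dev, not &netdev->dev */
        int ring_idx;           /* next tx descriptor to fill */
        /* further bookkeeping omitted */
};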
stats->rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(&netdev->dev,
+ dma_addr = dma_map_single(priv->dev,
new_data + NET_SKB_PAD + pad,
ring->rx_buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
skb_free_frag(new_data);
goto release_desc;
}
}
[...]
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(&netdev->dev, trxd.rxd1,
+ dma_unmap_single(priv->dev, trxd.rxd1,
ring->rx_buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
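Note the refill order in this hunk: the replacement buffer is mapped first, and only on success is the old buffer unmapped and handed up the stack, so a mapping failure leaves the descriptor's current buffer in place and the frame is simply dropped. Condensed from the code above:

dma_addr = dma_map_single(priv->dev, new_data + NET_SKB_PAD + pad,
                          ring->rx_buf_size, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(priv->dev, dma_addr))) {
        skb_free_frag(new_data);
        goto release_desc;      /* old buffer stays mapped and owned */
}
/* only now is the old buffer released to the stack */
dma_unmap_single(priv->dev, trxd.rxd1, ring->rx_buf_size, DMA_FROM_DEVICE);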
int *tx_again)
{
struct net_device *netdev = priv->netdev;
- struct device *dev = &netdev->dev;
unsigned int bytes_compl = 0;
struct sk_buff *skb;
struct fe_tx_buf *tx_buf;
[...]
done++;
budget--;
}
- fe_txd_unmap(dev, tx_buf);
+ fe_txd_unmap(priv->dev, tx_buf);
idx = NEXT_TX_DESP_IDX(idx);
}
ring->tx_free_idx = idx;
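A practical way to verify a change like this: build with DMA API debugging enabled, which tracks every mapping per struct device and warns when a buffer is unmapped or freed against a different device than it was mapped with - exactly the mismatch this patch removes. Illustrative config fragment:

# warns on unmap/free against the wrong struct device
CONFIG_DMA_API_DEBUG=y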