if (!priv->rx_buf[desc_idx]) {
unsigned char *buf;
+ dma_addr_t p;
if (likely(napi_mode))
buf = napi_alloc_frag(priv->rx_frag_size);
else
buf = netdev_alloc_frag(priv->rx_frag_size);
if (unlikely(!buf))
break;
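+ /* map the fragment's data area so the DMA engine can write received frames into it */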
+ p = dma_map_single(&priv->pdev->dev, buf + NET_SKB_PAD,
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, p))) {
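+ /* mapping failed: release the fragment and stop refilling for now */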
+ skb_free_frag(buf);
+ break;
+ }
+
priv->rx_buf[desc_idx] = buf;
- desc->address = dma_map_single(&priv->pdev->dev,
- buf + NET_SKB_PAD,
- priv->rx_buf_size,
- DMA_FROM_DEVICE);
+ desc->address = p;
}
len_stat = priv->rx_buf_size << DMADESC_LENGTH_SHIFT;
struct bcm6368_enetsw_desc *desc;
u32 len_stat;
netdev_tx_t ret;
+ dma_addr_t p;
/* lock against tx reclaim */
spin_lock(&priv->tx_lock);
data = skb_put_zero(skb, needed);
}
+ /* fill descriptor */
+ p = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&priv->pdev->dev, p))) {
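+ /* mapping failed: drop the packet; NETDEV_TX_OK tells the stack not to requeue it */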
+ dev_kfree_skb(skb);
+ ret = NETDEV_TX_OK;
+ goto out_unlock;
+ }
+
/* point to the next available desc */
desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
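+ /* remember the skb so the tx-reclaim path can free it once the hardware is done */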
priv->tx_skb[priv->tx_curr_desc] = skb;
-
- /* fill descriptor */
- desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
+ desc->address = p;
len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
len_stat |= DMADESC_ESOP_MASK | DMADESC_APPEND_CRC |