skb = dev_alloc_skb(length + 2);
if (likely(skb != NULL)) {
skb_reserve(skb, 2);
- dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr,
- length, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr,
+ length, DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
skb_put(skb, length);
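
The hunk above is the receive path of what looks like the ep93xx Ethernet
driver: the buffer was mapped DMA_FROM_DEVICE, so the CPU has to claim it
with dma_sync_single_for_cpu() before copying the frame out. The other half
of the contract is dma_sync_single_for_device() once the CPU is done and the
ring entry goes back to the hardware. A minimal sketch of the full pairing;
my_priv, rx_dma, rx_buf and RX_RING_SIZE are illustrative names, not fields
of the driver above:

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define RX_RING_SIZE 64				/* illustrative ring size */

struct my_priv {
	struct device *dma_dev;			/* device the ring was mapped for */
	dma_addr_t rx_dma[RX_RING_SIZE];	/* handles from dma_map_single() */
	void *rx_buf[RX_RING_SIZE];		/* matching kernel virtual addresses */
};

static void my_rx_one(struct net_device *ndev, struct my_priv *priv,
		      int entry, int length)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, length + NET_IP_ALIGN);

	if (unlikely(!skb))
		return;
	skb_reserve(skb, NET_IP_ALIGN);		/* align the IP header */

	/* Ownership: device -> CPU, before the CPU reads the buffer. */
	dma_sync_single_for_cpu(priv->dma_dev, priv->rx_dma[entry],
				length, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, priv->rx_buf[entry], length);
	skb_put(skb, length);

	/* Ownership: CPU -> device, before the ring entry is reused. */
	dma_sync_single_for_device(priv->dma_dev, priv->rx_dma[entry],
				   length, DMA_FROM_DEVICE);

	skb->protocol = eth_type_trans(skb, ndev);
	netif_receive_skb(skb);
}

On fully coherent platforms both syncs may compile down to nothing, which is
why a missing for_device half often goes unnoticed until the driver runs on
hardware with a write-back cache.
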
ep->descs->tdesc[entry].tdesc1 =
TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
- dma_sync_single(NULL, ep->descs->tdesc[entry].buf_addr,
- skb->len, DMA_TO_DEVICE);
+ dma_sync_single_for_device(NULL, ep->descs->tdesc[entry].buf_addr,
+ skb->len, DMA_TO_DEVICE);
dev_kfree_skb(skb);
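
This transmit hunk syncs after skb_copy_and_csum_dev(), i.e. after the CPU
has written the buffer and immediately before the device reads it, so the
matching half of the streaming API is dma_sync_single_for_device(), as used
above: with DMA_TO_DEVICE, a for_cpu sync is essentially a no-op and would
leave dirty cache lines unflushed on non-coherent hardware. A sketch of the
transmit half, with the buffer and its DMA handle passed in explicitly (the
function and parameter names are hypothetical):

static netdev_tx_t my_xmit_one(struct sk_buff *skb, struct device *dma_dev,
			       u8 *tx_buf, dma_addr_t tx_dma)
{
	/* CPU writes the frame (filling in the checksum) into the buffer. */
	skb_copy_and_csum_dev(skb, tx_buf);

	/* Ownership: CPU -> device, so the writes are visible before the
	 * descriptor is handed to the MAC. */
	dma_sync_single_for_device(dma_dev, tx_dma, skb->len, DMA_TO_DEVICE);

	dev_kfree_skb(skb);			/* data is copied; drop the skb */
	return NETDEV_TX_OK;
}
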
dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
- dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
- RX_BUFF_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN,
+ RX_BUFF_SIZE, DMA_FROM_DEVICE);
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
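
memcpy_swab32() in the branch above is a driver-local helper, not a generic
kernel export: the little-endian IXP4xx build keeps a permanent receive
buffer and copies each frame out one 32-bit word at a time, byte-swapping as
it goes, which is why the caller rounds the length with ALIGN(..., 4) / 4 to
get a word count. A hedged reconstruction of such a helper:

#include <linux/swab.h>
#include <linux/types.h>

/* Copy cnt 32-bit words from src to dest, byte-swapping each word. */
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	while (cnt--)
		*dest++ = swab32(*src++);
}
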
for (i = 0; i < npages; ++i)
mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, dma_handle, npages * sizeof (u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, dma_handle,
+ npages * sizeof (u64), DMA_TO_DEVICE);
return 0;
}
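
Here the CPU fills a device-readable MTT table (hence the cpu_to_be64()
conversion for the HCA) and the sync has to push those writes toward the
device. Where the table can be rewritten later, the DMA-API bracket around
the update looks roughly like this; the function name is hypothetical, the
remaining identifiers are the ones in the hunk above, and
MLX4_MTT_FLAG_PRESENT comes from the mlx4 driver:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void my_write_mtts(struct pci_dev *pdev, __be64 *mtts,
			  dma_addr_t dma_handle, u64 *page_list, int npages)
{
	int i;

	/* Ownership: device -> CPU, before the CPU touches the entries... */
	dma_sync_single_for_cpu(&pdev->dev, dma_handle,
				npages * sizeof(u64), DMA_TO_DEVICE);

	for (i = 0; i < npages; ++i)
		mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);

	/* ...then CPU -> device, so the HCA sees the new translations. */
	dma_sync_single_for_device(&pdev->dev, dma_handle,
				   npages * sizeof(u64), DMA_TO_DEVICE);
}
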
for (i = 0; i < npages; ++i)
fmr->mtts[i] = cpu_to_be64(page_list[i] | MLX4_MTT_FLAG_PRESENT);
- dma_sync_single(&dev->pdev->dev, fmr->dma_handle,
- npages * sizeof(u64), DMA_TO_DEVICE);
+ dma_sync_single_for_device(&dev->pdev->dev, fmr->dma_handle,
+ npages * sizeof(u64), DMA_TO_DEVICE);
fmr->mpt->key = cpu_to_be32(key);
fmr->mpt->lkey = cpu_to_be32(key);
dma_unmap_single(&dev->dev, desc->data,
RX_SIZE, DMA_FROM_DEVICE);
#else
- dma_sync_single(&dev->dev, desc->data,
- RX_SIZE, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&dev->dev, desc->data,
+ RX_SIZE, DMA_FROM_DEVICE);
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
ALIGN(desc->pkt_len, 4) / 4);
#endif
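
Both IXP4xx hunks (the earlier one apparently from the Ethernet driver, this
one from the HSS WAN driver) have the same shape: one build configuration
maps fresh skb data for every frame and simply unmaps it when the frame
arrives, while the #else branch recycles a permanent buffer and therefore
only syncs it back to the CPU before the copy. Folded into one hypothetical
helper:

#include <linux/dma-mapping.h>
#include <linux/types.h>

/*
 * 'recycled' says whether the driver keeps a permanent, always-mapped
 * receive buffer (sync, then copy the frame out) or maps fresh skb data
 * for every frame (tear the one-shot mapping down). Illustrative only.
 */
static void my_claim_rx_buffer(struct device *dev, dma_addr_t addr,
			       size_t size, bool recycled)
{
	if (recycled)
		dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
	else
		dma_unmap_single(dev, addr, size, DMA_FROM_DEVICE);
}

Either way, the unmap and the for_cpu sync are the two legal ways for the
CPU to take ownership of a DMA_FROM_DEVICE buffer back from the device.
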