#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
+#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
struct velocity_opt *opt = &vptr->options;
const unsigned int rx_ring_size = opt->numrx * sizeof(struct rx_desc);
const unsigned int tx_ring_size = opt->numtx * sizeof(struct tx_desc);
- struct pci_dev *pdev = vptr->pdev;
dma_addr_t pool_dma;
void *pool;
unsigned int i;
/*
 * Allocate all RD/TD rings as a single pool.
*
- * pci_alloc_consistent() fulfills the requirement for 64 bytes
+ * dma_alloc_coherent() fulfills the requirement for 64-byte
* alignment
*/
- pool = pci_alloc_consistent(pdev, tx_ring_size * vptr->tx.numq +
- rx_ring_size, &pool_dma);
+ pool = dma_alloc_coherent(vptr->dev, tx_ring_size * vptr->tx.numq +
+ rx_ring_size, &pool_dma, GFP_ATOMIC);
if (!pool) {
- dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n",
+ dev_err(vptr->dev, "%s : DMA memory allocation failed.\n",
vptr->netdev->name);
return -ENOMEM;
}
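
The GFP_ATOMIC flag here is not a new policy decision: pci_alloc_consistent() always allocated with GFP_ATOMIC, so passing it through explicitly keeps the conversion behavior-neutral. For reference, the compat wrapper being replaced (from include/asm-generic/pci-dma-compat.h) reads roughly:

static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
		     dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev,
				  size, dma_handle, GFP_ATOMIC);
}

Every pci_* call touched below is a thin wrapper of this kind, which is why most hunks are mechanical: swap the function name, swap the pci_dev for a struct device (here cached as vptr->dev), and swap the PCI_DMA_* direction constants for the DMA_* enum.
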
*/
skb_reserve(rd_info->skb,
64 - ((unsigned long) rd_info->skb->data & 63));
- rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data,
- vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+ rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
+ vptr->rx.buf_sz, DMA_FROM_DEVICE);
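
As before the conversion, the mapping result is not checked, so the patch stays behavior-neutral here as well. The generic API does allow such a check via dma_mapping_error(); a minimal sketch of what hardening could look like (hypothetical, not part of this patch; the -ENOMEM recovery path is assumed):

	rd_info->skb_dma = dma_map_single(vptr->dev, rd_info->skb->data,
					  vptr->rx.buf_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(vptr->dev, rd_info->skb_dma)) {
		dev_kfree_skb(rd_info->skb);	/* drop the fresh rx buffer */
		rd_info->skb = NULL;
		return -ENOMEM;
	}
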
/*
* Fill in the descriptor to match
if (!rd_info->skb)
continue;
- pci_unmap_single(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+ DMA_FROM_DEVICE);
rd_info->skb_dma = 0;
dev_kfree_skb(rd_info->skb);
const int size = vptr->options.numrx * sizeof(struct rx_desc) +
vptr->options.numtx * sizeof(struct tx_desc) * vptr->tx.numq;
- pci_free_consistent(vptr->pdev, size, vptr->rx.ring, vptr->rx.pool_dma);
+ dma_free_coherent(vptr->dev, size, vptr->rx.ring, vptr->rx.pool_dma);
}
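
dma_free_coherent() must receive exactly what dma_alloc_coherent() handed out: the same device, the full pool size, the CPU address, and the DMA handle. That is why size recomputes the combined rx + tx ring expression from the allocation above. The kernel prototype:

void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t dma_handle);
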
static int velocity_init_rings(struct velocity_info *vptr, int mtu)
pktlen = max_t(size_t, pktlen,
td->td_buf[i].size & ~TD_QUEUE);
- pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i],
- le16_to_cpu(pktlen), PCI_DMA_TODEVICE);
+ dma_unmap_single(vptr->dev, tdinfo->skb_dma[i],
+ le16_to_cpu(pktlen), DMA_TO_DEVICE);
}
}
dev_kfree_skb_irq(skb);
if (td_info->skb) {
for (i = 0; i < td_info->nskb_dma; i++) {
if (td_info->skb_dma[i]) {
- pci_unmap_single(vptr->pdev, td_info->skb_dma[i],
- td_info->skb->len, PCI_DMA_TODEVICE);
+ dma_unmap_single(vptr->dev, td_info->skb_dma[i],
+ td_info->skb->len, DMA_TO_DEVICE);
td_info->skb_dma[i] = 0;
}
}
*/
static int velocity_receive_frame(struct velocity_info *vptr, int idx)
{
- void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int);
struct net_device_stats *stats = &vptr->netdev->stats;
struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]);
struct rx_desc *rd = &(vptr->rx.ring[idx]);
skb = rd_info->skb;
- pci_dma_sync_single_for_cpu(vptr->pdev, rd_info->skb_dma,
- vptr->rx.buf_sz, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
+ vptr->rx.buf_sz, DMA_FROM_DEVICE);
/*
* Drop frame not meeting IEEE 802.3
}
}
- pci_action = pci_dma_sync_single_for_device;
-
velocity_rx_csum(rd, skb);
if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
velocity_iph_realign(vptr, skb, pkt_len);
- pci_action = pci_unmap_single;
rd_info->skb = NULL;
+ dma_unmap_single(vptr->dev, rd_info->skb_dma, vptr->rx.buf_sz,
+ DMA_FROM_DEVICE);
+ } else {
+ dma_sync_single_for_device(vptr->dev, rd_info->skb_dma,
+ vptr->rx.buf_sz, DMA_FROM_DEVICE);
}
- pci_action(vptr->pdev, rd_info->skb_dma, vptr->rx.buf_sz,
- PCI_DMA_FROMDEVICE);
-
skb_put(skb, pkt_len - 4);
skb->protocol = eth_type_trans(skb, vptr->netdev);
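
This is the one hunk where the conversion is more than a rename. The old code picked between pci_dma_sync_single_for_device() and pci_unmap_single() through the pci_action function pointer; that trick does not carry over, because in this era's headers dma_unmap_single() is a macro and therefore has no address to take:

/* include/asm-generic/dma-mapping-common.h (roughly): */
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)

The explicit if/else that replaces it also makes the buffer ownership easier to follow: when velocity_rx_copy() copies the packet into a fresh skb (returns 0), the ring buffer stays with the device and only needs dma_sync_single_for_device(); when no copy is made (negative return), the original skb goes up the stack and its mapping must be released with dma_unmap_single().
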
* add it to the transmit ring.
*/
tdinfo->skb = skb;
- tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+ tdinfo->skb_dma[0] = dma_map_single(vptr->dev, skb->data, pktlen,
+ DMA_TO_DEVICE);
td_ptr->tdesc0.len = cpu_to_le16(pktlen);
td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
td_ptr->td_buf[0].pa_high = 0;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev,
+ tdinfo->skb_dma[i + 1] = skb_frag_dma_map(vptr->dev,
frag, 0,
skb_frag_size(frag),
DMA_TO_DEVICE);
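
The fragment path needs no behavioral change because skb_frag_dma_map() is already built on the generic API; only the detour through &vptr->pdev->dev goes away. From include/linux/skbuff.h (roughly, for kernels of this vintage):

static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}
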
{
memset(vptr, 0, sizeof(struct velocity_info));
+ vptr->dev = &pdev->dev;
vptr->pdev = pdev;
vptr->chip_id = info->chip_id;
vptr->tx.numq = info->txqueue;
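
Caching &pdev->dev in vptr->dev is the hook that unties all of the DMA calls above from the PCI core; vptr->pdev survives only for PCI-specific setup. In struct velocity_info this amounts to something like the following (field placement and comments assumed, both fields appear in the diff):

struct velocity_info {
	struct device *dev;	/* generic device handle used for DMA */
	struct pci_dev *pdev;	/* kept for PCI-only configuration */
	/* ... remaining fields unchanged ... */
};
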
SET_NETDEV_DEV(dev, &pdev->dev);
vptr = netdev_priv(dev);
-
if (first) {
printk(KERN_INFO "%s Ver. %s\n",
VELOCITY_FULL_DRV_NAM, VELOCITY_VERSION);