Remove unused zero-copy code from velocity NIC driver.
author     Dave Jones <davej@redhat.com>
           Tue, 21 Jul 2009 09:15:49 +0000 (09:15 +0000)
committer  David S. Miller <davem@davemloft.net>
           Fri, 24 Jul 2009 01:01:37 +0000 (18:01 -0700)
This code hasn't been enabled in forever: nothing defines VELOCITY_ZERO_COPY_SUPPORT, so the blocks guarded by it have never been compiled in. Remove them.
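
For illustration only (not part of the patch): every deleted block hinges on the same preprocessor guard. Assuming nothing in the tree defines VELOCITY_ZERO_COPY_SUPPORT, as the message above implies, the guarded code never reaches the compiler. The standalone sketch below shows the same effect outside the driver.

    #include <stdio.h>

    /* VELOCITY_ZERO_COPY_SUPPORT is never defined, so the preprocessor
     * strips the guarded branch before compilation -- which is why the
     * driver's scatter-gather (NETIF_F_SG) path was dead code. */
    int main(void)
    {
    #ifdef VELOCITY_ZERO_COPY_SUPPORT
            printf("zero-copy path built in\n");    /* never compiled */
    #else
            printf("zero-copy path compiled out\n");
    #endif
            return 0;
    }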

Signed-off-by: Dave Jones <davej@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/via-velocity.c

index 42365e5f931b9558be8478f677f0ffb41f709e0e..4ebe2cacf50ca11c1885a7e5539c1d40a7b0a3d8 100644
@@ -976,9 +976,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
        dev->netdev_ops = &velocity_netdev_ops;
        dev->ethtool_ops = &velocity_ethtool_ops;
 
-#ifdef  VELOCITY_ZERO_COPY_SUPPORT
-       dev->features |= NETIF_F_SG;
-#endif
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
                NETIF_F_HW_VLAN_RX;
 
@@ -1849,11 +1846,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, struct velocity_td_
 
                pktlen = max_t(unsigned int, skb->len, ETH_ZLEN);
                for (i = 0; i < tdinfo->nskb_dma; i++) {
-#ifdef VELOCITY_ZERO_COPY_SUPPORT
-                       pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], le16_to_cpu(td->tdesc1.len), PCI_DMA_TODEVICE);
-#else
                        pci_unmap_single(vptr->pdev, tdinfo->skb_dma[i], pktlen, PCI_DMA_TODEVICE);
-#endif
                        tdinfo->skb_dma[i] = 0;
                }
        }
@@ -2095,13 +2088,6 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
 
        len = cpu_to_le16(pktlen);
 
-#ifdef VELOCITY_ZERO_COPY_SUPPORT
-       if (skb_shinfo(skb)->nr_frags > 6 && __skb_linearize(skb)) {
-               kfree_skb(skb);
-               return NETDEV_TX_OK;
-       }
-#endif
-
        spin_lock_irqsave(&vptr->lock, flags);
 
        index = vptr->tx.curr[qnum];
@@ -2111,59 +2097,18 @@ static int velocity_xmit(struct sk_buff *skb, struct net_device *dev)
        td_ptr->tdesc1.TCR = TCR0_TIC;
        td_ptr->td_buf[0].size &= ~TD_QUEUE;
 
-#ifdef VELOCITY_ZERO_COPY_SUPPORT
-       if (skb_shinfo(skb)->nr_frags > 0) {
-               int nfrags = skb_shinfo(skb)->nr_frags;
-               tdinfo->skb = skb;
-               if (nfrags > 6) {
-                       skb_copy_from_linear_data(skb, tdinfo->buf, skb->len);
-                       tdinfo->skb_dma[0] = tdinfo->buf_dma;
-                       td_ptr->tdesc0.len = len;
-                       td_ptr->tx.buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-                       td_ptr->tx.buf[0].pa_high = 0;
-                       td_ptr->tx.buf[0].size = len;   /* queue is 0 anyway */
-                       tdinfo->nskb_dma = 1;
-               } else {
-                       int i = 0;
-                       tdinfo->nskb_dma = 0;
-                       tdinfo->skb_dma[i] = pci_map_single(vptr->pdev, skb->data,
-                                               skb_headlen(skb), PCI_DMA_TODEVICE);
-
-                       td_ptr->tdesc0.len = len;
-
-                       /* FIXME: support 48bit DMA later */
-                       td_ptr->tx.buf[i].pa_low = cpu_to_le32(tdinfo->skb_dma);
-                       td_ptr->tx.buf[i].pa_high = 0;
-                       td_ptr->tx.buf[i].size = cpu_to_le16(skb_headlen(skb));
-
-                       for (i = 0; i < nfrags; i++) {
-                               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-                               void *addr = (void *)page_address(frag->page) + frag->page_offset;
-
-                               tdinfo->skb_dma[i + 1] = pci_map_single(vptr->pdev, addr, frag->size, PCI_DMA_TODEVICE);
-
-                               td_ptr->tx.buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]);
-                               td_ptr->tx.buf[i + 1].pa_high = 0;
-                               td_ptr->tx.buf[i + 1].size = cpu_to_le16(frag->size);
-                       }
-                       tdinfo->nskb_dma = i - 1;
-               }
+       /*
+        *      Map the linear network buffer into PCI space and
+        *      add it to the transmit ring.
+        */
+       tdinfo->skb = skb;
+       tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
+       td_ptr->tdesc0.len = len;
+       td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
+       td_ptr->td_buf[0].pa_high = 0;
+       td_ptr->td_buf[0].size = len;
+       tdinfo->nskb_dma = 1;
 
-       } else
-#endif
-       {
-               /*
-                *      Map the linear network buffer into PCI space and
-                *      add it to the transmit ring.
-                */
-               tdinfo->skb = skb;
-               tdinfo->skb_dma[0] = pci_map_single(vptr->pdev, skb->data, pktlen, PCI_DMA_TODEVICE);
-               td_ptr->tdesc0.len = len;
-               td_ptr->td_buf[0].pa_low = cpu_to_le32(tdinfo->skb_dma[0]);
-               td_ptr->td_buf[0].pa_high = 0;
-               td_ptr->td_buf[0].size = len;
-               tdinfo->nskb_dma = 1;
-       }
        td_ptr->tdesc1.cmd = TCPLS_NORMAL + (tdinfo->nskb_dma + 1) * 16;
 
        if (vptr->vlgrp && vlan_tx_tag_present(skb)) {