net: sgi: ioc3-eth: allocate space for desc rings only once
author Thomas Bogendoerfer <tbogendoerfer@suse.de>
Fri, 30 Aug 2019 09:25:28 +0000 (11:25 +0200)
committer David S. Miller <davem@davemloft.net>
Fri, 30 Aug 2019 20:54:35 +0000 (13:54 -0700)
Memory for the descriptor rings is currently allocated and freed every time the
interface is brought up or down. Since the ring sizes are fixed by the hardware,
allocate the rings once during probe and free them when the device is removed.
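
In outline, a minimal sketch of the new ring lifetime, using the field names
from the diff below (the error unwinding through out_stop is simplified to
plain returns here):

	/* ioc3_probe(): allocate both rings exactly once */
	ip->rxr = (unsigned long *)get_zeroed_page(GFP_KERNEL);	/* 4kb rx ring */
	if (!ip->rxr)
		return -ENOMEM;

	ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);	/* 16kb tx ring */
	if (!ip->txr)
		return -ENOMEM;	/* real code jumps to out_stop, which also frees rxr */

	/* ioc3_remove_one(): release the same pages exactly once */
	free_page((unsigned long)ip->rxr);
	free_pages((unsigned long)ip->txr, 2);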

Signed-off-by: Thomas Bogendoerfer <tbogendoerfer@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/sgi/ioc3-eth.c

index ba18a53fbbe6cd09ab06bd01eb3f323bc59b6264..e84239734338bf94787a6f656ad7d6d742c99f9f 100644
@@ -800,28 +800,17 @@ static inline void ioc3_clean_tx_ring(struct ioc3_private *ip)
 
 static void ioc3_free_rings(struct ioc3_private *ip)
 {
-       struct sk_buff *skb;
        int rx_entry, n_entry;
 
-       if (ip->txr) {
-               ioc3_clean_tx_ring(ip);
-               free_pages((unsigned long)ip->txr, 2);
-               ip->txr = NULL;
-       }
+       ioc3_clean_tx_ring(ip);
 
-       if (ip->rxr) {
-               n_entry = ip->rx_ci;
-               rx_entry = ip->rx_pi;
+       n_entry = ip->rx_ci;
+       rx_entry = ip->rx_pi;
 
-               while (n_entry != rx_entry) {
-                       skb = ip->rx_skbs[n_entry];
-                       if (skb)
-                               dev_kfree_skb_any(skb);
+       while (n_entry != rx_entry) {
+               dev_kfree_skb_any(ip->rx_skbs[n_entry]);
 
-                       n_entry = (n_entry + 1) & RX_RING_MASK;
-               }
-               free_page((unsigned long)ip->rxr);
-               ip->rxr = NULL;
+               n_entry = (n_entry + 1) & RX_RING_MASK;
        }
 }
 
@@ -829,49 +818,34 @@ static void ioc3_alloc_rings(struct net_device *dev)
 {
        struct ioc3_private *ip = netdev_priv(dev);
        struct ioc3_erxbuf *rxb;
-       unsigned long *rxr;
        int i;
 
-       if (!ip->rxr) {
-               /* Allocate and initialize rx ring.  4kb = 512 entries  */
-               ip->rxr = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
-               rxr = ip->rxr;
-               if (!rxr)
-                       pr_err("%s: get_zeroed_page() failed!\n", __func__);
-
-               /* Now the rx buffers.  The RX ring may be larger but
-                * we only allocate 16 buffers for now.  Need to tune
-                * this for performance and memory later.
-                */
-               for (i = 0; i < RX_BUFFS; i++) {
-                       struct sk_buff *skb;
+       /* Now the rx buffers.  The RX ring may be larger but
+        * we only allocate 16 buffers for now.  Need to tune
+        * this for performance and memory later.
+        */
+       for (i = 0; i < RX_BUFFS; i++) {
+               struct sk_buff *skb;
 
-                       skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
-                       if (!skb) {
-                               show_free_areas(0, NULL);
-                               continue;
-                       }
+               skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
+               if (!skb) {
+                       show_free_areas(0, NULL);
+                       continue;
+               }
 
-                       ip->rx_skbs[i] = skb;
+               ip->rx_skbs[i] = skb;
 
-                       /* Because we reserve afterwards. */
-                       skb_put(skb, (1664 + RX_OFFSET));
-                       rxb = (struct ioc3_erxbuf *)skb->data;
-                       rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
-                       skb_reserve(skb, RX_OFFSET);
-               }
-               ip->rx_ci = 0;
-               ip->rx_pi = RX_BUFFS;
+               /* Because we reserve afterwards. */
+               skb_put(skb, (1664 + RX_OFFSET));
+               rxb = (struct ioc3_erxbuf *)skb->data;
+               ip->rxr[i] = cpu_to_be64(ioc3_map(rxb, 1));
+               skb_reserve(skb, RX_OFFSET);
        }
+       ip->rx_ci = 0;
+       ip->rx_pi = RX_BUFFS;
 
-       if (!ip->txr) {
-               /* Allocate and initialize tx rings.  16kb = 128 bufs.  */
-               ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
-               if (!ip->txr)
-                       pr_err("%s: __get_free_pages() failed!\n", __func__);
-               ip->tx_pi = 0;
-               ip->tx_ci = 0;
-       }
+       ip->tx_pi = 0;
+       ip->tx_ci = 0;
 }
 
 static void ioc3_init_rings(struct net_device *dev)
@@ -1239,6 +1213,23 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        timer_setup(&ip->ioc3_timer, ioc3_timer, 0);
 
        ioc3_stop(ip);
+
+       /* Allocate rx ring.  4kb = 512 entries, must be 4kb aligned */
+       ip->rxr = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+       if (!ip->rxr) {
+               pr_err("ioc3-eth: rx ring allocation failed\n");
+               err = -ENOMEM;
+               goto out_stop;
+       }
+
+       /* Allocate tx rings.  16kb = 128 bufs, must be 16kb aligned  */
+       ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL, 2);
+       if (!ip->txr) {
+               pr_err("ioc3-eth: tx ring allocation failed\n");
+               err = -ENOMEM;
+               goto out_stop;
+       }
+
        ioc3_init(dev);
 
        ip->pdev = pdev;
@@ -1293,6 +1284,10 @@ out_stop:
        ioc3_stop(ip);
        del_timer_sync(&ip->ioc3_timer);
        ioc3_free_rings(ip);
+       if (ip->rxr)
+               free_page((unsigned long)ip->rxr);
+       if (ip->txr)
+               free_pages((unsigned long)ip->txr, 2);
 out_res:
        pci_release_regions(pdev);
 out_free:
@@ -1310,6 +1306,9 @@ static void ioc3_remove_one(struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct ioc3_private *ip = netdev_priv(dev);
 
+       free_page((unsigned long)ip->rxr);
+       free_pages((unsigned long)ip->txr, 2);
+
        unregister_netdev(dev);
        del_timer_sync(&ip->ioc3_timer);