forcedeth: optimize the xmit/rx with unlikely
author: Zhu Yanjun <yanjun.zhu@oracle.com>
Fri, 22 Sep 2017 14:20:21 +0000 (10:20 -0400)
committer: David S. Miller <davem@davemloft.net>
Sun, 24 Sep 2017 03:04:23 +0000 (20:04 -0700)
In the xmit/rx fastpath, the function dma_map_single rarely fails.
Therefore, add an unlikely() optimization to this error check
conditional.

Signed-off-by: Zhu Yanjun <yanjun.zhu@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/nvidia/forcedeth.c

index b605b94f456750c97652aab8039f6e84ae6f99f9..a235e8881af910ed5ab631c556d43658a9b1a226 100644 (file)
@@ -1817,8 +1817,8 @@ static int nv_alloc_rx(struct net_device *dev)
                                                             skb->data,
                                                             skb_tailroom(skb),
                                                             DMA_FROM_DEVICE);
-                       if (dma_mapping_error(&np->pci_dev->dev,
-                                             np->put_rx_ctx->dma)) {
+                       if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                                      np->put_rx_ctx->dma))) {
                                kfree_skb(skb);
                                goto packet_dropped;
                        }
@@ -1858,8 +1858,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
                                                             skb->data,
                                                             skb_tailroom(skb),
                                                             DMA_FROM_DEVICE);
-                       if (dma_mapping_error(&np->pci_dev->dev,
-                                             np->put_rx_ctx->dma)) {
+                       if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                                      np->put_rx_ctx->dma))) {
                                kfree_skb(skb);
                                goto packet_dropped;
                        }
@@ -2227,8 +2227,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                     skb->data + offset, bcnt,
                                                     DMA_TO_DEVICE);
-               if (dma_mapping_error(&np->pci_dev->dev,
-                                     np->put_tx_ctx->dma)) {
+               if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                              np->put_tx_ctx->dma))) {
                        /* on DMA mapping error - drop the packet */
                        dev_kfree_skb_any(skb);
                        u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2268,7 +2268,8 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
                                                        frag, offset,
                                                        bcnt,
                                                        DMA_TO_DEVICE);
-                       if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+                       if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                                      np->put_tx_ctx->dma))) {
 
                                /* Unwind the mapped fragments */
                                do {
@@ -2377,8 +2378,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                np->put_tx_ctx->dma = dma_map_single(&np->pci_dev->dev,
                                                     skb->data + offset, bcnt,
                                                     DMA_TO_DEVICE);
-               if (dma_mapping_error(&np->pci_dev->dev,
-                                     np->put_tx_ctx->dma)) {
+               if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                              np->put_tx_ctx->dma))) {
                        /* on DMA mapping error - drop the packet */
                        dev_kfree_skb_any(skb);
                        u64_stats_update_begin(&np->swstats_tx_syncp);
@@ -2419,7 +2420,8 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
                                                        bcnt,
                                                        DMA_TO_DEVICE);
 
-                       if (dma_mapping_error(&np->pci_dev->dev, np->put_tx_ctx->dma)) {
+                       if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                                      np->put_tx_ctx->dma))) {
 
                                /* Unwind the mapped fragments */
                                do {
@@ -5075,8 +5077,8 @@ static int nv_loopback_test(struct net_device *dev)
        test_dma_addr = dma_map_single(&np->pci_dev->dev, tx_skb->data,
                                       skb_tailroom(tx_skb),
                                       DMA_FROM_DEVICE);
-       if (dma_mapping_error(&np->pci_dev->dev,
-                             test_dma_addr)) {
+       if (unlikely(dma_mapping_error(&np->pci_dev->dev,
+                                      test_dma_addr))) {
                dev_kfree_skb_any(tx_skb);
                goto out;
        }