net: core: support XDP generic on stacked devices.
author Stephen Hemminger <stephen@networkplumber.org>
Tue, 28 May 2019 18:47:31 +0000 (11:47 -0700)
committer David S. Miller <davem@davemloft.net>
Thu, 30 May 2019 18:12:21 +0000 (11:12 -0700)
When a device is stacked (like team, bonding, failsafe or netvsc), the
XDP generic program for the parent device was not called.

Move the call to XDP generic inside __netif_receive_skb_core(), where it
can run multiple times in the stacked case.
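
For illustration, this is roughly how a stacked driver hands frames up to
its parent device: the lower device's rx_handler retargets skb->dev and
returns RX_HANDLER_ANOTHER, which sends __netif_receive_skb_core() back to
the another_round label, so the XDP generic hook placed there also runs for
the upper device. The sketch below is not part of the patch; the handler
name and the use of rx_handler_data are illustrative (bonding, team and
netvsc each do this with their own helpers).

  /* Illustrative only: a simplified rx_handler in the style used by
   * bonding/team/netvsc.  Returning RX_HANDLER_ANOTHER after retargeting
   * skb->dev makes __netif_receive_skb_core() jump back to another_round,
   * so the generic XDP hook now also runs for the upper (parent) device.
   */
  #include <linux/netdevice.h>
  #include <linux/skbuff.h>

  static rx_handler_result_t upper_dev_handle_frame(struct sk_buff **pskb)
  {
          struct sk_buff *skb = *pskb;
          struct net_device *upper = rcu_dereference(skb->dev->rx_handler_data);

          skb->dev = upper;               /* retarget to the stacked device  */
          return RX_HANDLER_ANOTHER;      /* re-enter the another_round loop */
  }

  /* Registration, typically done when the lower device is enslaved:
   *      netdev_rx_handler_register(lower_dev, upper_dev_handle_frame, upper_dev);
   */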

Fixes: d445516966dc ("net: xdp: support xdp generic on virtual devices")
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/core/dev.c

index b6b8505cfb3e2394f74b41b8e01055c697ad384b..cc2a4e2573244c0749b6a829f8efddfc0ded3737 100644
@@ -4502,23 +4502,6 @@ static int netif_rx_internal(struct sk_buff *skb)
 
        trace_netif_rx(skb);
 
-       if (static_branch_unlikely(&generic_xdp_needed_key)) {
-               int ret;
-
-               preempt_disable();
-               rcu_read_lock();
-               ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
-               rcu_read_unlock();
-               preempt_enable();
-
-               /* Consider XDP consuming the packet a success from
-                * the netdev point of view we do not want to count
-                * this as an error.
-                */
-               if (ret != XDP_PASS)
-                       return NET_RX_SUCCESS;
-       }
-
 #ifdef CONFIG_RPS
        if (static_branch_unlikely(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
@@ -4858,6 +4841,18 @@ another_round:
 
        __this_cpu_inc(softnet_data.processed);
 
+       if (static_branch_unlikely(&generic_xdp_needed_key)) {
+               int ret2;
+
+               preempt_disable();
+               ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+               preempt_enable();
+
+               if (ret2 != XDP_PASS)
+                       return NET_RX_DROP;
+               skb_reset_mac_len(skb);
+       }
+
        if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
            skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
                skb = skb_vlan_untag(skb);
@@ -5178,19 +5173,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
        if (skb_defer_rx_timestamp(skb))
                return NET_RX_SUCCESS;
 
-       if (static_branch_unlikely(&generic_xdp_needed_key)) {
-               int ret;
-
-               preempt_disable();
-               rcu_read_lock();
-               ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
-               rcu_read_unlock();
-               preempt_enable();
-
-               if (ret != XDP_PASS)
-                       return NET_RX_DROP;
-       }
-
        rcu_read_lock();
 #ifdef CONFIG_RPS
        if (static_branch_unlikely(&rps_needed)) {
@@ -5211,7 +5193,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
 
 static void netif_receive_skb_list_internal(struct list_head *head)
 {
-       struct bpf_prog *xdp_prog = NULL;
        struct sk_buff *skb, *next;
        struct list_head sublist;
 
@@ -5224,21 +5205,6 @@ static void netif_receive_skb_list_internal(struct list_head *head)
        }
        list_splice_init(&sublist, head);
 
-       if (static_branch_unlikely(&generic_xdp_needed_key)) {
-               preempt_disable();
-               rcu_read_lock();
-               list_for_each_entry_safe(skb, next, head, list) {
-                       xdp_prog = rcu_dereference(skb->dev->xdp_prog);
-                       skb_list_del_init(skb);
-                       if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
-                               list_add_tail(&skb->list, &sublist);
-               }
-               rcu_read_unlock();
-               preempt_enable();
-               /* Put passed packets back on main list */
-               list_splice_init(&sublist, head);
-       }
-
        rcu_read_lock();
 #ifdef CONFIG_RPS
        if (static_branch_unlikely(&rps_needed)) {