xdp: Fix handling of devmap in generic XDP
author: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Thu, 14 Jun 2018 02:07:42 +0000 (11:07 +0900)
committer: Daniel Borkmann <daniel@iogearbox.net>
Fri, 15 Jun 2018 21:47:15 +0000 (23:47 +0200)
Commit 67f29e07e131 ("bpf: devmap introduce dev_map_enqueue") changed
the return value type of __devmap_lookup_elem() from struct net_device *
to struct bpf_dtab_netdev * but forgot to modify generic XDP code
accordingly.

Thus generic XDP incorrectly used struct bpf_dtab_netdev where struct
net_device is expected, then skb->dev was set to invalid value.

v2:
- Fix compiler warning without CONFIG_BPF_SYSCALL.

Fixes: 67f29e07e131 ("bpf: devmap introduce dev_map_enqueue")
Signed-off-by: Toshiaki Makita <makita.toshiaki@lab.ntt.co.jp>
Acked-by: Yonghong Song <yhs@fb.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
include/linux/bpf.h
include/linux/filter.h
kernel/bpf/devmap.c
net/core/filter.c

index 995c3b1e59bfa82ef3ad0504b090ab28a898f016..7df32a3200f740b0a00a85615938178547bb5690 100644 (file)
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct xdp_buff;
+struct sk_buff;
 
 struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
 void __dev_map_flush(struct bpf_map *map);
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
                    struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog);
 
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return 0;
 }
 
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+                                          struct sk_buff *skb,
+                                          struct bpf_prog *xdp_prog)
+{
+       return 0;
+}
+
 static inline
 struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
index 108f9812e1968d1552f42044daee7c2ea013d897..b615df57b7d5b2ccb468c411c3a2aae103cd2aea 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
 #include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
 
 #include <net/sch_generic.h>
 
@@ -802,6 +803,21 @@ static inline bool bpf_dump_raw_ok(void)
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
                                       const struct bpf_insn *patch, u32 len);
 
+static inline int __xdp_generic_ok_fwd_dev(struct sk_buff *skb,
+                                          struct net_device *fwd)
+{
+       unsigned int len;
+
+       if (unlikely(!(fwd->flags & IFF_UP)))
+               return -ENETDOWN;
+
+       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+       if (skb->len > len)
+               return -EMSGSIZE;
+
+       return 0;
+}
+
 /* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
  * same cpu context. Further for best results no more than a single map
  * for the do_redirect/do_flush pair should be used. This limitation is
index a7cc7b3494a90f582886485668562ccfef5f5ffd..642c97f6d1b8efeb07e0281680cd1dad26d0081b 100644 (file)
@@ -345,6 +345,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
        return bq_enqueue(dst, xdpf, dev_rx);
 }
 
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+                            struct bpf_prog *xdp_prog)
+{
+       int err;
+
+       err = __xdp_generic_ok_fwd_dev(skb, dst->dev);
+       if (unlikely(err))
+               return err;
+       skb->dev = dst->dev;
+       generic_xdp_tx(skb, xdp_prog);
+
+       return 0;
+}
+
 static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
 {
        struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
index 3d9ba7e5965adc4658b379a0cf55ff2f22f4b94d..e7f12e9f598c8edfc684e05e3d290a952aa9cf2f 100644 (file)
@@ -3214,20 +3214,6 @@ err:
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
 
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
-       unsigned int len;
-
-       if (unlikely(!(fwd->flags & IFF_UP)))
-               return -ENETDOWN;
-
-       len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
-       if (skb->len > len)
-               return -EMSGSIZE;
-
-       return 0;
-}
-
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
@@ -3256,10 +3242,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
        }
 
        if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
-               if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+               struct bpf_dtab_netdev *dst = fwd;
+
+               err = dev_map_generic_redirect(dst, skb, xdp_prog);
+               if (unlikely(err))
                        goto err;
-               skb->dev = fwd;
-               generic_xdp_tx(skb, xdp_prog);
        } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
                struct xdp_sock *xs = fwd;