xsk: wire up XDP_SKB side of AF_XDP
author Björn Töpel <bjorn.topel@intel.com>
Wed, 2 May 2018 11:01:30 +0000 (13:01 +0200)
committer Alexei Starovoitov <ast@kernel.org>
Thu, 3 May 2018 22:55:24 +0000 (15:55 -0700)
This commit wires up the xskmap to the XDP_SKB (generic XDP) layer: netif_receive_generic_xdp() now operates on an xdp_buff supplied by its caller, and xdp_do_generic_redirect() takes that xdp_buff as a new parameter so that a redirect into a BPF_MAP_TYPE_XSKMAP entry can hand the buffer to the target AF_XDP socket via xsk_generic_rcv(), consuming the original skb on success. The XDP_REDIRECT case in do_xdp_generic() no longer falls through to XDP_TX; the generic redirect paths now call generic_xdp_tx() themselves when the target is a net_device. An illustrative sketch of the BPF-program side follows the sign-offs, and one of the user-space side follows the diff.

Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
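For context only (not part of this commit): with this change, an XDP program running in generic/XDP_SKB mode can steer frames into an AF_XDP socket by redirecting through a BPF_MAP_TYPE_XSKMAP. The sketch below loosely follows the samples/bpf conventions of this era; the map name xsks_map, the section names, and the per-RX-queue keying are assumptions made for illustration, not something this patch defines.

/* Illustrative sketch only -- not part of this commit. Uses the classic
 * samples/bpf style of this era ("bpf_helpers.h", struct bpf_map_def);
 * the map name xsks_map and the section names are made up.
 */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") xsks_map = {
	.type		= BPF_MAP_TYPE_XSKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 4,
};

SEC("xdp_sock")
int xdp_sock_prog(struct xdp_md *ctx)
{
	int index = ctx->rx_queue_index;

	/* Hand the frame to the AF_XDP socket stored at
	 * xsks_map[rx_queue_index]; if no socket is bound at that key
	 * the redirect fails and the frame is dropped.
	 */
	return bpf_redirect_map(&xsks_map, index, 0);
}

char _license[] SEC("license") = "GPL";

In XDP_SKB mode it is the generic path touched by this patch (netif_receive_generic_xdp -> xdp_do_generic_redirect_map -> xsk_generic_rcv) that makes such a redirect actually deliver the frame to the socket.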
include/linux/filter.h
net/core/dev.c
net/core/filter.c

diff --git a/include/linux/filter.h b/include/linux/filter.h
index 64899c04c1a6949645526940e60b65c71170f666..b7f81e3a70cb20a00aa22a7c88b80343b19f0d9d 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -760,7 +760,7 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
  * This does not appear to be a real limitation for existing software.
  */
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-                           struct bpf_prog *prog);
+                           struct xdp_buff *xdp, struct bpf_prog *prog);
 int xdp_do_redirect(struct net_device *dev,
                    struct xdp_buff *xdp,
                    struct bpf_prog *prog);
diff --git a/net/core/dev.c b/net/core/dev.c
index 8f8931b93140e15db2b9a530d9b28dd94b6f7cd1..aea36b5a2fed0294cf924e485afe70a700e688af 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3994,12 +3994,12 @@ static struct netdev_rx_queue *netif_get_rxqueue(struct sk_buff *skb)
 }
 
 static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+                                    struct xdp_buff *xdp,
                                     struct bpf_prog *xdp_prog)
 {
        struct netdev_rx_queue *rxqueue;
        void *orig_data, *orig_data_end;
        u32 metalen, act = XDP_DROP;
-       struct xdp_buff xdp;
        int hlen, off;
        u32 mac_len;
 
@@ -4034,19 +4034,19 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
         */
        mac_len = skb->data - skb_mac_header(skb);
        hlen = skb_headlen(skb) + mac_len;
-       xdp.data = skb->data - mac_len;
-       xdp.data_meta = xdp.data;
-       xdp.data_end = xdp.data + hlen;
-       xdp.data_hard_start = skb->data - skb_headroom(skb);
-       orig_data_end = xdp.data_end;
-       orig_data = xdp.data;
+       xdp->data = skb->data - mac_len;
+       xdp->data_meta = xdp->data;
+       xdp->data_end = xdp->data + hlen;
+       xdp->data_hard_start = skb->data - skb_headroom(skb);
+       orig_data_end = xdp->data_end;
+       orig_data = xdp->data;
 
        rxqueue = netif_get_rxqueue(skb);
-       xdp.rxq = &rxqueue->xdp_rxq;
+       xdp->rxq = &rxqueue->xdp_rxq;
 
-       act = bpf_prog_run_xdp(xdp_prog, &xdp);
+       act = bpf_prog_run_xdp(xdp_prog, xdp);
 
-       off = xdp.data - orig_data;
+       off = xdp->data - orig_data;
        if (off > 0)
                __skb_pull(skb, off);
        else if (off < 0)
@@ -4056,10 +4056,11 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
        /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
         * pckt.
         */
-       off = orig_data_end - xdp.data_end;
+       off = orig_data_end - xdp->data_end;
        if (off != 0) {
-               skb_set_tail_pointer(skb, xdp.data_end - xdp.data);
+               skb_set_tail_pointer(skb, xdp->data_end - xdp->data);
                skb->len -= off;
+
        }
 
        switch (act) {
@@ -4068,7 +4069,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
                __skb_push(skb, mac_len);
                break;
        case XDP_PASS:
-               metalen = xdp.data - xdp.data_meta;
+               metalen = xdp->data - xdp->data_meta;
                if (metalen)
                        skb_metadata_set(skb, metalen);
                break;
@@ -4118,17 +4119,19 @@ static struct static_key generic_xdp_needed __read_mostly;
 int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb)
 {
        if (xdp_prog) {
-               u32 act = netif_receive_generic_xdp(skb, xdp_prog);
+               struct xdp_buff xdp;
+               u32 act;
                int err;
 
+               act = netif_receive_generic_xdp(skb, &xdp, xdp_prog);
                if (act != XDP_PASS) {
                        switch (act) {
                        case XDP_REDIRECT:
                                err = xdp_do_generic_redirect(skb->dev, skb,
-                                                             xdp_prog);
+                                                             &xdp, xdp_prog);
                                if (err)
                                        goto out_redir;
-                       /* fallthru to submit skb */
+                               break;
                        case XDP_TX:
                                generic_xdp_tx(skb, xdp_prog);
                                break;
diff --git a/net/core/filter.c b/net/core/filter.c
index 40d4bbb4508dd3691c2d24cf8282865c8cf7c4de..120bc8a202d9789f4fc6e22df307ef45c306fcde 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -59,6 +59,7 @@
 #include <net/tcp.h>
 #include <net/xfrm.h>
 #include <linux/bpf_trace.h>
+#include <net/xdp_sock.h>
 
 /**
  *     sk_filter_trim_cap - run a packet through a socket filter
@@ -2973,13 +2974,14 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
 
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
+                                      struct xdp_buff *xdp,
                                       struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        unsigned long map_owner = ri->map_owner;
        struct bpf_map *map = ri->map;
-       struct net_device *fwd = NULL;
        u32 index = ri->ifindex;
+       void *fwd = NULL;
        int err = 0;
 
        ri->ifindex = 0;
@@ -3001,6 +3003,14 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
                if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
                        goto err;
                skb->dev = fwd;
+               generic_xdp_tx(skb, xdp_prog);
+       } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
+               struct xdp_sock *xs = fwd;
+
+               err = xsk_generic_rcv(xs, xdp);
+               if (err)
+                       goto err;
+               consume_skb(skb);
        } else {
                /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
                err = -EBADRQC;
@@ -3015,7 +3025,7 @@ err:
 }
 
 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
-                           struct bpf_prog *xdp_prog)
+                           struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
        struct redirect_info *ri = this_cpu_ptr(&redirect_info);
        u32 index = ri->ifindex;
@@ -3023,7 +3033,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
        int err = 0;
 
        if (ri->map)
-               return xdp_do_generic_redirect_map(dev, skb, xdp_prog);
+               return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);
 
        ri->ifindex = 0;
        fwd = dev_get_by_index_rcu(dev_net(dev), index);
@@ -3037,6 +3047,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 
        skb->dev = fwd;
        _trace_xdp_redirect(dev, xdp_prog, index);
+       generic_xdp_tx(skb, xdp_prog);
        return 0;
 err:
        _trace_xdp_redirect_err(dev, xdp_prog, index, err);
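
Also for context only (not part of this commit): the xskmap is populated from user space with an AF_XDP socket file descriptor, typically keyed by RX queue index. The fragment below is a minimal sketch under stated assumptions: the full AF_XDP setup (UMEM registration, fill/RX ring configuration and bind()) is omitted, map_fd and queue_id are placeholders, and bpf_map_update_elem() is the libbpf wrapper.

/* Illustrative sketch only -- not part of this commit. UMEM and ring
 * setup plus bind() are required in practice but omitted here.
 */
#include <sys/socket.h>
#include <bpf/bpf.h>

#ifndef AF_XDP
#define AF_XDP 44	/* may not yet be present in libc headers */
#endif

static int add_xsk_to_map(int map_fd, int queue_id)
{
	int xsk_fd = socket(AF_XDP, SOCK_RAW, 0);

	if (xsk_fd < 0)
		return -1;

	/* Store the socket fd under the queue index; redirects from the
	 * XDP program to this key are then delivered -- in generic mode
	 * via the xsk_generic_rcv() call wired up by this patch.
	 */
	return bpf_map_update_elem(map_fd, &queue_id, &xsk_fd, 0);
}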