--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ ... @@
DEFINE_PER_CPU(int, xmit_recursion);
EXPORT_SYMBOL(xmit_recursion);
-#define RECURSION_LIMIT 10
-
/**
* dev_loopback_xmit - loop back @skb
* @net: network namespace this loopback is happening in

@@ ... @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
-
- if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
+ if (unlikely(__this_cpu_read(xmit_recursion) >
+ XMIT_RECURSION_LIMIT))
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);

--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ ... @@
.arg5_type = ARG_ANYTHING,
};
+static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ if (skb_at_tc_ingress(skb))
+ skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
+
+ return dev_forward_skb(dev, skb);
+}
+
+static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ int ret;
+
+ if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+ net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
+ kfree_skb(skb);
+ return -ENETDOWN;
+ }
+
+ skb->dev = dev;
+
+ __this_cpu_inc(xmit_recursion);
+ ret = dev_queue_xmit(skb);
+ __this_cpu_dec(xmit_recursion);
+
+ return ret;
+}
+
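The guard in __bpf_tx_skb() above is a plain per-CPU depth counter
bumped around the re-entrant call. A minimal userspace sketch of the
same pattern; my_xmit, xmit_depth and DEPTH_LIMIT are illustrative
names here, not kernel symbols:

#include <stdio.h>
#include <errno.h>

#define DEPTH_LIMIT 10		/* stands in for XMIT_RECURSION_LIMIT */

static int xmit_depth;		/* stands in for the per-CPU xmit_recursion */

static int my_xmit(int hops)
{
	int ret;

	/* Refuse to recurse past the limit, as __bpf_tx_skb() does. */
	if (xmit_depth > DEPTH_LIMIT)
		return -ENETDOWN;

	xmit_depth++;
	/* A redirect that loops back into the xmit path is modeled
	 * as a recursive call. */
	ret = hops > 0 ? my_xmit(hops - 1) : 0;
	xmit_depth--;

	return ret;
}

int main(void)
{
	printf("8 hops:   %d\n", my_xmit(8));	/* 0: within the limit */
	printf("100 hops: %d\n", my_xmit(100));	/* -ENETDOWN: guard trips */
	return 0;
}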
static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
{
- struct sk_buff *skb = (struct sk_buff *) (long) r1, *skb2;
+ struct sk_buff *skb = (struct sk_buff *) (long) r1;
struct net_device *dev;
if (unlikely(flags & ~(BPF_F_INGRESS)))
	return -EINVAL;

dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
if (unlikely(!dev))
	return -EINVAL;
- skb2 = skb_clone(skb, GFP_ATOMIC);
- if (unlikely(!skb2))
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
return -ENOMEM;
- if (flags & BPF_F_INGRESS) {
- if (skb_at_tc_ingress(skb2))
- skb_postpush_rcsum(skb2, skb_mac_header(skb2),
- skb2->mac_len);
- return dev_forward_skb(dev, skb2);
- }
-
- skb2->dev = dev;
- return dev_queue_xmit(skb2);
+ return flags & BPF_F_INGRESS ?
+ __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
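A minimal tc classifier exercising this helper could look as follows;
this is a sketch in the samples/bpf style, where cls_main, the section
name and ifindex 2 are arbitrary choices, not part of this patch:

#include <linux/bpf.h>

/* Abbreviated helper binding in the samples/bpf style. */
static int (*bpf_clone_redirect)(void *skb, __u32 ifindex, __u64 flags) =
	(void *) BPF_FUNC_clone_redirect;

__attribute__((section("classifier"), used))
int cls_main(struct __sk_buff *skb)
{
	/* Clone the packet and transmit the clone on ifindex 2; passing
	 * BPF_F_INGRESS instead would inject it on ingress. A redirect
	 * cycle is now caught by the recursion check in __bpf_tx_skb(). */
	bpf_clone_redirect(skb, 2, 0);
	return 0;	/* TC_ACT_OK */
}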
static const struct bpf_func_proto bpf_clone_redirect_proto = {

@@ ... @@ int skb_do_redirect(struct sk_buff *skb)
if (unlikely(!dev)) {
	kfree_skb(skb);
	return -EINVAL;
}

- if (ri->flags & BPF_F_INGRESS) {
- if (skb_at_tc_ingress(skb))
- skb_postpush_rcsum(skb, skb_mac_header(skb),
- skb->mac_len);
- return dev_forward_skb(dev, skb);
- }
-
- skb->dev = dev;
- return dev_queue_xmit(skb);
+ return ri->flags & BPF_F_INGRESS ?
+ __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}
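Note that bpf_redirect() itself only records ifindex and flags in the
per-CPU redirect_info that skb_do_redirect() consumes above. A
userspace sketch of that store-then-consume handoff; the *_sketch
names are illustrative, and a single global stands in for the per-CPU
variable:

#include <stdio.h>

#define TC_ACT_REDIRECT	7	/* as in <linux/pkt_cls.h> */

struct redirect_info {
	unsigned int ifindex;
	unsigned long long flags;
};

static struct redirect_info redirect_info;

/* The helper runs in BPF context and only records intent. */
static int bpf_redirect_sketch(unsigned int ifindex, unsigned long long flags)
{
	redirect_info.ifindex = ifindex;
	redirect_info.flags = flags;
	return TC_ACT_REDIRECT;
}

/* The tc action path later consumes the record on the same CPU. */
static int skb_do_redirect_sketch(void)
{
	struct redirect_info *ri = &redirect_info;

	printf("redirecting to ifindex %u, flags %#llx\n",
	       ri->ifindex, ri->flags);
	ri->ifindex = 0;	/* one-shot: clear after use */
	return 0;
}

int main(void)
{
	if (bpf_redirect_sketch(2, 0) == TC_ACT_REDIRECT)
		skb_do_redirect_sketch();
	return 0;
}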
static const struct bpf_func_proto bpf_redirect_proto = {