The goal of this patch is to harmonize the cleanup done on an skbuff on the
xmit path. Before this patch, the behavior differed depending on the tunnel
type: some drivers called only nf_reset(), others also called secpath_reset()
or skb_dst_drop(), and only some scrubbed packets that cross a network
namespace. The cleanup is now centralized in skb_scrub_packet(), which
iptunnel_xmit() calls with a new 'xnet' argument indicating whether the
packet is crossing a netns boundary (see the sketch below).
Signed-off-by: Nicolas Dichtel <nicolas.dichtel@6wind.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
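
For reference, here is a minimal sketch of what skb_scrub_packet() does once
this series is applied. This is an illustration only, not part of the patch;
the authoritative version lives in net/core/skbuff.c, and the field names
follow the 3.x-era struct sk_buff:

    /* Sketch: the common cleanup applied to an skb at encapsulation time. */
    void skb_scrub_packet(struct sk_buff *skb, bool xnet)
    {
    	skb->tstamp.tv64 = 0;		/* forget the arrival timestamp */
    	skb->pkt_type = PACKET_HOST;
    	skb->skb_iif = 0;		/* forget the input interface */
    	skb_dst_drop(skb);		/* drop the cached route */
    	skb->mark = 0;
    	secpath_reset(skb);		/* drop IPsec state */
    	nf_reset(skb);			/* drop conntrack state */
    	nf_reset_trace(skb);

    	if (!xnet)
    		return;

    	/* The packet changes netns: it must not keep a reference to a
    	 * socket from its namespace of origin.
    	 */
    	skb_orphan(skb);
    }

Tunnels that never change netns on this path (vxlan, ovs gre) pass
xnet = false; the ipv4/ipv6 tunnel drivers pass
!net_eq(tunnel->net, dev_net(dev)).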
 		skb->encapsulation = 1;
 	}
+	skb_scrub_packet(skb, false);
+
 	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
 		+ VXLAN_HLEN + sizeof(struct ipv6hdr)
 		+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
 	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
 			      IPSKB_REROUTED);
-	skb_dst_drop(skb);
 	skb_dst_set(skb, dst);
 	if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
 	if (err)
 		return err;
-	return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df);
+	return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
+			     false);
 }
 EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
 	struct net_device_stats *stats = &dev->stats;
 	int pkt_len, err;
-	nf_reset(skb);
 	pkt_len = skb->len;
 	err = ip6_local_out(skb);
 int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
 int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, __u8 proto,
-		  __u8 tos, __u8 ttl, __be16 df);
+		  __u8 tos, __u8 ttl, __be16 df, bool xnet);
 static inline void iptunnel_xmit_stats(int err,
 				       struct net_device_stats *err_stats,
 		goto tx_error;
 	}
-	if (!net_eq(tunnel->net, dev_net(dev)))
-		skb_scrub_packet(skb, true);
-
 	if (tunnel->err_count > 0) {
 		if (time_before(jiffies,
 				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
 	}
 	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
-			    ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df);
+			    ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df,
+			    !net_eq(tunnel->net, dev_net(dev)));
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 	return;
 int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
 		  __be32 src, __be32 dst, __u8 proto,
-		  __u8 tos, __u8 ttl, __be16 df)
+		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
 {
 	int pkt_len = skb->len;
 	struct iphdr *iph;
 	int err;
-	nf_reset(skb);
-	secpath_reset(skb);
+	skb_scrub_packet(skb, xnet);
+
 	skb->rxhash = 0;
-	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->dst);
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 		tunnel->err_count = 0;
 	}
+	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+
 	max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
 		skb = new_skb;
 	}
-	skb_dst_drop(skb);
-
 	if (fl6->flowi6_mark) {
 		skb_dst_set(skb, dst);
 		ndst = NULL;
 		goto tx_err_dst_release;
 	}
-	if (!net_eq(t->net, dev_net(dev)))
-		skb_scrub_packet(skb, true);
+	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
 	/*
 	 * Okay, now see if we can stuff it in the buffer as-is.
 		consume_skb(skb);
 		skb = new_skb;
 	}
-	skb_dst_drop(skb);
 	if (fl6->flowi6_mark) {
 		skb_dst_set(skb, dst);
 		ndst = NULL;
 		tunnel->err_count = 0;
 	}
-	if (!net_eq(tunnel->net, dev_net(dev)))
-		skb_scrub_packet(skb, true);
-
 	/*
 	 * Okay, now see if we can stuff it in the buffer as-is.
 	 */
 	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
 	err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
-			    ttl, df);
+			    ttl, df, !net_eq(tunnel->net, dev_net(dev)));
 	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
 	return NETDEV_TX_OK;
 	return iptunnel_xmit(rt, skb, fl.saddr,
 			     OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
 			     OVS_CB(skb)->tun_key->ipv4_tos,
-			     OVS_CB(skb)->tun_key->ipv4_ttl, df);
+			     OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
 err_free_rt:
 	ip_rt_put(rt);
 error: