1 From: Pablo Neira Ayuso <pablo@netfilter.org>
2 Date: Tue, 23 Mar 2021 00:56:22 +0100
3 Subject: [PATCH] netfilter: flowtable: consolidate
4 skb_try_make_writable() call
6 Fetch the layer 4 header size to be mangled by NAT when building the
7 tuple, then use it to make writable the network and the transport
8 headers. After this update, the NAT routines now assume that the skbuff
9 area is writable. Do the pointer refetch only after the single
10 skb_try_make_writable() call.
12 Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
15 --- a/net/netfilter/nf_flow_table_core.c
16 +++ b/net/netfilter/nf_flow_table_core.c
17 @@ -395,9 +395,6 @@ static int nf_flow_nat_port_tcp(struct s
21 - if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
24 tcph = (void *)(skb_network_header(skb) + thoff);
25 inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
27 @@ -409,9 +406,6 @@ static int nf_flow_nat_port_udp(struct s
31 - if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
34 udph = (void *)(skb_network_header(skb) + thoff);
35 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
36 inet_proto_csum_replace2(&udph->check, skb, port,
37 @@ -447,9 +441,6 @@ int nf_flow_snat_port(const struct flow_
38 struct flow_ports *hdr;
39 __be16 port, new_port;
41 - if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
44 hdr = (void *)(skb_network_header(skb) + thoff);
47 @@ -478,9 +469,6 @@ int nf_flow_dnat_port(const struct flow_
48 struct flow_ports *hdr;
49 __be16 port, new_port;
51 - if (skb_try_make_writable(skb, thoff + sizeof(*hdr)))
54 hdr = (void *)(skb_network_header(skb) + thoff);
57 --- a/net/netfilter/nf_flow_table_ip.c
58 +++ b/net/netfilter/nf_flow_table_ip.c
59 @@ -39,9 +39,6 @@ static int nf_flow_nat_ip_tcp(struct sk_
63 - if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
66 tcph = (void *)(skb_network_header(skb) + thoff);
67 inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
69 @@ -53,9 +50,6 @@ static int nf_flow_nat_ip_udp(struct sk_
73 - if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
76 udph = (void *)(skb_network_header(skb) + thoff);
77 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
78 inet_proto_csum_replace4(&udph->check, skb, addr,
79 @@ -136,19 +130,17 @@ static int nf_flow_dnat_ip(const struct
82 static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
83 - unsigned int thoff, enum flow_offload_tuple_dir dir)
84 + unsigned int thoff, enum flow_offload_tuple_dir dir,
87 - struct iphdr *iph = ip_hdr(skb);
89 if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
90 (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
91 - nf_flow_snat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
92 + nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
96 if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
97 (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
98 - nf_flow_dnat_ip(flow, skb, ip_hdr(skb), thoff, dir) < 0))
99 + nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
103 @@ -160,10 +152,10 @@ static bool ip_has_options(unsigned int
106 static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
107 - struct flow_offload_tuple *tuple)
108 + struct flow_offload_tuple *tuple, u32 *hdrsize)
110 - unsigned int thoff, hdrsize;
111 struct flow_ports *ports;
112 + unsigned int thoff;
115 if (!pskb_may_pull(skb, sizeof(*iph)))
116 @@ -178,10 +170,10 @@ static int nf_flow_tuple_ip(struct sk_bu
118 switch (iph->protocol) {
120 - hdrsize = sizeof(struct tcphdr);
121 + *hdrsize = sizeof(struct tcphdr);
124 - hdrsize = sizeof(struct udphdr);
125 + *hdrsize = sizeof(struct udphdr);
129 @@ -191,7 +183,7 @@ static int nf_flow_tuple_ip(struct sk_bu
132 thoff = iph->ihl * 4;
133 - if (!pskb_may_pull(skb, thoff + hdrsize))
134 + if (!pskb_may_pull(skb, thoff + *hdrsize))
138 @@ -252,11 +244,12 @@ nf_flow_offload_ip_hook(void *priv, stru
144 if (skb->protocol != htons(ETH_P_IP))
147 - if (nf_flow_tuple_ip(skb, state->in, &tuple) < 0)
148 + if (nf_flow_tuple_ip(skb, state->in, &tuple, &hdrsize) < 0)
151 tuplehash = flow_offload_lookup(flow_table, &tuple);
152 @@ -271,11 +264,13 @@ nf_flow_offload_ip_hook(void *priv, stru
153 if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
156 - if (skb_try_make_writable(skb, sizeof(*iph)))
158 + thoff = iph->ihl * 4;
159 + if (skb_try_make_writable(skb, thoff + hdrsize))
162 - thoff = ip_hdr(skb)->ihl * 4;
163 - if (nf_flow_state_check(flow, ip_hdr(skb)->protocol, skb, thoff))
165 + if (nf_flow_state_check(flow, iph->protocol, skb, thoff))
168 flow_offload_refresh(flow_table, flow);
169 @@ -285,10 +280,9 @@ nf_flow_offload_ip_hook(void *priv, stru
173 - if (nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
174 + if (nf_flow_nat_ip(flow, skb, thoff, dir, iph) < 0)
178 ip_decrease_ttl(iph);
181 @@ -317,9 +311,6 @@ static int nf_flow_nat_ipv6_tcp(struct s
185 - if (skb_try_make_writable(skb, thoff + sizeof(*tcph)))
188 tcph = (void *)(skb_network_header(skb) + thoff);
189 inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
190 new_addr->s6_addr32, true);
191 @@ -333,9 +324,6 @@ static int nf_flow_nat_ipv6_udp(struct s
195 - if (skb_try_make_writable(skb, thoff + sizeof(*udph)))
198 udph = (void *)(skb_network_header(skb) + thoff);
199 if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
200 inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
201 @@ -417,31 +405,30 @@ static int nf_flow_dnat_ipv6(const struc
203 static int nf_flow_nat_ipv6(const struct flow_offload *flow,
205 - enum flow_offload_tuple_dir dir)
206 + enum flow_offload_tuple_dir dir,
207 + struct ipv6hdr *ip6h)
209 - struct ipv6hdr *ip6h = ipv6_hdr(skb);
210 unsigned int thoff = sizeof(*ip6h);
212 if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
213 (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
214 - nf_flow_snat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
215 + nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
218 - ip6h = ipv6_hdr(skb);
219 if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
220 (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
221 - nf_flow_dnat_ipv6(flow, skb, ipv6_hdr(skb), thoff, dir) < 0))
222 + nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
228 static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
229 - struct flow_offload_tuple *tuple)
230 + struct flow_offload_tuple *tuple, u32 *hdrsize)
232 - unsigned int thoff, hdrsize;
233 struct flow_ports *ports;
234 struct ipv6hdr *ip6h;
235 + unsigned int thoff;
237 if (!pskb_may_pull(skb, sizeof(*ip6h)))
239 @@ -450,10 +437,10 @@ static int nf_flow_tuple_ipv6(struct sk_
241 switch (ip6h->nexthdr) {
243 - hdrsize = sizeof(struct tcphdr);
244 + *hdrsize = sizeof(struct tcphdr);
247 - hdrsize = sizeof(struct udphdr);
248 + *hdrsize = sizeof(struct udphdr);
252 @@ -463,7 +450,7 @@ static int nf_flow_tuple_ipv6(struct sk_
255 thoff = sizeof(*ip6h);
256 - if (!pskb_may_pull(skb, thoff + hdrsize))
257 + if (!pskb_may_pull(skb, thoff + *hdrsize))
260 ip6h = ipv6_hdr(skb);
261 @@ -493,11 +480,12 @@ nf_flow_offload_ipv6_hook(void *priv, st
262 struct net_device *outdev;
263 struct ipv6hdr *ip6h;
267 if (skb->protocol != htons(ETH_P_IPV6))
270 - if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
271 + if (nf_flow_tuple_ipv6(skb, state->in, &tuple, &hdrsize) < 0)
274 tuplehash = flow_offload_lookup(flow_table, &tuple);
275 @@ -523,13 +511,13 @@ nf_flow_offload_ipv6_hook(void *priv, st
279 - if (skb_try_make_writable(skb, sizeof(*ip6h)))
280 + if (skb_try_make_writable(skb, sizeof(*ip6h) + hdrsize))
283 - if (nf_flow_nat_ipv6(flow, skb, dir) < 0)
284 + ip6h = ipv6_hdr(skb);
285 + if (nf_flow_nat_ipv6(flow, skb, dir, ip6h) < 0)
288 - ip6h = ipv6_hdr(skb);