From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Tue, 23 Mar 2021 00:56:25 +0100
Subject: [PATCH] netfilter: flowtable: fast NAT functions never fail

Simplify existing fast NAT routines by returning void. After the
skb_try_make_writable() call consolidation, these routines cannot ever
fail.

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---

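The shape of the change, in short: an earlier patch consolidated the only
step that can fail, skb_try_make_writable(), at the call sites, so the
per-protocol port/address mangling helpers touched below have no failure
path left and can return void. A minimal standalone C sketch of that
pattern follows; it is illustration only, not kernel code, and the pkt_*
names are invented for the example.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct pkt {
	char data[64];
	bool writable;
};

/* The single fallible step, done once up front by the caller
 * (cf. skb_try_make_writable() in the flowtable hooks). */
static bool pkt_make_writable(struct pkt *p)
{
	p->writable = true;
	return p->writable;
}

/* Once the buffer is guaranteed writable, a mangling helper like this
 * only rewrites bytes and cannot fail, so it returns void. */
static void pkt_rewrite_port(struct pkt *p, const char *new_port)
{
	memcpy(p->data, new_port, strlen(new_port));
}

int main(void)
{
	struct pkt p = { .data = "0000", .writable = false };

	if (!pkt_make_writable(&p))	/* only failure point, checked once */
		return 1;

	pkt_rewrite_port(&p, "8080");	/* infallible, nothing to check */
	printf("%s\n", p.data);
	return 0;
}

With the error returns gone, callers such as nf_flow_nat_ip() and
nf_flow_nat_ipv6() collapse from chained error checks into plain calls,
which is exactly what the hunks below do.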
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -229,12 +229,12 @@ void nf_flow_table_free(struct nf_flowta
 
 void flow_offload_teardown(struct flow_offload *flow);
 
-int nf_flow_snat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir);
-int nf_flow_dnat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_snat_port(const struct flow_offload *flow,
+		       struct sk_buff *skb, unsigned int thoff,
+		       u8 protocol, enum flow_offload_tuple_dir dir);
+void nf_flow_dnat_port(const struct flow_offload *flow,
+		       struct sk_buff *skb, unsigned int thoff,
+		       u8 protocol, enum flow_offload_tuple_dir dir);
 
 struct flow_ports {
 	__be16 source, dest;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -389,20 +389,17 @@ static void nf_flow_offload_work_gc(stru
 	queue_delayed_work(system_power_efficient_wq, &flow_table->gc_work, HZ);
 }
 
-
-static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
-				__be16 port, __be16 new_port)
+static void nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
+				 __be16 port, __be16 new_port)
 {
 	struct tcphdr *tcph;
 
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
-
-	return 0;
 }
 
-static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
-				__be16 port, __be16 new_port)
+static void nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
+				 __be16 port, __be16 new_port)
 {
 	struct udphdr *udph;
 
@@ -413,30 +410,24 @@ static int nf_flow_nat_port_udp(struct s
 		if (!udph->check)
 			udph->check = CSUM_MANGLED_0;
 	}
-
-	return 0;
 }
 
-static int nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
-			    u8 protocol, __be16 port, __be16 new_port)
+static void nf_flow_nat_port(struct sk_buff *skb, unsigned int thoff,
+			     u8 protocol, __be16 port, __be16 new_port)
 {
 	switch (protocol) {
 	case IPPROTO_TCP:
-		if (nf_flow_nat_port_tcp(skb, thoff, port, new_port) < 0)
-			return NF_DROP;
+		nf_flow_nat_port_tcp(skb, thoff, port, new_port);
 		break;
 	case IPPROTO_UDP:
-		if (nf_flow_nat_port_udp(skb, thoff, port, new_port) < 0)
-			return NF_DROP;
+		nf_flow_nat_port_udp(skb, thoff, port, new_port);
 		break;
 	}
-
-	return 0;
 }
 
-int nf_flow_snat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir)
+void nf_flow_snat_port(const struct flow_offload *flow,
+		       struct sk_buff *skb, unsigned int thoff,
+		       u8 protocol, enum flow_offload_tuple_dir dir)
 {
 	struct flow_ports *hdr;
 	__be16 port, new_port;
@@ -456,13 +447,13 @@ int nf_flow_snat_port(const struct flow_
 		break;
 	}
 
-	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
 }
 EXPORT_SYMBOL_GPL(nf_flow_snat_port);
 
-int nf_flow_dnat_port(const struct flow_offload *flow,
-		      struct sk_buff *skb, unsigned int thoff,
-		      u8 protocol, enum flow_offload_tuple_dir dir)
+void nf_flow_dnat_port(const struct flow_offload *flow, struct sk_buff *skb,
+		       unsigned int thoff, u8 protocol,
+		       enum flow_offload_tuple_dir dir)
 {
 	struct flow_ports *hdr;
 	__be16 port, new_port;
@@ -482,7 +473,7 @@ int nf_flow_dnat_port(const struct flow_
 		break;
 	}
 
-	return nf_flow_nat_port(skb, thoff, protocol, port, new_port);
+	nf_flow_nat_port(skb, thoff, protocol, port, new_port);
 }
 EXPORT_SYMBOL_GPL(nf_flow_dnat_port);
 
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -34,19 +34,17 @@ static int nf_flow_state_check(struct fl
 	return 0;
 }
 
-static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
-			      __be32 addr, __be32 new_addr)
+static void nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
+			       __be32 addr, __be32 new_addr)
 {
 	struct tcphdr *tcph;
 
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, true);
-
-	return 0;
 }
 
-static int nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
-			      __be32 addr, __be32 new_addr)
+static void nf_flow_nat_ip_udp(struct sk_buff *skb, unsigned int thoff,
+			       __be32 addr, __be32 new_addr)
 {
 	struct udphdr *udph;
 
@@ -57,31 +55,25 @@ static int nf_flow_nat_ip_udp(struct sk_
 		if (!udph->check)
 			udph->check = CSUM_MANGLED_0;
 	}
-
-	return 0;
 }
 
-static int nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
-				  unsigned int thoff, __be32 addr,
-				  __be32 new_addr)
+static void nf_flow_nat_ip_l4proto(struct sk_buff *skb, struct iphdr *iph,
+				   unsigned int thoff, __be32 addr,
+				   __be32 new_addr)
 {
 	switch (iph->protocol) {
 	case IPPROTO_TCP:
-		if (nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
+		nf_flow_nat_ip_tcp(skb, thoff, addr, new_addr);
 		break;
 	case IPPROTO_UDP:
-		if (nf_flow_nat_ip_udp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
+		nf_flow_nat_ip_udp(skb, thoff, addr, new_addr);
 		break;
 	}
-
-	return 0;
 }
 
-static int nf_flow_snat_ip(const struct flow_offload *flow, struct sk_buff *skb,
-			   struct iphdr *iph, unsigned int thoff,
-			   enum flow_offload_tuple_dir dir)
+static void nf_flow_snat_ip(const struct flow_offload *flow,
+			    struct sk_buff *skb, struct iphdr *iph,
+			    unsigned int thoff, enum flow_offload_tuple_dir dir)
 {
 	__be32 addr, new_addr;
 
@@ -99,12 +91,12 @@ static int nf_flow_snat_ip(const struct
 	}
 	csum_replace4(&iph->check, addr, new_addr);
 
-	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
 }
 
-static int nf_flow_dnat_ip(const struct flow_offload *flow, struct sk_buff *skb,
-			   struct iphdr *iph, unsigned int thoff,
-			   enum flow_offload_tuple_dir dir)
+static void nf_flow_dnat_ip(const struct flow_offload *flow,
+			    struct sk_buff *skb, struct iphdr *iph,
+			    unsigned int thoff, enum flow_offload_tuple_dir dir)
 {
 	__be32 addr, new_addr;
 
@@ -122,24 +114,21 @@ static int nf_flow_dnat_ip(const struct
 	}
 	csum_replace4(&iph->check, addr, new_addr);
 
-	return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+	nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
 }
 
-static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+static void nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
			  unsigned int thoff, enum flow_offload_tuple_dir dir,
			  struct iphdr *iph)
 {
-	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
-	    (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-	     nf_flow_snat_ip(flow, skb, iph, thoff, dir) < 0))
-		return -1;
-
-	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
-	    (nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
-	     nf_flow_dnat_ip(flow, skb, iph, thoff, dir) < 0))
-		return -1;
-
-	return 0;
+	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
+		nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir);
+		nf_flow_snat_ip(flow, skb, iph, thoff, dir);
+	}
+	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
+		nf_flow_dnat_port(flow, skb, thoff, iph->protocol, dir);
+		nf_flow_dnat_ip(flow, skb, iph, thoff, dir);
+	}
 }
 
 static bool ip_has_options(unsigned int thoff)
@@ -276,8 +265,7 @@ nf_flow_offload_ip_hook(void *priv, stru
 		return NF_DROP;
 
 	iph = ip_hdr(skb);
-	if (nf_flow_nat_ip(flow, skb, thoff, dir, iph) < 0)
-		return NF_DROP;
+	nf_flow_nat_ip(flow, skb, thoff, dir, iph);
 
 	ip_decrease_ttl(iph);
 	skb->tstamp = 0;
@@ -301,22 +289,21 @@ nf_flow_offload_ip_hook(void *priv, stru
 }
 EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
 
-static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
-				struct in6_addr *addr,
-				struct in6_addr *new_addr)
+static void nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
+				 struct in6_addr *addr,
+				 struct in6_addr *new_addr,
+				 struct ipv6hdr *ip6h)
 {
 	struct tcphdr *tcph;
 
 	tcph = (void *)(skb_network_header(skb) + thoff);
 	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
 				  new_addr->s6_addr32, true);
-
-	return 0;
 }
 
-static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
-				struct in6_addr *addr,
-				struct in6_addr *new_addr)
+static void nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
+				 struct in6_addr *addr,
+				 struct in6_addr *new_addr)
 {
 	struct udphdr *udph;
 
@@ -327,32 +314,26 @@ static int nf_flow_nat_ipv6_udp(struct s
 		if (!udph->check)
 			udph->check = CSUM_MANGLED_0;
 	}
-
-	return 0;
 }
 
-static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
-				    unsigned int thoff, struct in6_addr *addr,
-				    struct in6_addr *new_addr)
+static void nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
+				     unsigned int thoff, struct in6_addr *addr,
+				     struct in6_addr *new_addr)
 {
 	switch (ip6h->nexthdr) {
 	case IPPROTO_TCP:
-		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
+		nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr, ip6h);
 		break;
 	case IPPROTO_UDP:
-		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
+		nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr);
 		break;
 	}
-
-	return 0;
 }
 
-static int nf_flow_snat_ipv6(const struct flow_offload *flow,
-			     struct sk_buff *skb, struct ipv6hdr *ip6h,
-			     unsigned int thoff,
-			     enum flow_offload_tuple_dir dir)
+static void nf_flow_snat_ipv6(const struct flow_offload *flow,
+			      struct sk_buff *skb, struct ipv6hdr *ip6h,
+			      unsigned int thoff,
+			      enum flow_offload_tuple_dir dir)
 {
 	struct in6_addr addr, new_addr;
 
@@ -369,13 +350,13 @@ static int nf_flow_snat_ipv6(const struc
 		break;
 	}
 
-	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
 }
 
-static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
-			     struct sk_buff *skb, struct ipv6hdr *ip6h,
-			     unsigned int thoff,
-			     enum flow_offload_tuple_dir dir)
+static void nf_flow_dnat_ipv6(const struct flow_offload *flow,
+			      struct sk_buff *skb, struct ipv6hdr *ip6h,
+			      unsigned int thoff,
+			      enum flow_offload_tuple_dir dir)
 {
 	struct in6_addr addr, new_addr;
 
@@ -392,27 +373,24 @@ static int nf_flow_dnat_ipv6(const struc
 		break;
 	}
 
-	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+	nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
 }
 
-static int nf_flow_nat_ipv6(const struct flow_offload *flow,
-			    struct sk_buff *skb,
-			    enum flow_offload_tuple_dir dir,
-			    struct ipv6hdr *ip6h)
+static void nf_flow_nat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb,
+			     enum flow_offload_tuple_dir dir,
+			     struct ipv6hdr *ip6h)
 {
 	unsigned int thoff = sizeof(*ip6h);
 
-	if (test_bit(NF_FLOW_SNAT, &flow->flags) &&
-	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
-		return -1;
-
-	if (test_bit(NF_FLOW_DNAT, &flow->flags) &&
-	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
-		return -1;
-
-	return 0;
+	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
+		nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir);
+		nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir);
+	}
+	if (test_bit(NF_FLOW_DNAT, &flow->flags)) {
+		nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir);
+		nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir);
+	}
 }
 
 static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
@@ -507,8 +485,7 @@ nf_flow_offload_ipv6_hook(void *priv, st
 		return NF_DROP;
 
 	ip6h = ipv6_hdr(skb);
-	if (nf_flow_nat_ipv6(flow, skb, dir, ip6h) < 0)
-		return NF_DROP;
+	nf_flow_nat_ipv6(flow, skb, dir, ip6h);
 
 	ip6h->hop_limit--;
 	skb->tstamp = 0;