20ab0ed5041fb8791a40418e544d124cc001f930
From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 17 Feb 2018 11:55:51 +0100
Subject: [PATCH] netfilter: nf_flow_table: move ipv6 offload hook code to
 nf_flow_table

Useful as preparation for adding iptables support for offload

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

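Note: only the definition of the hook moves; its prototype and
EXPORT_SYMBOL_GPL are unchanged, so the IPv6 flowtable type (see the context
at the end of the first hunk) keeps referencing the same symbol. As a minimal
illustrative sketch -- assuming the existing declaration in
include/net/netfilter/nf_flow_table.h is left as-is -- callers continue to see:

	/* declaration unchanged; definition now lives in nf_flow_table_ip.c */
	unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
					       const struct nf_hook_state *state);
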
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
@@ -3,240 +3,8 @@
 #include <linux/module.h>
 #include <linux/netfilter.h>
 #include <linux/rhashtable.h>
-#include <linux/ipv6.h>
-#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/neighbour.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_tables.h>
-/* For layer 4 checksum field offset. */
-#include <linux/tcp.h>
-#include <linux/udp.h>
-
-static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
-				struct in6_addr *addr,
-				struct in6_addr *new_addr)
-{
-	struct tcphdr *tcph;
-
-	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
-	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
-		return -1;
-
-	tcph = (void *)(skb_network_header(skb) + thoff);
-	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
-				  new_addr->s6_addr32, true);
-
-	return 0;
-}
-
-static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
-				struct in6_addr *addr,
-				struct in6_addr *new_addr)
-{
-	struct udphdr *udph;
-
-	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
-	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
-		return -1;
-
-	udph = (void *)(skb_network_header(skb) + thoff);
-	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
-		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
-					  new_addr->s6_addr32, true);
-		if (!udph->check)
-			udph->check = CSUM_MANGLED_0;
-	}
-
-	return 0;
-}
-
-static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
-				    unsigned int thoff, struct in6_addr *addr,
-				    struct in6_addr *new_addr)
-{
-	switch (ip6h->nexthdr) {
-	case IPPROTO_TCP:
-		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
-		break;
-	case IPPROTO_UDP:
-		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
-		break;
-	}
-
-	return 0;
-}
-
-static int nf_flow_snat_ipv6(const struct flow_offload *flow,
-			     struct sk_buff *skb, struct ipv6hdr *ip6h,
-			     unsigned int thoff,
-			     enum flow_offload_tuple_dir dir)
-{
-	struct in6_addr addr, new_addr;
-
-	switch (dir) {
-	case FLOW_OFFLOAD_DIR_ORIGINAL:
-		addr = ip6h->saddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
-		ip6h->saddr = new_addr;
-		break;
-	case FLOW_OFFLOAD_DIR_REPLY:
-		addr = ip6h->daddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
-		ip6h->daddr = new_addr;
-		break;
-	default:
-		return -1;
-	}
-
-	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
-}
-
-static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
-			     struct sk_buff *skb, struct ipv6hdr *ip6h,
-			     unsigned int thoff,
-			     enum flow_offload_tuple_dir dir)
-{
-	struct in6_addr addr, new_addr;
-
-	switch (dir) {
-	case FLOW_OFFLOAD_DIR_ORIGINAL:
-		addr = ip6h->daddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
-		ip6h->daddr = new_addr;
-		break;
-	case FLOW_OFFLOAD_DIR_REPLY:
-		addr = ip6h->saddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
-		ip6h->saddr = new_addr;
-		break;
-	default:
-		return -1;
-	}
-
-	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
-}
-
-static int nf_flow_nat_ipv6(const struct flow_offload *flow,
-			    struct sk_buff *skb,
-			    enum flow_offload_tuple_dir dir)
-{
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	unsigned int thoff = sizeof(*ip6h);
-
-	if (flow->flags & FLOW_OFFLOAD_SNAT &&
-	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
-		return -1;
-	if (flow->flags & FLOW_OFFLOAD_DNAT &&
-	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
-		return -1;
-
-	return 0;
-}
-
-static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
-			      struct flow_offload_tuple *tuple)
-{
-	struct flow_ports *ports;
-	struct ipv6hdr *ip6h;
-	unsigned int thoff;
-
-	if (!pskb_may_pull(skb, sizeof(*ip6h)))
-		return -1;
-
-	ip6h = ipv6_hdr(skb);
-
-	if (ip6h->nexthdr != IPPROTO_TCP &&
-	    ip6h->nexthdr != IPPROTO_UDP)
-		return -1;
-
-	thoff = sizeof(*ip6h);
-	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
-		return -1;
-
-	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
-
-	tuple->src_v6 = ip6h->saddr;
-	tuple->dst_v6 = ip6h->daddr;
-	tuple->src_port = ports->source;
-	tuple->dst_port = ports->dest;
-	tuple->l3proto = AF_INET6;
-	tuple->l4proto = ip6h->nexthdr;
-	tuple->iifidx = dev->ifindex;
-
-	return 0;
-}
-
-/* Based on ip_exceeds_mtu(). */
-static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
-{
-	if (skb->len <= mtu)
-		return false;
-
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
-		return false;
-
-	return true;
-}
-
-unsigned int
-nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
-			  const struct nf_hook_state *state)
-{
-	struct flow_offload_tuple_rhash *tuplehash;
-	struct nf_flowtable *flow_table = priv;
-	struct flow_offload_tuple tuple = {};
-	enum flow_offload_tuple_dir dir;
-	struct flow_offload *flow;
-	struct net_device *outdev;
-	struct in6_addr *nexthop;
-	struct ipv6hdr *ip6h;
-	struct rt6_info *rt;
-
-	if (skb->protocol != htons(ETH_P_IPV6))
-		return NF_ACCEPT;
-
-	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
-		return NF_ACCEPT;
-
-	tuplehash = flow_offload_lookup(flow_table, &tuple);
-	if (tuplehash == NULL)
-		return NF_ACCEPT;
-
-	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
-	if (!outdev)
-		return NF_ACCEPT;
-
-	dir = tuplehash->tuple.dir;
-	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
-
-	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
-		return NF_ACCEPT;
-
-	if (skb_try_make_writable(skb, sizeof(*ip6h)))
-		return NF_DROP;
-
-	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
-	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
-		return NF_DROP;
-
-	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
-	ip6h = ipv6_hdr(skb);
-	ip6h->hop_limit--;
-
-	skb->dev = outdev;
-	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
-	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
-
-	return NF_STOLEN;
-}
-EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
 
 static struct nf_flowtable_type flowtable_ipv6 = {
 	.family = NFPROTO_IPV6,
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
@@ -4,8 +4,11 @@
 #include <linux/netfilter.h>
 #include <linux/rhashtable.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/netdevice.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/neighbour.h>
 #include <net/netfilter/nf_flow_table.h>
 /* For layer 4 checksum field offset. */
@@ -241,3 +244,215 @@ nf_flow_offload_ip_hook(void *priv, stru
 	return NF_STOLEN;
 }
 EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
+
+static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
+				  new_addr->s6_addr32, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
+					  new_addr->s6_addr32, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
+				    unsigned int thoff, struct in6_addr *addr,
+				    struct in6_addr *new_addr)
+{
+	switch (ip6h->nexthdr) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
+
+static int nf_flow_snat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_nat_ipv6(const struct flow_offload *flow,
+			    struct sk_buff *skb,
+			    enum flow_offload_tuple_dir dir)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	unsigned int thoff = sizeof(*ip6h);
+
+	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+
+	return 0;
+}
+
+static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+			      struct flow_offload_tuple *tuple)
+{
+	struct flow_ports *ports;
+	struct ipv6hdr *ip6h;
+	unsigned int thoff;
+
+	if (!pskb_may_pull(skb, sizeof(*ip6h)))
+		return -1;
+
+	ip6h = ipv6_hdr(skb);
+
+	if (ip6h->nexthdr != IPPROTO_TCP &&
+	    ip6h->nexthdr != IPPROTO_UDP)
+		return -1;
+
+	thoff = sizeof(*ip6h);
+	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+		return -1;
+
+	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+	tuple->src_v6 = ip6h->saddr;
+	tuple->dst_v6 = ip6h->daddr;
+	tuple->src_port = ports->source;
+	tuple->dst_port = ports->dest;
+	tuple->l3proto = AF_INET6;
+	tuple->l4proto = ip6h->nexthdr;
+	tuple->iifidx = dev->ifindex;
+
+	return 0;
+}
+
+unsigned int
+nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+			  const struct nf_hook_state *state)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table = priv;
+	struct flow_offload_tuple tuple = {};
+	enum flow_offload_tuple_dir dir;
+	struct flow_offload *flow;
+	struct net_device *outdev;
+	struct in6_addr *nexthop;
+	struct ipv6hdr *ip6h;
+	struct rt6_info *rt;
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		return NF_ACCEPT;
+
+	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+		return NF_ACCEPT;
+
+	tuplehash = flow_offload_lookup(flow_table, &tuple);
+	if (tuplehash == NULL)
+		return NF_ACCEPT;
+
+	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+	if (!outdev)
+		return NF_ACCEPT;
+
+	dir = tuplehash->tuple.dir;
+	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+
+	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+		return NF_ACCEPT;
+
+	if (skb_try_make_writable(skb, sizeof(*ip6h)))
+		return NF_DROP;
+
+	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
+		return NF_DROP;
+
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+	ip6h = ipv6_hdr(skb);
+	ip6h->hop_limit--;
+
+	skb->dev = outdev;
+	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+
+	return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);