From: Felix Fietkau <nbd@nbd.name>
Date: Sat, 17 Feb 2018 11:55:51 +0100
Subject: [PATCH] netfilter: nf_flow_table: move ipv6 offload hook code to
 nf_flow_table

Useful as preparation for adding iptables support for offload.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
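Note: the hook moved by this patch stays wired up through the flowtable
type definition in nf_flow_table_ipv6.c, whose tail is visible as context
at the end of the first hunk below. As a rough sketch of that registration
(fields other than .family and .hook are recalled from the tree at the
time and may differ; they are not part of this patch):

	static struct nf_flowtable_type flowtable_ipv6 = {
		.family	= NFPROTO_IPV6,
		.params	= &nf_flow_offload_rhash_params,
		.gc	= nf_flow_offload_work_gc,
		.hook	= nf_flow_offload_ipv6_hook,
		.owner	= THIS_MODULE,
	};

The move is source-compatible: nf_flow_offload_ipv6_hook keeps its name
and its EXPORT_SYMBOL_GPL, only the providing object file changes.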
--- a/net/ipv6/netfilter/nf_flow_table_ipv6.c
+++ b/net/ipv6/netfilter/nf_flow_table_ipv6.c
 #include <linux/module.h>
 #include <linux/netfilter.h>
 #include <linux/rhashtable.h>
-#include <linux/ipv6.h>
-#include <linux/netdevice.h>
-#include <net/ipv6.h>
-#include <net/ip6_route.h>
-#include <net/neighbour.h>
 #include <net/netfilter/nf_flow_table.h>
 #include <net/netfilter/nf_tables.h>
-/* For layer 4 checksum field offset. */
-#include <linux/tcp.h>
-#include <linux/udp.h>
-static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
-				struct in6_addr *addr,
-				struct in6_addr *new_addr)
-{
-	struct tcphdr *tcph;
-
-	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
-	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
-		return -1;
-
-	tcph = (void *)(skb_network_header(skb) + thoff);
-	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
-				  new_addr->s6_addr32, true);
-
-	return 0;
-}
-
-static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
-				struct in6_addr *addr,
-				struct in6_addr *new_addr)
-{
-	struct udphdr *udph;
-
-	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
-	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
-		return -1;
-
-	udph = (void *)(skb_network_header(skb) + thoff);
-	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
-		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
-					  new_addr->s6_addr32, true);
-		if (!udph->check)
-			udph->check = CSUM_MANGLED_0;
-	}
-
-	return 0;
-}
-
-static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
-				    unsigned int thoff, struct in6_addr *addr,
-				    struct in6_addr *new_addr)
-{
-	switch (ip6h->nexthdr) {
-	case IPPROTO_TCP:
-		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
-		break;
-	case IPPROTO_UDP:
-		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
-			return NF_DROP;
-		break;
-	}
-
-	return 0;
-}
-
-static int nf_flow_snat_ipv6(const struct flow_offload *flow,
-			     struct sk_buff *skb, struct ipv6hdr *ip6h,
-			     unsigned int thoff,
-			     enum flow_offload_tuple_dir dir)
-{
-	struct in6_addr addr, new_addr;
-
-	switch (dir) {
-	case FLOW_OFFLOAD_DIR_ORIGINAL:
-		addr = ip6h->saddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
-		ip6h->saddr = new_addr;
-		break;
-	case FLOW_OFFLOAD_DIR_REPLY:
-		addr = ip6h->daddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
-		ip6h->daddr = new_addr;
-		break;
-	default:
-		return -1;
-	}
-
-	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
-}
-
-static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
-			     struct sk_buff *skb, struct ipv6hdr *ip6h,
-			     unsigned int thoff,
-			     enum flow_offload_tuple_dir dir)
-{
-	struct in6_addr addr, new_addr;
-
-	switch (dir) {
-	case FLOW_OFFLOAD_DIR_ORIGINAL:
-		addr = ip6h->daddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
-		ip6h->daddr = new_addr;
-		break;
-	case FLOW_OFFLOAD_DIR_REPLY:
-		addr = ip6h->saddr;
-		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
-		ip6h->saddr = new_addr;
-		break;
-	default:
-		return -1;
-	}
-
-	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
-}
-
-static int nf_flow_nat_ipv6(const struct flow_offload *flow,
-			    struct sk_buff *skb,
-			    enum flow_offload_tuple_dir dir)
-{
-	struct ipv6hdr *ip6h = ipv6_hdr(skb);
-	unsigned int thoff = sizeof(*ip6h);
-
-	if (flow->flags & FLOW_OFFLOAD_SNAT &&
-	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
-		return -1;
-	if (flow->flags & FLOW_OFFLOAD_DNAT &&
-	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
-	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
-		return -1;
-
-	return 0;
-}
-
-static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
-			      struct flow_offload_tuple *tuple)
-{
-	struct flow_ports *ports;
-	struct ipv6hdr *ip6h;
-	unsigned int thoff;
-
-	if (!pskb_may_pull(skb, sizeof(*ip6h)))
-		return -1;
-
-	ip6h = ipv6_hdr(skb);
-
-	if (ip6h->nexthdr != IPPROTO_TCP &&
-	    ip6h->nexthdr != IPPROTO_UDP)
-		return -1;
-
-	thoff = sizeof(*ip6h);
-	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
-		return -1;
-
-	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
-
-	tuple->src_v6 = ip6h->saddr;
-	tuple->dst_v6 = ip6h->daddr;
-	tuple->src_port = ports->source;
-	tuple->dst_port = ports->dest;
-	tuple->l3proto = AF_INET6;
-	tuple->l4proto = ip6h->nexthdr;
-	tuple->iifidx = dev->ifindex;
-
-	return 0;
-}
-
-/* Based on ip_exceeds_mtu(). */
-static bool nf_flow_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
-{
-	if (skb->len <= mtu)
-		return false;
-
-	if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
-		return false;
-
-	return true;
-}
-
-unsigned int
-nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
-			  const struct nf_hook_state *state)
-{
-	struct flow_offload_tuple_rhash *tuplehash;
-	struct nf_flowtable *flow_table = priv;
-	struct flow_offload_tuple tuple = {};
-	enum flow_offload_tuple_dir dir;
-	struct flow_offload *flow;
-	struct net_device *outdev;
-	struct in6_addr *nexthop;
-	struct ipv6hdr *ip6h;
-	struct rt6_info *rt;
-
-	if (skb->protocol != htons(ETH_P_IPV6))
-		return NF_ACCEPT;
-
-	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
-		return NF_ACCEPT;
-
-	tuplehash = flow_offload_lookup(flow_table, &tuple);
-	if (tuplehash == NULL)
-		return NF_ACCEPT;
-
-	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
-	if (!outdev)
-		return NF_ACCEPT;
-
-	dir = tuplehash->tuple.dir;
-	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
-	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
-
-	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
-		return NF_ACCEPT;
-
-	if (skb_try_make_writable(skb, sizeof(*ip6h)))
-		return NF_DROP;
-
-	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
-	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
-		return NF_DROP;
-
-	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
-	ip6h = ipv6_hdr(skb);
-	ip6h->hop_limit--;
-
-	skb->dev = outdev;
-	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
-	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
-
-	return NF_STOLEN;
-}
-EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
-
 static struct nf_flowtable_type flowtable_ipv6 = {
 	.family = NFPROTO_IPV6,
--- a/net/netfilter/nf_flow_table_ip.c
+++ b/net/netfilter/nf_flow_table_ip.c
 #include <linux/netfilter.h>
 #include <linux/rhashtable.h>
 #include <linux/ip.h>
+#include <linux/ipv6.h>
 #include <linux/netdevice.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
 #include <net/neighbour.h>
 #include <net/netfilter/nf_flow_table.h>
 /* For layer 4 checksum field offset. */
@@ -241,3 +244,215 @@ nf_flow_offload_ip_hook(void *priv, stru
 	return NF_STOLEN;
 }
 EXPORT_SYMBOL_GPL(nf_flow_offload_ip_hook);
+
+static int nf_flow_nat_ipv6_tcp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct tcphdr *tcph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*tcph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*tcph)))
+		return -1;
+
+	tcph = (void *)(skb_network_header(skb) + thoff);
+	inet_proto_csum_replace16(&tcph->check, skb, addr->s6_addr32,
+				  new_addr->s6_addr32, true);
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_udp(struct sk_buff *skb, unsigned int thoff,
+				struct in6_addr *addr,
+				struct in6_addr *new_addr)
+{
+	struct udphdr *udph;
+
+	if (!pskb_may_pull(skb, thoff + sizeof(*udph)) ||
+	    skb_try_make_writable(skb, thoff + sizeof(*udph)))
+		return -1;
+
+	udph = (void *)(skb_network_header(skb) + thoff);
+	if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+		inet_proto_csum_replace16(&udph->check, skb, addr->s6_addr32,
+					  new_addr->s6_addr32, true);
+		if (!udph->check)
+			udph->check = CSUM_MANGLED_0;
+	}
+
+	return 0;
+}
+
+static int nf_flow_nat_ipv6_l4proto(struct sk_buff *skb, struct ipv6hdr *ip6h,
+				    unsigned int thoff, struct in6_addr *addr,
+				    struct in6_addr *new_addr)
+{
+	switch (ip6h->nexthdr) {
+	case IPPROTO_TCP:
+		if (nf_flow_nat_ipv6_tcp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	case IPPROTO_UDP:
+		if (nf_flow_nat_ipv6_udp(skb, thoff, addr, new_addr) < 0)
+			return NF_DROP;
+		break;
+	}
+
+	return 0;
+}
+
+static int nf_flow_snat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_dnat_ipv6(const struct flow_offload *flow,
+			     struct sk_buff *skb, struct ipv6hdr *ip6h,
+			     unsigned int thoff,
+			     enum flow_offload_tuple_dir dir)
+{
+	struct in6_addr addr, new_addr;
+
+	switch (dir) {
+	case FLOW_OFFLOAD_DIR_ORIGINAL:
+		addr = ip6h->daddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.src_v6;
+		ip6h->daddr = new_addr;
+		break;
+	case FLOW_OFFLOAD_DIR_REPLY:
+		addr = ip6h->saddr;
+		new_addr = flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_v6;
+		ip6h->saddr = new_addr;
+		break;
+	default:
+		return -1;
+	}
+
+	return nf_flow_nat_ipv6_l4proto(skb, ip6h, thoff, &addr, &new_addr);
+}
+
+static int nf_flow_nat_ipv6(const struct flow_offload *flow,
+			    struct sk_buff *skb,
+			    enum flow_offload_tuple_dir dir)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	unsigned int thoff = sizeof(*ip6h);
+
+	if (flow->flags & FLOW_OFFLOAD_SNAT &&
+	    (nf_flow_snat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_snat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+	if (flow->flags & FLOW_OFFLOAD_DNAT &&
+	    (nf_flow_dnat_port(flow, skb, thoff, ip6h->nexthdr, dir) < 0 ||
+	     nf_flow_dnat_ipv6(flow, skb, ip6h, thoff, dir) < 0))
+		return -1;
+
+	return 0;
+}
+
+static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
+			      struct flow_offload_tuple *tuple)
+{
+	struct flow_ports *ports;
+	struct ipv6hdr *ip6h;
+	unsigned int thoff;
+
+	if (!pskb_may_pull(skb, sizeof(*ip6h)))
+		return -1;
+
+	ip6h = ipv6_hdr(skb);
+
+	if (ip6h->nexthdr != IPPROTO_TCP &&
+	    ip6h->nexthdr != IPPROTO_UDP)
+		return -1;
+
+	thoff = sizeof(*ip6h);
+	if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
+		return -1;
+
+	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
+
+	tuple->src_v6 = ip6h->saddr;
+	tuple->dst_v6 = ip6h->daddr;
+	tuple->src_port = ports->source;
+	tuple->dst_port = ports->dest;
+	tuple->l3proto = AF_INET6;
+	tuple->l4proto = ip6h->nexthdr;
+	tuple->iifidx = dev->ifindex;
+
+	return 0;
+}
+
+unsigned int
+nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
+			  const struct nf_hook_state *state)
+{
+	struct flow_offload_tuple_rhash *tuplehash;
+	struct nf_flowtable *flow_table = priv;
+	struct flow_offload_tuple tuple = {};
+	enum flow_offload_tuple_dir dir;
+	struct flow_offload *flow;
+	struct net_device *outdev;
+	struct in6_addr *nexthop;
+	struct ipv6hdr *ip6h;
+	struct rt6_info *rt;
+
+	if (skb->protocol != htons(ETH_P_IPV6))
+		return NF_ACCEPT;
+
+	if (nf_flow_tuple_ipv6(skb, state->in, &tuple) < 0)
+		return NF_ACCEPT;
+
+	tuplehash = flow_offload_lookup(flow_table, &tuple);
+	if (tuplehash == NULL)
+		return NF_ACCEPT;
+
+	outdev = dev_get_by_index_rcu(state->net, tuplehash->tuple.oifidx);
+	if (!outdev)
+		return NF_ACCEPT;
+
+	dir = tuplehash->tuple.dir;
+	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
+	rt = (struct rt6_info *)flow->tuplehash[dir].tuple.dst_cache;
+
+	if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+		return NF_ACCEPT;
+
+	if (skb_try_make_writable(skb, sizeof(*ip6h)))
+		return NF_DROP;
+
+	if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+	    nf_flow_nat_ipv6(flow, skb, dir) < 0)
+		return NF_DROP;
+
+	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+	ip6h = ipv6_hdr(skb);
+	ip6h->hop_limit--;
+
+	skb->dev = outdev;
+	nexthop = rt6_nexthop(rt, &flow->tuplehash[!dir].tuple.src_v6);
+	neigh_xmit(NEIGH_ND_TABLE, outdev, nexthop, skb);
+
+	return NF_STOLEN;
+}
+EXPORT_SYMBOL_GPL(nf_flow_offload_ipv6_hook);
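
Note on the follow-up this prepares for: with the hook now exported from
the generic nf_flow_table_ip module, a non-nftables user can attach it at
netdev ingress. A rough sketch of that pattern follows; everything except
nf_flow_offload_ipv6_hook and the core netfilter hook API is an
illustrative assumption, not code from this series:

	#include <linux/netfilter.h>
	#include <net/netfilter/nf_flow_table.h>

	/* Illustrative only: bind the shared IPv6 offload hook to one
	 * device's ingress point. 'flowtable' is stored in ops->priv and
	 * handed back to the hook as its 'priv' argument.
	 */
	static int example_attach_ipv6_offload(struct net *net,
					       struct nf_flowtable *flowtable,
					       struct net_device *dev,
					       struct nf_hook_ops *ops)
	{
		ops->hook     = nf_flow_offload_ipv6_hook;
		ops->pf       = NFPROTO_NETDEV;
		ops->hooknum  = NF_NETDEV_INGRESS;
		ops->priority = 0;
		ops->priv     = flowtable;
		ops->dev      = dev;

		return nf_register_net_hook(net, ops);
	}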