From: Felix Fietkau <nbd@nbd.name>
Date: Sun, 25 Feb 2018 15:41:11 +0100
Subject: [PATCH] netfilter: nf_flow_table: add support for sending flows
 back to the slow path

Reset the timeout. For TCP, also set the state so that window tracking
is re-initialized from the next incoming packets. This allows the slow
path to take over again once the offload state has been torn down.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

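Note: zeroing td_maxwin is what makes the TCP handover work. conntrack's
window tracking re-initializes itself from the next packet whenever it
sees a sender with td_maxwin == 0. A simplified sketch of that branch,
paraphrased from tcp_in_window() in net/netfilter/nf_conntrack_proto_tcp.c
(the exact code varies by kernel version):

	if (sender->td_maxwin == 0) {
		/* State was reset (or this is the first packet seen in
		 * this direction): rebuild the window tracking data from
		 * this packet instead of enforcing stale pre-offload
		 * bounds.
		 */
		sender->td_end = end;
		swin = win << sender->td_scale;
		sender->td_maxwin = (swin == 0 ? 1 : swin);
		sender->td_maxend = end + sender->td_maxwin;
	}
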
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -100,6 +100,43 @@ err_ct_refcnt:
 }
 EXPORT_SYMBOL_GPL(flow_offload_alloc);
 
+static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
+{
+	tcp->state = TCP_CONNTRACK_ESTABLISHED;
+	tcp->seen[0].td_maxwin = 0;
+	tcp->seen[1].td_maxwin = 0;
+}
+
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *l4proto;
+	struct net *net = nf_ct_net(ct);
+	unsigned int *timeouts;
+	unsigned int timeout;
+	int l4num;
+
+	l4num = nf_ct_protonum(ct);
+	if (l4num == IPPROTO_TCP)
+		flow_offload_fixup_tcp(&ct->proto.tcp);
+
+	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
+	if (!l4proto)
+		return;
+
+	timeouts = l4proto->get_timeouts(net);
+	if (!timeouts)
+		return;
+
+	if (l4num == IPPROTO_TCP)
+		timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
+	else if (l4num == IPPROTO_UDP)
+		timeout = timeouts[UDP_CT_REPLIED];
+	else
+		return;
+
+	ct->timeout = nfct_time_stamp + timeout;
+}
+
 void flow_offload_free(struct flow_offload *flow)
 {
 	struct flow_offload_entry *e;
@@ -107,7 +144,8 @@ void flow_offload_free(struct flow_offlo
 	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
 	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
 	e = container_of(flow, struct flow_offload_entry, flow);
-	nf_ct_delete(e->ct, 0, 0);
+	if (flow->flags & FLOW_OFFLOAD_DYING)
+		nf_ct_delete(e->ct, 0, 0);
 	nf_ct_put(e->ct);
 	kfree_rcu(e, rcu_head);
 }
@@ -164,6 +202,8 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
 static void flow_offload_del(struct nf_flowtable *flow_table,
 			     struct flow_offload *flow)
 {
+	struct flow_offload_entry *e;
+
 	rhashtable_remove_fast(&flow_table->rhashtable,
 			       &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
 			       nf_flow_offload_rhash_params);
@@ -171,12 +211,20 @@ static void flow_offload_del(struct nf_f
 			       &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
 			       nf_flow_offload_rhash_params);
 
+	e = container_of(flow, struct flow_offload_entry, flow);
+	clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
+
 	flow_offload_free(flow);
 }
 
 void flow_offload_teardown(struct flow_offload *flow)
 {
+	struct flow_offload_entry *e;
+
 	flow->flags |= FLOW_OFFLOAD_TEARDOWN;
+
+	e = container_of(flow, struct flow_offload_entry, flow);
+	flow_offload_fixup_ct_state(e->ct);
 }
 EXPORT_SYMBOL_GPL(flow_offload_teardown);
 
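
The teardown entry point above is meant to be driven from the flowtable
datapath. A minimal sketch of a caller (not part of this patch; the helper
name and the exact checks are illustrative assumptions), showing how a
hook could push a TCP flow back to the slow path once a FIN or RST is
seen:

	static bool nf_flow_tcp_state_ok(struct flow_offload *flow,
					 struct sk_buff *skb,
					 unsigned int thoff)
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
			return false;

		tcph = (void *)(skb_network_header(skb) + thoff);
		if (unlikely(tcph->fin || tcph->rst)) {
			/* Stop offloading: flow_offload_teardown() resets
			 * the conntrack timeout and TCP window tracking,
			 * so the regular netfilter path can take the
			 * connection over again.
			 */
			flow_offload_teardown(flow);
			return false;
		}

		return true;
	}

Design note: flow_offload_free() now only calls nf_ct_delete() for flows
flagged FLOW_OFFLOAD_DYING; a torn-down flow keeps its conntrack entry
alive (with the timeout fixed up as above), and flow_offload_del() clears
IPS_OFFLOAD_BIT so conntrack manages the entry again.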