struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
-@@ -108,6 +109,11 @@ static inline void flow_offload_dead(str
+@@ -103,6 +104,7 @@ void nf_flow_table_cleanup(struct net *n
+ int nf_flow_table_init(struct nf_flowtable *flow_table);
+ void nf_flow_table_free(struct nf_flowtable *flow_table);
+
++void flow_offload_teardown(struct flow_offload *flow);
+ static inline void flow_offload_dead(struct flow_offload *flow)
+ {
flow->flags |= FLOW_OFFLOAD_DYING;
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -174,6 +174,12 @@ static void flow_offload_del(struct nf_f
+ flow_offload_free(flow);
}
-+static inline void flow_offload_teardown(struct flow_offload *flow)
++void flow_offload_teardown(struct flow_offload *flow)
+{
+ flow->flags |= FLOW_OFFLOAD_TEARDOWN;
+}
++EXPORT_SYMBOL_GPL(flow_offload_teardown);
+
- int nf_flow_snat_port(const struct flow_offload *flow,
- struct sk_buff *skb, unsigned int thoff,
- u8 protocol, enum flow_offload_tuple_dir dir);
---- a/net/netfilter/nf_flow_table_core.c
-+++ b/net/netfilter/nf_flow_table_core.c
-@@ -226,11 +226,6 @@ static inline bool nf_flow_has_expired(c
+ struct flow_offload_tuple_rhash *
+ flow_offload_lookup(struct nf_flowtable *flow_table,
+ struct flow_offload_tuple *tuple)
+@@ -226,11 +232,6 @@ static inline bool nf_flow_has_expired(c
return (__s32)(flow->timeout - (u32)jiffies) <= 0;
}

-static inline bool nf_flow_is_dying(const struct flow_offload *flow)
-{
-	return flow->flags & FLOW_OFFLOAD_DYING;
-}
-
static int nf_flow_offload_gc_step(struct nf_flowtable *flow_table)
{
struct flow_offload_tuple_rhash *tuplehash;
-@@ -258,7 +253,8 @@ static int nf_flow_offload_gc_step(struc
+@@ -258,7 +259,8 @@ static int nf_flow_offload_gc_step(struc
flow = container_of(tuplehash, struct flow_offload, tuplehash[0]);
if (nf_flow_has_expired(flow) ||
-		    nf_flow_is_dying(flow))
+		    (flow->flags & (FLOW_OFFLOAD_DYING |
+				    FLOW_OFFLOAD_TEARDOWN)))
flow_offload_del(flow_table, flow);
}
out:
-@@ -419,10 +415,14 @@ static void nf_flow_table_do_cleanup(str
+@@ -419,10 +421,14 @@ static void nf_flow_table_do_cleanup(str
{
struct net_device *dev = data;
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
-@@ -100,6 +100,36 @@ err_ct_refcnt:
+@@ -100,6 +100,43 @@ err_ct_refcnt:
}
EXPORT_SYMBOL_GPL(flow_offload_alloc);
++static void flow_offload_fixup_tcp(struct ip_ct_tcp *tcp)
++{
++ tcp->state = TCP_CONNTRACK_ESTABLISHED;
++ tcp->seen[0].td_maxwin = 0;
++ tcp->seen[1].td_maxwin = 0;
++}
++
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *l4proto;
+	struct net *net = nf_ct_net(ct);
+	unsigned int *timeouts;
+	unsigned int timeout;
+	int l4num;
+
+ l4num = nf_ct_protonum(ct);
++ if (l4num == IPPROTO_TCP)
++ flow_offload_fixup_tcp(&ct->proto.tcp);
++
+ l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
+ if (!l4proto)
+		return;
+
+	timeouts = l4proto->get_timeouts(net);
+	if (!timeouts)
+ return;
+
-+ if (l4num == IPPROTO_TCP) {
++ if (l4num == IPPROTO_TCP)
+ timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
-+ ct->proto.tcp.state = TCP_CONNTRACK_IGNORE;
-+ } else if (l4num == IPPROTO_UDP) {
++ else if (l4num == IPPROTO_UDP)
+ timeout = timeouts[UDP_CT_REPLIED];
-+ } else {
++ else
+ return;
-+ }
+
+ ct->timeout = nfct_time_stamp + timeout;
-+ clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+}
+
void flow_offload_free(struct flow_offload *flow)
{
struct flow_offload_entry *e;
-@@ -107,7 +137,10 @@ void flow_offload_free(struct flow_offlo
+@@ -107,7 +144,8 @@ void flow_offload_free(struct flow_offlo
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
e = container_of(flow, struct flow_offload_entry, flow);
- nf_ct_delete(e->ct, 0, 0);
+ if (flow->flags & FLOW_OFFLOAD_DYING)
+ nf_ct_delete(e->ct, 0, 0);
-+ else
-+ flow_offload_fixup_ct_state(e->ct);
nf_ct_put(e->ct);
kfree_rcu(e, rcu_head);
}
+@@ -164,6 +202,8 @@ EXPORT_SYMBOL_GPL(flow_offload_add);
+ static void flow_offload_del(struct nf_flowtable *flow_table,
+ struct flow_offload *flow)
+ {
++ struct flow_offload_entry *e;
++
+ rhashtable_remove_fast(&flow_table->rhashtable,
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].node,
+ nf_flow_offload_rhash_params);
+@@ -171,12 +211,20 @@ static void flow_offload_del(struct nf_f
+ &flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].node,
+ nf_flow_offload_rhash_params);
+
++ e = container_of(flow, struct flow_offload_entry, flow);
++ clear_bit(IPS_OFFLOAD_BIT, &e->ct->status);
++
+ flow_offload_free(flow);
+ }
+
+ void flow_offload_teardown(struct flow_offload *flow)
+ {
++ struct flow_offload_entry *e;
++
+ flow->flags |= FLOW_OFFLOAD_TEARDOWN;
++
++ e = container_of(flow, struct flow_offload_entry, flow);
++ flow_offload_fixup_ct_state(e->ct);
+ }
+ EXPORT_SYMBOL_GPL(flow_offload_teardown);
+
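For context: unlike flow_offload_dead(), which flags the flow FLOW_OFFLOAD_DYING so that flow_offload_free() deletes the conntrack entry, the exported flow_offload_teardown() flags it FLOW_OFFLOAD_TEARDOWN and restores the conntrack state and timeout via flow_offload_fixup_ct_state(), so the connection falls back to the normal slow path once the garbage collector removes the flow. A minimal sketch of a hypothetical caller (illustrative only; the function below and its use of the TCP header are not part of these patches):

/* Illustrative sketch: hand an offloaded flow back to the slow path
 * when the data path sees a TCP FIN or RST, instead of having the
 * conntrack entry deleted along with the flow.
 */
#include <linux/tcp.h>
#include <net/netfilter/nf_flow_table.h>

static void example_tcp_close_seen(struct flow_offload *flow,
				   const struct tcphdr *tcph)
{
	if (tcph->fin || tcph->rst)
		flow_offload_teardown(flow);
}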
--- /dev/null
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 25 Feb 2018 17:22:55 +0100
+Subject: [PATCH] netfilter: nf_flow_table: fix checksum when handling DNAT
+
+Add a missing call to csum_replace4(), as is already done in the SNAT
+path; without it the IP header checksum is left stale after rewriting
+the destination address.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -130,6 +130,7 @@ static int nf_flow_dnat_ip(const struct
+ default:
+ return -1;
+ }
++ csum_replace4(&iph->check, addr, new_addr);
+
+ return nf_flow_nat_ip_l4proto(skb, iph, thoff, addr, new_addr);
+ }
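The DNAT fix above mirrors what the SNAT side already does: rewriting an address in the IPv4 header invalidates the header checksum, so it has to be adjusted incrementally with csum_replace4() in addition to the transport checksum update done by nf_flow_nat_ip_l4proto(). A minimal sketch of the idea (hypothetical helper, not the kernel function itself):

/* Illustrative sketch: rewrite the IPv4 destination address and patch
 * the header checksum for the 4-byte change, as the patch above does
 * inside nf_flow_dnat_ip().
 */
#include <linux/ip.h>
#include <net/checksum.h>

static void example_dnat_rewrite(struct iphdr *iph, __be32 new_addr)
{
	__be32 old_addr = iph->daddr;

	iph->daddr = new_addr;
	csum_replace4(&iph->check, old_addr, new_addr);
}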