--- /dev/null
+From: wenxu <wenxu@ucloud.cn>
+Date: Thu, 10 Jan 2019 14:51:35 +0800
+Subject: [PATCH] netfilter: nft_flow_offload: fix interaction with vrf slave
+ device
+
+In the forward chain, the iif is changed from the slave device to the
+master vrf device, so flow offload does not find a match on the lower
+slave device.
+
+This patch uses the cached route, i.e. dst->dev, to update the iif and
+oif fields in the flow entry.
+
+After this patch, the following example works fine:
+
+ # ip addr add dev eth0 1.1.1.1/24
+ # ip addr add dev eth1 10.0.0.1/24
+ # ip link add user1 type vrf table 1
+ # ip l set user1 up
+ # ip l set dev eth0 master user1
+ # ip l set dev eth1 master user1
+
+ # nft add table f
+ # nft add flowtable f fb1 { hook ingress priority 0 \; devices = { eth0, eth1 } \; }
+ # nft add chain f ftb-all {type filter hook forward priority 0 \; policy accept \; }
+ # nft add rule f ftb-all ct zone 1 ip protocol tcp flow offload @fb1
+ # nft add rule f ftb-all ct zone 1 ip protocol udp flow offload @fb1
+
+Signed-off-by: wenxu <wenxu@ucloud.cn>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -84,7 +84,6 @@ struct flow_offload {
+ struct nf_flow_route {
+ struct {
+ struct dst_entry *dst;
+- int ifindex;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+ };
+
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offloa
+ {
+ struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
+ struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
++ struct dst_entry *other_dst = route->tuple[!dir].dst;
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ ft->dir = dir;
+@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offloa
+ ft->src_port = ctt->src.u.tcp.port;
+ ft->dst_port = ctt->dst.u.tcp.port;
+
+- ft->iifidx = route->tuple[dir].ifindex;
+- ft->oifidx = route->tuple[!dir].ifindex;
++ ft->iifidx = other_dst->dev->ifindex;
++ ft->oifidx = dst->dev->ifindex;
+ ft->dst_cache = dst;
+ }
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -28,9 +28,11 @@ nft_flow_dst(const struct nf_conn *ct, e
+ switch (nft_pf(pkt)) {
+ case NFPROTO_IPV4:
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
++ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
+ break;
+ case NFPROTO_IPV6:
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
++ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
+ break;
+ }
+
+@@ -52,9 +54,7 @@ static int nft_flow_route(const struct n
+ return -ENOENT;
+
+ route->tuple[dir].dst = this_dst;
+- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
+ route->tuple[!dir].dst = other_dst;
+- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
+
+ return 0;
+ }
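The backport above touches two spots: nft_flow_dst() now pins the route lookup to the ingress device (flowi4_oif / flowi6_oif), so on a VRF setup the FIB lookup runs in the VRF's table, and flow_offload_fill_dir() then reads the interface indexes from the cached dst entries instead of the removed nf_flow_route ifindex fields. Below is a minimal sketch of the resulting tuple fill, assuming the 4.20-era flow-offload structures; the helper name is illustrative and not part of this commit.

#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/netfilter/nf_flow_table.h>

/* Sketch only: mirrors the flow_offload_fill_dir() hunk above.
 * The interface indexes come from the cached routes, so a VRF
 * slave device resolved by the FIB lookup is used directly. */
static void sketch_fill_ifaces(struct flow_offload_tuple *ft,
			       const struct nf_flow_route *route,
			       enum flow_offload_tuple_dir dir)
{
	/* the reply-direction dst leaves via the device this direction enters on */
	ft->iifidx = route->tuple[!dir].dst->dev->ifindex;
	/* this direction's dst carries the egress device */
	ft->oifidx = route->tuple[dir].dst->dev->ifindex;
	ft->dst_cache = route->tuple[dir].dst;
}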
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
-@@ -164,6 +164,8 @@ struct nf_flow_table_hw {
+@@ -163,6 +163,8 @@ struct nf_flow_table_hw {
int nf_flow_table_hw_register(const struct nf_flow_table_hw *offload);
void nf_flow_table_hw_unregister(const struct nf_flow_table_hw *offload);
struct flow_offload_entry {
struct flow_offload flow;
-@@ -151,6 +152,22 @@ void flow_offload_free(struct flow_offlo
+@@ -152,6 +153,22 @@ void flow_offload_free(struct flow_offlo
}
EXPORT_SYMBOL_GPL(flow_offload_free);
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
--- /dev/null
+++ b/net/netfilter/xt_FLOWOFFLOAD.c
-@@ -0,0 +1,408 @@
+@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2018 Felix Fietkau <nbd@nbd.name>
+ *
+#include <linux/netfilter/xt_FLOWOFFLOAD.h>
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
-+#include <net/netfilter/nf_flow_table.h>
++#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_helper.h>
++#include <net/netfilter/nf_flow_table.h>
+
+static struct nf_flowtable nf_flowtable;
+static HLIST_HEAD(hooks);
+}
+
+static bool
-+xt_flowoffload_skip(struct sk_buff *skb)
++xt_flowoffload_skip(struct sk_buff *skb, int family)
+{
-+ struct ip_options *opt = &(IPCB(skb)->opt);
-+
-+ if (unlikely(opt->optlen))
-+ return true;
+ if (skb_sec_path(skb))
+ return true;
+
++ if (family == NFPROTO_IPV4) {
++ const struct ip_options *opt = &(IPCB(skb)->opt);
++
++ if (unlikely(opt->optlen))
++ return true;
++ }
++
+ return false;
+}
+
+static struct dst_entry *
+xt_flowoffload_dst(const struct nf_conn *ct, enum ip_conntrack_dir dir,
-+ const struct xt_action_param *par)
++ const struct xt_action_param *par, int ifindex)
+{
+ struct dst_entry *dst = NULL;
+ struct flowi fl;
+{
+ struct dst_entry *this_dst, *other_dst;
+
-+ this_dst = xt_flowoffload_dst(ct, dir, par);
-+ other_dst = xt_flowoffload_dst(ct, !dir, par);
++ this_dst = xt_flowoffload_dst(ct, !dir, par, xt_out(par)->ifindex);
++ other_dst = xt_flowoffload_dst(ct, dir, par, xt_in(par)->ifindex);
+ if (!this_dst || !other_dst)
+ return -ENOENT;
+
+ return -EINVAL;
+
+ route->tuple[dir].dst = this_dst;
-+ route->tuple[dir].ifindex = xt_in(par)->ifindex;
+ route->tuple[!dir].dst = other_dst;
-+ route->tuple[!dir].ifindex = xt_out(par)->ifindex;
+
+ return 0;
+}
+flowoffload_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_flowoffload_target_info *info = par->targinfo;
-+ const struct nf_conn_help *help;
++ struct tcphdr _tcph, *tcph = NULL;
+ enum ip_conntrack_info ctinfo;
+ enum ip_conntrack_dir dir;
+ struct nf_flow_route route;
+ struct nf_conn *ct;
+ struct net *net;
+
-+ if (xt_flowoffload_skip(skb))
++ if (xt_flowoffload_skip(skb, xt_family(par)))
+ return XT_CONTINUE;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ case IPPROTO_TCP:
+ if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+ return XT_CONTINUE;
++
++ tcph = skb_header_pointer(skb, par->thoff,
++ sizeof(_tcph), &_tcph);
++ if (unlikely(!tcph || tcph->fin || tcph->rst))
++ return XT_CONTINUE;
+ break;
+ case IPPROTO_UDP:
+ break;
+ return XT_CONTINUE;
+ }
+
-+ help = nfct_help(ct);
-+ if (help)
++ if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
++ ct->status & IPS_SEQ_ADJUST)
+ return XT_CONTINUE;
+
-+ if (ctinfo == IP_CT_NEW ||
-+ ctinfo == IP_CT_RELATED)
++ if (!nf_ct_is_confirmed(ct))
+ return XT_CONTINUE;
+
+ if (!xt_in(par) || !xt_out(par))
+ if (!flow)
+ goto err_flow_alloc;
+
++ if (tcph) {
++ ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
++ ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
++ }
++
+ if (flow_offload_add(&nf_flowtable, flow) < 0)
+ goto err_flow_add;
+
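The hunks above refresh the out-of-tree xt_FLOWOFFLOAD target so it matches the backport: the nf_flow_route ifindex assignments are dropped, the dst lookups are bound to the in/out interfaces, TCP fin/rst packets and unconfirmed, helper-attached or SEQ_ADJUST conntracks are skipped, and TCP window tracking is switched to liberal mode before the flow is offloaded. As a hedged usage sketch (assuming the matching libxt_FLOWOFFLOAD userspace extension that OpenWrt ships), established forwarded flows can be handed to the software flow table with rules along these lines:

 # iptables -A FORWARD -p tcp -j FLOWOFFLOAD
 # iptables -A FORWARD -p udp -j FLOWOFFLOAD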