1 From: Pablo Neira Ayuso <pablo@netfilter.org>
2 Date: Wed, 24 Mar 2021 02:30:47 +0100
3 Subject: [PATCH] netfilter: nft_flow_offload: use direct xmit if
4 hardware offload is enabled
6 If there is a forward path to reach an ethernet device and hardware
7 offload is enabled, then use the direct xmit path.
9 Moreover, store the real device in the direct xmit path info, since the
10 software datapath uses dev_hard_header() to push the link-layer
11 encapsulation headers, while hardware offload refers to the real device.
13 Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
16 --- a/include/net/netfilter/nf_flow_table.h
17 +++ b/include/net/netfilter/nf_flow_table.h
18 @@ -131,6 +131,7 @@ struct flow_offload_tuple {
19 struct dst_entry *dst_cache;
23 u8 h_source[ETH_ALEN];
26 @@ -187,6 +188,7 @@ struct nf_flow_route {
31 u8 h_source[ETH_ALEN];
34 --- a/net/netfilter/nf_flow_table_core.c
35 +++ b/net/netfilter/nf_flow_table_core.c
36 @@ -106,6 +106,7 @@ static int flow_offload_fill_route(struc
37 memcpy(flow_tuple->out.h_source, route->tuple[dir].out.h_source,
39 flow_tuple->out.ifidx = route->tuple[dir].out.ifindex;
40 + flow_tuple->out.hw_ifidx = route->tuple[dir].out.hw_ifindex;
42 case FLOW_OFFLOAD_XMIT_XFRM:
43 case FLOW_OFFLOAD_XMIT_NEIGH:
44 --- a/net/netfilter/nf_flow_table_offload.c
45 +++ b/net/netfilter/nf_flow_table_offload.c
46 @@ -508,7 +508,7 @@ static void flow_offload_redirect(struct
47 switch (this_tuple->xmit_type) {
48 case FLOW_OFFLOAD_XMIT_DIRECT:
49 this_tuple = &flow->tuplehash[dir].tuple;
50 - ifindex = this_tuple->out.ifidx;
51 + ifindex = this_tuple->out.hw_ifidx;
53 case FLOW_OFFLOAD_XMIT_NEIGH:
54 other_tuple = &flow->tuplehash[!dir].tuple;
55 --- a/net/netfilter/nft_flow_offload.c
56 +++ b/net/netfilter/nft_flow_offload.c
57 @@ -66,6 +66,7 @@ static int nft_dev_fill_forward_path(con
58 struct nft_forward_info {
59 const struct net_device *indev;
60 const struct net_device *outdev;
61 + const struct net_device *hw_outdev;
65 @@ -76,9 +77,18 @@ struct nft_forward_info {
66 enum flow_offload_xmit_type xmit_type;
69 +static bool nft_is_valid_ether_device(const struct net_device *dev)
71 + if (!dev || (dev->flags & IFF_LOOPBACK) || dev->type != ARPHRD_ETHER ||
72 + dev->addr_len != ETH_ALEN || !is_valid_ether_addr(dev->dev_addr))
78 static void nft_dev_path_info(const struct net_device_path_stack *stack,
79 struct nft_forward_info *info,
81 + unsigned char *ha, struct nf_flowtable *flowtable)
83 const struct net_device_path *path;
85 @@ -140,6 +150,12 @@ static void nft_dev_path_info(const stru
88 info->outdev = info->indev;
90 + info->hw_outdev = info->indev;
92 + if (nf_flowtable_hw_offload(flowtable) &&
93 + nft_is_valid_ether_device(info->indev))
94 + info->xmit_type = FLOW_OFFLOAD_XMIT_DIRECT;
97 static bool nft_flowtable_find_dev(const struct net_device *dev,
98 @@ -171,7 +187,7 @@ static void nft_dev_forward_path(struct
101 if (nft_dev_fill_forward_path(route, dst, ct, dir, ha, &stack) >= 0)
102 - nft_dev_path_info(&stack, &info, ha);
103 + nft_dev_path_info(&stack, &info, ha, &ft->data);
105 if (!info.indev || !nft_flowtable_find_dev(info.indev, ft))
107 @@ -187,6 +203,7 @@ static void nft_dev_forward_path(struct
108 memcpy(route->tuple[dir].out.h_source, info.h_source, ETH_ALEN);
109 memcpy(route->tuple[dir].out.h_dest, info.h_dest, ETH_ALEN);
110 route->tuple[dir].out.ifindex = info.outdev->ifindex;
111 + route->tuple[dir].out.hw_ifindex = info.hw_outdev->ifindex;
112 route->tuple[dir].xmit_type = info.xmit_type;