From: Pablo Neira Ayuso <pablo@netfilter.org>
Date: Wed, 24 Mar 2021 02:30:46 +0100
Subject: [PATCH] netfilter: flowtable: add offload support for xmit path
 types

When the flow tuple xmit_type is set to FLOW_OFFLOAD_XMIT_DIRECT, the
dst_cache pointer is not valid, and the h_source/h_dest/ifidx out fields
need to be used.
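
Condensed, each helper below now picks its forwarding information with
a switch of this shape (a summary of the hunks that follow, not extra
code):

	switch (this_tuple->xmit_type) {
	case FLOW_OFFLOAD_XMIT_DIRECT:
		/* dst_cache is not valid: use the out.h_source,
		 * out.h_dest and out.ifidx fields of the tuple.
		 */
		break;
	case FLOW_OFFLOAD_XMIT_NEIGH:
		/* resolve via dst_cache and neighbour lookup, as before */
		break;
	default:
		return -EOPNOTSUPP;	/* or skip the action */
	}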

This patch also adds the FLOW_ACTION_VLAN_PUSH action to pass the VLAN
tag to the driver.
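
A driver parsing the resulting action list would consume the new entry
roughly as follows (an illustrative sketch, not part of this patch;
flow_action_for_each() is the existing iterator from
include/net/flow_offload.h, and "rule" stands for the struct flow_rule
handed to the driver):

	struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_VLAN_PUSH:
			/* program the outer tag from act->vlan.vid
			 * and act->vlan.proto
			 */
			break;
		case FLOW_ACTION_REDIRECT:
			/* transmit through act->dev */
			break;
		default:
			break;
		}
	}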

Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
---

--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -177,28 +177,45 @@ static int flow_offload_eth_src(struct n
 				enum flow_offload_tuple_dir dir,
 				struct nf_flow_rule *flow_rule)
 {
-	const struct flow_offload_tuple *tuple = &flow->tuplehash[!dir].tuple;
 	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
 	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
-	struct net_device *dev;
+	const struct flow_offload_tuple *other_tuple, *this_tuple;
+	struct net_device *dev = NULL;
+	const unsigned char *addr;
 	u32 mask, val;
 	u16 val16;

-	dev = dev_get_by_index(net, tuple->iifidx);
-	if (!dev)
-		return -ENOENT;
+	this_tuple = &flow->tuplehash[dir].tuple;
+
+	switch (this_tuple->xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		addr = this_tuple->out.h_source;
+		break;
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		other_tuple = &flow->tuplehash[!dir].tuple;
+		dev = dev_get_by_index(net, other_tuple->iifidx);
+		if (!dev)
+			return -ENOENT;
+
+		addr = dev->dev_addr;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}

 	mask = ~0xffff0000;
-	memcpy(&val16, dev->dev_addr, 2);
+	memcpy(&val16, addr, 2);
 	val = val16 << 16;
 	flow_offload_mangle(entry0, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
			    &val, &mask);

 	mask = ~0xffffffff;
-	memcpy(&val, dev->dev_addr + 2, 4);
+	memcpy(&val, addr + 2, 4);
 	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 8,
			    &val, &mask);
-	dev_put(dev);
+
+	if (dev)
+		dev_put(dev);

 	return 0;
 }
@@ -210,27 +227,40 @@ static int flow_offload_eth_dst(struct n
 {
 	struct flow_action_entry *entry0 = flow_action_entry_next(flow_rule);
 	struct flow_action_entry *entry1 = flow_action_entry_next(flow_rule);
-	const void *daddr = &flow->tuplehash[!dir].tuple.src_v4;
+	const struct flow_offload_tuple *other_tuple, *this_tuple;
 	const struct dst_entry *dst_cache;
 	unsigned char ha[ETH_ALEN];
 	struct neighbour *n;
+	const void *daddr;
 	u32 mask, val;
 	u8 nud_state;
 	u16 val16;

-	dst_cache = flow->tuplehash[dir].tuple.dst_cache;
-	n = dst_neigh_lookup(dst_cache, daddr);
-	if (!n)
-		return -ENOENT;
-
-	read_lock_bh(&n->lock);
-	nud_state = n->nud_state;
-	ether_addr_copy(ha, n->ha);
-	read_unlock_bh(&n->lock);
+	this_tuple = &flow->tuplehash[dir].tuple;

-	if (!(nud_state & NUD_VALID)) {
+	switch (this_tuple->xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		ether_addr_copy(ha, this_tuple->out.h_dest);
+		break;
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		other_tuple = &flow->tuplehash[!dir].tuple;
+		daddr = &other_tuple->src_v4;
+		dst_cache = this_tuple->dst_cache;
+		n = dst_neigh_lookup(dst_cache, daddr);
+		if (!n)
+			return -ENOENT;
+
+		read_lock_bh(&n->lock);
+		nud_state = n->nud_state;
+		ether_addr_copy(ha, n->ha);
+		read_unlock_bh(&n->lock);
 		neigh_release(n);
-		return -ENOENT;
+
+		if (!(nud_state & NUD_VALID))
+			return -ENOENT;
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}

 	mask = ~0xffffffff;
@@ -243,7 +273,6 @@ static int flow_offload_eth_dst(struct n
 	val = val16;
 	flow_offload_mangle(entry1, FLOW_ACT_MANGLE_HDR_TYPE_ETH, 4,
			    &val, &mask);
-	neigh_release(n);

 	return 0;
 }
@@ -465,27 +494,52 @@ static void flow_offload_ipv4_checksum(s
 	}
 }

-static void flow_offload_redirect(const struct flow_offload *flow,
+static void flow_offload_redirect(struct net *net,
+				  const struct flow_offload *flow,
 				  enum flow_offload_tuple_dir dir,
 				  struct nf_flow_rule *flow_rule)
 {
-	struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
-	struct rtable *rt;
+	const struct flow_offload_tuple *this_tuple, *other_tuple;
+	struct flow_action_entry *entry;
+	struct net_device *dev;
+	int ifindex;
+
+	this_tuple = &flow->tuplehash[dir].tuple;
+	switch (this_tuple->xmit_type) {
+	case FLOW_OFFLOAD_XMIT_DIRECT:
+		this_tuple = &flow->tuplehash[dir].tuple;
+		ifindex = this_tuple->out.ifidx;
+		break;
+	case FLOW_OFFLOAD_XMIT_NEIGH:
+		other_tuple = &flow->tuplehash[!dir].tuple;
+		ifindex = other_tuple->iifidx;
+		break;
+	default:
+		return;
+	}

-	rt = (struct rtable *)flow->tuplehash[dir].tuple.dst_cache;
+	dev = dev_get_by_index(net, ifindex);
+	if (!dev)
+		return;
+
+	entry = flow_action_entry_next(flow_rule);
 	entry->id = FLOW_ACTION_REDIRECT;
-	entry->dev = rt->dst.dev;
-	dev_hold(rt->dst.dev);
+	entry->dev = dev;
 }

 static void flow_offload_encap_tunnel(const struct flow_offload *flow,
 				      enum flow_offload_tuple_dir dir,
 				      struct nf_flow_rule *flow_rule)
 {
+	const struct flow_offload_tuple *this_tuple;
 	struct flow_action_entry *entry;
 	struct dst_entry *dst;

-	dst = flow->tuplehash[dir].tuple.dst_cache;
+	this_tuple = &flow->tuplehash[dir].tuple;
+	if (this_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+		return;
+
+	dst = this_tuple->dst_cache;
 	if (dst && dst->lwtstate) {
 		struct ip_tunnel_info *tun_info;

@@ -502,10 +556,15 @@ static void flow_offload_decap_tunnel(co
 				      enum flow_offload_tuple_dir dir,
 				      struct nf_flow_rule *flow_rule)
 {
+	const struct flow_offload_tuple *other_tuple;
 	struct flow_action_entry *entry;
 	struct dst_entry *dst;

-	dst = flow->tuplehash[!dir].tuple.dst_cache;
+	other_tuple = &flow->tuplehash[!dir].tuple;
+	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_DIRECT)
+		return;
+
+	dst = other_tuple->dst_cache;
 	if (dst && dst->lwtstate) {
 		struct ip_tunnel_info *tun_info;

@@ -517,10 +576,14 @@ static void flow_offload_decap_tunnel(co
 	}
 }

-int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
-			    enum flow_offload_tuple_dir dir,
-			    struct nf_flow_rule *flow_rule)
+static int
+nf_flow_rule_route_common(struct net *net, const struct flow_offload *flow,
+			  enum flow_offload_tuple_dir dir,
+			  struct nf_flow_rule *flow_rule)
 {
+	const struct flow_offload_tuple *other_tuple;
+	int i;
+
 	flow_offload_decap_tunnel(flow, dir, flow_rule);
 	flow_offload_encap_tunnel(flow, dir, flow_rule);

@@ -528,6 +591,26 @@ int nf_flow_rule_route_ipv4(struct net *
 	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
 		return -1;

+	other_tuple = &flow->tuplehash[!dir].tuple;
+
+	for (i = 0; i < other_tuple->encap_num; i++) {
+		struct flow_action_entry *entry = flow_action_entry_next(flow_rule);
+
+		entry->id = FLOW_ACTION_VLAN_PUSH;
+		entry->vlan.vid = other_tuple->encap[i].id;
+		entry->vlan.proto = other_tuple->encap[i].proto;
+	}
+
+	return 0;
+}
+
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+			    enum flow_offload_tuple_dir dir,
+			    struct nf_flow_rule *flow_rule)
+{
+	if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
+		return -1;
+
 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
 		flow_offload_ipv4_snat(net, flow, dir, flow_rule);
 		flow_offload_port_snat(net, flow, dir, flow_rule);
@@ -540,7 +623,7 @@ int nf_flow_rule_route_ipv4(struct net *
 	    test_bit(NF_FLOW_DNAT, &flow->flags))
 		flow_offload_ipv4_checksum(net, flow, flow_rule);

-	flow_offload_redirect(flow, dir, flow_rule);
+	flow_offload_redirect(net, flow, dir, flow_rule);

 	return 0;
 }
@@ -550,11 +633,7 @@ int nf_flow_rule_route_ipv6(struct net *
 			    enum flow_offload_tuple_dir dir,
 			    struct nf_flow_rule *flow_rule)
 {
-	flow_offload_decap_tunnel(flow, dir, flow_rule);
-	flow_offload_encap_tunnel(flow, dir, flow_rule);
-
-	if (flow_offload_eth_src(net, flow, dir, flow_rule) < 0 ||
-	    flow_offload_eth_dst(net, flow, dir, flow_rule) < 0)
+	if (nf_flow_rule_route_common(net, flow, dir, flow_rule) < 0)
 		return -1;

 	if (test_bit(NF_FLOW_SNAT, &flow->flags)) {
@@ -566,7 +645,7 @@ int nf_flow_rule_route_ipv6(struct net *
 		flow_offload_port_dnat(net, flow, dir, flow_rule);
 	}

-	flow_offload_redirect(flow, dir, flow_rule);
+	flow_offload_redirect(net, flow, dir, flow_rule);

 	return 0;
 }
@@ -580,10 +659,10 @@ nf_flow_offload_rule_alloc(struct net *n
 			   enum flow_offload_tuple_dir dir)
 {
 	const struct nf_flowtable *flowtable = offload->flowtable;
+	const struct flow_offload_tuple *tuple, *other_tuple;
 	const struct flow_offload *flow = offload->flow;
-	const struct flow_offload_tuple *tuple;
+	struct dst_entry *other_dst = NULL;
 	struct nf_flow_rule *flow_rule;
-	struct dst_entry *other_dst;
 	int err = -ENOMEM;

 	flow_rule = kzalloc(sizeof(*flow_rule), GFP_KERNEL);
@@ -599,7 +678,10 @@ nf_flow_offload_rule_alloc(struct net *n
 	flow_rule->rule->match.key = &flow_rule->match.key;

 	tuple = &flow->tuplehash[dir].tuple;
-	other_dst = flow->tuplehash[!dir].tuple.dst_cache;
+	other_tuple = &flow->tuplehash[!dir].tuple;
+	if (other_tuple->xmit_type == FLOW_OFFLOAD_XMIT_NEIGH)
+		other_dst = other_tuple->dst_cache;
+
 	err = nf_flow_rule_match(&flow_rule->match, tuple, other_dst);
 	if (err < 0)
 		goto err_flow_match;