1 From: Oz Shlomo <ozsh@nvidia.com>
2 Date: Tue, 23 Mar 2021 00:56:19 +0100
3 Subject: [PATCH] netfilter: flowtable: separate replace, destroy and
4 stats to different workqueues
6 Currently the flow table offload replace, destroy and stats work items are
7 executed on a single workqueue. As such, DESTROY and STATS commands may
8 be backlogged after a burst of REPLACE work items. This scenario can bloat
9 up memory and may cause active connections to age.
11 Instantiate add, del and stats workqueues to avoid backlogs of non-dependent
12 actions. Provide sysfs control over the workqueue attributes, allowing
13 userspace applications to control the workqueue cpumask.
15 Signed-off-by: Oz Shlomo <ozsh@nvidia.com>
16 Reviewed-by: Paul Blakey <paulb@nvidia.com>
17 Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
20 --- a/net/netfilter/nf_flow_table_offload.c
21 +++ b/net/netfilter/nf_flow_table_offload.c
23 #include <net/netfilter/nf_conntrack_core.h>
24 #include <net/netfilter/nf_conntrack_tuple.h>
26 -static struct workqueue_struct *nf_flow_offload_wq;
27 +static struct workqueue_struct *nf_flow_offload_add_wq;
28 +static struct workqueue_struct *nf_flow_offload_del_wq;
29 +static struct workqueue_struct *nf_flow_offload_stats_wq;
31 struct flow_offload_work {
32 struct list_head list;
33 @@ -826,7 +828,12 @@ static void flow_offload_work_handler(st
35 static void flow_offload_queue_work(struct flow_offload_work *offload)
37 - queue_work(nf_flow_offload_wq, &offload->work);
38 + if (offload->cmd == FLOW_CLS_REPLACE)
39 + queue_work(nf_flow_offload_add_wq, &offload->work);
40 + else if (offload->cmd == FLOW_CLS_DESTROY)
41 + queue_work(nf_flow_offload_del_wq, &offload->work);
43 + queue_work(nf_flow_offload_stats_wq, &offload->work);
46 static struct flow_offload_work *
47 @@ -898,8 +905,11 @@ void nf_flow_offload_stats(struct nf_flo
49 void nf_flow_table_offload_flush(struct nf_flowtable *flowtable)
51 - if (nf_flowtable_hw_offload(flowtable))
52 - flush_workqueue(nf_flow_offload_wq);
53 + if (nf_flowtable_hw_offload(flowtable)) {
54 + flush_workqueue(nf_flow_offload_add_wq);
55 + flush_workqueue(nf_flow_offload_del_wq);
56 + flush_workqueue(nf_flow_offload_stats_wq);
60 static int nf_flow_table_block_setup(struct nf_flowtable *flowtable,
61 @@ -1011,15 +1021,33 @@ EXPORT_SYMBOL_GPL(nf_flow_table_offload_
63 int nf_flow_table_offload_init(void)
65 - nf_flow_offload_wq = alloc_workqueue("nf_flow_table_offload",
67 - if (!nf_flow_offload_wq)
68 + nf_flow_offload_add_wq = alloc_workqueue("nf_ft_offload_add",
69 + WQ_UNBOUND | WQ_SYSFS, 0);
70 + if (!nf_flow_offload_add_wq)
73 + nf_flow_offload_del_wq = alloc_workqueue("nf_ft_offload_del",
74 + WQ_UNBOUND | WQ_SYSFS, 0);
75 + if (!nf_flow_offload_del_wq)
78 + nf_flow_offload_stats_wq = alloc_workqueue("nf_ft_offload_stats",
79 + WQ_UNBOUND | WQ_SYSFS, 0);
80 + if (!nf_flow_offload_stats_wq)
86 + destroy_workqueue(nf_flow_offload_del_wq);
88 + destroy_workqueue(nf_flow_offload_add_wq);
92 void nf_flow_table_offload_exit(void)
94 - destroy_workqueue(nf_flow_offload_wq);
95 + destroy_workqueue(nf_flow_offload_add_wq);
96 + destroy_workqueue(nf_flow_offload_del_wq);
97 + destroy_workqueue(nf_flow_offload_stats_wq);