void *cb_priv,
enum flow_block_command command);
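+
+/* One subsystem's ingress command handler (e.g. tc's); entries are linked
+ * on a global list that is walked whenever an indirect block callback is
+ * registered or unregistered.
+ */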
+struct flow_indr_block_ing_entry {
+ flow_indr_block_ing_cmd_t *cb;
+ struct list_head list;
+};
+
+void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+
+void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+
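A subsystem hooks into this by embedding a struct flow_indr_block_ing_entry
and handing it to the add/del helpers. A minimal sketch of the intended usage
(my_subsys_ing_cmd and my_subsys_entry are hypothetical names; the real
registration done by tc appears further down in cls_api.c):

	static void my_subsys_ing_cmd(struct net_device *dev,
				      flow_indr_block_bind_cb_t *cb,
				      void *cb_priv,
				      enum flow_block_command command)
	{
		/* Bind or unbind this subsystem's ingress block for dev. */
	}

	static struct flow_indr_block_ing_entry my_subsys_entry = {
		.cb = my_subsys_ing_cmd,
		.list = LIST_HEAD_INIT(my_subsys_entry.list),
	};

	/* On init: */
	flow_indr_add_block_ing_cb(&my_subsys_entry);
	/* On teardown: */
	flow_indr_del_block_ing_cb(&my_subsys_entry);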
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
flow_indr_block_bind_cb_t *cb,
void *cb_ident);
void __flow_indr_block_cb_unregister(struct net_device *dev,
 flow_indr_block_bind_cb_t *cb,
 void *cb_ident);
void flow_indr_block_call(struct net_device *dev,
- flow_indr_block_ing_cmd_t *cb,
struct flow_block_offload *bo,
enum flow_block_command command);
#include <linux/slab.h>
#include <net/flow_offload.h>
#include <linux/rtnetlink.h>
+#include <linux/mutex.h>
struct flow_rule *flow_rule_alloc(unsigned int num_actions)
{
}
EXPORT_SYMBOL(flow_block_cb_setup_simple);
+static LIST_HEAD(block_ing_cb_list);
+
static struct rhashtable indr_setup_block_ht;
struct flow_indr_block_dev {
struct rhash_head ht_node;
struct net_device *dev;
unsigned int refcnt;
- flow_indr_block_ing_cmd_t *block_ing_cmd_cb;
struct list_head cb_list;
};
kfree(indr_block_cb);
}
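+/* Walk the global list of ingress command entries and let each subsystem
+ * bind or unbind its block for the device. Readers run under
+ * rcu_read_lock(); list mutations are serialized by
+ * flow_indr_block_ing_cb_lock in the add/del helpers below.
+ */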
+static void flow_block_ing_cmd(struct net_device *dev,
+ flow_indr_block_bind_cb_t *cb,
+ void *cb_priv,
+ enum flow_block_command command)
+{
+ struct flow_indr_block_ing_entry *entry;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &block_ing_cb_list, list) {
+ entry->cb(dev, cb, cb_priv, command);
+ }
+ rcu_read_unlock();
+}
+
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
flow_indr_block_bind_cb_t *cb,
void *cb_ident)
if (err)
goto err_dev_put;
- if (indr_dev->block_ing_cmd_cb)
- indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
- indr_block_cb->cb_priv,
- FLOW_BLOCK_BIND);
+ flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+ FLOW_BLOCK_BIND);
return 0;
if (!indr_block_cb)
return;
- if (indr_dev->block_ing_cmd_cb)
- indr_dev->block_ing_cmd_cb(dev, indr_block_cb->cb,
- indr_block_cb->cb_priv,
- FLOW_BLOCK_UNBIND);
+ flow_block_ing_cmd(dev, indr_block_cb->cb, indr_block_cb->cb_priv,
+ FLOW_BLOCK_UNBIND);
flow_indr_block_cb_del(indr_block_cb);
flow_indr_block_dev_put(indr_dev);
EXPORT_SYMBOL_GPL(flow_indr_block_cb_unregister);
void flow_indr_block_call(struct net_device *dev,
- flow_indr_block_ing_cmd_t *cb,
struct flow_block_offload *bo,
enum flow_block_command command)
{
if (!indr_dev)
return;
- indr_dev->block_ing_cmd_cb = command == FLOW_BLOCK_BIND
- ? cb : NULL;
-
list_for_each_entry(indr_block_cb, &indr_dev->cb_list, list)
indr_block_cb->cb(dev, indr_block_cb->cb_priv, TC_SETUP_BLOCK,
bo);
}
EXPORT_SYMBOL_GPL(flow_indr_block_call);
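+
+/* Serializes writers of block_ing_cb_list; readers traverse it under RCU. */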
+static DEFINE_MUTEX(flow_indr_block_ing_cb_lock);
+void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+{
+ mutex_lock(&flow_indr_block_ing_cb_lock);
+ list_add_tail_rcu(&entry->list, &block_ing_cb_list);
+ mutex_unlock(&flow_indr_block_ing_cb_lock);
+}
+EXPORT_SYMBOL_GPL(flow_indr_add_block_ing_cb);
+
+void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry)
+{
+ mutex_lock(&flow_indr_block_ing_cb_lock);
+ list_del_rcu(&entry->list);
+ mutex_unlock(&flow_indr_block_ing_cb_lock);
+}
+EXPORT_SYMBOL_GPL(flow_indr_del_block_ing_cb);
+
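Note that flow_indr_del_block_ing_cb() only unlinks the entry; a concurrent
reader inside flow_block_ing_cmd() may still be using it. A caller that frees
the entry (or unloads the module providing the callback) is expected to wait
for an RCU grace period first, roughly (assuming a dynamically allocated
entry):

	flow_indr_del_block_ing_cb(entry);
	synchronize_rcu();	/* wait for readers still walking the list */
	kfree(entry);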
static int __init init_flow_indr_rhashtable(void)
{
return rhashtable_init(&indr_setup_block_ht,
};
INIT_LIST_HEAD(&bo.cb_list);
- flow_indr_block_call(dev, tc_indr_block_get_and_ing_cmd, &bo, command);
+ flow_indr_block_call(dev, &bo, command);
tcf_block_setup(block, &bo);
}
.size = sizeof(struct tcf_net),
};
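+/* tc's entry on the global ingress command list; registered from
+ * tc_filter_init() below.
+ */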
+static struct flow_indr_block_ing_entry block_ing_entry = {
+ .cb = tc_indr_block_get_and_ing_cmd,
+ .list = LIST_HEAD_INIT(block_ing_entry.list),
+};
+
static int __init tc_filter_init(void)
{
int err;
if (err)
goto err_register_pernet_subsys;
+ flow_indr_add_block_ing_cb(&block_ing_entry);
+
rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
RTNL_FLAG_DOIT_UNLOCKED);
rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,