static enum net_dm_alert_mode net_dm_alert_mode = NET_DM_ALERT_MODE_SUMMARY;
static u32 net_dm_trunc_len;
+static u32 net_dm_queue_len = 1000;
struct net_dm_alert_ops {
void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb,
#define NET_DM_SKB_CB(__skb) ((struct net_dm_skb_cb *)&((__skb)->cb[0]))
-#define NET_DM_QUEUE_LEN 1000
-
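The hard-coded NET_DM_QUEUE_LEN macro is removed in favour of the net_dm_queue_len variable introduced above. It starts out at the same value (1000), so the default queue length is unchanged until user space configures a different one.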
static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
{
size_t al;
data = this_cpu_ptr(&dm_cpu_data);
spin_lock_irqsave(&data->drop_queue.lock, flags);
- if (skb_queue_len(&data->drop_queue) < NET_DM_QUEUE_LEN)
+ if (skb_queue_len(&data->drop_queue) < net_dm_queue_len)
__skb_queue_tail(&data->drop_queue, nskb);
else
goto unlock_free;
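The enqueue path above takes the queue's spinlock and tail-drops once the queue already holds net_dm_queue_len packets; otherwise the cloned skb is appended. As a minimal userspace sketch of that tail-drop policy, assuming a pthread mutex in place of the kernel spinlock and a plain pointer ring in place of the sk_buff list (all names below are hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the per-CPU drop queue. */
struct drop_queue {
	pthread_mutex_t lock;	/* stands in for drop_queue.lock */
	void **ring;		/* stands in for the sk_buff list */
	size_t capacity;	/* allocated ring size */
	size_t limit;		/* runtime-tunable, like net_dm_queue_len */
	size_t head, tail, count;
};

/* Mirrors the patched check: enqueue while below the limit,
 * otherwise drop (the kernel's unlock_free path frees the clone). */
static bool dq_enqueue(struct drop_queue *q, void *item)
{
	bool queued = false;

	pthread_mutex_lock(&q->lock);
	if (q->count < q->limit && q->count < q->capacity) {
		q->ring[q->tail] = item;
		q->tail = (q->tail + 1) % q->capacity;
		q->count++;
		queued = true;
	}
	pthread_mutex_unlock(&q->lock);

	return queued;	/* false tells the caller to free the item */
}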
net_dm_trunc_len = nla_get_u32(info->attrs[NET_DM_ATTR_TRUNC_LEN]);
}
+static void net_dm_queue_len_set(struct genl_info *info)
+{
+ if (!info->attrs[NET_DM_ATTR_QUEUE_LEN])
+ return;
+
+ net_dm_queue_len = nla_get_u32(info->attrs[NET_DM_ATTR_QUEUE_LEN]);
+}
+
static int net_dm_cmd_config(struct sk_buff *skb,
struct genl_info *info)
{
net_dm_trunc_len_set(info);
+ net_dm_queue_len_set(info);
+
return 0;
}
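From user space, the new limit would be set by sending NET_DM_CMD_CONFIG with a NET_DM_ATTR_QUEUE_LEN attribute. A sketch using libmnl, assuming the NET_DM generic netlink family id has already been resolved into family_id (e.g. via the nlctrl CTRL_CMD_GETFAMILY round trip) and that the uapi header exports the new attribute as this patch intends:

#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/net_dropmon.h>
#include <stdint.h>
#include <time.h>

/* Sketch: build and send a CONFIG request carrying the new attribute. */
static int net_dm_set_queue_len(struct mnl_socket *nl, uint16_t family_id,
				uint32_t qlen)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh;
	struct genlmsghdr *genl;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = family_id;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
	genl->cmd = NET_DM_CMD_CONFIG;
	genl->version = 2;	/* assumption: the NET_DM family's version */

	/* The attribute handled by net_dm_queue_len_set() above. */
	mnl_attr_put_u32(nlh, NET_DM_ATTR_QUEUE_LEN, qlen);

	return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0 ? -1 : 0;
}

The ACK would then be read back with mnl_socket_recvfrom() on the same socket; error handling is elided here.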
if (nla_put_u32(msg, NET_DM_ATTR_TRUNC_LEN, net_dm_trunc_len))
goto nla_put_failure;
+ if (nla_put_u32(msg, NET_DM_ATTR_QUEUE_LEN, net_dm_queue_len))
+ goto nla_put_failure;
+
genlmsg_end(msg, hdr);
return 0;
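Since the configuration reply now carries the queue length via the nla_put_u32() above, a user-space reader can extract it with a standard libmnl attribute walk; a sketch (the callback name is hypothetical):

#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/net_dropmon.h>
#include <stdint.h>

/* Sketch: pull NET_DM_ATTR_QUEUE_LEN out of a CONFIG_GET reply. */
static int queue_len_attr_cb(const struct nlattr *attr, void *data)
{
	uint32_t *qlen = data;

	if (mnl_attr_get_type(attr) == NET_DM_ATTR_QUEUE_LEN &&
	    mnl_attr_validate(attr, MNL_TYPE_U32) >= 0)
		*qlen = mnl_attr_get_u32(attr);

	return MNL_CB_OK;
}

/* Usage (inside an mnl_cb_t message callback):
 *	uint32_t qlen = 0;
 *	mnl_attr_parse(nlh, sizeof(struct genlmsghdr), queue_len_attr_cb, &qlen);
 */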
[NET_DM_ATTR_UNSPEC] = { .strict_start_type = NET_DM_ATTR_UNSPEC + 1 },
[NET_DM_ATTR_ALERT_MODE] = { .type = NLA_U8 },
[NET_DM_ATTR_TRUNC_LEN] = { .type = NLA_U32 },
+ [NET_DM_ATTR_QUEUE_LEN] = { .type = NLA_U32 },
};
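The policy entry only type-checks NET_DM_ATTR_QUEUE_LEN as a u32, so any value, including 0, is accepted. Purely as a hypothetical variant, not part of this patch, a range policy could reject a zero-length queue already during validation:

/* Hypothetical alternative, not what this patch does: reject 0 at
 * policy level instead of accepting any u32 value. */
[NET_DM_ATTR_QUEUE_LEN] = NLA_POLICY_MIN(NLA_U32, 1),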
static const struct genl_ops dropmon_ops[] = {