#include "qosify.h"
+struct qosify_map_class;
+
static int qosify_map_entry_cmp(const void *k1, const void *k2, void *ptr);
static int qosify_map_fds[__CL_MAP_MAX];
static AVL_TREE(map_data, qosify_map_entry_cmp, false, NULL);
static LIST_HEAD(map_files);
-static AVL_TREE(map_aliases, avl_strcmp, false, NULL);
+static struct qosify_map_class *map_class[QOSIFY_MAX_CLASS_ENTRIES];
static uint32_t next_timeout;
-static struct qosify_dscp_val qosify_dscp_default[2] = {
- { 0xff, 0xff },
- { 0xff, 0xff }
-};
+static uint8_t qosify_dscp_default[2] = { 0xff, 0xff };
int qosify_map_timeout;
int qosify_active_timeout;
struct qosify_config config;
+struct qosify_flow_config flow_config;
struct qosify_map_file {
struct list_head list;
char filename[];
};
-struct qosify_map_alias {
- struct avl_node avl;
- struct qosify_dscp_val value;
+struct qosify_map_class {
+ const char *name;
+ struct qosify_class data;
};
static const struct {
[CL_MAP_IPV4_ADDR] = { "ipv4_map", "ipv4_addr" },
[CL_MAP_IPV6_ADDR] = { "ipv6_map", "ipv6_addr" },
[CL_MAP_CONFIG] = { "config", "config" },
+ [CL_MAP_CLASS] = { "class_map", "class" },
[CL_MAP_DNS] = { "dns", "dns" },
};
bpf_map_delete_elem(fd, &key);
}
-static void __qosify_map_set_dscp_default(enum qosify_map_id id, struct qosify_dscp_val *val)
+static void __qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val)
{
struct qosify_map_data data = {
.id = id,
};
- int fd = qosify_map_fds[id];
+ struct qosify_class class = {
+ .val.ingress = val,
+ .val.egress = val,
+ };
+ uint32_t key;
+ int fd;
int i;
- val->flags |= QOSIFY_VAL_FLAG_PRIO_CHECK |
- QOSIFY_VAL_FLAG_BULK_CHECK;
+ if (id == CL_MAP_TCP_PORTS)
+ key = QOSIFY_MAX_CLASS_ENTRIES;
+ else if (id == CL_MAP_UDP_PORTS)
+ key = QOSIFY_MAX_CLASS_ENTRIES + 1;
+ else
+ return;
+
+ fd = qosify_map_fds[CL_MAP_CLASS];
+ if (val & QOSIFY_DSCP_CLASS_FLAG) {
+ uint8_t fallback = val & QOSIFY_DSCP_FALLBACK_FLAG;
+ val &= QOSIFY_DSCP_VALUE_MASK;
+ if (val >= ARRAY_SIZE(map_class) || !map_class[val])
+ return;
+
+ class.val.ingress = map_class[val]->data.val.ingress | fallback;
+ class.val.egress = map_class[val]->data.val.egress | fallback;
+ }
+
+ memcpy(&class.config, &flow_config, sizeof(class.config));
+ bpf_map_update_elem(fd, &key, &class, BPF_ANY);
+
+ val = key | QOSIFY_DSCP_CLASS_FLAG;
+ fd = qosify_map_fds[id];
for (i = 0; i < (1 << 16); i++) {
data.addr.port = htons(i);
if (avl_find(&map_data, &data))
continue;
- bpf_map_update_elem(fd, &data.addr, val, BPF_ANY);
+ bpf_map_update_elem(fd, &data.addr, &val, BPF_ANY);
}
}
-void qosify_map_set_dscp_default(enum qosify_map_id id, struct qosify_dscp_val val)
+void qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val)
{
bool udp;
return;
qosify_dscp_default[udp] = val;
- __qosify_map_set_dscp_default(id, &qosify_dscp_default[udp]);
+ __qosify_map_set_dscp_default(id, val);
}
int qosify_map_init(void)
static void __qosify_map_set_entry(struct qosify_map_data *data)
{
int fd = qosify_map_fds[data->id];
- struct qosify_dscp_val prev_dscp = { 0xff, 0xff };
struct qosify_map_entry *e;
bool file = data->file;
+ uint8_t prev_dscp = 0xff;
int32_t delta = 0;
- bool add = data->dscp.ingress != 0xff;
+ bool add = data->dscp != 0xff;
e = avl_find_element(&map_data, data, e, avl);
if (!e) {
e->data.dscp = e->data.file_dscp;
}
- if (memcmp(&e->data.dscp, &prev_dscp, sizeof(prev_dscp)) != 0 &&
- data->id < CL_MAP_DNS) {
+ if (e->data.dscp != prev_dscp && data->id < CL_MAP_DNS) {
struct qosify_ip_map_val val = {
.dscp = e->data.dscp,
.seen = 1,
}
int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str,
- struct qosify_dscp_val dscp)
+ uint8_t dscp)
{
struct qosify_map_data data = {
.id = id,
return 0;
}
-int qosify_map_dscp_value(const char *val, struct qosify_dscp_val *dscp_val)
+static int
+qosify_map_check_class(const char *val, uint8_t *dscp_val)
{
- struct qosify_map_alias *alias;
- bool fallback = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map_class); i++) {
+ if (map_class[i] && !strcmp(val, map_class[i]->name)) {
+ *dscp_val = i | QOSIFY_DSCP_CLASS_FLAG;
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
+int qosify_map_dscp_value(const char *val, uint8_t *dscp_val)
+{
+ uint8_t fallback = 0;
if (*val == '+') {
- fallback = true;
+ fallback = QOSIFY_DSCP_FALLBACK_FLAG;
val++;
}
- alias = avl_find_element(&map_aliases, val, alias, avl);
- if (alias) {
- *dscp_val = alias->value;
- } else {
- if (__qosify_map_dscp_value(val, &dscp_val->egress))
+ if (qosify_map_check_class(val, dscp_val) &&
+ __qosify_map_dscp_value(val, dscp_val))
return -1;
- dscp_val->ingress = dscp_val->egress;
- }
-
- if (fallback) {
- dscp_val->ingress |= (1 << 6);
- dscp_val->egress |= (1 << 6);
- }
+ *dscp_val |= fallback;
return 0;
}
qosify_map_parse_line(char *str)
{
const char *key, *value;
- struct qosify_dscp_val dscp;
+ uint8_t dscp;
str = str_skip(str, true);
key = str;
void qosify_map_reset_config(void)
{
- struct qosify_dscp_val val = {};
-
qosify_map_clear_files();
- qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, val);
- qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, val);
+ qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, 0);
+ qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, 0);
qosify_map_timeout = 3600;
qosify_active_timeout = 300;
memset(&config, 0, sizeof(config));
- config.flow.dscp_prio.ingress = 0xff;
- config.flow.dscp_prio.egress = 0xff;
- config.flow.dscp_bulk.ingress = 0xff;
- config.flow.dscp_bulk.egress = 0xff;
- config.dscp_icmp.ingress = 0xff;
- config.dscp_icmp.egress = 0xff;
+ flow_config.dscp_prio = 0xff;
+ flow_config.dscp_bulk = 0xff;
+ config.dscp_icmp = 0xff;
}
void qosify_map_reload(void)
int buf_len = 8;
char *buf;
+ if (dscp & QOSIFY_DSCP_CLASS_FLAG) {
+ const char *val;
+ int idx;
+
+ idx = dscp & QOSIFY_DSCP_VALUE_MASK;
+ if (map_class[idx])
+ val = map_class[idx]->name;
+ else
+ val = "<invalid>";
+
+ blobmsg_printf(b, name, "%s%s",
+ (dscp & QOSIFY_DSCP_FALLBACK_FLAG) ? "+" : "", val);
+ return;
+ }
+
buf = blobmsg_alloc_string_buffer(b, name, buf_len);
qosify_map_dscp_codepoint_str(buf, buf_len, dscp);
blobmsg_add_string_buffer(b);
blobmsg_add_u8(b, "file", e->data.file);
blobmsg_add_u8(b, "user", e->data.user);
- blobmsg_add_dscp(b, "dscp_ingress", e->data.dscp.ingress);
- blobmsg_add_dscp(b, "dscp_egress", e->data.dscp.egress);
+ blobmsg_add_dscp(b, "dscp", e->data.dscp);
blobmsg_add_string(b, "type", qosify_map_info[e->data.id].type_name);
blobmsg_close_array(b, a);
}
+static int32_t
+qosify_map_get_class_id(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(map_class); i++)
+ if (map_class[i] && !strcmp(map_class[i]->name, name))
+ return i;
+
+ for (i = 0; i < ARRAY_SIZE(map_class); i++)
+ if (!map_class[i])
+ return i;
+
+ for (i = 0; i < ARRAY_SIZE(map_class); i++) {
+ if (!(map_class[i]->data.flags & QOSIFY_CLASS_FLAG_PRESENT)) {
+ free(map_class[i]);
+ map_class[i] = NULL;
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+int map_fill_dscp_value(uint8_t *dest, struct blob_attr *attr, bool reset)
+{
+ if (reset)
+ *dest = 0xff;
+
+ if (!attr)
+ return 0;
+
+ if (qosify_map_dscp_value(blobmsg_get_string(attr), dest))
+ return -1;
+
+ return 0;
+}
+
+int map_parse_flow_config(struct qosify_flow_config *cfg, struct blob_attr *attr,
+ bool reset)
+{
+ enum {
+ CL_CONFIG_DSCP_PRIO,
+ CL_CONFIG_DSCP_BULK,
+ CL_CONFIG_BULK_TIMEOUT,
+ CL_CONFIG_BULK_PPS,
+ CL_CONFIG_PRIO_PKT_LEN,
+ __CL_CONFIG_MAX
+ };
+ static const struct blobmsg_policy policy[__CL_CONFIG_MAX] = {
+ [CL_CONFIG_DSCP_PRIO] = { "dscp_prio", BLOBMSG_TYPE_STRING },
+ [CL_CONFIG_DSCP_BULK] = { "dscp_bulk", BLOBMSG_TYPE_STRING },
+ [CL_CONFIG_BULK_TIMEOUT] = { "bulk_trigger_timeout", BLOBMSG_TYPE_INT32 },
+ [CL_CONFIG_BULK_PPS] = { "bulk_trigger_pps", BLOBMSG_TYPE_INT32 },
+ [CL_CONFIG_PRIO_PKT_LEN] = { "prio_max_avg_pkt_len", BLOBMSG_TYPE_INT32 },
+ };
+ struct blob_attr *tb[__CL_CONFIG_MAX];
+ struct blob_attr *cur;
+
+ if (reset)
+ memset(cfg, 0, sizeof(*cfg));
+
+ blobmsg_parse(policy, __CL_CONFIG_MAX, tb, blobmsg_data(attr), blobmsg_len(attr));
+
+ if (map_fill_dscp_value(&cfg->dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset) ||
+ map_fill_dscp_value(&cfg->dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset))
+ return -1;
+
+ if ((cur = tb[CL_CONFIG_BULK_TIMEOUT]) != NULL)
+ cfg->bulk_trigger_timeout = blobmsg_get_u32(cur);
+
+ if ((cur = tb[CL_CONFIG_BULK_PPS]) != NULL)
+ cfg->bulk_trigger_pps = blobmsg_get_u32(cur);
+
+ if ((cur = tb[CL_CONFIG_PRIO_PKT_LEN]) != NULL)
+ cfg->prio_max_avg_pkt_len = blobmsg_get_u32(cur);
+
+ return 0;
+}
+
static int
-qosify_map_create_alias(struct blob_attr *attr)
+qosify_map_create_class(struct blob_attr *attr)
{
- struct qosify_map_alias *alias;
+ struct qosify_map_class *class;
enum {
- MAP_ALIAS_INGRESS,
- MAP_ALIAS_EGRESS,
- __MAP_ALIAS_MAX
+ MAP_CLASS_INGRESS,
+ MAP_CLASS_EGRESS,
+ __MAP_CLASS_MAX
};
- static const struct blobmsg_policy policy[__MAP_ALIAS_MAX] = {
- [MAP_ALIAS_INGRESS] = { .type = BLOBMSG_TYPE_STRING },
- [MAP_ALIAS_EGRESS] = { .type = BLOBMSG_TYPE_STRING },
+ static const struct blobmsg_policy policy[__MAP_CLASS_MAX] = {
+ [MAP_CLASS_INGRESS] = { "ingress", BLOBMSG_TYPE_STRING },
+ [MAP_CLASS_EGRESS] = { "egress", BLOBMSG_TYPE_STRING },
};
- struct blob_attr *tb[__MAP_ALIAS_MAX];
+ struct blob_attr *tb[__MAP_CLASS_MAX];
const char *name;
char *name_buf;
+ int32_t slot;
- if (blobmsg_check_array(attr, BLOBMSG_TYPE_STRING) != 2)
- return -1;
+ blobmsg_parse(policy, __MAP_CLASS_MAX, tb,
+ blobmsg_data(attr), blobmsg_len(attr));
- blobmsg_parse_array(policy, __MAP_ALIAS_MAX, tb,
- blobmsg_data(attr), blobmsg_len(attr));
-
- if (!tb[MAP_ALIAS_INGRESS] || !tb[MAP_ALIAS_EGRESS])
+ if (!tb[MAP_CLASS_INGRESS] || !tb[MAP_CLASS_EGRESS])
return -1;
name = blobmsg_name(attr);
- alias = calloc_a(sizeof(*alias), &name_buf, strlen(name) + 1);
- alias->avl.key = strcpy(name_buf, name);
- if (__qosify_map_dscp_value(blobmsg_get_string(tb[MAP_ALIAS_INGRESS]),
- &alias->value.ingress) ||
- __qosify_map_dscp_value(blobmsg_get_string(tb[MAP_ALIAS_EGRESS]),
- &alias->value.egress) ||
- avl_insert(&map_aliases, &alias->avl)) {
- free(alias);
+ slot = qosify_map_get_class_id(name);
+ if (slot < 0)
+ return -1;
+
+ class = map_class[slot];
+ if (!class) {
+ class = calloc_a(sizeof(*class), &name_buf, strlen(name) + 1);
+ class->name = strcpy(name_buf, name);
+ map_class[slot] = class;
+ }
+
+ class->data.flags |= QOSIFY_CLASS_FLAG_PRESENT;
+ if (__qosify_map_dscp_value(blobmsg_get_string(tb[MAP_CLASS_INGRESS]),
+ &class->data.val.ingress) ||
+ __qosify_map_dscp_value(blobmsg_get_string(tb[MAP_CLASS_EGRESS]),
+ &class->data.val.egress)) {
+ map_class[slot] = NULL;
+ free(class);
return -1;
}
return 0;
}
-void qosify_map_set_aliases(struct blob_attr *val)
+void qosify_map_set_classes(struct blob_attr *val)
{
- struct qosify_map_alias *alias, *tmp;
+ int fd = qosify_map_fds[CL_MAP_CLASS];
+ struct qosify_class empty_data = {};
struct blob_attr *cur;
+ int32_t i;
int rem;
- avl_remove_all_elements(&map_aliases, alias, avl, tmp)
- free(alias);
+ for (i = 0; i < ARRAY_SIZE(map_class); i++)
+ if (map_class[i])
+ map_class[i]->data.flags &= ~QOSIFY_CLASS_FLAG_PRESENT;
blobmsg_for_each_attr(cur, val, rem)
- qosify_map_create_alias(cur);
+ qosify_map_create_class(cur);
+
+ for (i = 0; i < ARRAY_SIZE(map_class); i++) {
+ if (map_class[i] &&
+ (map_class[i]->data.flags & QOSIFY_CLASS_FLAG_PRESENT))
+ continue;
+
+ free(map_class[i]);
+ map_class[i] = NULL;
+ }
+
+ blobmsg_for_each_attr(cur, val, rem) {
+ i = qosify_map_get_class_id(blobmsg_name(cur));
+ if (i < 0 || !map_class[i])
+ continue;
+
+ map_parse_flow_config(&map_class[i]->data.config, cur, true);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(map_class); i++) {
+ struct qosify_class *data;
+
+ data = map_class[i] ? &map_class[i]->data : &empty_data;
+ bpf_map_update_elem(fd, &i, data, BPF_ANY);
+ }
}
void qosify_map_update_config(void)
struct flow_bucket {
__u32 last_update;
__u32 pkt_len_avg;
- __u16 pkt_count;
- struct qosify_dscp_val val;
- __u8 bulk_timeout;
-} __packed;
+ __u32 pkt_count;
+ __u32 bulk_timeout;
+};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(pinning, 1);
__type(key, __u32);
- __type(value, struct qosify_dscp_val);
+ __type(value, __u8);
__uint(max_entries, 1 << 16);
} port_array_t;
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__uint(pinning, 1);
__type(key, __u32);
- __uint(value_size, sizeof(struct flow_bucket));
+ __type(value, struct flow_bucket);
__uint(max_entries, QOSIFY_FLOW_BUCKETS);
} flow_map SEC(".maps");
__uint(map_flags, BPF_F_NO_PREALLOC);
} ipv6_map SEC(".maps");
+struct {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __uint(pinning, 1);
+ __type(key, __u32);
+ __type(value, struct qosify_class);
+ __uint(max_entries, QOSIFY_MAX_CLASS_ENTRIES +
+ QOSIFY_DEFAULT_CLASS_ENTRIES);
+} class_map SEC(".maps");
+
static struct qosify_config *get_config(void)
{
__u32 key = 0;
}
static __always_inline void
-ipv4_change_dsfield(struct iphdr *iph, __u8 mask, __u8 value, bool force)
+ipv4_change_dsfield(struct __sk_buff *skb, __u32 offset,
+ __u8 mask, __u8 value, bool force)
{
- __u32 check = bpf_ntohs(iph->check);
+ struct iphdr *iph;
+ __u32 check;
__u8 dsfield;
+ iph = skb_ptr(skb, offset);
+ if (skb_check(skb, iph + 1))
+ return;
+
+ check = bpf_ntohs(iph->check);
if ((iph->tos & mask) && !force)
return;
}
static __always_inline void
-ipv6_change_dsfield(struct ipv6hdr *ipv6h, __u8 mask, __u8 value, bool force)
+ipv6_change_dsfield(struct __sk_buff *skb, __u32 offset,
+ __u8 mask, __u8 value, bool force)
{
- __u16 *p = (__u16 *)ipv6h;
+ struct ipv6hdr *ipv6h;
+ __u16 *p;
__u16 val;
+ ipv6h = skb_ptr(skb, offset);
+ if (skb_check(skb, ipv6h + 1))
+ return;
+
+ p = (__u16 *)ipv6h;
if (((*p >> 4) & mask) && !force)
return;
static void
parse_l4proto(struct qosify_config *config, struct __sk_buff *skb,
__u32 offset, __u8 proto, bool ingress,
- struct qosify_dscp_val *out_val)
+ __u8 *out_val)
{
- struct qosify_dscp_val *value;
struct udphdr *udp;
__u32 src, dest, key;
+ __u8 *value;
udp = skb_ptr(skb, offset);
if (skb_check(skb, &udp->len))
static __always_inline void
check_flow_bulk(struct qosify_flow_config *config, struct __sk_buff *skb,
- struct flow_bucket *flow, struct qosify_dscp_val *out_val)
+ struct flow_bucket *flow, __u8 *out_val)
{
bool trigger = false;
__s32 delta;
if (!config->bulk_trigger_pps)
return;
+ time = cur_time();
if (!flow->last_update)
goto reset;
- time = cur_time();
delta = time - flow->last_update;
if ((u32)delta > FLOW_TIMEOUT)
goto reset;
- if (flow->pkt_count < 0xffff)
- flow->pkt_count++;
-
+ flow->pkt_count++;
if (flow->pkt_count > config->bulk_trigger_pps) {
- flow->val = config->dscp_bulk;
- flow->val.flags = QOSIFY_VAL_FLAG_BULK_CHECK;
flow->bulk_timeout = config->bulk_trigger_timeout + 1;
trigger = true;
}
if (delta >= FLOW_CHECK_INTERVAL) {
- if (flow->bulk_timeout && !trigger) {
+ if (flow->bulk_timeout && !trigger)
flow->bulk_timeout--;
- if (!flow->bulk_timeout)
- flow->val.flags = 0;
- }
goto clear;
}
- return;
+ goto out;
reset:
- flow->val.flags = 0;
flow->pkt_len_avg = 0;
clear:
flow->pkt_count = 1;
flow->last_update = time;
+out:
+ if (flow->bulk_timeout)
+ *out_val = config->dscp_bulk;
}
static __always_inline void
check_flow_prio(struct qosify_flow_config *config, struct __sk_buff *skb,
- struct flow_bucket *flow, struct qosify_dscp_val *out_val)
+ struct flow_bucket *flow, __u8 *out_val)
{
- if ((flow->val.flags & QOSIFY_VAL_FLAG_BULK_CHECK) ||
- !config->prio_max_avg_pkt_len)
- return;
-
- if (ewma(&flow->pkt_len_avg, skb->len) > config->prio_max_avg_pkt_len) {
- flow->val.flags = 0;
+ if (flow->bulk_timeout)
return;
- }
- flow->val = config->dscp_prio;
- flow->val.flags = QOSIFY_VAL_FLAG_PRIO_CHECK;
+ if (config->prio_max_avg_pkt_len &&
+ ewma(&flow->pkt_len_avg, skb->len) <= config->prio_max_avg_pkt_len)
+ *out_val = config->dscp_prio;
}
static __always_inline void
check_flow(struct qosify_flow_config *config, struct __sk_buff *skb,
- struct qosify_dscp_val *out_val)
+ __u8 *out_val)
{
struct flow_bucket flow_data;
struct flow_bucket *flow;
__u32 hash;
- if (!(out_val->flags & (QOSIFY_VAL_FLAG_PRIO_CHECK |
- QOSIFY_VAL_FLAG_BULK_CHECK)))
- return;
-
if (!config)
return;
return;
}
-
- if (out_val->flags & QOSIFY_VAL_FLAG_BULK_CHECK)
- check_flow_bulk(config, skb, flow, out_val);
- if (out_val->flags & QOSIFY_VAL_FLAG_PRIO_CHECK)
- check_flow_prio(config, skb, flow, out_val);
-
- if (flow->val.flags & out_val->flags)
- *out_val = flow->val;
+ check_flow_bulk(config, skb, flow, out_val);
+ check_flow_prio(config, skb, flow, out_val);
}
static __always_inline struct qosify_ip_map_val *
parse_ipv4(struct qosify_config *config, struct __sk_buff *skb, __u32 *offset,
- bool ingress, struct qosify_dscp_val *out_val)
+ bool ingress, __u8 *out_val)
{
- struct qosify_dscp_val *value;
struct iphdr *iph;
__u8 ipproto;
int hdr_len;
static __always_inline struct qosify_ip_map_val *
parse_ipv6(struct qosify_config *config, struct __sk_buff *skb, __u32 *offset,
- bool ingress, struct qosify_dscp_val *out_val)
+ bool ingress, __u8 *out_val)
{
- struct qosify_dscp_val *value;
struct ipv6hdr *iph;
__u8 ipproto;
void *key;
return bpf_map_lookup_elem(&ipv6_map, key);
}
+static __always_inline int
+dscp_lookup_class(uint8_t *dscp, bool ingress, struct qosify_class **out_class)
+{
+ struct qosify_class *class;
+ __u8 fallback_flag;
+ __u32 key;
+
+ if (!(*dscp & QOSIFY_DSCP_CLASS_FLAG))
+ return 0;
+
+ fallback_flag = *dscp & QOSIFY_DSCP_FALLBACK_FLAG;
+ key = *dscp & QOSIFY_DSCP_VALUE_MASK;
+ class = bpf_map_lookup_elem(&class_map, &key);
+ if (!class)
+ return -1;
+
+ if (!(class->flags & QOSIFY_CLASS_FLAG_PRESENT))
+ return -1;
+
+ *dscp = dscp_val(&class->val, ingress);
+ *dscp |= fallback_flag;
+ *out_class = class;
+
+ return 0;
+}
+
SEC("classifier")
int classify(struct __sk_buff *skb)
{
bool ingress = module_flags & QOSIFY_INGRESS;
struct qosify_config *config;
+ struct qosify_class *class = NULL;
struct qosify_ip_map_val *ip_val;
- struct qosify_dscp_val val = {
- .ingress = 0xff,
- .egress = 0xff,
- .flags = 0,
- };
__u32 offset = 0;
__u32 iph_offset;
void *iph;
iph_offset = offset;
if (type == bpf_htons(ETH_P_IP))
- ip_val = parse_ipv4(config, skb, &offset, ingress, &val);
+ ip_val = parse_ipv4(config, skb, &offset, ingress, &dscp);
else if (type == bpf_htons(ETH_P_IPV6))
- ip_val = parse_ipv6(config, skb, &offset, ingress, &val);
+ ip_val = parse_ipv6(config, skb, &offset, ingress, &dscp);
else
return TC_ACT_OK;
if (ip_val) {
if (!ip_val->seen)
ip_val->seen = 1;
- val = ip_val->dscp;
+ dscp = ip_val->dscp;
}
- check_flow(&config->flow, skb, &val);
-
- dscp = dscp_val(&val, ingress);
- if (dscp == 0xff)
+ if (dscp_lookup_class(&dscp, ingress, &class))
return TC_ACT_OK;
+ if (class) {
+ check_flow(&class->config, skb, &dscp);
+
+ if (dscp_lookup_class(&dscp, ingress, &class))
+ return TC_ACT_OK;
+ }
+
dscp &= GENMASK(5, 0);
dscp <<= 2;
force = !(dscp & QOSIFY_DSCP_FALLBACK_FLAG);
- iph = skb_ptr(skb, iph_offset);
- if (skb_check(skb, (void *)iph + sizeof(struct ipv6hdr)))
- return TC_ACT_OK;
-
if (type == bpf_htons(ETH_P_IP))
- ipv4_change_dsfield(iph, INET_ECN_MASK, dscp, force);
+ ipv4_change_dsfield(skb, iph_offset, INET_ECN_MASK, dscp, force);
else if (type == bpf_htons(ETH_P_IPV6))
- ipv6_change_dsfield(iph, INET_ECN_MASK, dscp, force);
+ ipv6_change_dsfield(skb, iph_offset, INET_ECN_MASK, dscp, force);
return TC_ACT_OK;
}
#ifndef __BPF_QOSIFY_H
#define __BPF_QOSIFY_H
+#define QOSIFY_MAX_CLASS_ENTRIES 16
+#define QOSIFY_DEFAULT_CLASS_ENTRIES 2
+
#ifndef QOSIFY_FLOW_BUCKET_SHIFT
#define QOSIFY_FLOW_BUCKET_SHIFT 13
#endif
#define QOSIFY_INGRESS (1 << 0)
#define QOSIFY_IP_ONLY (1 << 1)
+#define QOSIFY_DSCP_VALUE_MASK ((1 << 6) - 1)
#define QOSIFY_DSCP_FALLBACK_FLAG (1 << 6)
+#define QOSIFY_DSCP_CLASS_FLAG (1 << 7)
-#define QOSIFY_VAL_FLAG_PRIO_CHECK (1 << 0)
-#define QOSIFY_VAL_FLAG_BULK_CHECK (1 << 1)
+#define QOSIFY_CLASS_FLAG_PRESENT (1 << 0)
struct qosify_dscp_val {
uint8_t ingress;
uint8_t egress;
- uint8_t flags;
-} __attribute__((packed));
+};
/* global config data */
struct qosify_flow_config {
- struct qosify_dscp_val dscp_prio;
- struct qosify_dscp_val dscp_bulk;
+ uint8_t dscp_prio;
+ uint8_t dscp_bulk;
uint8_t bulk_trigger_timeout;
uint16_t bulk_trigger_pps;
};
struct qosify_config {
- struct qosify_dscp_val dscp_icmp;
-
- struct qosify_flow_config flow;
+ uint8_t dscp_icmp;
};
struct qosify_ip_map_val {
- struct qosify_dscp_val dscp; /* must be first */
+ uint8_t dscp; /* must be first */
uint8_t seen;
};
+struct qosify_class {
+ struct qosify_flow_config config;
+
+ struct qosify_dscp_val val;
+
+ uint8_t flags;
+};
+
#endif
CL_MAP_UDP_PORTS,
CL_MAP_IPV4_ADDR,
CL_MAP_IPV6_ADDR,
+ CL_MAP_CLASS,
CL_MAP_CONFIG,
CL_MAP_DNS,
__CL_MAP_MAX,
bool file : 1;
bool user : 1;
- struct qosify_dscp_val dscp;
- struct qosify_dscp_val file_dscp;
+ uint8_t dscp;
+ uint8_t file_dscp;
union {
uint32_t port;
extern int qosify_map_timeout;
extern int qosify_active_timeout;
extern struct qosify_config config;
+extern struct qosify_flow_config flow_config;
int qosify_loader_init(void);
int qosify_map_init(void);
-int qosify_map_dscp_value(const char *val, struct qosify_dscp_val *dscp);
+int qosify_map_dscp_value(const char *val, uint8_t *dscp);
int qosify_map_load_file(const char *file);
int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str,
- struct qosify_dscp_val dscp);
+ uint8_t dscp);
void qosify_map_reload(void);
void qosify_map_clear_files(void);
void qosify_map_gc(void);
void qosify_map_dump(struct blob_buf *b);
-void qosify_map_set_dscp_default(enum qosify_map_id id, struct qosify_dscp_val val);
+void qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val);
void qosify_map_reset_config(void);
void qosify_map_update_config(void);
-void qosify_map_set_aliases(struct blob_attr *val);
+void qosify_map_set_classes(struct blob_attr *val);
int qosify_map_add_dns_host(char *host, const char *addr, const char *type, int ttl);
+int map_parse_flow_config(struct qosify_flow_config *cfg, struct blob_attr *attr,
+ bool reset);
+int map_fill_dscp_value(uint8_t *dest, struct blob_attr *attr, bool reset);
int qosify_iface_init(void);
void qosify_iface_config_update(struct blob_attr *ifaces, struct blob_attr *devs);
static struct blob_buf b;
static int
-qosify_ubus_add_array(struct blob_attr *attr, struct qosify_dscp_val val,
- enum qosify_map_id id)
+qosify_ubus_add_array(struct blob_attr *attr, uint8_t val, enum qosify_map_id id)
{
struct blob_attr *cur;
int rem;
int prev_timemout = qosify_map_timeout;
struct blob_attr *tb[__CL_ADD_MAX];
struct blob_attr *cur;
- struct qosify_dscp_val dscp = { 0xff, 0xff };
+ uint8_t dscp = 0xff;
int ret;
blobmsg_parse(qosify_add_policy, __CL_ADD_MAX, tb,
CL_CONFIG_TIMEOUT,
CL_CONFIG_DSCP_UDP,
CL_CONFIG_DSCP_TCP,
- CL_CONFIG_DSCP_PRIO,
- CL_CONFIG_DSCP_BULK,
CL_CONFIG_DSCP_ICMP,
- CL_CONFIG_BULK_TIMEOUT,
- CL_CONFIG_BULK_PPS,
- CL_CONFIG_PRIO_PKT_LEN,
CL_CONFIG_INTERFACES,
CL_CONFIG_DEVICES,
- CL_CONFIG_ALIASES,
+ CL_CONFIG_CLASSES,
__CL_CONFIG_MAX
};
[CL_CONFIG_TIMEOUT] = { "timeout", BLOBMSG_TYPE_INT32 },
[CL_CONFIG_DSCP_UDP] = { "dscp_default_udp", BLOBMSG_TYPE_STRING },
[CL_CONFIG_DSCP_TCP] = { "dscp_default_tcp", BLOBMSG_TYPE_STRING },
- [CL_CONFIG_DSCP_PRIO] = { "dscp_prio", BLOBMSG_TYPE_STRING },
- [CL_CONFIG_DSCP_BULK] = { "dscp_bulk", BLOBMSG_TYPE_STRING },
[CL_CONFIG_DSCP_ICMP] = { "dscp_icmp", BLOBMSG_TYPE_STRING },
- [CL_CONFIG_BULK_TIMEOUT] = { "bulk_trigger_timeout", BLOBMSG_TYPE_INT32 },
- [CL_CONFIG_BULK_PPS] = { "bulk_trigger_pps", BLOBMSG_TYPE_INT32 },
- [CL_CONFIG_PRIO_PKT_LEN] = { "prio_max_avg_pkt_len", BLOBMSG_TYPE_INT32 },
[CL_CONFIG_INTERFACES] = { "interfaces", BLOBMSG_TYPE_TABLE },
[CL_CONFIG_DEVICES] = { "devices", BLOBMSG_TYPE_TABLE },
- [CL_CONFIG_ALIASES] = { "aliases", BLOBMSG_TYPE_TABLE },
+ [CL_CONFIG_CLASSES] = { "classes", BLOBMSG_TYPE_TABLE },
};
-static int __set_dscp(struct qosify_dscp_val *dest, struct blob_attr *attr, bool reset)
-{
- if (reset) {
- dest->ingress = 0xff;
- dest->egress = 0xff;
- }
-
- if (!attr)
- return 0;
-
- if (qosify_map_dscp_value(blobmsg_get_string(attr), dest))
- return -1;
-
- return 0;
-}
-
static int
qosify_ubus_config(struct ubus_context *ctx, struct ubus_object *obj,
struct ubus_request_data *req, const char *method,
{
struct blob_attr *tb[__CL_CONFIG_MAX];
struct blob_attr *cur;
- struct qosify_dscp_val dscp;
+ uint8_t dscp;
bool reset = false;
int ret;
if (reset)
qosify_map_reset_config();
- if ((cur = tb[CL_CONFIG_ALIASES]) != NULL || reset)
- qosify_map_set_aliases(cur);
+ if ((cur = tb[CL_CONFIG_CLASSES]) != NULL || reset)
+ qosify_map_set_classes(cur);
if ((cur = tb[CL_CONFIG_TIMEOUT]) != NULL)
qosify_map_timeout = blobmsg_get_u32(cur);
(ret = qosify_ubus_set_files(cur) != 0))
return ret;
- __set_dscp(&dscp, tb[CL_CONFIG_DSCP_UDP], true);
- if (dscp.ingress != 0xff)
- qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, dscp);
-
- __set_dscp(&dscp, tb[CL_CONFIG_DSCP_TCP], true);
- if (dscp.ingress != 0xff)
- qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, dscp);
-
- if (__set_dscp(&config.flow.dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset) ||
- __set_dscp(&config.flow.dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset) ||
- __set_dscp(&config.dscp_icmp, tb[CL_CONFIG_DSCP_ICMP], reset))
+ if (map_parse_flow_config(&flow_config, msg, reset) ||
+ map_fill_dscp_value(&config.dscp_icmp, tb[CL_CONFIG_DSCP_ICMP], reset))
return UBUS_STATUS_INVALID_ARGUMENT;
- if ((cur = tb[CL_CONFIG_BULK_TIMEOUT]) != NULL)
- config.flow.bulk_trigger_timeout = blobmsg_get_u32(cur);
-
- if ((cur = tb[CL_CONFIG_BULK_PPS]) != NULL)
- config.flow.bulk_trigger_pps = blobmsg_get_u32(cur);
+ map_fill_dscp_value(&dscp, tb[CL_CONFIG_DSCP_UDP], true);
+ if (dscp != 0xff)
+ qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, dscp);
- if ((cur = tb[CL_CONFIG_PRIO_PKT_LEN]) != NULL)
- config.flow.prio_max_avg_pkt_len = blobmsg_get_u32(cur);
+ map_fill_dscp_value(&dscp, tb[CL_CONFIG_DSCP_TCP], true);
+ if (dscp != 0xff)
+ qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, dscp);
qosify_map_update_config();