This allows different DSCP tags to be applied on the LAN side and the WAN side (separate ingress and egress values).
Signed-off-by: Felix Fietkau <nbd@nbd.name>
static AVL_TREE(map_data, qosify_map_entry_cmp, false, NULL);
static LIST_HEAD(map_files);
static uint32_t next_timeout;
-static uint8_t qosify_dscp_default[2] = { 0xff, 0xff };
+static struct qosify_dscp_val qosify_dscp_default[2] = {
+ { 0xff, 0xff },
+ { 0xff, 0xff }
+};
int qosify_map_timeout;
int qosify_active_timeout;
struct qosify_config config;
bpf_map_delete_elem(fd, &key);
}
-static void __qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val)
+static void __qosify_map_set_dscp_default(enum qosify_map_id id, struct qosify_dscp_val *val)
{
struct qosify_map_data data = {
.id = id,
int fd = qosify_map_fds[id];
int i;
- val |= QOSIFY_DSCP_DEFAULT_FLAG;
+ val->ingress |= QOSIFY_DSCP_DEFAULT_FLAG;
+ val->egress |= QOSIFY_DSCP_DEFAULT_FLAG;
for (i = 0; i < (1 << 16); i++) {
data.addr.port = htons(i);
if (avl_find(&map_data, &data))
continue;
- bpf_map_update_elem(fd, &data.addr, &val, BPF_ANY);
+ bpf_map_update_elem(fd, &data.addr, val, BPF_ANY);
}
}
-void qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val)
+void qosify_map_set_dscp_default(enum qosify_map_id id, struct qosify_dscp_val val)
{
bool udp;
else
return;
- if (qosify_dscp_default[udp] == val)
+ if (!memcmp(&qosify_dscp_default[udp], &val, sizeof(val)))
return;
qosify_dscp_default[udp] = val;
- __qosify_map_set_dscp_default(id, val);
+ __qosify_map_set_dscp_default(id, &qosify_dscp_default[udp]);
}
int qosify_map_init(void)
static void __qosify_map_set_entry(struct qosify_map_data *data)
{
int fd = qosify_map_fds[data->id];
+ struct qosify_dscp_val prev_dscp = { 0xff, 0xff };
struct qosify_map_entry *e;
bool file = data->file;
int32_t delta = 0;
- bool add = data->dscp != 0xff;
- uint8_t prev_dscp = 0xff;
+ bool add = data->dscp.ingress != 0xff;
e = avl_find_element(&map_data, data, e, avl);
if (!e) {
e->data.dscp = e->data.file_dscp;
}
- if (e->data.dscp != prev_dscp && data->id < CL_MAP_DNS) {
+ if (memcmp(&e->data.dscp, &prev_dscp, sizeof(prev_dscp)) != 0 &&
+ data->id < CL_MAP_DNS) {
struct qosify_ip_map_val val = {
.dscp = e->data.dscp,
.seen = 1,
return 0;
}
-int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str, uint8_t dscp)
+int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str,
+ struct qosify_dscp_val dscp)
{
struct qosify_map_data data = {
.id = id,
return 0;
}
-int qosify_map_dscp_value(const char *val)
+int qosify_map_dscp_value(const char *val, struct qosify_dscp_val *dscp_val)
{
unsigned long dscp;
char *err;
if (dscp >= 64)
return -1;
- return dscp + (fallback << 6);
+ dscp_val->ingress = dscp_val->egress = dscp + (fallback << 6);
+
+ return 0;
}
static void
qosify_map_parse_line(char *str)
{
const char *key, *value;
- int dscp;
+ struct qosify_dscp_val dscp;
str = str_skip(str, true);
key = str;
str = str_skip(str, true);
value = str;
- dscp = qosify_map_dscp_value(value);
- if (dscp < 0)
+ if (qosify_map_dscp_value(value, &dscp))
return;
if (!strncmp(key, "dns:", 4))
void qosify_map_reset_config(void)
{
+ struct qosify_dscp_val val = {};
+
qosify_map_clear_files();
- qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, 0);
- qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, 0);
+ qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, val);
+ qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, val);
qosify_map_timeout = 3600;
qosify_active_timeout = 300;
memset(&config, 0, sizeof(config));
- config.dscp_prio = 0xff;
- config.dscp_bulk = 0xff;
- config.dscp_icmp = 0xff;
+ config.dscp_prio.ingress = 0xff;
+ config.dscp_prio.egress = 0xff;
+ config.dscp_bulk.ingress = 0xff;
+ config.dscp_bulk.egress = 0xff;
+ config.dscp_icmp.ingress = 0xff;
+ config.dscp_icmp.egress = 0xff;
}
void qosify_map_reload(void)
return 0;
}
+/* Format the DSCP codepoint as a string and append it to blob buffer b
+ * under attribute <name>. */
+static void
+blobmsg_add_dscp(struct blob_buf *b, const char *name, uint8_t dscp)
+{
+ int buf_len = 8;
+ char *buf;
+
+ buf = blobmsg_alloc_string_buffer(b, name, buf_len);
+ qosify_map_dscp_codepoint_str(buf, buf_len, dscp);
+ blobmsg_add_string_buffer(b);
+}
+
void qosify_map_dump(struct blob_buf *b)
{
blobmsg_add_u8(b, "file", e->data.file);
blobmsg_add_u8(b, "user", e->data.user);
- buf = blobmsg_alloc_string_buffer(b, "dscp", buf_len);
- qosify_map_dscp_codepoint_str(buf, buf_len, e->data.dscp);
- blobmsg_add_string_buffer(b);
+ blobmsg_add_dscp(b, "dscp_ingress", e->data.dscp.ingress);
+ blobmsg_add_dscp(b, "dscp_egress", e->data.dscp.egress);
blobmsg_add_string(b, "type", qosify_map_info[e->data.id].type_name);
blobmsg_add_string(b, "addr", e->data.addr.dns.pattern);
break;
default:
- *buf = 0;
break;
}
blobmsg_close_table(b, c);
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(pinning, 1);
__type(key, __u32);
- __type(value, __u8);
+ __type(value, struct qosify_dscp_val);
__uint(max_entries, 1 << 16);
} port_array_t;
return *avg >> EWMA_SHIFT;
}
+/* Select the DSCP codepoint for the given traffic direction.
+ * NOTE(review): reading both fields into locals before the ternary is
+ * presumably to keep the BPF verifier happy — confirm before simplifying. */
+static __always_inline __u8 dscp_val(struct qosify_dscp_val *val, bool ingress)
+{
+ __u8 ival = val->ingress;
+ __u8 eval = val->egress;
+
+ return ingress ? ival : eval;
+}
+
static __always_inline void
ipv4_change_dsfield(struct iphdr *iph, __u8 mask, __u8 value, bool force)
{
static void
parse_l4proto(struct qosify_config *config, struct __sk_buff *skb,
- __u32 offset, __u8 proto, __u8 *dscp_out)
+ __u32 offset, __u8 proto, __u8 *dscp_out, bool ingress)
{
struct udphdr *udp;
__u32 src, dest, key;
- __u8 *value;
+ struct qosify_dscp_val *value;
udp = skb_ptr(skb, offset);
if (skb_check(skb, &udp->len))
return;
if (config && (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6)) {
- *dscp_out = config->dscp_icmp;
+ *dscp_out = dscp_val(&config->dscp_icmp, ingress);
return;
}
- src = udp->source;
- dest = udp->dest;
-
- if (module_flags & QOSIFY_INGRESS)
- key = src;
+ if (ingress)
+ key = udp->source;
else
- key = dest;
+ key = udp->dest;
if (proto == IPPROTO_TCP) {
value = bpf_map_lookup_elem(&tcp_ports, &key);
if (!value)
return;
- *dscp_out = *value;
+ *dscp_out = dscp_val(value, ingress);
}
static __always_inline void
check_flow(struct qosify_config *config, struct __sk_buff *skb,
- uint8_t *dscp)
+ uint8_t *dscp, bool ingress)
{
struct flow_bucket flow_data;
struct flow_bucket *flow;
if (config->bulk_trigger_pps &&
flow->pkt_count > config->bulk_trigger_pps) {
- flow->dscp = config->dscp_bulk;
+ flow->dscp = dscp_val(&config->dscp_bulk, ingress);
flow->bulk_timeout = config->bulk_trigger_timeout;
}
out:
if (config->prio_max_avg_pkt_len &&
- flow->dscp != config->dscp_bulk) {
+ flow->dscp != dscp_val(&config->dscp_bulk, ingress)) {
if (ewma(&flow->pkt_len_avg, skb->len) <
config->prio_max_avg_pkt_len)
- flow->dscp = config->dscp_prio;
+ flow->dscp = dscp_val(&config->dscp_prio, ingress);
else
flow->dscp = 0xff;
}
}
static __always_inline void
-parse_ipv4(struct __sk_buff *skb, __u32 *offset)
+parse_ipv4(struct __sk_buff *skb, __u32 *offset, bool ingress)
{
struct qosify_config *config;
struct qosify_ip_map_val *ip_val;
+ struct qosify_dscp_val *value;
const __u32 zero_port = 0;
struct iphdr *iph;
__u8 dscp = 0xff;
- __u8 *value;
__u8 ipproto;
int hdr_len;
void *key;
return;
ipproto = iph->protocol;
- parse_l4proto(config, skb, *offset, ipproto, &dscp);
+ parse_l4proto(config, skb, *offset, ipproto, &dscp, ingress);
- if (module_flags & QOSIFY_INGRESS)
+ if (ingress)
key = &iph->saddr;
else
key = &iph->daddr;
if (ip_val) {
if (!ip_val->seen)
ip_val->seen = 1;
- dscp = ip_val->dscp;
+ dscp = dscp_val(&ip_val->dscp, ingress);
} else if (dscp == 0xff) {
/* use udp port 0 entry as fallback for non-tcp/udp */
value = bpf_map_lookup_elem(&udp_ports, &zero_port);
if (value)
- dscp = *value;
+ dscp = dscp_val(value, ingress);
}
- check_flow(config, skb, &dscp);
+ check_flow(config, skb, &dscp, ingress);
force = !(dscp & QOSIFY_DSCP_FALLBACK_FLAG);
dscp &= GENMASK(5, 0);
}
static __always_inline void
-parse_ipv6(struct __sk_buff *skb, __u32 *offset)
+parse_ipv6(struct __sk_buff *skb, __u32 *offset, bool ingress)
{
struct qosify_config *config;
struct qosify_ip_map_val *ip_val;
+ struct qosify_dscp_val *value;
const __u32 zero_port = 0;
struct ipv6hdr *iph;
__u8 dscp = 0;
- __u8 *value;
__u8 ipproto;
void *key;
bool force;
return;
ipproto = iph->nexthdr;
- if (module_flags & QOSIFY_INGRESS)
+ if (ingress)
key = &iph->saddr;
else
key = &iph->daddr;
- parse_l4proto(config, skb, *offset, ipproto, &dscp);
+ parse_l4proto(config, skb, *offset, ipproto, &dscp, ingress);
ip_val = bpf_map_lookup_elem(&ipv6_map, key);
if (ip_val) {
if (!ip_val->seen)
ip_val->seen = 1;
- dscp = ip_val->dscp;
+ dscp = dscp_val(&ip_val->dscp, ingress);
} else if (dscp == 0xff) {
/* use udp port 0 entry as fallback for non-tcp/udp */
value = bpf_map_lookup_elem(&udp_ports, &zero_port);
if (value)
- dscp = *value;
+ dscp = dscp_val(value, ingress);
}
- check_flow(config, skb, &dscp);
+ check_flow(config, skb, &dscp, ingress);
force = !(dscp & QOSIFY_DSCP_FALLBACK_FLAG);
dscp &= GENMASK(5, 0);
SEC("classifier")
int classify(struct __sk_buff *skb)
{
+ bool ingress = module_flags & QOSIFY_INGRESS;
__u32 offset = 0;
int type;
type = parse_ethernet(skb, &offset);
if (type == bpf_htons(ETH_P_IP))
- parse_ipv4(skb, &offset);
+ parse_ipv4(skb, &offset, ingress);
else if (type == bpf_htons(ETH_P_IPV6))
- parse_ipv6(skb, &offset);
+ parse_ipv6(skb, &offset, ingress);
return TC_ACT_OK;
}
#define QOSIFY_DSCP_FALLBACK_FLAG (1 << 6)
#define QOSIFY_DSCP_DEFAULT_FLAG (1 << 7)
+/* Per-direction DSCP values; the upper bits of each field carry
+ * QOSIFY_DSCP_FALLBACK_FLAG / QOSIFY_DSCP_DEFAULT_FLAG. */
+struct qosify_dscp_val {
+ uint8_t ingress;
+ uint8_t egress;
+};
+
/* global config data */
struct qosify_config {
- uint8_t dscp_prio;
- uint8_t dscp_bulk;
- uint8_t dscp_icmp;
+ struct qosify_dscp_val dscp_prio;
+ struct qosify_dscp_val dscp_bulk;
+ struct qosify_dscp_val dscp_icmp;
uint8_t bulk_trigger_timeout;
uint16_t bulk_trigger_pps;
};
struct qosify_ip_map_val {
- uint8_t dscp; /* must be first */
+ struct qosify_dscp_val dscp; /* must be first */
uint8_t seen;
};
bool file : 1;
bool user : 1;
- uint8_t dscp;
- uint8_t file_dscp;
+ struct qosify_dscp_val dscp;
+ struct qosify_dscp_val file_dscp;
union {
uint32_t port;
int qosify_loader_init(void);
int qosify_map_init(void);
-int qosify_map_dscp_value(const char *val);
+int qosify_map_dscp_value(const char *val, struct qosify_dscp_val *dscp);
int qosify_map_load_file(const char *file);
-int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str, uint8_t dscp);
+int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str,
+ struct qosify_dscp_val dscp);
void qosify_map_reload(void);
void qosify_map_clear_files(void);
void qosify_map_gc(void);
void qosify_map_dump(struct blob_buf *b);
-void qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val);
+void qosify_map_set_dscp_default(enum qosify_map_id id, struct qosify_dscp_val val);
void qosify_map_reset_config(void);
void qosify_map_update_config(void);
int qosify_map_add_dns_host(char *host, const char *addr, const char *type, int ttl);
static struct blob_buf b;
static int
-qosify_ubus_add_array(struct blob_attr *attr, uint8_t val, enum qosify_map_id id)
+qosify_ubus_add_array(struct blob_attr *attr, struct qosify_dscp_val val,
+ enum qosify_map_id id)
{
struct blob_attr *cur;
int rem;
int prev_timemout = qosify_map_timeout;
struct blob_attr *tb[__CL_ADD_MAX];
struct blob_attr *cur;
- int dscp = -1;
+ struct qosify_dscp_val dscp = { 0xff, 0xff };
int ret;
blobmsg_parse(qosify_add_policy, __CL_ADD_MAX, tb,
blobmsg_data(msg), blobmsg_len(msg));
if (!strcmp(method, "add")) {
- if ((cur = tb[CL_ADD_DSCP]) != NULL)
- dscp = qosify_map_dscp_value(blobmsg_get_string(cur));
- else
- return UBUS_STATUS_INVALID_ARGUMENT;
- if (dscp < 0)
+ if ((cur = tb[CL_ADD_DSCP]) == NULL ||
+ qosify_map_dscp_value(blobmsg_get_string(cur), &dscp))
return UBUS_STATUS_INVALID_ARGUMENT;
if ((cur = tb[CL_ADD_TIMEOUT]) != NULL)
qosify_map_timeout = blobmsg_get_u32(cur);
- } else {
- dscp = 0xff;
}
if ((cur = tb[CL_ADD_IPV4]) != NULL &&
[CL_CONFIG_DEVICES] = { "devices", BLOBMSG_TYPE_TABLE },
};
-static int __set_dscp(uint8_t *dest, struct blob_attr *attr, bool reset)
+static int __set_dscp(struct qosify_dscp_val *dest, struct blob_attr *attr, bool reset)
{
- int dscp;
-
- if (reset)
- *dest = 0xff;
+ if (reset) {
+ dest->ingress = 0xff;
+ dest->egress = 0xff;
+ }
if (!attr)
return 0;
- dscp = qosify_map_dscp_value(blobmsg_get_string(attr));
- if (dscp < 0)
+ if (qosify_map_dscp_value(blobmsg_get_string(attr), dest))
return -1;
- *dest = dscp;
-
return 0;
}
{
struct blob_attr *tb[__CL_CONFIG_MAX];
struct blob_attr *cur;
- uint8_t dscp;
+ struct qosify_dscp_val dscp;
bool reset = false;
int ret;
return ret;
__set_dscp(&dscp, tb[CL_CONFIG_DSCP_UDP], true);
- if (dscp != 0xff)
+ if (dscp.ingress != 0xff)
qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, dscp);
__set_dscp(&dscp, tb[CL_CONFIG_DSCP_TCP], true);
- if (dscp != 0xff)
+ if (dscp.ingress != 0xff)
qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, dscp);
- __set_dscp(&config.dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset);
- __set_dscp(&config.dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset);
- __set_dscp(&config.dscp_icmp, tb[CL_CONFIG_DSCP_ICMP], reset);
+ if (__set_dscp(&config.dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset) ||
+ __set_dscp(&config.dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset) ||
+ __set_dscp(&config.dscp_icmp, tb[CL_CONFIG_DSCP_ICMP], reset))
+ return UBUS_STATUS_INVALID_ARGUMENT;
if ((cur = tb[CL_CONFIG_BULK_TIMEOUT]) != NULL)
config.bulk_trigger_timeout = blobmsg_get_u32(cur);