From: Felix Fietkau
Date: Tue, 12 Oct 2021 12:41:21 +0000 (+0200)
Subject: Initial import
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=52a57bf0e4eb2c6419be304981c5c94adb32d037;p=project%2Fqosify.git

Initial import

Signed-off-by: Felix Fietkau
---

52a57bf0e4eb2c6419be304981c5c94adb32d037
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..3d73a60
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,15 @@
+cmake_minimum_required(VERSION 3.10)
+
+PROJECT(qosify C)
+
+ADD_DEFINITIONS(-Os -Wall -Wno-unknown-warning-option -Wno-array-bounds -Wno-format-truncation -Werror --std=gnu99)
+
+SET(CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
+
+find_library(bpf NAMES bpf)
+ADD_EXECUTABLE(qosify main.c loader.c map.c ubus.c interface.c)
+TARGET_LINK_LIBRARIES(qosify ${bpf} ubox ubus)
+
+INSTALL(TARGETS qosify
+	RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}
+)
diff --git a/README b/README
new file mode 100644
index 0000000..3b5dff8
--- /dev/null
+++ b/README
@@ -0,0 +1,91 @@
+QoSify is a simple daemon for setting up and managing CAKE along with a custom
+eBPF based classifier that sets DSCP fields of packets.
+
+It supports the following features:
+- simple TCP/UDP port based mapping
+- IP address based mapping
+- priority boosting based on average packet size
+- bulk flow detection based on number of packets per second
+- dynamically added IP entries with timeout
+
+It can be configured via ubus call qosify config.
+
+This call supports the following parameters:
+- "reset": BOOL
+  Reset the config to defaults instead of only updating supplied values
+
+- "files": ARRAY of STRING
+  List of files with port/IP mappings
+
+- "timeout": INT32
+  Default timeout for dynamically added entries
+
+- "dscp_default_udp": STRING
+  Default DSCP value for UDP packets
+
+- "dscp_default_tcp": STRING
+  Default DSCP value for TCP packets
+
+- "dscp_prio": STRING
+  DSCP value for priority-marked packets
+
+- "dscp_bulk": STRING
+  DSCP value for bulk-marked packets
+
+- "dscp_icmp": STRING
+  DSCP value for ICMP packets
+
+- "bulk_trigger_pps": INT32
+  Number of packets per second to trigger bulk flow detection
+
+- "bulk_trigger_timeout": INT32
+  Time below bulk_trigger_pps threshold until a bulk flow mark is removed
+
+- "prio_max_avg_pkt_len": INT32
+  Maximum average packet length for marking a flow as priority
+
+- "interfaces": TABLE of TABLE
+  netifd interfaces to enable QoS on
+
+- "devices": TABLE of TABLE
+  netdevs to enable QoS on
+
+
+interface/device properties:
+- "bandwidth_up": STRING
+  Uplink bandwidth (same format as tc)
+
+- "bandwidth_down": STRING
+  Downlink bandwidth (same format as tc)
+
+- "ingress": BOOL
+  Enable ingress shaping
+
+- "egress": BOOL
+  Enable egress shaping
+
+- "mode": STRING
+  CAKE diffserv mode
+
+- "nat": BOOL
+  Enable CAKE NAT host detection via conntrack
+
+- "host_isolate": BOOL
+  Enable CAKE host isolation
+
+- "autorate_ingress": BOOL
+  Enable CAKE automatic rate estimation for ingress
+
+- "ingress_options": STRING
+  CAKE ingress options
+
+- "egress_options": STRING
+  CAKE egress options
+
+- "options": STRING
+  CAKE options for ingress + egress
+
+
+Planned features:
+- Integration with dnsmasq to support hostname pattern based DSCP marking
+- Support for LAN host based priority
diff --git a/interface.c b/interface.c
new file mode 100644
index 0000000..82a6e05
--- /dev/null
+++ b/interface.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2021 Felix Fietkau
+ */
+#include
+#include
+#include
+#include
+#include
+#include + +#include +#include + +#include +#include +#include + +#include "qosify.h" + +static void interface_update_cb(struct vlist_tree *tree, + struct vlist_node *node_new, + struct vlist_node *node_old); + +static VLIST_TREE(devices, avl_strcmp, interface_update_cb, true, false); +static VLIST_TREE(interfaces, avl_strcmp, interface_update_cb, true, false); +static int socket_fd; + +#define APPEND(_buf, _ofs, _format, ...) _ofs += snprintf(_buf + _ofs, sizeof(_buf) - _ofs, _format, ##__VA_ARGS__) + +struct qosify_iface_config { + struct blob_attr *data; + + bool ingress; + bool egress; + bool nat; + bool host_isolate; + bool autorate_ingress; + + const char *bandwidth_up; + const char *bandwidth_down; + const char *mode; + const char *common_opts; + const char *ingress_opts; + const char *egress_opts; +}; + + +struct qosify_iface { + struct vlist_node node; + + char ifname[IFNAMSIZ]; + bool active; + + bool device; + struct blob_attr *config_data; + struct qosify_iface_config config; +}; + +enum { + IFACE_ATTR_BW_UP, + IFACE_ATTR_BW_DOWN, + IFACE_ATTR_INGRESS, + IFACE_ATTR_EGRESS, + IFACE_ATTR_MODE, + IFACE_ATTR_NAT, + IFACE_ATTR_HOST_ISOLATE, + IFACE_ATTR_AUTORATE_IN, + IFACE_ATTR_INGRESS_OPTS, + IFACE_ATTR_EGRESS_OPTS, + IFACE_ATTR_OPTS, + __IFACE_ATTR_MAX +}; + +static inline const char *qosify_iface_name(struct qosify_iface *iface) +{ + return iface->node.avl.key; +} + +static void +iface_config_parse(struct blob_attr *attr, struct blob_attr **tb) +{ + static const struct blobmsg_policy policy[__IFACE_ATTR_MAX] = { + [IFACE_ATTR_BW_UP] = { "bandwidth_up", BLOBMSG_TYPE_STRING }, + [IFACE_ATTR_BW_DOWN] = { "bandwidth_down", BLOBMSG_TYPE_STRING }, + [IFACE_ATTR_INGRESS] = { "ingress", BLOBMSG_TYPE_BOOL }, + [IFACE_ATTR_EGRESS] = { "egress", BLOBMSG_TYPE_BOOL }, + [IFACE_ATTR_MODE] = { "mode", BLOBMSG_TYPE_STRING }, + [IFACE_ATTR_NAT] = { "nat", BLOBMSG_TYPE_BOOL }, + [IFACE_ATTR_HOST_ISOLATE] = { "host_isolate", BLOBMSG_TYPE_BOOL }, + [IFACE_ATTR_AUTORATE_IN] = { "autorate_ingress", BLOBMSG_TYPE_BOOL }, + [IFACE_ATTR_INGRESS_OPTS] = { "ingress_options", BLOBMSG_TYPE_STRING }, + [IFACE_ATTR_EGRESS_OPTS] = { "egress_options", BLOBMSG_TYPE_STRING }, + [IFACE_ATTR_OPTS] = { "options", BLOBMSG_TYPE_STRING }, + }; + + blobmsg_parse(policy, __IFACE_ATTR_MAX, tb, blobmsg_data(attr), blobmsg_len(attr)); +} + +static bool +iface_config_equal(struct qosify_iface *if1, struct qosify_iface *if2) +{ + struct blob_attr *tb1[__IFACE_ATTR_MAX], *tb2[__IFACE_ATTR_MAX]; + int i; + + iface_config_parse(if1->config_data, tb1); + iface_config_parse(if2->config_data, tb2); + + for (i = 0; i < __IFACE_ATTR_MAX; i++) { + if (!!tb1[i] != !!tb2[i]) + return false; + + if (!tb1[i]) + continue; + + if (blob_raw_len(tb1[i]) != blob_raw_len(tb2[i])) + return false; + + if (memcmp(tb1[i], tb2[i], blob_raw_len(tb1[i])) != 0) + return false; + } + + return true; +} + +static const char *check_str(struct blob_attr *attr) +{ + const char *str = blobmsg_get_string(attr); + + if (strchr(str, '\'')) + return NULL; + + return str; +} + +static void +iface_config_set(struct qosify_iface_config *cfg, struct blob_attr *attr) +{ + struct blob_attr *tb[__IFACE_ATTR_MAX]; + struct blob_attr *cur; + + iface_config_parse(attr, tb); + + memset(cfg, 0, sizeof(*cfg)); + + /* defaults */ + cfg->mode = "diffserv4"; + cfg->ingress = true; + cfg->egress = true; + cfg->host_isolate = true; + cfg->autorate_ingress = true; + + if ((cur = tb[IFACE_ATTR_BW_UP]) != NULL) + cfg->bandwidth_up = check_str(cur); + if ((cur = 
tb[IFACE_ATTR_BW_DOWN]) != NULL) + cfg->bandwidth_down = check_str(cur); + if ((cur = tb[IFACE_ATTR_MODE]) != NULL) + cfg->mode = check_str(cur); + if ((cur = tb[IFACE_ATTR_OPTS]) != NULL) + cfg->common_opts = check_str(cur); + if ((cur = tb[IFACE_ATTR_EGRESS_OPTS]) != NULL) + cfg->egress_opts = check_str(cur); + if ((cur = tb[IFACE_ATTR_INGRESS_OPTS]) != NULL) + cfg->ingress_opts = check_str(cur); + if ((cur = tb[IFACE_ATTR_INGRESS]) != NULL) + cfg->ingress = blobmsg_get_bool(cur); + if ((cur = tb[IFACE_ATTR_EGRESS]) != NULL) + cfg->egress = blobmsg_get_bool(cur); + if ((cur = tb[IFACE_ATTR_NAT]) != NULL) + cfg->nat = blobmsg_get_bool(cur); + if ((cur = tb[IFACE_ATTR_HOST_ISOLATE]) != NULL) + cfg->host_isolate = blobmsg_get_bool(cur); + if ((cur = tb[IFACE_ATTR_AUTORATE_IN]) != NULL) + cfg->autorate_ingress = blobmsg_get_bool(cur); +} + +static const char * +interface_ifb_name(struct qosify_iface *iface) +{ + static char ifname[IFNAMSIZ + 1] = "ifb-"; + int len = strlen(iface->ifname); + + if (len + 4 < IFNAMSIZ) { + snprintf(ifname + 4, IFNAMSIZ - 4, "%s", iface->ifname); + + return ifname; + } + + ifname[4] = iface->ifname[0]; + ifname[5] = iface->ifname[1]; + snprintf(ifname + 6, IFNAMSIZ - 6, "%s", iface->ifname + len - (IFNAMSIZ + 6) - 1); + + return ifname; +} + +static int run_cmd(char *cmd, bool ignore) +{ + char *argv[] = { "sh", "-c", cmd, NULL }; + bool first = true; + int status = -1; + char buf[512]; + int fds[2]; + FILE *f; + int pid; + + if (pipe(fds)) + return -1; + + pid = fork(); + if (!pid) { + close(fds[0]); + if (fds[1] != STDOUT_FILENO) + dup2(fds[1], STDOUT_FILENO); + if (fds[1] != STDERR_FILENO) + dup2(fds[1], STDERR_FILENO); + if (fds[1] > STDERR_FILENO) + close(fds[1]); + execv("/bin/sh", argv); + exit(1); + } + + if (pid < 0) + return -1; + + close(fds[1]); + f = fdopen(fds[0], "r"); + if (!f) { + close(fds[0]); + goto out; + } + + while (fgets(buf, sizeof(buf), f) != NULL) { + if (!strlen(buf)) + break; + if (ignore) + continue; + if (first) { + ULOG_WARN("Command: %s\n", cmd); + first = false; + } + ULOG_WARN("%s%s", buf, strchr(buf, '\n') ? "" : "\n"); + } + + fclose(f); + +out: + while (waitpid(pid, &status, 0) < 0) + if (errno != EINTR) + break; + + return status; +} + +static int +prepare_tc_cmd(char *buf, int len, const char *type, const char *cmd, + const char *dev, const char *extra) +{ + return snprintf(buf, len, "tc %s %s dev '%s' %s", type, cmd, dev, extra); +} + +static int +cmd_del_qdisc(const char *ifname, const char *type) +{ + char buf[64]; + + prepare_tc_cmd(buf, sizeof(buf), "qdisc", "del", ifname, type); + + return run_cmd(buf, true); +} + +static int +cmd_add_qdisc(struct qosify_iface *iface, const char *ifname, bool egress, bool eth) +{ + struct qosify_iface_config *cfg = &iface->config; + const char *bw = egress ? cfg->bandwidth_up : cfg->bandwidth_down; + const char *dir_opts = egress ? cfg->egress_opts : cfg->ingress_opts; + char buf[512]; + int ofs; + + cmd_del_qdisc(ifname, "root"); + + ofs = prepare_tc_cmd(buf, sizeof(buf), "qdisc", "add", ifname, "root handle 1: cake"); + if (bw) + APPEND(buf, ofs, " bandwidth %s", bw); + + APPEND(buf, ofs, " %s %sgress", cfg->mode, egress ? "e" : "in"); + + if (cfg->host_isolate) + APPEND(buf, ofs, " %snat dual-%shost", + cfg->nat ? "" : "no", + egress ? "src" : "dst"); + else + APPEND(buf, ofs, " flows"); + + APPEND(buf, ofs, " %s %s", + cfg->common_opts ? cfg->common_opts : "", + dir_opts ? 
dir_opts : ""); + + run_cmd(buf, false); + + ofs = prepare_tc_cmd(buf, sizeof(buf), "filter", "add", ifname, "parent 1: bpf"); + APPEND(buf, ofs, " object-pinned /sys/fs/bpf/qosify_%sgress_%s verbose direct-action", + egress ? "e" : "in", + eth ? "eth" : "ip"); + + return run_cmd(buf, false); +} + +static int +cmd_del_ingress(struct qosify_iface *iface) +{ + char buf[256]; + + cmd_del_qdisc(iface->ifname, "handle ffff: ingress"); + snprintf(buf, sizeof(buf), "ip link del '%s'", interface_ifb_name(iface)); + + return run_cmd(buf, true); +} + + +static int +cmd_add_ingress(struct qosify_iface *iface, bool eth) +{ + const char *ifbdev = interface_ifb_name(iface); + char buf[256]; + int ofs; + + cmd_del_ingress(iface); + + ofs = prepare_tc_cmd(buf, sizeof(buf), "qdisc", "add", iface->ifname, " handle ffff: ingress"); + run_cmd(buf, false); + + snprintf(buf, sizeof(buf), "ip link add '%s' type ifb", ifbdev); + run_cmd(buf, false); + + cmd_add_qdisc(iface, ifbdev, false, eth); + + snprintf(buf, sizeof(buf), "ip link set dev '%s' up", ifbdev); + run_cmd(buf, false); + + ofs = prepare_tc_cmd(buf, sizeof(buf), "filter", "add", iface->ifname, " parent ffff:"); + APPEND(buf, ofs, " protocol all prio 10 u32 match u32 0 0 " + "flowid 1:1 action mirred egress redirect dev '%s'", ifbdev); + return run_cmd(buf, false); +} + +static void +interface_start(struct qosify_iface *iface) +{ + struct ifreq ifr = {}; + bool eth; + + if (!iface->ifname[0] || iface->active) + return; + + ULOG_INFO("start interface %s\n", iface->ifname); + + strncpy(ifr.ifr_name, iface->ifname, sizeof(ifr.ifr_name)); + if (ioctl(socket_fd, SIOCGIFHWADDR, &ifr) < 0) { + ULOG_ERR("ioctl(SIOCGIFHWADDR, %s) failed: %s\n", iface->ifname, strerror(errno)); + return; + } + + eth = ifr.ifr_hwaddr.sa_family == ARPHRD_ETHER; + + if (iface->config.egress) + cmd_add_qdisc(iface, iface->ifname, true, eth); + if (iface->config.ingress) + cmd_add_ingress(iface, eth); + + iface->active = true; +} + +static void +interface_stop(struct qosify_iface *iface) +{ + if (!iface->ifname[0] || !iface->active) + return; + + ULOG_INFO("stop interface %s\n", iface->ifname); + iface->active = false; + + if (iface->config.egress) + cmd_del_qdisc(iface->ifname, "root"); + if (iface->config.ingress) + cmd_del_ingress(iface); +} + +static void +interface_set_config(struct qosify_iface *iface, struct blob_attr *config) +{ + iface->config_data = blob_memdup(config); + iface_config_set(&iface->config, iface->config_data); + interface_start(iface); +} + +static void +interface_update_cb(struct vlist_tree *tree, + struct vlist_node *node_new, struct vlist_node *node_old) +{ + struct qosify_iface *if_new = NULL, *if_old = NULL; + + if (node_new) + if_new = container_of(node_new, struct qosify_iface, node); + if (node_old) + if_old = container_of(node_old, struct qosify_iface, node); + + if (if_new && if_old) { + if (!iface_config_equal(if_old, if_new)) { + interface_stop(if_old); + free(if_old->config_data); + interface_set_config(if_old, if_new->config_data); + } + + free(if_new); + return; + } + + if (if_old) { + interface_stop(if_old); + free(if_old->config_data); + free(if_old); + } + + if (if_new) + interface_set_config(if_new, if_new->config_data); +} + +static void +interface_create(struct blob_attr *attr, bool device) +{ + struct qosify_iface *iface; + const char *name = blobmsg_name(attr); + int name_len = strlen(name); + char *name_buf; + + if (strchr(name, '\'')) + return; + + if (name_len >= IFNAMSIZ) + return; + + if (blobmsg_type(attr) != BLOBMSG_TYPE_TABLE) 
+ return; + + iface = calloc_a(sizeof(*iface), &name_buf, name_len + 1); + strcpy(name_buf, blobmsg_name(attr)); + iface->config_data = attr; + iface->device = device; + vlist_add(device ? &devices : &interfaces, &iface->node, name_buf); +} + +void qosify_iface_config_update(struct blob_attr *ifaces, struct blob_attr *devs) +{ + struct blob_attr *cur; + int rem; + + vlist_update(&devices); + blobmsg_for_each_attr(cur, devs, rem) + interface_create(cur, true); + vlist_flush(&devices); + + vlist_update(&interfaces); + blobmsg_for_each_attr(cur, ifaces, rem) + interface_create(cur, false); + vlist_flush(&interfaces); +} + +static void +qosify_iface_check_device(struct qosify_iface *iface) +{ + const char *name = qosify_iface_name(iface); + int ifindex; + + ifindex = if_nametoindex(name); + if (!ifindex) { + interface_stop(iface); + iface->ifname[0] = 0; + } else { + snprintf(iface->ifname, sizeof(iface->ifname), "%s", name); + interface_start(iface); + } +} + +static void +qosify_iface_check_interface(struct qosify_iface *iface) +{ + const char *name = qosify_iface_name(iface); + char ifname[IFNAMSIZ]; + + if (qosify_ubus_check_interface(name, ifname, sizeof(ifname)) == 0) { + snprintf(iface->ifname, sizeof(iface->ifname), "%s", ifname); + interface_start(iface); + } else { + interface_stop(iface); + iface->ifname[0] = 0; + } +} + +static void qos_iface_check_cb(struct uloop_timeout *t) +{ + struct qosify_iface *iface; + + vlist_for_each_element(&devices, iface, node) + qosify_iface_check_device(iface); + vlist_for_each_element(&interfaces, iface, node) + qosify_iface_check_interface(iface); +} + +void qosify_iface_check(void) +{ + static struct uloop_timeout timer = { + .cb = qos_iface_check_cb, + }; + + uloop_timeout_set(&timer, 10); +} + +void qosify_iface_status(struct blob_buf *b) +{ + struct qosify_iface *iface; + void *c, *i; + + c = blobmsg_open_table(b, "devices"); + vlist_for_each_element(&devices, iface, node) { + i = blobmsg_open_table(b, qosify_iface_name(iface)); + blobmsg_add_u8(b, "active", iface->active); + blobmsg_close_table(b, i); + } + blobmsg_close_table(b, c); + + c = blobmsg_open_table(b, "interfaces"); + vlist_for_each_element(&interfaces, iface, node) { + i = blobmsg_open_table(b, qosify_iface_name(iface)); + blobmsg_add_u8(b, "active", iface->active); + if (iface->ifname) + blobmsg_add_string(b, "ifname", iface->ifname); + blobmsg_close_table(b, i); + } + blobmsg_close_table(b, c); +} + +int qosify_iface_init(void) +{ + socket_fd = socket(AF_UNIX, SOCK_DGRAM, 0); + if (socket < 0) + return -1; + + return 0; +} + +void qosify_iface_stop(void) +{ + struct qosify_iface *iface; + + vlist_for_each_element(&interfaces, iface, node) + interface_stop(iface); + vlist_for_each_element(&devices, iface, node) + interface_stop(iface); +} + diff --git a/loader.c b/loader.c new file mode 100644 index 0000000..539aae4 --- /dev/null +++ b/loader.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#include +#include +#include +#include +#include + +#include "qosify.h" + +static int qosify_bpf_pr(enum libbpf_print_level level, const char *format, + va_list args) +{ + return vfprintf(stderr, format, args); +} + +static void qosify_init_env(void) +{ + struct rlimit limit = { + .rlim_cur = RLIM_INFINITY, + .rlim_max = RLIM_INFINITY, + }; + + setrlimit(RLIMIT_MEMLOCK, &limit); +} + +static void qosify_fill_rodata(struct bpf_object *obj, uint32_t flags) +{ + struct bpf_map *map = NULL; + + while ((map = bpf_map__next(map, obj)) != NULL) 
{ + if (!strstr(bpf_map__name(map), ".rodata")) + continue; + + bpf_map__set_initial_value(map, &flags, sizeof(flags)); + } +} + +static int +qosify_create_program(const char *suffix, uint32_t flags, bool *force_init) +{ + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .pin_root_path = CLASSIFY_DATA_PATH, + ); + struct bpf_program *prog; + struct bpf_object *obj; + struct stat st; + char path[256]; + int err; + + snprintf(path, sizeof(path), CLASSIFY_PIN_PATH "_" "%s", suffix); + if (!*force_init) { + if (stat(path, &st) == 0) + return 0; + + *force_init = true; + } + + obj = bpf_object__open_file(CLASSIFY_PROG_PATH, &opts); + err = libbpf_get_error(obj); + if (err) { + perror("bpf_object__open_file"); + return -1; + } + + prog = bpf_object__find_program_by_title(obj, "classifier"); + if (!prog) { + fprintf(stderr, "Can't find classifier prog\n"); + return -1; + } + + bpf_program__set_type(prog, BPF_PROG_TYPE_SCHED_CLS); + + qosify_fill_rodata(obj, flags); + + err = bpf_object__load(obj); + if (err) { + perror("bpf_object__load"); + return -1; + } + + libbpf_set_print(NULL); + + unlink(path); + err = bpf_program__pin(prog, path); + if (err) { + fprintf(stderr, "Failed to pin program to %s: %s\n", + path, strerror(-err)); + } + + bpf_object__close(obj); + + return 0; +} + +int qosify_loader_init(bool force_init) +{ + static const struct { + const char *suffix; + uint32_t flags; + } progs[] = { + { "egress_eth", 0 }, + { "egress_ip", QOSIFY_IP_ONLY }, + { "ingress_eth", QOSIFY_INGRESS }, + { "ingress_ip", QOSIFY_INGRESS | QOSIFY_IP_ONLY }, + }; + glob_t g; + int i; + + if (force_init && + glob(CLASSIFY_DATA_PATH "/*", 0, NULL, &g) == 0) { + for (i = 0; i < g.gl_pathc; i++) + unlink(g.gl_pathv[i]); + } + + + libbpf_set_print(qosify_bpf_pr); + + qosify_init_env(); + + for (i = 0; i < ARRAY_SIZE(progs); i++) { + if (qosify_create_program(progs[i].suffix, progs[i].flags, + &force_init)) + return -1; + } + + return 0; +} diff --git a/main.c b/main.c new file mode 100644 index 0000000..0352183 --- /dev/null +++ b/main.c @@ -0,0 +1,75 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#include +#include +#include + +#include + +#include "qosify.h" + +static int usage(const char *progname) +{ + fprintf(stderr, "Usage: %s [options]\n" + "Options:\n" + " -f: force reload of BPF programs\n" + " -l Load defaults from \n" + " -o only load program/maps without running as daemon\n" + "\n", progname); + + return 1; +} + +int main(int argc, char **argv) +{ + const char *load_file = NULL; + bool force_init = false; + bool oneshot = false; + int ch; + + while ((ch = getopt(argc, argv, "fl:o")) != -1) { + switch (ch) { + case 'f': + force_init = true; + break; + case 'l': + load_file = optarg; + break; + case 'o': + oneshot = true; + break; + default: + return usage(argv[0]); + } + } + + if (qosify_loader_init(force_init)) + return 2; + + if (qosify_map_init()) + return 2; + + if (qosify_map_load_file(load_file)) + return 2; + + if (oneshot) + return 0; + + ulog_open(ULOG_SYSLOG, LOG_DAEMON, "qosify"); + uloop_init(); + + if (qosify_ubus_init() || + qosify_iface_init()) + return 2; + + uloop_run(); + + qosify_ubus_stop(); + qosify_iface_stop(); + + uloop_done(); + + return 0; +} diff --git a/map.c b/map.c new file mode 100644 index 0000000..636a46b --- /dev/null +++ b/map.c @@ -0,0 +1,608 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#include + +#include +#include +#include +#include +#include + +#include + +#include "qosify.h" + 
+static int qosify_map_entry_cmp(const void *k1, const void *k2, void *ptr); + +static int qosify_map_fds[__CL_MAP_MAX]; +static AVL_TREE(map_data, qosify_map_entry_cmp, false, NULL); +static LIST_HEAD(map_files); +static uint32_t next_timeout; +static uint8_t qosify_dscp_default[2] = { 0xff, 0xff }; +int qosify_map_timeout = 3600; +struct qosify_config config; + +struct qosify_map_file { + struct list_head list; + char filename[]; +}; + +static const struct { + const char *name; + const char *type_name; +} qosify_map_info[] = { + [CL_MAP_TCP_PORTS] = { "tcp_ports", "tcp_port" }, + [CL_MAP_UDP_PORTS] = { "udp_ports", "udp_port" }, + [CL_MAP_IPV4_ADDR] = { "ipv4_map", "ipv4_addr" }, + [CL_MAP_IPV6_ADDR] = { "ipv6_map", "ipv6_addr" }, + [CL_MAP_CONFIG] = { "config", "config" }, +}; + +static const struct { + const char name[5]; + uint8_t val; +} codepoints[] = { + { "CS0", 0 }, + { "CS1", 8 }, + { "CS2", 16 }, + { "CS3", 24 }, + { "CS4", 32 }, + { "CS5", 40 }, + { "CS6", 48 }, + { "CS7", 56 }, + { "AF11", 10 }, + { "AF12", 12 }, + { "AF13", 14 }, + { "AF21", 18 }, + { "AF22", 20 }, + { "AF22", 22 }, + { "AF31", 26 }, + { "AF32", 28 }, + { "AF33", 30 }, + { "AF41", 34 }, + { "AF42", 36 }, + { "AF43", 38 }, + { "EF", 46 }, + { "VA", 44 }, +}; + +static void qosify_map_timer_cb(struct uloop_timeout *t) +{ + qosify_map_gc(); +} + +static struct uloop_timeout qosify_map_timer = { + .cb = qosify_map_timer_cb, +}; + +static uint32_t qosify_gettime(void) +{ + struct timespec ts; + + clock_gettime(CLOCK_MONOTONIC, &ts); + + return ts.tv_sec; +} + +static const char * +qosify_map_path(enum qosify_map_id id) +{ + static char path[128]; + const char *name; + + if (id >= ARRAY_SIZE(qosify_map_info)) + return NULL; + + name = qosify_map_info[id].name; + if (!name) + return NULL; + + snprintf(path, sizeof(path), "%s/%s", CLASSIFY_DATA_PATH, name); + + return path; +} + +static int qosify_map_get_fd(enum qosify_map_id id) +{ + const char *path = qosify_map_path(id); + int fd; + + if (!path) + return -1; + + fd = bpf_obj_get(path); + if (fd < 0) + fprintf(stderr, "Failed to open map %s: %s\n", path, strerror(errno)); + + return fd; +} + +static void qosify_map_clear_list(enum qosify_map_id id) +{ + int fd = qosify_map_fds[id]; + __u32 key[4] = {}; + + while (bpf_map_get_next_key(fd, &key, &key) != -1) + bpf_map_delete_elem(fd, &key); +} + +static void __qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val) +{ + struct qosify_map_data data = { + .id = id, + }; + int fd = qosify_map_fds[id]; + int i; + + val |= QOSIFY_DSCP_DEFAULT_FLAG; + + for (i = 0; i < (1 << 16); i++) { + data.addr.port = htons(i); + if (avl_find(&map_data, &data)) + continue; + + bpf_map_update_elem(fd, &data.addr, &val, BPF_ANY); + } +} + +void qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val) +{ + bool udp; + + if (id == CL_MAP_TCP_PORTS) + udp = false; + else if (id == CL_MAP_UDP_PORTS) + udp = true; + else + return; + + if (qosify_dscp_default[udp] == val) + return; + + qosify_dscp_default[udp] = val; + __qosify_map_set_dscp_default(id, val); +} + +int qosify_map_init(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(qosify_map_fds); i++) { + qosify_map_fds[i] = qosify_map_get_fd(i); + if (qosify_map_fds[i] < 0) + return -1; + } + + qosify_map_clear_list(CL_MAP_IPV4_ADDR); + qosify_map_clear_list(CL_MAP_IPV6_ADDR); + qosify_map_reset_config(); + + return 0; +} + +static char *str_skip(char *str, bool space) +{ + while (*str && isspace(*str) == space) + str++; + + return str; +} + +static int 
+qosify_map_codepoint(const char *val) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(codepoints); i++) + if (!strcmp(codepoints[i].name, val)) + return codepoints[i].val; + + return 0xff; +} + +static int qosify_map_entry_cmp(const void *k1, const void *k2, void *ptr) +{ + const struct qosify_map_data *d1 = k1; + const struct qosify_map_data *d2 = k2; + + if (d1->id != d2->id) + return d2->id - d1->id; + + return memcmp(&d1->addr, &d2->addr, sizeof(d1->addr)); +} + +static void __qosify_map_set_entry(struct qosify_map_data *data) +{ + int fd = qosify_map_fds[data->id]; + struct qosify_map_entry *e; + bool file = data->file; + int32_t delta = 0; + bool add = data->dscp != 0xff; + uint8_t prev_dscp = 0xff; + + e = avl_find_element(&map_data, data, e, avl); + if (!e) { + if (!add) + return; + + e = calloc(1, sizeof(*e)); + e->avl.key = &e->data; + e->data.id = data->id; + memcpy(&e->data.addr, &data->addr, sizeof(e->data.addr)); + avl_insert(&map_data, &e->avl); + } else { + prev_dscp = e->data.dscp; + } + + if (file) + e->data.file = add; + else + e->data.user = add; + + if (add) { + if (file) + e->data.file_dscp = data->dscp; + if (!e->data.user || !file) + e->data.dscp = data->dscp; + } else if (e->data.file && !file) { + e->data.dscp = e->data.file_dscp; + } + + if (e->data.dscp != prev_dscp) + bpf_map_update_elem(fd, &data->addr, &e->data.dscp, BPF_ANY); + + if (add) { + if (qosify_map_timeout == ~0 || file) { + e->timeout = ~0; + return; + } + + e->timeout = qosify_gettime() + qosify_map_timeout; + delta = e->timeout - next_timeout; + if (next_timeout && delta >= 0) + return; + } + + uloop_timeout_set(&qosify_map_timer, 1); +} + +static int +qosify_map_set_port(struct qosify_map_data *data, const char *str) +{ + unsigned long start_port, end_port; + char *err; + int i; + + start_port = end_port = strtoul(str, &err, 0); + if (err && *err) { + if (*err == '-') + end_port = strtoul(err + 1, &err, 0); + if (*err) + return -1; + } + + if (!start_port || end_port < start_port || + end_port >= 65535) + return -1; + + for (i = start_port; i <= end_port; i++) { + data->addr.port = htons(i); + __qosify_map_set_entry(data); + } + + return 0; +} + +static int +qosify_map_fill_ip(struct qosify_map_data *data, const char *str) +{ + int af; + + if (data->id == CL_MAP_IPV6_ADDR) + af = AF_INET6; + else + af = AF_INET; + + if (inet_pton(af, str, &data->addr) != 1) + return -1; + + return 0; +} + +int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str, uint8_t dscp) +{ + struct qosify_map_data data = { + .id = id, + .file = file, + .dscp = dscp, + }; + + switch (id) { + case CL_MAP_TCP_PORTS: + case CL_MAP_UDP_PORTS: + return qosify_map_set_port(&data, str); + case CL_MAP_IPV4_ADDR: + case CL_MAP_IPV6_ADDR: + if (qosify_map_fill_ip(&data, str)) + return -1; + break; + default: + return -1; + } + + __qosify_map_set_entry(&data); + + return 0; +} + +int qosify_map_dscp_value(const char *val) +{ + unsigned long dscp; + char *err; + bool fallback = false; + + if (*val == '+') { + fallback = true; + val++; + } + + dscp = strtoul(val, &err, 0); + if (err && *err) + dscp = qosify_map_codepoint(val); + + if (dscp >= 64) + return -1; + + return dscp + (fallback << 6); +} + +static void +qosify_map_dscp_codepoint_str(char *dest, int len, uint8_t dscp) +{ + int i; + + if (dscp & QOSIFY_DSCP_FALLBACK_FLAG) { + *(dest++) = '+'; + len--; + dscp &= ~QOSIFY_DSCP_FALLBACK_FLAG; + } + + for (i = 0; i < ARRAY_SIZE(codepoints); i++) { + if (codepoints[i].val != dscp) + continue; + + snprintf(dest, len, 
"%s", codepoints[i].name); + return; + } + + snprintf(dest, len, "0x%x", dscp); +} + +static void +qosify_map_parse_line(char *str) +{ + const char *key, *value; + int dscp; + + str = str_skip(str, true); + key = str; + + str = str_skip(str, false); + if (!*str) + return; + + *(str++) = 0; + str = str_skip(str, true); + value = str; + + dscp = qosify_map_dscp_value(value); + if (dscp < 0) + return; + + if (!strncmp(key, "tcp:", 4)) + qosify_map_set_entry(CL_MAP_TCP_PORTS, true, key + 4, dscp); + else if (!strncmp(key, "udp:", 4)) + qosify_map_set_entry(CL_MAP_UDP_PORTS, true, key + 4, dscp); + else if (strchr(key, ':')) + qosify_map_set_entry(CL_MAP_IPV6_ADDR, true, key, dscp); + else if (strchr(key, '.')) + qosify_map_set_entry(CL_MAP_IPV4_ADDR, true, key, dscp); +} + +static int __qosify_map_load_file(const char *file) +{ + char line[1024]; + char *cur; + FILE *f; + + if (!file) + return 0; + + f = fopen(file, "r"); + if (!f) { + fprintf(stderr, "Can't open data file %s\n", file); + return -1; + } + + while (fgets(line, sizeof(line), f)) { + cur = strchr(line, '#'); + if (cur) + *cur = 0; + + cur = line + strlen(line); + if (cur == line) + continue; + + while (cur > line && isspace(cur[-1])) + cur--; + + *cur = 0; + qosify_map_parse_line(line); + } + + fclose(f); + + return 0; +} + +int qosify_map_load_file(const char *file) +{ + struct qosify_map_file *f; + + if (!file) + return 0; + + f = calloc(1, sizeof(*f) + strlen(file) + 1); + strcpy(f->filename, file); + list_add_tail(&f->list, &map_files); + + return __qosify_map_load_file(file); +} + +static void qosify_map_reset_file_entries(void) +{ + struct qosify_map_entry *e; + + avl_for_each_element(&map_data, e, avl) + e->data.file = false; +} + +void qosify_map_clear_files(void) +{ + struct qosify_map_file *f, *tmp; + + qosify_map_reset_file_entries(); + + list_for_each_entry_safe(f, tmp, &map_files, list) { + list_del(&f->list); + free(f); + } +} + +void qosify_map_reset_config(void) +{ + qosify_map_clear_files(); + qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, 0); + qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, 0); + qosify_map_timeout = 3600; + + memset(&config, 0, sizeof(config)); + config.dscp_prio = 0xff; + config.dscp_bulk = 0xff; + config.dscp_icmp = 0xff; +} + +void qosify_map_reload(void) +{ + struct qosify_map_file *f; + + qosify_map_reset_file_entries(); + + list_for_each_entry(f, &map_files, list) + __qosify_map_load_file(f->filename); + + qosify_map_gc(); +} + +void qosify_map_gc(void) +{ + struct qosify_map_entry *e, *tmp; + int32_t timeout = 0; + uint32_t cur_time = qosify_gettime(); + int fd; + + next_timeout = 0; + avl_for_each_element_safe(&map_data, e, avl, tmp) { + int32_t cur_timeout; + + if (e->data.user && e->timeout != ~0) { + cur_timeout = e->timeout - cur_time; + if (cur_timeout <= 0) { + e->data.user = false; + e->data.dscp = e->data.file_dscp; + } else if (!timeout || cur_timeout < timeout) { + timeout = cur_timeout; + next_timeout = e->timeout; + } + } + + if (e->data.file || e->data.user) + continue; + + avl_delete(&map_data, &e->avl); + fd = qosify_map_fds[e->data.id]; + bpf_map_delete_elem(fd, &e->data.addr); + free(e); + } + + if (!timeout) + return; + + uloop_timeout_set(&qosify_map_timer, timeout * 1000); +} + +void qosify_map_dump(struct blob_buf *b) +{ + struct qosify_map_entry *e; + uint32_t cur_time = qosify_gettime(); + int buf_len = INET6_ADDRSTRLEN + 1; + char *buf; + void *a; + int af; + + a = blobmsg_open_array(b, "entries"); + avl_for_each_element(&map_data, e, avl) { + void *c; + + if 
(!e->data.file && !e->data.user) + continue; + + c = blobmsg_open_table(b, NULL); + if (e->data.user && e->timeout != ~0) { + int32_t cur_timeout = e->timeout - cur_time; + + if (cur_timeout < 0) + cur_timeout = 0; + + blobmsg_add_u32(b, "timeout", cur_timeout); + } + + blobmsg_add_u8(b, "file", e->data.file); + blobmsg_add_u8(b, "user", e->data.user); + + buf = blobmsg_alloc_string_buffer(b, "dscp", buf_len); + qosify_map_dscp_codepoint_str(buf, buf_len, e->data.dscp); + blobmsg_add_string_buffer(b); + + blobmsg_add_string(b, "type", qosify_map_info[e->data.id].type_name); + + buf = blobmsg_alloc_string_buffer(b, "value", buf_len); + switch (e->data.id) { + case CL_MAP_TCP_PORTS: + case CL_MAP_UDP_PORTS: + snprintf(buf, buf_len, "%d", ntohs(e->data.addr.port)); + break; + case CL_MAP_IPV4_ADDR: + case CL_MAP_IPV6_ADDR: + af = e->data.id == CL_MAP_IPV6_ADDR ? AF_INET6 : AF_INET; + inet_ntop(af, &e->data.addr, buf, buf_len); + break; + default: + *buf = 0; + break; + } + blobmsg_add_string_buffer(b); + blobmsg_close_table(b, c); + } + blobmsg_close_array(b, a); +} + +void qosify_map_update_config(void) +{ + int fd = qosify_map_fds[CL_MAP_CONFIG]; + uint32_t key = 0; + + bpf_map_update_elem(fd, &key, &config, BPF_ANY); +} diff --git a/qosify-bpf.c b/qosify-bpf.c new file mode 100644 index 0000000..e63e861 --- /dev/null +++ b/qosify-bpf.c @@ -0,0 +1,453 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#define KBUILD_MODNAME "foo" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qosify-bpf.h" + +#define INET_ECN_MASK 3 + +#define FLOW_CHECK_INTERVAL ((u32)((1000000000ULL) >> 24)) +#define FLOW_TIMEOUT ((u32)((30ULL * 1000000000ULL) >> 24)) +#define FLOW_BULK_TIMEOUT 5 + +#define EWMA_SHIFT 12 + +const volatile static uint32_t module_flags = 0; + +struct flow_bucket { + __u32 last_update; + __u32 pkt_len_avg; + __u16 pkt_count; + __u8 dscp; + __u8 bulk_timeout; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(pinning, 1); + __type(key, __u32); + __type(value, struct qosify_config); + __uint(max_entries, 1); +} config SEC(".maps"); + +typedef struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(pinning, 1); + __type(key, __u32); + __type(value, __u8); + __uint(max_entries, 1 << 16); +} port_array_t; + +struct { + __uint(type, BPF_MAP_TYPE_LRU_HASH); + __uint(pinning, 1); + __type(key, __u32); + __uint(value_size, sizeof(struct flow_bucket)); + __uint(max_entries, QOSIFY_FLOW_BUCKETS); +} flow_map SEC(".maps"); + +port_array_t tcp_ports SEC(".maps"); +port_array_t udp_ports SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(pinning, 1); + __uint(key_size, sizeof(struct in_addr)); + __type(value, __u8); + __uint(max_entries, 100000); + __uint(map_flags, BPF_F_NO_PREALLOC); +} ipv4_map SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(pinning, 1); + __uint(key_size, sizeof(struct in6_addr)); + __type(value, __u8); + __uint(max_entries, 100000); + __uint(map_flags, BPF_F_NO_PREALLOC); +} ipv6_map SEC(".maps"); + +static struct qosify_config *get_config(void) +{ + __u32 key = 0; + + return bpf_map_lookup_elem(&config, &key); +} + +static __always_inline int proto_is_vlan(__u16 h_proto) +{ + return !!(h_proto == bpf_htons(ETH_P_8021Q) || + h_proto == bpf_htons(ETH_P_8021AD)); +} + +static __always_inline int proto_is_ip(__u16 h_proto) +{ + return !!(h_proto == bpf_htons(ETH_P_IP) || + h_proto == 
bpf_htons(ETH_P_IPV6)); +} + +static __always_inline void *skb_ptr(struct __sk_buff *skb, __u32 offset) +{ + void *start = (void *)(unsigned long long)skb->data; + + return start + offset; +} + +static __always_inline void *skb_end_ptr(struct __sk_buff *skb) +{ + return (void *)(unsigned long long)skb->data_end; +} + +static __always_inline int skb_check(struct __sk_buff *skb, void *ptr) +{ + if (ptr > skb_end_ptr(skb)) + return -1; + + return 0; +} + +static __always_inline __u32 cur_time(void) +{ + __u32 val = bpf_ktime_get_ns() >> 24; + + if (!val) + val = 1; + + return val; +} + +static __always_inline __u32 ewma(__u32 *avg, __u32 val) +{ + if (*avg) + *avg = (*avg * 3) / 4 + (val << EWMA_SHIFT) / 4; + else + *avg = val << EWMA_SHIFT; + + return *avg >> EWMA_SHIFT; +} + +static __always_inline void +ipv4_change_dsfield(struct iphdr *iph, __u8 mask, __u8 value, bool force) +{ + __u32 check = bpf_ntohs(iph->check); + __u8 dsfield; + + if ((iph->tos & mask) && !force) + return; + + dsfield = (iph->tos & mask) | value; + if (iph->tos == dsfield) + return; + + check += iph->tos; + if ((check + 1) >> 16) + check = (check + 1) & 0xffff; + check -= dsfield; + check += check >> 16; + iph->check = bpf_htons(check); + iph->tos = dsfield; +} + +static __always_inline void +ipv6_change_dsfield(struct ipv6hdr *ipv6h, __u8 mask, __u8 value, bool force) +{ + __u16 *p = (__u16 *)ipv6h; + __u16 val; + + if (((*p >> 4) & mask) && !force) + return; + + val = (*p & bpf_htons((((__u16)mask << 4) | 0xf00f))) | bpf_htons((__u16)value << 4); + if (val == *p) + return; + + *p = val; +} + +static __always_inline int +parse_ethernet(struct __sk_buff *skb, __u32 *offset) +{ + struct ethhdr *eth; + __u16 h_proto; + int i; + + eth = skb_ptr(skb, *offset); + if (skb_check(skb, eth + 1)) + return -1; + + h_proto = eth->h_proto; + *offset += sizeof(*eth); + +#pragma unroll + for (i = 0; i < 2; i++) { + struct vlan_hdr *vlh = skb_ptr(skb, *offset); + + if (!proto_is_vlan(h_proto)) + break; + + if (skb_check(skb, vlh + 1)) + return -1; + + h_proto = vlh->h_vlan_encapsulated_proto; + *offset += sizeof(*vlh); + } + + return h_proto; +} + +static void +parse_l4proto(struct qosify_config *config, struct __sk_buff *skb, + __u32 offset, __u8 proto, __u8 *dscp_out) +{ + struct udphdr *udp; + __u32 src, dest, key; + __u8 *value; + + udp = skb_ptr(skb, offset); + if (skb_check(skb, &udp->len)) + return; + + if (config && (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6)) { + *dscp_out = config->dscp_icmp; + return; + } + + src = udp->source; + dest = udp->dest; + + if (module_flags & QOSIFY_INGRESS) + key = src; + else + key = dest; + + if (proto == IPPROTO_TCP) { + value = bpf_map_lookup_elem(&tcp_ports, &key); + } else { + if (proto != IPPROTO_UDP) + key = 0; + + value = bpf_map_lookup_elem(&udp_ports, &key); + } + + if (!value) + return; + + *dscp_out = *value; +} + +static void +check_flow(struct qosify_config *config, struct __sk_buff *skb, + uint8_t *dscp) +{ + struct flow_bucket flow_data; + struct flow_bucket *flow; + __s32 delta; + __u32 hash; + __u32 time; + + if (!(*dscp & QOSIFY_DSCP_DEFAULT_FLAG)) + return; + + if (!config) + return; + + if (!config->bulk_trigger_pps && + !config->prio_max_avg_pkt_len) + return; + + time = cur_time(); + hash = bpf_get_hash_recalc(skb); + flow = bpf_map_lookup_elem(&flow_map, &hash); + if (!flow) { + memset(&flow_data, 0, sizeof(flow_data)); + bpf_map_update_elem(&flow_map, &hash, &flow_data, BPF_ANY); + flow = bpf_map_lookup_elem(&flow_map, &hash); + if (!flow) + return; + } + + if 
(!flow->last_update) + goto reset; + + delta = time - flow->last_update; + if ((u32)delta > FLOW_TIMEOUT) + goto reset; + + if (delta >= FLOW_CHECK_INTERVAL) { + if (flow->bulk_timeout) { + flow->bulk_timeout--; + if (!flow->bulk_timeout) + flow->dscp = 0xff; + } + + goto clear; + } + + if (flow->pkt_count < 0xffff) + flow->pkt_count++; + + if (config->bulk_trigger_pps && + flow->pkt_count > config->bulk_trigger_pps) { + flow->dscp = config->dscp_bulk; + flow->bulk_timeout = config->bulk_trigger_timeout; + } + +out: + if (config->prio_max_avg_pkt_len && + flow->dscp != config->dscp_bulk) { + if (ewma(&flow->pkt_len_avg, skb->len) < + config->prio_max_avg_pkt_len) + flow->dscp = config->dscp_prio; + else + flow->dscp = 0xff; + } + + if (flow->dscp != 0xff) + *dscp = flow->dscp; + + return; + +reset: + flow->dscp = 0xff; + flow->pkt_len_avg = 0; +clear: + flow->pkt_count = 1; + flow->last_update = time; + + goto out; +} + +static __always_inline void +parse_ipv4(struct __sk_buff *skb, __u32 *offset) +{ + struct qosify_config *config; + const __u32 zero_port = 0; + struct iphdr *iph; + __u8 dscp = 0xff; + __u8 *value; + __u8 ipproto; + int hdr_len; + void *key; + bool force; + + config = get_config(); + + iph = skb_ptr(skb, *offset); + if (skb_check(skb, iph + 1)) + return; + + hdr_len = iph->ihl * 4; + if (bpf_skb_pull_data(skb, *offset + hdr_len + sizeof(struct udphdr))) + return; + + iph = skb_ptr(skb, *offset); + *offset += hdr_len; + + if (skb_check(skb, (void *)(iph + 1))) + return; + + ipproto = iph->protocol; + parse_l4proto(config, skb, *offset, ipproto, &dscp); + + if (module_flags & QOSIFY_INGRESS) + key = &iph->saddr; + else + key = &iph->daddr; + + value = bpf_map_lookup_elem(&ipv4_map, key); + /* use udp port 0 entry as fallback for non-tcp/udp */ + if (!value && dscp == 0xff) + value = bpf_map_lookup_elem(&udp_ports, &zero_port); + if (value) + dscp = *value; + + check_flow(config, skb, &dscp); + + force = !(dscp & QOSIFY_DSCP_FALLBACK_FLAG); + dscp &= GENMASK(5, 0); + + ipv4_change_dsfield(iph, INET_ECN_MASK, dscp << 2, force); +} + +static __always_inline void +parse_ipv6(struct __sk_buff *skb, __u32 *offset) +{ + struct qosify_config *config; + const __u32 zero_port = 0; + struct ipv6hdr *iph; + __u8 dscp = 0; + __u8 *value; + __u8 ipproto; + void *key; + bool force; + + config = get_config(); + + if (bpf_skb_pull_data(skb, *offset + sizeof(*iph) + sizeof(struct udphdr))) + return; + + iph = skb_ptr(skb, *offset); + *offset += sizeof(*iph); + + if (skb_check(skb, (void *)(iph + 1))) + return; + + ipproto = iph->nexthdr; + if (module_flags & QOSIFY_INGRESS) + key = &iph->saddr; + else + key = &iph->daddr; + + parse_l4proto(config, skb, *offset, ipproto, &dscp); + + value = bpf_map_lookup_elem(&ipv6_map, key); + + /* use udp port 0 entry as fallback for non-tcp/udp */ + if (!value) + value = bpf_map_lookup_elem(&udp_ports, &zero_port); + if (value) + dscp = *value; + + check_flow(config, skb, &dscp); + + force = !(dscp & QOSIFY_DSCP_FALLBACK_FLAG); + dscp &= GENMASK(5, 0); + + ipv6_change_dsfield(iph, INET_ECN_MASK, dscp << 2, force); +} + +SEC("classifier") +int classify(struct __sk_buff *skb) +{ + __u32 offset = 0; + int type; + + if (module_flags & QOSIFY_IP_ONLY) + type = skb->protocol; + else + type = parse_ethernet(skb, &offset); + + if (type == bpf_htons(ETH_P_IP)) + parse_ipv4(skb, &offset); + else if (type == bpf_htons(ETH_P_IPV6)) + parse_ipv6(skb, &offset); + + return TC_ACT_OK; +} + +char _license[] SEC("license") = "GPL"; diff --git a/qosify-bpf.h b/qosify-bpf.h 
new file mode 100644 index 0000000..c5525c8 --- /dev/null +++ b/qosify-bpf.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#ifndef __BPF_QOSIFY_H +#define __BPF_QOSIFY_H + +#ifndef QOSIFY_FLOW_BUCKET_SHIFT +#define QOSIFY_FLOW_BUCKET_SHIFT 13 +#endif + +#define QOSIFY_FLOW_BUCKETS (1 << QOSIFY_FLOW_BUCKET_SHIFT) + +/* rodata per-instance flags */ +#define QOSIFY_INGRESS (1 << 0) +#define QOSIFY_IP_ONLY (1 << 1) + + +#define QOSIFY_DSCP_FALLBACK_FLAG (1 << 6) +#define QOSIFY_DSCP_DEFAULT_FLAG (1 << 7) + +/* global config data */ +struct qosify_config { + uint8_t dscp_prio; + uint8_t dscp_bulk; + uint8_t dscp_icmp; + + uint8_t bulk_trigger_timeout; + uint16_t bulk_trigger_pps; + + uint16_t prio_max_avg_pkt_len; +}; + +#endif diff --git a/qosify.h b/qosify.h new file mode 100644 index 0000000..e6934ad --- /dev/null +++ b/qosify.h @@ -0,0 +1,87 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#ifndef __QOS_CLASSIFY_H +#define __QOS_CLASSIFY_H + +#include + +#include +#include + +#include "qosify-bpf.h" + +#include +#include +#include +#include + +#include + +#define CLASSIFY_PROG_PATH "/lib/bpf/qosify-bpf.o" +#define CLASSIFY_PIN_PATH "/sys/fs/bpf/qosify" +#define CLASSIFY_DATA_PATH "/sys/fs/bpf/qosify_data" + +enum qosify_map_id { + CL_MAP_TCP_PORTS, + CL_MAP_UDP_PORTS, + CL_MAP_IPV4_ADDR, + CL_MAP_IPV6_ADDR, + CL_MAP_CONFIG, + __CL_MAP_MAX, +}; + +struct qosify_map_data { + enum qosify_map_id id; + + bool file : 1; + bool user : 1; + + uint8_t dscp; + uint8_t file_dscp; + + union { + uint32_t port; + struct in_addr ip; + struct in6_addr ip6; + } addr; +}; + +struct qosify_map_entry { + struct avl_node avl; + + uint32_t timeout; + + struct qosify_map_data data; +}; + + +extern int qosify_map_timeout; +extern struct qosify_config config; + +int qosify_loader_init(bool force_init); + +int qosify_map_init(void); +int qosify_map_dscp_value(const char *val); +int qosify_map_load_file(const char *file); +int qosify_map_set_entry(enum qosify_map_id id, bool file, const char *str, uint8_t dscp); +void qosify_map_reload(void); +void qosify_map_clear_files(void); +void qosify_map_gc(void); +void qosify_map_dump(struct blob_buf *b); +void qosify_map_set_dscp_default(enum qosify_map_id id, uint8_t val); +void qosify_map_reset_config(void); +void qosify_map_update_config(void); + +int qosify_iface_init(void); +void qosify_iface_config_update(struct blob_attr *ifaces, struct blob_attr *devs); +void qosify_iface_check(void); +void qosify_iface_status(struct blob_buf *b); +void qosify_iface_stop(void); + +int qosify_ubus_init(void); +void qosify_ubus_stop(void); +int qosify_ubus_check_interface(const char *name, char *ifname, int ifname_len); + +#endif diff --git a/ubus.c b/ubus.c new file mode 100644 index 0000000..fe2614b --- /dev/null +++ b/ubus.c @@ -0,0 +1,373 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2021 Felix Fietkau + */ +#include + +#include "qosify.h" + +static struct blob_buf b; + +static int +qosify_ubus_add_array(struct blob_attr *attr, uint8_t val, enum qosify_map_id id) +{ + struct blob_attr *cur; + int rem; + + if (blobmsg_check_array(attr, BLOBMSG_TYPE_STRING) < 0) + return UBUS_STATUS_INVALID_ARGUMENT; + + blobmsg_for_each_attr(cur, attr, rem) + qosify_map_set_entry(id, false, blobmsg_get_string(cur), val); + + return 0; +} + +static int +qosify_ubus_set_files(struct blob_attr *attr) +{ + struct blob_attr *cur; + int rem; + + if (blobmsg_check_array(attr, BLOBMSG_TYPE_STRING) 
< 0) + return UBUS_STATUS_INVALID_ARGUMENT; + + qosify_map_clear_files(); + + blobmsg_for_each_attr(cur, attr, rem) + qosify_map_load_file(blobmsg_get_string(cur)); + + qosify_map_gc(); + + return 0; +} + + +enum { + CL_ADD_DSCP, + CL_ADD_TIMEOUT, + CL_ADD_IPV4, + CL_ADD_IPV6, + CL_ADD_TCP_PORT, + CL_ADD_UDP_PORT, + __CL_ADD_MAX +}; + +static const struct blobmsg_policy qosify_add_policy[__CL_ADD_MAX] = { + [CL_ADD_DSCP] = { "dscp", BLOBMSG_TYPE_STRING }, + [CL_ADD_TIMEOUT] = { "timeout", BLOBMSG_TYPE_INT32 }, + [CL_ADD_IPV4] = { "ipv4", BLOBMSG_TYPE_ARRAY }, + [CL_ADD_IPV6] = { "ipv6", BLOBMSG_TYPE_ARRAY }, + [CL_ADD_TCP_PORT] = { "tcp_port", BLOBMSG_TYPE_ARRAY }, + [CL_ADD_UDP_PORT] = { "udp_port", BLOBMSG_TYPE_ARRAY }, +}; + + +static int +qosify_ubus_reload(struct ubus_context *ctx, struct ubus_object *obj, + struct ubus_request_data *req, const char *method, + struct blob_attr *msg) +{ + qosify_map_reload(); + return 0; +} + + +static int +qosify_ubus_add(struct ubus_context *ctx, struct ubus_object *obj, + struct ubus_request_data *req, const char *method, + struct blob_attr *msg) +{ + int prev_timemout = qosify_map_timeout; + struct blob_attr *tb[__CL_ADD_MAX]; + struct blob_attr *cur; + int dscp = -1; + int ret; + + blobmsg_parse(qosify_add_policy, __CL_ADD_MAX, tb, + blobmsg_data(msg), blobmsg_len(msg)); + + if (!strcmp(method, "add")) { + if ((cur = tb[CL_ADD_DSCP]) != NULL) + dscp = qosify_map_dscp_value(blobmsg_get_string(cur)); + else + return UBUS_STATUS_INVALID_ARGUMENT; + if (dscp < 0) + return UBUS_STATUS_INVALID_ARGUMENT; + + if ((cur = tb[CL_ADD_TIMEOUT]) != NULL) + qosify_map_timeout = blobmsg_get_u32(cur); + } else { + dscp = 0xff; + } + + if ((cur = tb[CL_ADD_IPV4]) != NULL && + (ret = qosify_ubus_add_array(cur, dscp, CL_MAP_IPV4_ADDR) != 0)) + return ret; + + if ((cur = tb[CL_ADD_IPV6]) != NULL && + (ret = qosify_ubus_add_array(cur, dscp, CL_MAP_IPV6_ADDR) != 0)) + return ret; + + if ((cur = tb[CL_ADD_TCP_PORT]) != NULL && + (ret = qosify_ubus_add_array(cur, dscp, CL_MAP_TCP_PORTS) != 0)) + return ret; + + if ((cur = tb[CL_ADD_UDP_PORT]) != NULL && + (ret = qosify_ubus_add_array(cur, dscp, CL_MAP_UDP_PORTS) != 0)) + return ret; + + qosify_map_timeout = prev_timemout; + + return 0; +} + +enum { + CL_CONFIG_RESET, + CL_CONFIG_FILES, + CL_CONFIG_TIMEOUT, + CL_CONFIG_DSCP_UDP, + CL_CONFIG_DSCP_TCP, + CL_CONFIG_DSCP_PRIO, + CL_CONFIG_DSCP_BULK, + CL_CONFIG_DSCP_ICMP, + CL_CONFIG_BULK_TIMEOUT, + CL_CONFIG_BULK_PPS, + CL_CONFIG_PRIO_PKT_LEN, + CL_CONFIG_INTERFACES, + CL_CONFIG_DEVICES, + __CL_CONFIG_MAX +}; + +static const struct blobmsg_policy qosify_config_policy[__CL_CONFIG_MAX] = { + [CL_CONFIG_RESET] = { "reset", BLOBMSG_TYPE_BOOL }, + [CL_CONFIG_FILES] = { "files", BLOBMSG_TYPE_ARRAY }, + [CL_CONFIG_TIMEOUT] = { "timeout", BLOBMSG_TYPE_INT32 }, + [CL_CONFIG_DSCP_UDP] = { "dscp_default_udp", BLOBMSG_TYPE_STRING }, + [CL_CONFIG_DSCP_TCP] = { "dscp_default_tcp", BLOBMSG_TYPE_STRING }, + [CL_CONFIG_DSCP_PRIO] = { "dscp_prio", BLOBMSG_TYPE_STRING }, + [CL_CONFIG_DSCP_BULK] = { "dscp_bulk", BLOBMSG_TYPE_STRING }, + [CL_CONFIG_DSCP_ICMP] = { "dscp_icmp", BLOBMSG_TYPE_STRING }, + [CL_CONFIG_BULK_TIMEOUT] = { "bulk_trigger_timeout", BLOBMSG_TYPE_INT32 }, + [CL_CONFIG_BULK_PPS] = { "bulk_trigger_pps", BLOBMSG_TYPE_INT32 }, + [CL_CONFIG_PRIO_PKT_LEN] = { "prio_max_avg_pkt_len", BLOBMSG_TYPE_INT32 }, + [CL_CONFIG_INTERFACES] = { "interfaces", BLOBMSG_TYPE_TABLE }, + [CL_CONFIG_DEVICES] = { "devices", BLOBMSG_TYPE_TABLE }, +}; + +static int __set_dscp(uint8_t *dest, struct 
blob_attr *attr, bool reset) +{ + int dscp; + + if (reset) + *dest = 0xff; + + if (!attr) + return 0; + + dscp = qosify_map_dscp_value(blobmsg_get_string(attr)); + if (dscp < 0) + return -1; + + *dest = dscp; + + return 0; +} + +static int +qosify_ubus_config(struct ubus_context *ctx, struct ubus_object *obj, + struct ubus_request_data *req, const char *method, + struct blob_attr *msg) +{ + struct blob_attr *tb[__CL_CONFIG_MAX]; + struct blob_attr *cur; + uint8_t dscp; + bool reset = false; + int ret; + + blobmsg_parse(qosify_config_policy, __CL_CONFIG_MAX, tb, + blobmsg_data(msg), blobmsg_len(msg)); + + if ((cur = tb[CL_CONFIG_RESET]) != NULL) + reset = blobmsg_get_bool(cur); + + if (reset) + qosify_map_reset_config(); + + if ((cur = tb[CL_CONFIG_TIMEOUT]) != NULL) + qosify_map_timeout = blobmsg_get_u32(cur); + + if ((cur = tb[CL_CONFIG_FILES]) != NULL && + (ret = qosify_ubus_set_files(cur) != 0)) + return ret; + + __set_dscp(&dscp, tb[CL_CONFIG_DSCP_UDP], true); + if (dscp != 0xff) + qosify_map_set_dscp_default(CL_MAP_UDP_PORTS, dscp); + + __set_dscp(&dscp, tb[CL_CONFIG_DSCP_TCP], true); + if (dscp != 0xff) + qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, dscp); + + __set_dscp(&config.dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset); + __set_dscp(&config.dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset); + __set_dscp(&config.dscp_icmp, tb[CL_CONFIG_DSCP_ICMP], reset); + + if ((cur = tb[CL_CONFIG_BULK_TIMEOUT]) != NULL) + config.bulk_trigger_timeout = blobmsg_get_u32(cur); + + if ((cur = tb[CL_CONFIG_BULK_PPS]) != NULL) + config.bulk_trigger_pps = blobmsg_get_u32(cur); + + if ((cur = tb[CL_CONFIG_PRIO_PKT_LEN]) != NULL) + config.prio_max_avg_pkt_len = blobmsg_get_u32(cur); + + qosify_map_update_config(); + + qosify_iface_config_update(tb[CL_CONFIG_INTERFACES], tb[CL_CONFIG_DEVICES]); + + qosify_iface_check(); + + return 0; +} + + +static int +qosify_ubus_dump(struct ubus_context *ctx, struct ubus_object *obj, + struct ubus_request_data *req, const char *method, + struct blob_attr *msg) +{ + blob_buf_init(&b, 0); + qosify_map_dump(&b); + ubus_send_reply(ctx, req, b.head); + blob_buf_free(&b); + + return 0; +} + +static int +qosify_ubus_status(struct ubus_context *ctx, struct ubus_object *obj, + struct ubus_request_data *req, const char *method, + struct blob_attr *msg) +{ + blob_buf_init(&b, 0); + qosify_iface_status(&b); + ubus_send_reply(ctx, req, b.head); + blob_buf_free(&b); + + return 0; +} + +enum { + CL_DEV_EVENT_NAME, + CL_DEV_EVENT_ADD, + __CL_DEV_EVENT_MAX, +}; + +static int +qosify_ubus_check_devices(struct ubus_context *ctx, struct ubus_object *obj, + struct ubus_request_data *req, const char *method, + struct blob_attr *msg) +{ + qosify_iface_check(); + + return 0; +} + + +static const struct ubus_method qosify_methods[] = { + UBUS_METHOD_NOARG("reload", qosify_ubus_reload), + UBUS_METHOD("add", qosify_ubus_add, qosify_add_policy), + UBUS_METHOD_MASK("remove", qosify_ubus_add, qosify_add_policy, + ((1 << __CL_ADD_MAX) - 1) & ~(1 << CL_ADD_DSCP)), + UBUS_METHOD("config", qosify_ubus_config, qosify_config_policy), + UBUS_METHOD_NOARG("dump", qosify_ubus_dump), + UBUS_METHOD_NOARG("status", qosify_ubus_status), + UBUS_METHOD_NOARG("check_devices", qosify_ubus_check_devices), +}; + +static struct ubus_object_type qosify_object_type = + UBUS_OBJECT_TYPE("qosify", qosify_methods); + +static struct ubus_object qosify_object = { + .name = "qosify", + .type = &qosify_object_type, + .methods = qosify_methods, + .n_methods = ARRAY_SIZE(qosify_methods), +}; + +static void +ubus_connect_handler(struct 
ubus_context *ctx) +{ + ubus_add_object(ctx, &qosify_object); +} + +static struct ubus_auto_conn conn; + +int qosify_ubus_init(void) +{ + conn.cb = ubus_connect_handler; + ubus_auto_connect(&conn); + + return 0; +} + +void qosify_ubus_stop(void) +{ + ubus_auto_shutdown(&conn); +} + +struct iface_req { + char *name; + int len; +}; + +static void +netifd_if_cb(struct ubus_request *req, int type, struct blob_attr *msg) +{ + struct iface_req *ifr = req->priv; + enum { + IFS_ATTR_UP, + IFS_ATTR_DEV, + __IFS_ATTR_MAX + }; + static const struct blobmsg_policy policy[__IFS_ATTR_MAX] = { + [IFS_ATTR_UP] = { "up", BLOBMSG_TYPE_BOOL }, + [IFS_ATTR_DEV] = { "l3_device", BLOBMSG_TYPE_STRING }, + }; + struct blob_attr *tb[__IFS_ATTR_MAX]; + + blobmsg_parse(policy, __IFS_ATTR_MAX, tb, blobmsg_data(msg), blobmsg_len(msg)); + + if (!tb[IFS_ATTR_UP] || !tb[IFS_ATTR_DEV]) + return; + + if (!blobmsg_get_bool(tb[IFS_ATTR_UP])) + return; + + snprintf(ifr->name, ifr->len, "%s", blobmsg_get_string(tb[IFS_ATTR_DEV])); +} + +int qosify_ubus_check_interface(const char *name, char *ifname, int ifname_len) +{ + struct iface_req req = { ifname, ifname_len }; + char *obj_name = "network.interface."; + uint32_t id; + +#define PREFIX "network.interface." + obj_name = alloca(sizeof(PREFIX) + strlen(name) + 1); + sprintf(obj_name, PREFIX "%s", name); +#undef PREFIX + + ifname[0] = 0; + + if (ubus_lookup_id(&conn.ctx, obj_name, &id)) + return -1; + + ubus_invoke(&conn.ctx, id, "status", b.head, netifd_if_cb, &req, 1000); + + if (!ifname[0]) + return -1; + + return 0; +}
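The ubus parameters documented in the README map directly onto qosify_ubus_config() in ubus.c. As a usage illustration, the sketch below builds a small "config" request with libubox's blobmsg API and sends it to the "qosify" object; it is a minimal client sketch, and all concrete values (the 600 s timeout, the "wan" interface name, the bandwidth strings) are made-up examples, not defaults shipped by the daemon.

/* Hypothetical client, roughly equivalent to:
 *   ubus call qosify config '{ "reset": true, "timeout": 600, ... }'
 */
#include <stdbool.h>
#include <libubox/blobmsg.h>
#include <libubus.h>

int main(void)
{
	struct ubus_context *ctx = ubus_connect(NULL);
	static struct blob_buf b;
	uint32_t id;
	void *t, *i;

	if (!ctx || ubus_lookup_id(ctx, "qosify", &id))
		return 1;

	blob_buf_init(&b, 0);
	blobmsg_add_u8(&b, "reset", true);		/* start from defaults */
	blobmsg_add_u32(&b, "timeout", 600);		/* dynamic entry timeout, seconds */
	blobmsg_add_string(&b, "dscp_icmp", "CS5");
	blobmsg_add_u32(&b, "bulk_trigger_pps", 800);
	blobmsg_add_u32(&b, "bulk_trigger_timeout", 5);

	t = blobmsg_open_table(&b, "interfaces");	/* netifd interface sections */
	i = blobmsg_open_table(&b, "wan");		/* example interface name */
	blobmsg_add_string(&b, "bandwidth_up", "20mbit");	/* tc-style rate strings */
	blobmsg_add_string(&b, "bandwidth_down", "100mbit");
	blobmsg_add_string(&b, "mode", "diffserv4");
	blobmsg_add_u8(&b, "nat", true);
	blobmsg_close_table(&b, i);
	blobmsg_close_table(&b, t);

	/* invoke the "config" method with a 1 s timeout */
	return ubus_invoke(ctx, id, "config", b.head, NULL, NULL, 1000);
}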
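The files passed via the "files" parameter are parsed line by line in qosify_map_parse_line() in map.c: '#' starts a comment, the first whitespace-separated token is the key and the rest is the DSCP value; keys prefixed with "tcp:" or "udp:" are port (or port-range) matches, keys containing ':' are IPv6 addresses, and keys containing '.' are IPv4 addresses. The entries below are a hypothetical example file, embedded in a C string only so the sketch stays in the project's language; none of the values are shipped defaults.

#include <stdio.h>

/* Format per qosify_map_parse_line(): "<key> <dscp>" per line, '#' starts a comment.
 * <dscp> is a codepoint name (CS0..CS7, AFxx, EF, VA) or a number, optionally
 * prefixed with '+' to mark it as a fallback value (see qosify_map_dscp_value()).
 */
static const char example_file[] =
	"# interactive traffic\n"
	"tcp:22          AF41\n"
	"udp:53          CS5\n"
	"tcp:5000-5010   AF41\n"	/* port range, handled by qosify_map_set_port() */
	"# hosts\n"
	"192.168.16.10   CS6\n"		/* IPv4 entry */
	"fd00::20        +CS6\n";	/* IPv6 entry, '+' = fallback value */

int main(void)
{
	fputs(example_file, stdout);
	return 0;
}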
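DSCP values travel through the maps as a single byte: bits 0-5 hold the codepoint, bit 6 is QOSIFY_DSCP_FALLBACK_FLAG (set by a '+' prefix in qosify_map_dscp_value(), marking the value as a fallback that the classifier only applies conditionally), and bit 7 is QOSIFY_DSCP_DEFAULT_FLAG (set on per-port defaults by __qosify_map_set_dscp_default(); check_flow() in qosify-bpf.c only considers flows whose DSCP came from such a default entry). A small standalone sketch of that encoding, reusing the flag values from qosify-bpf.h:

#include <stdint.h>
#include <stdio.h>

#define QOSIFY_DSCP_FALLBACK_FLAG (1 << 6)	/* from qosify-bpf.h */
#define QOSIFY_DSCP_DEFAULT_FLAG  (1 << 7)

int main(void)
{
	uint8_t ef = 46;				/* "EF" codepoint */
	uint8_t val = ef | QOSIFY_DSCP_FALLBACK_FLAG;	/* what "+EF" parses to */

	/* same unpacking as parse_ipv4()/parse_ipv6() before writing the field */
	int force = !(val & QOSIFY_DSCP_FALLBACK_FLAG);	/* 0: fallback, applied conditionally */
	uint8_t dscp = val & 0x3f;			/* GENMASK(5, 0): strip the flags */

	printf("stored byte 0x%02x -> dscp %u, force %d\n", val, dscp, force);
	printf("dsfield written: 0x%02x (dscp << 2, ECN bits preserved)\n", dscp << 2);
	return 0;
}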
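interface.c does not talk netlink directly; it formats tc and ip commands with prepare_tc_cmd()/APPEND() and runs them through /bin/sh via run_cmd(). The sketch below rebuilds the core of the egress command strings the same way cmd_add_qdisc() does, purely to show what ends up being executed; the device name, bandwidth and flags are made-up example values, and the extra ingress/egress option strings are left out.

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const char *dev = "eth1", *bw = "20mbit", *mode = "diffserv4";
	bool egress = true, nat = true, host_isolate = true, eth = true;
	char buf[512];
	int ofs;

	/* root CAKE qdisc, mirroring cmd_add_qdisc() */
	ofs = snprintf(buf, sizeof(buf), "tc qdisc add dev '%s' root handle 1: cake", dev);
	ofs += snprintf(buf + ofs, sizeof(buf) - ofs, " bandwidth %s", bw);
	ofs += snprintf(buf + ofs, sizeof(buf) - ofs, " %s %sgress", mode, egress ? "e" : "in");
	if (host_isolate)
		ofs += snprintf(buf + ofs, sizeof(buf) - ofs, " %snat dual-%shost",
				nat ? "" : "no", egress ? "src" : "dst");
	else
		ofs += snprintf(buf + ofs, sizeof(buf) - ofs, " flows");
	puts(buf);	/* qdisc command handed to /bin/sh */

	/* attach the pinned classifier program created by loader.c */
	snprintf(buf, sizeof(buf),
		 "tc filter add dev '%s' parent 1: bpf"
		 " object-pinned /sys/fs/bpf/qosify_%sgress_%s verbose direct-action",
		 dev, egress ? "e" : "in", eth ? "eth" : "ip");
	puts(buf);
	return 0;
}

For ingress, cmd_add_ingress() additionally creates an "ifb-<dev>" device, attaches the same CAKE/bpf pair to it, and redirects all ingress traffic there with a u32 filter and a mirred action.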
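Timestamps in the flow table are bpf_ktime_get_ns() shifted right by 24, so one unit is 2^24 ns, roughly 16.8 ms, which keeps the value in 32 bits. FLOW_CHECK_INTERVAL and FLOW_TIMEOUT are the corresponding nanosecond periods pre-shifted by the same amount, and since the check interval is about one second, pkt_count can be compared directly against bulk_trigger_pps. A quick sanity check of the constants:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double unit_ms = (double)(1u << 24) / 1e6;			/* one timestamp unit in ms */
	uint32_t check = (uint32_t)(1000000000ull >> 24);		/* FLOW_CHECK_INTERVAL */
	uint32_t timeout = (uint32_t)(30ull * 1000000000ull >> 24);	/* FLOW_TIMEOUT */

	printf("unit: %.2f ms\n", unit_ms);				/* ~16.78 ms */
	printf("check interval: %u units (~%.2f s)\n", check, check * unit_ms / 1000);
	printf("flow timeout:   %u units (~%.2f s)\n", timeout, timeout * unit_ms / 1000);
	return 0;
}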
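check_flow() marks a flow as priority when its average packet length stays below prio_max_avg_pkt_len. That average is an exponentially weighted moving average kept in fixed point: pkt_len_avg stores the value shifted left by EWMA_SHIFT (12 bits) and each new packet contributes with weight 1/4. A user-space re-implementation of ewma() for illustration, with made-up packet sizes:

#include <stdint.h>
#include <stdio.h>

#define EWMA_SHIFT 12	/* fixed-point fraction bits, as in qosify-bpf.c */

/* avg_fp holds the average shifted left by EWMA_SHIFT */
static uint32_t ewma(uint32_t *avg_fp, uint32_t val)
{
	if (*avg_fp)
		*avg_fp = (*avg_fp * 3) / 4 + (val << EWMA_SHIFT) / 4;
	else
		*avg_fp = val << EWMA_SHIFT;	/* first sample initializes the average */

	return *avg_fp >> EWMA_SHIFT;		/* integer part, compared against prio_max_avg_pkt_len */
}

int main(void)
{
	/* example packet sizes: small request followed by full-size segments */
	uint32_t lens[] = { 120, 1500, 1500, 1500, 1500, 1500 };
	uint32_t avg = 0;

	for (unsigned i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("len %4u -> avg %4u\n", lens[i], ewma(&avg, lens[i]));
	return 0;
}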
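Rewriting the ToS byte invalidates the IPv4 header checksum, so ipv4_change_dsfield() in qosify-bpf.c patches it incrementally instead of recomputing it, using essentially the same arithmetic as the kernel helper of the same name. Below is a small user-space check, over a fabricated 20-byte header, that the incremental update matches a full one's-complement recomputation; it is a verification sketch, not code from the patch.

#include <stdint.h>
#include <stdio.h>

/* One's-complement checksum over 16-bit words (host order), skipping the checksum field */
static uint16_t ip_checksum(const uint16_t *w, int nwords, int skip)
{
	uint32_t sum = 0;

	for (int i = 0; i < nwords; i++)
		if (i != skip)
			sum += w[i];
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ~sum & 0xffff;
}

int main(void)
{
	/* fabricated IPv4 header as big-endian words in host order; word 0 = version/ihl + tos */
	uint16_t hdr[10] = { 0x4500, 0x0054, 0x1c46, 0x4000, 0x4001, 0,
			     0xc0a8, 0x0001, 0xc0a8, 0x00c7 };
	uint8_t old_tos = hdr[0] & 0xff, new_tos = 46 << 2;	/* set EF, ECN bits clear */
	uint32_t check;

	hdr[5] = ip_checksum(hdr, 10, 5);

	/* incremental update, same steps as ipv4_change_dsfield() above */
	check = hdr[5];
	check += old_tos;
	if ((check + 1) >> 16)
		check = (check + 1) & 0xffff;
	check -= new_tos;
	check += check >> 16;		/* pull the borrow back in */
	check &= 0xffff;

	hdr[0] = (hdr[0] & 0xff00) | new_tos;
	printf("incremental 0x%04x, recomputed 0x%04x\n",
	       check, ip_checksum(hdr, 10, 5));
	return 0;
}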