 	qosify_active_timeout = 300;
 	memset(&config, 0, sizeof(config));
-	config.dscp_prio.ingress = 0xff;
-	config.dscp_prio.egress = 0xff;
-	config.dscp_bulk.ingress = 0xff;
-	config.dscp_bulk.egress = 0xff;
+	config.flow.dscp_prio.ingress = 0xff;
+	config.flow.dscp_prio.egress = 0xff;
+	config.flow.dscp_bulk.ingress = 0xff;
+	config.flow.dscp_bulk.egress = 0xff;
 	config.dscp_icmp.ingress = 0xff;
 	config.dscp_icmp.egress = 0xff;
 }
 }
 static __always_inline void
-check_flow_bulk(struct qosify_config *config, struct __sk_buff *skb,
+check_flow_bulk(struct qosify_flow_config *config, struct __sk_buff *skb,
 		struct flow_bucket *flow, struct qosify_dscp_val *out_val)
 {
 	bool trigger = false;
 }
 static __always_inline void
-check_flow_prio(struct qosify_config *config, struct __sk_buff *skb,
+check_flow_prio(struct qosify_flow_config *config, struct __sk_buff *skb,
 		struct flow_bucket *flow, struct qosify_dscp_val *out_val)
 {
 	if ((flow->val.flags & QOSIFY_VAL_FLAG_BULK_CHECK) ||
 }
 static __always_inline void
-check_flow(struct qosify_config *config, struct __sk_buff *skb,
+check_flow(struct qosify_flow_config *config, struct __sk_buff *skb,
 	   struct qosify_dscp_val *out_val)
 {
 	struct flow_bucket flow_data;
 	int type;
 	config = get_config();
+	if (!config)
+		return TC_ACT_OK;
 	if (module_flags & QOSIFY_IP_ONLY)
 		type = skb->protocol;
 		val = ip_val->dscp;
 	}
-	check_flow(config, skb, &val);
+	check_flow(&config->flow, skb, &val);
 	dscp = dscp_val(&val, ingress);
 	if (dscp == 0xff)
 } __attribute__((packed));
 /* global config data */
-struct qosify_config {
+
+struct qosify_flow_config {
 	struct qosify_dscp_val dscp_prio;
 	struct qosify_dscp_val dscp_bulk;
-	struct qosify_dscp_val dscp_icmp;
 	uint8_t bulk_trigger_timeout;
 	uint16_t bulk_trigger_pps;
 	uint16_t prio_max_avg_pkt_len;
 };
+struct qosify_config {
+	struct qosify_dscp_val dscp_icmp;
+
+	struct qosify_flow_config flow;
+};
+
 struct qosify_ip_map_val {
 	struct qosify_dscp_val dscp; /* must be first */
 	uint8_t seen;
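
/*
 * Illustrative sketch, not part of the patch: with the header layout from
 * the hunk above, the flow-only defaults from the first hunk can be reset
 * through the nested struct alone, without touching dscp_icmp or the other
 * global fields. The helper name qosify_flow_config_defaults is an
 * assumption for illustration; 0xff is the "no DSCP set" sentinel used by
 * the dscp == 0xff checks in the hunks above.
 */
static void qosify_flow_config_defaults(struct qosify_flow_config *flow)
{
	flow->dscp_prio.ingress = 0xff;
	flow->dscp_prio.egress = 0xff;
	flow->dscp_bulk.ingress = 0xff;
	flow->dscp_bulk.egress = 0xff;
}
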
 	if (dscp.ingress != 0xff)
 		qosify_map_set_dscp_default(CL_MAP_TCP_PORTS, dscp);
-	if (__set_dscp(&config.dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset) ||
-	    __set_dscp(&config.dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset) ||
+	if (__set_dscp(&config.flow.dscp_prio, tb[CL_CONFIG_DSCP_PRIO], reset) ||
+	    __set_dscp(&config.flow.dscp_bulk, tb[CL_CONFIG_DSCP_BULK], reset) ||
 	    __set_dscp(&config.dscp_icmp, tb[CL_CONFIG_DSCP_ICMP], reset))
 		return UBUS_STATUS_INVALID_ARGUMENT;
 	if ((cur = tb[CL_CONFIG_BULK_TIMEOUT]) != NULL)
-		config.bulk_trigger_timeout = blobmsg_get_u32(cur);
+		config.flow.bulk_trigger_timeout = blobmsg_get_u32(cur);
 	if ((cur = tb[CL_CONFIG_BULK_PPS]) != NULL)
-		config.bulk_trigger_pps = blobmsg_get_u32(cur);
+		config.flow.bulk_trigger_pps = blobmsg_get_u32(cur);
 	if ((cur = tb[CL_CONFIG_PRIO_PKT_LEN]) != NULL)
-		config.prio_max_avg_pkt_len = blobmsg_get_u32(cur);
+		config.flow.prio_max_avg_pkt_len = blobmsg_get_u32(cur);
 	qosify_map_update_config();
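
/*
 * Illustrative sketch, not part of the patch: userspace can keep pushing the
 * whole struct qosify_config (now with the nested .flow member) into BPF in
 * one update, assuming qosify_map_update_config() in the hunk above copies
 * the global config into a single-entry array map. The map fd parameter and
 * the helper name update_config_map are assumptions; struct qosify_config
 * comes from the project header shown in the earlier hunk.
 */
#include <stdint.h>
#include <bpf/bpf.h>

extern struct qosify_config config;	/* the global modified in the hunk above */

static void update_config_map(int map_fd)
{
	uint32_t key = 0;

	/* single-entry array map: key 0 holds the full config struct,
	 * nested .flow fields included */
	bpf_map_update_elem(map_fd, &key, &config, BPF_ANY);
}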