From: Felix Fietkau
Date: Sat, 10 Mar 2018 09:09:07 +0000 (+0100)
Subject: kernel: merge a pending fix for HFSC warnings/slowdowns (fixes FS#1136)
X-Git-Tag: v17.01.5~58
X-Git-Url: http://git.lede-project.org./?a=commitdiff_plain;h=60f8d388c69e0faddbf1c3033cdcb74b38fe3694;p=openwrt%2Fstaging%2Fpepe2k.git

kernel: merge a pending fix for HFSC warnings/slowdowns (fixes FS#1136)

Signed-off-by: Felix Fietkau
---

diff --git a/target/linux/generic/patches-4.4/620-net_sched-codel-do-not-defer-queue-length-update.patch b/target/linux/generic/patches-4.4/620-net_sched-codel-do-not-defer-queue-length-update.patch
new file mode 100644
index 0000000000..2ec9c7a9e2
--- /dev/null
+++ b/target/linux/generic/patches-4.4/620-net_sched-codel-do-not-defer-queue-length-update.patch
@@ -0,0 +1,86 @@
+From: Konstantin Khlebnikov
+Date: Mon, 21 Aug 2017 11:14:14 +0300
+Subject: [PATCH] net_sched/codel: do not defer queue length update
+
+When codel wants to drop the last packet in ->dequeue() it cannot call
+qdisc_tree_reduce_backlog() right away - it would notify the parent qdisc
+about a zero qlen and HTB/HFSC would deactivate the class. The same class
+would then be deactivated a second time by the caller of ->dequeue().
+Currently codel and fq_codel defer the update. This triggers a warning in
+HFSC when its qlen != 0 but there are no active classes.
+
+This patch updates the parent queue length immediately: it temporarily
+increments qlen around qdisc_tree_reduce_backlog() to prevent the first
+class deactivation if we have an skb to return.
+
+This might open another problem in HFSC - the peek operation could now
+fail and deactivate the parent class.
+
+Signed-off-by: Konstantin Khlebnikov
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=109581
+---
+
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -79,11 +79,17 @@ static struct sk_buff *codel_qdisc_deque
+ 
+ 	skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue);
+ 
+-	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+-	 * or HTB crashes. Defer it for next round.
++	/* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
++	 * parent class, dequeue in parent qdisc will do the same if we
++	 * return skb. Temporary increment qlen if we have skb.
+ 	 */
+-	if (q->stats.drop_count && sch->q.qlen) {
+-		qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
++	if (q->stats.drop_count) {
++		if (skb)
++			sch->q.qlen++;
++		qdisc_tree_reduce_backlog(sch, q->stats.drop_count,
++					  q->stats.drop_len);
++		if (skb)
++			sch->q.qlen--;
+ 		q->stats.drop_count = 0;
+ 		q->stats.drop_len = 0;
+ 	}
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -311,6 +311,21 @@ begin:
+ 	flow->dropped += q->cstats.drop_count - prev_drop_count;
+ 	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;
+ 
++	/* If our qlen is 0 qdisc_tree_reduce_backlog() will deactivate
++	 * parent class, dequeue in parent qdisc will do the same if we
++	 * return skb. Temporary increment qlen if we have skb.
++	 */
++	if (q->cstats.drop_count) {
++		if (skb)
++			sch->q.qlen++;
++		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
++					  q->cstats.drop_len);
++		if (skb)
++			sch->q.qlen--;
++		q->cstats.drop_count = 0;
++		q->cstats.drop_len = 0;
++	}
++
+ 	if (!skb) {
+ 		/* force a pass through old_flows to prevent starvation */
+ 		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
+@@ -321,15 +336,6 @@ begin:
+ 	}
+ 	qdisc_bstats_update(sch, skb);
+ 	flow->deficit -= qdisc_pkt_len(skb);
+-	/* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
+-	 * or HTB crashes. Defer it for next round.
+-	 */
+-	if (q->cstats.drop_count && sch->q.qlen) {
+-		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
+-					  q->cstats.drop_len);
+-		q->cstats.drop_count = 0;
+-		q->cstats.drop_len = 0;
+-	}
+ 	return skb;
+ }
diff --git a/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch b/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
index 46fceffcf1..fbe900891c 100644
--- a/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
+++ b/target/linux/generic/patches-4.4/660-fq_codel_defaults.patch
@@ -1,6 +1,6 @@
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -471,7 +471,7 @@ static int fq_codel_init(struct Qdisc *s
+@@ -477,7 +477,7 @@ static int fq_codel_init(struct Qdisc *s
  	sch->limit = 10*1024;
  	q->flows_cnt = 1024;
diff --git a/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch b/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
index 9d1962caa9..8d70b82350 100644
--- a/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
+++ b/target/linux/generic/patches-4.4/662-use_fq_codel_by_default.patch
@@ -13,7 +13,7 @@ device, it has to decide which ones to send first, which ones to
 --- a/net/sched/sch_fq_codel.c
 +++ b/net/sched/sch_fq_codel.c
-@@ -688,7 +688,7 @@ static const struct Qdisc_class_ops fq_c
+@@ -694,7 +694,7 @@ static const struct Qdisc_class_ops fq_c
  	.walk		=	fq_codel_walk,
  };
 
@@ -22,7 +22,7 @@
  	.cl_ops		=	&fq_codel_class_ops,
  	.id		=	"fq_codel",
  	.priv_size	=	sizeof(struct fq_codel_sched_data),
-@@ -704,6 +704,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
+@@ -710,6 +710,7 @@ static struct Qdisc_ops fq_codel_qdisc_o
  	.dump_stats	=	fq_codel_dump_stats,
  	.owner		=	THIS_MODULE,
  };
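
The idea behind the 620 patch can be illustrated outside the kernel. The sketch below uses simplified stand-in types and helpers (mini_qdisc, mini_tree_reduce_backlog and report_drops are made up for illustration, they are not kernel APIs): when a packet is about to be returned to the caller, the queue length stays bumped for the duration of the backlog update so the parent never sees a zero qlen and does not deactivate the class, then it is restored before the packet is handed back.

/* Standalone illustration of the "temporarily bump qlen" idiom from the
 * patch above. mini_qdisc and mini_tree_reduce_backlog() are simplified
 * stand-ins for struct Qdisc and qdisc_tree_reduce_backlog(); only the
 * ordering of the qlen updates around the backlog report matters here.
 */
#include <stdbool.h>
#include <stdio.h>

struct mini_qdisc {
	unsigned int qlen;	/* packets the qdisc still reports as queued */
};

/* Stand-in for qdisc_tree_reduce_backlog(): the real function walks up the
 * qdisc tree and lets HTB/HFSC deactivate the class once qlen reads zero.
 */
static void mini_tree_reduce_backlog(struct mini_qdisc *sch,
				     unsigned int drops, unsigned int drop_len)
{
	printf("reduce backlog by %u packets / %u bytes (qlen seen: %u)\n",
	       drops, drop_len, sch->qlen);
	if (sch->qlen == 0)
		printf("-> parent would deactivate this class now\n");
}

/* Mirrors the fixed dequeue path: if a packet is about to be handed back to
 * the caller, keep it counted in qlen while the backlog update runs, so the
 * parent does not deactivate the class one dequeue too early.
 */
static void report_drops(struct mini_qdisc *sch, bool have_skb,
			 unsigned int drops, unsigned int drop_len)
{
	if (!drops)
		return;
	if (have_skb)
		sch->qlen++;
	mini_tree_reduce_backlog(sch, drops, drop_len);
	if (have_skb)
		sch->qlen--;
}

int main(void)
{
	struct mini_qdisc sch = { .qlen = 0 };

	/* codel dropped two packets and is returning the last queued one:
	 * without the temporary increment the parent would see qlen == 0
	 * here even though a packet is still on its way to the caller.
	 */
	report_drops(&sch, true, 2, 3000);
	return 0;
}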