pkt_sched: fq: use proper locking in fq_dump_stats()
author Eric Dumazet <edumazet@google.com>
Thu, 15 Sep 2016 23:20:01 +0000 (16:20 -0700)
committer David S. Miller <davem@davemloft.net>
Mon, 19 Sep 2016 02:15:08 +0000 (22:15 -0400)
When fq is used on 32bit kernels, we need to lock the qdisc before
copying 64bit fields.

Otherwise "tc -s qdisc ..." might report bogus values.

Fixes: afe4fd062416 ("pkt_sched: fq: Fair Queue packet scheduler")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/sched/sch_fq.c

index e5458b99e09cb4d4226a1ef49a7ec81f2e20d94a..dc52cc10d6ed64e6841aa68d4cbdbbe066c4a5ef 100644 (file)
@@ -823,20 +823,24 @@ nla_put_failure:
 static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
 {
        struct fq_sched_data *q = qdisc_priv(sch);
-       u64 now = ktime_get_ns();
-       struct tc_fq_qd_stats st = {
-               .gc_flows               = q->stat_gc_flows,
-               .highprio_packets       = q->stat_internal_packets,
-               .tcp_retrans            = q->stat_tcp_retrans,
-               .throttled              = q->stat_throttled,
-               .flows_plimit           = q->stat_flows_plimit,
-               .pkts_too_long          = q->stat_pkts_too_long,
-               .allocation_errors      = q->stat_allocation_errors,
-               .flows                  = q->flows,
-               .inactive_flows         = q->inactive_flows,
-               .throttled_flows        = q->throttled_flows,
-               .time_next_delayed_flow = q->time_next_delayed_flow - now,
-       };
+       struct tc_fq_qd_stats st;
+
+       sch_tree_lock(sch);
+
+       st.gc_flows               = q->stat_gc_flows;
+       st.highprio_packets       = q->stat_internal_packets;
+       st.tcp_retrans            = q->stat_tcp_retrans;
+       st.throttled              = q->stat_throttled;
+       st.flows_plimit           = q->stat_flows_plimit;
+       st.pkts_too_long          = q->stat_pkts_too_long;
+       st.allocation_errors      = q->stat_allocation_errors;
+       st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
+       st.flows                  = q->flows;
+       st.inactive_flows         = q->inactive_flows;
+       st.throttled_flows        = q->throttled_flows;
+       st.pad                    = 0;
+
+       sch_tree_unlock(sch);
 
        return gnet_stats_copy_app(d, &st, sizeof(st));
 }