net: sched: gred: provide a better structured dump and expose stats
author Jakub Kicinski <jakub.kicinski@netronome.com>
Thu, 15 Nov 2018 06:23:49 +0000 (22:23 -0800)
committer David S. Miller <davem@davemloft.net>
Sat, 17 Nov 2018 07:08:51 +0000 (23:08 -0800)
Currently all of GRED's virtual queue data is dumped in a single
array in a single attribute.  This makes it pretty much impossible
to add new fields.  In order to expose more detailed stats, add a
new set of nested attributes.  We can now expose the 64-bit value of
bytesin and all the mark stats, which were not part of the original
design.
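
The new dump nests one TCA_GRED_VQ_ENTRY per active virtual queue
inside TCA_GRED_VQ_LIST:

  TCA_GRED_VQ_LIST
    TCA_GRED_VQ_ENTRY
      TCA_GRED_VQ_DP
      TCA_GRED_VQ_STAT_BYTES
      TCA_GRED_VQ_STAT_PACKETS
      TCA_GRED_VQ_STAT_BACKLOG
      ...
    TCA_GRED_VQ_ENTRY
      ...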

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/uapi/linux/pkt_sched.h
net/sched/sch_gred.c

index ee017bc057a3cb390f995329ec8ab5432a844557..c8f717346b601c24c9d2e8c331da6ab04a6526c0 100644 (file)
@@ -291,11 +291,37 @@ enum {
        TCA_GRED_DPS,
        TCA_GRED_MAX_P,
        TCA_GRED_LIMIT,
+       TCA_GRED_VQ_LIST,       /* nested TCA_GRED_VQ_ENTRY */
        __TCA_GRED_MAX,
 };
 
 #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
 
+enum {
+       TCA_GRED_VQ_ENTRY_UNSPEC,
+       TCA_GRED_VQ_ENTRY,      /* nested TCA_GRED_VQ_* */
+       __TCA_GRED_VQ_ENTRY_MAX,
+};
+#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
+
+enum {
+       TCA_GRED_VQ_UNSPEC,
+       TCA_GRED_VQ_PAD,
+       TCA_GRED_VQ_DP,                 /* u32 */
+       TCA_GRED_VQ_STAT_BYTES,         /* u64 */
+       TCA_GRED_VQ_STAT_PACKETS,       /* u32 */
+       TCA_GRED_VQ_STAT_BACKLOG,       /* u32 */
+       TCA_GRED_VQ_STAT_PROB_DROP,     /* u32 */
+       TCA_GRED_VQ_STAT_PROB_MARK,     /* u32 */
+       TCA_GRED_VQ_STAT_FORCED_DROP,   /* u32 */
+       TCA_GRED_VQ_STAT_FORCED_MARK,   /* u32 */
+       TCA_GRED_VQ_STAT_PDROP,         /* u32 */
+       TCA_GRED_VQ_STAT_OTHER,         /* u32 */
+       __TCA_GRED_VQ_MAX
+};
+
+#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
+
 struct tc_gred_qopt {
        __u32           limit;        /* HARD maximal queue length (bytes)    */
        __u32           qth_min;      /* Min average length threshold (bytes) */
index 6f209c83ee7ae580154ce676e45ec28e23d1d3a3..dc09a32c4b4f4fb8561acd6119372ba37e93fa02 100644 (file)
@@ -404,6 +404,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
        [TCA_GRED_DPS]          = { .len = sizeof(struct tc_gred_sopt) },
        [TCA_GRED_MAX_P]        = { .type = NLA_U32 },
        [TCA_GRED_LIMIT]        = { .type = NLA_U32 },
+       [TCA_GRED_VQ_LIST]      = { .type = NLA_REJECT },
 };
 
 static int gred_change(struct Qdisc *sch, struct nlattr *opt,
@@ -517,7 +518,7 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
 static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 {
        struct gred_sched *table = qdisc_priv(sch);
-       struct nlattr *parms, *opts = NULL;
+       struct nlattr *parms, *vqs, *opts = NULL;
        int i;
        u32 max_p[MAX_DPs];
        struct tc_gred_sopt sopt = {
@@ -544,6 +545,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
        if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
                goto nla_put_failure;
 
+       /* Old style all-in-one dump of VQs */
        parms = nla_nest_start(skb, TCA_GRED_PARMS);
        if (parms == NULL)
                goto nla_put_failure;
@@ -594,6 +596,55 @@ append_opt:
 
        nla_nest_end(skb, parms);
 
+       /* Dump the VQs again, in more structured way */
+       vqs = nla_nest_start(skb, TCA_GRED_VQ_LIST);
+       if (!vqs)
+               goto nla_put_failure;
+
+       for (i = 0; i < MAX_DPs; i++) {
+               struct gred_sched_data *q = table->tab[i];
+               struct nlattr *vq;
+
+               if (!q)
+                       continue;
+
+               vq = nla_nest_start(skb, TCA_GRED_VQ_ENTRY);
+               if (!vq)
+                       goto nla_put_failure;
+
+               if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
+                       goto nla_put_failure;
+
+               /* Stats */
+               if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
+                                     TCA_GRED_VQ_PAD))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
+                               gred_backlog(table, q, sch)))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
+                               q->stats.prob_drop))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
+                               q->stats.prob_mark))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
+                               q->stats.forced_drop))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
+                               q->stats.forced_mark))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
+                       goto nla_put_failure;
+               if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
+                       goto nla_put_failure;
+
+               nla_nest_end(skb, vq);
+       }
+       nla_nest_end(skb, vqs);
+
        return nla_nest_end(skb, opts);
 
 nla_put_failure:
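
For illustration, a minimal sketch of how a userspace dumper might walk
the new nest.  This assumes libmnl; print_gred_vqs() is a hypothetical
helper, not iproute2's actual implementation:

    #include <stdio.h>
    #include <libmnl/libmnl.h>
    #include <linux/pkt_sched.h>

    /* Walk TCA_GRED_VQ_LIST: one TCA_GRED_VQ_ENTRY nest per active VQ. */
    static void print_gred_vqs(const struct nlattr *vq_list)
    {
            const struct nlattr *vq, *attr;

            mnl_attr_for_each_nested(vq, vq_list) {
                    if (mnl_attr_get_type(vq) != TCA_GRED_VQ_ENTRY)
                            continue;

                    mnl_attr_for_each_nested(attr, vq) {
                            switch (mnl_attr_get_type(attr)) {
                            case TCA_GRED_VQ_DP:
                                    printf("vq %u:", mnl_attr_get_u32(attr));
                                    break;
                            case TCA_GRED_VQ_STAT_BYTES:
                                    printf(" bytes %llu",
                                           (unsigned long long)mnl_attr_get_u64(attr));
                                    break;
                            case TCA_GRED_VQ_STAT_PROB_MARK:
                                    printf(" prob_mark %u",
                                           mnl_attr_get_u32(attr));
                                    break;
                            }
                    }
                    printf("\n");
            }
    }

Note that the policy table marks TCA_GRED_VQ_LIST as NLA_REJECT, so the
list is dump-only; the kernel refuses the attribute if userspace passes
it on configuration.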