When the qdisc packet limit is exceeded and the list returned by
skb_gso_segment() contains more than one segment, netem drops only
the first segment, skipping the rest, hence kmemleak reports:
unreferenced object 0xffff880b5d23b600 (size 1024):
comm "softirq", pid 0, jiffies
4384527763 (age 2770.629s)
hex dump (first 32 bytes):
00 80 23 5d 0b 88 ff ff 00 00 00 00 00 00 00 00 ..#]............
00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
backtrace:
[<
00000000d8a19b9d>] __alloc_skb+0xc9/0x520
[<
000000001709b32f>] skb_segment+0x8c8/0x3710
[<
00000000c7b9bb88>] tcp_gso_segment+0x331/0x1830
[<
00000000c921cba1>] inet_gso_segment+0x476/0x1370
[<
000000008b762dd4>] skb_mac_gso_segment+0x1f9/0x510
[<
000000002182660a>] __skb_gso_segment+0x1dd/0x620
[<
00000000412651b9>] netem_enqueue+0x1536/0x2590 [sch_netem]
[<
0000000005d3b2a9>] __dev_queue_xmit+0x1167/0x2120
[<
00000000fc5f7327>] ip_finish_output2+0x998/0xf00
[<
00000000d309e9d3>] ip_output+0x1aa/0x2c0
[<
000000007ecbd3a4>] tcp_transmit_skb+0x18db/0x3670
[<
0000000042d2a45f>] tcp_write_xmit+0x4d4/0x58c0
[<
0000000056a44199>] tcp_tasklet_func+0x3d9/0x540
[<
0000000013d06d02>] tasklet_action+0x1ca/0x250
[<
00000000fcde0b8b>] __do_softirq+0x1b4/0x5a3
[<
00000000e7ed027c>] irq_exit+0x1e2/0x210
Fix it by adding the rest of the segments, if any, to the skb
'to_free' list. Add new __qdisc_drop_all() and qdisc_drop_all()
helpers, because they may be useful in the future if segmented
GSO packets need to be dropped in other places.
Fixes: 6071bd1aa13e ("netem: Segment GSO packets on enqueue")
Signed-off-by: Alexey Kodanev <alexey.kodanev@oracle.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
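To make the leak mechanism concrete before reading the patch, here is a
minimal userspace model (illustrative stand-ins only, not kernel code).
qdisc_drop() splices via __qdisc_drop(), which unconditionally overwrites
skb->next with the current head of the 'to_free' list; for a segmented skb
that severs the link to segments 1..n, leaving them unreachable, which is
what kmemleak flags above:

  #include <stdio.h>

  /* Stand-in for struct sk_buff: only the ->next link matters here. */
  struct node { struct node *next; int id; };

  /* Models the old path: only 'skb' itself is moved to the to_free
   * list; whatever hung off skb->next is silently orphaned.
   */
  static void old_drop(struct node *skb, struct node **to_free)
  {
  	skb->next = *to_free;	/* clobbers the link to segment 1 */
  	*to_free = skb;
  }

  int main(void)
  {
  	struct node seg2 = { NULL, 2 };
  	struct node seg1 = { &seg2, 1 };
  	struct node head = { &seg1, 0 };	/* GSO chain: 0 -> 1 -> 2 */
  	struct node *to_free = NULL;

  	old_drop(&head, &to_free);
  	for (struct node *n = to_free; n; n = n->next)
  		printf("queued for free: segment %d\n", n->id);
  	/* Prints only segment 0; segments 1 and 2 are now unreachable. */
  	return 0;
  }

With that picture in mind, the patch below adds the chain-aware drop
helpers and switches netem's over-limit path to them.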
 	*to_free = skb;
 }
 
+static inline void __qdisc_drop_all(struct sk_buff *skb,
+				    struct sk_buff **to_free)
+{
+	if (skb->prev)
+		skb->prev->next = *to_free;
+	else
+		skb->next = *to_free;
+	*to_free = skb;
+}
+
 static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
 						   struct qdisc_skb_head *qh,
 						   struct sk_buff **to_free)
 	return NET_XMIT_DROP;
 }
 
+static inline int qdisc_drop_all(struct sk_buff *skb, struct Qdisc *sch,
+				 struct sk_buff **to_free)
+{
+	__qdisc_drop_all(skb, to_free);
+	qdisc_qstats_drop(sch);
+
+	return NET_XMIT_DROP;
+}
+
 /* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
    long it will take to send a packet given its size.
  */
 	}
 
 	if (unlikely(sch->q.qlen >= sch->limit))
-		return qdisc_drop(skb, sch, to_free);
+		return qdisc_drop_all(skb, sch, to_free);
 
 	qdisc_qstats_backlog_inc(sch, skb);
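The new helper avoids the leak by splicing the whole chain in one step.
A minimal userspace sketch of the same pointer manipulation, under the
assumption __qdisc_drop_all() itself makes, namely that skb->prev of the
head points at the last segment of the chain:

  #include <stdio.h>

  /* Stand-in for struct sk_buff: ->next links the chain, and ->prev of
   * the head is assumed to point at the tail segment.
   */
  struct node {
  	struct node *next;
  	struct node *prev;
  	int id;
  };

  /* Mirrors __qdisc_drop_all(): one pointer update hands the entire
   * ->next-linked chain over to the to_free list; a lone skb (no prev)
   * degenerates to the old __qdisc_drop() behavior.
   */
  static void drop_all(struct node *skb, struct node **to_free)
  {
  	if (skb->prev)
  		skb->prev->next = *to_free;
  	else
  		skb->next = *to_free;
  	*to_free = skb;
  }

  int main(void)
  {
  	struct node seg2 = { NULL, NULL, 2 };	/* tail */
  	struct node seg1 = { &seg2, NULL, 1 };
  	struct node head = { &seg1, &seg2, 0 };	/* prev = tail */
  	struct node *to_free = NULL;

  	drop_all(&head, &to_free);
  	for (struct node *n = to_free; n; n = n->next)
  		printf("queued for free: segment %d\n", n->id);	/* 0, 1, 2 */
  	return 0;
  }

Splicing onto 'to_free' rather than freeing in place preserves the
existing qdisc contract: the caller releases the dropped packets only
after the root qdisc lock has been released, so the enqueue path never
pays for kfree_skb() under the lock.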