From 872619d8cf810c17279335ef531a2a34f3b4e589 Mon Sep 17 00:00:00 2001
From: GhantaKrishnamurthy MohanKrishna
Date: Wed, 21 Mar 2018 14:37:45 +0100
Subject: [PATCH] tipc: step sk->sk_drops when rcv buffer is full

Currently, when tipc is unable to queue a received message on a socket,
the message is rejected back to the sender with error TIPC_ERR_OVERLOAD.
However, the application on this socket has no knowledge of these
discards.

This commit steps the sk_drops counter whenever tipc is unable to queue
a received message, and exports sk_drops via tipc socket diagnostics.

Acked-by: Jon Maloy
Acked-by: Ying Xue
Signed-off-by: GhantaKrishnamurthy MohanKrishna
Signed-off-by: Parthasarathy Bhuvaragan
Signed-off-by: David S. Miller
---
 include/uapi/linux/tipc_netlink.h | 1 +
 net/tipc/socket.c                 | 9 +++++++--
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index d7cec0480d70..d896ded51bcb 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -251,6 +251,7 @@ enum {
 	TIPC_NLA_SOCK_STAT_SENDQ,	/* u32 */
 	TIPC_NLA_SOCK_STAT_LINK_CONG,	/* flag */
 	TIPC_NLA_SOCK_STAT_CONN_CONG,	/* flag */
+	TIPC_NLA_SOCK_STAT_DROP,	/* u32 */
 
 	__TIPC_NLA_SOCK_STAT_MAX,
 	TIPC_NLA_SOCK_STAT_MAX = __TIPC_NLA_SOCK_STAT_MAX - 1
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 07559ce4b8ba..732ec894f69f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2122,8 +2122,10 @@ static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
 	    (!sk_conn && msg_connected(hdr)) ||
 	    (!grp && msg_in_group(hdr)))
 		err = TIPC_ERR_NO_PORT;
-	else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit)
+	else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
+		atomic_inc(&sk->sk_drops);
 		err = TIPC_ERR_OVERLOAD;
+	}
 
 	if (unlikely(err)) {
 		tipc_skb_reject(net, err, skb, xmitq);
@@ -2202,6 +2204,7 @@ static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
 
 		/* Overload => reject message back to sender */
 		onode = tipc_own_addr(sock_net(sk));
+		atomic_inc(&sk->sk_drops);
 		if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
 			__skb_queue_tail(xmitq, skb);
 		break;
@@ -3293,7 +3296,9 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
 	if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
 			skb_queue_len(&sk->sk_receive_queue)) ||
 	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
-			skb_queue_len(&sk->sk_write_queue)))
+			skb_queue_len(&sk->sk_write_queue)) ||
+	    nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
+			atomic_read(&sk->sk_drops)))
 		goto stat_msg_cancel;
 
 	if (tsk->cong_link_cnt &&
-- 
2.30.2
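
For readers following the logic rather than the diff context, the accounting this
patch adds can be modelled in a few lines of ordinary C. The sketch below is
illustrative only: the names toy_sock, toy_filter_rcv and TOY_ERR_OVERLOAD are
invented for the example, the real limit is the value called "limit" in the first
hunk above, and the real counter is sk->sk_drops stepped with atomic_inc().

/*
 * Standalone model of the drop accounting added above. All names here
 * are invented for illustration and are not kernel APIs.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_ERR_OVERLOAD 1	/* stands in for TIPC_ERR_OVERLOAD */

struct toy_sock {
	size_t rmem_alloc;	/* bytes currently queued, cf. sk_rmem_alloc_get() */
	size_t rcvbuf_limit;	/* queueing limit, "limit" in the hunk above */
	atomic_uint drops;	/* mirrors sk->sk_drops */
};

/* Return 0 if the message is queued, or an error code if it is rejected
 * back to the sender; stepping the drop counter on rejection is the
 * behaviour this patch adds to the receive path.
 */
static int toy_filter_rcv(struct toy_sock *sk, size_t truesize)
{
	if (sk->rmem_alloc + truesize >= sk->rcvbuf_limit) {
		atomic_fetch_add(&sk->drops, 1);	/* new in this patch */
		return TOY_ERR_OVERLOAD;		/* rejected, as before */
	}
	sk->rmem_alloc += truesize;			/* accepted */
	return 0;
}

int main(void)
{
	struct toy_sock sk = { .rcvbuf_limit = 1024 };

	for (int i = 0; i < 10; i++)
		toy_filter_rcv(&sk, 200);

	/* Five 200-byte messages fit under the 1024-byte limit; the other
	 * five are rejected and counted: prints "queued=1000 drops=5".
	 */
	printf("queued=%zu drops=%u\n", sk.rmem_alloc, atomic_load(&sk.drops));
	return 0;
}

Once drops are counted this way, the third hunk makes them visible to management
tools: a consumer of tipc socket diagnostics can read the new
TIPC_NLA_SOCK_STAT_DROP attribute alongside the existing RCVQ and SENDQ counters.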