Otherwise we beat heavily on the global tcp_memory atomics
when all of the sockets in the system are slowly sending
periodic packet clumps.
Noticed and suggested by Eric Dumazet.
Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/net/sock.h
+++ b/include/net/sock.h
 		__sk_mem_reclaim(sk);
 }
 
+static inline void sk_mem_reclaim_partial(struct sock *sk)
+{
+	if (!sk_has_account(sk))
+		return;
+	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk);
+}
+
 static inline void sk_mem_charge(struct sock *sk, int size)
 {
 	if (!sk_has_account(sk))
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
 		goto out_unlock;
 	}
 
-	sk_mem_reclaim(sk);
+	sk_mem_reclaim_partial(sk);
 
 	if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
 		goto out;
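
To make the trade-off concrete, below is a minimal user-space sketch of the quantum-based accounting the patch tunes, assuming a single shared counter standing in for the global tcp_memory atomics. The helper names (sk_mem_charge_bytes, sk_mem_reclaim_full, sk_mem_reclaim_partial_sketch) and the global_allocated counter are invented for this illustration, not the kernel implementation; only the "> SK_MEM_QUANTUM" threshold mirrors the change above.

/*
 * Stand-alone sketch (not kernel code) of per-socket forward_alloc
 * caching against a shared allocation counter.
 */
#include <stdatomic.h>
#include <stdio.h>

#define SK_MEM_QUANTUM 4096	/* one accounting quantum (one page) */

/* Shared counter every socket charges against -- the contended atomics. */
static atomic_long global_allocated = 0;

struct sock {
	long sk_forward_alloc;	/* per-socket cache, touched without atomics */
};

/* Charge @size bytes, refilling the per-socket cache from the shared
 * counter one quantum at a time (crude stand-in for charging). */
static void sk_mem_charge_bytes(struct sock *sk, long size)
{
	while (sk->sk_forward_alloc < size) {
		atomic_fetch_add(&global_allocated, 1);	/* global atomic op */
		sk->sk_forward_alloc += SK_MEM_QUANTUM;
	}
	sk->sk_forward_alloc -= size;
}

/* Return every whole quantum to the shared counter, as a full reclaim
 * effectively does for a socket with at least one quantum cached. */
static void sk_mem_reclaim_full(struct sock *sk)
{
	while (sk->sk_forward_alloc >= SK_MEM_QUANTUM) {
		atomic_fetch_sub(&global_allocated, 1);	/* global atomic op */
		sk->sk_forward_alloc -= SK_MEM_QUANTUM;
	}
}

/* Only reclaim when more than one quantum is cached, so a socket that
 * will allocate again shortly keeps a quantum and skips the global
 * atomics -- the behaviour the new sk_mem_reclaim_partial() is after. */
static void sk_mem_reclaim_partial_sketch(struct sock *sk)
{
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		sk_mem_reclaim_full(sk);
}

int main(void)
{
	struct sock sk = { .sk_forward_alloc = 0 };
	int i;

	/* A socket handling small periodic clumps: with full reclaim every
	 * cycle would bounce a quantum through global_allocated; with the
	 * partial variant the cached quantum is simply reused. */
	for (i = 0; i < 3; i++) {
		sk_mem_charge_bytes(&sk, 512);		/* clump arrives */
		sk.sk_forward_alloc += 512;		/* clump is freed */
		sk_mem_reclaim_partial_sketch(&sk);
		printf("cycle %d: cached=%ld global=%ld\n", i,
		       sk.sk_forward_alloc,
		       atomic_load(&global_allocated));
	}
	return 0;
}

In this model, calling sk_mem_reclaim_full() every cycle would cost two shared atomic operations per clump, while the partial variant leaves the shared counter untouched after the first cycle, which is the behaviour the delack timer path wants.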