struct sk_buff *head;
struct sk_buff *tail;
int len;
- int limit;
} sk_backlog;
wait_queue_head_t *sk_sleep;
struct dst_entry *sk_dst_cache;
skb->next = NULL;
}
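
For orientation: the backlog named by the head/tail/len fields above is a plain singly linked list of skbs, and the `skb->next = NULL;` line is the tail-append step of __sk_add_backlog(); sk_backlog.len accounts the queued bytes by truesize. Below is a standalone userspace sketch of that bookkeeping, with made-up toy types (the append and the truesize accounting are folded into one helper for brevity, which the kernel splits between __sk_add_backlog() and sk_add_backlog()):

#include <stdio.h>

struct toy_skb {
	struct toy_skb *next;
	unsigned int truesize;
};

struct toy_backlog {
	struct toy_skb *head;
	struct toy_skb *tail;
	unsigned int len;	/* total truesize of queued buffers */
};

/* Tail append plus byte accounting, mirroring the fields above. */
static void toy_add_backlog(struct toy_backlog *bl, struct toy_skb *skb)
{
	if (!bl->tail)
		bl->head = skb;
	else
		bl->tail->next = skb;
	bl->tail = skb;
	skb->next = NULL;
	bl->len += skb->truesize;
}

int main(void)
{
	struct toy_backlog bl = { 0 };
	struct toy_skb a = { .truesize = 512 }, b = { .truesize = 2048 };

	toy_add_backlog(&bl, &a);
	toy_add_backlog(&bl, &b);
	printf("backlog len = %u\n", bl.len);	/* 2560 */
	return 0;
}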
+/*
+ * Take into account the size of the receive queue and the backlog queue
+ * when deciding whether the socket can accept one more skb.
+ */
+static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+{
+ unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
+
+ return qsize + skb->truesize > sk->sk_rcvbuf;
+}
+
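
The new helper is the heart of the change: the drop decision now counts memory already charged to the receive queue (sk_rmem_alloc) plus whatever is parked on the backlog, instead of checking the backlog against its own separate limit. A self-contained userspace model of the same arithmetic, with toy field names and purely illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel fields used by sk_rcvqueues_full(). */
struct toy_sock {
	unsigned int rcvbuf;	  /* models sk->sk_rcvbuf */
	unsigned int rmem_alloc;  /* models atomic_read(&sk->sk_rmem_alloc) */
	unsigned int backlog_len; /* models sk->sk_backlog.len */
};

static bool toy_rcvqueues_full(const struct toy_sock *sk, unsigned int truesize)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

	return qsize + truesize > sk->rcvbuf;
}

int main(void)
{
	struct toy_sock sk = { .rcvbuf = 212992, .rmem_alloc = 150000,
			       .backlog_len = 60000 };

	/* 60000 + 150000 + 4096 = 214096 > 212992, so this packet is refused. */
	printf("full: %d\n", toy_rcvqueues_full(&sk, 4096));
	return 0;
}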
/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
- if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+ if (sk_rcvqueues_full(sk, skb))
return -ENOBUFS;
__sk_add_backlog(sk, skb);
skb->dev = NULL;
+ if (sk_rcvqueues_full(sk, skb)) {
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
if (nested)
bh_lock_sock_nested(sk);
else
bh_lock_sock(sk);
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
- sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
goto drop;
}
+
+ if (sk_rcvqueues_full(sk, skb))
+ goto drop;
+
rc = 0;
bh_lock_sock(sk);
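
The receive paths touched by this patch all follow the same caller shape: run sk_rcvqueues_full() before taking the socket lock, so an over-limit packet is dropped without paying for bh_lock_sock() and backlog processing. A standalone sketch of that shape, with hypothetical names (rcv_path_sock, rcv_path_skb, drops, owned_by_user) standing in for the kernel's types, sk_drops counter and sock_owned_by_user():

#include <stdbool.h>
#include <stdio.h>

struct rcv_path_skb { unsigned int truesize; };

struct rcv_path_sock {
	unsigned int rcvbuf;
	unsigned int rmem_alloc;   /* bytes charged to the receive queue */
	unsigned int backlog_len;  /* bytes parked on the backlog */
	unsigned int drops;        /* models sk->sk_drops */
	bool owned_by_user;        /* models sock_owned_by_user() */
};

/* Check first and drop early; only then "lock" and deliver or backlog. */
static int toy_queue_rcv(struct rcv_path_sock *sk, struct rcv_path_skb *skb)
{
	/* same test as sk_rcvqueues_full() */
	if (sk->backlog_len + sk->rmem_alloc + skb->truesize > sk->rcvbuf) {
		sk->drops++;		/* counted, then freed by the caller (goto drop) */
		return -1;
	}
	/* bh_lock_sock(sk); */
	if (!sk->owned_by_user)
		sk->rmem_alloc += skb->truesize;   /* delivered to the receive queue */
	else
		sk->backlog_len += skb->truesize;  /* drained when the lock owner releases the socket */
	/* bh_unlock_sock(sk); */
	return 0;
}

int main(void)
{
	struct rcv_path_sock sk = { .rcvbuf = 8192 };
	struct rcv_path_skb skb = { .truesize = 2048 };
	int i, rc = 0;

	for (i = 0; i < 6 && rc == 0; i++)
		rc = toy_queue_rcv(&sk, &skb);
	printf("queued %u bytes, %u dropped\n",
	       sk.rmem_alloc + sk.backlog_len, sk.drops);
	return 0;
}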
sk = stack[i];
if (skb1) {
+ if (sk_rcvqueues_full(sk, skb)) {
+ kfree_skb(skb1);
+ goto drop;
+ }
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb1);
/* deliver */
+ if (sk_rcvqueues_full(sk, skb)) {
+ sock_put(sk);
+ goto discard;
+ }
bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
SCTP_DBG_OBJCNT_INC(sock);
percpu_counter_inc(&sctp_sockets_allocated);
- /* Set socket backlog limit. */
- sk->sk_backlog.limit = sysctl_sctp_rmem[1];
-
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
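
With the per-socket sk_backlog.limit field removed, SCTP no longer sets its own cap from sysctl_sctp_rmem[1]; its backlog is now bounded by the same sk_rcvbuf based test as every other protocol. The standalone sketch below contrasts the two schemes on the case this commit targets, a socket whose receive queue is already full while its backlog is empty; the constants are stand-ins, not real sysctl defaults:

#include <stdbool.h>
#include <stdio.h>

#define TOY_RCVBUF	212992U	/* plays the role of sk->sk_rcvbuf */
#define TOY_SCTP_RMEM1	 87380U	/* plays the role of sysctl_sctp_rmem[1] */

/* Old scheme (removed above): only sk_backlog.len was tested, against
 * max(sk_backlog.limit, sk_rcvbuf << 1); receive-queue memory was ignored. */
static bool old_backlog_full(unsigned int backlog_len)
{
	unsigned int limit = TOY_SCTP_RMEM1 > (TOY_RCVBUF << 1) ?
			     TOY_SCTP_RMEM1 : (TOY_RCVBUF << 1);

	return backlog_len >= limit;
}

/* New scheme: backlog plus receive-queue memory against sk_rcvbuf. */
static bool new_rcvqueues_full(unsigned int backlog_len,
			       unsigned int rmem_alloc,
			       unsigned int truesize)
{
	return backlog_len + rmem_alloc + truesize > TOY_RCVBUF;
}

int main(void)
{
	/* Receive queue already at the buffer limit, backlog still empty:
	 * the old test keeps accepting, the new one refuses. */
	unsigned int backlog_len = 0, rmem_alloc = TOY_RCVBUF, truesize = 4096;

	printf("old full: %d, new full: %d\n",
	       old_backlog_full(backlog_len),
	       new_rcvqueues_full(backlog_len, rmem_alloc, truesize));
	return 0;
}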