struct {
struct sk_buff *head;
struct sk_buff *tail;
+ int len;
+ int limit;
} sk_backlog;
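The two added fields carry the new accounting: sk_backlog.len accumulates the truesize of the packets queued while the socket is owned by a process, and sk_backlog.limit is the per-socket ceiling consulted by sk_add_backlog_limited() below.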
wait_queue_head_t *sk_sleep;
struct dst_entry *sk_dst_cache;
return sk->sk_wmem_queued < sk->sk_sndbuf;
}
-/* The per-socket spinlock must be held here. */
+/* OOB backlog add */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
if (!sk->sk_backlog.tail) {
sk->sk_backlog.head = sk->sk_backlog.tail = skb;
} else {
sk->sk_backlog.tail->next = skb;
sk->sk_backlog.tail = skb;
}
skb->next = NULL;
}
+/* The per-socket spinlock must be held here. */
+static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+{
+ if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
+ return -ENOBUFS;
+
+ sk_add_backlog(sk, skb);
+ sk->sk_backlog.len += skb->truesize;
+ return 0;
+}
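The ceiling is whichever is larger, the per-socket limit or twice the receive buffer, and len only grows until the owner releases the socket; nothing in this patch subtracts from it as packets are consumed. A minimal userspace sketch of the same arithmetic (the buffer sizes and the backlog_add() helper are illustrative, not kernel code):

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

struct backlog { int len, limit; };

/* Mirrors the check in sk_add_backlog_limited(): refuse the packet
 * once the accumulated truesize reaches max(limit, 2 * rcvbuf). */
static int backlog_add(struct backlog *b, int rcvbuf, int truesize)
{
	if (b->len >= MAX(b->limit, rcvbuf << 1))
		return -1;		/* the kernel returns -ENOBUFS */
	b->len += truesize;
	return 0;
}

int main(void)
{
	struct backlog b = { 0, 0 };
	int rcvbuf = 4096, queued = 0, dropped = 0;

	/* 1 KiB per packet: the ninth packet trips the 8 KiB (2 * rcvbuf) cap. */
	for (int i = 0; i < 16; i++) {
		if (backlog_add(&b, rcvbuf, 1024))
			dropped++;
		else
			queued++;
	}
	printf("queued=%d dropped=%d len=%d\n", queued, dropped, b.len);
	return 0;
}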
+
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
return sk->sk_backlog_rcv(sk, skb);
}
rc = sk_backlog_rcv(sk, skb);
mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
- } else
- sk_add_backlog(sk, skb);
+ } else if (sk_add_backlog_limited(sk, skb)) {
+ bh_unlock_sock(sk);
+ atomic_inc(&sk->sk_drops);
+ goto discard_and_relse;
+ }
+
bh_unlock_sock(sk);
out:
sock_put(sk);
sock_lock_init(newsk);
bh_lock_sock(newsk);
newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+ newsk->sk_backlog.len = 0;
atomic_set(&newsk->sk_rmem_alloc, 0);
/*
bh_lock_sock(sk);
} while ((skb = sk->sk_backlog.head) != NULL);
+
+ /*
+ * Doing the zeroing here guarantees we cannot loop forever
+ * while a wild producer attempts to flood us.
+ */
+ sk->sk_backlog.len = 0;
}
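For context, the zeroing lands at the tail of __release_sock()'s drain loop, which in this kernel generation is shaped roughly as below (simplified sketch; rescheduling details elided). Because len is never decremented while the owner drains the queue, a flooding producer is cut off as soon as the ceiling is hit, so the outer loop is guaranteed to eventually find an empty head and terminate.

	struct sk_buff *skb = sk->sk_backlog.head;

	do {
		sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
		bh_unlock_sock(sk);	/* softirqs may append again, up to the cap */

		do {
			struct sk_buff *next = skb->next;

			skb->next = NULL;
			sk_backlog_rcv(sk, skb);	/* protocol backlog handler */
			skb = next;
		} while (skb != NULL);

		bh_lock_sock(sk);
	} while ((skb = sk->sk_backlog.head) != NULL);

	/* Only now, with the lock held, is the accounting reset. */
	sk->sk_backlog.len = 0;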
/**
sk->sk_allocation = GFP_KERNEL;
sk->sk_rcvbuf = sysctl_rmem_default;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_backlog.limit = sk->sk_rcvbuf << 1;
sk->sk_state = TCP_CLOSE;
sk_set_socket(sk, sock);
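At initialization the cap therefore defaults to twice the default receive buffer. Because the runtime check uses max(sk_backlog.limit, sk_rcvbuf << 1), enlarging the receive buffer later (via SO_RCVBUF, for instance) still raises the effective ceiling even though sk_backlog.limit keeps its initialization-time value, and a protocol may set sk_backlog.limit higher for sockets that need a deeper backlog. As a rough illustration with made-up numbers: a socket created with a 128 KiB default receive buffer starts with limit = 256 KiB; if the receive buffer is later enlarged to 1 MiB, the effective ceiling becomes max(256 KiB, 2 MiB) = 2 MiB.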