--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ ... @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 	skb->next = NULL;
 }
 
+static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	return sk->sk_backlog_rcv(sk, skb);
+}
+
 #define sk_wait_event(__sk, __timeo, __condition)			\
 	({	int __rc;						\
		release_sock(__sk);					\
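[Note: the hunk above is the core of the change: the indirect call through sk->sk_backlog_rcv is hidden behind a static inline helper so that generic behaviour can later be added in one place rather than at every call site. A minimal user-space sketch of the same wrapper pattern follows; the names (struct conn, conn_backlog_rcv, etc.) are illustrative only, not kernel APIs.]

#include <stdio.h>

struct buf { const char *data; };

/* A socket-like object with an overridable receive hook, analogous to
 * the sk_backlog_rcv function pointer in struct sock. */
struct conn {
	int (*backlog_rcv)(struct conn *c, struct buf *b); /* protocol-specific handler */
};

/* Wrapper in the spirit of sk_backlog_rcv(): every caller goes through
 * here, so accounting, tracing or checks can later be added without
 * touching the individual call sites. */
static inline int conn_backlog_rcv(struct conn *c, struct buf *b)
{
	return c->backlog_rcv(c, b);
}

static int tcp_like_rcv(struct conn *c, struct buf *b)
{
	printf("handled: %s\n", b->data);
	return 0;
}

int main(void)
{
	struct conn c = { .backlog_rcv = tcp_like_rcv };
	struct buf b = { .data = "queued packet" };

	/* callers use the wrapper, never the raw function pointer */
	return conn_backlog_rcv(&c, &b);
}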
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ ... @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 			BUG_ON(sock_owned_by_user(sk));
 
 			while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-				sk->sk_backlog_rcv(sk, skb1);
+				sk_backlog_rcv(sk, skb1);
 				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
 			}
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ ... @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 		 */
 		mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
 
-		rc = sk->sk_backlog_rcv(sk, skb);
+		rc = sk_backlog_rcv(sk, skb);
 
 		mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
 	} else
@@ ... @@ static void __release_sock(struct sock *sk)
 			struct sk_buff *next = skb->next;
 
 			skb->next = NULL;
-			sk->sk_backlog_rcv(sk, skb);
+			sk_backlog_rcv(sk, skb);
 
 			/*
 			 * We are in process context here with softirqs
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ ... @@ static void tcp_prequeue_process(struct sock *sk)
 	 * necessary */
 	local_bh_disable();
 	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-		sk->sk_backlog_rcv(sk, skb);
+		sk_backlog_rcv(sk, skb);
 	local_bh_enable();
 
 	/* Clear memory counter. */
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ ... @@ static void tcp_delack_timer(unsigned long data)
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
-			sk->sk_backlog_rcv(sk, skb);
+			sk_backlog_rcv(sk, skb);
 
 		tp->ucopy.memory = 0;
 	}
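[Note: the conversions above show the point of the wrapper: every call site that used to invoke sk->sk_backlog_rcv() directly now funnels through sk_backlog_rcv(), so any future generic behaviour only has to be added to the helper. A purely hypothetical sketch of such an extension point follows; the comment marks where added logic would live, and nothing beyond the original one-line body is part of this patch.]

/* Hypothetical sketch, not from this patch: because every call site now
 * uses sk_backlog_rcv(), generic behaviour can be added in exactly one
 * place instead of at each caller. */
static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* ... e.g. statistics, tracing or sanity checks would go here ... */
	return sk->sk_backlog_rcv(sk, skb);
}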