return 0;
}
-static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
{
struct iscsi_conn *conn;
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_send out;
/* old values for socket callbacks */
- void (*old_data_ready)(struct sock *, int);
+ void (*old_data_ready)(struct sock *);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
* socket call back in Linux
*/
static void
-ksocknal_data_ready (struct sock *sk, int n)
+ksocknal_data_ready (struct sock *sk)
{
ksock_conn_t *conn;
conn = sk->sk_user_data;
if (conn == NULL) { /* raced with ksocknal_terminate_conn */
LASSERT (sk->sk_data_ready != &ksocknal_data_ready);
- sk->sk_data_ready (sk, n);
+ sk->sk_data_ready (sk);
} else
ksocknal_read_callback(conn);
struct completion rx_half_close_comp;
/* socket used by this connection */
struct socket *sock;
- void (*orig_data_ready)(struct sock *, int);
+ void (*orig_data_ready)(struct sock *);
void (*orig_state_change)(struct sock *);
#define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2
return 0;
}
-static void iscsi_target_sk_data_ready(struct sock *sk, int count)
+static void iscsi_target_sk_data_ready(struct sock *sk)
{
struct iscsi_conn *conn = sk->sk_user_data;
bool rc;
}
/* Data available on socket or listen socket received a connect */
-static void lowcomms_data_ready(struct sock *sk, int count_unused)
+static void lowcomms_data_ready(struct sock *sk)
{
struct connection *con = sock2con(sk);
if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
spinlock_t requests_lock; /* Lock accesses to tx.requests, tx.creq and rcv.creq when STREAM mode */
- void (*data_ready)(struct sock* sk, int len);
+ void (*data_ready)(struct sock* sk);
void (*error_report)(struct sock* sk);
void (*write_space)(struct sock* sk); /* STREAM mode only */
struct {
extern void ncpdgram_rcv_proc(struct work_struct *work);
extern void ncpdgram_timeout_proc(struct work_struct *work);
extern void ncpdgram_timeout_call(unsigned long server);
-extern void ncp_tcp_data_ready(struct sock* sk, int len);
+extern void ncp_tcp_data_ready(struct sock* sk);
extern void ncp_tcp_write_space(struct sock* sk);
extern void ncp_tcp_error_report(struct sock* sk);
kfree(req);
}
-void ncp_tcp_data_ready(struct sock *sk, int len)
+void ncp_tcp_data_ready(struct sock *sk)
{
struct ncp_server *server = sk->sk_user_data;
- server->data_ready(sk, len);
+ server->data_ready(sk);
schedule_work(&server->rcv.tq);
}
static void o2net_sc_connect_completed(struct work_struct *work);
static void o2net_rx_until_empty(struct work_struct *work);
static void o2net_shutdown_sc(struct work_struct *work);
-static void o2net_listen_data_ready(struct sock *sk, int bytes);
+static void o2net_listen_data_ready(struct sock *sk);
static void o2net_sc_send_keep_req(struct work_struct *work);
static void o2net_idle_timer(unsigned long data);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
}
/* see o2net_register_callbacks() */
-static void o2net_data_ready(struct sock *sk, int bytes)
+static void o2net_data_ready(struct sock *sk)
{
- void (*ready)(struct sock *sk, int bytes);
+ void (*ready)(struct sock *sk);
read_lock(&sk->sk_callback_lock);
if (sk->sk_user_data) {
}
read_unlock(&sk->sk_callback_lock);
- ready(sk, bytes);
+ ready(sk);
}
/* see o2net_register_callbacks() */
cond_resched();
}
-static void o2net_listen_data_ready(struct sock *sk, int bytes)
+static void o2net_listen_data_ready(struct sock *sk)
{
- void (*ready)(struct sock *sk, int bytes);
+ void (*ready)(struct sock *sk);
read_lock(&sk->sk_callback_lock);
ready = sk->sk_user_data;
*/
if (sk->sk_state == TCP_LISTEN) {
- mlog(ML_TCP, "bytes: %d\n", bytes);
queue_work(o2net_wq, &o2net_listen_work);
} else {
ready = NULL;
out:
read_unlock(&sk->sk_callback_lock);
if (ready != NULL)
- ready(sk, bytes);
+ ready(sk);
}
static int o2net_open_listening_sock(__be32 addr, __be16 port)
/* original handlers for the sockets */
void (*sc_state_change)(struct sock *sk);
- void (*sc_data_ready)(struct sock *sk, int bytes);
+ void (*sc_data_ready)(struct sock *sk);
u32 sc_msg_key;
u16 sc_msg_type;
/* We keep the old state_change and data_ready CB's here */
void (*sk_ostate)(struct sock *);
- void (*sk_odata)(struct sock *, int bytes);
+ void (*sk_odata)(struct sock *);
void (*sk_owspace)(struct sock *);
/* private TCP part */
int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
-void sctp_data_ready(struct sock *sk, int len);
+void sctp_data_ready(struct sock *sk);
unsigned int sctp_poll(struct file *file, struct socket *sock,
poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
u32 sk_classid;
struct cg_proto *sk_cgrp;
void (*sk_state_change)(struct sock *sk);
- void (*sk_data_ready)(struct sock *sk, int bytes);
+ void (*sk_data_ready)(struct sock *sk);
void (*sk_write_space)(struct sock *sk);
void (*sk_error_report)(struct sock *sk);
int (*sk_backlog_rcv)(struct sock *sk,
sk = sk_atm(atmarpd);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
return 0;
}
atm_force_charge(priv->lecd, skb2->truesize);
sk = sk_atm(priv->lecd);
skb_queue_tail(&sk->sk_receive_queue, skb2);
- sk->sk_data_ready(sk, skb2->len);
+ sk->sk_data_ready(sk);
}
}
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
atm_force_charge(priv->lecd, skb2->truesize);
sk = sk_atm(priv->lecd);
skb_queue_tail(&sk->sk_receive_queue, skb2);
- sk->sk_data_ready(sk, skb2->len);
+ sk->sk_data_ready(sk);
}
}
#endif /* defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) */
atm_force_charge(priv->lecd, skb->truesize);
sk = sk_atm(priv->lecd);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
if (data != NULL) {
pr_debug("about to send %d bytes of data\n", data->len);
atm_force_charge(priv->lecd, data->truesize);
skb_queue_tail(&sk->sk_receive_queue, data);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
}
return 0;
pr_debug("%s: To daemon\n", dev->name);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
} else { /* Data frame, queue to protocol handlers */
struct lec_arp_table *entry;
unsigned char *src, *dst;
dprintk("(%s) control packet arrived\n", dev->name);
/* Pass control packets to daemon */
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
return;
}
sk = sk_atm(mpc->mpoad_vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
return 0;
}
sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
dprintk("exiting\n");
}
struct sock *sk = sk_atm(vcc);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
}
}
#endif
atm_force_charge(sigd, skb->truesize);
skb_queue_tail(&sk_atm(sigd)->sk_receive_queue, skb);
- sk_atm(sigd)->sk_data_ready(sk_atm(sigd), skb->len);
+ sk_atm(sigd)->sk_data_ready(sk_atm(sigd));
}
static void modify_qos(struct atm_vcc *vcc, struct atmsvc_msg *msg)
if (sk) {
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
sock_put(sk);
} else {
free:
if (parent) {
bt_accept_unlink(sk);
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
} else {
sk->sk_state_change(sk);
}
sk->sk_state_change(sk);
if (parent)
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
release_sock(sk);
}
parent = bt_sk(sk)->parent;
if (parent)
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
release_sock(sk);
}
rfcomm_schedule();
}
-static void rfcomm_l2data_ready(struct sock *sk, int bytes)
+static void rfcomm_l2data_ready(struct sock *sk)
{
- BT_DBG("%p bytes %d", sk, bytes);
+ BT_DBG("%p", sk);
rfcomm_schedule();
}
atomic_add(skb->len, &sk->sk_rmem_alloc);
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
rfcomm_dlc_throttle(d);
sock_set_flag(sk, SOCK_ZAPPED);
bt_accept_unlink(sk);
}
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
} else {
if (d->state == BT_CONNECTED)
rfcomm_session_getaddr(d->session,
sk->sk_state = BT_CONNECTED;
/* Wake up parent */
- parent->sk_data_ready(parent, 1);
+ parent->sk_data_ready(parent);
bh_unlock_sock(parent);
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int err;
- int skb_len;
unsigned long flags;
struct sk_buff_head *list = &sk->sk_receive_queue;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
* may be freed by other threads of control pulling packets
* from the queue.
*/
- skb_len = skb->len;
spin_lock_irqsave(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
__skb_queue_tail(list, skb);
spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb_len);
+ sk->sk_data_ready(sk);
else
kfree_skb(skb);
return 0;
*/
/* data available on socket, or listen socket received a connect */
-static void ceph_sock_data_ready(struct sock *sk, int count_unused)
+static void ceph_sock_data_ready(struct sock *sk)
{
struct ceph_connection *con = sk->sk_user_data;
if (atomic_read(&con->msgr->stopping)) {
*/
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
- int len = skb->len;
-
if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
(unsigned int)sk->sk_rcvbuf)
return -ENOMEM;
skb_queue_tail(&sk->sk_error_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, len);
+ sk->sk_data_ready(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);
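The skb_len locals deleted in the surrounding hunks show the hazard this series removes: once the skb is on the receive queue, a reader on another CPU may dequeue and kfree_skb() it, so evaluating skb->len afterwards is a use-after-free. An illustrative sketch (not a hunk from this series; example_queue_rcv is a hypothetical name) of the queue-then-notify idiom before and after:

/* #include <net/sock.h> */

static int example_queue_rcv(struct sock *sk, struct sk_buff *skb)
{
	skb_queue_tail(&sk->sk_receive_queue, skb);

	/* old, racy form:  sk->sk_data_ready(sk, skb->len);
	 * skb may already be freed by a concurrent reader here.
	 * new form -- no length to compute, nothing to race on: */
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk);
	return 0;
}

Call sites that cached the length beforehand (caif, decnet, sock_queue_err_skb above) were merely working around the race; dropping the argument makes both the workaround and the race impossible.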
spin_unlock_irqrestore(&list->lock, flags);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb_len);
+ sk->sk_data_ready(sk);
return 0;
}
EXPORT_SYMBOL(sock_queue_rcv_skb);
rcu_read_unlock();
}
-static void sock_def_readable(struct sock *sk, int len)
+static void sock_def_readable(struct sock *sk)
{
struct socket_wq *wq;
__skb_pull(skb, dccp_hdr(skb)->dccph_doff * 4);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
static void dccp_fin(struct sock *sk, struct sk_buff *skb)
/* Wakeup parent, send SIGIO */
if (state == DCCP_RESPOND && child->sk_state != state)
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
} else {
/* Alas, it is possible again, because we do lookup
* in main socket hash table and lock on listening
static __inline__ int dn_queue_skb(struct sock *sk, struct sk_buff *skb, int sig, struct sk_buff_head *queue)
{
int err;
- int skb_len;
/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
number of warnings when compiling with -W --ANK
if (err)
goto out;
- skb_len = skb->len;
skb_set_owner_r(skb, sk);
skb_queue_tail(queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb_len);
+ sk->sk_data_ready(sk);
out:
return err;
}
if (eaten > 0)
kfree_skb_partial(skb, fragstolen);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
return;
}
BUG();
tp->urg_data = TCP_URG_VALID | tmp;
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
}
}
(tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
(atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
tp->ucopy.wakeup = 1;
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
} else if (chunk > 0) {
tp->ucopy.wakeup = 1;
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
out:
return copied_early;
#endif
if (eaten)
kfree_skb_partial(skb, fragstolen);
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
return;
}
}
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
tp->syn_data_acked = 1;
}
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
bh_unlock_sock(child);
sock_put(child);
WARN_ON(req->sk == NULL);
skb->len);
/* Wakeup parent, send SIGIO */
if (state == TCP_SYN_RECV && child->sk_state != state)
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
} else {
/* Alas, it is possible again, because we do lookup
* in main socket hash table and lock on listening
/* Wake up accept */
nsk->sk_state = IUCV_CONNECTED;
- sk->sk_data_ready(sk, 1);
+ sk->sk_data_ready(sk);
err = 0;
fail:
bh_unlock_sock(sk);
if (!err) {
iucv_accept_enqueue(sk, nsk);
nsk->sk_state = IUCV_CONNECTED;
- sk->sk_data_ready(sk, 1);
+ sk->sk_data_ready(sk);
} else
iucv_sock_kill(nsk);
bh_unlock_sock(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) {
skb_set_owner_r(*skb2, sk);
skb_queue_tail(&sk->sk_receive_queue, *skb2);
- sk->sk_data_ready(sk, (*skb2)->len);
+ sk->sk_data_ready(sk);
*skb2 = NULL;
err = 0;
}
else
#endif /* CONFIG_NETLINK_MMAP */
skb_queue_tail(&sk->sk_receive_queue, skb);
- sk->sk_data_ready(sk, len);
+ sk->sk_data_ready(sk);
return len;
}
return err ? : copied;
}
-static void netlink_data_ready(struct sock *sk, int len)
+static void netlink_data_ready(struct sock *sk)
{
BUG();
}
skb_queue_head(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
bh_unlock_sock(sk);
new_sk->sk_state = LLCP_CONNECTED;
/* Wake the listening processes */
- parent->sk_data_ready(parent, 0);
+ parent->sk_data_ready(parent);
/* Send CC */
nfc_llcp_send_cc(new_sock);
skb->dropcount = atomic_read(&sk->sk_drops);
__skb_queue_tail(&sk->sk_receive_queue, skb);
spin_unlock(&sk->sk_receive_queue.lock);
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
return 0;
drop_n_acct:
else
prb_clear_blk_fill_status(&po->rx_ring);
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
po->stats.stats1.tp_drops++;
spin_unlock(&sk->sk_receive_queue.lock);
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
kfree_skb(copy_skb);
goto drop_n_restore;
}
struct gprs_dev {
struct sock *sk;
void (*old_state_change)(struct sock *);
- void (*old_data_ready)(struct sock *, int);
+ void (*old_data_ready)(struct sock *);
void (*old_write_space)(struct sock *);
struct net_device *dev;
return err;
}
-static void gprs_data_ready(struct sock *sk, int len)
+static void gprs_data_ready(struct sock *sk)
{
struct gprs_dev *gp = sk->sk_user_data;
struct sk_buff *skb;
queue:
skb->dev = NULL;
skb_set_owner_r(skb, sk);
- err = skb->len;
skb_queue_tail(queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, err);
+ sk->sk_data_ready(sk);
return NET_RX_SUCCESS;
}
pn->rx_credits--;
skb->dev = NULL;
skb_set_owner_r(skb, sk);
- err = skb->len;
skb_queue_tail(&sk->sk_receive_queue, skb);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, err);
+ sk->sk_data_ready(sk);
return NET_RX_SUCCESS;
case PNS_PEP_CONNECT_RESP:
skb_queue_head(&sk->sk_receive_queue, skb);
sk_acceptq_added(sk);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
return NET_RX_SUCCESS;
case PNS_PEP_DISCONNECT_REQ:
/* tcp_listen.c */
int rds_tcp_listen_init(void);
void rds_tcp_listen_stop(void);
-void rds_tcp_listen_data_ready(struct sock *sk, int bytes);
+void rds_tcp_listen_data_ready(struct sock *sk);
/* tcp_recv.c */
int rds_tcp_recv_init(void);
void rds_tcp_recv_exit(void);
-void rds_tcp_data_ready(struct sock *sk, int bytes);
+void rds_tcp_data_ready(struct sock *sk);
int rds_tcp_recv(struct rds_connection *conn);
void rds_tcp_inc_free(struct rds_incoming *inc);
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
cond_resched();
}
-void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
+void rds_tcp_listen_data_ready(struct sock *sk)
{
- void (*ready)(struct sock *sk, int bytes);
+ void (*ready)(struct sock *sk);
rdsdebug("listen data ready sk %p\n", sk);
out:
read_unlock(&sk->sk_callback_lock);
- ready(sk, bytes);
+ ready(sk);
}
int rds_tcp_listen_init(void)
return ret;
}
-void rds_tcp_data_ready(struct sock *sk, int bytes)
+void rds_tcp_data_ready(struct sock *sk)
{
- void (*ready)(struct sock *sk, int bytes);
+ void (*ready)(struct sock *sk);
struct rds_connection *conn;
struct rds_tcp_connection *tc;
- rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
+ rdsdebug("data ready sk %p\n", sk);
read_lock(&sk->sk_callback_lock);
conn = sk->sk_user_data;
queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
out:
read_unlock(&sk->sk_callback_lock);
- ready(sk, bytes);
+ ready(sk);
}
int rds_tcp_recv_init(void)
rose_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
return 1;
}
spin_unlock_bh(&sk->sk_receive_queue.lock);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb_len);
+ sk->sk_data_ready(sk);
}
skb = NULL;
} else {
* handle data received on the local endpoint
* - may be called in interrupt context
*/
-void rxrpc_data_ready(struct sock *sk, int count)
+void rxrpc_data_ready(struct sock *sk)
{
struct rxrpc_skb_priv *sp;
struct rxrpc_local *local;
struct sk_buff *skb;
int ret;
- _enter("%p, %d", sk, count);
+ _enter("%p", sk);
ASSERT(!irqs_disabled());
*/
extern const char *rxrpc_pkts[];
-void rxrpc_data_ready(struct sock *, int);
+void rxrpc_data_ready(struct sock *);
int rxrpc_queue_rcv_skb(struct rxrpc_call *, struct sk_buff *, bool, bool);
void rxrpc_fast_process_packet(struct rxrpc_call *, struct sk_buff *);
goto out;
}
-void sctp_data_ready(struct sock *sk, int len)
+void sctp_data_ready(struct sock *sk)
{
struct socket_wq *wq;
sctp_ulpq_clear_pd(ulpq);
if (queue == &sk->sk_receive_queue)
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
return 1;
out_free:
/* If there is data waiting, send it up the socket now. */
if (sctp_ulpq_clear_pd(ulpq) || ev)
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
int flags);
-static void svc_udp_data_ready(struct sock *, int);
+static void svc_udp_data_ready(struct sock *);
static int svc_udp_recvfrom(struct svc_rqst *);
static int svc_udp_sendto(struct svc_rqst *);
static void svc_sock_detach(struct svc_xprt *);
/*
* INET callback when data has been received on the socket.
*/
-static void svc_udp_data_ready(struct sock *sk, int count)
+static void svc_udp_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
wait_queue_head_t *wq = sk_sleep(sk);
if (svsk) {
- dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
- svsk, sk, count,
+ dprintk("svc: socket %p(inet %p), busy=%d\n",
+ svsk, sk,
test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
svc_xprt_enqueue(&svsk->sk_xprt);
* A data_ready event on a listening socket means there's a connection
* pending. Do not use state_change as a substitute for it.
*/
-static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
+static void svc_tcp_listen_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
wait_queue_head_t *wq;
wake_up_interruptible_all(wq);
}
-static void svc_tcp_data_ready(struct sock *sk, int count)
+static void svc_tcp_data_ready(struct sock *sk)
{
struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;
wait_queue_head_t *wq = sk_sleep(sk);
/*
* Saved socket callback addresses
*/
- void (*old_data_ready)(struct sock *, int);
+ void (*old_data_ready)(struct sock *);
void (*old_state_change)(struct sock *);
void (*old_write_space)(struct sock *);
void (*old_error_report)(struct sock *);
*
* Currently this assumes we can read the whole reply in a single gulp.
*/
-static void xs_local_data_ready(struct sock *sk, int len)
+static void xs_local_data_ready(struct sock *sk)
{
struct rpc_task *task;
struct rpc_xprt *xprt;
- * @len: how much data to read
*
*/
-static void xs_udp_data_ready(struct sock *sk, int len)
+static void xs_udp_data_ready(struct sock *sk)
{
struct rpc_task *task;
struct rpc_xprt *xprt;
- * @bytes: how much data to read
*
*/
-static void xs_tcp_data_ready(struct sock *sk, int bytes)
+static void xs_tcp_data_ready(struct sock *sk)
{
struct rpc_xprt *xprt;
read_descriptor_t rd_desc;
return con;
}
-static void sock_data_ready(struct sock *sk, int unused)
+static void sock_data_ready(struct sock *sk)
{
struct tipc_conn *con;
newcon->usr_data = s->tipc_conn_new(newcon->conid);
/* Wake up receive process in case of 'SYN+' message */
- newsock->sk->sk_data_ready(newsock->sk, 0);
+ newsock->sk->sk_data_ready(newsock->sk);
return ret;
}
#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
static int backlog_rcv(struct sock *sk, struct sk_buff *skb);
-static void tipc_data_ready(struct sock *sk, int len);
+static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
* @sk: socket
- * @len: the length of messages
*/
-static void tipc_data_ready(struct sock *sk, int len)
+static void tipc_data_ready(struct sock *sk)
{
struct socket_wq *wq;
__skb_queue_tail(&sk->sk_receive_queue, buf);
skb_set_owner_r(buf, sk);
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
return TIPC_OK;
}
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
- other->sk_data_ready(other, 0);
+ other->sk_data_ready(other);
sock_put(other);
return 0;
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
- other->sk_data_ready(other, len);
+ other->sk_data_ready(other);
sock_put(other);
scm_destroy(siocb->scm);
return len;
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
- other->sk_data_ready(other, size);
+ other->sk_data_ready(other);
sent += size;
}
struct vsock_sock *vsk = vsock_sk(sk);
PKT_FIELD(vsk, sent_waiting_read) = false;
#endif
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
static void vmci_transport_notify_pkt_socket_init(struct sock *sk)
bool bottom_half,
struct sockaddr_vm *dst, struct sockaddr_vm *src)
{
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
static void vsock_block_update_write_window(struct sock *sk)
/* See the comment in
* vmci_transport_notify_pkt_send_post_enqueue().
*/
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk);
}
return err;
x25_start_heartbeat(make);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skb->len);
+ sk->sk_data_ready(sk);
rc = 1;
sock_put(sk);
out:
skb_set_owner_r(skbn, sk);
skb_queue_tail(&sk->sk_receive_queue, skbn);
if (!sock_flag(sk, SOCK_DEAD))
- sk->sk_data_ready(sk, skbn->len);
+ sk->sk_data_ready(sk);
return 0;
}
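The other idiom this series touches repeatedly (o2net, rds_tcp, iscsi, sunrpc) is saving a socket's original sk_data_ready under sk_callback_lock, installing a wrapper, and chaining to the saved pointer. A minimal sketch of that pattern under the new single-argument signature; the my_* names are hypothetical, not from the patch:

/* #include <net/sock.h> */

struct my_conn {
	struct sock *sk;
	void (*saved_data_ready)(struct sock *sk);	/* original callback */
};

static void my_data_ready(struct sock *sk)
{
	struct my_conn *conn;
	void (*ready)(struct sock *sk) = NULL;

	read_lock(&sk->sk_callback_lock);
	conn = sk->sk_user_data;
	if (conn) {
		/* kick our receive work here, e.g. queue_work(...) */
		ready = conn->saved_data_ready;
	}
	read_unlock(&sk->sk_callback_lock);

	if (ready)
		ready(sk);	/* chain to the original callback */
}

static void my_conn_register(struct my_conn *conn, struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	conn->sk = sk;
	sk->sk_user_data = conn;
	conn->saved_data_ready = sk->sk_data_ready;
	sk->sk_data_ready = my_data_ready;
	write_unlock_bh(&sk->sk_callback_lock);
}

Readers that need a byte count inspect the receive queue themselves (e.g. via recvmsg or skb_peek) rather than trusting a length that may be stale by the time the callback runs.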