};
size_t total_len = 0;
int err, mergeable;
- s16 headcount;
+ s16 headcount, nheads = 0;
size_t vhost_hlen, sock_hlen;
size_t vhost_len, sock_len;
struct socket *sock;
while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
- headcount = get_rx_bufs(vq, vq->heads, vhost_len,
+ headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
&in, vq_log, &log,
likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		if (unlikely(err != sock_len)) {
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
- vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
- headcount);
+ nheads += headcount;
+ if (nheads > VHOST_RX_BATCH) {
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
+ nheads = 0;
+ }
if (unlikely(vq_log))
vhost_log_write(vq, vq_log, log, vhost_len);
total_len += vhost_len;
}
vhost_net_enable_vq(net, vq);
out:
+ if (nheads)
+ vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
+ nheads);
mutex_unlock(&vq->mutex);
}
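
Taken together, the hunks above stop publishing used buffers one packet at a time: the heads consumed by each packet are accumulated in nheads, pushed to the used ring only once a batch has built up, and flushed one final time on the out: path so nothing accumulated since the last batch is lost when the loop exits early. The standalone C sketch below illustrates that accumulate-and-flush pattern under simplified assumptions; RX_BATCH, flush_used() and the fake per-packet counts are invented for the example and are not part of the vhost code.

#include <stdio.h>

#define RX_BATCH 8	/* illustrative threshold; plays the role of VHOST_RX_BATCH */

/* Stand-in for vhost_add_used_and_signal_n(): publish n completed
 * buffers to the consumer and notify it in a single call. */
static void flush_used(int n)
{
	printf("signal consumer: %d buffers used\n", n);
}

int main(void)
{
	/* Invented per-packet buffer counts, i.e. what headcount would be. */
	int headcounts[] = { 3, 2, 4, 1, 2, 5 };
	int pending = 0;	/* plays the role of nheads */
	size_t i;

	for (i = 0; i < sizeof(headcounts) / sizeof(headcounts[0]); i++) {
		/* ...one packet received into headcounts[i] buffers... */
		pending += headcounts[i];

		/* Defer the used-ring update until a batch has built up. */
		if (pending > RX_BATCH) {
			flush_used(pending);
			pending = 0;
		}
	}

	/* Exit path: whatever is still buffered must still be published. */
	if (pending)
		flush_used(pending);

	return 0;
}

The final flush corresponds to the new code under the out: label in the patch: because the receive loop can be left early (error paths, empty sockets), any heads accumulated since the last batch would otherwise never be made visible to the guest.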