/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN+ETH_DATA_LEN)
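+/* How much packet data we copy into the skb head for mergeable rx
+ * buffers; anything beyond this rides in page fragments. */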
+#define GOOD_COPY_LEN 128
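+/*
+ * For reference, the mergeable header from <linux/virtio_net.h> is the
+ * ordinary header plus a count of the descriptors the host consumed:
+ *
+ *	struct virtio_net_hdr_mrg_rxbuf {
+ *		struct virtio_net_hdr hdr;
+ *		__u16 num_buffers;	(number of merged rx buffers)
+ *	};
+ */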
struct virtnet_info
{
/* I like... big packets and I cannot lie! */
bool big_packets;
+ /* Host will merge rx buffers for big packets (shake it! shake it!) */
+ bool mergeable_rx_bufs;
+
/* Receive & send queues. */
struct sk_buff_head recv;
struct sk_buff_head send;
struct page *pages;
};
-static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
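+/* skb->cb stores either header flavour, so hand back a void pointer
+ * and let the caller pick the type. */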
+static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct virtio_net_hdr *)skb->cb;
}
-static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
-{
- sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
-}
-
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
page->private = (unsigned long)vi->pages;
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
unsigned len)
{
+ struct virtnet_info *vi = netdev_priv(dev);
struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
int err;
+ int i;
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
goto drop;
}
- len -= sizeof(struct virtio_net_hdr);
- if (len <= MAX_PACKET_LEN)
- trim_pages(netdev_priv(dev), skb);
+ if (vi->mergeable_rx_bufs) {
+ struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
+ unsigned int copy;
+ char *p = page_address(skb_shinfo(skb)->frags[0].page);
- err = pskb_trim(skb, len);
- if (err) {
- pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
- dev->stats.rx_dropped++;
- goto drop;
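+ /* Each posted rx buffer is a single page, and the len the host
+ * returned includes the mergeable header. */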
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
+
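+ /* Stash the mergeable header from the page into skb->cb. */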
+ memcpy(hdr, p, sizeof(*mhdr));
+ p += sizeof(*mhdr);
+
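+ /* Copy what fits into the linear area (the tailroom, roughly
+ * GOOD_COPY_LEN). */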
+ copy = len;
+ if (copy > skb_tailroom(skb))
+ copy = skb_tailroom(skb);
+
+ memcpy(skb_put(skb, copy), p, copy);
+
+ len -= copy;
+
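+ /* If everything fit in the head, recycle the page; otherwise keep
+ * the remainder as frag 0. */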
+ if (!len) {
+ give_a_page(vi, skb_shinfo(skb)->frags[0].page);
+ skb_shinfo(skb)->nr_frags--;
+ } else {
+ skb_shinfo(skb)->frags[0].page_offset +=
+ sizeof(*mhdr) + copy;
+ skb_shinfo(skb)->frags[0].size = len;
+ skb->data_len += len;
+ skb->len += len;
+ }
+
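+ /* num_buffers says how many descriptors the host used for this
+ * packet; chain the rest on as extra frags. */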
+ while (--mhdr->num_buffers) {
+ struct sk_buff *nskb;
+
+ i = skb_shinfo(skb)->nr_frags;
+ if (i >= MAX_SKB_FRAGS) {
+ pr_debug("%s: packet too long %d\n", dev->name,
+ len);
+ dev->stats.rx_length_errors++;
+ goto drop;
+ }
+
+ nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ if (!nskb) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ dev->name, mhdr->num_buffers);
+ dev->stats.rx_length_errors++;
+ goto drop;
+ }
+
+ __skb_unlink(nskb, &vi->recv);
+ vi->num--;
+
+ skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
+ skb_shinfo(nskb)->nr_frags = 0;
+ kfree_skb(nskb);
+
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ skb_shinfo(skb)->frags[i].size = len;
+ skb_shinfo(skb)->nr_frags++;
+ skb->data_len += len;
+ skb->len += len;
+ }
+ } else {
+ len -= sizeof(struct virtio_net_hdr);
+
+ if (len <= MAX_PACKET_LEN)
+ trim_pages(vi, skb);
+
+ err = pskb_trim(skb, len);
+ if (err) {
+ pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
+ len, err);
+ dev->stats.rx_dropped++;
+ goto drop;
+ }
}
+
skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
netif_receive_skb(skb);
return;
drop:
dev_kfree_skb(skb);
}
-static void try_fill_recv(struct virtnet_info *vi)
+static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
struct sk_buff *skb;
struct scatterlist sg[2+MAX_SKB_FRAGS];
sg_init_table(sg, 2+MAX_SKB_FRAGS);
for (;;) {
+ struct virtio_net_hdr *hdr;
+
skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
if (unlikely(!skb))
break;
skb_put(skb, MAX_PACKET_LEN);
- vnet_hdr_to_sg(sg, skb);
+
+ hdr = skb_vnet_hdr(skb);
+ sg_set_buf(sg, hdr, sizeof(*hdr));
if (vi->big_packets) {
for (i = 0; i < MAX_SKB_FRAGS; i++) {
vi->rvq->vq_ops->kick(vi->rvq);
}
+static void try_fill_recv(struct virtnet_info *vi)
+{
+ struct sk_buff *skb;
+ struct scatterlist sg[1];
+ int err;
+
+ if (!vi->mergeable_rx_bufs) {
+ try_fill_recv_maxbufs(vi);
+ return;
+ }
+
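+ /* Mergeable mode: post single-page buffers and let the host use as
+ * many per packet as it needs. */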
+ for (;;) {
+ skb_frag_t *f;
+
+ skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
+ if (unlikely(!skb))
+ break;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ f = &skb_shinfo(skb)->frags[0];
+ f->page = get_a_page(vi, GFP_ATOMIC);
+ if (!f->page) {
+ kfree_skb(skb);
+ break;
+ }
+
+ f->page_offset = 0;
+ f->size = PAGE_SIZE;
+
+ skb_shinfo(skb)->nr_frags++;
+
+ sg_init_one(sg, page_address(f->page), PAGE_SIZE);
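+ /* Put the skb on vi->recv before exposing the buffer, so the rx
+ * path can always find it; unlink again if add_buf fails. */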
+ skb_queue_head(&vi->recv, skb);
+
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
+ if (err) {
+ skb_unlink(skb, &vi->recv);
+ kfree_skb(skb);
+ break;
+ }
+ vi->num++;
+ }
+ if (unlikely(vi->num > vi->max))
+ vi->max = vi->num;
+ vi->rvq->vq_ops->kick(vi->rvq);
+}
+
static void skb_recv_done(struct virtqueue *rvq)
{
struct virtnet_info *vi = rvq->vdev->priv;
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
int num, err;
struct scatterlist sg[2+MAX_SKB_FRAGS];
- struct virtio_net_hdr *hdr;
+ struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
+ struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
sg_init_table(sg, 2+MAX_SKB_FRAGS);
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
- /* Encode metadata header at front. */
- hdr = skb_vnet_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = skb->csum_start - skb_headroom(skb);
hdr->csum_offset = skb->csum_offset;
} else {
hdr->flags = 0;
hdr->csum_offset = hdr->csum_start = 0;
}
if (!skb_is_gso(skb)) {
hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
hdr->gso_size = hdr->hdr_len = 0;
}
- vnet_hdr_to_sg(sg, skb);
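+ /* num_buffers is only meaningful on receive; zero it for transmit. */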
+ mhdr->num_buffers = 0;
+
+ /* Encode metadata header at front. */
+ if (vi->mergeable_rx_bufs)
+ sg_set_buf(sg, mhdr, sizeof(*mhdr));
+ else
+ sg_set_buf(sg, hdr, sizeof(*hdr));
+
num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
|| virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
vi->big_packets = true;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
+ vi->mergeable_rx_bufs = true;
+
/* We expect two virtqueues, receive then send. */
vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
if (IS_ERR(vi->rvq)) {
VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
+ VIRTIO_NET_F_MRG_RXBUF,
VIRTIO_F_NOTIFY_ON_EMPTY,
};