vmbus: keep pointer to ring buffer page
author: Stephen Hemminger <stephen@networkplumber.org>
Fri, 14 Sep 2018 16:10:16 +0000 (09:10 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 25 Sep 2018 18:33:24 +0000 (20:33 +0200)
Avoid going from struct page to virt address (and back) by just
keeping pointer to the allocated pages instead of virt address.

Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/hv/channel.c
drivers/uio/uio_hv_generic.c
include/linux/hyperv.h

index 33e6db02dbab918237b9a2cf508c91a31342619e..56ec0d96d876a773bd34ad89617e687292359e00 100644 (file)
@@ -91,11 +91,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
        unsigned long flags;
        int ret, err = 0;
        struct page *page;
+       unsigned int order;
 
        if (send_ringbuffer_size % PAGE_SIZE ||
            recv_ringbuffer_size % PAGE_SIZE)
                return -EINVAL;
 
+       order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+
        spin_lock_irqsave(&newchannel->lock, flags);
        if (newchannel->state == CHANNEL_OPEN_STATE) {
                newchannel->state = CHANNEL_OPENING_STATE;
@@ -110,21 +113,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
        /* Allocate the ring buffer */
        page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
-                               GFP_KERNEL|__GFP_ZERO,
-                               get_order(send_ringbuffer_size +
-                               recv_ringbuffer_size));
+                               GFP_KERNEL|__GFP_ZERO, order);
 
        if (!page)
-               page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
-                                  get_order(send_ringbuffer_size +
-                                            recv_ringbuffer_size));
+               page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
        if (!page) {
                err = -ENOMEM;
                goto error_set_chnstate;
        }
 
-       newchannel->ringbuffer_pages = page_address(page);
+       newchannel->ringbuffer_page = page;
        newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
                                           recv_ringbuffer_size) >> PAGE_SHIFT;
 
@@ -239,8 +238,7 @@ error_free_gpadl:
 error_free_pages:
        hv_ringbuffer_cleanup(&newchannel->outbound);
        hv_ringbuffer_cleanup(&newchannel->inbound);
-       __free_pages(page,
-                    get_order(send_ringbuffer_size + recv_ringbuffer_size));
+       __free_pages(page, order);
 error_set_chnstate:
        newchannel->state = CHANNEL_OPEN_STATE;
        return err;
@@ -658,8 +656,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
        hv_ringbuffer_cleanup(&channel->outbound);
        hv_ringbuffer_cleanup(&channel->inbound);
 
-       free_pages((unsigned long)channel->ringbuffer_pages,
-               get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+       __free_pages(channel->ringbuffer_page,
+                    get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
 
 out:
        return ret;
index a08860260f55a12ce8aa699299cae94d8615a1a9..ba67a52675578e9282d64a3706549f992a9216bb 100644 (file)
@@ -130,11 +130,12 @@ static int hv_uio_ring_mmap(struct file *filp, struct kobject *kobj,
                = container_of(kobj, struct vmbus_channel, kobj);
        struct hv_device *dev = channel->primary_channel->device_obj;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
+       void *ring_buffer = page_address(channel->ringbuffer_page);
 
        dev_dbg(&dev->device, "mmap channel %u pages %#lx at %#lx\n",
                q_idx, vma_pages(vma), vma->vm_pgoff);
 
-       return vm_iomap_memory(vma, virt_to_phys(channel->ringbuffer_pages),
+       return vm_iomap_memory(vma, virt_to_phys(ring_buffer),
                               channel->ringbuffer_pagecount << PAGE_SHIFT);
 }
 
@@ -223,7 +224,7 @@ hv_uio_probe(struct hv_device *dev,
        /* mem resources */
        pdata->info.mem[TXRX_RING_MAP].name = "txrx_rings";
        pdata->info.mem[TXRX_RING_MAP].addr
-               = (uintptr_t)dev->channel->ringbuffer_pages;
+               = (uintptr_t)page_address(dev->channel->ringbuffer_page);
        pdata->info.mem[TXRX_RING_MAP].size
                = dev->channel->ringbuffer_pagecount << PAGE_SHIFT;
        pdata->info.mem[TXRX_RING_MAP].memtype = UIO_MEM_LOGICAL;
index 6c4575c7f46b61e6cc68868264941f7c37ec75ab..a6c32d2d090b8cdfd6370ea074f8ab02d386ef20 100644 (file)
@@ -739,7 +739,7 @@ struct vmbus_channel {
        u32 ringbuffer_gpadlhandle;
 
        /* Allocated memory for ring buffer */
-       void *ringbuffer_pages;
+       struct page *ringbuffer_page;
        u32 ringbuffer_pagecount;
        struct hv_ring_buffer_info outbound;    /* send to parent */
        struct hv_ring_buffer_info inbound;     /* receive from parent */