drivers: dma-coherent: Move spinlock in dma_alloc_from_coherent()
author Bastian Hecht <bhecht@de.adit-jv.com>
Wed, 28 Sep 2016 07:51:57 +0000 (08:51 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 28 Sep 2016 15:53:16 +0000 (17:53 +0200)
We don't need to hold the spinlock while zeroing the allocated memory.
For large buffers this is a severe issue, as other CPUs can end up
spinning on the lock for half a second or longer.

Signed-off-by: Bastian Hecht <bhecht@de.adit-jv.com>
Signed-off-by: George G. Davis <george_davis@mentor.com>
Signed-off-by: Mark Craske <Mark_Craske@mentor.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/base/dma-coherent.c

index 2789f7a95b936e601e9037819df5f70912678beb..640a7e63c4537a5b82e4dc2f983168390d9896b8 100644
@@ -165,6 +165,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
        int order = get_order(size);
        unsigned long flags;
        int pageno;
+       int dma_memory_map;
 
        if (!dev)
                return 0;
@@ -187,11 +188,12 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
         */
        *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
        *ret = mem->virt_base + (pageno << PAGE_SHIFT);
-       if (mem->flags & DMA_MEMORY_MAP)
+       dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
+       spin_unlock_irqrestore(&mem->spinlock, flags);
+       if (dma_memory_map)
                memset(*ret, 0, size);
        else
                memset_io(*ret, 0, size);
-       spin_unlock_irqrestore(&mem->spinlock, flags);
 
        return 1;
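
For reference, this is roughly how the affected part of dma_alloc_from_coherent()
reads with the patch applied. Only the reordering of the unlock versus the memset
comes from the hunks above; the surrounding lines (the size check, the bitmap
lookup and the error path) are assumed from the upstream drivers/base/dma-coherent.c
of this era and may differ slightly in this tree.

        /* Sketch of the post-patch allocation path; context lines are assumed. */
        int dma_alloc_from_coherent(struct device *dev, ssize_t size,
                                    dma_addr_t *dma_handle, void **ret)
        {
                struct dma_coherent_mem *mem;
                int order = get_order(size);
                unsigned long flags;
                int pageno;
                int dma_memory_map;

                if (!dev)
                        return 0;
                mem = dev->dma_mem;
                if (!mem)
                        return 0;

                *ret = NULL;
                spin_lock_irqsave(&mem->spinlock, flags);

                if (unlikely(size > (mem->size << PAGE_SHIFT)))
                        goto err;

                pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
                if (unlikely(pageno < 0))
                        goto err;

                /* Compute the returned addresses while still holding the lock. */
                *dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
                *ret = mem->virt_base + (pageno << PAGE_SHIFT);

                /*
                 * Snapshot the flag and drop the lock before zeroing: for large
                 * allocations the memset can take a long time, and other CPUs
                 * must not spin on mem->spinlock while it runs.
                 */
                dma_memory_map = (mem->flags & DMA_MEMORY_MAP);
                spin_unlock_irqrestore(&mem->spinlock, flags);
                if (dma_memory_map)
                        memset(*ret, 0, size);
                else
                        memset_io(*ret, 0, size);

                return 1;

        err:
                spin_unlock_irqrestore(&mem->spinlock, flags);
                /*
                 * The per-device area could not satisfy the request; a zero
                 * return lets the caller fall back to generic memory unless
                 * the area is marked exclusive.
                 */
                return mem->flags & DMA_MEMORY_EXCLUSIVE;
        }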