mm: add vfree_atomic()
author Andrey Ryabinin <aryabinin@virtuozzo.com>
Tue, 13 Dec 2016 00:44:10 +0000 (16:44 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 13 Dec 2016 02:55:08 +0000 (18:55 -0800)
We are going to use a sleeping lock for freeing vmap areas.  However, some
vfree() users want to free memory from atomic (but not interrupt)
context.  For this we add vfree_atomic() - a deferred variant of vfree()
that can be used in any atomic context (except NMIs).
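
As an illustration (not part of this patch), a caller that must drop a
vmalloc()-ed buffer while holding a spinlock - i.e. in atomic context,
where vfree() will no longer be legal - could use it as sketched below;
the struct, lock, and function names here are hypothetical:

#include <linux/spinlock.h>
#include <linux/vmalloc.h>

struct foo_ctx {			/* hypothetical example structure */
	spinlock_t lock;
	void *buf;			/* allocated with vmalloc() */
};

static void foo_replace_buf(struct foo_ctx *ctx, void *new_buf)
{
	spin_lock(&ctx->lock);		/* atomic context from here on */
	vfree_atomic(ctx->buf);		/* never sleeps; unmap is deferred */
	ctx->buf = new_buf;
	spin_unlock(&ctx->lock);
}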

[akpm@linux-foundation.org: tweak comment grammar]
[aryabinin@virtuozzo.com: use raw_cpu_ptr() instead of this_cpu_ptr()]
Link: http://lkml.kernel.org/r/1481553981-3856-1-git-send-email-aryabinin@virtuozzo.com
Link: http://lkml.kernel.org/r/1479474236-4139-5-git-send-email-hch@lst.de
Signed-off-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Joel Fernandes <joelaf@google.com>
Cc: Jisheng Zhang <jszhang@marvell.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: John Dias <joaodias@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/vmalloc.h
mm/vmalloc.c

diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3d9d786a943cacd7321b208f59ff4a4467c561df..d68edffbf142cec5f8987a7907b6f2e76cefdfd7 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,7 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
                        const void *caller);
 
 extern void vfree(const void *addr);
+extern void vfree_atomic(const void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
                        unsigned long flags, pgprot_t prot);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1f5501b43026e758d26a1433b9364db46a30bc69..4ac776f10ad1bc68d8bde6dacaa2241cbf1de513 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1486,7 +1486,39 @@ static void __vunmap(const void *addr, int deallocate_pages)
        kfree(area);
        return;
 }
+
+static inline void __vfree_deferred(const void *addr)
+{
+       /*
+        * Use raw_cpu_ptr() because this can be called from preemptible
+        * context. Preemption is absolutely fine here, because the llist_add()
+        * implementation is lockless, so it works even if we are adding to
+        * another cpu's list.  schedule_work() should be fine with this too.
+        */
+       struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+       if (llist_add((struct llist_node *)addr, &p->list))
+               schedule_work(&p->wq);
+}
+
+/**
+ *     vfree_atomic  -  release memory allocated by vmalloc()
+ *     @addr:          memory base address
+ *
+ *     This one is just like vfree() but can be called in any atomic context
+ *     except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+       BUG_ON(in_nmi());
+
+       kmemleak_free(addr);
+
+       if (!addr)
+               return;
+       __vfree_deferred(addr);
+}
 
 /**
  *     vfree  -  release memory allocated by vmalloc()
  *     @addr:          memory base address
@@ -1509,11 +1541,9 @@ void vfree(const void *addr)
 
        if (!addr)
                return;
-       if (unlikely(in_interrupt())) {
-               struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
-               if (llist_add((struct llist_node *)addr, &p->list))
-                       schedule_work(&p->wq);
-       } else
+       if (unlikely(in_interrupt()))
+               __vfree_deferred(addr);
+       else
                __vunmap(addr, 1);
 }
 EXPORT_SYMBOL(vfree);
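
The deferred chunks are drained by the per-CPU free_work() handler that
vfree() already relies on for in-interrupt frees; note that the address
being freed itself serves as the llist_node, which is safe because the
area stays mapped until __vunmap() runs.  A simplified sketch of that
worker, reconstructed here for reference rather than quoted from this
diff:

/* Runs from process context, where __vunmap() is allowed to sleep. */
static void free_work(struct work_struct *w)
{
	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
	struct llist_node *llnode = llist_del_all(&p->list);

	while (llnode) {
		void *addr = llnode;

		llnode = llist_next(llnode);	/* read next before addr is unmapped */
		__vunmap(addr, 1);
	}
}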