mm: use helper functions for allocating and freeing vm_area structs
author    Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 21 Jul 2018 20:48:51 +0000 (13:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Sat, 21 Jul 2018 20:48:51 +0000 (13:48 -0700)
The vm_area_struct is one of the most fundamental memory management
objects, but the management of it is entirely open-coded everywhere,
ranging from allocation and freeing (using kmem_cache_[z]alloc and
kmem_cache_free) to initializing all the fields.

We want to unify this in order to end up with some unified
initialization of the vmas, and the first step to this is to at least
have basic allocation functions.

Right now those functions are literally just wrappers around the
kmem_cache_*() calls.  This is a purely mechanical conversion:

    # new vma:
    kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL) -> vm_area_alloc()

    # copy old vma
    kmem_cache_alloc(vm_area_cachep, GFP_KERNEL) -> vm_area_dup(old)

    # free vma
    kmem_cache_free(vm_area_cachep, vma) -> vm_area_free(vma)

to the point where the old vma passed in to the vm_area_dup() function
isn't even used yet (because I've left all the old manual initialization
alone).
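
As an illustration of the caller-side pattern (a sketch only:
make_anon_vma() below is a hypothetical example, not a hunk from this
patch; it roughly mirrors the ia64_init_addr_space() conversion, with
the field setup and mmap_sem locking still done by hand):

    #include <linux/mm.h>    /* vm_area_alloc(), vm_area_free(), insert_vm_struct() */
    #include <linux/list.h>  /* INIT_LIST_HEAD() */

    struct vm_area_struct *make_anon_vma(struct mm_struct *mm,
                                         unsigned long addr)
    {
            struct vm_area_struct *vma;

            /* was: kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL) */
            vma = vm_area_alloc();
            if (!vma)
                    return NULL;

            /* manual initialization is still open-coded at each call site */
            INIT_LIST_HEAD(&vma->anon_vma_chain);
            vma->vm_mm = mm;
            vma->vm_start = addr;
            vma->vm_end = addr + PAGE_SIZE;
            vma->vm_flags = VM_READ | VM_MAYREAD;   /* illustrative flags */
            vma->vm_page_prot = PAGE_READONLY;      /* illustrative protection */

            down_write(&mm->mmap_sem);
            if (insert_vm_struct(mm, vma)) {
                    up_write(&mm->mmap_sem);
                    /* was: kmem_cache_free(vm_area_cachep, vma) */
                    vm_area_free(vma);
                    return NULL;
            }
            up_write(&mm->mmap_sem);

            return vma;
    }

The copy case is the same idea: vm_area_dup(old) replaces the bare
kmem_cache_alloc(), even though the old vma argument is ignored for
now and callers still do "*new = *old" themselves.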

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/ia64/kernel/perfmon.c
arch/ia64/mm/init.c
fs/exec.c
include/linux/mm.h
kernel/fork.c
mm/mmap.c
mm/nommu.c

index 3b38c717008ac1993e5b54aa28a8fa6342ab7350..e859246badcacb32a643df81f6078c34ad0d622c 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2278,7 +2278,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        DPRINT(("smpl_buf @%p\n", smpl_buf));
 
        /* allocate vma */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc();
        if (!vma) {
                DPRINT(("Cannot allocate vma\n"));
                goto error_kmem;
@@ -2346,7 +2346,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
        return 0;
 
 error:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 error_kmem:
        pfm_rvfree(smpl_buf, size);
 
index 18278b448530d3ac9302754cf170e261401fd008..3f2321bffb72a2406afe468c9ad378f22f5f4da5 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -114,7 +114,7 @@ ia64_init_addr_space (void)
         * the problem.  When the process attempts to write to the register backing store
         * for the first time, it will get a SEGFAULT in this case.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc();
        if (vma) {
                INIT_LIST_HEAD(&vma->anon_vma_chain);
                vma->vm_mm = current->mm;
@@ -125,7 +125,7 @@ ia64_init_addr_space (void)
                down_write(&current->mm->mmap_sem);
                if (insert_vm_struct(current->mm, vma)) {
                        up_write(&current->mm->mmap_sem);
-                       kmem_cache_free(vm_area_cachep, vma);
+                       vm_area_free(vma);
                        return;
                }
                up_write(&current->mm->mmap_sem);
@@ -133,7 +133,7 @@ ia64_init_addr_space (void)
 
        /* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
        if (!(current->personality & MMAP_PAGE_ZERO)) {
-               vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+               vma = vm_area_alloc();
                if (vma) {
                        INIT_LIST_HEAD(&vma->anon_vma_chain);
                        vma->vm_mm = current->mm;
@@ -144,7 +144,7 @@ ia64_init_addr_space (void)
                        down_write(&current->mm->mmap_sem);
                        if (insert_vm_struct(current->mm, vma)) {
                                up_write(&current->mm->mmap_sem);
-                               kmem_cache_free(vm_area_cachep, vma);
+                               vm_area_free(vma);
                                return;
                        }
                        up_write(&current->mm->mmap_sem);
index 2d4e0075bd2457c83f9109d5a29365de61658840..9bd83989ea259bc19250c83d905870236c7df169 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -290,7 +290,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;
 
-       bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       bprm->vma = vma = vm_area_alloc();
        if (!vma)
                return -ENOMEM;
 
@@ -326,7 +326,7 @@ err:
        up_write(&mm->mmap_sem);
 err_free:
        bprm->vma = NULL;
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return err;
 }
 
index 3982c83fdcbfa335ef728cbcb10f073b3b1c7d70..de2fd86c61549f5eacbea530b61a6d7e63db2d5d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  * mmap() functions).
  */
 
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(void);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
 
 #ifndef CONFIG_MMU
 extern struct rb_root nommu_region_tree;
index 9440d61b925ca08faa3beb7d3a02fedfdd0a9b84..0e23deb5acfc2b3958bb082d83ab27ee27a555d2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -303,11 +303,26 @@ struct kmem_cache *files_cachep;
 struct kmem_cache *fs_cachep;
 
 /* SLAB cache for vm_area_struct structures */
-struct kmem_cache *vm_area_cachep;
+static struct kmem_cache *vm_area_cachep;
 
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+struct vm_area_struct *vm_area_alloc(void)
+{
+       return kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+}
+
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+{
+       return kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+}
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+       kmem_cache_free(vm_area_cachep, vma);
+}
+
 static void account_kernel_stack(struct task_struct *tsk, int account)
 {
        void *stack = task_stack_page(tsk);
@@ -455,7 +470,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
                                goto fail_nomem;
                        charge = len;
                }
-               tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               tmp = vm_area_dup(mpnt);
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
@@ -539,7 +554,7 @@ fail_uprobe_end:
 fail_nomem_anon_vma_fork:
        mpol_put(vma_policy(tmp));
 fail_nomem_policy:
-       kmem_cache_free(vm_area_cachep, tmp);
+       vm_area_free(tmp);
 fail_nomem:
        retval = -ENOMEM;
        vm_unacct_memory(charge);
index 5801b5f0a634b5561db5e70c347a93fd3a1ad59f..4286ad2dd1f5dff3ace4ce1297cc9154955a13d0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,7 +182,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        mpol_put(vma_policy(vma));
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return next;
 }
 
@@ -911,7 +911,7 @@ again:
                        anon_vma_merge(vma, next);
                mm->map_count--;
                mpol_put(vma_policy(next));
-               kmem_cache_free(vm_area_cachep, next);
+               vm_area_free(next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
                 * we must remove another next too. It would clutter
@@ -1729,7 +1729,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
         * specific mapper. the address has already been validated, but
         * not unmapped, but the maps are removed from the list.
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc();
        if (!vma) {
                error = -ENOMEM;
                goto unacct_error;
@@ -1832,7 +1832,7 @@ allow_write_and_free_vma:
        if (vm_flags & VM_DENYWRITE)
                allow_write_access(file);
 free_vma:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 unacct_error:
        if (charged)
                vm_unacct_memory(charged);
@@ -2620,7 +2620,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
                        return err;
        }
 
-       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       new = vm_area_dup(vma);
        if (!new)
                return -ENOMEM;
 
@@ -2669,7 +2669,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  out_free_mpol:
        mpol_put(vma_policy(new));
  out_free_vma:
-       kmem_cache_free(vm_area_cachep, new);
+       vm_area_free(new);
        return err;
 }
 
@@ -2984,7 +2984,7 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        /*
         * create a vma struct for an anonymous mapping
         */
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc();
        if (!vma) {
                vm_unacct_memory(len >> PAGE_SHIFT);
                return -ENOMEM;
@@ -3202,7 +3202,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                }
                *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
        } else {
-               new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+               new_vma = vm_area_dup(vma);
                if (!new_vma)
                        goto out;
                *new_vma = *vma;
@@ -3226,7 +3226,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 out_free_mempol:
        mpol_put(vma_policy(new_vma));
 out_free_vma:
-       kmem_cache_free(vm_area_cachep, new_vma);
+       vm_area_free(new_vma);
 out:
        return NULL;
 }
@@ -3350,7 +3350,7 @@ static struct vm_area_struct *__install_special_mapping(
        int ret;
        struct vm_area_struct *vma;
 
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc();
        if (unlikely(vma == NULL))
                return ERR_PTR(-ENOMEM);
 
@@ -3376,7 +3376,7 @@ static struct vm_area_struct *__install_special_mapping(
        return vma;
 
 out:
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return ERR_PTR(ret);
 }
 
index 4452d8bd9ae4b84851f433885eac18e050d72dad..006e3fe6501708e0e4ef951a1d8f21269c99a75b 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
        if (vma->vm_file)
                fput(vma->vm_file);
        put_nommu_region(vma->vm_region);
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
 }
 
 /*
@@ -1204,7 +1204,7 @@ unsigned long do_mmap(struct file *file,
        if (!region)
                goto error_getting_region;
 
-       vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+       vma = vm_area_alloc();
        if (!vma)
                goto error_getting_vma;
 
@@ -1368,7 +1368,7 @@ error:
        kmem_cache_free(vm_region_jar, region);
        if (vma->vm_file)
                fput(vma->vm_file);
-       kmem_cache_free(vm_area_cachep, vma);
+       vm_area_free(vma);
        return ret;
 
 sharing_violation:
@@ -1469,7 +1469,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!region)
                return -ENOMEM;
 
-       new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+       new = vm_area_dup(vma);
        if (!new) {
                kmem_cache_free(vm_region_jar, region);
                return -ENOMEM;