staging: erofs: separate into init_once / always
authorGao Xiang <gaoxiang25@huawei.com>
Thu, 22 Nov 2018 17:21:46 +0000 (01:21 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 23 Nov 2018 09:53:08 +0000 (10:53 +0100)
`z_erofs_vle_workgroup' is heavily generated in the decompression,
for example, it resets 32 bytes redundantly for 64-bit platforms
even though Z_EROFS_VLE_INLINE_PAGEVECS + Z_EROFS_CLUSTER_MAX_PAGES,
default 4, pages are stored in `z_erofs_vle_workgroup'.

As another example, `struct mutex' takes 72 bytes on our kirin
64-bit platforms; it's unnecessary for it to be reset to zero first
and then initialized each time.

Let's avoid filling all of `z_erofs_vle_workgroup' with 0 at first,
since most fields are reinitialized to meaningful values later,
and the pagevec does not need to be initialized at all.

Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/staging/erofs/unzip_vle.c

index ede3383ac601f04153b55616a35c86260a4561be..4e5843e8ee353260f72103f4446d7d7556db431f 100644 (file)
@@ -43,12 +43,38 @@ static inline int init_unzip_workqueue(void)
        return z_erofs_workqueue ? 0 : -ENOMEM;
 }
 
+static void init_once(void *ptr)
+{
+       struct z_erofs_vle_workgroup *grp = ptr;
+       struct z_erofs_vle_work *const work =
+               z_erofs_vle_grab_primary_work(grp);
+       unsigned int i;
+
+       mutex_init(&work->lock);
+       work->nr_pages = 0;
+       work->vcnt = 0;
+       for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
+               grp->compressed_pages[i] = NULL;
+}
+
+static void init_always(struct z_erofs_vle_workgroup *grp)
+{
+       struct z_erofs_vle_work *const work =
+               z_erofs_vle_grab_primary_work(grp);
+
+       atomic_set(&grp->obj.refcount, 1);
+       grp->flags = 0;
+
+       DBG_BUGON(work->nr_pages);
+       DBG_BUGON(work->vcnt);
+}
+
 int __init z_erofs_init_zip_subsystem(void)
 {
        z_erofs_workgroup_cachep =
                kmem_cache_create("erofs_compress",
                                  Z_EROFS_WORKGROUP_SIZE, 0,
-                                 SLAB_RECLAIM_ACCOUNT, NULL);
+                                 SLAB_RECLAIM_ACCOUNT, init_once);
 
        if (z_erofs_workgroup_cachep) {
                if (!init_unzip_workqueue())
@@ -370,10 +396,11 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
        BUG_ON(grp);
 
        /* no available workgroup, let's allocate one */
-       grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
+       grp = kmem_cache_alloc(z_erofs_workgroup_cachep, GFP_NOFS);
        if (unlikely(!grp))
                return ERR_PTR(-ENOMEM);
 
+       init_always(grp);
        grp->obj.index = f->idx;
        grp->llen = map->m_llen;
 
@@ -381,7 +408,6 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
                (map->m_flags & EROFS_MAP_ZIPPED) ?
                        Z_EROFS_VLE_WORKGRP_FMT_LZ4 :
                        Z_EROFS_VLE_WORKGRP_FMT_PLAIN);
-       atomic_set(&grp->obj.refcount, 1);
 
        /* new workgrps have been claimed as type 1 */
        WRITE_ONCE(grp->next, *f->owned_head);
@@ -394,8 +420,6 @@ z_erofs_vle_work_register(const struct z_erofs_vle_work_finder *f,
        work = z_erofs_vle_grab_primary_work(grp);
        work->pageofs = f->pageofs;
 
-       mutex_init(&work->lock);
-
        if (gnew) {
                int err = erofs_register_workgroup(f->sb, &grp->obj, 0);