mm: move 'private' union within struct page
author Matthew Wilcox <mawilcox@microsoft.com>
Fri, 8 Jun 2018 00:08:31 +0000 (17:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Jun 2018 00:34:37 +0000 (17:34 -0700)
By moving page->private to the fourth word of struct page, we can put the
SLUB counters in the same word as SLAB's s_mem and still do the
cmpxchg_double trick.  The SLUB counters no longer overlap with the
mapcount or refcount, so we can drop the call to page_mapcount_reset() and
replace set_page_slub_counters() with a single-line assignment.
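
For reference, the "trick" is that the SLUB fast path swaps the freelist
pointer and the packed counters word as one double-width compare-and-exchange,
which only works while the two fields occupy adjacent, double-word-aligned
words of struct page.  A minimal sketch, condensed from the
CONFIG_HAVE_CMPXCHG_DOUBLE branch of __cmpxchg_double_slab() in mm/slub.c:

	/* Swap freelist and counters together, or fail atomically. */
	if (cmpxchg_double(&page->freelist, &page->counters,
			   freelist_old, counters_old,
			   freelist_new, counters_new))
		return true;

Moving 'private' (and with it 'counters') into the word that follows
'freelist' keeps that pairing intact.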

Link: http://lkml.kernel.org/r/20180518194519.3820-6-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm_types.h
mm/slub.c

index 6f153f2fab5042539b3388f523b2c597f10d1a5a..bcc5ee8b7b073bd36419396d2a88b963c65874d0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -65,15 +65,9 @@ struct hmm;
  */
 #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE
 #define _struct_page_alignment __aligned(2 * sizeof(unsigned long))
-#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE)
-#define _slub_counter_t                unsigned long
 #else
-#define _slub_counter_t                unsigned int
-#endif
-#else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
 #define _struct_page_alignment
-#define _slub_counter_t                unsigned int
-#endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */
+#endif
 
 struct page {
        /* First double word block */
@@ -95,6 +89,30 @@ struct page {
                /* page_deferred_list().prev    -- second tail page */
        };
 
+       union {
+               /*
+                * Mapping-private opaque data:
+                * Usually used for buffer_heads if PagePrivate
+                * Used for swp_entry_t if PageSwapCache
+                * Indicates order in the buddy system if PageBuddy
+                */
+               unsigned long private;
+#if USE_SPLIT_PTE_PTLOCKS
+#if ALLOC_SPLIT_PTLOCKS
+               spinlock_t *ptl;
+#else
+               spinlock_t ptl;
+#endif
+#endif
+               void *s_mem;                    /* slab first object */
+               unsigned long counters;         /* SLUB */
+               struct {                        /* SLUB */
+                       unsigned inuse:16;
+                       unsigned objects:15;
+                       unsigned frozen:1;
+               };
+       };
+
        union {
                /*
                 * If the page is neither PageSlab nor mappable to userspace,
@@ -104,13 +122,7 @@ struct page {
                 */
                unsigned int page_type;
 
-               _slub_counter_t counters;
                unsigned int active;            /* SLAB */
-               struct {                        /* SLUB */
-                       unsigned inuse:16;
-                       unsigned objects:15;
-                       unsigned frozen:1;
-               };
                int units;                      /* SLOB */
 
                struct {                        /* Page cache */
@@ -179,24 +191,6 @@ struct page {
 #endif
        };
 
-       union {
-               /*
-                * Mapping-private opaque data:
-                * Usually used for buffer_heads if PagePrivate
-                * Used for swp_entry_t if PageSwapCache
-                * Indicates order in the buddy system if PageBuddy
-                */
-               unsigned long private;
-#if USE_SPLIT_PTE_PTLOCKS
-#if ALLOC_SPLIT_PTLOCKS
-               spinlock_t *ptl;
-#else
-               spinlock_t ptl;
-#endif
-#endif
-               void *s_mem;                    /* slab first object */
-       };
-
 #ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
 #endif
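
With the hunks above applied, 'freelist' sits in the third word of struct
page and 'counters' (inside the new 'private' union) in the fourth, so the
pair stays adjacent and, thanks to _struct_page_alignment, still begins on a
2 * sizeof(unsigned long) boundary.  A hypothetical compile-time check (not
part of the patch; it could live in, say, kmem_cache_init()) spelling out
what cmpxchg_double needs from this layout:

	/*
	 * Illustration only: both fields sit in anonymous unions, so
	 * offsetof() on struct page reaches them directly.
	 */
	BUILD_BUG_ON(offsetof(struct page, counters) !=
		     offsetof(struct page, freelist) + sizeof(void *));
	BUILD_BUG_ON(offsetof(struct page, freelist) %
		     (2 * sizeof(unsigned long)));
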
index 0170ea8a97fe3554acf9ceac10c270f6c888432a..f5db87839ab442b11afa360f4c32d690d1f162e1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -356,21 +356,6 @@ static __always_inline void slab_unlock(struct page *page)
        __bit_spin_unlock(PG_locked, &page->flags);
 }
 
-static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
-{
-       struct page tmp;
-       tmp.counters = counters_new;
-       /*
-        * page->counters can cover frozen/inuse/objects as well
-        * as page->_refcount.  If we assign to ->counters directly
-        * we run the risk of losing updates to page->_refcount, so
-        * be careful and only assign to the fields we need.
-        */
-       page->frozen  = tmp.frozen;
-       page->inuse   = tmp.inuse;
-       page->objects = tmp.objects;
-}
-
 /* Interrupts must be disabled (for the fallback code to work right) */
 static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                void *freelist_old, unsigned long counters_old,
@@ -392,7 +377,7 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page
                if (page->freelist == freelist_old &&
                                        page->counters == counters_old) {
                        page->freelist = freelist_new;
-                       set_page_slub_counters(page, counters_new);
+                       page->counters = counters_new;
                        slab_unlock(page);
                        return true;
                }
@@ -431,7 +416,7 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
                if (page->freelist == freelist_old &&
                                        page->counters == counters_old) {
                        page->freelist = freelist_new;
-                       set_page_slub_counters(page, counters_new);
+                       page->counters = counters_new;
                        slab_unlock(page);
                        local_irq_restore(flags);
                        return true;
@@ -1694,7 +1679,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        __ClearPageSlabPfmemalloc(page);
        __ClearPageSlab(page);
 
-       page_mapcount_reset(page);
        page->mapping = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
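
On the mm/slub.c side, both deletions follow directly from the layout
change.  page_mapcount_reset() is (paraphrasing include/linux/mm.h) just:

	static inline void page_mapcount_reset(struct page *page)
	{
		atomic_set(&(page)->_mapcount, -1);
	}

It was needed because the inuse/objects/frozen bitfields used to share a
word with _mapcount, so a slab page had to have its mapcount rewound before
being freed; likewise, an unsigned long 'counters' used to extend over
_refcount, which is why set_page_slub_counters() copied only the bitfields.
With 'counters' now living in the private word, neither _mapcount nor
_refcount can be clobbered, and a plain 'page->counters = counters_new' is
safe.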