mm: move lru union within struct page
author Matthew Wilcox <mawilcox@microsoft.com>
Fri, 8 Jun 2018 00:08:46 +0000 (17:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Jun 2018 00:34:37 +0000 (17:34 -0700)
Since the LRU is two words, this does not affect the double-word alignment
of SLUB's freelist.
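
To see why, here is a compile-time sketch (a made-up fake_page layout, not
the kernel's struct page; lru_words stands in for struct list_head): since
the moved union is exactly two words, every later field shifts by
2 * sizeof(void *), so SLUB's freelist/counters pair keeps the double-word
alignment that cmpxchg_double requires.

    #include <stddef.h>

    struct lru_words { void *next, *prev; }; /* stand-in for struct list_head */

    struct fake_page {
            unsigned long flags;    /* word 0 */
            struct lru_words lru;   /* words 1-2: the moved union */
            void *slab_cache;       /* word 3 (illustrative filler) */
            void *freelist;         /* word 4: SLUB's free list head */
            unsigned long counters; /* word 5: updated with freelist */
    };

    /* The LRU is two words... */
    _Static_assert(sizeof(struct lru_words) == 2 * sizeof(void *),
                   "the LRU is two words");
    /* ...so freelist lands on a multiple of 2 * sizeof(void *) on both
     * 32-bit and 64-bit, which is what cmpxchg_double needs. */
    _Static_assert(offsetof(struct fake_page, freelist) % (2 * sizeof(void *)) == 0,
                   "freelist keeps double-word alignment");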

Link: http://lkml.kernel.org/r/20180518194519.3820-10-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm_types.h
mm/slub.c

index 158f9693f8652798887bbe57a8e55da9a6313f61..5b97bd445adab8063b5275e749f8ba7ff5cde7a5 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -72,6 +72,57 @@ struct hmm;
 struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
+       /*
+        * WARNING: bit 0 of the first word encodes PageTail(). That means
+        * the other users of this storage space MUST NOT use that bit, to
+        * avoid collisions and false-positive PageTail() results.
+        */
+       union {
+               struct list_head lru;   /* Pageout list, e.g. active_list,
+                                        * protected by zone_lru_lock!
+                                        * Can be used as a generic list
+                                        * by the page owner.
+                                        */
+               struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
+                                           * lru or handled by a slab
+                                           * allocator, this points to the
+                                           * hosting device page map.
+                                           */
+               struct {                /* slub per cpu partial pages */
+                       struct page *next;      /* Next partial slab */
+#ifdef CONFIG_64BIT
+                       int pages;      /* Nr of partial slabs left */
+                       int pobjects;   /* Approximate # of objects */
+#else
+                       short int pages;
+                       short int pobjects;
+#endif
+               };
+
+               struct rcu_head rcu_head;       /* Used by SLAB
+                                                * when destroying via RCU
+                                                */
+               /* Tail pages of compound page */
+               struct {
+                       unsigned long compound_head; /* If bit zero is set */
+
+                       /* First tail page only */
+                       unsigned char compound_dtor;
+                       unsigned char compound_order;
+                       /* two/six bytes available here */
+               };
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+               struct {
+                       unsigned long __pad;    /* do not overlay pmd_huge_pte
+                                                * with compound_head to avoid
+                                                * possible bit 0 collision.
+                                                */
+                       pgtable_t pmd_huge_pte; /* protected by page->ptl */
+               };
+#endif
+       };
+
        /* Three words (12/24 bytes) are available in this union. */
        union {
                struct {        /* Page cache and anonymous pages */
@@ -135,57 +186,6 @@ struct page {
        /* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
        atomic_t _refcount;
 
-       /*
-        * WARNING: bit 0 of the first word encodes PageTail(). That means
-        * the other users of this storage space MUST NOT use that bit, to
-        * avoid collisions and false-positive PageTail() results.
-        */
-       union {
-               struct list_head lru;   /* Pageout list, e.g. active_list,
-                                        * protected by zone_lru_lock!
-                                        * Can be used as a generic list
-                                        * by the page owner.
-                                        */
-               struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
-                                           * lru or handled by a slab
-                                           * allocator, this points to the
-                                           * hosting device page map.
-                                           */
-               struct {                /* slub per cpu partial pages */
-                       struct page *next;      /* Next partial slab */
-#ifdef CONFIG_64BIT
-                       int pages;      /* Nr of partial slabs left */
-                       int pobjects;   /* Approximate # of objects */
-#else
-                       short int pages;
-                       short int pobjects;
-#endif
-               };
-
-               struct rcu_head rcu_head;       /* Used by SLAB
-                                                * when destroying via RCU
-                                                */
-               /* Tail pages of compound page */
-               struct {
-                       unsigned long compound_head; /* If bit zero is set */
-
-                       /* First tail page only */
-                       unsigned char compound_dtor;
-                       unsigned char compound_order;
-                       /* two/six bytes available here */
-               };
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
-               struct {
-                       unsigned long __pad;    /* do not overlay pmd_huge_pte
-                                                * with compound_head to avoid
-                                                * possible bit 0 collision.
-                                                */
-                       pgtable_t pmd_huge_pte; /* protected by page->ptl */
-               };
-#endif
-       };
-
 #ifdef CONFIG_MEMCG
        struct mem_cgroup *mem_cgroup;
 #endif
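
The WARNING comment being moved is the contract that makes this union safe:
for a tail page, the first word (compound_head) holds the head page's
address with bit 0 set, while list_head pointers and the other overlays are
word-aligned and therefore keep bit 0 clear. A self-contained userspace
sketch of the idea (the sketch_* names are made up; the real helpers are
PageTail() and compound_head() in include/linux/page-flags.h):

    #include <stdio.h>

    /* Minimal stand-in for the overlaid first union word; not the
     * kernel's struct page. */
    struct sketch_page {
            unsigned long flags;
            unsigned long compound_head; /* overlays lru.next, pgmap, ... */
    };

    /* Essentially what PageTail() does: test bit 0. */
    static int sketch_page_tail(const struct sketch_page *page)
    {
            return page->compound_head & 1;
    }

    /* Essentially what compound_head() does: strip bit 0 to recover the
     * head page, or return the page itself if bit 0 is clear. */
    static const struct sketch_page *
    sketch_compound_head(const struct sketch_page *page)
    {
            unsigned long head = page->compound_head;

            return (head & 1) ? (const struct sketch_page *)(head - 1) : page;
    }

    int main(void)
    {
            struct sketch_page head_page = { 0 };
            struct sketch_page tail_page = {
                    .compound_head = (unsigned long)&head_page | 1,
            };

            printf("tail? %d, head %p (expect %p)\n",
                   sketch_page_tail(&tail_page),
                   (void *)sketch_compound_head(&tail_page),
                   (void *)&head_page);
            return 0;
    }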
index f5db87839ab442b11afa360f4c32d690d1f162e1..a96bf429af0836f32d9397b54a49a50d31846fa5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
  *   and to synchronize major metadata changes to slab cache structures.
  *
  *   The slab_lock is only used for debugging and on arches that do not
- *   have the ability to do a cmpxchg_double. It only protects the second
- *   double word in the page struct. Meaning
+ *   have the ability to do a cmpxchg_double. It only protects:
 *     A. page->freelist       -> List of free objects in a page
- *     B. page->counters       -> Counters of objects
- *     C. page->frozen         -> frozen state
+ *     B. page->inuse          -> Number of objects in use
+ *     C. page->objects        -> Number of objects in page
+ *     D. page->frozen         -> frozen state
  *
  *   If a slab is frozen then it is exempt from list management. It is not
  *   on any list. The processor that froze the slab is the one who can
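
For reference, the renamed items B-D are not separate words: in the
mm_types.h of this era they are bitfields packed into a single word that
aliases the old "counters" name, which is what lets cmpxchg_double update
freelist and all three fields in one atomic operation, with slab_lock as
the fallback. A simplified sketch (the field widths follow that layout; the
struct name is made up):

    #include <stddef.h>

    struct sketch_slub_page {
            void *freelist;                 /* first free object in the page */
            union {
                    unsigned long counters; /* the whole word, for cmpxchg_double */
                    struct {
                            unsigned inuse:16;   /* B: objects in use */
                            unsigned objects:15; /* C: objects in the page */
                            unsigned frozen:1;   /* D: frozen state */
                    };
            };
    };

    /* freelist and counters are adjacent, so one cmpxchg_double can swap
     * the free list head and all three counters together. */
    _Static_assert(offsetof(struct sketch_slub_page, counters) == sizeof(void *),
                   "counters immediately follows freelist");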