struct page {
unsigned long flags; /* Atomic flags, some possibly
* updated asynchronously */
+ /*
+ * WARNING: bit 0 of the first word encodes PageTail(). That means
+ * the other users of this storage space MUST NOT use bit 0, to
+ * avoid collisions and false-positive PageTail() results.
+ */
+ union {
+ struct list_head lru; /* Pageout list, e.g. active_list,
+ * protected by zone_lru_lock!
+ * Can be used as a generic list
+ * by the page owner.
+ */
+ struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
+ * lru or handled by a slab
+ * allocator, this points to the
+ * hosting device page map.
+ */
+ struct { /* slub per cpu partial pages */
+ struct page *next; /* Next partial slab */
+#ifdef CONFIG_64BIT
+ int pages; /* Nr of partial slabs left */
+ int pobjects; /* Approximate # of objects */
+#else
+ short int pages;
+ short int pobjects;
+#endif
+ };
+
+ struct rcu_head rcu_head; /* Used by SLAB
+ * when destroying via RCU
+ */
+ /* Tail pages of compound page */
+ struct {
+ unsigned long compound_head; /* If bit zero is set */
+
+ /* First tail page only */
+ unsigned char compound_dtor;
+ unsigned char compound_order;
+ /* two/six bytes available here */
+ };
+
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
+ struct {
+ unsigned long __pad; /* do not overlay pmd_huge_pte
+ * with compound_head to avoid
+ * possible bit 0 collision.
+ */
+ pgtable_t pmd_huge_pte; /* protected by page->ptl */
+ };
+#endif
+ };
+
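/*
 * For reference, a rough sketch of the helpers that rely on the bit 0
 * encoding the WARNING above guards (modeled on the accessors in
 * include/linux/page-flags.h and include/linux/mm.h; simplified, not
 * the verbatim kernel code):
 */
static inline int PageTail(const struct page *page)
{
	/* Bit 0 of compound_head doubles as the tail-page flag. */
	return READ_ONCE(page->compound_head) & 1;
}

static inline struct page *compound_head(const struct page *page)
{
	unsigned long head = READ_ONCE(page->compound_head);

	/* Tail page: strip bit 0 to recover the head page pointer. */
	if (head & 1)
		return (struct page *)(head - 1);
	return (struct page *)page;
}

static inline void set_compound_head(struct page *page, struct page *head)
{
	/* Store the head pointer with bit 0 set to mark this tail page. */
	WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
}
/*
 * A struct page pointer is always at least word-aligned, so a genuine
 * pointer stored in this word can never have bit 0 set; head + 1 is
 * therefore unambiguous, which is exactly why the other union members
 * must keep bit 0 of their first word clear.
 */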
/* Three words (12/24 bytes) are available in this union. */
union {
struct { /* Page cache and anonymous pages */
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
atomic_t _refcount;
- /*
- * WARNING: bit 0 of the first word encode PageTail(). That means
- * the rest users of the storage space MUST NOT use the bit to
- * avoid collision and false-positive PageTail().
- */
- union {
- struct list_head lru; /* Pageout list, eg. active_list
- * protected by zone_lru_lock !
- * Can be used as a generic list
- * by the page owner.
- */
- struct dev_pagemap *pgmap; /* ZONE_DEVICE pages are never on an
- * lru or handled by a slab
- * allocator, this points to the
- * hosting device page map.
- */
- struct { /* slub per cpu partial pages */
- struct page *next; /* Next partial slab */
-#ifdef CONFIG_64BIT
- int pages; /* Nr of partial slabs left */
- int pobjects; /* Approximate # of objects */
-#else
- short int pages;
- short int pobjects;
-#endif
- };
-
- struct rcu_head rcu_head; /* Used by SLAB
- * when destroying via RCU
- */
- /* Tail pages of compound page */
- struct {
- unsigned long compound_head; /* If bit zero is set */
-
- /* First tail page only */
- unsigned char compound_dtor;
- unsigned char compound_order;
- /* two/six bytes available here */
- };
-
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
- struct {
- unsigned long __pad; /* do not overlay pmd_huge_pte
- * with compound_head to avoid
- * possible bit 0 collision.
- */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
- };
-#endif
- };
-
#ifdef CONFIG_MEMCG
struct mem_cgroup *mem_cgroup;
#endif
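/*
 * For reference, a rough sketch of the slab_lock()/slab_unlock() pair
 * that the comment in the next hunk documents (modeled on mm/slub.c;
 * simplified). It is a bit spinlock taken on PG_locked in page->flags,
 * used only for debugging and on arches lacking cmpxchg_double:
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	__bit_spin_unlock(PG_locked, &page->flags);
}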
* and to synchronize major metadata changes to slab cache structures.
*
* The slab_lock is only used for debugging and on arches that do not
- * have the ability to do a cmpxchg_double. It only protects the second
- * double word in the page struct. Meaning
+ * have the ability to do a cmpxchg_double. It only protects:
 * A. page->freelist -> List of free objects in a page
- * B. page->counters -> Counters of objects
- * C. page->frozen -> frozen state
+ * B. page->inuse -> Number of objects in use
+ * C. page->objects -> Number of objects in page
+ * D. page->frozen -> frozen state
*
* If a slab is frozen then it is exempt from list management. It is not
* on any list. The processor that froze the slab is the one who can