mm: improve dump_page() for compound pages
author Matthew Wilcox (Oracle) <willy@infradead.org>
Thu, 2 Apr 2020 04:05:49 +0000 (21:05 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 2 Apr 2020 16:35:27 +0000 (09:35 -0700)
There was no protection against a corrupted struct page having an
implausible compound_head().  Sanity check that a compound page has a head
within reach of the maximum allocatable page (this will need to be
adjusted if one of the plans to allocate 1GB pages comes to fruition).  In
addition,

 - Print the mapping pointer using %p instead of %px.  The actual value of
   the pointer can be read out of the raw page dump, and using %p gives a
   chance to correlate it with an earlier printk of the mapping pointer
 - Print the mapping pointer from the head page, not the tail page
   (the tail ->mapping pointer may be in use for other purposes, e.g. as part
   of a list_head)
 - Print the order of the page for compound pages
 - Dump the raw head page as well as the raw page
 - Print the refcount from the head page, not the tail page
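
For reference, here is a minimal userspace sketch (not part of this patch, and
not kernel code) modelling the plausibility check described above: a head
pointer recovered from a possibly corrupt struct page is only trusted if it
lies at or before the page being dumped, within MAX_ORDER_NR_PAGES of it.
The array, the MAX_ORDER_NR_PAGES value and the helper name are illustrative
assumptions only.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024  /* illustrative; real value depends on MAX_ORDER */

    struct page { void *mapping; unsigned long compound_head; };

    /* Pretend this is the vmemmap: a flat array of struct page. */
    static struct page vmemmap[8192];

    /* Mirrors the new check in __dump_page(): only trust a head that is
     * at or before the page, within one maximum-order allocation. */
    static bool head_is_plausible(struct page *page, struct page *head)
    {
            return !(page < head || page >= head + MAX_ORDER_NR_PAGES);
    }

    int main(void)
    {
            struct page *page = &vmemmap[4100];

            /* Sane: head of an order-9 compound page containing this tail. */
            printf("sane:  %d\n", head_is_plausible(page, &vmemmap[4096]));

            /* Corrupt: a "head" thousands of pages away from the page. */
            printf("bogus: %d\n", head_is_plausible(page, &vmemmap[0]));
            return 0;
    }

When the check fails, __dump_page() falls back to treating the page as a
standalone, non-compound page (head = page, compound = false) instead of
dereferencing data through a bogus head pointer.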

Suggested-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Co-developed-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Link: http://lkml.kernel.org/r/20200211001536.1027652-12-jhubbard@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/debug.c

index ecccd9f17801ddbd31d98ae101cfe6e2e5a3be5a..f5ffb0784559b3eb5b2bbddd008c9c5de9379b3b 100644
@@ -44,8 +44,10 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       struct page *head = compound_head(page);
        struct address_space *mapping;
        bool page_poisoned = PagePoisoned(page);
+       bool compound = PageCompound(page);
        /*
         * Accessing the pageblock without the zone lock. It could change to
         * "isolate" again in the meantime, but since we are just dumping the
@@ -66,25 +68,32 @@ void __dump_page(struct page *page, const char *reason)
                goto hex_only;
        }
 
-       mapping = page_mapping(page);
+       if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
+               /* Corrupt page, cannot call page_mapping */
+               mapping = page->mapping;
+               head = page;
+               compound = false;
+       } else {
+               mapping = page_mapping(page);
+       }
 
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
-       mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+       mapcount = PageSlab(head) ? 0 : page_mapcount(page);
 
-       if (PageCompound(page))
-               pr_warn("page:%px refcount:%d mapcount:%d mapping:%px "
-                       "index:%#lx compound_mapcount: %d\n",
-                       page, page_ref_count(page), mapcount,
-                       page->mapping, page_to_pgoff(page),
-                       compound_mapcount(page));
+       if (compound)
+               pr_warn("page:%px refcount:%d mapcount:%d mapping:%p "
+                       "index:%#lx head:%px order:%u compound_mapcount:%d\n",
+                       page, page_ref_count(head), mapcount,
+                       mapping, page_to_pgoff(page), head,
+                       compound_order(head), compound_mapcount(page));
        else
-               pr_warn("page:%px refcount:%d mapcount:%d mapping:%px index:%#lx\n",
+               pr_warn("page:%px refcount:%d mapcount:%d mapping:%p index:%#lx\n",
                        page, page_ref_count(page), mapcount,
-                       page->mapping, page_to_pgoff(page));
+                       mapping, page_to_pgoff(page));
        if (PageKsm(page))
                type = "ksm ";
        else if (PageAnon(page))
@@ -106,6 +115,10 @@ hex_only:
        print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
+       if (head != page)
+               print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
+                       sizeof(unsigned long), head,
+                       sizeof(struct page), false);
 
        if (reason)
                pr_warn("page dumped because: %s\n", reason);