mm: introduce page_shift()
Author:     Matthew Wilcox (Oracle) <willy@infradead.org>
AuthorDate: Mon, 23 Sep 2019 22:34:28 +0000 (15:34 -0700)
Committer:  Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 24 Sep 2019 22:54:08 +0000 (15:54 -0700)
Replace PAGE_SHIFT + compound_order(page) with the new page_shift()
function.  Minor improvements in readability.
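
For illustration only (not part of this patch), the identity the new helper
provides can be sketched as follows; it follows directly from the definitions
of page_size() and page_shift() in include/linux/mm.h:

	struct page *head = compound_head(page);

	/* page_shift(head) == PAGE_SHIFT + compound_order(head), */
	/* so the size of a (possibly compound) page in bytes is:  */
	unsigned long nr_bytes = 1UL << page_shift(head);	/* == page_size(head) */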

[akpm@linux-foundation.org: fix build in tce_page_is_contained()]
Link: http://lkml.kernel.org/r/201907241853.yNQTrJWd%25lkp@intel.com
Link: http://lkml.kernel.org/r/20190721104612.19120-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/powerpc/mm/book3s64/iommu_api.c
drivers/vfio/vfio_iommu_spapr_tce.c
include/linux/mm.h

diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c
index b056cae3388b76e139a941ca1b4d48417ac89d5f..56cc845205779b17def2e37ca728f01e046efd05 100644
--- a/arch/powerpc/mm/book3s64/iommu_api.c
+++ b/arch/powerpc/mm/book3s64/iommu_api.c
@@ -129,11 +129,8 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua,
                 * Allow to use larger than 64k IOMMU pages. Only do that
                 * if we are backed by hugetlb.
                 */
-               if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page)) {
-                       struct page *head = compound_head(page);
-
-                       pageshift = compound_order(head) + PAGE_SHIFT;
-               }
+               if ((mem->pageshift > PAGE_SHIFT) && PageHuge(page))
+                       pageshift = page_shift(compound_head(page));
                mem->pageshift = min(mem->pageshift, pageshift);
                /*
                 * We don't need struct page reference any more, switch
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 3b18fa4d090a301e051f6057a31794a621f01585..26cef65b41e7a167cbc500d672e093e851c59504 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -176,13 +176,13 @@ put_exit:
 }
 
 static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
-               unsigned int page_shift)
+               unsigned int it_page_shift)
 {
        struct page *page;
        unsigned long size = 0;
 
-       if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
-               return size == (1UL << page_shift);
+       if (mm_iommu_is_devmem(mm, hpa, it_page_shift, &size))
+               return size == (1UL << it_page_shift);
 
        page = pfn_to_page(hpa >> PAGE_SHIFT);
        /*
@@ -190,7 +190,7 @@ static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
         * a page we just found. Otherwise the hardware can get access to
         * a bigger memory chunk that it should.
         */
-       return (PAGE_SHIFT + compound_order(compound_head(page))) >= page_shift;
+       return page_shift(compound_head(page)) >= it_page_shift;
 }
 
 static inline bool tce_groups_attached(struct tce_container *container)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index d46d5585e2a2236f05718166e5c428bbfc0d6279..9238548bdec5534f52f096bc2e0abfe58fa7cee3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -811,6 +811,12 @@ static inline unsigned long page_size(struct page *page)
        return PAGE_SIZE << compound_order(page);
 }
 
+/* Returns the number of bits needed for the number of bytes in a page */
+static inline unsigned int page_shift(struct page *page)
+{
+       return PAGE_SHIFT + compound_order(page);
+}
+
 void free_compound_page(struct page *page);
 
 #ifdef CONFIG_MMU
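
A minimal usage sketch, assuming a caller that must check a hardware mapping
granularity against its backing page (the function name check_granularity()
below is hypothetical; it mirrors the tce_page_is_contained() logic above):

	#include <linux/mm.h>

	/*
	 * Hypothetical helper: return true if a mapping of 1 << it_page_shift
	 * bytes is fully contained within the (possibly compound) page.
	 */
	static bool check_granularity(struct page *page, unsigned int it_page_shift)
	{
		/* page_shift() expects the head page of a compound page */
		return page_shift(compound_head(page)) >= it_page_shift;
	}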