mm/sparse.c: move subsection_map related functions together
author Baoquan He <bhe@redhat.com>
Tue, 7 Apr 2020 03:07:13 +0000 (20:07 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 7 Apr 2020 17:43:40 +0000 (10:43 -0700)
Group the subsection_map helpers (clear_subsection_map(), is_subsection_map_empty() and fill_subsection_map()) together with their !CONFIG_SPARSEMEM_VMEMMAP stubs, so the related code lives under a single CONFIG_SPARSEMEM_VMEMMAP ifdeffery block instead of being scattered through the file.

No functional change.

[bhe@redhat.com: move functions into CONFIG_MEMORY_HOTPLUG ifdeffery scope]
Link: http://lkml.kernel.org/r/20200316045804.GC3486@MiWiFi-R3L-srv
Signed-off-by: Baoquan He <bhe@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Link: http://lkml.kernel.org/r/20200312124414.439-6-bhe@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
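
For orientation, a minimal sketch (not part of the patch) of the layout this commit produces in mm/sparse.c: the three subsection_map helpers sit in one CONFIG_SPARSEMEM_VMEMMAP block, with their no-op stubs in the matching #else branch. The prototypes and stub bodies below are condensed from the diff hunks that follow; the full implementations are shown there.

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Real implementations: operate on ms->usage->subsection_map[]. */
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages);
static bool is_subsection_map_empty(struct mem_section *ms);
static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages);
#else
/* Without vmemmap there is no subsection granularity to track. */
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
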
mm/sparse.c

index 9d43fde1f6305b506ed658c23f6a5885ad23d983..1aee5a4815715c13bb2c966fb487f807757fc5d9 100644
@@ -666,6 +666,55 @@ static void free_map_bootmem(struct page *memmap)
 
        vmemmap_free(start, end, NULL);
 }
+
+static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+       DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
+       struct mem_section *ms = __pfn_to_section(pfn);
+       unsigned long *subsection_map = ms->usage
+               ? &ms->usage->subsection_map[0] : NULL;
+
+       subsection_mask_set(map, pfn, nr_pages);
+       if (subsection_map)
+               bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
+
+       if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
+                               "section already deactivated (%#lx + %ld)\n",
+                               pfn, nr_pages))
+               return -EINVAL;
+
+       bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
+       return 0;
+}
+
+static bool is_subsection_map_empty(struct mem_section *ms)
+{
+       return bitmap_empty(&ms->usage->subsection_map[0],
+                           SUBSECTIONS_PER_SECTION);
+}
+
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
+{
+       struct mem_section *ms = __pfn_to_section(pfn);
+       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
+       unsigned long *subsection_map;
+       int rc = 0;
+
+       subsection_mask_set(map, pfn, nr_pages);
+
+       subsection_map = &ms->usage->subsection_map[0];
+
+       if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
+               rc = -EINVAL;
+       else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
+               rc = -EEXIST;
+       else
+               bitmap_or(subsection_map, map, subsection_map,
+                               SUBSECTIONS_PER_SECTION);
+
+       return rc;
+}
 #else
 struct page * __meminit populate_section_memmap(unsigned long pfn,
                unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
@@ -709,46 +758,22 @@ static void free_map_bootmem(struct page *memmap)
                        put_page_bootmem(page);
        }
 }
-#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
 static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-       DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
-       struct mem_section *ms = __pfn_to_section(pfn);
-       unsigned long *subsection_map = ms->usage
-               ? &ms->usage->subsection_map[0] : NULL;
-
-       subsection_mask_set(map, pfn, nr_pages);
-       if (subsection_map)
-               bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);
-
-       if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
-                               "section already deactivated (%#lx + %ld)\n",
-                               pfn, nr_pages))
-               return -EINVAL;
-
-       bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
        return 0;
 }
 
 static bool is_subsection_map_empty(struct mem_section *ms)
 {
-       return bitmap_empty(&ms->usage->subsection_map[0],
-                           SUBSECTIONS_PER_SECTION);
-}
-#else
-static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-       return 0;
+       return true;
 }
 
-static bool is_subsection_map_empty(struct mem_section *ms)
+static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
 {
-       return true;
+       return 0;
 }
-#endif
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /*
  * To deactivate a memory region, there are 3 cases to handle across
@@ -810,35 +835,6 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
                ms->section_mem_map = (unsigned long)NULL;
 }
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-       struct mem_section *ms = __pfn_to_section(pfn);
-       DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
-       unsigned long *subsection_map;
-       int rc = 0;
-
-       subsection_mask_set(map, pfn, nr_pages);
-
-       subsection_map = &ms->usage->subsection_map[0];
-
-       if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
-               rc = -EINVAL;
-       else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
-               rc = -EEXIST;
-       else
-               bitmap_or(subsection_map, map, subsection_map,
-                               SUBSECTIONS_PER_SECTION);
-
-       return rc;
-}
-#else
-static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
-{
-       return 0;
-}
-#endif
-
 static struct page * __meminit section_activate(int nid, unsigned long pfn,
                unsigned long nr_pages, struct vmem_altmap *altmap)
 {
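
To illustrate how the moved helpers are consumed, a simplified sketch based on the context lines above (assumed structure, not verbatim kernel code): section_activate() fills the subsection map before populating the memmap, and section_deactivate() clears it and tears the section down once the map is empty. Error handling, RCU and memmap freeing are omitted, and the _sketch suffixes mark these as hypothetical stand-ins for the real functions.

/* Simplified, hypothetical sketch of the call sites. */
static struct page * __meminit section_activate_sketch(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	int rc = fill_subsection_map(pfn, nr_pages);	/* mark subsections present */

	if (rc)
		return ERR_PTR(rc);

	return populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void section_deactivate_sketch(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (clear_subsection_map(pfn, nr_pages))	/* already deactivated */
		return;

	if (is_subsection_map_empty(ms))		/* last subsection gone */
		ms->section_mem_map = (unsigned long)NULL;
}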