mm, THP, swap: unify swap slot free functions to put_swap_page
author Minchan Kim <minchan@kernel.org>
Thu, 6 Jul 2017 22:37:21 +0000 (15:37 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 6 Jul 2017 23:24:31 +0000 (16:24 -0700)
Now, get_swap_page takes struct page and allocates swap space according
to the page size (i.e., normal or THP), so it is cleaner to introduce
put_swap_page as the counterpart of get_swap_page.  It then calls the
right swap slot free function depending on the page's size.

[ying.huang@intel.com: minor cleanup and fix]
Link: http://lkml.kernel.org/r/20170515112522.32457-3-ying.huang@intel.com
Signed-off-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ebru Akagunduz <ebru.akagunduz@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/swap.h
mm/shmem.c
mm/swap_state.c
mm/swapfile.c
mm/vmscan.c

index d18876384de069cc465a6a0163ededb543c892e6..ead6fd7966b4fe1a25f5dc9112cdc5442576e55f 100644 (file)
@@ -387,6 +387,7 @@ static inline long get_nr_swap_pages(void)
 
 extern void si_swapinfo(struct sysinfo *);
 extern swp_entry_t get_swap_page(struct page *page);
+extern void put_swap_page(struct page *page, swp_entry_t entry);
 extern swp_entry_t get_swap_page_of_type(int);
 extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
 extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -394,7 +395,6 @@ extern void swap_shmem_alloc(swp_entry_t);
 extern int swap_duplicate(swp_entry_t);
 extern int swapcache_prepare(swp_entry_t);
 extern void swap_free(swp_entry_t);
-extern void swapcache_free(swp_entry_t);
 extern void swapcache_free_entries(swp_entry_t *entries, int n);
 extern int free_swap_and_cache(swp_entry_t);
 extern int swap_type_of(dev_t, sector_t, struct block_device **);
@@ -453,7 +453,7 @@ static inline void swap_free(swp_entry_t swp)
 {
 }
 
-static inline void swapcache_free(swp_entry_t swp)
+static inline void put_swap_page(struct page *page, swp_entry_t swp)
 {
 }
 
@@ -578,13 +578,5 @@ static inline bool mem_cgroup_swap_full(struct page *page)
 }
 #endif
 
-#ifdef CONFIG_THP_SWAP
-extern void swapcache_free_cluster(swp_entry_t entry);
-#else
-static inline void swapcache_free_cluster(swp_entry_t entry)
-{
-}
-#endif
-
 #endif /* __KERNEL__*/
 #endif /* _LINUX_SWAP_H */
index bbb987c58dad6c8ce29aff08070b4795a9f9d94b..a06f23731d3f96db3a2b134d0d62b3c78dcef00c 100644 (file)
@@ -1327,7 +1327,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 
        mutex_unlock(&shmem_swaplist_mutex);
 free_swap:
-       swapcache_free(swap);
+       put_swap_page(page, swap);
 redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
index 16ff89d058f4e6663eb4e84214bfdc16e36864f8..0ad214d7a7ad22c1ee871759f07d70e6f74be0c8 100644 (file)
@@ -231,10 +231,7 @@ retry:
        return 1;
 
 fail_free:
-       if (PageTransHuge(page))
-               swapcache_free_cluster(entry);
-       else
-               swapcache_free(entry);
+       put_swap_page(page, entry);
 fail:
        if (PageTransHuge(page) && !split_huge_page_to_list(page, list))
                goto retry;
@@ -259,11 +256,7 @@ void delete_from_swap_cache(struct page *page)
        __delete_from_swap_cache(page);
        spin_unlock_irq(&address_space->tree_lock);
 
-       if (PageTransHuge(page))
-               swapcache_free_cluster(entry);
-       else
-               swapcache_free(entry);
-
+       put_swap_page(page, entry);
        page_ref_sub(page, hpage_nr_pages(page));
 }
 
@@ -415,7 +408,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
-               swapcache_free(entry);
+               put_swap_page(new_page, entry);
        } while (err != -ENOMEM);
 
        if (new_page)
index 984f0dd94948b77047ff94e6880beea367160e57..8a6cdf9e55f97bf6815789844722fe6bd56256a3 100644 (file)
@@ -1148,7 +1148,7 @@ void swap_free(swp_entry_t entry)
 /*
  * Called after dropping swapcache to decrease refcnt to swap entries.
  */
-void swapcache_free(swp_entry_t entry)
+static void swapcache_free(swp_entry_t entry)
 {
        struct swap_info_struct *p;
 
@@ -1160,7 +1160,7 @@ void swapcache_free(swp_entry_t entry)
 }
 
 #ifdef CONFIG_THP_SWAP
-void swapcache_free_cluster(swp_entry_t entry)
+static void swapcache_free_cluster(swp_entry_t entry)
 {
        unsigned long offset = swp_offset(entry);
        unsigned long idx = offset / SWAPFILE_CLUSTER;
@@ -1184,8 +1184,20 @@ void swapcache_free_cluster(swp_entry_t entry)
        swap_free_cluster(si, idx);
        spin_unlock(&si->lock);
 }
+#else
+static inline void swapcache_free_cluster(swp_entry_t entry)
+{
+}
 #endif /* CONFIG_THP_SWAP */
 
+void put_swap_page(struct page *page, swp_entry_t entry)
+{
+       if (!PageTransHuge(page))
+               swapcache_free(entry);
+       else
+               swapcache_free_cluster(entry);
+}
+
 void swapcache_free_entries(swp_entry_t *entries, int n)
 {
        struct swap_info_struct *p, *prev;
index a10e058708357334eb7860c1f572558f4a5d4796..cb7c154a4a9dc10f4ab7399908f34817e3389757 100644 (file)
@@ -708,7 +708,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                mem_cgroup_swapout(page, swap);
                __delete_from_swap_cache(page);
                spin_unlock_irqrestore(&mapping->tree_lock, flags);
-               swapcache_free(swap);
+               put_swap_page(page, swap);
        } else {
                void (*freepage)(struct page *);
                void *shadow = NULL;