mm: remove SWAP_DIRTY in ttu
authorMinchan Kim <minchan@kernel.org>
Wed, 3 May 2017 21:54:04 +0000 (14:54 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Wed, 3 May 2017 22:52:10 +0000 (15:52 -0700)
If we find a lazyfree page is dirty, try_to_unmap_one can just
SetPageSwapBacked there, as it does for a PG_mlocked page, and return
SWAP_FAIL, which is very natural because the page is not swappable right
now, so that vmscan can activate it.  There is no point in introducing the
new return value SWAP_DIRTY in try_to_unmap at the moment.

Link: http://lkml.kernel.org/r/1489555493-14659-3-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/rmap.c
mm/vmscan.c

index fee10d744ebd2f95522c4cbf4ed1a014ef48e9d1..b556eefa62bceaf398dc5c81decb74d0708756cd 100644 (file)
@@ -298,6 +298,5 @@ static inline int page_mkclean(struct page *page)
 #define SWAP_AGAIN     1
 #define SWAP_FAIL      2
 #define SWAP_MLOCK     3
-#define SWAP_DIRTY     4
 
 #endif /* _LINUX_RMAP_H */
index 4baf504e4213aff2cdbb5d7612c48a7ebb48fbcb..f6aa18d8a420d492b11a0ded1b0e695dfec0e3ec 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1436,7 +1436,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                 * discarded. Remap the page to page table.
                                 */
                                set_pte_at(mm, address, pvmw.pte, pteval);
-                               ret = SWAP_DIRTY;
+                               SetPageSwapBacked(page);
+                               ret = SWAP_FAIL;
                                page_vma_mapped_walk_done(&pvmw);
                                break;
                        }
@@ -1506,7 +1507,6 @@ static int page_mapcount_is_zero(struct page *page)
  * SWAP_AGAIN  - we missed a mapping, try again later
  * SWAP_FAIL   - the page is unswappable
  * SWAP_MLOCK  - page is mlocked.
- * SWAP_DIRTY  - page is dirty MADV_FREE page
  */
 int try_to_unmap(struct page *page, enum ttu_flags flags)
 {
index e54c882d6789c6c974277b0a08ef43f729fbad3d..f1fd388454bd04254df86097300b395643bf3747 100644 (file)
@@ -1147,9 +1147,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (page_mapped(page)) {
                        switch (ret = try_to_unmap(page,
                                ttu_flags | TTU_BATCH_FLUSH)) {
-                       case SWAP_DIRTY:
-                               SetPageSwapBacked(page);
-                               /* fall through */
                        case SWAP_FAIL:
                                nr_unmap_fail++;
                                goto activate_locked;