From 34290e2c6419d3a61391416b5ab6ceb37f857fde Mon Sep 17 00:00:00 2001
From: Ralph Campbell <rcampbell@nvidia.com>
Date: Thu, 30 Jan 2020 22:14:44 -0800
Subject: [PATCH] mm/migrate: add stable check in migrate_vma_insert_page()

migrate_vma_insert_page() closely follows the code in:

  __handle_mm_fault()
    handle_pte_fault()
      do_anonymous_page()

Add a call to check_stable_address_space() after locking the page table
entry before inserting a ZONE_DEVICE private zero page mapping, similar
to page faulting a new anonymous page.

Link: http://lkml.kernel.org/r/20200107211208.24595-4-rcampbell@nvidia.com
Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig
Cc: Jerome Glisse
Cc: John Hubbard
Cc: Jason Gunthorpe
Cc: Bharata B Rao
Cc: Michal Hocko
Cc: Chris Down
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/migrate.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/mm/migrate.c b/mm/migrate.c
index 0144e13c6b64..edf42ed90030 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -48,6 +48,7 @@
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>
 #include <linux/ptrace.h>
+#include <linux/oom.h>
 
 #include <asm/tlbflush.h>
 
@@ -2695,6 +2696,14 @@ int migrate_vma_setup(struct migrate_vma *args)
 }
 EXPORT_SYMBOL(migrate_vma_setup);
 
+/*
+ * This code closely matches the code in:
+ *   __handle_mm_fault()
+ *     handle_pte_fault()
+ *       do_anonymous_page()
+ * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
+ * private page.
+ */
 static void migrate_vma_insert_page(struct migrate_vma *migrate,
 				    unsigned long addr,
 				    struct page *page,
@@ -2775,6 +2784,9 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate,
 
 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 
+	if (check_stable_address_space(mm))
+		goto unlock_abort;
+
 	if (pte_present(*ptep)) {
 		unsigned long pfn = pte_pfn(*ptep);
 
-- 
2.30.2
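
For reference, check_stable_address_space() is the small inline helper
declared in include/linux/oom.h that do_anonymous_page() also calls under
the page table lock; at the time of this patch it looks roughly like the
sketch below (paraphrased from the kernel source, details may vary by
version):

	/* Approximate sketch of the helper from include/linux/oom.h */
	static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
	{
		/*
		 * Once the OOM reaper has begun tearing down this address
		 * space it sets MMF_UNSTABLE; establishing new page table
		 * entries after that point could leave corrupted (zeroed)
		 * data behind, so the fault must be aborted with SIGBUS.
		 */
		if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
			return VM_FAULT_SIGBUS;
		return 0;
	}

This is why the check must come after pte_offset_map_lock(): the MMF_UNSTABLE
test is only meaningful while holding the page table lock, which is the same
ordering do_anonymous_page() uses.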