From 369a713e9678227e203b53931ad1a10cd8eac811 Mon Sep 17 00:00:00 2001
From: Hillf Danton
Date: Mon, 29 Apr 2013 15:06:14 -0700
Subject: [PATCH] rmap: recompute pgoff for unmapping huge page

We have to recompute pgoff if the given page is huge, since the result
based on HPAGE_SIZE is not appropriate for scanning the vma interval
tree, as shown by commit 36e4f20af833 ("hugetlb: do not use
vma_hugecache_offset() for vma_prio_tree_foreach").

Signed-off-by: Hillf Danton
Cc: Michal Hocko
Cc: Michel Lespinasse
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/rmap.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/mm/rmap.c b/mm/rmap.c
index 807c96bf0dc6..6280da86b5d6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1513,6 +1513,9 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
 	unsigned long max_nl_size = 0;
 	unsigned int mapcount;
 
+	if (PageHuge(page))
+		pgoff = page->index << compound_order(page);
+
 	mutex_lock(&mapping->i_mmap_mutex);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
-- 
2.30.2
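
For illustration only: the point of the hunk is that page->index of a
hugetlbfs page counts in huge-page units, while the i_mmap interval tree
is keyed in base-page (PAGE_SIZE) units, so the index must be rescaled by
compound_order(page) before it can be used as a lookup key. The standalone
userspace sketch below shows just that arithmetic; the constants
(4 KiB base pages, 2 MiB huge pages, order 9) and the value huge_index are
assumed example values, not taken from the patch.

/*
 * Minimal sketch of the index conversion the patch performs.
 * Assumptions: 4 KiB base pages, 2 MiB huge pages (compound order 9).
 */
#include <stdio.h>

#define PAGE_SHIFT   12   /* assumed 4 KiB base page */
#define HPAGE_ORDER   9   /* assumed 2 MiB huge page = 512 base pages */

int main(void)
{
	/* Hypothetical huge page: the 4th huge page of its hugetlbfs file. */
	unsigned long huge_index = 3;   /* page->index, in huge-page units */

	/*
	 * Rescale to base-page units, mirroring the patch's
	 * "pgoff = page->index << compound_order(page)", so the value can
	 * index a tree keyed in PAGE_SIZE units.
	 */
	unsigned long pgoff = huge_index << HPAGE_ORDER;

	printf("huge index %lu -> base-page pgoff %lu (byte offset %lu)\n",
	       huge_index, pgoff, pgoff << PAGE_SHIFT);
	return 0;
}

With these assumed values the program prints pgoff 1536 (byte offset
6 MiB), i.e. the same file offset expressed in base pages, which is the
unit vma_interval_tree_foreach() expects.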