mm: drop mmap_sem before calling balance_dirty_pages() in write fault
author Johannes Weiner <hannes@cmpxchg.org>
Sun, 1 Dec 2019 01:50:22 +0000 (17:50 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 1 Dec 2019 14:29:18 +0000 (06:29 -0800)
One of our services is observing hanging ps/top/etc under heavy write
IO, and the task states show this is an mmap_sem priority inversion:

A write fault is holding the mmap_sem in read-mode and waiting for
(heavily cgroup-limited) IO in balance_dirty_pages():

    balance_dirty_pages+0x724/0x905
    balance_dirty_pages_ratelimited+0x254/0x390
    fault_dirty_shared_page.isra.96+0x4a/0x90
    do_wp_page+0x33e/0x400
    __handle_mm_fault+0x6f0/0xfa0
    handle_mm_fault+0xe4/0x200
    __do_page_fault+0x22b/0x4a0
    page_fault+0x45/0x50

Somebody tries to change the address space, contending for the mmap_sem in
write-mode:

    call_rwsem_down_write_failed_killable+0x13/0x20
    do_mprotect_pkey+0xa8/0x330
    SyS_mprotect+0xf/0x20
    do_syscall_64+0x5b/0x100
    entry_SYSCALL_64_after_hwframe+0x3d/0xa2

The waiting writer locks out all subsequent readers to avoid lock
starvation, and several threads can be seen hanging like this:

    call_rwsem_down_read_failed+0x14/0x30
    proc_pid_cmdline_read+0xa0/0x480
    __vfs_read+0x23/0x140
    vfs_read+0x87/0x130
    SyS_read+0x42/0x90
    do_syscall_64+0x5b/0x100
    entry_SYSCALL_64_after_hwframe+0x3d/0xa2

To fix this, do what we already do for cache read faults: drop the
mmap_sem before calling into anything IO-bound, in this case the
balance_dirty_pages() function, and return VM_FAULT_RETRY.
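
As context for the fix, this is roughly how an arch fault handler
consumes VM_FAULT_RETRY (a simplified sketch of the x86 fault path,
not verbatim kernel code): by the time handle_mm_fault() returns
VM_FAULT_RETRY, the mmap_sem has already been dropped, so the waiting
writer can take the lock before the fault is retried.

    retry:
            down_read(&mm->mmap_sem);
            vma = find_vma(mm, address);
            /* ... validate the vma, access type, etc. ... */
            fault = handle_mm_fault(vma, address, flags);
            if (fault & VM_FAULT_RETRY) {
                    /*
                     * handle_mm_fault() released the mmap_sem for us;
                     * retry exactly once, without allowing another drop.
                     */
                    flags &= ~FAULT_FLAG_ALLOW_RETRY;
                    flags |= FAULT_FLAG_TRIED;
                    goto retry;
            }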

Link: http://lkml.kernel.org/r/20190924194238.GA29030@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Josef Bacik <josef@toxicpanda.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/filemap.c
mm/internal.h
mm/memory.c

diff --git a/mm/filemap.c b/mm/filemap.c
index dc3b78db079bf2ee555fdf9927cadb713d6d7e14..bf6aa30be58dca822906d55001e563fa0b4d2e8a 100644
@@ -2329,27 +2329,6 @@ EXPORT_SYMBOL(generic_file_read_iter);
 
 #ifdef CONFIG_MMU
 #define MMAP_LOTSAMISS  (100)
-static struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
-                                            struct file *fpin)
-{
-       int flags = vmf->flags;
-
-       if (fpin)
-               return fpin;
-
-       /*
-        * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
-        * anything, so we only pin the file and drop the mmap_sem if only
-        * FAULT_FLAG_ALLOW_RETRY is set.
-        */
-       if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
-           FAULT_FLAG_ALLOW_RETRY) {
-               fpin = get_file(vmf->vma->vm_file);
-               up_read(&vmf->vma->vm_mm->mmap_sem);
-       }
-       return fpin;
-}
-
 /*
  * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_sem
  * @vmf - the vm_fault for this fault.
diff --git a/mm/internal.h b/mm/internal.h
index 0d5f720c75abf0291d4c2a3f494cb7d6553804bd..7dd7fbb577a9a6bffd9e38daef64cb79a939580a 100644
@@ -362,6 +362,27 @@ vma_address(struct page *page, struct vm_area_struct *vma)
        return max(start, vma->vm_start);
 }
 
+static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
+                                                   struct file *fpin)
+{
+       int flags = vmf->flags;
+
+       if (fpin)
+               return fpin;
+
+       /*
+        * FAULT_FLAG_RETRY_NOWAIT means we don't want to wait on page locks or
+        * anything, so we only pin the file and drop the mmap_sem if only
+        * FAULT_FLAG_ALLOW_RETRY is set.
+        */
+       if ((flags & (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT)) ==
+           FAULT_FLAG_ALLOW_RETRY) {
+               fpin = get_file(vmf->vma->vm_file);
+               up_read(&vmf->vma->vm_mm->mmap_sem);
+       }
+       return fpin;
+}
+
 #else /* !CONFIG_MMU */
 static inline void clear_page_mlock(struct page *page) { }
 static inline void mlock_vma_page(struct page *page) { }
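
The helper's contract, used by the filemap.c readahead paths and the
new write-fault call site in mm/memory.c below, is roughly the
following (a sketch; do_io_bound_work() is a hypothetical stand-in
for the blocking call, e.g. balance_dirty_pages_ratelimited()):

    struct file *fpin;

    fpin = maybe_unlock_mmap_for_io(vmf, NULL);
    do_io_bound_work();                /* hypothetical stand-in */
    if (fpin) {
            fput(fpin);                /* release the vm_file pin */
            return VM_FAULT_RETRY;     /* mmap_sem is no longer held */
    }
    /* fpin == NULL: mmap_sem is still held, finish the fault normally */

The file pin is what keeps the mapping alive once the mmap_sem, and
with it the VMA, can go away underneath us.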
diff --git a/mm/memory.c b/mm/memory.c
index b6a5d6a08438693e892ac63960bc044a2a62b166..9ea917e28ef4e37732bf504d09eee2b948e3a8cf 100644
@@ -2289,10 +2289,11 @@ static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
  *
  * The function expects the page to be locked and unlocks it.
  */
-static void fault_dirty_shared_page(struct vm_area_struct *vma,
-                                   struct page *page)
+static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
 {
+       struct vm_area_struct *vma = vmf->vma;
        struct address_space *mapping;
+       struct page *page = vmf->page;
        bool dirtied;
        bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
 
@@ -2307,16 +2308,30 @@ static void fault_dirty_shared_page(struct vm_area_struct *vma,
        mapping = page_rmapping(page);
        unlock_page(page);
 
+       if (!page_mkwrite)
+               file_update_time(vma->vm_file);
+
+       /*
+        * Throttle page dirtying rate down to writeback speed.
+        *
+        * mapping may be NULL here because some device drivers do not
+        * set page.mapping but still dirty their pages
+        *
+        * Drop the mmap_sem before waiting on IO, if we can. The file
+        * is pinning the mapping, as per above.
+        */
        if ((dirtied || page_mkwrite) && mapping) {
-               /*
-                * Some device drivers do not set page.mapping
-                * but still dirty their pages
-                */
+               struct file *fpin;
+
+               fpin = maybe_unlock_mmap_for_io(vmf, NULL);
                balance_dirty_pages_ratelimited(mapping);
+               if (fpin) {
+                       fput(fpin);
+                       return VM_FAULT_RETRY;
+               }
        }
 
-       if (!page_mkwrite)
-               file_update_time(vma->vm_file);
+       return 0;
 }
 
 /*
@@ -2571,6 +2586,7 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
        __releases(vmf->ptl)
 {
        struct vm_area_struct *vma = vmf->vma;
+       vm_fault_t ret = VM_FAULT_WRITE;
 
        get_page(vmf->page);
 
@@ -2594,10 +2610,10 @@ static vm_fault_t wp_page_shared(struct vm_fault *vmf)
                wp_page_reuse(vmf);
                lock_page(vmf->page);
        }
-       fault_dirty_shared_page(vma, vmf->page);
+       ret |= fault_dirty_shared_page(vmf);
        put_page(vmf->page);
 
-       return VM_FAULT_WRITE;
+       return ret;
 }
 
 /*
@@ -3641,7 +3657,7 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
                return ret;
        }
 
-       fault_dirty_shared_page(vma, vmf->page);
+       ret |= fault_dirty_shared_page(vmf);
        return ret;
 }
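
One consequence of the last three hunks: fault_dirty_shared_page()
now returns a vm_fault_t and both callers OR it into their result, so
a shared write fault can legitimately return
VM_FAULT_WRITE | VM_FAULT_RETRY. A simplified sketch of what that
means for code consuming the return value (not verbatim):

    vm_fault_t ret;

    ret = wp_page_shared(vmf);         /* may be WRITE | RETRY */
    if (ret & VM_FAULT_RETRY) {
            /*
             * The mmap_sem was dropped; the WRITE bit may still be
             * set, but the fault has to be retried regardless.
             */
            return ret;
    }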