mm: migrate: lock buffers before migrate_page_move_mapping()
author Jan Kara <jack@suse.cz>
Fri, 28 Dec 2018 08:39:05 +0000 (00:39 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Dec 2018 20:11:51 +0000 (12:11 -0800)
Lock buffers before calling into migrate_page_move_mapping() so that that
function doesn't have to know about buffers (which is somewhat unexpected
anyway), and so that all the buffer head logic is contained in
buffer_migrate_page().

Link: http://lkml.kernel.org/r/20181211172143.7358-3-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
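
For context, the helper that buffer_migrate_page() now calls before the
mapping is moved, buffer_migrate_lock_buffers(), blocks on each buffer
lock in sync migration modes but only trylocks in MIGRATE_ASYNC,
unwinding any locks already taken on failure.  A condensed sketch of
that behaviour (paraphrased from mm/migrate.c of this era, not the
verbatim source):

static bool buffer_migrate_lock_buffers(struct buffer_head *head,
					enum migrate_mode mode)
{
	struct buffer_head *bh = head;

	/* Sync migration: we are allowed to block on each lock. */
	if (mode != MIGRATE_ASYNC) {
		do {
			lock_buffer(bh);
			bh = bh->b_this_page;
		} while (bh != head);
		return true;
	}

	/* Async migration: never block; roll back on a failed trylock. */
	do {
		if (!trylock_buffer(bh)) {
			struct buffer_head *failed_bh = bh;

			/* Unlock everything taken so far, then give up. */
			bh = head;
			while (bh != failed_bh) {
				unlock_buffer(bh);
				bh = bh->b_this_page;
			}
			return false;
		}
		bh = bh->b_this_page;
	} while (bh != head);
	return true;
}

Because buffer_migrate_page() now takes these locks itself, the async
trylock special case no longer has to live inside
migrate_page_move_mapping().
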
mm/migrate.c

index 94c9ebf1f33e93ba6dbd1dd286527c19fd8d19cd..e0bc03e15e74edee08c48550c4b5e8b6c2e3a708 100644
@@ -486,20 +486,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
                return -EAGAIN;
        }
 
-       /*
-        * In the async migration case of moving a page with buffers, lock the
-        * buffers using trylock before the mapping is moved. If the mapping
-        * was moved, we later failed to lock the buffers and could not move
-        * the mapping back due to an elevated page count, we would have to
-        * block waiting on other references to be dropped.
-        */
-       if (mode == MIGRATE_ASYNC && head &&
-                       !buffer_migrate_lock_buffers(head, mode)) {
-               page_ref_unfreeze(page, expected_count);
-               xas_unlock_irq(&xas);
-               return -EAGAIN;
-       }
-
        /*
         * Now we know that no one else is looking at the page:
         * no turning back from here.
@@ -775,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
 {
        struct buffer_head *bh, *head;
        int rc;
+       int expected_count;
 
        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page, mode);
 
-       head = page_buffers(page);
+       /* Check whether page does not have extra refs before we do more work */
+       expected_count = expected_page_refs(page);
+       if (page_count(page) != expected_count)
+               return -EAGAIN;
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+       head = page_buffers(page);
+       if (!buffer_migrate_lock_buffers(head, mode))
+               return -EAGAIN;
 
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
-               return rc;
-
-       /*
-        * In the async case, migrate_page_move_mapping locked the buffers
-        * with an IRQ-safe spinlock held. In the sync case, the buffers
-        * need to be locked now
-        */
-       if (mode != MIGRATE_ASYNC)
-               BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+               goto unlock_buffers;
 
        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
@@ -814,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
        else
                migrate_page_states(newpage, page);
 
+       rc = MIGRATEPAGE_SUCCESS;
+unlock_buffers:
        bh = head;
        do {
                unlock_buffer(bh);
@@ -822,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
        } while (bh != head);
 
-       return MIGRATEPAGE_SUCCESS;
+       return rc;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif
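
Note: the expected_page_refs() helper used in the new early check was
factored out by the previous patch in this series ("mm: migration:
factor out code to compute expected number of page references"); its
logic is approximately the following (a sketch, not the verbatim
source):

static int expected_page_refs(struct page *page)
{
	int expected_count = 1;

	/* ZONE_DEVICE private/public pages carry an extra reference. */
	expected_count += is_device_private_page(page);
	expected_count += is_device_public_page(page);
	/* Pagecache pages: one ref per subpage, plus private data. */
	if (page_mapping(page))
		expected_count += hpage_nr_pages(page) +
				  page_has_private(page);

	return expected_count;
}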