mm/z3fold.c: claim page in the beginning of free
author Vitaly Wool <vitalywool@gmail.com>
Mon, 7 Oct 2019 00:58:22 +0000 (17:58 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 7 Oct 2019 22:47:19 +0000 (15:47 -0700)
There's a really hard-to-reproduce race in z3fold between z3fold_free()
and z3fold_reclaim_page(): z3fold_reclaim_page() can claim the page
after z3fold_free() has checked whether the page was claimed, and
z3fold_free() will then schedule this page for compaction, which may in
turn lead to random page faults (since that page will have been
reclaimed by then).
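
A rough sketch of the interleaving (simplified; the real call paths
are longer, but the window lies between the PAGE_CLAIMED check and
the compaction scheduling):

  CPU A: z3fold_free()               CPU B: z3fold_reclaim_page()
  ----------------------             ----------------------------
  test_bit(PAGE_CLAIMED) == 0
                                     test_and_set_bit(PAGE_CLAIMED)
                                     ... page is reclaimed ...
  queue_work_on(...)   /* compaction of an already-reclaimed page */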

Fix that by claiming the page at the beginning of z3fold_free(), not
forgetting to clear the claim at the end.
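
Condensed from the diff below, the resulting pattern in z3fold_free()
is:

        page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
        ...
        if (page_claimed) {
                /* reclaim claimed the page first, so back off */
                z3fold_page_unlock(zhdr);
                return;
        }
        ...
        /* drop our claim on the paths where the page stays around */
        clear_bit(PAGE_CLAIMED, &page->private);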

[vitalywool@gmail.com: v2]
Link: http://lkml.kernel.org/r/20190928113456.152742cf@bigdell
Link: http://lkml.kernel.org/r/20190926104844.4f0c6efa1366b8f5741eaba9@gmail.com
Signed-off-by: Vitaly Wool <vitalywool@gmail.com>
Reported-by: Markus Linnala <markus.linnala@gmail.com>
Cc: Dan Streetman <ddstreet@ieee.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Henry Burns <henrywolfeburns@gmail.com>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Markus Linnala <markus.linnala@gmail.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/z3fold.c

index 05bdf90646e7937ab8435327f785afbb0deaabf1..6d3d3f698ebb9f91db74662b25fe68b0bc86650b 100644
@@ -998,9 +998,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;
+       bool page_claimed;
 
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);
+       page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
 
        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* if a headless page is under reclaim, just leave.
@@ -1008,7 +1010,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                 * has not been set before, we release this page
                 * immediately so we don't care about its value any more.
                 */
-               if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+               if (!page_claimed) {
                        spin_lock(&pool->lock);
                        list_del(&page->lru);
                        spin_unlock(&pool->lock);
@@ -1044,13 +1046,15 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                atomic64_dec(&pool->pages_nr);
                return;
        }
-       if (test_bit(PAGE_CLAIMED, &page->private)) {
+       if (page_claimed) {
+               /* the page has not been claimed by us */
                z3fold_page_unlock(zhdr);
                return;
        }
        if (unlikely(PageIsolated(page)) ||
            test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
+               clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1060,10 +1064,12 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                spin_unlock(&pool->lock);
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
+               clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
+       clear_bit(PAGE_CLAIMED, &page->private);
        z3fold_page_unlock(zhdr);
 }