blk-mq: fallback to previous nr_hw_queues when updating fails
Author:     Jianchao Wang <jianchao.w.wang@oracle.com>
AuthorDate: Fri, 12 Oct 2018 10:07:28 +0000 (18:07 +0800)
Committer:  Jens Axboe <axboe@kernel.dk>
CommitDate: Sat, 13 Oct 2018 21:42:04 +0000 (15:42 -0600)
When we try to increase nr_hw_queues, we may fail due to a shortage
of memory or for some other reason, in which case blk_mq_realloc_hw_ctxs
stops early and leaves some entries in q->queue_hw_ctx set to NULL.
However, because the queue map has already been updated for the new
nr_hw_queues, some CPUs are mapped to hw queues whose allocation just
failed, so blk_mq_map_queue can return NULL. This causes a panic in the
subsequent blk_mq_map_swqueue.
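
The failure mode can be modeled in a few lines of plain C (a minimal
standalone sketch, not kernel code; all names here are illustrative):

#include <stdio.h>
#include <stdlib.h>

struct hw_ctx { int queue_num; };

int main(void)
{
	int new_nr = 4, nr_cpus = 4;
	struct hw_ctx *hctxs[4] = { NULL };
	int cpu_to_queue[4];
	int i, cpu;

	/* The queue map is updated for new_nr queues up front... */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		cpu_to_queue[cpu] = cpu % new_nr;

	/* ...but hctx allocation stops early, as on -ENOMEM. */
	for (i = 0; i < new_nr; i++) {
		if (i == 3)		/* simulated allocation failure */
			break;
		hctxs[i] = calloc(1, sizeof(*hctxs[i]));
		hctxs[i]->queue_num = i;
	}

	/* Analogue of blk_mq_map_swqueue(): CPU 3 maps to queue 3,
	 * whose hctx is still NULL, so this dereference crashes. */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %d -> hw queue %d\n",
		       cpu, hctxs[cpu_to_queue[cpu]]->queue_num);
	return 0;
}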

To fix this, when increasing nr_hw_queues fails, fall back to the
previous nr_hw_queues and print a warning. In addition, a driver's
.map_queues callback usually uses completion IRQ affinity to map hw
queues to CPUs; after falling back to the previous nr_hw_queues, that
mapping would leave some CPUs without a hw queue, so use the default
blk_mq_map_queues to redo the mapping.
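
For reference, the default mapping simply spreads CPUs round-robin over
the available queues, so every CPU keeps a valid hw queue even after the
fallback. A simplified standalone model of that policy (illustrative
only; the real blk_mq_map_queues also has sibling-aware logic omitted
here):

#include <stdio.h>

/* Round-robin CPU-to-queue spread: no CPU is left unmapped even
 * after nr_hw_queues shrinks back to its previous value. */
static void map_queues_default(unsigned int *map, int nr_cpus, int nr_queues)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		map[cpu] = cpu % nr_queues;
}

int main(void)
{
	unsigned int map[8];
	int cpu;

	map_queues_default(map, 8, 2);	/* e.g. 8 CPUs, back to 2 queues */
	for (cpu = 0; cpu < 8; cpu++)
		printf("cpu %d -> hw queue %u\n", cpu, map[cpu]);
	return 0;
}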

Reported-by: syzbot+83e8cbe702263932d9d4@syzkaller.appspotmail.com
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 941f513800774f98f6aa5032878fbacfb57c2f51..c2ecd64a2403d422f9c47badfef3f5350eb1a154 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2557,7 +2557,7 @@ static struct blk_mq_hw_ctx *blk_mq_alloc_and_init_hctx(
 static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                                                struct request_queue *q)
 {
-       int i, j;
+       int i, j, end;
        struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx;
 
        /* protect against switching io scheduler  */
@@ -2591,8 +2591,20 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
                                break;
                }
        }
+       /*
+        * Increasing nr_hw_queues failed. Free the newly allocated
+        * hctxs and keep the previous q->nr_hw_queues.
+        */
+       if (i != set->nr_hw_queues) {
+               j = q->nr_hw_queues;
+               end = i;
+       } else {
+               j = i;
+               end = q->nr_hw_queues;
+               q->nr_hw_queues = set->nr_hw_queues;
+       }
 
-       for (j = i; j < q->nr_hw_queues; j++) {
+       for (; j < end; j++) {
                struct blk_mq_hw_ctx *hctx = hctxs[j];
 
                if (hctx) {
@@ -2604,7 +2616,6 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
 
                }
        }
-       q->nr_hw_queues = i;
        mutex_unlock(&q->sysfs_lock);
 }
 
@@ -2989,6 +3000,7 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
 {
        struct request_queue *q;
        LIST_HEAD(head);
+       int prev_nr_hw_queues;
 
        lockdep_assert_held(&set->tag_list_lock);
 
@@ -3017,10 +3029,19 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
                blk_mq_sysfs_unregister(q);
        }
 
+       prev_nr_hw_queues = set->nr_hw_queues;
        set->nr_hw_queues = nr_hw_queues;
        blk_mq_update_queue_map(set);
+fallback:
        list_for_each_entry(q, &set->tag_list, tag_set_list) {
                blk_mq_realloc_hw_ctxs(set, q);
+               if (q->nr_hw_queues != set->nr_hw_queues) {
+                       pr_warn("Increasing nr_hw_queues to %d failed, falling back to %d\n",
+                                       nr_hw_queues, prev_nr_hw_queues);
+                       set->nr_hw_queues = prev_nr_hw_queues;
+                       blk_mq_map_queues(set);
+                       goto fallback;
+               }
                blk_mq_map_swqueue(q);
        }