block: remove QUEUE_FLAG_STACKABLE
author	Christoph Hellwig <hch@lst.de>
Thu, 5 Oct 2017 19:22:52 +0000 (21:22 +0200)
committer	Jens Axboe <axboe@kernel.dk>
Thu, 5 Oct 2017 21:22:59 +0000 (15:22 -0600)
We already have a queue_is_rq_based() helper to check whether a request_queue
is request-based, so QUEUE_FLAG_STACKABLE is redundant and can be removed.
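For reference (not part of this patch), the existing helper in
include/linux/blkdev.h at this point in the tree looks roughly like this; a
queue counts as request-based if it has either a legacy request_fn or blk-mq
ops:

	static inline bool queue_is_rq_based(struct request_queue *q)
	{
		/* request-based iff legacy request_fn or blk-mq ops are set */
		return q->request_fn || q->mq_ops;
	}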

Acked-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-debugfs.c
block/elevator.c
drivers/md/dm-rq.c
drivers/md/dm-table.c
drivers/md/dm.c
include/linux/blkdev.h

block/blk-mq-debugfs.c
index 980e7309564332ee1c23e0d96b13996f27c0aeca..7f4a1ba532afcfd91c14c0c5f622a74d364a3cd6 100644 (file)
@@ -54,7 +54,6 @@ static const char *const blk_queue_flag_name[] = {
        QUEUE_FLAG_NAME(NOMERGES),
        QUEUE_FLAG_NAME(SAME_COMP),
        QUEUE_FLAG_NAME(FAIL_IO),
-       QUEUE_FLAG_NAME(STACKABLE),
        QUEUE_FLAG_NAME(NONROT),
        QUEUE_FLAG_NAME(IO_STAT),
        QUEUE_FLAG_NAME(DISCARD),
block/elevator.c
index 153926a909011e5f2a9759d19e90ab74ef00f405..7ae50eb2732bb11aab120ee775d339658ac57eeb 100644 (file)
@@ -1118,7 +1118,7 @@ ssize_t elv_iosched_show(struct request_queue *q, char *name)
        struct elevator_type *__e;
        int len = 0;
 
-       if (!blk_queue_stackable(q))
+       if (!queue_is_rq_based(q))
                return sprintf(name, "none\n");
 
        if (!q->elevator)
drivers/md/dm-rq.c
index eadfcfd106ffffa4773421d5b61ae77414e36d6d..9d32f25489c27ad1fc743d387e19f8fa5a67cf25 100644 (file)
@@ -56,7 +56,7 @@ static unsigned dm_get_blk_mq_queue_depth(void)
 
 int dm_request_based(struct mapped_device *md)
 {
-       return blk_queue_stackable(md->queue);
+       return queue_is_rq_based(md->queue);
 }
 
 static void dm_old_start_queue(struct request_queue *q)
drivers/md/dm-table.c
index ef7b8f201f73ad9777cf336344fe59b11914cc73..75281828f2cbcedb47320cd3c38344a8b89c2ae0 100644 (file)
@@ -1000,7 +1000,7 @@ verify_rq_based:
        list_for_each_entry(dd, devices, list) {
                struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
 
-               if (!blk_queue_stackable(q)) {
+               if (!queue_is_rq_based(q)) {
                        DMERR("table load rejected: including"
                              " non-request-stackable devices");
                        return -EINVAL;
@@ -1847,19 +1847,6 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         */
        if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
                queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
-
-       /*
-        * QUEUE_FLAG_STACKABLE must be set after all queue settings are
-        * visible to other CPUs because, once the flag is set, incoming bios
-        * are processed by request-based dm, which refers to the queue
-        * settings.
-        * Until the flag set, bios are passed to bio-based dm and queued to
-        * md->deferred where queue settings are not needed yet.
-        * Those bios are passed to request-based dm at the resume time.
-        */
-       smp_mb();
-       if (dm_table_request_based(t))
-               queue_flag_set_unlocked(QUEUE_FLAG_STACKABLE, q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
drivers/md/dm.c
index 6e54145969c5ce30184cf162283db0f01796f1ca..8d07ad61221c64ee7352e3f02f7ae8b40a700967 100644 (file)
@@ -1612,17 +1612,6 @@ static void dm_wq_work(struct work_struct *work);
 
 void dm_init_md_queue(struct mapped_device *md)
 {
-       /*
-        * Request-based dm devices cannot be stacked on top of bio-based dm
-        * devices.  The type of this dm device may not have been decided yet.
-        * The type is decided at the first table loading time.
-        * To prevent problematic device stacking, clear the queue flag
-        * for request stacking support until then.
-        *
-        * This queue is new, so no concurrency on the queue_flags.
-        */
-       queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-
        /*
         * Initialize data that will only be used by a non-blk-mq DM queue
         * - must do so here (in alloc_dev callchain) before queue is used
include/linux/blkdev.h
index 02fa42d24b52f5632500e2dd530a3b16be1f38d8..9fb71fc7d0e85077ae4536c197a68897ba3880a6 100644 (file)
@@ -609,7 +609,6 @@ struct request_queue {
 #define QUEUE_FLAG_NOMERGES     5      /* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP   6       /* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO     7       /* fake timeout */
-#define QUEUE_FLAG_STACKABLE   8       /* supports request stacking */
 #define QUEUE_FLAG_NONROT      9       /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     10      /* do IO stats */
@@ -633,12 +632,10 @@ struct request_queue {
 #define QUEUE_FLAG_QUIESCED    28      /* queue has been quiesced */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
-                                (1 << QUEUE_FLAG_STACKABLE)    |       \
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                 (1 << QUEUE_FLAG_ADD_RANDOM))
 
 #define QUEUE_FLAG_MQ_DEFAULT  ((1 << QUEUE_FLAG_IO_STAT) |            \
-                                (1 << QUEUE_FLAG_STACKABLE)    |       \
                                 (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                 (1 << QUEUE_FLAG_POLL))
 
@@ -722,8 +719,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_nonrot(q)    test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)   test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q)        test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_stackable(q) \
-       test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)   test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
 #define blk_queue_secure_erase(q) \
        (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))