block: Add 'lock' as third argument to blk_alloc_queue_node()
author Bart Van Assche <bart.vanassche@wdc.com>
Wed, 28 Feb 2018 18:15:31 +0000 (10:15 -0800)
committer Jens Axboe <axboe@kernel.dk>
Wed, 28 Feb 2018 19:23:35 +0000 (12:23 -0700)
This patch does not change any functionality.

Signed-off-by: Bart Van Assche <bart.vanassche@wdc.com>
Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Philipp Reisner <philipp.reisner@linbit.com>
Cc: Ulf Hansson <ulf.hansson@linaro.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-core.c
block/blk-mq.c
drivers/block/null_blk.c
drivers/ide/ide-probe.c
drivers/lightnvm/core.c
drivers/md/dm.c
drivers/nvdimm/pmem.c
drivers/nvme/host/multipath.c
drivers/scsi/scsi_lib.c
include/linux/blkdev.h

index 2d1a7bbe063437bfacfca43ad479c305fccf56c7..e873a24bf82d17d8e33015c3544e99acd974a9d6 100644 (file)
@@ -810,7 +810,7 @@ void blk_exit_rl(struct request_queue *q, struct request_list *rl)
 
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
-       return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE);
+       return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL);
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
@@ -888,7 +888,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
        kblockd_schedule_work(&q->timeout_work);
 }
 
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+                                          spinlock_t *lock)
 {
        struct request_queue *q;
 
@@ -1030,7 +1031,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 {
        struct request_queue *q;
 
-       q = blk_alloc_queue_node(GFP_KERNEL, node_id);
+       q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL);
        if (!q)
                return NULL;
 
index 9594a0e9f65bfba50e5e413a76b09c35f3aacd78..75336848f7a78d89720a86755e3e6277572388ca 100644 (file)
@@ -2556,7 +2556,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 {
        struct request_queue *uninit_q, *q;
 
-       uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node);
+       uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL);
        if (!uninit_q)
                return ERR_PTR(-ENOMEM);
 
index d12d7a8325ad5c1e17bf81b95e98da2a741ae709..6dc7e7cfca4a6e708d31cb4c42a7875ec2fe7d5b 100644 (file)
@@ -1760,7 +1760,8 @@ static int null_add_dev(struct nullb_device *dev)
                }
                null_init_queues(nullb);
        } else if (dev->queue_mode == NULL_Q_BIO) {
-               nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node);
+               nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node,
+                                               NULL);
                if (!nullb->q) {
                        rv = -ENOMEM;
                        goto out_cleanup_queues;
index caa20eb5f26b03c7e02569e54dbb599fd2e73ec1..d6b8c7e1545dd4d25e45c936e4eded010ffdd0e9 100644 (file)
@@ -766,7 +766,7 @@ static int ide_init_queue(ide_drive_t *drive)
         *      limits and LBA48 we could raise it but as yet
         *      do not.
         */
-       q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif));
+       q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL);
        if (!q)
                return 1;
 
index dcc9e621e6514636ffb76f36d5c7856744300068..5f1988df1593d20d19ee17564b89736a4e97dc0b 100644 (file)
@@ -384,7 +384,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create)
                goto err_dev;
        }
 
-       tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node);
+       tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL);
        if (!tqueue) {
                ret = -ENOMEM;
                goto err_disk;
index 68136806d365821f63ace7675ce21ba2bf10ed8d..7586d249266c5203d1c59e0cb807c513130297e8 100644 (file)
@@ -1841,7 +1841,7 @@ static struct mapped_device *alloc_dev(int minor)
        INIT_LIST_HEAD(&md->table_devices);
        spin_lock_init(&md->uevent_lock);
 
-       md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id);
+       md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL);
        if (!md->queue)
                goto bad;
        md->queue->queuedata = md;
index 10041ac4032c038db09109b8f757a8719d26f902..cfb15ac509250c9978408e465af335832ee6a360 100644 (file)
@@ -344,7 +344,7 @@ static int pmem_attach_disk(struct device *dev,
                return -EBUSY;
        }
 
-       q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
+       q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL);
        if (!q)
                return -ENOMEM;
 
index b7e5c6db4d92fe61313fdb9120cef307204bfd70..88440562a197c272681d9b9867a569f8ae20df16 100644 (file)
@@ -162,7 +162,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
        if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
                return 0;
 
-       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
        if (!q)
                goto out;
        q->queuedata = head;
index a86df9ca7d1c88aceb1d1e2298f48fbdb8bb3c49..71d1135f94d068a251c1df50c772c575051ce752 100644 (file)
@@ -2223,7 +2223,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev)
        struct Scsi_Host *shost = sdev->host;
        struct request_queue *q;
 
-       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE);
+       q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
        if (!q)
                return NULL;
        q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
index ed63f3b69c12b6378feeb52dc91503753ec74284..667a9b0053d923d4c4a5b50d59b2223871a83725 100644 (file)
@@ -1321,7 +1321,8 @@ extern long nr_blockdev_pages(void);
 
 bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t, int);
+struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,
+                                          spinlock_t *lock);
 extern void blk_put_queue(struct request_queue *);
 extern void blk_set_queue_dying(struct request_queue *);