From 45a9c9d909b24c6ad0e28a7946e7486e73010319 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Tue, 9 Dec 2014 16:57:48 +0100
Subject: [PATCH] blk-mq: Fix a use-after-free

blk-mq users are allowed to free the memory that request_queue.tag_set
points at after blk_cleanup_queue() has finished but before
blk_release_queue() has started. This can happen e.g. in the SCSI core,
which embeds the tag_set structure in the SCSI host structure. The SCSI
host structure is freed by scsi_host_dev_release(), which runs after
blk_cleanup_queue() has finished but may run before blk_release_queue().
This means that it is not safe to access request_queue.tag_set from
inside blk_release_queue().

Hence remove the blk_sync_queue() call from blk_release_queue(). That
call is not necessary: outstanding requests must have finished before
blk_release_queue() is called. Additionally, move the blk_mq_free_queue()
call from blk_release_queue() to blk_cleanup_queue() so that struct
request_queue.tag_set is no longer accessed after it has been freed.

This patch prevents the following kernel oops, which could be triggered
when deleting a SCSI host for which scsi-mq was enabled:

Call Trace:
 [] lock_acquire+0xc4/0x270
 [] mutex_lock_nested+0x61/0x380
 [] blk_mq_free_queue+0x30/0x180
 [] blk_release_queue+0x84/0xd0
 [] kobject_cleanup+0x7b/0x1a0
 [] kobject_put+0x30/0x70
 [] blk_put_queue+0x15/0x20
 [] disk_release+0x99/0xd0
 [] device_release+0x36/0xb0
 [] kobject_cleanup+0x7b/0x1a0
 [] kobject_put+0x30/0x70
 [] put_disk+0x1a/0x20
 [] __blkdev_put+0x135/0x1b0
 [] blkdev_put+0x50/0x160
 [] kill_block_super+0x44/0x70
 [] deactivate_locked_super+0x44/0x60
 [] deactivate_super+0x4e/0x70
 [] cleanup_mnt+0x43/0x90
 [] __cleanup_mnt+0x12/0x20
 [] task_work_run+0xac/0xe0
 [] do_notify_resume+0x61/0xa0
 [] int_signal+0x12/0x17

Signed-off-by: Bart Van Assche
Cc: Christoph Hellwig
Cc: Robert Elliott
Cc: Ming Lei
Cc: Alexander Gordeev
Cc: # v3.13+
Signed-off-by: Jens Axboe
---
 block/blk-core.c  |  3 +++
 block/blk-sysfs.c | 12 ++++--------
 2 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 0421b53e6431..93f9152fc271 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -525,6 +525,9 @@ void blk_cleanup_queue(struct request_queue *q)
 	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
 	blk_sync_queue(q);
 
+	if (q->mq_ops)
+		blk_mq_free_queue(q);
+
 	spin_lock_irq(lock);
 	if (q->queue_lock != &q->__queue_lock)
 		q->queue_lock = &q->__queue_lock;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 1fac43408911..935ea2aa0730 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -492,17 +492,15 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
  * Currently, its primary task it to free all the &struct request
  * structures that were allocated to the queue and the queue itself.
  *
- * Caveat:
- * Hopefully the low level driver will have finished any
- * outstanding requests first...
+ * Note:
+ * The low level driver must have finished any outstanding requests first
+ * via blk_cleanup_queue().
  **/
 static void blk_release_queue(struct kobject *kobj)
 {
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
 
-	blk_sync_queue(q);
-
 	blkcg_exit_queue(q);
 
 	if (q->elevator) {
@@ -517,9 +515,7 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
-	if (q->mq_ops)
-		blk_mq_free_queue(q);
-	else
+	if (!q->mq_ops)
 		blk_free_flush_queue(q->fq);
 
 	blk_trace_shutdown(q);
-- 
2.30.2
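
As a minimal sketch of the lifetime problem the commit message describes
(the driver, the names my_host and my_host_remove, and its queue field are
invented for illustration and are not part of this patch; they only mirror
how the SCSI core embeds its tag_set in the host structure), the teardown
ordering that makes q->tag_set unsafe to touch from blk_release_queue()
looks roughly like this:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

/* Hypothetical structure embedding the tag set, like struct Scsi_Host. */
struct my_host {
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
};

static void my_host_remove(struct my_host *h)
{
	/*
	 * With this patch, blk_mq_free_queue() runs inside
	 * blk_cleanup_queue(), i.e. while h->tag_set is still valid.
	 */
	blk_cleanup_queue(h->queue);

	/* Frees the structure that embeds the tag_set. */
	kfree(h);

	/*
	 * The final kobject_put() on the queue, and hence
	 * blk_release_queue(), may only happen later, e.g. when the last
	 * gendisk reference is dropped. Before this patch, that release
	 * path still dereferenced q->tag_set, i.e. freed memory.
	 */
}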