return true;
}
- bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
+ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
{
- struct request_queue *q = hctx->queue;
+ struct blk_mq_hw_ctx *hctx;
struct request *rq;
- LIST_HEAD(driver_list);
- struct list_head *dptr;
int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
- /*
- * Start off with dptr being NULL, so we start the first request
- * immediately, even if we have more pending.
- */
- dptr = NULL;
-
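+ /* bail out early if there is nothing to dispatch */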
+ if (list_empty(list))
+ return false;
+
/*
* Now process all the entries, sending them to the driver.
*/
if (ret == BLK_MQ_RQ_QUEUE_BUSY)
break;
- }
-
- /*
- * We've done the first request. If we have more than 1
- * left in the list, set dptr to defer issue.
- */
- if (!dptr && list->next != list->prev)
- dptr = &driver_list;
+ } while (!list_empty(list));
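/* record this dispatch batch in the hctx's bucketed dispatch counters */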
hctx->dispatched[queued_to_index(queued)]++;
blk_mq_freeze_queue(q);
set->nr_hw_queues = nr_hw_queues;
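+ /* rebuild the CPU to hardware queue mapping for the new nr_hw_queues */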
+ blk_mq_update_queue_map(set);
list_for_each_entry(q, &set->tag_list, tag_set_list) {
blk_mq_realloc_hw_ctxs(set, q);
-
- /*
- * Manually set the make_request_fn as blk_queue_make_request
- * resets a lot of the queue settings.
- */
- if (q->nr_hw_queues > 1)
- q->make_request_fn = blk_mq_make_request;
- else
- q->make_request_fn = blk_sq_make_request;
-
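/* remap software contexts onto the (possibly resized) set of hardware contexts */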
blk_mq_queue_reinit(q, cpu_online_mask);
}
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueuable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
- #define QUEUE_FLAG_RESTART 28 /* queue needs restart at completion */
- #define QUEUE_FLAG_POLL_STATS 29 /* collecting stats for hybrid polling */
- #define QUEUE_FLAG_REGISTERED 30 /* queue has been registered to a disk */
+ #define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
+ #define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \