/*
 * If the cpu isn't present, the cpu is mapped to first hctx.
 */
for_each_possible_cpu(i) {
- hctx_idx = q->mq_map[i];
+ hctx_idx = set->mq_map[i];
/* unmapped hw queue can be remapped after CPU topo changed */
if (!set->tags[hctx_idx] &&
!__blk_mq_alloc_rq_map(set, hctx_idx)) {
/*
 * If tags initialization fail for some hctx,
 * that hctx won't be brought online.  In this
 * case, remap the current ctx to hctx[0] which
 * is guaranteed to always have tags allocated
 */
- q->mq_map[i] = 0;
+ set->mq_map[i] = 0;
}
ctx = per_cpu_ptr(q->queue_ctx, i);
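For orientation: the loop this hunk touches walks each possible CPU, reads its hctx index from the cpu-to-hctx map (now owned by the tag set rather than the queue), and remaps the CPU to hctx 0 if its hardware queue cannot get tags. Below is a minimal standalone sketch of that control flow, not kernel code; sketch_tag_set and sketch_alloc_rq_map are illustrative stand-ins, not kernel APIs.

/*
 * Hedged sketch of the fallback logic: after this patch the map lives
 * in the tag set, so the remap writes set->mq_map[cpu].
 */
#include <stdbool.h>
#include <stddef.h>

#define SKETCH_NR_CPUS 8

struct sketch_tag_set {
	unsigned int mq_map[SKETCH_NR_CPUS];	/* cpu -> hctx index */
	void *tags[SKETCH_NR_CPUS];		/* per-hctx tags, NULL if absent */
};

/* Stand-in for __blk_mq_alloc_rq_map(); assume it can fail. */
static bool sketch_alloc_rq_map(struct sketch_tag_set *set, unsigned int idx)
{
	(void)set;
	(void)idx;
	return false;
}

static void sketch_map_swqueue(struct sketch_tag_set *set)
{
	for (unsigned int cpu = 0; cpu < SKETCH_NR_CPUS; cpu++) {
		unsigned int hctx_idx = set->mq_map[cpu];

		/*
		 * No tags for this hw queue and none can be allocated:
		 * remap the cpu to hctx 0, which always has tags.
		 */
		if (!set->tags[hctx_idx] &&
		    !sketch_alloc_rq_map(set, hctx_idx))
			set->mq_map[cpu] = 0;
	}
}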
static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
struct request_queue *q)
{
- q->tag_set = set;
-
mutex_lock(&set->tag_list_lock);
kobject_put(&hctx->kobj);
}
- q->mq_map = NULL;
-
kfree(q->queue_hw_ctx);
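With the map owned by the tag set, the release path above no longer has a per-queue mq_map pointer to clear; the map's storage is allocated and freed with the tag set (blk_mq_free_tag_set() in the real code). A hedged ownership sketch follows, with illustrative sketch_* types that are not kernel APIs.

/*
 * Ownership sketch: the queue borrows the map, the tag set owns it.
 */
#include <stdlib.h>

struct sketch_tag_set {
	unsigned int *mq_map;		/* allocated once per tag set */
};

struct sketch_queue {
	struct sketch_tag_set *tag_set;	/* borrowed, never freed here */
};

static void sketch_release_queue(struct sketch_queue *q)
{
	q->tag_set = NULL;	/* nothing to free: the map is not ours */
}

static void sketch_free_tag_set(struct sketch_tag_set *set)
{
	free(set->mq_map);	/* the set owns the map's storage */
	set->mq_map = NULL;
}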
int node;
struct blk_mq_hw_ctx *hctx;
- node = blk_mq_hw_queue_to_node(q->mq_map, i);
+ node = blk_mq_hw_queue_to_node(set->mq_map, i);
/*
 * If the hw queue has been mapped to another numa node,
 * we need to realloc the hctx. If allocation fails, fallback
 * to use the previous one.
 */
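blk_mq_hw_queue_to_node() now takes the set-owned map, but its job is unchanged: find the first CPU mapped to the given hw queue and return that CPU's NUMA node, so the hctx can be allocated near the CPUs that use it. A simplified sketch of that lookup; the sketch_* names and the four-CPUs-per-node stand-in are illustrative assumptions, not kernel code.

#include <stddef.h>

#define SKETCH_NR_CPUS	8
#define SKETCH_NO_NODE	(-1)

/* Illustrative stand-in for cpu_to_node(): pretend 4 cpus per node. */
static int sketch_cpu_to_node(unsigned int cpu)
{
	return cpu / 4;
}

/*
 * Return the NUMA node of the first cpu mapped to hw queue 'index',
 * or SKETCH_NO_NODE if no cpu maps to it.
 */
static int sketch_hw_queue_to_node(const unsigned int *mq_map,
				   unsigned int index)
{
	for (unsigned int cpu = 0; cpu < SKETCH_NR_CPUS; cpu++) {
		if (mq_map[cpu] == index)
			return sketch_cpu_to_node(cpu);
	}
	return SKETCH_NO_NODE;
}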
if (!q->queue_hw_ctx)
goto err_percpu;
- q->mq_map = set->mq_map;
-
blk_mq_realloc_hw_ctxs(set, q);
if (!q->nr_hw_queues)
goto err_hctxs;
blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
q->nr_queues = nr_cpu_ids;
+ q->tag_set = set;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
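Note the ordering change: the q->tag_set = set assignment moves out of blk_mq_add_queue_tag_set() (removed above) into queue initialization, so the pointer is already valid when blk_mq_map_swqueue(), or any other caller of blk_mq_map_queue(), runs later in this function. A compressed sketch of that dependency, with illustrative types:

/*
 * Ordering sketch: the tag_set pointer must be valid before the
 * first cpu -> hctx lookup. Names are illustrative, not kernel APIs.
 */
struct sketch_tag_set { unsigned int *mq_map; };
struct sketch_queue { struct sketch_tag_set *tag_set; };

static void sketch_init_queue(struct sketch_queue *q,
			      struct sketch_tag_set *set)
{
	q->tag_set = set;	/* set early: mapping helpers read it */
	/* ... later steps end up calling blk_mq_map_queue(), which
	 * now chases q->tag_set->mq_map instead of a queue-local copy. */
}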
static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
int cpu)
{
- return q->queue_hw_ctx[q->mq_map[cpu]];
+ struct blk_mq_tag_set *set = q->tag_set;
+
+ return q->queue_hw_ctx[set->mq_map[cpu]];
}
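After the change, the helper is a two-step lookup: CPU to hctx index through the tag set's map, then index to hctx through the queue's table. A hedged sketch of the same shape with stand-in types; sketch_* names are illustrative.

/*
 * Lookup-path sketch: cpu -> hctx index via the set-owned map,
 * then index -> hctx via the queue's table.
 */
struct sketch_hctx { int id; };

struct sketch_tag_set { unsigned int *mq_map; };

struct sketch_queue {
	struct sketch_tag_set *tag_set;
	struct sketch_hctx **queue_hw_ctx;
};

static inline struct sketch_hctx *sketch_map_queue(struct sketch_queue *q,
						   int cpu)
{
	struct sketch_tag_set *set = q->tag_set;

	return q->queue_hw_ctx[set->mq_map[cpu]];
}

The extra dereference trades a cached per-queue pointer for a single source of truth in the tag set, presumably so the map cannot go stale when queues sharing the set are remapped.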