--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ ... @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, const int cpu)
}
EXPORT_SYMBOL(blk_mq_map_queue);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *set,
- unsigned int hctx_index,
- int node)
-{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-EXPORT_SYMBOL(blk_mq_alloc_single_hw_queue);
-
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *hctx,
- unsigned int hctx_index)
-{
- kfree(hctx);
-}
-EXPORT_SYMBOL(blk_mq_free_single_hw_queue);
-
static void blk_mq_free_rq_map(struct blk_mq_tag_set *set,
struct blk_mq_tags *tags, unsigned int hctx_idx)
{
@@ ... @@ static void blk_mq_free_hw_queues(struct request_queue *q,
	queue_for_each_hw_ctx(q, hctx, i) {
free_cpumask_var(hctx->cpumask);
- set->ops->free_hctx(hctx, i);
+ kfree(hctx);
}
}
@@ ... @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
	for (i = 0; i < set->nr_hw_queues; i++) {
int node = blk_mq_hw_queue_to_node(map, i);
- hctxs[i] = set->ops->alloc_hctx(set, i, node);
+ hctxs[i] = kzalloc_node(sizeof(struct blk_mq_hw_ctx),
+ GFP_KERNEL, node);
if (!hctxs[i])
goto err_hctxs;
@@ ... @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
		if (!hctxs[i])
break;
free_cpumask_var(hctxs[i]->cpumask);
- set->ops->free_hctx(hctxs[i], i);
+ kfree(hctxs[i]);
}
err_map:
kfree(hctxs);
@@ ... @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
	if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN)
return -EINVAL;
- if (!set->nr_hw_queues ||
- !set->ops->queue_rq || !set->ops->map_queue ||
- !set->ops->alloc_hctx || !set->ops->free_hctx)
+ if (!set->nr_hw_queues || !set->ops->queue_rq || !set->ops->map_queue)
return -EINVAL;
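
With the hooks gone, the only callbacks blk_mq_alloc_tag_set() still insists on are .queue_rq and .map_queue, as the simplified check above shows. A minimal sketch of an ops table that passes the new validation; my_queue_rq and my_mq_ops are hypothetical names, not part of this patch:

#include <linux/blk-mq.h>

/* Toy handler: completes every request synchronously. */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
	blk_mq_end_io(rq, 0);
	return BLK_MQ_RQ_QUEUE_OK;
}

static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,
	.map_queue	= blk_mq_map_queue,	/* generic CPU-to-hctx mapping */
};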
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ ... @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
	return BLK_MQ_RQ_QUEUE_OK;
}
-static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_tag_set *set,
- unsigned int hctx_index,
- int node)
-{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, node);
-}
-
-static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
-{
- kfree(hctx);
-}
-
static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
{
BUG_ON(!nullb);
@@ ... @@ static struct blk_mq_ops null_mq_ops = {
	.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
.complete = null_softirq_done_fn,
- .alloc_hctx = blk_mq_alloc_single_hw_queue,
- .free_hctx = blk_mq_free_single_hw_queue,
-};
-
-static struct blk_mq_ops null_mq_ops_pernode = {
- .queue_rq = null_queue_rq,
- .map_queue = blk_mq_map_queue,
- .init_hctx = null_init_hctx,
- .complete = null_softirq_done_fn,
- .alloc_hctx = null_alloc_hctx,
- .free_hctx = null_free_hctx,
};
static void null_del_dev(struct nullb *nullb)
@@ ... @@ static int null_add_dev(void)
		goto out_free_nullb;
if (queue_mode == NULL_Q_MQ) {
- if (use_per_node_hctx)
- nullb->tag_set.ops = &null_mq_ops_pernode;
- else
- nullb->tag_set.ops = &null_mq_ops;
+ nullb->tag_set.ops = &null_mq_ops;
nullb->tag_set.nr_hw_queues = submit_queues;
nullb->tag_set.queue_depth = hw_queue_depth;
nullb->tag_set.numa_node = home_node;
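
Note that use_per_node_hctx keeps working after this change, just through queue count rather than a second ops table: logic elsewhere in null_blk (untouched by this patch) raises submit_queues to one hardware queue per node, and the core's kzalloc_node() placement covers the NUMA side. Roughly, as a hedged paraphrase of that existing logic rather than a quote from the diff:

	/* In null_init(): per-node mode now simply means one queue per node. */
	if (queue_mode == NULL_Q_MQ && use_per_node_hctx &&
	    submit_queues < nr_online_nodes)
		submit_queues = nr_online_nodes;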
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ ... @@
static struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.map_queue = blk_mq_map_queue,
- .alloc_hctx = blk_mq_alloc_single_hw_queue,
- .free_hctx = blk_mq_free_single_hw_queue,
.complete = virtblk_request_done,
.init_request = virtblk_init_request,
};
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ ... @@
typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_tag_set *,
- unsigned int, int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_request_fn)(void *, struct request *, unsigned int,
@@ ... @@ struct blk_mq_ops {
	softirq_done_fn *complete;
- /*
- * Override for hctx allocations (should probably go)
- */
- alloc_hctx_fn *alloc_hctx;
- free_hctx_fn *free_hctx;
-
/*
* Called when the block layer side of a hardware queue has been
* set up, allowing the driver to allocate/init matching structures.
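
A driver that used .alloc_hctx to attach per-queue state now does so from .init_hctx, which this patch keeps (null_blk's null_init_hctx already follows this pattern). A sketch with hypothetical my_driver/my_init_hctx names:

/* Per-hctx driver state is wired up in .init_hctx now that the core
 * allocates struct blk_mq_hw_ctx itself. */
struct my_queue { int dummy; };			/* stand-in per-queue state */
struct my_driver { struct my_queue *queues; };	/* hypothetical context */

static int my_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			unsigned int index)
{
	struct my_driver *drv = data;	/* tag_set->driver_data */

	hctx->driver_data = &drv->queues[index];
	return 0;
}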
@@ ... @@
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
void blk_mq_end_io(struct request *rq, int error);
void __blk_mq_end_io(struct request *rq, int error);
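
Taken together, a hedged end-to-end sketch of the registration path after this patch; my_driver_init is hypothetical and reuses the my_mq_ops table sketched earlier, with error handling kept minimal:

#include <linux/blk-mq.h>
#include <linux/err.h>

static struct blk_mq_tag_set my_tag_set;

static int my_driver_init(void)
{
	struct request_queue *q;
	int ret;

	my_tag_set.ops		= &my_mq_ops;
	my_tag_set.nr_hw_queues	= 1;
	my_tag_set.queue_depth	= 64;
	my_tag_set.numa_node	= NUMA_NO_NODE;

	ret = blk_mq_alloc_tag_set(&my_tag_set);
	if (ret)
		return ret;

	/* The core now kzalloc_node()s each hctx on the node it maps to. */
	q = blk_mq_init_queue(&my_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&my_tag_set);
		return PTR_ERR(q);
	}

	return 0;
}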