static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
+ struct request_queue *q = rq->q;
- rq->q->softirq_done_fn(rq);
+ q->mq_ops->complete(rq);
}
static void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
+ struct request_queue *q = rq->q;
bool shared = false;
int cpu;
* So complete IO requests in softirq context in the single queue case
* to avoid degrading IO performance through irqs-off latency.
*/
- if (rq->q->nr_hw_queues == 1) {
+ if (q->nr_hw_queues == 1) {
__blk_complete_request(rq);
return;
}
- if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
- rq->q->softirq_done_fn(rq);
+ if (!test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) {
+ q->mq_ops->complete(rq);
return;
}
cpu = get_cpu();
- if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
+ if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
smp_call_function_single_async(ctx->cpu, &rq->csd);
} else {
- rq->q->softirq_done_fn(rq);
+ q->mq_ops->complete(rq);
}
put_cpu();
}
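
The ->complete handler reached through q->mq_ops->complete(rq) above is driver code. As a hedged sketch (the mydrv_* names and the per-command status field are illustrative assumptions, not part of this patch), such a callback typically just finishes the request:

static void mydrv_complete_rq(struct request *rq)
{
	/* per-request driver data allocated next to the request (hypothetical) */
	struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

	/* end the request with the status captured in hard-irq context */
	blk_mq_end_request(rq, cmd->status);
}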
*/
q->poll_nsec = -1;
- if (set->ops->complete)
- blk_queue_softirq_done(q, set->ops->complete);
-
blk_mq_init_cpu_queues(q, set->nr_hw_queues);
blk_mq_add_queue_tag_set(set, q);
blk_mq_map_swqueue(q);
unsigned long blk_max_pfn;
-void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
-{
- q->softirq_done_fn = fn;
-}
-EXPORT_SYMBOL(blk_queue_softirq_done);
-
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;
rq = list_entry(local_list.next, struct request, ipi_list);
list_del_init(&rq->ipi_list);
- rq->q->softirq_done_fn(rq);
+ rq->q->mq_ops->complete(rq);
}
}
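
For context, the BLOCK_SOFTIRQ side that runs the loop above is not touched by this patch; only the final indirection moves from q->softirq_done_fn to q->mq_ops->complete. Roughly, the handler is registered at init time as below (a simplified sketch of the existing blk-softirq setup, shown for orientation only; the real init also wires up CPU-hotplug handling):

static __init int blk_softirq_init(void)
{
	int i;

	/* per-CPU completion list drained by blk_done_softirq() */
	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	return 0;
}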
unsigned long flags;
bool shared = false;
- BUG_ON(!q->softirq_done_fn);
+ BUG_ON(!q->mq_ops->complete);
local_irq_save(flags);
cpu = smp_processor_id();
typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
typedef bool (busy_fn)(struct request_queue *);
+typedef void (complete_fn)(struct request *);
struct blk_mq_ops {
*/
poll_fn *poll;
- softirq_done_fn *complete;
+ complete_fn *complete;
/*
* Called when the block layer side of a hardware queue has been
typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);
struct bio_vec;
-typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
enum blk_eh_timer_return {
make_request_fn *make_request_fn;
poll_q_fn *poll_fn;
- softirq_done_fn *softirq_done_fn;
dma_drain_needed_fn *dma_drain_needed;
const struct blk_mq_ops *mq_ops;
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
-extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
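
With blk_queue_softirq_done() and the softirq_done_fn typedef gone, a driver that used to register its completion handler on the request_queue now supplies the same function through its blk_mq_ops. A hedged before/after sketch with illustrative mydrv_* names (not taken from this patch):

/* before: registered against the queue at init time */
blk_queue_softirq_done(q, mydrv_complete_rq);

/* after: supplied once in the driver's blk_mq_ops */
static const struct blk_mq_ops mydrv_mq_ops = {
	.queue_rq	= mydrv_queue_rq,
	.complete	= mydrv_complete_rq,
};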