[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
+ [BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
[BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
/* device mapper special case, should not leak out: */
[BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
return true;
}
+#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
+
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bool got_budget)
{
struct request *rq, *nxt;
bool no_tag = false;
int errors, queued;
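+ /* last ->queue_rq() status, consulted after the dispatch loop */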
+ blk_status_t ret = BLK_STS_OK;
if (list_empty(list))
return false;
errors = queued = 0;
do {
struct blk_mq_queue_data bd;
- blk_status_t ret;
rq = list_first_entry(list, struct request, queuelist);
if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
}
ret = q->mq_ops->queue_rq(hctx, &bd);
- if (ret == BLK_STS_RESOURCE) {
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
/*
* If an I/O scheduler has been configured and we got a
* driver tag for the next request already, free it
* again. Any items that need requeuing are stuffed into
* hctx->dispatch, that is where we will continue on next
* queue run.
*/
if (!list_empty(list)) {
+ bool needs_restart;
+
spin_lock(&hctx->lock);
list_splice_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
* - Some but not all block drivers stop a queue before
* returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
* and dm-rq.
+ *
+ * If driver returns BLK_STS_RESOURCE and SCHED_RESTART
+ * bit is set, run queue after a delay to avoid IO stalls
+ * that could otherwise occur if the queue is idle.
*/
- if (!blk_mq_sched_needs_restart(hctx) ||
+ needs_restart = blk_mq_sched_needs_restart(hctx);
+ if (!needs_restart ||
(no_tag && list_empty_careful(&hctx->dispatch_wait.entry)))
blk_mq_run_hw_queue(hctx, true);
+ else if (needs_restart && (ret == BLK_STS_RESOURCE))
+ blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
}
return (queued + errors) != 0;
*cookie = new_cookie;
break;
case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
__blk_mq_requeue_request(rq);
break;
default:
hctx_lock(hctx, &srcu_idx);
ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false);
- if (ret == BLK_STS_RESOURCE)
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_sched_insert_request(rq, false, true, false);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);
return BLK_STS_OK;
} else
/* requeue request */
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
}
}
/* Out of mem doesn't actually happen, since we fall back
* to direct descriptors */
if (err == -ENOMEM || err == -ENOSPC)
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
return BLK_STS_IOERR;
}
out_busy:
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&rinfo->ring_lock, flags);
- return BLK_STS_RESOURCE;
+ return BLK_STS_DEV_RESOURCE;
}
static void blkif_complete_rq(struct request *rq)
clone->start_time = jiffies;
r = blk_insert_cloned_request(clone->q, clone);
- if (r != BLK_STS_OK && r != BLK_STS_RESOURCE)
+ if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
/* must complete clone in terms of original request */
dm_complete_request(rq, r);
return r;
trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
blk_rq_pos(rq));
ret = dm_dispatch_clone_request(clone, rq);
- if (ret == BLK_STS_RESOURCE) {
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
blk_rq_unprep_clone(clone);
tio->ti->type->release_clone_rq(clone);
tio->clone = NULL;
/* Undo dm_start_request() before requeuing */
rq_end_stats(md, rq);
rq_completed(md, rq_data_dir(rq), false);
- blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
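+ /* blk-mq now reruns the queue (after a delay if needed) for BLK_STS_RESOURCE */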
return BLK_STS_RESOURCE;
}
NVME_FC_Q_LIVE,
};
-#define NVMEFC_QUEUE_DELAY 3 /* ms units */
-
#define NVME_FC_DEFAULT_DEV_LOSS_TMO 60 /* seconds */
struct nvme_fc_queue {
* the target device is present
*/
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
- goto busy;
+ return BLK_STS_RESOURCE;
if (!nvme_fc_ctrl_get(ctrl))
return BLK_STS_IOERR;
ret != -EBUSY)
return BLK_STS_IOERR;
- goto busy;
+ return BLK_STS_RESOURCE;
}
return BLK_STS_OK;
-
-busy:
- if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
- blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
-
- return BLK_STS_RESOURCE;
}
static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
case BLK_STS_OK:
break;
case BLK_STS_RESOURCE:
- if (atomic_read(&sdev->device_busy) == 0 &&
- !scsi_device_blocked(sdev))
- blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
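+ /*
+ * In-flight or blocked commands guarantee that the queue will be
+ * rerun, so the busy status can be promoted to BLK_STS_DEV_RESOURCE.
+ */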
+ if (atomic_read(&sdev->device_busy) ||
+ scsi_device_blocked(sdev))
+ ret = BLK_STS_DEV_RESOURCE;
break;
default:
/*
#define BLK_STS_AGAIN ((__force blk_status_t)12)
+/*
+ * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
+ * device related resources are unavailable, but the driver can guarantee
+ * that the queue will be rerun in the future once resources become
+ * available again. This is typically the case for device specific
+ * resources that are consumed for IO. If the driver fails to allocate these
+ * resources, we know that inflight (or pending) IO will free these
+ * resources upon completion.
+ *
+ * This is different from BLK_STS_RESOURCE in that it explicitly references
+ * a device specific resource. For resources of wider scope, allocation
+ * failure can happen without having pending IO. This means that we can't
+ * rely on request completions freeing these resources, as IO may not be in
+ * flight. Examples of that are kernel memory allocations, DMA mappings, or
+ * any other system wide resources.
+ */
+#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
+
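To make the distinction concrete, a minimal, hypothetical ->queue_rq() sketch follows. The struct foo_dev type and the foo_* helpers are invented for illustration and are not part of this patch: a missing device-owned resource (a hardware command slot) maps to BLK_STS_DEV_RESOURCE, since a completing command frees a slot and the queue is guaranteed to be rerun, while a failed system-wide allocation maps to BLK_STS_RESOURCE so that blk-mq reruns the queue after BLK_MQ_RESOURCE_DELAY when SCHED_RESTART is set.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

struct foo_dev;					/* hypothetical per-device state */
int foo_get_hw_slot(struct foo_dev *foo);	/* returns < 0 when no slot is free */
void foo_put_hw_slot(struct foo_dev *foo, int slot);
int foo_map_data(struct foo_dev *foo, struct request *rq, int slot);
void foo_submit(struct foo_dev *foo, struct request *rq, int slot);

static blk_status_t foo_queue_rq(struct blk_mq_hw_ctx *hctx,
				 const struct blk_mq_queue_data *bd)
{
	struct foo_dev *foo = hctx->queue->queuedata;
	struct request *rq = bd->rq;
	int slot;

	blk_mq_start_request(rq);

	/*
	 * Device resource: a completing command frees a slot and the
	 * queue is guaranteed to be rerun, so BLK_STS_DEV_RESOURCE is
	 * safe here and avoids the delayed rerun in blk-mq.
	 */
	slot = foo_get_hw_slot(foo);
	if (slot < 0)
		return BLK_STS_DEV_RESOURCE;

	/*
	 * System-wide resource: nothing guarantees that in-flight IO
	 * will make this allocation succeed later, so return
	 * BLK_STS_RESOURCE and let blk-mq rerun the queue after a delay.
	 */
	if (foo_map_data(foo, rq, slot)) {
		foo_put_hw_slot(foo, slot);
		return BLK_STS_RESOURCE;
	}

	foo_submit(foo, rq, slot);
	return BLK_STS_OK;
}

The driver-side contract is that BLK_STS_DEV_RESOURCE is only valid when completion of outstanding IO (or the driver itself) will rerun the queue once the device resource is released; otherwise BLK_STS_RESOURCE must be used so the core's delayed rerun can break a potential stall.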
/**
* blk_path_error - returns true if error may be path related
* @error: status the request was completed with