Until now, the block layer has allowed two separate modes of request
execution.  A request is always acquired from the request queue via
elv_next_request().  After that, drivers are free either to dequeue it
or to process it without dequeueing.  Dequeueing allows
elv_next_request() to return the next request so that multiple requests
can be in flight.
Executing requests without dequeueing is mainly useful for drivers of
simpler devices which can't do scatter-gather: they can deal with one
segment at a time without considering request boundaries.  However, the
benefit this brings is dubious and declining, while the cost of the API
ambiguity keeps increasing.  Segment-based drivers are usually for very
old or limited devices, and since converting them to the dequeueing
model isn't difficult, the non-dequeueing mode doesn't justify the
overhead it puts on the block layer and its more modern users.
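For illustration, the two legacy modes looked roughly like this (a
sketch of a hypothetical ->request_fn; the mydev_*() helpers are made
up, and queue_lock is already held when ->request_fn is invoked):

	struct request *rq;

	/* dequeueing model: the request leaves the queue and may be
	 * completed asynchronously while further requests are issued */
	while ((rq = elv_next_request(q)) != NULL) {
		blkdev_dequeue_request(rq);	/* hand off to the hardware */
		mydev_issue(rq);		/* completed later via blk_end_request*() */
	}

	/* non-dequeueing model: the request stays at the head of the queue
	 * and is completed one segment at a time; the block layer used to
	 * dequeue it itself on final completion */
	while ((rq = elv_next_request(q)) != NULL)
		__blk_end_request_cur(rq, mydev_xfer_cur_segment(rq));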
Previous patches converted all block low-level drivers to the
dequeueing model.  This patch completes the API transition by:
* renaming elv_next_request() to blk_peek_request()
* renaming blkdev_dequeue_request() to blk_start_request()
* adding blk_fetch_request(), which is a combination of peek and start
* disallowing completion of queued (not started) requests
* applying the new API to all LLDs
The renames are for consistency and to break out-of-tree code, so that
it's apparent that out-of-tree drivers need updating.
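Converting an out-of-tree driver follows the same mechanical pattern
applied to the in-tree LLDs below (a sketch; mydev_can_accept() is a
hypothetical helper, and queue_lock is held by the caller):

	struct request *rq;

	/* before: peek, then explicitly dequeue */
	rq = elv_next_request(q);
	if (rq)
		blkdev_dequeue_request(rq);

	/* after: fetch in one step ... */
	rq = blk_fetch_request(q);

	/* ... or peek first and start the request only once the driver
	 * has accepted it */
	rq = blk_peek_request(q);
	if (rq && mydev_can_accept(rq))
		blk_start_request(rq);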
[ Impact: block request issue API cleanup, no functional change ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Mike Miller <mike.miller@hp.com>
Cc: unsik Kim <donari75@gmail.com>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Laurent Vivier <Laurent@lvivier.info>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Grant Likely <grant.likely@secretlab.ca>
Cc: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Stefan Weinhuber <wein@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
struct omap_msg_tx_data *tx_data;
spin_lock(q->queue_lock);
- rq = elv_next_request(q);
- if (rq)
- blkdev_dequeue_request(rq);
+ rq = blk_fetch_request(q);
spin_unlock(q->queue_lock);
if (!rq)
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
- rq = elv_next_request(q);
- if (rq)
- blkdev_dequeue_request(rq);
+ rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
break;
while (1) {
spin_lock_irqsave(q->queue_lock, flags);
- rq = elv_next_request(q);
- if (rq)
- blkdev_dequeue_request(rq);
+ rq = blk_fetch_request(q);
spin_unlock_irqrestore(q->queue_lock, flags);
if (!rq)
while(1){
struct ubd *dev = q->queuedata;
if(dev->end_sg == 0){
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if(req == NULL)
return;
dev->request = req;
- blkdev_dequeue_request(req);
dev->start_sg = 0;
dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
}
}
/* stash away the original request */
- elv_dequeue_request(q, rq);
+ blk_dequeue_request(rq);
q->orig_bar_rq = rq;
rq = NULL;
* Queue ordering not supported. Terminate
* with prejudice.
*/
- elv_dequeue_request(q, rq);
+ blk_dequeue_request(rq);
__blk_end_request_all(rq, -EOPNOTSUPP);
*rqp = NULL;
return false;
*/
void blk_requeue_request(struct request_queue *q, struct request *rq)
{
+ BUG_ON(blk_queued_rq(rq));
+
blk_delete_timer(rq);
blk_clear_rq_complete(rq);
trace_block_rq_requeue(q, rq);
}
EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
-/**
- * blkdev_dequeue_request - dequeue request and start timeout timer
- * @req: request to dequeue
- *
- * Dequeue @req and start timeout timer on it. This hands off the
- * request to the driver.
- *
- * Block internal functions which don't want to start timer should
- * call elv_dequeue_request().
- */
-void blkdev_dequeue_request(struct request *req)
-{
- elv_dequeue_request(req->q, req);
-
- /*
- * We are now handing the request to the hardware, add the
- * timeout handler.
- */
- blk_add_timer(req);
-}
-EXPORT_SYMBOL(blkdev_dequeue_request);
-
static void blk_account_io_completion(struct request *req, unsigned int bytes)
{
if (blk_do_io_stat(req)) {
}
}
-struct request *elv_next_request(struct request_queue *q)
+/**
+ * blk_peek_request - peek at the top of a request queue
+ * @q: request queue to peek at
+ *
+ * Description:
+ * Return the request at the top of @q. The returned request
+ * should be started using blk_start_request() before LLD starts
+ * processing it.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_peek_request(struct request_queue *q)
{
struct request *rq;
int ret;
return rq;
}
-EXPORT_SYMBOL(elv_next_request);
+EXPORT_SYMBOL(blk_peek_request);
-void elv_dequeue_request(struct request_queue *q, struct request *rq)
+void blk_dequeue_request(struct request *rq)
{
+ struct request_queue *q = rq->q;
+
BUG_ON(list_empty(&rq->queuelist));
BUG_ON(ELV_ON_HASH(rq));
q->in_flight++;
}
+/**
+ * blk_start_request - start request processing on the driver
+ * @req: request to dequeue
+ *
+ * Description:
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call blk_dequeue_request().
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+void blk_start_request(struct request *req)
+{
+ blk_dequeue_request(req);
+
+ /*
+ * We are now handing the request to the hardware, add the
+ * timeout handler.
+ */
+ blk_add_timer(req);
+}
+EXPORT_SYMBOL(blk_start_request);
+
+/**
+ * blk_fetch_request - fetch a request from a request queue
+ * @q: request queue to fetch a request from
+ *
+ * Description:
+ * Return the request at the top of @q. The request is started on
+ * return and LLD can start processing it immediately.
+ *
+ * Return:
+ * Pointer to the request at the top of @q if available. Null
+ * otherwise.
+ *
+ * Context:
+ * queue_lock must be held.
+ */
+struct request *blk_fetch_request(struct request_queue *q)
+{
+ struct request *rq;
+
+ rq = blk_peek_request(q);
+ if (rq)
+ blk_start_request(rq);
+ return rq;
+}
+EXPORT_SYMBOL(blk_fetch_request);
+
/**
* blk_update_request - Special helper function for request stacking drivers
* @rq: the request being processed
*/
static void blk_finish_request(struct request *req, int error)
{
+ BUG_ON(blk_queued_rq(req));
+
if (blk_rq_tagged(req))
blk_queue_end_tag(req->q, req);
- if (blk_queued_rq(req))
- elv_dequeue_request(req->q, req);
-
if (unlikely(laptop_mode) && blk_fs_request(req))
laptop_io_completion();
rq->cmd_flags |= REQ_QUEUED;
rq->tag = tag;
bqt->tag_index[tag] = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
list_add(&rq->queuelist, &q->tag_busy_list);
return 0;
}
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
struct bio *bio);
+void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
void blk_unplug_work(struct work_struct *work);
DAC960_Command_T *Command;
while(1) {
- Request = elv_next_request(req_q);
+ Request = blk_peek_request(req_q);
if (!Request)
return 1;
Command->BlockNumber = blk_rq_pos(Request);
Command->BlockCount = blk_rq_sectors(Request);
Command->Request = Request;
- blkdev_dequeue_request(Request);
+ blk_start_request(Request);
Command->SegmentCount = blk_rq_map_sg(req_q,
Command->Request, Command->cmd_sglist);
/* pci_map_sg MAY change the value of SegCount */
int err;
next_req:
- rq = elv_next_request(floppy_queue);
+ rq = blk_fetch_request(floppy_queue);
if (!rq) {
/* Nothing left to do */
return;
}
- blkdev_dequeue_request(rq);
floppy = rq->rq_disk->private_data;
drive = floppy - unit;
repeat:
if (!fd_request) {
- fd_request = elv_next_request(floppy_queue);
+ fd_request = blk_fetch_request(floppy_queue);
if (!fd_request)
goto the_end;
- blkdev_dequeue_request(fd_request);
}
floppy = fd_request->rq_disk->private_data;
goto startio;
queue:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
if ((c = cmd_alloc(h, 1)) == NULL)
goto full;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
spin_unlock_irq(q->queue_lock);
goto startio;
queue_next:
- creq = elv_next_request(q);
+ creq = blk_peek_request(q);
if (!creq)
goto startio;
if ((c = cmd_alloc(h,1)) == NULL)
goto startio;
- blkdev_dequeue_request(creq);
+ blk_start_request(creq);
c->ctlr = h->ctlr;
c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
del_timer(&fd_timeout);
cont = NULL;
clear_bit(0, &fdc_busy);
- if (current_req || elv_next_request(floppy_queue))
+ if (current_req || blk_peek_request(floppy_queue))
do_fd_request(floppy_queue);
spin_unlock_irqrestore(&floppy_lock, flags);
wake_up(&fdc_wait);
struct request *req;
spin_lock_irq(floppy_queue->queue_lock);
- req = elv_next_request(floppy_queue);
- if (req)
- blkdev_dequeue_request(req);
+ req = blk_fetch_request(floppy_queue);
spin_unlock_irq(floppy_queue->queue_lock);
if (!req) {
do_floppy = NULL;
del_timer(&device_timer);
if (!hd_req) {
- hd_req = elv_next_request(hd_queue);
+ hd_req = blk_fetch_request(hd_queue);
if (!hd_req) {
do_hd = NULL;
return;
}
- blkdev_dequeue_request(hd_req);
}
req = hd_req;
while (1) {
if (!host->req) {
- host->req = elv_next_request(q);
- if (host->req)
- blkdev_dequeue_request(host->req);
- else
+ host->req = blk_fetch_request(q);
+ if (!host->req)
break;
}
while (1) {
if (!host->req) {
- host->req = elv_next_request(q);
- if (host->req)
- blkdev_dequeue_request(host->req);
- else
+ host->req = blk_fetch_request(q);
+ if (!host->req)
break;
}
req = host->req;
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_fetch_request(q)) != NULL) {
struct nbd_device *lo;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
return;
while (1) {
if (!pcd_req) {
- pcd_req = elv_next_request(q);
+ pcd_req = blk_fetch_request(q);
if (!pcd_req)
return;
- blkdev_dequeue_request(pcd_req);
}
if (rq_data_dir(pcd_req) == READ) {
spin_lock_irqsave(&pd_lock, saved_flags);
if (!__blk_end_request_cur(pd_req,
res == Ok ? 0 : -EIO)) {
- pd_req = elv_next_request(pd_queue);
+ pd_req = blk_fetch_request(pd_queue);
if (!pd_req)
stop = 1;
- else
- blkdev_dequeue_request(pd_req);
}
spin_unlock_irqrestore(&pd_lock, saved_flags);
if (stop)
{
if (pd_req)
return;
- pd_req = elv_next_request(q);
+ pd_req = blk_fetch_request(q);
if (!pd_req)
return;
- blkdev_dequeue_request(pd_req);
schedule_fsm();
}
return;
repeat:
if (!pf_req) {
- pf_req = elv_next_request(q);
+ pf_req = blk_fetch_request(q);
if (!pf_req)
return;
- blkdev_dequeue_request(pf_req);
}
pf_current = pf_req->rq_disk->private_data;
dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
- while ((req = elv_next_request(q))) {
- blkdev_dequeue_request(req);
-
+ while ((req = blk_fetch_request(q))) {
if (blk_fs_request(req)) {
if (ps3disk_submit_request_sg(dev, req))
break;
static void do_vdc_request(struct request_queue *q)
{
while (1) {
- struct request *req = elv_next_request(q);
+ struct request *req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
if (__send_request(req) < 0)
__blk_end_request_all(req, -EIO);
}
struct request *req;
struct floppy_state *fs;
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
-
+ req = blk_fetch_request(q);
while (req) {
int err = -EIO;
break;
}
done:
- if (!__blk_end_request_cur(req, err)) {
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
- }
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
}
while (fs->state == idle) {
if (!fd_req) {
- fd_req = elv_next_request(swim3_queue);
+ fd_req = blk_fetch_request(swim3_queue);
if (!fd_req)
break;
- blkdev_dequeue_request(fd_req);
}
req = fd_req;
#if 0
while (1) {
DPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_fetch_request(q);
if (!rq)
break;
- blkdev_dequeue_request(rq);
-
crq = rq->special;
assert(crq != NULL);
assert(crq->rq == rq);
queue_one_request:
VPRINTK("get req\n");
- rq = elv_next_request(q);
+ rq = blk_peek_request(q);
if (!rq)
return;
}
crq->rq = rq;
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
if (rq_data_dir(rq) == WRITE) {
writing = 1;
struct ub_lun *lun = q->queuedata;
struct request *rq;
- while ((rq = elv_next_request(q)) != NULL) {
+ while ((rq = blk_peek_request(q)) != NULL) {
if (ub_request_fn_1(lun, rq) != 0) {
blk_stop_queue(q);
break;
int n_elem;
if (atomic_read(&sc->poison)) {
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq));
return 0;
}
if (lun->changed && !blk_pc_request(rq)) {
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq));
return 0;
}
return -1;
memset(cmd, 0, sizeof(struct ub_scsi_cmd));
- blkdev_dequeue_request(rq);
+ blk_start_request(rq);
urq = &lun->urq;
memset(urq, 0, sizeof(struct ub_request));
* back later.
*/
while (num_req_outstanding < VIOMAXREQ) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (req == NULL)
return;
- /* dequeue the current request from the queue */
- blkdev_dequeue_request(req);
/* check that request contains a valid command */
if (!blk_fs_request(req)) {
viodasd_end_request(req, -EIO, blk_rq_sectors(req));
struct request *req;
unsigned int issued = 0;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_peek_request(q)) != NULL) {
vblk = req->rq_disk->private_data;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
blk_stop_queue(q);
break;
}
- blkdev_dequeue_request(req);
+ blk_start_request(req);
issued++;
}
if (xdc_busy)
return;
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
-
+ req = blk_fetch_request(q);
while (req) {
unsigned block = blk_rq_pos(req);
unsigned count = blk_rq_cur_sectors(req);
block, count);
done:
/* wrap up, 0 = success, -errno = fail */
- if (!__blk_end_request_cur(req, res)) {
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
- }
+ if (!__blk_end_request_cur(req, res))
+ req = blk_fetch_request(q);
}
}
queued = 0;
- while ((req = elv_next_request(rq)) != NULL) {
+ while ((req = blk_peek_request(rq)) != NULL) {
info = req->rq_disk->private_data;
if (RING_FULL(&info->ring))
goto wait;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
if (!blk_fs_request(req)) {
__blk_end_request_all(req, -EIO);
{
struct request *req;
- while ((req = elv_next_request(q)) != NULL) {
+ while ((req = blk_peek_request(q)) != NULL) {
if (blk_fs_request(req))
break;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
__blk_end_request_all(req, -EIO);
}
return req;
__blk_end_request_all(ace->req, -EIO);
ace->req = NULL;
}
- while ((req = elv_next_request(ace->queue)) != NULL) {
- blkdev_dequeue_request(req);
+ while ((req = blk_fetch_request(ace->queue)) != NULL)
__blk_end_request_all(req, -EIO);
- }
/* Drop back to IDLE state and notify waiters */
ace->fsm_state = ACE_FSM_STATE_IDLE;
ace->fsm_state = ACE_FSM_STATE_IDLE;
break;
}
- blkdev_dequeue_request(req);
+ blk_start_request(req);
/* Okay, it's a data request, set it up for transfer */
dev_dbg(ace->dev,
{
struct request *req;
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
-
+ req = blk_fetch_request(q);
while (req) {
unsigned long start = blk_rq_pos(req) << 9;
unsigned long len = blk_rq_cur_bytes(req);
len -= size;
}
done:
- if (!__blk_end_request_cur(req, err)) {
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
- }
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
{
struct request *req;
- while ((req = elv_next_request(rq)) != NULL) {
- blkdev_dequeue_request(req);
-
+ while ((req = blk_fetch_request(rq)) != NULL) {
if (!blk_fs_request(req)) {
printk(KERN_DEBUG "GDROM: Non-fs request ignored\n");
__blk_end_request_all(req, -EIO);
{
struct request *req;
- while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
- blkdev_dequeue_request(req);
-
+ while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) {
if (!blk_fs_request(req))
__blk_end_request_all(req, -EIO);
else if (send_request(req) < 0) {
blk_requeue_request(failed_rq->q, failed_rq);
drive->hwif->rq = NULL;
if (ide_queue_sense_rq(drive, pc)) {
- blkdev_dequeue_request(failed_rq);
+ blk_start_request(failed_rq);
ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq));
}
}
* we know that the queue isn't empty, but this can happen
* if the q->prep_rq_fn() decides to kill a request
*/
- if (!rq) {
- rq = elv_next_request(drive->queue);
- if (rq)
- blkdev_dequeue_request(rq);
- }
+ if (!rq)
+ rq = blk_fetch_request(drive->queue);
spin_unlock_irq(q->queue_lock);
spin_lock_irq(&hwif->lock);
/*
* Sanity: don't accept a request that isn't a PM request
* if we are currently power managed. This is very important as
- * blk_stop_queue() doesn't prevent the elv_next_request()
+ * blk_stop_queue() doesn't prevent the blk_fetch_request()
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
return 0;
}
- dev_dbg(&card->dev, "elv_next\n");
- msb->block_req = elv_next_request(msb->queue);
+ dev_dbg(&card->dev, "blk_fetch\n");
+ msb->block_req = blk_fetch_request(msb->queue);
if (!msb->block_req) {
dev_dbg(&card->dev, "issue end\n");
return -EAGAIN;
}
- blkdev_dequeue_request(msb->block_req);
dev_dbg(&card->dev, "trying again\n");
chunk = 1;
return;
if (msb->eject) {
- while ((req = elv_next_request(q)) != NULL) {
- blkdev_dequeue_request(req);
+ while ((req = blk_fetch_request(q)) != NULL)
__blk_end_request_all(req, -ENODEV);
- }
return;
}
struct request *req;
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req)
break;
if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
if (!i2o_block_transfer(req)) {
- blkdev_dequeue_request(req);
+ blk_start_request(req);
continue;
} else
osm_info("transfer error\n");
break;
}
} else {
- blkdev_dequeue_request(req);
+ blk_start_request(req);
__blk_end_request_all(req, -EIO);
}
}
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
- if (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
- }
+ if (!blk_queue_plugged(q))
+ req = blk_fetch_request(q);
mq->req = req;
spin_unlock_irq(q->queue_lock);
if (!mq) {
printk(KERN_ERR "MMC: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL) {
- blkdev_dequeue_request(req);
+ while ((req = blk_fetch_request(q)) != NULL)
__blk_end_request_all(req, -EIO);
- }
return;
}
struct mtd_blktrans_dev *dev;
int res;
- if (!req) {
- req = elv_next_request(rq);
- if (req)
- blkdev_dequeue_request(req);
- }
- if (!req) {
+ if (!req && !(req = blk_fetch_request(rq))) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irq(rq->queue_lock);
schedule();
if (basedev->state < DASD_STATE_READY)
return;
/* Now we try to fetch requests from the request queue */
- while (!blk_queue_plugged(queue) &&
- elv_next_request(queue)) {
-
- req = elv_next_request(queue);
-
+ while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
if (basedev->features & DASD_FEATURE_READONLY &&
rq_data_dir(req) == WRITE) {
DBF_DEV_EVENT(DBF_ERR, basedev,
"Rejecting write request %p",
req);
- blkdev_dequeue_request(req);
+ blk_start_request(req);
__blk_end_request_all(req, -EIO);
continue;
}
"CCW creation failed (rc=%ld) "
"on request %p",
PTR_ERR(cqr), req);
- blkdev_dequeue_request(req);
+ blk_start_request(req);
__blk_end_request_all(req, -EIO);
continue;
}
*/
cqr->callback_data = (void *) req;
cqr->status = DASD_CQR_FILLED;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
list_add_tail(&cqr->blocklist, &block->ccw_queue);
dasd_profile_start(block, cqr, req);
}
return;
spin_lock_irq(&block->request_queue_lock);
- while ((req = elv_next_request(block->request_queue))) {
- blkdev_dequeue_request(req);
+ while ((req = blk_fetch_request(block->request_queue)))
__blk_end_request_all(req, -EIO);
- }
spin_unlock_irq(&block->request_queue_lock);
}
device->blk_data.block_position = -1;
device->discipline->free_bread(ccw_req);
if (!list_empty(&device->req_queue) ||
- elv_next_request(device->blk_data.request_queue))
+ blk_peek_request(device->blk_data.request_queue))
tapeblock_trigger_requeue(device);
}
spin_lock_irq(&device->blk_data.request_queue_lock);
while (
!blk_queue_plugged(queue) &&
- elv_next_request(queue) &&
+ (req = blk_fetch_request(queue)) &&
nr_queued < TAPEBLOCK_MIN_REQUEUE
) {
- req = elv_next_request(queue);
if (rq_data_dir(req) == WRITE) {
DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
- blkdev_dequeue_request(req);
spin_unlock_irq(&device->blk_data.request_queue_lock);
blk_end_request_all(req, -EIO);
spin_lock_irq(&device->blk_data.request_queue_lock);
continue;
}
- blkdev_dequeue_request(req);
nr_queued++;
spin_unlock_irq(&device->blk_data.request_queue_lock);
rc = tapeblock_start_request(device, req);
{
struct request *req;
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
-
+ req = blk_fetch_request(q);
while (req) {
struct jsfd_part *jdp = req->rq_disk->private_data;
unsigned long offset = blk_rq_pos(req) << 9;
jsfd_read(req->buffer, jdp->dbase + offset, len);
err = 0;
end:
- if (!__blk_end_request_cur(req, err)) {
- req = elv_next_request(q);
- if (req)
- blkdev_dequeue_request(req);
- }
+ if (!__blk_end_request_cur(req, err))
+ req = blk_fetch_request(q);
}
}
break;
case BLKPREP_DEFER:
/*
- * If we defer, the elv_next_request() returns NULL, but the
+ * If we defer, the blk_peek_request() returns NULL, but the
* queue must be restarted, so we plug here if no returning
* command will automatically do that.
*/
struct scsi_target *starget = scsi_target(sdev);
struct Scsi_Host *shost = sdev->host;
- blkdev_dequeue_request(req);
+ blk_start_request(req);
if (unlikely(cmd == NULL)) {
printk(KERN_CRIT "impossible request in %s.\n",
if (!sdev) {
printk("scsi: killing requests for dead queue\n");
- while ((req = elv_next_request(q)) != NULL)
+ while ((req = blk_peek_request(q)) != NULL)
scsi_kill_request(req, q);
return;
}
* that the request is fully prepared even if we cannot
* accept it.
*/
- req = elv_next_request(q);
+ req = blk_peek_request(q);
if (!req || !scsi_dev_queue_ready(q, sdev))
break;
* Remove the request from the request list.
*/
if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
- blkdev_dequeue_request(req);
+ blk_start_request(req);
sdev->device_busy++;
spin_unlock(q->queue_lock);
int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *);
while (!blk_queue_plugged(q)) {
- req = elv_next_request(q);
+ req = blk_fetch_request(q);
if (!req)
break;
- blkdev_dequeue_request(req);
-
spin_unlock_irq(q->queue_lock);
handler = to_sas_internal(shost->transportt)->f->smp_handler;
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
-extern void blkdev_dequeue_request(struct request *req);
-
/*
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
return blk_rq_cur_bytes(rq) >> 9;
}
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
+
/*
* Request completion related functions.
*
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);