From 5e36bb6ee8d5ff6c6114b60d2aaa1c70d4275f4e Mon Sep 17 00:00:00 2001
From: Kiyoshi Ueda
Date: Mon, 28 Jan 2008 10:34:20 +0100
Subject: [PATCH] blk_end_request: changing ide normal caller (take 4)

This patch converts "normal" parts of ide to use the blk_end_request
interfaces. Related 'uptodate' arguments are converted to 'error'.

The conversion of 'uptodate' to 'error' is done only for the internal
function, __ide_end_request(). ide_end_request() was not changed since
it is exported and used by many ide drivers.

With this patch, blkdev_dequeue_request() in __ide_end_request() is
moved to blk_end_request, since blk_end_request takes care of
dequeueing the request like below:

	if (!list_empty(&rq->queuelist))
		blkdev_dequeue_request(rq);

In the case of ide,
  o the 'dequeue' variable of __ide_end_request() is 1 only when the
    request is still linked to the queue (i.e. rq->queuelist is not
    empty)
  o the 'dequeue' variable of __ide_end_request() is 0 only when the
    request has already been removed from the queue (i.e. rq->queuelist
    is empty)

So blk_end_request can handle it correctly, although ide always runs
through the code above.

Cc: Bartlomiej Zolnierkiewicz
Signed-off-by: Kiyoshi Ueda
Signed-off-by: Jun'ichi Nomura
Signed-off-by: Jens Axboe
---
 drivers/ide/ide-cd.c |  6 +++---
 drivers/ide/ide-io.c | 25 ++++++++++++-------------
 2 files changed, 15 insertions(+), 16 deletions(-)

diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 44b033ec0ab0..282f1580fca9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -655,9 +655,9 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 			BUG();
 		} else {
 			spin_lock_irqsave(&ide_lock, flags);
-			end_that_request_chunk(failed, 0,
-					       failed->data_len);
-			end_that_request_last(failed, 0);
+			if (__blk_end_request(failed, -EIO,
+					      failed->data_len))
+				BUG();
 			spin_unlock_irqrestore(&ide_lock, flags);
 		}
 	} else
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 6f8f544392a8..e6bb9cf24e3d 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -58,15 +58,19 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 			     int uptodate, unsigned int nr_bytes, int dequeue)
 {
 	int ret = 1;
+	int error = 0;
+
+	if (uptodate <= 0)
+		error = uptodate ? uptodate : -EIO;
 
 	/*
 	 * if failfast is set on a request, override number of sectors and
 	 * complete the whole request right now
 	 */
-	if (blk_noretry_request(rq) && end_io_error(uptodate))
+	if (blk_noretry_request(rq) && error)
 		nr_bytes = rq->hard_nr_sectors << 9;
 
-	if (!blk_fs_request(rq) && end_io_error(uptodate) && !rq->errors)
+	if (!blk_fs_request(rq) && error && !rq->errors)
 		rq->errors = -EIO;
 
 	/*
@@ -78,14 +82,9 @@ static int __ide_end_request(ide_drive_t *drive, struct request *rq,
 			ide_dma_on(drive);
 	}
 
-	if (!end_that_request_chunk(rq, uptodate, nr_bytes)) {
-		add_disk_randomness(rq->rq_disk);
-		if (dequeue) {
-			if (!list_empty(&rq->queuelist))
-				blkdev_dequeue_request(rq);
+	if (!__blk_end_request(rq, error, nr_bytes)) {
+		if (dequeue)
 			HWGROUP(drive)->rq = NULL;
-		}
-		end_that_request_last(rq, uptodate);
 		ret = 0;
 	}
 
@@ -290,9 +289,9 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
 		drive->blocked = 0;
 		blk_start_queue(drive->queue);
 	}
-	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
-	end_that_request_last(rq, 1);
+	if (__blk_end_request(rq, 0, 0))
+		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
@@ -387,10 +386,10 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 	}
 
 	spin_lock_irqsave(&ide_lock, flags);
-	blkdev_dequeue_request(rq);
 	HWGROUP(drive)->rq = NULL;
 	rq->errors = err;
-	end_that_request_last(rq, !rq->errors);
+	if (__blk_end_request(rq, (rq->errors ? -EIO : 0), 0))
+		BUG();
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
-- 
2.30.2
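
Illustrative note (not part of the patch): below is a minimal, standalone
C sketch of the 'uptodate' -> 'error' mapping that the __ide_end_request()
hunk above introduces. The helper name map_uptodate_to_error() and the
sample values are hypothetical and exist only for this note; they are not
kernel code.

/*
 * Standalone sketch of the mapping added in __ide_end_request():
 *   uptodate >  0  ->  error = 0         (success)
 *   uptodate == 0  ->  error = -EIO      (generic I/O error)
 *   uptodate <  0  ->  error = uptodate  (already a negative errno)
 */
#include <errno.h>
#include <stdio.h>

static int map_uptodate_to_error(int uptodate)
{
	int error = 0;

	if (uptodate <= 0)
		error = uptodate ? uptodate : -EIO;
	return error;
}

int main(void)
{
	int samples[] = { 1, 0, -ENOMEM };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("uptodate=%d -> error=%d\n",
		       samples[i], map_uptodate_to_error(samples[i]));
	return 0;
}

The point of the mapping is that the old convention (positive means
success, zero means a generic failure) is preserved, while a caller that
already has a specific negative errno can pass it straight through to
__blk_end_request().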