author     Tejun Heo <tj@kernel.org>             2009-04-28 04:06:11 (GMT)
committer  Jens Axboe <jens.axboe@oracle.com>    2009-04-28 06:14:51 (GMT)
commit     e091eb67af957bac4e4f7410c5d1aa263ee483a4 (patch)
tree       2be45bf08417bb1e2159f0a7519e9c79ac7d5a8f /drivers
parent     f81f2f7c9fee307e371f37424577d46f9eaf8692 (diff)
download   linux-fsl-qoriq-e091eb67af957bac4e4f7410c5d1aa263ee483a4.tar.xz
hd: clean up request completion paths
The hd read_intr() and write_intr() functions manually manipulate the request to complete it incrementally, which the block layer already supports. Simply use the block layer completion routines instead of doing manual partial completion.

While at it, remove the unnecessary elv_next_request() check at the tail of read_intr(). This also makes read_intr() and write_intr() more consistent.

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
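For context, the block-layer helper this patch switches to, __blk_end_request(req, error, nr_bytes), completes nr_bytes of the request and returns true while the request still has bytes pending; that is why both handlers re-arm themselves on a true return and start the next request otherwise. The user-space sketch below models that contract for illustration only: mock_request, mock_end_request and fake_read_intr are hypothetical stand-ins, not kernel API, and the real helper additionally handles bio accounting, error propagation and freeing the finished request.

/*
 * Standalone sketch (not kernel code) of the completion pattern the
 * patch adopts: complete one sector per interrupt and let the return
 * value decide whether to re-arm the handler.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_request {
	unsigned long sector;      /* next sector to transfer */
	unsigned int  bytes_left;  /* bytes not yet completed */
};

/*
 * Stand-in for __blk_end_request(): complete nr_bytes of the request
 * and return true while the request still has work pending, false
 * once it is fully finished.
 */
static bool mock_end_request(struct mock_request *req, unsigned int nr_bytes)
{
	req->bytes_left -= nr_bytes;
	req->sector += nr_bytes / 512;
	return req->bytes_left > 0;
}

/*
 * One "interrupt": the data transfer (insw() in the real driver) would
 * happen here, then the completion helper decides whether to re-arm,
 * mirroring SET_HANDLER(&read_intr) in the patched code.
 */
static void fake_read_intr(struct mock_request *req)
{
	if (mock_end_request(req, 512)) {
		printf("sector done, %u bytes left: re-arm handler\n",
		       req->bytes_left);
		return;
	}
	printf("request complete: start next request\n");
}

int main(void)
{
	struct mock_request req = { .sector = 0, .bytes_left = 3 * 512 };

	while (req.bytes_left > 0)
		fake_read_intr(&req);
	return 0;
}

Compiled stand-alone, this prints two "re-arm" lines followed by one completion line, matching the per-sector interrupt flow of the patched read_intr().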
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/hd.c  36
1 file changed, 12 insertions, 24 deletions
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 5cb300b..75b9ca9 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -452,32 +452,25 @@ static void read_intr(void)
 	bad_rw_intr();
 	hd_request();
 	return;
+
 ok_to_read:
 	req = CURRENT;
 	insw(HD_DATA, req->buffer, 256);
-	req->sector++;
-	req->buffer += 512;
-	req->errors = 0;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
 #ifdef DEBUG
 	printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-	       req->rq_disk->disk_name, req->sector, req->nr_sectors,
+	       req->rq_disk->disk_name, req->sector + 1, req->nr_sectors - 1,
 	       req->buffer+512);
 #endif
-	if (req->current_nr_sectors <= 0)
-		__blk_end_request_cur(req, 0);
-	if (i > 0) {
+	if (__blk_end_request(req, 0, 512)) {
 		SET_HANDLER(&read_intr);
 		return;
 	}
+
 	(void) inb_p(HD_STATUS);
 #if (HD_DELAY > 0)
 	last_req = read_timer();
 #endif
-	if (elv_next_request(QUEUE))
-		hd_request();
-	return;
 }
 
 static void write_intr(void)
@@ -499,23 +492,18 @@ static void write_intr(void)
 	bad_rw_intr();
 	hd_request();
 	return;
+
 ok_to_write:
-	req->sector++;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
-	req->buffer += 512;
-	if (!i || (req->bio && req->current_nr_sectors <= 0))
-		__blk_end_request_cur(req, 0);
-	if (i > 0) {
+	if (__blk_end_request(req, 0, 512)) {
 		SET_HANDLER(&write_intr);
 		outsw(HD_DATA, req->buffer, 256);
-	} else {
+		return;
+	}
+
 #if (HD_DELAY > 0)
-		last_req = read_timer();
+	last_req = read_timer();
 #endif
-		hd_request();
-	}
-	return;
+	hd_request();
 }
 
 static void recal_intr(void)