author     Jens Axboe <axboe@suse.de>  2006-01-09 10:03:35 -0500
committer  Jens Axboe <axboe@suse.de>  2006-01-09 10:03:35 -0500
commit     8672d57138b34447719cd7749f3d21070e1175a1 (patch)
tree       dd23c85cf45c11f5d8f13d748002cddcb5551ab4 /drivers/ide
parent     1aea6434eebd25e532d2e5ddabf2733af4e1ff0b (diff)
[IDE] Use the block layer deferred softirq request completion
This patch makes IDE use the new blk_complete_request() interface. There's still room for improvement, as __ide_end_request() really could drop the lock after getting HWGROUP->rq (why does it need to hold it in the first place? If ->rq access isn't serialized, we are screwed anyways).

Signed-off-by: Jens Axboe <axboe@suse.de>
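For context, the driver-side pattern this interface expects looks roughly like the sketch below, assuming the 2.6.x-era block layer API used in the diff that follows; the mydrv_* names and helper split are hypothetical. A driver registers a softirq-done handler at queue-init time, and its interrupt path calls blk_complete_request() so the heavier end-of-request work runs later in softirq context.

/*
 * Minimal sketch of the deferred softirq completion pattern.  The
 * mydrv_* names are hypothetical; the blk_* and end_that_* calls are
 * the ones used by this patch.
 */
static void mydrv_softirq_done(struct request *rq)
{
        /* Runs later in softirq context, off the hot interrupt path. */
        add_disk_randomness(rq->rq_disk);
        end_that_request_chunk(rq, rq->errors, rq->data_len);

        spin_lock_irq(rq->q->queue_lock);
        end_that_request_last(rq, rq->errors);
        spin_unlock_irq(rq->q->queue_lock);
}

static void mydrv_setup_queue(request_queue_t *q)
{
        /* Tell the block layer which handler to run from the softirq. */
        blk_queue_softirq_done(q, mydrv_softirq_done);
}

static void mydrv_complete_from_irq(struct request *rq, int uptodate,
                                    unsigned int nbytes)
{
        /* In the IRQ handler: stash status/length, then defer the rest. */
        rq->errors = uptodate;
        rq->data_len = nbytes;
        blk_complete_request(rq);
}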
Diffstat (limited to 'drivers/ide')
-rw-r--r--  drivers/ide/ide-io.c     42
-rw-r--r--  drivers/ide/ide-probe.c   2
2 files changed, 37 insertions, 7 deletions
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index b5dc6df8e67d..dea2d4dcc698 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,9 +55,22 @@
 #include <asm/io.h>
 #include <asm/bitops.h>
 
+void ide_softirq_done(struct request *rq)
+{
+        request_queue_t *q = rq->q;
+
+        add_disk_randomness(rq->rq_disk);
+        end_that_request_chunk(rq, rq->errors, rq->data_len);
+
+        spin_lock_irq(q->queue_lock);
+        end_that_request_last(rq, rq->errors);
+        spin_unlock_irq(q->queue_lock);
+}
+
 int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
                       int nr_sectors)
 {
+        unsigned int nbytes;
         int ret = 1;
 
         BUG_ON(!(rq->flags & REQ_STARTED));
@@ -81,17 +94,28 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
                 HWGROUP(drive)->hwif->ide_dma_on(drive);
         }
 
-        if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-                add_disk_randomness(rq->rq_disk);
-
-                if (blk_rq_tagged(rq))
-                        blk_queue_end_tag(drive->queue, rq);
-
+        /*
+         * For partial completions (or non fs/pc requests), use the regular
+         * direct completion path.
+         */
+        nbytes = nr_sectors << 9;
+        if (rq_all_done(rq, nbytes)) {
+                rq->errors = uptodate;
+                rq->data_len = nbytes;
                 blkdev_dequeue_request(rq);
                 HWGROUP(drive)->rq = NULL;
-                end_that_request_last(rq, uptodate);
+                blk_complete_request(rq);
                 ret = 0;
+        } else {
+                if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+                        add_disk_randomness(rq->rq_disk);
+                        blkdev_dequeue_request(rq);
+                        HWGROUP(drive)->rq = NULL;
+                        end_that_request_last(rq, uptodate);
+                        ret = 0;
+                }
         }
+
         return ret;
 }
 EXPORT_SYMBOL(__ide_end_request);
@@ -113,6 +137,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
         unsigned long flags;
         int ret = 1;
 
+        /*
+         * room for locking improvements here, the calls below don't
+         * need the queue lock held at all
+         */
         spin_lock_irqsave(&ide_lock, flags);
         rq = HWGROUP(drive)->rq;
 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 02167a5b751d..1ddaa71a8f45 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1011,6 +1011,8 @@ static int ide_init_queue(ide_drive_t *drive)
         blk_queue_max_hw_segments(q, max_sg_entries);
         blk_queue_max_phys_segments(q, max_sg_entries);
 
+        blk_queue_softirq_done(q, ide_softirq_done);
+
         /* assign drive queue */
         drive->queue = q;
 