author		Tejun Heo <tj@kernel.org>	2009-05-07 22:54:15 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 03:52:17 -0400
commit		296b2f6ae654581adc27f0d6f0af454c7f3d06ee (patch)
tree		8fab2b91741336d41e559a839b547d7ac3090524 /arch/arm/plat-omap/mailbox.c
parent		fb3ac7f6b811eac8e0aafa3df1c16ed872e898a8 (diff)
block: convert to dequeueing model (easy ones)
plat-omap/mailbox, floppy, viocd, mspro_block, i2o_block and
mmc/card/queue are already pretty close to the dequeueing model and can
be converted with simple changes. Convert them.

While at it,

* xen-blkfront: !fs check moved downwards to share the dequeue call
  with the normal path.

* mspro_block: __blk_end_request(..., blk_rq_cur_bytes()) converted to
  __blk_end_request_cur()

* mmc/card/queue: loop of __blk_end_request() converted to
  __blk_end_request_all()

[ Impact: dequeue in-flight request ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
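For context, the dequeueing model amounts to the pattern the hunks below
apply to mailbox.c: pop the next request and immediately dequeue it under
the queue lock, so it is accounted as in-flight while the driver works on
it, and requeue it if processing fails. A minimal sketch against the block
API as it existed at this commit; my_fetch_work() and my_process() are
hypothetical names, not part of the patch:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Placeholder for driver-specific handling; returns 0 on success. */
static int my_process(struct request *rq);

static void my_fetch_work(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		spin_lock_irq(q->queue_lock);
		rq = elv_next_request(q);
		if (rq)
			blkdev_dequeue_request(rq);	/* rq is now in-flight */
		spin_unlock_irq(q->queue_lock);

		if (!rq)
			break;

		if (my_process(rq)) {
			/* Failed: put the in-flight request back on the queue. */
			spin_lock_irq(q->queue_lock);
			blk_requeue_request(q, rq);
			spin_unlock_irq(q->queue_lock);
			return;
		}

		/* Done: complete the whole request. */
		spin_lock_irq(q->queue_lock);
		__blk_end_request_all(rq, 0);
		spin_unlock_irq(q->queue_lock);
	}
}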
Diffstat (limited to 'arch/arm/plat-omap/mailbox.c')
-rw-r--r--	arch/arm/plat-omap/mailbox.c	9
1 file changed, 9 insertions, 0 deletions
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 538ba7541d3f..7a1f5c25fd17 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -198,6 +198,8 @@ static void mbox_tx_work(struct work_struct *work)
 
 		spin_lock(q->queue_lock);
 		rq = elv_next_request(q);
+		if (rq)
+			blkdev_dequeue_request(rq);
 		spin_unlock(q->queue_lock);
 
 		if (!rq)
@@ -208,6 +210,9 @@ static void mbox_tx_work(struct work_struct *work)
 		ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
 		if (ret) {
 			enable_mbox_irq(mbox, IRQ_TX);
+			spin_lock(q->queue_lock);
+			blk_requeue_request(q, rq);
+			spin_unlock(q->queue_lock);
 			return;
 		}
 
@@ -238,6 +243,8 @@ static void mbox_rx_work(struct work_struct *work)
 	while (1) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		rq = elv_next_request(q);
+		if (rq)
+			blkdev_dequeue_request(rq);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		if (!rq)
 			break;
@@ -345,6 +352,8 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 	while (1) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		rq = elv_next_request(q);
+		if (rq)
+			blkdev_dequeue_request(rq);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		if (!rq)
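The completion-side conversions the log mentions for mspro_block and
mmc/card/queue follow the same idea. Roughly, and hedged as a sketch
rather than the actual driver code (err stands for the driver's error
status for the request):

	/* Before: complete the request one chunk at a time in a loop. */
	spin_lock_irq(q->queue_lock);
	while (__blk_end_request(rq, err, blk_rq_cur_bytes(rq)))
		;
	spin_unlock_irq(q->queue_lock);

	/* After: one call completes everything left in the request. */
	spin_lock_irq(q->queue_lock);
	__blk_end_request_all(rq, err);
	spin_unlock_irq(q->queue_lock);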