author    Tejun Heo <tj@kernel.org>  2009-04-28 00:06:09 -0400
committer Jens Axboe <jens.axboe@oracle.com>  2009-04-28 02:14:50 -0400
commit    4d6c84d91d1a539ebc47d1a36a35e9390ba11fdc (patch)
tree      62917c8a59bef037231ba0869a688651aba099dc /arch/um
parent    044208506d35bd62396c4673176e2c12393905b8 (diff)
ubd: cleanup completion path
ubd had its own block request partial completion mechanism, which is
unnecessary as the block layer already does it.  Kill ubd_end_request()
and ubd_finish() and replace them with a direct call to blk_end_request().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jeff Dike <jdike@linux.intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
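For illustration only (not code from the ubd driver), a minimal sketch of the completion pattern this change moves to, assuming the block API of that era where blk_end_request() does the partial-completion accounting itself; example_complete() is an invented name:

#include <linux/blkdev.h>

/*
 * Hypothetical helper, for illustration only: report that 'bytes' of 'rq'
 * finished with status 'error'.  blk_end_request() updates the request's
 * residual itself and returns true while the request still has bytes
 * outstanding, so the driver needs no private sector bookkeeping.
 */
static void example_complete(struct request *rq, int error, unsigned int bytes)
{
	if (blk_end_request(rq, error, bytes))
		return;	/* partially complete; block layer keeps rq alive */

	/* rq fully completed and released by the block layer */
}

The hunks below remove exactly this kind of driver-local accounting (the rq->nr_sectors tracking and the ubd_end_request()/ubd_finish() wrappers) from ubd_handler().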
Diffstat (limited to 'arch/um')
-rw-r--r--  arch/um/drivers/ubd_kern.c  23
1 file changed, 1 insertion(+), 22 deletions(-)
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8ef..36ca9fa89d05 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
 
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
-
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
-	blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
-	if(bytes < 0){
-		ubd_end_request(req, 0, -EIO);
-		return;
-	}
-	ubd_end_request(req, bytes, 0);
-}
-
 static LIST_HEAD(restart);
 
 /* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
 static void ubd_handler(void)
 {
 	struct io_thread_req *req;
-	struct request *rq;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
 			return;
 		}
 
-		rq = req->req;
-		rq->nr_sectors -= req->length >> 9;
-		if(rq->nr_sectors == 0)
-			ubd_finish(rq, rq->hard_nr_sectors << 9);
+		blk_end_request(req->req, 0, req->length);
 		kfree(req);
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);