From 4d6c84d91d1a539ebc47d1a36a35e9390ba11fdc Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 28 Apr 2009 13:06:09 +0900
Subject: ubd: cleanup completion path

ubd had its own block request partial completion mechanism, which is
unnecessary as the block layer already does it.  Kill ubd_end_request()
and ubd_finish() and replace them with a direct call to
blk_end_request().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo
Cc: Jeff Dike
Signed-off-by: Jens Axboe
---
 arch/um/drivers/ubd_kern.c | 23 +----------------------
 1 file changed, 1 insertion(+), 22 deletions(-)

(limited to 'arch/um')

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index f934225fd8ef..36ca9fa89d05 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -451,23 +451,6 @@ static void do_ubd_request(struct request_queue * q);
 /* Only changed by ubd_init, which is an initcall. */
 static int thread_fd = -1;
 
-static void ubd_end_request(struct request *req, int bytes, int error)
-{
-	blk_end_request(req, error, bytes);
-}
-
-/* Callable only from interrupt context - otherwise you need to do
- * spin_lock_irq()/spin_lock_irqsave() */
-static inline void ubd_finish(struct request *req, int bytes)
-{
-	if(bytes < 0){
-		ubd_end_request(req, 0, -EIO);
-		return;
-	}
-	ubd_end_request(req, bytes, 0);
-}
-
 static LIST_HEAD(restart);
 
 /* XXX - move this inside ubd_intr. */
@@ -475,7 +458,6 @@ static LIST_HEAD(restart);
 static void ubd_handler(void)
 {
 	struct io_thread_req *req;
-	struct request *rq;
 	struct ubd *ubd;
 	struct list_head *list, *next_ele;
 	unsigned long flags;
@@ -492,10 +474,7 @@ static void ubd_handler(void)
 			return;
 		}
 
-		rq = req->req;
-		rq->nr_sectors -= req->length >> 9;
-		if(rq->nr_sectors == 0)
-			ubd_finish(rq, rq->hard_nr_sectors << 9);
+		blk_end_request(req->req, 0, req->length);
 		kfree(req);
 	}
 	reactivate_fd(thread_fd, UBD_IRQ);
--
cgit v1.2.2
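The cleanup above leans on a property of blk_end_request() worth spelling out: it advances the request by the completed byte count on each call and only finishes the request once everything has completed, so the driver-side nr_sectors bookkeeping that ubd_finish() did is redundant. A minimal sketch, not part of the patch (my_complete_chunk() is a hypothetical name):

#include <linux/blkdev.h>

/*
 * Hypothetical helper: blk_end_request() completes nr_bytes of rq and
 * returns false once the whole request is done, true while buffers
 * are still pending, so piecewise completion needs no driver state.
 */
static void my_complete_chunk(struct request *rq, int error,
			      unsigned int nr_bytes)
{
	if (!blk_end_request(rq, error, nr_bytes))
		pr_debug("request fully completed\n");
}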
From f81f2f7c9fee307e371f37424577d46f9eaf8692 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 28 Apr 2009 13:06:10 +0900
Subject: ubd: drop unnecessary rq->sector manipulation

ubd curiously updates rq->sector while issuing the request in multiple
pieces.  Don't do that; simply use a local copy of the sector instead.

[ Impact: cleanup ]

Signed-off-by: Tejun Heo
Cc: Jeff Dike
Signed-off-by: Jens Axboe
---
 arch/um/drivers/ubd_kern.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'arch/um')

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 36ca9fa89d05..433012764a37 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1222,7 +1222,8 @@ static void do_ubd_request(struct request_queue *q)
 {
 	struct io_thread_req *io_req;
 	struct request *req;
-	int n, last_sectors;
+	sector_t sector;
+	int n;
 
 	while(1){
 		struct ubd *dev = q->queuedata;
@@ -1238,11 +1239,10 @@ static void do_ubd_request(struct request_queue *q)
 		}
 
 		req = dev->request;
-		last_sectors = 0;
+		sector = req->sector;
 		while(dev->start_sg < dev->end_sg){
 			struct scatterlist *sg = &dev->sg[dev->start_sg];
 
-			req->sector += last_sectors;
 			io_req = kmalloc(sizeof(struct io_thread_req),
					 GFP_ATOMIC);
 			if(io_req == NULL){
@@ -1251,10 +1251,10 @@ static void do_ubd_request(struct request_queue *q)
 				return;
 			}
 			prepare_request(req, io_req,
-					(unsigned long long) req->sector << 9,
+					(unsigned long long)sector << 9,
 					sg->offset, sg->length, sg_page(sg));
 
-			last_sectors = sg->length >> 9;
+			sector += sg->length >> 9;
 			n = os_write_file(thread_fd, &io_req,
					  sizeof(struct io_thread_req *));
 			if(n != sizeof(struct io_thread_req *)){
--
cgit v1.2.2

From 83096ebf1263b2c1ee5e653ba37d993d02e3eb7b Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Thu, 7 May 2009 22:24:39 +0900
Subject: block: convert to pos and nr_sectors accessors

With recent cleanups, there is no place where a low level driver
directly manipulates request fields.  This means that the 'hard'
request fields always equal the !hard fields.  Convert all
rq->sectors, nr_sectors and current_nr_sectors references to
accessors.

While at it, drop the superfluous blk_rq_pos() < 0 test in swim.c.

[ Impact: use pos and nr_sectors accessors ]

Signed-off-by: Tejun Heo
Acked-by: Geert Uytterhoeven
Tested-by: Grant Likely
Acked-by: Grant Likely
Tested-by: Adrian McMenamin
Acked-by: Adrian McMenamin
Acked-by: Mike Miller
Cc: James Bottomley
Cc: Bartlomiej Zolnierkiewicz
Cc: Borislav Petkov
Cc: Sergei Shtylyov
Cc: Eric Moore
Cc: Alan Stern
Cc: FUJITA Tomonori
Cc: Pete Zaitcev
Cc: Stephen Rothwell
Cc: Paul Clements
Cc: Tim Waugh
Cc: Jeff Garzik
Cc: Jeremy Fitzhardinge
Cc: Alex Dubov
Cc: David Woodhouse
Cc: Martin Schwidefsky
Cc: Dario Ballabio
Cc: David S. Miller
Cc: Rusty Russell
Cc: unsik Kim
Cc: Laurent Vivier
Signed-off-by: Jens Axboe
---
 arch/um/drivers/ubd_kern.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/um')

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 433012764a37..402ba8f70fc9 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1239,7 +1239,7 @@ static void do_ubd_request(struct request_queue *q)
 		}
 
 		req = dev->request;
-		sector = req->sector;
+		sector = blk_rq_pos(req);
 		while(dev->start_sg < dev->end_sg){
 			struct scatterlist *sg = &dev->sg[dev->start_sg];
 
--
cgit v1.2.2
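Taken together, the two patches above change do_ubd_request() from mutating req->sector to carrying a local cursor seeded once from blk_rq_pos(), so the block layer's view of the request position stays authoritative. A sketch of that pattern in isolation, under stated assumptions: struct my_dev, issue_chunk() and my_issue() are hypothetical, and ubd's real loop walks dev->start_sg/dev->end_sg rather than for_each_sg().

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

struct my_dev {				/* hypothetical driver state */
	struct scatterlist sg[32];
	int nsg;
};

static void issue_chunk(struct my_dev *dev, unsigned long long byte_off,
			unsigned int sg_off, unsigned int len,
			struct page *page)
{
	/* elided: would hand one segment to the I/O thread */
}

static void my_issue(struct my_dev *dev, struct request *req)
{
	sector_t sector = blk_rq_pos(req);	/* read once, never written back */
	struct scatterlist *sg;
	int i;

	for_each_sg(dev->sg, sg, dev->nsg, i) {
		issue_chunk(dev, (unsigned long long)sector << 9,
			    sg->offset, sg->length, sg_page(sg));
		sector += sg->length >> 9;	/* advance the local cursor only */
	}
}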
From 9934c8c04561413609d2bc38c6b9f268cba774a4 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Fri, 8 May 2009 11:54:16 +0900
Subject: block: implement and enforce request peek/start/fetch

Till now, the block layer has allowed two separate modes of request
execution.  A request is always acquired from the request queue via
elv_next_request().  After that, drivers are free to either dequeue it
or process it without dequeueing.  Dequeueing allows elv_next_request()
to return the next request so that multiple requests can be in flight.

Executing requests without dequeueing has its merits, mostly in
allowing drivers for simpler devices which can't do sg to deal with
segments only, without considering request boundaries.  However, the
benefit this brings is dubious and declining, while the cost of the
API ambiguity is increasing.  Segment based drivers are usually for
very old or limited devices, and as converting to the dequeueing model
isn't difficult, it doesn't justify the API overhead it puts on the
block layer and its more modern users.

Previous patches converted all block low level drivers to the
dequeueing model.  This patch completes the API transition by...

* renaming elv_next_request() to blk_peek_request()

* renaming blkdev_dequeue_request() to blk_start_request()

* adding blk_fetch_request() which is a combination of peek and start

* disallowing completion of queued (not started) requests

* applying the new API to all LLDs

The renames are for consistency and to break out-of-tree code so that
it's apparent that out-of-tree drivers need updating.

[ Impact: block request issue API cleanup, no functional change ]

Signed-off-by: Tejun Heo
Cc: Rusty Russell
Cc: James Bottomley
Cc: Mike Miller
Cc: unsik Kim
Cc: Paul Clements
Cc: Tim Waugh
Cc: Geert Uytterhoeven
Cc: David S. Miller
Cc: Laurent Vivier
Cc: Jeff Garzik
Cc: Jeremy Fitzhardinge
Cc: Grant Likely
Cc: Adrian McMenamin
Cc: Stephen Rothwell
Cc: Bartlomiej Zolnierkiewicz
Cc: Borislav Petkov
Cc: Sergei Shtylyov
Cc: Alex Dubov
Cc: Pierre Ossman
Cc: David Woodhouse
Cc: Markus Lidel
Cc: Stefan Weinhuber
Cc: Martin Schwidefsky
Cc: Pete Zaitcev
Cc: FUJITA Tomonori
Signed-off-by: Jens Axboe
---
 arch/um/drivers/ubd_kern.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/um')

diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 402ba8f70fc9..aa9e926e13d7 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1228,12 +1228,11 @@ static void do_ubd_request(struct request_queue *q)
 	while(1){
 		struct ubd *dev = q->queuedata;
 		if(dev->end_sg == 0){
-			struct request *req = elv_next_request(q);
+			struct request *req = blk_fetch_request(q);
 			if(req == NULL)
 				return;
 
 			dev->request = req;
-			blkdev_dequeue_request(req);
 			dev->start_sg = 0;
 			dev->end_sg = blk_rq_map_sg(q, req, dev->sg);
 		}
--
cgit v1.2.2
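The commit message describes blk_fetch_request() as the combination of peek and start; a sketch that spells that equivalence out (my_fetch() is a hypothetical stand-in, not a kernel function):

#include <linux/blkdev.h>

/* hypothetical stand-in spelling out the peek + start combination */
static struct request *my_fetch(struct request_queue *q)
{
	struct request *rq = blk_peek_request(q);	/* look at queue head */

	if (rq)
		blk_start_request(rq);	/* dequeue and mark started */
	return rq;	/* same contract as blk_fetch_request(q) */
}

Note that completing a request that was only peeked, never started, is now disallowed, which is what makes the queued/started distinction enforceable against out-of-tree drivers.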