author		Nick Piggin <nickpiggin@yahoo.com.au>	2005-06-28 23:45:13 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-29 00:20:34 -0400
commit		450991bc1026135ee30482a4a806d069915ab2f6
tree		186359995e27df92fd6539ad0a0657df8e79322e /drivers/block/ll_rw_blk.c
parent		69f63c5c34d0b34ee2cbf10c5ff7fcff0404879e
[PATCH] blk: __make_request efficiency
In the case where the request is not able to be merged by the elevator, don't
retake the lock and retry the merge mechanism after allocating a new request.
Instead assume that the chance of a merge remains slim, and now that we've done
most of the work allocating a request we may as well just go with it.

Also be rid of the GFP_ATOMIC allocation: we've got working mempools for the
block layer now, so let's save atomic memory for things like networking.

Lastly, in get_request_wait, do an initial get_request call before going into
the waitqueue.  This is reported to help efficiency.

Signed-off-by: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
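For illustration only, below is a minimal userspace C sketch of the control-flow
change to get_request_wait() described above: make one optimistic allocation
attempt up front, and only fall into the sleep-and-retry loop if that attempt
fails.  The helpers try_alloc_request() and wait_for_free_slot(), and the main()
driver, are hypothetical stand-ins for get_request()/the request-list waitqueue,
not real kernel APIs; the real function also handles io_context accounting and
waitqueue setup inside the loop, which the sketch omits.

#include <stdio.h>
#include <stdlib.h>

struct request { int id; };

/* Hypothetical non-blocking allocator: may fail and return NULL,
 * standing in for get_request(q, rw, bio, GFP_NOIO). */
static struct request *try_alloc_request(void)
{
	static int attempts;

	if (attempts++ < 2)
		return NULL;		/* simulate an exhausted free list */
	return malloc(sizeof(struct request));
}

/* Stand-in for prepare_to_wait_exclusive()/io_schedule(): a real
 * implementation would sleep here until a request is freed. */
static void wait_for_free_slot(void)
{
}

/* Shape of the patched get_request_wait(): one optimistic attempt
 * before entering the wait loop, instead of the old do/while that
 * always set up the waitqueue first. */
static struct request *get_request_wait_sketch(void)
{
	struct request *rq = try_alloc_request();

	while (!rq) {
		wait_for_free_slot();
		rq = try_alloc_request();
	}
	return rq;			/* never NULL on return */
}

int main(void)
{
	struct request *rq = get_request_wait_sketch();

	printf("got request %p\n", (void *)rq);
	free(rq);
	return 0;
}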
Diffstat (limited to 'drivers/block/ll_rw_blk.c')
-rw-r--r--	drivers/block/ll_rw_blk.c	62
1 files changed, 21 insertions, 41 deletions
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 6c98cf042714..67431f280154 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1971,10 +1971,11 @@ out:
 static struct request *get_request_wait(request_queue_t *q, int rw,
 					struct bio *bio)
 {
-	DEFINE_WAIT(wait);
 	struct request *rq;
 
-	do {
+	rq = get_request(q, rw, bio, GFP_NOIO);
+	while (!rq) {
+		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
@@ -1999,7 +2000,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 			put_io_context(ioc);
 		}
 		finish_wait(&rl->wait[rw], &wait);
-	} while (!rq);
+	}
 
 	return rq;
 }
@@ -2521,7 +2522,7 @@ EXPORT_SYMBOL(blk_attempt_remerge);
 
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
-	struct request *req, *freereq = NULL;
+	struct request *req;
 	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
 	unsigned short prio;
 	sector_t sector;
@@ -2549,14 +2550,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		goto end_io;
 	}
 
-again:
 	spin_lock_irq(q->queue_lock);
 
-	if (elv_queue_empty(q)) {
-		blk_plug_device(q);
-		goto get_rq;
-	}
-	if (barrier)
+	if (unlikely(barrier) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -2601,40 +2597,23 @@ again:
 			elv_merged_request(q, req);
 			goto out;
 
-		/*
-		 * elevator says don't/can't merge. get new request
-		 */
-		case ELEVATOR_NO_MERGE:
-			break;
-
+		/* ELV_NO_MERGE: elevator says don't/can't merge. */
 		default:
-			printk("elevator returned crap (%d)\n", el_ret);
-			BUG();
+			;
 	}
 
+get_rq:
 	/*
-	 * Grab a free request from the freelist - if that is empty, check
-	 * if we are doing read ahead and abort instead of blocking for
-	 * a free slot.
+	 * Grab a free request. This is might sleep but can not fail.
+	 */
+	spin_unlock_irq(q->queue_lock);
+	req = get_request_wait(q, rw, bio);
+	/*
+	 * After dropping the lock and possibly sleeping here, our request
+	 * may now be mergeable after it had proven unmergeable (above).
+	 * We don't worry about that case for efficiency. It won't happen
+	 * often, and the elevators are able to handle it.
 	 */
-get_rq:
-	if (freereq) {
-		req = freereq;
-		freereq = NULL;
-	} else {
-		spin_unlock_irq(q->queue_lock);
-		if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
-			/*
-			 * READA bit set
-			 */
-			err = -EWOULDBLOCK;
-			if (bio_rw_ahead(bio))
-				goto end_io;
-
-			freereq = get_request_wait(q, rw, bio);
-		}
-		goto again;
-	}
 
 	req->flags |= REQ_CMD;
 
@@ -2663,10 +2642,11 @@ get_rq:
 	req->rq_disk = bio->bi_bdev->bd_disk;
 	req->start_time = jiffies;
 
+	spin_lock_irq(q->queue_lock);
+	if (elv_queue_empty(q))
+		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (freereq)
-		__blk_put_request(q, freereq);
 	if (sync)
 		__generic_unplug_device(q);
 