author     Tejun Heo <tj@kernel.org>       2012-06-04 23:40:59 -0400
committer  Jens Axboe <axboe@kernel.dk>    2012-06-25 05:53:52 -0400
commit     5b788ce3e2acac9bf109743b1281d77347cf2101
tree       907477e42d27bec9a2060fcc709402b7636390c9
parent     8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d
block: prepare for multiple request_lists
Request allocation is about to be made per-blkg, meaning that there'll be
multiple request lists.

* Make queue full state per request_list.  blk_*queue_full() functions are
  renamed to blk_*rl_full() and take @rl instead of @q.

* Rename blk_init_free_list() to blk_init_rl() and make it take @rl instead
  of @q.  Also add a @gfp_mask parameter.

* Add blk_exit_rl() instead of destroying rl directly from
  blk_release_queue().

* Add request_list->q and make the request alloc/free functions -
  blk_free_request(), [__]freed_request(), __get_request() - take @rl
  instead of @q.

This patch doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
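For orientation, a minimal caller-side sketch of the interface as it looks after this patch. The example_* wrappers are illustrative only and not part of the patch; blk_init_rl(), blk_exit_rl(), and the queue's embedded &q->rq list are taken from the changes below.

/* Illustrative sketch only -- not part of the patch. */
static int example_setup_rl(struct request_queue *q)
{
	/*
	 * q->rq is still the only request_list; it now records its
	 * owning queue (rl->q) and the caller supplies the gfp_mask.
	 */
	if (blk_init_rl(&q->rq, q, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

static void example_teardown_rl(struct request_queue *q)
{
	/* replaces the open-coded mempool_destroy() in blk_release_queue() */
	blk_exit_rl(&q->rq);
}

Passing @rl rather than @q keeps the allocation and accounting helpers from assuming a single per-queue list, which is what the upcoming per-blkg request_lists rely on.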
Diffstat (limited to 'block')
-rw-r--r--   block/blk-core.c    56
-rw-r--r--   block/blk-sysfs.c   12
-rw-r--r--   block/blk.h          3
3 files changed, 39 insertions(+), 32 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index a2648153691f..f392a2edf462 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -517,13 +517,13 @@ void blk_cleanup_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
 
-static int blk_init_free_list(struct request_queue *q)
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+		gfp_t gfp_mask)
 {
-	struct request_list *rl = &q->rq;
-
 	if (unlikely(rl->rq_pool))
 		return 0;
 
+	rl->q = q;
 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
@@ -531,13 +531,19 @@ static int blk_init_free_list(struct request_queue *q)
 
 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 					  mempool_free_slab, request_cachep,
-					  GFP_KERNEL, q->node);
+					  gfp_mask, q->node);
 	if (!rl->rq_pool)
 		return -ENOMEM;
 
 	return 0;
 }
 
+void blk_exit_rl(struct request_list *rl)
+{
+	if (rl->rq_pool)
+		mempool_destroy(rl->rq_pool);
+}
+
 struct request_queue *blk_alloc_queue(gfp_t gfp_mask)
 {
 	return blk_alloc_queue_node(gfp_mask, -1);
@@ -679,7 +685,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
 	if (!q)
 		return NULL;
 
-	if (blk_init_free_list(q))
+	if (blk_init_rl(&q->rq, q, GFP_KERNEL))
 		return NULL;
 
 	q->request_fn		= rfn;
@@ -721,15 +727,15 @@ bool blk_get_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_get_queue);
 
-static inline void blk_free_request(struct request_queue *q, struct request *rq)
+static inline void blk_free_request(struct request_list *rl, struct request *rq)
 {
 	if (rq->cmd_flags & REQ_ELVPRIV) {
-		elv_put_request(q, rq);
+		elv_put_request(rl->q, rq);
 		if (rq->elv.icq)
 			put_io_context(rq->elv.icq->ioc);
 	}
 
-	mempool_free(rq, q->rq.rq_pool);
+	mempool_free(rq, rl->rq_pool);
 }
 
 /*
@@ -766,9 +772,9 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 	ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int sync)
+static void __freed_request(struct request_list *rl, int sync)
 {
-	struct request_list *rl = &q->rq;
+	struct request_queue *q = rl->q;
 
 	if (rl->count[sync] < queue_congestion_off_threshold(q))
 		blk_clear_queue_congested(q, sync);
@@ -777,7 +783,7 @@ static void __freed_request(struct request_queue *q, int sync)
 		if (waitqueue_active(&rl->wait[sync]))
 			wake_up(&rl->wait[sync]);
 
-		blk_clear_queue_full(q, sync);
+		blk_clear_rl_full(rl, sync);
 	}
 }
 
@@ -785,9 +791,9 @@ static void __freed_request(struct request_queue *q, int sync)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, unsigned int flags)
+static void freed_request(struct request_list *rl, unsigned int flags)
 {
-	struct request_list *rl = &q->rq;
+	struct request_queue *q = rl->q;
 	int sync = rw_is_sync(flags);
 
 	q->nr_rqs[sync]--;
@@ -795,10 +801,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
 	if (flags & REQ_ELVPRIV)
 		q->nr_rqs_elvpriv--;
 
-	__freed_request(q, sync);
+	__freed_request(rl, sync);
 
 	if (unlikely(rl->starved[sync ^ 1]))
-		__freed_request(q, sync ^ 1);
+		__freed_request(rl, sync ^ 1);
 }
 
 /*
@@ -838,7 +844,7 @@ static struct io_context *rq_ioc(struct bio *bio)
 
 /**
  * __get_request - get a free request
- * @q: request_queue to allocate request from
+ * @rl: request list to allocate from
  * @rw_flags: RW and SYNC flags
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
@@ -850,11 +856,11 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns %NULL on failure, with @q->queue_lock held.
  * Returns !%NULL on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_queue *q, int rw_flags,
+static struct request *__get_request(struct request_list *rl, int rw_flags,
 				     struct bio *bio, gfp_t gfp_mask)
 {
+	struct request_queue *q = rl->q;
 	struct request *rq;
-	struct request_list *rl = &q->rq;
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
@@ -876,9 +882,9 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 			 * This process will be allowed to complete a batch of
 			 * requests, others will be blocked.
 			 */
-			if (!blk_queue_full(q, is_sync)) {
+			if (!blk_rl_full(rl, is_sync)) {
 				ioc_set_batching(q, ioc);
-				blk_set_queue_full(q, is_sync);
+				blk_set_rl_full(rl, is_sync);
 			} else {
 				if (may_queue != ELV_MQUEUE_MUST
 				    && !ioc_batching(q, ioc)) {
@@ -928,7 +934,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate and init request */
-	rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+	rq = mempool_alloc(rl->rq_pool, gfp_mask);
 	if (!rq)
 		goto fail_alloc;
 
@@ -992,7 +998,7 @@ fail_alloc:
 	 * queue, but this is pretty rare.
 	 */
 	spin_lock_irq(q->queue_lock);
-	freed_request(q, rw_flags);
+	freed_request(rl, rw_flags);
 
 	/*
 	 * in the very unlikely event that allocation failed and no
@@ -1029,7 +1035,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request_list *rl = &q->rq;
 	struct request *rq;
 retry:
-	rq = __get_request(q, rw_flags, bio, gfp_mask);
+	rq = __get_request(&q->rq, rw_flags, bio, gfp_mask);
 	if (rq)
 		return rq;
 
@@ -1229,8 +1235,8 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
-		blk_free_request(q, req);
-		freed_request(q, flags);
+		blk_free_request(&q->rq, req);
+		freed_request(&q->rq, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index aa41b47c22d2..234ce7c082fa 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -66,16 +66,16 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 		blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
 	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_SYNC);
+		blk_set_rl_full(rl, BLK_RW_SYNC);
 	} else {
-		blk_clear_queue_full(q, BLK_RW_SYNC);
+		blk_clear_rl_full(rl, BLK_RW_SYNC);
 		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
 	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_ASYNC);
+		blk_set_rl_full(rl, BLK_RW_ASYNC);
 	} else {
-		blk_clear_queue_full(q, BLK_RW_ASYNC);
+		blk_clear_rl_full(rl, BLK_RW_ASYNC);
 		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
 	spin_unlock_irq(q->queue_lock);
@@ -476,7 +476,6 @@ static void blk_release_queue(struct kobject *kobj)
 {
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
-	struct request_list *rl = &q->rq;
 
 	blk_sync_queue(q);
 
@@ -489,8 +488,7 @@ static void blk_release_queue(struct kobject *kobj)
 		elevator_exit(q->elevator);
 	}
 
-	if (rl->rq_pool)
-		mempool_destroy(rl->rq_pool);
+	blk_exit_rl(&q->rq);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
diff --git a/block/blk.h b/block/blk.h
index 85f6ae42f7d3..a134231fd22a 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -18,6 +18,9 @@ static inline void __blk_get_queue(struct request_queue *q)
 	kobject_get(&q->kobj);
 }
 
+int blk_init_rl(struct request_list *rl, struct request_queue *q,
+		gfp_t gfp_mask);
+void blk_exit_rl(struct request_list *rl);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio);
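The blk_*rl_full() helpers used in the hunks above are declared outside block/, so they do not appear in this 'block'-limited diff. A plausible sketch of their shape, assuming they mirror the old blk_*queue_full() bit helpers but keep the full state in the request_list itself; the rl->flags field and the BLK_RL_*FULL flag names are assumptions, not taken from this diff.

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	/* assumed flag names; per-direction "full" bits kept in the rl */
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}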