| author | Jens Axboe <axboe@fb.com> | 2014-05-20 13:49:02 -0400 |
| --- | --- | --- |
| committer | Jens Axboe <axboe@fb.com> | 2014-05-20 13:49:02 -0400 |
| commit | e3a2b3f931f59d5284abd13faf8bded726884ffd (patch) | |
| tree | f5426a4745996e95afc2f01f826e846710929dc2 /block/blk-core.c | |
| parent | 64b14519e5913e8d4de9f2e5d9ef59abba3ed83d (diff) | |
blk-mq: allow changing of queue depth through sysfs
For request_fn based devices, the block layer exports a 'nr_requests'
file through sysfs to allow adjusting of queue depth on the fly.
Currently this returns -EINVAL for blk-mq, since it's not wired up.
Wire this up for blk-mq, so that it now also allows dynamic
adjustments of the allowed queue depth for any given block device
managed by blk-mq.
Signed-off-by: Jens Axboe <axboe@fb.com>
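As context for the commit message above: the 'nr_requests' attribute lives under each device's queue directory in sysfs, so adjusting the depth on the fly is just a write to that file. A minimal, hedged userspace sketch follows; the device name and depth are arbitrary examples, not taken from the commit.

```c
/*
 * Hedged illustration (not part of the commit): adjust a block device's
 * queue depth at runtime via the nr_requests sysfs file that this patch
 * wires up for blk-mq devices as well.  Device and depth are examples.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
        const char *dev = argc > 1 ? argv[1] : "sda";   /* example device */
        const char *depth = argc > 2 ? argv[2] : "256"; /* example depth  */
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), "/sys/block/%s/queue/nr_requests", dev);

        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return 1;
        }

        /* On kernels before this patch, blk-mq devices reject the write. */
        if (fprintf(f, "%s\n", depth) < 0) {
                perror("write nr_requests");
                fclose(f);
                return 1;
        }
        if (fclose(f) != 0) {
                perror("close nr_requests");
                return 1;
        }

        printf("set %s to %s\n", path, depth);
        return 0;
}
```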
Diffstat (limited to 'block/blk-core.c')
| -rw-r--r-- | block/blk-core.c | 41 |
1 file changed, 41 insertions, 0 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index a6bd3e702201..fe81e19099a1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -848,6 +848,47 @@ static void freed_request(struct request_list *rl, unsigned int flags)
         __freed_request(rl, sync ^ 1);
 }
 
+int blk_update_nr_requests(struct request_queue *q, unsigned int nr)
+{
+        struct request_list *rl;
+
+        spin_lock_irq(q->queue_lock);
+        q->nr_requests = nr;
+        blk_queue_congestion_threshold(q);
+
+        /* congestion isn't cgroup aware and follows root blkcg for now */
+        rl = &q->root_rl;
+
+        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+                blk_set_queue_congested(q, BLK_RW_SYNC);
+        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+                blk_clear_queue_congested(q, BLK_RW_SYNC);
+
+        if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+                blk_set_queue_congested(q, BLK_RW_ASYNC);
+        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+                blk_clear_queue_congested(q, BLK_RW_ASYNC);
+
+        blk_queue_for_each_rl(rl, q) {
+                if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+                        blk_set_rl_full(rl, BLK_RW_SYNC);
+                } else {
+                        blk_clear_rl_full(rl, BLK_RW_SYNC);
+                        wake_up(&rl->wait[BLK_RW_SYNC]);
+                }
+
+                if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+                        blk_set_rl_full(rl, BLK_RW_ASYNC);
+                } else {
+                        blk_clear_rl_full(rl, BLK_RW_ASYNC);
+                        wake_up(&rl->wait[BLK_RW_ASYNC]);
+                }
+        }
+
+        spin_unlock_irq(q->queue_lock);
+        return 0;
+}
+
 /*
  * Determine if elevator data should be initialized when allocating the
  * request associated with @bio.
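This diff is limited to block/blk-core.c, so the sysfs path that reaches the new blk_update_nr_requests() is not shown here. Below is a hedged sketch of how the queue_requests_store() hook in block/blk-sysfs.c could route a write to 'nr_requests' after this series; the blk-mq helper name blk_mq_update_nr_requests() is an assumption about the rest of the series, not confirmed by this diff.

```c
/*
 * Hedged sketch only -- a fragment assuming the includes and helpers of
 * block/blk-sysfs.c (queue_var_store(), BLKDEV_MIN_RQ).  The blk-mq side
 * helper name (blk_mq_update_nr_requests) is an assumption, not taken
 * from the diff above.
 */
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        /* Neither a request_fn queue nor a blk-mq queue: nothing to resize. */
        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        /* Clamp to the minimum depth the block layer supports. */
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);    /* legacy path, shown above */
        else
                err = blk_mq_update_nr_requests(q, nr); /* blk-mq path, assumed */

        if (err)
                return err;

        return ret;
}
```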