author | Jens Axboe <axboe@fb.com> | 2014-05-20 13:49:02 -0400
committer | Jens Axboe <axboe@fb.com> | 2014-05-20 13:49:02 -0400
commit | e3a2b3f931f59d5284abd13faf8bded726884ffd (patch)
tree | f5426a4745996e95afc2f01f826e846710929dc2 /block/blk-sysfs.c
parent | 64b14519e5913e8d4de9f2e5d9ef59abba3ed83d (diff)
blk-mq: allow changing of queue depth through sysfs
For request_fn based devices, the block layer exports a 'nr_requests'
file through sysfs to allow adjusting the queue depth on the fly.
Currently this returns -EINVAL for blk-mq, since it's not wired up.
Wire this up for blk-mq, so that it now also allows dynamic
adjustment of the allowed queue depth for any given block device
managed by blk-mq.
Signed-off-by: Jens Axboe <axboe@fb.com>
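For context, the knob this commit wires up is exposed per device as /sys/block/<dev>/queue/nr_requests. A minimal userspace sketch for adjusting it could look like the following; the device name "sda" and the depth 128 are illustrative assumptions, the write needs root privileges, and the kernel may reject a depth the driver cannot honour:

```c
/* Illustrative sketch: set the queue depth of an (assumed) device "sda"
 * by writing its nr_requests sysfs attribute. Requires root. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* assumed device; pick any blk-mq or request_fn based disk */
	const char *path = "/sys/block/sda/queue/nr_requests";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	/* Values below BLKDEV_MIN_RQ are clamped by the kernel; depths the
	 * driver cannot support are rejected with an errno. */
	if (fprintf(f, "128\n") < 0) {
		perror("write nr_requests");
		fclose(f);
		return EXIT_FAILURE;
	}
	if (fclose(f) != 0) {
		perror("close nr_requests");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
```

Before this patch, the same write against a blk-mq managed device simply failed with EINVAL.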
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r-- | block/blk-sysfs.c | 45
1 file changed, 9 insertions(+), 36 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 7500f876dae4..4d6811ac13fd 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,11 +48,10 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
 static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
-	struct request_list *rl;
 	unsigned long nr;
-	int ret;
+	int ret, err;
 
-	if (!q->request_fn)
+	if (!q->request_fn && !q->mq_ops)
 		return -EINVAL;
 
 	ret = queue_var_store(&nr, page, count);
@@ -62,40 +61,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	if (nr < BLKDEV_MIN_RQ)
 		nr = BLKDEV_MIN_RQ;
 
-	spin_lock_irq(q->queue_lock);
-	q->nr_requests = nr;
-	blk_queue_congestion_threshold(q);
-
-	/* congestion isn't cgroup aware and follows root blkcg for now */
-	rl = &q->root_rl;
-
-	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_SYNC);
-	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_SYNC);
-
-	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, BLK_RW_ASYNC);
-	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, BLK_RW_ASYNC);
-
-	blk_queue_for_each_rl(rl, q) {
-		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-			blk_set_rl_full(rl, BLK_RW_SYNC);
-		} else {
-			blk_clear_rl_full(rl, BLK_RW_SYNC);
-			wake_up(&rl->wait[BLK_RW_SYNC]);
-		}
-
-		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-			blk_set_rl_full(rl, BLK_RW_ASYNC);
-		} else {
-			blk_clear_rl_full(rl, BLK_RW_ASYNC);
-			wake_up(&rl->wait[BLK_RW_ASYNC]);
-		}
-	}
+	if (q->request_fn)
+		err = blk_update_nr_requests(q, nr);
+	else
+		err = blk_mq_update_nr_requests(q, nr);
+
+	if (err)
+		return err;
 
-	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
 
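Taken together, the hunks replace the inline request_list and congestion bookkeeping with a dispatch on queue type. The standalone sketch below only illustrates that dispatch pattern with simplified stand-in types and helpers; it is not the kernel code, and the real helpers, blk_update_nr_requests() and blk_mq_update_nr_requests(), are introduced elsewhere in this commit.

```c
/* Standalone illustration (not kernel code) of the dispatch the patch
 * introduces: one sysfs store path, two depth-update helpers chosen by
 * queue type. All types and helpers here are simplified stand-ins. */
#include <stdio.h>
#include <errno.h>

struct mock_queue {
	int has_request_fn;	/* stand-in for q->request_fn != NULL */
	int has_mq_ops;		/* stand-in for q->mq_ops != NULL */
	unsigned long depth;
};

static int update_legacy_depth(struct mock_queue *q, unsigned long nr)
{
	q->depth = nr;		/* the real helper also rebalances congestion state */
	return 0;
}

static int update_mq_depth(struct mock_queue *q, unsigned long nr)
{
	if (nr > 256)		/* pretend the driver's tag space caps out here */
		return -EINVAL;
	q->depth = nr;
	return 0;
}

static int store_nr_requests(struct mock_queue *q, unsigned long nr)
{
	if (!q->has_request_fn && !q->has_mq_ops)
		return -EINVAL;	/* neither path can honour the request */
	return q->has_request_fn ? update_legacy_depth(q, nr)
				 : update_mq_depth(q, nr);
}

int main(void)
{
	struct mock_queue q = { .has_mq_ops = 1, .depth = 64 };

	printf("store 128  -> %d, depth now %lu\n",
	       store_nr_requests(&q, 128), q.depth);
	printf("store 1024 -> %d, depth still %lu\n",
	       store_nr_requests(&q, 1024), q.depth);
	return 0;
}
```

Keeping the error path explicit means the sysfs write reports the helper's errno instead of silently accepting a depth the underlying driver cannot honour.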