Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--	block/blk-sysfs.c | 34 +++++++++++++++++++---------------
1 file changed, 19 insertions(+), 15 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index aa41b47c22d2..9628b291f960 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -40,7 +40,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
 static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
-	struct request_list *rl = &q->rq;
+	struct request_list *rl;
 	unsigned long nr;
 	int ret;
 
@@ -55,6 +55,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
+	/* congestion isn't cgroup aware and follows root blkcg for now */
+	rl = &q->root_rl;
+
 	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
 		blk_set_queue_congested(q, BLK_RW_SYNC);
 	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
@@ -65,19 +68,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
 		blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
-	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_SYNC);
-	} else {
-		blk_clear_queue_full(q, BLK_RW_SYNC);
-		wake_up(&rl->wait[BLK_RW_SYNC]);
+	blk_queue_for_each_rl(rl, q) {
+		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+			blk_set_rl_full(rl, BLK_RW_SYNC);
+		} else {
+			blk_clear_rl_full(rl, BLK_RW_SYNC);
+			wake_up(&rl->wait[BLK_RW_SYNC]);
+		}
+
+		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+			blk_set_rl_full(rl, BLK_RW_ASYNC);
+		} else {
+			blk_clear_rl_full(rl, BLK_RW_ASYNC);
+			wake_up(&rl->wait[BLK_RW_ASYNC]);
+		}
 	}
 
-	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-		blk_set_queue_full(q, BLK_RW_ASYNC);
-	} else {
-		blk_clear_queue_full(q, BLK_RW_ASYNC);
-		wake_up(&rl->wait[BLK_RW_ASYNC]);
-	}
 	spin_unlock_irq(q->queue_lock);
 	return ret;
 }
@@ -476,7 +482,6 @@ static void blk_release_queue(struct kobject *kobj)
 {
 	struct request_queue *q =
 		container_of(kobj, struct request_queue, kobj);
-	struct request_list *rl = &q->rq;
 
 	blk_sync_queue(q);
 
@@ -489,8 +494,7 @@ static void blk_release_queue(struct kobject *kobj)
 		elevator_exit(q->elevator);
 	}
 
-	if (rl->rq_pool)
-		mempool_destroy(rl->rq_pool);
+	blk_exit_rl(&q->root_rl);
 
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
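
Note: the helpers this patch switches to (blk_queue_for_each_rl(), blk_set_rl_full(), blk_clear_rl_full(), blk_exit_rl()) are introduced earlier in the same per-blkcg request_list series. A minimal sketch of their presumable shape, for reading this diff in isolation; the BLK_RL_*FULL flag names, __blk_queue_next_rl(), and the exact definitions below are assumptions, not part of this patch:

/*
 * Sketch: visit q->root_rl first, then any per-blkg request_lists
 * (__blk_queue_next_rl() is assumed to perform the blkg walk).
 */
#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

/* Sketch: per-request_list full flags replacing the queue-wide ones. */
static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	rl->flags |= sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	rl->flags &= ~(sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL);
}

With per-request_list state, queue_requests_store() has to redo the full/wakeup bookkeeping on every request_list hanging off the queue, while the congestion thresholds (per the comment added above) intentionally keep tracking only q->root_rl. Likewise, blk_exit_rl() bundles the mempool_destroy() teardown that blk_release_queue() used to open-code.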