author	Jens Axboe <jens.axboe@oracle.com>	2009-04-06 08:48:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 11:04:53 -0400
commit	1faa16d22877f4839bd433547d770c676d1d964c (patch)
tree	9a0d50be1ef0358c1f53d7107413100904e7d526 /block/blk-sysfs.c
parent	0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff)
block: change the request allocation/congestion logic to be sync/async based
This makes sure that we never wait on async IO for sync requests, instead of doing the split on writes vs reads.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
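
The change keys the request lists and their congestion/full state by whether the I/O is synchronous, rather than by data direction. The standalone C sketch below (not part of the commit) models that classification; the flag names RW_WRITE/RW_SYNC and the helpers are made-up stand-ins, not the kernel's actual definitions.

/* Illustrative sketch only: models the sync/async split this commit
 * introduces, using hypothetical flag names rather than the kernel's. */
#include <stdio.h>
#include <stdbool.h>

enum { BLK_RW_SYNC = 0, BLK_RW_ASYNC = 1 };	/* ordering is hypothetical */

#define RW_WRITE	(1u << 0)	/* request is a write */
#define RW_SYNC		(1u << 1)	/* submitter waits on completion */

/* Reads are always treated as sync; writes count as sync only when the
 * submitter marked them synchronous (O_SYNC/fsync-style). Everything
 * else is async. */
static bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & RW_WRITE) || (rw_flags & RW_SYNC);
}

static int rw_queue_index(unsigned int rw_flags)
{
	return rw_is_sync(rw_flags) ? BLK_RW_SYNC : BLK_RW_ASYNC;
}

int main(void)
{
	printf("read        -> list %d\n", rw_queue_index(0));
	printf("async write -> list %d\n", rw_queue_index(RW_WRITE));
	printf("sync write  -> list %d\n", rw_queue_index(RW_WRITE | RW_SYNC));
	return 0;
}

With that split, a process issuing synchronous I/O never has to wait behind a congested pool of async (background write) requests, which is the behaviour the commit message describes.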
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r--	block/blk-sysfs.c	40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e29ddfc73cf..3ff9bba3379 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
-	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, READ);
-	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, READ);
+	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_SYNC);
+	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_SYNC);
 
-	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, WRITE);
-	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, WRITE);
+	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_ASYNC);
+	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
-	if (rl->count[READ] >= q->nr_requests) {
-		blk_set_queue_full(q, READ);
-	} else if (rl->count[READ]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, READ);
-		wake_up(&rl->wait[READ]);
+	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+		blk_set_queue_full(q, BLK_RW_SYNC);
+	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, BLK_RW_SYNC);
+		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
-	if (rl->count[WRITE] >= q->nr_requests) {
-		blk_set_queue_full(q, WRITE);
-	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, WRITE);
-		wake_up(&rl->wait[WRITE]);
+	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+		blk_set_queue_full(q, BLK_RW_ASYNC);
+	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, BLK_RW_ASYNC);
+		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
 	spin_unlock_irq(q->queue_lock);
 	return ret;
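
queue_requests_store() is the store handler behind the writable queue/nr_requests sysfs attribute, so writes to that file are what reach the code above. A minimal userspace sketch (plain POSIX; the device name "sda" is only an example):

/* Userspace-side sketch: update nr_requests for a block device and read
 * it back. Adjust the device path for your system. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/block/sda/queue/nr_requests";
	char buf[32];
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* This write is handled by queue_requests_store() in blk-sysfs.c. */
	if (write(fd, "256\n", 4) < 0)
		perror("write");

	lseek(fd, 0, SEEK_SET);
	ssize_t n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("nr_requests = %s", buf);
	}
	close(fd);
	return 0;
}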