aboutsummaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
authorJens Axboe <jens.axboe@oracle.com>2008-09-25 05:42:41 -0400
committerJens Axboe <jens.axboe@oracle.com>2008-10-09 02:56:19 -0400
commite3ba9ae58a5599226e3976b29c8093041ae7c332 (patch)
tree5e8a7c0c1b675e19fa6209489479fe9b799a1b3e /block
parentf7d7b7a7a3db6526a84ea755c1c54a051e9a52de (diff)
block: reserve some tags just for sync IO
By only allowing async IO to consume 3/4ths of the tag depth, we always have slots free to serve sync IO. This is important to avoid having writes fill the entire tag queue, thus starving reads.

Original patch and idea from Linus Torvalds <torvalds@linux-foundation.org>

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r--block/blk-tag.c14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 8a99688eb1b1..c0d419e84ce7 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned max_depth, offset;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	/*
 	 * Protect against shared tag maps, as we may not have exclusive
 	 * access to the tag map.
+	 *
+	 * We reserve a few tags just for sync IO, since we don't want
+	 * to starve sync IO on behalf of flooding async IO.
 	 */
+	max_depth = bqt->max_depth;
+	if (rq_is_sync(rq))
+		offset = 0;
+	else
+		offset = max_depth >> 2;
+
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-		if (tag >= bqt->max_depth)
+		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		if (tag >= max_depth)
 			return 1;
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));