 block/blk-tag.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 8a99688eb1b1..c0d419e84ce7 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -337,6 +337,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned max_depth, offset;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -350,10 +351,19 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	/*
 	 * Protect against shared tag maps, as we may not have exclusive
 	 * access to the tag map.
+	 *
+	 * We reserve a few tags just for sync IO, since we don't want
+	 * to starve sync IO on behalf of flooding async IO.
 	 */
+	max_depth = bqt->max_depth;
+	if (rq_is_sync(rq))
+		offset = 0;
+	else
+		offset = max_depth >> 2;
+
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-		if (tag >= bqt->max_depth)
+		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		if (tag >= max_depth)
 			return 1;
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
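
The reservation works purely through the scan start offset: sync requests search the
tag bitmap from bit 0, while async requests start at max_depth >> 2, so the lowest
quarter of the tag space can only ever be handed to sync IO. The userspace sketch
below illustrates that effect; it is an illustration only, not the kernel API (the
plain bool array and scan loop stand in for the kernel's find_next_zero_bit() and
test_and_set_bit_lock(), and alloc_tag()/MAX_DEPTH are made-up names).

/*
 * Sketch of the tag-reservation scheme: async allocations start
 * scanning at MAX_DEPTH / 4, leaving the first quarter of the tag
 * space for sync IO. Single-threaded, so no atomic bit ops needed.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_DEPTH 32

static bool tag_map[MAX_DEPTH];		/* true = tag in use */

/* Return the first free tag at or above the sync/async offset, or -1. */
static int alloc_tag(bool sync)
{
	unsigned offset = sync ? 0 : MAX_DEPTH >> 2;
	unsigned tag;

	for (tag = offset; tag < MAX_DEPTH; tag++) {
		if (!tag_map[tag]) {
			tag_map[tag] = true;
			return tag;
		}
	}
	return -1;	/* tag space above the reservation is exhausted */
}

int main(void)
{
	/* Flood the map with async allocations until they fail... */
	while (alloc_tag(false) >= 0)
		;

	/* ...and sync IO still gets one of the reserved low tags (tag 0). */
	printf("sync tag after async flood: %d\n", alloc_tag(true));
	return 0;
}

With MAX_DEPTH of 32, the async flood occupies tags 8..31 and then fails, while the
sync allocation still succeeds with tag 0, which is exactly the starvation the
commit message describes preventing.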