author	Jens Axboe <jens.axboe@oracle.com>	2009-05-20 02:54:31 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-20 02:54:31 -0400
commit	0a7ae2ff0d29bb3b327edff4c8ab67b3834fa811 (patch)
tree	88309ac99a39e15b9b23525a00a15b1564957b32
parent	ac36552a52a6ec8563ac0a109e2a0935673f4abb (diff)
block: change the tag sync vs async restriction logic
Make them fully share the tag space, but disallow async requests from using the last two slots.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
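To make the policy concrete, here is a minimal standalone sketch of the allocation rule (plain userspace C with simplified stand-in names, not the actual kernel code in block/blk-tag.c below): sync requests may search the full tag space, while async requests are clamped to the first max_depth - 2 tags (never fewer than one) and are refused outright once that many async requests are already in flight.

#include <stdbool.h>

/* Simplified stand-in for the queue state consulted by the patched logic. */
struct tag_demo_queue {
	unsigned int max_depth;       /* total number of tag slots */
	unsigned int async_in_flight; /* q->in_flight[0] in the patch */
};

/* Depth an async request may search: reserve the last two slots for
 * sync IO, but never drop below a single usable slot. */
static unsigned int async_tag_depth(unsigned int max_depth)
{
	unsigned int depth;

	if (max_depth <= 1)
		return max_depth;
	depth = max_depth - 2;
	return depth ? depth : 1;
}

/* true if a request may try to grab a tag at all (sync always may). */
static bool may_start_tag(const struct tag_demo_queue *q, bool is_sync)
{
	if (is_sync)
		return true;
	return q->async_in_flight <= async_tag_depth(q->max_depth);
}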
-rw-r--r--	block/blk-barrier.c	2
-rw-r--r--	block/blk-core.c	2
-rw-r--r--	block/blk-tag.c	15
-rw-r--r--	block/elevator.c	8
-rw-r--r--	include/linux/blkdev.h	7
5 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0ab81a0a7502..0d98054cdbd7 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -218,7 +218,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	} else
 		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
 		rq = NULL;
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
diff --git a/block/blk-core.c b/block/blk-core.c
index 49065075d462..1c7484038829 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1815,7 +1815,7 @@ void blk_dequeue_request(struct request *rq)
 	 * the driver side.
 	 */
 	if (blk_account_rq(rq))
-		q->in_flight++;
+		q->in_flight[rq_is_sync(rq)]++;
 }
 
 /**
diff --git a/block/blk-tag.c b/block/blk-tag.c
index c260f7c30dda..2e5cfeb59333 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
-	unsigned max_depth, offset;
+	unsigned max_depth;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -355,13 +355,16 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * to starve sync IO on behalf of flooding async IO.
 	 */
 	max_depth = bqt->max_depth;
-	if (rq_is_sync(rq))
-		offset = 0;
-	else
-		offset = max_depth >> 2;
+	if (!rq_is_sync(rq) && max_depth > 1) {
+		max_depth -= 2;
+		if (!max_depth)
+			max_depth = 1;
+		if (q->in_flight[0] > max_depth)
+			return 1;
+	}
 
 	do {
-		tag = find_next_zero_bit(bqt->tag_map, max_depth, offset);
+		tag = find_first_zero_bit(bqt->tag_map, max_depth);
 		if (tag >= max_depth)
 			return 1;
 
diff --git a/block/elevator.c b/block/elevator.c
index 918920056e42..ebee948293eb 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -546,7 +546,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	 * in_flight count again
 	 */
 	if (blk_account_rq(rq)) {
-		q->in_flight--;
+		q->in_flight[rq_is_sync(rq)]--;
 		if (blk_sorted_rq(rq))
 			elv_deactivate_rq(q, rq);
 	}
@@ -685,7 +685,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
 	if (unplug_it && blk_queue_plugged(q)) {
 		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
-			  - q->in_flight;
+			  - queue_in_flight(q);
 
 		if (nrq >= q->unplug_thresh)
 			__generic_unplug_device(q);
@@ -823,7 +823,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 * request is released from the driver, io must be done
 	 */
 	if (blk_account_rq(rq)) {
-		q->in_flight--;
+		q->in_flight[rq_is_sync(rq)]--;
 		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
 			e->ops->elevator_completed_req_fn(q, rq);
 	}
@@ -838,7 +838,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		if (!list_empty(&q->queue_head))
 			next = list_entry_rq(q->queue_head.next);
 
-		if (!q->in_flight &&
+		if (!queue_in_flight(q) &&
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 564445be7a6d..a967dd775dbd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -404,7 +404,7 @@ struct request_queue
 	struct list_head	tag_busy_list;
 
 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
@@ -511,6 +511,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
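As a usage note, here is a standalone sketch (plain userspace C with stand-in names, not the kernel structures) of the split accounting this patch introduces: dispatch and completion bump the counter for the matching sync/async class, while drain and unplug checks sum both classes through a queue_in_flight()-style helper.

#include <assert.h>
#include <stdbool.h>

/* Stand-in for the two-element counter added to struct request_queue:
 * index 0 counts async requests, index 1 counts sync requests. */
struct demo_queue {
	unsigned int in_flight[2];
};

/* Mirrors q->in_flight[rq_is_sync(rq)]++ when a request is dispatched. */
static void demo_dispatch(struct demo_queue *q, bool is_sync)
{
	q->in_flight[is_sync]++;
}

/* Mirrors q->in_flight[rq_is_sync(rq)]-- on completion or requeue. */
static void demo_complete(struct demo_queue *q, bool is_sync)
{
	q->in_flight[is_sync]--;
}

/* Equivalent of the new queue_in_flight() helper: total over both classes. */
static unsigned int demo_queue_in_flight(const struct demo_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

int main(void)
{
	struct demo_queue q = { { 0, 0 } };

	demo_dispatch(&q, true);   /* one sync request in flight */
	demo_dispatch(&q, false);  /* one async request in flight */
	assert(demo_queue_in_flight(&q) == 2);

	demo_complete(&q, false);  /* async request completes */
	assert(demo_queue_in_flight(&q) == 1);
	return 0;
}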