aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2012-06-04 23:40:58 -0400
committerJens Axboe <axboe@kernel.dk>2012-06-25 05:53:51 -0400
commit8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d (patch)
tree36ff209f0655c5da9cfb7c5c6f6e9b9786841201
parentb1208b56f31408f7d8381ff5d08e970aa5ee761c (diff)
block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv
Add q->nr_rqs[] which currently behaves the same as q->rq.count[] and move q->rq.elvpriv to q->nr_rqs_elvpriv. blk_drain_queue() is updated to use q->nr_rqs[] instead of q->rq.count[]. These counters separate queue-wide request statistics from the request list and allow implementation of per-queue request allocation. While at it, properly indent fields of struct request_list. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Vivek Goyal <vgoyal@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--block/blk-core.c13
-rw-r--r--include/linux/blkdev.h11
2 files changed, 13 insertions, 11 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 71894e143b91..a2648153691f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
387 if (!list_empty(&q->queue_head) && q->request_fn) 387 if (!list_empty(&q->queue_head) && q->request_fn)
388 __blk_run_queue(q); 388 __blk_run_queue(q);
389 389
390 drain |= q->rq.elvpriv; 390 drain |= q->nr_rqs_elvpriv;
391 391
392 /* 392 /*
393 * Unfortunately, requests are queued at and tracked from 393 * Unfortunately, requests are queued at and tracked from
@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
397 if (drain_all) { 397 if (drain_all) {
398 drain |= !list_empty(&q->queue_head); 398 drain |= !list_empty(&q->queue_head);
399 for (i = 0; i < 2; i++) { 399 for (i = 0; i < 2; i++) {
400 drain |= q->rq.count[i]; 400 drain |= q->nr_rqs[i];
401 drain |= q->in_flight[i]; 401 drain |= q->in_flight[i];
402 drain |= !list_empty(&q->flush_queue[i]); 402 drain |= !list_empty(&q->flush_queue[i]);
403 } 403 }
@@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q)
526 526
527 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; 527 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
528 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; 528 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
529 rl->elvpriv = 0;
530 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); 529 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
531 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); 530 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
532 531
@@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
791 struct request_list *rl = &q->rq; 790 struct request_list *rl = &q->rq;
792 int sync = rw_is_sync(flags); 791 int sync = rw_is_sync(flags);
793 792
793 q->nr_rqs[sync]--;
794 rl->count[sync]--; 794 rl->count[sync]--;
795 if (flags & REQ_ELVPRIV) 795 if (flags & REQ_ELVPRIV)
796 rl->elvpriv--; 796 q->nr_rqs_elvpriv--;
797 797
798 __freed_request(q, sync); 798 __freed_request(q, sync);
799 799
@@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
902 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) 902 if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
903 return NULL; 903 return NULL;
904 904
905 q->nr_rqs[is_sync]++;
905 rl->count[is_sync]++; 906 rl->count[is_sync]++;
906 rl->starved[is_sync] = 0; 907 rl->starved[is_sync] = 0;
907 908
@@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
917 */ 918 */
918 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) { 919 if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
919 rw_flags |= REQ_ELVPRIV; 920 rw_flags |= REQ_ELVPRIV;
920 rl->elvpriv++; 921 q->nr_rqs_elvpriv++;
921 if (et->icq_cache && ioc) 922 if (et->icq_cache && ioc)
922 icq = ioc_lookup_icq(ioc, q); 923 icq = ioc_lookup_icq(ioc, q);
923 } 924 }
@@ -978,7 +979,7 @@ fail_elvpriv:
978 rq->elv.icq = NULL; 979 rq->elv.icq = NULL;
979 980
980 spin_lock_irq(q->queue_lock); 981 spin_lock_irq(q->queue_lock);
981 rl->elvpriv--; 982 q->nr_rqs_elvpriv--;
982 spin_unlock_irq(q->queue_lock); 983 spin_unlock_irq(q->queue_lock);
983 goto out; 984 goto out;
984 985
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 07954b05b86c..7e44ed93f84b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -51,11 +51,10 @@ struct request_list {
51 * count[], starved[], and wait[] are indexed by 51 * count[], starved[], and wait[] are indexed by
52 * BLK_RW_SYNC/BLK_RW_ASYNC 52 * BLK_RW_SYNC/BLK_RW_ASYNC
53 */ 53 */
54 int count[2]; 54 int count[2];
55 int starved[2]; 55 int starved[2];
56 int elvpriv; 56 mempool_t *rq_pool;
57 mempool_t *rq_pool; 57 wait_queue_head_t wait[2];
58 wait_queue_head_t wait[2];
59}; 58};
60 59
61/* 60/*
@@ -282,6 +281,8 @@ struct request_queue {
282 struct list_head queue_head; 281 struct list_head queue_head;
283 struct request *last_merge; 282 struct request *last_merge;
284 struct elevator_queue *elevator; 283 struct elevator_queue *elevator;
284 int nr_rqs[2]; /* # allocated [a]sync rqs */
285 int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
285 286
286 /* 287 /*
287 * the queue request freelist, one for reads and one for writes 288 * the queue request freelist, one for reads and one for writes