author		Tejun Heo <tj@kernel.org>	2012-06-04 23:40:58 -0400
committer	Jens Axboe <axboe@kernel.dk>	2012-06-25 05:53:51 -0400
commit		8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d (patch)
tree		36ff209f0655c5da9cfb7c5c6f6e9b9786841201 /block
parent		b1208b56f31408f7d8381ff5d08e970aa5ee761c (diff)
block: add q->nr_rqs[] and move q->rq.elvpriv to q->nr_rqs_elvpriv
Add q->nr_rqs[] which currently behaves the same as q->rq.count[] and
move q->rq.elvpriv to q->nr_rqs_elvpriv. blk_drain_queue() is updated
to use q->nr_rqs[] instead of q->rq.count[].
These counters separate queue-wide request statistics from the
request list and allow per-queue request allocation to be implemented.
While at it, properly indent fields of struct request_list.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
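The header side of this change is not shown here (the diffstat below is
limited to 'block'). As a rough sketch of what the corresponding
include/linux/blkdev.h hunk looks like after the patch; the exact field
types and comments are assumptions, not quoted from the commit:

	/* sketch of the assumed header-side change; not part of this diffstat */
	struct request_list {
		/*
		 * count[], starved[] and wait[] are indexed by
		 * BLK_RW_SYNC/BLK_RW_ASYNC
		 */
		int			count[2];
		int			starved[2];
		wait_queue_head_t	wait[2];
	};

	/* new fields on struct request_queue, alongside the existing rq list */
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

Keeping nr_rqs[] and nr_rqs_elvpriv on the queue rather than on the
request list is what allows per-queue request allocation later: once
multiple request lists exist, the drain path can still read a single
queue-wide count.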
Diffstat (limited to 'block')
-rw-r--r--	block/blk-core.c	13
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 71894e143b91..a2648153691f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -387,7 +387,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (!list_empty(&q->queue_head) && q->request_fn)
 			__blk_run_queue(q);
 
-		drain |= q->rq.elvpriv;
+		drain |= q->nr_rqs_elvpriv;
 
 		/*
 		 * Unfortunately, requests are queued at and tracked from
@@ -397,7 +397,7 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 		if (drain_all) {
 			drain |= !list_empty(&q->queue_head);
 			for (i = 0; i < 2; i++) {
-				drain |= q->rq.count[i];
+				drain |= q->nr_rqs[i];
 				drain |= q->in_flight[i];
 				drain |= !list_empty(&q->flush_queue[i]);
 			}
@@ -526,7 +526,6 @@ static int blk_init_free_list(struct request_queue *q)
 
 	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
 	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
-	rl->elvpriv = 0;
 	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
 	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
@@ -791,9 +790,10 @@ static void freed_request(struct request_queue *q, unsigned int flags)
 	struct request_list *rl = &q->rq;
 	int sync = rw_is_sync(flags);
 
+	q->nr_rqs[sync]--;
 	rl->count[sync]--;
 	if (flags & REQ_ELVPRIV)
-		rl->elvpriv--;
+		q->nr_rqs_elvpriv--;
 
 	__freed_request(q, sync);
 
@@ -902,6 +902,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 		return NULL;
 
+	q->nr_rqs[is_sync]++;
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
@@ -917,7 +918,7 @@ static struct request *__get_request(struct request_queue *q, int rw_flags,
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
-		rl->elvpriv++;
+		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
@@ -978,7 +979,7 @@ fail_elvpriv:
 	rq->elv.icq = NULL;
 
 	spin_lock_irq(q->queue_lock);
-	rl->elvpriv--;
+	q->nr_rqs_elvpriv--;
 	spin_unlock_irq(q->queue_lock);
 	goto out;
 
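As the message notes, q->nr_rqs[] currently behaves the same as
q->rq.count[]: with a single queue-wide request_list, __get_request()
and freed_request() bump both counters under queue_lock. A hypothetical
debug helper (blk_check_rq_accounting is an invented name, not part of
the patch) would make that invariant explicit:

	/*
	 * Hypothetical sanity check, not part of this commit: while the
	 * queue still has exactly one request_list, the new queue-wide
	 * counters must track rl->count[] exactly.  Caller must hold
	 * q->queue_lock.
	 */
	static void blk_check_rq_accounting(struct request_queue *q)
	{
		struct request_list *rl = &q->rq;

		WARN_ON_ONCE(q->nr_rqs[BLK_RW_SYNC] != rl->count[BLK_RW_SYNC]);
		WARN_ON_ONCE(q->nr_rqs[BLK_RW_ASYNC] != rl->count[BLK_RW_ASYNC]);
	}

No such mirror exists for nr_rqs_elvpriv: rl->elvpriv is removed
outright rather than duplicated, so the elevator-private count lives
only on the queue from this patch onward.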