diff options
author | Tejun Heo <tj@kernel.org> | 2012-06-04 23:40:59 -0400 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-06-25 05:53:52 -0400 |
commit | 5b788ce3e2acac9bf109743b1281d77347cf2101 (patch) | |
tree | 907477e42d27bec9a2060fcc709402b7636390c9 /block/blk-sysfs.c | |
parent | 8a5ecdd42862bf87ceab00bf2a15d7eabf58c02d (diff) |
block: prepare for multiple request_lists
Request allocation is about to be made per-blkg meaning that there'll
be multiple request lists.
* Make queue full state per request_list. blk_*queue_full() functions
are renamed to blk_*rl_full() and take @rl instead of @q.
* Rename blk_init_free_list() to blk_init_rl() and make it take @rl
instead of @q. Also add @gfp_mask parameter.
* Add blk_exit_rl() instead of destroying rl directly from
blk_release_queue().
* Add request_list->q and make request alloc/free functions -
blk_free_request(), [__]freed_request(), __get_request() - take @rl
instead of @q.
This patch doesn't introduce any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r-- | block/blk-sysfs.c | 12 |
1 file changed, 5 insertions, 7 deletions
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index aa41b47c22d2..234ce7c082fa 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -66,16 +66,16 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) | |||
66 | blk_clear_queue_congested(q, BLK_RW_ASYNC); | 66 | blk_clear_queue_congested(q, BLK_RW_ASYNC); |
67 | 67 | ||
68 | if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { | 68 | if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { |
69 | blk_set_queue_full(q, BLK_RW_SYNC); | 69 | blk_set_rl_full(rl, BLK_RW_SYNC); |
70 | } else { | 70 | } else { |
71 | blk_clear_queue_full(q, BLK_RW_SYNC); | 71 | blk_clear_rl_full(rl, BLK_RW_SYNC); |
72 | wake_up(&rl->wait[BLK_RW_SYNC]); | 72 | wake_up(&rl->wait[BLK_RW_SYNC]); |
73 | } | 73 | } |
74 | 74 | ||
75 | if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { | 75 | if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) { |
76 | blk_set_queue_full(q, BLK_RW_ASYNC); | 76 | blk_set_rl_full(rl, BLK_RW_ASYNC); |
77 | } else { | 77 | } else { |
78 | blk_clear_queue_full(q, BLK_RW_ASYNC); | 78 | blk_clear_rl_full(rl, BLK_RW_ASYNC); |
79 | wake_up(&rl->wait[BLK_RW_ASYNC]); | 79 | wake_up(&rl->wait[BLK_RW_ASYNC]); |
80 | } | 80 | } |
81 | spin_unlock_irq(q->queue_lock); | 81 | spin_unlock_irq(q->queue_lock); |
@@ -476,7 +476,6 @@ static void blk_release_queue(struct kobject *kobj) | |||
476 | { | 476 | { |
477 | struct request_queue *q = | 477 | struct request_queue *q = |
478 | container_of(kobj, struct request_queue, kobj); | 478 | container_of(kobj, struct request_queue, kobj); |
479 | struct request_list *rl = &q->rq; | ||
480 | 479 | ||
481 | blk_sync_queue(q); | 480 | blk_sync_queue(q); |
482 | 481 | ||
@@ -489,8 +488,7 @@ static void blk_release_queue(struct kobject *kobj) | |||
489 | elevator_exit(q->elevator); | 488 | elevator_exit(q->elevator); |
490 | } | 489 | } |
491 | 490 | ||
492 | if (rl->rq_pool) | 491 | blk_exit_rl(&q->rq); |
493 | mempool_destroy(rl->rq_pool); | ||
494 | 492 | ||
495 | if (q->queue_tags) | 493 | if (q->queue_tags) |
496 | __blk_queue_free_tags(q); | 494 | __blk_queue_free_tags(q); |