author		Shaohua Li <shli@fb.com>	2015-01-15 20:32:25 -0500
committer	Jens Axboe <axboe@fb.com>	2015-01-23 16:15:46 -0500
commit		ee1b6f7aff94019c09e73837054979063f722046 (patch)
tree		79c7b943d7c6d62fec1874afd2c50964de054aa0 /block
parent		bb5c3cdda37aad22996d6da2addd58cadc0436c0 (diff)
block: support different tag allocation policy
The libata tag allocation uses a round-robin policy. The next patch will
make libata use the block layer's generic tag allocation, so let's add a
policy argument to the tag allocator.

There are currently two policies: FIFO (the default) and round-robin.
Cc: Jens Axboe <axboe@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
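
For context, a driver opting into block-layer tag management picks the
policy when it sets up its tag map. A minimal sketch of the updated API,
assuming a round-robin constant (called BLK_TAG_ALLOC_RR here) is defined
alongside BLK_TAG_ALLOC_FIFO in a header outside this diffstat (which is
limited to 'block'):

#include <linux/blkdev.h>

/*
 * Hypothetical driver setup: ask for a round-robin tag map. Passing
 * NULL for tags lets the block layer allocate the map itself.
 * BLK_TAG_ALLOC_RR is assumed to live next to BLK_TAG_ALLOC_FIFO;
 * the header change is not visible in this 'block'-limited diffstat.
 */
static int my_driver_setup_tags(struct request_queue *q, int depth)
{
	return blk_queue_init_tags(q, depth, NULL, BLK_TAG_ALLOC_RR);
}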
Diffstat (limited to 'block')
-rw-r--r--	block/blk-tag.c	33
1 file changed, 25 insertions(+), 8 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a185b86741e5..f0344e6939d5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -119,7 +119,7 @@ fail:
 }
 
 static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-						   int depth)
+						   int depth, int alloc_policy)
 {
 	struct blk_queue_tag *tags;
 
@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 		goto fail;
 
 	atomic_set(&tags->refcnt, 1);
+	tags->alloc_policy = alloc_policy;
+	tags->next_tag = 0;
 	return tags;
 fail:
 	kfree(tags);
@@ -140,10 +142,11 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth: the maximum queue depth supported
+ * @alloc_policy: tag allocation policy
  **/
-struct blk_queue_tag *blk_init_tags(int depth)
+struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
 {
-	return __blk_queue_init_tags(NULL, depth);
+	return __blk_queue_init_tags(NULL, depth, alloc_policy);
 }
 EXPORT_SYMBOL(blk_init_tags);
 
@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q: the request queue for the device
  * @depth: the maximum queue depth supported
  * @tags: the tag to use
+ * @alloc_policy: tag allocation policy
  *
  * Queue lock must be held here if the function is called to resize an
  * existing map.
  **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
-			struct blk_queue_tag *tags)
+			struct blk_queue_tag *tags, int alloc_policy)
 {
 	int rc;
 
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = __blk_queue_init_tags(q, depth);
+		tags = __blk_queue_init_tags(q, depth, alloc_policy);
 
 		if (!tags)
 			return -ENOMEM;
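
blk_init_tags() builds a map that is not attached to a queue, which is how
a host can share one tag space across several queues. A hedged sketch of
that pattern with the new signature (error unwinding elided; the helper
name is hypothetical, and the refcounting details of reusing a map live in
parts of blk_queue_init_tags() not shown in this hunk):

/*
 * Hypothetical: one FIFO tag map shared by two request queues, so
 * both draw tags from the same space.
 */
static int my_host_share_tags(struct request_queue *q1,
			      struct request_queue *q2, int depth)
{
	struct blk_queue_tag *shared;
	int err;

	shared = blk_init_tags(depth, BLK_TAG_ALLOC_FIFO);
	if (!shared)
		return -ENOMEM;

	err = blk_queue_init_tags(q1, depth, shared, BLK_TAG_ALLOC_FIFO);
	if (err)
		return err;
	return blk_queue_init_tags(q2, depth, shared, BLK_TAG_ALLOC_FIFO);
}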
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	}
 
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, max_depth);
-		if (tag >= max_depth)
-			return 1;
+		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
+			tag = find_first_zero_bit(bqt->tag_map, max_depth);
+			if (tag >= max_depth)
+				return 1;
+		} else {
+			int start = bqt->next_tag;
+			int size = min_t(int, bqt->max_depth, max_depth + start);
+			tag = find_next_zero_bit(bqt->tag_map, size, start);
+			if (tag >= size && start + size > bqt->max_depth) {
+				size = start + size - bqt->max_depth;
+				tag = find_first_zero_bit(bqt->tag_map, size);
+			}
+			if (tag >= size)
+				return 1;
+		}
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * See blk_queue_end_tag for details.
 	 */
 
+	bqt->next_tag = (tag + 1) % bqt->max_depth;
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
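
The interesting hunk is the round-robin branch of blk_queue_start_tag():
rather than always taking the lowest free bit, it searches from the saved
cursor (bqt->next_tag) toward the end of the window, wraps to the front of
the map if that fails, and afterwards advances the cursor past the granted
tag. A userspace sketch of the same two-phase search (illustration only:
it ignores the reserved-tag window the kernel folds in via max_depth, and
rr_find_free plus the plain char array are inventions for this example):

#include <stdio.h>
#include <string.h>

/* Look for a free tag at or after 'next', wrapping to the start of
 * the map if needed; returns -1 if all 'depth' tags are busy. The
 * kernel does this over a bitmap with find_next_zero_bit() and
 * find_first_zero_bit(). */
static int rr_find_free(const char *map, int depth, int next)
{
	int i;

	for (i = next; i < depth; i++)	/* phase 1: cursor to end */
		if (!map[i])
			return i;
	for (i = 0; i < next; i++)	/* phase 2: wrap to front */
		if (!map[i])
			return i;
	return -1;
}

int main(void)
{
	char map[8];
	int next = 0, i;

	memset(map, 0, sizeof(map));
	for (i = 0; i < 5; i++) {	/* take tags 0..4 */
		int tag = rr_find_free(map, 8, next);
		map[tag] = 1;
		next = (tag + 1) % 8;	/* mirrors bqt->next_tag update */
		printf("got tag %d\n", tag);
	}
	map[1] = 0;			/* free tag 1 */
	/* FIFO would grant 1 again; round-robin resumes at the cursor
	 * and grants 5, spreading reuse across the whole tag space. */
	printf("next grant: %d\n", rr_find_free(map, 8, next));
	return 0;
}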