author		Shaohua Li <shli@fb.com>	2015-01-15 20:32:25 -0500
committer	Jens Axboe <axboe@fb.com>	2015-01-23 16:15:46 -0500
commit		ee1b6f7aff94019c09e73837054979063f722046
tree		79c7b943d7c6d62fec1874afd2c50964de054aa0
parent		bb5c3cdda37aad22996d6da2addd58cadc0436c0
block: support different tag allocation policy
The libata tag allocation uses a round-robin policy. The next patch will
make libata use the block layer's generic tag allocation, so add an
allocation policy to the block tag code. There are currently two
policies: FIFO (the default) and round-robin.

Cc: Jens Axboe <axboe@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
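For illustration, this is roughly what the new interface looks like from a caller's side. The fragment below is a hypothetical driver sketch (not part of this patch), using only the signatures and policy constants the patch introduces:

/* Hypothetical caller sketch -- not from this patch. */
#include <linux/blkdev.h>

static int example_setup_tags(struct request_queue *q, int depth)
{
	/*
	 * BLK_TAG_ALLOC_FIFO keeps the historical behaviour (always start
	 * the bitmap search at tag 0); BLK_TAG_ALLOC_RR resumes the search
	 * after the last tag handed out.
	 */
	return blk_queue_init_tags(q, depth, NULL, BLK_TAG_ALLOC_RR);
}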
-rw-r--r--  block/blk-tag.c           | 33
-rw-r--r--  drivers/block/osdblk.c    |  2
-rw-r--r--  drivers/scsi/scsi_scan.c  |  3
-rw-r--r--  include/linux/blkdev.h    |  8
-rw-r--r--  include/scsi/scsi_host.h  |  3
-rw-r--r--  include/scsi/scsi_tcq.h   |  3
6 files changed, 39 insertions(+), 13 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index a185b86741e5..f0344e6939d5 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -119,7 +119,7 @@ fail:
 }
 
 static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-						   int depth)
+						   int depth, int alloc_policy)
 {
 	struct blk_queue_tag *tags;
 
@@ -131,6 +131,8 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 		goto fail;
 
 	atomic_set(&tags->refcnt, 1);
+	tags->alloc_policy = alloc_policy;
+	tags->next_tag = 0;
 	return tags;
 fail:
 	kfree(tags);
@@ -140,10 +142,11 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth:	the maximum queue depth supported
+ * @alloc_policy: tag allocation policy
  **/
-struct blk_queue_tag *blk_init_tags(int depth)
+struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
 {
-	return __blk_queue_init_tags(NULL, depth);
+	return __blk_queue_init_tags(NULL, depth, alloc_policy);
 }
 EXPORT_SYMBOL(blk_init_tags);
 
@@ -152,19 +155,20 @@ EXPORT_SYMBOL(blk_init_tags);
  * @q:		the request queue for the device
  * @depth:	the maximum queue depth supported
  * @tags:	the tag to use
+ * @alloc_policy: tag allocation policy
  *
  * Queue lock must be held here if the function is called to resize an
  * existing map.
  **/
 int blk_queue_init_tags(struct request_queue *q, int depth,
-			struct blk_queue_tag *tags)
+			struct blk_queue_tag *tags, int alloc_policy)
 {
 	int rc;
 
 	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
 
 	if (!tags && !q->queue_tags) {
-		tags = __blk_queue_init_tags(q, depth);
+		tags = __blk_queue_init_tags(q, depth, alloc_policy);
 
 		if (!tags)
 			return -ENOMEM;
@@ -344,9 +348,21 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	}
 
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, max_depth);
-		if (tag >= max_depth)
-			return 1;
+		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
+			tag = find_first_zero_bit(bqt->tag_map, max_depth);
+			if (tag >= max_depth)
+				return 1;
+		} else {
+			int start = bqt->next_tag;
+			int size = min_t(int, bqt->max_depth, max_depth + start);
+			tag = find_next_zero_bit(bqt->tag_map, size, start);
+			if (tag >= size && start + size > bqt->max_depth) {
+				size = start + size - bqt->max_depth;
+				tag = find_first_zero_bit(bqt->tag_map, size);
+			}
+			if (tag >= size)
+				return 1;
+		}
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
 	/*
@@ -354,6 +370,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 * See blk_queue_end_tag for details.
 	 */
 
+	bqt->next_tag = (tag + 1) % bqt->max_depth;
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
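The round-robin branch in blk_queue_start_tag() above scans the tag bitmap in two passes: from next_tag to the end of the usable window, then wrapping around to the front. A standalone sketch of the same wrap-around search (plain C over a byte array, ignoring the max_depth vs. real_max_depth clamping the kernel code has to do) may make the control flow easier to follow:

/* Userspace sketch of the round-robin scan -- not kernel code. */
static int find_zero(const unsigned char *map, int from, int to)
{
	for (int i = from; i < to; i++)
		if (!map[i])
			return i;		/* first free slot in [from, to) */
	return to;				/* nothing free: return the end */
}

static int rr_find_tag(const unsigned char *map, int max_depth, int next_tag)
{
	/* first pass: resume where the previous allocation left off */
	int tag = find_zero(map, next_tag, max_depth);

	if (tag < max_depth)
		return tag;
	/* second pass: wrap around and scan the front of the map */
	tag = find_zero(map, 0, next_tag);
	return tag < next_tag ? tag : -1;	/* -1 means the map is full */
}

Note that in the patch bqt->next_tag is updated unconditionally after a tag is taken, but only the round-robin policy consults it; the FIFO policy always restarts its search at bit 0.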
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 79aa179305b5..e22942596207 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -423,7 +423,7 @@ static int osdblk_init_disk(struct osdblk_device *osdev)
 	}
 
 	/* switch queue to TCQ mode; allocate tag map */
-	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL);
+	rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL, BLK_TAG_ALLOC_FIFO);
 	if (rc) {
 		blk_cleanup_queue(q);
 		put_disk(disk);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 983aed10ff2f..921a8c897eb2 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -290,7 +290,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
 	if (!shost_use_blk_mq(sdev->host) &&
 	    (shost->bqt || shost->hostt->use_blk_tags)) {
 		blk_queue_init_tags(sdev->request_queue,
-				    sdev->host->cmd_per_lun, shost->bqt);
+				    sdev->host->cmd_per_lun, shost->bqt,
+				    shost->hostt->tag_alloc_policy);
 	}
 	scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4c4b732d7556..6f388fd1c11c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -272,7 +272,11 @@ struct blk_queue_tag {
 	int max_depth;			/* what we will send to device */
 	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */
+	int alloc_policy;		/* tag allocation policy */
+	int next_tag;			/* next tag */
 };
+#define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
+#define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */
 
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
@@ -1139,11 +1143,11 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
 extern void blk_queue_free_tags(struct request_queue *);
 extern int blk_queue_resize_tags(struct request_queue *, int);
 extern void blk_queue_invalidate_tags(struct request_queue *);
-extern struct blk_queue_tag *blk_init_tags(int);
+extern struct blk_queue_tag *blk_init_tags(int, int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 019e66858ce6..e113c757d555 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -402,6 +402,9 @@ struct scsi_host_template {
 	 */
 	unsigned char present;
 
+	/* If use block layer to manage tags, this is tag allocation policy */
+	int tag_alloc_policy;
+
 	/*
 	 * Let the block layer assigns tags to all commands.
 	 */
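A SCSI low-level driver that wants round-robin tags would set the new template field alongside use_blk_tags. A minimal, hypothetical host-template fragment (illustrative only, using the BLK_TAG_ALLOC_* defines added to blkdev.h above):

/* Hypothetical host template fragment -- illustrative only. */
#include <linux/blkdev.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_sht = {
	.name			= "example",
	.can_queue		= 31,
	.use_blk_tags		= 1,			/* block layer manages tags */
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,	/* new field from this patch */
};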
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index 9708b28bd2aa..b27977e8aaed 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -66,7 +66,8 @@ static inline int scsi_init_shared_tag_map(struct Scsi_Host *shost, int depth)
 	 * devices on the shared host (for libata)
 	 */
 	if (!shost->bqt) {
-		shost->bqt = blk_init_tags(depth);
+		shost->bqt = blk_init_tags(depth,
+				shost->hostt->tag_alloc_policy);
 		if (!shost->bqt)
 			return -ENOMEM;
 	}
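With the scsi_tcq.h change, scsi_init_shared_tag_map() simply forwards the host template's tag_alloc_policy into blk_init_tags(), so a driver only has to set the field shown above before creating the shared map. A rough sketch of the expected call site (hypothetical fragment; per the commit message, the follow-up patch is expected to make libata the user of this path):

/* Hypothetical init-path fragment -- illustrative only. */
int err = scsi_init_shared_tag_map(shost, shost->can_queue);
if (err)
	return err;	/* -ENOMEM if the shared tag map could not be allocated */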