author		Christoph Hellwig <hch@lst.de>	2016-10-20 09:12:13 -0400
committer	Jens Axboe <axboe@fb.com>	2016-10-28 10:45:17 -0400
commit		e806402130c9c494e22c73ae9ead4e79d2a5811c (patch)
tree		bac59e1eb3f1b5945409bd0780a4824e9b8383f8 /block/blk-tag.c
parent		8d2bbd4c8236e9e38e6b36ac9e2c54fdcfe5b335 (diff)
block: split out request-only flags into a new namespace
A lot of the REQ_* flags are only used on struct requests, and only of use to the block layer and a few drivers that dig into struct request internals.

This patch adds a new req_flags_t rq_flags field to struct request for them, and thus dramatically shrinks the number of common request flags. It also removes the unfortunate situation where we have to fit the fields from the same enum into 32 bits for struct bio and 64 bits for struct request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Shaun Tancheff <shaun.tancheff@seagate.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
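For orientation, here is a minimal sketch of the split described above, assuming simplified type and flag definitions and a hypothetical trimmed-down struct; the real definitions live in include/linux/blkdev.h and include/linux/blk_types.h and carry many more flags and fields.

typedef unsigned int req_flags_t;	/* sketch only; the kernel's type differs in detail */

/* Request-only flags that no longer share bit space with the bio-visible REQ_* flags. */
#define RQF_QUEUED	((req_flags_t)(1 << 0))	/* request uses block-layer tagging; bit position illustrative */
#define RQF_STARTED	((req_flags_t)(1 << 1))	/* driver has started the request; bit position illustrative */

struct request_sketch {			/* hypothetical stand-in for struct request */
	unsigned int	cmd_flags;	/* REQ_* flags, shared with struct bio */
	req_flags_t	rq_flags;	/* RQF_* flags, private to the block layer and drivers */
	int		tag;
	/* ... */
};

With the split in place, code that used to test rq->cmd_flags & REQ_QUEUED tests rq->rq_flags & RQF_QUEUED instead, which is exactly what the hunks below do.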
Diffstat (limited to 'block/blk-tag.c')
-rw-r--r--	block/blk-tag.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/block/blk-tag.c b/block/blk-tag.c
index f0344e6939d5..bae1decb6ec3 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -270,7 +270,7 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
-	rq->cmd_flags &= ~REQ_QUEUED;
+	rq->rq_flags &= ~RQF_QUEUED;
 	rq->tag = -1;
 
 	if (unlikely(bqt->tag_index[tag] == NULL))
@@ -316,7 +316,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	unsigned max_depth;
 	int tag;
 
-	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
+	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
 		printk(KERN_ERR
 		       "%s: request %p for device [%s] already tagged %d",
 		       __func__, rq,
@@ -371,7 +371,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	 */
 
 	bqt->next_tag = (tag + 1) % bqt->max_depth;
-	rq->cmd_flags |= REQ_QUEUED;
+	rq->rq_flags |= RQF_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
 	blk_start_request(rq);