path: root/include/linux/blk-mq.h
author    Christoph Hellwig <hch@lst.de>    2014-02-10 11:29:00 -0500
committer Jens Axboe <axboe@fb.com>    2014-02-10 11:29:00 -0500
commit    18741986a4b1dc4b1f171634c4191abc3b0fa023 (patch)
tree      d0f632fa9b205d5fbcc76ff1cf8cba63112c7da8 /include/linux/blk-mq.h
parent    ce2c350b2cfe5b5ca5023a6b1ec4d21821d39add (diff)
blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq, similar to what's done with the old request path. This allows us to set up the request properly with a tag from the actually allowed range and ->rq_disk as needed by some drivers. To make life easier we also switch to dynamic allocation of ->flush_rq for the old path.

This effectively reverts most of "blk-mq: fix for flush deadlock" and "blk-mq: Don't reserve a tag for flush request".

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
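As an illustrative sketch only (not code from this commit), the idea of preallocating the flush request when the queue is initialised, instead of carving it out of the normal tag space at runtime, might look roughly like the following. The helper name and error handling are assumptions made for illustration.

/*
 * Hypothetical sketch of the approach described above: allocate a single
 * flush request up front at queue setup time, sized to include any
 * per-request driver payload (cmd_size).  example_alloc_flush_rq() is a
 * made-up name and does not appear in the commit.
 */
#include <linux/blkdev.h>
#include <linux/slab.h>

static int example_alloc_flush_rq(struct request_queue *q, unsigned int cmd_size)
{
	q->flush_rq = kzalloc(sizeof(struct request) + cmd_size, GFP_KERNEL);
	if (!q->flush_rq)
		return -ENOMEM;
	return 0;
}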
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--    include/linux/blk-mq.h    5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 468be242db90..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
 	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
-	atomic_t		pending_flush;
-
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		queue_depth;
-	unsigned int		reserved_tags;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 
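A hedged usage sketch of the reworked prototype above: regular allocations now take three arguments, while reserved tags keep their dedicated helper. The wrapper below is purely illustrative and not part of the commit.

/*
 * Illustrative only: allocate a regular (non-reserved) request with the
 * new three-argument prototype; a caller that really needs a reserved tag
 * would instead use blk_mq_alloc_reserved_request().
 */
#include <linux/blk-mq.h>

static struct request *example_get_request(struct request_queue *q, int rw)
{
	return blk_mq_alloc_request(q, rw, GFP_KERNEL);
}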