author     Christoph Hellwig <hch@lst.de>  2014-02-10 11:29:00 -0500
committer  Jens Axboe <axboe@fb.com>       2014-02-10 11:29:00 -0500
commit     18741986a4b1dc4b1f171634c4191abc3b0fa023 (patch)
tree       d0f632fa9b205d5fbcc76ff1cf8cba63112c7da8 /include/linux/blk-mq.h
parent     ce2c350b2cfe5b5ca5023a6b1ec4d21821d39add (diff)
blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq, similar to what's done
with the old request path. This allows us to set up the request properly
with a tag from the actually allowed range and ->rq_disk as needed by
some drivers. To make life easier we also switch to dynamic allocation
of ->flush_rq for the old path.
This effectively reverts most of
"blk-mq: fix for flush deadlock"
and
"blk-mq: Don't reserve a tag for flush request"
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
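
The mechanism the commit message describes — allocate ->flush_rq once when the
queue is set up, then reuse it for every flush — can be modeled outside the
kernel. The following is a minimal illustrative sketch in plain C, not the
actual kernel code; the toy_* names and fields are hypothetical stand-ins for
the real request_queue/request machinery:

	#include <stdlib.h>
	#include <string.h>

	struct toy_request {
		int tag;        /* tag from the normal, allowed range */
		void *rq_disk;  /* set up properly, as some drivers need */
	};

	struct toy_queue {
		struct toy_request *flush_rq;  /* preallocated, reused */
	};

	/* Allocate ->flush_rq once, when the queue is created. */
	static int toy_queue_init(struct toy_queue *q)
	{
		q->flush_rq = calloc(1, sizeof(*q->flush_rq));
		return q->flush_rq ? 0 : -1;
	}

	/* Issuing a flush reuses the preallocated request: no
	 * allocation happens on the I/O path. */
	static struct toy_request *toy_prepare_flush(struct toy_queue *q,
						     int tag, void *disk)
	{
		memset(q->flush_rq, 0, sizeof(*q->flush_rq));
		q->flush_rq->tag = tag;
		q->flush_rq->rq_disk = disk;
		return q->flush_rq;
	}

	static void toy_queue_exit(struct toy_queue *q)
	{
		free(q->flush_rq);
	}

	int main(void)
	{
		struct toy_queue q;

		if (toy_queue_init(&q))
			return 1;
		/* Two flushes, both reusing the same request. */
		toy_prepare_flush(&q, 42, NULL);
		toy_prepare_flush(&q, 7, NULL);
		toy_queue_exit(&q);
		return 0;
	}

Because the flush request already exists, issuing a flush no longer needs a
dedicated reserved tag, which is why the diff below can drop the pending_flush
and reserved_tags bookkeeping and the bool reserved argument.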
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--  include/linux/blk-mq.h  5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 468be242db90..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
 	struct list_head	page_list;
 	struct blk_mq_tags	*tags;
 
-	atomic_t		pending_flush;
-
 	unsigned long		queued;
 	unsigned long		run;
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		queue_depth;
-	unsigned int		reserved_tags;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
 struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
 
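
For driver code, the user-visible part of this header change is the narrowed
blk_mq_alloc_request() prototype. A hypothetical call site (not taken from the
patch) would be updated roughly like this:

	/* Before this commit: callers passed a reserved-tag flag. */
	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL, false);

	/* After this commit the flag is gone; callers that really need a
	 * reserved tag use blk_mq_alloc_reserved_request() instead. */
	rq = blk_mq_alloc_request(q, WRITE, GFP_KERNEL);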