about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2014-02-10 11:29:00 -0500
committerJens Axboe <axboe@fb.com>2014-02-10 11:29:00 -0500
commit18741986a4b1dc4b1f171634c4191abc3b0fa023 (patch)
treed0f632fa9b205d5fbcc76ff1cf8cba63112c7da8 /include/linux
parentce2c350b2cfe5b5ca5023a6b1ec4d21821d39add (diff)
blk-mq: rework flush sequencing logic
Switch to using a preallocated flush_rq for blk-mq similar to what's done with the old request path. This allows us to set up the request properly with a tag from the actually allowed range and ->rq_disk as needed by some drivers. To make life easier we also switch to dynamic allocation of ->flush_rq for the old path. This effectively reverts most of "blk-mq: fix for flush deadlock" and "blk-mq: Don't reserve a tag for flush request". Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/blk-mq.h5
-rw-r--r--include/linux/blkdev.h11
2 files changed, 4 insertions, 12 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 468be242db90..18ba8a627f46 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -36,15 +36,12 @@ struct blk_mq_hw_ctx {
36 struct list_head page_list; 36 struct list_head page_list;
37 struct blk_mq_tags *tags; 37 struct blk_mq_tags *tags;
38 38
39 atomic_t pending_flush;
40
41 unsigned long queued; 39 unsigned long queued;
42 unsigned long run; 40 unsigned long run;
43#define BLK_MQ_MAX_DISPATCH_ORDER 10 41#define BLK_MQ_MAX_DISPATCH_ORDER 10
44 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 42 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
45 43
46 unsigned int queue_depth; 44 unsigned int queue_depth;
47 unsigned int reserved_tags;
48 unsigned int numa_node; 45 unsigned int numa_node;
49 unsigned int cmd_size; /* per-request extra data */ 46 unsigned int cmd_size; /* per-request extra data */
50 47
@@ -129,7 +126,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *,
129void blk_mq_run_queues(struct request_queue *q, bool async); 126void blk_mq_run_queues(struct request_queue *q, bool async);
130void blk_mq_free_request(struct request *rq); 127void blk_mq_free_request(struct request *rq);
131bool blk_mq_can_queue(struct blk_mq_hw_ctx *); 128bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
132struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved); 129struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
133struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp); 130struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
134struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag); 131struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
135 132
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0375654adb28..b2d25ecbcbc1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -101,7 +101,7 @@ struct request {
101 }; 101 };
102 union { 102 union {
103 struct call_single_data csd; 103 struct call_single_data csd;
104 struct work_struct mq_flush_data; 104 struct work_struct mq_flush_work;
105 }; 105 };
106 106
107 struct request_queue *q; 107 struct request_queue *q;
@@ -451,13 +451,8 @@ struct request_queue {
451 unsigned long flush_pending_since; 451 unsigned long flush_pending_since;
452 struct list_head flush_queue[2]; 452 struct list_head flush_queue[2];
453 struct list_head flush_data_in_flight; 453 struct list_head flush_data_in_flight;
454 union { 454 struct request *flush_rq;
455 struct request flush_rq; 455 spinlock_t mq_flush_lock;
456 struct {
457 spinlock_t mq_flush_lock;
458 struct work_struct mq_flush_work;
459 };
460 };
461 456
462 struct mutex sysfs_lock; 457 struct mutex sysfs_lock;
463 458