aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorShaohua Li <shli@kernel.org>2013-12-30 22:38:50 -0500
committerJens Axboe <axboe@kernel.dk>2014-01-30 14:57:25 -0500
commitf0276924fa35a3607920a58cf5d878212824b951 (patch)
tree5759cef09f3ba6b2f206ace779fef298a8b9d7be /include
parentd835502f3dacad1638d516ab156d66f0ba377cf5 (diff)
blk-mq: Don't reserve a tag for flush request
Reserving a tag (request) for flush to avoid deadlock is overkill. A tag is a valuable resource. We can track the number of flush requests and disallow having too many pending flush requests allocated. With this patch, blk_mq_alloc_request_pinned() could do a busy nop (but not an endless loop) if too many pending requests are allocated and a new flush request is allocated. But this should not be a problem, as too many pending flush requests is a very rare case. I verified this can fix the deadlock caused by too many pending flush requests. Signed-off-by: Shaohua Li <shli@fusionio.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include')
-rw-r--r--include/linux/blk-mq.h3
1 file changed, 3 insertions, 0 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 161b23105b1e..1e8f16f65af4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -36,12 +36,15 @@ struct blk_mq_hw_ctx {
36 struct list_head page_list; 36 struct list_head page_list;
37 struct blk_mq_tags *tags; 37 struct blk_mq_tags *tags;
38 38
39 atomic_t pending_flush;
40
39 unsigned long queued; 41 unsigned long queued;
40 unsigned long run; 42 unsigned long run;
41#define BLK_MQ_MAX_DISPATCH_ORDER 10 43#define BLK_MQ_MAX_DISPATCH_ORDER 10
42 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 44 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
43 45
44 unsigned int queue_depth; 46 unsigned int queue_depth;
47 unsigned int reserved_tags;
45 unsigned int numa_node; 48 unsigned int numa_node;
46 unsigned int cmd_size; /* per-request extra data */ 49 unsigned int cmd_size; /* per-request extra data */
47 50