author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 14:53:51 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-18 14:53:51 -0400
commit		d3dc366bbaf07c125561e90d6da4bb147741101a
tree		6eb7e79a8ec9df1fa705393c6d15ccea3d104661	/include/linux/blk-mq.h
parent		511c41d9e6665a07aca94eb00983cf6d77dd87ff
parent		e19a8a0ad2d255316830ead05b59c5a704434cbb
Merge branch 'for-3.18/core' of git://git.kernel.dk/linux-block
Pull core block layer changes from Jens Axboe:
 "This is the core block IO pull request for 3.18. Apart from the new
  and improved flush machinery for blk-mq, this is all mostly bug fixes
  and cleanups.

   - blk-mq timeout updates and fixes from Christoph.

   - Removal of REQ_END, also from Christoph. We pass it through the
     ->queue_rq() hook for blk-mq instead, freeing up one of the
     request bits. The space was overly tight on 32-bit, so Martin
     also killed REQ_KERNEL since it's no longer used.

   - blk integrity updates and fixes from Martin and Gu Zheng.

   - Update to the flush machinery for blk-mq from Ming Lei. Now we
     have a per hardware context flush request, which both cleans up
     the code and should scale better for flush intensive workloads on
     blk-mq.

   - Improve the error printing, from Rob Elliott.

   - Backing device improvements and cleanups from Tejun.

   - Fixup of a misplaced rq_complete() tracepoint from Hannes.

   - Make blk_get_request() return error pointers, fixing up issues
     where we NULL deref when a device goes bad or missing. From Joe
     Lawrence.

   - Prep work for drastically reducing the memory consumption of dm
     devices from Junichi Nomura. This allows creating clone bio sets
     without preallocating a lot of memory.

   - Fix a blk-mq hang on certain combinations of queue depths and
     hardware queues from me.

   - Limit memory consumption for blk-mq devices for crash dump
     scenarios and drivers that use crazy high depths (certain SCSI
     shared tag setups). We now just use a single queue and limited
     depth for that"

* 'for-3.18/core' of git://git.kernel.dk/linux-block: (58 commits)
  block: Remove REQ_KERNEL
  blk-mq: allocate cpumask on the home node
  bio-integrity: remove the needless fail handle of bip_slab creating
  block: include func name in __get_request prints
  block: make blk_update_request print prefix match ratelimited prefix
  blk-merge: don't compute bi_phys_segments from bi_vcnt for cloned bio
  block: fix alignment_offset math that assumes io_min is a power-of-2
  blk-mq: Make bt_clear_tag() easier to read
  blk-mq: fix potential hang if rolling wakeup depth is too high
  block: add bioset_create_nobvec()
  block: use bio_clone_fast() in blk_rq_prep_clone()
  block: misplaced rq_complete tracepoint
  sd: Honor block layer integrity handling flags
  block: Replace strnicmp with strncasecmp
  block: Add T10 Protection Information functions
  block: Don't merge requests if integrity flags differ
  block: Integrity checksum flag
  block: Relocate bio integrity flags
  block: Add a disk flag to block integrity profile
  block: Add prefix to block integrity profile flags
  ...
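The REQ_END removal changes the ->queue_rq() contract: the "last request in this batch" hint is now passed as a bool argument, and drivers call blk_mq_start_request() themselves before touching the request (see the hunks below). A minimal sketch of the resulting driver pattern, assuming hypothetical my_dev/my_cmd structures and my_hw_submit()/my_hw_kick() helpers:

#include <linux/blk-mq.h>

/*
 * Hypothetical ->queue_rq() after REQ_END removal: the "last request in
 * the current batch" hint arrives as the bool argument instead of a
 * request flag, and the driver starts the request itself.
 */
static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
		       bool last)
{
	struct my_dev *dev = hctx->driver_data;		/* hypothetical device */
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* per-request pdu */

	blk_mq_start_request(rq);

	if (my_hw_submit(dev, cmd))			/* hypothetical submit */
		return BLK_MQ_RQ_QUEUE_BUSY;		/* blk-mq will retry */

	if (last)					/* end of plugged batch */
		my_hw_kick(dev);			/* hypothetical doorbell */

	return BLK_MQ_RQ_QUEUE_OK;
}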
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--	include/linux/blk-mq.h	22
1 file changed, 17 insertions, 5 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c13a0c09faea..c9be1589415a 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,6 +4,7 @@
 #include <linux/blkdev.h>

 struct blk_mq_tags;
+struct blk_flush_queue;

 struct blk_mq_cpu_notifier {
	struct list_head list;
@@ -34,6 +35,7 @@ struct blk_mq_hw_ctx {

	struct request_queue	*queue;
	unsigned int		queue_num;
+	struct blk_flush_queue	*fq;

	void			*driver_data;

@@ -77,8 +79,9 @@ struct blk_mq_tag_set {
	struct list_head	tag_list;
 };

-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
+typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_request_fn)(void *, struct request *, unsigned int,
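The new timeout_fn typedef replaces rq_timed_out_fn in struct blk_mq_ops (see the timeout hunk below). A minimal sketch of a handler with that shape, assuming a hypothetical my_hw_abort() helper and the my_cmd pdu from the earlier sketch:

/*
 * Hypothetical handler matching timeout_fn; "reserved" tells the driver
 * whether the timed-out request came from the reserved tag pool.
 */
static enum blk_eh_timer_return my_timeout(struct request *rq, bool reserved)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (my_hw_abort(cmd))			/* hypothetical abort path */
		return BLK_EH_HANDLED;		/* driver completed the rq */

	return BLK_EH_RESET_TIMER;		/* give the hardware more time */
}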
@@ -86,6 +89,9 @@ typedef int (init_request_fn)(void *, struct request *, unsigned int,
 typedef void (exit_request_fn)(void *, struct request *, unsigned int,
		unsigned int);

+typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
+		bool);
+
 struct blk_mq_ops {
	/*
	 * Queue request
@@ -100,7 +106,7 @@ struct blk_mq_ops {
	/*
	 * Called on request timeout
	 */
-	rq_timed_out_fn	*timeout;
+	timeout_fn	*timeout;

	softirq_done_fn	*complete;

@@ -115,6 +121,10 @@ struct blk_mq_ops {
	/*
	 * Called for every command allocated by the block layer to allow
	 * the driver to set up driver specific data.
+	 *
+	 * Tag greater than or equal to queue_depth is for setting up
+	 * flush request.
+	 *
	 * Ditto for exit/teardown.
	 */
	init_request_fn		*init_request;
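The added comment documents that, with the per-hctx flush machinery, the flush request is set up through the same ->init_request hook but with a tag index at or beyond queue_depth. A sketch of how a driver might act on that, assuming the same hypothetical my_dev/my_cmd structures with an embedded blk_mq_tag_set:

/*
 * Hypothetical ->init_request: a request_idx at or beyond queue_depth is
 * the per-hctx flush request described by the comment above.
 */
static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int request_idx,
			   unsigned int numa_node)
{
	struct my_dev *dev = data;			/* hypothetical device */
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->dev = dev;					/* hypothetical field */
	cmd->is_flush = request_idx >= dev->tag_set.queue_depth;
	return 0;
}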
@@ -160,8 +170,9 @@ struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);

-void blk_mq_end_io(struct request *rq, int error);
-void __blk_mq_end_io(struct request *rq, int error);
+void blk_mq_start_request(struct request *rq);
+void blk_mq_end_request(struct request *rq, int error);
+void __blk_mq_end_request(struct request *rq, int error);

 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
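blk_mq_end_io()/__blk_mq_end_io() become blk_mq_end_request()/__blk_mq_end_request(), and blk_mq_start_request() is now exported for drivers to call from ->queue_rq(). A sketch of a completion path using the renamed helper, assuming the same hypothetical my_cmd pdu:

/*
 * Hypothetical completion path: the renamed blk_mq_end_request() ends
 * the request; error is 0 on success or a negative errno.
 */
static void my_complete_cmd(struct my_cmd *cmd, int error)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	blk_mq_end_request(rq, error);
}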
@@ -174,7 +185,8 @@ void blk_mq_stop_hw_queues(struct request_queue *q);
 void blk_mq_start_hw_queues(struct request_queue *q);
 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
+void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
+		void *priv);

 /*
  * Driver command data is immediately after the request. So subtract request
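blk_mq_tag_busy_iter() now iterates the busy requests on a hardware context and hands each one to a busy_iter_fn callback. A minimal sketch of a caller that counts in-flight requests, with my_count_inflight()/my_hctx_inflight() as hypothetical names:

/*
 * Hypothetical busy_iter_fn callback plus caller: count the in-flight
 * requests on one hardware queue via the reworked iterator.
 */
static void my_count_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      void *priv, bool reserved)
{
	unsigned int *inflight = priv;

	(*inflight)++;
}

static unsigned int my_hctx_inflight(struct blk_mq_hw_ctx *hctx)
{
	unsigned int inflight = 0;

	blk_mq_tag_busy_iter(hctx, my_count_inflight, &inflight);
	return inflight;
}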