aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/blk-mq.h
diff options
context:
space:
mode:
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--  include/linux/blk-mq.h | 18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..7aec86127335 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
34 unsigned long flags; /* BLK_MQ_F_* flags */ 34 unsigned long flags; /* BLK_MQ_F_* flags */
35 35
36 struct request_queue *queue; 36 struct request_queue *queue;
37 unsigned int queue_num;
38 struct blk_flush_queue *fq; 37 struct blk_flush_queue *fq;
39 38
40 void *driver_data; 39 void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
54 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; 53 unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
55 54
56 unsigned int numa_node; 55 unsigned int numa_node;
57 unsigned int cmd_size; /* per-request extra data */ 56 unsigned int queue_num;
58 57
59 atomic_t nr_active; 58 atomic_t nr_active;
60 59
@@ -147,6 +146,8 @@ enum {
147 BLK_MQ_F_SG_MERGE = 1 << 2, 146 BLK_MQ_F_SG_MERGE = 1 << 2,
148 BLK_MQ_F_SYSFS_UP = 1 << 3, 147 BLK_MQ_F_SYSFS_UP = 1 << 3,
149 BLK_MQ_F_DEFER_ISSUE = 1 << 4, 148 BLK_MQ_F_DEFER_ISSUE = 1 << 4,
149 BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
150 BLK_MQ_F_ALLOC_POLICY_BITS = 1,
150 151
151 BLK_MQ_S_STOPPED = 0, 152 BLK_MQ_S_STOPPED = 0,
152 BLK_MQ_S_TAG_ACTIVE = 1, 153 BLK_MQ_S_TAG_ACTIVE = 1,
@@ -155,6 +156,12 @@ enum {
155 156
156 BLK_MQ_CPU_WORK_BATCH = 8, 157 BLK_MQ_CPU_WORK_BATCH = 8,
157}; 158};
159#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
160 ((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
161 ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
162#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
163 ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
164 << BLK_MQ_F_ALLOC_POLICY_START_BIT)
158 165
159struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); 166struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
160void blk_mq_finish_init(struct request_queue *q); 167void blk_mq_finish_init(struct request_queue *q);
@@ -167,7 +174,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
167void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); 174void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
168 175
169void blk_mq_insert_request(struct request *, bool, bool, bool); 176void blk_mq_insert_request(struct request *, bool, bool, bool);
170void blk_mq_run_queues(struct request_queue *q, bool async);
171void blk_mq_free_request(struct request *rq); 177void blk_mq_free_request(struct request *rq);
172void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); 178void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
173bool blk_mq_can_queue(struct blk_mq_hw_ctx *); 179bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -195,13 +201,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
195struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); 201struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
196struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); 202struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
197 203
204int blk_mq_request_started(struct request *rq);
198void blk_mq_start_request(struct request *rq); 205void blk_mq_start_request(struct request *rq);
199void blk_mq_end_request(struct request *rq, int error); 206void blk_mq_end_request(struct request *rq, int error);
200void __blk_mq_end_request(struct request *rq, int error); 207void __blk_mq_end_request(struct request *rq, int error);
201 208
202void blk_mq_requeue_request(struct request *rq); 209void blk_mq_requeue_request(struct request *rq);
203void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); 210void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
211void blk_mq_cancel_requeue_work(struct request_queue *q);
204void blk_mq_kick_requeue_list(struct request_queue *q); 212void blk_mq_kick_requeue_list(struct request_queue *q);
213void blk_mq_abort_requeue_list(struct request_queue *q);
205void blk_mq_complete_request(struct request *rq); 214void blk_mq_complete_request(struct request *rq);
206 215
207void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); 216void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +221,9 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
212void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); 221void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
213void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, 222void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
214 void *priv); 223 void *priv);
224void blk_mq_freeze_queue(struct request_queue *q);
225void blk_mq_unfreeze_queue(struct request_queue *q);
226void blk_mq_freeze_queue_start(struct request_queue *q);
215 227
216/* 228/*
217 * Driver command data is immediately after the request. So subtract request 229 * Driver command data is immediately after the request. So subtract request