Diffstat (limited to 'include/linux/blk-mq.h')
 -rw-r--r--  include/linux/blk-mq.h | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..7aec86127335 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
 	unsigned long		flags;		/* BLK_MQ_F_* flags */
 
 	struct request_queue	*queue;
-	unsigned int		queue_num;
 	struct blk_flush_queue	*fq;
 
 	void			*driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int		numa_node;
-	unsigned int		cmd_size;	/* per-request extra data */
+	unsigned int		queue_num;
 
 	atomic_t		nr_active;
 
@@ -147,6 +146,8 @@ enum {
 	BLK_MQ_F_SG_MERGE	= 1 << 2,
 	BLK_MQ_F_SYSFS_UP	= 1 << 3,
 	BLK_MQ_F_DEFER_ISSUE	= 1 << 4,
+	BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
+	BLK_MQ_F_ALLOC_POLICY_BITS = 1,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
@@ -155,6 +156,12 @@ enum {
 
 	BLK_MQ_CPU_WORK_BATCH	= 8,
 };
+#define BLK_MQ_FLAG_TO_ALLOC_POLICY(flags) \
+	((flags >> BLK_MQ_F_ALLOC_POLICY_START_BIT) & \
+		((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1))
+#define BLK_ALLOC_POLICY_TO_MQ_FLAG(policy) \
+	((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \
+		<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 void blk_mq_finish_init(struct request_queue *q);
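
The two new enum constants carve a one-bit field out of the hw-queue flag word starting at bit 8, and the macros pack and unpack a tag allocation policy through that field. A hedged sketch of how a driver might use them (BLK_TAG_ALLOC_RR is assumed from the companion blkdev.h change in this series; the function name is hypothetical):

#include <linux/blk-mq.h>

/* Hypothetical illustration of the new flag helpers, not part of this diff. */
static int example_init_tag_set(struct blk_mq_tag_set *set)
{
	/* Merge bios, and ask the tag allocator for round-robin allocation. */
	set->flags = BLK_MQ_F_SHOULD_MERGE |
		     BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_RR);

	/* The tag code can recover the policy from the flag word later. */
	WARN_ON(BLK_MQ_FLAG_TO_ALLOC_POLICY(set->flags) != BLK_TAG_ALLOC_RR);

	return blk_mq_alloc_tag_set(set);
}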
@@ -167,7 +174,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
 void blk_mq_insert_request(struct request *, bool, bool, bool);
-void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
@@ -195,13 +201,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
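
The three additions in this hunk support tearing down a queue whose device is dying: blk_mq_request_started() lets a driver skip requests it never handed to hardware, blk_mq_cancel_requeue_work() stops the requeue work from re-dispatching, and blk_mq_abort_requeue_list() fails whatever is still parked on the requeue list. A hedged sketch of such a teardown path (function names are hypothetical):

#include <linux/blk-mq.h>
#include <linux/errno.h>

/* Hypothetical teardown helper illustrating the new requeue-list API. */
static void example_kill_queue(struct request_queue *q)
{
	/* Make sure the requeue work cannot re-dispatch requests. */
	blk_mq_cancel_requeue_work(q);

	/* Fail every request still sitting on the requeue list. */
	blk_mq_abort_requeue_list(q);
}

/* In a blk_mq_tag_busy_iter() callback, only touch started requests. */
static void example_cancel_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      void *priv, bool reserved)
{
	if (!blk_mq_request_started(rq))
		return;
	blk_mq_end_request(rq, -EIO);
}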
@@ -212,6 +221,9 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
+void blk_mq_freeze_queue(struct request_queue *q);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
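
The freeze interface exported here lets a driver quiesce a queue: blk_mq_freeze_queue() blocks until every in-flight request completes while new submissions are held off, blk_mq_unfreeze_queue() reopens the gate, and blk_mq_freeze_queue_start() begins a freeze without waiting, so a driver can start freezing several queues before blocking on any of them. A hedged sketch of the common pattern (struct my_dev and my_dev_reset_hw are hypothetical):

#include <linux/blk-mq.h>

/* Hypothetical device structure and reset hook for illustration only. */
struct my_dev {
	struct request_queue *queue;
};

static void my_dev_reset_hw(struct my_dev *dev);

static void example_reset_device(struct my_dev *dev)
{
	/* Wait until no requests are in flight, holding off new ones. */
	blk_mq_freeze_queue(dev->queue);

	my_dev_reset_hw(dev);

	/* Allow submissions to proceed again. */
	blk_mq_unfreeze_queue(dev->queue);
}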
