Diffstat (limited to 'include')
 include/linux/blk-mq.h    | 8 ++++++--
 include/linux/blk_types.h | 2 ++
 2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..5735e7130d63 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
 	unsigned long flags; /* BLK_MQ_F_* flags */
 
 	struct request_queue *queue;
-	unsigned int queue_num;
 	struct blk_flush_queue *fq;
 
 	void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
 	unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
 	unsigned int numa_node;
-	unsigned int cmd_size; /* per-request extra data */
+	unsigned int queue_num;
 
 	atomic_t nr_active;
 
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
+int blk_mq_request_started(struct request *rq);
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
 void __blk_mq_end_request(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
 void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
 void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
 		void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
 
 /*
  * Driver command data is immediately after the request. So subtract request
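
For orientation, the sketch below shows one way a driver might combine the helpers this hunk exports (blk_mq_freeze_queue_start(), blk_mq_unfreeze_queue(), blk_mq_cancel_requeue_work(), blk_mq_abort_requeue_list()) with the pre-existing stop/start calls around a device reset. The function itself, its name, and the exact ordering are illustrative assumptions, not code from this change.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Hypothetical reset path: only the blk-mq calls are real; the function
 * and the device-specific steps are placeholders.
 */
static void example_reset_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);	/* stop new requests from entering */
	blk_mq_stop_hw_queues(q);	/* quiesce dispatch to the hardware */

	/* Drop requests parked on the requeue list instead of redriving them. */
	blk_mq_cancel_requeue_work(q);
	blk_mq_abort_requeue_list(q);

	/* ... device-specific reset would go here ... */

	blk_mq_start_stopped_hw_queues(q, true);	/* resume dispatch */
	blk_mq_unfreeze_queue(q);			/* admit new requests again */
}
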
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
 	__REQ_PM, /* runtime pm request */
 	__REQ_HASHED, /* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT, /* track inflight for MQ */
+	__REQ_NO_TIMEOUT, /* requests may never expire */
 	__REQ_NR_BITS, /* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM (1ULL << __REQ_PM)
 #define REQ_HASHED (1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
 
 #endif /* __LINUX_BLK_TYPES_H */
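
The new REQ_NO_TIMEOUT bit marks requests the block-layer timeout handling should never expire. A minimal, hypothetical use is sketched below; rq->cmd_flags is the existing per-request flag word, everything else is an assumption for illustration.

#include <linux/blkdev.h>
#include <linux/blk_types.h>

/*
 * Illustrative only: tag a long-lived, driver-internal request (for
 * example an asynchronous event notification) so the request timeout
 * handler skips it.
 */
static void example_mark_no_timeout(struct request *rq)
{
	rq->cmd_flags |= REQ_NO_TIMEOUT;	/* request may never expire */
	/* ... driver-specific submission of rq would follow ... */
}
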
