about summary refs log tree commit diff stats
path: root/include
diff options
context:
space:
mode:
Diffstat (limited to 'include')
-rw-r--r--include/linux/blk-mq.h3
-rw-r--r--include/linux/blkdev.h2
2 files changed, 3 insertions, 2 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 91dfb75ce39f..ad3adb73cc70 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -129,6 +129,7 @@ enum {
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
 	BLK_MQ_F_TAG_SHARED	= 1 << 2,
+	BLK_MQ_F_SG_MERGE	= 1 << 3,
 
 	BLK_MQ_S_STOPPED	= 0,
 	BLK_MQ_S_TAG_ACTIVE	= 1,
@@ -153,7 +154,7 @@ void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
 		gfp_t gfp, bool reserved);
-struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e90e1692e052..8aba35f46f87 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -510,6 +510,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -1069,7 +1070,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic; /* detect uninitialized use-cases */
 	struct list_head list; /* requests */
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */