Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 52 insertions(+), 11 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0e6f765aa1f5..1b135d49b279 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -8,6 +8,7 @@
 #include <linux/major.h>
 #include <linux/genhd.h>
 #include <linux/list.h>
+#include <linux/llist.h>
 #include <linux/timer.h>
 #include <linux/workqueue.h>
 #include <linux/pagemap.h>
@@ -94,12 +95,19 @@ enum rq_cmd_type_bits {
  * as well!
  */
 struct request {
-	struct list_head queuelist;
-	struct call_single_data csd;
+	union {
+		struct list_head queuelist;
+		struct llist_node ll_list;
+	};
+	union {
+		struct call_single_data csd;
+		struct work_struct mq_flush_data;
+	};
 
 	struct request_queue *q;
+	struct blk_mq_ctx *mq_ctx;
 
-	unsigned int cmd_flags;
+	u64 cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
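
The two anonymous unions overlay fields that are never live at the same time: the legacy path links a request through queuelist, while blk-mq hands completed requests around on the lock-free ll_list; likewise csd (IPI completion data) is dead whenever mq_flush_data is in use, so sharing storage keeps struct request from growing. A standalone sketch of the idiom (illustrative names, not from this patch; build with -std=c11 or gnu11):

	/*
	 * Illustrative sketch only: an anonymous union lets two mutually
	 * exclusive linkage fields share storage, as struct request now
	 * does with queuelist/ll_list.
	 */
	#include <stdio.h>

	struct node {
		union {
			struct { struct node *next, *prev; } list;	/* doubly linked, legacy path */
			struct node *llnext;				/* singly linked, lock-free path */
		};
		int data;
	};

	int main(void)
	{
		struct node n = { .llnext = NULL, .data = 42 };

		/* Both linkage members occupy the same bytes; only one may be live. */
		printf("sizeof(struct node) = %zu\n", sizeof(struct node));
		printf("data = %d\n", n.data);
		return 0;
	}
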
@@ -160,8 +168,6 @@ struct request {
 
 	unsigned short ioprio;
 
-	int ref_count;
-
 	void *special;		/* opaque pointer available for LLD use */
 	char *buffer;		/* kaddr of the current segment if available */
 
@@ -215,6 +221,8 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
+struct blk_queue_ctx;
+
 typedef void (request_fn_proc) (struct request_queue *q);
 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
@@ -313,6 +321,18 @@ struct request_queue {
 	dma_drain_needed_fn *dma_drain_needed;
 	lld_busy_fn *lld_busy_fn;
 
+	struct blk_mq_ops *mq_ops;
+
+	unsigned int *mq_map;
+
+	/* sw queues */
+	struct blk_mq_ctx *queue_ctx;
+	unsigned int nr_queues;
+
+	/* hw dispatch queues */
+	struct blk_mq_hw_ctx **queue_hw_ctx;
+	unsigned int nr_hw_queues;
+
 	/*
 	 * Dispatch queue sorting
 	 */
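
mq_map translates a software context, of which there is one per CPU, into an index into the hardware dispatch queue array. A userspace model of that mapping, with assumed queue counts and a simple modulo spread (the real lookup lives in the blk-mq core, not in this header):

	/*
	 * Userspace model of the mapping implied by mq_map: one software
	 * context per CPU, folded onto a smaller set of hardware dispatch
	 * queues. Counts and the modulo spread are assumptions.
	 */
	#include <stdio.h>

	#define NR_CPUS		8	/* software contexts, one per CPU */
	#define NR_HW_QUEUES	2	/* hardware dispatch queues */

	int main(void)
	{
		unsigned int mq_map[NR_CPUS];

		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
			mq_map[cpu] = cpu % NR_HW_QUEUES;	/* cpu -> hw queue index */

		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %u -> hw queue %u\n", cpu, mq_map[cpu]);
		return 0;
	}
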
@@ -361,6 +381,11 @@ struct request_queue {
 	 */
 	struct kobject kobj;
 
+	/*
+	 * mq queue kobject
+	 */
+	struct kobject mq_kobj;
+
 #ifdef CONFIG_PM_RUNTIME
 	struct device *dev;
 	int rpm_status;
@@ -425,7 +450,13 @@ struct request_queue {
 	unsigned long flush_pending_since;
 	struct list_head flush_queue[2];
 	struct list_head flush_data_in_flight;
-	struct request flush_rq;
+	union {
+		struct request flush_rq;
+		struct {
+			spinlock_t mq_flush_lock;
+			struct work_struct mq_flush_work;
+		};
+	};
 
 	struct mutex sysfs_lock;
 
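
A queue uses either the legacy flush machinery, which embeds a full struct request, or the blk-mq path, which needs only a lock and a work item, so the union lets the mq state ride for free inside the larger member. A toy model of the footprint (stand-in sizes, not the real layouts):

	/*
	 * Toy model of the flush-state union: a union costs max(member
	 * sizes), so the small mq state adds nothing while the legacy
	 * flush_rq dominates. Sizes are stand-ins.
	 */
	#include <stdio.h>

	struct legacy_flush { char rq[320]; };		/* stand-in for struct request */
	struct mq_flush { int lock; void *work; };	/* stand-in for spinlock + work item */

	union flush_state {
		struct legacy_flush flush_rq;
		struct mq_flush mq;
	};

	int main(void)
	{
		printf("legacy %zu, mq %zu, union %zu\n",
		       sizeof(struct legacy_flush), sizeof(struct mq_flush),
		       sizeof(union flush_state));
		return 0;
	}
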
@@ -437,14 +468,14 @@ struct request_queue {
 	struct bsg_class_device bsg_dev;
 #endif
 
-#ifdef CONFIG_BLK_CGROUP
-	struct list_head all_q_node;
-#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
 	struct rcu_head rcu_head;
+	wait_queue_head_t mq_freeze_wq;
+	struct percpu_counter mq_usage_counter;
+	struct list_head all_q_node;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -467,12 +498,16 @@ struct request_queue {
 #define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 #define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
+#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
 				 (1 << QUEUE_FLAG_ADD_RANDOM))
 
+#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
+				 (1 << QUEUE_FLAG_SAME_COMP))
+
 static inline void queue_lockdep_assert_held(struct request_queue *q)
 {
 	if (q->queue_lock)
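
The QUEUE_FLAG_* constants are bit numbers, not masks; defaults are composed by shifting, and at runtime the bits are tested on queue_flags with test_bit(), as the blk_queue_* macros below show. Note the mq default omits STACKABLE and ADD_RANDOM relative to the legacy default. A userspace model of the convention (illustrative bit numbers, not the real values):

	/*
	 * Model of the flag convention: values are bit numbers, composed
	 * into masks by shifting and tested bit-by-bit (the kernel uses
	 * test_bit()). Bit numbers here are illustrative only.
	 */
	#include <stdio.h>

	enum { F_IO_STAT, F_STACKABLE, F_SAME_COMP, F_ADD_RANDOM };

	#define DEFAULT_FLAGS		((1UL << F_IO_STAT) |	\
					 (1UL << F_STACKABLE) |	\
					 (1UL << F_SAME_COMP) |	\
					 (1UL << F_ADD_RANDOM))
	#define MQ_DEFAULT_FLAGS	((1UL << F_IO_STAT) |	\
					 (1UL << F_SAME_COMP))

	static int flag_test(unsigned long flags, int bit)	/* models test_bit() */
	{
		return (flags >> bit) & 1;
	}

	int main(void)
	{
		unsigned long queue_flags = MQ_DEFAULT_FLAGS;

		printf("io_stat=%d add_random=%d\n",
		       flag_test(queue_flags, F_IO_STAT),
		       flag_test(queue_flags, F_ADD_RANDOM));	/* 1, 0 */
		return 0;
	}
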
@@ -539,6 +574,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
+#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -570,7 +606,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		((rq)->cmd_flags & 1)
+#define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)
 
 static inline unsigned int blk_queue_cluster(struct request_queue *q)
 {
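
The rq_data_dir() change follows from cmd_flags widening to u64: a bare (cmd_flags & 1) now has 64-bit type, while comparing it against zero yields a plain int 0 or 1, which is what callers that index two-element arrays or print with %d expect. A small demonstration:

	/*
	 * Why the != 0: the & expression inherits the 64-bit width of the
	 * flag word, the comparison result is a plain int.
	 */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t cmd_flags = 1;		/* lowest bit encodes data direction */

		/* typically 8 and 4 on LP64 platforms */
		printf("sizeof(flags & 1)        = %zu\n", sizeof(cmd_flags & 1));
		printf("sizeof((flags & 1) != 0) = %zu\n", sizeof((cmd_flags & 1) != 0));
		return 0;
	}
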
@@ -1013,6 +1049,7 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
 struct blk_plug {
 	unsigned long magic; /* detect uninitialized use-cases */
 	struct list_head list; /* requests */
+	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
 };
 #define BLK_MAX_REQUEST_COUNT 16
@@ -1050,7 +1087,10 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 {
 	struct blk_plug *plug = tsk->plug;
 
-	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+	return plug &&
+		(!list_empty(&plug->list) ||
+		 !list_empty(&plug->mq_list) ||
+		 !list_empty(&plug->cb_list));
 }
 
 /*
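
With blk-mq requests plugged onto their own mq_list, a task's plug holds pending work if any of its three lists is non-empty, hence the third list_empty() test. A userspace model of the check (minimal circular-list stand-in, field names mirroring struct blk_plug):

	/*
	 * Model of the three-list emptiness check: a plug needs flushing
	 * if any of its lists has entries.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static void list_init(struct list_head *h) { h->next = h->prev = h; }
	static bool list_empty(const struct list_head *h) { return h->next == h; }

	struct blk_plug { struct list_head list, mq_list, cb_list; };

	static bool needs_flush(const struct blk_plug *p)
	{
		return !list_empty(&p->list) ||
		       !list_empty(&p->mq_list) ||
		       !list_empty(&p->cb_list);
	}

	int main(void)
	{
		struct blk_plug plug;

		list_init(&plug.list);
		list_init(&plug.mq_list);
		list_init(&plug.cb_list);
		printf("needs flush: %d\n", needs_flush(&plug));	/* 0: all empty */

		/* pretend one blk-mq request got plugged */
		struct list_head rq;
		rq.next = rq.prev = &plug.mq_list;
		plug.mq_list.next = plug.mq_list.prev = &rq;
		printf("needs flush: %d\n", needs_flush(&plug));	/* 1 */
		return 0;
	}
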
@@ -1325,6 +1365,7 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
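
The new export mirrors kblockd_schedule_work() but takes a delayed_work plus a delay in jiffies. A plausible implementation, by analogy with the non-delayed variant (sketch only; the actual body would live in block/blk-core.c, which this patch excerpt does not show):

	/* Sketch by analogy with kblockd_schedule_work(); kblockd_workqueue
	 * is the block layer's existing private workqueue. */
	int kblockd_schedule_delayed_work(struct request_queue *q,
					  struct delayed_work *dwork,
					  unsigned long delay)
	{
		return queue_delayed_work(kblockd_workqueue, dwork, delay);
	}
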