author		Jens Axboe <axboe@fb.com>	2015-11-05 12:44:55 -0500
committer	Jens Axboe <axboe@fb.com>	2015-11-07 12:40:47 -0500
commit		05229beeddf7e75e2e616ddaad4b70e7fca9528d (patch)
tree		2f8c5efffcdba6f6fd113960da9009979cf679e4 /include/linux
parent		7b371636fb6d187873d9d2730c2b1febc48a9b47 (diff)
block: add block polling support
Add basic support for polling for specific IO to complete. This uses
the cookie that blk-mq passes back, which enables the block layer to
pass this cookie to the driver to spin for a specific request.

This will be combined with request latency tracking, so we can make
qualified decisions about when to poll and when not to. For now, for
benchmark purposes, we add a sysfs file that controls whether polling
is enabled or not.

Signed-off-by: Jens Axboe <axboe@fb.com>
Acked-by: Christoph Hellwig <hch@lst.de>
Acked-by: Keith Busch <keith.busch@intel.com>
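To make the cookie flow concrete, here is a minimal sketch (not code
from this patch) of how a synchronous submitter might wait once this
lands. The 'done' flag and its completion handler are hypothetical
placeholders; only blk_poll() and the blk_qc_t cookie come from this
series:

	#include <linux/blkdev.h>
	#include <linux/sched.h>

	/* Hypothetical: our completion handler sets 'done' for this IO. */
	static bool done;

	static void my_wait_for_io(struct request_queue *q, blk_qc_t cookie)
	{
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (READ_ONCE(done))	/* set by the completion handler */
				break;
			/*
			 * Spin in the driver's ->poll hook for this cookie.
			 * blk_poll() returns false when polling is disabled
			 * or unsupported, so fall back to sleeping until the
			 * completion interrupt wakes us.
			 */
			if (!blk_poll(q, cookie))
				io_schedule();
		}
		__set_current_state(TASK_RUNNING);
	}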
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blk-mq.h	10
-rw-r--r--	include/linux/blkdev.h	3
2 files changed, 13 insertions, 0 deletions
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 83cc9d4e5455..daf17d70aeca 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -59,6 +59,9 @@ struct blk_mq_hw_ctx {
 
 	struct blk_mq_cpu_notifier	cpu_notifier;
 	struct kobject		kobj;
+
+	unsigned long		poll_invoked;
+	unsigned long		poll_success;
 };
 
 struct blk_mq_tag_set {
@@ -97,6 +100,8 @@ typedef void (exit_request_fn)(void *, struct request *, unsigned int,
 typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
 		bool);
 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
+typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
+
 
 struct blk_mq_ops {
 	/*
@@ -114,6 +119,11 @@ struct blk_mq_ops {
 	 */
 	timeout_fn		*timeout;
 
+	/*
+	 * Called to poll for completion of a specific tag.
+	 */
+	poll_fn			*poll;
+
 	softirq_done_fn		*complete;
 
 	/*
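The poll_fn typedef and the new ->poll hook fix the driver-side
contract: given a hardware context and a tag, check the device's
completion queue, reap whatever has finished, and return > 0 if the
tagged request was among the completions. A rough sketch of such a
hook follows; every my_* name is an invented driver internal, not
kernel API:

	static int my_driver_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
	{
		struct my_hw_queue *hwq = hctx->driver_data;
		int found = 0;

		spin_lock_irq(&hwq->lock);
		/*
		 * Reap every completion entry the hardware has posted;
		 * my_reap_one_cqe() completes the request and returns its tag.
		 */
		while (my_cqe_ready(hwq)) {
			if (my_reap_one_cqe(hwq) == tag)
				found = 1;
		}
		spin_unlock_irq(&hwq->lock);

		return found;	/* > 0 tells blk_poll() the tagged IO finished */
	}

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,		/* other hooks omitted */
		.poll		= my_driver_poll,
	};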
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5ee0f5243025..3fe27f8d91f0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -487,6 +487,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_POLL        22	/* IO polling enabled if set */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -814,6 +815,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 
+bool blk_poll(struct request_queue *q, blk_qc_t cookie);
+
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;	/* this is never NULL */
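This header-only diff does not show blk_poll() itself, but the pieces
above suggest its shape: bail out unless the queue is blk-mq with a
->poll hook, QUEUE_FLAG_POLL is set, and the cookie is valid; then map
the cookie to a hardware queue and tag and loop over ->poll, bumping
the poll_invoked/poll_success counters. A plausible, simplified sketch
(the real function lives in block/blk-core.c and also handles plug
flushing and task state; the blk_qc_t helpers come from the parent
commit):

	bool blk_poll(struct request_queue *q, blk_qc_t cookie)
	{
		struct blk_mq_hw_ctx *hctx;

		/* Polling must be supported and enabled, the cookie real. */
		if (!q->mq_ops || !q->mq_ops->poll || !blk_qc_t_valid(cookie) ||
		    !test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
			return false;

		hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];

		while (!need_resched()) {
			hctx->poll_invoked++;
			if (q->mq_ops->poll(hctx, blk_qc_t_to_tag(cookie)) > 0) {
				hctx->poll_success++;
				return true;
			}
		}
		return false;
	}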