author	Omar Sandoval <osandov@fb.com>	2018-05-09 05:08:50 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-05-09 10:33:05 -0400
commit	544ccc8dc904db55d4576c27a1eb66a888ffacea (patch)
tree	5fa92e3cf1a5d33eddc61dfd65d088dd5cdb6f84
parent	5238dcf4136fd7287be8e7d38752645bfa5782ec (diff)
block: get rid of struct blk_issue_stat
struct blk_issue_stat squashes three things into one u64:

- The time the driver started working on a request
- The original size of the request (for the io.low controller)
- Flags for writeback throttling

It turns out that on x86_64, we have a 4 byte hole in struct request
which we can fill with the non-timestamp fields from blk_issue_stat,
simplifying things quite a bit.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
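For readers unfamiliar with the old encoding, here is a minimal standalone
userspace sketch of the layout change, assuming a typical x86_64/LP64 ABI.
It is not kernel code: pack_issue_stat() and struct new_fields are
illustrative names, and the shift/mask arithmetic mirrors the BLK_STAT_*
macros this patch deletes.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Old scheme: one u64, from the most significant bit down:
 * 4 bits of wbt flags | 12 bits of request size | 48 bits of timestamp.
 */
#define RES_BITS	4
#define SIZE_BITS	12
#define RES_SHIFT	(64 - RES_BITS)			/* 60 */
#define SIZE_SHIFT	(RES_SHIFT - SIZE_BITS)		/* 48 */
#define TIME_MASK	((1ULL << SIZE_SHIFT) - 1)

static uint64_t pack_issue_stat(uint64_t flags, uint64_t size, uint64_t time_ns)
{
	return (flags << RES_SHIFT) |
	       ((size & ((1ULL << SIZE_BITS) - 1)) << SIZE_SHIFT) |
	       (time_ns & TIME_MASK);
}

/*
 * New scheme: plain fields. On x86_64 the two unsigned shorts slot into
 * the 4-byte hole left after an int-sized member, so the containing
 * struct does not grow even though the timestamp is stored unpacked.
 */
struct new_fields {
	unsigned int before_hole;	/* stand-in for the preceding member */
	unsigned short wbt_flags;	/* was the top 4 bits */
	unsigned short throtl_size;	/* was the middle 12 bits */
	uint64_t io_start_time_ns;	/* was the low 48 bits */
};

int main(void)
{
	uint64_t v = pack_issue_stat(0x3, 4095, 123456789ULL);

	/* Unpacking mirrors blk_stat_time()/blk_stat_size()/wbt_flags(). */
	printf("flags=%llx size=%llu time=%llu\n",
	       (unsigned long long)(v >> RES_SHIFT),
	       (unsigned long long)((v >> SIZE_SHIFT) & ((1ULL << SIZE_BITS) - 1)),
	       (unsigned long long)(v & TIME_MASK));

	/* 4 + 2 + 2 bytes pack exactly in front of the 8-byte timestamp. */
	assert(sizeof(struct new_fields) == 16);
	return 0;
}

Compiled with cc -Wall, this prints flags=3 size=4095 time=123456789, and
the sizeof assertion shows why the unpacked fields cost no extra space.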
-rw-r--r--	block/blk-core.c	5
-rw-r--r--	block/blk-mq.c	8
-rw-r--r--	block/blk-stat.c	7
-rw-r--r--	block/blk-stat.h	43
-rw-r--r--	block/blk-throttle.c	3
-rw-r--r--	block/blk-wbt.c	12
-rw-r--r--	block/blk-wbt.h	4
-rw-r--r--	block/kyber-iosched.c	6
-rw-r--r--	include/linux/blk_types.h	4
-rw-r--r--	include/linux/blkdev.h	26
10 files changed, 41 insertions, 77 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index fe2f457ed27d..33d5c7d85da1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2991,7 +2991,10 @@ void blk_start_request(struct request *req)
 	blk_dequeue_request(req);
 
 	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
-		blk_stat_set_issue(&req->issue_stat, blk_rq_sectors(req));
+		req->io_start_time_ns = ktime_get_ns();
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+		req->throtl_size = blk_rq_sectors(req);
+#endif
 		req->rq_flags |= RQF_STATS;
 		wbt_issue(req->q->rq_wb, req);
 	}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index deb85b5c6c21..17612e04d041 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -310,6 +310,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	rq->rq_disk = NULL;
 	rq->part = NULL;
 	rq->start_time = jiffies;
+	rq->io_start_time_ns = 0;
 	rq->nr_phys_segments = 0;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	rq->nr_integrity_segments = 0;
@@ -329,7 +330,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 #ifdef CONFIG_BLK_CGROUP
 	rq->rl = NULL;
 	set_start_time_ns(rq);
-	rq->io_start_time_ns = 0;
+	rq->cgroup_io_start_time_ns = 0;
 #endif
 
 	data->ctx->rq_dispatched[op_is_sync(op)]++;
@@ -669,7 +670,10 @@ void blk_mq_start_request(struct request *rq)
 	trace_block_rq_issue(q, rq);
 
 	if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
-		blk_stat_set_issue(&rq->issue_stat, blk_rq_sectors(rq));
+		rq->io_start_time_ns = ktime_get_ns();
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+		rq->throtl_size = blk_rq_sectors(rq);
+#endif
 		rq->rq_flags |= RQF_STATS;
 		wbt_issue(q->rq_wb, rq);
 	}
diff --git a/block/blk-stat.c b/block/blk-stat.c
index bd365a95fcf8..725a881723b0 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -55,11 +55,8 @@ void blk_stat_add(struct request *rq)
 	int bucket;
 	u64 now, value;
 
-	now = __blk_stat_time(ktime_to_ns(ktime_get()));
-	if (now < blk_stat_time(&rq->issue_stat))
-		return;
-
-	value = now - blk_stat_time(&rq->issue_stat);
+	now = ktime_get_ns();
+	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;
 
 	blk_throtl_stat_add(rq, value);
 
diff --git a/block/blk-stat.h b/block/blk-stat.h
index c22049a8125e..17c812db0aca 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -8,21 +8,6 @@
 #include <linux/rcupdate.h>
 #include <linux/timer.h>
 
-/*
- * from upper:
- * 4 bits: reserved for other usage
- * 12 bits: size
- * 48 bits: time
- */
-#define BLK_STAT_RES_BITS	4
-#define BLK_STAT_SIZE_BITS	12
-#define BLK_STAT_RES_SHIFT	(64 - BLK_STAT_RES_BITS)
-#define BLK_STAT_SIZE_SHIFT	(BLK_STAT_RES_SHIFT - BLK_STAT_SIZE_BITS)
-#define BLK_STAT_TIME_MASK	((1ULL << BLK_STAT_SIZE_SHIFT) - 1)
-#define BLK_STAT_SIZE_MASK	\
-	(((1ULL << BLK_STAT_SIZE_BITS) - 1) << BLK_STAT_SIZE_SHIFT)
-#define BLK_STAT_RES_MASK	(~((1ULL << BLK_STAT_RES_SHIFT) - 1))
-
 /**
  * struct blk_stat_callback - Block statistics callback.
  *
@@ -82,34 +67,6 @@ void blk_free_queue_stats(struct blk_queue_stats *);
 
 void blk_stat_add(struct request *);
 
-static inline u64 __blk_stat_time(u64 time)
-{
-	return time & BLK_STAT_TIME_MASK;
-}
-
-static inline u64 blk_stat_time(struct blk_issue_stat *stat)
-{
-	return __blk_stat_time(stat->stat);
-}
-
-static inline sector_t blk_capped_size(sector_t size)
-{
-	return size & ((1ULL << BLK_STAT_SIZE_BITS) - 1);
-}
-
-static inline sector_t blk_stat_size(struct blk_issue_stat *stat)
-{
-	return (stat->stat & BLK_STAT_SIZE_MASK) >> BLK_STAT_SIZE_SHIFT;
-}
-
-static inline void blk_stat_set_issue(struct blk_issue_stat *stat,
-				      sector_t size)
-{
-	stat->stat = (stat->stat & BLK_STAT_RES_MASK) |
-		(ktime_to_ns(ktime_get()) & BLK_STAT_TIME_MASK) |
-		(((u64)blk_capped_size(size)) << BLK_STAT_SIZE_SHIFT);
-}
-
 /* record time/size info in request but not add a callback */
 void blk_stat_enable_accounting(struct request_queue *q);
 
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 35f9b8ff40d7..f63d88c92c3a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2279,8 +2279,7 @@ void blk_throtl_stat_add(struct request *rq, u64 time_ns)
 	struct request_queue *q = rq->q;
 	struct throtl_data *td = q->td;
 
-	throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
-		req_op(rq), time_ns >> 10);
+	throtl_track_latency(td, rq->throtl_size, req_op(rq), time_ns >> 10);
 }
 
 void blk_throtl_bio_endio(struct bio *bio)
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 592e914c9890..4f89b28fa652 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -31,22 +31,22 @@
 
 static inline void wbt_clear_state(struct request *rq)
 {
-	rq->issue_stat.stat &= ~BLK_STAT_RES_MASK;
+	rq->wbt_flags = 0;
 }
 
 static inline enum wbt_flags wbt_flags(struct request *rq)
 {
-	return (rq->issue_stat.stat & BLK_STAT_RES_MASK) >> BLK_STAT_RES_SHIFT;
+	return rq->wbt_flags;
 }
 
 static inline bool wbt_is_tracked(struct request *rq)
 {
-	return (rq->issue_stat.stat >> BLK_STAT_RES_SHIFT) & WBT_TRACKED;
+	return rq->wbt_flags & WBT_TRACKED;
 }
 
 static inline bool wbt_is_read(struct request *rq)
 {
-	return (rq->issue_stat.stat >> BLK_STAT_RES_SHIFT) & WBT_READ;
+	return rq->wbt_flags & WBT_READ;
 }
 
 enum {
@@ -657,7 +657,7 @@ void wbt_issue(struct rq_wb *rwb, struct request *rq)
 	 */
 	if (wbt_is_read(rq) && !rwb->sync_issue) {
 		rwb->sync_cookie = rq;
-		rwb->sync_issue = blk_stat_time(&rq->issue_stat);
+		rwb->sync_issue = rq->io_start_time_ns;
 	}
 }
 
@@ -746,8 +746,6 @@ int wbt_init(struct request_queue *q)
 	struct rq_wb *rwb;
 	int i;
 
-	BUILD_BUG_ON(WBT_NR_BITS > BLK_STAT_RES_BITS);
-
 	rwb = kzalloc(sizeof(*rwb), GFP_KERNEL);
 	if (!rwb)
 		return -ENOMEM;
diff --git a/block/blk-wbt.h b/block/blk-wbt.h
index 85fbcccf12a5..300df531d0a6 100644
--- a/block/blk-wbt.h
+++ b/block/blk-wbt.h
@@ -63,7 +63,7 @@ struct rq_wb {
 
 	struct blk_stat_callback *cb;
 
-	s64 sync_issue;
+	u64 sync_issue;
 	void *sync_cookie;
 
 	unsigned int wc;
@@ -90,7 +90,7 @@ static inline unsigned int wbt_inflight(struct rq_wb *rwb)
 
 static inline void wbt_track(struct request *rq, enum wbt_flags flags)
 {
-	rq->issue_stat.stat |= ((u64)flags) << BLK_STAT_RES_SHIFT;
+	rq->wbt_flags |= flags;
 }
 
 void __wbt_done(struct rq_wb *, enum wbt_flags);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 0d6d25e32e1f..564967fafe5f 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -485,11 +485,11 @@ static void kyber_completed_request(struct request *rq)
 	if (blk_stat_is_active(kqd->cb))
 		return;
 
-	now = __blk_stat_time(ktime_to_ns(ktime_get()));
-	if (now < blk_stat_time(&rq->issue_stat))
+	now = ktime_get_ns();
+	if (now < rq->io_start_time_ns)
 		return;
 
-	latency = now - blk_stat_time(&rq->issue_stat);
+	latency = now - rq->io_start_time_ns;
 
 	if (latency > target)
 		blk_stat_activate_msecs(kqd->cb, 10);
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index b6f1d53cf113..4cb970cdcd11 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -91,10 +91,6 @@ static inline bool blk_path_error(blk_status_t error)
 	return true;
 }
 
-struct blk_issue_stat {
-	u64 stat;
-};
-
 /*
  * From most significant bit:
  * 1 bit: reserved for other usage, see below
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5c4eee043191..f2c2fc011e6b 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -206,8 +206,18 @@ struct request {
 	struct gendisk *rq_disk;
 	struct hd_struct *part;
 	unsigned long start_time;
-	struct blk_issue_stat issue_stat;
-	/* Number of scatter-gather DMA addr+len pairs after
+	/* Time that I/O was submitted to the device. */
+	u64 io_start_time_ns;
+
+#ifdef CONFIG_BLK_WBT
+	unsigned short wbt_flags;
+#endif
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+	unsigned short throtl_size;
+#endif
+
+	/*
+	 * Number of scatter-gather DMA addr+len pairs after
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
@@ -267,8 +277,8 @@ struct request {
 
 #ifdef CONFIG_BLK_CGROUP
 	struct request_list *rl;		/* rl this rq is alloced from */
-	unsigned long long start_time_ns;
-	unsigned long long io_start_time_ns;	/* when passed to hardware */
+	unsigned long long cgroup_start_time_ns;
+	unsigned long long cgroup_io_start_time_ns; /* when passed to hardware */
 #endif
 };
 
@@ -1797,25 +1807,25 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo
 static inline void set_start_time_ns(struct request *req)
 {
 	preempt_disable();
-	req->start_time_ns = sched_clock();
+	req->cgroup_start_time_ns = sched_clock();
 	preempt_enable();
 }
 
 static inline void set_io_start_time_ns(struct request *req)
 {
 	preempt_disable();
-	req->io_start_time_ns = sched_clock();
+	req->cgroup_io_start_time_ns = sched_clock();
 	preempt_enable();
 }
 
 static inline uint64_t rq_start_time_ns(struct request *req)
 {
-	return req->start_time_ns;
+	return req->cgroup_start_time_ns;
 }
 
 static inline uint64_t rq_io_start_time_ns(struct request *req)
 {
-	return req->io_start_time_ns;
+	return req->cgroup_io_start_time_ns;
 }
 #else
 static inline void set_start_time_ns(struct request *req) {}