diff options
author | Shaohua Li <shli@fb.com> | 2017-03-27 13:51:38 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2017-03-28 10:02:20 -0400 |
commit | d61fcfa4bb18992dc8e171996808e1034dc643bb (patch) | |
tree | b604e697ded4fdb51b750b35bfbe76436d2c34f6 | |
parent | 297e3d854784821d3b8ff3ae117f20d71f125504 (diff) |
blk-throttle: choose a small throtl_slice for SSD
The throtl_slice is 100ms by default. This is a long time for an SSD,
during which a lot of IO can run. To give cgroups smoother throughput,
we choose a smaller value (20ms) for SSDs.
Signed-off-by: Shaohua Li <shli@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r-- | block/blk-sysfs.c | 2 | ||||
-rw-r--r-- | block/blk-throttle.c | 23 | ||||
-rw-r--r-- | block/blk.h | 2 |
3 files changed, 24 insertions, 3 deletions
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index b315e62cc914..7f090dd15ca6 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -906,6 +906,8 @@ int blk_register_queue(struct gendisk *disk) | |||
906 | 906 | ||
907 | blk_wb_init(q); | 907 | blk_wb_init(q); |
908 | 908 | ||
909 | blk_throtl_register_queue(q); | ||
910 | |||
909 | if (q->request_fn || (q->mq_ops && q->elevator)) { | 911 | if (q->request_fn || (q->mq_ops && q->elevator)) { |
910 | ret = elv_register_queue(q); | 912 | ret = elv_register_queue(q); |
911 | if (ret) { | 913 | if (ret) { |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 93841da808a1..d00c1c1e99e4 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -18,8 +18,9 @@ static int throtl_grp_quantum = 8; | |||
18 | /* Total max dispatch from all groups in one round */ | 18 | /* Total max dispatch from all groups in one round */ |
19 | static int throtl_quantum = 32; | 19 | static int throtl_quantum = 32; |
20 | 20 | ||
21 | /* Throttling is performed over 100ms slice and after that slice is renewed */ | 21 | /* Throttling is performed over a slice and after that slice is renewed */ |
22 | #define DFL_THROTL_SLICE (HZ / 10) | 22 | #define DFL_THROTL_SLICE_HD (HZ / 10) |
23 | #define DFL_THROTL_SLICE_SSD (HZ / 50) | ||
23 | #define MAX_THROTL_SLICE (HZ) | 24 | #define MAX_THROTL_SLICE (HZ) |
24 | 25 | ||
25 | static struct blkcg_policy blkcg_policy_throtl; | 26 | static struct blkcg_policy blkcg_policy_throtl; |
@@ -1961,7 +1962,6 @@ int blk_throtl_init(struct request_queue *q) | |||
1961 | 1962 | ||
1962 | q->td = td; | 1963 | q->td = td; |
1963 | td->queue = q; | 1964 | td->queue = q; |
1964 | td->throtl_slice = DFL_THROTL_SLICE; | ||
1965 | 1965 | ||
1966 | td->limit_valid[LIMIT_MAX] = true; | 1966 | td->limit_valid[LIMIT_MAX] = true; |
1967 | td->limit_index = LIMIT_MAX; | 1967 | td->limit_index = LIMIT_MAX; |
@@ -1982,6 +1982,23 @@ void blk_throtl_exit(struct request_queue *q) | |||
1982 | kfree(q->td); | 1982 | kfree(q->td); |
1983 | } | 1983 | } |
1984 | 1984 | ||
1985 | void blk_throtl_register_queue(struct request_queue *q) | ||
1986 | { | ||
1987 | struct throtl_data *td; | ||
1988 | |||
1989 | td = q->td; | ||
1990 | BUG_ON(!td); | ||
1991 | |||
1992 | if (blk_queue_nonrot(q)) | ||
1993 | td->throtl_slice = DFL_THROTL_SLICE_SSD; | ||
1994 | else | ||
1995 | td->throtl_slice = DFL_THROTL_SLICE_HD; | ||
1996 | #ifndef CONFIG_BLK_DEV_THROTTLING_LOW | ||
1997 | /* if no low limit, use previous default */ | ||
1998 | td->throtl_slice = DFL_THROTL_SLICE_HD; | ||
1999 | #endif | ||
2000 | } | ||
2001 | |||
1985 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW | 2002 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
1986 | ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page) | 2003 | ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page) |
1987 | { | 2004 | { |
diff --git a/block/blk.h b/block/blk.h index bcd3de6c1081..13070c325858 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -319,10 +319,12 @@ static inline struct io_context *create_io_context(gfp_t gfp_mask, int node) | |||
319 | extern void blk_throtl_drain(struct request_queue *q); | 319 | extern void blk_throtl_drain(struct request_queue *q); |
320 | extern int blk_throtl_init(struct request_queue *q); | 320 | extern int blk_throtl_init(struct request_queue *q); |
321 | extern void blk_throtl_exit(struct request_queue *q); | 321 | extern void blk_throtl_exit(struct request_queue *q); |
322 | extern void blk_throtl_register_queue(struct request_queue *q); | ||
322 | #else /* CONFIG_BLK_DEV_THROTTLING */ | 323 | #else /* CONFIG_BLK_DEV_THROTTLING */ |
323 | static inline void blk_throtl_drain(struct request_queue *q) { } | 324 | static inline void blk_throtl_drain(struct request_queue *q) { } |
324 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | 325 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } |
325 | static inline void blk_throtl_exit(struct request_queue *q) { } | 326 | static inline void blk_throtl_exit(struct request_queue *q) { } |
327 | static inline void blk_throtl_register_queue(struct request_queue *q) { } | ||
326 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | 328 | #endif /* CONFIG_BLK_DEV_THROTTLING */ |
327 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW | 329 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
328 | extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page); | 330 | extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page); |