summaryrefslogtreecommitdiffstats
path: root/block/blk-throttle.c
diff options
context:
space:
mode:
authorShaohua Li <shli@fb.com>2017-08-18 19:08:13 -0400
committerJens Axboe <axboe@kernel.dk>2017-08-23 17:56:33 -0400
commitea0ea2bc6dd8923d86a0fa98743dbeed98645486 (patch)
tree462c57a8375b3c19d642d11dc43344a6f7aecab7 /block/blk-throttle.c
parent6470812e22261d2342ef1597be62e63a0423d691 (diff)
blk-throttle: cap discard request size
A discard request is usually very big and can easily use up all the bandwidth budget of a cgroup. A discard request's size doesn't really reflect the amount of data written, so it doesn't make sense to account it against the bandwidth budget. Jens pointed out that treating the size as 0 doesn't make sense either, because a discard request does have a cost. But it's not easy to find the actual cost. This patch simply makes the size one sector. Signed-off-by: Shaohua Li <shli@fb.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--block/blk-throttle.c18
1 file changed, 14 insertions, 4 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a7285bf2831c..80f5481fe9f6 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
382 } \ 382 } \
383} while (0) 383} while (0)
384 384
385static inline unsigned int throtl_bio_data_size(struct bio *bio)
386{
387	/* Discard size doesn't reflect data written; charge it as one sector (512B). */
388	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
389		return 512;
390	return bio->bi_iter.bi_size;
391}
392
385static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) 393static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
386{ 394{
387 INIT_LIST_HEAD(&qn->node); 395 INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
934 bool rw = bio_data_dir(bio); 942 bool rw = bio_data_dir(bio);
935 u64 bytes_allowed, extra_bytes, tmp; 943 u64 bytes_allowed, extra_bytes, tmp;
936 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; 944 unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
945 unsigned int bio_size = throtl_bio_data_size(bio);
937 946
938 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; 947 jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
939 948
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
947 do_div(tmp, HZ); 956 do_div(tmp, HZ);
948 bytes_allowed = tmp; 957 bytes_allowed = tmp;
949 958
950 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) { 959 if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
951 if (wait) 960 if (wait)
952 *wait = 0; 961 *wait = 0;
953 return true; 962 return true;
954 } 963 }
955 964
956 /* Calc approx time to dispatch */ 965 /* Calc approx time to dispatch */
957 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed; 966 extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
958 jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw)); 967 jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
959 968
960 if (!jiffy_wait) 969 if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
1034static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) 1043static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1035{ 1044{
1036 bool rw = bio_data_dir(bio); 1045 bool rw = bio_data_dir(bio);
1046 unsigned int bio_size = throtl_bio_data_size(bio);
1037 1047
1038 /* Charge the bio to the group */ 1048 /* Charge the bio to the group */
1039 tg->bytes_disp[rw] += bio->bi_iter.bi_size; 1049 tg->bytes_disp[rw] += bio_size;
1040 tg->io_disp[rw]++; 1050 tg->io_disp[rw]++;
1041 tg->last_bytes_disp[rw] += bio->bi_iter.bi_size; 1051 tg->last_bytes_disp[rw] += bio_size;
1042 tg->last_io_disp[rw]++; 1052 tg->last_io_disp[rw]++;
1043 1053
1044 /* 1054 /*