author:    Vivek Goyal <vgoyal@redhat.com>      2011-03-22 16:54:29 -0400
committer: Jens Axboe <jaxboe@fusionio.com>     2011-03-22 16:55:00 -0400
commit:    04521db04e9a11e74b0252d222051cb194487f4d
tree:      4a07811529a16f9e25c4de44ef0bf2621d360249
parent:    9026e521c0da0731eb31f9f9022dd00cc3cd8885
blk-throttle: Reset group slice when limits are changed
Lina reported that if throttle limits are initially very high and then
dropped, no new bio may be dispatched for a long time. The reason is
that after dropping the limits we do not reset the existing slice, so
the rate calculation applies the new low rate to bios that were already
dispatched at the old high rate. To fix it, reset the slice upon a rate
change.

https://lkml.org/lkml/2011/3/10/298
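
For context, a per-group slice is just a time window plus the bytes and
ios already charged against it, so resetting it means forgetting IO that
was dispatched under the old limit. A minimal sketch of what
throtl_start_new_slice() amounts to, assuming the blk-throttle.c field
names of this era (bytes_disp, io_disp, slice_start, slice_end,
throtl_slice are my reconstruction, not shown in this patch; the real
function also logs the new slice):

    static inline void throtl_start_new_slice(struct throtl_data *td,
                    struct throtl_grp *tg, bool rw)
    {
            tg->bytes_disp[rw] = 0;   /* forget bytes sent at the old rate */
            tg->io_disp[rw] = 0;      /* forget ios sent at the old rate */
            tg->slice_start[rw] = jiffies;               /* window restarts now... */
            tg->slice_end[rw] = jiffies + throtl_slice;  /* ...one slice long */
    }

The patch calls this once per direction (rw = 0 for READ, rw = 1 for
WRITE) so both budgets start clean under the new limits.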
Another problem with a very high limit is that the bio is never queued
on the throtl service tree, so we keep extending the group slice but
never trim it. Fix that as well by regularly trimming the slice even
when bios are not being queued.
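
To see the scale of the first problem, a rough worked example with
hypothetical numbers (the real calculation in tg_may_dispatch() is done
in jiffies):

    /* Hypothetical numbers only: the stale-slice arithmetic. */
    unsigned long long bytes_disp = 10ULL << 30;   /* 10 GB sent at the old 1 GB/s */
    unsigned long long new_bps    = 1ULL << 20;    /* limit dropped to 1 MB/s */
    unsigned long long allowed    = 10 * new_bps;  /* budget of a 10s-old slice */
    /* the group is ~10 GB over budget, so the next bio waits ~10000 s */
    unsigned long long wait_sec   = (bytes_disp - allowed) / new_bps;

Resetting the slice on a limit change zeroes bytes_disp, so the wait is
computed only from IO dispatched after the change.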
Reported-by: Lina Lu <lulina_nuaa@foxmail.com>
Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block')
 block/blk-throttle.c | 25 ++++++++++++++++++++++++-
 1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 37abbfc68590..5352bdafbcf0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -756,6 +756,15 @@ static void throtl_process_limit_change(struct throtl_data *td)
 			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
 			tg->iops[READ], tg->iops[WRITE]);
 
+		/*
+		 * Restart the slices for both READ and WRITES. It
+		 * might happen that a group's limits are dropped
+		 * suddenly and we don't want to account recently
+		 * dispatched IO with the new low rate.
+		 */
+		throtl_start_new_slice(td, tg, 0);
+		throtl_start_new_slice(td, tg, 1);
+
 		if (throtl_tg_on_rr(tg))
 			tg_update_disptime(td, tg);
 	}
@@ -821,7 +830,8 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	struct delayed_work *dwork = &td->throtl_work;
 
-	if (total_nr_queued(td) > 0) {
+	/* schedule work if limits changed even if no bio is queued */
+	if (total_nr_queued(td) > 0 || td->limits_changed) {
 		/*
 		 * We might have a work scheduled to be executed in future.
 		 * Cancel that and schedule a new one.
@@ -1002,6 +1012,19 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	/* Bio is with-in rate limit of group */
 	if (tg_may_dispatch(td, tg, bio, NULL)) {
 		throtl_charge_bio(tg, bio);
+
+		/*
+		 * We need to trim slice even when bios are not being queued
+		 * otherwise it might happen that a bio is not queued for
+		 * a long time and slice keeps on extending and trim is not
+		 * called for a long time. Now if limits are reduced suddenly
+		 * we take into account all the IO dispatched so far at new
+		 * low rate and newly queued IO gets a really long dispatch
+		 * time.
+		 *
+		 * So keep on trimming slice even if bio is not queued.
+		 */
+		throtl_trim_slice(td, tg, rw);
 		goto out;
 	}
 
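
For reference, the trim added in the last hunk works by sliding the
accounting window forward rather than letting it grow without bound. A
simplified sketch of the idea (names and fields are my reconstruction
from blk-throttle.c of this era, not part of this patch; the real
throtl_trim_slice() also trims io_disp, bails out if the slice is
already used up, and logs):

    static void trim_slice_sketch(struct throtl_grp *tg, bool rw, u64 bps)
    {
            /* whole slices that have elapsed since the window started */
            unsigned long nr_slices = (jiffies - tg->slice_start[rw]) / throtl_slice;
            /* bytes those elapsed slices allowed at the current rate */
            u64 bytes_trim = bps * throtl_slice * nr_slices / HZ;

            if (!nr_slices)
                    return;
            if (tg->bytes_disp[rw] >= bytes_trim)
                    tg->bytes_disp[rw] -= bytes_trim;  /* forgive spent budget */
            else
                    tg->bytes_disp[rw] = 0;
            tg->slice_start[rw] += nr_slices * throtl_slice;  /* slide window */
    }

Calling this on every dispatch, even when nothing is queued, keeps
slice_start close to the present, so a later limit drop only sees
recently dispatched IO.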