diff options
-rw-r--r-- | block/blk-throttle.c | 25 |
1 file changed, 24 insertions, 1 deletion
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 37abbfc68590..5352bdafbcf0 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -756,6 +756,15 @@ static void throtl_process_limit_change(struct throtl_data *td) | |||
756 | " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE], | 756 | " riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE], |
757 | tg->iops[READ], tg->iops[WRITE]); | 757 | tg->iops[READ], tg->iops[WRITE]); |
758 | 758 | ||
759 | /* | ||
760 | * Restart the slices for both READ and WRITE. It | ||
761 | * might happen that a group's limits are dropped | ||
762 | * suddenly and we don't want to account recently | ||
763 | * dispatched IO with new low rate | ||
764 | */ | ||
765 | throtl_start_new_slice(td, tg, 0); | ||
766 | throtl_start_new_slice(td, tg, 1); | ||
767 | |||
759 | if (throtl_tg_on_rr(tg)) | 768 | if (throtl_tg_on_rr(tg)) |
760 | tg_update_disptime(td, tg); | 769 | tg_update_disptime(td, tg); |
761 | } | 770 | } |
@@ -821,7 +830,8 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) | |||
821 | 830 | ||
822 | struct delayed_work *dwork = &td->throtl_work; | 831 | struct delayed_work *dwork = &td->throtl_work; |
823 | 832 | ||
824 | if (total_nr_queued(td) > 0) { | 833 | /* schedule work if limits changed even if no bio is queued */ |
834 | if (total_nr_queued(td) > 0 || td->limits_changed) { | ||
825 | /* | 835 | /* |
826 | * We might have a work scheduled to be executed in future. | 836 | * We might have a work scheduled to be executed in future. |
827 | * Cancel that and schedule a new one. | 837 | * Cancel that and schedule a new one. |
@@ -1002,6 +1012,19 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
1002 | /* Bio is with-in rate limit of group */ | 1012 | /* Bio is with-in rate limit of group */ |
1003 | if (tg_may_dispatch(td, tg, bio, NULL)) { | 1013 | if (tg_may_dispatch(td, tg, bio, NULL)) { |
1004 | throtl_charge_bio(tg, bio); | 1014 | throtl_charge_bio(tg, bio); |
1015 | |||
1016 | /* | ||
1017 | * We need to trim slice even when bios are not being queued | ||
1018 | * otherwise it might happen that a bio is not queued for | ||
1019 | * a long time and slice keeps on extending and trim is not | ||
1020 | * called for a long time. Now if limits are reduced suddenly | ||
1021 | * we take into account all the IO dispatched so far at new | ||
1022 | * low rate and newly queued IO gets a really long dispatch | ||
1023 | * time. | ||
1024 | * | ||
1025 | * So keep on trimming slice even if bio is not queued. | ||
1026 | */ | ||
1027 | throtl_trim_slice(td, tg, rw); | ||
1005 | goto out; | 1028 | goto out; |
1006 | } | 1029 | } |
1007 | 1030 | ||