author		Tejun Heo <tj@kernel.org>	2013-05-14 16:52:31 -0400
committer	Tejun Heo <tj@kernel.org>	2013-05-14 16:52:31 -0400
commit		632b44935f4c99a61c56f8a6f805a1080ab5a432 (patch)
tree		9184a0ca295c8fdd0dd9b1cccf902baf46e9c3a8 /block/blk-throttle.c
parent		2db6314c213bb21102dd1dad06cfda6a8682d624 (diff)
blk-throttle: remove deferred config application mechanism
When the bps or iops configuration changes, blk-throttle records the new
configuration and sets a flag indicating that the config has changed.
The flag is checked in the bio dispatch path and, if set, the new
configuration is applied there. This deferred config application was
necessary due to limitations in the blkcg framework which haven't
existed for quite a while now.
This patch removes the deferred config application mechanism and
applies new configurations directly from tg_set_conf(), which is
simpler.
v2: Dropped unnecessary throtl_schedule_delayed_work() call from
tg_set_conf() as suggested by Vivek Goyal.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Vivek Goyal <vgoyal@redhat.com>
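
For orientation, here is a rough, stand-alone model of the two flows this
patch trades between. The types and helper names below (struct tg,
set_conf_deferred(), dispatch_old(), set_conf_direct(), start_new_slice())
are invented for illustration only; the real change is in
block/blk-throttle.c as shown in the diff below.

#include <stdbool.h>
#include <stdio.h>

/* Stub group: only the fields needed to model the two flows. */
struct tg {
	unsigned long long bps[2];	/* [0] = READ, [1] = WRITE */
	bool limits_changed;		/* used only by the old, deferred flow */
};

/* Stand-in for restarting slice accounting in one direction. */
static void start_new_slice(struct tg *tg, int rw)
{
	printf("restart slice, rw=%d, bps=%llu\n", rw, tg->bps[rw]);
}

/* Old flow: the config writer only records the change and sets a flag. */
static void set_conf_deferred(struct tg *tg, unsigned long long bps)
{
	tg->bps[0] = bps;
	tg->limits_changed = true;	/* picked up later by the dispatch worker */
}

/* Old flow, dispatch side: the flag is checked and the config applied here. */
static void dispatch_old(struct tg *tg)
{
	if (tg->limits_changed) {
		tg->limits_changed = false;
		start_new_slice(tg, 0);
		start_new_slice(tg, 1);
	}
	/* ... dispatch queued bios ... */
}

/* New flow: the config writer applies the limit immediately, no flag needed. */
static void set_conf_direct(struct tg *tg, unsigned long long bps)
{
	tg->bps[0] = bps;
	start_new_slice(tg, 0);
	start_new_slice(tg, 1);
}

int main(void)
{
	struct tg tg = { .bps = { -1ULL, -1ULL }, .limits_changed = false };

	set_conf_deferred(&tg, 1 << 20);	/* old: takes effect only ... */
	dispatch_old(&tg);			/* ... when dispatch runs */
	set_conf_direct(&tg, 2 << 20);		/* new: takes effect at once */
	return 0;
}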
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	74
1 file changed, 20 insertions(+), 54 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 3960787358b6..7dbd0e695df0 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -85,9 +85,6 @@ struct throtl_grp {
 	unsigned long slice_start[2];
 	unsigned long slice_end[2];
 
-	/* Some throttle limits got updated for the group */
-	int limits_changed;
-
 	/* Per cpu stats pointer */
 	struct tg_stats_cpu __percpu *stats_cpu;
 
@@ -112,8 +109,6 @@ struct throtl_data
 
 	/* Work for dispatching throttled bios */
 	struct delayed_work throtl_work;
-
-	int limits_changed;
 };
 
 /* list and work item to allocate percpu group stats */
@@ -223,7 +218,6 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
 	bio_list_init(&tg->bio_lists[1]);
-	tg->limits_changed = false;
 
 	tg->bps[READ] = -1;
 	tg->bps[WRITE] = -1;
@@ -826,45 +820,6 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 	return nr_disp;
 }
 
-static void throtl_process_limit_change(struct throtl_data *td)
-{
-	struct request_queue *q = td->queue;
-	struct blkcg_gq *blkg, *n;
-
-	if (!td->limits_changed)
-		return;
-
-	xchg(&td->limits_changed, false);
-
-	throtl_log(td, "limits changed");
-
-	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
-		struct throtl_grp *tg = blkg_to_tg(blkg);
-
-		if (!tg->limits_changed)
-			continue;
-
-		if (!xchg(&tg->limits_changed, false))
-			continue;
-
-		throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu"
-			" riops=%u wiops=%u", tg->bps[READ], tg->bps[WRITE],
-			tg->iops[READ], tg->iops[WRITE]);
-
-		/*
-		 * Restart the slices for both READ and WRITES. It
-		 * might happen that a group's limit are dropped
-		 * suddenly and we don't want to account recently
-		 * dispatched IO with new low rate
-		 */
-		throtl_start_new_slice(td, tg, 0);
-		throtl_start_new_slice(td, tg, 1);
-
-		if (throtl_tg_on_rr(tg))
-			tg_update_disptime(td, tg);
-	}
-}
-
 /* Dispatch throttled bios. Should be called without queue lock held. */
 static int throtl_dispatch(struct request_queue *q)
 {
@@ -876,8 +831,6 @@ static int throtl_dispatch(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 
-	throtl_process_limit_change(td);
-
 	if (!total_nr_queued(td))
 		goto out;
 
@@ -925,8 +878,7 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 
 	struct delayed_work *dwork = &td->throtl_work;
 
-	/* schedule work if limits changed even if no bio is queued */
-	if (total_nr_queued(td) || td->limits_changed) {
+	if (total_nr_queued(td)) {
 		mod_delayed_work(kthrotld_workqueue, dwork, delay);
 		throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
 				delay, jiffies);
@@ -1023,10 +975,25 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
 	else
 		*(unsigned int *)((void *)tg + cft->private) = ctx.v;
 
-	/* XXX: we don't need the following deferred processing */
-	xchg(&tg->limits_changed, true);
-	xchg(&td->limits_changed, true);
-	throtl_schedule_delayed_work(td, 0);
+	throtl_log_tg(td, tg, "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
+		      tg->bps[READ], tg->bps[WRITE],
+		      tg->iops[READ], tg->iops[WRITE]);
+
+	/*
+	 * We're already holding queue_lock and know @tg is valid.  Let's
+	 * apply the new config directly.
+	 *
+	 * Restart the slices for both READ and WRITES. It might happen
+	 * that a group's limit are dropped suddenly and we don't want to
+	 * account recently dispatched IO with new low rate.
+	 */
+	throtl_start_new_slice(td, tg, 0);
+	throtl_start_new_slice(td, tg, 1);
+
+	if (throtl_tg_on_rr(tg)) {
+		tg_update_disptime(td, tg);
+		throtl_schedule_next_dispatch(td);
+	}
 
 	blkg_conf_finish(&ctx);
 	return 0;
@@ -1239,7 +1206,6 @@ int blk_throtl_init(struct request_queue *q)
 		return -ENOMEM;
 
 	td->tg_service_tree = THROTL_RB_ROOT;
-	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
 
 	q->td = td;