Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r--	block/blk-throttle.c	49
1 file changed, 23 insertions(+), 26 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index b2fddaf20b98..c15d38307e1d 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -41,9 +41,6 @@ struct throtl_rb_root {
 #define rb_entry_tg(node)	rb_entry((node), struct throtl_grp, rb_node)
 
 struct throtl_grp {
-	/* List of throtl groups on the request queue*/
-	struct hlist_node tg_node;
-
 	/* active throtl group service_tree member */
 	struct rb_node rb_node;
 
@@ -83,9 +80,6 @@ struct throtl_grp {
 
 struct throtl_data
 {
-	/* List of throtl groups */
-	struct hlist_head tg_list;
-
 	/* service tree for active throtl groups */
 	struct throtl_rb_root tg_service_tree;
 
@@ -152,7 +146,6 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
 
-	INIT_HLIST_NODE(&tg->tg_node);
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
 	bio_list_init(&tg->bio_lists[1]);
@@ -167,11 +160,9 @@ static void throtl_init_blkio_group(struct blkio_group *blkg)
 static void throtl_link_blkio_group(struct request_queue *q,
 				    struct blkio_group *blkg)
 {
-	struct throtl_data *td = q->td;
-	struct throtl_grp *tg = blkg_to_tg(blkg);
-
-	hlist_add_head(&tg->tg_node, &td->tg_list);
-	td->nr_undestroyed_grps++;
+	list_add(&blkg->q_node[BLKIO_POLICY_THROTL],
+		 &q->blkg_list[BLKIO_POLICY_THROTL]);
+	q->nr_blkgs[BLKIO_POLICY_THROTL]++;
 }
 
 static struct
@@ -711,8 +702,8 @@ static int throtl_select_dispatch(struct throtl_data *td, struct bio_list *bl)
 
 static void throtl_process_limit_change(struct throtl_data *td)
 {
-	struct throtl_grp *tg;
-	struct hlist_node *pos, *n;
+	struct request_queue *q = td->queue;
+	struct blkio_group *blkg, *n;
 
 	if (!td->limits_changed)
 		return;
@@ -721,7 +712,10 @@ static void throtl_process_limit_change(struct throtl_data *td)
 
 	throtl_log(td, "limits changed");
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_THROTL],
+				 q_node[BLKIO_POLICY_THROTL]) {
+		struct throtl_grp *tg = blkg_to_tg(blkg);
+
 		if (!tg->limits_changed)
 			continue;
 
@@ -822,26 +816,31 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
 static void
 throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
 {
+	struct blkio_group *blkg = tg_to_blkg(tg);
+
 	/* Something wrong if we are trying to remove same group twice */
-	BUG_ON(hlist_unhashed(&tg->tg_node));
+	WARN_ON_ONCE(list_empty(&blkg->q_node[BLKIO_POLICY_THROTL]));
 
-	hlist_del_init(&tg->tg_node);
+	list_del_init(&blkg->q_node[BLKIO_POLICY_THROTL]);
 
 	/*
 	 * Put the reference taken at the time of creation so that when all
 	 * queues are gone, group can be destroyed.
 	 */
 	blkg_put(tg_to_blkg(tg));
-	td->nr_undestroyed_grps--;
+	td->queue->nr_blkgs[BLKIO_POLICY_THROTL]--;
 }
 
 static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
 {
-	struct hlist_node *pos, *n;
-	struct throtl_grp *tg;
+	struct request_queue *q = td->queue;
+	struct blkio_group *blkg, *n;
 	bool empty = true;
 
-	hlist_for_each_entry_safe(tg, pos, n, &td->tg_list, tg_node) {
+	list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_THROTL],
+				 q_node[BLKIO_POLICY_THROTL]) {
+		struct throtl_grp *tg = blkg_to_tg(blkg);
+
 		/* skip root? */
 		if (!release_root && tg == td->root_tg)
 			continue;
@@ -851,7 +850,7 @@ static bool throtl_release_tgs(struct throtl_data *td, bool release_root)
 		 * it from cgroup list, then it will take care of destroying
 		 * cfqg also.
 		 */
-		if (!blkiocg_del_blkio_group(tg_to_blkg(tg)))
+		if (!blkiocg_del_blkio_group(blkg))
 			throtl_destroy_tg(td, tg);
 		else
 			empty = false;
@@ -1114,7 +1113,6 @@ int blk_throtl_init(struct request_queue *q)
 	if (!td)
 		return -ENOMEM;
 
-	INIT_HLIST_HEAD(&td->tg_list);
 	td->tg_service_tree = THROTL_RB_ROOT;
 	td->limits_changed = false;
 	INIT_DELAYED_WORK(&td->throtl_work, blk_throtl_work);
@@ -1144,7 +1142,7 @@ int blk_throtl_init(struct request_queue *q)
 void blk_throtl_exit(struct request_queue *q)
 {
 	struct throtl_data *td = q->td;
-	bool wait = false;
+	bool wait;
 
 	BUG_ON(!td);
 
@@ -1154,8 +1152,7 @@ void blk_throtl_exit(struct request_queue *q)
 	throtl_release_tgs(td, true);
 
 	/* If there are other groups */
-	if (td->nr_undestroyed_grps > 0)
-		wait = true;
+	wait = q->nr_blkgs[BLKIO_POLICY_THROTL];
 
 	spin_unlock_irq(q->queue_lock);
 
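
The patch above follows one pattern throughout: the throttling policy's private bookkeeping (td->tg_list / tg->tg_node and td->nr_undestroyed_grps) moves onto the request queue as blkg->q_node[BLKIO_POLICY_THROTL], q->blkg_list[BLKIO_POLICY_THROTL] and q->nr_blkgs[BLKIO_POLICY_THROTL]. The sketch below is a minimal, self-contained userspace illustration of that per-queue, per-policy linking, not the kernel code itself: the list helpers are a simplified stand-in for <linux/list.h>, and names such as NR_POLICIES, link_group() and unlink_group() are illustrative rather than kernel APIs.

/* Sketch of per-queue, per-policy group lists (illustrative, userspace). */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

enum { POLICY_THROTL, POLICY_PROP, NR_POLICIES };	/* illustrative policy ids */

struct blkio_group {
	struct list_head q_node[NR_POLICIES];	/* one link per policy, like blkg->q_node[] */
};

struct request_queue {
	struct list_head blkg_list[NR_POLICIES];	/* per-policy group lists */
	int nr_blkgs[NR_POLICIES];			/* replaces nr_undestroyed_grps */
};

/* Mirrors what throtl_link_blkio_group() does after the patch. */
static void link_group(struct request_queue *q, struct blkio_group *blkg, int pol)
{
	list_add(&blkg->q_node[pol], &q->blkg_list[pol]);
	q->nr_blkgs[pol]++;
}

/* Mirrors the unlink half of throtl_destroy_tg() after the patch. */
static void unlink_group(struct request_queue *q, struct blkio_group *blkg, int pol)
{
	list_del_init(&blkg->q_node[pol]);
	q->nr_blkgs[pol]--;
}

int main(void)
{
	struct request_queue q;
	struct blkio_group g;

	for (int i = 0; i < NR_POLICIES; i++) {
		INIT_LIST_HEAD(&q.blkg_list[i]);
		q.nr_blkgs[i] = 0;
		INIT_LIST_HEAD(&g.q_node[i]);
	}

	link_group(&q, &g, POLICY_THROTL);
	printf("throtl groups: %d, list empty: %d\n",
	       q.nr_blkgs[POLICY_THROTL], list_empty(&q.blkg_list[POLICY_THROTL]));
	unlink_group(&q, &g, POLICY_THROTL);
	printf("throtl groups: %d, list empty: %d\n",
	       q.nr_blkgs[POLICY_THROTL], list_empty(&q.blkg_list[POLICY_THROTL]));
	return 0;
}

This also shows why blk_throtl_exit() can simply read q->nr_blkgs[BLKIO_POLICY_THROTL] to decide whether to wait: the counter is maintained at link/unlink time alongside the list itself.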