diff options
author | Tejun Heo <tj@kernel.org> | 2012-03-05 16:15:19 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2012-03-06 15:27:23 -0500 |
commit | 03aa264ac15637b6f98374270bcdf31400965505 (patch) | |
tree | 6fa9ca54d3f775fba19123790f6655158034a1d8 /block/blk-throttle.c | |
parent | 4eef3049986e8397d5003916aed8cad6567a5e02 (diff) |
blkcg: let blkcg core manage per-queue blkg list and counter
With the previous patch moving blkg list heads and counters to
request_queue and blkg, the logic to manage them in both policies is
almost identical and can be moved to the blkcg core.
This patch moves blkg link logic into blkg_lookup_create(), implements
common blkg unlink code in blkg_destroy(), and updates
blkg_destroy_all() so that it's policy specific and can skip the root
group. The updated blkg_destroy_all() is now used both to clear the
queue for bypassing and elv switching, and to release all blkgs on
queue exit.
This patch introduces a race window where policy [de]registration may
race against queue blkg clearing. This can only be a problem on cfq
unload and shouldn't be a real problem in practice (and we have many
other places where this race already exists). Future patches will
remove these unlikely races.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r-- | block/blk-throttle.c | 99 |
1 files changed, 2 insertions, 97 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index c15d38307e1d..132941260e58 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -157,14 +157,6 @@ static void throtl_init_blkio_group(struct blkio_group *blkg) | |||
157 | tg->iops[WRITE] = -1; | 157 | tg->iops[WRITE] = -1; |
158 | } | 158 | } |
159 | 159 | ||
160 | static void throtl_link_blkio_group(struct request_queue *q, | ||
161 | struct blkio_group *blkg) | ||
162 | { | ||
163 | list_add(&blkg->q_node[BLKIO_POLICY_THROTL], | ||
164 | &q->blkg_list[BLKIO_POLICY_THROTL]); | ||
165 | q->nr_blkgs[BLKIO_POLICY_THROTL]++; | ||
166 | } | ||
167 | |||
168 | static struct | 160 | static struct |
169 | throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) | 161 | throtl_grp *throtl_lookup_tg(struct throtl_data *td, struct blkio_cgroup *blkcg) |
170 | { | 162 | { |
@@ -813,89 +805,6 @@ throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) | |||
813 | } | 805 | } |
814 | } | 806 | } |
815 | 807 | ||
816 | static void | ||
817 | throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) | ||
818 | { | ||
819 | struct blkio_group *blkg = tg_to_blkg(tg); | ||
820 | |||
821 | /* Something wrong if we are trying to remove same group twice */ | ||
822 | WARN_ON_ONCE(list_empty(&blkg->q_node[BLKIO_POLICY_THROTL])); | ||
823 | |||
824 | list_del_init(&blkg->q_node[BLKIO_POLICY_THROTL]); | ||
825 | |||
826 | /* | ||
827 | * Put the reference taken at the time of creation so that when all | ||
828 | * queues are gone, group can be destroyed. | ||
829 | */ | ||
830 | blkg_put(tg_to_blkg(tg)); | ||
831 | td->queue->nr_blkgs[BLKIO_POLICY_THROTL]--; | ||
832 | } | ||
833 | |||
834 | static bool throtl_release_tgs(struct throtl_data *td, bool release_root) | ||
835 | { | ||
836 | struct request_queue *q = td->queue; | ||
837 | struct blkio_group *blkg, *n; | ||
838 | bool empty = true; | ||
839 | |||
840 | list_for_each_entry_safe(blkg, n, &q->blkg_list[BLKIO_POLICY_THROTL], | ||
841 | q_node[BLKIO_POLICY_THROTL]) { | ||
842 | struct throtl_grp *tg = blkg_to_tg(blkg); | ||
843 | |||
844 | /* skip root? */ | ||
845 | if (!release_root && tg == td->root_tg) | ||
846 | continue; | ||
847 | |||
848 | /* | ||
849 | * If cgroup removal path got to blk_group first and removed | ||
850 | * it from cgroup list, then it will take care of destroying | ||
851 | * cfqg also. | ||
852 | */ | ||
853 | if (!blkiocg_del_blkio_group(blkg)) | ||
854 | throtl_destroy_tg(td, tg); | ||
855 | else | ||
856 | empty = false; | ||
857 | } | ||
858 | return empty; | ||
859 | } | ||
860 | |||
861 | /* | ||
862 | * Blk cgroup controller notification saying that blkio_group object is being | ||
863 | * delinked as associated cgroup object is going away. That also means that | ||
864 | * no new IO will come in this group. So get rid of this group as soon as | ||
865 | * any pending IO in the group is finished. | ||
866 | * | ||
867 | * This function is called under rcu_read_lock(). @q is the rcu protected | ||
868 | * pointer. That means @q is a valid request_queue pointer as long as we | ||
869 | * are rcu read lock. | ||
870 | * | ||
871 | * @q was fetched from blkio_group under blkio_cgroup->lock. That means | ||
872 | * it should not be NULL as even if queue was going away, cgroup deltion | ||
873 | * path got to it first. | ||
874 | */ | ||
875 | void throtl_unlink_blkio_group(struct request_queue *q, | ||
876 | struct blkio_group *blkg) | ||
877 | { | ||
878 | unsigned long flags; | ||
879 | |||
880 | spin_lock_irqsave(q->queue_lock, flags); | ||
881 | throtl_destroy_tg(q->td, blkg_to_tg(blkg)); | ||
882 | spin_unlock_irqrestore(q->queue_lock, flags); | ||
883 | } | ||
884 | |||
885 | static bool throtl_clear_queue(struct request_queue *q) | ||
886 | { | ||
887 | lockdep_assert_held(q->queue_lock); | ||
888 | |||
889 | /* | ||
890 | * Clear tgs but leave the root one alone. This is necessary | ||
891 | * because root_tg is expected to be persistent and safe because | ||
892 | * blk-throtl can never be disabled while @q is alive. This is a | ||
893 | * kludge to prepare for unified blkg. This whole function will be | ||
894 | * removed soon. | ||
895 | */ | ||
896 | return throtl_release_tgs(q->td, false); | ||
897 | } | ||
898 | |||
899 | static void throtl_update_blkio_group_common(struct throtl_data *td, | 808 | static void throtl_update_blkio_group_common(struct throtl_data *td, |
900 | struct throtl_grp *tg) | 809 | struct throtl_grp *tg) |
901 | { | 810 | { |
@@ -960,9 +869,6 @@ static void throtl_shutdown_wq(struct request_queue *q) | |||
960 | static struct blkio_policy_type blkio_policy_throtl = { | 869 | static struct blkio_policy_type blkio_policy_throtl = { |
961 | .ops = { | 870 | .ops = { |
962 | .blkio_init_group_fn = throtl_init_blkio_group, | 871 | .blkio_init_group_fn = throtl_init_blkio_group, |
963 | .blkio_link_group_fn = throtl_link_blkio_group, | ||
964 | .blkio_unlink_group_fn = throtl_unlink_blkio_group, | ||
965 | .blkio_clear_queue_fn = throtl_clear_queue, | ||
966 | .blkio_update_group_read_bps_fn = | 872 | .blkio_update_group_read_bps_fn = |
967 | throtl_update_blkio_group_read_bps, | 873 | throtl_update_blkio_group_read_bps, |
968 | .blkio_update_group_write_bps_fn = | 874 | .blkio_update_group_write_bps_fn = |
@@ -1148,12 +1054,11 @@ void blk_throtl_exit(struct request_queue *q) | |||
1148 | 1054 | ||
1149 | throtl_shutdown_wq(q); | 1055 | throtl_shutdown_wq(q); |
1150 | 1056 | ||
1151 | spin_lock_irq(q->queue_lock); | 1057 | blkg_destroy_all(q, BLKIO_POLICY_THROTL, true); |
1152 | throtl_release_tgs(td, true); | ||
1153 | 1058 | ||
1154 | /* If there are other groups */ | 1059 | /* If there are other groups */ |
1060 | spin_lock_irq(q->queue_lock); | ||
1155 | wait = q->nr_blkgs[BLKIO_POLICY_THROTL]; | 1061 | wait = q->nr_blkgs[BLKIO_POLICY_THROTL]; |
1156 | |||
1157 | spin_unlock_irq(q->queue_lock); | 1062 | spin_unlock_irq(q->queue_lock); |
1158 | 1063 | ||
1159 | /* | 1064 | /* |