author		Jens Axboe <jens.axboe@oracle.com>	2010-05-03 08:28:55 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2010-05-03 08:28:55 -0400
commit		0f3942a39ed768c967cb71ea0e9be7fc94112713 (patch)
tree		337b13077be49921cbd77523c946788e12e4bac9 /block/blk-cgroup.c
parent		7407cf355fdf5500430be966dbbde84a27293bad (diff)
block: kill some useless goto's in blk-cgroup.c
goto has its place, but let's cut back on some of the more
frivolous uses of it.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
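The "frivolous" case targeted here is the single-use forward goto whose label only guards a trivial common exit. Below is a minimal userspace sketch of the before/after shape this patch applies; the names (lookup_item, items, del_item) are hypothetical illustrations, not kernel code.

#include <stdio.h>

struct item { int id; int busy; };

static struct item items[] = { { 1, 1 }, { 2, 0 } };

/* hypothetical stand-in for css_lookup(): returns NULL when id is unknown */
static struct item *lookup_item(int id)
{
	for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++)
		if (items[i].id == id)
			return &items[i];
	return NULL;
}

/* before: early exit via a goto to a label that only precedes the return */
static int del_item_goto(int id)
{
	struct item *it;
	int ret = 1;

	it = lookup_item(id);
	if (!it)
		goto out;

	if (it->busy)
		ret = 0;
out:
	return ret;
}

/* after: the same work folded under the successful-lookup branch */
static int del_item(int id)
{
	struct item *it;
	int ret = 1;

	it = lookup_item(id);
	if (it) {
		if (it->busy)
			ret = 0;
	}

	return ret;
}

int main(void)
{
	printf("%d %d\n", del_item_goto(1), del_item(1));	/* 0 0 */
	printf("%d %d\n", del_item_goto(3), del_item(3));	/* 1 1 */
	return 0;
}

The kernel hunks below follow the same shape: the work, including the blkcg->lock lock/unlock pair, moves inside the successful-lookup branch instead of straddling an out: label.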
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	84
1 file changed, 40 insertions, 44 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index d02bbf88de13..60bb049b6106 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -376,17 +376,16 @@ int blkiocg_del_blkio_group(struct blkio_group *blkg)
 
 	rcu_read_lock();
 	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
-	if (!css)
-		goto out;
-
-	blkcg = container_of(css, struct blkio_cgroup, css);
-	spin_lock_irqsave(&blkcg->lock, flags);
-	if (!hlist_unhashed(&blkg->blkcg_node)) {
-		__blkiocg_del_blkio_group(blkg);
-		ret = 0;
-	}
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-out:
+	if (css) {
+		blkcg = container_of(css, struct blkio_cgroup, css);
+		spin_lock_irqsave(&blkcg->lock, flags);
+		if (!hlist_unhashed(&blkg->blkcg_node)) {
+			__blkiocg_del_blkio_group(blkg);
+			ret = 0;
+		}
+		spin_unlock_irqrestore(&blkcg->lock, flags);
+	}
+
 	rcu_read_unlock();
 	return ret;
 }
@@ -815,17 +814,15 @@ static int blkiocg_weight_device_read(struct cgroup *cgrp, struct cftype *cft,
 	seq_printf(m, "dev\tweight\n");
 
 	blkcg = cgroup_to_blkio_cgroup(cgrp);
-	if (list_empty(&blkcg->policy_list))
-		goto out;
-
-	spin_lock_irq(&blkcg->lock);
-	list_for_each_entry(pn, &blkcg->policy_list, node) {
-		seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
-			   MINOR(pn->dev), pn->weight);
-	}
-	spin_unlock_irq(&blkcg->lock);
+	if (!list_empty(&blkcg->policy_list)) {
+		spin_lock_irq(&blkcg->lock);
+		list_for_each_entry(pn, &blkcg->policy_list, node) {
+			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
+				   MINOR(pn->dev), pn->weight);
+		}
+		spin_unlock_irq(&blkcg->lock);
+	}
 
-out:
 	return 0;
 }
 
@@ -917,40 +914,39 @@ static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 	struct blkio_policy_node *pn, *pntmp;
 
 	rcu_read_lock();
-remove_entry:
-	spin_lock_irqsave(&blkcg->lock, flags);
-
-	if (hlist_empty(&blkcg->blkg_list)) {
-		spin_unlock_irqrestore(&blkcg->lock, flags);
-		goto done;
-	}
-
-	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
-				blkcg_node);
-	key = rcu_dereference(blkg->key);
-	__blkiocg_del_blkio_group(blkg);
-
-	spin_unlock_irqrestore(&blkcg->lock, flags);
-
-	/*
-	 * This blkio_group is being unlinked as associated cgroup is going
-	 * away. Let all the IO controlling policies know about this event.
-	 *
-	 * Currently this is static call to one io controlling policy. Once
-	 * we have more policies in place, we need some dynamic registration
-	 * of callback function.
-	 */
-	spin_lock(&blkio_list_lock);
-	list_for_each_entry(blkiop, &blkio_list, list)
-		blkiop->ops.blkio_unlink_group_fn(key, blkg);
-	spin_unlock(&blkio_list_lock);
-	goto remove_entry;
+	do {
+		spin_lock_irqsave(&blkcg->lock, flags);
+
+		if (hlist_empty(&blkcg->blkg_list)) {
+			spin_unlock_irqrestore(&blkcg->lock, flags);
+			break;
+		}
+
+		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
+					blkcg_node);
+		key = rcu_dereference(blkg->key);
+		__blkiocg_del_blkio_group(blkg);
+
+		spin_unlock_irqrestore(&blkcg->lock, flags);
+
+		/*
+		 * This blkio_group is being unlinked as associated cgroup is
+		 * going away. Let all the IO controlling policies know about
+		 * this event. Currently this is static call to one io
+		 * controlling policy. Once we have more policies in place, we
+		 * need some dynamic registration of callback function.
+		 */
+		spin_lock(&blkio_list_lock);
+		list_for_each_entry(blkiop, &blkio_list, list)
+			blkiop->ops.blkio_unlink_group_fn(key, blkg);
+		spin_unlock(&blkio_list_lock);
+	} while (1);
 
-done:
 	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
 		blkio_policy_delete_node(pn);
 		kfree(pn);
 	}
+
 	free_css_id(&blkio_subsys, &blkcg->css);
 	rcu_read_unlock();
 	if (blkcg != &blkio_root_cgroup)
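The destroy-path hunk above trades the backwards "goto remove_entry" and forward "goto done" for a do { ... } while (1) loop that breaks once blkg_list is drained. Below is a rough userspace analogue of that drain-under-a-lock shape; the names (g_list, unlink_first, drain) are hypothetical and a pthread mutex stands in for the blkcg spinlock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static struct node *g_list;                            /* hypothetical list head */
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;

/* Unlink the first node under the lock; NULL means the list is drained. */
static struct node *unlink_first(void)
{
	struct node *n;

	pthread_mutex_lock(&g_lock);
	n = g_list;
	if (n)
		g_list = n->next;
	pthread_mutex_unlock(&g_lock);
	return n;
}

static void drain(void)
{
	do {
		struct node *n = unlink_first();

		if (!n)
			break;          /* the old shape jumped to a done: label here */

		/* per-node teardown runs outside the lock, as in the patched code */
		printf("unlinked %d\n", n->id);
		free(n);
	} while (1);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->id = i;
		n->next = g_list;
		g_list = n;
	}
	drain();
	return 0;
}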