Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--   kernel/cgroup.c | 148
1 file changed, 76 insertions(+), 72 deletions(-)
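
Nearly every hunk below makes the same substitution on css_set_lock: the spin_lock_bh()/spin_unlock_bh() pair, which only disables bottom halves (softirqs) while the lock is held, becomes spin_lock_irq()/spin_unlock_irq(), which disables local hard interrupts for the critical section. A minimal before/after sketch of the conversion (illustrative, mirroring the hunks rather than quoting any one of them):

        /* before: softirqs disabled while css_set_lock is held */
        spin_lock_bh(&css_set_lock);
        /* ... critical section ... */
        spin_unlock_bh(&css_set_lock);

        /* after: local hard interrupts disabled while css_set_lock is held */
        spin_lock_irq(&css_set_lock);
        /* ... critical section ... */
        spin_unlock_irq(&css_set_lock);
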
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 86cb5c6e8932..75c0ff00aca6 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -837,6 +837,8 @@ static void put_css_set_locked(struct css_set *cset)
 
 static void put_css_set(struct css_set *cset)
 {
+        unsigned long flags;
+
         /*
          * Ensure that the refcount doesn't hit zero while any readers
          * can see it. Similar to atomic_dec_and_lock(), but for an
@@ -845,9 +847,9 @@ static void put_css_set(struct css_set *cset)
         if (atomic_add_unless(&cset->refcount, -1, 1))
                 return;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irqsave(&css_set_lock, flags);
         put_css_set_locked(cset);
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irqrestore(&css_set_lock, flags);
 }
 
 /*
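
put_css_set() adopts the irqsave/irqrestore variants rather than plain spin_lock_irq(): those are the variants for a lock taken from contexts whose IRQ state is unknown, since they save the current state and restore it on unlock instead of unconditionally re-enabling interrupts. A minimal, self-contained sketch of the pattern follows; example_lock, example_count and example_put() are made-up names for illustration, not kernel symbols.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* stand-in for css_set_lock */
static int example_count;               /* state the lock protects */

static void example_put(void)
{
        unsigned long flags;

        /* caller's IRQ state is unknown: save it, disable IRQs, take the lock */
        spin_lock_irqsave(&example_lock, flags);
        example_count--;
        /* restore whatever IRQ state the caller had, rather than forcing IRQs on */
        spin_unlock_irqrestore(&example_lock, flags);
}
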
@@ -1070,11 +1072,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
 
         /* First see if we already have a cgroup group that matches
          * the desired set */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         cset = find_existing_css_set(old_cset, cgrp, template);
         if (cset)
                 get_css_set(cset);
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         if (cset)
                 return cset;
@@ -1102,7 +1104,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
          * find_existing_css_set() */
         memcpy(cset->subsys, template, sizeof(cset->subsys));
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         /* Add reference counts and links from the new css_set. */
         list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
                 struct cgroup *c = link->cgrp;
@@ -1128,7 +1130,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
                 css_get(css);
         }
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         return cset;
 }
@@ -1192,7 +1194,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
          * Release all the links from cset_links to this hierarchy's
          * root cgroup
          */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
                 list_del(&link->cset_link);
@@ -1200,7 +1202,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
                 kfree(link);
         }
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         if (!list_empty(&root->root_list)) {
                 list_del(&root->root_list);
@@ -1600,11 +1602,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
                 ss->root = dst_root;
                 css->cgroup = dcgrp;
 
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
                 hash_for_each(css_set_table, i, cset, hlist)
                         list_move_tail(&cset->e_cset_node[ss->id],
                                        &dcgrp->e_csets[ss->id]);
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
 
                 /* default hierarchy doesn't enable controllers by default */
                 dst_root->subsys_mask |= 1 << ssid;
@@ -1640,10 +1642,10 @@ static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
         if (!buf)
                 return -ENOMEM;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
         len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         if (len >= PATH_MAX)
                 len = -ERANGE;
@@ -1897,7 +1899,7 @@ static void cgroup_enable_task_cg_lists(void)
 {
         struct task_struct *p, *g;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         if (use_task_css_set_links)
                 goto out_unlock;
@@ -1922,8 +1924,12 @@ static void cgroup_enable_task_cg_lists(void)
                  * entry won't be deleted though the process has exited.
                  * Do it while holding siglock so that we don't end up
                  * racing against cgroup_exit().
+                 *
+                 * Interrupts were already disabled while acquiring
+                 * the css_set_lock, so we do not need to disable it
+                 * again when acquiring the sighand->siglock here.
                  */
-                spin_lock_irq(&p->sighand->siglock);
+                spin_lock(&p->sighand->siglock);
                 if (!(p->flags & PF_EXITING)) {
                         struct css_set *cset = task_css_set(p);
 
@@ -1932,11 +1938,11 @@ static void cgroup_enable_task_cg_lists(void)
                         list_add_tail(&p->cg_list, &cset->tasks);
                         get_css_set(cset);
                 }
-                spin_unlock_irq(&p->sighand->siglock);
+                spin_unlock(&p->sighand->siglock);
         } while_each_thread(g, p);
         read_unlock(&tasklist_lock);
 out_unlock:
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 }
 
 static void init_cgroup_housekeeping(struct cgroup *cgrp)
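
The cgroup_enable_task_cg_lists() hunks rely on lock nesting: once css_set_lock is taken with spin_lock_irq(), interrupts are already disabled, so the nested sighand->siglock drops its _irq suffix, as the comment added above explains. A minimal, self-contained sketch of that nesting follows; outer_lock, inner_lock and update_nested_state() are made-up names for illustration, not kernel symbols.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(outer_lock);     /* taken with IRQs disabled */
static DEFINE_SPINLOCK(inner_lock);     /* only ever nests inside outer_lock */
static int nested_state;

static void update_nested_state(void)
{
        spin_lock_irq(&outer_lock);     /* hard IRQs disabled from here on */

        spin_lock(&inner_lock);         /* plain variant: IRQs are already off */
        nested_state++;
        spin_unlock(&inner_lock);       /* must not re-enable IRQs here */

        spin_unlock_irq(&outer_lock);   /* IRQs enabled again */
}
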
@@ -2043,13 +2049,13 @@ static int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask)
          * Link the root cgroup in this hierarchy into all the css_set
          * objects.
          */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         hash_for_each(css_set_table, i, cset, hlist) {
                 link_css_set(&tmp_links, cset, root_cgrp);
                 if (css_set_populated(cset))
                         cgroup_update_populated(root_cgrp, true);
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         BUG_ON(!list_empty(&root_cgrp->self.children));
         BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2256,11 +2262,11 @@ out_mount:
                 struct cgroup *cgrp;
 
                 mutex_lock(&cgroup_mutex);
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
 
                 cgrp = cset_cgroup_from_root(ns->root_cset, root);
 
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
                 mutex_unlock(&cgroup_mutex);
 
                 nsdentry = kernfs_node_dentry(cgrp->kn, dentry->d_sb);
@@ -2337,11 +2343,11 @@ char *cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
         char *ret;
 
         mutex_lock(&cgroup_mutex);
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         ret = cgroup_path_ns_locked(cgrp, buf, buflen, ns);
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         mutex_unlock(&cgroup_mutex);
 
         return ret;
@@ -2369,7 +2375,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
         char *path = NULL;
 
         mutex_lock(&cgroup_mutex);
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
 
@@ -2382,7 +2388,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
                         path = buf;
         }
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         mutex_unlock(&cgroup_mutex);
         return path;
 }
@@ -2557,7 +2563,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
          * the new cgroup. There are no failure cases after here, so this
          * is the commit point.
          */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry(cset, &tset->src_csets, mg_node) {
                 list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
                         struct css_set *from_cset = task_css_set(task);
@@ -2568,7 +2574,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
                         put_css_set_locked(from_cset);
                 }
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         /*
          * Migration is committed, all target tasks are now on dst_csets.
@@ -2597,13 +2603,13 @@ out_cancel_attach:
                 }
         } while_each_subsys_mask();
 out_release_tset:
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_splice_init(&tset->dst_csets, &tset->src_csets);
         list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
                 list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
                 list_del_init(&cset->mg_node);
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         return ret;
 }
 
@@ -2634,7 +2640,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
 
         lockdep_assert_held(&cgroup_mutex);
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
                 cset->mg_src_cgrp = NULL;
                 cset->mg_dst_cgrp = NULL;
@@ -2642,7 +2648,7 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
                 list_del_init(&cset->mg_preload_node);
                 put_css_set_locked(cset);
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -2783,7 +2789,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
          * already PF_EXITING could be freed from underneath us unless we
          * take an rcu_read_lock.
          */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         rcu_read_lock();
         task = leader;
         do {
@@ -2792,7 +2798,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
                         break;
         } while_each_thread(leader, task);
         rcu_read_unlock();
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         return cgroup_taskset_migrate(&tset, root);
 }
@@ -2816,7 +2822,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                 return -EBUSY;
 
         /* look up all src csets */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         rcu_read_lock();
         task = leader;
         do {
@@ -2826,7 +2832,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
                         break;
         } while_each_thread(leader, task);
         rcu_read_unlock();
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         /* prepare dst csets and commit */
         ret = cgroup_migrate_prepare_dst(&preloaded_csets);
@@ -2859,9 +2865,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
                 struct cgroup *cgrp;
                 struct inode *inode;
 
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
                 cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
 
                 while (!cgroup_is_descendant(dst_cgrp, cgrp))
                         cgrp = cgroup_parent(cgrp);
@@ -2962,9 +2968,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
                 if (root == &cgrp_dfl_root)
                         continue;
 
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
                 from_cgrp = task_cgroup_from_root(from, root);
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
 
                 retval = cgroup_attach_task(from_cgrp, tsk, false);
                 if (retval)
@@ -3080,7 +3086,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
         percpu_down_write(&cgroup_threadgroup_rwsem);
 
         /* look up all csses currently attached to @cgrp's subtree */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         cgroup_for_each_live_descendant_pre(dsct, d_css, cgrp) {
                 struct cgrp_cset_link *link;
 
@@ -3088,14 +3094,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                         cgroup_migrate_add_src(link->cset, dsct,
                                                &preloaded_csets);
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         /* NULL dst indicates self on default hierarchy */
         ret = cgroup_migrate_prepare_dst(&preloaded_csets);
         if (ret)
                 goto out_finish;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
                 struct task_struct *task, *ntask;
 
@@ -3107,7 +3113,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
                 list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
                         cgroup_taskset_add(task, &tset);
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         ret = cgroup_taskset_migrate(&tset, cgrp->root);
 out_finish:
@@ -3908,10 +3914,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
         int count = 0;
         struct cgrp_cset_link *link;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry(link, &cgrp->cset_links, cset_link)
                 count += atomic_read(&link->cset->refcount);
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         return count;
 }
 
@@ -4249,7 +4255,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
         memset(it, 0, sizeof(*it));
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         it->ss = css->ss;
 
@@ -4262,7 +4268,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 
         css_task_iter_advance_css_set(it);
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 }
 
 /**
@@ -4280,7 +4286,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
                 it->cur_task = NULL;
         }
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         if (it->task_pos) {
                 it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4289,7 +4295,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
                 css_task_iter_advance(it);
         }
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         return it->cur_task;
 }
@@ -4303,10 +4309,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
 void css_task_iter_end(struct css_task_iter *it)
 {
         if (it->cur_cset) {
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
                 list_del(&it->iters_node);
                 put_css_set_locked(it->cur_cset);
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
         }
 
         if (it->cur_task)
@@ -4338,10 +4344,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
         mutex_lock(&cgroup_mutex);
 
         /* all tasks in @from are being moved, all csets are source */
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry(link, &from->cset_links, cset_link)
                 cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         ret = cgroup_migrate_prepare_dst(&preloaded_csets);
         if (ret)
@@ -5063,6 +5069,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
         memset(css, 0, sizeof(*css));
         css->cgroup = cgrp;
         css->ss = ss;
+        css->id = -1;
         INIT_LIST_HEAD(&css->sibling);
         INIT_LIST_HEAD(&css->children);
         css->serial_nr = css_serial_nr_next++;
@@ -5150,7 +5157,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 
         err = cgroup_idr_alloc(&ss->css_idr, NULL, 2, 0, GFP_KERNEL);
         if (err < 0)
-                goto err_free_percpu_ref;
+                goto err_free_css;
         css->id = err;
 
         /* @css is ready to be brought online now, make it visible */
@@ -5174,9 +5181,6 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
 
 err_list_del:
         list_del_rcu(&css->sibling);
-        cgroup_idr_remove(&ss->css_idr, css->id);
-err_free_percpu_ref:
-        percpu_ref_exit(&css->refcnt);
 err_free_css:
         call_rcu(&css->rcu_head, css_free_rcu_fn);
         return ERR_PTR(err);
@@ -5451,10 +5455,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
          */
         cgrp->self.flags &= ~CSS_ONLINE;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry(link, &cgrp->cset_links, cset_link)
                 link->cset->dead = true;
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
 
         /* initiate massacre of all css's */
         for_each_css(css, ssid, cgrp)
@@ -5725,7 +5729,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                 goto out;
 
         mutex_lock(&cgroup_mutex);
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         for_each_root(root) {
                 struct cgroup_subsys *ss;
@@ -5778,7 +5782,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
 
         retval = 0;
 out_unlock:
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         mutex_unlock(&cgroup_mutex);
         kfree(buf);
 out:
@@ -5923,13 +5927,13 @@ void cgroup_post_fork(struct task_struct *child)
         if (use_task_css_set_links) {
                 struct css_set *cset;
 
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
                 cset = task_css_set(current);
                 if (list_empty(&child->cg_list)) {
                         get_css_set(cset);
                         css_set_move_task(child, NULL, cset, false);
                 }
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
         }
 
         /*
@@ -5974,9 +5978,9 @@ void cgroup_exit(struct task_struct *tsk)
         cset = task_css_set(tsk);
 
         if (!list_empty(&tsk->cg_list)) {
-                spin_lock_bh(&css_set_lock);
+                spin_lock_irq(&css_set_lock);
                 css_set_move_task(tsk, cset, NULL, false);
-                spin_unlock_bh(&css_set_lock);
+                spin_unlock_irq(&css_set_lock);
         } else {
                 get_css_set(cset);
         }
@@ -6044,9 +6048,9 @@ static void cgroup_release_agent(struct work_struct *work)
         if (!pathbuf || !agentbuf)
                 goto out;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         path = cgroup_path_ns_locked(cgrp, pathbuf, PATH_MAX, &init_cgroup_ns);
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         if (!path)
                 goto out;
 
@@ -6306,12 +6310,12 @@ struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                 return ERR_PTR(-EPERM);
 
         mutex_lock(&cgroup_mutex);
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
 
         cset = task_css_set(current);
         get_css_set(cset);
 
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         mutex_unlock(&cgroup_mutex);
 
         new_ns = alloc_cgroup_ns();
@@ -6435,7 +6439,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
         if (!name_buf)
                 return -ENOMEM;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         rcu_read_lock();
         cset = rcu_dereference(current->cgroups);
         list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -6446,7 +6450,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
                            c->root->hierarchy_id, name_buf);
         }
         rcu_read_unlock();
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         kfree(name_buf);
         return 0;
 }
@@ -6457,7 +6461,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
         struct cgroup_subsys_state *css = seq_css(seq);
         struct cgrp_cset_link *link;
 
-        spin_lock_bh(&css_set_lock);
+        spin_lock_irq(&css_set_lock);
         list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                 struct css_set *cset = link->cset;
                 struct task_struct *task;
@@ -6480,7 +6484,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
         overflow:
                 seq_puts(seq, " ...\n");
         }
-        spin_unlock_bh(&css_set_lock);
+        spin_unlock_irq(&css_set_lock);
         return 0;
 }
 