Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--  kernel/cgroup.c | 57 ++++++++++++++++++++++++++++++---------------------------
1 file changed, 30 insertions(+), 27 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1d2b6ceea95d..d9d5648f3cdc 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -265,7 +265,7 @@ list_for_each_entry(_root, &roots, root_list)
 /* the list of cgroups eligible for automatic release. Protected by
  * release_list_lock */
 static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_RAW_SPINLOCK(release_list_lock);
 static void cgroup_release_agent(struct work_struct *work);
 static DECLARE_WORK(release_agent_work, cgroup_release_agent);
 static void check_for_release(struct cgroup *cgrp);
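The first change turns release_list_lock into a raw spinlock. The usual motivation for such a swap is PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock; raw_spinlock_t keeps the classic busy-wait semantics, so it stays safe to take in contexts that must not sleep. A minimal sketch of the raw API, using made-up names (demo_lock, demo_update):

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_update(void)
	{
		/* never sleeps, even on PREEMPT_RT; keep the section short */
		raw_spin_lock(&demo_lock);
		/* ... touch the data protected by demo_lock ... */
		raw_spin_unlock(&demo_lock);
	}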
@@ -2027,7 +2027,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		goto out_free_group_list;
 
 	/* prevent changes to the threadgroup list while we take a snapshot. */
-	rcu_read_lock();
+	read_lock(&tasklist_lock);
 	if (!thread_group_leader(leader)) {
 		/*
 		 * a race with de_thread from another thread's exec() may strip
@@ -2036,7 +2036,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		 * throw this task away and try again (from cgroup_procs_write);
 		 * this is "double-double-toil-and-trouble-check locking".
 		 */
-		rcu_read_unlock();
+		read_unlock(&tasklist_lock);
 		retval = -EAGAIN;
 		goto out_free_group_list;
 	}
@@ -2057,7 +2057,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 	} while_each_thread(leader, tsk);
 	/* remember the number of threads in the array for later. */
 	group_size = i;
-	rcu_read_unlock();
+	read_unlock(&tasklist_lock);
 
 	/*
 	 * step 1: check that we can legitimately attach to the cgroup.
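These three hunks replace RCU read-side protection with read_lock(&tasklist_lock) around the thread-group snapshot. Under rcu_read_lock() the group list may still change concurrently (for instance, de_thread() during exec() can strip the group leadership, which is what forces the -EAGAIN retry above); tasklist_lock is a reader-writer lock that the fork/exec/exit paths take for writing, so holding it for reading pins the thread list while while_each_thread() walks it. A condensed sketch of the pattern, with a hypothetical visit() callback standing in for the flex-array bookkeeping:

	#include <linux/sched.h>

	static void snapshot_threadgroup(struct task_struct *leader,
					 void (*visit)(struct task_struct *))
	{
		struct task_struct *tsk = leader;

		read_lock(&tasklist_lock);	/* excludes fork()/exec()/exit() updates */
		do {
			visit(tsk);		/* e.g. take a reference on each thread */
		} while_each_thread(leader, tsk);
		read_unlock(&tasklist_lock);
	}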
@@ -2135,14 +2135,17 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader)
 		oldcgrp = task_cgroup_from_root(tsk, root);
 		if (cgrp == oldcgrp)
 			continue;
-		/* attach each task to each subsystem */
-		for_each_subsys(root, ss) {
-			if (ss->attach_task)
-				ss->attach_task(cgrp, tsk);
-		}
 		/* if the thread is PF_EXITING, it can just get skipped. */
 		retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true);
-		BUG_ON(retval != 0 && retval != -ESRCH);
+		if (retval == 0) {
+			/* attach each task to each subsystem */
+			for_each_subsys(root, ss) {
+				if (ss->attach_task)
+					ss->attach_task(cgrp, tsk);
+			}
+		} else {
+			BUG_ON(retval != -ESRCH);
+		}
 	}
 	/* nothing is sensitive to fork() after this point. */
 
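This hunk fixes an ordering bug rather than a lock type: previously each subsystem's attach_task() callback ran before cgroup_task_migrate(), so a task that turned out to be exiting (-ESRCH) had already been attached. The rewrite makes the per-subsystem callbacks conditional on the migration having succeeded, and any error other than -ESRCH still trips the BUG_ON. Schematically (migrate_one() is a stand-in for cgroup_task_migrate()):

	retval = migrate_one(cgrp, oldcgrp, tsk);
	if (retval == 0) {
		/* commit per-subsystem side effects only after success */
		for_each_subsys(root, ss)
			if (ss->attach_task)
				ss->attach_task(cgrp, tsk);
	} else {
		/* an exiting task is simply skipped; anything else is a logic bug */
		BUG_ON(retval != -ESRCH);
	}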
@@ -4014,11 +4017,11 @@ again:
 	finish_wait(&cgroup_rmdir_waitq, &wait);
 	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	set_bit(CGRP_REMOVED, &cgrp->flags);
 	if (!list_empty(&cgrp->release_list))
 		list_del_init(&cgrp->release_list);
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 
 	cgroup_lock_hierarchy(cgrp->root);
 	/* delete this cgroup from parent->children */
@@ -4671,13 +4674,13 @@ static void check_for_release(struct cgroup *cgrp)
 		 * already queued for a userspace notification, queue
 		 * it now */
 		int need_schedule_work = 0;
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 		if (!cgroup_is_removed(cgrp) &&
 		    list_empty(&cgrp->release_list)) {
 			list_add(&cgrp->release_list, &release_list);
 			need_schedule_work = 1;
 		}
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		if (need_schedule_work)
 			schedule_work(&release_agent_work);
 	}
@@ -4729,7 +4732,7 @@ static void cgroup_release_agent(struct work_struct *work)
 {
 	BUG_ON(work != &release_agent_work);
 	mutex_lock(&cgroup_mutex);
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	while (!list_empty(&release_list)) {
 		char *argv[3], *envp[3];
 		int i;
@@ -4738,7 +4741,7 @@ static void cgroup_release_agent(struct work_struct *work)
 						    struct cgroup,
 						    release_list);
 		list_del_init(&cgrp->release_list);
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!pathbuf)
 			goto continue_free;
@@ -4768,9 +4771,9 @@ static void cgroup_release_agent(struct work_struct *work)
  continue_free:
 		kfree(pathbuf);
 		kfree(agentbuf);
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 	}
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 	mutex_unlock(&cgroup_mutex);
 }
 
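With release_list_lock now a raw spinlock, the drop-and-retake dance inside cgroup_release_agent() matters even more: kmalloc(GFP_KERNEL) and the usermode-helper call can sleep, and a raw spinlock must never be held across a sleeping operation. The loop therefore detaches one entry, drops the lock for the slow work, and reacquires it before testing the list again. A stripped-down sketch of that shape (do_sleeping_work() is hypothetical):

	static void drain_release_list(void)
	{
		raw_spin_lock(&release_list_lock);
		while (!list_empty(&release_list)) {
			struct cgroup *cgrp = list_entry(release_list.next,
							 struct cgroup,
							 release_list);

			list_del_init(&cgrp->release_list);
			raw_spin_unlock(&release_list_lock);

			/* may sleep: kmalloc(), call_usermodehelper(), ... */
			do_sleeping_work(cgrp);

			raw_spin_lock(&release_list_lock);
		}
		raw_spin_unlock(&release_list_lock);
	}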
@@ -4880,9 +4883,9 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
 
 	rcu_assign_pointer(id->css, NULL);
 	rcu_assign_pointer(css->id, NULL);
-	spin_lock(&ss->id_lock);
+	write_lock(&ss->id_lock);
 	idr_remove(&ss->idr, id->id);
-	spin_unlock(&ss->id_lock);
+	write_unlock(&ss->id_lock);
 	kfree_rcu(id, rcu_head);
 }
 EXPORT_SYMBOL_GPL(free_css_id);
@@ -4908,10 +4911,10 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
 		error = -ENOMEM;
 		goto err_out;
 	}
-	spin_lock(&ss->id_lock);
+	write_lock(&ss->id_lock);
 	/* Don't use 0. allocates an ID of 1-65535 */
 	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
-	spin_unlock(&ss->id_lock);
+	write_unlock(&ss->id_lock);
 
 	/* Returns error when there are no free spaces for new ID.*/
 	if (error) {
@@ -4926,9 +4929,9 @@
 	return newid;
 remove_idr:
 	error = -ENOSPC;
-	spin_lock(&ss->id_lock);
+	write_lock(&ss->id_lock);
 	idr_remove(&ss->idr, myid);
-	spin_unlock(&ss->id_lock);
+	write_unlock(&ss->id_lock);
 err_out:
 	kfree(newid);
 	return ERR_PTR(error);
@@ -4940,7 +4943,7 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
 {
 	struct css_id *newid;
 
-	spin_lock_init(&ss->id_lock);
+	rwlock_init(&ss->id_lock);
 	idr_init(&ss->idr);
 
 	newid = get_new_cssid(ss, 0);
@@ -5035,9 +5038,9 @@ css_get_next(struct cgroup_subsys *ss, int id,
 	 * scan next entry from bitmap(tree), tmpid is updated after
 	 * idr_get_next().
 	 */
-	spin_lock(&ss->id_lock);
+	read_lock(&ss->id_lock);
 	tmp = idr_get_next(&ss->idr, &tmpid);
-	spin_unlock(&ss->id_lock);
+	read_unlock(&ss->id_lock);
 
 	if (!tmp)
 		break;
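The id_lock change converts ss->id_lock from a spinlock to a reader-writer lock. ID allocation and removal (get_new_cssid(), free_css_id()) are writers and take the lock exclusively, while the lookup walk in css_get_next() only reads the idr and can now run concurrently on multiple CPUs. The split looks roughly like this (demo_id_lock and the two helpers are illustrative names, not the real fields):

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(demo_id_lock);

	static void demo_writer(void)	/* ID alloc/free: exclusive access */
	{
		write_lock(&demo_id_lock);
		/* ... idr_get_new_above() / idr_remove() ... */
		write_unlock(&demo_id_lock);
	}

	static void demo_reader(void)	/* lookup walk: readers may overlap */
	{
		read_lock(&demo_id_lock);
		/* ... idr_get_next() ... */
		read_unlock(&demo_id_lock);
	}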
