author    Thomas Gleixner <tglx@linutronix.de>    2009-07-25 10:47:45 -0400
committer Ingo Molnar <mingo@elte.hu>             2011-09-13 05:11:49 -0400
commit    cdcc136ffd264849a943acb42c36ffe9b458f811 (patch)
tree      9ec3752bb848a1b5af87ccd08172367ec7417989
parent    f032a450812f6c7edd532772cc7c48091bca9f27 (diff)
locking, sched, cgroups: Annotate release_list_lock as raw
The release_list_lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse checking
will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--    kernel/cgroup.c    18
1 files changed, 9 insertions, 9 deletions
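For context, the sketch below illustrates the annotation pattern this patch applies: the lock is declared as a raw spinlock and the lock/unlock calls move to the raw_spin_* API. The example_list, example_lock and example_queue() names are hypothetical stand-ins, not code from kernel/cgroup.c.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical list + lock pair, mirroring release_list/release_list_lock. */
static LIST_HEAD(example_list);
static DEFINE_RAW_SPINLOCK(example_lock);	/* was: DEFINE_SPINLOCK(example_lock) */

/* May be called from atomic context, so the critical section must stay short. */
static void example_queue(struct list_head *item)
{
	raw_spin_lock(&example_lock);		/* was: spin_lock() */
	if (list_empty(item))
		list_add(item, &example_list);
	raw_spin_unlock(&example_lock);		/* was: spin_unlock() */
}

On a mainline kernel raw_spin_lock() behaves the same as spin_lock(); the distinct type only matters on -rt, where ordinary spinlocks become sleeping locks while raw spinlocks keep spinning.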
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 1d2b6ceea95d..453100a4159d 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -265,7 +265,7 @@ list_for_each_entry(_root, &roots, root_list)
 /* the list of cgroups eligible for automatic release. Protected by
  * release_list_lock */
 static LIST_HEAD(release_list);
-static DEFINE_SPINLOCK(release_list_lock);
+static DEFINE_RAW_SPINLOCK(release_list_lock);
 static void cgroup_release_agent(struct work_struct *work);
 static DECLARE_WORK(release_agent_work, cgroup_release_agent);
 static void check_for_release(struct cgroup *cgrp);
@@ -4014,11 +4014,11 @@ again:
 	finish_wait(&cgroup_rmdir_waitq, &wait);
 	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	set_bit(CGRP_REMOVED, &cgrp->flags);
 	if (!list_empty(&cgrp->release_list))
 		list_del_init(&cgrp->release_list);
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 
 	cgroup_lock_hierarchy(cgrp->root);
 	/* delete this cgroup from parent->children */
@@ -4671,13 +4671,13 @@ static void check_for_release(struct cgroup *cgrp)
 	 * already queued for a userspace notification, queue
 	 * it now */
 	int need_schedule_work = 0;
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	if (!cgroup_is_removed(cgrp) &&
 	    list_empty(&cgrp->release_list)) {
 		list_add(&cgrp->release_list, &release_list);
 		need_schedule_work = 1;
 	}
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 	if (need_schedule_work)
 		schedule_work(&release_agent_work);
 }
@@ -4729,7 +4729,7 @@ static void cgroup_release_agent(struct work_struct *work)
 {
 	BUG_ON(work != &release_agent_work);
 	mutex_lock(&cgroup_mutex);
-	spin_lock(&release_list_lock);
+	raw_spin_lock(&release_list_lock);
 	while (!list_empty(&release_list)) {
 		char *argv[3], *envp[3];
 		int i;
@@ -4738,7 +4738,7 @@ static void cgroup_release_agent(struct work_struct *work)
 					    struct cgroup,
 					    release_list);
 		list_del_init(&cgrp->release_list);
-		spin_unlock(&release_list_lock);
+		raw_spin_unlock(&release_list_lock);
 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 		if (!pathbuf)
 			goto continue_free;
@@ -4768,9 +4768,9 @@ static void cgroup_release_agent(struct work_struct *work)
 continue_free:
 		kfree(pathbuf);
 		kfree(agentbuf);
-		spin_lock(&release_list_lock);
+		raw_spin_lock(&release_list_lock);
 	}
-	spin_unlock(&release_list_lock);
+	raw_spin_unlock(&release_list_lock);
 	mutex_unlock(&cgroup_mutex);
 }
 