about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2013-11-29 10:42:59 -0500
committerTejun Heo <tj@kernel.org>2013-11-29 10:42:59 -0500
commit069df3b7aeb3f4e926c4da9630c92010909af512 (patch)
tree75e84d904908603d009824bb8cb43315e2683b7f
parente6b817103d168a76e4044ebcdbc08225d77a81cb (diff)
cgroup: remove cgroup_pidlist->rwsem
cgroup_pidlist locking is needlessly complicated. It has outer cgroup->pidlist_mutex to protect the list of pidlists associated with a cgroup and then each pidlist has rwsem to synchronize updates and reads. Given that the only read access is from seq_file operations which are always invoked back-to-back, the rwsem is a giant overkill. All it does is adding unnecessary complexity. This patch removes cgroup_pidlist->rwsem and protects all accesses to pidlists belonging to a cgroup with cgroup->pidlist_mutex. pidlist->rwsem locking is removed if it's nested inside cgroup->pidlist_mutex; otherwise, it's replaced with cgroup->pidlist_mutex locking. Signed-off-by: Tejun Heo <tj@kernel.org> Acked-by: Li Zefan <lizefan@huawei.com>
-rw-r--r-- kernel/cgroup.c | 15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index d58c30d3b828..dc39e1774542 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -3462,8 +3462,6 @@ struct cgroup_pidlist {
3462 struct list_head links; 3462 struct list_head links;
3463 /* pointer to the cgroup we belong to, for list removal purposes */ 3463 /* pointer to the cgroup we belong to, for list removal purposes */
3464 struct cgroup *owner; 3464 struct cgroup *owner;
3465 /* protects the other fields */
3466 struct rw_semaphore rwsem;
3467 /* for delayed destruction */ 3465 /* for delayed destruction */
3468 struct delayed_work destroy_dwork; 3466 struct delayed_work destroy_dwork;
3469}; 3467};
@@ -3522,7 +3520,6 @@ static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
3522 struct cgroup_pidlist *tofree = NULL; 3520 struct cgroup_pidlist *tofree = NULL;
3523 3521
3524 mutex_lock(&l->owner->pidlist_mutex); 3522 mutex_lock(&l->owner->pidlist_mutex);
3525 down_write(&l->rwsem);
3526 3523
3527 /* 3524 /*
3528 * Destroy iff we didn't race with a new user or get queued again. 3525 * Destroy iff we didn't race with a new user or get queued again.
@@ -3535,7 +3532,6 @@ static void cgroup_pidlist_destroy_work_fn(struct work_struct *work)
3535 tofree = l; 3532 tofree = l;
3536 } 3533 }
3537 3534
3538 up_write(&l->rwsem);
3539 mutex_unlock(&l->owner->pidlist_mutex); 3535 mutex_unlock(&l->owner->pidlist_mutex);
3540 kfree(tofree); 3536 kfree(tofree);
3541} 3537}
@@ -3612,7 +3608,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find_create(struct cgroup *cgrp,
3612 if (!l) 3608 if (!l)
3613 return l; 3609 return l;
3614 3610
3615 init_rwsem(&l->rwsem);
3616 INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn); 3611 INIT_DELAYED_WORK(&l->destroy_dwork, cgroup_pidlist_destroy_work_fn);
3617 l->key.type = type; 3612 l->key.type = type;
3618 /* don't need task_nsproxy() if we're looking at ourself */ 3613 /* don't need task_nsproxy() if we're looking at ourself */
@@ -3675,12 +3670,10 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
3675 } 3670 }
3676 3671
3677 /* store array, freeing old if necessary */ 3672 /* store array, freeing old if necessary */
3678 down_write(&l->rwsem);
3679 pidlist_free(l->list); 3673 pidlist_free(l->list);
3680 l->list = array; 3674 l->list = array;
3681 l->length = length; 3675 l->length = length;
3682 l->use_count++; 3676 l->use_count++;
3683 up_write(&l->rwsem);
3684 3677
3685 mutex_unlock(&cgrp->pidlist_mutex); 3678 mutex_unlock(&cgrp->pidlist_mutex);
3686 3679
@@ -3762,7 +3755,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
3762 int index = 0, pid = *pos; 3755 int index = 0, pid = *pos;
3763 int *iter; 3756 int *iter;
3764 3757
3765 down_read(&l->rwsem); 3758 mutex_lock(&of->cgrp->pidlist_mutex);
3766 if (pid) { 3759 if (pid) {
3767 int end = l->length; 3760 int end = l->length;
3768 3761
@@ -3790,7 +3783,7 @@ static void cgroup_pidlist_stop(struct seq_file *s, void *v)
3790{ 3783{
3791 struct cgroup_pidlist_open_file *of = s->private; 3784 struct cgroup_pidlist_open_file *of = s->private;
3792 3785
3793 up_read(&of->pidlist->rwsem); 3786 mutex_unlock(&of->cgrp->pidlist_mutex);
3794} 3787}
3795 3788
3796static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos) 3789static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
@@ -3830,13 +3823,13 @@ static const struct seq_operations cgroup_pidlist_seq_operations = {
3830 3823
3831static void cgroup_release_pid_array(struct cgroup_pidlist *l) 3824static void cgroup_release_pid_array(struct cgroup_pidlist *l)
3832{ 3825{
3833 down_write(&l->rwsem); 3826 mutex_lock(&l->owner->pidlist_mutex);
3834 BUG_ON(!l->use_count); 3827 BUG_ON(!l->use_count);
3835 /* if the last user, arm the destroy work */ 3828 /* if the last user, arm the destroy work */
3836 if (!--l->use_count) 3829 if (!--l->use_count)
3837 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork, 3830 mod_delayed_work(cgroup_pidlist_destroy_wq, &l->destroy_dwork,
3838 CGROUP_PIDLIST_DESTROY_DELAY); 3831 CGROUP_PIDLIST_DESTROY_DELAY);
3839 up_write(&l->rwsem); 3832 mutex_unlock(&l->owner->pidlist_mutex);
3840} 3833}
3841 3834
3842static int cgroup_pidlist_release(struct inode *inode, struct file *file) 3835static int cgroup_pidlist_release(struct inode *inode, struct file *file)