aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-01-24 15:21:51 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-01-24 15:21:51 -0500
commit19ca2c8fecb1592d623fe5e82d6796f8d446268d (patch)
tree0756e115d125846f46b302085307768f069434a1
parenta4685d2f58e2230d4e27fb2ee581d7ea35e5d046 (diff)
parent880a38547ff08715ce4f1daf9a4bb30c87676e68 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace
Pull namespace fix from Eric Biederman: "This has a single brown bag fix. The possible deadlock with dec_pid_namespaces that I had thought was fixed earlier turned out only to have been moved. So instead of being clever this change takes ucounts_lock with irqs disabled. So dec_ucount can be used from any context without fear of deadlock. The items accounted for dec_ucount and inc_ucount are all comparatively heavy weight objects so I don't expect this will have any measurable performance impact" * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace: userns: Make ucounts lock irq-safe
-rw-r--r--kernel/ucount.c14
1 files changed, 8 insertions, 6 deletions
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5dd298a..4bbd38ec3788 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
128 struct hlist_head *hashent = ucounts_hashentry(ns, uid); 128 struct hlist_head *hashent = ucounts_hashentry(ns, uid);
129 struct ucounts *ucounts, *new; 129 struct ucounts *ucounts, *new;
130 130
131 spin_lock(&ucounts_lock); 131 spin_lock_irq(&ucounts_lock);
132 ucounts = find_ucounts(ns, uid, hashent); 132 ucounts = find_ucounts(ns, uid, hashent);
133 if (!ucounts) { 133 if (!ucounts) {
134 spin_unlock(&ucounts_lock); 134 spin_unlock_irq(&ucounts_lock);
135 135
136 new = kzalloc(sizeof(*new), GFP_KERNEL); 136 new = kzalloc(sizeof(*new), GFP_KERNEL);
137 if (!new) 137 if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
141 new->uid = uid; 141 new->uid = uid;
142 atomic_set(&new->count, 0); 142 atomic_set(&new->count, 0);
143 143
144 spin_lock(&ucounts_lock); 144 spin_lock_irq(&ucounts_lock);
145 ucounts = find_ucounts(ns, uid, hashent); 145 ucounts = find_ucounts(ns, uid, hashent);
146 if (ucounts) { 146 if (ucounts) {
147 kfree(new); 147 kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
152 } 152 }
153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) 153 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
154 ucounts = NULL; 154 ucounts = NULL;
155 spin_unlock(&ucounts_lock); 155 spin_unlock_irq(&ucounts_lock);
156 return ucounts; 156 return ucounts;
157} 157}
158 158
159static void put_ucounts(struct ucounts *ucounts) 159static void put_ucounts(struct ucounts *ucounts)
160{ 160{
161 unsigned long flags;
162
161 if (atomic_dec_and_test(&ucounts->count)) { 163 if (atomic_dec_and_test(&ucounts->count)) {
162 spin_lock(&ucounts_lock); 164 spin_lock_irqsave(&ucounts_lock, flags);
163 hlist_del_init(&ucounts->node); 165 hlist_del_init(&ucounts->node);
164 spin_unlock(&ucounts_lock); 166 spin_unlock_irqrestore(&ucounts_lock, flags);
165 167
166 kfree(ucounts); 168 kfree(ucounts);
167 } 169 }