aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric W. Biederman <ebiederm@xmission.com>2017-03-05 16:03:22 -0500
committerEric W. Biederman <ebiederm@xmission.com>2017-03-06 16:26:37 -0500
commit040757f738e13caaa9c5078bca79aa97e11dde88 (patch)
treed2254d13ea030261eb3d894f8d7c3401f3e8a5e3
parentc1ae3cfa0e89fa1a7ecc4c99031f5e9ae99d9201 (diff)
ucount: Remove the atomicity from ucount->count
Always increment/decrement ucount->count under the ucounts_lock. The increments are there already and moving the decrements there means the locking logic of the code is simpler. This simplification in the locking logic fixes a race between put_ucounts and get_ucounts that could result in a use-after-free because the count could go zero then be found by get_ucounts and then be freed by put_ucounts. A bug, presumably this one, was found by a combination of syzkaller and KASAN. JongWhan Kim reported the syzkaller failure and Dmitry Vyukov spotted the race in the code. Cc: stable@vger.kernel.org Fixes: f6b2db1a3e8d ("userns: Make the count of user namespaces per user") Reported-by: JongHwan Kim <zzoru007@gmail.com> Reported-by: Dmitry Vyukov <dvyukov@google.com> Reviewed-by: Andrei Vagin <avagin@gmail.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
-rw-r--r--include/linux/user_namespace.h2
-rw-r--r--kernel/ucount.c18
2 files changed, 12 insertions, 8 deletions
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index be765234c0a2..32354b4b4b2b 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -72,7 +72,7 @@ struct ucounts {
72 struct hlist_node node; 72 struct hlist_node node;
73 struct user_namespace *ns; 73 struct user_namespace *ns;
74 kuid_t uid; 74 kuid_t uid;
75 atomic_t count; 75 int count;
76 atomic_t ucount[UCOUNT_COUNTS]; 76 atomic_t ucount[UCOUNT_COUNTS];
77}; 77};
78 78
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 62630a40ab3a..b4eeee03934f 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -144,7 +144,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
144 144
145 new->ns = ns; 145 new->ns = ns;
146 new->uid = uid; 146 new->uid = uid;
147 atomic_set(&new->count, 0); 147 new->count = 0;
148 148
149 spin_lock_irq(&ucounts_lock); 149 spin_lock_irq(&ucounts_lock);
150 ucounts = find_ucounts(ns, uid, hashent); 150 ucounts = find_ucounts(ns, uid, hashent);
@@ -155,8 +155,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
155 ucounts = new; 155 ucounts = new;
156 } 156 }
157 } 157 }
158 if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) 158 if (ucounts->count == INT_MAX)
159 ucounts = NULL; 159 ucounts = NULL;
160 else
161 ucounts->count += 1;
160 spin_unlock_irq(&ucounts_lock); 162 spin_unlock_irq(&ucounts_lock);
161 return ucounts; 163 return ucounts;
162} 164}
@@ -165,13 +167,15 @@ static void put_ucounts(struct ucounts *ucounts)
165{ 167{
166 unsigned long flags; 168 unsigned long flags;
167 169
168 if (atomic_dec_and_test(&ucounts->count)) { 170 spin_lock_irqsave(&ucounts_lock, flags);
169 spin_lock_irqsave(&ucounts_lock, flags); 171 ucounts->count -= 1;
172 if (!ucounts->count)
170 hlist_del_init(&ucounts->node); 173 hlist_del_init(&ucounts->node);
171 spin_unlock_irqrestore(&ucounts_lock, flags); 174 else
175 ucounts = NULL;
176 spin_unlock_irqrestore(&ucounts_lock, flags);
172 177
173 kfree(ucounts); 178 kfree(ucounts);
174 }
175} 179}
176 180
177static inline bool atomic_inc_below(atomic_t *v, int u) 181static inline bool atomic_inc_below(atomic_t *v, int u)