diff options
| author | Eric W. Biederman <ebiederm@xmission.com> | 2017-03-05 16:03:22 -0500 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-03-18 07:14:29 -0400 |
| commit | ee6f7ee1e4cdb0098fee4593ddf11ca6028abef2 (patch) | |
| tree | 3c469849bf880367cc69d071cbd90160e7909285 /kernel | |
| parent | 8bb208d02accfe50cee2a9b5db3ea9da49a208b3 (diff) | |
ucount: Remove the atomicity from ucount->count
commit 040757f738e13caaa9c5078bca79aa97e11dde88 upstream.
Always increment/decrement ucount->count under the ucounts_lock. The
increments are there already and moving the decrements there means the
locking logic of the code is simpler. This simplification in the
locking logic fixes a race between put_ucounts and get_ucounts that
could result in a use-after-free because the count could go to zero then
be found by get_ucounts and then be freed by put_ucounts.
A bug, presumably this one, was found by a combination of syzkaller and
KASAN. JongWhan Kim reported the syzkaller failure and Dmitry Vyukov
spotted the race in the code.
Fixes: f6b2db1a3e8d ("userns: Make the count of user namespaces per user")
Reported-by: JongHwan Kim <zzoru007@gmail.com>
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Reviewed-by: Andrei Vagin <avagin@gmail.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/ucount.c | 18 |
1 file changed, 11 insertions, 7 deletions
diff --git a/kernel/ucount.c b/kernel/ucount.c index 4bbd38ec3788..f4ac18509ecf 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c | |||
| @@ -139,7 +139,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
| 139 | 139 | ||
| 140 | new->ns = ns; | 140 | new->ns = ns; |
| 141 | new->uid = uid; | 141 | new->uid = uid; |
| 142 | atomic_set(&new->count, 0); | 142 | new->count = 0; |
| 143 | 143 | ||
| 144 | spin_lock_irq(&ucounts_lock); | 144 | spin_lock_irq(&ucounts_lock); |
| 145 | ucounts = find_ucounts(ns, uid, hashent); | 145 | ucounts = find_ucounts(ns, uid, hashent); |
| @@ -150,8 +150,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid) | |||
| 150 | ucounts = new; | 150 | ucounts = new; |
| 151 | } | 151 | } |
| 152 | } | 152 | } |
| 153 | if (!atomic_add_unless(&ucounts->count, 1, INT_MAX)) | 153 | if (ucounts->count == INT_MAX) |
| 154 | ucounts = NULL; | 154 | ucounts = NULL; |
| 155 | else | ||
| 156 | ucounts->count += 1; | ||
| 155 | spin_unlock_irq(&ucounts_lock); | 157 | spin_unlock_irq(&ucounts_lock); |
| 156 | return ucounts; | 158 | return ucounts; |
| 157 | } | 159 | } |
| @@ -160,13 +162,15 @@ static void put_ucounts(struct ucounts *ucounts) | |||
| 160 | { | 162 | { |
| 161 | unsigned long flags; | 163 | unsigned long flags; |
| 162 | 164 | ||
| 163 | if (atomic_dec_and_test(&ucounts->count)) { | 165 | spin_lock_irqsave(&ucounts_lock, flags); |
| 164 | spin_lock_irqsave(&ucounts_lock, flags); | 166 | ucounts->count -= 1; |
| 167 | if (!ucounts->count) | ||
| 165 | hlist_del_init(&ucounts->node); | 168 | hlist_del_init(&ucounts->node); |
| 166 | spin_unlock_irqrestore(&ucounts_lock, flags); | 169 | else |
| 170 | ucounts = NULL; | ||
| 171 | spin_unlock_irqrestore(&ucounts_lock, flags); | ||
| 167 | 172 | ||
| 168 | kfree(ucounts); | 173 | kfree(ucounts); |
| 169 | } | ||
| 170 | } | 174 | } |
| 171 | 175 | ||
| 172 | static inline bool atomic_inc_below(atomic_t *v, int u) | 176 | static inline bool atomic_inc_below(atomic_t *v, int u) |
