From 4021cb279a532728c3208a16b9b09b0ca8016850 Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Wed, 25 Jan 2006 15:23:07 +0100
Subject: [PATCH] fix uidhash_lock <-> RCU deadlock

RCU task-struct freeing can call free_uid(), which is taking
uidhash_lock - while other users of uidhash_lock are softirq-unsafe.

The fix is to always take the uidhash_spinlock in a softirq-safe manner.

Signed-off-by: Ingo Molnar
Acked-by: Paul E. McKenney
Signed-off-by: Linus Torvalds
---
 kernel/user.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

diff --git a/kernel/user.c b/kernel/user.c
index 89e562feb1b1..d1ae2349347e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,6 +13,7 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
@@ -27,6 +28,12 @@
 
 static kmem_cache_t *uid_cachep;
 static struct list_head uidhash_table[UIDHASH_SZ];
+
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
 struct user_struct root_user = {
@@ -83,14 +90,15 @@ struct user_struct *find_user(uid_t uid)
 {
 	struct user_struct *ret;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
+	local_bh_disable();
 	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
 		uid_hash_remove(up);
 		key_put(up->uid_keyring);
@@ -98,6 +106,7 @@ void free_uid(struct user_struct *up)
 		kmem_cache_free(uid_cachep, up);
 		spin_unlock(&uidhash_lock);
 	}
+	local_bh_enable();
 }
 
 struct user_struct * alloc_uid(uid_t uid)
@@ -105,9 +114,9 @@ struct user_struct * alloc_uid(uid_t uid)
 	struct list_head *hashent = uidhashentry(uid);
 	struct user_struct *up;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	if (!up) {
 		struct user_struct *new;
@@ -137,7 +146,7 @@ struct user_struct * alloc_uid(uid_t uid)
 		 * Before adding this, check whether we raced
 		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_bh(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,7 +156,7 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
+		spin_unlock_bh(&uidhash_lock);
 	}
 
 	return up;
@@ -183,9 +192,9 @@ static int __init uid_cache_init(void)
 		INIT_LIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
+	spin_lock_bh(&uidhash_lock);
 	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_bh(&uidhash_lock);
 
 	return 0;
 }
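
For context on the deadlock being fixed: the old code took uidhash_lock with
plain spin_lock(), which leaves softirqs enabled. When RCU frees a task-struct,
the callback runs in softirq context and ends up in free_uid(), which also
needs uidhash_lock. If that softirq fires on a CPU that is already inside one
of the softirq-unsafe critical sections, it spins on a lock its own CPU holds
and can never make progress. A minimal sketch of the two timelines follows;
this is illustrative only, and do_lookup() is a hypothetical stand-in for any
pre-fix caller such as find_user():

/*
 * Illustrative timeline only -- not code from this patch.
 * do_lookup() is a hypothetical stand-in for a pre-fix,
 * softirq-unsafe user of uidhash_lock (e.g. find_user()).
 */
void do_lookup(uid_t uid)
{
	spin_lock(&uidhash_lock);	/* softirqs still enabled here */

	/*
	 * Softirq fires on this same CPU:
	 *   RCU callback (task-struct being freed) -> free_uid()
	 *     -> atomic_dec_and_lock(&up->__count, &uidhash_lock)
	 * The handler spins on a lock its own CPU already holds,
	 * and the holder can never run to release it: deadlock.
	 */

	uid_hash_find(uid, uidhashentry(uid));
	spin_unlock(&uidhash_lock);
}

/* After the fix: softirqs are held off for the critical section. */
void do_lookup_fixed(uid_t uid)
{
	spin_lock_bh(&uidhash_lock);	/* disable local softirqs + lock */
	uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_bh(&uidhash_lock);	/* unlock + re-enable softirqs */
}

spin_lock_bh() closes the window by disabling bottom-half processing on the
local CPU for the duration of the critical section, so the RCU callback is
deferred until the lock has been dropped.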
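
One hunk is worth a closer look: free_uid() cannot simply be converted to
spin_lock_bh(), because there the lock is acquired inside
atomic_dec_and_lock(), which has no _bh variant. The patch therefore disables
bottom halves by hand around the whole operation. A condensed sketch of the
patched function, with the middle lines elided as in the hunks above:

void free_uid(struct user_struct *up)
{
	/*
	 * atomic_dec_and_lock() takes uidhash_lock only when the
	 * refcount actually drops to zero, and it has no _bh flavour.
	 * Bracketing it with local_bh_disable()/local_bh_enable()
	 * makes the dec-and-lock + spin_unlock() pair behave like
	 * spin_lock_bh()/spin_unlock_bh() on the final-put path.
	 */
	local_bh_disable();
	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		/* ... put keyrings, kmem_cache_free() the struct ... */
		spin_unlock(&uidhash_lock);
	}
	local_bh_enable();
}

On the common path, where the count does not reach zero, no lock is taken at
all and the brief bottom-half toggle is the only cost.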