author     Pavel Emelyanov <xemul@openvz.org>                     2007-09-19 01:46:44 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-09-19 14:24:18 -0400
commit     735de2230f09741077a645a913de0a04b10208bf (patch)
tree       b408a671c9c07bec41a9ca8056fa7a6710982f3b /kernel/user.c
parent     d8a4821dca693867a7953104c1e3cc830eb9191f (diff)
Convert uid hash to hlist
Surprisingly (spotted by Alexey Dobriyan), the uid hash still uses
list_heads, thus occupying twice as much space as it needs. Convert it to
hlist_heads.
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: Alexey Dobriyan <adobriyan@openvz.org>
Acked-by: Serge Hallyn <serue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
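The space saving the message describes comes from the head type alone: struct list_head carries two pointers (next and prev), while struct hlist_head carries a single first pointer; the back-link (pprev) moves into each entry's hlist_node, so the per-entry footprint stays the same while every hash bucket shrinks by half. A minimal standalone C sketch of the comparison (struct layouts mirror the kernel's include/linux/types.h; the bucket count of 128 is illustrative, not necessarily the kernel's actual UIDHASH_SZ, and the byte counts assume a 64-bit build):

#include <stdio.h>

/* Two pointers per bucket: what the uid hash used before this patch. */
struct list_head {
        struct list_head *next, *prev;
};

/* One pointer per bucket: what the patch converts it to. */
struct hlist_head {
        struct hlist_node *first;
};

/* The back-link (pprev) lives in each entry, not in the bucket array. */
struct hlist_node {
        struct hlist_node *next, **pprev;
};

#define UIDHASH_SZ 128  /* illustrative bucket count */

int main(void)
{
        /* On a 64-bit build: 2048 vs. 1024 bytes for the bucket array. */
        printf("list_head table:  %zu bytes\n",
               UIDHASH_SZ * sizeof(struct list_head));
        printf("hlist_head table: %zu bytes\n",
               UIDHASH_SZ * sizeof(struct hlist_head));
        return 0;
}

With the kernel's real UIDHASH_SZ the absolute numbers differ, but the 2:1 ratio is what the commit message means by "twice as much space".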
Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/user.c b/kernel/user.c
index e080ba863ae..add57c7e4c0 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -55,21 +55,22 @@ struct user_struct root_user = {
 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
+static inline void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 {
-	list_add(&up->uidhash_list, hashent);
+	hlist_add_head(&up->uidhash_node, hashent);
 }
 
 static inline void uid_hash_remove(struct user_struct *up)
 {
-	list_del(&up->uidhash_list);
+	hlist_del(&up->uidhash_node);
 }
 
-static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
+static inline struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
 {
 	struct user_struct *user;
+	struct hlist_node *h;
 
-	list_for_each_entry(user, hashent, uidhash_list) {
+	hlist_for_each_entry(user, h, hashent, uidhash_node) {
 		if(user->uid == uid) {
 			atomic_inc(&user->__count);
 			return user;
@@ -118,7 +119,7 @@ void free_uid(struct user_struct *up)
 
 struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 {
-	struct list_head *hashent = uidhashentry(ns, uid);
+	struct hlist_head *hashent = uidhashentry(ns, uid);
 	struct user_struct *up;
 
 	spin_lock_irq(&uidhash_lock);
@@ -207,7 +208,7 @@ static int __init uid_cache_init(void)
 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_LIST_HEAD(init_user_ns.uidhash_table + n);
+		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
 	spin_lock_irq(&uidhash_lock);
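One detail worth noting in the converted uid_hash_find(): the extra struct hlist_node *h. In kernels of this era, hlist_for_each_entry() took a separate hlist_node cursor as its second argument (the extra argument was dropped only much later, in v3.9). Below is a standalone sketch of the same lookup pattern, with simplified re-creations of the kernel helpers; refcounting and locking are omitted, and typeof is the GCC extension the kernel itself relies on:

#include <stddef.h>
#include <stdio.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

/* Simplified re-creation of the kernel's hlist_add_head(). */
static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
        n->next = h->first;
        if (h->first)
                h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Pre-v3.9 style: the caller supplies a separate hlist_node cursor. */
#define hlist_for_each_entry(pos, node, head, member)                    \
        for (node = (head)->first;                                       \
             node && (pos = container_of(node, typeof(*pos), member), 1); \
             node = node->next)

struct user_struct {
        unsigned int uid;
        struct hlist_node uidhash_node;
};

static struct user_struct *uid_hash_find(unsigned int uid,
                                         struct hlist_head *hashent)
{
        struct user_struct *user;
        struct hlist_node *h;

        hlist_for_each_entry(user, h, hashent, uidhash_node) {
                if (user->uid == uid)
                        return user;
        }
        return NULL;
}

int main(void)
{
        struct hlist_head bucket = { NULL };
        struct user_struct root = { .uid = 0 };

        hlist_add_head(&root.uidhash_node, &bucket);
        printf("found uid 0: %s\n", uid_hash_find(0, &bucket) ? "yes" : "no");
        return 0;
}

This mirrors the shape of the patched uid_hash_find() above, minus the atomic_inc() on __count and the uidhash_lock that the kernel version requires.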