path: root/mm/slab.c
author    Ingo Molnar <mingo@elte.hu>  2006-01-18 20:42:33 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-18 22:20:18 -0500
commit    fc0abb1451c64c79ac80665d5ba74450ce274e4d (patch)
tree      30301b6134b122a638104645bd6bd4b45014dd2c /mm/slab.c
parent    1743660b911bfb849b1fb33830522254561b9f9b (diff)
[PATCH] sem2mutex: mm/slab.c
Convert mm/slab.c's cache_chain_sem to cache_chain_mutex.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 46
1 file changed, 23 insertions(+), 23 deletions(-)
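The conversion pattern applied throughout this patch, as a minimal sketch (the names thing_mutex, thing_list, add_thing and try_reap below are illustrative, not code from mm/slab.c): the 'struct semaphore' declaration plus its init_MUTEX() call collapse into a single static DEFINE_MUTEX() initializer, down()/up() become mutex_lock()/mutex_unlock(), and down_trylock() becomes !mutex_trylock(), because down_trylock() returns nonzero on failure while mutex_trylock() returns nonzero on success.

/*
 * Illustrative sketch of the sem2mutex conversion pattern; the names
 * below are hypothetical and do not appear in mm/slab.c.
 */
#include <linux/list.h>
#include <linux/mutex.h>

/* before: static struct semaphore thing_sem; plus init_MUTEX(&thing_sem); */
static DEFINE_MUTEX(thing_mutex);       /* after: statically initialized */
static LIST_HEAD(thing_list);

static void add_thing(struct list_head *item)
{
        mutex_lock(&thing_mutex);       /* was: down(&thing_sem); */
        list_add(item, &thing_list);
        mutex_unlock(&thing_mutex);     /* was: up(&thing_sem); */
}

static int try_reap(void)
{
        /* was: if (down_trylock(&thing_sem)) -- note the inverted test */
        if (!mutex_trylock(&thing_mutex))
                return 0;               /* contended: give up for now */
        /* ... do work under the mutex ... */
        mutex_unlock(&thing_mutex);
        return 1;
}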
diff --git a/mm/slab.c b/mm/slab.c
index 9374293a3012..bd0317f1e06c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,7 @@
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/nodemask.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -631,7 +632,7 @@ static kmem_cache_t cache_cache = {
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
@@ -857,7 +858,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
         switch (action) {
         case CPU_UP_PREPARE:
-                down(&cache_chain_sem);
+                mutex_lock(&cache_chain_mutex);
                 /* we need to do this right in the beginning since
                  * alloc_arraycache's are going to use this list.
                  * kmalloc_node allows us to add the slab to the right
@@ -912,7 +913,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                                 l3->shared = nc;
                         }
                 }
-                up(&cache_chain_sem);
+                mutex_unlock(&cache_chain_mutex);
                 break;
         case CPU_ONLINE:
                 start_cpu_timer(cpu);
@@ -921,7 +922,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
         case CPU_DEAD:
                 /* fall thru */
         case CPU_UP_CANCELED:
-                down(&cache_chain_sem);
+                mutex_lock(&cache_chain_mutex);
 
                 list_for_each_entry(cachep, &cache_chain, next) {
                         struct array_cache *nc;
@@ -973,13 +974,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
                         spin_unlock_irq(&cachep->spinlock);
                         kfree(nc);
                 }
-                up(&cache_chain_sem);
+                mutex_unlock(&cache_chain_mutex);
                 break;
 #endif
         }
         return NOTIFY_OK;
 bad:
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
         return NOTIFY_BAD;
 }
 
@@ -1047,7 +1048,6 @@ void __init kmem_cache_init(void)
          */
 
         /* 1) create the cache_cache */
-        init_MUTEX(&cache_chain_sem);
         INIT_LIST_HEAD(&cache_chain);
         list_add(&cache_cache.next, &cache_chain);
         cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1168,10 @@ void __init kmem_cache_init(void)
         /* 6) resize the head arrays to their final sizes */
         {
                 kmem_cache_t *cachep;
-                down(&cache_chain_sem);
+                mutex_lock(&cache_chain_mutex);
                 list_for_each_entry(cachep, &cache_chain, next)
                         enable_cpucache(cachep);
-                up(&cache_chain_sem);
+                mutex_unlock(&cache_chain_mutex);
         }
 
         /* Done! */
@@ -1590,7 +1590,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
                 BUG();
         }
 
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
 
         list_for_each(p, &cache_chain) {
                 kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1856,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         if (!cachep && (flags & SLAB_PANIC))
                 panic("kmem_cache_create(): failed to create slab `%s'\n",
                       name);
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
         return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2044,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
         lock_cpu_hotplug();
 
         /* Find the cache in the chain of caches. */
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
         /*
          * the chain is never empty, cache_cache is never destroyed
          */
         list_del(&cachep->next);
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
 
         if (__cache_shrink(cachep)) {
                 slab_error(cachep, "Can't free all objects");
-                down(&cache_chain_sem);
+                mutex_lock(&cache_chain_mutex);
                 list_add(&cachep->next, &cache_chain);
-                up(&cache_chain_sem);
+                mutex_unlock(&cache_chain_mutex);
                 unlock_cpu_hotplug();
                 return 1;
         }
@@ -3314,7 +3314,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3322,7 @@ static void cache_reap(void *unused)
         struct list_head *walk;
         struct kmem_list3 *l3;
 
-        if (down_trylock(&cache_chain_sem)) {
+        if (!mutex_trylock(&cache_chain_mutex)) {
                 /* Give up. Setup the next iteration. */
                 schedule_delayed_work(&__get_cpu_var(reap_work),
                                       REAPTIMEOUT_CPUC);
@@ -3393,7 +3393,7 @@ static void cache_reap(void *unused)
                 cond_resched();
         }
         check_irq_on();
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
         drain_remote_pages();
         /* Setup the next iteration */
         schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3429,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
         loff_t n = *pos;
         struct list_head *p;
 
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
         if (!n)
                 print_slabinfo_header(m);
         p = cache_chain.next;
@@ -3451,7 +3451,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3603,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                 return -EINVAL;
 
         /* Find the cache in the chain of caches. */
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
         res = -EINVAL;
         list_for_each(p, &cache_chain) {
                 kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3620,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
                         break;
                 }
         }
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
         if (res >= 0)
                 res = count;
         return res;
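Two details of the conversion are worth noting. First, DEFINE_MUTEX() initializes the lock statically, which is why the init_MUTEX() call simply disappears from kmem_cache_init() with no runtime replacement. Second, the trylock conversion in cache_reap() must invert the test: down_trylock() returns nonzero when the semaphore was *not* acquired, whereas mutex_trylock() returns nonzero when the mutex *was* acquired, hence down_trylock(&cache_chain_sem) becomes !mutex_trylock(&cache_chain_mutex).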