author     Ingo Molnar <mingo@elte.hu>             2006-01-18 20:42:33 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-01-18 22:20:18 -0500
commit     fc0abb1451c64c79ac80665d5ba74450ce274e4d (patch)
tree       30301b6134b122a638104645bd6bd4b45014dd2c
parent     1743660b911bfb849b1fb33830522254561b9f9b (diff)
[PATCH] sem2mutex: mm/slab.c
Convert mm/slab.c's cache_chain_sem to cache_chain_mutex, and mm/swapfile.c's swapon_sem to swapon_mutex.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  mm/slab.c     | 46
-rw-r--r--  mm/swapfile.c | 17
2 files changed, 32 insertions(+), 31 deletions(-)
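Throughout the patch the conversion follows the standard sem2mutex pattern: a struct semaphore that only ever counts between 0 and 1 becomes a struct mutex, and down()/up() become mutex_lock()/mutex_unlock(). A minimal before/after sketch of the pattern (the names below are illustrative, not taken from this patch):

	/* Before: a semaphore initialized to 1, used as a binary mutex. */
	static DECLARE_MUTEX(example_sem);	/* despite the name, declares a semaphore */

	static void example_old(void)
	{
		down(&example_sem);		/* sleeps until the semaphore is available */
		/* ... access the protected data ... */
		up(&example_sem);
	}

	/* After: a real mutex, statically initialized. */
	static DEFINE_MUTEX(example_mutex);

	static void example_new(void)
	{
		mutex_lock(&example_mutex);	/* sleeps until the mutex is available */
		/* ... access the protected data ... */
		mutex_unlock(&example_mutex);
	}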
diff --git a/mm/slab.c b/mm/slab.c
index 9374293a3012..bd0317f1e06c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,7 @@
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/nodemask.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -631,7 +632,7 @@ static kmem_cache_t cache_cache = {
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
@@ -857,7 +858,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		/* we need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
 		 * kmalloc_node allows us to add the slab to the right
@@ -912,7 +913,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 				l3->shared = nc;
 			}
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 	case CPU_ONLINE:
 		start_cpu_timer(cpu);
@@ -921,7 +922,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 	case CPU_DEAD:
 		/* fall thru */
 	case CPU_UP_CANCELED:
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
@@ -973,13 +974,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 			spin_unlock_irq(&cachep->spinlock);
 			kfree(nc);
 		}
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		break;
 #endif
 	}
 	return NOTIFY_OK;
 bad:
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return NOTIFY_BAD;
 }
 
@@ -1047,7 +1048,6 @@ void __init kmem_cache_init(void)
 	 */
 
 	/* 1) create the cache_cache */
-	init_MUTEX(&cache_chain_sem);
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
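DEFINE_MUTEX() initializes the mutex at compile time, so unlike the semaphore the mutex needs no runtime setup; the init_MUTEX() call above is deleted outright rather than replaced. A sketch of the two declaration styles (illustrative names):

	/* Old: the semaphore lives in BSS and must be initialized before first use. */
	static struct semaphore example_sem;
	/* ... in early init code, before any down()/up(): */
	init_MUTEX(&example_sem);	/* sets the semaphore count to 1 */

	/* New: ready for mutex_lock() from the moment the kernel boots. */
	static DEFINE_MUTEX(example_mutex);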
@@ -1168,10 +1168,10 @@ void __init kmem_cache_init(void)
 	/* 6) resize the head arrays to their final sizes */
 	{
 		kmem_cache_t *cachep;
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_for_each_entry(cachep, &cache_chain, next)
 			enable_cpucache(cachep);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 	}
 
 	/* Done! */
@@ -1590,7 +1590,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 		BUG();
 	}
 
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1856,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (!cachep && (flags & SLAB_PANIC))
 		panic("kmem_cache_create(): failed to create slab `%s'\n",
 		      name);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2044,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
 	lock_cpu_hotplug();
 
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	/*
 	 * the chain is never empty, cache_cache is never destroyed
 	 */
 	list_del(&cachep->next);
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 
 	if (__cache_shrink(cachep)) {
 		slab_error(cachep, "Can't free all objects");
-		down(&cache_chain_sem);
+		mutex_lock(&cache_chain_mutex);
 		list_add(&cachep->next, &cache_chain);
-		up(&cache_chain_sem);
+		mutex_unlock(&cache_chain_mutex);
 		unlock_cpu_hotplug();
 		return 1;
 	}
@@ -3314,7 +3314,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3322,7 @@ static void cache_reap(void *unused)
 	struct list_head *walk;
 	struct kmem_list3 *l3;
 
-	if (down_trylock(&cache_chain_sem)) {
+	if (!mutex_trylock(&cache_chain_mutex)) {
 		/* Give up. Setup the next iteration. */
 		schedule_delayed_work(&__get_cpu_var(reap_work),
 				      REAPTIMEOUT_CPUC);
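Note the added '!': the two trylock primitives have opposite return conventions. down_trylock() returns 0 on success and nonzero when the semaphore is already held, while mutex_trylock() returns 1 on success and 0 on contention, so the test must be inverted during conversion. Schematically (illustrative names):

	if (down_trylock(&example_sem)) {	/* old: nonzero means we did NOT get the lock */
		/* back off and retry later */
	}

	if (!mutex_trylock(&example_mutex)) {	/* new: zero means we did NOT get the lock */
		/* back off and retry later */
	}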
@@ -3393,7 +3393,7 @@ static void cache_reap(void *unused)
 		cond_resched();
 	}
 	check_irq_on();
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	drain_remote_pages();
 	/* Setup the next iteration */
 	schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3429,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 	loff_t n = *pos;
 	struct list_head *p;
 
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 	p = cache_chain.next;
@@ -3451,7 +3451,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
@@ -3603,7 +3603,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 		return -EINVAL;
 
 	/* Find the cache in the chain of caches. */
-	down(&cache_chain_sem);
+	mutex_lock(&cache_chain_mutex);
 	res = -EINVAL;
 	list_for_each(p, &cache_chain) {
 		kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3620,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
 			break;
 		}
 	}
-	up(&cache_chain_sem);
+	mutex_unlock(&cache_chain_mutex);
 	if (res >= 0)
 		res = count;
 	return res;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 957fef43fa60..f1e69c30d203 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -25,6 +25,7 @@
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/backing-dev.h>
+#include <linux/mutex.h>
 #include <linux/capability.h>
 #include <linux/syscalls.h>
 
@@ -46,12 +47,12 @@ struct swap_list_t swap_list = {-1, -1};
 
 struct swap_info_struct swap_info[MAX_SWAPFILES];
 
-static DECLARE_MUTEX(swapon_sem);
+static DEFINE_MUTEX(swapon_mutex);
 
 /*
  * We need this because the bdev->unplug_fn can sleep and we cannot
  * hold swap_lock while calling the unplug_fn. And swap_lock
- * cannot be turned into a semaphore.
+ * cannot be turned into a mutex.
  */
 static DECLARE_RWSEM(swap_unplug_sem);
 
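Only the binary-semaphore swapon_sem is converted here. The reader/writer semaphore swap_unplug_sem has no mutex equivalent and stays a DECLARE_RWSEM(), and swap_lock remains a spinlock because, as the comment above notes, a mutex (like a semaphore) can sleep. The declaration change itself, side by side (illustrative names):

	static DECLARE_MUTEX(example_sem);	/* old: struct semaphore with count = 1 */
	static DEFINE_MUTEX(example_mutex);	/* new: struct mutex, statically initialized */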
@@ -1161,7 +1162,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	up_write(&swap_unplug_sem);
 
 	destroy_swap_extents(p);
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	drain_mmlist();
 
@@ -1180,7 +1181,7 @@ asmlinkage long sys_swapoff(const char __user * specialfile)
 	p->swap_map = NULL;
 	p->flags = 0;
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	vfree(swap_map);
 	inode = mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
@@ -1209,7 +1210,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
 	int i;
 	loff_t l = *pos;
 
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 
 	for (i = 0; i < nr_swapfiles; i++, ptr++) {
 		if (!(ptr->flags & SWP_USED) || !ptr->swap_map)
@@ -1238,7 +1239,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
 
 static void swap_stop(struct seq_file *swap, void *v)
 {
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 }
 
 static int swap_show(struct seq_file *swap, void *v)
@@ -1540,7 +1541,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		goto bad_swap;
 	}
 
-	down(&swapon_sem);
+	mutex_lock(&swapon_mutex);
 	spin_lock(&swap_lock);
 	p->flags = SWP_ACTIVE;
 	nr_swap_pages += nr_good_pages;
@@ -1566,7 +1567,7 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
 		swap_info[prev].next = p - swap_info;
 	}
 	spin_unlock(&swap_lock);
-	up(&swapon_sem);
+	mutex_unlock(&swapon_mutex);
 	error = 0;
 	goto out;
 bad_swap: