Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c   58
1 file changed, 35 insertions(+), 23 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 9374293a3012..6f8495e2185b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -68,7 +68,7 @@
  * Further notes from the original documentation:
  *
  * 11 April '97. Started multi-threading - markhe
- * The global cache-chain is protected by the semaphore 'cache_chain_sem'.
+ * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
  * The sem is only needed when accessing/extending the cache-chain, which
  * can never happen inside an interrupt (kmem_cache_create(),
  * kmem_cache_shrink() and kmem_cache_reap()).
@@ -103,6 +103,8 @@
 #include <linux/rcupdate.h>
 #include <linux/string.h>
 #include <linux/nodemask.h>
+#include <linux/mempolicy.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
@@ -631,7 +633,7 @@ static kmem_cache_t cache_cache = {
 };
 
 /* Guard access to the cache-chain. */
-static struct semaphore cache_chain_sem;
+static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
 /*
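
The sem2mutex pattern above recurs through the rest of the patch: a binary semaphore that had to be initialized at runtime with init_MUTEX() becomes a real mutex that DEFINE_MUTEX() initializes at compile time, which is why the explicit init call in kmem_cache_init() can simply be deleted (see the @@ -1047 hunk below). A minimal sketch of the resulting shape, with a hypothetical lock name rather than code from this file:

    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_mutex);    /* statically initialized, no init call needed */

    static void demo_touch_chain(void)
    {
        mutex_lock(&demo_mutex);        /* may sleep: process context only */
        /* ... walk or modify the list the mutex protects ... */
        mutex_unlock(&demo_mutex);
    }
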
@@ -772,6 +774,8 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 }
 
 #ifdef CONFIG_NUMA
+static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
+
 static inline struct array_cache **alloc_alien_cache(int node, int limit)
 {
     struct array_cache **ac_ptr;
@@ -857,7 +861,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
 
     switch (action) {
     case CPU_UP_PREPARE:
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
         /* we need to do this right in the beginning since
          * alloc_arraycache's are going to use this list.
          * kmalloc_node allows us to add the slab to the right
@@ -912,7 +916,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
912 l3->shared = nc; 916 l3->shared = nc;
913 } 917 }
914 } 918 }
915 up(&cache_chain_sem); 919 mutex_unlock(&cache_chain_mutex);
916 break; 920 break;
917 case CPU_ONLINE: 921 case CPU_ONLINE:
918 start_cpu_timer(cpu); 922 start_cpu_timer(cpu);
@@ -921,7 +925,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
     case CPU_DEAD:
         /* fall thru */
     case CPU_UP_CANCELED:
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
 
         list_for_each_entry(cachep, &cache_chain, next) {
             struct array_cache *nc;
@@ -973,13 +977,13 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
             spin_unlock_irq(&cachep->spinlock);
             kfree(nc);
         }
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
         break;
 #endif
     }
     return NOTIFY_OK;
 bad:
-    up(&cache_chain_sem);
+    mutex_unlock(&cache_chain_mutex);
     return NOTIFY_BAD;
 }
 
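
All of the call sites in cpuup_callback() convert mechanically, because a CPU hotplug notifier runs in process context, where taking a sleeping mutex is legal. A rough skeleton of that pattern, using hypothetical names rather than this file's code:

    #include <linux/cpu.h>
    #include <linux/notifier.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_chain_mutex);          /* hypothetical */

    static int demo_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
    {
        switch (action) {
        case CPU_UP_PREPARE:
            mutex_lock(&demo_chain_mutex);
            /* set up per-cpu state while the chain cannot change */
            mutex_unlock(&demo_chain_mutex);
            break;
        }
        return NOTIFY_OK;
    }
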
@@ -1047,7 +1051,6 @@ void __init kmem_cache_init(void)
      */
 
     /* 1) create the cache_cache */
-    init_MUTEX(&cache_chain_sem);
     INIT_LIST_HEAD(&cache_chain);
     list_add(&cache_cache.next, &cache_chain);
     cache_cache.colour_off = cache_line_size();
@@ -1168,10 +1171,10 @@ void __init kmem_cache_init(void)
     /* 6) resize the head arrays to their final sizes */
     {
         kmem_cache_t *cachep;
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
         list_for_each_entry(cachep, &cache_chain, next)
             enable_cpucache(cachep);
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
     }
 
     /* Done! */
@@ -1590,7 +1593,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         BUG();
     }
 
-    down(&cache_chain_sem);
+    mutex_lock(&cache_chain_mutex);
 
     list_for_each(p, &cache_chain) {
         kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
@@ -1856,7 +1859,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
     if (!cachep && (flags & SLAB_PANIC))
         panic("kmem_cache_create(): failed to create slab `%s'\n",
               name);
-    up(&cache_chain_sem);
+    mutex_unlock(&cache_chain_mutex);
     return cachep;
 }
 EXPORT_SYMBOL(kmem_cache_create);
@@ -2044,18 +2047,18 @@ int kmem_cache_destroy(kmem_cache_t *cachep)
     lock_cpu_hotplug();
 
     /* Find the cache in the chain of caches. */
-    down(&cache_chain_sem);
+    mutex_lock(&cache_chain_mutex);
     /*
      * the chain is never empty, cache_cache is never destroyed
      */
     list_del(&cachep->next);
-    up(&cache_chain_sem);
+    mutex_unlock(&cache_chain_mutex);
 
     if (__cache_shrink(cachep)) {
         slab_error(cachep, "Can't free all objects");
-        down(&cache_chain_sem);
+        mutex_lock(&cache_chain_mutex);
         list_add(&cachep->next, &cache_chain);
-        up(&cache_chain_sem);
+        mutex_unlock(&cache_chain_mutex);
         unlock_cpu_hotplug();
         return 1;
     }
@@ -2570,6 +2573,15 @@ static inline void *____cache_alloc(kmem_cache_t *cachep, gfp_t flags)
     void *objp;
     struct array_cache *ac;
 
+#ifdef CONFIG_NUMA
+    if (unlikely(current->mempolicy && !in_interrupt())) {
+        int nid = slab_node(current->mempolicy);
+
+        if (nid != numa_node_id())
+            return __cache_alloc_node(cachep, flags, nid);
+    }
+#endif
+
     check_irq_off();
     ac = ac_data(cachep);
     if (likely(ac->avail)) {
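
The block added to ____cache_alloc() is independent of the locking change: when the allocating task has a NUMA memory policy and the allocation is not happening in interrupt context, slab_node() may select a node other than the local one, in which case the request is handed to __cache_alloc_node(); this is what the forward declaration added under CONFIG_NUMA earlier is for. From user space such a policy is typically installed with set_mempolicy(2); a minimal sketch, assuming a machine with nodes 0 and 1:

    #include <numaif.h>     /* set_mempolicy(), MPOL_INTERLEAVE; link with -lnuma */
    #include <stdio.h>

    int main(void)
    {
        unsigned long nodemask = 0x3;   /* hypothetical: interleave over nodes 0-1 */

        /* maxnode is the number of bits the kernel should read from the mask */
        if (set_mempolicy(MPOL_INTERLEAVE, &nodemask, sizeof(nodemask) * 8) != 0)
            perror("set_mempolicy");
        return 0;
    }
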
@@ -3314,7 +3326,7 @@ static void drain_array_locked(kmem_cache_t *cachep, struct array_cache *ac,
  * - clear the per-cpu caches for this CPU.
  * - return freeable pages to the main free memory pool.
  *
- * If we cannot acquire the cache chain semaphore then just give up - we'll
+ * If we cannot acquire the cache chain mutex then just give up - we'll
  * try again on the next iteration.
  */
 static void cache_reap(void *unused)
@@ -3322,7 +3334,7 @@ static void cache_reap(void *unused)
     struct list_head *walk;
     struct kmem_list3 *l3;
 
-    if (down_trylock(&cache_chain_sem)) {
+    if (!mutex_trylock(&cache_chain_mutex)) {
         /* Give up. Setup the next iteration. */
         schedule_delayed_work(&__get_cpu_var(reap_work),
                               REAPTIMEOUT_CPUC);
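
This is the one conversion in the patch that is not purely mechanical: down_trylock() returns 0 when it takes the semaphore, while mutex_trylock() returns 1 when it takes the mutex, so the sense of the test must be inverted, as the hunk above does with the added '!'. In sketch form, with hypothetical lock names:

    /* old convention: nonzero means the semaphore was busy */
    if (down_trylock(&demo_sem))
        return;             /* give up, try again later */

    /* new convention: zero means the mutex was busy */
    if (!mutex_trylock(&demo_mutex))
        return;             /* give up, try again later */
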
@@ -3393,7 +3405,7 @@ static void cache_reap(void *unused)
         cond_resched();
     }
     check_irq_on();
-    up(&cache_chain_sem);
+    mutex_unlock(&cache_chain_mutex);
     drain_remote_pages();
     /* Setup the next iteration */
     schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
@@ -3429,7 +3441,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
     loff_t n = *pos;
     struct list_head *p;
 
-    down(&cache_chain_sem);
+    mutex_lock(&cache_chain_mutex);
     if (!n)
         print_slabinfo_header(m);
     p = cache_chain.next;
@@ -3451,7 +3463,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-    up(&cache_chain_sem);
+    mutex_unlock(&cache_chain_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
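
For the /proc/slabinfo traversal the mutex is acquired in s_start() and held across every ->next/->show step until s_stop(); that is safe for a mutex because seq_file callbacks run in process context and may sleep. A minimal sketch of the same start/stop locking pattern, written against current kernels' seq_list_* helpers and using hypothetical names:

    #include <linux/seq_file.h>
    #include <linux/mutex.h>
    #include <linux/list.h>

    static DEFINE_MUTEX(demo_mutex);
    static LIST_HEAD(demo_list);

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
        mutex_lock(&demo_mutex);        /* held for the whole traversal */
        return seq_list_start(&demo_list, *pos);
    }

    static void *demo_next(struct seq_file *m, void *p, loff_t *pos)
    {
        return seq_list_next(p, &demo_list, pos);
    }

    static void demo_stop(struct seq_file *m, void *p)
    {
        mutex_unlock(&demo_mutex);      /* balances demo_start() */
    }
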
@@ -3603,7 +3615,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
         return -EINVAL;
 
     /* Find the cache in the chain of caches. */
-    down(&cache_chain_sem);
+    mutex_lock(&cache_chain_mutex);
     res = -EINVAL;
     list_for_each(p, &cache_chain) {
         kmem_cache_t *cachep = list_entry(p, kmem_cache_t, next);
@@ -3620,7 +3632,7 @@ ssize_t slabinfo_write(struct file *file, const char __user * buffer,
             break;
         }
     }
-    up(&cache_chain_sem);
+    mutex_unlock(&cache_chain_mutex);
     if (res >= 0)
         res = count;
     return res;