Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 148 +++++++++++++++++++++++-----------------
 1 file changed, 84 insertions(+), 64 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index d2713a944ebd..e17cc2c337b8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
 	return cachep->buffer_size;
@@ -604,6 +604,26 @@ static struct kmem_cache cache_cache = {
 
 #define BAD_ALIEN_MAGIC 0x01020304ul
 
+/*
+ * chicken and egg problem: delay the per-cpu array allocation
+ * until the general caches are up.
+ */
+static enum {
+	NONE,
+	PARTIAL_AC,
+	PARTIAL_L3,
+	EARLY,
+	FULL
+} g_cpucache_up;
+
+/*
+ * used by boot code to determine if it can use slab based allocator
+ */
+int slab_is_available(void)
+{
+	return g_cpucache_up >= EARLY;
+}
+
 #ifdef CONFIG_LOCKDEP
 
 /*
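
The block added above is a verbatim move of the g_cpucache_up bootstrap state machine to earlier in the file, so that the lockdep code introduced in the next hunk can consult it. The pattern is a monotonically advancing init level that consumers test with >= before relying on the allocator. A minimal userspace sketch of the same guard, assuming nothing beyond the C standard library (the names are illustrative, not the kernel's):

#include <stdio.h>

/* Bootstrap advances through these levels in order; consumers only
 * ask whether a minimum level has been reached, never the exact one. */
static enum { NONE, PARTIAL, EARLY, FULL } init_state = NONE;

static int subsystem_is_available(void)
{
	return init_state >= EARLY;	/* same >= test as slab_is_available() */
}

int main(void)
{
	printf("before init: %d\n", subsystem_is_available());	/* prints 0 */
	init_state = EARLY;
	printf("after early init: %d\n", subsystem_is_available());	/* prints 1 */
	return 0;
}
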
@@ -620,40 +640,52 @@ static struct kmem_cache cache_cache = {
 static struct lock_class_key on_slab_l3_key;
 static struct lock_class_key on_slab_alc_key;
 
-static inline void init_lock_keys(void)
-
+static void init_node_lock_keys(int q)
 {
-	int q;
 	struct cache_sizes *s = malloc_sizes;
 
-	while (s->cs_size != ULONG_MAX) {
-		for_each_node(q) {
-			struct array_cache **alc;
-			int r;
-			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
-			if (!l3 || OFF_SLAB(s->cs_cachep))
-				continue;
-			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
-			alc = l3->alien;
-			/*
-			 * FIXME: This check for BAD_ALIEN_MAGIC
-			 * should go away when common slab code is taught to
-			 * work even without alien caches.
-			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
-			 * for alloc_alien_cache,
-			 */
-			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
-				continue;
-			for_each_node(r) {
-				if (alc[r])
-					lockdep_set_class(&alc[r]->lock,
-					     &on_slab_alc_key);
-			}
+	if (g_cpucache_up != FULL)
+		return;
+
+	for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
+		struct array_cache **alc;
+		struct kmem_list3 *l3;
+		int r;
+
+		l3 = s->cs_cachep->nodelists[q];
+		if (!l3 || OFF_SLAB(s->cs_cachep))
+			return;
+		lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
+		alc = l3->alien;
+		/*
+		 * FIXME: This check for BAD_ALIEN_MAGIC
+		 * should go away when common slab code is taught to
+		 * work even without alien caches.
+		 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
+		 * for alloc_alien_cache,
+		 */
+		if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
+			return;
+		for_each_node(r) {
+			if (alc[r])
+				lockdep_set_class(&alc[r]->lock,
+						  &on_slab_alc_key);
 		}
-		s++;
 	}
 }
+
+static inline void init_lock_keys(void)
+{
+	int node;
+
+	for_each_node(node)
+		init_node_lock_keys(node);
+}
 #else
+static void init_node_lock_keys(int q)
+{
+}
+
 static inline void init_lock_keys(void)
 {
 }
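
This hunk rewrites the lockdep annotation loop so the per-node body becomes a callable helper, init_node_lock_keys(), leaving init_lock_keys() as a thin wrapper that iterates every node; a later hunk then calls the helper from the CPU-hotplug path for just the node coming online. Note one behavioral wrinkle carried by the rewrite: the old continue statements became return, so a cache with a missing l3 now ends the scan for that node instead of skipping a single entry. A compilable sketch of the refactoring shape, with hypothetical names:

#include <stdio.h>

#define NR_NODES 4

/* Per-node worker: callable on its own from a hotplug-style path. */
static void init_node_keys(int node)
{
	printf("annotating locks on node %d\n", node);
}

/* Boot-time wrapper: the old single function becomes a loop over nodes. */
static void init_all_keys(void)
{
	int node;

	for (node = 0; node < NR_NODES; node++)
		init_node_keys(node);
}

int main(void)
{
	init_all_keys();	/* boot: annotate everything */
	init_node_keys(2);	/* hotplug: annotate only the new node */
	return 0;
}
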
@@ -665,27 +697,7 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-/*
- * chicken and egg problem: delay the per-cpu array allocation
- * until the general caches are up.
- */
-static enum {
-	NONE,
-	PARTIAL_AC,
-	PARTIAL_L3,
-	EARLY,
-	FULL
-} g_cpucache_up;
-
-/*
- * used by boot code to determine if it can use slab based allocator
- */
-int slab_is_available(void)
-{
-	return g_cpucache_up >= EARLY;
-}
-
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -826,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -836,17 +848,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
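
next_reap_node() above advances a per-CPU cursor round-robin through the online nodes, wrapping via first_node() when next_node() runs off the end of node_online_map. The same wraparound walk over a plain bitmask, as a self-contained sketch (the mask is a made-up stand-in for the kernel's node map):

#include <stdio.h>

#define MAX_NODES 8

/* Stand-in for node_online_map: bits 0, 2 and 5 are "online". */
static unsigned int online_mask = (1u << 0) | (1u << 2) | (1u << 5);

/* Return the next online node after 'node', wrapping to the first. */
static int next_online_node(int node)
{
	int n;

	for (n = node + 1; n < MAX_NODES; n++)
		if (online_mask & (1u << n))
			return n;
	for (n = 0; n < MAX_NODES; n++)	/* wrapped off the end */
		if (online_mask & (1u << n))
			return n;
	return -1;	/* no nodes online */
}

int main(void)
{
	int node = 0, i;

	for (i = 0; i < 6; i++) {	/* prints: 2 5 0 2 5 0 */
		node = next_online_node(node);
		printf("%d ", node);
	}
	printf("\n");
	return 0;
}
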
@@ -863,7 +875,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1027,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1254,6 +1266,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 		kfree(shared);
 		free_alien_cache(alien);
 	}
+	init_node_lock_keys(node);
+
 	return 0;
 bad:
 	cpuup_canceled(cpu);
@@ -1286,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
@@ -3105,13 +3119,19 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 	} else {
 		STATS_INC_ALLOCMISS(cachep);
 		objp = cache_alloc_refill(cachep, flags);
+		/*
+		 * the 'ac' may be updated by cache_alloc_refill(),
+		 * and kmemleak_erase() requires its correct value.
+		 */
+		ac = cpu_cache_get(cachep);
 	}
 	/*
 	 * To avoid a false negative, if an object that is in one of the
 	 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
 	 * treat the array pointers as a reference to the object.
 	 */
-	kmemleak_erase(&ac->entry[ac->avail]);
+	if (objp)
+		kmemleak_erase(&ac->entry[ac->avail]);
 	return objp;
 }
 
@@ -3308,7 +3328,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
-	if (unlikely(nodeid == -1))
+	if (nodeid == -1)
 		nodeid = numa_node_id();
 
 	if (unlikely(!cachep->nodelists[nodeid])) {
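
The only change here is dropping the unlikely() annotation from the nodeid == -1 test, presumably because callers pass -1 (meaning "use the current node") often enough that hinting the branch as rare was counterproductive. For reference, the kernel's likely()/unlikely() macros boil down to __builtin_expect (a GCC/Clang builtin); a compilable illustration, where pick_node() is a made-up stand-in rather than kernel code:

#include <stdio.h>

/* The kernel's hint macros, reduced to their GCC builtin. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Old shape: tells the compiler the -1 case is rare. */
static int pick_node_hinted(int nodeid)
{
	if (unlikely(nodeid == -1))
		nodeid = 0;	/* stand-in for numa_node_id() */
	return nodeid;
}

/* New shape: no hint, since -1 is actually common. */
static int pick_node(int nodeid)
{
	if (nodeid == -1)
		nodeid = 0;
	return nodeid;
}

int main(void)
{
	printf("%d %d\n", pick_node_hinted(-1), pick_node(3));
	return 0;
}
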
@@ -3560,7 +3580,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3623,7 +3643,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 				    gfp_t flags,
 				    int nodeid)
@@ -3651,7 +3671,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	return ret;
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3671,7 +3691,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	return __do_kmalloc_node(size, flags, node, NULL);
 }
 EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3703,7 +3723,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 }
 
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
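
The remaining hunks swap CONFIG_KMEMTRACE for CONFIG_TRACING as the guard around slab_buffer_size() and the _notrace allocation variants: out-of-line entry points the tracing machinery can call without re-entering its own hooks. The compile-time shape of that pattern, sketched with a hypothetical MYLIB_TRACING switch standing in for the kernel config symbol:

#include <stdio.h>
#include <stdlib.h>

#define MYLIB_TRACING 1	/* hypothetical stand-in for CONFIG_TRACING */

/* Shared implementation behind both entry points. */
static void *do_alloc(size_t size)
{
	return malloc(size);
}

/* Normal entry point: fires the tracing hook. */
void *my_alloc(size_t size)
{
	void *p = do_alloc(size);

	printf("trace: alloc %zu -> %p\n", size, p);
	return p;
}

#if MYLIB_TRACING
/* Out-of-line variant a tracer can call without recursing into itself. */
void *my_alloc_notrace(size_t size)
{
	return do_alloc(size);
}
#endif

int main(void)
{
	free(my_alloc(16));
#if MYLIB_TRACING
	free(my_alloc_notrace(16));
#endif
	return 0;
}
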