Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a6c9166996a9..3f4822938f46 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -490,7 +490,7 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 size_t slab_buffer_size(struct kmem_cache *cachep)
 {
 	return cachep->buffer_size;
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
 * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -875,7 +875,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
@@ -3578,7 +3578,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
@@ -3641,7 +3641,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
-#ifdef CONFIG_KMEMTRACE
+#ifdef CONFIG_TRACING
 void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 				    gfp_t flags,
 				    int nodeid)
@@ -3669,7 +3669,7 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 	return ret;
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node,
@@ -3689,7 +3689,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	return __do_kmalloc_node(size, flags, node, NULL);
 }
 EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB */
+#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3721,7 +3721,7 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 }
 
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_KMEMTRACE)
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, __builtin_return_address(0));
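Note (not part of the patch): every hunk above is a rename (reap_work -> slab_reap_work, reap_node -> slab_reap_node) or a config-guard switch (CONFIG_KMEMTRACE -> CONFIG_TRACING); no behavior appears to change. Below is a minimal sketch of the per-CPU variable pattern the renamed symbols use, written with the same DEFINE_PER_CPU/per_cpu/__get_cpu_var interfaces that appear in the diff. The variable example_counter and the two helper functions are hypothetical, for illustration only; __get_cpu_var() was the current-CPU accessor at the time of this patch, while later kernels use this_cpu_read()/this_cpu_write().

#include <linux/percpu.h>

/* Hypothetical per-CPU counter; every possible CPU gets its own copy. */
static DEFINE_PER_CPU(unsigned long, example_counter);

/* Write another CPU's copy by CPU number, as init_reap_node() does
 * with slab_reap_node. */
static void example_set(int cpu, unsigned long val)
{
	per_cpu(example_counter, cpu) = val;
}

/* Read the current CPU's copy, as next_reap_node() does. */
static unsigned long example_get_local(void)
{
	return __get_cpu_var(example_counter);
}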