Diffstat (limited to 'mm/slab.c'):

 mm/slab.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 7dfa481c96ba..211b1746c63c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -685,7 +685,7 @@ int slab_is_available(void)
 	return g_cpucache_up >= EARLY;
 }
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -826,7 +826,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -836,17 +836,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -863,7 +863,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1027,7 +1027,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1286,9 +1286,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
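
A note on why the rename matters (editor's sketch, not part of the patch): start_cpu_timer() above declares a local pointer named reap_work and initializes it with per_cpu(reap_work, cpu). The likely motivation for the slab_ prefix is that once percpu accessors reference the symbol name directly, rather than through a mangled per-cpu prefix, the macro argument would resolve to the local variable being declared, shadowing the percpu symbol. Renaming the percpu variables to slab_reap_work/slab_reap_node removes the collision while the locals keep their short names. Below is a minimal userspace illustration of the shadowing hazard; NR_CPUS, the array-based DEFINE_PER_CPU/per_cpu emulation, and the driver in main() are illustrative stand-ins, not the kernel's real machinery.

#include <stdio.h>

/* Userspace emulation: model per-CPU storage as a plain array keyed by cpu id. */
#define NR_CPUS 4
#define DEFINE_PER_CPU(type, name) type name[NR_CPUS]
#define per_cpu(name, cpu) ((name)[(cpu)])

/* Prefixed symbol, mirroring the patch; before it, this was plain "reap_work". */
DEFINE_PER_CPU(int, slab_reap_work);

static void start_cpu_timer(int cpu)
{
	/*
	 * The local keeps its short name because the file-scope symbol no
	 * longer shares it.  Had the symbol still been "reap_work", the
	 * macro argument below would bind to this local (already in scope
	 * in its own initializer) instead of the per-CPU array.
	 */
	int *reap_work = &per_cpu(slab_reap_work, cpu);

	*reap_work = cpu;                      /* touch this cpu's slot */
	printf("cpu%d -> %d\n", cpu, *reap_work);
}

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		start_cpu_timer(cpu);
	return 0;
}

With the file-scope symbol still named reap_work, the initializer would expand to &reap_work[cpu], computing an offset from the uninitialized local pointer rather than indexing the per-CPU array; the prefix makes that mistake impossible to write by accident.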