Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index a6c9166996a9..29b09599af7c 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -697,7 +697,7 @@ static inline void init_lock_keys(void)
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
 
-static DEFINE_PER_CPU(struct delayed_work, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -838,7 +838,7 @@ __setup("noaliencache", noaliencache_setup);
  * objects freed on different nodes from which they were allocated) and the
  * flushing of remote pcps by calling drain_node_pages.
  */
-static DEFINE_PER_CPU(unsigned long, reap_node);
+static DEFINE_PER_CPU(unsigned long, slab_reap_node);
 
 static void init_reap_node(int cpu)
 {
@@ -848,17 +848,17 @@ static void init_reap_node(int cpu)
 	if (node == MAX_NUMNODES)
 		node = first_node(node_online_map);
 
-	per_cpu(reap_node, cpu) = node;
+	per_cpu(slab_reap_node, cpu) = node;
 }
 
 static void next_reap_node(void)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	node = next_node(node, node_online_map);
 	if (unlikely(node >= MAX_NUMNODES))
 		node = first_node(node_online_map);
-	__get_cpu_var(reap_node) = node;
+	__get_cpu_var(slab_reap_node) = node;
 }
 
 #else
@@ -875,7 +875,7 @@ static void next_reap_node(void)
  */
 static void __cpuinit start_cpu_timer(int cpu)
 {
-	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
@@ -1039,7 +1039,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
  */
static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
 {
-	int node = __get_cpu_var(reap_node);
+	int node = __get_cpu_var(slab_reap_node);
 
 	if (l3->alien) {
 		struct array_cache *ac = l3->alien[node];
@@ -1300,9 +1300,9 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		 * anything expensive but will only modify reap_work
 		 * and reschedule the timer.
 		 */
-		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
+		cancel_rearming_delayed_work(&per_cpu(slab_reap_work, cpu));
 		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(reap_work, cpu).work.func = NULL;
+		per_cpu(slab_reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
 	case CPU_DOWN_FAILED_FROZEN:
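
The change is a mechanical rename of slab's per-CPU variables, reap_work -> slab_reap_work and reap_node -> slab_reap_node, touching every DEFINE_PER_CPU(), per_cpu(), and __get_cpu_var() site, presumably so these symbols stay unique once per-CPU variables from different subsystems share one global namespace. As a rough illustration of the accessor pattern involved, here is a minimal userspace sketch (not kernel code: NR_CPUS and the two macros below are simplified stand-ins for the kernel's per-CPU machinery):

#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the kernel's DEFINE_PER_CPU(type, name): one slot per CPU. */
#define DEFINE_PER_CPU(type, name) type name[NR_CPUS]
/* Stand-in for per_cpu(name, cpu): access a specific CPU's slot. */
#define per_cpu(name, cpu) ((name)[(cpu)])

/* Prefixed name, mirroring the reap_node -> slab_reap_node rename. */
static DEFINE_PER_CPU(unsigned long, slab_reap_node);

int main(void)
{
	int cpu;

	/* Assign each CPU a node to reap, loosely like init_reap_node(). */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		per_cpu(slab_reap_node, cpu) = cpu % 2;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d reaps node %lu\n", cpu,
		       per_cpu(slab_reap_node, cpu));
	return 0;
}

In the real kernel, per-CPU variables live in dedicated per-CPU data sections rather than a flat array, so the variable name itself becomes a global symbol; that is why a generic name like reap_work invites collisions and gains a slab_ prefix here.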