diff options
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 6 |
1 file changed, 3 insertions, 3 deletions
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache, | |||
313 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, | 313 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, |
314 | int node); | 314 | int node); |
315 | static int enable_cpucache(struct kmem_cache *cachep); | 315 | static int enable_cpucache(struct kmem_cache *cachep); |
316 | static void cache_reap(void *unused); | 316 | static void cache_reap(struct work_struct *unused); |
317 | 317 | ||
318 | /* | 318 | /* |
319 | * This function must be completely optimized away if a constant is passed to | 319 | * This function must be completely optimized away if a constant is passed to |
@@ -925,7 +925,7 @@ static void __devinit start_cpu_timer(int cpu) | |||
925 | */ | 925 | */ |
926 | if (keventd_up() && reap_work->work.func == NULL) { | 926 | if (keventd_up() && reap_work->work.func == NULL) { |
927 | init_reap_node(cpu); | 927 | init_reap_node(cpu); |
928 | INIT_DELAYED_WORK(reap_work, cache_reap, NULL); | 928 | INIT_DELAYED_WORK(reap_work, cache_reap); |
929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); | 929 | schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu); |
930 | } | 930 | } |
931 | } | 931 | } |
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3, | |||
3815 | * If we cannot acquire the cache chain mutex then just give up - we'll try | 3815 | * If we cannot acquire the cache chain mutex then just give up - we'll try |
3816 | * again on the next iteration. | 3816 | * again on the next iteration. |
3817 | */ | 3817 | */ |
3818 | static void cache_reap(void *unused) | 3818 | static void cache_reap(struct work_struct *unused) |
3819 | { | 3819 | { |
3820 | struct kmem_cache *searchp; | 3820 | struct kmem_cache *searchp; |
3821 | struct kmem_list3 *l3; | 3821 | struct kmem_list3 *l3; |