Diffstat (limited to 'mm')
 -rw-r--r--  mm/slab.c | 12 ++++++------
 -rw-r--r--  mm/swap.c |  4 ++--
 2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
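
The slab.c hunks follow the reworked workqueue convention: the per-CPU reap timer becomes a struct delayed_work, and the handler receives the struct work_struct * itself rather than a void * context argument. The sketch below (not taken from the patch; the example_* names are made up for illustration) shows what a self-rearming per-CPU delayed work item looks like under this API, recovering the delayed_work from the work pointer with container_of().

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>

/* Hypothetical per-CPU work item, mirroring slab's reap_work. */
static DEFINE_PER_CPU(struct delayed_work, example_work);

/* Handler now takes the work_struct pointer instead of a void *cookie. */
static void example_reap(struct work_struct *w)
{
	struct delayed_work *dwork =
		container_of(w, struct delayed_work, work);

	/* ... periodic cache reaping would go here ... */

	/* Re-arm the work item on the current CPU. */
	schedule_delayed_work(dwork, HZ);
}

static void example_start(int cpu)
{
	struct delayed_work *dwork = &per_cpu(example_work, cpu);

	INIT_DELAYED_WORK(dwork, example_reap);
	schedule_delayed_work_on(cpu, dwork, HZ + 3 * cpu);
}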
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@ void lru_add_drain(void)
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
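
The swap.c hunks track the matching change to schedule_on_each_cpu(), which now takes only the work function; any per-CPU context comes from the work item itself. A minimal sketch of the new calling convention (the example_* names are illustrative, not from the patch):

#include <linux/workqueue.h>

/* Callback signature under the reworked API: no separate void *data argument. */
static void example_per_cpu(struct work_struct *dummy)
{
	/* runs once on each online CPU from keventd */
}

static int example_drain_all(void)
{
	/* schedule_on_each_cpu() now takes just the function pointer. */
	return schedule_on_each_cpu(example_per_cpu);
}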