author    Linus Torvalds <torvalds@woody.osdl.org>  2006-12-06 11:01:37 -0500
committer Linus Torvalds <torvalds@woody.osdl.org>  2006-12-06 11:01:37 -0500
commit    dd8856bda5f1308beb113281b248683992998a9e (patch)
tree      5dc35290cdbca32cbdecd93a76fa5b29075ac18c /mm
parent    f81cff0d4067e41fd7383d9c013cc82da7c169d2 (diff)
parent    06328b4f7919e9d2169d45cadc5a37b828a78eda (diff)
Merge git://git.infradead.org/users/dhowells/workq-2.6
* git://git.infradead.org/users/dhowells/workq-2.6:
  Actually update the fixed up compile failures.
  WorkQueue: Fix up arch-specific work items where possible
  WorkStruct: make allyesconfig
  WorkStruct: Pass the work_struct pointer instead of context data
  WorkStruct: Merge the pending bit into the wq_data pointer
  WorkStruct: Typedef the work function prototype
  WorkStruct: Separate delayable and non-delayable events.
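The core of this merge is the work item API change: handlers now receive the work_struct pointer itself instead of an opaque context pointer, and callers recover their data with container_of(); delayable work moves into its own struct delayed_work. A minimal sketch of the new pattern under that API (frob_device and its functions are hypothetical, not part of this merge):

#include <linux/workqueue.h>

/* Hypothetical driver context; the work items are embedded in it so the
 * handlers can recover it with container_of() rather than a void *data
 * argument. */
struct frob_device {
	int			unit;
	struct work_struct	irq_work;	/* non-delayable work */
	struct delayed_work	poll_work;	/* delayable work */
};

/* New typedef'd prototype: void (*work_func_t)(struct work_struct *). */
static void frob_irq_handler(struct work_struct *work)
{
	struct frob_device *dev =
		container_of(work, struct frob_device, irq_work);
	/* ... use dev->unit ... */
}

static void frob_poll(struct work_struct *work)
{
	/* A delayed handler is passed the embedded work member. */
	struct frob_device *dev =
		container_of(work, struct frob_device, poll_work.work);
	/* ... */
}

static void frob_setup(struct frob_device *dev)
{
	INIT_WORK(&dev->irq_work, frob_irq_handler);	/* no data arg */
	INIT_DELAYED_WORK(&dev->poll_work, frob_poll);
	schedule_work(&dev->irq_work);
	schedule_delayed_work(&dev->poll_work, HZ);	/* run in ~1s */
}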
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  12
-rw-r--r--  mm/swap.c   4
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 3c4a7e34edd..5de81473df3 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -313,7 +313,7 @@ static int drain_freelist(struct kmem_cache *cache,
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep);
-static void cache_reap(void *unused);
+static void cache_reap(struct work_struct *unused);
 
 /*
  * This function must be completely optimized away if a constant is passed to
@@ -753,7 +753,7 @@ int slab_is_available(void)
 	return g_cpucache_up == FULL;
 }
 
-static DEFINE_PER_CPU(struct work_struct, reap_work);
+static DEFINE_PER_CPU(struct delayed_work, reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
@@ -916,16 +916,16 @@ static void next_reap_node(void)
  */
 static void __devinit start_cpu_timer(int cpu)
 {
-	struct work_struct *reap_work = &per_cpu(reap_work, cpu);
+	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
 
 	/*
 	 * When this gets called from do_initcalls via cpucache_init(),
 	 * init_workqueues() has already run, so keventd will be setup
 	 * at that time.
 	 */
-	if (keventd_up() && reap_work->func == NULL) {
+	if (keventd_up() && reap_work->work.func == NULL) {
 		init_reap_node(cpu);
-		INIT_WORK(reap_work, cache_reap, NULL);
+		INIT_DELAYED_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
 	}
 }
@@ -3815,7 +3815,7 @@ void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
  * If we cannot acquire the cache chain mutex then just give up - we'll try
  * again on the next iteration.
  */
-static void cache_reap(void *unused)
+static void cache_reap(struct work_struct *unused)
 {
 	struct kmem_cache *searchp;
 	struct kmem_list3 *l3;
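The slab changes above follow from struct delayed_work embedding a work_struct as its first member, which is why the idle check becomes reap_work->work.func and INIT_WORK becomes INIT_DELAYED_WORK. Roughly, paraphrasing <linux/workqueue.h> after this merge:

struct delayed_work {
	struct work_struct work;	/* the actual work item */
	struct timer_list timer;	/* defers queueing of 'work' */
};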
diff --git a/mm/swap.c b/mm/swap.c
index 2e0e871f542..d9a3770d8f3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -216,7 +216,7 @@ void lru_add_drain(void)
 }
 
 #ifdef CONFIG_NUMA
-static void lru_add_drain_per_cpu(void *dummy)
+static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
 	lru_add_drain();
 }
@@ -226,7 +226,7 @@ static void lru_add_drain_per_cpu(void *dummy)
  */
 int lru_add_drain_all(void)
 {
-	return schedule_on_each_cpu(lru_add_drain_per_cpu, NULL);
+	return schedule_on_each_cpu(lru_add_drain_per_cpu);
 }
 
 #else
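The swap.c change reflects schedule_on_each_cpu() dropping its context argument along with the rest of the API; its post-merge signature is simply:

int schedule_on_each_cpu(work_func_t func);

so per-CPU callbacks like lru_add_drain_per_cpu() now just ignore the work_struct they receive when they need no context.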