author    Andrew Morton <akpm@osdl.org>        2006-01-06 03:11:14 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-01-06 11:33:28 -0500
commit    80bfed904c690642db9d4178950735299160950b (patch)
tree      5aca96e02dfebf4665a9f1c7e1d3fa58d8009c09
parent    210fe530305ee50cd889fe9250168228b2994f32 (diff)
[PATCH] consolidate lru_add_drain() and lru_drain_cache()
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Rajesh Shah <rajesh.shah@intel.com>
Cc: Li Shaohua <shaohua.li@intel.com>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  mm/swap.c  27
1 file changed, 11 insertions(+), 16 deletions(-)
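The consolidation works because draining by CPU id covers both callers: lru_add_drain() drains the local CPU's pagevecs (with get_cpu() disabling preemption so the task cannot migrate mid-drain), while the CPU-hotplug callback drains a dead CPU's pagevecs, where no locking is needed at all. A minimal sketch of the get_cpu()/put_cpu() pattern the new wrapper relies on (illustrative only; do_percpu_work() is a hypothetical stand-in for the drain):

	int cpu = get_cpu();	/* disables preemption and returns this CPU's id */
	do_percpu_work(cpu);	/* hypothetical helper; we cannot migrate to another CPU here */
	put_cpu();		/* re-enables preemption */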
diff --git a/mm/swap.c b/mm/swap.c
index 73d351439ef6..ee6d71ccfa56 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -156,16 +156,22 @@ void fastcall lru_cache_add_active(struct page *page)
 	put_cpu_var(lru_add_active_pvecs);
 }
 
-void lru_add_drain(void)
+static void __lru_add_drain(int cpu)
 {
-	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
+	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
 
+	/* CPU is dead, so no locking needed. */
 	if (pagevec_count(pvec))
 		__pagevec_lru_add(pvec);
-	pvec = &__get_cpu_var(lru_add_active_pvecs);
+	pvec = &per_cpu(lru_add_active_pvecs, cpu);
 	if (pagevec_count(pvec))
 		__pagevec_lru_add_active(pvec);
-	put_cpu_var(lru_add_pvecs);
+}
+
+void lru_add_drain(void)
+{
+	__lru_add_drain(get_cpu());
+	put_cpu();
 }
 
 /*
@@ -412,17 +418,6 @@ void vm_acct_memory(long pages)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void lru_drain_cache(unsigned int cpu)
-{
-	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);
-
-	/* CPU is dead, so no locking needed. */
-	if (pagevec_count(pvec))
-		__pagevec_lru_add(pvec);
-	pvec = &per_cpu(lru_add_active_pvecs, cpu);
-	if (pagevec_count(pvec))
-		__pagevec_lru_add_active(pvec);
-}
 
 /* Drop the CPU's cached committed space back into the central pool. */
 static int cpu_swap_callback(struct notifier_block *nfb,
@@ -435,7 +430,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 	if (action == CPU_DEAD) {
 		atomic_add(*committed, &vm_committed_space);
 		*committed = 0;
-		lru_drain_cache((long)hcpu);
+		__lru_add_drain((long)hcpu);
 	}
 	return NOTIFY_OK;
 }
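For reference, this is how the consolidated code reads with the patch applied, reconstructed from the hunks above (a sketch of the net result, not a verbatim copy of mm/swap.c):

static void __lru_add_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvecs, cpu);

	/* CPU is dead, so no locking needed. */
	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);
	pvec = &per_cpu(lru_add_active_pvecs, cpu);
	if (pagevec_count(pvec))
		__pagevec_lru_add_active(pvec);
}

void lru_add_drain(void)
{
	__lru_add_drain(get_cpu());
	put_cpu();
}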