author    Christoph Lameter <clameter@sgi.com>  2008-02-05 01:29:11 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-05 12:44:17 -0500
commit    9f8f2172537de7af0b0fbd33502d18d52b1339bc (patch)
tree      273c86583ed0295059c5526d3bd6927520a20add
parent    e2848a0efedef4dad52d1334d37f8719cd6268fd (diff)
Page allocator: clean up pcp draining functions
- Add comments explaining how drain_pages() works.

- Eliminate useless functions.

- Rename drain_all_local_pages to drain_all_pages(). It does drain
  all pages, not only those of the local processor.

- Eliminate useless interrupt off / on sequences. drain_pages()
  disables interrupts on its own, and the execution thread is pinned
  to the processor by the caller, so there is no need for the callers
  to disable interrupts (modelled in the sketch after this message).

- Put the drain_all_pages() declaration in gfp.h and remove the
  declarations from suspend.h and from mm/memory_hotplug.c.

- Make software suspend call drain_all_pages(). Draining only the
  processor-local pages may not be the right approach if software
  suspend wants to support SMP. If it calls drain_all_pages() then we
  can make drain_pages() static.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
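For orientation, the layering the patch ends up with (drain_pages() at the bottom, drain_local_pages() as a per-CPU callback, drain_all_pages() broadcasting via on_each_cpu()) can be modelled outside the kernel. The sketch below is a hypothetical userspace mock, not kernel code: pcp_count, current_cpu, and the stubbed smp_processor_id()/on_each_cpu() are inventions for illustration, and the four-argument on_each_cpu() form merely mirrors the call as it appears in this patch.

/*
 * Hypothetical userspace model of the pcp-draining layering.
 * Everything here is a mock; only the call structure matches the patch.
 */
#include <stdio.h>

#define NR_CPUS 4

static int pcp_count[NR_CPUS] = { 3, 0, 7, 2 }; /* fake per-cpu page counts */
static int current_cpu;                 /* backs the mock smp_processor_id() */

static unsigned int smp_processor_id(void)
{
        return current_cpu;
}

/* Mock of on_each_cpu(func, info, retry, wait): run func once per CPU. */
static void on_each_cpu(void (*func)(void *), void *info, int retry, int wait)
{
        (void)retry;
        (void)wait;
        for (current_cpu = 0; current_cpu < NR_CPUS; current_cpu++)
                func(info);
}

/*
 * Only this bottom layer touches the pcp lists. In the real kernel it
 * disables interrupts around that work itself, which is why the callers'
 * local_irq_save()/local_irq_restore() pairs could be deleted.
 */
static void drain_pages(unsigned int cpu)
{
        printf("cpu %u: returning %d pcp pages to the buddy allocator\n",
               cpu, pcp_count[cpu]);
        pcp_count[cpu] = 0;
}

/* Takes a dummy void * so it can double as the on_each_cpu() callback. */
static void drain_local_pages(void *dummy)
{
        (void)dummy;
        drain_pages(smp_processor_id());
}

/* Drains every CPU, not just the local one; hence the rename. */
static void drain_all_pages(void)
{
        on_each_cpu(drain_local_pages, NULL, 0, 1);
}

int main(void)
{
        drain_all_pages();
        return 0;
}

With this split, memory hotplug, page isolation, and software suspend all share drain_all_pages() as the single entry point for a full drain, while drain_pages() itself remains static to mm/page_alloc.c.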
-rw-r--r--  include/linux/gfp.h       2
-rw-r--r--  include/linux/suspend.h   1
-rw-r--r--  kernel/power/snapshot.c   4
-rw-r--r--  mm/memory_hotplug.c       6
-rw-r--r--  mm/page_alloc.c          79
5 files changed, 48 insertions, 44 deletions
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 7e93a9ae7064..0c6ce515185d 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -228,5 +228,7 @@ extern void FASTCALL(free_cold_page(struct page *page));
 
 void page_alloc_init(void);
 void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
+void drain_all_pages(void);
+void drain_local_pages(void *dummy);
 
 #endif /* __LINUX_GFP_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 646ce2d068d4..1d7d4c5797ee 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -130,7 +130,6 @@ struct pbe {
 };
 
 /* mm/page_alloc.c */
-extern void drain_local_pages(void);
 extern void mark_free_pages(struct zone *zone);
 
 /**
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f6a5df934f8d..95250d7c8d91 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)
 
 	printk(KERN_INFO "PM: Creating hibernation image: \n");
 
-	drain_local_pages();
+	drain_local_pages(NULL);
 	nr_pages = count_data_pages();
 	nr_highmem = count_highmem_pages();
 	printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
 	/* During allocating of suspend pagedir, new cold pages may appear.
 	 * Kill them.
 	 */
-	drain_local_pages();
+	drain_local_pages(NULL);
 	copy_data_pages(&copy_bm, &orig_bm);
 
 	/*
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9512a544d044..7469c503580d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -481,8 +481,6 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	return offlined;
 }
 
-extern void drain_all_local_pages(void);
-
 int offline_pages(unsigned long start_pfn,
 	unsigned long end_pfn, unsigned long timeout)
 {
@@ -540,7 +538,7 @@ repeat:
 		lru_add_drain_all();
 		flush_scheduled_work();
 		cond_resched();
-		drain_all_local_pages();
+		drain_all_pages();
 	}
 
 	pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -563,7 +561,7 @@ repeat:
 	flush_scheduled_work();
 	yield();
 	/* drain pcp pages, this is synchronous. */
-	drain_all_local_pages();
+	drain_all_pages();
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2838c24e582..5c7de8e959fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -890,7 +890,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 }
 #endif
 
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
 	struct zone *zone;
@@ -915,6 +922,22 @@ static void __drain_pages(unsigned int cpu)
 	}
 }
 
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+	drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+	on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
 #ifdef CONFIG_HIBERNATION
 
 void mark_free_pages(struct zone *zone)
@@ -952,37 +975,6 @@ void mark_free_pages(struct zone *zone)
 #endif /* CONFIG_PM */
 
 /*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
-	drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-
-	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
-/*
  * Free a 0-order page
  */
 static void fastcall free_hot_cold_page(struct page *page, int cold)
@@ -1569,7 +1561,7 @@ nofail_alloc:
 	cond_resched();
 
 	if (order != 0)
-		drain_all_local_pages();
+		drain_all_pages();
 
 	if (likely(did_some_progress)) {
 		page = get_page_from_freelist(gfp_mask, order,
@@ -3978,10 +3970,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 	int cpu = (unsigned long)hcpu;
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		local_irq_disable();
-		__drain_pages(cpu);
+		drain_pages(cpu);
+
+		/*
+		 * Spill the event counters of the dead processor
+		 * into the current processor's event counters.
+		 * This artificially elevates the count of the current
+		 * processor.
+		 */
 		vm_events_fold_cpu(cpu);
-		local_irq_enable();
+
+		/*
+		 * Zero the differential counters of the dead processor
+		 * so that the vm statistics are consistent.
+		 *
+		 * This is only okay since the processor is dead and cannot
+		 * race with what we are doing.
+		 */
 		refresh_cpu_vm_stats(cpu);
 	}
 	return NOTIFY_OK;
@@ -4480,7 +4485,7 @@ int set_migratetype_isolate(struct page *page)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (!ret)
-		drain_all_local_pages();
+		drain_all_pages();
 	return ret;
 }
 