author     Christoph Lameter <clameter@sgi.com>    2008-02-05 01:29:11 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-02-05 12:44:17 -0500
commit     9f8f2172537de7af0b0fbd33502d18d52b1339bc
tree       273c86583ed0295059c5526d3bd6927520a20add /mm/page_alloc.c
parent     e2848a0efedef4dad52d1334d37f8719cd6268fd
Page allocator: clean up pcp draining functions
- Add comments explaining how drain_pages() works.

- Eliminate useless functions.

- Rename drain_all_local_pages to drain_all_pages(). It drains all
  pages, not only those of the local processor.

- Eliminate useless interrupt off/on sequences. drain_pages()
  disables interrupts on its own, and the execution thread is pinned
  to a processor by the caller, so there is no need for the caller to
  disable interrupts.

- Put the drain_all_pages() declaration in gfp.h and remove the
  declarations from suspend.h and from mm/memory_hotplug.c.

- Make software suspend call drain_all_pages(). Draining only the
  processor-local pages may not be the right approach if software
  suspend wants to support SMP; once it calls drain_all_pages(),
  drain_pages() can be made static.

[akpm@linux-foundation.org: fix build]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
 -rw-r--r--  mm/page_alloc.c | 79
 1 file changed, 42 insertions(+), 37 deletions(-)
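Before the hunks, a reading aid: the drain API this patch leaves behind, assembled from the added (+) lines below; it is not a separate hunk. The key simplifications are that drain_pages() now disables interrupts on its own, so callers need no irq off/on sequences, and that on_each_cpu() already runs the function on the calling CPU, so no separate local pass is needed.

	/* Drain pages of the indicated processor. The processor must either
	 * be the current processor with the thread pinned to it, or a
	 * processor that is not online. Disables interrupts internally. */
	static void drain_pages(unsigned int cpu);

	/* Spill all of this CPU's per-cpu pages back into the buddy allocator. */
	void drain_local_pages(void *arg)
	{
		drain_pages(smp_processor_id());
	}

	/* Spill all the per-cpu pages from all CPUs back into the buddy allocator. */
	void drain_all_pages(void)
	{
		on_each_cpu(drain_local_pages, NULL, 0, 1);	/* retry=0, wait=1 */
	}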
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2838c24e582..5c7de8e959fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -890,7 +890,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 }
 #endif
 
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
 	struct zone *zone;
@@ -915,6 +922,22 @@ static void __drain_pages(unsigned int cpu)
 	}
 }
 
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+	drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+	on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
 #ifdef CONFIG_HIBERNATION
 
 void mark_free_pages(struct zone *zone)
@@ -952,37 +975,6 @@ void mark_free_pages(struct zone *zone)
 #endif /* CONFIG_PM */
 
 /*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
-	drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-
-	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
-/*
  * Free a 0-order page
  */
 static void fastcall free_hot_cold_page(struct page *page, int cold)
@@ -1569,7 +1561,7 @@ nofail_alloc:
 	cond_resched();
 
 	if (order != 0)
-		drain_all_local_pages();
+		drain_all_pages();
 
 	if (likely(did_some_progress)) {
 		page = get_page_from_freelist(gfp_mask, order,
@@ -3978,10 +3970,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 	int cpu = (unsigned long)hcpu;
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		local_irq_disable();
-		__drain_pages(cpu);
+		drain_pages(cpu);
+
+		/*
+		 * Spill the event counters of the dead processor
+		 * into the current processors event counters.
+		 * This artificially elevates the count of the current
+		 * processor.
+		 */
 		vm_events_fold_cpu(cpu);
-		local_irq_enable();
+
+		/*
+		 * Zero the differential counters of the dead processor
+		 * so that the vm statistics are consistent.
+		 *
+		 * This is only okay since the processor is dead and cannot
+		 * race with what we are doing.
+		 */
 		refresh_cpu_vm_stats(cpu);
 	}
 	return NOTIFY_OK;
@@ -4480,7 +4485,7 @@ int set_migratetype_isolate(struct page *page)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (!ret)
-		drain_all_local_pages();
+		drain_all_pages();
 	return ret;
 }
 
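The matching caller-side changes live outside mm/page_alloc.c and so do not appear in this file-limited diff. Sketched from the commit message only, with hypothetical placement:

	/* Somewhere in the software-suspend path (illustrative, not a hunk
	 * of this diff): the declaration now comes from gfp.h, not from
	 * suspend.h or a local declaration in mm/memory_hotplug.c. */
	#include <linux/gfp.h>

		drain_all_pages();	/* replaces drain_all_local_pages() */

With all external callers converted to drain_all_pages(), drain_pages() itself can remain static to mm/page_alloc.c.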