Diffstat (limited to 'mm')
-rw-r--r--  mm/memory_hotplug.c    6
-rw-r--r--  mm/page_alloc.c       79
2 files changed, 44 insertions(+), 41 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9512a544d044..7469c503580d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -481,8 +481,6 @@ check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 	return offlined;
 }
 
-extern void drain_all_local_pages(void);
-
 int offline_pages(unsigned long start_pfn,
 		  unsigned long end_pfn, unsigned long timeout)
 {
@@ -540,7 +538,7 @@ repeat:
 		lru_add_drain_all();
 		flush_scheduled_work();
 		cond_resched();
-		drain_all_local_pages();
+		drain_all_pages();
 	}
 
 	pfn = scan_lru_pages(start_pfn, end_pfn);
@@ -563,7 +561,7 @@ repeat:
 	flush_scheduled_work();
 	yield();
 	/* drain pcp pages , this is synchrouns. */
-	drain_all_local_pages();
+	drain_all_pages();
 	/* check again */
 	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 	if (offlined_pages < 0) {
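
The dropped extern suggests drain_all_pages() is now declared in a shared header rather than redeclared locally; since this view is limited to 'mm', that header change is not shown. A minimal sketch of the declarations such a header would carry (the include/linux/gfp.h location is an assumption):

	/* Assumed header declarations; the location (e.g. include/linux/gfp.h)
	 * is not visible in this 'mm'-limited diff.  Signatures follow the
	 * definitions added to mm/page_alloc.c below. */
	void drain_local_pages(void *arg);	/* spill this CPU's pcp lists */
	void drain_all_pages(void);		/* spill pcp lists on all CPUs */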
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b2838c24e582..5c7de8e959fc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -890,7 +890,14 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 }
 #endif
 
-static void __drain_pages(unsigned int cpu)
+/*
+ * Drain pages of the indicated processor.
+ *
+ * The processor must either be the current processor and the
+ * thread pinned to the current processor or a processor that
+ * is not online.
+ */
+static void drain_pages(unsigned int cpu)
 {
 	unsigned long flags;
 	struct zone *zone;
@@ -915,6 +922,22 @@ static void __drain_pages(unsigned int cpu)
 	}
 }
 
+/*
+ * Spill all of this CPU's per-cpu pages back into the buddy allocator.
+ */
+void drain_local_pages(void *arg)
+{
+	drain_pages(smp_processor_id());
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_pages(void)
+{
+	on_each_cpu(drain_local_pages, NULL, 0, 1);
+}
+
 #ifdef CONFIG_HIBERNATION
 
 void mark_free_pages(struct zone *zone)
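
drain_all_pages() delegates the cross-CPU fan-out to on_each_cpu() with wait=1, so the call returns only after every CPU, including the caller, has emptied its pcp lists. A rough expansion of what on_each_cpu(drain_local_pages, NULL, 0, 1) amounts to on kernels of this era (a sketch, not the exact generic code):

	preempt_disable();
	/* run on all other CPUs via IPI and wait for completion */
	smp_call_function(drain_local_pages, NULL, 0, 1);
	/* run on the calling CPU with interrupts off, matching IPI context */
	local_irq_disable();
	drain_local_pages(NULL);
	local_irq_enable();
	preempt_enable();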
@@ -952,37 +975,6 @@ void mark_free_pages(struct zone *zone)
 #endif /* CONFIG_PM */
 
 /*
- * Spill all of this CPU's per-cpu pages back into the buddy allocator.
- */
-void drain_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-}
-
-void smp_drain_local_pages(void *arg)
-{
-	drain_local_pages();
-}
-
-/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator
- */
-void drain_all_local_pages(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__drain_pages(smp_processor_id());
-	local_irq_restore(flags);
-
-	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
-}
-
-/*
  * Free a 0-order page
  */
 static void fastcall free_hot_cold_page(struct page *page, int cold)
@@ -1569,7 +1561,7 @@ nofail_alloc:
 	cond_resched();
 
 	if (order != 0)
-		drain_all_local_pages();
+		drain_all_pages();
 
 	if (likely(did_some_progress)) {
 		page = get_page_from_freelist(gfp_mask, order,
@@ -3978,10 +3970,23 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 	int cpu = (unsigned long)hcpu;
 
 	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		local_irq_disable();
-		__drain_pages(cpu);
+		drain_pages(cpu);
+
+		/*
+		 * Spill the event counters of the dead processor
+		 * into the current processors event counters.
+		 * This artificially elevates the count of the current
+		 * processor.
+		 */
 		vm_events_fold_cpu(cpu);
-		local_irq_enable();
+
+		/*
+		 * Zero the differential counters of the dead processor
+		 * so that the vm statistics are consistent.
+		 *
+		 * This is only okay since the processor is dead and cannot
+		 * race with what we are doing.
+		 */
 		refresh_cpu_vm_stats(cpu);
 	}
 	return NOTIFY_OK;
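
The notifier path drops its local_irq_disable()/enable() bracket: per the comment added above drain_pages(), draining an offline CPU's lists cannot race, and drain_pages() does its own IRQ-safe locking around each pcp list. Its body is not shown in this diff; a sketch consistent with that contract, with the per-cpu pageset accessors (zone_pcp(), pset->pcp[], free_pages_bulk()) assumed from kernels of this vintage:

	static void drain_pages(unsigned int cpu)	/* sketch only */
	{
		unsigned long flags;
		struct zone *zone;

		for_each_zone(zone) {
			struct per_cpu_pageset *pset;
			int i;

			if (!populated_zone(zone))
				continue;

			pset = zone_pcp(zone, cpu);	/* assumed accessor */
			for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
				struct per_cpu_pages *pcp = &pset->pcp[i];

				/* irq-safe: pcp lists are used from irq context */
				local_irq_save(flags);
				free_pages_bulk(zone, pcp->count, &pcp->list, 0);
				pcp->count = 0;
				local_irq_restore(flags);
			}
		}
	}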
@@ -4480,7 +4485,7 @@ int set_migratetype_isolate(struct page *page)
 out:
 	spin_unlock_irqrestore(&zone->lock, flags);
 	if (!ret)
-		drain_all_local_pages();
+		drain_all_pages();
 	return ret;
 }
 