author     Chris Metcalf <cmetcalf@tilera.com>             2013-09-12 18:13:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-12 18:38:02 -0400
commit     5fbc461636c32efdb9d5216d491d37a40d54535b (patch)
tree       119599fe279ba3daf94422d54cfc7bd2a5ae4a80 /mm
parent     9cb2dc1c950cf0624202c1ea2705705e1e51c278 (diff)
mm: make lru_add_drain_all() selective
Make lru_add_drain_all() only selectively interrupt the cpus that have
per-cpu free pages that can be drained.

This is important in nohz mode where calling mlockall(), for example,
otherwise will interrupt every core unnecessarily.

This is important on workloads where nohz cores are handling 10 Gb traffic
in userspace.  Those CPUs do not enter the kernel and place pages into LRU
pagevecs and they really, really don't want to be interrupted, or they drop
packets on the floor.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
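For illustration only, here is a minimal standalone userspace sketch of the
selection idea: inspect each CPU's pending per-cpu state first, and schedule
drain work only where something is actually queued, leaving idle nohz cores
alone. The names lru_add_pending, cpu_needs_drain and the sample counts are
hypothetical stand-ins, not part of this patch; the real change below applies
the same test with pagevec_count() on the per-cpu pagevecs before calling
schedule_work_on().

/* drain_demo.c: build with  cc -o drain_demo drain_demo.c */
#include <stdio.h>
#include <stdbool.h>

#define NR_CPUS 8

/* Hypothetical stand-ins for the per-cpu pagevec counts the patch checks. */
static int lru_add_pending[NR_CPUS]        = { 3, 0, 0, 7, 0, 0, 0, 1 };
static int lru_rotate_pending[NR_CPUS]     = { 0 };
static int lru_deactivate_pending[NR_CPUS] = { 0, 0, 2, 0, 0, 0, 0, 0 };

/* Analogue of the pagevec_count()/need_activate_page_drain() test below. */
static bool cpu_needs_drain(int cpu)
{
        return lru_add_pending[cpu] || lru_rotate_pending[cpu] ||
               lru_deactivate_pending[cpu];
}

int main(void)
{
        /* Old behaviour: schedule_on_each_cpu() interrupted every CPU.  */
        /* New behaviour: only CPUs with queued pages get drain work.    */
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (cpu_needs_drain(cpu))
                        printf("cpu %d: schedule drain work\n", cpu);
                else
                        printf("cpu %d: left alone (nohz-friendly)\n", cpu);
        }
        return 0;
}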
Diffstat (limited to 'mm')
-rw-r--r--  mm/swap.c | 44 +++++++++++++++++++++++++++++++++++++++-----
1 file changed, 39 insertions(+), 5 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index c899502d3e36..759c3caf44bd 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -432,6 +432,11 @@ static void activate_page_drain(int cpu)
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
 }
 
+static bool need_activate_page_drain(int cpu)
+{
+        return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
+}
+
 void activate_page(struct page *page)
 {
         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -449,6 +454,11 @@ static inline void activate_page_drain(int cpu)
 {
 }
 
+static bool need_activate_page_drain(int cpu)
+{
+        return false;
+}
+
 void activate_page(struct page *page)
 {
         struct zone *zone = page_zone(page);
@@ -701,12 +711,36 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
         lru_add_drain();
 }
 
-/*
- * Returns 0 for success
- */
-int lru_add_drain_all(void)
+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+
+void lru_add_drain_all(void)
 {
-        return schedule_on_each_cpu(lru_add_drain_per_cpu);
+        static DEFINE_MUTEX(lock);
+        static struct cpumask has_work;
+        int cpu;
+
+        mutex_lock(&lock);
+        get_online_cpus();
+        cpumask_clear(&has_work);
+
+        for_each_online_cpu(cpu) {
+                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
+                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
+                    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
+                    need_activate_page_drain(cpu)) {
+                        INIT_WORK(work, lru_add_drain_per_cpu);
+                        schedule_work_on(cpu, work);
+                        cpumask_set_cpu(cpu, &has_work);
+                }
+        }
+
+        for_each_cpu(cpu, &has_work)
+                flush_work(&per_cpu(lru_add_drain_work, cpu));
+
+        put_online_cpus();
+        mutex_unlock(&lock);
 }
 
 /*