author	Mel Gorman <mgorman@suse.de>	2014-08-06 19:07:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:20 -0400
commit	0d5d823ab4e608ec7b52ac4410de4cb74bbe0edd (patch)
tree	7b9d8f7625e45c28ad626c7b5419e8e33c59788e /mm
parent	3484b2de9499df23c4604a513b36f96326ae81ad (diff)
mm: move zone->pages_scanned into a vmstat counter
zone->pages_scanned is a write-intensive cache line during page reclaim
and it's also updated during page free.  Move the counter into vmstat to
take advantage of the per-cpu updates and do not update it in the free
paths unless necessary.

On a small UMA machine running tiobench the difference is marginal.  On a
4-node machine the overhead is more noticeable.  Note that automatic NUMA
balancing was disabled for this test as otherwise the system CPU overhead
is unpredictable.

          3.16.0-rc3    3.16.0-rc3   3.16.0-rc3
             vanilla  rearrange-v5    vmstat-v5
User          746.94        759.78       774.56
System      65336.22      58350.98     32847.27
Elapsed     27553.52      27282.02     27415.04

Note that the overhead reduction will vary depending on where exactly
pages are allocated and freed.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
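[Editor's note] The "per-cpu updates" the message refers to work by letting
each CPU accumulate a small delta for a counter and folding it into the
shared zone-wide value only when the delta crosses a threshold, so frequent
updates mostly touch a CPU-local cache line. The userspace program below is
a simplified sketch of that batching; mod_state, STAT_THRESHOLD and the
data layout are illustrative stand-ins, not the kernel's actual code (the
real implementation lives in mm/vmstat.c and differs in detail).

#include <stdio.h>

#define NR_CPUS		4
#define STAT_THRESHOLD	32	/* fold into the shared counter past this */

static long global_counter;		/* models zone->vm_stat[item] */
static int percpu_diff[NR_CPUS];	/* models per-cpu vm_stat_diff[] */

/* Cheap CPU-local update; the shared cache line is written rarely. */
static void mod_state(int cpu, int delta)
{
	int d = percpu_diff[cpu] + delta;

	if (d > STAT_THRESHOLD || d < -STAT_THRESHOLD) {
		global_counter += d;	/* the only shared write */
		percpu_diff[cpu] = 0;
	} else {
		percpu_diff[cpu] = d;	/* stays in the CPU-local line */
	}
}

int main(void)
{
	for (int i = 0; i < 1000; i++)
		mod_state(i % NR_CPUS, 1);	/* reclaim scanning pages */

	/* readers tolerate the small unfolded per-cpu drift */
	printf("shared counter: %ld (true total 1000)\n", global_counter);
	return 0;
}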
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	12
-rw-r--r--	mm/vmscan.c	7
-rw-r--r--	mm/vmstat.c	3
3 files changed, 15 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index b7381d11f021..daa016063793 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -680,9 +680,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 	int migratetype = 0;
 	int batch_free = 0;
 	int to_free = count;
+	unsigned long nr_scanned;
 
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (to_free) {
 		struct page *page;
@@ -731,8 +734,11 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
+	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	zone->pages_scanned = 0;
+	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
+	if (nr_scanned)
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
 
 	__free_one_page(page, pfn, zone, order, migratetype);
 	if (unlikely(!is_migrate_isolate(migratetype)))
@@ -3248,7 +3254,7 @@ void show_free_areas(unsigned int filter)
 			K(zone_page_state(zone, NR_BOUNCE)),
 			K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
 			K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
-			zone->pages_scanned,
+			K(zone_page_state(zone, NR_PAGES_SCANNED)),
 			(!zone_reclaimable(zone) ? "yes" : "no")
 			);
 		printk("lowmem_reserve[]:");
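[Editor's note] Both free paths above share the same read-then-conditionally-
clear shape: zone_page_state() is read first and __mod_zone_page_state() is
issued only when the counter is nonzero, so freeing pages when no reclaim has
run since the last free never dirties the counter at all. A minimal standalone
model of that pattern (the variables and instrumentation are illustrative,
not kernel code):

#include <stdio.h>

static long nr_pages_scanned;	/* models the zone's NR_PAGES_SCANNED */
static long counter_writes;	/* counts how often we dirtied it */

static void free_path_reset(void)
{
	long nr_scanned = nr_pages_scanned;	/* read side: always cheap */

	if (nr_scanned) {			/* write only when needed */
		nr_pages_scanned -= nr_scanned;
		counter_writes++;
	}
}

int main(void)
{
	nr_pages_scanned = 512;	/* pretend reclaim scanned some pages */
	for (int i = 0; i < 100; i++)
		free_path_reset();	/* 100 frees, but only 1 write */
	printf("writes: %ld, counter: %ld\n", counter_writes, nr_pages_scanned);
	return 0;
}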
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5fec1ba9951f..9c8222b499b4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -174,7 +174,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 
 bool zone_reclaimable(struct zone *zone)
 {
-	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+	return zone_page_state(zone, NR_PAGES_SCANNED) <
+		zone_reclaimable_pages(zone) * 6;
 }
 
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
@@ -1508,7 +1509,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 
 	if (global_reclaim(sc)) {
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
 		else
@@ -1698,7 +1699,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
 				     &nr_scanned, sc, isolate_mode, lru);
 	if (global_reclaim(sc))
-		zone->pages_scanned += nr_scanned;
+		__mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
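[Editor's note] The zone_reclaimable() change keeps the existing give-up
heuristic and only sources it from vmstat: a zone stays reclaimable while the
pages scanned since the last successful free remain below six times its
reclaimable pages, and since per-cpu deltas make zone_page_state() slightly
stale, the cutoff tolerates a little drift. A standalone sketch of the cutoff
(zone_reclaimable_model is a hypothetical name, not a kernel function):

#include <stdbool.h>
#include <stdio.h>

/* A zone is written off once reclaim has scanned 6x its reclaimable
 * pages without a free resetting NR_PAGES_SCANNED back to zero. */
static bool zone_reclaimable_model(unsigned long pages_scanned,
				   unsigned long reclaimable_pages)
{
	return pages_scanned < reclaimable_pages * 6;
}

int main(void)
{
	/* e.g. 1000 reclaimable pages: give up only after 6000 scans */
	printf("%d\n", zone_reclaimable_model(5999, 1000));	/* 1: keep trying */
	printf("%d\n", zone_reclaimable_model(6000, 1000));	/* 0: give up */
	return 0;
}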
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 8267f77d1875..e574e883fa70 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -763,6 +763,7 @@ const char * const vmstat_text[] = {
763 "nr_shmem", 763 "nr_shmem",
764 "nr_dirtied", 764 "nr_dirtied",
765 "nr_written", 765 "nr_written",
766 "nr_pages_scanned",
766 767
767#ifdef CONFIG_NUMA 768#ifdef CONFIG_NUMA
768 "numa_hit", 769 "numa_hit",
@@ -1067,7 +1068,7 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   zone->pages_scanned,
+		   zone_page_state(zone, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
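[Editor's note] With the vmstat_text[] entry added above, the counter is
exported as nr_pages_scanned in /proc/vmstat and in the per-zone output of
/proc/zoneinfo. One way to observe it on a kernel carrying this patch,
assuming only the entry name shown above:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f) {
		perror("/proc/vmstat");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "nr_pages_scanned ", 17))
			fputs(line, stdout);	/* e.g. "nr_pages_scanned 0" */
	fclose(f);
	return 0;
}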