author		Mel Gorman <mgorman@suse.de>	2014-01-21 18:49:13 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-21 19:19:44 -0500
commit		aec6a8889a98a0cd58357cd0937a25189908f191
tree		4c0bd0fa7a3d9e6edc0f863782e1d5807ebefb12 /arch/parisc/mm
parent		ece86e222db48d04bda218a2be70e384518bb08c
mm, show_mem: remove SHOW_MEM_FILTER_PAGE_COUNT
Commit 4b59e6c47309 ("mm, show_mem: suppress page counts in non-blockable contexts") introduced SHOW_MEM_FILTER_PAGE_COUNT to suppress PFN walks on large memory machines. Commit c78e93630d15 ("mm: do not walk all of system memory during show_mem") avoided a PFN walk in the generic show_mem helper, which removes the requirement for SHOW_MEM_FILTER_PAGE_COUNT in that case.

This patch removes PFN walkers from the arch-specific implementations that report on a per-node or per-zone granularity. ARM and unicore32 still do a PFN walk, as they report memory usage on each bank, which is a much finer granularity where the debugging information may still be of use. As the remaining arches doing PFN walks have relatively small amounts of memory, this patch simply removes SHOW_MEM_FILTER_PAGE_COUNT.

[akpm@linux-foundation.org: fix parisc]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: James Bottomley <jejb@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
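For orientation, the accounting that replaces the PFN walk in the parisc show_mem() amounts to the loop sketched below: rather than touching every struct page, it visits each online node under the pgdat resize lock and sums per-zone counters the allocator already maintains. This is only a condensed sketch against the v3.14-era zone fields used by this patch; the wrapper count_mem_pages() is hypothetical, added purely for illustration, and the exact change is in the hunk further down.

/*
 * Hedged sketch of the per-zone accounting introduced by this patch,
 * written against the v3.14-era struct zone fields (present_pages,
 * managed_pages); newer kernels read managed pages via
 * zone_managed_pages() instead.  count_mem_pages() is a hypothetical
 * helper for illustration only - the real code is inline in show_mem().
 */
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

static void count_mem_pages(unsigned long *total, unsigned long *reserved)
{
	pg_data_t *pgdat;

	/* Visit each online node instead of walking every page frame. */
	for_each_online_pgdat(pgdat) {
		unsigned long flags;
		int zoneid;

		pgdat_resize_lock(pgdat, &flags);
		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
			struct zone *zone = &pgdat->node_zones[zoneid];

			if (!populated_zone(zone))
				continue;

			/* Counters the page allocator already keeps per zone. */
			*total += zone->present_pages;
			*reserved += zone->present_pages - zone->managed_pages;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
}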
Diffstat (limited to 'arch/parisc/mm')
-rw-r--r--	arch/parisc/mm/init.c	59
1 file changed, 17 insertions, 42 deletions
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 96f8168cf4ec..ae085ad0fba0 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -645,55 +645,30 @@ EXPORT_SYMBOL(empty_zero_page);
 
 void show_mem(unsigned int filter)
 {
-	int i,free = 0,total = 0,reserved = 0;
-	int shared = 0, cached = 0;
+	int total = 0,reserved = 0;
+	pg_data_t *pgdat;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas(filter);
-	if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
-		return;
-#ifndef CONFIG_DISCONTIGMEM
-	i = max_mapnr;
-	while (i-- > 0) {
-		total++;
-		if (PageReserved(mem_map+i))
-			reserved++;
-		else if (PageSwapCache(mem_map+i))
-			cached++;
-		else if (!page_count(&mem_map[i]))
-			free++;
-		else
-			shared += page_count(&mem_map[i]) - 1;
-	}
-#else
-	for (i = 0; i < npmem_ranges; i++) {
-		int j;
 
-		for (j = node_start_pfn(i); j < node_end_pfn(i); j++) {
-			struct page *p;
-			unsigned long flags;
+	for_each_online_pgdat(pgdat) {
+		unsigned long flags;
+		int zoneid;
 
-			pgdat_resize_lock(NODE_DATA(i), &flags);
-			p = nid_page_nr(i, j) - node_start_pfn(i);
-
-			total++;
-			if (PageReserved(p))
-				reserved++;
-			else if (PageSwapCache(p))
-				cached++;
-			else if (!page_count(p))
-				free++;
-			else
-				shared += page_count(p) - 1;
-			pgdat_resize_unlock(NODE_DATA(i), &flags);
-		}
+		pgdat_resize_lock(pgdat, &flags);
+		for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+			struct zone *zone = &pgdat->node_zones[zoneid];
+			if (!populated_zone(zone))
+				continue;
+
+			total += zone->present_pages;
+			reserved = zone->present_pages - zone->managed_pages;
+		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
-#endif
+
 	printk(KERN_INFO "%d pages of RAM\n", total);
 	printk(KERN_INFO "%d reserved pages\n", reserved);
-	printk(KERN_INFO "%d pages shared\n", shared);
-	printk(KERN_INFO "%d pages swap cached\n", cached);
-
 
 #ifdef CONFIG_DISCONTIGMEM
 	{