author		Robin Holt <holt@sgi.com>	2006-04-13 18:34:45 -0400
committer	Tony Luck <tony.luck@intel.com>	2006-04-13 18:34:45 -0400
commit		ace1d816a13ff42d4f41989862552032f9c19853 (patch)
tree		084277670a76f9a50449e82b308ecc7b881fd5ac /arch/ia64/mm
parent		356a5c1c6fdfb8eed6dbb3979d90c7cc7060017a (diff)
[IA64] Make show_mem() skip holes in a pgdat
This patch modifies ia64's show_mem() to walk the vmem_map page tables and
rapidly skip forward across regions where the page tables are missing. This
prevents the pfn_valid() check from causing numerous unnecessary page faults.

Without this patch, on a 512 node, 512 cpu system where every node has four
memory holes, the show_mem() call takes 1 hour 18 minutes. With this patch,
it takes less than 3 seconds.

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
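[Editor's note] For intuition, here is a standalone user-space sketch (not kernel
code) of the skipping idea: probing a sparse mapping one level up lets the walk
jump over a whole missing region in one step instead of touching every page. The
sizes and the page_mapped()/pmd_present_at() helpers are hypothetical stand-ins
for the real page-table queries.

	#include <stdio.h>
	#include <stdbool.h>

	#define TOY_PAGE_SHIFT	4			/* hypothetical: 16-byte "pages" */
	#define TOY_PMD_SHIFT	8			/* hypothetical: 16 pages per pmd */
	#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)
	#define TOY_PMD_SIZE	(1UL << TOY_PMD_SHIFT)
	#define SPAN		(1UL << 12)		/* address range to walk */

	/* Toy sparse mapping: only the second half of the span is populated. */
	static bool page_mapped(unsigned long addr)
	{
		return addr >= SPAN / 2;
	}

	/* A pmd-sized region counts as present iff its first page is mapped. */
	static bool pmd_present_at(unsigned long addr)
	{
		return page_mapped(addr & ~(TOY_PMD_SIZE - 1));
	}

	int main(void)
	{
		unsigned long addr = 0, probes = 0;

		while (addr < SPAN) {
			probes++;
			if (!pmd_present_at(addr)) {
				addr += TOY_PMD_SIZE;	/* skip the whole missing region */
				continue;
			}
			if (!page_mapped(addr)) {
				addr += TOY_PAGE_SIZE;	/* probe page by page inside it */
				continue;
			}
			break;				/* first valid address found */
		}
		printf("first mapped addr 0x%lx after %lu probes (page-by-page: %lu)\n",
		       addr, probes, SPAN / 2 / TOY_PAGE_SIZE);
		return 0;
	}

With these toy sizes the upper-level skip reaches the first mapped address in 9
probes where a page-by-page scan needs 128; the real patch gets the same effect
at pgd, pud, and pmd granularity.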
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--	arch/ia64/mm/discontig.c	66
1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index ec9eeb89975d..b6bcc9fa3603 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -519,6 +519,68 @@ void __cpuinit *per_cpu_init(void)
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+	unsigned long end_address, hole_next_pfn;
+	unsigned long stop_address;
+
+	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
+	end_address = PAGE_ALIGN(end_address);
+
+	stop_address = (unsigned long) &vmem_map[
+		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+
+	do {
+		pgd_t *pgd;
+		pud_t *pud;
+		pmd_t *pmd;
+		pte_t *pte;
+
+		pgd = pgd_offset_k(end_address);
+		if (pgd_none(*pgd)) {
+			end_address += PGDIR_SIZE;
+			continue;
+		}
+
+		pud = pud_offset(pgd, end_address);
+		if (pud_none(*pud)) {
+			end_address += PUD_SIZE;
+			continue;
+		}
+
+		pmd = pmd_offset(pud, end_address);
+		if (pmd_none(*pmd)) {
+			end_address += PMD_SIZE;
+			continue;
+		}
+
+		pte = pte_offset_kernel(pmd, end_address);
+retry_pte:
+		if (pte_none(*pte)) {
+			end_address += PAGE_SIZE;
+			pte++;
+			if ((end_address < stop_address) &&
+			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
+				goto retry_pte;
+			continue;
+		}
+		/* Found next valid vmem_map page */
+		break;
+	} while (end_address < stop_address);
+
+	end_address = min(end_address, stop_address);
+	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
+	hole_next_pfn = end_address / sizeof(struct page);
+	return hole_next_pfn - pgdat->node_start_pfn;
+}
+#else
+static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
+{
+	return i + 1;
+}
+#endif
+
 /**
  * show_mem - give short summary of memory stats
  *
@@ -547,8 +609,10 @@ void show_mem(void)
 			struct page *page;
 			if (pfn_valid(pgdat->node_start_pfn + i))
 				page = pfn_to_page(pgdat->node_start_pfn + i);
-			else
+			else {
+				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
 				continue;
+			}
 			if (PageReserved(page))
 				reserved++;
 			else if (PageSwapCache(page))
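[Editor's note] One subtle point in the new helper is the closing arithmetic:
after the walk, end_address is converted back into a pfn by subtracting the
vmem_map base, adding sizeof(struct page) - 1, and dividing by
sizeof(struct page). That is a round-up, so a vmem_map entry that the
page-aligned end_address only partially covers is never reported as valid.
A minimal user-space illustration of just that arithmetic (struct page and
vmem_map_sim here are hypothetical stand-ins, not the kernel's):

	#include <stdio.h>

	struct page { unsigned long flags; };	/* hypothetical stand-in */
	static struct page vmem_map_sim[64];	/* hypothetical vmem_map */

	static unsigned long addr_to_pfn(unsigned long end_address)
	{
		/* Same round-up as the patch: ceil(offset / sizeof(struct page)). */
		end_address = end_address - (unsigned long) vmem_map_sim
				+ sizeof(struct page) - 1;
		return end_address / sizeof(struct page);
	}

	int main(void)
	{
		/* Exactly at entry 10 -> pfn 10; one byte past it rounds up to 11. */
		printf("%lu\n", addr_to_pfn((unsigned long) &vmem_map_sim[10]));
		printf("%lu\n", addr_to_pfn((unsigned long) &vmem_map_sim[10] + 1));
		return 0;
	}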