author    Christoph Lameter <clameter@sgi.com>    2007-10-16 04:24:15 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:42:51 -0400
commit    0889eba5b38f66d7d892a167d88284daddd3d43b (patch)
tree      adb578bb92cc18dbf842a9bde67e3861db030557 /arch/x86
parent    29c71111d0557385328211b130246a90f9223b46 (diff)
x86_64: SPARSEMEM_VMEMMAP 2M page size support
x86_64 uses 2M page table entries to map its 1-1 kernel space. We also
implement the virtual memmap using 2M page table entries, so there is no
additional runtime overhead over FLATMEM; only initialisation is slightly
more complex. As FLATMEM still references memory to obtain the mem_map
pointer while SPARSEMEM_VMEMMAP uses a compile-time constant,
SPARSEMEM_VMEMMAP should be superior.

With this, SPARSEMEM becomes the most efficient way of handling
virt_to_page, pfn_to_page and friends for UP, SMP and NUMA on x86_64.

[apw@shadowen.org: code resplit, style fixups]
[apw@shadowen.org: vmemmap x86_64: ensure end of section memmap is initialised]
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andi Kleen <ak@suse.de>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
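The payoff is visible in the pfn/page conversion helpers: with a virtually
contiguous memmap, pfn_to_page() and page_to_pfn() reduce to pointer
arithmetic against a fixed base address. A minimal sketch of that
arithmetic, modelled on the generic memory_model.h helpers; the
VMEMMAP_START value shown matches the x86_64 layout of this era but should
be treated as illustrative:

/*
 * Sketch: with CONFIG_SPARSEMEM_VMEMMAP the conversions need no memory
 * access to fetch a mem_map pointer -- the vmemmap base is a
 * compile-time constant. (Base address here is illustrative.)
 */
#define VMEMMAP_START	0xffffe20000000000UL
#define vmemmap		((struct page *)VMEMMAP_START)

#define __pfn_to_page(pfn)	(vmemmap + (pfn))
#define __page_to_pfn(page)	((unsigned long)((page) - vmemmap))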
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/init_64.c | 45
1 file changed, 45 insertions, 0 deletions
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 458893b376f8..7d4fc633a9c9 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -748,3 +748,48 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 		return "[vsyscall]";
 	return NULL;
 }
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+/*
+ * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
+ */
+int __meminit vmemmap_populate(struct page *start_page,
+						unsigned long size, int node)
+{
+	unsigned long addr = (unsigned long)start_page;
+	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long next;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+
+		pgd = vmemmap_pgd_populate(addr, node);
+		if (!pgd)
+			return -ENOMEM;
+		pud = vmemmap_pud_populate(pgd, addr, node);
+		if (!pud)
+			return -ENOMEM;
+
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			pte_t entry;
+			void *p = vmemmap_alloc_block(PMD_SIZE, node);
+			if (!p)
+				return -ENOMEM;
+
+			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
+			mk_pte_huge(entry);
+			set_pmd(pmd, __pmd(pte_val(entry)));
+
+			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
+				addr, addr + PMD_SIZE - 1, p, node);
+		} else
+			vmemmap_verify((pte_t *)pmd, node, addr, next);
+	}
+
+	return 0;
+}
+#endif
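The loop above walks the vmemmap range in 2M strides: pmd_addr_end()
returns the next PMD boundary, clamped to the end of the range, which is
what guarantees the tail of a partially covered section is still
initialised. A stand-alone sketch of that stride computation, mirroring
the generic pmd_addr_end() helper (the user-space harness and addresses
are illustrative only):

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)	/* 2M stride */
#define PMD_MASK	(~(PMD_SIZE - 1))

/* Next 2M boundary after addr, clamped to end (mirrors pmd_addr_end()). */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	/* The -1 trick keeps the comparison safe if boundary wraps to 0. */
	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0xffffe20000100000UL;	/* mid-PMD start */
	unsigned long end  = 0xffffe20000500000UL;
	unsigned long next;

	/* Prints one PMD-sized chunk per iteration; first and last are partial. */
	for (; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);
		printf("populate [%lx-%lx]\n", addr, next - 1);
	}
	return 0;
}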