path: root/arch/arm64
author	Johannes Weiner <hannes@cmpxchg.org>	2013-04-29 18:07:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 18:54:35 -0400
commit	0aad818b2de455f1bfd7ef87c28cdbbaaed9a699 (patch)
tree	a86fe62f7c740d431f76bd2262abae5825e1a21e /arch/arm64
parent	055e4fd96e95b0eee0d92fd54a26be7f0d3bcad0 (diff)
sparse-vmemmap: specify vmemmap population range in bytes
The sparse code, when asking the architecture to populate the vmemmap, specifies the section range as a starting page and a number of pages.

This is an awkward interface, because none of the arch-specific code actually thinks of the range in terms of 'struct page' units and always translates it to bytes first.

In addition, later patches mix huge page and regular page backing for the vmemmap. For this, they need to call vmemmap_populate_basepages() on sub-section ranges with PAGE_SIZE and PMD_SIZE in mind. But these are not necessarily multiples of the 'struct page' size and so this unit is too coarse.

Just translate the section range into bytes once in the generic sparse code, then pass byte ranges down the stack.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Tested-by: David S. Miller <davem@davemloft.net>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
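For context, the generic side of this change can be pictured as follows. The sketch below paraphrases sparse_mem_map_populate() in mm/sparse-vmemmap.c as it looks after the series; it is illustrative only and is not part of the arch/arm64 diff shown further down.

/* Sketch: the generic sparse code converts a section's page range into a
 * byte range once, then hands [start, end) down to the architecture.
 * Paraphrased for illustration; not taken from the arch/arm64 diff below.
 */
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
	unsigned long start = (unsigned long)map;
	unsigned long end = (unsigned long)(map + PAGES_PER_SECTION);

	/* Architectures now receive the range in bytes, not 'struct page' units. */
	if (vmemmap_populate(start, end, nid))
		return NULL;

	return map;
}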
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/mm/mmu.c	13
1 file changed, 5 insertions, 8 deletions
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 70b8cd4021c4..eeecc9c8ed68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr)
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #ifdef CONFIG_ARM64_64K_PAGES
-int __meminit vmemmap_populate(struct page *start_page,
-			       unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-	return vmemmap_populate_basepages(start_page, size, node);
+	return vmemmap_populate_basepages(start, end, node);
 }
 #else	/* !CONFIG_ARM64_64K_PAGES */
-int __meminit vmemmap_populate(struct page *start_page,
-			       unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-	unsigned long addr = (unsigned long)start_page;
-	unsigned long end = (unsigned long)(start_page + size);
+	unsigned long addr = start;
 	unsigned long next;
 	pgd_t *pgd;
 	pud_t *pud;
@@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page,
 	return 0;
 }
 #endif	/* CONFIG_ARM64_64K_PAGES */
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
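For reference, the vmemmap_populate_basepages() helper that the 64K-pages path above delegates to also takes a byte range after this change. A rough sketch of its shape, paraphrasing mm/sparse-vmemmap.c and not part of this diff:

/* Sketch: walk the byte range one PAGE_SIZE step at a time and back each
 * piece of the vmemmap with a base page.  Paraphrased for illustration;
 * not taken from the arch/arm64 diff above.
 */
int __meminit vmemmap_populate_basepages(unsigned long start,
					 unsigned long end, int node)
{
	unsigned long addr = start;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	for (; addr < end; addr += PAGE_SIZE) {
		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;
		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;
		pmd = vmemmap_pmd_populate(pud, addr, node);
		if (!pmd)
			return -ENOMEM;
		pte = vmemmap_pte_populate(pmd, addr, node);
		if (!pte)
			return -ENOMEM;
		vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
	}

	return 0;
}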