author     Johannes Weiner <hannes@cmpxchg.org>            2013-04-29 18:07:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 18:54:35 -0400
commit     0aad818b2de455f1bfd7ef87c28cdbbaaed9a699 (patch)
tree       a86fe62f7c740d431f76bd2262abae5825e1a21e /arch
parent     055e4fd96e95b0eee0d92fd54a26be7f0d3bcad0 (diff)
sparse-vmemmap: specify vmemmap population range in bytes
The sparse code, when asking the architecture to populate the vmemmap,
specifies the section range as a starting page and a number of pages.

This is an awkward interface, because none of the arch-specific code
actually thinks of the range in terms of 'struct page' units and always
translates it to bytes first.

In addition, later patches mix huge page and regular page backing for
the vmemmap.  For this, they need to call vmemmap_populate_basepages()
on sub-section ranges with PAGE_SIZE and PMD_SIZE in mind.  But these
are not necessarily multiples of the 'struct page' size and so this
unit is too coarse.

Just translate the section range into bytes once in the generic sparse
code, then pass byte ranges down the stack.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Bernhard Schmidt <Bernhard.Schmidt@lrz.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Tested-by: David S. Miller <davem@davemloft.net>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
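For reference, a minimal sketch of the generic caller side this interface implies: the sparse code converts a section's memmap into a byte range once and hands plain addresses to vmemmap_populate(). This is an illustrative reconstruction and not part of the arch-only diff below; the exact mm/sparse-vmemmap.c code may differ.

/* Hedged sketch: translate a section's memmap to a [start, end) byte range once. */
struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
{
        struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
        unsigned long start = (unsigned long)map;
        unsigned long end = (unsigned long)(map + PAGES_PER_SECTION);

        if (vmemmap_populate(start, end, nid))  /* byte range, not page count */
                return NULL;
        return map;
}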
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/mm/mmu.c        | 13
-rw-r--r--  arch/ia64/mm/discontig.c   |  7
-rw-r--r--  arch/powerpc/mm/init_64.c  | 11
-rw-r--r--  arch/s390/mm/vmem.c        | 15
-rw-r--r--  arch/sparc/mm/init_64.c    |  7
-rw-r--r--  arch/x86/mm/init_64.c      | 15
6 files changed, 25 insertions(+), 43 deletions(-)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 70b8cd4021c4..eeecc9c8ed68 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -391,17 +391,14 @@ int kern_addr_valid(unsigned long addr)
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #ifdef CONFIG_ARM64_64K_PAGES
-int __meminit vmemmap_populate(struct page *start_page,
-                               unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        return vmemmap_populate_basepages(start_page, size, node);
+        return vmemmap_populate_basepages(start, end, node);
 }
 #else /* !CONFIG_ARM64_64K_PAGES */
-int __meminit vmemmap_populate(struct page *start_page,
-                               unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        unsigned long addr = (unsigned long)start_page;
-        unsigned long end = (unsigned long)(start_page + size);
+        unsigned long addr = start;
         unsigned long next;
         pgd_t *pgd;
         pud_t *pud;
@@ -434,7 +431,7 @@ int __meminit vmemmap_populate(struct page *start_page,
         return 0;
 }
 #endif /* CONFIG_ARM64_64K_PAGES */
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index a57436e5d405..ae4db4bd6d97 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -819,13 +819,12 @@ void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-int __meminit vmemmap_populate(struct page *start_page,
-                               unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        return vmemmap_populate_basepages(start_page, size, node);
+        return vmemmap_populate_basepages(start, end, node);
 }
 
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 #endif
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 7e2246fb2f31..5a535b73ea18 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -263,19 +263,14 @@ static __meminit void vmemmap_list_populate(unsigned long phys,
         vmemmap_list = vmem_back;
 }
 
-int __meminit vmemmap_populate(struct page *start_page,
-                               unsigned long nr_pages, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        unsigned long start = (unsigned long)start_page;
-        unsigned long end = (unsigned long)(start_page + nr_pages);
         unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
 
         /* Align to the page size of the linear mapping. */
         start = _ALIGN_DOWN(start, page_size);
 
-        pr_debug("vmemmap_populate page %p, %ld pages, node %d\n",
-                 start_page, nr_pages, node);
-        pr_debug(" -> map %lx..%lx\n", start, end);
+        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);
 
         for (; start < end; start += page_size) {
                 void *p;
@@ -298,7 +293,7 @@ int __meminit vmemmap_populate(struct page *start_page,
         return 0;
 }
 
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ffab84db6907..35837054f734 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -191,19 +191,16 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        unsigned long address, start_addr, end_addr;
+        unsigned long address = start;
         pgd_t *pg_dir;
         pud_t *pu_dir;
         pmd_t *pm_dir;
         pte_t *pt_dir;
         int ret = -ENOMEM;
 
-        start_addr = (unsigned long) start;
-        end_addr = (unsigned long) (start + nr);
-
-        for (address = start_addr; address < end_addr;) {
+        for (address = start; address < end;) {
                 pg_dir = pgd_offset_k(address);
                 if (pgd_none(*pg_dir)) {
                         pu_dir = vmem_pud_alloc();
@@ -262,14 +259,14 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
                 }
                 address += PAGE_SIZE;
         }
-        memset(start, 0, nr * sizeof(struct page));
+        memset((void *)start, 0, end - start);
         ret = 0;
 out:
-        flush_tlb_kernel_range(start_addr, end_addr);
+        flush_tlb_kernel_range(start, end);
         return ret;
 }
 
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 1588d33d5492..6ac99d64a13c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2181,10 +2181,9 @@ unsigned long vmemmap_table[VMEMMAP_SIZE];
 static long __meminitdata addr_start, addr_end;
 static int __meminitdata node_start;
 
-int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+                               int node)
 {
-        unsigned long vstart = (unsigned long) start;
-        unsigned long vend = (unsigned long) (start + nr);
         unsigned long phys_start = (vstart - VMEMMAP_BASE);
         unsigned long phys_end = (vend - VMEMMAP_BASE);
         unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
@@ -2236,7 +2235,7 @@ void __meminit vmemmap_populate_print_last(void)
         }
 }
 
-void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2ef81f19bd6c..528c143f467c 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1011,11 +1011,8 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct)
         flush_tlb_all();
 }
 
-void __ref vmemmap_free(struct page *memmap, unsigned long nr_pages)
+void __ref vmemmap_free(unsigned long start, unsigned long end)
 {
-        unsigned long start = (unsigned long)memmap;
-        unsigned long end = (unsigned long)(memmap + nr_pages);
-
         remove_pagetable(start, end, false);
 }
 
@@ -1284,17 +1281,15 @@ static long __meminitdata addr_start, addr_end;
 static void __meminitdata *p_start, *p_end;
 static int __meminitdata node_start;
 
-int __meminit
-vmemmap_populate(struct page *start_page, unsigned long size, int node)
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
 {
-        unsigned long addr = (unsigned long)start_page;
-        unsigned long end = (unsigned long)(start_page + size);
+        unsigned long addr;
         unsigned long next;
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
 
-        for (; addr < end; addr = next) {
+        for (addr = start; addr < end; addr = next) {
                 void *p = NULL;
 
                 pgd = vmemmap_pgd_populate(addr, node);
@@ -1351,7 +1346,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                 }
 
         }
-        sync_global_pgds((unsigned long)start_page, end - 1);
+        sync_global_pgds(start, end - 1);
         return 0;
 }
 