about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-09-25 00:20:14 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-05 19:53:39 -0400
commitc06240c7f5c39c83dfd7849c0770775562441b96 (patch)
treece4deb5dadaea70c75d3f6680495055f16efb9c3 /arch
parent0dd5b7b09e13dae32869371e08e1048349fd040c (diff)
sparc64: Use kernel page tables for vmemmap.
For sparse memory configurations, the vmemmap array behaves terribly and it takes up an inordinate amount of space in the BSS section of the kernel image unconditionally. Just build huge PMDs and look them up just like we do for TLB misses in the vmalloc area. Kernel BSS shrinks by about 2MB. Signed-off-by: David S. Miller <davem@davemloft.net> Acked-by: Bob Picco <bob.picco@oracle.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc/kernel/ktlb.S	9
-rw-r--r--	arch/sparc/mm/init_64.c	72
-rw-r--r--	arch/sparc/mm/init_64.h	11
3 files changed, 36 insertions(+), 56 deletions(-)
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 94a1e6648bd0..2627a7fa33d9 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -186,13 +186,8 @@ kvmap_dtlb_load:
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 kvmap_vmemmap:
-	sub		%g4, %g5, %g5
-	srlx		%g5, ILOG2_4MB, %g5
-	sethi		%hi(vmemmap_table), %g1
-	sllx		%g5, 3, %g5
-	or		%g1, %lo(vmemmap_table), %g1
-	ba,pt		%xcc, kvmap_dtlb_load
-	 ldx		[%g1 + %g5], %g5
+	KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
+	ba,a,pt		%xcc, kvmap_dtlb_load
 #endif
 
 kvmap_dtlb_nonlinear:
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 848440a33125..6d5d562a652e 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2308,18 +2308,9 @@ unsigned long _PAGE_CACHE __read_mostly;
 EXPORT_SYMBOL(_PAGE_CACHE);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-unsigned long vmemmap_table[VMEMMAP_SIZE];
-
-static long __meminitdata addr_start, addr_end;
-static int __meminitdata node_start;
-
 int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 			       int node)
 {
-	unsigned long phys_start = (vstart - VMEMMAP_BASE);
-	unsigned long phys_end = (vend - VMEMMAP_BASE);
-	unsigned long addr = phys_start & VMEMMAP_CHUNK_MASK;
-	unsigned long end = VMEMMAP_ALIGN(phys_end);
 	unsigned long pte_base;
 
 	pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4U |
@@ -2330,47 +2321,52 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
 		    _PAGE_CP_4V | _PAGE_CV_4V |
 		    _PAGE_P_4V | _PAGE_W_4V);
 
-	for (; addr < end; addr += VMEMMAP_CHUNK) {
-		unsigned long *vmem_pp =
-			vmemmap_table + (addr >> VMEMMAP_CHUNK_SHIFT);
-		void *block;
+	pte_base |= _PAGE_PMD_HUGE;
 
-	if (!(*vmem_pp & _PAGE_VALID)) {
-			block = vmemmap_alloc_block(1UL << ILOG2_4MB, node);
-			if (!block)
+	vstart = vstart & PMD_MASK;
+	vend = ALIGN(vend, PMD_SIZE);
+	for (; vstart < vend; vstart += PMD_SIZE) {
+		pgd_t *pgd = pgd_offset_k(vstart);
+		unsigned long pte;
+		pud_t *pud;
+		pmd_t *pmd;
+
+		if (pgd_none(*pgd)) {
+			pud_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
+
+			if (!new)
 				return -ENOMEM;
+			pgd_populate(&init_mm, pgd, new);
+		}
 
-			*vmem_pp = pte_base | __pa(block);
+		pud = pud_offset(pgd, vstart);
+		if (pud_none(*pud)) {
+			pmd_t *new = vmemmap_alloc_block(PAGE_SIZE, node);
 
-			/* check to see if we have contiguous blocks */
-			if (addr_end != addr || node_start != node) {
-				if (addr_start)
-					printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
-					       addr_start, addr_end-1, node_start);
-				addr_start = addr;
-				node_start = node;
-			}
-			addr_end = addr + VMEMMAP_CHUNK;
+			if (!new)
+				return -ENOMEM;
+			pud_populate(&init_mm, pud, new);
 		}
-	}
-	return 0;
-}
 
-void __meminit vmemmap_populate_print_last(void)
-{
-	if (addr_start) {
-		printk(KERN_DEBUG " [%lx-%lx] on node %d\n",
-		       addr_start, addr_end-1, node_start);
-		addr_start = 0;
-		addr_end = 0;
-		node_start = 0;
+		pmd = pmd_offset(pud, vstart);
+
+		pte = pmd_val(*pmd);
+		if (!(pte & _PAGE_VALID)) {
+			void *block = vmemmap_alloc_block(PMD_SIZE, node);
+
+			if (!block)
+				return -ENOMEM;
+
+			pmd_val(*pmd) = pte_base | __pa(block);
+		}
 	}
+
+	return 0;
 }
 
 void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
-
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 static void prot_init_common(unsigned long page_none,
diff --git a/arch/sparc/mm/init_64.h b/arch/sparc/mm/init_64.h
index 29ff73fc96b4..a4c09603b05c 100644
--- a/arch/sparc/mm/init_64.h
+++ b/arch/sparc/mm/init_64.h
@@ -31,15 +31,4 @@ extern unsigned long kern_locked_tte_data;
 
 void prom_world(int enter);
 
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-#define VMEMMAP_CHUNK_SHIFT	22
-#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
-#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
-#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
-
-#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
-			  sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
-extern unsigned long vmemmap_table[VMEMMAP_SIZE];
-#endif
-
 #endif /* _SPARC64_MM_INIT_H */