aboutsummaryrefslogtreecommitdiffstats
path: root/arch/sparc/mm
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2014-09-27 00:19:46 -0400
committerDavid S. Miller <davem@davemloft.net>2014-10-05 19:53:38 -0400
commitac55c768143aa34cc3789c4820cbb0809a76fd9c (patch)
treef6bb7af5b71e7935169a778e3f7e7694fbd1416f /arch/sparc/mm
parent473ad7f4fb005d1bb727e4ef27d370d28703a062 (diff)
sparc64: Switch to 4-level page tables.
This has become necessary with chips that support more than 43 bits of physical addressing. Based almost entirely upon a patch by Bob Picco. Signed-off-by: David S. Miller <davem@davemloft.net> Acked-by: Bob Picco <bob.picco@oracle.com>
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--arch/sparc/mm/init_64.c31
1 file changed, 27 insertions, 4 deletions
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index bd08ed49bbaa..091f846e6192 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1390,6 +1390,13 @@ static unsigned long __ref kernel_map_range(unsigned long pstart,
1390 pmd_t *pmd; 1390 pmd_t *pmd;
1391 pte_t *pte; 1391 pte_t *pte;
1392 1392
1393 if (pgd_none(*pgd)) {
1394 pud_t *new;
1395
1396 new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
1397 alloc_bytes += PAGE_SIZE;
1398 pgd_populate(&init_mm, pgd, new);
1399 }
1393 pud = pud_offset(pgd, vstart); 1400 pud = pud_offset(pgd, vstart);
1394 if (pud_none(*pud)) { 1401 if (pud_none(*pud)) {
1395 pmd_t *new; 1402 pmd_t *new;
@@ -1856,7 +1863,12 @@ static void __init sun4v_linear_pte_xor_finalize(void)
1856/* paging_init() sets up the page tables */ 1863/* paging_init() sets up the page tables */
1857 1864
1858static unsigned long last_valid_pfn; 1865static unsigned long last_valid_pfn;
1859pgd_t swapper_pg_dir[PTRS_PER_PGD]; 1866
1867/* These must be page aligned in order to not trigger the
1868 * alignment tests of pgd_bad() and pud_bad().
1869 */
1870pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((aligned (PAGE_SIZE)));
1871static pud_t swapper_pud_dir[PTRS_PER_PUD] __attribute__ ((aligned (PAGE_SIZE)));
1860 1872
1861static void sun4u_pgprot_init(void); 1873static void sun4u_pgprot_init(void);
1862static void sun4v_pgprot_init(void); 1874static void sun4v_pgprot_init(void);
@@ -1911,6 +1923,8 @@ void __init paging_init(void)
1911{ 1923{
1912 unsigned long end_pfn, shift, phys_base; 1924 unsigned long end_pfn, shift, phys_base;
1913 unsigned long real_end, i; 1925 unsigned long real_end, i;
1926 pud_t *pud;
1927 pmd_t *pmd;
1914 int node; 1928 int node;
1915 1929
1916 setup_page_offset(); 1930 setup_page_offset();
@@ -2008,9 +2022,18 @@ void __init paging_init(void)
2008 2022
2009 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir)); 2023 memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
2010 2024
2011 /* Now can init the kernel/bad page tables. */ 2025 /* The kernel page tables we publish into what the rest of the
2012 pud_set(pud_offset(&swapper_pg_dir[0], 0), 2026 * world sees must be adjusted so that they see the PAGE_OFFSET
2013 swapper_low_pmd_dir + (shift / sizeof(pgd_t))); 2027 * address of these in-kernel data structures. However right
2028 * here we must access them from the kernel image side, because
2029 * the trap tables haven't been taken over and therefore we cannot
2030 * take TLB misses in the PAGE_OFFSET linear mappings yet.
2031 */
2032 pud = swapper_pud_dir + (shift / sizeof(pud_t));
2033 pgd_set(&swapper_pg_dir[0], pud);
2034
2035 pmd = swapper_low_pmd_dir + (shift / sizeof(pmd_t));
2036 pud_set(&swapper_pud_dir[0], pmd);
2014 2037
2015 inherit_prom_mappings(); 2038 inherit_prom_mappings();
2016 2039