diff options
author | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-07-23 19:15:16 -0400 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-08-19 20:12:40 -0400 |
commit | a245067e204f69c69abf92d94fc45ec65bf1f07e (patch) | |
tree | 8a9ee8920c8b42d5d13cd2f7a05f578d99dd438e /arch/powerpc/mm/pgtable_64.c | |
parent | 1fe1a21005c14ad772caeb9005580f473c4b6c57 (diff) |
powerpc/mm: Add support for early ioremap on non-hash 64-bit processors
This adds some code to do early ioremaps using page tables instead of
bolting entries in the hash table. This will be used by the upcoming
64-bit BookE port.
The patch also changes the test for early vs. late ioremap to use
slab_is_available() instead of our old hackish mem_init_done.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r-- | arch/powerpc/mm/pgtable_64.c | 59 |
1 file changed, 54 insertions, 5 deletions
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index bfa7db6b2fd5..93ed1a3c8729 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -33,6 +33,8 @@ | |||
33 | #include <linux/stddef.h> | 33 | #include <linux/stddef.h> |
34 | #include <linux/vmalloc.h> | 34 | #include <linux/vmalloc.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/bootmem.h> | ||
37 | #include <linux/lmb.h> | ||
36 | 38 | ||
37 | #include <asm/pgalloc.h> | 39 | #include <asm/pgalloc.h> |
38 | #include <asm/page.h> | 40 | #include <asm/page.h> |
@@ -55,19 +57,36 @@ | |||
55 | 57 | ||
56 | unsigned long ioremap_bot = IOREMAP_BASE; | 58 | unsigned long ioremap_bot = IOREMAP_BASE; |
57 | 59 | ||
60 | |||
61 | #ifdef CONFIG_PPC_MMU_NOHASH | ||
62 | static void *early_alloc_pgtable(unsigned long size) | ||
63 | { | ||
64 | void *pt; | ||
65 | |||
66 | if (init_bootmem_done) | ||
67 | pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS)); | ||
68 | else | ||
69 | pt = __va(lmb_alloc_base(size, size, | ||
70 | __pa(MAX_DMA_ADDRESS))); | ||
71 | memset(pt, 0, size); | ||
72 | |||
73 | return pt; | ||
74 | } | ||
75 | #endif /* CONFIG_PPC_MMU_NOHASH */ | ||
76 | |||
58 | /* | 77 | /* |
59 | * map_io_page currently only called by __ioremap | 78 | * map_kernel_page currently only called by __ioremap |
60 | * map_io_page adds an entry to the ioremap page table | 79 | * map_kernel_page adds an entry to the ioremap page table |
61 | * and adds an entry to the HPT, possibly bolting it | 80 | * and adds an entry to the HPT, possibly bolting it |
62 | */ | 81 | */ |
63 | static int map_io_page(unsigned long ea, unsigned long pa, int flags) | 82 | static int map_kernel_page(unsigned long ea, unsigned long pa, int flags) |
64 | { | 83 | { |
65 | pgd_t *pgdp; | 84 | pgd_t *pgdp; |
66 | pud_t *pudp; | 85 | pud_t *pudp; |
67 | pmd_t *pmdp; | 86 | pmd_t *pmdp; |
68 | pte_t *ptep; | 87 | pte_t *ptep; |
69 | 88 | ||
70 | if (mem_init_done) { | 89 | if (slab_is_available()) { |
71 | pgdp = pgd_offset_k(ea); | 90 | pgdp = pgd_offset_k(ea); |
72 | pudp = pud_alloc(&init_mm, pgdp, ea); | 91 | pudp = pud_alloc(&init_mm, pgdp, ea); |
73 | if (!pudp) | 92 | if (!pudp) |
@@ -81,6 +100,35 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags) | |||
81 | set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, | 100 | set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, |
82 | __pgprot(flags))); | 101 | __pgprot(flags))); |
83 | } else { | 102 | } else { |
103 | #ifdef CONFIG_PPC_MMU_NOHASH | ||
104 | /* Warning ! This will blow up if bootmem is not initialized | ||
105 | * which our ppc64 code is keen to do that, we'll need to | ||
106 | * fix it and/or be more careful | ||
107 | */ | ||
108 | pgdp = pgd_offset_k(ea); | ||
109 | #ifdef PUD_TABLE_SIZE | ||
110 | if (pgd_none(*pgdp)) { | ||
111 | pudp = early_alloc_pgtable(PUD_TABLE_SIZE); | ||
112 | BUG_ON(pudp == NULL); | ||
113 | pgd_populate(&init_mm, pgdp, pudp); | ||
114 | } | ||
115 | #endif /* PUD_TABLE_SIZE */ | ||
116 | pudp = pud_offset(pgdp, ea); | ||
117 | if (pud_none(*pudp)) { | ||
118 | pmdp = early_alloc_pgtable(PMD_TABLE_SIZE); | ||
119 | BUG_ON(pmdp == NULL); | ||
120 | pud_populate(&init_mm, pudp, pmdp); | ||
121 | } | ||
122 | pmdp = pmd_offset(pudp, ea); | ||
123 | if (!pmd_present(*pmdp)) { | ||
124 | ptep = early_alloc_pgtable(PAGE_SIZE); | ||
125 | BUG_ON(ptep == NULL); | ||
126 | pmd_populate_kernel(&init_mm, pmdp, ptep); | ||
127 | } | ||
128 | ptep = pte_offset_kernel(pmdp, ea); | ||
129 | set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, | ||
130 | __pgprot(flags))); | ||
131 | #else /* CONFIG_PPC_MMU_NOHASH */ | ||
84 | /* | 132 | /* |
85 | * If the mm subsystem is not fully up, we cannot create a | 133 | * If the mm subsystem is not fully up, we cannot create a |
86 | * linux page table entry for this mapping. Simply bolt an | 134 | * linux page table entry for this mapping. Simply bolt an |
@@ -93,6 +141,7 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags) | |||
93 | "memory at %016lx !\n", pa); | 141 | "memory at %016lx !\n", pa); |
94 | return -ENOMEM; | 142 | return -ENOMEM; |
95 | } | 143 | } |
144 | #endif /* !CONFIG_PPC_MMU_NOHASH */ | ||
96 | } | 145 | } |
97 | return 0; | 146 | return 0; |
98 | } | 147 | } |
@@ -124,7 +173,7 @@ void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size, | |||
124 | WARN_ON(size & ~PAGE_MASK); | 173 | WARN_ON(size & ~PAGE_MASK); |
125 | 174 | ||
126 | for (i = 0; i < size; i += PAGE_SIZE) | 175 | for (i = 0; i < size; i += PAGE_SIZE) |
127 | if (map_io_page((unsigned long)ea+i, pa+i, flags)) | 176 | if (map_kernel_page((unsigned long)ea+i, pa+i, flags)) |
128 | return NULL; | 177 | return NULL; |
129 | 178 | ||
130 | return (void __iomem *)ea; | 179 | return (void __iomem *)ea; |