author    Mark Salter <msalter@redhat.com>       2014-03-12 12:28:06 -0400
committer Matt Fleming <matt.fleming@intel.com>  2014-04-30 14:49:52 -0400
commit    d7ecbddf4caefbac1b99478dd2b679f83dfc2545 (patch)
tree      891461182b7dc468662b8913cba8702cf671570a /arch/arm64
parent    0302f71c0aa59571ac306f93068fbbfe65ea349b (diff)
arm64: Add function to create identity mappings
At boot time, before switching to a virtual UEFI memory map, firmware
expects UEFI memory and IO regions to be identity mapped whenever the
kernel makes runtime services calls. The existing early boot code
creates an identity map of the kernel text/data, but this is not
sufficient for UEFI. This patch adds a create_id_mapping() function
which reuses the core code of the existing create_mapping().

Signed-off-by: Mark Salter <msalter@redhat.com>
[ Fixed error message formatting (%pa). ]
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Leif Lindholm <leif.lindholm@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
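For illustration only: a caller on the UEFI boot path would be expected to
walk the firmware memory map and identity map each region needed at runtime
before making any runtime services call. A minimal sketch of such usage,
assuming a hypothetical uefi_region descriptor (the actual EFI memory map
iteration is not part of this patch):

	/*
	 * Hedged usage sketch. 'struct uefi_region' and 'map_uefi_regions()'
	 * are illustrative stand-ins, not code from this patch; only
	 * create_id_mapping() comes from the change below.
	 */
	struct uefi_region {
		phys_addr_t	base;	/* physical start of the region */
		phys_addr_t	size;	/* region size in bytes */
		int		is_io;	/* non-zero for MMIO regions */
	};

	static void __init map_uefi_regions(const struct uefi_region *r, int n)
	{
		int i;

		/* Identity map each region; is_io selects device attributes. */
		for (i = 0; i < n; i++)
			create_id_mapping(r[i].base, r[i].size, r[i].is_io);
	}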
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/mmu.h |  2
-rw-r--r--  arch/arm64/mm/mmu.c          | 65
2 files changed, 49 insertions(+), 18 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index f600d400c07d..29ed1d865e13 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -28,5 +28,7 @@ extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
+/* create an identity mapping for memory (or io if map_io is true) */
+extern void create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io);
 
 #endif
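The map_io argument selects the memory attributes used for the identity
mapping: normal, executable kernel memory when it is zero, Device-nGnRE when
it is non-zero (see the alloc_init_pmd() change below). A hedged example of
the two call styles, with illustrative addresses and the SZ_* constants from
linux/sizes.h:

	/* Normal memory, e.g. a UEFI runtime code/data region: */
	create_id_mapping(0x80000000, SZ_2M, 0);

	/* MMIO, e.g. a UEFI runtime I/O region (Device-nGnRE attributes): */
	create_id_mapping(0x1c000000, SZ_64K, 1);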
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6b7e89569a3a..971eb45e8bda 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -168,7 +168,8 @@ static void __init *early_alloc(unsigned long sz)
 }
 
 static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, unsigned long pfn)
+				  unsigned long end, unsigned long pfn,
+				  pgprot_t prot)
 {
 	pte_t *pte;
 
@@ -180,16 +181,28 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
+		set_pte(pte, pfn_pte(pfn, prot));
 		pfn++;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
-				  unsigned long end, phys_addr_t phys)
+				  unsigned long end, phys_addr_t phys,
+				  int map_io)
 {
 	pmd_t *pmd;
 	unsigned long next;
+	pmdval_t prot_sect;
+	pgprot_t prot_pte;
+
+	if (map_io) {
+		prot_sect = PMD_TYPE_SECT | PMD_SECT_AF |
+			    PMD_ATTRINDX(MT_DEVICE_nGnRE);
+		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
+	} else {
+		prot_sect = prot_sect_kernel;
+		prot_pte = PAGE_KERNEL_EXEC;
+	}
 
 	/*
 	 * Check for initial section mappings in the pgd/pud and remove them.
@@ -205,7 +218,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
 			pmd_t old_pmd =*pmd;
-			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
+			set_pmd(pmd, __pmd(phys | prot_sect));
 			/*
 			 * Check for previous table entries created during
 			 * boot (__create_page_tables) and flush them.
@@ -213,21 +226,23 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 			if (!pmd_none(old_pmd))
 				flush_tlb_all();
 		} else {
-			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
+				       prot_pte);
 		}
 		phys += next - addr;
 	} while (pmd++, addr = next, addr != end);
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-				  unsigned long end, unsigned long phys)
+				  unsigned long end, unsigned long phys,
+				  int map_io)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
 
 	do {
 		next = pud_addr_end(addr, end);
-		alloc_init_pmd(pud, addr, next, phys);
+		alloc_init_pmd(pud, addr, next, phys, map_io);
 		phys += next - addr;
 	} while (pud++, addr = next, addr != end);
 }
@@ -236,30 +251,44 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
  * Create the page directory entries and any necessary page tables for the
  * mapping specified by 'md'.
  */
-static void __init create_mapping(phys_addr_t phys, unsigned long virt,
-				  phys_addr_t size)
+static void __init __create_mapping(pgd_t *pgd, phys_addr_t phys,
+				    unsigned long virt, phys_addr_t size,
+				    int map_io)
 {
 	unsigned long addr, length, end, next;
-	pgd_t *pgd;
-
-	if (virt < VMALLOC_START) {
-		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
-			   phys, virt);
-		return;
-	}
 
 	addr = virt & PAGE_MASK;
 	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
 
-	pgd = pgd_offset_k(addr);
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(pgd, addr, next, phys);
+		alloc_init_pud(pgd, addr, next, phys, map_io);
 		phys += next - addr;
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void __init create_mapping(phys_addr_t phys, unsigned long virt,
+				  phys_addr_t size)
+{
+	if (virt < VMALLOC_START) {
+		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
+			&phys, virt);
+		return;
+	}
+	__create_mapping(pgd_offset_k(virt & PAGE_MASK), phys, virt, size, 0);
+}
+
+void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
+{
+	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
+		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
+		return;
+	}
+	__create_mapping(&idmap_pg_dir[pgd_index(addr)],
+			 addr, addr, size, map_io);
+}
+
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
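A note on the bound check in create_id_mapping(): idmap_pg_dir spans only a
limited physical range, so an address whose pgd index falls outside the array
cannot be identity mapped and is rejected with a warning. An illustrative
restatement of the check (the helper name is hypothetical; with the
then-default 4K pages and 3-level tables, PGDIR_SHIFT is 30, so each pgd
entry covers 1GB):

	/*
	 * Illustrative helper, not part of the patch: true if 'addr' falls
	 * within the physical range that idmap_pg_dir can describe.
	 */
	static bool __init id_mappable(phys_addr_t addr)
	{
		return (addr >> PGDIR_SHIFT) < ARRAY_SIZE(idmap_pg_dir);
	}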