Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/mm/ioremap.c	66
1 file changed, 63 insertions(+), 3 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6aa13d59c858..7eac87f05180 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -33,8 +33,8 @@
 #include <asm/sizes.h>
 
 /*
- * Used by ioremap() and iounmap() code to mark section-mapped I/O regions
- * in vm_struct->flags field.
+ * Used by ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in vm_struct->flags field.
  */
 #define VM_ARM_SECTION_MAPPING	0x80000000
 
@@ -233,6 +233,54 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 
 	return 0;
 }
+
+static int
+remap_area_supersections(unsigned long virt, unsigned long pfn,
+			 unsigned long size, unsigned long flags)
+{
+	unsigned long prot, addr = virt, end = virt + size;
+	pgd_t *pgd;
+
+	/*
+	 * Remove and free any PTE-based mapping, and
+	 * sync the current kernel mapping.
+	 */
+	unmap_area_sections(virt, size);
+
+	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
+			PMD_DOMAIN(DOMAIN_IO) |
+			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
+
+	/*
+	 * ARMv6 and above need XN set to prevent speculative prefetches
+	 * hitting IO.
+	 */
+	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+		prot |= PMD_SECT_XN;
+
+	pgd = pgd_offset_k(virt);
+	do {
+		unsigned long super_pmd_val, i;
+
+		super_pmd_val = __pfn_to_phys(pfn) | prot;
+		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
+
+		for (i = 0; i < 8; i++) {
+			pmd_t *pmd = pmd_offset(pgd, addr);
+
+			pmd[0] = __pmd(super_pmd_val);
+			pmd[1] = __pmd(super_pmd_val);
+			flush_pmd_entry(pmd);
+
+			addr += PGDIR_SIZE;
+			pgd++;
+		}
+
+		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
+	} while (addr < end);
+
+	return 0;
+}
 #endif
 
 
@@ -255,6 +303,13 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	int err;
 	unsigned long addr;
 	struct vm_struct * area;
+	unsigned int cr = get_cr();
+
+	/*
+	 * High mappings must be supersection aligned
+	 */
+	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+		return NULL;
 
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
@@ -262,7 +317,12 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	addr = (unsigned long)area->addr;
 
 #ifndef CONFIG_SMP
-	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (cr & CR_XP)) ||
+	       cpu_is_xsc3()) &&
+	      !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+		area->flags |= VM_ARM_SECTION_MAPPING;
+		err = remap_area_supersections(addr, pfn, size, flags);
+	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, flags);
 	} else
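
Note on the encoding above: a supersection is a 16MB first-level mapping (ARMv6 and XScale3) whose descriptor the hardware requires to be replicated across 16 consecutive first-level entries, which is why the loop writes pmd[0] and pmd[1] over eight pgd slots (Linux/ARM folds two 1MB hardware entries into each 2MB pgd entry, so 8 x 2 = 16). The descriptor also carries bits [35:32] of a 36-bit physical address in bits [23:20], which is what the "((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20" expression packs in. Below is a minimal stand-alone sketch of just that encoding; it is an illustration, not kernel code: SS_PROT is a hypothetical placeholder for the PMD_* protection bits the patch ORs in, and the constants mirror the ARMv6 first-level descriptor format rather than any kernel header.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define SECT_TYPE	(2 << 0)	/* first-level "section" type, like PMD_TYPE_SECT */
#define SECT_SUPER	(1 << 18)	/* supersection bit, like PMD_SECT_SUPER */
#define SS_PROT		(SECT_TYPE | SECT_SUPER)	/* placeholder; the real code adds AP/domain/XN bits */

/* Build the 32-bit supersection descriptor for a physical frame number. */
static uint32_t super_pmd_val(uint64_t pfn)
{
	/* PA[31:24], the supersection base, comes straight from the pfn;
	 * the low 24 bits are zero for a supersection-aligned address. */
	uint32_t val = (uint32_t)(pfn << PAGE_SHIFT) | SS_PROT;

	/* PA[35:32] of a 36-bit address lands in descriptor bits [23:20]. */
	val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

	return val;
}

int main(void)
{
	/* A supersection-aligned 36-bit physical address above 4GB. */
	uint64_t pfn = 0x900000000ULL >> PAGE_SHIFT;

	printf("descriptor = 0x%08x\n", super_pmd_val(pfn));
	return 0;
}

The "pfn >= 0x100000" test in __ioremap_pfn marks the same boundary: pfn 0x100000 corresponds to physical address 1 << 32, so only mappings above 4GB, which can only be reached through supersections, are forced to be supersection aligned.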