Diffstat (limited to 'arch/arm/mm/ioremap.c')
 -rw-r--r--  arch/arm/mm/ioremap.c | 80
 1 file changed, 34 insertions(+), 46 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ac615c0f798..d6167ad4e011 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -32,6 +32,9 @@
 #include <asm/tlbflush.h>
 #include <asm/sizes.h>
 
+#include <asm/mach/map.h>
+#include "mm.h"
+
 /*
  * Used by ioremap() and iounmap() code to mark (super)section-mapped
  * I/O regions in vm_struct->flags field.
@@ -39,8 +42,9 @@
 #define VM_ARM_SECTION_MAPPING	0x80000000
 
 static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
-			  unsigned long phys_addr, pgprot_t prot)
+			  unsigned long phys_addr, const struct mem_type *type)
 {
+	pgprot_t prot = __pgprot(type->prot_pte);
 	pte_t *pte;
 
 	pte = pte_alloc_kernel(pmd, addr);
@@ -51,7 +55,8 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
 		if (!pte_none(*pte))
 			goto bad;
 
-		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
+		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
+			    type->prot_pte_ext);
 		phys_addr += PAGE_SIZE;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	return 0;
@@ -63,7 +68,7 @@ static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
 
 static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 				 unsigned long end, unsigned long phys_addr,
-				 pgprot_t prot)
+				 const struct mem_type *type)
 {
 	unsigned long next;
 	pmd_t *pmd;
@@ -75,7 +80,7 @@ static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 
 	do {
 		next = pmd_addr_end(addr, end);
-		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
+		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
 		if (ret)
 			return ret;
 		phys_addr += next - addr;
@@ -84,13 +89,11 @@ static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
 }
 
 static int remap_area_pages(unsigned long start, unsigned long pfn,
-			    unsigned long size, unsigned long flags)
+			    size_t size, const struct mem_type *type)
 {
 	unsigned long addr = start;
 	unsigned long next, end = start + size;
 	unsigned long phys_addr = __pfn_to_phys(pfn);
-	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
-				 L_PTE_DIRTY | L_PTE_WRITE | flags);
 	pgd_t *pgd;
 	int err = 0;
 
@@ -98,7 +101,7 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
 	pgd = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
+		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
 		if (err)
 			break;
 		phys_addr += next - addr;
@@ -178,9 +181,9 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 
 static int
 remap_area_sections(unsigned long virt, unsigned long pfn,
-		    unsigned long size, unsigned long flags)
+		    size_t size, const struct mem_type *type)
 {
-	unsigned long prot, addr = virt, end = virt + size;
+	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
 
 	/*
@@ -189,23 +192,13 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 	 */
 	unmap_area_sections(virt, size);
 
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
-	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
 	pgd = pgd_offset_k(addr);
 	do {
 		pmd_t *pmd = pmd_offset(pgd, addr);
 
-		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
+		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
-		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
+		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
 		pfn += SZ_1M >> PAGE_SHIFT;
 		flush_pmd_entry(pmd);
 
@@ -218,9 +211,9 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 
 static int
 remap_area_supersections(unsigned long virt, unsigned long pfn,
-			 unsigned long size, unsigned long flags)
+			 size_t size, const struct mem_type *type)
 {
-	unsigned long prot, addr = virt, end = virt + size;
+	unsigned long addr = virt, end = virt + size;
 	pgd_t *pgd;
 
 	/*
@@ -229,22 +222,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 	 */
 	unmap_area_sections(virt, size);
 
-	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
-			PMD_DOMAIN(DOMAIN_IO) |
-			(flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
-
-	/*
-	 * ARMv6 and above need XN set to prevent speculative prefetches
-	 * hitting IO.
-	 */
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
-		prot |= PMD_SECT_XN;
-
 	pgd = pgd_offset_k(virt);
 	do {
 		unsigned long super_pmd_val, i;
 
-		super_pmd_val = __pfn_to_phys(pfn) | prot;
+		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
+				PMD_SECT_SUPER;
 		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
 		for (i = 0; i < 8; i++) {
@@ -279,9 +262,10 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
  * mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
  */
 void __iomem *
-__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
-	      unsigned long flags)
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+		  unsigned int mtype)
 {
+	const struct mem_type *type;
 	int err;
 	unsigned long addr;
 	struct vm_struct * area;
@@ -292,6 +276,10 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
+	type = get_mem_type(mtype);
+	if (!type)
+		return NULL;
+
 	size = PAGE_ALIGN(size);
 
 	area = get_vm_area(size, VM_IOREMAP);
@@ -302,16 +290,16 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 #ifndef CONFIG_SMP
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
-	       cpu_is_xsc3()) &&
+	       cpu_is_xsc3()) && pfn >= 0x100000 &&
 	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
-		err = remap_area_supersections(addr, pfn, size, flags);
+		err = remap_area_supersections(addr, pfn, size, type);
 	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
-		err = remap_area_sections(addr, pfn, size, flags);
+		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = remap_area_pages(addr, pfn, size, flags);
+		err = remap_area_pages(addr, pfn, size, type);
 
 	if (err) {
 		vunmap((void *)addr);
@@ -321,10 +309,10 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	flush_cache_vmap(addr, addr + size);
 	return (void __iomem *) (offset + addr);
 }
-EXPORT_SYMBOL(__ioremap_pfn);
+EXPORT_SYMBOL(__arm_ioremap_pfn);
 
 void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
 {
 	unsigned long last_addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -342,9 +330,9 @@ __ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
 	 */
 	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
 
-	return __ioremap_pfn(pfn, offset, size, flags);
+	return __arm_ioremap_pfn(pfn, offset, size, mtype);
 }
-EXPORT_SYMBOL(__ioremap);
+EXPORT_SYMBOL(__arm_ioremap);
 
 void __iounmap(volatile void __iomem *addr)
 {
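
For readers tracing the conversion: the remap routines above now consume a
const struct mem_type * descriptor instead of raw L_PTE_* flags. The sketch
below is reconstructed only from the three fields this diff actually
references; the authoritative definition lives in arch/arm/mm/mm.h and may
carry additional fields (e.g. L1 and domain attributes) not shown here.

	/*
	 * Sketch only -- reconstructed from the fields referenced in this
	 * diff.  See arch/arm/mm/mm.h for the real definition.
	 */
	struct mem_type {
		unsigned int prot_pte;		/* L_PTE_* bits for page-level mappings */
		unsigned int prot_pte_ext;	/* extension bits passed to set_pte_ext() */
		unsigned int prot_sect;		/* PMD_* bits for (super)section mappings */
	};

	/* Look up the descriptor for an MT_* index; NULL if the index is unknown. */
	const struct mem_type *get_mem_type(unsigned int mtype);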
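And a caller-side view of the renamed entry point. The __arm_ioremap() name
and signature are taken from the diff itself; MT_DEVICE (from
asm/mach/map.h) is the customary memory-type index for device registers,
and the base address and size below are invented purely for illustration.

	#include <linux/errno.h>
	#include <asm/io.h>
	#include <asm/mach/map.h>	/* MT_* memory type indices */

	#define FOO_PHYS_BASE	0x10000000UL	/* hypothetical device base */
	#define FOO_REG_SIZE	0x1000		/* hypothetical 4K register window */

	static void __iomem *foo_base;

	static int foo_map_regs(void)
	{
		/* before this patch: __ioremap(FOO_PHYS_BASE, FOO_REG_SIZE, 0) */
		foo_base = __arm_ioremap(FOO_PHYS_BASE, FOO_REG_SIZE, MT_DEVICE);
		if (!foo_base)
			return -ENOMEM;
		return 0;
	}

The practical effect is that the cacheability, XN and domain decisions the
removed blocks computed per call site (note the deleted "ARMv6 and above
need XN" logic in both section paths) are now carried in type->prot_sect
and type->prot_pte, presumably filled in once when the mem_type table is
built, so every mapping of a given MT_* index gets identical attributes.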