Diffstat (limited to 'arch/arm/mm/ioremap.c')
 -rw-r--r--  arch/arm/mm/ioremap.c | 227
 1 file changed, 223 insertions(+), 4 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 7691cfdba567..7eac87f05180 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -27,7 +27,16 @@
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/*
+ * Used by ioremap() and iounmap() code to mark (super)section-mapped
+ * I/O regions in vm_struct->flags field.
+ */
+#define VM_ARM_SECTION_MAPPING	0x80000000
 
 static inline void
 remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
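
The hunk above reserves a private high bit in vm_struct->flags, VM_ARM_SECTION_MAPPING, so that the unmap path added later in this patch can tell (super)section-mapped I/O regions apart from ordinary page-table-backed ones. A minimal userspace sketch of how such a private flag coexists with the generic vmalloc flags follows; the VM_IOREMAP value here is an assumption used purely for illustration, while 0x80000000 is taken from the patch itself.

#include <stdio.h>

#define VM_IOREMAP             0x00000001	/* assumed value, illustration only */
#define VM_ARM_SECTION_MAPPING 0x80000000	/* value defined by this patch */

int main(void)
{
	/* __ioremap_pfn() ORs the private bit in alongside the generic one... */
	unsigned long flags = VM_IOREMAP | VM_ARM_SECTION_MAPPING;

	/* ...so __iounmap() can later test each bit independently. */
	if ((flags & VM_IOREMAP) && (flags & VM_ARM_SECTION_MAPPING))
		printf("ioremap region backed by (super)sections\n");
	return 0;
}
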
@@ -113,10 +122,168 @@ remap_area_pages(unsigned long start, unsigned long pfn,
 		dir++;
 	} while (address && (address < end));
 
-	flush_cache_vmap(start, end);
 	return err;
 }
 
+
+void __check_kvm_seq(struct mm_struct *mm)
+{
+	unsigned int seq;
+
+	do {
+		seq = init_mm.context.kvm_seq;
+		memcpy(pgd_offset(mm, VMALLOC_START),
+		       pgd_offset_k(VMALLOC_START),
+		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+					pgd_index(VMALLOC_START)));
+		mm->context.kvm_seq = seq;
+	} while (seq != init_mm.context.kvm_seq);
+}
+
+#ifndef CONFIG_SMP
+/*
+ * Section support is unsafe on SMP - If you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area() allocates a guard 4K page, so we need to mask
+ * the size back to 1MB aligned or we will overflow in the loop below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+	pgd_t *pgd;
+
+	flush_cache_vunmap(addr, end);
+	pgd = pgd_offset_k(addr);
+	do {
+		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+
+		pmd = *pmdp;
+		if (!pmd_none(pmd)) {
+			/*
+			 * Clear the PMD from the page table, and
+			 * increment the kvm sequence so others
+			 * notice this change.
+			 *
+			 * Note: this is still racy on SMP machines.
+			 */
+			pmd_clear(pmdp);
+			init_mm.context.kvm_seq++;
+
+			/*
+			 * Free the page table, if there was one.
+			 */
+			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+				pte_free_kernel(pmd_page_kernel(pmd));
+		}
+
+		addr += PGDIR_SIZE;
+		pgd++;
+	} while (addr < end);
+
+	/*
+	 * Ensure that the active_mm is up to date - we want to
+	 * catch any use-after-iounmap cases.
+	 */
+	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
+		__check_kvm_seq(current->active_mm);
+
+	flush_tlb_kernel_range(virt, end);
+}
+
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+		    unsigned long size, unsigned long flags)
+{
+	unsigned long prot, addr = virt, end = virt + size;
+	pgd_t *pgd;
+
+	/*
+	 * Remove and free any PTE-based mapping, and
+	 * sync the current kernel mapping.
+	 */
+	unmap_area_sections(virt, size);
+
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
+	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
+
+	/*
+	 * ARMv6 and above need XN set to prevent speculative prefetches
+	 * hitting IO.
+	 */
+	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+		prot |= PMD_SECT_XN;
+
+	pgd = pgd_offset_k(addr);
+	do {
+		pmd_t *pmd = pmd_offset(pgd, addr);
+
+		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
+		pfn += SZ_1M >> PAGE_SHIFT;
+		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
+		pfn += SZ_1M >> PAGE_SHIFT;
+		flush_pmd_entry(pmd);
+
+		addr += PGDIR_SIZE;
+		pgd++;
+	} while (addr < end);
+
+	return 0;
+}
+
+static int
+remap_area_supersections(unsigned long virt, unsigned long pfn,
+			 unsigned long size, unsigned long flags)
+{
+	unsigned long prot, addr = virt, end = virt + size;
+	pgd_t *pgd;
+
+	/*
+	 * Remove and free any PTE-based mapping, and
+	 * sync the current kernel mapping.
+	 */
+	unmap_area_sections(virt, size);
+
+	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
+	       PMD_DOMAIN(DOMAIN_IO) |
+	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
+
+	/*
+	 * ARMv6 and above need XN set to prevent speculative prefetches
+	 * hitting IO.
+	 */
+	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+		prot |= PMD_SECT_XN;
+
+	pgd = pgd_offset_k(virt);
+	do {
+		unsigned long super_pmd_val, i;
+
+		super_pmd_val = __pfn_to_phys(pfn) | prot;
+		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
+
+		for (i = 0; i < 8; i++) {
+			pmd_t *pmd = pmd_offset(pgd, addr);
+
+			pmd[0] = __pmd(super_pmd_val);
+			pmd[1] = __pmd(super_pmd_val);
+			flush_pmd_entry(pmd);
+
+			addr += PGDIR_SIZE;
+			pgd++;
+		}
+
+		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
+	} while (addr < end);
+
+	return 0;
+}
+#endif
+
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -133,18 +300,42 @@ void __iomem *
 __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	      unsigned long flags)
 {
+	int err;
 	unsigned long addr;
 	struct vm_struct * area;
+	unsigned int cr = get_cr();
+
+	/*
+	 * High mappings must be supersection aligned
+	 */
+	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+		return NULL;
 
 	area = get_vm_area(size, VM_IOREMAP);
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	if (remap_area_pages(addr, pfn, size, flags)) {
+
+#ifndef CONFIG_SMP
+	if ((((cpu_architecture() >= CPU_ARCH_ARMv6) && (cr & CR_XP)) ||
+	       cpu_is_xsc3()) &&
+	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+		area->flags |= VM_ARM_SECTION_MAPPING;
+		err = remap_area_supersections(addr, pfn, size, flags);
+	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+		area->flags |= VM_ARM_SECTION_MAPPING;
+		err = remap_area_sections(addr, pfn, size, flags);
+	} else
+#endif
+		err = remap_area_pages(addr, pfn, size, flags);
+
+	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}
-	return (void __iomem *) (offset + (char *)addr);
+
+	flush_cache_vmap(addr, addr + size);
+	return (void __iomem *) (offset + addr);
 }
 EXPORT_SYMBOL(__ioremap_pfn);
 
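
The supersection path in the hunk above is the subtle part: a supersection covers 16MB, and on a 32-bit unsigned long __pfn_to_phys(pfn) keeps only the low 32 bits of the physical address, so remap_area_supersections() folds physical address bits [35:32] into bits [23:20] of the descriptor with ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20, then replicates the same value into all 16 pmd slots (8 pgd entries, two sections each) spanning the 16MB range. The standalone sketch below reproduces just that descriptor arithmetic in plain C; PAGE_SHIFT, the example pfn and the zero prot value are assumptions for illustration, and uint32_t stands in for ARM's 32-bit unsigned long.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* assumed 4K pages */

static uint32_t super_pmd_val(uint32_t pfn, uint32_t prot)
{
	/* Low 32 bits of the physical address; pfn << PAGE_SHIFT truncates. */
	uint32_t val = (pfn << PAGE_SHIFT) | prot;

	/* Physical address bits [35:32] go into descriptor bits [23:20]. */
	val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
	return val;
}

int main(void)
{
	/* pfn 0x140000 is physical address 0x1_4000_0000, i.e. above 4GB. */
	printf("descriptor = 0x%08x\n", super_pmd_val(0x140000, 0));
	return 0;
}

Run, this prints descriptor = 0x40100000: 0x40000000 is the truncated low part of the physical address and 0x00100000 carries PA[35:32] = 1, which is exactly the split the kernel loop writes into each pmd entry.
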
@@ -173,6 +364,34 @@ EXPORT_SYMBOL(__ioremap);
 
 void __iounmap(void __iomem *addr)
 {
-	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+	struct vm_struct **p, *tmp;
+	unsigned int section_mapping = 0;
+
+	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
+
+	/*
+	 * If this is a section based mapping we need to handle it
+	 * specially as the VM subsystem does not know how to handle
+	 * such a beast. We need the lock here b/c we need to clear
+	 * all the mappings before the area can be reclaimed
+	 * by someone else.
+	 */
+	write_lock(&vmlist_lock);
+	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
+				*p = tmp->next;
+				unmap_area_sections((unsigned long)tmp->addr,
+						    tmp->size);
+				kfree(tmp);
+				section_mapping = 1;
+			}
+			break;
+		}
+	}
+	write_unlock(&vmlist_lock);
+
+	if (!section_mapping)
+		vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
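
Taken together, __ioremap_pfn() now picks the largest mapping granularity whose alignment constraints hold, using the idiom !((phys | size | addr) & ~MASK) to test the physical base, the requested size and the vmalloc-area virtual address in one expression: supersections on ARMv6-with-CR_XP or XScale3 when everything is 16MB aligned, ordinary sections when everything is pmd aligned, and 4K page tables otherwise. The sketch below mirrors that selection logic as a standalone program; the mask values (16MB supersections, 1MB sections written two per pmd, hence 2MB pmd alignment) and the sample addresses are assumptions for illustration rather than values taken from the kernel headers.

#include <stdio.h>

#define SZ_1M			0x00100000UL
#define SUPERSECTION_SIZE	0x01000000UL		/* assumed: 16MB */
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE - 1))
#define PMD_MASK		(~(2 * SZ_1M - 1))	/* assumed: 2MB per pmd pair */

/* Mirror of the alignment test __ioremap_pfn() uses to pick a granularity. */
static const char *pick_mapping(unsigned long phys, unsigned long virt,
				unsigned long size)
{
	if (!((phys | size | virt) & ~SUPERSECTION_MASK))
		return "supersections (16MB)";
	if (!((phys | size | virt) & ~PMD_MASK))
		return "sections (1MB, written in pairs)";
	return "page tables (4K PTEs)";
}

int main(void)
{
	/* Hypothetical device windows: only the first satisfies the 16MB test. */
	printf("%s\n", pick_mapping(0x50000000UL, 0xe1000000UL, 0x01000000UL));
	printf("%s\n", pick_mapping(0x50200000UL, 0xe0200000UL, 0x00200000UL));
	printf("%s\n", pick_mapping(0x50001000UL, 0xe0001000UL, 0x00001000UL));
	return 0;
}

The same split explains the new __iounmap(): the vmalloc core only knows how to tear down PTE-backed areas, so regions tagged VM_ARM_SECTION_MAPPING are unlinked from vmlist and cleared by unmap_area_sections() by hand, and plain vunmap() is used only for the page-table case.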