 arch/arm/mm/ioremap.c         | 167
 include/asm-arm/memory.h      |   5
 include/asm-arm/mmu.h         |   1
 include/asm-arm/mmu_context.h |  12
 4 files changed, 180 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 7691cfdba567..6aa13d59c858 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -27,7 +27,16 @@
 
 #include <asm/cacheflush.h>
 #include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/*
+ * Used by ioremap() and iounmap() code to mark section-mapped I/O regions
+ * in vm_struct->flags field.
+ */
+#define VM_ARM_SECTION_MAPPING	0x80000000
 
 static inline void
 remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
@@ -113,10 +122,120 @@ remap_area_pages(unsigned long start, unsigned long pfn,
 		dir++;
 	} while (address && (address < end));
 
-	flush_cache_vmap(start, end);
 	return err;
 }
 
+
+void __check_kvm_seq(struct mm_struct *mm)
+{
+	unsigned int seq;
+
+	do {
+		seq = init_mm.context.kvm_seq;
+		memcpy(pgd_offset(mm, VMALLOC_START),
+		       pgd_offset_k(VMALLOC_START),
+		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
+					pgd_index(VMALLOC_START)));
+		mm->context.kvm_seq = seq;
+	} while (seq != init_mm.context.kvm_seq);
+}
+
+#ifndef CONFIG_SMP
+/*
+ * Section support is unsafe on SMP - If you iounmap and ioremap a region,
+ * the other CPUs will not see this change until their next context switch.
+ * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
+ * which requires the new ioremap'd region to be referenced, the CPU will
+ * reference the _old_ region.
+ *
+ * Note that get_vm_area() allocates a guard 4K page, so we need to mask
+ * the size back to 1MB aligned or we will overflow in the loop below.
+ */
+static void unmap_area_sections(unsigned long virt, unsigned long size)
+{
+	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+	pgd_t *pgd;
+
+	flush_cache_vunmap(addr, end);
+	pgd = pgd_offset_k(addr);
+	do {
+		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+
+		pmd = *pmdp;
+		if (!pmd_none(pmd)) {
+			/*
+			 * Clear the PMD from the page table, and
+			 * increment the kvm sequence so others
+			 * notice this change.
+			 *
+			 * Note: this is still racy on SMP machines.
+			 */
+			pmd_clear(pmdp);
+			init_mm.context.kvm_seq++;
+
+			/*
+			 * Free the page table, if there was one.
+			 */
+			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
+				pte_free_kernel(pmd_page_kernel(pmd));
+		}
+
+		addr += PGDIR_SIZE;
+		pgd++;
+	} while (addr < end);
+
+	/*
+	 * Ensure that the active_mm is up to date - we want to
+	 * catch any use-after-iounmap cases.
+	 */
+	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
+		__check_kvm_seq(current->active_mm);
+
+	flush_tlb_kernel_range(virt, end);
+}
+
+static int
+remap_area_sections(unsigned long virt, unsigned long pfn,
+		    unsigned long size, unsigned long flags)
+{
+	unsigned long prot, addr = virt, end = virt + size;
+	pgd_t *pgd;
+
+	/*
+	 * Remove and free any PTE-based mapping, and
+	 * sync the current kernel mapping.
+	 */
+	unmap_area_sections(virt, size);
+
+	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
+	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));
+
+	/*
+	 * ARMv6 and above need XN set to prevent speculative prefetches
+	 * hitting IO.
+	 */
+	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+		prot |= PMD_SECT_XN;
+
+	pgd = pgd_offset_k(addr);
+	do {
+		pmd_t *pmd = pmd_offset(pgd, addr);
+
+		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
+		pfn += SZ_1M >> PAGE_SHIFT;
+		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
+		pfn += SZ_1M >> PAGE_SHIFT;
+		flush_pmd_entry(pmd);
+
+		addr += PGDIR_SIZE;
+		pgd++;
+	} while (addr < end);
+
+	return 0;
+}
+#endif
+
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -133,6 +252,7 @@ void __iomem *
 __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	      unsigned long flags)
 {
+	int err;
 	unsigned long addr;
 	struct vm_struct * area;
 
@@ -140,11 +260,22 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	if (remap_area_pages(addr, pfn, size, flags)) {
+
+#ifndef CONFIG_SMP
+	if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+		area->flags |= VM_ARM_SECTION_MAPPING;
+		err = remap_area_sections(addr, pfn, size, flags);
+	} else
+#endif
+		err = remap_area_pages(addr, pfn, size, flags);
+
+	if (err) {
 		vunmap((void *)addr);
 		return NULL;
 	}
-	return (void __iomem *) (offset + (char *)addr);
+
+	flush_cache_vmap(addr, addr + size);
+	return (void __iomem *) (offset + addr);
 }
 EXPORT_SYMBOL(__ioremap_pfn);
 
@@ -173,6 +304,34 @@ EXPORT_SYMBOL(__ioremap);
 
 void __iounmap(void __iomem *addr)
 {
-	vunmap((void *)(PAGE_MASK & (unsigned long)addr));
+	struct vm_struct **p, *tmp;
+	unsigned int section_mapping = 0;
+
+	addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
+
+	/*
+	 * If this is a section based mapping we need to handle it
+	 * specially as the VM subysystem does not know how to handle
+	 * such a beast. We need the lock here b/c we need to clear
+	 * all the mappings before the area can be reclaimed
+	 * by someone else.
+	 */
+	write_lock(&vmlist_lock);
+	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
+		if((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
+			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
+				*p = tmp->next;
+				unmap_area_sections((unsigned long)tmp->addr,
+						    tmp->size);
+				kfree(tmp);
+				section_mapping = 1;
+			}
+			break;
+		}
+	}
+	write_unlock(&vmlist_lock);
+
+	if (!section_mapping)
+		vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
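For illustration only (not part of the patch): the condition !((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK) in __ioremap_pfn() above takes the section path only when the physical base, the mapping size and the vmalloc address are all PMD aligned, i.e. 2MB aligned with ARM's two-level tables, which is why remap_area_sections() writes a pair of 1MB sections per PMD entry. A minimal standalone sketch of that test, with names invented for the example:

/*
 * Standalone sketch of the alignment test used by __ioremap_pfn() above.
 * PMD_SPAN and fits_section_mapping() are made-up names; the kernel uses
 * PMD_MASK and open-codes the check.
 */
#include <stdbool.h>
#include <stdint.h>

#define PMD_SPAN	(1UL << 21)	/* one PMD entry (two 1MB sections) covers 2MB */
#define PMD_LOW_BITS	(PMD_SPAN - 1)	/* bits that must be clear, like ~PMD_MASK */

static bool fits_section_mapping(uint32_t phys, uint32_t virt, uint32_t size)
{
	/* Mirrors !((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK). */
	return ((phys | virt | size) & PMD_LOW_BITS) == 0;
}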
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index 94f973b704f1..176a4fb04989 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
@@ -68,6 +68,11 @@
  */
 #define XIP_VIRT_ADDR(physaddr)  (MODULE_START + ((physaddr) & 0x000fffff))
 
+/*
+ * Allow 2MB-aligned ioremap pages
+ */
+#define IOREMAP_MAX_ORDER	21
+
 #else /* CONFIG_MMU */
 
 /*
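The IOREMAP_MAX_ORDER definition above matters because the generic vmalloc allocator uses it to decide how strongly a VM_IOREMAP area is aligned; without a sufficiently aligned virtual address the section test in __ioremap_pfn() can never succeed. A simplified, standalone model of that alignment choice (the real logic lives in mm/vmalloc.c; treat the exact bounds here as an approximation, and the names as local to the sketch):

/*
 * Simplified model of how get_vm_area() picks the alignment of an ioremap
 * area: roughly the smallest power of two covering the size, clamped
 * between one page and 1 << IOREMAP_MAX_ORDER (2MB after this patch).
 */
#define SKETCH_PAGE_SHIFT		12	/* 4K pages */
#define SKETCH_IOREMAP_MAX_ORDER	21	/* value added by this patch */

static unsigned long ioremap_area_align(unsigned long size)
{
	int bit = 0;

	while ((1UL << bit) < size)		/* smallest order covering size */
		bit++;

	if (bit > SKETCH_IOREMAP_MAX_ORDER)	/* never ask for more than 2MB */
		bit = SKETCH_IOREMAP_MAX_ORDER;
	else if (bit < SKETCH_PAGE_SHIFT)	/* and never less than a page */
		bit = SKETCH_PAGE_SHIFT;

	return 1UL << bit;
}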
diff --git a/include/asm-arm/mmu.h b/include/asm-arm/mmu.h
index 23dde52e0945..fe2a23b5627b 100644
--- a/include/asm-arm/mmu.h
+++ b/include/asm-arm/mmu.h
@@ -7,6 +7,7 @@ typedef struct {
 #if __LINUX_ARM_ARCH__ >= 6
 	unsigned int id;
 #endif
+	unsigned int kvm_seq;
 } mm_context_t;
 
 #if __LINUX_ARM_ARCH__ >= 6
diff --git a/include/asm-arm/mmu_context.h b/include/asm-arm/mmu_context.h
index 9fadb01e030d..d1a65b1edcaa 100644
--- a/include/asm-arm/mmu_context.h
+++ b/include/asm-arm/mmu_context.h
@@ -17,6 +17,8 @@
 #include <asm/cacheflush.h>
 #include <asm/proc-fns.h>
 
+void __check_kvm_seq(struct mm_struct *mm);
+
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
@@ -45,13 +47,21 @@ static inline void check_context(struct mm_struct *mm)
 {
 	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
 		__new_context(mm);
+
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
 }
 
 #define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)
 
 #else
 
-#define check_context(mm)		do { } while (0)
+static inline void check_context(struct mm_struct *mm)
+{
+	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
+		__check_kvm_seq(mm);
+}
+
 #define init_new_context(tsk,mm)	0
 
 #endif
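Taken together, the changes are invisible to drivers: the usual ioremap()/iounmap() pair below now gets 1MB section mappings on non-SMP kernels whenever the physical base, the size and the allocated virtual address all fall on 2MB boundaries, and falls back to ordinary 4K PTE mappings otherwise. A hypothetical driver fragment (the device window, size and register offset are made up for illustration; only ioremap(), writel() and iounmap() are the real API this patch touches):

/*
 * Hypothetical driver fragment; DEMO_* values are invented. On a UP kernel
 * a 2MB-aligned window like this one can now be backed by 1MB sections.
 */
#include <linux/errno.h>
#include <asm/io.h>
#include <asm/sizes.h>

#define DEMO_PHYS_BASE	0x48000000	/* made-up, 2MB-aligned device window */
#define DEMO_WIN_SIZE	SZ_2M		/* 2MB: eligible for section mapping */
#define DEMO_CTRL_REG	0x10		/* made-up register offset */

static void __iomem *demo_base;

static int demo_init(void)
{
	demo_base = ioremap(DEMO_PHYS_BASE, DEMO_WIN_SIZE);
	if (!demo_base)
		return -ENOMEM;

	writel(0x1, demo_base + DEMO_CTRL_REG);
	return 0;
}

static void demo_exit(void)
{
	/*
	 * __iounmap() finds the VM_ARM_SECTION_MAPPING area on the vmlist
	 * and tears down the section PMDs itself instead of calling vunmap().
	 */
	iounmap(demo_base);
	demo_base = NULL;
}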