Diffstat (limited to 'arch/arm/mm/ioremap.c')

 -rw-r--r--  arch/arm/mm/ioremap.c | 119
 1 file changed, 75 insertions(+), 44 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c4f55c..80632e8d7538 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING  0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
                  const struct mem_type *mtype)
 {
@@ -64,7 +58,7 @@ void __check_kvm_seq(struct mm_struct *mm)
         } while (seq != init_mm.context.kvm_seq);
 }
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 /*
  * Section support is unsafe on SMP - If you iounmap and ioremap a region,
  * the other CPUs will not see this change until their next context switch.
@@ -79,13 +73,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
         unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
         pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmdp;
 
         flush_cache_vunmap(addr, end);
         pgd = pgd_offset_k(addr);
+        pud = pud_offset(pgd, addr);
+        pmdp = pmd_offset(pud, addr);
         do {
-                pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
+                pmd_t pmd = *pmdp;
 
-                pmd = *pmdp;
                 if (!pmd_none(pmd)) {
                         /*
                          * Clear the PMD from the page table, and
@@ -104,8 +101,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
                         pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
                 }
 
-                addr += PGDIR_SIZE;
-                pgd++;
+                addr += PMD_SIZE;
+                pmdp += 2;
         } while (addr < end);
 
         /*
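The walk above assumes the classic (non-LPAE) two-level ARM layout: each Linux PMD spans 2MB and is backed by a pair of 1MB hardware section entries, which is why the cursor advances two entries per PMD_SIZE step instead of walking the pgd. A minimal userspace sketch of that stride arithmetic, using stand-in constants rather than the kernel's definitions:

        #include <stdio.h>

        /* stand-ins mirroring classic ARM: 1MB sections, two per Linux PMD */
        #define SECTION_SIZE    (1UL << 20)
        #define PMD_SIZE        (2 * SECTION_SIZE)

        int main(void)
        {
                unsigned long virt = 0xe0000000UL, size = 4 * SECTION_SIZE;
                unsigned long addr = virt, end = virt + size;
                unsigned int idx = 0;   /* stands in for the pmdp cursor */

                do {
                        /* one iteration covers one PMD, i.e. two sections */
                        printf("pmd[%u..%u] -> %#lx..%#lx\n",
                               idx, idx + 1, addr, addr + PMD_SIZE - 1);
                        idx += 2;
                        addr += PMD_SIZE;
                } while (addr < end);
                return 0;
        }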
@@ -124,6 +121,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
 {
         unsigned long addr = virt, end = virt + size;
         pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
 
         /*
          * Remove and free any PTE-based mapping, and
@@ -132,17 +131,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
         unmap_area_sections(virt, size);
 
         pgd = pgd_offset_k(addr);
+        pud = pud_offset(pgd, addr);
+        pmd = pmd_offset(pud, addr);
         do {
-                pmd_t *pmd = pmd_offset(pgd, addr);
-
                 pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                 pfn += SZ_1M >> PAGE_SHIFT;
                 pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
                 pfn += SZ_1M >> PAGE_SHIFT;
                 flush_pmd_entry(pmd);
 
-                addr += PGDIR_SIZE;
-                pgd++;
+                addr += PMD_SIZE;
+                pmd += 2;
         } while (addr < end);
 
         return 0;
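In remap_area_sections() each 1MB entry is just the physical base OR'd with the memory type's section protection bits, and pmd[0]/pmd[1] receive consecutive 1MB frames before the pair is flushed. A rough sketch of that descriptor arithmetic (PROT_SECT here is a made-up value, not the kernel's prot_sect):

        #include <stdio.h>

        #define PAGE_SHIFT      12
        #define PROT_SECT       0x0402UL        /* hypothetical section bits */

        int main(void)
        {
                unsigned long pfn = 0x40000;    /* phys 0x40000000 */
                unsigned long pmd0, pmd1;

                pmd0 = (pfn << PAGE_SHIFT) | PROT_SECT;
                pfn += (1UL << 20) >> PAGE_SHIFT;       /* SZ_1M >> PAGE_SHIFT */
                pmd1 = (pfn << PAGE_SHIFT) | PROT_SECT;
                /* the two section bases differ by exactly 1MB */
                printf("pmd[0]=%#lx pmd[1]=%#lx\n", pmd0, pmd1);
                return 0;
        }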
@@ -154,6 +153,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
 {
         unsigned long addr = virt, end = virt + size;
         pgd_t *pgd;
+        pud_t *pud;
+        pmd_t *pmd;
 
         /*
          * Remove and free any PTE-based mapping, and
@@ -162,6 +163,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
         unmap_area_sections(virt, size);
 
         pgd = pgd_offset_k(virt);
+        pud = pud_offset(pgd, addr);
+        pmd = pmd_offset(pud, addr);
         do {
                 unsigned long super_pmd_val, i;
 
@@ -170,14 +173,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
                 super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
 
                 for (i = 0; i < 8; i++) {
-                        pmd_t *pmd = pmd_offset(pgd, addr);
-
                         pmd[0] = __pmd(super_pmd_val);
                         pmd[1] = __pmd(super_pmd_val);
                         flush_pmd_entry(pmd);
 
-                        addr += PGDIR_SIZE;
-                        pgd++;
+                        addr += PMD_SIZE;
+                        pmd += 2;
                 }
 
                 pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
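A supersection maps 16MB and must be replicated across 16 consecutive entries (eight iterations of a two-entry pair). For physical addresses above 4GB, bits [35:32] of the base are packed into descriptor bits [23:20], which is what the (pfn >> (32 - PAGE_SHIFT)) & 0xf term computes. A sketch of that packing with a hypothetical 36-bit address:

        #include <stdio.h>

        #define PAGE_SHIFT      12

        int main(void)
        {
                unsigned long long phys = 0x900000000ULL; /* hypothetical 36-bit base */
                unsigned long pfn = (unsigned long)(phys >> PAGE_SHIFT);
                unsigned long val;

                val = (unsigned long)phys & 0xff000000UL;          /* bits [31:24] */
                val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;   /* [35:32] -> [23:20] */
                printf("supersection base bits: %#lx\n", val);     /* 0x900000 */
                return 0;
        }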
@@ -195,17 +196,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         unsigned long addr;
         struct vm_struct * area;
 
+#ifndef CONFIG_ARM_LPAE
         /*
          * High mappings must be supersection aligned
          */
         if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                 return NULL;
-
-        /*
-         * Don't allow RAM to be mapped - this causes problems with ARMv6+
-         */
-        if (WARN_ON(pfn_valid(pfn)))
-                return NULL;
+#endif
 
         type = get_mem_type(mtype);
         if (!type)
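The pfn >= 0x100000 test above identifies physical addresses at or above 4GB (0x100000 << 12 == 1 << 32), which classic ARM can only reach through supersections; that is why such mappings must be supersection aligned, and why the whole check disappears under CONFIG_ARM_LPAE, which has its own large-descriptor format. The threshold arithmetic, with a stand-in SUPERSECTION_SIZE:

        #include <stdio.h>

        #define PAGE_SHIFT              12
        #define SUPERSECTION_SIZE       (1ULL << 24)    /* 16MB, stand-in */

        int main(void)
        {
                unsigned long pfn = 0x100000;   /* first frame at/above 4GB */
                unsigned long long phys = (unsigned long long)pfn << PAGE_SHIFT;

                printf("phys = %#llx\n", phys);                 /* 0x100000000 */
                printf("aligned = %d\n",
                       (phys & (SUPERSECTION_SIZE - 1)) == 0);  /* 1 */
                return 0;
        }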
@@ -216,12 +213,40 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
          */
         size = PAGE_ALIGN(offset + size);
 
+        /*
+         * Try to reuse one of the static mapping whenever possible.
+         */
+        read_lock(&vmlist_lock);
+        for (area = vmlist; area; area = area->next) {
+                if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+                        break;
+                if (!(area->flags & VM_ARM_STATIC_MAPPING))
+                        continue;
+                if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+                        continue;
+                if (__phys_to_pfn(area->phys_addr) > pfn ||
+                    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+                        continue;
+                /* we can drop the lock here as we know *area is static */
+                read_unlock(&vmlist_lock);
+                addr = (unsigned long)area->addr;
+                addr += __pfn_to_phys(pfn) - area->phys_addr;
+                return (void __iomem *) (offset + addr);
+        }
+        read_unlock(&vmlist_lock);
+
+        /*
+         * Don't allow RAM to be mapped - this causes problems with ARMv6+
+         */
+        if (WARN_ON(pfn_valid(pfn)))
+                return NULL;
+
         area = get_vm_area_caller(size, VM_IOREMAP, caller);
         if (!area)
                 return NULL;
         addr = (unsigned long)area->addr;
 
-#ifndef CONFIG_SMP
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
         if (DOMAIN_IO == 0 &&
             (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
               cpu_is_xsc3()) && pfn >= 0x100000 &&
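The new loop above lets ioremap() serve a request from an existing static mapping: if one with a matching memory type covers the whole physical range, the returned pointer is simply the static mapping's virtual base plus the physical offset, and no new page tables are built. A simplified model of that containment test and offset arithmetic (struct static_map and every value here are hypothetical, not kernel definitions):

        #include <stdio.h>

        #define PAGE_SHIFT      12

        /* simplified stand-in for the struct vm_struct fields used here */
        struct static_map {
                unsigned long addr;             /* virtual base */
                unsigned long long phys_addr;   /* physical base */
                unsigned long size;
        };

        int main(void)
        {
                /* hypothetical static mapping: 1MB of I/O space */
                struct static_map area = { 0xf8000000UL, 0x48000000ULL, 0x100000UL };
                unsigned long pfn = 0x48020;    /* request lies inside it */
                unsigned long size = 0x1000, offset = 0x34;
                unsigned long long phys = (unsigned long long)pfn << PAGE_SHIFT;

                if (phys >= area.phys_addr &&
                    phys + size - 1 <= area.phys_addr + area.size - 1) {
                        unsigned long addr = area.addr +
                                (unsigned long)(phys - area.phys_addr);
                        /* reuse: no new mapping is built */
                        printf("mapped at %#lx\n", offset + addr);
                }
                return 0;
        }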
@@ -313,28 +338,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
         void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
-        struct vm_struct **p, *tmp;
+        struct vm_struct *vm;
 
-        /*
-         * If this is a section based mapping we need to handle it
-         * specially as the VM subsystem does not know how to handle
-         * such a beast. We need the lock here b/c we need to clear
-         * all the mappings before the area can be reclaimed
-         * by someone else.
-         */
-        write_lock(&vmlist_lock);
-        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-                if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-                        if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-                                unmap_area_sections((unsigned long)tmp->addr,
-                                                    tmp->size);
-                        }
+        read_lock(&vmlist_lock);
+        for (vm = vmlist; vm; vm = vm->next) {
+                if (vm->addr > addr)
+                        break;
+                if (!(vm->flags & VM_IOREMAP))
+                        continue;
+                /* If this is a static mapping we must leave it alone */
+                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+                        read_unlock(&vmlist_lock);
+                        return;
+                }
+#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+                /*
+                 * If this is a section based mapping we need to handle it
+                 * specially as the VM subsystem does not know how to handle
+                 * such a beast.
+                 */
+                if ((vm->addr == addr) &&
+                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+                        unmap_area_sections((unsigned long)vm->addr, vm->size);
                         break;
                 }
-        }
-        write_unlock(&vmlist_lock);
 #endif
+        }
+        read_unlock(&vmlist_lock);
 
         vunmap(addr);
 }
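On the unmap side, __iounmap() now scans vmlist under the read lock: a pointer into a VM_ARM_STATIC_MAPPING area is left alone (iounmap() becomes a no-op for it), and the early vm->addr > addr break relies on vmlist being kept sorted by virtual address. A simplified model of that sorted-scan logic, using plain integers for addresses:

        #include <stdio.h>

        /* simplified vmlist node, kept sorted by ascending address */
        struct vm {
                unsigned long addr, size;
                int is_static;          /* stands in for VM_ARM_STATIC_MAPPING */
                struct vm *next;
        };

        /* return 1 if addr falls inside a static mapping (leave it mapped) */
        static int in_static_mapping(struct vm *vmlist, unsigned long addr)
        {
                struct vm *vm;

                for (vm = vmlist; vm; vm = vm->next) {
                        if (vm->addr > addr)
                                break;  /* sorted: no later entry can match */
                        if (vm->is_static && addr < vm->addr + vm->size)
                                return 1;
                }
                return 0;
        }

        int main(void)
        {
                struct vm b = { 0xf8000000UL, 0x100000UL, 1, NULL };
                struct vm a = { 0xe0000000UL, 0x10000UL, 0, &b };

                printf("%d\n", in_static_mapping(&a, 0xf8000400UL));    /* 1 */
                return 0;
        }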