| author    | Nicolas Pitre <nicolas.pitre@linaro.org>                        | 2011-09-16 01:14:23 -0400 |
|-----------|------------------------------------------------------------------|---------------------------|
| committer | Nicolas Pitre <nico@fluxnic.net>                                 | 2011-11-26 19:21:28 -0500 |
| commit    | 576d2f2525612ecb5af029a76f21f22a3b82563d (patch)                 |                           |
| tree      | 81f9564c432ceeb4068dd3a5de204134a32c98f3 /arch/arm/mm/ioremap.c  |                           |
| parent    | 6ee723a6570a897208b76ab3e9a495e9106b2f8c (diff)                  |                           |
ARM: add generic ioremap optimization by reusing static mappings
Now that all the static mappings from iotable_init() are located in the
vmalloc area, it is trivial to optimize ioremap() by reusing those
static mappings whenever the requested physical area fits entirely within
one of them, and to do so generically for all platforms.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Tested-by: Jamie Iles <jamie@jamieiles.com>
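
For readers less familiar with the ARM static-mapping machinery, here is a minimal, hypothetical sketch of the scenario this patch targets (the platform name, addresses, and sizes are invented for illustration and are not part of the patch): a board file registers a fixed I/O mapping through iotable_init() at map_io time, and with this change a later ioremap() of a physical range falling inside that mapping, with the same memory type, returns a pointer into the existing mapping instead of allocating a new vmalloc area.

```c
/* Illustrative only: invented platform, addresses, and sizes. */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <asm/mach/map.h>

#define EXAMPLE_UART_PHYS	0x10010000UL	/* assumed peripheral base */
#define EXAMPLE_UART_VIRT	0xfed10000UL	/* assumed fixed virtual address */

/* Static mapping table, as normally found in a board or SoC file. */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= EXAMPLE_UART_VIRT,
		.pfn		= __phys_to_pfn(EXAMPLE_UART_PHYS),
		.length		= 0x10000,		/* 64 KiB */
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	/* Creates the static mapping, now placed inside the vmalloc area. */
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}

static void __iomem *example_driver_map(void)
{
	/*
	 * With this patch, the request below is satisfied from the static
	 * mapping above (same MT_DEVICE type, physically contained in it),
	 * so no new page tables are built and the returned pointer is
	 * simply EXAMPLE_UART_VIRT + 0x20.
	 */
	return ioremap(EXAMPLE_UART_PHYS + 0x20, 0x100);
}
```

The reuse only triggers when the requested memory type matches the one recorded for the static mapping and the whole physical range is contained in it; otherwise the code falls back to get_vm_area_caller() as before.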
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r--  arch/arm/mm/ioremap.c | 72
1 file changed, 48 insertions(+), 24 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bc7d9bd766d1..12c7ad215ce7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING 0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
                  const struct mem_type *mtype)
 {
@@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
                 return NULL;
 
-        /*
-         * Don't allow RAM to be mapped - this causes problems with ARMv6+
-         */
-        if (WARN_ON(pfn_valid(pfn)))
-                return NULL;
-
         type = get_mem_type(mtype);
         if (!type)
                 return NULL;
@@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
          */
         size = PAGE_ALIGN(offset + size);
 
+        /*
+         * Try to reuse one of the static mapping whenever possible.
+         */
+        read_lock(&vmlist_lock);
+        for (area = vmlist; area; area = area->next) {
+                if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+                        break;
+                if (!(area->flags & VM_ARM_STATIC_MAPPING))
+                        continue;
+                if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+                        continue;
+                if (__phys_to_pfn(area->phys_addr) > pfn ||
+                    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+                        continue;
+                /* we can drop the lock here as we know *area is static */
+                read_unlock(&vmlist_lock);
+                addr = (unsigned long)area->addr;
+                addr += __pfn_to_phys(pfn) - area->phys_addr;
+                return (void __iomem *) (offset + addr);
+        }
+        read_unlock(&vmlist_lock);
+
+        /*
+         * Don't allow RAM to be mapped - this causes problems with ARMv6+
+         */
+        if (WARN_ON(pfn_valid(pfn)))
+                return NULL;
+
         area = get_vm_area_caller(size, VM_IOREMAP, caller);
         if (!area)
                 return NULL;
@@ -313,26 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
         void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
         struct vm_struct *vm;
 
-        /*
-         * If this is a section based mapping we need to handle it
-         * specially as the VM subsystem does not know how to handle
-         * such a beast.
-         */
         read_lock(&vmlist_lock);
         for (vm = vmlist; vm; vm = vm->next) {
-                if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
-                        if (vm->flags & VM_ARM_SECTION_MAPPING) {
-                                unmap_area_sections((unsigned long)vm->addr,
-                                                    vm->size);
-                        }
+                if (vm->addr > addr)
                         break;
+                if (!(vm->flags & VM_IOREMAP))
+                        continue;
+                /* If this is a static mapping we must leave it alone */
+                if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+                    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+                        read_unlock(&vmlist_lock);
+                        return;
                 }
+#ifndef CONFIG_SMP
+                /*
+                 * If this is a section based mapping we need to handle it
+                 * specially as the VM subsystem does not know how to handle
+                 * such a beast.
+                 */
+                if ((vm->addr == addr) &&
+                    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+                        unmap_area_sections((unsigned long)vm->addr, vm->size);
+                        break;
+                }
+#endif
         }
         read_unlock(&vmlist_lock);
-#endif
 
         vunmap(addr);
 }
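
As a worked illustration of the arithmetic in the new reuse path of __arm_ioremap_pfn_caller(), the following stand-alone sketch (plain user-space C with made-up numbers, not kernel code) performs an equivalent containment test and offset computation against one hypothetical static mapping.

```c
#include <stdio.h>

#define PAGE_SIZE 0x1000UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Invented static mapping: 1 MiB of device registers. */
static const unsigned long map_phys = 0x10000000UL;  /* area->phys_addr */
static const unsigned long map_virt = 0xfe000000UL;  /* (unsigned long)area->addr */
static const unsigned long map_size = 0x00100000UL;  /* area->size */

int main(void)
{
        unsigned long phys = 0x10002004UL;          /* physical address passed to ioremap() */
        unsigned long offset = phys & ~PAGE_MASK;   /* 0x004 */
        unsigned long base = phys & PAGE_MASK;      /* 0x10002000 */
        /* PAGE_ALIGN(offset + requested size), as done before the loop. */
        unsigned long size = (offset + 0x100 + PAGE_SIZE - 1) & PAGE_MASK;

        /* Equivalent of the patch's pfn-based containment test. */
        if (base >= map_phys && base + size - 1 <= map_phys + map_size - 1) {
                unsigned long addr = map_virt + (base - map_phys);
                printf("reuse static mapping -> %#lx\n", addr + offset); /* 0xfe002004 */
        } else {
                printf("no match -> allocate a new VM_IOREMAP area\n");
        }
        return 0;
}
```

In other words, when a static mapping covers the page-aligned request, the returned cookie is the static mapping's virtual address plus the physical offset of the request into it, so no new page tables are created and the later iounmap() leaves the static mapping untouched.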