author		Nicolas Pitre <nicolas.pitre@linaro.org>	2011-09-16 01:14:23 -0400
committer	Nicolas Pitre <nico@fluxnic.net>	2011-11-26 19:21:28 -0500
commit		576d2f2525612ecb5af029a76f21f22a3b82563d (patch)
tree		81f9564c432ceeb4068dd3a5de204134a32c98f3 /arch
parent		6ee723a6570a897208b76ab3e9a495e9106b2f8c (diff)
ARM: add generic ioremap optimization by reusing static mappings
Now that we have all the static mappings from iotable_init() located
in the vmalloc area, it is trivial to optimize ioremap by reusing those
static mappings when the requested physical area fits in one of them,
and so in a generic way for all platforms.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Tested-by: Jamie Iles <jamie@jamieiles.com>
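The heart of the optimization is a physical-range containment check: a request for [phys, phys + size) can be served by a static mapping only when that span lies entirely inside the mapping and the memory types agree. Below is a minimal user-space sketch of the arithmetic; the struct and helper names are stand-ins for the kernel's vm_struct walk, not kernel API.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint32_t phys_addr_t;	/* non-LPAE ARM: 32-bit physical addresses */

#define PAGE_SHIFT	12
#define __pfn_to_phys(pfn)	((phys_addr_t)(pfn) << PAGE_SHIFT)
#define __phys_to_pfn(pa)	((unsigned long)((pa) >> PAGE_SHIFT))

/* Stand-in for the vm_struct fields the reuse loop consults. */
struct static_map {
	unsigned long	virt;	/* area->addr      */
	phys_addr_t	phys;	/* area->phys_addr */
	size_t		size;	/* area->size      */
};

/*
 * The same test as in the patch: the request must start at or after
 * the mapping's base, and its last byte must not pass the mapping's
 * last byte (comparing last bytes avoids overflow when a mapping ends
 * at the top of the physical address space).
 */
static bool covers(const struct static_map *m, unsigned long pfn, size_t size)
{
	return __phys_to_pfn(m->phys) <= pfn &&
	       __pfn_to_phys(pfn) + size - 1 <= m->phys + m->size - 1;
}

/* On a hit, ioremap() degenerates to base-plus-offset arithmetic. */
static unsigned long reuse(const struct static_map *m, unsigned long pfn)
{
	return m->virt + (__pfn_to_phys(pfn) - m->phys);
}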
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/mm/ioremap.c	72
-rw-r--r--	arch/arm/mm/mm.h	14
-rw-r--r--	arch/arm/mm/mmu.c	3
3 files changed, 64 insertions, 25 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bc7d9bd766d1..12c7ad215ce7 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -36,12 +36,6 @@
 #include <asm/mach/map.h>
 #include "mm.h"
 
-/*
- * Used by ioremap() and iounmap() code to mark (super)section-mapped
- * I/O regions in vm_struct->flags field.
- */
-#define VM_ARM_SECTION_MAPPING	0x80000000
-
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
-	/*
-	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
-	 */
-	if (WARN_ON(pfn_valid(pfn)))
-		return NULL;
-
 	type = get_mem_type(mtype);
 	if (!type)
 		return NULL;
@@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	 */
 	size = PAGE_ALIGN(offset + size);
 
+	/*
+	 * Try to reuse one of the static mapping whenever possible.
+	 */
+	read_lock(&vmlist_lock);
+	for (area = vmlist; area; area = area->next) {
+		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
+			break;
+		if (!(area->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+		if (__phys_to_pfn(area->phys_addr) > pfn ||
+		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
+			continue;
+		/* we can drop the lock here as we know *area is static */
+		read_unlock(&vmlist_lock);
+		addr = (unsigned long)area->addr;
+		addr += __pfn_to_phys(pfn) - area->phys_addr;
+		return (void __iomem *) (offset + addr);
+	}
+	read_unlock(&vmlist_lock);
+
+	/*
+	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 */
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
+
 	area = get_vm_area_caller(size, VM_IOREMAP, caller);
 	if (!area)
 		return NULL;
@@ -313,26 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-#ifndef CONFIG_SMP
 	struct vm_struct *vm;
 
-	/*
-	 * If this is a section based mapping we need to handle it
-	 * specially as the VM subsystem does not know how to handle
-	 * such a beast.
-	 */
 	read_lock(&vmlist_lock);
 	for (vm = vmlist; vm; vm = vm->next) {
-		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
-			if (vm->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)vm->addr,
-						    vm->size);
-			}
+		if (vm->addr > addr)
 			break;
+		if (!(vm->flags & VM_IOREMAP))
+			continue;
+		/* If this is a static mapping we must leave it alone */
+		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
+		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
+			read_unlock(&vmlist_lock);
+			return;
 		}
+#ifndef CONFIG_SMP
+		/*
+		 * If this is a section based mapping we need to handle it
+		 * specially as the VM subsystem does not know how to handle
+		 * such a beast.
+		 */
+		if ((vm->addr == addr) &&
+		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+			unmap_area_sections((unsigned long)vm->addr, vm->size);
+			break;
+		}
+#endif
 	}
 	read_unlock(&vmlist_lock);
-#endif
 
 	vunmap(addr);
 }
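Two details of the new __iounmap() walk are easy to miss: the early break is only correct because the kernel keeps vmlist sorted by ascending virtual address, and a hit inside a VM_ARM_STATIC_MAPPING region returns without ever reaching vunmap(). Here is a compressed sketch of that decision flow, with the locked vmlist walk reduced to a hypothetical array scan (the stub struct and names are illustrative, not kernel API):

#include <stddef.h>

#define VM_IOREMAP		0x00000001	/* from linux/vmalloc.h */
#define VM_ARM_STATIC_MAPPING	0x40000000	/* from arch/arm/mm/mm.h */

/* Minimal stand-in for the vm_struct fields this walk looks at. */
struct vm_area_stub {
	char		*addr;
	size_t		size;
	unsigned long	flags;
};

enum unmap_action { DO_VUNMAP, LEAVE_ALONE };

/*
 * Same decision flow as the rewritten __iounmap(): the list is sorted
 * by ascending virtual address, so once vm->addr passes the target we
 * can stop looking; a hit inside a static mapping means "do nothing".
 */
static enum unmap_action classify(char *addr,
				  const struct vm_area_stub *vms, int n)
{
	for (int i = 0; i < n; i++) {
		const struct vm_area_stub *vm = &vms[i];

		if (vm->addr > addr)		/* sorted list: no later match */
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    vm->addr <= addr && vm->addr + vm->size > addr)
			return LEAVE_ALONE;	/* permanent mapping: keep it */
	}
	return DO_VUNMAP;			/* dynamic ioremap(): vunmap it */
}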
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index ad7cce3bc431..70f6d3ea4834 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type);
 
 extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
 
+/*
+ * ARM specific vm_struct->flags bits.
+ */
+
+/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
+#define VM_ARM_SECTION_MAPPING	0x80000000
+
+/* permanent static mappings from iotable_init() */
+#define VM_ARM_STATIC_MAPPING	0x40000000
+
+/* mapping type (attributes) for permanent static mappings */
+#define VM_ARM_MTYPE(mt)	((mt) << 20)
+#define VM_ARM_MTYPE_MASK	(0x1f << 20)
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
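The new flag layout packs the ARM memory type into an otherwise unused slice of vm_struct->flags: bits 20-24 hold the mem_type index (room for 32 types), while bits 30 and 31 carry the two ARM-private markers, all clear of the generic VM_* flags in the low bits. A small stand-alone round-trip check of that encoding (MT_DEVICE_CACHED = 2 is taken from asm/io.h):

#include <assert.h>

#define VM_ARM_STATIC_MAPPING	0x40000000
#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1f << 20)

#define MT_DEVICE_CACHED	2	/* from asm/io.h */

int main(void)
{
	unsigned long flags = VM_ARM_STATIC_MAPPING |
			      VM_ARM_MTYPE(MT_DEVICE_CACHED);

	/* decode: mask out the 5-bit type field, shift it back down */
	unsigned int mt = (flags & VM_ARM_MTYPE_MASK) >> 20;

	assert(mt == MT_DEVICE_CACHED);
	/* this is exactly the comparison the ioremap() reuse loop performs */
	assert((flags & VM_ARM_MTYPE_MASK) == VM_ARM_MTYPE(MT_DEVICE_CACHED));
	return 0;
}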
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c61481577ae1..27e366af67f9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -749,7 +749,8 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
-		vm->flags = VM_IOREMAP;
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
 		vm_area_add_early(vm++);
 	}
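For context, these vm_struct fields are filled from a platform's map_desc table, registered from its ->map_io hook. A hypothetical board file might set up one such mapping as below; the names and addresses are illustrative only, not taken from any real platform. After boot, an ioremap() of any subrange of this region with a matching memory type is then satisfied from the static mapping, with no new page-table setup.

#include <linux/init.h>
#include <linux/kernel.h>	/* ARRAY_SIZE */
#include <asm/mach/map.h>	/* struct map_desc, iotable_init(), MT_DEVICE */

/* Hypothetical SoC register window: 1MB of device registers. */
static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual	= 0xfe000000UL,			/* fixed virtual base */
		.pfn		= __phys_to_pfn(0x10000000),	/* physical base */
		.length		= 0x00100000,			/* 1MB, section-sized */
		.type		= MT_DEVICE,
	},
};

static void __init board_map_io(void)
{
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}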