author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2009-01-25 12:36:34 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-01-25 12:36:34 -0500
commit		24f11ec001920f1cfaeeed8e8b55725d900bbb56
tree		bb7a1007374c9ce9f60962c22f8bccb31bd6cb44
parent		fb22d72782b023cda5e9876d3381f30932a64f91
[ARM] fix section-based ioremap
Tomi Valkeinen reports:

  Running with latest linux-omap kernel on OMAP3 SDP board, I have a
  problem with iounmap(). It looks like iounmap() does not properly
  free large areas. Below is a test which fails for me in 6-7 loops.

	for (i = 0; i < 200; ++i) {
		vaddr = ioremap(paddr, size);
		if (!vaddr) {
			printk("couldn't ioremap\n");
			break;
		}
		iounmap(vaddr);
	}

The changes to vmalloc.c weren't reflected in the ARM ioremap
implementation.  Turns out the fix is rather simple.

Tested-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
Tested-by: Matt Gerassimoff <mgeras@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
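For illustration only (not part of the commit message): a minimal user-space
sketch of the masking arithmetic behind the one-line change in
unmap_area_sections(). The size value is a hypothetical example of a mapping
size that is not an exact 1MB multiple, e.g. a 1MB mapping plus a 4kB guard
page.

	#include <stdio.h>

	#define SZ_1M	0x00100000UL

	int main(void)
	{
		unsigned long size = SZ_1M + 0x1000UL;	/* hypothetical: 1MB + 4kB */

		/* Old mask: ~SZ_1M only clears bit 20, so the sub-1MB
		 * remainder survives and the computed end address is wrong. */
		printf("size & ~SZ_1M       = %#lx\n", size & ~SZ_1M);		/* 0x1000 */

		/* New mask: ~(SZ_1M - 1) rounds the size down to a whole
		 * number of 1MB sections, which is what the unmap loop expects. */
		printf("size & ~(SZ_1M - 1) = %#lx\n", size & ~(SZ_1M - 1));	/* 0x100000 */

		return 0;
	}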
 arch/arm/mm/ioremap.c | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 18373f73f2fc..9f88dd3be601 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -138,7 +138,7 @@ void __check_kvm_seq(struct mm_struct *mm)
  */
 static void unmap_area_sections(unsigned long virt, unsigned long size)
 {
-	unsigned long addr = virt, end = virt + (size & ~SZ_1M);
+	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
 	pgd_t *pgd;
 
 	flush_cache_vunmap(addr, end);
@@ -337,10 +337,7 @@ void __iounmap(volatile void __iomem *io_addr)
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
 	struct vm_struct **p, *tmp;
-#endif
-	unsigned int section_mapping = 0;
 
-#ifndef CONFIG_SMP
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
@@ -352,11 +349,8 @@ void __iounmap(volatile void __iomem *io_addr)
 	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
 		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
 			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				*p = tmp->next;
 				unmap_area_sections((unsigned long)tmp->addr,
 						    tmp->size);
-				kfree(tmp);
-				section_mapping = 1;
 			}
 			break;
 		}
@@ -364,7 +358,6 @@ void __iounmap(volatile void __iomem *io_addr)
 	write_unlock(&vmlist_lock);
 #endif
 
-	if (!section_mapping)
-		vunmap(addr);
+	vunmap(addr);
 }
 EXPORT_SYMBOL(__iounmap);
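For context, a condensed sketch of the non-SMP __iounmap() path as it looks
after this patch, reconstructed from the hunks above. Lines not shown in the
diff are abridged, and write_lock(&vmlist_lock) is assumed to pair with the
write_unlock() visible in the last hunk. The section branch now only tears
down the section page tables; the vm_struct is unlinked and freed by the
unconditional vunmap(), matching the updated vmalloc.c behaviour.

	void __iounmap(volatile void __iomem *io_addr)
	{
		void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	#ifndef CONFIG_SMP
		struct vm_struct **p, *tmp;

		/* Section mappings must be torn down by hand; the generic VM
		 * code does not know about them.  The locking call below is
		 * assumed, paired with the write_unlock() shown in the diff. */
		write_lock(&vmlist_lock);
		for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
			if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
				if (tmp->flags & VM_ARM_SECTION_MAPPING) {
					unmap_area_sections((unsigned long)tmp->addr,
							    tmp->size);
				}
				break;
			}
		}
		write_unlock(&vmlist_lock);
	#endif

		/* Always let vunmap() release the area; it now owns the
		 * vm_struct bookkeeping that the old code freed by hand. */
		vunmap(addr);
	}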