author     Nicolas Pitre <nicolas.pitre@linaro.org>   2011-09-15 22:12:19 -0400
committer  Nicolas Pitre <nico@fluxnic.net>           2011-11-26 19:21:27 -0500
commit     6ee723a6570a897208b76ab3e9a495e9106b2f8c
tree       6822cd8b588c789d5e71229f290a6e7b6e32e448
parent     0536bdf33faff4d940ac094c77998cfac368cfff
ARM: simplify __iounmap() when dealing with section based mapping
Firstly, there is no need to have a double pointer here as we're only
walking the vmlist and not modifying it.
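For readers outside the kernel, the distinction is the usual one: a pointer-to-pointer walk only pays off when the loop may unlink an entry, because it keeps hold of the link being rewritten; a read-only lookup needs nothing more than a plain pointer. Below is a minimal user-space sketch of the two styles; the names (`struct item`, `items`, `find_item`, `remove_item`) are hypothetical and only mirror the shape of the vmlist walk, they are not kernel code.

```c
#include <stddef.h>

struct item {
	struct item *next;
	int key;
};

static struct item *items;	/* head of a singly linked list */

/* Read-only lookup: a plain pointer is all the walk needs. */
static struct item *find_item(int key)
{
	struct item *it;

	for (it = items; it; it = it->next)
		if (it->key == key)
			return it;
	return NULL;
}

/*
 * Removal is the case that actually wants a pointer-to-pointer:
 * *p is the link that points at the current entry, so unlinking
 * is a single store through it.
 */
static void remove_item(int key)
{
	struct item **p, *tmp;

	for (p = &items; (tmp = *p) != NULL; p = &tmp->next) {
		if (tmp->key == key) {
			*p = tmp->next;	/* rewrite the incoming link */
			break;
		}
	}
}
```

Since __iounmap() only looks the area up and never unlinks it from vmlist, the second form buys nothing there.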
Secondly, for the same reason, we don't need a write lock but only a
read lock here, since the lock only protects the coherency of the list,
nothing else.
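The locking point can be illustrated the same way: a reader/writer lock lets any number of readers share the lock while a writer takes it exclusively, and a loop that only traverses the list belongs on the shared side. Here is a minimal sketch, assuming POSIX pthreads rather than the kernel's rwlock_t and vmlist_lock, and reusing the hypothetical `struct item` list from the sketch above.

```c
#include <pthread.h>

static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Reader: walks the list without modifying it, so the lock is shared. */
static void walk_items(void (*visit)(struct item *))
{
	struct item *it;

	pthread_rwlock_rdlock(&list_lock);	/* shared */
	for (it = items; it; it = it->next)
		visit(it);
	pthread_rwlock_unlock(&list_lock);
}

/* Writer: changes the list structure, so it must exclude all readers. */
static void push_item(struct item *new_item)
{
	pthread_rwlock_wrlock(&list_lock);	/* exclusive */
	new_item->next = items;
	items = new_item;
	pthread_rwlock_unlock(&list_lock);
}
```

In the patch, unmap_area_sections() tears down the section mappings rather than touching vmlist itself, which is exactly the "coherency of the list, nothing else" argument above.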
Lastly, the reason for holding a lock is not what the comment says, so
let's remove that misleading piece of information.
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
-rw-r--r--  arch/arm/mm/ioremap.c | 20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index bdb248c4f55c..bc7d9bd766d1 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -314,26 +314,24 @@ void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
 #ifndef CONFIG_SMP
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
 	 * specially as the VM subsystem does not know how to handle
-	 * such a beast. We need the lock here b/c we need to clear
-	 * all the mappings before the area can be reclaimed
-	 * by someone else.
+	 * such a beast.
 	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
+	read_lock(&vmlist_lock);
+	for (vm = vmlist; vm; vm = vm->next) {
+		if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) {
+			if (vm->flags & VM_ARM_SECTION_MAPPING) {
+				unmap_area_sections((unsigned long)vm->addr,
+						    vm->size);
 			}
 			break;
 		}
 	}
-	write_unlock(&vmlist_lock);
+	read_unlock(&vmlist_lock);
 #endif
 
 	vunmap(addr);