author		Joonsoo Kim <js1304@gmail.com>	2013-04-29 18:07:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-04-29 18:54:33 -0400
commit		ef93247325028a35e089f3012c270379a89d052c (patch)
tree		d468969e31fe3a07a319c038771c220381a5cf4a /arch
parent		7136851117744f1d291bed6d307432699d405109 (diff)
mm, vmalloc: change iterating a vmlist to find_vm_area()
This patchset removes vm_struct list management after vmalloc is initialized. Adding and removing an entry to the vmlist takes linear time, so it is inefficient: as long as we maintain this list, the overall time complexity of adding and removing an area to vmalloc space is O(N), even though we use an rbtree to find a vacant place, which is only O(log N).

Also, vmlist and vmlist_lock are used in many places outside of vmalloc.c. It is preferable to hide these raw data structures and provide well-defined functions for working with them, because that prevents callers from misusing the structures and makes the vmalloc layer easier to maintain.

For kexec and makedumpfile, vmap_area_list is exported instead of vmlist. This follows Atsushi's recommendation; for more information, please refer to the link below.

https://lkml.org/lkml/2012/12/6/184

This patch:

The purpose of iterating the vmlist is to find the vm area covering a specific virtual address. find_vm_area() is provided for exactly this purpose and is more efficient because it uses an rbtree, so switch to it.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Guan Xuetao <gxt@mprc.pku.edu.cn>
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Chris Metcalf <cmetcalf@tilera.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch')
-rw-r--r--	arch/tile/mm/pgtable.c		7
-rw-r--r--	arch/unicore32/mm/ioremap.c	17
-rw-r--r--	arch/x86/mm/ioremap.c		7
3 files changed, 7 insertions, 24 deletions
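The pattern the patch applies is the same in all three files: an open-coded walk of the global vmlist under vmlist_lock, whose only purpose is to find the vm_struct covering a given virtual address, is replaced by a single find_vm_area() call, which does the lookup in the rbtree that vmalloc already maintains. A rough before/after sketch of that lookup (kernel-internal API from <linux/vmalloc.h>; the helper names lookup_area_old/lookup_area_new are illustrative, not part of the patch, and the snippet is not a standalone program):

#include <linux/vmalloc.h>	/* struct vm_struct, find_vm_area() */

/* Before: O(N) scan of the global vmlist, serialized by vmlist_lock. */
static struct vm_struct *lookup_area_old(void *addr)
{
	struct vm_struct *p;

	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	return p;	/* NULL if addr does not belong to any vm area */
}

/* After: O(log N) lookup in the rbtree maintained by vmalloc itself. */
static struct vm_struct *lookup_area_new(void *addr)
{
	return find_vm_area(addr);
}

Besides being faster, the converted callers no longer touch vmlist or vmlist_lock at all, which is what later patches in the series rely on to remove the list entirely.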
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index b3b4972c2451..dfd63ce87327 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -592,12 +592,7 @@ void iounmap(volatile void __iomem *addr_in)
 	   in parallel. Reuse of the virtual address is prevented by
 	   leaving it in the global lists until we're done with it.
 	   cpa takes care of the direct mappings. */
-	read_lock(&vmlist_lock);
-	for (p = vmlist; p; p = p->next) {
-		if (p->addr == addr)
-			break;
-	}
-	read_unlock(&vmlist_lock);
+	p = find_vm_area((void *)addr);
 
 	if (!p) {
 		pr_err("iounmap: bad address %p\n", addr);
diff --git a/arch/unicore32/mm/ioremap.c b/arch/unicore32/mm/ioremap.c
index b7a605597b08..13068ee22f33 100644
--- a/arch/unicore32/mm/ioremap.c
+++ b/arch/unicore32/mm/ioremap.c
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(__uc32_ioremap_cached);
 void __uc32_iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct **p, *tmp;
+	struct vm_struct *vm;
 
 	/*
 	 * If this is a section based mapping we need to handle it
@@ -244,17 +244,10 @@ void __uc32_iounmap(volatile void __iomem *io_addr)
 	 * all the mappings before the area can be reclaimed
 	 * by someone else.
 	 */
-	write_lock(&vmlist_lock);
-	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
-		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
-			if (tmp->flags & VM_UNICORE_SECTION_MAPPING) {
-				unmap_area_sections((unsigned long)tmp->addr,
-						    tmp->size);
-			}
-			break;
-		}
-	}
-	write_unlock(&vmlist_lock);
+	vm = find_vm_area(addr);
+	if (vm && (vm->flags & VM_IOREMAP) &&
+	    (vm->flags & VM_UNICORE_SECTION_MAPPING))
+		unmap_area_sections((unsigned long)vm->addr, vm->size);
 
 	vunmap(addr);
 }
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 78fe3f1ac49f..9a1e6583910c 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -282,12 +282,7 @@ void iounmap(volatile void __iomem *addr)
 	   in parallel. Reuse of the virtual address is prevented by
 	   leaving it in the global lists until we're done with it.
 	   cpa takes care of the direct mappings. */
-	read_lock(&vmlist_lock);
-	for (p = vmlist; p; p = p->next) {
-		if (p->addr == (void __force *)addr)
-			break;
-	}
-	read_unlock(&vmlist_lock);
+	p = find_vm_area((void __force *)addr);
 
 	if (!p) {
 		printk(KERN_ERR "iounmap: bad address %p\n", addr);