author     Joonsoo Kim <js1304@gmail.com>  2013-04-29 18:07:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-29 18:54:34 -0400
commit     e81ce85f960c2e26efb5d0802d56c34533edb1bd (patch)
tree       57e1d4ac1704a6311383403f8d11d51bbbd78961 /mm/vmalloc.c
parent     c69480adeea15883d9459a8adc3da3f6e8cb7a8c (diff)
mm, vmalloc: iterate vmap_area_list, instead of vmlist in vread/vwrite()
Now that holding vmap_area_lock guarantees va->vm cannot be discarded, we can safely access va->vm while iterating vmap_area_list under that lock. Use this property to convert the vmlist iteration in vread()/vwrite() to an iteration over vmap_area_list.

There is a small difference in locking: vmlist_lock is a rwlock taken for reading, while vmap_area_lock is a spinlock, so concurrent vread()/vwrite() callers now spin instead of proceeding in parallel. But these are debug-oriented functions, so this overhead is not a real problem for the common case.

Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
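In pattern form, the conversion replaces a reader-locked walk of the vmlist singly linked list with a spinlocked walk of vmap_area_list. A minimal sketch of the new walk, as if added inside mm/vmalloc.c with its 2013-era internals (vmap_area_list, vmap_area_lock, VM_VM_AREA, struct vmap_area are the kernel's own symbols; walk_vmap_areas() itself is a hypothetical wrapper for illustration):

/* Hypothetical wrapper illustrating the locking rule this patch relies on:
 * while vmap_area_lock is held, no vmap_area can be freed, so va->vm may
 * be dereferenced safely for any area flagged VM_VM_AREA. */
static void walk_vmap_areas(void (*visit)(struct vm_struct *vm))
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		/* areas still being set up (or pure vmap ranges) have no vm */
		if (!(va->flags & VM_VM_AREA))
			continue;
		visit(va->vm);	/* safe: the lock pins va->vm */
	}
	spin_unlock(&vmap_area_lock);
}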
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 48
1 file changed, 32 insertions(+), 16 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1bf94ad452b6..59aa328007b2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2012,7 +2012,8 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 
 long vread(char *buf, char *addr, unsigned long count)
 {
-	struct vm_struct *tmp;
+	struct vmap_area *va;
+	struct vm_struct *vm;
 	char *vaddr, *buf_start = buf;
 	unsigned long buflen = count;
 	unsigned long n;
@@ -2021,10 +2022,17 @@ long vread(char *buf, char *addr, unsigned long count)
 	if ((unsigned long) addr + count < count)
 		count = -(unsigned long) addr;
 
-	read_lock(&vmlist_lock);
-	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-		vaddr = (char *) tmp->addr;
-		if (addr >= vaddr + tmp->size - PAGE_SIZE)
+	spin_lock(&vmap_area_lock);
+	list_for_each_entry(va, &vmap_area_list, list) {
+		if (!count)
+			break;
+
+		if (!(va->flags & VM_VM_AREA))
+			continue;
+
+		vm = va->vm;
+		vaddr = (char *) vm->addr;
+		if (addr >= vaddr + vm->size - PAGE_SIZE)
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -2034,10 +2042,10 @@ long vread(char *buf, char *addr, unsigned long count)
 			addr++;
 			count--;
 		}
-		n = vaddr + tmp->size - PAGE_SIZE - addr;
+		n = vaddr + vm->size - PAGE_SIZE - addr;
 		if (n > count)
 			n = count;
-		if (!(tmp->flags & VM_IOREMAP))
+		if (!(vm->flags & VM_IOREMAP))
 			aligned_vread(buf, addr, n);
 		else /* IOREMAP area is treated as memory hole */
 			memset(buf, 0, n);
@@ -2046,7 +2054,7 @@ long vread(char *buf, char *addr, unsigned long count)
 		count -= n;
 	}
 finished:
-	read_unlock(&vmlist_lock);
+	spin_unlock(&vmap_area_lock);
 
 	if (buf == buf_start)
 		return 0;
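For context, vread() is consumed by debug interfaces such as /proc/kcore. A hypothetical caller, just to show the contract (dump_vmalloc_range() and its error convention are illustrative, not part of this patch):

/* Hypothetical helper: read a vmalloc-space range into a kernel buffer.
 * Per the code above, vread() zero-fills holes and IOREMAP areas, and
 * returns 0 only when nothing in [addr, addr + count) was copied. */
static long dump_vmalloc_range(char *kbuf, char *addr, unsigned long count)
{
	long n = vread(kbuf, addr, count);

	return n ? n : -ENXIO;	/* illustrative error convention */
}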
@@ -2085,7 +2093,8 @@ finished:
 
 long vwrite(char *buf, char *addr, unsigned long count)
 {
-	struct vm_struct *tmp;
+	struct vmap_area *va;
+	struct vm_struct *vm;
 	char *vaddr;
 	unsigned long n, buflen;
 	int copied = 0;
@@ -2095,10 +2104,17 @@ long vwrite(char *buf, char *addr, unsigned long count)
 		count = -(unsigned long) addr;
 	buflen = count;
 
-	read_lock(&vmlist_lock);
-	for (tmp = vmlist; count && tmp; tmp = tmp->next) {
-		vaddr = (char *) tmp->addr;
-		if (addr >= vaddr + tmp->size - PAGE_SIZE)
+	spin_lock(&vmap_area_lock);
+	list_for_each_entry(va, &vmap_area_list, list) {
+		if (!count)
+			break;
+
+		if (!(va->flags & VM_VM_AREA))
+			continue;
+
+		vm = va->vm;
+		vaddr = (char *) vm->addr;
+		if (addr >= vaddr + vm->size - PAGE_SIZE)
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -2107,10 +2123,10 @@ long vwrite(char *buf, char *addr, unsigned long count)
 			addr++;
 			count--;
 		}
-		n = vaddr + tmp->size - PAGE_SIZE - addr;
+		n = vaddr + vm->size - PAGE_SIZE - addr;
 		if (n > count)
 			n = count;
-		if (!(tmp->flags & VM_IOREMAP)) {
+		if (!(vm->flags & VM_IOREMAP)) {
 			aligned_vwrite(buf, addr, n);
 			copied++;
 		}
@@ -2119,7 +2135,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
 		count -= n;
 	}
 finished:
-	read_unlock(&vmlist_lock);
+	spin_unlock(&vmap_area_lock);
 	if (!copied)
 		return 0;
 	return buflen;
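vwrite() is the symmetric path (historically exercised through the /dev/kmem write side). A matching hypothetical caller; note that with the rwlock gone, concurrent vread()/vwrite() callers now serialize on vmap_area_lock, the tradeoff the commit message accepts for these debug paths:

/* Hypothetical helper: scatter a kernel buffer over a vmalloc-space range.
 * Per the code above, vwrite() skips IOREMAP areas, and returns the full
 * buffer length if anything was copied, 0 otherwise. */
static long poke_vmalloc_range(char *kbuf, char *addr, unsigned long count)
{
	long n = vwrite(kbuf, addr, count);

	return n ? n : -ENXIO;	/* illustrative error convention */
}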