author | Joonsoo Kim <js1304@gmail.com> | 2013-04-29 18:07:30 -0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 18:54:33 -0400
commit | c69480adeea15883d9459a8adc3da3f6e8cb7a8c (patch) |
tree | 8c26561f7aad8c854ffa031886ebac1fe86cac5d |
parent | db3808c1bac64740b9d830fda92801ae65f1c851 (diff) |
mm, vmalloc: protect va->vm by vmap_area_lock
Inserting and removing an entry into vmlist takes linear time, which is
inefficient. Following patches will try to remove vmlist entirely.
This patch is a preparatory step toward that.

To remove vmlist, code that iterates over vmlist should be changed to
iterate over vmap_area_list instead. Before implementing that, we
should make sure that accessing va->vm while iterating vmap_area_list
does not cause a race condition. This patch ensures that when
iterating vmap_area_list, there is no race condition in accessing
vm_struct through va->vm.
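
For illustration only (not part of this patch): a minimal sketch of the
reader-side pattern the lock is meant to make safe. It assumes the code
sits in mm/vmalloc.c, where the static vmap_area_lock, vmap_area_list and
VM_VM_AREA are visible; the function name walk_vmap_areas_example is made up.

```c
/*
 * Sketch only: a vmap_area_list walker of the kind later patches add.
 * Holding vmap_area_lock across the walk guarantees that any va with
 * VM_VM_AREA set has a fully set up va->vm (setup_vmalloc_vm) and that
 * va->vm is not cleared underneath us (remove_vm_area).
 */
static void walk_vmap_areas_example(void)
{
	struct vmap_area *va;

	spin_lock(&vmap_area_lock);
	list_for_each_entry(va, &vmap_area_list, list) {
		struct vm_struct *vm;

		if (!(va->flags & VM_VM_AREA))
			continue;	/* no vm_struct attached */

		vm = va->vm;	/* safe: published/cleared under vmap_area_lock */
		pr_info("vmalloc area %p, size %lu\n", vm->addr, vm->size);
	}
	spin_unlock(&vmap_area_lock);
}
```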
Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | mm/vmalloc.c | 7
1 file changed, 7 insertions, 0 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1d9878b7cf52..1bf94ad452b6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1290,12 +1290,14 @@ struct vm_struct *vmlist;
 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
 			      unsigned long flags, const void *caller)
 {
+	spin_lock(&vmap_area_lock);
 	vm->flags = flags;
 	vm->addr = (void *)va->va_start;
 	vm->size = va->va_end - va->va_start;
 	vm->caller = caller;
 	va->vm = vm;
 	va->flags |= VM_VM_AREA;
+	spin_unlock(&vmap_area_lock);
 }
 
 static void insert_vmalloc_vmlist(struct vm_struct *vm)
@@ -1447,6 +1449,11 @@ struct vm_struct *remove_vm_area(const void *addr)
 	if (va && va->flags & VM_VM_AREA) {
 		struct vm_struct *vm = va->vm;
 
+		spin_lock(&vmap_area_lock);
+		va->vm = NULL;
+		va->flags &= ~VM_VM_AREA;
+		spin_unlock(&vmap_area_lock);
+
 		if (!(vm->flags & VM_UNLIST)) {
 			struct vm_struct *tmp, **p;
 			/*