author    Eric Dumazet <dada1@cosmosbay.com>     2006-11-10 15:27:48 -0500
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-11-13 10:40:42 -0500
commit    2b4ac44e7c7e16cf9411b81693ff3e604f332bf1 (patch)
tree      af167ed7cf9e76f7b155d1af53a62c5d9c3b03ba
parent    088406bcf66d6c7fd8a5c04c00aa410ae9077403 (diff)
[PATCH] vmalloc: optimization, cleanup, bugfixes
- reorder 'struct vm_struct' to speed up lookups on CPUs with small
  cache lines: the fields 'next', 'addr' and 'size' now sit in the
  same cache line.

- one minor cleanup in __get_vm_area_node()

- bugfixes in vmalloc_user() and vmalloc_32_user(): NULL returns from
  __vmalloc() and __find_vm_area() were not tested.

[akpm@osdl.org: remove redundant BUG_ONs]
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/vmalloc.h |  3
-rw-r--r--  mm/vmalloc.c            | 26
2 files changed, 15 insertions(+), 14 deletions(-)
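The cache-line reasoning behind the first bullet is easiest to see in the pointer chase that vmalloc-area lookups perform. Below is a minimal, self-contained sketch of such a walk, modeled on (but not identical to) __find_vm_area() in mm/vmalloc.c; locking and the cold fields are omitted, and find_area() is a hypothetical name. Each iteration reads exactly 'next', 'addr' and 'size', so once they share a cache line a single miss per node fetches everything the loop needs.

#include <stddef.h>

/* field order after this patch: the three "hot" members first */
struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	/* remaining members (flags, pages, ...) are cold for lookups */
};

/* hypothetical helper, not the kernel function: return the area
 * containing p, or NULL */
static struct vm_struct *find_area(struct vm_struct *vmlist, void *p)
{
	struct vm_struct *tmp;

	for (tmp = vmlist; tmp; tmp = tmp->next) {
		/* touches only next, addr and size */
		if ((char *)p >= (char *)tmp->addr &&
		    (char *)p <  (char *)tmp->addr + tmp->size)
			return tmp;
	}
	return NULL;
}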
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index dc9a29d84abc..924e502905d4 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -23,13 +23,14 @@ struct vm_area_struct;
 #endif
 
 struct vm_struct {
+	/* keep next,addr,size together to speedup lookups */
+	struct vm_struct	*next;
 	void			*addr;
 	unsigned long		size;
 	unsigned long		flags;
 	struct page		**pages;
 	unsigned int		nr_pages;
 	unsigned long		phys_addr;
-	struct vm_struct	*next;
 };
 
 /*
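To convince yourself of the resulting layout, a throwaway userspace program can print the new offsets ('struct page' is only forward-declared here so no kernel header is needed; exact sizes are architecture-dependent). On a typical 64-bit build the three hot fields occupy bytes 0-23, within a single cache line even at a 32-byte line size.

#include <stdio.h>
#include <stddef.h>

struct page;	/* opaque: we only store pointers to it */

struct vm_struct {	/* reordered layout from this patch */
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	unsigned long		phys_addr;
};

int main(void)
{
	printf("next=%zu addr=%zu size=%zu (sizeof=%zu)\n",
	       offsetof(struct vm_struct, next),
	       offsetof(struct vm_struct, addr),
	       offsetof(struct vm_struct, size),
	       sizeof(struct vm_struct));
	return 0;
}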
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 46606c133e82..7dc6aa745166 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -186,10 +186,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long fl
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
+	if (unlikely(!size))
 		return NULL;
-	}
 
 	/*
 	 * We always allocate a guard page.
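For context on the !size test: earlier in __get_vm_area_node() (outside this hunk) the size argument goes through PAGE_ALIGN(), so the test rejects both zero-length requests and huge ones whose alignment wraps to zero. A userspace sketch of that behaviour, assuming 4 KiB pages and the kernel's PAGE_ALIGN definition:

#include <stdio.h>

#define PAGE_SIZE	4096UL	/* assumption: 4 KiB pages */
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long sizes[] = { 0, 1, PAGE_SIZE, ~0UL };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long s = PAGE_ALIGN(sizes[i]);
		printf("request %-20lu -> aligned %lu%s\n", sizes[i], s,
		       s ? "" : "  (rejected by the !size test)");
	}
	return 0;
}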
@@ -532,11 +530,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
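The new NULL test matters because __vmalloc() returns NULL on allocation failure; before this fix, __find_vm_area(ret) then returned NULL and 'area->flags |= VM_USERMAP' dereferenced it under vmlist_lock. An illustrative, hypothetical driver-style caller follows (not part of this patch): VM_USERMAP is the flag that remap_vmalloc_range() later checks before mapping the buffer into userspace.

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/fs.h>

static void *example_buf;	/* hypothetical driver state */

static int example_alloc(unsigned long size)
{
	/* zeroed pages, area marked VM_USERMAP */
	example_buf = vmalloc_user(size);
	if (!example_buf)	/* NULL handled, mirroring the fix above */
		return -ENOMEM;
	return 0;
}

/* mmap handler: remap_vmalloc_range() refuses areas lacking VM_USERMAP */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, 0);
}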
@@ -605,11 +604,12 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);