author     Pengfei Li <lpf.vector@gmail.com>                    2019-09-23 18:36:39 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>       2019-09-24 18:54:10 -0400
commit     688fcbfc06e4fdfbb7e1d5a942a1460fe6379d2d (patch)
tree       59aed6b336daeb448f621b901a180f80841b58a7 /mm/vmalloc.c
parent     dd3b8353bae79395b12a178de057b183ff0122eb (diff)
mm/vmalloc: modify struct vmap_area to reduce its size
Objective
---------
The current implementation of struct vmap_area wastes space. After
applying this commit, sizeof(struct vmap_area) is reduced from 11 words
to 8 words.

Description
-----------
1) Pack "subtree_max_size", "vm" and "purge_list".
   This is safe because:
   A) "subtree_max_size" is only used when vmap_area is in the "free" tree
   B) "vm" is only used when vmap_area is in the "busy" tree
   C) "purge_list" is only used when vmap_area is in vmap_purge_list

2) Eliminate "flags".
   Since only one flag, VM_VM_AREA, is in use, and the same information
   can be obtained by checking whether "vm" is NULL, the "flags" field
   can be eliminated.

Link: http://lkml.kernel.org/r/20190716152656.12255-3-lpf.vector@gmail.com
Signed-off-by: Pengfei Li <lpf.vector@gmail.com>
Suggested-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Reviewed-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Cc: Hillf Danton <hdanton@sina.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sonymobile.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
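For illustration, here is a minimal sketch of the packed layout the message
describes. The structure definition itself lives in include/linux/vmalloc.h
and is not part of the mm/vmalloc.c diff below, so the comments and exact
field order are an approximation of the description, not the patch text
(kernel types come from <linux/rbtree.h>, <linux/list.h>, <linux/llist.h>):

        struct vmap_area {
                unsigned long va_start;
                unsigned long va_end;

                struct rb_node rb_node;         /* address-sorted rbtree node */
                struct list_head list;          /* address-sorted list entry */

                /*
                 * A vmap_area is only ever in one of three states, so the
                 * three per-state members can share one word-sized slot:
                 *   - in the "free" tree:  only subtree_max_size is needed
                 *   - in the "busy" tree:  only vm is needed
                 *   - in vmap_purge_list:  only purge_list is needed
                 */
                union {
                        unsigned long subtree_max_size; /* in "free" tree */
                        struct vm_struct *vm;           /* in "busy" tree */
                        struct llist_node purge_list;   /* in vmap_purge_list */
                };
        };

On 64-bit this is 8 words (2 for va_start/va_end, 3 for rb_node, 2 for list,
1 for the shared slot), versus 11 when subtree_max_size, vm, purge_list and
flags each occupied their own word, matching the sizeof() reduction stated
above.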
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--   mm/vmalloc.c   24
1 file changed, 10 insertions(+), 14 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d535ef125bda..f095843fc243 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -329,7 +329,6 @@ EXPORT_SYMBOL(vmalloc_to_pfn);
 #define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 #define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 
-#define VM_VM_AREA 0x04
 
 static DEFINE_SPINLOCK(vmap_area_lock);
 /* Export for kexec only */
@@ -1115,7 +1114,7 @@ retry:
 
         va->va_start = addr;
         va->va_end = addr + size;
-        va->flags = 0;
+        va->vm = NULL;
         insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
 
         spin_unlock(&vmap_area_lock);
@@ -1928,7 +1927,6 @@ void __init vmalloc_init(void)
                 if (WARN_ON_ONCE(!va))
                         continue;
 
-                va->flags = VM_VM_AREA;
                 va->va_start = (unsigned long)tmp->addr;
                 va->va_end = va->va_start + tmp->size;
                 va->vm = tmp;
@@ -2026,7 +2024,6 @@ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
         vm->size = va->va_end - va->va_start;
         vm->caller = caller;
         va->vm = vm;
-        va->flags |= VM_VM_AREA;
         spin_unlock(&vmap_area_lock);
 }
 
@@ -2131,10 +2128,10 @@ struct vm_struct *find_vm_area(const void *addr)
         struct vmap_area *va;
 
         va = find_vmap_area((unsigned long)addr);
-        if (va && va->flags & VM_VM_AREA)
-                return va->vm;
+        if (!va)
+                return NULL;
 
-        return NULL;
+        return va->vm;
 }
 
 /**
@@ -2155,11 +2152,10 @@ struct vm_struct *remove_vm_area(const void *addr)
 
         spin_lock(&vmap_area_lock);
         va = __find_vmap_area((unsigned long)addr);
-        if (va && va->flags & VM_VM_AREA) {
+        if (va && va->vm) {
                 struct vm_struct *vm = va->vm;
 
                 va->vm = NULL;
-                va->flags &= ~VM_VM_AREA;
                 spin_unlock(&vmap_area_lock);
 
                 kasan_free_shadow(vm);
@@ -2862,7 +2858,7 @@ long vread(char *buf, char *addr, unsigned long count)
                 if (!count)
                         break;
 
-                if (!(va->flags & VM_VM_AREA))
+                if (!va->vm)
                         continue;
 
                 vm = va->vm;
@@ -2942,7 +2938,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
                 if (!count)
                         break;
 
-                if (!(va->flags & VM_VM_AREA))
+                if (!va->vm)
                         continue;
 
                 vm = va->vm;
@@ -3485,10 +3481,10 @@ static int s_show(struct seq_file *m, void *p)
         va = list_entry(p, struct vmap_area, list);
 
         /*
-         * s_show can encounter race with remove_vm_area, !VM_VM_AREA on
-         * behalf of vmap area is being tear down or vm_map_ram allocation.
+         * s_show can encounter race with remove_vm_area, !vm on behalf
+         * of vmap area is being tear down or vm_map_ram allocation.
          */
-        if (!(va->flags & VM_VM_AREA)) {
+        if (!va->vm) {
                 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
                         (void *)va->va_start, (void *)va->va_end,
                         va->va_end - va->va_start);
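
Usage note: after this change, "is this vmap_area backed by a vm_struct?" is
answered by the vm pointer itself rather than a flag bit. A hypothetical
helper (illustration only; the patch open-codes the test and this name is
invented) would read:

        static inline bool va_has_vm(const struct vmap_area *va)
        {
                /* A non-NULL vm replaces the old (va->flags & VM_VM_AREA) test. */
                return va->vm != NULL;
        }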