author | Joonsoo Kim <js1304@gmail.com> | 2013-04-29 18:07:34 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 18:54:34 -0400
commit | f98782ddd31ac6f938386b79d8bd7aa7c8a78c50 (patch)
tree | 7e4b2133cdc46edb611a1879c86e0bd4f05df753 /mm/vmalloc.c
parent | e81ce85f960c2e26efb5d0802d56c34533edb1bd (diff)
mm, vmalloc: iterate vmap_area_list in get_vmalloc_info()
This patch is a preparatory step for removing vmlist entirely. To that
end, we change the code that iterates over vmlist to iterate over
vmap_area_list instead. It is a fairly trivial change, but one thing
should be noted.
vmlist lacks information about some areas in the vmalloc address space.
For example, vm_map_ram() allocates an area in the vmalloc address space,
but it does not link that area into vmlist. Since providing full
information about the vmalloc address space is the better approach, we do
not use va->vm and instead use the vmap_area directly. This makes
get_vmalloc_info() more precise.
Signed-off-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Dave Anderson <anderson@redhat.com>
Cc: Eric Biederman <ebiederm@xmission.com>
Cc: Guan Xuetao <gxt@mprc.pku.edu.cn>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 56 |
1 file changed, 30 insertions(+), 26 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 59aa328007b2..aee1f61727a3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2671,46 +2671,50 @@ module_init(proc_vmalloc_init);
 
 void get_vmalloc_info(struct vmalloc_info *vmi)
 {
-	struct vm_struct *vma;
+	struct vmap_area *va;
 	unsigned long free_area_size;
 	unsigned long prev_end;
 
 	vmi->used = 0;
+	vmi->largest_chunk = 0;
 
-	if (!vmlist) {
-		vmi->largest_chunk = VMALLOC_TOTAL;
-	} else {
-		vmi->largest_chunk = 0;
+	prev_end = VMALLOC_START;
 
-		prev_end = VMALLOC_START;
-
-		read_lock(&vmlist_lock);
+	spin_lock(&vmap_area_lock);
 
-		for (vma = vmlist; vma; vma = vma->next) {
-			unsigned long addr = (unsigned long) vma->addr;
+	if (list_empty(&vmap_area_list)) {
+		vmi->largest_chunk = VMALLOC_TOTAL;
+		goto out;
+	}
 
-			/*
-			 * Some archs keep another range for modules in vmlist
-			 */
-			if (addr < VMALLOC_START)
-				continue;
-			if (addr >= VMALLOC_END)
-				break;
+	list_for_each_entry(va, &vmap_area_list, list) {
+		unsigned long addr = va->va_start;
 
-			vmi->used += vma->size;
+		/*
+		 * Some archs keep another range for modules in vmalloc space
+		 */
+		if (addr < VMALLOC_START)
+			continue;
+		if (addr >= VMALLOC_END)
+			break;
 
-			free_area_size = addr - prev_end;
-			if (vmi->largest_chunk < free_area_size)
-				vmi->largest_chunk = free_area_size;
+		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
+			continue;
 
-			prev_end = vma->size + addr;
-		}
+		vmi->used += (va->va_end - va->va_start);
 
-		if (VMALLOC_END - prev_end > vmi->largest_chunk)
-			vmi->largest_chunk = VMALLOC_END - prev_end;
+		free_area_size = addr - prev_end;
+		if (vmi->largest_chunk < free_area_size)
+			vmi->largest_chunk = free_area_size;
 
-		read_unlock(&vmlist_lock);
+		prev_end = va->va_end;
 	}
+
+	if (VMALLOC_END - prev_end > vmi->largest_chunk)
+		vmi->largest_chunk = VMALLOC_END - prev_end;
+
+out:
+	spin_unlock(&vmap_area_lock);
 }
 #endif
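For reference, the gap scan that the reworked get_vmalloc_info() performs over vmap_area_list can be illustrated outside the kernel. The following is a minimal userspace sketch, not kernel code: struct range, the example addresses, and the VMALLOC_START/VMALLOC_END values are invented for illustration, and the VM_LAZY_FREE/VM_LAZY_FREEING check is omitted since those flags only exist in the kernel.

```c
/*
 * Userspace sketch of the scan get_vmalloc_info() performs over
 * vmap_area_list: walk allocated ranges in address order, sum their
 * sizes, and track the largest free gap between them.
 */
#include <stdio.h>

#define VMALLOC_START 0x1000UL   /* made-up bounds for illustration */
#define VMALLOC_END   0x10000UL

struct range {                   /* stand-in for struct vmap_area */
	unsigned long va_start;
	unsigned long va_end;
};

int main(void)
{
	/* hypothetical allocated areas, sorted by address like vmap_area_list */
	struct range areas[] = {
		{ 0x1000, 0x3000 },
		{ 0x4000, 0x5000 },
		{ 0x9000, 0xa000 },
	};
	unsigned long used = 0, largest_chunk = 0;
	unsigned long prev_end = VMALLOC_START;
	size_t i, n = sizeof(areas) / sizeof(areas[0]);

	for (i = 0; i < n; i++) {
		unsigned long addr = areas[i].va_start;

		if (addr < VMALLOC_START)   /* e.g. module space on some archs */
			continue;
		if (addr >= VMALLOC_END)
			break;

		used += areas[i].va_end - areas[i].va_start;

		/* free gap between the previous area and this one */
		if (addr - prev_end > largest_chunk)
			largest_chunk = addr - prev_end;

		prev_end = areas[i].va_end;
	}

	/* trailing gap up to VMALLOC_END */
	if (VMALLOC_END - prev_end > largest_chunk)
		largest_chunk = VMALLOC_END - prev_end;

	printf("used = %#lx, largest_chunk = %#lx\n", used, largest_chunk);
	return 0;
}
```

For the example ranges this prints used = 0x4000 and largest_chunk = 0x6000; in the kernel of this era, these two values are what /proc/meminfo reports as VmallocUsed and VmallocChunk.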