author     Roman Pen <r.peniaev@gmail.com>                2015-04-15 19:13:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2015-04-15 19:35:18 -0400
commit     cf725ce274ba026e132c225cb8e5b61973c63403 (patch)
tree       f0fb230b8973a3bbc9b65341563b84d1f199f96e
parent     68ac546f265ba36cd4f29c77b3841fb777315581 (diff)
mm/vmalloc: occupy newly allocated vmap block just after allocation
The previous implementation allocates a new vmap block and then repeats the search for a free block from the very beginning, iterating over the CPU free list. Why can occupying the new block right away be better?

1. The allocation can happen on one CPU, but the search can then run on another CPU. In the worst case we preallocate as many vmap blocks as there are CPUs in the system.

2. In the previous patch I added the newly allocated block to the tail of the free list, to avoid early exhaustion of virtual space and to give blocks allocated long ago a chance to be occupied first. Thus, to find the newly allocated block, the whole search sequence has to be repeated, which is not efficient.

With this patch the newly allocated block is occupied right away and the virtual address is returned to the caller, so there is no need to repeat the search sequence: the allocation job is done.

Signed-off-by: Roman Pen <r.peniaev@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: WANG Chao <chaowang@redhat.com>
Cc: Fabian Frederick <fabf@skynet.be>
Cc: Christoph Lameter <cl@linux.com>
Cc: Gioh Kim <gioh.kim@lge.com>
Cc: Rob Jones <rob.jones@codethink.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/vmalloc.c  58
1 file changed, 37 insertions(+), 21 deletions(-)
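The gist of the change is easiest to see outside the diff: the old vb_alloc() allocated a fresh block, published it on the free list, and then jumped back (goto again) to re-run the whole search just to find the block it had just created; the new code has new_vmap_block() carve out the first 2^order pages itself and hand the resulting address straight back. Below is a minimal userspace C sketch of that flow, not the kernel code: the names toy_block, toy_new_block, toy_alloc and TOY_BLOCK_PAGES are invented for illustration, with TOY_BLOCK_PAGES standing in for VMAP_BBMAP_BITS and malloc() standing in for the real vmap_area allocation.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT       12
#define TOY_BLOCK_PAGES  64UL        /* stands in for VMAP_BBMAP_BITS */

struct toy_block {
	unsigned long va_start;          /* base address of the block   */
	unsigned long free;              /* pages still available       */
	struct toy_block *next;          /* singly linked "free list"   */
};

static struct toy_block *free_list;
static unsigned long next_va = 0x100000UL;

/*
 * Allocate a new block and occupy 2^order pages in it right away,
 * mirroring the reworked new_vmap_block(): the caller gets a usable
 * address back and never re-walks the free list.
 */
static void *toy_new_block(unsigned int order)
{
	struct toy_block *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->va_start = next_va;
	next_va += TOY_BLOCK_PAGES << PAGE_SHIFT;
	b->free = TOY_BLOCK_PAGES - (1UL << order);   /* occupied up front */
	b->next = free_list;
	free_list = b;
	return (void *)b->va_start;                   /* pages_off == 0 */
}

/*
 * Allocate 2^order pages: scan the existing blocks first and fall back
 * to a fresh, pre-occupied block if nothing fits -- no retry loop.
 */
static void *toy_alloc(unsigned int order)
{
	struct toy_block *b;

	for (b = free_list; b; b = b->next) {
		unsigned long pages_off;

		if (b->free < (1UL << order))
			continue;
		pages_off = TOY_BLOCK_PAGES - b->free;
		b->free -= 1UL << order;
		return (void *)(b->va_start + (pages_off << PAGE_SHIFT));
	}
	return toy_new_block(order);
}

int main(void)
{
	printf("first alloc:  %p\n", toy_alloc(0));   /* comes from a new block */
	printf("second alloc: %p\n", toy_alloc(2));   /* reuses that block      */
	return 0;
}

The point carried over from the patch is that toy_new_block() returns an address rather than a block, so the caller never has to restart its scan.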
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 84feb5249b12..21ec16b7e6e1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -796,13 +796,31 @@ static unsigned long addr_to_vb_idx(unsigned long addr)
 	return addr;
 }
 
-static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
+static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
+{
+	unsigned long addr;
+
+	addr = va_start + (pages_off << PAGE_SHIFT);
+	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
+	return (void *)addr;
+}
+
+/**
+ * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
+ *                  block. Of course pages number can't exceed VMAP_BBMAP_BITS
+ * @order:    how many 2^order pages should be occupied in newly allocated block
+ * @gfp_mask: flags for the page level allocator
+ *
+ * Returns: virtual address in a newly allocated block or ERR_PTR(-errno)
+ */
+static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
 	struct vmap_area *va;
 	unsigned long vb_idx;
 	int node, err;
+	void *vaddr;
 
 	node = numa_node_id();
 
@@ -826,9 +844,12 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 		return ERR_PTR(err);
 	}
 
+	vaddr = vmap_block_vaddr(va->va_start, 0);
 	spin_lock_init(&vb->lock);
 	vb->va = va;
-	vb->free = VMAP_BBMAP_BITS;
+	/* At least something should be left free */
+	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
+	vb->free = VMAP_BBMAP_BITS - (1UL << order);
 	vb->dirty = 0;
 	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 	INIT_LIST_HEAD(&vb->free_list);
@@ -846,7 +867,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	spin_unlock(&vbq->lock);
 	put_cpu_var(vmap_block_queue);
 
-	return vb;
+	return vaddr;
 }
 
 static void free_vmap_block(struct vmap_block *vb)
@@ -910,7 +931,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 {
 	struct vmap_block_queue *vbq;
 	struct vmap_block *vb;
-	unsigned long addr = 0;
+	void *vaddr = NULL;
 	unsigned int order;
 
 	BUG_ON(size & ~PAGE_MASK);
@@ -925,43 +946,38 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 	}
 	order = get_order(size);
 
-again:
 	rcu_read_lock();
 	vbq = &get_cpu_var(vmap_block_queue);
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-		int i;
+		unsigned long pages_off;
 
 		spin_lock(&vb->lock);
-		if (vb->free < 1UL << order)
-			goto next;
+		if (vb->free < (1UL << order)) {
+			spin_unlock(&vb->lock);
+			continue;
+		}
 
-		i = VMAP_BBMAP_BITS - vb->free;
-		addr = vb->va->va_start + (i << PAGE_SHIFT);
-		BUG_ON(addr_to_vb_idx(addr) !=
-				addr_to_vb_idx(vb->va->va_start));
+		pages_off = VMAP_BBMAP_BITS - vb->free;
+		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
 		vb->free -= 1UL << order;
 		if (vb->free == 0) {
 			spin_lock(&vbq->lock);
 			list_del_rcu(&vb->free_list);
 			spin_unlock(&vbq->lock);
 		}
+
 		spin_unlock(&vb->lock);
 		break;
-next:
-		spin_unlock(&vb->lock);
 	}
 
 	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
-	if (!addr) {
-		vb = new_vmap_block(gfp_mask);
-		if (IS_ERR(vb))
-			return vb;
-		goto again;
-	}
+	/* Allocate new block if nothing was found */
+	if (!vaddr)
+		vaddr = new_vmap_block(order, gfp_mask);
 
-	return (void *)addr;
+	return vaddr;
 }
 
 static void vb_free(const void *addr, unsigned long size)