author    Roman Pen <r.peniaev@gmail.com>    2015-04-15 19:13:55 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-04-15 19:35:18 -0400
commit    7d61bfe8fddecad76eb37cc477aab369c5c81ed3 (patch)
tree      55a653f735abb80715b011fba519e8d28556cb20 /mm
parent    cf725ce274ba026e132c225cb8e5b61973c63403 (diff)
mm/vmalloc: get rid of dirty bitmap inside vmap_block structure
In the original implementation of vm_map_ram by Nick Piggin there were two bitmaps: alloc_map and dirty_map. Neither was used as one would expect, i.e. for finding a suitable free hole for the next allocation in a block: vm_map_ram allocates space sequentially within a block, and on free it only marks pages as dirty, so freed space can never be reused. It would be interesting to know the real intent behind those bitmaps; perhaps the implementation was simply never completed.

Some time ago Zhang Yanfei already removed alloc_map in these two commits:

  mm/vmalloc.c: remove dead code in vb_alloc
     3fcd76e8028e0be37b02a2002b4f56755daeda06
  mm/vmalloc.c: remove alloc_map from vmap_block
     b8e748b6c32999f221ea4786557b8e7e6c4e4e7a

This patch replaces dirty_map with two range variables, dirty_min and dirty_max, which store the minimum and maximum positions of dirty space in a block. We only need to know the dirty range, not the exact position of every dirty page.

Why do it this way? Several reasons. First, at first glance the vm_map_ram allocator appears to care about fragmentation and to use its bitmaps to find free holes, but as explained above it does not; simple min/max range values avoid that needless complexity. Second, the code becomes simpler: instead of iterating over a bitmap, we just compare values with the min and max macros. Third, the bitmap occupies up to 1024 bits (the maximum block size is 4 MB, so with 4 KB pages that is 4 MB / 4 KB = 1024 bits, i.e. 128 bytes), and this patch replaces the whole bitmap with two longs (16 bytes on 64-bit). Finally, vm_unmap_aliases should be slightly faster, and the whole vmap_block structure occupies less memory.

Signed-off-by: Roman Pen <r.peniaev@gmail.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: WANG Chao <chaowang@redhat.com>
Cc: Fabian Frederick <fabf@skynet.be>
Cc: Christoph Lameter <cl@linux.com>
Cc: Gioh Kim <gioh.kim@lge.com>
Cc: Rob Jones <rob.jones@codethink.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
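The core of the change is small enough to demonstrate outside the kernel. The following is a minimal userspace C sketch of the same dirty-range bookkeeping, not the kernel code itself: the names block, block_init and block_mark_dirty and the BLOCK_PAGES constant are illustrative stand-ins for what the patch below does with vmap_block and VMAP_BBMAP_BITS.

#include <stdio.h>

/* Illustrative stand-in for VMAP_BBMAP_BITS: pages per block
 * (a 4 MB block of 4 KB pages = 1024). */
#define BLOCK_PAGES	1024UL

struct block {
	unsigned long dirty_min;	/* first dirty page, inclusive */
	unsigned long dirty_max;	/* last dirty page + 1, exclusive */
};

/* Empty range: min past the end, max at zero, mirroring what
 * new_vmap_block() does in the patch below. */
static void block_init(struct block *b)
{
	b->dirty_min = BLOCK_PAGES;
	b->dirty_max = 0;
}

/* Expand the dirty window to cover [page, page + npages); the same
 * two min/max updates that vb_free() performs in the patch. */
static void block_mark_dirty(struct block *b, unsigned long page,
			     unsigned long npages)
{
	if (page < b->dirty_min)
		b->dirty_min = page;
	if (page + npages > b->dirty_max)
		b->dirty_max = page + npages;
}

int main(void)
{
	struct block b;

	block_init(&b);
	block_mark_dirty(&b, 16, 4);	/* "free" pages 16..19 */
	block_mark_dirty(&b, 2, 1);	/* "free" page 2 */

	/* Prints "dirty range: [2, 20)": two longs now track what the
	 * old code tracked with a 1024-bit (128-byte) bitmap. */
	printf("dirty range: [%lu, %lu)\n", b.dirty_min, b.dirty_max);
	return 0;
}

Note that the window may over-cover clean pages between two dirty extents (pages 3..15 above). That is acceptable here precisely because vm_map_ram never reuses freed space within a block, so an exact per-page map buys nothing.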
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmalloc.c	35
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 21ec16b7e6e1..2faaa2976447 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -765,7 +765,7 @@ struct vmap_block {
 	spinlock_t lock;
 	struct vmap_area *va;
 	unsigned long free, dirty;
-	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
+	unsigned long dirty_min, dirty_max; /*< dirty range */
 	struct list_head free_list;
 	struct rcu_head rcu_head;
 	struct list_head purge;
@@ -851,7 +851,8 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
 	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
 	vb->free = VMAP_BBMAP_BITS - (1UL << order);
 	vb->dirty = 0;
-	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
+	vb->dirty_min = VMAP_BBMAP_BITS;
+	vb->dirty_max = 0;
 	INIT_LIST_HEAD(&vb->free_list);
 
 	vb_idx = addr_to_vb_idx(va->va_start);
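The inverted initial values above are deliberate: dirty_min starts past the end of the block and dirty_max at zero, so dirty_min > dirty_max encodes "no dirty pages yet", and the first free collapses the window to exactly the freed range. A hypothetical check in terms of the userspace sketch above (it assumes those illustrative block helpers):

#include <assert.h>

/* The sentinel start state means the first mark yields exactly the
 * freed range: min(BLOCK_PAGES, page) == page and
 * max(0, page + npages) == page + npages. */
static void demo_first_free(void)
{
	struct block b;

	block_init(&b);
	assert(b.dirty_min > b.dirty_max);	/* empty window */

	block_mark_dirty(&b, 100, 8);
	assert(b.dirty_min == 100 && b.dirty_max == 108);
}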
@@ -902,7 +903,8 @@ static void purge_fragmented_blocks(int cpu)
 		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
 			vb->free = 0; /* prevent further allocs after releasing lock */
 			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
-			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
+			vb->dirty_min = 0;
+			vb->dirty_max = VMAP_BBMAP_BITS;
 			spin_lock(&vbq->lock);
 			list_del_rcu(&vb->free_list);
 			spin_unlock(&vbq->lock);
@@ -995,6 +997,7 @@ static void vb_free(const void *addr, unsigned long size)
 	order = get_order(size);
 
 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
+	offset >>= PAGE_SHIFT;
 
 	vb_idx = addr_to_vb_idx((unsigned long)addr);
 	rcu_read_lock();
@@ -1005,7 +1008,10 @@ static void vb_free(const void *addr, unsigned long size)
 	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
 	spin_lock(&vb->lock);
-	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
+
+	/* Expand dirty range */
+	vb->dirty_min = min(vb->dirty_min, offset);
+	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
 
 	vb->dirty += 1UL << order;
 	if (vb->dirty == VMAP_BBMAP_BITS) {
@@ -1044,25 +1050,18 @@ void vm_unmap_aliases(void)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-		int i, j;
-
 		spin_lock(&vb->lock);
-		i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
-		if (i < VMAP_BBMAP_BITS) {
+		if (vb->dirty) {
+			unsigned long va_start = vb->va->va_start;
 			unsigned long s, e;
 
-			j = find_last_bit(vb->dirty_map,
-					  VMAP_BBMAP_BITS);
-			j = j + 1; /* need exclusive index */
-
-			s = vb->va->va_start + (i << PAGE_SHIFT);
-			e = vb->va->va_start + (j << PAGE_SHIFT);
-			flush = 1;
-
-			if (s < start)
-				start = s;
-			if (e > end)
-				end = e;
+			s = va_start + (vb->dirty_min << PAGE_SHIFT);
+			e = va_start + (vb->dirty_max << PAGE_SHIFT);
+
+			start = min(s, start);
+			end = max(e, end);
+
+			flush = 1;
 		}
 		spin_unlock(&vb->lock);
 	}
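This last hunk is where the speedup claimed in the commit message comes from: instead of scanning a 1024-bit map twice per block (find_first_bit() plus find_last_bit()), the loop now tests one counter and does two comparisons. Continuing the hypothetical userspace sketch, the per-block contribution to the global flush range looks like the helper below. Its name and parameters are illustrative, and unlike the kernel, which keys off its separate vb->dirty page counter, the sketch detects a non-empty window by comparing min and max directly:

/* Fold one block's dirty window into a global [start, end) flush
 * range, as the rewritten vm_unmap_aliases() loop does. */
static void accumulate_flush_range(const struct block *b,
				   unsigned long block_start_va,
				   unsigned long page_size,
				   unsigned long *start, unsigned long *end)
{
	if (b->dirty_min < b->dirty_max) {	/* block has dirty pages */
		unsigned long s = block_start_va + b->dirty_min * page_size;
		unsigned long e = block_start_va + b->dirty_max * page_size;

		if (s < *start)
			*start = s;
		if (e > *end)
			*end = e;
	}
}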