about summary refs log tree commit diff stats
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
authorMinChan Kim <minchan.kim@gmail.com>2009-03-31 18:19:26 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2009-04-01 11:59:11 -0400
commitd086817dc0d42f1be8db4138233d33e1dd16a956 (patch)
treede5dfc0e262b78e625d88309b0ab5c8f94ed8320 /mm/vmalloc.c
parentef161a9863b045909142daea9490b067997f3dc5 (diff)
vmap: remove needless lock and list in vmap
vmap's dirty_list is unused. It exists for optimizing flushing, but Nick never wrote that code, so we don't need it until such time as it is needed. This patch removes vmap_block's dirty_list and the code related to it. Signed-off-by: MinChan Kim <minchan.kim@gmail.com> Acked-by: Nick Piggin <npiggin@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--mm/vmalloc.c19
1 file changed, 3 insertions, 16 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index af58324c361a..fab19876b4d1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -671,10 +671,7 @@ struct vmap_block {
671 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS); 671 DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
672 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS); 672 DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
673 union { 673 union {
674 struct { 674 struct list_head free_list;
675 struct list_head free_list;
676 struct list_head dirty_list;
677 };
678 struct rcu_head rcu_head; 675 struct rcu_head rcu_head;
679 }; 676 };
680}; 677};
@@ -741,7 +738,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
741 bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS); 738 bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
742 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS); 739 bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
743 INIT_LIST_HEAD(&vb->free_list); 740 INIT_LIST_HEAD(&vb->free_list);
744 INIT_LIST_HEAD(&vb->dirty_list);
745 741
746 vb_idx = addr_to_vb_idx(va->va_start); 742 vb_idx = addr_to_vb_idx(va->va_start);
747 spin_lock(&vmap_block_tree_lock); 743 spin_lock(&vmap_block_tree_lock);
@@ -772,12 +768,7 @@ static void free_vmap_block(struct vmap_block *vb)
772 struct vmap_block *tmp; 768 struct vmap_block *tmp;
773 unsigned long vb_idx; 769 unsigned long vb_idx;
774 770
775 spin_lock(&vb->vbq->lock); 771 BUG_ON(!list_empty(&vb->free_list));
776 if (!list_empty(&vb->free_list))
777 list_del(&vb->free_list);
778 if (!list_empty(&vb->dirty_list))
779 list_del(&vb->dirty_list);
780 spin_unlock(&vb->vbq->lock);
781 772
782 vb_idx = addr_to_vb_idx(vb->va->va_start); 773 vb_idx = addr_to_vb_idx(vb->va->va_start);
783 spin_lock(&vmap_block_tree_lock); 774 spin_lock(&vmap_block_tree_lock);
@@ -862,11 +853,7 @@ static void vb_free(const void *addr, unsigned long size)
862 853
863 spin_lock(&vb->lock); 854 spin_lock(&vb->lock);
864 bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order); 855 bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
865 if (!vb->dirty) { 856
866 spin_lock(&vb->vbq->lock);
867 list_add(&vb->dirty_list, &vb->vbq->dirty);
868 spin_unlock(&vb->vbq->lock);
869 }
870 vb->dirty += 1UL << order; 857 vb->dirty += 1UL << order;
871 if (vb->dirty == VMAP_BBMAP_BITS) { 858 if (vb->dirty == VMAP_BBMAP_BITS) {
872 BUG_ON(vb->free || !list_empty(&vb->free_list)); 859 BUG_ON(vb->free || !list_empty(&vb->free_list));