| author | Nick Piggin <npiggin@suse.de> | 2010-02-01 06:24:18 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-02-02 15:50:47 -0500 |
| commit | de5604231ce4bc8db1bc1dcd27d8540cbedf1518 (patch) | |
| tree | 1b64b0554b8ecffd58d4d06614075e5ccbb6bfb4 /mm/vmalloc.c | |
| parent | 489b24f2cbdcc1c93f55a2707733bba702ba8dbf (diff) | |
mm: percpu-vmap fix RCU list walking
RCU list walking of the per-cpu vmap block cache was broken: the list was
modified without the RCU list primitives, and the union of free_list and
rcu_head is wrong, because free_list is the very list we are RCU-walking;
handing the block to call_rcu() overwrites the list linkage while lockless
readers may still be traversing it.
While we are here, remove a couple of unused fields left over from an
earlier iteration.
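To make the hazard concrete, here is a minimal kernel-style sketch (illustrative only; the simplified type names are mine, not from the patch). Once a block is queued with call_rcu(), the RCU machinery writes through rcu_head, so if rcu_head shares storage with the list linkage, readers still walking free_list can follow clobbered pointers:

```c
#include <linux/list.h>
#include <linux/rcupdate.h>

/*
 * BROKEN (pre-patch) layout: rcu_head overlays the list linkage, so
 * call_rcu() corrupts free_list while RCU readers may still walk it.
 */
struct vmap_block_broken {
	union {
		struct list_head free_list;	/* walked by RCU readers */
		struct rcu_head rcu_head;	/* written by call_rcu() */
	};
};

/*
 * FIXED (post-patch) layout: the linkage keeps its own storage and
 * stays intact for the whole grace period.
 */
struct vmap_block_fixed {
	struct list_head free_list;	/* walked by RCU readers       */
	struct rcu_head rcu_head;	/* separate deferred-free hook */
};
```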
These APIs aren't actually used anywhere yet, because of problems with the
XFS conversion; Christoph has now verified that those problems are solved
with these patches. Also, this is an exported interface, so I think it
would be good to merge it now (and Christoph wants to get the XFS changes
into their local tree).
Cc: stable@kernel.org
Cc: linux-mm@kvack.org
Tested-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Nick Piggin <npiggin@suse.de>
--
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r-- | mm/vmalloc.c | 20 ++++++--------------
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d55d905463eb..cf76ff6ba596 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -667,8 +667,6 @@ static bool vmap_initialized __read_mostly = false;
 struct vmap_block_queue {
 	spinlock_t lock;
 	struct list_head free;
-	struct list_head dirty;
-	unsigned int nr_dirty;
 };
 
 struct vmap_block {
@@ -678,10 +676,8 @@ struct vmap_block {
 	unsigned long free, dirty;
 	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
-	union {
-		struct list_head free_list;
-		struct rcu_head rcu_head;
-	};
+	struct list_head free_list;
+	struct rcu_head rcu_head;
 };
 
 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
@@ -757,7 +753,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	vbq = &get_cpu_var(vmap_block_queue);
 	vb->vbq = vbq;
 	spin_lock(&vbq->lock);
-	list_add(&vb->free_list, &vbq->free);
+	list_add_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
 	put_cpu_var(vmap_block_queue);
 
@@ -776,8 +772,6 @@ static void free_vmap_block(struct vmap_block *vb)
 	struct vmap_block *tmp;
 	unsigned long vb_idx;
 
-	BUG_ON(!list_empty(&vb->free_list));
-
 	vb_idx = addr_to_vb_idx(vb->va->va_start);
 	spin_lock(&vmap_block_tree_lock);
 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
@@ -816,7 +810,7 @@ again:
 			vb->free -= 1UL << order;
 			if (vb->free == 0) {
 				spin_lock(&vbq->lock);
-				list_del_init(&vb->free_list);
+				list_del_rcu(&vb->free_list);
 				spin_unlock(&vbq->lock);
 			}
 			spin_unlock(&vb->lock);
@@ -860,11 +854,11 @@ static void vb_free(const void *addr, unsigned long size)
 	BUG_ON(!vb);
 
 	spin_lock(&vb->lock);
-	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
+	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
 	vb->dirty += 1UL << order;
 	if (vb->dirty == VMAP_BBMAP_BITS) {
-		BUG_ON(vb->free || !list_empty(&vb->free_list));
+		BUG_ON(vb->free);
 		spin_unlock(&vb->lock);
 		free_vmap_block(vb);
 	} else
@@ -1033,8 +1027,6 @@ void __init vmalloc_init(void)
 		vbq = &per_cpu(vmap_block_queue, i);
 		spin_lock_init(&vbq->lock);
 		INIT_LIST_HEAD(&vbq->free);
-		INIT_LIST_HEAD(&vbq->dirty);
-		vbq->nr_dirty = 0;
 	}
 
 	/* Import existing vmlist entries. */
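The hunks above fix the updater side of the list. For reference, here is a minimal sketch of the overall pattern these _rcu calls belong to (the ex_* types and functions are hypothetical stand-ins, not code from mm/vmalloc.c): updaters serialize against each other with a spinlock and use the _rcu list ops, readers walk locklessly under rcu_read_lock(), and the actual free is deferred through the now-separate rcu_head:

```c
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Simplified stand-ins for the vmap structures; illustrative only. */
struct ex_block {
	struct list_head free_list;	/* linkage that RCU readers traverse */
	struct rcu_head rcu_head;	/* separate storage for call_rcu()   */
};

static LIST_HEAD(ex_free);
static DEFINE_SPINLOCK(ex_lock);

/* Updaters: publish and retract under the lock, with the _rcu list ops. */
static void ex_add(struct ex_block *b)
{
	spin_lock(&ex_lock);
	list_add_rcu(&b->free_list, &ex_free);
	spin_unlock(&ex_lock);
}

static void ex_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct ex_block, rcu_head));
}

static void ex_del_and_free(struct ex_block *b)
{
	spin_lock(&ex_lock);
	list_del_rcu(&b->free_list);		/* readers may still see b ... */
	spin_unlock(&ex_lock);
	call_rcu(&b->rcu_head, ex_rcu_free);	/* ... so defer the kfree      */
}

/* Readers: lockless walk; pairs with the _rcu updates above. */
static void ex_walk(void)
{
	struct ex_block *b;

	rcu_read_lock();
	list_for_each_entry_rcu(b, &ex_free, free_list)
		;	/* inspect b; must not sleep or free it here */
	rcu_read_unlock();
}
```

Note that the spinlock only orders updaters against one another; it is the _rcu publication ops plus the grace period that keep the lockless readers safe, which is why list_add()/list_del_init() were insufficient here.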