author     Nick Piggin <npiggin@suse.de>  2008-12-01 16:13:47 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-12-01 22:55:23 -0500
commit     b29acbdcf877009af3f1fc0750bcac314c51e055
tree       f4afe2fcecfe414b75934681cb19a037a953a4e8
parent     8650e51ac94b5fe93c02e3c8fef02e416f14501c
mm: vmalloc fix lazy unmapping cache aliasing
Jim Radford has reported that the vmap subsystem rewrite was sometimes causing his VIVT ARM system to behave strangely (it seemed to go into infinite loops trying to fault pages into userspace).

We determined that the problem was most likely a cache aliasing issue. flush_cache_vunmap was only being called at the moment the page tables were to be taken down; with lazy unmapping, however, that can happen after the page has already been freed and allocated for something else, so the dangling alias may still have dirty data attached to it.

The fix for this problem is to do the cache flushing when the caller has called vunmap -- it would be a bug for them to write anything else to the mapping at that point. That appeared to solve Jim's problems.

Reported-by: Jim Radford <radford@blackbean.org>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Russell King <rmk@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
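The ordering hazard behind the bug is easiest to see as a timeline. Below is a minimal sketch of the failure mode on a VIVT cache; the timeline is illustrative (vmap/vunmap and the lazy purge are the real interfaces involved, while the memset and the page-reuse step are assumptions for the example):

	/*
	 * Illustrative VIVT-aliasing timeline (not code from this patch):
	 *
	 *   addr = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
	 *   memset(addr, 0, nr * PAGE_SIZE); // dirty cache lines are indexed
	 *                                    // by the vmap virtual address
	 *   vunmap(addr);                    // lazy unmap: page tables stay
	 *                                    // in place until a batched purge
	 *   // ... pages freed and reallocated to a new owner ...
	 *   purge_vmap_area_lazy();          // old code flushed the alias only
	 *                                    // here, writing stale dirty lines
	 *                                    // over the new owner's data
	 */

The patch moves the flush_cache_vunmap() call to the vunmap step itself (the new free_unmap_vmap_area() wrapper and the vb_free() hunk in the diff below), so the alias is clean before the area is queued for lazy purging.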
Diffstat (limited to 'mm')
 mm/vmalloc.c | 20 ++++++++++++++------
 1 file changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 30f826d484f0..f3f6e0758562 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -77,7 +77,6 @@ static void vunmap_page_range(unsigned long addr, unsigned long end)
 
 	BUG_ON(addr >= end);
 	pgd = pgd_offset_k(addr);
-	flush_cache_vunmap(addr, end);
 	do {
 		next = pgd_addr_end(addr, end);
 		if (pgd_none_or_clear_bad(pgd))
@@ -543,9 +542,10 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
  */
-static void free_unmap_vmap_area(struct vmap_area *va)
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -553,6 +553,15 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	try_purge_vmap_area_lazy();
 }
 
+/*
+ * Free and unmap a vmap area
+ */
+static void free_unmap_vmap_area(struct vmap_area *va)
+{
+	flush_cache_vunmap(va->va_start, va->va_end);
+	free_unmap_vmap_area_noflush(va);
+}
+
 static struct vmap_area *find_vmap_area(unsigned long addr)
 {
 	struct vmap_area *va;
@@ -734,7 +743,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area(vb->va);
+	free_unmap_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -796,6 +805,9 @@ static void vb_free(const void *addr, unsigned long size)
 
 	BUG_ON(size & ~PAGE_MASK);
 	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
+
+	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
+
 	order = get_order(size);
 
 	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
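The per-CPU block path needs the same treatment: free_vmap_block() switches to the _noflush variant because vb_free() has already flushed exactly the sub-range being torn down. A caller-side sketch of that path, assuming the 2008-era vm_map_ram()/vm_unmap_ram() signatures (nr and the usage are illustrative):

	/*
	 * Small mappings (up to VMAP_MAX_ALLOC pages) are served by the
	 * per-CPU block allocator, so vm_unmap_ram() lands in vb_free(),
	 * which now calls flush_cache_vunmap() on [addr, addr + size).
	 */
	void *buf = vm_map_ram(pages, nr, -1, PAGE_KERNEL);
	if (buf) {
		/* ... use buf ... */
		vm_unmap_ram(buf, nr);	/* flush happens here, not at purge time */
	}

Larger requests bypass the blocks and reach free_unmap_vmap_area(), which performs the same flush before deferring the actual page-table teardown.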