aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/mm/fault-armv.c
diff options
context:
space:
mode:
author    Russell King <rmk+kernel@arm.linux.org.uk>  2009-12-18 11:24:34 -0500
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-01-20 08:48:30 -0500
commit56dd47098abe1fdde598a8d8b7c04d775506f456 (patch)
tree5eda8a531087c9c068702aa426db8479a77147f8 /arch/arm/mm/fault-armv.c
parentf8a85f1164a33e3eb5b421b137ced793ed53ee33 (diff)
ARM: make_coherent: fix problems with highpte, part 1
update_mmu_cache() is called with a page table already mapped. We call make_coherent(), which then calls adjust_pte() which wants to map other page tables. This causes kmap_atomic() to BUG() because the slot it's trying to use is already taken.

Since do_adjust_pte() modifies the page tables, we are also missing any form of locking, so we're risking corrupting the page tables.

Fix this by using pte_offset_map_nested(), and taking the pte page table lock around do_adjust_pte().

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/mm/fault-armv.c')
-rw-r--r--  arch/arm/mm/fault-armv.c | 13 +++++++++++--
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 7a8efe1b37d8..8e9bc517132e 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -67,6 +67,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
+	spinlock_t *ptl;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -80,11 +81,19 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
-	pte = pte_offset_map(pmd, address);
+	/*
+	 * This is called while another page table is mapped, so we
+	 * must use the nested version.  This also means we need to
+	 * open-code the spin-locking.
+	 */
+	ptl = pte_lockptr(vma->vm_mm, pmd);
+	pte = pte_offset_map_nested(pmd, address);
+	spin_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pte);
 
-	pte_unmap(pte);
+	spin_unlock(ptl);
+	pte_unmap_nested(pte);
 
 	return ret;
 }