Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/fault-armv.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 7a8efe1b37d8..8e9bc517132e 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -67,6 +67,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
+	spinlock_t *ptl;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -80,11 +81,19 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
-	pte = pte_offset_map(pmd, address);
+	/*
+	 * This is called while another page table is mapped, so we
+	 * must use the nested version.  This also means we need to
+	 * open-code the spin-locking.
+	 */
+	ptl = pte_lockptr(vma->vm_mm, pmd);
+	pte = pte_offset_map_nested(pmd, address);
+	spin_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pte);
 
-	pte_unmap(pte);
+	spin_unlock(ptl);
+	pte_unmap_nested(pte);
 
 	return ret;
 }
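
For context, below is a sketch of adjust_pte() as it reads with this patch applied. The pgd/pmd walk at the top of the function falls outside the hunks above and is reconstructed from the fault-armv.c of this era, so treat those lines as an assumption rather than part of the diff. The point of the change: adjust_pte() is reached while the caller already has another page table mapped, so the PTE must be mapped with the _nested kmap variant, and because pte_offset_map_lock() combines locking with the non-nested mapping, the page table lock has to be taken open-coded via pte_lockptr().

static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Walk to the PMD covering this address (reconstructed context,
	 * not shown in the hunks above). */
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	ret = do_adjust_pte(vma, address, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}

Note the teardown mirrors the setup in reverse order: the PTE is mapped before the lock is taken, and the lock is dropped before the nested kmap is torn down.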