Diffstat (limited to 'arch/arm/mm/fault-armv.c')
 arch/arm/mm/fault-armv.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------
 1 file changed, 46 insertions(+), 39 deletions(-)
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 56ee15321b00..c9b97e9836a2 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
  * Therefore those configurations which might call adjust_pte (those
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn, pte_t *ptep)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte, entry;
+	pte_t entry = *ptep;
 	int ret;
 
-	pgd = pgd_offset(vma->vm_mm, address);
-	if (pgd_none(*pgd))
-		goto no_pgd;
-	if (pgd_bad(*pgd))
-		goto bad_pgd;
-
-	pmd = pmd_offset(pgd, address);
-	if (pmd_none(*pmd))
-		goto no_pmd;
-	if (pmd_bad(*pmd))
-		goto bad_pmd;
-
-	pte = pte_offset_map(pmd, address);
-	entry = *pte;
-
 	/*
 	 * If this page is present, it's actually being shared.
 	 */
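
Aside, not part of the patch: the hunk above strips the page-table walk out of do_adjust_pte(), which now receives a pre-mapped ptep and the pfn from its caller; what remains is the memory-type rewrite on a present entry whose type differs from shared_pte_mask. A minimal user-space sketch of that rewrite follows, with invented mask values standing in for the real ARM L_PTE_* definitions:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the ARM L_PTE_* bits; the real values live in pgtable.h. */
#define L_PTE_PRESENT        (1ULL << 0)
#define L_PTE_MT_MASK        (0xfULL << 2)
#define L_PTE_MT_BUFFERABLE  (0x1ULL << 2)

/* Mirror of the check-and-rewrite that do_adjust_pte() performs. */
static uint64_t make_shared(uint64_t pte, uint64_t shared_pte_mask)
{
	if (!(pte & L_PTE_PRESENT))                   /* not present: ignore */
		return pte;
	if ((pte & L_PTE_MT_MASK) == shared_pte_mask) /* already shared type */
		return pte;
	pte &= ~L_PTE_MT_MASK;   /* drop the cacheable memory type */
	pte |= shared_pte_mask;  /* force the shared (bufferable) type */
	return pte;
}

int main(void)
{
	uint64_t pte = L_PTE_PRESENT | (0x7ULL << 2); /* some cacheable type */
	printf("before %#llx, after %#llx\n",
	       (unsigned long long)pte,
	       (unsigned long long)make_shared(pte, L_PTE_MT_BUFFERABLE));
	return 0;
}

In the kernel the rewrite is bracketed by flush_cache_page()/outer_flush_range() before and flush_tlb_page() after, which the sketch omits.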
@@ -68,33 +52,55 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	 * fault (ie, is old), we can safely ignore any issues.
 	 */
 	if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
-		unsigned long pfn = pte_pfn(entry);
 		flush_cache_page(vma, address, pfn);
 		outer_flush_range((pfn << PAGE_SHIFT),
 				  (pfn << PAGE_SHIFT) + PAGE_SIZE);
 		pte_val(entry) &= ~L_PTE_MT_MASK;
 		pte_val(entry) |= shared_pte_mask;
-		set_pte_at(vma->vm_mm, address, pte, entry);
+		set_pte_at(vma->vm_mm, address, ptep, entry);
 		flush_tlb_page(vma, address);
 	}
-	pte_unmap(pte);
+
 	return ret;
+}
+
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	unsigned long pfn)
+{
+	spinlock_t *ptl;
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int ret;
+
+	pgd = pgd_offset(vma->vm_mm, address);
+	if (pgd_none_or_clear_bad(pgd))
+		return 0;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 0;
 
-bad_pgd:
-	pgd_ERROR(*pgd);
-	pgd_clear(pgd);
-no_pgd:
-	return 0;
-
-bad_pmd:
-	pmd_ERROR(*pmd);
-	pmd_clear(pmd);
-no_pmd:
-	return 0;
+	/*
+	 * This is called while another page table is mapped, so we
+	 * must use the nested version.  This also means we need to
+	 * open-code the spin-locking.
+	 */
+	ptl = pte_lockptr(vma->vm_mm, pmd);
+	pte = pte_offset_map_nested(pmd, address);
+	spin_lock(ptl);
+
+	ret = do_adjust_pte(vma, address, pfn, pte);
+
+	spin_unlock(ptl);
+	pte_unmap_nested(pte);
+
+	return ret;
 }
 
 static void
-make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
+make_coherent(struct address_space *mapping, struct vm_area_struct *vma,
+	unsigned long addr, pte_t *ptep, unsigned long pfn)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *mpnt;
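
Aside, not part of the patch: the new adjust_pte() maps the alias's PTE with pte_offset_map_nested() because this path is reached with the faulting task's own PTE already mapped and its pte lock held, so touching a second page table needs the nested kmap slot and that table's own split lock, taken by hand. The shape of that open-coded sequence, modelled in user space with a pthread mutex playing the part of the split page_table_lock (all names here are invented):

#include <pthread.h>
#include <stdio.h>

/* Toy "page table": the mutex models this table's split page_table_lock. */
struct toy_table {
	pthread_mutex_t ptl;
	unsigned long entry[512];
};

/* pte_offset_map_nested() analogue: hand out a pointer to one slot. */
static unsigned long *toy_map(struct toy_table *t, unsigned long idx)
{
	return &t->entry[idx];
}

/* pte_unmap_nested() analogue: nothing to undo in user space. */
static void toy_unmap(unsigned long *p) { (void)p; }

/* adjust_pte()'s sequence: map, lock, adjust, unlock, unmap. */
static int toy_adjust(struct toy_table *t, unsigned long idx,
		      unsigned long newval)
{
	unsigned long *p = toy_map(t, idx);
	int changed;

	pthread_mutex_lock(&t->ptl);    /* spin_lock(ptl)   */
	changed = (*p != newval);
	if (changed)
		*p = newval;
	pthread_mutex_unlock(&t->ptl);  /* spin_unlock(ptl) */

	toy_unmap(p);
	return changed;
}

int main(void)
{
	struct toy_table t = { .ptl = PTHREAD_MUTEX_INITIALIZER };
	printf("changed=%d\n", toy_adjust(&t, 3, 0x42));
	return 0;
}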
@@ -122,11 +128,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
 		if (!(mpnt->vm_flags & VM_MAYSHARE))
 			continue;
 		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-		aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+		aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
 	}
 	flush_dcache_mmap_unlock(mapping);
 	if (aliases)
-		adjust_pte(vma, addr);
+		do_adjust_pte(vma, addr, pfn, ptep);
 	else
 		flush_cache_page(vma, addr, pfn);
 }
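
Aside, not part of the patch: the loop above finds every other user address at which the same file page is mapped, using nothing but vm_start/vm_pgoff arithmetic. A small sketch of that calculation, with invented values:

#include <stdio.h>

#define PAGE_SHIFT 12

struct vma { unsigned long vm_start, vm_pgoff; };

/* Address at which file page 'pgoff' appears inside this mapping. */
static unsigned long alias_address(const struct vma *v, unsigned long pgoff)
{
	return v->vm_start + ((pgoff - v->vm_pgoff) << PAGE_SHIFT);
}

int main(void)
{
	struct vma v = { .vm_start = 0x40000000, .vm_pgoff = 4 };
	/* File page 10 sits 6 pages into this vma: 0x40000000 + 6*4096. */
	printf("%#lx\n", alias_address(&v, 10));
	return 0;
}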
@@ -144,9 +150,10 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
  *
  * Note that the pte lock will be held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+	pte_t *ptep)
 {
-	unsigned long pfn = pte_pfn(pte);
+	unsigned long pfn = pte_pfn(*ptep);
 	struct address_space *mapping;
 	struct page *page;
 
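
Aside, not part of the patch: the signature change matters because a pte_t passed by value is only a snapshot, while a pointer lets the hook read the live entry and rewrite it in place, which is exactly what the do_adjust_pte(vma, addr, pfn, ptep) call in make_coherent() now does. In miniature (type and bit layout invented):

#include <stdio.h>

typedef unsigned long pte_t;   /* invented stand-in for the kernel type */

/* Old shape: a by-value snapshot cannot be written back. */
static void hook_byval(pte_t pte)
{
	(void)pte;
}

/* New shape: the live entry can be read and updated in place. */
static void hook_byptr(pte_t *ptep)
{
	if (*ptep & 1)            /* "present" bit, invented layout     */
		*ptep |= 1UL << 2;    /* e.g. force a shared memory type */
}

int main(void)
{
	pte_t pte = 1;
	hook_byval(pte);
	hook_byptr(&pte);
	printf("pte is now %#lx\n", pte);
	return 0;
}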
@@ -168,7 +175,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 #endif
 	if (mapping) {
 		if (cache_is_vivt())
-			make_coherent(mapping, vma, addr, pfn);
+			make_coherent(mapping, vma, addr, ptep, pfn);
 		else if (vma->vm_flags & VM_EXEC)
 			__flush_icache_all();
 	}
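
Aside, not part of the patch: the final hunk only threads ptep through, but the branch it touches is the heart of the file: VIVT data caches can hold a dirty line per virtual alias, so every alias must be found and downgraded, whereas VIPT caches only need the I-cache synchronised for executable mappings. That dispatch, restated as a stand-alone sketch (enum and function names invented):

#include <stdbool.h>
#include <stdio.h>

enum cache_kind { CACHE_VIVT, CACHE_VIPT };

#define VM_EXEC 0x4UL   /* used here simply as an "executable" flag */

static void fixup_aliases(enum cache_kind cache, unsigned long vm_flags,
			  bool file_backed)
{
	if (!file_backed)
		return;   /* no address_space, so no shared aliases to fix */
	if (cache == CACHE_VIVT)
		puts("walk the mapping and adjust every shared alias");
	else if (vm_flags & VM_EXEC)
		puts("VIPT: just synchronise the I-cache");
}

int main(void)
{
	fixup_aliases(CACHE_VIVT, 0, true);
	fixup_aliases(CACHE_VIPT, VM_EXEC, true);
	return 0;
}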