 arch/arm/include/asm/pgtable.h  | 26 +++++++++++++++++++++++++++---
 arch/arm/include/asm/tlbflush.h | 10 +++++++++-
 arch/arm/mm/fault-armv.c        |  4 ++--
 arch/arm/mm/flush.c             | 30 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 64 insertions(+), 6 deletions(-)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index ab68cf1ef80f..42e694f1d58e 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -278,9 +278,24 @@ extern struct page *empty_zero_page;
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
-#define set_pte_at(mm,addr,ptep,pteval) do { \
-        set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
-} while (0)
+#if __LINUX_ARM_ARCH__ < 6
+static inline void __sync_icache_dcache(pte_t pteval)
+{
+}
+#else
+extern void __sync_icache_dcache(pte_t pteval);
+#endif
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+                              pte_t *ptep, pte_t pteval)
+{
+        if (addr >= TASK_SIZE)
+                set_pte_ext(ptep, pteval, 0);
+        else {
+                __sync_icache_dcache(pteval);
+                set_pte_ext(ptep, pteval, PTE_EXT_NG);
+        }
+}
 
 /*
  * The following only work if pte_present() is true.
@@ -290,8 +305,13 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)          (pte_val(pte) & L_PTE_WRITE)
 #define pte_dirty(pte)          (pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)          (pte_val(pte) & L_PTE_YOUNG)
+#define pte_exec(pte)           (pte_val(pte) & L_PTE_EXEC)
 #define pte_special(pte)        (0)
 
+#define pte_present_user(pte) \
+        ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
+         (L_PTE_PRESENT | L_PTE_USER))
+
 #define PTE_BIT_FUNC(fn,op) \
 static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
 
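The pte_present_user() macro added above accepts a PTE only when both L_PTE_PRESENT and L_PTE_USER are set, so kernel-only and not-present entries never reach the cache sync. A minimal userspace sketch of the same bit test, with illustrative bit positions (the real L_PTE_* values are defined elsewhere in pgtable.h):

/* standalone model of pte_present_user(); bit positions are made up */
#include <assert.h>
#include <stdint.h>

typedef uint32_t pte_t;

#define L_PTE_PRESENT   (1u << 0)       /* hypothetical bit layout */
#define L_PTE_USER      (1u << 4)

#define pte_val(pte)            (pte)
#define pte_present_user(pte) \
        ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \
         (L_PTE_PRESENT | L_PTE_USER))

int main(void)
{
        assert(pte_present_user(L_PTE_PRESENT | L_PTE_USER));
        assert(!pte_present_user(L_PTE_PRESENT));       /* kernel-only mapping */
        assert(!pte_present_user(L_PTE_USER));          /* not present */
        return 0;
}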
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 9ad329ad7458..989c9e57d92b 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -562,10 +562,18 @@ extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 /*
  * If PG_dcache_clean is not set for the page, we need to ensure that any
  * cache entries for the kernels virtual memory range are written
- * back to the page.
+ * back to the page. On ARMv6 and later, the cache coherency is handled via
+ * the set_pte_at() function.
  */
+#if __LINUX_ARM_ARCH__ < 6
 extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
         pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+        unsigned long addr, pte_t *ptep)
+{
+}
+#endif
 
 #endif
 
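On ARMv6 and later the cache maintenance now happens in set_pte_at(), so update_mmu_cache() becomes an empty static inline that the compiler removes at every call site. A standalone sketch of this compile-time stub pattern, with the hypothetical ARCH_LEVEL standing in for __LINUX_ARM_ARCH__:

#include <stdio.h>

#define ARCH_LEVEL 6                    /* stand-in for __LINUX_ARM_ARCH__ */

#if ARCH_LEVEL < 6
/* older cores: the maintenance really runs here */
static void update_cache_model(unsigned long addr)
{
        printf("cache maintenance for %#lx\n", addr);
}
#else
/* v6+: set_pte_at() already did the work, leave a no-op stub */
static inline void update_cache_model(unsigned long addr)
{
        (void)addr;                     /* silence unused-parameter warnings */
}
#endif

int main(void)
{
        update_cache_model(0x8000);     /* no-op or real work, chosen at build time */
        return 0;
}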
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 58846cbd0e0b..8440d952ba6d 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -28,6 +28,7 @@
 
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
+#if __LINUX_ARM_ARCH__ < 6
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
@@ -168,10 +169,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                 return;
 
         mapping = page_mapping(page);
-#ifndef CONFIG_SMP
         if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                 __flush_dcache_page(mapping, page);
-#endif
         if (mapping) {
                 if (cache_is_vivt())
                         make_coherent(mapping, vma, addr, ptep, pfn);
@@ -179,6 +178,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
                         __flush_icache_all();
         }
 }
+#endif  /* __LINUX_ARM_ARCH__ < 6 */
 
 /*
  * Check whether the write buffer has physical address aliasing
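With the #ifndef CONFIG_SMP guard removed, the lazy D-cache flush now runs on SMP kernels as well: PG_dcache_clean is set on the first userspace mapping, and the flush executes only on that transition. A simplified, non-atomic model of the pattern (the kernel's real test_and_set_bit() is an atomic bitop on word arrays):

#include <stdio.h>

#define PG_dcache_clean 0       /* hypothetical bit number */

struct page {
        unsigned long flags;
};

/* simplified, non-atomic stand-in for the kernel's test_and_set_bit() */
static int test_and_set_bit(int nr, unsigned long *addr)
{
        unsigned long mask = 1ul << nr;
        int was_set = (*addr & mask) != 0;

        *addr |= mask;
        return was_set;
}

int main(void)
{
        struct page page = { .flags = 0 };

        /* first mapping: bit was clear, so the flush runs */
        if (!test_and_set_bit(PG_dcache_clean, &page.flags))
                printf("flush D-cache (first user mapping)\n");

        /* later mappings: bit already set, flush skipped */
        if (!test_and_set_bit(PG_dcache_clean, &page.flags))
                printf("flush D-cache\n");
        return 0;
}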
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b4efce9b7985..dd5b0120b92e 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -215,6 +215,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
         flush_dcache_mmap_unlock(mapping);
 }
 
+#if __LINUX_ARM_ARCH__ >= 6
+void __sync_icache_dcache(pte_t pteval)
+{
+        unsigned long pfn;
+        struct page *page;
+        struct address_space *mapping;
+
+        if (!pte_present_user(pteval))
+                return;
+        if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
+                /* only flush non-aliasing VIPT caches for exec mappings */
+                return;
+        pfn = pte_pfn(pteval);
+        if (!pfn_valid(pfn))
+                return;
+
+        page = pfn_to_page(pfn);
+        if (cache_is_vipt_aliasing())
+                mapping = page_mapping(page);
+        else
+                mapping = NULL;
+
+        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
+                __flush_dcache_page(mapping, page);
+        /* pte_exec() already checked above for non-aliasing VIPT cache */
+        if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
+                __flush_icache_all();
+}
+#endif
+
 /*
  * Ensure cache coherency between kernel mapping and userspace mapping
  * of this page.
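The new __sync_icache_dcache() reduces to a decision table over the cache topology: kernel-only or not-present PTEs are skipped, non-aliasing VIPT caches only need work for executable mappings, and only aliasing VIPT caches need the page's mapping looked up for the D-cache flush. A userspace model of that decision logic, with the cache_is_*() queries reduced to an enum (the real kernel derives them from the CPU's cache type register):

#include <stdbool.h>
#include <stdio.h>

enum cache_type { VIVT, VIPT_ALIASING, VIPT_NONALIASING };

/* model of __sync_icache_dcache(): report which flushes would run */
static void sync_model(enum cache_type cache, bool present_user, bool exec,
                       bool dcache_clean)
{
        if (!present_user)
                return;                         /* kernel or !present PTE */
        if (cache == VIPT_NONALIASING && !exec)
                return;                         /* exec mappings only */

        if (!dcache_clean)
                printf("  flush D-cache\n");    /* first user mapping */
        if (cache == VIPT_NONALIASING || exec)
                printf("  flush I-cache\n");
}

int main(void)
{
        printf("aliasing VIPT, exec, dirty D-cache:\n");
        sync_model(VIPT_ALIASING, true, true, false);

        printf("non-aliasing VIPT, non-exec:\n");
        sync_model(VIPT_NONALIASING, true, false, false);       /* no-op */
        return 0;
}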