Diffstat (limited to 'arch/arc/mm/tlb.c')
 arch/arc/mm/tlb.c | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 003d69ac6ffa..066145b5f348 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *  	flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
 		      pte_t *ptep)
 {
 	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
 	create_tlb(vma, vaddr, ptep);
 
-	/* icache doesn't snoop dcache, thus needs to be made coherent here */
-	if (vma->vm_flags & VM_EXEC) {
+	/*
+	 * Exec page : Independent of aliasing/page-color considerations,
+	 *	       since icache doesn't snoop dcache on ARC, any dirty
+	 *	       K-mapping of a code page needs to be wback+inv so that
+	 *	       icache fetch by userspace sees code correctly.
+	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+	 *	       so userspace sees the right data.
+	 *  (Avoids the flush for Non-exec + congruent mapping case)
+	 */
+	if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
 		struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-		/* if page was dcache dirty, flush now */
 		int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
 		if (dirty) {
-			unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
-			__flush_dcache_page(paddr);
-			__inv_icache_page(paddr, vaddr);
+			/* wback + inv dcache lines */
+			__flush_dcache_page(paddr, paddr);
+
+			/* invalidate any existing icache lines */
+			if (vma->vm_flags & VM_EXEC)
+				__inv_icache_page(paddr, vaddr);
 		}
 	}
 }
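
Note: as a reference for the flush policy added above, here is a minimal
standalone sketch of the page-color congruency test that the new
addr_not_cache_congruent(paddr, vaddr) check expresses. It is illustrative
only: the 8K PAGE_SHIFT_, the single color bit, and the demo addresses in
main() are assumptions for a self-contained example, and the in-kernel
macro would additionally treat all mappings as congruent when the D-cache
is not VIPT-aliasing to begin with.

#include <stdio.h>

#define PAGE_SHIFT_	13	/* assumed 8K pages for this demo */

/* Cache color: index bit(s) of an address above the in-page offset */
#define CACHE_COLOR(addr)	((((unsigned long)(addr)) >> PAGE_SHIFT_) & 1)

/* Two mappings of the same physical page alias iff their colors differ */
#define addr_not_cache_congruent(paddr, vaddr) \
	(CACHE_COLOR(paddr) != CACHE_COLOR(vaddr))

int main(void)
{
	unsigned long paddr = 0x80002000UL;	/* kernel (K) mapping of page */
	unsigned long vaddr = 0x20004000UL;	/* user (U) mapping of same page */

	/*
	 * Mirrors the decision in update_mmu_cache(): a !EXEC page only
	 * needs the delayed D-cache flush when the K and U mappings would
	 * land in different cache sets (different colors).
	 */
	if (addr_not_cache_congruent(paddr, vaddr))
		printf("non-congruent: wback+inv the kernel mapping\n");
	else
		printf("congruent: delayed D-cache flush can be skipped\n");

	return 0;
}

With the demo addresses above the two mappings have different colors
(bit 13 differs), so the non-congruent path is taken.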