author      Vineet Gupta <vgupta@synopsys.com>    2013-05-09 12:24:51 -0400
committer   Vineet Gupta <vgupta@synopsys.com>    2013-05-09 12:29:46 -0400
commit      4102b53392d6397d80b6e09b516517efacf7ea77
tree        b79aa87af1b3a1ff39b4a00b889cb806dfe32ae7 /arch/arc/mm
parent      6ec18a81b22ab2b40df8424f2b5fc6be20ccad87
ARC: [mm] Aliasing VIPT dcache support 2/4
This is the meat of the series: it prevents the creation of any dcache
aliases by always keeping the U and K mappings of a page congruent.
If a mapping already exists and another one tries to access the page, the
previous mapping is first flushed to the physical page (wback+inv).
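For concreteness, "congruent" means the two virtual addresses select the same
cache sets, i.e. they agree in the index bits above the page offset (the page
"color"). The standalone sketch below illustrates the arithmetic with an
assumed 32 KB, 4-way dcache and 4 KB pages (an example geometry, not any
particular ARC700 configuration); the real helpers, cache_is_vipt_aliasing()
and addr_not_cache_congruent(), are defined elsewhere in this series.

/* Sketch only: example cache geometry, hypothetical helper names */
#include <stdio.h>

#define PAGE_SHIFT      12                          /* 4 KB pages */
#define DCACHE_SZ       (32 * 1024)                 /* assumed 32 KB dcache */
#define DCACHE_WAYS     4
#define WAY_SZ          (DCACHE_SZ / DCACHE_WAYS)   /* 8 KB VIPT index span */

/* colors = way-size / page-size; more than one => aliasing is possible */
#define NR_COLORS       (WAY_SZ >> PAGE_SHIFT)

/* color of an address: the index bits that lie above the page offset */
static unsigned long cache_color(unsigned long addr)
{
        return (addr >> PAGE_SHIFT) & (NR_COLORS - 1);
}

int main(void)
{
        unsigned long k_vaddr = 0x7000f000;         /* e.g. a kernel mapping */
        unsigned long u_vaddr = 0x20002000;         /* e.g. a user mapping */

        printf("colors: %d\n", NR_COLORS);          /* 2 -> aliasing possible */
        printf("congruent: %s\n",
               cache_color(k_vaddr) == cache_color(u_vaddr) ? "yes" : "no");
        return 0;
}

The dcache_does_alias test added by this patch, (dc->sz / ARC_DCACHE_WAYS) >
PAGE_SIZE, is exactly the WAY_SZ > PAGE_SIZE condition above.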
Essentially, flush_dcache_page()/copy_user_highpage() create the K-mapping
of a page but try to defer flushing it unless a U-mapping already exists.
When the page is actually mapped into userspace, update_mmu_cache() flushes
the K-mapping (in certain cases this can be optimised away).
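A minimal sketch of that defer-then-finalize handshake, using hypothetical
stand-in names for PG_arch_1 and the flush primitives (the real code is in
the diff below):

#include <stdbool.h>

/* toy state: kmap_dirty plays the role of PG_arch_1 */
struct page_state {
        bool kmap_dirty;
        bool user_mapped;
};

static void wback_inv_kmap(struct page_state *pg) { /* hw flush of K-mapping */ }

/* WRITE side: kernel has just dirtied its own (K) mapping of the page */
static void sketch_flush_dcache_page(struct page_state *pg)
{
        if (!pg->user_mapped)
                pg->kmap_dirty = true;  /* no alias yet: defer, only mark */
        else
                wback_inv_kmap(pg);     /* U-mapping exists: flush now */
}

/* FINALIZE side: a page fault is wiring the page into user address space */
static void sketch_update_mmu_cache(struct page_state *pg)
{
        if (pg->kmap_dirty) {           /* delayed flush pending? */
                pg->kmap_dirty = false;
                wback_inv_kmap(pg);     /* wback+inv: page in sync, alias gone */
        }
        pg->user_mapped = true;
}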
Additionally, flush_cache_mm(), flush_cache_range() and flush_cache_page()
handle the purging of stale userspace mappings on exit/munmap...
flush_anon_page() handles the existing U-mapping of an anon page before the
kernel reads it via the GUP path.
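The same concern runs in the read direction: before the kernel reads an anon
page through its own mapping, dirty lines under the user alias must reach
memory and stale lines under the kernel alias must be dropped. A toy version,
again with hypothetical names (the real flush_anon_page() appears in the diff
below and flushes via paddr/vaddr pairs):

static void wback_inv_lines(unsigned long vaddr) { /* per-line hw cache op */ }

static void sketch_flush_anon_page(unsigned long k_vaddr, unsigned long u_vaddr)
{
        wback_inv_lines(u_vaddr);  /* write user-alias dirty data back to RAM */
        wback_inv_lines(k_vaddr);  /* drop any stale lines under kernel alias */
}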
Note that while not complete, this is enough to boot a simple dynamically
linked, Busybox-based rootfs.
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--   arch/arc/mm/cache_arc700.c   134
-rw-r--r--   arch/arc/mm/tlb.c             27
2 files changed, 150 insertions(+), 11 deletions(-)
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index a9a37089257a..9887195379ef 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -68,6 +68,7 @@
 #include <linux/mmu_context.h>
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
+#include <linux/pagemap.h>
 #include <asm/cacheflush.h>
 #include <asm/cachectl.h>
 #include <asm/setup.h>
@@ -138,6 +139,7 @@ void __cpuinit arc_cache_init(void)
         struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
         struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
         int way_pg_ratio = way_pg_ratio;
+        int dcache_does_alias;
         char str[256];
 
         printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
@@ -184,9 +186,13 @@ chk_dc:
                 panic("Cache H/W doesn't match kernel Config");
         }
 
+        dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE;
+
         /* check for D-Cache aliasing */
-        if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE)
-                panic("D$ aliasing not handled right now\n");
+        if (dcache_does_alias && !cache_is_vipt_aliasing())
+                panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
+        else if (!dcache_does_alias && cache_is_vipt_aliasing())
+                panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n");
 #endif
 
         /* Set the default Invalidate Mode to "simpy discard dirty lines"
@@ -312,7 +318,7 @@ static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr,
         }
 }
 
-/* For kernel mappings cache op index is same as paddr */
+/* For kernel mappings cache operation: index is same as paddr */
 #define __dc_line_op_k(p, sz, op)        __dc_line_op(p, p, sz, op)
 
 /*
@@ -464,10 +470,47 @@ static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
  * Exported APIs
  */
 
+/*
+ * Handle cache congruency of kernel and userspace mappings of page when kernel
+ * writes-to/reads-from
+ *
+ * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
+ *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
+ *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
+ *  -In SMP, if hardware caches are coherent
+ *
+ * There's a corollary case, where kernel READs from a userspace mapped page.
+ * If the U-mapping is not congruent to to K-mapping, former needs flushing.
+ */
 void flush_dcache_page(struct page *page)
 {
-        /* Make a note that dcache is not yet flushed for this page */
-        set_bit(PG_arch_1, &page->flags);
+        struct address_space *mapping;
+
+        if (!cache_is_vipt_aliasing()) {
+                set_bit(PG_arch_1, &page->flags);
+                return;
+        }
+
+        /* don't handle anon pages here */
+        mapping = page_mapping(page);
+        if (!mapping)
+                return;
+
+        /*
+         * pagecache page, file not yet mapped to userspace
+         * Make a note that K-mapping is dirty
+         */
+        if (!mapping_mapped(mapping)) {
+                set_bit(PG_arch_1, &page->flags);
+        } else if (page_mapped(page)) {
+
+                /* kernel reading from page with U-mapping */
+                void *paddr = page_address(page);
+                unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+
+                if (addr_not_cache_congruent(paddr, vaddr))
+                        __flush_dcache_page(paddr, vaddr);
+        }
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
@@ -612,6 +655,87 @@ noinline void flush_cache_all(void)
 
 }
 
+#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+        flush_cache_all();
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
+                      unsigned long pfn)
+{
+        unsigned int paddr = pfn << PAGE_SHIFT;
+
+        __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end)
+{
+        flush_cache_all();
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+                        unsigned long u_vaddr, struct vm_area_struct *vma)
+{
+        void *kfrom = page_address(from);
+        void *kto = page_address(to);
+        int clean_src_k_mappings = 0;
+
+        /*
+         * If SRC page was already mapped in userspace AND it's U-mapping is
+         * not congruent with K-mapping, sync former to physical page so that
+         * K-mapping in memcpy below, sees the right data
+         *
+         * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
+         * equally valid for SRC page as well
+         */
+        if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
+                __flush_dcache_page(kfrom, u_vaddr);
+                clean_src_k_mappings = 1;
+        }
+
+        copy_page(kto, kfrom);
+
+        /*
+         * Mark DST page K-mapping as dirty for a later finalization by
+         * update_mmu_cache(). Although the finalization could have been done
+         * here as well (given that both vaddr/paddr are available).
+         * But update_mmu_cache() already has code to do that for other
+         * non copied user pages (e.g. read faults which wire in pagecache page
+         * directly).
+         */
+        set_bit(PG_arch_1, &to->flags);
+
+        /*
+         * if SRC was already usermapped and non-congruent to kernel mapping
+         * sync the kernel mapping back to physical page
+         */
+        if (clean_src_k_mappings) {
+                __flush_dcache_page(kfrom, kfrom);
+        } else {
+                set_bit(PG_arch_1, &from->flags);
+        }
+}
+
+void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
+{
+        clear_page(to);
+        set_bit(PG_arch_1, &page->flags);
+}
+
+void flush_anon_page(struct vm_area_struct *vma, struct page *page,
+                     unsigned long u_vaddr)
+{
+        /* TBD: do we really need to clear the kernel mapping */
+        __flush_dcache_page(page_address(page), u_vaddr);
+        __flush_dcache_page(page_address(page), page_address(page));
+
+}
+
+#endif
+
 /**********************************************************************
  * Explicit Cache flush request from user space via syscall
  * Needed for JITs which generate code on the fly
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 5810c7a92b77..066145b5f348 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -421,25 +421,40 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 /*
  * Called at the end of pagefault, for a userspace mapped page
  *  -pre-install the corresponding TLB entry into MMU
- *  -Finalize the delayed D-cache flush (wback+inv kernel mapping)
+ *  -Finalize the delayed D-cache flush of kernel mapping of page due to
+ *      flush_dcache_page(), copy_user_page()
+ *
+ * Note that flush (when done) involves both WBACK - so physical page is
+ * in sync as well as INV - so any non-congruent aliases don't remain
  */
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
                       pte_t *ptep)
 {
         unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
+        unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
 
         create_tlb(vma, vaddr, ptep);
 
-        /* icache doesn't snoop dcache, thus needs to be made coherent here */
-        if (vma->vm_flags & VM_EXEC) {
+        /*
+         * Exec page : Independent of aliasing/page-color considerations,
+         *             since icache doesn't snoop dcache on ARC, any dirty
+         *             K-mapping of a code page needs to be wback+inv so that
+         *             icache fetch by userspace sees code correctly.
+         * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
+         *             so userspace sees the right data.
+         *             (Avoids the flush for Non-exec + congruent mapping case)
+         */
+        if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) {
                 struct page *page = pfn_to_page(pte_pfn(*ptep));
 
-                /* if page was dcache dirty, flush now */
                 int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
                 if (dirty) {
-                        unsigned long paddr = pte_val(*ptep) & PAGE_MASK;
+                        /* wback + inv dcache lines */
                         __flush_dcache_page(paddr, paddr);
-                        __inv_icache_page(paddr, vaddr);
+
+                        /* invalidate any existing icache lines */
+                        if (vma->vm_flags & VM_EXEC)
+                                __inv_icache_page(paddr, vaddr);
                 }
         }
 }