author	John David Anglin <dave.anglin@bell.net>	2013-07-23 12:27:52 -0400
committer	Helge Deller <deller@gmx.de>	2013-07-31 17:41:47 -0400
commit	50861f5a02dbf939c27d35a26c472885e2844188 (patch)
tree	a039ce8beb259f6d205493c7f6380f2b52dbca1a
parent	06693f305e60202d2795a10bee7fb7da23bc2acc (diff)
parisc: Fix cache routines to ignore vma's with an invalid pfn
The parisc architecture does not have a pte special bit. As a result,
special mappings are handled with the VM_PFNMAP and VM_MIXEDMAP flags.
VM_MIXEDMAP mappings may or may not have a "struct page" backing: when
pfn_valid() is false, there is no "struct page" backing; otherwise,
they are treated as normal pages.

The FireGL driver uses VM_MIXEDMAP without a backing "struct page".
This caused a panic due to a TLB data miss in update_mmu_cache, which
appeared to be in the code generated for page_address(). We were in
fact using a very circular bit of code to determine the physical
address of the PFN in various cache routines. This wasn't valid when
there was no "struct page" backing. The needed address can in fact be
determined simply from the PFN itself, without using the "struct page".

This patch updates update_mmu_cache(), flush_cache_mm(),
flush_cache_range() and flush_cache_page() to check pfn_valid() and to
compute the PFN's physical and virtual addresses directly.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Cc: <stable@vger.kernel.org>	# 3.10
Signed-off-by: Helge Deller <deller@gmx.de>
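For illustration, a minimal sketch of the direct computation the patch
relies on, assuming only the standard kernel helpers pte_pfn(),
pfn_valid(), PFN_PHYS() and __va(); the helper names pte_phys_addr()
and pte_kernel_addr() are hypothetical and not part of the patch:

	#include <linux/mm.h>		/* pfn_valid() */
	#include <asm/pgtable.h>	/* pte_pfn() */
	#include <asm/page.h>		/* PFN_PHYS(), __va() */

	/* The old code went pte -> struct page -> physical address, which
	 * dereferences a "struct page" that may not exist for VM_MIXEDMAP
	 * mappings. The direct path needs only the PFN: PFN_PHYS(pfn) is
	 * pfn << PAGE_SHIFT, and __va() turns that physical address into
	 * a kernel virtual address. Callers still check pfn_valid() first. */
	static inline unsigned long pte_phys_addr(pte_t pte)
	{
		return PFN_PHYS(pte_pfn(pte));	/* physical address of the frame */
	}

	static inline void *pte_kernel_addr(pte_t pte)
	{
		return __va(PFN_PHYS(pte_pfn(pte)));	/* same idea as the patch's pfn_va() */
	}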
-rw-r--r--	arch/parisc/kernel/cache.c	135
1 file changed, 71 insertions(+), 64 deletions(-)
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 2e65aa54bd10..c035673209f7 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
 }
 EXPORT_SYMBOL(flush_cache_all_local);
 
+/* Virtual address of pfn. */
+#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
+
 void
 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
-	struct page *page = pte_page(*ptep);
+	unsigned long pfn = pte_pfn(*ptep);
+	struct page *page;
 
-	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
-	    test_bit(PG_dcache_dirty, &page->flags)) {
+	/* We don't have pte special. As a result, we can be called with
+	   an invalid pfn and we don't need to flush the kernel dcache page.
+	   This occurs with FireGL card in C8000. */
+	if (!pfn_valid(pfn))
+		return;
 
-		flush_kernel_dcache_page(page);
+	page = pfn_to_page(pfn);
+	if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 		clear_bit(PG_dcache_dirty, &page->flags);
 	} else if (parisc_requires_coherency())
-		flush_kernel_dcache_page(page);
+		flush_kernel_dcache_page_addr(pfn_va(pfn));
 }
 
 void
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 
 void flush_cache_mm(struct mm_struct *mm)
 {
+	struct vm_area_struct *vma;
+	pgd_t *pgd;
+
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc. So, avoid it if the mm isn't too big. */
-	if (mm_total_size(mm) < parisc_cache_flush_threshold) {
-		struct vm_area_struct *vma;
-
-		if (mm->context == mfsp(3)) {
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				flush_user_dcache_range_asm(vma->vm_start,
-					vma->vm_end);
-				if (vma->vm_flags & VM_EXEC)
-					flush_user_icache_range_asm(
-					  vma->vm_start, vma->vm_end);
-			}
-		} else {
-			pgd_t *pgd = mm->pgd;
-
-			for (vma = mm->mmap; vma; vma = vma->vm_next) {
-				unsigned long addr;
-
-				for (addr = vma->vm_start; addr < vma->vm_end;
-				     addr += PAGE_SIZE) {
-					pte_t *ptep = get_ptep(pgd, addr);
-					if (ptep != NULL) {
-						pte_t pte = *ptep;
-						__flush_cache_page(vma, addr,
-						  page_to_phys(pte_page(pte)));
-					}
-				}
-			}
-		}
+	if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+		flush_cache_all();
+		return;
+	}
+
+	if (mm->context == mfsp(3)) {
+		for (vma = mm->mmap; vma; vma = vma->vm_next) {
+			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+			if ((vma->vm_flags & VM_EXEC) == 0)
+				continue;
+			flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
+		}
 		return;
 	}
 
-#ifdef CONFIG_SMP
-	flush_cache_all();
-#else
-	flush_cache_all_local();
-#endif
+	pgd = mm->pgd;
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		unsigned long addr;
+
+		for (addr = vma->vm_start; addr < vma->vm_end;
+		     addr += PAGE_SIZE) {
+			unsigned long pfn;
+			pte_t *ptep = get_ptep(pgd, addr);
+			if (!ptep)
+				continue;
+			pfn = pte_pfn(*ptep);
+			if (!pfn_valid(pfn))
+				continue;
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+		}
+	}
 }
 
 void
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
+	unsigned long addr;
+	pgd_t *pgd;
+
 	BUG_ON(!vma->vm_mm->context);
 
-	if ((end - start) < parisc_cache_flush_threshold) {
-		if (vma->vm_mm->context == mfsp(3)) {
-			flush_user_dcache_range_asm(start, end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(start, end);
-		} else {
-			unsigned long addr;
-			pgd_t *pgd = vma->vm_mm->pgd;
-
-			for (addr = start & PAGE_MASK; addr < end;
-			     addr += PAGE_SIZE) {
-				pte_t *ptep = get_ptep(pgd, addr);
-				if (ptep != NULL) {
-					pte_t pte = *ptep;
-					flush_cache_page(vma,
-					   addr, pte_pfn(pte));
-				}
-			}
-		}
-	} else {
-#ifdef CONFIG_SMP
+	if ((end - start) >= parisc_cache_flush_threshold) {
 		flush_cache_all();
-#else
-		flush_cache_all_local();
-#endif
+		return;
 	}
+
+	if (vma->vm_mm->context == mfsp(3)) {
+		flush_user_dcache_range_asm(start, end);
+		if (vma->vm_flags & VM_EXEC)
+			flush_user_icache_range_asm(start, end);
+		return;
+	}
+
+	pgd = vma->vm_mm->pgd;
+	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+		unsigned long pfn;
+		pte_t *ptep = get_ptep(pgd, addr);
+		if (!ptep)
+			continue;
+		pfn = pte_pfn(*ptep);
+		if (pfn_valid(pfn))
+			__flush_cache_page(vma, addr, PFN_PHYS(pfn));
+	}
 }
 
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
 {
 	BUG_ON(!vma->vm_mm->context);
 
-	flush_tlb_page(vma, vmaddr);
-	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+	if (pfn_valid(pfn)) {
+		flush_tlb_page(vma, vmaddr);
+		__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+	}
 }
 
 #ifdef CONFIG_PARISC_TMPALIAS
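
For context, a mapping of the kind that triggered the panic is created
by a driver mmap handler roughly as follows. This is a hypothetical
sketch, not code from this patch or from the FireGL driver; it assumes
the 3.10-era vm_insert_mixed() interface, and example_mmap() and
dev_pfn are made-up names:

	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Map device memory (e.g. VRAM) into userspace with VM_MIXEDMAP.
	 * The inserted pfn points at device memory, so pfn_valid() is false
	 * and there is no "struct page" behind the pte. A later fault on
	 * this range reaches update_mmu_cache() with exactly the kind of
	 * pte that the pfn_valid() checks above now guard against. */
	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long dev_pfn = 0xf0000;	/* made-up device frame number */

		vma->vm_flags |= VM_MIXEDMAP;
		return vm_insert_mixed(vma, vma->vm_start, dev_pfn);
	}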