path: root/arch/sh64/mm
author		Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:41 -0400
commit		60ec5585496871345c1a8113d7b60ed9d9474866 (patch)
tree		172df570995ec777ca2a271dda7e3fcbb2dc5acb /arch/sh64/mm
parent		deceb6cd17e6dfafe4c4f81b1b4153bc41b2cb70 (diff)
[PATCH] mm: i386 sh sh64 ready for split ptlock
Use pte_offset_map_lock, instead of pte_offset_map (or inappropriate
pte_offset_kernel) and mm-wide page_table_lock, in sundry arch places.

The i386 vm86 mark_screen_rdonly: yes, there was and is an assumption
that the screen fits inside the one page table, as indeed it does.

The sh __do_page_fault: which handles both kernel faults (without lock)
and user mm faults (locked - though it set_pte without locking before).

The sh64 flush_cache_range and helpers: which wrongly thought callers
held page_table_lock before (only its tlb_start_vma did, and no longer
does so); moved the flush loop down, and adjusted the large versus
small range decision to consider a range which spans page tables as
large.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
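The sh64 part hinges on the range test visible in the second hunk below:
((start ^ (end - 1)) & PMD_MASK) is non-zero exactly when the first and the
last byte of [start, end) fall under different pmd entries, i.e. when the
range is not covered by a single page-table page. A minimal stand-alone
sketch of that check (illustrative only, not part of the commit; the
PMD_SHIFT value is an assumption for the example and need not match sh64):

/* Illustrative userspace demo of the pmd-span test; not from the patch.
 * PMD_SHIFT is an assumed example value (one pte page mapping 2 MiB). */
#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_MASK	(~((1UL << PMD_SHIFT) - 1))

/* Non-zero iff [start, end) is not covered by one page-table page:
 * start and the last byte (end - 1) then differ in bits above PMD_SHIFT. */
static unsigned long spans_page_tables(unsigned long start, unsigned long end)
{
	return (start ^ (end - 1)) & PMD_MASK;
}

int main(void)
{
	/* whole range under one pmd: small, walk the ptes under the pte lock */
	printf("%lu\n", spans_page_tables(0x00200000UL, 0x00201000UL));
	/* crosses a 2 MiB boundary: treated as large, purge the whole cache */
	printf("%lu\n", spans_page_tables(0x003ff000UL, 0x00401000UL));
	return 0;
}

The distinction matters after this patch because sh64_dcache_purge_user_pages()
takes the pte lock once with pte_offset_map_lock() and walks ptes only within
that one page-table page, so a range spanning a pmd boundary has to be
diverted to the 'large' path (purge everything) instead.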
Diffstat (limited to 'arch/sh64/mm')
-rw-r--r--	arch/sh64/mm/cache.c	68
1 file changed, 30 insertions(+), 38 deletions(-)
diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
index 3b87e25ea773..c0c1b21350d8 100644
--- a/arch/sh64/mm/cache.c
+++ b/arch/sh64/mm/cache.c
@@ -584,32 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
 	}
 }
 
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+				unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	spinlock_t *ptl;
 	unsigned long paddr;
 
-	/* NOTE : all the callers of this have mm->page_table_lock held, so the
-	   following page table traversal is safe even on SMP/pre-emptible. */
+	if (!mm)
+		return; /* No way to find physical address of page */
 
-	if (!mm) return; /* No way to find physical address of page */
-	pgd = pgd_offset(mm, eaddr);
-	if (pgd_bad(*pgd)) return;
+	pgd = pgd_offset(mm, addr);
+	if (pgd_bad(*pgd))
+		return;
 
-	pmd = pmd_offset(pgd, eaddr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-	pte = pte_offset_kernel(pmd, eaddr);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry)) return;
-
-	paddr = pte_val(entry) & PAGE_MASK;
-
-	sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+	pmd = pmd_offset(pgd, addr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	do {
+		entry = *pte;
+		if (pte_none(entry) || !pte_present(entry))
+			continue;
+		paddr = pte_val(entry) & PAGE_MASK;
+		sh64_dcache_purge_coloured_phy_page(paddr, addr);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 /****************************************************************************/
 
@@ -668,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	int n_pages;
 
 	n_pages = ((end - start) >> PAGE_SHIFT);
-	if (n_pages >= 64) {
+	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
 		sh64_dcache_purge_all();
 #else
@@ -707,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	}
 #endif
 	} else {
-		/* 'Small' range */
-		unsigned long aligned_start;
-		unsigned long eaddr;
-		unsigned long last_page_start;
-
-		aligned_start = start & PAGE_MASK;
-		/* 'end' is 1 byte beyond the end of the range */
-		last_page_start = (end - 1) & PAGE_MASK;
-
-		eaddr = aligned_start;
-		while (eaddr <= last_page_start) {
-			sh64_dcache_purge_user_page(mm, eaddr);
-			eaddr += PAGE_SIZE;
-		}
+		/* Small range, covered by a single page table page */
+		start &= PAGE_MASK;	/* should already be so */
+		end = PAGE_ALIGN(end);	/* should already be so */
+		sh64_dcache_purge_user_pages(mm, start, end);
 	}
 	return;
 }
@@ -880,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	   addresses from the user address space specified by mm, after writing
 	   back any dirty data.
 
-	   Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-	   Note(2), this is called with mm->page_table_lock held.*/
+	   Note, 'end' is 1 byte beyond the end of the range to flush. */
 
 	sh64_dcache_purge_user_range(mm, start, end);
 	sh64_icache_inv_user_page_range(mm, start, end);
@@ -898,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned
 	   the I-cache must be searched too in case the page in question is
 	   both writable and being executed from (e.g. stack trampolines.)
 
-	   Note(1), this is called with mm->page_table_lock held.
+	   Note, this is called with pte lock held.
 	*/
 
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);