author	Hugh Dickins <hugh@veritas.com>	2005-10-29 21:16:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:41 -0400
commit	60ec5585496871345c1a8113d7b60ed9d9474866 (patch)
tree	172df570995ec777ca2a271dda7e3fcbb2dc5acb /arch
parent	deceb6cd17e6dfafe4c4f81b1b4153bc41b2cb70 (diff)
[PATCH] mm: i386 sh sh64 ready for split ptlock
Use pte_offset_map_lock, instead of pte_offset_map (or inappropriate
pte_offset_kernel) and mm-wide page_table_lock, in sundry arch places.

The i386 vm86 mark_screen_rdonly: yes, there was and is an assumption
that the screen fits inside the one page table, as indeed it does.

The sh __do_page_fault: which handles both kernel faults (without lock)
and user mm faults (locked - though it set_pte without locking before).

The sh64 flush_cache_range and helpers: which wrongly thought callers
held page_table_lock before (only its tlb_start_vma did, and no longer
does so); moved the flush loop down, and adjusted the large versus
small range decision to consider a range which spans page tables as
large.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
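The conversion follows one idiom throughout: pte_offset_map_lock() maps the pte page and takes whichever spinlock guards it (the per-page lock once split ptlock is enabled, mm->page_table_lock otherwise), returning that lock through its last argument; pte_unmap_unlock() undoes both. A minimal sketch of the before and after, using the same calls the patch uses (walk_one_pte() itself is a made-up helper for illustration, not kernel code):

/* Old pattern, replaced by this patch:
 *
 *	spin_lock(&mm->page_table_lock);
 *	pte = pte_offset_map(pmd, addr);
 *	... inspect or modify *pte ...
 *	pte_unmap(pte);
 *	spin_unlock(&mm->page_table_lock);
 *
 * New pattern: map and lock in one call; ptl receives whichever
 * lock actually guards this page table page.
 */
static void walk_one_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (pte_present(*pte))
		set_pte(pte, pte_wrprotect(*pte));	/* e.g. write-protect */
	pte_unmap_unlock(pte, ptl);
}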
Diffstat (limited to 'arch')
-rw-r--r--	arch/i386/kernel/vm86.c	17
-rw-r--r--	arch/sh/mm/fault.c	40
-rw-r--r--	arch/sh64/mm/cache.c	68
3 files changed, 60 insertions, 65 deletions
diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c
index 16b485009622..fc1993564f98 100644
--- a/arch/i386/kernel/vm86.c
+++ b/arch/i386/kernel/vm86.c
@@ -134,17 +134,16 @@ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
 	return ret;
 }
 
-static void mark_screen_rdonly(struct task_struct * tsk)
+static void mark_screen_rdonly(struct mm_struct *mm)
 {
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	pte_t *pte, *mapped;
+	pte_t *pte;
+	spinlock_t *ptl;
 	int i;
 
-	preempt_disable();
-	spin_lock(&tsk->mm->page_table_lock);
-	pgd = pgd_offset(tsk->mm, 0xA0000);
+	pgd = pgd_offset(mm, 0xA0000);
 	if (pgd_none_or_clear_bad(pgd))
 		goto out;
 	pud = pud_offset(pgd, 0xA0000);
@@ -153,16 +152,14 @@ static void mark_screen_rdonly(struct task_struct * tsk)
 	pmd = pmd_offset(pud, 0xA0000);
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
-	pte = mapped = pte_offset_map(pmd, 0xA0000);
+	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
 	for (i = 0; i < 32; i++) {
 		if (pte_present(*pte))
 			set_pte(pte, pte_wrprotect(*pte));
 		pte++;
 	}
-	pte_unmap(mapped);
+	pte_unmap_unlock(pte, ptl);
 out:
-	spin_unlock(&tsk->mm->page_table_lock);
-	preempt_enable();
 	flush_tlb();
 }
 
@@ -306,7 +303,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 
 	tsk->thread.screen_bitmap = info->screen_bitmap;
 	if (info->flags & VM86_SCREEN_BITMAP)
-		mark_screen_rdonly(tsk);
+		mark_screen_rdonly(tsk->mm);
 	__asm__ __volatile__(
 		"xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs\n\t"
 		"movl %0,%%esp\n\t"
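The changelog's "screen fits inside the one page table" assumption is easy to check: with 4KB pages one page table maps 4MB (PMD_SIZE), and the 32 VGA pages run from 0xA0000 to 0xBFFFF, entirely inside the first 4MB, so the single pte_offset_map_lock() above covers every iteration of the loop. A compile-time restatement of that assumption (illustrative only, not part of the patch):

/* Hypothetical check: both ends of the VGA window share one pmd entry,
 * i.e. one page table page, so pte can simply be incremented 32 times
 * under a single pte lock.
 */
static inline void check_screen_fits_one_page_table(void)
{
	BUILD_BUG_ON((0xA0000UL & PMD_MASK) !=
		     ((0xA0000UL + 32 * PAGE_SIZE - 1) & PMD_MASK));
}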
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 7abba2161da6..775f86cd3fe8 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -194,10 +194,13 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
 	unsigned long addrmax = P4SEG;
-	pgd_t *dir;
+	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	struct mm_struct *mm;
+	spinlock_t *ptl;
+	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -208,28 +211,28 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 	addrmax = P4SEG_STORE_QUE + 0x04000000;
 #endif
 
-	if (address >= P3SEG && address < addrmax)
-		dir = pgd_offset_k(address);
-	else if (address >= TASK_SIZE)
+	if (address >= P3SEG && address < addrmax) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else if (address >= TASK_SIZE)
 		return 1;
-	else if (!current->mm)
+	else if (!(mm = current->mm))
 		return 1;
 	else
-		dir = pgd_offset(current->mm, address);
+		pgd = pgd_offset(mm, address);
 
-	pmd = pmd_offset(dir, address);
-	if (pmd_none(*pmd))
-		return 1;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
 		return 1;
-	}
-	pte = pte_offset_kernel(pmd, address);
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
 	entry = *pte;
 	if (pte_none(entry) || pte_not_present(entry)
 			|| (writeaccess && !pte_write(entry)))
-		return 1;
+		goto unlock;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -251,8 +254,11 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
-
-	return 0;
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
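The shape of the fault-path change, stated in isolation: mm doubles as the "did we take the pte lock?" flag. Faults on kernel addresses (P3SEG and the store queues) walk page tables that are never freed, so they use pte_offset_kernel() with no lock; user faults map and lock the pte page, and every exit after that point must pass through the unlock. A condensed sketch under those assumptions (touch_pte() is a made-up name; the calls are the ones the patch uses):

static int touch_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	if (mm)		/* user fault: map the pte page and lock it */
		pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	else		/* kernel fault: no lock needed */
		pte = pte_offset_kernel(pmd, addr);

	if (pte_present(*pte))
		ret = 0;	/* a real handler would update *pte here */

	if (mm)
		pte_unmap_unlock(pte, ptl);
	return ret;
}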
diff --git a/arch/sh64/mm/cache.c b/arch/sh64/mm/cache.c
index 3b87e25ea773..c0c1b21350d8 100644
--- a/arch/sh64/mm/cache.c
+++ b/arch/sh64/mm/cache.c
@@ -584,32 +584,36 @@ static void sh64_dcache_purge_phy_page(unsigned long paddr)
 	}
 }
 
-static void sh64_dcache_purge_user_page(struct mm_struct *mm, unsigned long eaddr)
+static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
+				unsigned long addr, unsigned long end)
 {
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	spinlock_t *ptl;
 	unsigned long paddr;
 
-	/* NOTE : all the callers of this have mm->page_table_lock held, so the
-	   following page table traversal is safe even on SMP/pre-emptible. */
-
-	if (!mm) return; /* No way to find physical address of page */
-	pgd = pgd_offset(mm, eaddr);
-	if (pgd_bad(*pgd)) return;
-
-	pmd = pmd_offset(pgd, eaddr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd)) return;
-
-	pte = pte_offset_kernel(pmd, eaddr);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry)) return;
-
-	paddr = pte_val(entry) & PAGE_MASK;
-
-	sh64_dcache_purge_coloured_phy_page(paddr, eaddr);
-
+	if (!mm)
+		return; /* No way to find physical address of page */
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_bad(*pgd))
+		return;
+
+	pmd = pmd_offset(pgd, addr);
+	if (pmd_none(*pmd) || pmd_bad(*pmd))
+		return;
+
+	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	do {
+		entry = *pte;
+		if (pte_none(entry) || !pte_present(entry))
+			continue;
+		paddr = pte_val(entry) & PAGE_MASK;
+		sh64_dcache_purge_coloured_phy_page(paddr, addr);
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+	pte_unmap_unlock(pte - 1, ptl);
 }
 /****************************************************************************/
 
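One subtlety in the new loop: the comma expression in the while condition increments pte before the addr != end test, so when the loop exits, pte points one entry past the last one visited. pte_unmap_unlock(pte - 1, ptl) therefore hands back an address that still lies within the mapped page table page. The same shape in miniature (visit() is a hypothetical callback, shown only to isolate the loop structure):

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		visit(pte);	/* body may 'continue' to the increment */
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);	/* pte is now one past the end */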
@@ -668,7 +672,7 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	int n_pages;
 
 	n_pages = ((end - start) >> PAGE_SHIFT);
-	if (n_pages >= 64) {
+	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
 #if 1
 		sh64_dcache_purge_all();
 #else
@@ -707,20 +711,10 @@ static void sh64_dcache_purge_user_range(struct mm_struct *mm,
 	}
 #endif
 	} else {
-		/* 'Small' range */
-		unsigned long aligned_start;
-		unsigned long eaddr;
-		unsigned long last_page_start;
-
-		aligned_start = start & PAGE_MASK;
-		/* 'end' is 1 byte beyond the end of the range */
-		last_page_start = (end - 1) & PAGE_MASK;
-
-		eaddr = aligned_start;
-		while (eaddr <= last_page_start) {
-			sh64_dcache_purge_user_page(mm, eaddr);
-			eaddr += PAGE_SIZE;
-		}
+		/* Small range, covered by a single page table page */
+		start &= PAGE_MASK;	/* should already be so */
+		end = PAGE_ALIGN(end);	/* should already be so */
+		sh64_dcache_purge_user_pages(mm, start, end);
 	}
 	return;
 }
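The new condition in the large-versus-small decision is worth unpacking: XOR leaves bits set only where start and end - 1 differ, so masking with PMD_MASK asks whether the first and last byte of the range fall under different pmd entries, i.e. in different page table pages. Only a range confined to one page table page may take the small path, because sh64_dcache_purge_user_pages() takes the pte lock just once for the whole walk. Restated as a hypothetical helper:

/* Illustrative helper, not from the patch. 'end' is 1 byte beyond the range. */
static inline int range_spans_page_tables(unsigned long start, unsigned long end)
{
	return ((start ^ (end - 1)) & PMD_MASK) != 0;
}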
@@ -880,9 +874,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
 	   addresses from the user address space specified by mm, after writing
 	   back any dirty data.
 
-	   Note(1), 'end' is 1 byte beyond the end of the range to flush.
-
-	   Note(2), this is called with mm->page_table_lock held.*/
+	   Note, 'end' is 1 byte beyond the end of the range to flush. */
 
 	sh64_dcache_purge_user_range(mm, start, end);
 	sh64_icache_inv_user_page_range(mm, start, end);
@@ -898,7 +890,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr, unsigned
 	   the I-cache must be searched too in case the page in question is
 	   both writable and being executed from (e.g. stack trampolines.)
 
-	   Note(1), this is called with mm->page_table_lock held.
+	   Note, this is called with pte lock held.
 	*/
 
 	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);