author    | Hugh Dickins <hugh@veritas.com>       | 2005-10-29 21:16:34 -0400
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2005-10-30 00:40:41 -0400
commit    | 60ec5585496871345c1a8113d7b60ed9d9474866 (patch)
tree      | 172df570995ec777ca2a271dda7e3fcbb2dc5acb /arch/sh/mm/fault.c
parent    | deceb6cd17e6dfafe4c4f81b1b4153bc41b2cb70 (diff)
[PATCH] mm: i386 sh sh64 ready for split ptlock
Use pte_offset_map_lock, instead of pte_offset_map (or inappropriate
pte_offset_kernel) and mm-wide page_table_lock, in sundry arch places.
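(Editor's aside, not part of the patch text: the conversion pattern looks roughly like the hypothetical helper below. pte_offset_map_lock() finds and takes the spinlock covering just the one page table, where the old code took the mm-wide page_table_lock around pte_offset_map().)

/*
 * Editor's sketch, not from the patch: a hypothetical helper showing
 * the split-ptlock conversion pattern.  Before, callers took the
 * mm-wide page_table_lock around pte_offset_map(); after, the
 * pte_offset_map_lock()/pte_unmap_unlock() pair takes only the lock
 * covering this one page table.
 */
static int example_touch_pte(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long address)
{
	spinlock_t *ptl;
	pte_t *pte;

	/*
	 * Old pattern:
	 *	spin_lock(&mm->page_table_lock);
	 *	pte = pte_offset_map(pmd, address);
	 */
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (pte_none(*pte)) {
		pte_unmap_unlock(pte, ptl);
		return -1;
	}
	/* ... inspect or update *pte under the per-table lock ... */
	pte_unmap_unlock(pte, ptl);
	return 0;
}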
The i386 vm86 mark_screen_rdonly: there was, and still is, an assumption that
the screen fits inside a single page table, as indeed it does (one i386 page
table maps 4MB, and the vm86 screen memory lies below 1MB).
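(Editor's sketch of the shape of that code, details hypothetical, not the exact patched function: because the whole screen range sits in one page table, a single pte_offset_map_lock() can cover the write-protect loop.)

/*
 * Editor's sketch (hypothetical signature): every pte in the loop
 * lives in the same page table, so one pte lock covers them all.
 */
static void sketch_mark_screen_rdonly(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long screen, int npages)
{
	spinlock_t *ptl;
	pte_t *pte;
	int i;

	pte = pte_offset_map_lock(mm, pmd, screen, &ptl);
	for (i = 0; i < npages; i++, pte++)
		if (pte_present(*pte))
			set_pte(pte, pte_wrprotect(*pte));
	pte_unmap_unlock(pte - 1, ptl);
}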
The sh __do_page_fault, which handles both kernel faults (no lock needed) and
user mm faults (now taken under the pte lock - previously it did set_pte
without locking); see the diff below.
The sh64 flush_cache_range and its helpers, which wrongly assumed their
callers held page_table_lock (only its tlb_start_vma did, and it no longer
does so); the flush loop is moved down, and the large versus small range
decision is adjusted so that a range spanning more than one page table
counts as large (a sketch follows below).
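(Again an editor's sketch, not the patch itself: with the pte lock now per page table, a flush loop can only hold one table's lock at a time, so a range crossing a page-table boundary is handled like a large range. The threshold name below is hypothetical.)

/*
 * Editor's sketch (names hypothetical): the adjusted large-vs-small
 * decision.  A "small" range can be walked pte by pte under a single
 * page-table lock; anything crossing a page-table (pmd) boundary is
 * treated as "large", i.e. flushed wholesale.
 */
static inline int flush_range_is_large(unsigned long start, unsigned long end)
{
	if (end - start > SMALL_FLUSH_MAX)	/* hypothetical size cap */
		return 1;
	/* spans more than one page table? */
	return (start & PMD_MASK) != ((end - 1) & PMD_MASK);
}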
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Acked-by: Paul Mundt <lethal@linux-sh.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/sh/mm/fault.c')
 arch/sh/mm/fault.c | 40 +++++++++++++++++++++++-----------------
 1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 7abba2161da6..775f86cd3fe8 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -194,10 +194,13 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 			       unsigned long address)
 {
 	unsigned long addrmax = P4SEG;
-	pgd_t *dir;
+	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
 	pte_t entry;
+	struct mm_struct *mm;
+	spinlock_t *ptl;
+	int ret = 1;
 
 #ifdef CONFIG_SH_KGDB
 	if (kgdb_nofault && kgdb_bus_err_hook)
@@ -208,28 +211,28 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 		addrmax = P4SEG_STORE_QUE + 0x04000000;
 #endif
 
-	if (address >= P3SEG && address < addrmax)
-		dir = pgd_offset_k(address);
-	else if (address >= TASK_SIZE)
+	if (address >= P3SEG && address < addrmax) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else if (address >= TASK_SIZE)
 		return 1;
-	else if (!current->mm)
+	else if (!(mm = current->mm))
 		return 1;
 	else
-		dir = pgd_offset(current->mm, address);
+		pgd = pgd_offset(mm, address);
 
-	pmd = pmd_offset(dir, address);
-	if (pmd_none(*pmd))
-		return 1;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none_or_clear_bad(pmd))
 		return 1;
-	}
-	pte = pte_offset_kernel(pmd, address);
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
 	entry = *pte;
 	if (pte_none(entry) || pte_not_present(entry)
 	    || (writeaccess && !pte_write(entry)))
-		return 1;
+		goto unlock;
 
 	if (writeaccess)
 		entry = pte_mkdirty(entry);
@@ -251,8 +254,11 @@ asmlinkage int __do_page_fault(struct pt_regs *regs, unsigned long writeaccess,
 
 	set_pte(pte, entry);
 	update_mmu_cache(NULL, address, entry);
-
-	return 0;
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)