author    Stuart Menefy <stuart.menefy@st.com>  2006-11-23 21:42:24 -0500
committer Paul Mundt <lethal@linux-sh.org>      2006-12-05 20:45:38 -0500
commit    9b3a53ab76771e3669e50086c131e1574fe25847 (patch)
tree      07dab1cd3972c7b82ddd5b7ad1e28628d7756dbb /arch/sh/mm/fault.c
parent    9daa0c257d6c200b58092e0bfc32b77c4618a8af (diff)
sh: TLB miss fast-path optimizations.
Handle simple TLB miss faults which can be resolved completely from the
page table in assembler.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/mm/fault.c')
-rw-r--r--  arch/sh/mm/fault.c | 86 --------------------------------------
1 file changed, 0 insertions(+), 86 deletions(-)
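
For context, the "simple" miss the message refers to is one that can be satisfied from the page table alone: the PTE is present and, for a write access, writable. In that case the handler only marks the PTE young (and dirty on writes) and reloads the TLB, with no need for the full C fault path. Below is a minimal user-space sketch of that decision; the bit names and layout are invented for illustration and are not the real SH definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_PRESENT (1u << 0)	/* hypothetical "present" bit */
#define PTE_WRITE   (1u << 1)	/* hypothetical "writable" bit */
#define PTE_DIRTY   (1u << 2)	/* hypothetical "dirty" bit */
#define PTE_YOUNG   (1u << 3)	/* hypothetical "accessed" bit */

/*
 * Decide whether a TLB miss can be resolved from the PTE alone,
 * updating the software PTE the way __do_page_fault() below did
 * (pte_mkdirty()/pte_mkyoung()).  Returns false when the full fault
 * handler is needed.  On real SH-4 the reload would also flush the
 * stale ITLB entry first, since "ldtlb" only writes the UTLB.
 */
static bool fast_path_resolve(uint32_t *pte, bool writeaccess)
{
	if (!(*pte & PTE_PRESENT))
		return false;		/* not present: take the slow path */
	if (writeaccess && !(*pte & PTE_WRITE))
		return false;		/* protection fault: slow path */
	if (writeaccess)
		*pte |= PTE_DIRTY;	/* pte_mkdirty() */
	*pte |= PTE_YOUNG;		/* pte_mkyoung() */
	return true;			/* reload the TLB from *pte */
}

int main(void)
{
	uint32_t pte = PTE_PRESENT | PTE_WRITE;

	printf("write miss resolved: %d (pte=0x%x)\n",
	       fast_path_resolve(&pte, true), (unsigned)pte);

	pte = PTE_PRESENT;	/* read-only page */
	printf("write to read-only resolved: %d\n",
	       fast_path_resolve(&pte, true));
	return 0;
}
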
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 128907ef7fcd..123fb80c859d 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -223,89 +223,3 @@ do_sigbus:
 	if (!user_mode(regs))
 		goto no_context;
 }
-
-#ifdef CONFIG_SH_STORE_QUEUES
-/*
- * This is a special case for the SH-4 store queues, as pages for this
- * space still need to be faulted in before it's possible to flush the
- * store queue cache for writeout to the remapped region.
- */
-#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
-#else
-#define P3_ADDR_MAX		P4SEG
-#endif
-
-/*
- * Called with interrupts disabled.
- */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-					 unsigned long writeaccess,
-					 unsigned long address)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	struct mm_struct *mm = current->mm;
-	spinlock_t *ptl;
-	int ret = 1;
-
-#ifdef CONFIG_SH_KGDB
-	if (kgdb_nofault && kgdb_bus_err_hook)
-		kgdb_bus_err_hook();
-#endif
-
-	/*
-	 * We don't take page faults for P1, P2, and parts of P4, these
-	 * are always mapped, whether it be due to legacy behaviour in
-	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
-	 */
-	if (address >= P3SEG && address < P3_ADDR_MAX) {
-		pgd = pgd_offset_k(address);
-		mm = NULL;
-	} else {
-		if (unlikely(address >= TASK_SIZE || !mm))
-			return 1;
-
-		pgd = pgd_offset(mm, address);
-	}
-
-	pud = pud_offset(pgd, address);
-	if (pud_none_or_clear_bad(pud))
-		return 1;
-	pmd = pmd_offset(pud, address);
-	if (pmd_none_or_clear_bad(pmd))
-		return 1;
-
-	if (mm)
-		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-	else
-		pte = pte_offset_kernel(pmd, address);
-
-	entry = *pte;
-	if (unlikely(pte_none(entry) || pte_not_present(entry)))
-		goto unlock;
-	if (unlikely(writeaccess && !pte_write(entry)))
-		goto unlock;
-
-	if (writeaccess)
-		entry = pte_mkdirty(entry);
-	entry = pte_mkyoung(entry);
-
-#ifdef CONFIG_CPU_SH4
-	/*
-	 * ITLB is not affected by "ldtlb" instruction.
-	 * So, we need to flush the entry by ourselves.
-	 */
-	__flush_tlb_page(get_asid(), address & PAGE_MASK);
-#endif
-
-	set_pte(pte, entry);
-	update_mmu_cache(NULL, address, entry);
-	ret = 0;
-unlock:
-	if (mm)
-		pte_unmap_unlock(pte, ptl);
-	return ret;
-}
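
The function removed above walked the page tables with the generic pgd/pud/pmd/pte helpers before it could test the PTE; the assembler fast path has to perform the equivalent walk. A minimal user-space sketch of such a software walk follows, using an invented two-level layout rather than the real SH one.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define PGD_SHIFT	22		/* 10 bits per level, invented */
#define PTRS_PER_PGD	1024
#define PTRS_PER_PTE	1024

typedef uint32_t pte_t;

/*
 * Walk a toy two-level table.  Returns NULL when a level is empty,
 * which is where the deleted code returned 1 and fell back to the
 * slow path (cf. pgd_offset()/pud_offset()/pmd_offset() above).
 * The caller still has to check the PTE itself, as the deleted code
 * did with pte_none()/pte_not_present().
 */
static pte_t *walk(pte_t *pgd[], uint32_t address)
{
	pte_t *pt = pgd[address >> PGD_SHIFT];

	if (!pt)
		return NULL;
	return &pt[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
}

int main(void)
{
	static pte_t *pgd[PTRS_PER_PGD];
	uint32_t addr = 0x00403000;
	pte_t *pte;

	/* populate one second-level table and one PTE */
	pgd[addr >> PGD_SHIFT] = calloc(PTRS_PER_PTE, sizeof(pte_t));
	pgd[addr >> PGD_SHIFT][(addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)] = 0x1;

	pte = walk(pgd, addr);
	printf("pte %s (value 0x%x)\n", pte ? "found" : "missing",
	       pte ? (unsigned)*pte : 0u);
	return 0;
}
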