Diffstat (limited to 'arch/sh/mm/fault.c')
 arch/sh/mm/fault.c |   87 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 87 insertions(+), 0 deletions(-)
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 716ebf568af2..fa5d7f0b9f18 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
 #include <linux/kprobes.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -224,3 +225,89 @@ do_sigbus:
 	if (!user_mode(regs))
 		goto no_context;
 }
+
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX	P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+					 unsigned long writeaccess,
+					 unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+	struct mm_struct *mm = current->mm;
+	spinlock_t *ptl;
+	int ret = 1;
+
+#ifdef CONFIG_SH_KGDB
+	if (kgdb_nofault && kgdb_bus_err_hook)
+		kgdb_bus_err_hook();
+#endif
+
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else {
+		if (unlikely(address >= TASK_SIZE || !mm))
+			return 1;
+
+		pgd = pgd_offset(mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 1;
+
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
+	entry = *pte;
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
+		goto unlock;
+
+	if (writeaccess)
+		entry = pte_mkdirty(entry);
+	entry = pte_mkyoung(entry);
+
+#ifdef CONFIG_CPU_SH4
+	/*
+	 * ITLB is not affected by "ldtlb" instruction.
+	 * So, we need to flush the entry by ourselves.
+	 */
+	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+	set_pte(pte, entry);
+	update_mmu_cache(NULL, address, entry);
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
+}
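
Note (not part of the patch above): the new __do_page_fault() first classifies the faulting
address before walking any page table: addresses in [P3SEG, P3_ADDR_MAX) are resolved against
the kernel page table, ordinary user addresses against current->mm, and anything else is
rejected back to the slow path. The following standalone C sketch only illustrates that
classification; the segment constants and TASK_SIZE value are assumed, typical SH-4 values
used for illustration, not definitions taken from this patch (the real ones live in the
arch/sh headers).

/* sq_classify.c - illustrative sketch of the address classification only */
#include <stdio.h>

#define P3SEG           0xc0000000UL    /* assumed: start of mapped kernel (P3) segment */
#define P4SEG           0xe0000000UL    /* assumed: start of control (P4) segment */
#define P4SEG_STORE_QUE P4SEG           /* assumed: SH-4 store queue window base */
#define P3_ADDR_MAX     (P4SEG_STORE_QUE + 0x04000000UL)  /* SQ window is 64 MiB */
#define TASK_SIZE       0x7c000000UL    /* assumed user address-space limit */

/* 0 = kernel page table, 1 = user mm, -1 = not handled by the fast path */
static int classify(unsigned long address)
{
	if (address >= P3SEG && address < P3_ADDR_MAX)
		return 0;	/* P3/vmalloc or store-queue address: kernel tables */
	if (address >= TASK_SIZE)
		return -1;	/* outside both handled ranges */
	return 1;		/* ordinary user address: walk current->mm */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(0x00400000UL),	/* user text */
	       classify(0xc0080000UL),	/* P3 / vmalloc */
	       classify(0xe0000000UL));	/* store queue base */
	return 0;
}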