Diffstat (limited to 'arch')
-rw-r--r--	arch/sh/kernel/cpu/sh3/entry.S	201
-rw-r--r--	arch/sh/mm/fault.c	87
2 files changed, 107 insertions, 181 deletions
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 1c520358ba90..c19205b0f2c0 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,10 +13,8 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/unistd.h>
 #include <asm/cpu/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/unistd.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -138,14 +136,29 @@ ENTRY(tlb_protection_violation_store)
 
 call_dpf:
 	mov.l	1f, r0
-	mov.l	@r0, r6		! address
+	mov	r5, r8
+	mov.l	@r0, r6
+	mov	r6, r9
+	mov.l	2f, r0
+	sts	pr, r10
+	jsr	@r0
+	 mov	r15, r4
+	!
+	tst	r0, r0
+	bf/s	0f
+	 lds	r10, pr
+	rts
+	 nop
+0:	sti
 	mov.l	3f, r0
-
+	mov	r9, r6
+	mov	r8, r5
 	jmp	@r0
-	 mov	r15, r4		! regs
+	 mov	r15, r4
 
 	.align 2
 1:	.long	MMU_TEA
+2:	.long	__do_page_fault
 3:	.long	do_page_fault
 
 	.align 2
@@ -332,171 +345,9 @@ general_exception:
 !
 !
 
-/* gas doesn't flag impossible values for mov #immediate as an error */
-#if (_PAGE_PRESENT >> 2) > 0x7f
-#error cannot load PAGE_PRESENT as an immediate
-#endif
-#if _PAGE_DIRTY > 0x7f
-#error cannot load PAGE_DIRTY as an immediate
-#endif
-#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
-#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
-#endif
-
-#if defined(CONFIG_CPU_SH4)
-#define ldmmupteh(r)	mov.l	8f, r
-#else
-#define ldmmupteh(r)	mov	#MMU_PTEH, r
-#endif
-
 	.balign	1024,0,1024
 tlb_miss:
-#ifdef COUNT_EXCEPTIONS
-	! Increment the counts
-	mov.l	9f, k1
-	mov.l	@k1, k2
-	add	#1, k2
-	mov.l	k2, @k1
-#endif
-
-	! k0	scratch
-	! k1	pgd and pte pointers
-	! k2	faulting address
-	! k3	pgd and pte index masks
-	! k4	shift
-
-	! Load up the pgd entry (k1)
-
-	ldmmupteh(k0)			! 9 LS (latency=2)	MMU_PTEH
-
-	mov.w	4f, k3			! 8 LS (latency=2)	(PTRS_PER_PGD-1) << 2
-	mov	#-(PGDIR_SHIFT-2), k4	! 6 EX
-
-	mov.l	@(MMU_TEA-MMU_PTEH,k0), k2	! 18 LS (latency=2)
-
-	mov.l	@(MMU_TTB-MMU_PTEH,k0), k1	! 18 LS (latency=2)
-
-	mov	k2, k0			! 5 MT (latency=0)
-	shld	k4, k0			! 99 EX
-
-	and	k3, k0			! 78 EX
-
-	mov.l	@(k0, k1), k1		! 21 LS (latency=2)
-	mov	#-(PAGE_SHIFT-2), k4	! 6 EX
-
-	! Load up the pte entry (k2)
-
-	mov	k2, k0			! 5 MT (latency=0)
-	shld	k4, k0			! 99 EX
-
-	tst	k1, k1			! 86 MT
-
-	bt	20f			! 110 BR
-
-	mov.w	3f, k3			! 8 LS (latency=2)	(PTRS_PER_PTE-1) << 2
-	and	k3, k0			! 78 EX
-	mov.w	5f, k4			! 8 LS (latency=2)	_PAGE_PRESENT
-
-	mov.l	@(k0, k1), k2		! 21 LS (latency=2)
-	add	k0, k1			! 49 EX
-
-#ifdef CONFIG_CPU_HAS_PTEA
-	! Test the entry for present and _PAGE_ACCESSED
-
-	mov	#-28, k3		! 6 EX
-	mov	k2, k0			! 5 MT (latency=0)
-
-	tst	k4, k2			! 68 MT
-	shld	k3, k0			! 99 EX
-
-	bt	20f			! 110 BR
-
-	! Set PTEA register
-	! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
-	!
-	! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
-
-	and	#0xe, k0		! 79 EX
-
-	mov	k0, k3			! 5 MT (latency=0)
-	mov	k2, k0			! 5 MT (latency=0)
-
-	and	#1, k0			! 79 EX
-
-	or	k0, k3			! 82 EX
-
-	ldmmupteh(k0)			! 9 LS (latency=2)
-	shll2	k4			! 101 EX		_PAGE_ACCESSED
-
-	tst	k4, k2			! 68 MT
-
-	mov.l	k3, @(MMU_PTEA-MMU_PTEH,k0)	! 27 LS
-
-	mov.l	7f, k3			! 9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
-
-	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
-#else
-
-	! Test the entry for present and _PAGE_ACCESSED
-
-	mov.l	7f, k3			! 9 LS (latency=2)	_PAGE_FLAGS_HARDWARE_MASK
-	tst	k4, k2			! 68 MT
-
-	shll2	k4			! 101 EX		_PAGE_ACCESSED
-	ldmmupteh(k0)			! 9 LS (latency=2)
-
-	bt	20f			! 110 BR
-	tst	k4, k2			! 68 MT
-
-	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
-
-#endif
-
-	! Set up the entry
-
-	and	k2, k3			! 78 EX
-	bt/s	10f			! 108 BR
-
-	 mov.l	k3, @(MMU_PTEL-MMU_PTEH,k0)	! 27 LS
-
-	ldtlb				! 128 CO
-
-	! At least one instruction between ldtlb and rte
-	nop				! 119 NOP
-
-	rte				! 126 CO
-
-	 nop				! 119 NOP
-
-
-10:	or	k4, k2			! 82 EX
-
-	ldtlb				! 128 CO
-
-	! At least one instruction between ldtlb and rte
-	mov.l	k2, @k1			! 27 LS
-
-	rte				! 126 CO
-
-	! Note we cannot execute mov here, because it is executed after
-	! restoring SSR, so would be executed in user space.
-	 nop				! 119 NOP
-
-
-	.align 5
-	! Once cache line if possible...
-1:	.long	swapper_pg_dir
-3:	.short	(PTRS_PER_PTE-1) << 2
-4:	.short	(PTRS_PER_PGD-1) << 2
-5:	.long	_PAGE_PRESENT
-7:	.long	_PAGE_FLAGS_HARDWARE_MASK
-8:	.long	MMU_PTEH
-#ifdef COUNT_EXCEPTIONS
-9:	.long	exception_count_miss
-#endif
-
-	! Either pgd or pte not present
-20:	mov.l	1f, k2
+	mov.l	1f, k2
 	mov.l	4f, k3
 	bra	handle_exception
 	 mov.l	@k2, k2
@@ -647,15 +498,6 @@ skip_save:
 	bf	interrupt_exception
 	shlr2	r8
 	shlr	r8
-
-#ifdef COUNT_EXCEPTIONS
-	mov.l	5f, r9
-	add	r8, r9
-	mov.l	@r9, r10
-	add	#1, r10
-	mov.l	r10, @r9
-#endif
-
 	mov.l	4f, r9
 	add	r8, r9
 	mov.l	@r9, r9
@@ -669,9 +511,6 @@ skip_save:
 2:	.long	0x000080f0	! FD=1, IMASK=15
 3:	.long	0xcfffffff	! RB=0, BL=0
 4:	.long	exception_handling_table
-#ifdef COUNT_EXCEPTIONS
-5:	.long	exception_count_table
-#endif
 
 interrupt_exception:
 	mov.l	1f, r9
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 716ebf568af2..fa5d7f0b9f18 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -17,6 +17,7 @@
 #include <linux/kprobes.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
 #include <asm/kgdb.h>
 
 extern void die(const char *,struct pt_regs *,long);
@@ -224,3 +225,89 @@ do_sigbus:
 	if (!user_mode(regs))
 		goto no_context;
 }
+
+#ifdef CONFIG_SH_STORE_QUEUES
+/*
+ * This is a special case for the SH-4 store queues, as pages for this
+ * space still need to be faulted in before it's possible to flush the
+ * store queue cache for writeout to the remapped region.
+ */
+#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
+#else
+#define P3_ADDR_MAX		P4SEG
+#endif
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
+					 unsigned long writeaccess,
+					 unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	pte_t entry;
+	struct mm_struct *mm = current->mm;
+	spinlock_t *ptl;
+	int ret = 1;
+
+#ifdef CONFIG_SH_KGDB
+	if (kgdb_nofault && kgdb_bus_err_hook)
+		kgdb_bus_err_hook();
+#endif
+
+	/*
+	 * We don't take page faults for P1, P2, and parts of P4, these
+	 * are always mapped, whether it be due to legacy behaviour in
+	 * 29-bit mode, or due to PMB configuration in 32-bit mode.
+	 */
+	if (address >= P3SEG && address < P3_ADDR_MAX) {
+		pgd = pgd_offset_k(address);
+		mm = NULL;
+	} else {
+		if (unlikely(address >= TASK_SIZE || !mm))
+			return 1;
+
+		pgd = pgd_offset(mm, address);
+	}
+
+	pud = pud_offset(pgd, address);
+	if (pud_none_or_clear_bad(pud))
+		return 1;
+	pmd = pmd_offset(pud, address);
+	if (pmd_none_or_clear_bad(pmd))
+		return 1;
+
+	if (mm)
+		pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+	else
+		pte = pte_offset_kernel(pmd, address);
+
+	entry = *pte;
+	if (unlikely(pte_none(entry) || pte_not_present(entry)))
+		goto unlock;
+	if (unlikely(writeaccess && !pte_write(entry)))
+		goto unlock;
+
+	if (writeaccess)
+		entry = pte_mkdirty(entry);
+	entry = pte_mkyoung(entry);
+
+#ifdef CONFIG_CPU_SH4
+	/*
+	 * ITLB is not affected by "ldtlb" instruction.
+	 * So, we need to flush the entry by ourselves.
+	 */
+	local_flush_tlb_one(get_asid(), address & PAGE_MASK);
+#endif
+
+	set_pte(pte, entry);
+	update_mmu_cache(NULL, address, entry);
+	ret = 0;
+unlock:
+	if (mm)
+		pte_unmap_unlock(pte, ptl);
+	return ret;
+}
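
A minimal C sketch of the new call_dpf control flow in the entry.S hunk above, for readers who don't speak SH-3 assembly. The stub functions and their success condition are hypothetical stand-ins (the real entry points are __do_page_fault() and do_page_fault() in arch/sh/mm/fault.c); only the dispatch shape is taken from the diff.

#include <stdio.h>

/* Hypothetical stand-in for __do_page_fault(): returns 0 when the PTE
 * was already present and the TLB could be refilled (fast path). */
static int fast_fault_stub(unsigned long writeaccess, unsigned long address)
{
	(void)writeaccess;
	return (address & 0x1000) ? 1 : 0;	/* made-up success condition */
}

/* Hypothetical stand-in for the full do_page_fault() slow path. */
static void slow_fault_stub(unsigned long writeaccess, unsigned long address)
{
	printf("slow path: write=%lu addr=%#lx\n", writeaccess, address);
}

/* Mirrors call_dpf: try the fast path with interrupts still disabled;
 * on failure, re-enable interrupts (the "sti" in the assembly) and
 * fall through to the full handler. */
static void call_dpf(unsigned long writeaccess, unsigned long address)
{
	if (fast_fault_stub(writeaccess, address) == 0)
		return;			/* TLB refilled; rts to the fault site */
	/* sti happens here in entry.S */
	slow_fault_stub(writeaccess, address);
}

int main(void)
{
	call_dpf(1, 0x00400000);	/* fast path succeeds */
	call_dpf(1, 0x00401000);	/* falls back to the slow path */
	return 0;
}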
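
The PTEA comment in the removed fast-path handler (MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)) packs PTE bits 31:29 into PTEA bits 3:1 and copies PTE bit 0 into PTEA bit 0. A small worked example of just that arithmetic, assuming nothing beyond the formula quoted in the comment:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* The packing from the removed comment: bits 31:29 -> 3:1, bit 0 -> 0. */
static uint32_t ptea_from_pte(uint32_t pteval)
{
	return ((pteval >> 28) & 0xe) | (pteval & 0x1);
}

int main(void)
{
	/* A PTE with bits 31:29 = 0b101 and bit 0 set... */
	uint32_t pte = (0x5u << 29) | 0x1u;	/* 0xa0000001 */
	/* ...yields PTEA bits 3:1 = 0b101, bit 0 = 1, i.e. 0xb. */
	printf("PTEA = %#x\n", ptea_from_pte(pte));
	assert(ptea_from_pte(pte) == 0xb);
	return 0;
}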
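
The fault.c hunk dispatches faults in P3 space (plus the SH-4 store-queue window when CONFIG_SH_STORE_QUEUES is set) to the kernel page tables. A standalone sketch of just the range check; the segment constants are hard-coded here to the usual 29-bit SH values rather than taken from <asm/addrspace.h>, so treat them as illustrative:

#include <stdbool.h>
#include <stdio.h>

#define P3SEG		0xc0000000ul	/* illustrative; see <asm/addrspace.h> */
#define P4SEG_STORE_QUE	0xe0000000ul	/* SH-4 store-queue window base */
#define P3_ADDR_MAX	(P4SEG_STORE_QUE + 0x04000000)

/* True when __do_page_fault() should walk the kernel page tables
 * (pgd_offset_k) instead of the faulting task's mm. */
static bool is_kernel_fault(unsigned long address)
{
	return address >= P3SEG && address < P3_ADDR_MAX;
}

int main(void)
{
	printf("%d\n", is_kernel_fault(0xc0001000ul));	/* 1: P3 (vmalloc) */
	printf("%d\n", is_kernel_fault(0xe0000100ul));	/* 1: store queue */
	printf("%d\n", is_kernel_fault(0x00400000ul));	/* 0: user address */
	return 0;
}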