author	Paul Mundt <lethal@linux-sh.org>	2007-02-14 00:13:10 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2007-02-14 00:13:10 -0500
commit	db2e1fa3f0eefbbe04e90d6e4d290ee176b28248 (patch)
tree	5d63957fbd938cc02c31bec286f37caadf471eb2 /arch/sh/kernel/cpu/sh3/entry.S
parent	401e9093a326725780aed270a6eb53e7ddab14ff (diff)

sh: Revert TLB miss fast-path changes that broke PTEA parts.

This ended up causing problems for older parts (particularly ones using
PTEA). Revert this for now, it can be added back in once it's had some
more testing.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/sh/kernel/cpu/sh3/entry.S')
-rw-r--r--	arch/sh/kernel/cpu/sh3/entry.S	201
1 file changed, 20 insertions(+), 181 deletions(-)
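For context: the fast path removed by this revert programmed the PTEA register directly from the PTE value, using the bit packing documented in the deleted comment further down, MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1). The C fragment below is only a sketch of that packing, assuming a 32-bit pteval; the helper name is invented for this note and does not exist in the kernel sources.

#include <stdint.h>

/*
 * Sketch only: pack a PTEA value the way the removed fast-path
 * assembly documents it,
 *     MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
 * i.e. PTE bits 31:29 land in PTEA bits 3:1 and PTE bit 0 becomes
 * PTEA bit 0. The function name and uint32_t type are assumptions.
 */
static inline uint32_t sketch_ptea_from_pte(uint32_t pteval)
{
	return ((pteval >> 28) & 0xe) | (pteval & 0x1);
}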
diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S
index 1c520358ba90..c19205b0f2c0 100644
--- a/arch/sh/kernel/cpu/sh3/entry.S
+++ b/arch/sh/kernel/cpu/sh3/entry.S
@@ -13,10 +13,8 @@
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
-#include <asm/unistd.h>
 #include <asm/cpu/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/page.h>
+#include <asm/unistd.h>
 
 ! NOTE:
 ! GNU as (as of 2.9.1) changes bf/s into bt/s and bra, when the address
@@ -138,14 +136,29 @@ ENTRY(tlb_protection_violation_store)
 
 call_dpf:
 	mov.l 1f, r0
-	mov.l @r0, r6 ! address
+	mov r5, r8
+	mov.l @r0, r6
+	mov r6, r9
+	mov.l 2f, r0
+	sts pr, r10
+	jsr @r0
+	mov r15, r4
+	!
+	tst r0, r0
+	bf/s 0f
+	lds r10, pr
+	rts
+	nop
+0:	sti
 	mov.l 3f, r0
-
+	mov r9, r6
+	mov r8, r5
 	jmp @r0
-	mov r15, r4 ! regs
+	mov r15, r4
 
 	.align 2
 1:	.long MMU_TEA
+2:	.long __do_page_fault
 3:	.long do_page_fault
 
 	.align 2
@@ -332,171 +345,9 @@ general_exception:
 !
 !
 
-/* gas doesn't flag impossible values for mov #immediate as an error */
-#if (_PAGE_PRESENT >> 2) > 0x7f
-#error cannot load PAGE_PRESENT as an immediate
-#endif
-#if _PAGE_DIRTY > 0x7f
-#error cannot load PAGE_DIRTY as an immediate
-#endif
-#if (_PAGE_PRESENT << 2) != _PAGE_ACCESSED
-#error cannot derive PAGE_ACCESSED from PAGE_PRESENT
-#endif
-
-#if defined(CONFIG_CPU_SH4)
-#define ldmmupteh(r)	mov.l 8f, r
-#else
-#define ldmmupteh(r)	mov #MMU_PTEH, r
-#endif
-
 	.balign 1024,0,1024
 tlb_miss:
-#ifdef COUNT_EXCEPTIONS
-	! Increment the counts
-	mov.l 9f, k1
-	mov.l @k1, k2
-	add #1, k2
-	mov.l k2, @k1
-#endif
-
-	! k0 scratch
-	! k1 pgd and pte pointers
-	! k2 faulting address
-	! k3 pgd and pte index masks
-	! k4 shift
-
-	! Load up the pgd entry (k1)
-
-	ldmmupteh(k0) ! 9 LS (latency=2) MMU_PTEH
-
-	mov.w 4f, k3 ! 8 LS (latency=2) (PTRS_PER_PGD-1) << 2
-	mov #-(PGDIR_SHIFT-2), k4 ! 6 EX
-
-	mov.l @(MMU_TEA-MMU_PTEH,k0), k2 ! 18 LS (latency=2)
-
-	mov.l @(MMU_TTB-MMU_PTEH,k0), k1 ! 18 LS (latency=2)
-
-	mov k2, k0 ! 5 MT (latency=0)
-	shld k4, k0 ! 99 EX
-
-	and k3, k0 ! 78 EX
-
-	mov.l @(k0, k1), k1 ! 21 LS (latency=2)
-	mov #-(PAGE_SHIFT-2), k4 ! 6 EX
-
-	! Load up the pte entry (k2)
-
-	mov k2, k0 ! 5 MT (latency=0)
-	shld k4, k0 ! 99 EX
-
-	tst k1, k1 ! 86 MT
-
-	bt 20f ! 110 BR
-
-	mov.w 3f, k3 ! 8 LS (latency=2) (PTRS_PER_PTE-1) << 2
-	and k3, k0 ! 78 EX
-	mov.w 5f, k4 ! 8 LS (latency=2) _PAGE_PRESENT
-
-	mov.l @(k0, k1), k2 ! 21 LS (latency=2)
-	add k0, k1 ! 49 EX
-
-#ifdef CONFIG_CPU_HAS_PTEA
-	! Test the entry for present and _PAGE_ACCESSED
-
-	mov #-28, k3 ! 6 EX
-	mov k2, k0 ! 5 MT (latency=0)
-
-	tst k4, k2 ! 68 MT
-	shld k3, k0 ! 99 EX
-
-	bt 20f ! 110 BR
-
-	! Set PTEA register
-	! MMU_PTEA = ((pteval >> 28) & 0xe) | (pteval & 0x1)
-	!
-	! k0=pte>>28, k1=pte*, k2=pte, k3=<unused>, k4=_PAGE_PRESENT
-
-	and #0xe, k0 ! 79 EX
-
-	mov k0, k3 ! 5 MT (latency=0)
-	mov k2, k0 ! 5 MT (latency=0)
-
-	and #1, k0 ! 79 EX
-
-	or k0, k3 ! 82 EX
-
-	ldmmupteh(k0) ! 9 LS (latency=2)
-	shll2 k4 ! 101 EX _PAGE_ACCESSED
-
-	tst k4, k2 ! 68 MT
-
-	mov.l k3, @(MMU_PTEA-MMU_PTEH,k0) ! 27 LS
-
-	mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
-
-	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
-#else
-
-	! Test the entry for present and _PAGE_ACCESSED
-
-	mov.l 7f, k3 ! 9 LS (latency=2) _PAGE_FLAGS_HARDWARE_MASK
-	tst k4, k2 ! 68 MT
-
-	shll2 k4 ! 101 EX _PAGE_ACCESSED
-	ldmmupteh(k0) ! 9 LS (latency=2)
-
-	bt 20f ! 110 BR
-	tst k4, k2 ! 68 MT
-
-	! k0=MMU_PTEH, k1=pte*, k2=pte, k3=_PAGE_FLAGS_HARDWARE, k4=_PAGE_ACCESSED
-
-#endif
-
-	! Set up the entry
-
-	and k2, k3 ! 78 EX
-	bt/s 10f ! 108 BR
-
-	mov.l k3, @(MMU_PTEL-MMU_PTEH,k0) ! 27 LS
-
-	ldtlb ! 128 CO
-
-	! At least one instruction between ldtlb and rte
-	nop ! 119 NOP
-
-	rte ! 126 CO
-
-	nop ! 119 NOP
-
-
-10:	or k4, k2 ! 82 EX
-
-	ldtlb ! 128 CO
-
-	! At least one instruction between ldtlb and rte
-	mov.l k2, @k1 ! 27 LS
-
-	rte ! 126 CO
-
-	! Note we cannot execute mov here, because it is executed after
-	! restoring SSR, so would be executed in user space.
-	nop ! 119 NOP
-
-
-	.align 5
-	! Once cache line if possible...
-1:	.long swapper_pg_dir
-3:	.short (PTRS_PER_PTE-1) << 2
-4:	.short (PTRS_PER_PGD-1) << 2
-5:	.long _PAGE_PRESENT
-7:	.long _PAGE_FLAGS_HARDWARE_MASK
-8:	.long MMU_PTEH
-#ifdef COUNT_EXCEPTIONS
-9:	.long exception_count_miss
-#endif
-
-	! Either pgd or pte not present
-20:	mov.l 1f, k2
+	mov.l 1f, k2
 	mov.l 4f, k3
 	bra handle_exception
 	mov.l @k2, k2
@@ -647,15 +498,6 @@ skip_save:
 	bf interrupt_exception
 	shlr2 r8
 	shlr r8
-
-#ifdef COUNT_EXCEPTIONS
-	mov.l 5f, r9
-	add r8, r9
-	mov.l @r9, r10
-	add #1, r10
-	mov.l r10, @r9
-#endif
-
 	mov.l 4f, r9
 	add r8, r9
 	mov.l @r9, r9
@@ -669,9 +511,6 @@ skip_save:
 2:	.long 0x000080f0 ! FD=1, IMASK=15
 3:	.long 0xcfffffff ! RB=0, BL=0
 4:	.long exception_handling_table
-#ifdef COUNT_EXCEPTIONS
-5:	.long exception_count_table
-#endif
 
 interrupt_exception:
 	mov.l 1f, r9