aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/tlb_nohash_low.S
diff options
context:
space:
mode:
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>    2008-12-18 14:13:42 -0500
committer Paul Mackerras <paulus@samba.org>    2008-12-20 22:21:16 -0500
commit2a4aca1144394653269720ffbb5a325a77abd5fa (patch)
tree553bbcbb294ac5923f72430b7317b5c80a27141c /arch/powerpc/mm/tlb_nohash_low.S
parentf048aace29e007f2b642097e2da8231e0e9cce2d (diff)
powerpc/mm: Split low level tlb invalidate for nohash processors
Currently, the various forms of low level TLB invalidations are all implemented in misc_32.S for 32-bit processors, in a fairly scary mess of #ifdef's and with interesting duplication such as a whole bunch of code for FSL _tlbie and _tlbia which are no longer used.

This moves things around such that _tlbie is now defined in hash_low_32.S and is only used by the 32-bit hash code, and all nohash CPUs use the various _tlbil_* forms that are now moved to a new file, tlb_nohash_low.S.

I moved all the definitions for that stuff out of include/asm/tlbflush.h, as they are really internal mm stuff, into mm/mmu_decl.h.

The code should have no functional changes. I kept some variants inline for trivial forms on things like 40x and 8xx.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Acked-by: Kumar Gala <galak@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm/tlb_nohash_low.S')
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S | 165
1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
new file mode 100644
index 00000000000..763c59fe007
--- /dev/null
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -0,0 +1,165 @@
1/*
2 * This file contains low-level functions for performing various
3 * types of TLB invalidations on various processors with no hash
4 * table.
5 *
6 * This file implements the following functions for all no-hash
7 * processors. Some aren't implemented for some variants. Some
8 * are inline in tlbflush.h
9 *
10 * - tlbil_va
11 * - tlbil_pid
12 * - tlbil_all
13 * - tlbivax_bcast (not yet)
14 *
15 * Code mostly moved over from misc_32.S
16 *
17 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
18 *
19 * Partially rewritten by Cort Dougan (cort@cs.nmt.edu)
20 * Paul Mackerras, Kumar Gala and Benjamin Herrenschmidt.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 *
27 */
28
29#include <asm/reg.h>
30#include <asm/page.h>
31#include <asm/cputable.h>
32#include <asm/mmu.h>
33#include <asm/ppc_asm.h>
34#include <asm/asm-offsets.h>
35#include <asm/processor.h>
36
37#if defined(CONFIG_40x)
38
39/*
40 * 40x implementation needs only tlbil_va
41 */
42_GLOBAL(_tlbil_va)
43 /* We run the search with interrupts disabled because we have to change
44 * the PID and I don't want to preempt when that happens.
45 */
46 mfmsr r5
47 mfspr r6,SPRN_PID
48 wrteei 0
49 mtspr SPRN_PID,r4
50 tlbsx. r3, 0, r3
51 mtspr SPRN_PID,r6
52 wrtee r5
53 bne 1f
54 sync
55 /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is
56 * clear. Since 25 is the V bit in the TLB_TAG, loading this value
57 * will invalidate the TLB entry. */
58 tlbwe r3, r3, TLB_TAG
59 isync
601: blr
61
62#elif defined(CONFIG_8xx)
63
64/*
65 * Nothing to do for 8xx, everything is inline
66 */
67
68#elif defined(CONFIG_44x)
69
70/*
71 * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
72 * of the TLB for everything else.
73 */
74_GLOBAL(_tlbil_va)
75 mfspr r5,SPRN_MMUCR
76 rlwimi r5,r4,0,24,31 /* Set TID */
77
78 /* We have to run the search with interrupts disabled, even critical
79 * and debug interrupts (in fact the only critical exceptions we have
80 * are debug and machine check). Otherwise an interrupt which causes
81 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
82 mfmsr r4
83 lis r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
84 addi r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
85 andc r6,r4,r6
86 mtmsr r6
87 mtspr SPRN_MMUCR,r5
88 tlbsx. r3, 0, r3
89 mtmsr r4
90 bne 1f
91 sync
92 /* There are only 64 TLB entries, so r3 < 64,
93 * which means bit 22, is clear. Since 22 is
94 * the V bit in the TLB_PAGEID, loading this
95 * value will invalidate the TLB entry.
96 */
97 tlbwe r3, r3, PPC44x_TLB_PAGEID
98 isync
991: blr
100
101_GLOBAL(_tlbil_all)
102_GLOBAL(_tlbil_pid)
103 li r3,0
104 sync
105
106 /* Load high watermark */
107 lis r4,tlb_44x_hwater@ha
108 lwz r5,tlb_44x_hwater@l(r4)
109
1101: tlbwe r3,r3,PPC44x_TLB_PAGEID
111 addi r3,r3,1
112 cmpw 0,r3,r5
113 ble 1b
114
115 isync
116 blr
117
118#elif defined(CONFIG_FSL_BOOKE)
119/*
120 * FSL BookE implementations. Currently _pid and _all are the
121 * same. This will change when tlbilx is actually supported and
122 * performs invalidate-by-PID. This change will be driven by
123 * mmu_features conditional
124 */
125
126/*
127 * Flush MMU TLB on the local processor
128 */
129_GLOBAL(_tlbil_pid)
130_GLOBAL(_tlbil_all)
131#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
132 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
133 li r3,(MMUCSR0_TLBFI)@l
134 mtspr SPRN_MMUCSR0, r3
1351:
136 mfspr r3,SPRN_MMUCSR0
137 andi. r3,r3,MMUCSR0_TLBFI@l
138 bne 1b
139 msync
140 isync
141 blr
142
143/*
144 * Flush MMU TLB for a particular address, but only on the local processor
145 * (no broadcast)
146 */
147_GLOBAL(_tlbil_va)
148 mfmsr r10
149 wrteei 0
150 slwi r4,r4,16
151 mtspr SPRN_MAS6,r4 /* assume AS=0 for now */
152 tlbsx 0,r3
153 mfspr r4,SPRN_MAS1 /* check valid */
154 andis. r3,r4,MAS1_VALID@h
155 beq 1f
156 rlwinm r4,r4,0,1,31
157 mtspr SPRN_MAS1,r4
158 tlbwe
159 msync
160 isync
1611: wrtee r10
162 blr
#else
/* A bare "#elif" with no expression is invalid preprocessor syntax and
 * would be diagnosed as soon as no earlier branch was taken; "#else" is
 * what is meant: any remaining processor type is unsupported here. */
#error Unsupported processor type !
#endif