Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r--  arch/mn10300/mm/Kconfig.cache          101
-rw-r--r--  arch/mn10300/mm/Makefile                14
-rw-r--r--  arch/mn10300/mm/cache-flush-by-reg.S   308
-rw-r--r--  arch/mn10300/mm/cache-flush-by-tag.S   251
-rw-r--r--  arch/mn10300/mm/cache-flush-icache.c   155
-rw-r--r--  arch/mn10300/mm/cache-flush-mn10300.S  192
-rw-r--r--  arch/mn10300/mm/cache-inv-by-reg.S     356
-rw-r--r--  arch/mn10300/mm/cache-inv-by-tag.S     348
-rw-r--r--  arch/mn10300/mm/cache-inv-icache.c     129
-rw-r--r--  arch/mn10300/mm/cache-mn10300.S        289
-rw-r--r--  arch/mn10300/mm/cache-smp-flush.c      156
-rw-r--r--  arch/mn10300/mm/cache-smp-inv.c        153
-rw-r--r--  arch/mn10300/mm/cache-smp.c            105
-rw-r--r--  arch/mn10300/mm/cache-smp.h             69
-rw-r--r--  arch/mn10300/mm/cache.c                 95
-rw-r--r--  arch/mn10300/mm/fault.c                 17
-rw-r--r--  arch/mn10300/mm/init.c                  26
-rw-r--r--  arch/mn10300/mm/misalignment.c           3
-rw-r--r--  arch/mn10300/mm/mmu-context.c           41
-rw-r--r--  arch/mn10300/mm/pgtable.c                2
-rw-r--r--  arch/mn10300/mm/tlb-mn10300.S           59
-rw-r--r--  arch/mn10300/mm/tlb-smp.c              214
22 files changed, 2447 insertions, 636 deletions
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644
index 000000000000..c4fd923a55a0
--- /dev/null
+++ b/arch/mn10300/mm/Kconfig.cache
@@ -0,0 +1,101 @@
1#
2# MN10300 CPU cache options
3#
4
5choice
6 prompt "CPU Caching mode"
7 default MN10300_CACHE_WBACK
8 help
9 This option determines the caching mode for the kernel.
10
11 Write-Back caching mode means that all reads and writes cause the
12 affected cacheline to be read into the cache before being operated
13 upon. Memory is not updated by a write until the cacheline has to
14 be displaced from the cache to make room; only at that point is it
15 written back.
16
17 Write-Through caching only fetches cachelines from memory on a
18 read. Writes always get written directly to memory. If the affected
19 cacheline is also in cache, it will be updated too.
20
21 The final option is to turn off caching entirely.
22
23config MN10300_CACHE_WBACK
24 bool "Write-Back"
25 help
26 The dcache operates in delayed write-back mode. It must be manually
27 flushed if writes are made that subsequently need to be executed or
28 to be DMA'd by a device.
29
30config MN10300_CACHE_WTHRU
31 bool "Write-Through"
32 help
33 The dcache operates in immediate write-through mode. Writes are
34 committed to RAM immediately in addition to being stored in the
35 cache. This means that the written data is immediately available for
36 execution or DMA.
37
38 This is not available for use with an SMP kernel unless cache
39 flushing and invalidation by automatic purge register is selected.
40
41config MN10300_CACHE_DISABLED
42 bool "Disabled"
43 help
44 The icache and dcache are disabled.
45
46endchoice
47
48config MN10300_CACHE_ENABLED
49 def_bool y if !MN10300_CACHE_DISABLED
50
51
52choice
53 prompt "CPU cache flush/invalidate method"
54 default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
55 default MN10300_CACHE_MANAGE_BY_REG if AM34_2
56 depends on MN10300_CACHE_ENABLED
57 help
58 This determines the method by which CPU cache flushing and
59 invalidation is performed.
60
61config MN10300_CACHE_MANAGE_BY_TAG
62 bool "Use the cache tag registers directly"
63 depends on !(SMP && MN10300_CACHE_WTHRU)
64
65config MN10300_CACHE_MANAGE_BY_REG
66 bool "Flush areas by way of automatic purge registers (AM34 only)"
67 depends on AM34_2
68
69endchoice
70
71config MN10300_CACHE_INV_BY_TAG
72 def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
73
74config MN10300_CACHE_INV_BY_REG
75 def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
76
77config MN10300_CACHE_FLUSH_BY_TAG
78 def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
79
80config MN10300_CACHE_FLUSH_BY_REG
81 def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
82
83
84config MN10300_HAS_CACHE_SNOOP
85 def_bool n
86
87config MN10300_CACHE_SNOOP
88 bool "Use CPU Cache Snooping"
89 depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
90 default y
91
92config MN10300_CACHE_FLUSH_ICACHE
93 def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
94 help
94 Set if we need the dcache flushed before the icache is invalidated.
96
97config MN10300_CACHE_INV_ICACHE
98 def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
99 help
100 Set if we need the icache to be invalidated, even if the dcache is in
101 write-through mode and doesn't need flushing.
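To make the help text above concrete, here is a minimal, self-contained C
sketch (illustrative only, not kernel code) of the two write policies it
describes: a write-back store leaves RAM stale until the line is displaced,
while a write-through store updates RAM immediately.

	/* Toy model of one direct-mapped cacheline; not kernel code */
	#include <assert.h>
	#include <stdbool.h>

	struct line { unsigned addr; unsigned data; bool valid, dirty; };

	static unsigned ram[16];
	static struct line cache;

	static void store(unsigned addr, unsigned val, bool write_back)
	{
		cache = (struct line){ .addr = addr, .data = val, .valid = true };
		if (write_back)
			cache.dirty = true;	/* RAM is now stale */
		else
			ram[addr] = val;	/* write-through: RAM updated at once */
	}

	static void evict(void)
	{
		if (cache.valid && cache.dirty)
			ram[cache.addr] = cache.data;	/* the delayed write-back */
		cache.valid = cache.dirty = false;
	}

	int main(void)
	{
		store(3, 42, true);	/* write-back mode */
		assert(ram[3] != 42);	/* memory still stale ... */
		evict();		/* ... until the line is displaced */
		assert(ram[3] == 42);

		store(5, 7, false);	/* write-through mode */
		assert(ram[5] == 7);	/* memory updated immediately */
		return 0;
	}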
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
index 1557277fbc5c..203fee23f7d7 100644
--- a/arch/mn10300/mm/Makefile
+++ b/arch/mn10300/mm/Makefile
@@ -2,11 +2,21 @@
 # Makefile for the MN10300-specific memory management code
 #
 
-cacheflush-y := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
 
 cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
 
 obj-y := \
 	init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
 	misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
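As a worked example of how the object list above resolves (assuming an SMP,
write-back, AM34 configuration, where cache snooping is unavailable per
Kconfig.cache above): CONFIG_SMP pulls in cache-smp.o, cache-smp-inv.o and,
via cache-smp-wback-y, cache-smp-flush.o; MN10300_CACHE_FLUSH_ICACHE is set
because the dcache is write-back without snooping; and the AM34 default of
managing the cache by the automatic purge registers selects the *-by-reg.o
files, giving:

	cacheflush-y = cache.o cache-smp.o cache-smp-inv.o cache-smp-flush.o \
	               cache-flush-icache.o cache-inv-by-reg.o cache-flush-by-reg.o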
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644
index 000000000000..1dcae0211671
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-reg.S
@@ -0,0 +1,308 @@
1/* MN10300 CPU core caching routines, using indirect regs on cache controller
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/sys.h>
13#include <linux/linkage.h>
14#include <asm/smp.h>
15#include <asm/page.h>
16#include <asm/cache.h>
17#include <asm/irqflags.h>
18
19 .am33_2
20
21#ifndef CONFIG_SMP
22 .globl mn10300_dcache_flush
23 .globl mn10300_dcache_flush_page
24 .globl mn10300_dcache_flush_range
25 .globl mn10300_dcache_flush_range2
26 .globl mn10300_dcache_flush_inv
27 .globl mn10300_dcache_flush_inv_page
28 .globl mn10300_dcache_flush_inv_range
29 .globl mn10300_dcache_flush_inv_range2
30
31mn10300_dcache_flush = mn10300_local_dcache_flush
32mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
33mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
34mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
35mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
36mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
37mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
38mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
39
40#endif /* !CONFIG_SMP */
41
42###############################################################################
43#
44# void mn10300_local_dcache_flush(void)
45# Flush the entire data cache back to RAM
46#
47###############################################################################
48 ALIGN
49 .globl mn10300_local_dcache_flush
50 .type mn10300_local_dcache_flush,@function
51mn10300_local_dcache_flush:
52 movhu (CHCTR),d0
53 btst CHCTR_DCEN,d0
54 beq mn10300_local_dcache_flush_end
55
56 mov DCPGCR,a0
57
58 LOCAL_CLI_SAVE(d1)
59
60 # wait for busy bit of area purge
61 setlb
62 mov (a0),d0
63 btst DCPGCR_DCPGBSY,d0
64 lne
65
66 # set mask
67 clr d0
68 mov d0,(DCPGMR)
69
70 # area purge
71 #
72 # DCPGCR = DCPGCR_DCP
73 #
74 mov DCPGCR_DCP,d0
75 mov d0,(a0)
76
77 # wait for busy bit of area purge
78 setlb
79 mov (a0),d0
80 btst DCPGCR_DCPGBSY,d0
81 lne
82
83 LOCAL_IRQ_RESTORE(d1)
84
85mn10300_local_dcache_flush_end:
86 ret [],0
87 .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
88
89###############################################################################
90#
91# void mn10300_local_dcache_flush_page(unsigned long start)
92# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
93# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
94# Flush a range of addresses on a page in the dcache
95#
96###############################################################################
97 ALIGN
98 .globl mn10300_local_dcache_flush_page
99 .globl mn10300_local_dcache_flush_range
100 .globl mn10300_local_dcache_flush_range2
101 .type mn10300_local_dcache_flush_page,@function
102 .type mn10300_local_dcache_flush_range,@function
103 .type mn10300_local_dcache_flush_range2,@function
104mn10300_local_dcache_flush_page:
105 and ~(PAGE_SIZE-1),d0
106 mov PAGE_SIZE,d1
107mn10300_local_dcache_flush_range2:
108 add d0,d1
109mn10300_local_dcache_flush_range:
110 movm [d2,d3,a2],(sp)
111
112 movhu (CHCTR),d2
113 btst CHCTR_DCEN,d2
114 beq mn10300_local_dcache_flush_range_end
115
116 # calculate alignsize
117 #
118 # alignsize = L1_CACHE_BYTES;
119 # for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1)
120 # alignsize <<= 1;
121 # d2 = alignsize;
122 #
123 mov L1_CACHE_BYTES,d2
124 sub d0,d1,d3
125 add -1,d3
126 lsr L1_CACHE_SHIFT,d3
127 beq 2f
1281:
129 add d2,d2
130 lsr 1,d3
131 bne 1b
1322:
133 mov d1,a1 # a1 = end
134
135 LOCAL_CLI_SAVE(d3)
136 mov DCPGCR,a0
137
138 # wait for busy bit of area purge
139 setlb
140 mov (a0),d1
141 btst DCPGCR_DCPGBSY,d1
142 lne
143
144 # determine the mask
145 mov d2,d1
146 add -1,d1
147 not d1 # d1 = mask = ~(alignsize-1)
148 mov d1,(DCPGMR)
149
150 and d1,d0,a2 # a2 = mask & start
151
152dcpgloop:
153 # area purge
154 mov a2,d0
155 or DCPGCR_DCP,d0
156 mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCP
157
158 # wait for busy bit of area purge
159 setlb
160 mov (a0),d1
161 btst DCPGCR_DCPGBSY,d1
162 lne
163
164 # check purge of end address
165 add d2,a2 # a2 += alignsize
166 cmp a1,a2 # if (a2 < end) goto dcpgloop
167 bns dcpgloop
168
169 LOCAL_IRQ_RESTORE(d3)
170
171mn10300_local_dcache_flush_range_end:
172 ret [d2,d3,a2],12
173
174 .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
175 .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
176 .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
177
178###############################################################################
179#
180# void mn10300_local_dcache_flush_inv(void)
181# Flush the entire data cache and invalidate all entries
182#
183###############################################################################
184 ALIGN
185 .globl mn10300_local_dcache_flush_inv
186 .type mn10300_local_dcache_flush_inv,@function
187mn10300_local_dcache_flush_inv:
188 movhu (CHCTR),d0
189 btst CHCTR_DCEN,d0
190 beq mn10300_local_dcache_flush_inv_end
191
192 mov DCPGCR,a0
193
194 LOCAL_CLI_SAVE(d1)
195
196 # wait for busy bit of area purge & invalidate
197 setlb
198 mov (a0),d0
199 btst DCPGCR_DCPGBSY,d0
200 lne
201
202 # set the mask to cover everything
203 clr d0
204 mov d0,(DCPGMR)
205
206 # area purge & invalidate
207 mov DCPGCR_DCP|DCPGCR_DCI,d0
208 mov d0,(a0)
209
210 # wait for busy bit of area purge & invalidate
211 setlb
212 mov (a0),d0
213 btst DCPGCR_DCPGBSY,d0
214 lne
215
216 LOCAL_IRQ_RESTORE(d1)
217
218mn10300_local_dcache_flush_inv_end:
219 ret [],0
220 .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
221
222###############################################################################
223#
224# void mn10300_local_dcache_flush_inv_page(unsigned long start)
225# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
226# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
227# Flush and invalidate a range of addresses on a page in the dcache
228#
229###############################################################################
230 ALIGN
231 .globl mn10300_local_dcache_flush_inv_page
232 .globl mn10300_local_dcache_flush_inv_range
233 .globl mn10300_local_dcache_flush_inv_range2
234 .type mn10300_local_dcache_flush_inv_page,@function
235 .type mn10300_local_dcache_flush_inv_range,@function
236 .type mn10300_local_dcache_flush_inv_range2,@function
237mn10300_local_dcache_flush_inv_page:
238 and ~(PAGE_SIZE-1),d0
239 mov PAGE_SIZE,d1
240mn10300_local_dcache_flush_inv_range2:
241 add d0,d1
242mn10300_local_dcache_flush_inv_range:
243 movm [d2,d3,a2],(sp)
244
245 movhu (CHCTR),d2
246 btst CHCTR_DCEN,d2
247 beq mn10300_local_dcache_flush_inv_range_end
248
249 # calculate alignsize
250 #
251 # alignsize = L1_CACHE_BYTES;
252 # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
253 # alignsize <<= 1;
254 # d2 = alignsize
255 #
256 mov L1_CACHE_BYTES,d2
257 sub d0,d1,d3
258 add -1,d3
259 lsr L1_CACHE_SHIFT,d3
260 beq 2f
2611:
262 add d2,d2
263 lsr 1,d3
264 bne 1b
2652:
266 mov d1,a1 # a1 = end
267
268 LOCAL_CLI_SAVE(d3)
269 mov DCPGCR,a0
270
271 # wait for busy bit of area purge & invalidate
272 setlb
273 mov (a0),d1
274 btst DCPGCR_DCPGBSY,d1
275 lne
276
277 # set the mask
278 mov d2,d1
279 add -1,d1
280 not d1 # d1 = mask = ~(alignsize-1)
281 mov d1,(DCPGMR)
282
283 and d1,d0,a2 # a2 = mask & start
284
285dcpgivloop:
286 # area purge & invalidate
287 mov a2,d0
288 or DCPGCR_DCP|DCPGCR_DCI,d0
289 mov d0,(a0) # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
290
291 # wait for busy bit of area purge & invalidate
292 setlb
293 mov (a0),d1
294 btst DCPGCR_DCPGBSY,d1
295 lne
296
297 # check purge & invalidate of end address
298 add d2,a2 # a2 += alignsize
299 cmp a1,a2 # if (a2 < end) goto dcpgivloop
300 bns dcpgivloop
301
302 LOCAL_IRQ_RESTORE(d3)
303
304mn10300_local_dcache_flush_inv_range_end:
305 ret [d2,d3,a2],12
306 .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
307 .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
308 .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
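For reference, the area-purge algorithm the assembly above implements can be
restated in C roughly as follows. This is a reading aid, not a drop-in
replacement: DCPGCR, DCPGMR and the bit names are those used by the assembly
(from <asm/cache.h>), the volatile-pointer accesses stand in for its raw mov
instructions, and the real routine also disables interrupts (LOCAL_CLI_SAVE)
around the whole sequence.

	/* Sketch of mn10300_local_dcache_flush_range() in C */
	static void dcache_flush_range_by_reg(unsigned long start, unsigned long end)
	{
		volatile unsigned long *dcpgcr = (unsigned long *) DCPGCR;
		volatile unsigned long *dcpgmr = (unsigned long *) DCPGMR;
		unsigned long alignsize = L1_CACHE_BYTES, i, a;

		/* smallest power-of-two block size that covers the range */
		for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
			alignsize <<= 1;

		while (*dcpgcr & DCPGCR_DCPGBSY)
			;				/* wait for any purge in flight */

		*dcpgmr = ~(alignsize - 1);		/* address bits the purger compares */

		for (a = start & ~(alignsize - 1); a < end; a += alignsize) {
			*dcpgcr = a | DCPGCR_DCP;	/* start an area purge */
			while (*dcpgcr & DCPGCR_DCPGBSY)
				;			/* wait for it to complete */
		}
	}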
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644
index 000000000000..5cd6a27dd63e
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-by-tag.S
@@ -0,0 +1,251 @@
1/* MN10300 CPU core caching routines, using direct tag flushing
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/sys.h>
13#include <linux/linkage.h>
14#include <asm/smp.h>
15#include <asm/page.h>
16#include <asm/cache.h>
17#include <asm/irqflags.h>
18
19 .am33_2
20
21#ifndef CONFIG_SMP
22 .globl mn10300_dcache_flush
23 .globl mn10300_dcache_flush_page
24 .globl mn10300_dcache_flush_range
25 .globl mn10300_dcache_flush_range2
26 .globl mn10300_dcache_flush_inv
27 .globl mn10300_dcache_flush_inv_page
28 .globl mn10300_dcache_flush_inv_range
29 .globl mn10300_dcache_flush_inv_range2
30
31mn10300_dcache_flush = mn10300_local_dcache_flush
32mn10300_dcache_flush_page = mn10300_local_dcache_flush_page
33mn10300_dcache_flush_range = mn10300_local_dcache_flush_range
34mn10300_dcache_flush_range2 = mn10300_local_dcache_flush_range2
35mn10300_dcache_flush_inv = mn10300_local_dcache_flush_inv
36mn10300_dcache_flush_inv_page = mn10300_local_dcache_flush_inv_page
37mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
38mn10300_dcache_flush_inv_range2 = mn10300_local_dcache_flush_inv_range2
39
40#endif /* !CONFIG_SMP */
41
42###############################################################################
43#
44# void mn10300_local_dcache_flush(void)
45# Flush the entire data cache back to RAM
46#
47###############################################################################
48 ALIGN
49 .globl mn10300_local_dcache_flush
50 .type mn10300_local_dcache_flush,@function
51mn10300_local_dcache_flush:
52 movhu (CHCTR),d0
53 btst CHCTR_DCEN,d0
54 beq mn10300_local_dcache_flush_end
55
56 # read the addresses tagged in the cache's tag RAM and attempt to flush
57 # those addresses specifically
58 # - we rely on the hardware to filter out invalid tag entry addresses
59 mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
60 mov DCACHE_PURGE(0,0),a1 # dcache purge request address
61 mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
62
63mn10300_local_dcache_flush_loop:
64 mov (a0),d0
65 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
66 or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
67 # cache
68 mov d0,(a1) # conditional purge
69
70 add L1_CACHE_BYTES,a0
71 add L1_CACHE_BYTES,a1
72 add -1,d1
73 bne mn10300_local_dcache_flush_loop
74
75mn10300_local_dcache_flush_end:
76 ret [],0
77 .size mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
78
79###############################################################################
80#
81# void mn10300_local_dcache_flush_page(unsigned long start)
82# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
83# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
84# Flush a range of addresses on a page in the dcache
85#
86###############################################################################
87 ALIGN
88 .globl mn10300_local_dcache_flush_page
89 .globl mn10300_local_dcache_flush_range
90 .globl mn10300_local_dcache_flush_range2
91 .type mn10300_local_dcache_flush_page,@function
92 .type mn10300_local_dcache_flush_range,@function
93 .type mn10300_local_dcache_flush_range2,@function
94mn10300_local_dcache_flush_page:
95 and ~(PAGE_SIZE-1),d0
96 mov PAGE_SIZE,d1
97mn10300_local_dcache_flush_range2:
98 add d0,d1
99mn10300_local_dcache_flush_range:
100 movm [d2],(sp)
101
102 movhu (CHCTR),d2
103 btst CHCTR_DCEN,d2
104 beq mn10300_local_dcache_flush_range_end
105
106 sub d0,d1,a0
107 cmp MN10300_DCACHE_FLUSH_BORDER,a0
108 ble 1f
109
110 movm (sp),[d2]
111 bra mn10300_local_dcache_flush
1121:
113
114 # round start addr down
115 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
116 mov d0,a1
117
118 add L1_CACHE_BYTES,d1 # round end addr up
119 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
120
121 # write a request to flush all instances of an address from the cache
122 mov DCACHE_PURGE(0,0),a0
123 mov a1,d0
124 and L1_CACHE_TAG_ENTRY,d0
125 add d0,a0 # starting dcache purge control
126 # reg address
127
128 sub a1,d1
129 lsr L1_CACHE_SHIFT,d1 # total number of entries to
130 # examine
131
132 or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
133 # cache
134
135mn10300_local_dcache_flush_range_loop:
136 mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
137 # all ways
138
139 add L1_CACHE_BYTES,a0
140 add L1_CACHE_BYTES,a1
141 and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
142 add -1,d1
143 bne mn10300_local_dcache_flush_range_loop
144
145mn10300_local_dcache_flush_range_end:
146 ret [d2],4
147
148 .size mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
149 .size mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
150 .size mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
151
152###############################################################################
153#
154# void mn10300_local_dcache_flush_inv(void)
155# Flush the entire data cache and invalidate all entries
156#
157###############################################################################
158 ALIGN
159 .globl mn10300_local_dcache_flush_inv
160 .type mn10300_local_dcache_flush_inv,@function
161mn10300_local_dcache_flush_inv:
162 movhu (CHCTR),d0
163 btst CHCTR_DCEN,d0
164 beq mn10300_local_dcache_flush_inv_end
165
166 mov L1_CACHE_NENTRIES,d1
167 clr a1
168
169mn10300_local_dcache_flush_inv_loop:
170 mov (DCACHE_PURGE_WAY0(0),a1),d0 # unconditional purge
171 mov (DCACHE_PURGE_WAY1(0),a1),d0 # unconditional purge
172 mov (DCACHE_PURGE_WAY2(0),a1),d0 # unconditional purge
173 mov (DCACHE_PURGE_WAY3(0),a1),d0 # unconditional purge
174
175 add L1_CACHE_BYTES,a1
176 add -1,d1
177 bne mn10300_local_dcache_flush_inv_loop
178
179mn10300_local_dcache_flush_inv_end:
180 ret [],0
181 .size mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
182
183###############################################################################
184#
185# void mn10300_local_dcache_flush_inv_page(unsigned long start)
186# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
187# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
188# Flush and invalidate a range of addresses on a page in the dcache
189#
190###############################################################################
191 ALIGN
192 .globl mn10300_local_dcache_flush_inv_page
193 .globl mn10300_local_dcache_flush_inv_range
194 .globl mn10300_local_dcache_flush_inv_range2
195 .type mn10300_local_dcache_flush_inv_page,@function
196 .type mn10300_local_dcache_flush_inv_range,@function
197 .type mn10300_local_dcache_flush_inv_range2,@function
198mn10300_local_dcache_flush_inv_page:
199 and ~(PAGE_SIZE-1),d0
200 mov PAGE_SIZE,d1
201mn10300_local_dcache_flush_inv_range2:
202 add d0,d1
203mn10300_local_dcache_flush_inv_range:
204 movm [d2],(sp)
205
206 movhu (CHCTR),d2
207 btst CHCTR_DCEN,d2
208 beq mn10300_local_dcache_flush_inv_range_end
209
210 sub d0,d1,a0
211 cmp MN10300_DCACHE_FLUSH_INV_BORDER,a0
212 ble 1f
213
214 movm (sp),[d2]
215 bra mn10300_local_dcache_flush_inv
2161:
217
218 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
219 # addr down
220 mov d0,a1
221
222 add L1_CACHE_BYTES,d1 # round end addr up
223 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
224
225 # write a request to flush and invalidate all instances of an address
226 # from the cache
227 mov DCACHE_PURGE(0,0),a0
228 mov a1,d0
229 and L1_CACHE_TAG_ENTRY,d0
230 add d0,a0 # starting dcache purge control
231 # reg address
232
233 sub a1,d1
234 lsr L1_CACHE_SHIFT,d1 # total number of entries to
235 # examine
236
237mn10300_local_dcache_flush_inv_range_loop:
238 mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
239 # in all ways
240
241 add L1_CACHE_BYTES,a0
242 add L1_CACHE_BYTES,a1
243 and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
244 add -1,d1
245 bne mn10300_local_dcache_flush_inv_range_loop
246
247mn10300_local_dcache_flush_inv_range_end:
248 ret [d2],4
249 .size mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
250 .size mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
251 .size mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
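The whole-cache flush above can be read as the following C sketch
(illustrative, not a drop-in function): each tag is read from the tag RAM,
reduced to its address and entry bits, marked VALID, and written to the purge
register as a "conditional purge" that the hardware ignores for invalid
lines. DCACHE_TAG/DCACHE_PURGE and the L1_CACHE_* constants are those from
<asm/cache.h> used by the assembly.

	/* Sketch of mn10300_local_dcache_flush() in C */
	static void dcache_flush_by_tag(void)
	{
		volatile unsigned long *tag = (unsigned long *) DCACHE_TAG(0, 0);
		volatile unsigned long *purge = (unsigned long *) DCACHE_PURGE(0, 0);
		int n = L1_CACHE_NWAYS * L1_CACHE_NENTRIES;

		while (n--) {
			unsigned long t = *tag;

			t &= L1_CACHE_TAG_ADDRESS | L1_CACHE_TAG_ENTRY;
			t |= L1_CACHE_TAG_VALID;	/* only purge valid lines */
			*purge = t;			/* conditional purge request */

			tag += L1_CACHE_BYTES / sizeof(*tag);
			purge += L1_CACHE_BYTES / sizeof(*purge);
		}
	}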
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644
index 000000000000..fdb1a9db20f0
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-icache.c
@@ -0,0 +1,155 @@
1/* Flush dcache and invalidate icache when the dcache is in writeback mode
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/module.h>
12#include <linux/mm.h>
13#include <asm/cacheflush.h>
14#include <asm/smp.h>
15#include "cache-smp.h"
16
17/**
18 * flush_icache_page - Flush a page from the dcache and invalidate the icache
19 * @vma: The VMA the page is part of.
20 * @page: The page to be flushed.
21 *
22 * Write a page back from the dcache and invalidate the icache so that we can
23 * run code from it that we've just written into it
24 */
25void flush_icache_page(struct vm_area_struct *vma, struct page *page)
26{
27 unsigned long start = page_to_phys(page);
28 unsigned long flags;
29
30 flags = smp_lock_cache();
31
32 mn10300_local_dcache_flush_page(start);
33 mn10300_local_icache_inv_page(start);
34
35 smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
36 smp_unlock_cache(flags);
37}
38EXPORT_SYMBOL(flush_icache_page);
39
40/**
41 * flush_icache_page_range - Flush dcache and invalidate icache for part of a
42 * single page
43 * @start: The starting virtual address of the page part.
44 * @end: The ending virtual address of the page part.
45 *
46 * Flush the dcache and invalidate the icache for part of a single page, as
47 * determined by the virtual addresses given. The page must be in the paged
48 * area.
49 */
50static void flush_icache_page_range(unsigned long start, unsigned long end)
51{
52 unsigned long addr, size, off;
53 struct page *page;
54 pgd_t *pgd;
55 pud_t *pud;
56 pmd_t *pmd;
57 pte_t *ppte, pte;
58
59 /* work out how much of the page to flush */
60 off = start & ~PAGE_MASK;
61 size = end - start;
62
63 /* get the physical address the page is mapped to from the page
64 * tables */
65 pgd = pgd_offset(current->mm, start);
66 if (!pgd || !pgd_val(*pgd))
67 return;
68
69 pud = pud_offset(pgd, start);
70 if (!pud || !pud_val(*pud))
71 return;
72
73 pmd = pmd_offset(pud, start);
74 if (!pmd || !pmd_val(*pmd))
75 return;
76
77 ppte = pte_offset_map(pmd, start);
78 if (!ppte)
79 return;
80 pte = *ppte;
81 pte_unmap(ppte);
82
83 if (pte_none(pte))
84 return;
85
86 page = pte_page(pte);
87 if (!page)
88 return;
89
90 addr = page_to_phys(page);
91
92 /* flush the dcache and invalidate the icache coverage on that
93 * region */
94 mn10300_local_dcache_flush_range2(addr + off, size);
95 mn10300_local_icache_inv_range2(addr + off, size);
96 smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
97}
98
99/**
100 * flush_icache_range - Globally flush dcache and invalidate icache for region
101 * @start: The starting virtual address of the region.
102 * @end: The ending virtual address of the region.
103 *
104 * This is used by the kernel to globally flush some code it has just written
105 * from the dcache back to RAM and then to globally invalidate the icache over
106 * that region so that the code can be run on all CPUs in the system.
107 */
108void flush_icache_range(unsigned long start, unsigned long end)
109{
110 unsigned long start_page, end_page;
111 unsigned long flags;
112
113 flags = smp_lock_cache();
114
115 if (end > 0x80000000UL) {
116 /* addresses above 0xa0000000 do not go through the cache */
117 if (end > 0xa0000000UL) {
118 end = 0xa0000000UL;
119 if (start >= end)
120 goto done;
121 }
122
123 /* kernel addresses between 0x80000000 and 0x9fffffff do not
124 * require page tables, so we just map such addresses
125 * directly */
126 start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
127 mn10300_local_dcache_flush_range(start_page, end);
128 mn10300_local_icache_inv_range(start_page, end);
129 smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
130 if (start_page == start)
131 goto done;
132 end = start_page;
133 }
134
135 start_page = start & PAGE_MASK;
136 end_page = (end - 1) & PAGE_MASK;
137
138 if (start_page == end_page) {
139 /* the first and last bytes are on the same page */
140 flush_icache_page_range(start, end);
141 } else if (start_page + PAGE_SIZE == end_page) {
142 /* split over two virtually contiguous pages */
143 flush_icache_page_range(start, end_page);
144 flush_icache_page_range(end_page, end);
145 } else {
146 /* more than 2 pages; just flush the entire cache */
147 mn10300_dcache_flush();
148 mn10300_icache_inv();
149 smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
150 }
151
152done:
153 smp_unlock_cache(flags);
154}
155EXPORT_SYMBOL(flush_icache_range);
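As a usage sketch (hypothetical caller, not part of this patch): any code
path that writes instructions into memory, such as a module loader, must
push them out of the dcache and drop any stale icache lines before jumping
to them.

	#include <linux/string.h>
	#include <asm/cacheflush.h>

	/* Hypothetical helper, for illustration only */
	static void install_code(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);
		flush_icache_range((unsigned long) dst,
				   (unsigned long) dst + len);
	}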
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644
index c8ed1cbac107..000000000000
--- a/arch/mn10300/mm/cache-flush-mn10300.S
+++ /dev/null
@@ -1,192 +0,0 @@
1/* MN10300 CPU core caching routines
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12#include <linux/sys.h>
13#include <linux/linkage.h>
14#include <asm/smp.h>
15#include <asm/page.h>
16#include <asm/cache.h>
17
18 .am33_2
19 .globl mn10300_dcache_flush
20 .globl mn10300_dcache_flush_page
21 .globl mn10300_dcache_flush_range
22 .globl mn10300_dcache_flush_range2
23 .globl mn10300_dcache_flush_inv
24 .globl mn10300_dcache_flush_inv_page
25 .globl mn10300_dcache_flush_inv_range
26 .globl mn10300_dcache_flush_inv_range2
27
28###############################################################################
29#
30# void mn10300_dcache_flush(void)
31# Flush the entire data cache back to RAM
32#
33###############################################################################
34 ALIGN
35mn10300_dcache_flush:
36 movhu (CHCTR),d0
37 btst CHCTR_DCEN,d0
38 beq mn10300_dcache_flush_end
39
40 # read the addresses tagged in the cache's tag RAM and attempt to flush
41 # those addresses specifically
42 # - we rely on the hardware to filter out invalid tag entry addresses
43 mov DCACHE_TAG(0,0),a0 # dcache tag RAM access address
44 mov DCACHE_PURGE(0,0),a1 # dcache purge request address
45 mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
46
47mn10300_dcache_flush_loop:
48 mov (a0),d0
49 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
50 or L1_CACHE_TAG_VALID,d0 # retain valid entries in the
51 # cache
52 mov d0,(a1) # conditional purge
53
54mn10300_dcache_flush_skip:
55 add L1_CACHE_BYTES,a0
56 add L1_CACHE_BYTES,a1
57 add -1,d1
58 bne mn10300_dcache_flush_loop
59
60mn10300_dcache_flush_end:
61 ret [],0
62
63###############################################################################
64#
65# void mn10300_dcache_flush_page(unsigned start)
66# void mn10300_dcache_flush_range(unsigned start, unsigned end)
67# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
68# Flush a range of addresses on a page in the dcache
69#
70###############################################################################
71 ALIGN
72mn10300_dcache_flush_page:
73 mov PAGE_SIZE,d1
74mn10300_dcache_flush_range2:
75 add d0,d1
76mn10300_dcache_flush_range:
77 movm [d2,d3],(sp)
78
79 movhu (CHCTR),d2
80 btst CHCTR_DCEN,d2
81 beq mn10300_dcache_flush_range_end
82
83 # round start addr down
84 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
85 mov d0,a1
86
87 add L1_CACHE_BYTES,d1 # round end addr up
88 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
89
90 # write a request to flush all instances of an address from the cache
91 mov DCACHE_PURGE(0,0),a0
92 mov a1,d0
93 and L1_CACHE_TAG_ENTRY,d0
94 add d0,a0 # starting dcache purge control
95 # reg address
96
97 sub a1,d1
98 lsr L1_CACHE_SHIFT,d1 # total number of entries to
99 # examine
100
101 or L1_CACHE_TAG_VALID,a1 # retain valid entries in the
102 # cache
103
104mn10300_dcache_flush_range_loop:
105 mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
106 # all ways
107
108 add L1_CACHE_BYTES,a0
109 add L1_CACHE_BYTES,a1
110 and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
111 add -1,d1
112 bne mn10300_dcache_flush_range_loop
113
114mn10300_dcache_flush_range_end:
115 ret [d2,d3],8
116
117###############################################################################
118#
119# void mn10300_dcache_flush_inv(void)
120# Flush the entire data cache and invalidate all entries
121#
122###############################################################################
123 ALIGN
124mn10300_dcache_flush_inv:
125 movhu (CHCTR),d0
126 btst CHCTR_DCEN,d0
127 beq mn10300_dcache_flush_inv_end
128
129 # hit each line in the dcache with an unconditional purge
130 mov DCACHE_PURGE(0,0),a1 # dcache purge request address
131 mov L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1 # total number of entries
132
133mn10300_dcache_flush_inv_loop:
134 mov (a1),d0 # unconditional purge
135
136 add L1_CACHE_BYTES,a1
137 add -1,d1
138 bne mn10300_dcache_flush_inv_loop
139
140mn10300_dcache_flush_inv_end:
141 ret [],0
142
143###############################################################################
144#
145# void mn10300_dcache_flush_inv_page(unsigned start)
146# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
147# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
148# Flush and invalidate a range of addresses on a page in the dcache
149#
150###############################################################################
151 ALIGN
152mn10300_dcache_flush_inv_page:
153 mov PAGE_SIZE,d1
154mn10300_dcache_flush_inv_range2:
155 add d0,d1
156mn10300_dcache_flush_inv_range:
157 movm [d2,d3],(sp)
158 movhu (CHCTR),d2
159 btst CHCTR_DCEN,d2
160 beq mn10300_dcache_flush_inv_range_end
161
162 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
163 # addr down
164 mov d0,a1
165
166 add L1_CACHE_BYTES,d1 # round end addr up
167 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
168
169 # write a request to flush and invalidate all instances of an address
170 # from the cache
171 mov DCACHE_PURGE(0,0),a0
172 mov a1,d0
173 and L1_CACHE_TAG_ENTRY,d0
174 add d0,a0 # starting dcache purge control
175 # reg address
176
177 sub a1,d1
178 lsr L1_CACHE_SHIFT,d1 # total number of entries to
179 # examine
180
181mn10300_dcache_flush_inv_range_loop:
182 mov a1,(L1_CACHE_WAYDISP*0,a0) # conditionally purge this line
183 # in all ways
184
185 add L1_CACHE_BYTES,a0
186 add L1_CACHE_BYTES,a1
187 and ~L1_CACHE_WAYDISP,a0 # make sure we stay on way 0
188 add -1,d1
189 bne mn10300_dcache_flush_inv_range_loop
190
191mn10300_dcache_flush_inv_range_end:
192 ret [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644
index 000000000000..c8950861ed77
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-reg.S
@@ -0,0 +1,356 @@
1/* MN10300 CPU cache invalidation routines, using automatic purge registers
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/sys.h>
12#include <linux/linkage.h>
13#include <asm/smp.h>
14#include <asm/page.h>
15#include <asm/cache.h>
16#include <asm/irqflags.h>
17#include <asm/cacheflush.h>
18
19#define mn10300_local_dcache_inv_range_intr_interval \
20 +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
21
22#if mn10300_local_dcache_inv_range_intr_interval > 0xff
23#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
24#endif
25
26 .am33_2
27
28#ifndef CONFIG_SMP
29 .globl mn10300_icache_inv
30 .globl mn10300_icache_inv_page
31 .globl mn10300_icache_inv_range
32 .globl mn10300_icache_inv_range2
33 .globl mn10300_dcache_inv
34 .globl mn10300_dcache_inv_page
35 .globl mn10300_dcache_inv_range
36 .globl mn10300_dcache_inv_range2
37
38mn10300_icache_inv = mn10300_local_icache_inv
39mn10300_icache_inv_page = mn10300_local_icache_inv_page
40mn10300_icache_inv_range = mn10300_local_icache_inv_range
41mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
42mn10300_dcache_inv = mn10300_local_dcache_inv
43mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
44mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
45mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
46
47#endif /* !CONFIG_SMP */
48
49###############################################################################
50#
51# void mn10300_local_icache_inv(void)
52# Invalidate the entire icache
53#
54###############################################################################
55 ALIGN
56 .globl mn10300_local_icache_inv
57 .type mn10300_local_icache_inv,@function
58mn10300_local_icache_inv:
59 mov CHCTR,a0
60
61 movhu (a0),d0
62 btst CHCTR_ICEN,d0
63 beq mn10300_local_icache_inv_end
64
65 # invalidate
66 or CHCTR_ICINV,d0
67 movhu d0,(a0)
68 movhu (a0),d0
69
70mn10300_local_icache_inv_end:
71 ret [],0
72 .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
73
74###############################################################################
75#
76# void mn10300_local_dcache_inv(void)
77# Invalidate the entire dcache
78#
79###############################################################################
80 ALIGN
81 .globl mn10300_local_dcache_inv
82 .type mn10300_local_dcache_inv,@function
83mn10300_local_dcache_inv:
84 mov CHCTR,a0
85
86 movhu (a0),d0
87 btst CHCTR_DCEN,d0
88 beq mn10300_local_dcache_inv_end
89
90 # invalidate
91 or CHCTR_DCINV,d0
92 movhu d0,(a0)
93 movhu (a0),d0
94
95mn10300_local_dcache_inv_end:
96 ret [],0
97 .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
98
99###############################################################################
100#
101# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
102# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
103# void mn10300_local_dcache_inv_page(unsigned long start)
104# Invalidate a range of addresses on a page in the dcache
105#
106###############################################################################
107 ALIGN
108 .globl mn10300_local_dcache_inv_page
109 .globl mn10300_local_dcache_inv_range
110 .globl mn10300_local_dcache_inv_range2
111 .type mn10300_local_dcache_inv_page,@function
112 .type mn10300_local_dcache_inv_range,@function
113 .type mn10300_local_dcache_inv_range2,@function
114mn10300_local_dcache_inv_page:
115 and ~(PAGE_SIZE-1),d0
116 mov PAGE_SIZE,d1
117mn10300_local_dcache_inv_range2:
118 add d0,d1
119mn10300_local_dcache_inv_range:
120 # If we are in writeback mode we check the start and end alignments,
121 # and if they're not cacheline-aligned, we must flush any bits outside
122 # the range that share cachelines with stuff inside the range
123#ifdef CONFIG_MN10300_CACHE_WBACK
124 btst L1_CACHE_BYTES-1,d0
125 bne 1f
126 btst L1_CACHE_BYTES-1,d1
127 beq 2f
1281:
129 bra mn10300_local_dcache_flush_inv_range
1302:
131#endif /* CONFIG_MN10300_CACHE_WBACK */
132
133 movm [d2,d3,a2],(sp)
134
135 mov CHCTR,a0
136 movhu (a0),d2
137 btst CHCTR_DCEN,d2
138 beq mn10300_local_dcache_inv_range_end
139
140 # round the addresses out to be full cachelines, unless we're in
141 # writeback mode, in which case we would be in flush and invalidate by
142 # now
143#ifndef CONFIG_MN10300_CACHE_WBACK
144 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
145 # addr down
146
147 mov L1_CACHE_BYTES-1,d2
148 add d2,d1
149 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1 # round end addr up
150#endif /* !CONFIG_MN10300_CACHE_WBACK */
151
152 sub d0,d1,d2 # calculate the total size
153 mov d0,a2 # A2 = start address
154 mov d1,a1 # A1 = end address
155
156 LOCAL_CLI_SAVE(d3)
157
158 mov DCPGCR,a0 # make sure the purger isn't busy
159 setlb
160 mov (a0),d0
161 btst DCPGCR_DCPGBSY,d0
162 lne
163
164 # skip initial address alignment calculation if address is zero
165 mov d2,d1
166 cmp 0,a2
167 beq 1f
168
169dcivloop:
170 /* calculate alignsize
171 *
172 * alignsize = L1_CACHE_BYTES;
173 * while (!(start & alignsize)) {
174 * alignsize <<= 1;
175 * }
176 * d1 = alignsize;
177 */
178 mov L1_CACHE_BYTES,d1
179 lsr 1,d1
180 setlb
181 add d1,d1
182 mov d1,d0
183 and a2,d0
184 leq
185
1861:
187 /* calculate invsize
188 *
189 * if (totalsize > alignsize) {
190 * invsize = alignsize;
191 * } else {
192 * invsize = totalsize;
193 * tmp = 0x80000000;
194 * while (!(invsize & tmp)) {
195 * tmp >>= 1;
196 * }
197 * invsize = tmp;
198 * }
199 * d1 = invsize
200 */
201 cmp d2,d1
202 bns 2f
203 mov d2,d1
204
205 mov 0x80000000,d0 # start from 31bit=1
206 setlb
207 lsr 1,d0
208 mov d0,e0
209 and d1,e0
210 leq
211 mov d0,d1
212
2132:
214 /* set mask
215 *
216 * mask = ~(invsize-1);
217 * DCPGMR = mask;
218 */
219 mov d1,d0
220 add -1,d0
221 not d0
222 mov d0,(DCPGMR)
223
224 # invalidate area
225 mov a2,d0
226 or DCPGCR_DCI,d0
227 mov d0,(a0) # DCPGCR = (mask & start) | DCPGCR_DCI
228
229 setlb # wait for the purge to complete
230 mov (a0),d0
231 btst DCPGCR_DCPGBSY,d0
232 lne
233
234 sub d1,d2 # decrease size remaining
235 add d1,a2 # increase next start address
236
237 /* check invalidating of end address
238 *
239 * a2 = a2 + invsize
240 * if (a2 < end) {
241 * goto dcivloop;
242 * } */
243 cmp a1,a2
244 bns dcivloop
245
246 LOCAL_IRQ_RESTORE(d3)
247
248mn10300_local_dcache_inv_range_end:
249 ret [d2,d3,a2],12
250 .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
251 .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
252 .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
253
254###############################################################################
255#
256# void mn10300_local_icache_inv_page(unsigned long start)
257# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
258# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
259# Invalidate a range of addresses on a page in the icache
260#
261###############################################################################
262 ALIGN
263 .globl mn10300_local_icache_inv_page
264 .globl mn10300_local_icache_inv_range
265 .globl mn10300_local_icache_inv_range2
266 .type mn10300_local_icache_inv_page,@function
267 .type mn10300_local_icache_inv_range,@function
268 .type mn10300_local_icache_inv_range2,@function
269mn10300_local_icache_inv_page:
270 and ~(PAGE_SIZE-1),d0
271 mov PAGE_SIZE,d1
272mn10300_local_icache_inv_range2:
273 add d0,d1
274mn10300_local_icache_inv_range:
275 movm [d2,d3,a2],(sp)
276
277 mov CHCTR,a0
278 movhu (a0),d2
279 btst CHCTR_ICEN,d2
280 beq mn10300_local_icache_inv_range_reg_end
281
282 /* calculate alignsize
283 *
284 * alignsize = L1_CACHE_BYTES;
285 * for (i = (end - start - 1) / L1_CACHE_BYTES ; i > 0; i >>= 1) {
286 * alignsize <<= 1;
287 * }
288 * d2 = alignsize;
289 */
290 mov L1_CACHE_BYTES,d2
291 sub d0,d1,d3
292 add -1,d3
293 lsr L1_CACHE_SHIFT,d3
294 beq 2f
2951:
296 add d2,d2
297 lsr 1,d3
298 bne 1b
2992:
300
301 /* a1 = end */
302 mov d1,a1
303
304 LOCAL_CLI_SAVE(d3)
305
306 mov ICIVCR,a0
307 /* wait for busy bit of area invalidation */
308 setlb
309 mov (a0),d1
310 btst ICIVCR_ICIVBSY,d1
311 lne
312
313 /* set mask
314 *
315 * mask = ~(alignsize-1);
316 * ICIVMR = mask;
317 */
318 mov d2,d1
319 add -1,d1
320 not d1
321 mov d1,(ICIVMR)
322 /* a2 = mask & start */
323 and d1,d0,a2
324
325icivloop:
326 /* area invalidate
327 *
328 * ICIVCR = (mask & start) | ICIVCR_ICI
329 */
330 mov a2,d0
331 or ICIVCR_ICI,d0
332 mov d0,(a0)
333
334 /* wait for busy bit of area invalidation */
335 setlb
336 mov (a0),d1
337 btst ICIVCR_ICIVBSY,d1
338 lne
339
340 /* check invalidating of end address
341 *
342 * a2 = a2 + alignsize
343 * if (a2 < end) {
344 * goto icivloop;
345 * } */
346 add d2,a2
347 cmp a1,a2
348 bns icivloop
349
350 LOCAL_IRQ_RESTORE(d3)
351
352mn10300_local_icache_inv_range_reg_end:
353 ret [d2,d3,a2],12
354 .size mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
355 .size mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
356 .size mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
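The variable-block strategy above, pick the largest naturally-aligned
power-of-two block starting at the current address, capped by what remains,
reads roughly as follows in C. Again illustrative only: DCPGCR/DCPGMR and
the bit names are the registers used by the assembly, the volatile-pointer
accesses stand in for its raw mov instructions, and the real routine runs
with interrupts disabled.

	/* Sketch of mn10300_local_dcache_inv_range() in C */
	static unsigned long high_pow2(unsigned long x)
	{
		unsigned long t = 0x80000000UL;	/* highest power of two <= x */
		while (!(x & t))
			t >>= 1;
		return t;
	}

	static void dcache_inv_range_by_reg(unsigned long start, unsigned long end)
	{
		volatile unsigned long *dcpgcr = (unsigned long *) DCPGCR;
		volatile unsigned long *dcpgmr = (unsigned long *) DCPGMR;
		unsigned long remaining = end - start;

		while (*dcpgcr & DCPGCR_DCPGBSY)
			;				/* purger must be idle first */

		while (remaining) {
			/* largest naturally-aligned block beginning at start... */
			unsigned long size = start ? (start & -start) : ~0UL;

			/* ...capped to the largest power of two still remaining */
			if (size > remaining)
				size = high_pow2(remaining);

			*dcpgmr = ~(size - 1);		/* comparison mask */
			*dcpgcr = start | DCPGCR_DCI;	/* area invalidate */
			while (*dcpgcr & DCPGCR_DCPGBSY)
				;			/* wait for completion */

			start += size;
			remaining -= size;
		}
	}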
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644
index 000000000000..e9713b40c0ff
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-by-tag.S
@@ -0,0 +1,348 @@
1/* MN10300 CPU core caching routines
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/sys.h>
12#include <linux/linkage.h>
13#include <asm/smp.h>
14#include <asm/page.h>
15#include <asm/cache.h>
16#include <asm/irqflags.h>
17#include <asm/cacheflush.h>
18
19#define mn10300_local_dcache_inv_range_intr_interval \
20 +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
21
22#if mn10300_local_dcache_inv_range_intr_interval > 0xff
23#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
24#endif
25
26 .am33_2
27
28 .globl mn10300_local_icache_inv_page
29 .globl mn10300_local_icache_inv_range
30 .globl mn10300_local_icache_inv_range2
31
32mn10300_local_icache_inv_page = mn10300_local_icache_inv
33mn10300_local_icache_inv_range = mn10300_local_icache_inv
34mn10300_local_icache_inv_range2 = mn10300_local_icache_inv
35
36#ifndef CONFIG_SMP
37 .globl mn10300_icache_inv
38 .globl mn10300_icache_inv_page
39 .globl mn10300_icache_inv_range
40 .globl mn10300_icache_inv_range2
41 .globl mn10300_dcache_inv
42 .globl mn10300_dcache_inv_page
43 .globl mn10300_dcache_inv_range
44 .globl mn10300_dcache_inv_range2
45
46mn10300_icache_inv = mn10300_local_icache_inv
47mn10300_icache_inv_page = mn10300_local_icache_inv_page
48mn10300_icache_inv_range = mn10300_local_icache_inv_range
49mn10300_icache_inv_range2 = mn10300_local_icache_inv_range2
50mn10300_dcache_inv = mn10300_local_dcache_inv
51mn10300_dcache_inv_page = mn10300_local_dcache_inv_page
52mn10300_dcache_inv_range = mn10300_local_dcache_inv_range
53mn10300_dcache_inv_range2 = mn10300_local_dcache_inv_range2
54
55#endif /* !CONFIG_SMP */
56
57###############################################################################
58#
59# void mn10300_local_icache_inv(void)
60# Invalidate the entire icache
61#
62###############################################################################
63 ALIGN
64 .globl mn10300_local_icache_inv
65 .type mn10300_local_icache_inv,@function
66mn10300_local_icache_inv:
67 mov CHCTR,a0
68
69 movhu (a0),d0
70 btst CHCTR_ICEN,d0
71 beq mn10300_local_icache_inv_end
72
73#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
74 LOCAL_CLI_SAVE(d1)
75
76 # disable the icache
77 and ~CHCTR_ICEN,d0
78 movhu d0,(a0)
79
80 # and wait for it to calm down
81 setlb
82 movhu (a0),d0
83 btst CHCTR_ICBUSY,d0
84 lne
85
86 # invalidate
87 or CHCTR_ICINV,d0
88 movhu d0,(a0)
89
90 # wait for the cache to finish
91 mov CHCTR,a0
92 setlb
93 movhu (a0),d0
94 btst CHCTR_ICBUSY,d0
95 lne
96
97 # and reenable it
98 and ~CHCTR_ICINV,d0
99 or CHCTR_ICEN,d0
100 movhu d0,(a0)
101 movhu (a0),d0
102
103 LOCAL_IRQ_RESTORE(d1)
104#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
105 # invalidate
106 or CHCTR_ICINV,d0
107 movhu d0,(a0)
108 movhu (a0),d0
109#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
110
111mn10300_local_icache_inv_end:
112 ret [],0
113 .size mn10300_local_icache_inv,.-mn10300_local_icache_inv
114
115###############################################################################
116#
117# void mn10300_local_dcache_inv(void)
118# Invalidate the entire dcache
119#
120###############################################################################
121 ALIGN
122 .globl mn10300_local_dcache_inv
123 .type mn10300_local_dcache_inv,@function
124mn10300_local_dcache_inv:
125 mov CHCTR,a0
126
127 movhu (a0),d0
128 btst CHCTR_DCEN,d0
129 beq mn10300_local_dcache_inv_end
130
131#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
132 LOCAL_CLI_SAVE(d1)
133
134 # disable the dcache
135 and ~CHCTR_DCEN,d0
136 movhu d0,(a0)
137
138 # and wait for it to calm down
139 setlb
140 movhu (a0),d0
141 btst CHCTR_DCBUSY,d0
142 lne
143
144 # invalidate
145 or CHCTR_DCINV,d0
146 movhu d0,(a0)
147
148 # wait for the cache to finish
149 mov CHCTR,a0
150 setlb
151 movhu (a0),d0
152 btst CHCTR_DCBUSY,d0
153 lne
154
155 # and reenable it
156 and ~CHCTR_DCINV,d0
157 or CHCTR_DCEN,d0
158 movhu d0,(a0)
159 movhu (a0),d0
160
161 LOCAL_IRQ_RESTORE(d1)
162#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
163 # invalidate
164 or CHCTR_DCINV,d0
165 movhu d0,(a0)
166 movhu (a0),d0
167#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
168
169mn10300_local_dcache_inv_end:
170 ret [],0
171 .size mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
172
173###############################################################################
174#
175# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
176# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
177# void mn10300_local_dcache_inv_page(unsigned long start)
178# Invalidate a range of addresses on a page in the dcache
179#
180###############################################################################
181 ALIGN
182 .globl mn10300_local_dcache_inv_page
183 .globl mn10300_local_dcache_inv_range
184 .globl mn10300_local_dcache_inv_range2
185 .type mn10300_local_dcache_inv_page,@function
186 .type mn10300_local_dcache_inv_range,@function
187 .type mn10300_local_dcache_inv_range2,@function
188mn10300_local_dcache_inv_page:
189 and ~(PAGE_SIZE-1),d0
190 mov PAGE_SIZE,d1
191mn10300_local_dcache_inv_range2:
192 add d0,d1
193mn10300_local_dcache_inv_range:
194 # If we are in writeback mode we check the start and end alignments,
195 # and if they're not cacheline-aligned, we must flush any bits outside
196 # the range that share cachelines with stuff inside the range
197#ifdef CONFIG_MN10300_CACHE_WBACK
198 btst L1_CACHE_BYTES-1,d0
199 bne 1f
200 btst L1_CACHE_BYTES-1,d1
201 beq 2f
2021:
203 bra mn10300_local_dcache_flush_inv_range
2042:
205#endif /* CONFIG_MN10300_CACHE_WBACK */
206
207 movm [d2,d3,a2],(sp)
208
209 mov CHCTR,a2
210 movhu (a2),d2
211 btst CHCTR_DCEN,d2
212 beq mn10300_local_dcache_inv_range_end
213
214#ifndef CONFIG_MN10300_CACHE_WBACK
215 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
216 # addr down
217
218 add L1_CACHE_BYTES,d1 # round end addr up
219 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
220#endif /* !CONFIG_MN10300_CACHE_WBACK */
221 mov d0,a1
222
223 clr d2 # we're going to clear tag RAM
224 # entries
225
226 # read the tags from the tag RAM, and if they indicate a valid dirty
227 # cache line then invalidate that line
228 mov DCACHE_TAG(0,0),a0
229 mov a1,d0
230 and L1_CACHE_TAG_ENTRY,d0
231 add d0,a0 # starting dcache tag RAM
232 # access address
233
234 sub a1,d1
235 lsr L1_CACHE_SHIFT,d1 # total number of entries to
236 # examine
237
238 and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
239
240mn10300_local_dcache_inv_range_outer_loop:
241 LOCAL_CLI_SAVE(d3)
242
243 # disable the dcache
244 movhu (a2),d0
245 and ~CHCTR_DCEN,d0
246 movhu d0,(a2)
247
248 # and wait for it to calm down
249 setlb
250 movhu (a2),d0
251 btst CHCTR_DCBUSY,d0
252 lne
253
254mn10300_local_dcache_inv_range_loop:
255
256 # process the way 0 slot
257 mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
258 btst L1_CACHE_TAG_VALID,d0
259 beq mn10300_local_dcache_inv_range_skip_0 # jump if this cacheline
260 # is not valid
261
262 xor a1,d0
263 lsr 12,d0
264 bne mn10300_local_dcache_inv_range_skip_0 # jump if not this cacheline
265
266 mov d2,(L1_CACHE_WAYDISP*0,a0) # kill the tag
267
268mn10300_local_dcache_inv_range_skip_0:
269
270 # process the way 1 slot
271 mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
272 btst L1_CACHE_TAG_VALID,d0
273 beq mn10300_local_dcache_inv_range_skip_1 # jump if this cacheline
274 # is not valid
275
276 xor a1,d0
277 lsr 12,d0
278 bne mn10300_local_dcache_inv_range_skip_1 # jump if not this cacheline
279
280 mov d2,(L1_CACHE_WAYDISP*1,a0) # kill the tag
281
282mn10300_local_dcache_inv_range_skip_1:
283
284 # process the way 2 slot
285 mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
286 btst L1_CACHE_TAG_VALID,d0
287 beq mn10300_local_dcache_inv_range_skip_2 # jump if this cacheline
288 # is not valid
289
290 xor a1,d0
291 lsr 12,d0
292 bne mn10300_local_dcache_inv_range_skip_2 # jump if not this cacheline
293
294 mov d2,(L1_CACHE_WAYDISP*2,a0) # kill the tag
295
296mn10300_local_dcache_inv_range_skip_2:
297
298 # process the way 3 slot
299 mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
300 btst L1_CACHE_TAG_VALID,d0
301 beq mn10300_local_dcache_inv_range_skip_3 # jump if this cacheline
302 # is not valid
303
304 xor a1,d0
305 lsr 12,d0
306 bne mn10300_local_dcache_inv_range_skip_3 # jump if not this cacheline
307
308 mov d2,(L1_CACHE_WAYDISP*3,a0) # kill the tag
309
310mn10300_local_dcache_inv_range_skip_3:
311
312 # approx every N steps we re-enable the cache and see if there are any
313 # interrupts to be processed
314 # we also break out if we've reached the end of the loop
315 # (the bottom nibble of the count is zero in both cases)
316 add L1_CACHE_BYTES,a0
317 add L1_CACHE_BYTES,a1
318 and ~L1_CACHE_WAYDISP,a0
319 add -1,d1
320 btst mn10300_local_dcache_inv_range_intr_interval,d1
321 bne mn10300_local_dcache_inv_range_loop
322
323 # wait for the cache to finish what it's doing
324 setlb
325 movhu (a2),d0
326 btst CHCTR_DCBUSY,d0
327 lne
328
329 # and reenable it
330 or CHCTR_DCEN,d0
331 movhu d0,(a2)
332 movhu (a2),d0
333
334 # re-enable interrupts
335 # - we don't bother with delay NOPs as we'll have enough instructions
336 # before we disable interrupts again to give the interrupts a chance
337 # to happen
338 LOCAL_IRQ_RESTORE(d3)
339
340 # go around again if the counter hasn't yet reached zero
341 add 0,d1
342 bne mn10300_local_dcache_inv_range_outer_loop
343
344mn10300_local_dcache_inv_range_end:
345 ret [d2,d3,a2],12
346 .size mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
347 .size mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
348 .size mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
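The tag-scan with periodic interrupt windows above can be summarised by this
C sketch. dcache_disable_and_wait(), dcache_enable_and_wait(),
dcache_tag_slot() and tag_matches() are hypothetical helpers standing in for
the corresponding instruction sequences, and INTR_INTERVAL_MASK corresponds
to mn10300_local_dcache_inv_range_intr_interval; only the loop structure is
taken from the assembly.

	/* Sketch of mn10300_local_dcache_inv_range() by tag, in C */
	static void dcache_inv_range_by_tag(unsigned long start, unsigned long end)
	{
		unsigned long entries = (end - start) >> L1_CACHE_SHIFT;
		unsigned long addr = start, flags;

		while (entries) {
			local_irq_save(flags);
			dcache_disable_and_wait();	/* hypothetical helper */

			do {
				int way;

				for (way = 0; way < L1_CACHE_NWAYS; way++) {
					volatile unsigned long *tag =
						dcache_tag_slot(way, addr); /* hypothetical */

					if ((*tag & L1_CACHE_TAG_VALID) &&
					    tag_matches(*tag, addr))	/* hypothetical */
						*tag = 0;		/* kill the tag */
				}
				addr += L1_CACHE_BYTES;
			} while (--entries & INTR_INTERVAL_MASK); /* burst of 2^N lines */

			dcache_enable_and_wait();	/* hypothetical helper */
			local_irq_restore(flags);	/* let interrupts in */
		}
	}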
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644
index 000000000000..a8933a60b2d4
--- /dev/null
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -0,0 +1,129 @@
1/* Invalidate icache when dcache doesn't need invalidation as it's in
2 * write-through mode
3 *
4 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
5 * Written by David Howells (dhowells@redhat.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public Licence
9 * as published by the Free Software Foundation; either version
10 * 2 of the Licence, or (at your option) any later version.
11 */
12#include <linux/module.h>
13#include <linux/mm.h>
14#include <asm/cacheflush.h>
15#include <asm/smp.h>
16#include "cache-smp.h"
17
18/**
19 * flush_icache_page_range - Invalidate the icache for part of a
20 * single page
21 * @start: The starting virtual address of the page part.
22 * @end: The ending virtual address of the page part.
23 *
24 * Invalidate the icache for part of a single page, as determined by the
25 * virtual addresses given. The page must be in the paged area. The dcache is
26 * not flushed as the cache must be in write-through mode to get here.
27 */
28static void flush_icache_page_range(unsigned long start, unsigned long end)
29{
30 unsigned long addr, size, off;
31 struct page *page;
32 pgd_t *pgd;
33 pud_t *pud;
34 pmd_t *pmd;
35 pte_t *ppte, pte;
36
37 /* work out how much of the page to flush */
38 off = start & ~PAGE_MASK;
39 size = end - start;
40
41 /* get the physical address the page is mapped to from the page
42 * tables */
43 pgd = pgd_offset(current->mm, start);
44 if (!pgd || !pgd_val(*pgd))
45 return;
46
47 pud = pud_offset(pgd, start);
48 if (!pud || !pud_val(*pud))
49 return;
50
51 pmd = pmd_offset(pud, start);
52 if (!pmd || !pmd_val(*pmd))
53 return;
54
55 ppte = pte_offset_map(pmd, start);
56 if (!ppte)
57 return;
58 pte = *ppte;
59 pte_unmap(ppte);
60
61 if (pte_none(pte))
62 return;
63
64 page = pte_page(pte);
65 if (!page)
66 return;
67
68 addr = page_to_phys(page);
69
70 /* invalidate the icache coverage on that region */
71 mn10300_local_icache_inv_range2(addr + off, size);
72 smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
73}
74
75/**
76 * flush_icache_range - Globally flush dcache and invalidate icache for region
77 * @start: The starting virtual address of the region.
78 * @end: The ending virtual address of the region.
79 *
80 * This is used by the kernel to globally flush some code it has just written
81 * from the dcache back to RAM and then to globally invalidate the icache over
82 * that region so that that code can be run on all CPUs in the system.
83 */
84void flush_icache_range(unsigned long start, unsigned long end)
85{
86 unsigned long start_page, end_page;
87 unsigned long flags;
88
89 flags = smp_lock_cache();
90
91 if (end > 0x80000000UL) {
92 /* addresses above 0xa0000000 do not go through the cache */
93 if (end > 0xa0000000UL) {
94 end = 0xa0000000UL;
95 if (start >= end)
96 goto done;
97 }
98
99 /* kernel addresses between 0x80000000 and 0x9fffffff do not
100 * require page tables, so we just map such addresses
101 * directly */
102 start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
103 mn10300_icache_inv_range(start_page, end);
104 smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
105 if (start_page == start)
106 goto done;
107 end = start_page;
108 }
109
110 start_page = start & PAGE_MASK;
111 end_page = (end - 1) & PAGE_MASK;
112
113 if (start_page == end_page) {
114 /* the first and last bytes are on the same page */
115 flush_icache_page_range(start, end);
116 } else if (start_page + 1 == end_page) {
117 /* split over two virtually contiguous pages */
118 flush_icache_page_range(start, end_page);
119 flush_icache_page_range(end_page, end);
120 } else {
121 /* more than 2 pages; just flush the entire cache */
122 mn10300_local_icache_inv();
123 smp_cache_call(SMP_ICACHE_INV, 0, 0);
124 }
125
126done:
127 smp_unlock_cache(flags);
128}
129EXPORT_SYMBOL(flush_icache_range);
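For context, a typical caller pattern (illustrative only; buf, code and size are hypothetical) is to write instructions into memory and then force the icache into sync before jumping to them:

	memcpy(buf, code, size);
	flush_icache_range((unsigned long)buf, (unsigned long)buf + size);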
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644
index e839d0aedd69..000000000000
--- a/arch/mn10300/mm/cache-mn10300.S
+++ /dev/null
@@ -1,289 +0,0 @@
1/* MN10300 CPU core caching routines
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/sys.h>
12#include <linux/linkage.h>
13#include <asm/smp.h>
14#include <asm/page.h>
15#include <asm/cache.h>
16
17#define mn10300_dcache_inv_range_intr_interval \
18 +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
19
20#if mn10300_dcache_inv_range_intr_interval > 0xff
21#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
22#endif
23
24 .am33_2
25
26 .globl mn10300_icache_inv
27 .globl mn10300_dcache_inv
28 .globl mn10300_dcache_inv_range
29 .globl mn10300_dcache_inv_range2
30 .globl mn10300_dcache_inv_page
31
32###############################################################################
33#
34# void mn10300_icache_inv(void)
35# Invalidate the entire icache
36#
37###############################################################################
38 ALIGN
39mn10300_icache_inv:
40 mov CHCTR,a0
41
42 movhu (a0),d0
43 btst CHCTR_ICEN,d0
44 beq mn10300_icache_inv_end
45
46 mov epsw,d1
47 and ~EPSW_IE,epsw
48 nop
49 nop
50
51 # disable the icache
52 and ~CHCTR_ICEN,d0
53 movhu d0,(a0)
54
55 # and wait for it to calm down
56 setlb
57 movhu (a0),d0
58 btst CHCTR_ICBUSY,d0
59 lne
60
61 # invalidate
62 or CHCTR_ICINV,d0
63 movhu d0,(a0)
64
65 # wait for the cache to finish
66 mov CHCTR,a0
67 setlb
68 movhu (a0),d0
69 btst CHCTR_ICBUSY,d0
70 lne
71
72 # and reenable it
73 and ~CHCTR_ICINV,d0
74 or CHCTR_ICEN,d0
75 movhu d0,(a0)
76 movhu (a0),d0
77
78 mov d1,epsw
79
80mn10300_icache_inv_end:
81 ret [],0
82
83###############################################################################
84#
85# void mn10300_dcache_inv(void)
86# Invalidate the entire dcache
87#
88###############################################################################
89 ALIGN
90mn10300_dcache_inv:
91 mov CHCTR,a0
92
93 movhu (a0),d0
94 btst CHCTR_DCEN,d0
95 beq mn10300_dcache_inv_end
96
97 mov epsw,d1
98 and ~EPSW_IE,epsw
99 nop
100 nop
101
102 # disable the dcache
103 and ~CHCTR_DCEN,d0
104 movhu d0,(a0)
105
106 # and wait for it to calm down
107 setlb
108 movhu (a0),d0
109 btst CHCTR_DCBUSY,d0
110 lne
111
112 # invalidate
113 or CHCTR_DCINV,d0
114 movhu d0,(a0)
115
116 # wait for the cache to finish
117 mov CHCTR,a0
118 setlb
119 movhu (a0),d0
120 btst CHCTR_DCBUSY,d0
121 lne
122
123 # and reenable it
124 and ~CHCTR_DCINV,d0
125 or CHCTR_DCEN,d0
126 movhu d0,(a0)
127 movhu (a0),d0
128
129 mov d1,epsw
130
131mn10300_dcache_inv_end:
132 ret [],0
133
134###############################################################################
135#
136# void mn10300_dcache_inv_range(unsigned start, unsigned end)
137# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
138# void mn10300_dcache_inv_page(unsigned start)
139# Invalidate a range of addresses on a page in the dcache
140#
141###############################################################################
142 ALIGN
143mn10300_dcache_inv_page:
144 mov PAGE_SIZE,d1
145mn10300_dcache_inv_range2:
146 add d0,d1
147mn10300_dcache_inv_range:
148 movm [d2,d3,a2],(sp)
149 mov CHCTR,a2
150
151 movhu (a2),d2
152 btst CHCTR_DCEN,d2
153 beq mn10300_dcache_inv_range_end
154
155 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0 # round start
156 # addr down
157 mov d0,a1
158
159 add L1_CACHE_BYTES,d1 # round end addr up
160 and L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
161
162 clr d2 # we're going to clear tag ram
163 # entries
164
165 # read the tags from the tag RAM, and if they indicate a valid dirty
166 # cache line then invalidate that line
167 mov DCACHE_TAG(0,0),a0
168 mov a1,d0
169 and L1_CACHE_TAG_ENTRY,d0
170 add d0,a0 # starting dcache tag RAM
171 # access address
172
173 sub a1,d1
174 lsr L1_CACHE_SHIFT,d1 # total number of entries to
175 # examine
176
177 and ~(L1_CACHE_DISPARITY-1),a1 # determine comparator base
178
179mn10300_dcache_inv_range_outer_loop:
180 # disable interrupts
181 mov epsw,d3
182 and ~EPSW_IE,epsw
183 nop # note that reading CHCTR and
184 # AND'ing D0 occupy two delay
185 # slots after disabling
186 # interrupts
187
188 # disable the dcache
189 movhu (a2),d0
190 and ~CHCTR_DCEN,d0
191 movhu d0,(a2)
192
193 # and wait for it to calm down
194 setlb
195 movhu (a2),d0
196 btst CHCTR_DCBUSY,d0
197 lne
198
199mn10300_dcache_inv_range_loop:
200
201 # process the way 0 slot
202 mov (L1_CACHE_WAYDISP*0,a0),d0 # read the tag in the way 0 slot
203 btst L1_CACHE_TAG_VALID,d0
204 beq mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
205 # valid
206
207 xor a1,d0
208 lsr 12,d0
209 bne mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
210
211 mov d2,(a0) # kill the tag
212
213mn10300_dcache_inv_range_skip_0:
214
215 # process the way 1 slot
216 mov (L1_CACHE_WAYDISP*1,a0),d0 # read the tag in the way 1 slot
217 btst L1_CACHE_TAG_VALID,d0
218 beq mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
219 # valid
220
221 xor a1,d0
222 lsr 12,d0
223 bne mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
224
225 mov d2,(a0) # kill the tag
226
227mn10300_dcache_inv_range_skip_1:
228
229 # process the way 2 slot
230 mov (L1_CACHE_WAYDISP*2,a0),d0 # read the tag in the way 2 slot
231 btst L1_CACHE_TAG_VALID,d0
232 beq mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
233 # valid
234
235 xor a1,d0
236 lsr 12,d0
237 bne mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
238
239 mov d2,(a0) # kill the tag
240
241mn10300_dcache_inv_range_skip_2:
242
243 # process the way 3 slot
244 mov (L1_CACHE_WAYDISP*3,a0),d0 # read the tag in the way 3 slot
245 btst L1_CACHE_TAG_VALID,d0
246 beq mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
247 # valid
248
249 xor a1,d0
250 lsr 12,d0
251 bne mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
252
253 mov d2,(a0) # kill the tag
254
255mn10300_dcache_inv_range_skip_3:
256
257 # approx every N steps we re-enable the cache and see if there are any
258 # interrupts to be processed
259 # we also break out if we've reached the end of the loop
260 # (the bottom nibble of the count is zero in both cases)
261 add L1_CACHE_BYTES,a0
262 add L1_CACHE_BYTES,a1
263 add -1,d1
264 btst mn10300_dcache_inv_range_intr_interval,d1
265 bne mn10300_dcache_inv_range_loop
266
267 # wait for the cache to finish what it's doing
268 setlb
269 movhu (a2),d0
270 btst CHCTR_DCBUSY,d0
271 lne
272
273 # and reenable it
274 or CHCTR_DCEN,d0
275 movhu d0,(a2)
276 movhu (a2),d0
277
278 # re-enable interrupts
279 # - we don't bother with delay NOPs as we'll have enough instructions
280 # before we disable interrupts again to give the interrupts a chance
281 # to happen
282 mov d3,epsw
283
284 # go around again if the counter hasn't yet reached zero
285 add 0,d1
286 bne mn10300_dcache_inv_range_outer_loop
287
288mn10300_dcache_inv_range_end:
289 ret [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644
index 000000000000..fd51af5eaf70
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-flush.c
@@ -0,0 +1,156 @@
 1/* Functions for global dcache flushing when write-back caching is used in SMP
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/mm.h>
12#include <asm/cacheflush.h>
13#include "cache-smp.h"
14
15/**
16 * mn10300_dcache_flush - Globally flush data cache
17 *
18 * Flush the data cache on all CPUs.
19 */
20void mn10300_dcache_flush(void)
21{
22 unsigned long flags;
23
24 flags = smp_lock_cache();
25 mn10300_local_dcache_flush();
26 smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
27 smp_unlock_cache(flags);
28}
29
30/**
31 * mn10300_dcache_flush_page - Globally flush a page of data cache
32 * @start: The address of the page of memory to be flushed.
33 *
34 * Flush a range of addresses in the data cache on all CPUs covering
35 * the page that includes the given address.
36 */
37void mn10300_dcache_flush_page(unsigned long start)
38{
39 unsigned long flags;
40
41 start &= ~(PAGE_SIZE-1);
42
43 flags = smp_lock_cache();
44 mn10300_local_dcache_flush_page(start);
45 smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
46 smp_unlock_cache(flags);
47}
48
49/**
50 * mn10300_dcache_flush_range - Globally flush range of data cache
51 * @start: The start address of the region to be flushed.
52 * @end: The end address of the region to be flushed.
53 *
54 * Flush a range of addresses in the data cache on all CPUs, between start and
55 * end-1 inclusive.
56 */
57void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
58{
59 unsigned long flags;
60
61 flags = smp_lock_cache();
62 mn10300_local_dcache_flush_range(start, end);
63 smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
64 smp_unlock_cache(flags);
65}
66
67/**
68 * mn10300_dcache_flush_range2 - Globally flush range of data cache
69 * @start: The start address of the region to be flushed.
70 * @size: The size of the region to be flushed.
71 *
72 * Flush a range of addresses in the data cache on all CPUs, between start and
73 * start+size-1 inclusive.
74 */
75void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
76{
77 unsigned long flags;
78
79 flags = smp_lock_cache();
80 mn10300_local_dcache_flush_range2(start, size);
81 smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
82 smp_unlock_cache(flags);
83}
84
85/**
86 * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
87 *
88 * Flush and invalidate the data cache on all CPUs.
89 */
90void mn10300_dcache_flush_inv(void)
91{
92 unsigned long flags;
93
94 flags = smp_lock_cache();
95 mn10300_local_dcache_flush_inv();
96 smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
97 smp_unlock_cache(flags);
98}
99
100/**
101 * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
102 * cache
103 * @start: The address of the page of memory to be flushed and invalidated.
104 *
105 * Flush and invalidate a range of addresses in the data cache on all CPUs
106 * covering the page that includes the given address.
107 */
108void mn10300_dcache_flush_inv_page(unsigned long start)
109{
110 unsigned long flags;
111
112 start &= ~(PAGE_SIZE-1);
113
114 flags = smp_lock_cache();
115 mn10300_local_dcache_flush_inv_page(start);
116 smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
117 smp_unlock_cache(flags);
118}
119
120/**
121 * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
122 * cache
123 * @start: The start address of the region to be flushed and invalidated.
124 * @end: The end address of the region to be flushed and invalidated.
125 *
126 * Flush and invalidate a range of addresses in the data cache on all CPUs,
127 * between start and end-1 inclusive.
128 */
129void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
130{
131 unsigned long flags;
132
133 flags = smp_lock_cache();
134 mn10300_local_dcache_flush_inv_range(start, end);
135 smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
136 smp_unlock_cache(flags);
137}
138
139/**
140 * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
141 * cache
142 * @start: The start address of the region to be flushed and invalidated.
143 * @size: The size of the region to be flushed and invalidated.
144 *
145 * Flush and invalidate a range of addresses in the data cache on all CPUs,
146 * between start and start+size-1 inclusive.
147 */
148void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
149{
150 unsigned long flags;
151
152 flags = smp_lock_cache();
153 mn10300_local_dcache_flush_inv_range2(start, size);
154 smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
155 smp_unlock_cache(flags);
156}
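As an illustration of when these are needed (not taken from this patch): before a device DMA-reads a buffer that the CPU filled through a write-back dcache, the dirty lines covering it must be pushed to RAM on every CPU. Here start, len, dev and the DMA helper are hypothetical values from the platform DMA code:

	mn10300_dcache_flush_range2(start, len);	/* make RAM current */
	start_device_dma(dev, start, len);		/* hypothetical */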
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644
index 000000000000..ff1787358c8e
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp-inv.c
@@ -0,0 +1,153 @@
 1/* Functions for global i/dcache invalidation when caching is enabled in SMP
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/mm.h>
12#include <asm/cacheflush.h>
13#include "cache-smp.h"
14
15/**
16 * mn10300_icache_inv - Globally invalidate instruction cache
17 *
18 * Invalidate the instruction cache on all CPUs.
19 */
20void mn10300_icache_inv(void)
21{
22 unsigned long flags;
23
24 flags = smp_lock_cache();
25 mn10300_local_icache_inv();
26 smp_cache_call(SMP_ICACHE_INV, 0, 0);
27 smp_unlock_cache(flags);
28}
29
30/**
31 * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
32 * @start: The address of the page of memory to be invalidated.
33 *
34 * Invalidate a range of addresses in the instruction cache on all CPUs
35 * covering the page that includes the given address.
36 */
37void mn10300_icache_inv_page(unsigned long start)
38{
39 unsigned long flags;
40
41 start &= ~(PAGE_SIZE-1);
42
43 flags = smp_lock_cache();
44 mn10300_local_icache_inv_page(start);
45 smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
46 smp_unlock_cache(flags);
47}
48
49/**
50 * mn10300_icache_inv_range - Globally invalidate range of instruction cache
51 * @start: The start address of the region to be invalidated.
52 * @end: The end address of the region to be invalidated.
53 *
54 * Invalidate a range of addresses in the instruction cache on all CPUs,
55 * between start and end-1 inclusive.
56 */
57void mn10300_icache_inv_range(unsigned long start, unsigned long end)
58{
59 unsigned long flags;
60
61 flags = smp_lock_cache();
62 mn10300_local_icache_inv_range(start, end);
63 smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
64 smp_unlock_cache(flags);
65}
66
67/**
68 * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
69 * @start: The start address of the region to be invalidated.
70 * @size: The size of the region to be invalidated.
71 *
72 * Invalidate a range of addresses in the instruction cache on all CPUs,
73 * between start and start+size-1 inclusive.
74 */
75void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
76{
77 unsigned long flags;
78
79 flags = smp_lock_cache();
80 mn10300_local_icache_inv_range2(start, size);
81 smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
82 smp_unlock_cache(flags);
83}
84
85/**
86 * mn10300_dcache_inv - Globally invalidate data cache
87 *
88 * Invalidate the data cache on all CPUs.
89 */
90void mn10300_dcache_inv(void)
91{
92 unsigned long flags;
93
94 flags = smp_lock_cache();
95 mn10300_local_dcache_inv();
96 smp_cache_call(SMP_DCACHE_INV, 0, 0);
97 smp_unlock_cache(flags);
98}
99
100/**
101 * mn10300_dcache_inv_page - Globally invalidate a page of data cache
102 * @start: The address of the page of memory to be invalidated.
103 *
104 * Invalidate a range of addresses in the data cache on all CPUs covering the
105 * page that includes the given address.
106 */
107void mn10300_dcache_inv_page(unsigned long start)
108{
109 unsigned long flags;
110
111 start &= ~(PAGE_SIZE-1);
112
113 flags = smp_lock_cache();
114 mn10300_local_dcache_inv_page(start);
115 smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
116 smp_unlock_cache(flags);
117}
118
119/**
120 * mn10300_dcache_inv_range - Globally invalidate range of data cache
121 * @start: The start address of the region to be invalidated.
122 * @end: The end address of the region to be invalidated.
123 *
124 * Invalidate a range of addresses in the data cache on all CPUs, between start
125 * and end-1 inclusive.
126 */
127void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
128{
129 unsigned long flags;
130
131 flags = smp_lock_cache();
132 mn10300_local_dcache_inv_range(start, end);
133 smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
134 smp_unlock_cache(flags);
135}
136
137/**
138 * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
139 * @start: The start address of the region to be invalidated.
140 * @size: The size of the region to be invalidated.
141 *
142 * Invalidate a range of addresses in the data cache on all CPUs, between start
143 * and start+size-1 inclusive.
144 */
145void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
146{
147 unsigned long flags;
148
149 flags = smp_lock_cache();
150 mn10300_local_dcache_inv_range2(start, size);
151 smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
152 smp_unlock_cache(flags);
153}
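The invalidation side covers the opposite DMA direction (illustrative only; start, len, dev and the wait helper are hypothetical): once a device has DMA-written into memory, any cachelines still covering the buffer hold stale data and must be discarded before the CPUs read it:

	wait_for_device_dma(dev);			/* hypothetical */
	mn10300_dcache_inv_range2(start, len);		/* drop stale lines */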
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644
index 000000000000..4a6e9a4b5b27
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.c
@@ -0,0 +1,105 @@
1/* SMP global caching code
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/module.h>
12#include <linux/mm.h>
13#include <linux/mman.h>
14#include <linux/threads.h>
15#include <linux/interrupt.h>
16#include <asm/page.h>
17#include <asm/pgtable.h>
18#include <asm/processor.h>
19#include <asm/cacheflush.h>
20#include <asm/io.h>
21#include <asm/uaccess.h>
22#include <asm/smp.h>
23#include "cache-smp.h"
24
25DEFINE_SPINLOCK(smp_cache_lock);
26static unsigned long smp_cache_mask;
27static unsigned long smp_cache_start;
28static unsigned long smp_cache_end;
 29static cpumask_t smp_cache_ipi_map; /* Bitmask of CPUs with cache IPIs still pending */
30
31/**
32 * smp_cache_interrupt - Handle IPI request to flush caches.
33 *
34 * Handle a request delivered by IPI to flush the current CPU's
35 * caches. The parameters are stored in smp_cache_*.
36 */
37void smp_cache_interrupt(void)
38{
39 unsigned long opr_mask = smp_cache_mask;
40
41 switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
42 case SMP_DCACHE_NOP:
43 break;
44 case SMP_DCACHE_INV:
45 mn10300_local_dcache_inv();
46 break;
47 case SMP_DCACHE_INV_RANGE:
48 mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
49 break;
50 case SMP_DCACHE_FLUSH:
51 mn10300_local_dcache_flush();
52 break;
53 case SMP_DCACHE_FLUSH_RANGE:
54 mn10300_local_dcache_flush_range(smp_cache_start,
55 smp_cache_end);
56 break;
57 case SMP_DCACHE_FLUSH_INV:
58 mn10300_local_dcache_flush_inv();
59 break;
60 case SMP_DCACHE_FLUSH_INV_RANGE:
61 mn10300_local_dcache_flush_inv_range(smp_cache_start,
62 smp_cache_end);
63 break;
64 }
65
66 switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
67 case SMP_ICACHE_NOP:
68 break;
69 case SMP_ICACHE_INV:
70 mn10300_local_icache_inv();
71 break;
72 case SMP_ICACHE_INV_RANGE:
73 mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
74 break;
75 }
76
77 cpu_clear(smp_processor_id(), smp_cache_ipi_map);
78}
79
80/**
81 * smp_cache_call - Issue an IPI to request the other CPUs flush caches
82 * @opr_mask: Cache operation flags
83 * @start: Start address of request
84 * @end: End address of request
85 *
86 * Send cache flush IPI to other CPUs. This invokes smp_cache_interrupt()
87 * above on those other CPUs and then waits for them to finish.
88 *
89 * The caller must hold smp_cache_lock.
90 */
91void smp_cache_call(unsigned long opr_mask,
92 unsigned long start, unsigned long end)
93{
94 smp_cache_mask = opr_mask;
95 smp_cache_start = start;
96 smp_cache_end = end;
97 smp_cache_ipi_map = cpu_online_map;
98 cpu_clear(smp_processor_id(), smp_cache_ipi_map);
99
100 send_IPI_allbutself(FLUSH_CACHE_IPI);
101
102 while (!cpus_empty(smp_cache_ipi_map))
103 /* nothing. lockup detection does not belong here */
104 mb();
105}
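Every global operation in the two files above follows the same contract with this code: take smp_cache_lock, perform the operation locally, then mirror it remotely and wait. A minimal sketch of that calling sequence, matching mn10300_icache_inv():

	unsigned long flags;

	flags = smp_lock_cache();		/* serialise smp_cache_* state */
	mn10300_local_icache_inv();		/* do it on this CPU */
	smp_cache_call(SMP_ICACHE_INV, 0, 0);	/* ...and on the others */
	smp_unlock_cache(flags);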
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644
index 000000000000..cb52892aa66a
--- /dev/null
+++ b/arch/mn10300/mm/cache-smp.h
@@ -0,0 +1,69 @@
1/* SMP caching definitions
2 *
3 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11
12
13/*
14 * Operation requests for smp_cache_call().
15 *
16 * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
17 */
18enum smp_icache_ops {
19 SMP_ICACHE_NOP = 0x0000,
20 SMP_ICACHE_INV = 0x0001,
21 SMP_ICACHE_INV_RANGE = 0x0002,
22};
23#define SMP_ICACHE_OP_MASK 0x0003
24
25enum smp_dcache_ops {
26 SMP_DCACHE_NOP = 0x0000,
27 SMP_DCACHE_INV = 0x0004,
28 SMP_DCACHE_INV_RANGE = 0x0008,
29 SMP_DCACHE_FLUSH = 0x000c,
30 SMP_DCACHE_FLUSH_RANGE = 0x0010,
31 SMP_DCACHE_FLUSH_INV = 0x0014,
32 SMP_DCACHE_FLUSH_INV_RANGE = 0x0018,
33};
34#define SMP_DCACHE_OP_MASK 0x001c
35
36#define SMP_IDCACHE_INV_FLUSH (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
37#define SMP_IDCACHE_INV_FLUSH_RANGE (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
38
39/*
40 * cache-smp.c
41 */
42#ifdef CONFIG_SMP
43extern spinlock_t smp_cache_lock;
44
45extern void smp_cache_call(unsigned long opr_mask,
46 unsigned long addr, unsigned long end);
47
48static inline unsigned long smp_lock_cache(void)
49 __acquires(&smp_cache_lock)
50{
51 unsigned long flags;
52 spin_lock_irqsave(&smp_cache_lock, flags);
53 return flags;
54}
55
56static inline void smp_unlock_cache(unsigned long flags)
57 __releases(&smp_cache_lock)
58{
59 spin_unlock_irqrestore(&smp_cache_lock, flags);
60}
61
62#else
63static inline unsigned long smp_lock_cache(void) { return 0; }
64static inline void smp_unlock_cache(unsigned long flags) {}
65static inline void smp_cache_call(unsigned long opr_mask,
66 unsigned long addr, unsigned long end)
67{
68}
69#endif /* CONFIG_SMP */
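Since one icache op and one dcache op can be OR'd into a single request, a caller can flush and invalidate in one IPI rather than two; this is exactly what the SMP_IDCACHE_INV_FLUSH_RANGE shorthand above expands to. A sketch, with start and end hypothetical:

	smp_cache_call(SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE,
		       start, end);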
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
index 9261217e8d2c..0a1f0aa92ebc 100644
--- a/arch/mn10300/mm/cache.c
+++ b/arch/mn10300/mm/cache.c
@@ -18,8 +18,13 @@
 18#include <asm/cacheflush.h>
 19#include <asm/io.h>
 20#include <asm/uaccess.h>
 21#include <asm/smp.h>
 22#include "cache-smp.h"
 23
 24EXPORT_SYMBOL(mn10300_icache_inv);
 25EXPORT_SYMBOL(mn10300_icache_inv_range);
 26EXPORT_SYMBOL(mn10300_icache_inv_range2);
 27EXPORT_SYMBOL(mn10300_icache_inv_page);
 28EXPORT_SYMBOL(mn10300_dcache_inv);
 29EXPORT_SYMBOL(mn10300_dcache_inv_range);
 30EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -37,96 +42,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_page);
 42#endif
 43
 44/*
40 * write a page back from the dcache and invalidate the icache so that we can
41 * run code from it that we've just written into it
42 */
43void flush_icache_page(struct vm_area_struct *vma, struct page *page)
44{
45 mn10300_dcache_flush_page(page_to_phys(page));
46 mn10300_icache_inv();
47}
48EXPORT_SYMBOL(flush_icache_page);
49
50/*
51 * write some code we've just written back from the dcache and invalidate the
52 * icache so that we can run that code
53 */
54void flush_icache_range(unsigned long start, unsigned long end)
55{
56#ifdef CONFIG_MN10300_CACHE_WBACK
57 unsigned long addr, size, base, off;
58 struct page *page;
59 pgd_t *pgd;
60 pud_t *pud;
61 pmd_t *pmd;
62 pte_t *ppte, pte;
63
64 if (end > 0x80000000UL) {
65 /* addresses above 0xa0000000 do not go through the cache */
66 if (end > 0xa0000000UL) {
67 end = 0xa0000000UL;
68 if (start >= end)
69 return;
70 }
71
72 /* kernel addresses between 0x80000000 and 0x9fffffff do not
73 * require page tables, so we just map such addresses directly */
74 base = (start >= 0x80000000UL) ? start : 0x80000000UL;
75 mn10300_dcache_flush_range(base, end);
76 if (base == start)
77 goto invalidate;
78 end = base;
79 }
80
81 for (; start < end; start += size) {
82 /* work out how much of the page to flush */
83 off = start & (PAGE_SIZE - 1);
84
85 size = end - start;
86 if (size > PAGE_SIZE - off)
87 size = PAGE_SIZE - off;
88
89 /* get the physical address the page is mapped to from the page
90 * tables */
91 pgd = pgd_offset(current->mm, start);
92 if (!pgd || !pgd_val(*pgd))
93 continue;
94
95 pud = pud_offset(pgd, start);
96 if (!pud || !pud_val(*pud))
97 continue;
98
99 pmd = pmd_offset(pud, start);
100 if (!pmd || !pmd_val(*pmd))
101 continue;
102
103 ppte = pte_offset_map(pmd, start);
104 if (!ppte)
105 continue;
106 pte = *ppte;
107 pte_unmap(ppte);
108
109 if (pte_none(pte))
110 continue;
111
112 page = pte_page(pte);
113 if (!page)
114 continue;
115
116 addr = page_to_phys(page);
117
118 /* flush the dcache and invalidate the icache coverage on that
119 * region */
120 mn10300_dcache_flush_range2(addr + off, size);
121 }
122#endif
123
124invalidate:
125 mn10300_icache_inv();
126}
127EXPORT_SYMBOL(flush_icache_range);
128
 129/*
 45 * allow userspace to flush the instruction cache
 46 */
 47asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 81f153fa51b4..59c3da49d9d9 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
 39{
 40 if (yes) {
 41 oops_in_progress = 1;
42#ifdef CONFIG_SMP
43 /* Many serial drivers do __global_cli() */
44 global_irq_lock = 0;
45#endif
 42 } else {
 43 int loglevel_save = console_loglevel;
 44#ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
 96}
 97#endif
 98
103asmlinkage void monitor_signal(struct pt_regs *);
104
 99/*
 100 * This routine handles page faults. It determines the address,
 101 * and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
 273 */
 274bad_area:
 275 up_read(&mm->mmap_sem);
 282 monitor_signal(regs);
 276
 277 /* User mode accesses just cause a SIGSEGV */
 278 if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
 285 }
 286
 287no_context:
 295 monitor_signal(regs);
 288 /* Are we prepared to handle this kernel fault? */
 289 if (fixup_exception(regs))
 290 return;
@@ -338,14 +330,13 @@ no_context:
 330 */
 331out_of_memory:
 332 up_read(&mm->mmap_sem);
 341 if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
 342 goto no_context;
 343 pagefault_out_of_memory();
 344 return;
 333 printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
 334 if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
 335 do_exit(SIGKILL);
 336 goto no_context;
 337
 338do_sigbus:
 339 up_read(&mm->mmap_sem);
 348 monitor_signal(regs);
 340
 341 /*
 342 * Send a sigbus, regardless of whether we were in kernel
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
index 6e6bc0e51521..48907cc3bdb7 100644
--- a/arch/mn10300/mm/init.c
+++ b/arch/mn10300/mm/init.c
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 41
 42unsigned long highstart_pfn, highend_pfn;
 43
44#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
45static struct vm_struct user_iomap_vm;
46#endif
47
 48/*
 49 * set up paging
 50 */
@@ -73,7 +77,24 @@ void __init paging_init(void)
 77 /* pass the memory from the bootmem allocator to the main allocator */
 78 free_area_init(zones_size);
 79
 76 __flush_tlb_all();
 80#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
81 /* The Atomic Operation Unit registers need to be mapped to userspace
82 * for all processes. The following uses vm_area_register_early() to
83 * reserve the first page of the vmalloc area and sets the pte for that
84 * page.
85 *
86 * glibc hardcodes this virtual mapping, so we're pretty much stuck with
87 * it from now on.
88 */
89 user_iomap_vm.flags = VM_USERMAP;
90 user_iomap_vm.size = 1 << PAGE_SHIFT;
91 vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
92 ppte = kernel_vmalloc_ptes;
93 set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
94 PAGE_USERIO));
95#endif
96
97 local_flush_tlb_all();
 98}
 99
 100/*
@@ -84,8 +105,7 @@ void __init mem_init(void)
 105 int codesize, reservedpages, datasize, initsize;
 106 int tmp;
 107
 87 if (!mem_map)
 88 BUG();
 108 BUG_ON(!mem_map);
 109
 110#define START_PFN (contig_page_data.bdata->node_min_pfn)
 111#define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn)
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
index 6dffbf97ac26..eef989c1d0c1 100644
--- a/arch/mn10300/mm/misalignment.c
+++ b/arch/mn10300/mm/misalignment.c
@@ -449,8 +449,7 @@ found_opcode:
 449 regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
 450
 451 tmp = format_tbl[pop->format].opsz;
 452 if (tmp > noc)
 453 BUG(); /* match was less complete than it ought to have been */
 452 BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
 453
 454 if (tmp < noc) {
 455 tmp = noc - tmp;
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
index 36ba02191d40..a4f7d3dcc6e6 100644
--- a/arch/mn10300/mm/mmu-context.c
+++ b/arch/mn10300/mm/mmu-context.c
@@ -13,40 +13,15 @@
 13#include <asm/mmu_context.h>
 14#include <asm/tlbflush.h>
 15
16#ifdef CONFIG_MN10300_TLB_USE_PIDR
 17/*
 18 * list of the MMU contexts last allocated on each CPU
 19 */
 20unsigned long mmu_context_cache[NR_CPUS] = {
 20 [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
 21 [0 ... NR_CPUS - 1] =
 22 MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
 23};
 22
 24#endif /* CONFIG_MN10300_TLB_USE_PIDR */
23/*
24 * flush the specified TLB entry
25 */
26void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
27{
28 unsigned long pteu, cnx, flags;
29
30 addr &= PAGE_MASK;
31
32 /* make sure the context doesn't migrate and defend against
33 * interference from vmalloc'd regions */
34 local_irq_save(flags);
35
36 cnx = mm_context(vma->vm_mm);
37
38 if (cnx != MMU_NO_CONTEXT) {
39 pteu = addr | (cnx & 0x000000ffUL);
40 IPTEU = pteu;
41 DPTEU = pteu;
42 if (IPTEL & xPTEL_V)
43 IPTEL = 0;
44 if (DPTEL & xPTEL_V)
45 DPTEL = 0;
46 }
47
48 local_irq_restore(flags);
49}
 25
 26/*
 27 * preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
 38 * interference from vmalloc'd regions */
 39 local_irq_save(flags);
 40
41 cnx = ~MMU_NO_CONTEXT;
42#ifdef CONFIG_MN10300_TLB_USE_PIDR
 43 cnx = mm_context(vma->vm_mm);
 44#endif
 45
 46 if (cnx != MMU_NO_CONTEXT) {
 69 pteu = addr | (cnx & 0x000000ffUL);
 47 pteu = addr;
 48#ifdef CONFIG_MN10300_TLB_USE_PIDR
 49 pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
 50#endif
 51 if (!(pte_val(pte) & _PAGE_NX)) {
 52 IPTEU = pteu;
 53 if (IPTEL & xPTEL_V)
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
index 9c1624c9e4e9..450f7ba3f8f2 100644
--- a/arch/mn10300/mm/pgtable.c
+++ b/arch/mn10300/mm/pgtable.c
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 59 * It's enough to flush this one mapping.
 60 * (PGE mappings get flushed as well)
 61 */
 62 __flush_tlb_one(vaddr);
 62 local_flush_tlb_one(vaddr);
 63}
 64
 65pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
index 7095147dcb8b..b9940177d81b 100644
--- a/arch/mn10300/mm/tlb-mn10300.S
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -27,7 +27,6 @@
 27###############################################################################
 28 .type itlb_miss,@function
 29ENTRY(itlb_miss)
 30 and ~EPSW_NMID,epsw
 30#ifdef CONFIG_GDBSTUB
 31 movm [d2,d3,a2],(sp)
 32#else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
 37 nop
 38#endif
 39
40#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
41 mov (MMUCTR),d2
42 mov d2,(MMUCTR)
43#endif
44
45 and ~EPSW_NMID,epsw
 46 mov (IPTEU),d3
 47 mov (PTBR),a2
 48 mov d3,d2
@@ -56,10 +61,16 @@
 61 btst _PAGE_VALID,d2
 62 beq itlb_miss_fault # jump if doesn't point to a page
 63 # (might be a swap id)
 64#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
 65 bset _PAGE_ACCESSED,(0,a2)
 60 and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
 66#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
67 bset +(_PAGE_ACCESSED >> 8),(1,a2)
68#else
69#error "_PAGE_ACCESSED value is out of range"
70#endif
71 and ~xPTEL2_UNUSED1,d2
 72itlb_miss_set:
 62 mov d2,(IPTEL) # change the TLB
 73 mov d2,(IPTEL2) # change the TLB
 74#ifdef CONFIG_GDBSTUB
 75 movm (sp),[d2,d3,a2]
 76#endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
 90###############################################################################
 91 .type dtlb_miss,@function
 92ENTRY(dtlb_miss)
 82 and ~EPSW_NMID,epsw
 93#ifdef CONFIG_GDBSTUB
 94 movm [d2,d3,a2],(sp)
 95#else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
 100 nop
 101#endif
 102
103#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
104 mov (MMUCTR),d2
105 mov d2,(MMUCTR)
106#endif
107
108 and ~EPSW_NMID,epsw
 109 mov (DPTEU),d3
 110 mov (PTBR),a2
 111 mov d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
 124 btst _PAGE_VALID,d2
 125 beq dtlb_miss_fault # jump if doesn't point to a page
 126 # (might be a swap id)
 127#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
 128 bset _PAGE_ACCESSED,(0,a2)
 112 and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
 129#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
130 bset +(_PAGE_ACCESSED >> 8),(1,a2)
131#else
132#error "_PAGE_ACCESSED value is out of range"
133#endif
134 and ~xPTEL2_UNUSED1,d2
 135dtlb_miss_set:
 114 mov d2,(DPTEL) # change the TLB
 136 mov d2,(DPTEL2) # change the TLB
 137#ifdef CONFIG_GDBSTUB
 138 movm (sp),[d2,d3,a2]
 139#endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
 152###############################################################################
 153 .type itlb_aerror,@function
 154ENTRY(itlb_aerror)
 133 and ~EPSW_NMID,epsw
 155 add -4,sp
 156 SAVE_ALL
157
158#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
159 mov (MMUCTR),d1
160 mov d1,(MMUCTR)
161#endif
162
163 and ~EPSW_NMID,epsw
 164 add -4,sp # need to pass three params
 165
 166 # calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
 168 or 0x00010000,d1 # it's an instruction fetch
 169
 170 # determine the page address
 143 mov (IPTEU),a2
 144 mov a2,d0
 171 mov (IPTEU),d0
 172 and PAGE_MASK,d0
 173 mov d0,(12,sp)
 174
 175 clr d0
 149 mov d0,(IPTEL)
 176 mov d0,(IPTEL2)
 177
 151 and ~EPSW_NMID,epsw
 178 or EPSW_IE,epsw
 179 mov fp,d0
 180 call do_page_fault[],0 # do_page_fault(regs,code,addr
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
 189###############################################################################
 190 .type dtlb_aerror,@function
 191ENTRY(dtlb_aerror)
 166 and ~EPSW_NMID,epsw
 192 add -4,sp
 193 SAVE_ALL
194
195#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
196 mov (MMUCTR),d1
197 mov d1,(MMUCTR)
198#endif
199
 200 add -4,sp # need to pass three params
201 and ~EPSW_NMID,epsw
 202
 203 # calculate the fault code
 204 movhu (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
 210 mov d0,(12,sp)
 211
 212 clr d0
 181 mov d0,(DPTEL)
 213 mov d0,(DPTEL2)
 214
 183 and ~EPSW_NMID,epsw
 215 or EPSW_IE,epsw
 216 mov fp,d0
 217 call do_page_fault[],0 # do_page_fault(regs,code,addr
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644
index 000000000000..0b6a5ad1960e
--- /dev/null
+++ b/arch/mn10300/mm/tlb-smp.c
@@ -0,0 +1,214 @@
1/* SMP TLB support routines.
2 *
3 * Copyright (C) 2006-2008 Panasonic Corporation
4 * All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15#include <linux/interrupt.h>
16#include <linux/spinlock.h>
17#include <linux/init.h>
18#include <linux/jiffies.h>
19#include <linux/cpumask.h>
20#include <linux/err.h>
21#include <linux/kernel.h>
22#include <linux/delay.h>
23#include <linux/sched.h>
24#include <linux/profile.h>
25#include <linux/smp.h>
26#include <asm/tlbflush.h>
27#include <asm/system.h>
28#include <asm/bitops.h>
29#include <asm/processor.h>
30#include <asm/bug.h>
31#include <asm/exceptions.h>
32#include <asm/hardirq.h>
33#include <asm/fpu.h>
34#include <asm/mmu_context.h>
35#include <asm/thread_info.h>
36#include <asm/cpu-regs.h>
37#include <asm/intctl-regs.h>
38
39/*
 40 * For TLB flushing
41 */
42#define FLUSH_ALL 0xffffffff
43
44static cpumask_t flush_cpumask;
45static struct mm_struct *flush_mm;
46static unsigned long flush_va;
47static DEFINE_SPINLOCK(tlbstate_lock);
48
49DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
50 &init_mm, 0
51};
52
53static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
54 unsigned long va);
55static void do_flush_tlb_all(void *info);
56
57/**
58 * smp_flush_tlb - Callback to invalidate the TLB.
59 * @unused: Callback context (ignored).
60 */
61void smp_flush_tlb(void *unused)
62{
63 unsigned long cpu_id;
64
65 cpu_id = get_cpu();
66
67 if (!cpu_isset(cpu_id, flush_cpumask))
68 /* This was a BUG() but until someone can quote me the line
69 * from the intel manual that guarantees an IPI to multiple
 70 * CPUs is retried _only_ on the erroring CPUs, it's staying as a
71 * return
72 *
73 * BUG();
74 */
75 goto out;
76
77 if (flush_va == FLUSH_ALL)
78 local_flush_tlb();
79 else
80 local_flush_tlb_page(flush_mm, flush_va);
81
82 smp_mb__before_clear_bit();
83 cpu_clear(cpu_id, flush_cpumask);
84 smp_mb__after_clear_bit();
85out:
86 put_cpu();
87}
88
89/**
90 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
91 * @cpumask: The list of CPUs to target.
92 * @mm: The VM context to flush from (if va!=FLUSH_ALL).
93 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
94 */
95static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
96 unsigned long va)
97{
98 cpumask_t tmp;
99
 100 /* A few sanity checks (to be removed):
101 * - mask must not be empty
102 * - current CPU must not be in mask
103 * - we do not send IPIs to as-yet unbooted CPUs.
104 */
105 BUG_ON(!mm);
106 BUG_ON(cpus_empty(cpumask));
107 BUG_ON(cpu_isset(smp_processor_id(), cpumask));
108
109 cpus_and(tmp, cpumask, cpu_online_map);
110 BUG_ON(!cpus_equal(cpumask, tmp));
111
112 /* I'm not happy about this global shared spinlock in the MM hot path,
113 * but we'll see how contended it is.
114 *
115 * Temporarily this turns IRQs off, so that lockups are detected by the
116 * NMI watchdog.
117 */
118 spin_lock(&tlbstate_lock);
119
120 flush_mm = mm;
121 flush_va = va;
122#if NR_CPUS <= BITS_PER_LONG
123 atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
124#else
125#error Not supported.
126#endif
127
128 /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
129 smp_call_function(smp_flush_tlb, NULL, 1);
130
131 while (!cpus_empty(flush_cpumask))
132 /* Lockup detection does not belong here */
133 smp_mb();
134
135 flush_mm = NULL;
136 flush_va = 0;
137 spin_unlock(&tlbstate_lock);
138}
139
140/**
141 * flush_tlb_mm - Invalidate TLB of specified VM context
142 * @mm: The VM context to invalidate.
143 */
144void flush_tlb_mm(struct mm_struct *mm)
145{
146 cpumask_t cpu_mask;
147
148 preempt_disable();
149 cpu_mask = mm->cpu_vm_mask;
150 cpu_clear(smp_processor_id(), cpu_mask);
151
152 local_flush_tlb();
153 if (!cpus_empty(cpu_mask))
154 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
155
156 preempt_enable();
157}
158
159/**
160 * flush_tlb_current_task - Invalidate TLB of current task
161 */
162void flush_tlb_current_task(void)
163{
164 struct mm_struct *mm = current->mm;
165 cpumask_t cpu_mask;
166
167 preempt_disable();
168 cpu_mask = mm->cpu_vm_mask;
169 cpu_clear(smp_processor_id(), cpu_mask);
170
171 local_flush_tlb();
172 if (!cpus_empty(cpu_mask))
173 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
174
175 preempt_enable();
176}
177
178/**
179 * flush_tlb_page - Invalidate TLB of page
180 * @vma: The VM context to invalidate the page for.
181 * @va: The virtual address of the page to invalidate.
182 */
183void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
184{
185 struct mm_struct *mm = vma->vm_mm;
186 cpumask_t cpu_mask;
187
188 preempt_disable();
189 cpu_mask = mm->cpu_vm_mask;
190 cpu_clear(smp_processor_id(), cpu_mask);
191
192 local_flush_tlb_page(mm, va);
193 if (!cpus_empty(cpu_mask))
194 flush_tlb_others(cpu_mask, mm, va);
195
196 preempt_enable();
197}
198
199/**
200 * do_flush_tlb_all - Callback to completely invalidate a TLB
201 * @unused: Callback context (ignored).
202 */
203static void do_flush_tlb_all(void *unused)
204{
205 local_flush_tlb_all();
206}
207
208/**
209 * flush_tlb_all - Completely invalidate TLBs on all CPUs
210 */
211void flush_tlb_all(void)
212{
213 on_each_cpu(do_flush_tlb_all, 0, 1);
214}
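A usage sketch to close (illustrative only; vma, addr and ptep are hypothetical): after a PTE is modified, the per-page flush keeps every CPU's TLB coherent without paying for a full flush:

	ptep_get_and_clear(vma->vm_mm, addr, ptep);
	flush_tlb_page(vma, addr);		/* IPIs the other CPUs */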