author | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
---|---|---|
committer | Jonathan Herman <hermanjl@cs.unc.edu> | 2013-01-17 16:15:55 -0500 |
commit | 8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch) | |
tree | a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /arch/arm/mm | |
parent | 406089d01562f1e2bf9f089fd7637009ebaad589 (diff) |
Patched in Tegra support.
Diffstat (limited to 'arch/arm/mm')
71 files changed, 1869 insertions, 4368 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 3fd629d5a51..aaea6d487ba 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -4,14 +4,31 @@ comment "Processor Type" | |||
4 | # which CPUs we support in the kernel image, and the compiler instruction | 4 | # which CPUs we support in the kernel image, and the compiler instruction |
5 | # optimiser behaviour. | 5 | # optimiser behaviour. |
6 | 6 | ||
7 | # ARM610 | ||
8 | config CPU_ARM610 | ||
9 | bool "Support ARM610 processor" if ARCH_RPC | ||
10 | select CPU_32v3 | ||
11 | select CPU_CACHE_V3 | ||
12 | select CPU_CACHE_VIVT | ||
13 | select CPU_CP15_MMU | ||
14 | select CPU_COPY_V3 if MMU | ||
15 | select CPU_TLB_V3 if MMU | ||
16 | select CPU_PABRT_LEGACY | ||
17 | help | ||
18 | The ARM610 is the successor to the ARM3 processor | ||
19 | and was produced by VLSI Technology Inc. | ||
20 | |||
21 | Say Y if you want support for the ARM610 processor. | ||
22 | Otherwise, say N. | ||
23 | |||
7 | # ARM7TDMI | 24 | # ARM7TDMI |
8 | config CPU_ARM7TDMI | 25 | config CPU_ARM7TDMI |
9 | bool "Support ARM7TDMI processor" | 26 | bool "Support ARM7TDMI processor" |
10 | depends on !MMU | 27 | depends on !MMU |
11 | select CPU_32v4T | 28 | select CPU_32v4T |
12 | select CPU_ABRT_LV4T | 29 | select CPU_ABRT_LV4T |
13 | select CPU_CACHE_V4 | ||
14 | select CPU_PABRT_LEGACY | 30 | select CPU_PABRT_LEGACY |
31 | select CPU_CACHE_V4 | ||
15 | help | 32 | help |
16 | A 32-bit RISC microprocessor based on the ARM7 processor core | 33 | A 32-bit RISC microprocessor based on the ARM7 processor core |
17 | which has no memory control unit and cache. | 34 | which has no memory control unit and cache. |
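Note on the Kconfig mechanics in this hunk: `select` forces a symbol on whenever the selecting entry is enabled, optionally gated by a condition (`select CPU_COPY_V3 if MMU`), whereas `depends on` only controls visibility. A standalone C model of that forcing behaviour (symbol names mirror the CPU_ARM610 entry above; this is an illustration, not kernel code):

```c
/* Standalone model (not kernel code): Kconfig 'select' forces symbols on. */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	bool mmu = true;          /* CONFIG_MMU */
	bool cpu_arm610 = true;   /* user answered Y */

	/* unconditional selects */
	bool cpu_32v3     = cpu_arm610;
	bool cpu_cache_v3 = cpu_arm610;
	/* conditional selects: 'select CPU_COPY_V3 if MMU' */
	bool cpu_copy_v3  = cpu_arm610 && mmu;
	bool cpu_tlb_v3   = cpu_arm610 && mmu;

	printf("CPU_32v3=%d CPU_CACHE_V3=%d CPU_COPY_V3=%d CPU_TLB_V3=%d\n",
	       cpu_32v3, cpu_cache_v3, cpu_copy_v3, cpu_tlb_v3);
	return 0;
}
```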
@@ -19,16 +36,35 @@ config CPU_ARM7TDMI | |||
19 | Say Y if you want support for the ARM7TDMI processor. | 36 | Say Y if you want support for the ARM7TDMI processor. |
20 | Otherwise, say N. | 37 | Otherwise, say N. |
21 | 38 | ||
39 | # ARM710 | ||
40 | config CPU_ARM710 | ||
41 | bool "Support ARM710 processor" if ARCH_RPC | ||
42 | select CPU_32v3 | ||
43 | select CPU_CACHE_V3 | ||
44 | select CPU_CACHE_VIVT | ||
45 | select CPU_CP15_MMU | ||
46 | select CPU_COPY_V3 if MMU | ||
47 | select CPU_TLB_V3 if MMU | ||
48 | select CPU_PABRT_LEGACY | ||
49 | help | ||
50 | A 32-bit RISC microprocessor based on the ARM7 processor core | ||
51 | designed by Advanced RISC Machines Ltd. The ARM710 is the | ||
52 | successor to the ARM610 processor. It was released in | ||
53 | July 1994 by VLSI Technology Inc. | ||
54 | |||
55 | Say Y if you want support for the ARM710 processor. | ||
56 | Otherwise, say N. | ||
57 | |||
22 | # ARM720T | 58 | # ARM720T |
23 | config CPU_ARM720T | 59 | config CPU_ARM720T |
24 | bool "Support ARM720T processor" if ARCH_INTEGRATOR | 60 | bool "Support ARM720T processor" if ARCH_INTEGRATOR |
25 | select CPU_32v4T | 61 | select CPU_32v4T |
26 | select CPU_ABRT_LV4T | 62 | select CPU_ABRT_LV4T |
63 | select CPU_PABRT_LEGACY | ||
27 | select CPU_CACHE_V4 | 64 | select CPU_CACHE_V4 |
28 | select CPU_CACHE_VIVT | 65 | select CPU_CACHE_VIVT |
29 | select CPU_COPY_V4WT if MMU | ||
30 | select CPU_CP15_MMU | 66 | select CPU_CP15_MMU |
31 | select CPU_PABRT_LEGACY | 67 | select CPU_COPY_V4WT if MMU |
32 | select CPU_TLB_V4WT if MMU | 68 | select CPU_TLB_V4WT if MMU |
33 | help | 69 | help |
34 | A 32-bit RISC processor with 8kByte Cache, Write Buffer and | 70 | A 32-bit RISC processor with 8kByte Cache, Write Buffer and |
@@ -43,9 +79,9 @@ config CPU_ARM740T | |||
43 | depends on !MMU | 79 | depends on !MMU |
44 | select CPU_32v4T | 80 | select CPU_32v4T |
45 | select CPU_ABRT_LV4T | 81 | select CPU_ABRT_LV4T |
82 | select CPU_PABRT_LEGACY | ||
46 | select CPU_CACHE_V3 # although the core is v4t | 83 | select CPU_CACHE_V3 # although the core is v4t |
47 | select CPU_CP15_MPU | 84 | select CPU_CP15_MPU |
48 | select CPU_PABRT_LEGACY | ||
49 | help | 85 | help |
50 | A 32-bit RISC processor with 8KB cache or 4KB variants, | 86 | A 32-bit RISC processor with 8KB cache or 4KB variants, |
51 | write buffer and MPU(Protection Unit) built around | 87 | write buffer and MPU(Protection Unit) built around |
@@ -60,8 +96,8 @@ config CPU_ARM9TDMI | |||
60 | depends on !MMU | 96 | depends on !MMU |
61 | select CPU_32v4T | 97 | select CPU_32v4T |
62 | select CPU_ABRT_NOMMU | 98 | select CPU_ABRT_NOMMU |
63 | select CPU_CACHE_V4 | ||
64 | select CPU_PABRT_LEGACY | 99 | select CPU_PABRT_LEGACY |
100 | select CPU_CACHE_V4 | ||
65 | help | 101 | help |
66 | A 32-bit RISC microprocessor based on the ARM9 processor core | 102 | A 32-bit RISC microprocessor based on the ARM9 processor core |
67 | which has no memory control unit and cache. | 103 | which has no memory control unit and cache. |
@@ -74,11 +110,11 @@ config CPU_ARM920T | |||
74 | bool "Support ARM920T processor" if ARCH_INTEGRATOR | 110 | bool "Support ARM920T processor" if ARCH_INTEGRATOR |
75 | select CPU_32v4T | 111 | select CPU_32v4T |
76 | select CPU_ABRT_EV4T | 112 | select CPU_ABRT_EV4T |
113 | select CPU_PABRT_LEGACY | ||
77 | select CPU_CACHE_V4WT | 114 | select CPU_CACHE_V4WT |
78 | select CPU_CACHE_VIVT | 115 | select CPU_CACHE_VIVT |
79 | select CPU_COPY_V4WB if MMU | ||
80 | select CPU_CP15_MMU | 116 | select CPU_CP15_MMU |
81 | select CPU_PABRT_LEGACY | 117 | select CPU_COPY_V4WB if MMU |
82 | select CPU_TLB_V4WBI if MMU | 118 | select CPU_TLB_V4WBI if MMU |
83 | help | 119 | help |
84 | The ARM920T is licensed to be produced by numerous vendors, | 120 | The ARM920T is licensed to be produced by numerous vendors, |
@@ -92,11 +128,11 @@ config CPU_ARM922T | |||
92 | bool "Support ARM922T processor" if ARCH_INTEGRATOR | 128 | bool "Support ARM922T processor" if ARCH_INTEGRATOR |
93 | select CPU_32v4T | 129 | select CPU_32v4T |
94 | select CPU_ABRT_EV4T | 130 | select CPU_ABRT_EV4T |
131 | select CPU_PABRT_LEGACY | ||
95 | select CPU_CACHE_V4WT | 132 | select CPU_CACHE_V4WT |
96 | select CPU_CACHE_VIVT | 133 | select CPU_CACHE_VIVT |
97 | select CPU_COPY_V4WB if MMU | ||
98 | select CPU_CP15_MMU | 134 | select CPU_CP15_MMU |
99 | select CPU_PABRT_LEGACY | 135 | select CPU_COPY_V4WB if MMU |
100 | select CPU_TLB_V4WBI if MMU | 136 | select CPU_TLB_V4WBI if MMU |
101 | help | 137 | help |
102 | The ARM922T is a version of the ARM920T, but with smaller | 138 | The ARM922T is a version of the ARM920T, but with smaller |
@@ -111,11 +147,11 @@ config CPU_ARM925T | |||
111 | bool "Support ARM925T processor" if ARCH_OMAP1 | 147 | bool "Support ARM925T processor" if ARCH_OMAP1 |
112 | select CPU_32v4T | 148 | select CPU_32v4T |
113 | select CPU_ABRT_EV4T | 149 | select CPU_ABRT_EV4T |
150 | select CPU_PABRT_LEGACY | ||
114 | select CPU_CACHE_V4WT | 151 | select CPU_CACHE_V4WT |
115 | select CPU_CACHE_VIVT | 152 | select CPU_CACHE_VIVT |
116 | select CPU_COPY_V4WB if MMU | ||
117 | select CPU_CP15_MMU | 153 | select CPU_CP15_MMU |
118 | select CPU_PABRT_LEGACY | 154 | select CPU_COPY_V4WB if MMU |
119 | select CPU_TLB_V4WBI if MMU | 155 | select CPU_TLB_V4WBI if MMU |
120 | help | 156 | help |
121 | The ARM925T is a mix between the ARM920T and ARM926T, but with | 157 | The ARM925T is a mix between the ARM920T and ARM926T, but with |
@@ -130,10 +166,10 @@ config CPU_ARM926T | |||
130 | bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB | 166 | bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB |
131 | select CPU_32v5 | 167 | select CPU_32v5 |
132 | select CPU_ABRT_EV5TJ | 168 | select CPU_ABRT_EV5TJ |
169 | select CPU_PABRT_LEGACY | ||
133 | select CPU_CACHE_VIVT | 170 | select CPU_CACHE_VIVT |
134 | select CPU_COPY_V4WB if MMU | ||
135 | select CPU_CP15_MMU | 171 | select CPU_CP15_MMU |
136 | select CPU_PABRT_LEGACY | 172 | select CPU_COPY_V4WB if MMU |
137 | select CPU_TLB_V4WBI if MMU | 173 | select CPU_TLB_V4WBI if MMU |
138 | help | 174 | help |
139 | This is a variant of the ARM920. It has slightly different | 175 | This is a variant of the ARM920. It has slightly different |
@@ -148,11 +184,11 @@ config CPU_FA526 | |||
148 | bool | 184 | bool |
149 | select CPU_32v4 | 185 | select CPU_32v4 |
150 | select CPU_ABRT_EV4 | 186 | select CPU_ABRT_EV4 |
151 | select CPU_CACHE_FA | 187 | select CPU_PABRT_LEGACY |
152 | select CPU_CACHE_VIVT | 188 | select CPU_CACHE_VIVT |
153 | select CPU_COPY_FA if MMU | ||
154 | select CPU_CP15_MMU | 189 | select CPU_CP15_MMU |
155 | select CPU_PABRT_LEGACY | 190 | select CPU_CACHE_FA |
191 | select CPU_COPY_FA if MMU | ||
156 | select CPU_TLB_FA if MMU | 192 | select CPU_TLB_FA if MMU |
157 | help | 193 | help |
158 | The FA526 is a version of the ARMv4 compatible processor with | 194 | The FA526 is a version of the ARMv4 compatible processor with |
@@ -167,9 +203,9 @@ config CPU_ARM940T | |||
167 | depends on !MMU | 203 | depends on !MMU |
168 | select CPU_32v4T | 204 | select CPU_32v4T |
169 | select CPU_ABRT_NOMMU | 205 | select CPU_ABRT_NOMMU |
206 | select CPU_PABRT_LEGACY | ||
170 | select CPU_CACHE_VIVT | 207 | select CPU_CACHE_VIVT |
171 | select CPU_CP15_MPU | 208 | select CPU_CP15_MPU |
172 | select CPU_PABRT_LEGACY | ||
173 | help | 209 | help |
174 | ARM940T is a member of the ARM9TDMI family of general- | 210 | ARM940T is a member of the ARM9TDMI family of general- |
175 | purpose microprocessors with MPU and separate 4KB | 211 | purpose microprocessors with MPU and separate 4KB |
@@ -185,9 +221,9 @@ config CPU_ARM946E | |||
185 | depends on !MMU | 221 | depends on !MMU |
186 | select CPU_32v5 | 222 | select CPU_32v5 |
187 | select CPU_ABRT_NOMMU | 223 | select CPU_ABRT_NOMMU |
224 | select CPU_PABRT_LEGACY | ||
188 | select CPU_CACHE_VIVT | 225 | select CPU_CACHE_VIVT |
189 | select CPU_CP15_MPU | 226 | select CPU_CP15_MPU |
190 | select CPU_PABRT_LEGACY | ||
191 | help | 227 | help |
192 | ARM946E-S is a member of the ARM9E-S family of high- | 228 | ARM946E-S is a member of the ARM9E-S family of high- |
193 | performance, 32-bit system-on-chip processor solutions. | 229 | performance, 32-bit system-on-chip processor solutions. |
@@ -201,11 +237,11 @@ config CPU_ARM1020 | |||
201 | bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR | 237 | bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR |
202 | select CPU_32v5 | 238 | select CPU_32v5 |
203 | select CPU_ABRT_EV4T | 239 | select CPU_ABRT_EV4T |
240 | select CPU_PABRT_LEGACY | ||
204 | select CPU_CACHE_V4WT | 241 | select CPU_CACHE_V4WT |
205 | select CPU_CACHE_VIVT | 242 | select CPU_CACHE_VIVT |
206 | select CPU_COPY_V4WB if MMU | ||
207 | select CPU_CP15_MMU | 243 | select CPU_CP15_MMU |
208 | select CPU_PABRT_LEGACY | 244 | select CPU_COPY_V4WB if MMU |
209 | select CPU_TLB_V4WBI if MMU | 245 | select CPU_TLB_V4WBI if MMU |
210 | help | 246 | help |
211 | The ARM1020 is the 32K cached version of the ARM10 processor, | 247 | The ARM1020 is the 32K cached version of the ARM10 processor, |
@@ -217,25 +253,25 @@ config CPU_ARM1020 | |||
217 | # ARM1020E - needs validating | 253 | # ARM1020E - needs validating |
218 | config CPU_ARM1020E | 254 | config CPU_ARM1020E |
219 | bool "Support ARM1020E processor" if ARCH_INTEGRATOR | 255 | bool "Support ARM1020E processor" if ARCH_INTEGRATOR |
220 | depends on n | ||
221 | select CPU_32v5 | 256 | select CPU_32v5 |
222 | select CPU_ABRT_EV4T | 257 | select CPU_ABRT_EV4T |
258 | select CPU_PABRT_LEGACY | ||
223 | select CPU_CACHE_V4WT | 259 | select CPU_CACHE_V4WT |
224 | select CPU_CACHE_VIVT | 260 | select CPU_CACHE_VIVT |
225 | select CPU_COPY_V4WB if MMU | ||
226 | select CPU_CP15_MMU | 261 | select CPU_CP15_MMU |
227 | select CPU_PABRT_LEGACY | 262 | select CPU_COPY_V4WB if MMU |
228 | select CPU_TLB_V4WBI if MMU | 263 | select CPU_TLB_V4WBI if MMU |
264 | depends on n | ||
229 | 265 | ||
230 | # ARM1022E | 266 | # ARM1022E |
231 | config CPU_ARM1022 | 267 | config CPU_ARM1022 |
232 | bool "Support ARM1022E processor" if ARCH_INTEGRATOR | 268 | bool "Support ARM1022E processor" if ARCH_INTEGRATOR |
233 | select CPU_32v5 | 269 | select CPU_32v5 |
234 | select CPU_ABRT_EV4T | 270 | select CPU_ABRT_EV4T |
271 | select CPU_PABRT_LEGACY | ||
235 | select CPU_CACHE_VIVT | 272 | select CPU_CACHE_VIVT |
236 | select CPU_COPY_V4WB if MMU # can probably do better | ||
237 | select CPU_CP15_MMU | 273 | select CPU_CP15_MMU |
238 | select CPU_PABRT_LEGACY | 274 | select CPU_COPY_V4WB if MMU # can probably do better |
239 | select CPU_TLB_V4WBI if MMU | 275 | select CPU_TLB_V4WBI if MMU |
240 | help | 276 | help |
241 | The ARM1022E is an implementation of the ARMv5TE architecture | 277 | The ARM1022E is an implementation of the ARMv5TE architecture |
@@ -250,10 +286,10 @@ config CPU_ARM1026 | |||
250 | bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR | 286 | bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR |
251 | select CPU_32v5 | 287 | select CPU_32v5 |
252 | select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 | 288 | select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 |
289 | select CPU_PABRT_LEGACY | ||
253 | select CPU_CACHE_VIVT | 290 | select CPU_CACHE_VIVT |
254 | select CPU_COPY_V4WB if MMU # can probably do better | ||
255 | select CPU_CP15_MMU | 291 | select CPU_CP15_MMU |
256 | select CPU_PABRT_LEGACY | 292 | select CPU_COPY_V4WB if MMU # can probably do better |
257 | select CPU_TLB_V4WBI if MMU | 293 | select CPU_TLB_V4WBI if MMU |
258 | help | 294 | help |
259 | The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture | 295 | The ARM1026EJ-S is an implementation of the ARMv5TEJ architecture |
@@ -268,11 +304,11 @@ config CPU_SA110 | |||
268 | select CPU_32v3 if ARCH_RPC | 304 | select CPU_32v3 if ARCH_RPC |
269 | select CPU_32v4 if !ARCH_RPC | 305 | select CPU_32v4 if !ARCH_RPC |
270 | select CPU_ABRT_EV4 | 306 | select CPU_ABRT_EV4 |
307 | select CPU_PABRT_LEGACY | ||
271 | select CPU_CACHE_V4WB | 308 | select CPU_CACHE_V4WB |
272 | select CPU_CACHE_VIVT | 309 | select CPU_CACHE_VIVT |
273 | select CPU_COPY_V4WB if MMU | ||
274 | select CPU_CP15_MMU | 310 | select CPU_CP15_MMU |
275 | select CPU_PABRT_LEGACY | 311 | select CPU_COPY_V4WB if MMU |
276 | select CPU_TLB_V4WB if MMU | 312 | select CPU_TLB_V4WB if MMU |
277 | help | 313 | help |
278 | The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and | 314 | The Intel StrongARM(R) SA-110 is a 32-bit microprocessor and |
@@ -288,10 +324,10 @@ config CPU_SA1100 | |||
288 | bool | 324 | bool |
289 | select CPU_32v4 | 325 | select CPU_32v4 |
290 | select CPU_ABRT_EV4 | 326 | select CPU_ABRT_EV4 |
327 | select CPU_PABRT_LEGACY | ||
291 | select CPU_CACHE_V4WB | 328 | select CPU_CACHE_V4WB |
292 | select CPU_CACHE_VIVT | 329 | select CPU_CACHE_VIVT |
293 | select CPU_CP15_MMU | 330 | select CPU_CP15_MMU |
294 | select CPU_PABRT_LEGACY | ||
295 | select CPU_TLB_V4WB if MMU | 331 | select CPU_TLB_V4WB if MMU |
296 | 332 | ||
297 | # XScale | 333 | # XScale |
@@ -299,9 +335,9 @@ config CPU_XSCALE | |||
299 | bool | 335 | bool |
300 | select CPU_32v5 | 336 | select CPU_32v5 |
301 | select CPU_ABRT_EV5T | 337 | select CPU_ABRT_EV5T |
338 | select CPU_PABRT_LEGACY | ||
302 | select CPU_CACHE_VIVT | 339 | select CPU_CACHE_VIVT |
303 | select CPU_CP15_MMU | 340 | select CPU_CP15_MMU |
304 | select CPU_PABRT_LEGACY | ||
305 | select CPU_TLB_V4WBI if MMU | 341 | select CPU_TLB_V4WBI if MMU |
306 | 342 | ||
307 | # XScale Core Version 3 | 343 | # XScale Core Version 3 |
@@ -309,9 +345,9 @@ config CPU_XSC3 | |||
309 | bool | 345 | bool |
310 | select CPU_32v5 | 346 | select CPU_32v5 |
311 | select CPU_ABRT_EV5T | 347 | select CPU_ABRT_EV5T |
348 | select CPU_PABRT_LEGACY | ||
312 | select CPU_CACHE_VIVT | 349 | select CPU_CACHE_VIVT |
313 | select CPU_CP15_MMU | 350 | select CPU_CP15_MMU |
314 | select CPU_PABRT_LEGACY | ||
315 | select CPU_TLB_V4WBI if MMU | 351 | select CPU_TLB_V4WBI if MMU |
316 | select IO_36 | 352 | select IO_36 |
317 | 353 | ||
@@ -320,21 +356,21 @@ config CPU_MOHAWK | |||
320 | bool | 356 | bool |
321 | select CPU_32v5 | 357 | select CPU_32v5 |
322 | select CPU_ABRT_EV5T | 358 | select CPU_ABRT_EV5T |
359 | select CPU_PABRT_LEGACY | ||
323 | select CPU_CACHE_VIVT | 360 | select CPU_CACHE_VIVT |
324 | select CPU_COPY_V4WB if MMU | ||
325 | select CPU_CP15_MMU | 361 | select CPU_CP15_MMU |
326 | select CPU_PABRT_LEGACY | ||
327 | select CPU_TLB_V4WBI if MMU | 362 | select CPU_TLB_V4WBI if MMU |
363 | select CPU_COPY_V4WB if MMU | ||
328 | 364 | ||
329 | # Feroceon | 365 | # Feroceon |
330 | config CPU_FEROCEON | 366 | config CPU_FEROCEON |
331 | bool | 367 | bool |
332 | select CPU_32v5 | 368 | select CPU_32v5 |
333 | select CPU_ABRT_EV5T | 369 | select CPU_ABRT_EV5T |
370 | select CPU_PABRT_LEGACY | ||
334 | select CPU_CACHE_VIVT | 371 | select CPU_CACHE_VIVT |
335 | select CPU_COPY_FEROCEON if MMU | ||
336 | select CPU_CP15_MMU | 372 | select CPU_CP15_MMU |
337 | select CPU_PABRT_LEGACY | 373 | select CPU_COPY_FEROCEON if MMU |
338 | select CPU_TLB_FEROCEON if MMU | 374 | select CPU_TLB_FEROCEON if MMU |
339 | 375 | ||
340 | config CPU_FEROCEON_OLD_ID | 376 | config CPU_FEROCEON_OLD_ID |
@@ -349,24 +385,20 @@ config CPU_FEROCEON_OLD_ID | |||
349 | # Marvell PJ4 | 385 | # Marvell PJ4 |
350 | config CPU_PJ4 | 386 | config CPU_PJ4 |
351 | bool | 387 | bool |
352 | select ARM_THUMBEE | ||
353 | select CPU_V7 | ||
354 | |||
355 | config CPU_PJ4B | ||
356 | bool | ||
357 | select CPU_V7 | 388 | select CPU_V7 |
389 | select ARM_THUMBEE | ||
358 | 390 | ||
359 | # ARMv6 | 391 | # ARMv6 |
360 | config CPU_V6 | 392 | config CPU_V6 |
361 | bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX | 393 | bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
362 | select CPU_32v6 | 394 | select CPU_32v6 |
363 | select CPU_ABRT_EV6 | 395 | select CPU_ABRT_EV6 |
396 | select CPU_PABRT_V6 | ||
364 | select CPU_CACHE_V6 | 397 | select CPU_CACHE_V6 |
365 | select CPU_CACHE_VIPT | 398 | select CPU_CACHE_VIPT |
366 | select CPU_COPY_V6 if MMU | ||
367 | select CPU_CP15_MMU | 399 | select CPU_CP15_MMU |
368 | select CPU_HAS_ASID if MMU | 400 | select CPU_HAS_ASID if MMU |
369 | select CPU_PABRT_V6 | 401 | select CPU_COPY_V6 if MMU |
370 | select CPU_TLB_V6 if MMU | 402 | select CPU_TLB_V6 if MMU |
371 | 403 | ||
372 | # ARMv6k | 404 | # ARMv6k |
@@ -375,12 +407,12 @@ config CPU_V6K | |||
375 | select CPU_32v6 | 407 | select CPU_32v6 |
376 | select CPU_32v6K | 408 | select CPU_32v6K |
377 | select CPU_ABRT_EV6 | 409 | select CPU_ABRT_EV6 |
410 | select CPU_PABRT_V6 | ||
378 | select CPU_CACHE_V6 | 411 | select CPU_CACHE_V6 |
379 | select CPU_CACHE_VIPT | 412 | select CPU_CACHE_VIPT |
380 | select CPU_COPY_V6 if MMU | ||
381 | select CPU_CP15_MMU | 413 | select CPU_CP15_MMU |
382 | select CPU_HAS_ASID if MMU | 414 | select CPU_HAS_ASID if MMU |
383 | select CPU_PABRT_V6 | 415 | select CPU_COPY_V6 if MMU |
384 | select CPU_TLB_V6 if MMU | 416 | select CPU_TLB_V6 if MMU |
385 | 417 | ||
386 | # ARMv7 | 418 | # ARMv7 |
@@ -389,44 +421,44 @@ config CPU_V7 | |||
389 | select CPU_32v6K | 421 | select CPU_32v6K |
390 | select CPU_32v7 | 422 | select CPU_32v7 |
391 | select CPU_ABRT_EV7 | 423 | select CPU_ABRT_EV7 |
424 | select CPU_PABRT_V7 | ||
392 | select CPU_CACHE_V7 | 425 | select CPU_CACHE_V7 |
393 | select CPU_CACHE_VIPT | 426 | select CPU_CACHE_VIPT |
394 | select CPU_COPY_V6 if MMU | ||
395 | select CPU_CP15_MMU | 427 | select CPU_CP15_MMU |
396 | select CPU_HAS_ASID if MMU | 428 | select CPU_HAS_ASID if MMU |
397 | select CPU_PABRT_V7 | 429 | select CPU_COPY_V6 if MMU |
398 | select CPU_TLB_V7 if MMU | 430 | select CPU_TLB_V7 if MMU |
399 | 431 | ||
400 | # Figure out what processor architecture version we should be using. | 432 | # Figure out what processor architecture version we should be using. |
401 | # This defines the compiler instruction set which depends on the machine type. | 433 | # This defines the compiler instruction set which depends on the machine type. |
402 | config CPU_32v3 | 434 | config CPU_32v3 |
403 | bool | 435 | bool |
404 | select CPU_USE_DOMAINS if MMU | ||
405 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
406 | select TLS_REG_EMUL if SMP || !MMU | 436 | select TLS_REG_EMUL if SMP || !MMU |
437 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
438 | select CPU_USE_DOMAINS if MMU | ||
407 | 439 | ||
408 | config CPU_32v4 | 440 | config CPU_32v4 |
409 | bool | 441 | bool |
410 | select CPU_USE_DOMAINS if MMU | ||
411 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
412 | select TLS_REG_EMUL if SMP || !MMU | 442 | select TLS_REG_EMUL if SMP || !MMU |
443 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
444 | select CPU_USE_DOMAINS if MMU | ||
413 | 445 | ||
414 | config CPU_32v4T | 446 | config CPU_32v4T |
415 | bool | 447 | bool |
416 | select CPU_USE_DOMAINS if MMU | ||
417 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
418 | select TLS_REG_EMUL if SMP || !MMU | 448 | select TLS_REG_EMUL if SMP || !MMU |
449 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
450 | select CPU_USE_DOMAINS if MMU | ||
419 | 451 | ||
420 | config CPU_32v5 | 452 | config CPU_32v5 |
421 | bool | 453 | bool |
422 | select CPU_USE_DOMAINS if MMU | ||
423 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
424 | select TLS_REG_EMUL if SMP || !MMU | 454 | select TLS_REG_EMUL if SMP || !MMU |
455 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | ||
456 | select CPU_USE_DOMAINS if MMU | ||
425 | 457 | ||
426 | config CPU_32v6 | 458 | config CPU_32v6 |
427 | bool | 459 | bool |
428 | select CPU_USE_DOMAINS if CPU_V6 && MMU | ||
429 | select TLS_REG_EMUL if !CPU_32v6K && !MMU | 460 | select TLS_REG_EMUL if !CPU_32v6K && !MMU |
461 | select CPU_USE_DOMAINS if CPU_V6 && MMU | ||
430 | 462 | ||
431 | config CPU_32v6K | 463 | config CPU_32v6K |
432 | bool | 464 | bool |
@@ -498,6 +530,9 @@ config CPU_CACHE_FA | |||
498 | 530 | ||
499 | if MMU | 531 | if MMU |
500 | # The copy-page model | 532 | # The copy-page model |
533 | config CPU_COPY_V3 | ||
534 | bool | ||
535 | |||
501 | config CPU_COPY_V4WT | 536 | config CPU_COPY_V4WT |
502 | bool | 537 | bool |
503 | 538 | ||
@@ -514,6 +549,11 @@ config CPU_COPY_V6 | |||
514 | bool | 549 | bool |
515 | 550 | ||
516 | # This selects the TLB model | 551 | # This selects the TLB model |
552 | config CPU_TLB_V3 | ||
553 | bool | ||
554 | help | ||
555 | ARM Architecture Version 3 TLB. | ||
556 | |||
517 | config CPU_TLB_V4WT | 557 | config CPU_TLB_V4WT |
518 | bool | 558 | bool |
519 | help | 559 | help |
@@ -589,24 +629,6 @@ config IO_36 | |||
589 | 629 | ||
590 | comment "Processor Features" | 630 | comment "Processor Features" |
591 | 631 | ||
592 | config ARM_LPAE | ||
593 | bool "Support for the Large Physical Address Extension" | ||
594 | depends on MMU && CPU_32v7 && !CPU_32v6 && !CPU_32v5 && \ | ||
595 | !CPU_32v4 && !CPU_32v3 | ||
596 | help | ||
597 | Say Y if you have an ARMv7 processor supporting the LPAE page | ||
598 | table format and you would like to access memory beyond the | ||
599 | 4GB limit. The resulting kernel image will not run on | ||
600 | processors without the LPA extension. | ||
601 | |||
602 | If unsure, say N. | ||
603 | |||
604 | config ARCH_PHYS_ADDR_T_64BIT | ||
605 | def_bool ARM_LPAE | ||
606 | |||
607 | config ARCH_DMA_ADDR_T_64BIT | ||
608 | bool | ||
609 | |||
610 | config ARM_THUMB | 632 | config ARM_THUMB |
611 | bool "Support Thumb user binaries" | 633 | bool "Support Thumb user binaries" |
612 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON | 634 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON |
@@ -628,28 +650,11 @@ config ARM_THUMBEE | |||
628 | Say Y here if you have a CPU with the ThumbEE extension and code to | 650 | Say Y here if you have a CPU with the ThumbEE extension and code to |
629 | make use of it. Say N for code that can run on CPUs without ThumbEE. | 651 | make use of it. Say N for code that can run on CPUs without ThumbEE. |
630 | 652 | ||
631 | config ARM_VIRT_EXT | ||
632 | bool "Native support for the ARM Virtualization Extensions" | ||
633 | depends on MMU && CPU_V7 | ||
634 | help | ||
635 | Enable the kernel to make use of the ARM Virtualization | ||
636 | Extensions to install hypervisors without run-time firmware | ||
637 | assistance. | ||
638 | |||
639 | A compliant bootloader is required in order to make maximum | ||
640 | use of this feature. Refer to Documentation/arm/Booting for | ||
641 | details. | ||
642 | |||
643 | It is safe to enable this option even if the kernel may not be | ||
644 | booted in HYP mode, may not have support for the | ||
645 | virtualization extensions, or may be booted with a | ||
646 | non-compliant bootloader. | ||
647 | |||
648 | config SWP_EMULATE | 653 | config SWP_EMULATE |
649 | bool "Emulate SWP/SWPB instructions" | 654 | bool "Emulate SWP/SWPB instructions" |
650 | depends on !CPU_USE_DOMAINS && CPU_V7 | 655 | depends on !CPU_USE_DOMAINS && CPU_V7 |
651 | default y if SMP | ||
652 | select HAVE_PROC_CPU if PROC_FS | 656 | select HAVE_PROC_CPU if PROC_FS |
657 | default y if SMP | ||
653 | help | 658 | help |
654 | ARMv6 architecture deprecates use of the SWP/SWPB instructions. | 659 | ARMv6 architecture deprecates use of the SWP/SWPB instructions. |
655 | ARMv7 multiprocessing extensions introduce the ability to disable | 660 | ARMv7 multiprocessing extensions introduce the ability to disable |
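SWP_EMULATE traps the deprecated SWP/SWPB instructions and re-implements them with exclusive loads/stores (LDREX/STREX). As a standalone model of the semantics being emulated — SWP is an atomic exchange — using C11 atomics in place of the ARM exclusives (illustrative only):

```c
/* Standalone model: SWP r,[p] atomically swaps a register with memory and
 * returns the old value; C11 atomic_exchange is the portable equivalent. */
#include <stdio.h>
#include <stdatomic.h>

int main(void)
{
	_Atomic unsigned int mem = 0x11111111;
	unsigned int newv = 0x22222222;

	unsigned int old = atomic_exchange(&mem, newv); /* what SWP returns */
	printf("old=%#x mem=%#x\n", old, (unsigned int)mem);
	return 0;
}
```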
@@ -700,7 +705,7 @@ config CPU_HIGH_VECTOR | |||
700 | bool "Select the High exception vector" | 705 | bool "Select the High exception vector" |
701 | help | 706 | help |
702 | Say Y here to select high exception vector(0xFFFF0000~). | 707 | Say Y here to select high exception vector(0xFFFF0000~). |
703 | The exception vector can vary depending on the platform | 708 | The exception vector can vary depending on the platform |
704 | design in nommu mode. If your platform needs to select | 709 | design in nommu mode. If your platform needs to select |
705 | high exception vector, say Y. | 710 | high exception vector, say Y. |
706 | Otherwise or if you are unsure, say N, and the low exception | 711 | Otherwise or if you are unsure, say N, and the low exception |
@@ -708,7 +713,7 @@ config CPU_HIGH_VECTOR | |||
708 | 713 | ||
709 | config CPU_ICACHE_DISABLE | 714 | config CPU_ICACHE_DISABLE |
710 | bool "Disable I-Cache (I-bit)" | 715 | bool "Disable I-Cache (I-bit)" |
711 | depends on CPU_CP15 && !(CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3) | 716 | depends on CPU_CP15 && !(CPU_ARM610 || CPU_ARM710 || CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3) |
712 | help | 717 | help |
713 | Say Y here to disable the processor instruction cache. Unless | 718 | Say Y here to disable the processor instruction cache. Unless |
714 | you have a reason not to or are unsure, say N. | 719 | you have a reason not to or are unsure, say N. |
@@ -811,23 +816,14 @@ config CACHE_FEROCEON_L2_WRITETHROUGH | |||
811 | Say Y here to use the Feroceon L2 cache in writethrough mode. | 816 | Say Y here to use the Feroceon L2 cache in writethrough mode. |
812 | Unless you specifically require this, say N for writeback mode. | 817 | Unless you specifically require this, say N for writeback mode. |
813 | 818 | ||
814 | config MIGHT_HAVE_CACHE_L2X0 | ||
815 | bool | ||
816 | help | ||
817 | This option should be selected by machines which have a L2x0 | ||
818 | or PL310 cache controller, but where its use is optional. | ||
819 | |||
820 | The only effect of this option is to make CACHE_L2X0 and | ||
821 | related options available to the user for configuration. | ||
822 | |||
823 | Boards or SoCs which always require the cache controller | ||
824 | support to be present should select CACHE_L2X0 directly | ||
825 | instead of this option, thus preventing the user from | ||
826 | inadvertently configuring a broken kernel. | ||
827 | |||
828 | config CACHE_L2X0 | 819 | config CACHE_L2X0 |
829 | bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0 | 820 | bool "Enable the L2x0 outer cache controller" |
830 | default MIGHT_HAVE_CACHE_L2X0 | 821 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ |
822 | REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \ | ||
823 | ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \ | ||
824 | ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \ | ||
825 | ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX | ||
826 | default y | ||
831 | select OUTER_CACHE | 827 | select OUTER_CACHE |
832 | select OUTER_CACHE_SYNC | 828 | select OUTER_CACHE_SYNC |
833 | help | 829 | help |
@@ -860,7 +856,6 @@ config CACHE_XSC3L2 | |||
860 | 856 | ||
861 | config ARM_L1_CACHE_SHIFT_6 | 857 | config ARM_L1_CACHE_SHIFT_6 |
862 | bool | 858 | bool |
863 | default y if CPU_V7 | ||
864 | help | 859 | help |
865 | Setting ARM L1 cache line size to 64 Bytes. | 860 | Setting ARM L1 cache line size to 64 Bytes. |
866 | 861 | ||
@@ -895,3 +890,18 @@ config ARCH_HAS_BARRIERS | |||
895 | help | 890 | help |
896 | This option allows the use of custom mandatory barriers | 891 | This option allows the use of custom mandatory barriers |
897 | included via the mach/barriers.h file. | 892 | included via the mach/barriers.h file. |
893 | |||
894 | config ARM_SAVE_DEBUG_CONTEXT | ||
895 | bool "Save CPU debug state across suspend/resume" | ||
896 | depends on PM_SLEEP && CPU_V7 | ||
897 | help | ||
898 | This option enables save/restore of the ARM debug registers | ||
899 | across CPU powerdown. | ||
900 | |||
901 | config CPA | ||
902 | bool "Change Page Attributes" | ||
903 | depends on CPU_V7 | ||
904 | help | ||
905 | This option enables Changing Page Attributes for low memory. | ||
906 | This is needed to avoid conflicting memory mappings for low memory: | ||
907 | one from the kernel page table and others from user process page tables. | ||
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 8a9c4cb50a9..47e2e3ba190 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ | |||
6 | iomap.o | 6 | iomap.o |
7 | 7 | ||
8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ | 8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ |
9 | mmap.o pgd.o mmu.o vmregion.o | 9 | mmap.o pgd.o mmu.o vmregion.o pageattr.o |
10 | 10 | ||
11 | ifneq ($(CONFIG_MMU),y) | 11 | ifneq ($(CONFIG_MMU),y) |
12 | obj-y += nommu.o | 12 | obj-y += nommu.o |
@@ -44,6 +44,7 @@ obj-$(CONFIG_CPU_CACHE_FA) += cache-fa.o | |||
44 | AFLAGS_cache-v6.o :=-Wa,-march=armv6 | 44 | AFLAGS_cache-v6.o :=-Wa,-march=armv6 |
45 | AFLAGS_cache-v7.o :=-Wa,-march=armv7-a | 45 | AFLAGS_cache-v7.o :=-Wa,-march=armv7-a |
46 | 46 | ||
47 | obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o | ||
47 | obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o | 48 | obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o |
48 | obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o | 49 | obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o |
49 | obj-$(CONFIG_CPU_COPY_FEROCEON) += copypage-feroceon.o | 50 | obj-$(CONFIG_CPU_COPY_FEROCEON) += copypage-feroceon.o |
@@ -53,6 +54,7 @@ obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o | |||
53 | obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o | 54 | obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o |
54 | obj-$(CONFIG_CPU_COPY_FA) += copypage-fa.o | 55 | obj-$(CONFIG_CPU_COPY_FA) += copypage-fa.o |
55 | 56 | ||
57 | obj-$(CONFIG_CPU_TLB_V3) += tlb-v3.o | ||
56 | obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o | 58 | obj-$(CONFIG_CPU_TLB_V4WT) += tlb-v4.o |
57 | obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o | 59 | obj-$(CONFIG_CPU_TLB_V4WB) += tlb-v4wb.o |
58 | obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o | 60 | obj-$(CONFIG_CPU_TLB_V4WBI) += tlb-v4wbi.o |
@@ -64,6 +66,8 @@ obj-$(CONFIG_CPU_TLB_FA) += tlb-fa.o | |||
64 | AFLAGS_tlb-v6.o :=-Wa,-march=armv6 | 66 | AFLAGS_tlb-v6.o :=-Wa,-march=armv6 |
65 | AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a | 67 | AFLAGS_tlb-v7.o :=-Wa,-march=armv7-a |
66 | 68 | ||
69 | obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o | ||
70 | obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o | ||
67 | obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o | 71 | obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o |
68 | obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o | 72 | obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o |
69 | obj-$(CONFIG_CPU_ARM740T) += proc-arm740.o | 73 | obj-$(CONFIG_CPU_ARM740T) += proc-arm740.o |
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 80741992a9f..ff1f7cc11f8 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S | |||
@@ -26,23 +26,18 @@ ENTRY(v6_early_abort) | |||
26 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 26 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
27 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 27 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
28 | /* | 28 | /* |
29 | * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR. | 29 | * Faulty SWP instruction on 1136 doesn't set bit 11 in DFSR (erratum 326103). |
30 | * The test below covers all the write situations, including Java bytecodes | ||
30 | */ | 31 | */ |
31 | #ifdef CONFIG_ARM_ERRATA_326103 | 32 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR |
32 | ldr ip, =0x4107b36 | ||
33 | mrc p15, 0, r3, c0, c0, 0 @ get processor id | ||
34 | teq ip, r3, lsr #4 @ r0 ARM1136? | ||
35 | bne do_DataAbort | ||
36 | tst r5, #PSR_J_BIT @ Java? | 33 | tst r5, #PSR_J_BIT @ Java? |
37 | tsteq r5, #PSR_T_BIT @ Thumb? | ||
38 | bne do_DataAbort | 34 | bne do_DataAbort |
39 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR | 35 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
40 | ldr r3, [r4] @ read aborted ARM instruction | 36 | ldreq r3, [r4] @ read aborted ARM instruction |
41 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 37 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
42 | rev r3, r3 | 38 | reveq r3, r3 |
43 | #endif | 39 | #endif |
44 | do_ldrd_abort tmp=ip, insn=r3 | 40 | do_ldrd_abort tmp=ip, insn=r3 |
45 | tst r3, #1 << 20 @ L = 0 -> write | 41 | tst r3, #1 << 20 @ L = 0 -> write |
46 | orreq r1, r1, #1 << 11 @ yes. | 42 | orreq r1, r1, #1 << 11 @ yes. |
47 | #endif | ||
48 | b do_DataAbort | 43 | b do_DataAbort |
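The rewritten v6_early_abort above distrusts DFSR bit 11 (the read/write indicator, unreliable around SWP on ARM1136) and re-derives it from the aborted instruction's L bit (bit 20: L=0 means store). A standalone sketch of that fixup (the instruction encoding below is an illustrative example):

```c
/* Standalone sketch of the DFSR fixup: clear the hardware "write" bit,
 * then set it again only if the aborted instruction was a store. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fixup_dfsr(uint32_t fsr, uint32_t insn)
{
	fsr &= ~(1u << 11);       /* distrust the hardware write bit */
	if (!(insn & (1u << 20))) /* L == 0 -> the access was a write */
		fsr |= 1u << 11;
	return fsr;
}

int main(void)
{
	uint32_t str_insn = 0xe5802000; /* STR r2, [r0]: L bit clear */
	printf("fsr=%#x\n", fixup_dfsr(0, str_insn)); /* prints 0x800 */
	return 0;
}
```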
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index b820edaf318..cfbcf8b9559 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -22,8 +22,7 @@ | |||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | 24 | ||
25 | #include <asm/cp15.h> | 25 | #include <asm/system.h> |
26 | #include <asm/system_info.h> | ||
27 | #include <asm/unaligned.h> | 26 | #include <asm/unaligned.h> |
28 | 27 | ||
29 | #include "fault.h" | 28 | #include "fault.h" |
@@ -87,6 +86,16 @@ core_param(alignment, ai_usermode, int, 0600); | |||
87 | #define UM_FIXUP (1 << 1) | 86 | #define UM_FIXUP (1 << 1) |
88 | #define UM_SIGNAL (1 << 2) | 87 | #define UM_SIGNAL (1 << 2) |
89 | 88 | ||
89 | #ifdef CONFIG_PROC_FS | ||
90 | static const char *usermode_action[] = { | ||
91 | "ignored", | ||
92 | "warn", | ||
93 | "fixup", | ||
94 | "fixup+warn", | ||
95 | "signal", | ||
96 | "signal+warn" | ||
97 | }; | ||
98 | |||
90 | /* Return true if and only if the ARMv6 unaligned access model is in use. */ | 99 | /* Return true if and only if the ARMv6 unaligned access model is in use. */ |
91 | static bool cpu_is_v6_unaligned(void) | 100 | static bool cpu_is_v6_unaligned(void) |
92 | { | 101 | { |
@@ -114,16 +123,6 @@ static int safe_usermode(int new_usermode, bool warn) | |||
114 | return new_usermode; | 123 | return new_usermode; |
115 | } | 124 | } |
116 | 125 | ||
117 | #ifdef CONFIG_PROC_FS | ||
118 | static const char *usermode_action[] = { | ||
119 | "ignored", | ||
120 | "warn", | ||
121 | "fixup", | ||
122 | "fixup+warn", | ||
123 | "signal", | ||
124 | "signal+warn" | ||
125 | }; | ||
126 | |||
127 | static int alignment_proc_show(struct seq_file *m, void *v) | 126 | static int alignment_proc_show(struct seq_file *m, void *v) |
128 | { | 127 | { |
129 | seq_printf(m, "User:\t\t%lu\n", ai_user); | 128 | seq_printf(m, "User:\t\t%lu\n", ai_user); |
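The relocated `usermode_action[]` table is indexed directly by the UM_* bitmask (`UM_FIXUP` and `UM_SIGNAL` appear in the context above; `UM_WARN` as bit 0 is assumed from the surrounding file). A standalone sketch of that indexing for the valid mode combinations:

```c
/* Standalone sketch: the UM_* mode bits index usermode_action[] directly,
 * so "fixup+warn" is UM_FIXUP|UM_WARN == 3. Only the combinations the
 * kernel permits (indexes 0..5) are ever used as indexes. */
#include <stdio.h>

#define UM_WARN   (1 << 0)  /* assumed value, defined just above the hunk */
#define UM_FIXUP  (1 << 1)
#define UM_SIGNAL (1 << 2)

static const char *usermode_action[] = {
	"ignored", "warn", "fixup", "fixup+warn", "signal", "signal+warn",
};

int main(void)
{
	int mode = UM_FIXUP | UM_WARN;
	printf("%s\n", usermode_action[mode]); /* prints "fixup+warn" */
	return 0;
}
```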
@@ -699,6 +698,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, | |||
699 | unsigned long instr = *pinstr; | 698 | unsigned long instr = *pinstr; |
700 | u16 tinst1 = (instr >> 16) & 0xffff; | 699 | u16 tinst1 = (instr >> 16) & 0xffff; |
701 | u16 tinst2 = instr & 0xffff; | 700 | u16 tinst2 = instr & 0xffff; |
701 | poffset->un = 0; | ||
702 | 702 | ||
703 | switch (tinst1 & 0xffe0) { | 703 | switch (tinst1 & 0xffe0) { |
704 | /* A6.3.5 Load/Store multiple */ | 704 | /* A6.3.5 Load/Store multiple */ |
@@ -745,7 +745,7 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs, | |||
745 | static int | 745 | static int |
746 | do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | 746 | do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) |
747 | { | 747 | { |
748 | union offset_union uninitialized_var(offset); | 748 | union offset_union offset; |
749 | unsigned long instr = 0, instrptr; | 749 | unsigned long instr = 0, instrptr; |
750 | int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); | 750 | int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs); |
751 | unsigned int type; | 751 | unsigned int type; |
@@ -853,13 +853,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
853 | break; | 853 | break; |
854 | 854 | ||
855 | case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */ | 855 | case 0x08000000: /* ldm or stm, or thumb-2 32bit instruction */ |
856 | if (thumb2_32b) { | 856 | if (thumb2_32b) |
857 | offset.un = 0; | ||
858 | handler = do_alignment_t32_to_handler(&instr, regs, &offset); | 857 | handler = do_alignment_t32_to_handler(&instr, regs, &offset); |
859 | } else { | 858 | else |
860 | offset.un = 0; | ||
861 | handler = do_alignment_ldmstm; | 859 | handler = do_alignment_ldmstm; |
862 | } | ||
863 | break; | 860 | break; |
864 | 861 | ||
865 | default: | 862 | default: |
@@ -971,7 +968,7 @@ static int __init alignment_init(void) | |||
971 | ai_usermode = safe_usermode(ai_usermode, false); | 968 | ai_usermode = safe_usermode(ai_usermode, false); |
972 | } | 969 | } |
973 | 970 | ||
974 | hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, | 971 | hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, |
975 | "alignment exception"); | 972 | "alignment exception"); |
976 | 973 | ||
977 | /* | 974 | /* |
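The revert replaces the named `FAULT_CODE_ALIGNMENT` constant with the literal 1, the FSR status code for alignment faults. A standalone model of the FSR-indexed dispatch table that `hook_fault_code()` patches (simplified; the real table also carries the signal number and si_code passed in above):

```c
/* Standalone model of the fsr_info[] dispatch that hook_fault_code()
 * edits: the low FSR status bits index a table of handlers, and entry 1
 * is the alignment-fault slot registered by alignment_init(). */
#include <stdio.h>

typedef int (*fault_fn)(unsigned long addr, unsigned int fsr);

static struct { fault_fn fn; const char *name; } fsr_info[16];

static int do_alignment(unsigned long addr, unsigned int fsr)
{
	printf("%s at %#lx (fsr=%#x)\n", fsr_info[fsr & 0xf].name, addr, fsr);
	return 0; /* 0 = handled */
}

static void hook_fault_code(int nr, fault_fn fn, const char *name)
{
	fsr_info[nr].fn = fn;
	fsr_info[nr].name = name;
}

int main(void)
{
	unsigned int fsr = 0x1; /* FSR status 1: alignment fault */
	hook_fault_code(1, do_alignment, "alignment exception");
	return fsr_info[fsr & 0xf].fn(0x8004, fsr);
}
```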
diff --git a/arch/arm/mm/cache-aurora-l2.h b/arch/arm/mm/cache-aurora-l2.h deleted file mode 100644 index c8612476983..00000000000 --- a/arch/arm/mm/cache-aurora-l2.h +++ /dev/null | |||
@@ -1,55 +0,0 @@ | |||
1 | /* | ||
2 | * AURORA shared L2 cache controller support | ||
3 | * | ||
4 | * Copyright (C) 2012 Marvell | ||
5 | * | ||
6 | * Yehuda Yitschak <yehuday@marvell.com> | ||
7 | * Gregory CLEMENT <gregory.clement@free-electrons.com> | ||
8 | * | ||
9 | * This file is licensed under the terms of the GNU General Public | ||
10 | * License version 2. This program is licensed "as is" without any | ||
11 | * warranty of any kind, whether express or implied. | ||
12 | */ | ||
13 | |||
14 | #ifndef __ASM_ARM_HARDWARE_AURORA_L2_H | ||
15 | #define __ASM_ARM_HARDWARE_AURORA_L2_H | ||
16 | |||
17 | #define AURORA_SYNC_REG 0x700 | ||
18 | #define AURORA_RANGE_BASE_ADDR_REG 0x720 | ||
19 | #define AURORA_FLUSH_PHY_ADDR_REG 0x7f0 | ||
20 | #define AURORA_INVAL_RANGE_REG 0x774 | ||
21 | #define AURORA_CLEAN_RANGE_REG 0x7b4 | ||
22 | #define AURORA_FLUSH_RANGE_REG 0x7f4 | ||
23 | |||
24 | #define AURORA_ACR_REPLACEMENT_OFFSET 27 | ||
25 | #define AURORA_ACR_REPLACEMENT_MASK \ | ||
26 | (0x3 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
27 | #define AURORA_ACR_REPLACEMENT_TYPE_WAYRR \ | ||
28 | (0 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
29 | #define AURORA_ACR_REPLACEMENT_TYPE_LFSR \ | ||
30 | (1 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
31 | #define AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU \ | ||
32 | (3 << AURORA_ACR_REPLACEMENT_OFFSET) | ||
33 | |||
34 | #define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0 | ||
35 | #define AURORA_ACR_FORCE_WRITE_POLICY_MASK \ | ||
36 | (0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
37 | #define AURORA_ACR_FORCE_WRITE_POLICY_DIS \ | ||
38 | (0 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
39 | #define AURORA_ACR_FORCE_WRITE_BACK_POLICY \ | ||
40 | (1 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
41 | #define AURORA_ACR_FORCE_WRITE_THRO_POLICY \ | ||
42 | (2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET) | ||
43 | |||
44 | #define MAX_RANGE_SIZE 1024 | ||
45 | |||
46 | #define AURORA_WAY_SIZE_SHIFT 2 | ||
47 | |||
48 | #define AURORA_CTRL_FW 0x100 | ||
49 | |||
50 | /* chose a number outside L2X0_CACHE_ID_PART_MASK to be sure to make | ||
51 | * the distinction between a number coming from hardware and a number | ||
52 | * coming from the device tree */ | ||
53 | #define AURORA_CACHE_ID 0x100 | ||
54 | |||
55 | #endif /* __ASM_ARM_HARDWARE_AURORA_L2_H */ | ||
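The deleted header's ACR macros follow the usual mask/value pattern: clear the field with the `_MASK`, then OR in one of the policy values. A standalone sketch using the (removed) write-policy field definitions:

```c
/* Standalone sketch: composing the Aurora ACR write-policy field with the
 * masks from the removed header above. */
#include <stdio.h>
#include <stdint.h>

#define AURORA_ACR_FORCE_WRITE_POLICY_OFFSET 0
#define AURORA_ACR_FORCE_WRITE_POLICY_MASK \
	(0x3 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)
#define AURORA_ACR_FORCE_WRITE_THRO_POLICY \
	(2 << AURORA_ACR_FORCE_WRITE_POLICY_OFFSET)

int main(void)
{
	uint32_t acr = 0xdeadbee3;               /* arbitrary example value */
	acr = (acr & ~AURORA_ACR_FORCE_WRITE_POLICY_MASK)
	    | AURORA_ACR_FORCE_WRITE_THRO_POLICY;
	printf("acr=%#x\n", acr);                /* low two bits now 0b10 */
	return 0;
}
```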
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index e505befe51b..07201637109 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S | |||
@@ -240,9 +240,6 @@ ENTRY(fa_dma_unmap_area) | |||
240 | mov pc, lr | 240 | mov pc, lr |
241 | ENDPROC(fa_dma_unmap_area) | 241 | ENDPROC(fa_dma_unmap_area) |
242 | 242 | ||
243 | .globl fa_flush_kern_cache_louis | ||
244 | .equ fa_flush_kern_cache_louis, fa_flush_kern_cache_all | ||
245 | |||
246 | __INITDATA | 243 | __INITDATA |
247 | 244 | ||
248 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 245 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index dd3d59122cc..e0b0e7a4ec6 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/highmem.h> | 16 | #include <linux/highmem.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/cp15.h> | ||
19 | #include <plat/cache-feroceon-l2.h> | 18 | #include <plat/cache-feroceon-l2.h> |
20 | 19 | ||
21 | /* | 20 | /* |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c2f37390308..0dddb54ea98 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -16,44 +16,35 @@ | |||
16 | * along with this program; if not, write to the Free Software | 16 | * along with this program; if not, write to the Free Software |
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | #include <linux/err.h> | ||
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
22 | #include <linux/io.h> | 21 | #include <linux/io.h> |
23 | #include <linux/of.h> | ||
24 | #include <linux/of_address.h> | ||
25 | 22 | ||
26 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
27 | #include <asm/hardware/cache-l2x0.h> | 24 | #include <asm/hardware/cache-l2x0.h> |
28 | #include "cache-aurora-l2.h" | ||
29 | 25 | ||
30 | #define CACHE_LINE_SIZE 32 | 26 | #define CACHE_LINE_SIZE 32 |
31 | 27 | ||
32 | static void __iomem *l2x0_base; | 28 | static void __iomem *l2x0_base; |
33 | static DEFINE_RAW_SPINLOCK(l2x0_lock); | 29 | static DEFINE_SPINLOCK(l2x0_lock); |
34 | static u32 l2x0_way_mask; /* Bitmask of active ways */ | 30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ |
35 | static u32 l2x0_size; | 31 | static uint32_t l2x0_size; |
36 | static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; | 32 | static u32 l2x0_cache_id; |
33 | static unsigned int l2x0_sets; | ||
34 | static unsigned int l2x0_ways; | ||
37 | 35 | ||
38 | /* Aurora don't have the cache ID register available, so we have to | 36 | static inline bool is_pl310_rev(int rev) |
39 | * pass it though the device tree */ | 37 | { |
40 | static u32 cache_id_part_number_from_dt; | 38 | return (l2x0_cache_id & |
41 | 39 | (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) == | |
42 | struct l2x0_regs l2x0_saved_regs; | 40 | (L2X0_CACHE_ID_PART_L310 | rev); |
43 | 41 | } | |
44 | struct l2x0_of_data { | ||
45 | void (*setup)(const struct device_node *, u32 *, u32 *); | ||
46 | void (*save)(void); | ||
47 | struct outer_cache_fns outer_cache; | ||
48 | }; | ||
49 | |||
50 | static bool of_init = false; | ||
51 | 42 | ||
52 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) | 43 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) |
53 | { | 44 | { |
54 | /* wait for cache operation by line or way to complete */ | 45 | /* wait for cache operation by line or way to complete */ |
55 | while (readl_relaxed(reg) & mask) | 46 | while (readl_relaxed(reg) & mask) |
56 | cpu_relax(); | 47 | ; |
57 | } | 48 | } |
58 | 49 | ||
59 | #ifdef CONFIG_CACHE_PL310 | 50 | #ifdef CONFIG_CACHE_PL310 |
@@ -69,7 +60,12 @@ static inline void cache_sync(void) | |||
69 | { | 60 | { |
70 | void __iomem *base = l2x0_base; | 61 | void __iomem *base = l2x0_base; |
71 | 62 | ||
72 | writel_relaxed(0, base + sync_reg_offset); | 63 | #ifdef CONFIG_ARM_ERRATA_753970 |
64 | /* write to an unmapped register */ | ||
65 | writel_relaxed(0, base + L2X0_DUMMY_REG); | ||
66 | #else | ||
67 | writel_relaxed(0, base + L2X0_CACHE_SYNC); | ||
68 | #endif | ||
73 | cache_wait(base + L2X0_CACHE_SYNC, 1); | 69 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
74 | } | 70 | } |
75 | 71 | ||
@@ -88,13 +84,10 @@ static inline void l2x0_inv_line(unsigned long addr) | |||
88 | } | 84 | } |
89 | 85 | ||
90 | #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) | 86 | #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) |
91 | static inline void debug_writel(unsigned long val) | ||
92 | { | ||
93 | if (outer_cache.set_debug) | ||
94 | outer_cache.set_debug(val); | ||
95 | } | ||
96 | 87 | ||
97 | static void pl310_set_debug(unsigned long val) | 88 | #define debug_writel(val) outer_cache.set_debug(val) |
89 | |||
90 | static void l2x0_set_debug(unsigned long val) | ||
98 | { | 91 | { |
99 | writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); | 92 | writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); |
100 | } | 93 | } |
@@ -104,7 +97,7 @@ static inline void debug_writel(unsigned long val) | |||
104 | { | 97 | { |
105 | } | 98 | } |
106 | 99 | ||
107 | #define pl310_set_debug NULL | 100 | #define l2x0_set_debug NULL |
108 | #endif | 101 | #endif |
109 | 102 | ||
110 | #ifdef CONFIG_PL310_ERRATA_588369 | 103 | #ifdef CONFIG_PL310_ERRATA_588369 |
@@ -132,10 +125,27 @@ static void l2x0_cache_sync(void) | |||
132 | { | 125 | { |
133 | unsigned long flags; | 126 | unsigned long flags; |
134 | 127 | ||
135 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 128 | spin_lock_irqsave(&l2x0_lock, flags); |
136 | cache_sync(); | 129 | cache_sync(); |
137 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 130 | spin_unlock_irqrestore(&l2x0_lock, flags); |
131 | } | ||
132 | |||
133 | #ifdef CONFIG_PL310_ERRATA_727915 | ||
134 | static void l2x0_for_each_set_way(void __iomem *reg) | ||
135 | { | ||
136 | int set; | ||
137 | int way; | ||
138 | unsigned long flags; | ||
139 | |||
140 | for (way = 0; way < l2x0_ways; way++) { | ||
141 | spin_lock_irqsave(&l2x0_lock, flags); | ||
142 | for (set = 0; set < l2x0_sets; set++) | ||
143 | writel_relaxed((way << 28) | (set << 5), reg); | ||
144 | cache_sync(); | ||
145 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
146 | } | ||
138 | } | 147 | } |
148 | #endif | ||
139 | 149 | ||
140 | static void __l2x0_flush_all(void) | 150 | static void __l2x0_flush_all(void) |
141 | { | 151 | { |
@@ -150,22 +160,38 @@ static void l2x0_flush_all(void) | |||
150 | { | 160 | { |
151 | unsigned long flags; | 161 | unsigned long flags; |
152 | 162 | ||
163 | #ifdef CONFIG_PL310_ERRATA_727915 | ||
164 | if (is_pl310_rev(REV_PL310_R2P0)) { | ||
165 | l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX); | ||
166 | return; | ||
167 | } | ||
168 | #endif | ||
169 | |||
153 | /* clean all ways */ | 170 | /* clean all ways */ |
154 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 171 | spin_lock_irqsave(&l2x0_lock, flags); |
155 | __l2x0_flush_all(); | 172 | __l2x0_flush_all(); |
156 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 173 | spin_unlock_irqrestore(&l2x0_lock, flags); |
157 | } | 174 | } |
158 | 175 | ||
159 | static void l2x0_clean_all(void) | 176 | static void l2x0_clean_all(void) |
160 | { | 177 | { |
161 | unsigned long flags; | 178 | unsigned long flags; |
162 | 179 | ||
180 | #ifdef CONFIG_PL310_ERRATA_727915 | ||
181 | if (is_pl310_rev(REV_PL310_R2P0)) { | ||
182 | l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX); | ||
183 | return; | ||
184 | } | ||
185 | #endif | ||
186 | |||
163 | /* clean all ways */ | 187 | /* clean all ways */ |
164 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 188 | spin_lock_irqsave(&l2x0_lock, flags); |
189 | debug_writel(0x03); | ||
165 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); | 190 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); |
166 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); | 191 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); |
167 | cache_sync(); | 192 | cache_sync(); |
168 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 193 | debug_writel(0x00); |
194 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
169 | } | 195 | } |
170 | 196 | ||
171 | static void l2x0_inv_all(void) | 197 | static void l2x0_inv_all(void) |
@@ -173,13 +199,13 @@ static void l2x0_inv_all(void) | |||
173 | unsigned long flags; | 199 | unsigned long flags; |
174 | 200 | ||
175 | /* invalidate all ways */ | 201 | /* invalidate all ways */ |
176 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 202 | spin_lock_irqsave(&l2x0_lock, flags); |
177 | /* Invalidating when L2 is enabled is a nono */ | 203 | /* Invalidating when L2 is enabled is a nono */ |
178 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); | 204 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); |
179 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 205 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
180 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 206 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
181 | cache_sync(); | 207 | cache_sync(); |
182 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 208 | spin_unlock_irqrestore(&l2x0_lock, flags); |
183 | } | 209 | } |
184 | 210 | ||
185 | static void l2x0_inv_range(unsigned long start, unsigned long end) | 211 | static void l2x0_inv_range(unsigned long start, unsigned long end) |
@@ -187,7 +213,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |||
187 | void __iomem *base = l2x0_base; | 213 | void __iomem *base = l2x0_base; |
188 | unsigned long flags; | 214 | unsigned long flags; |
189 | 215 | ||
190 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 216 | spin_lock_irqsave(&l2x0_lock, flags); |
191 | if (start & (CACHE_LINE_SIZE - 1)) { | 217 | if (start & (CACHE_LINE_SIZE - 1)) { |
192 | start &= ~(CACHE_LINE_SIZE - 1); | 218 | start &= ~(CACHE_LINE_SIZE - 1); |
193 | debug_writel(0x03); | 219 | debug_writel(0x03); |
@@ -212,13 +238,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) | |||
212 | } | 238 | } |
213 | 239 | ||
214 | if (blk_end < end) { | 240 | if (blk_end < end) { |
215 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 241 | spin_unlock_irqrestore(&l2x0_lock, flags); |
216 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 242 | spin_lock_irqsave(&l2x0_lock, flags); |
217 | } | 243 | } |
218 | } | 244 | } |
219 | cache_wait(base + L2X0_INV_LINE_PA, 1); | 245 | cache_wait(base + L2X0_INV_LINE_PA, 1); |
220 | cache_sync(); | 246 | cache_sync(); |
221 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 247 | spin_unlock_irqrestore(&l2x0_lock, flags); |
222 | } | 248 | } |
223 | 249 | ||
224 | static void l2x0_clean_range(unsigned long start, unsigned long end) | 250 | static void l2x0_clean_range(unsigned long start, unsigned long end) |
@@ -231,7 +257,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |||
231 | return; | 257 | return; |
232 | } | 258 | } |
233 | 259 | ||
234 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 260 | spin_lock_irqsave(&l2x0_lock, flags); |
235 | start &= ~(CACHE_LINE_SIZE - 1); | 261 | start &= ~(CACHE_LINE_SIZE - 1); |
236 | while (start < end) { | 262 | while (start < end) { |
237 | unsigned long blk_end = start + min(end - start, 4096UL); | 263 | unsigned long blk_end = start + min(end - start, 4096UL); |
@@ -242,13 +268,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |||
242 | } | 268 | } |
243 | 269 | ||
244 | if (blk_end < end) { | 270 | if (blk_end < end) { |
245 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 271 | spin_unlock_irqrestore(&l2x0_lock, flags); |
246 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 272 | spin_lock_irqsave(&l2x0_lock, flags); |
247 | } | 273 | } |
248 | } | 274 | } |
249 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); | 275 | cache_wait(base + L2X0_CLEAN_LINE_PA, 1); |
250 | cache_sync(); | 276 | cache_sync(); |
251 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 277 | spin_unlock_irqrestore(&l2x0_lock, flags); |
252 | } | 278 | } |
253 | 279 | ||
254 | static void l2x0_flush_range(unsigned long start, unsigned long end) | 280 | static void l2x0_flush_range(unsigned long start, unsigned long end) |
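All three range operations (`inv`, `clean`, `flush`) share the pattern visible in these hunks: work is chunked into 4096-byte blocks, and the spinlock is dropped and retaken between blocks so interrupt latency stays bounded on large ranges. A standalone model of that chunking (locking shown as comments):

```c
/* Standalone model of the range-op chunking above: at most 4096 bytes are
 * processed per lock hold, one cache line at a time. */
#include <stdio.h>

#define CACHE_LINE_SIZE 32

static void op_line(unsigned long addr) { (void)addr; /* e.g. clean PA */ }

static void range_op(unsigned long start, unsigned long end)
{
	start &= ~(unsigned long)(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long len = end - start;
		unsigned long blk_end = start + (len < 4096UL ? len : 4096UL);

		/* spin_lock_irqsave(&l2x0_lock, flags); */
		while (start < blk_end) {
			op_line(start);
			start += CACHE_LINE_SIZE;
		}
		/* spin_unlock_irqrestore(&l2x0_lock, flags);
		 * -- interrupts can fire here before the next block */
	}
	printf("done\n");
}

int main(void) { range_op(0x1000, 0x4000); return 0; }
```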
@@ -261,7 +287,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
261 | return; | 287 | return; |
262 | } | 288 | } |
263 | 289 | ||
264 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 290 | spin_lock_irqsave(&l2x0_lock, flags); |
265 | start &= ~(CACHE_LINE_SIZE - 1); | 291 | start &= ~(CACHE_LINE_SIZE - 1); |
266 | while (start < end) { | 292 | while (start < end) { |
267 | unsigned long blk_end = start + min(end - start, 4096UL); | 293 | unsigned long blk_end = start + min(end - start, 4096UL); |
@@ -274,43 +300,46 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
274 | debug_writel(0x00); | 300 | debug_writel(0x00); |
275 | 301 | ||
276 | if (blk_end < end) { | 302 | if (blk_end < end) { |
277 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 303 | spin_unlock_irqrestore(&l2x0_lock, flags); |
278 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 304 | spin_lock_irqsave(&l2x0_lock, flags); |
279 | } | 305 | } |
280 | } | 306 | } |
281 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); | 307 | cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); |
282 | cache_sync(); | 308 | cache_sync(); |
283 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 309 | spin_unlock_irqrestore(&l2x0_lock, flags); |
310 | } | ||
311 | |||
312 | /* enables l2x0 after l2x0_disable, does not invalidate */ | ||
313 | void l2x0_enable(void) | ||
314 | { | ||
315 | unsigned long flags; | ||
316 | |||
317 | spin_lock_irqsave(&l2x0_lock, flags); | ||
318 | writel_relaxed(1, l2x0_base + L2X0_CTRL); | ||
319 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
284 | } | 320 | } |
285 | 321 | ||
286 | static void l2x0_disable(void) | 322 | static void l2x0_disable(void) |
287 | { | 323 | { |
288 | unsigned long flags; | 324 | unsigned long flags; |
289 | 325 | ||
290 | raw_spin_lock_irqsave(&l2x0_lock, flags); | 326 | spin_lock_irqsave(&l2x0_lock, flags); |
291 | __l2x0_flush_all(); | 327 | __l2x0_flush_all(); |
292 | writel_relaxed(0, l2x0_base + L2X0_CTRL); | 328 | writel_relaxed(0, l2x0_base + L2X0_CTRL); |
293 | dsb(); | 329 | dsb(); |
294 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | 330 | spin_unlock_irqrestore(&l2x0_lock, flags); |
295 | } | 331 | } |
296 | 332 | ||
297 | static void l2x0_unlock(u32 cache_id) | 333 | static void __init l2x0_unlock(__u32 cache_id) |
298 | { | 334 | { |
299 | int lockregs; | 335 | int lockregs; |
300 | int i; | 336 | int i; |
301 | 337 | ||
302 | switch (cache_id) { | 338 | if (cache_id == L2X0_CACHE_ID_PART_L310) |
303 | case L2X0_CACHE_ID_PART_L310: | ||
304 | lockregs = 8; | 339 | lockregs = 8; |
305 | break; | 340 | else |
306 | case AURORA_CACHE_ID: | ||
307 | lockregs = 4; | ||
308 | break; | ||
309 | default: | ||
310 | /* L210 and unknown types */ | 341 | /* L210 and unknown types */ |
311 | lockregs = 1; | 342 | lockregs = 1; |
312 | break; | ||
313 | } | ||
314 | 343 | ||
315 | for (i = 0; i < lockregs; i++) { | 344 | for (i = 0; i < lockregs; i++) { |
316 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + | 345 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + |
@@ -320,78 +349,58 @@ static void l2x0_unlock(u32 cache_id) | |||
320 | } | 349 | } |
321 | } | 350 | } |
322 | 351 | ||
323 | void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | 352 | void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) |
324 | { | 353 | { |
325 | u32 aux; | 354 | __u32 aux; |
326 | u32 cache_id; | 355 | __u32 way_size = 0; |
327 | u32 way_size = 0; | ||
328 | int ways; | ||
329 | int way_size_shift = L2X0_WAY_SIZE_SHIFT; | ||
330 | const char *type; | 356 | const char *type; |
331 | 357 | ||
332 | l2x0_base = base; | 358 | l2x0_base = base; |
333 | if (cache_id_part_number_from_dt) | 359 | |
334 | cache_id = cache_id_part_number_from_dt; | 360 | l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); |
335 | else | ||
336 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID) | ||
337 | & L2X0_CACHE_ID_PART_MASK; | ||
338 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 361 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
339 | 362 | ||
340 | aux &= aux_mask; | 363 | aux &= aux_mask; |
341 | aux |= aux_val; | 364 | aux |= aux_val; |
342 | 365 | ||
343 | /* Determine the number of ways */ | 366 | /* Determine the number of ways */ |
344 | switch (cache_id) { | 367 | switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) { |
345 | case L2X0_CACHE_ID_PART_L310: | 368 | case L2X0_CACHE_ID_PART_L310: |
346 | if (aux & (1 << 16)) | 369 | if (aux & (1 << 16)) |
347 | ways = 16; | 370 | l2x0_ways = 16; |
348 | else | 371 | else |
349 | ways = 8; | 372 | l2x0_ways = 8; |
350 | type = "L310"; | 373 | type = "L310"; |
351 | #ifdef CONFIG_PL310_ERRATA_753970 | ||
352 | /* Unmapped register. */ | ||
353 | sync_reg_offset = L2X0_DUMMY_REG; | ||
354 | #endif | ||
355 | if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0) | ||
356 | outer_cache.set_debug = pl310_set_debug; | ||
357 | break; | 374 | break; |
358 | case L2X0_CACHE_ID_PART_L210: | 375 | case L2X0_CACHE_ID_PART_L210: |
359 | ways = (aux >> 13) & 0xf; | 376 | l2x0_ways = (aux >> 13) & 0xf; |
360 | type = "L210"; | 377 | type = "L210"; |
361 | break; | 378 | break; |
362 | |||
363 | case AURORA_CACHE_ID: | ||
364 | sync_reg_offset = AURORA_SYNC_REG; | ||
365 | ways = (aux >> 13) & 0xf; | ||
366 | ways = 2 << ((ways + 1) >> 2); | ||
367 | way_size_shift = AURORA_WAY_SIZE_SHIFT; | ||
368 | type = "Aurora"; | ||
369 | break; | ||
370 | default: | 379 | default: |
371 | /* Assume unknown chips have 8 ways */ | 380 | /* Assume unknown chips have 8 ways */ |
372 | ways = 8; | 381 | l2x0_ways = 8; |
373 | type = "L2x0 series"; | 382 | type = "L2x0 series"; |
374 | break; | 383 | break; |
375 | } | 384 | } |
376 | 385 | ||
377 | l2x0_way_mask = (1 << ways) - 1; | 386 | l2x0_way_mask = (1 << l2x0_ways) - 1; |
378 | 387 | ||
379 | /* | 388 | /* |
380 | * L2 cache Size = Way size * Number of ways | 389 | * L2 cache Size = Way size * Number of ways |
381 | */ | 390 | */ |
382 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; | 391 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; |
383 | way_size = 1 << (way_size + way_size_shift); | 392 | way_size = SZ_1K << (way_size + 3); |
384 | 393 | l2x0_size = l2x0_ways * way_size; | |
385 | l2x0_size = ways * way_size * SZ_1K; | 394 | l2x0_sets = way_size / CACHE_LINE_SIZE; |
386 | 395 | ||
387 | /* | 396 | /* |
388 | * Check if l2x0 controller is already enabled. | 397 | * Check if l2x0 controller is already enabled. |
389 | * If booting from non-secure mode, accessing | 398 | * If booting from non-secure mode, accessing
390 | * the registers below will fault. | 399 | * the registers below will fault.
391 | */ | 400 | */ |
392 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | 401 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { |
393 | /* Make sure that I&D is not locked down when starting */ | 402 | /* Make sure that I&D is not locked down when starting */ |
394 | l2x0_unlock(cache_id); | 403 | l2x0_unlock(l2x0_cache_id); |
395 | 404 | ||
396 | /* l2x0 controller is disabled */ | 405 | /* l2x0 controller is disabled */ |
397 | writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); | 406 | writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); |
@@ -399,425 +408,19 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) | |||
399 | l2x0_inv_all(); | 408 | l2x0_inv_all(); |
400 | 409 | ||
401 | /* enable L2X0 */ | 410 | /* enable L2X0 */ |
402 | writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); | 411 | writel_relaxed(1, l2x0_base + L2X0_CTRL); |
403 | } | ||
404 | |||
405 | /* Re-read it in case some bits are reserved. */ | ||
406 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | ||
407 | |||
408 | /* Save the value for resuming. */ | ||
409 | l2x0_saved_regs.aux_ctrl = aux; | ||
410 | |||
411 | if (!of_init) { | ||
412 | outer_cache.inv_range = l2x0_inv_range; | ||
413 | outer_cache.clean_range = l2x0_clean_range; | ||
414 | outer_cache.flush_range = l2x0_flush_range; | ||
415 | outer_cache.sync = l2x0_cache_sync; | ||
416 | outer_cache.flush_all = l2x0_flush_all; | ||
417 | outer_cache.inv_all = l2x0_inv_all; | ||
418 | outer_cache.disable = l2x0_disable; | ||
419 | } | ||
420 | |||
421 | printk(KERN_INFO "%s cache controller enabled\n", type); | ||
422 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", | ||
423 | ways, cache_id, aux, l2x0_size); | ||
424 | } | ||
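The two size computations agree despite looking different: the left column shifts the AUX_CTRL way-size field by a per-controller way_size_shift and multiplies by SZ_1K at the end, while the right column folds SZ_1K into the shift. A worked example, assuming a PL310 whose AUX_CTRL[19:17] field reads 3, with 8 ways and 32-byte lines:

	/* Worked example; the field value 3 is an assumption. */
	unsigned int field    = 3;
	unsigned int way_size = SZ_1K << (field + 3);        /* 64 KiB    */
	unsigned int size     = 8 * way_size;                /* 512 KiB   */
	unsigned int sets     = way_size / CACHE_LINE_SIZE;  /* 2048 sets */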
425 | |||
426 | #ifdef CONFIG_OF | ||
427 | static int l2_wt_override; | ||
428 | |||
429 | /* | ||
430 | * Note that the end addresses passed to Linux primitives are | ||
431 | * noninclusive, while the hardware cache range operations use | ||
432 | * inclusive start and end addresses. | ||
433 | */ | ||
434 | static unsigned long calc_range_end(unsigned long start, unsigned long end) | ||
435 | { | ||
436 | /* | ||
437 | * Limit the number of cache lines processed at once, | ||
438 | * since cache range operations stall the CPU pipeline | ||
439 | * until completion. | ||
440 | */ | ||
441 | if (end > start + MAX_RANGE_SIZE) | ||
442 | end = start + MAX_RANGE_SIZE; | ||
443 | |||
444 | /* | ||
445 | * Cache range operations can't straddle a page boundary. | ||
446 | */ | ||
447 | if (end > PAGE_ALIGN(start+1)) | ||
448 | end = PAGE_ALIGN(start+1); | ||
449 | |||
450 | return end; | ||
451 | } | ||
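calc_range_end() caps each hardware burst twice: at MAX_RANGE_SIZE bytes, and at the next page boundary so a physically-addressed operation never straddles pages. A worked trace, assuming 4 KiB pages and MAX_RANGE_SIZE = 1024 (the addresses are illustrative):

	/* Assumed: PAGE_SIZE = 4096, MAX_RANGE_SIZE = 1024. */
	unsigned long a = calc_range_end(0x10f80, 0x12000); /* 0x11000: page cap */
	unsigned long b = calc_range_end(0x11000, 0x12000); /* 0x11400: size cap */
	unsigned long c = calc_range_end(0x11c00, 0x12000); /* 0x12000: no cap   */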
452 | |||
453 | /* | ||
454 | * Make sure 'start' and 'end' reference the same page, as L2 is PIPT | ||
455 | * and range operations only do a TLB lookup on the start address. | ||
456 | */ | ||
457 | static void aurora_pa_range(unsigned long start, unsigned long end, | ||
458 | unsigned long offset) | ||
459 | { | ||
460 | unsigned long flags; | ||
461 | |||
462 | raw_spin_lock_irqsave(&l2x0_lock, flags); | ||
463 | writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG); | ||
464 | writel_relaxed(end, l2x0_base + offset); | ||
465 | raw_spin_unlock_irqrestore(&l2x0_lock, flags); | ||
466 | |||
467 | cache_sync(); | ||
468 | } | ||
469 | |||
470 | static void aurora_inv_range(unsigned long start, unsigned long end) | ||
471 | { | ||
472 | /* | ||
473 | * round start down and end up to cache line boundaries | ||
474 | */ | ||
475 | start &= ~(CACHE_LINE_SIZE - 1); | ||
476 | end = ALIGN(end, CACHE_LINE_SIZE); | ||
477 | |||
478 | /* | ||
479 | * Invalidate all full cache lines between 'start' and 'end'. | ||
480 | */ | ||
481 | while (start < end) { | ||
482 | unsigned long range_end = calc_range_end(start, end); | ||
483 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
484 | AURORA_INVAL_RANGE_REG); | ||
485 | start = range_end; | ||
486 | } | ||
487 | } | ||
488 | |||
489 | static void aurora_clean_range(unsigned long start, unsigned long end) | ||
490 | { | ||
491 | /* | ||
492 | * If L2 is forced to WT, the L2 will always be clean and we | ||
493 | * don't need to do anything here. | ||
494 | */ | ||
495 | if (!l2_wt_override) { | ||
496 | start &= ~(CACHE_LINE_SIZE - 1); | ||
497 | end = ALIGN(end, CACHE_LINE_SIZE); | ||
498 | while (start != end) { | ||
499 | unsigned long range_end = calc_range_end(start, end); | ||
500 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
501 | AURORA_CLEAN_RANGE_REG); | ||
502 | start = range_end; | ||
503 | } | ||
504 | } | ||
505 | } | ||
506 | |||
507 | static void aurora_flush_range(unsigned long start, unsigned long end) | ||
508 | { | ||
509 | start &= ~(CACHE_LINE_SIZE - 1); | ||
510 | end = ALIGN(end, CACHE_LINE_SIZE); | ||
511 | while (start != end) { | ||
512 | unsigned long range_end = calc_range_end(start, end); | ||
513 | /* | ||
514 | * If L2 is forced to WT, the L2 will always be clean and we | ||
515 | * just need to invalidate. | ||
516 | */ | ||
517 | if (l2_wt_override) | ||
518 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
519 | AURORA_INVAL_RANGE_REG); | ||
520 | else | ||
521 | aurora_pa_range(start, range_end - CACHE_LINE_SIZE, | ||
522 | AURORA_FLUSH_RANGE_REG); | ||
523 | start = range_end; | ||
524 | } | ||
525 | } | ||
526 | |||
527 | static void __init l2x0_of_setup(const struct device_node *np, | ||
528 | u32 *aux_val, u32 *aux_mask) | ||
529 | { | ||
530 | u32 data[2] = { 0, 0 }; | ||
531 | u32 tag = 0; | ||
532 | u32 dirty = 0; | ||
533 | u32 val = 0, mask = 0; | ||
534 | |||
535 | of_property_read_u32(np, "arm,tag-latency", &tag); | ||
536 | if (tag) { | ||
537 | mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; | ||
538 | val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; | ||
539 | } | ||
540 | |||
541 | of_property_read_u32_array(np, "arm,data-latency", | ||
542 | data, ARRAY_SIZE(data)); | ||
543 | if (data[0] && data[1]) { | ||
544 | mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | | ||
545 | L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; | ||
546 | val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | | ||
547 | ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); | ||
548 | } | ||
549 | |||
550 | of_property_read_u32(np, "arm,dirty-latency", &dirty); | ||
551 | if (dirty) { | ||
552 | mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; | ||
553 | val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; | ||
554 | } | ||
555 | |||
556 | *aux_val &= ~mask; | ||
557 | *aux_val |= val; | ||
558 | *aux_mask &= ~mask; | ||
559 | } | ||
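Each latency property is programmed as (cycles - 1) in its AUX_CTRL field, so the minimum property value of 1 encodes as 0. A sketch of the merge for one property, using the mask/shift names from the function above; the helper itself is hypothetical:

	/* Sketch: fold "arm,tag-latency = <2>" into the aux value/mask. */
	static void fold_tag_latency(u32 tag, u32 *aux_val, u32 *aux_mask)
	{
		u32 mask = L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		u32 val  = (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;

		*aux_val  = (*aux_val & ~mask) | val;  /* force the field  */
		*aux_mask &= ~mask;  /* keep l2x0_init() from masking it   */
	}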
560 | |||
561 | static void __init pl310_of_setup(const struct device_node *np, | ||
562 | u32 *aux_val, u32 *aux_mask) | ||
563 | { | ||
564 | u32 data[3] = { 0, 0, 0 }; | ||
565 | u32 tag[3] = { 0, 0, 0 }; | ||
566 | u32 filter[2] = { 0, 0 }; | ||
567 | |||
568 | of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); | ||
569 | if (tag[0] && tag[1] && tag[2]) | ||
570 | writel_relaxed( | ||
571 | ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | | ||
572 | ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | | ||
573 | ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), | ||
574 | l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
575 | |||
576 | of_property_read_u32_array(np, "arm,data-latency", | ||
577 | data, ARRAY_SIZE(data)); | ||
578 | if (data[0] && data[1] && data[2]) | ||
579 | writel_relaxed( | ||
580 | ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | | ||
581 | ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | | ||
582 | ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), | ||
583 | l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
584 | |||
585 | of_property_read_u32_array(np, "arm,filter-ranges", | ||
586 | filter, ARRAY_SIZE(filter)); | ||
587 | if (filter[1]) { | ||
588 | writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), | ||
589 | l2x0_base + L2X0_ADDR_FILTER_END); | ||
590 | writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, | ||
591 | l2x0_base + L2X0_ADDR_FILTER_START); | ||
592 | } | ||
593 | } | ||
594 | |||
595 | static void __init pl310_save(void) | ||
596 | { | ||
597 | u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & | ||
598 | L2X0_CACHE_ID_RTL_MASK; | ||
599 | |||
600 | l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base + | ||
601 | L2X0_TAG_LATENCY_CTRL); | ||
602 | l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base + | ||
603 | L2X0_DATA_LATENCY_CTRL); | ||
604 | l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base + | ||
605 | L2X0_ADDR_FILTER_END); | ||
606 | l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base + | ||
607 | L2X0_ADDR_FILTER_START); | ||
608 | |||
609 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { | ||
610 | /* | ||
611 | * From r2p0, there is Prefetch offset/control register | ||
612 | */ | ||
613 | l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base + | ||
614 | L2X0_PREFETCH_CTRL); | ||
615 | /* | ||
616 | * From r3p0, there is Power control register | ||
617 | */ | ||
618 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) | ||
619 | l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base + | ||
620 | L2X0_POWER_CTRL); | ||
621 | } | ||
622 | } | ||
623 | |||
624 | static void aurora_save(void) | ||
625 | { | ||
626 | l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL); | ||
627 | l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | ||
628 | } | ||
629 | |||
630 | static void l2x0_resume(void) | ||
631 | { | ||
632 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
633 | /* restore aux ctrl and enable l2 */ | ||
634 | l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); | ||
635 | |||
636 | writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + | ||
637 | L2X0_AUX_CTRL); | ||
638 | |||
639 | l2x0_inv_all(); | ||
640 | |||
641 | writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); | ||
642 | } | ||
643 | } | ||
644 | |||
645 | static void pl310_resume(void) | ||
646 | { | ||
647 | u32 l2x0_revision; | ||
648 | |||
649 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
650 | /* restore pl310 setup */ | ||
651 | writel_relaxed(l2x0_saved_regs.tag_latency, | ||
652 | l2x0_base + L2X0_TAG_LATENCY_CTRL); | ||
653 | writel_relaxed(l2x0_saved_regs.data_latency, | ||
654 | l2x0_base + L2X0_DATA_LATENCY_CTRL); | ||
655 | writel_relaxed(l2x0_saved_regs.filter_end, | ||
656 | l2x0_base + L2X0_ADDR_FILTER_END); | ||
657 | writel_relaxed(l2x0_saved_regs.filter_start, | ||
658 | l2x0_base + L2X0_ADDR_FILTER_START); | ||
659 | |||
660 | l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & | ||
661 | L2X0_CACHE_ID_RTL_MASK; | ||
662 | |||
663 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { | ||
664 | writel_relaxed(l2x0_saved_regs.prefetch_ctrl, | ||
665 | l2x0_base + L2X0_PREFETCH_CTRL); | ||
666 | if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) | ||
667 | writel_relaxed(l2x0_saved_regs.pwr_ctrl, | ||
668 | l2x0_base + L2X0_POWER_CTRL); | ||
669 | } | ||
670 | } | ||
671 | |||
672 | l2x0_resume(); | ||
673 | } | ||
674 | |||
675 | static void aurora_resume(void) | ||
676 | { | ||
677 | if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
678 | writel_relaxed(l2x0_saved_regs.aux_ctrl, | ||
679 | l2x0_base + L2X0_AUX_CTRL); | ||
680 | writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL); | ||
681 | } | ||
682 | } | ||
683 | |||
684 | static void __init aurora_broadcast_l2_commands(void) | ||
685 | { | ||
686 | __u32 u; | ||
687 | /* Enable broadcasting of cache commands to L2 */ | ||
688 | __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u)); | ||
689 | u |= AURORA_CTRL_FW; /* Set the FW bit */ | ||
690 | __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u)); | ||
691 | isb(); | ||
692 | } | ||
693 | |||
694 | static void __init aurora_of_setup(const struct device_node *np, | ||
695 | u32 *aux_val, u32 *aux_mask) | ||
696 | { | ||
697 | u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; | ||
698 | u32 mask = AURORA_ACR_REPLACEMENT_MASK; | ||
699 | |||
700 | of_property_read_u32(np, "cache-id-part", | ||
701 | &cache_id_part_number_from_dt); | ||
702 | |||
703 | /* Determine and save the write policy */ | ||
704 | l2_wt_override = of_property_read_bool(np, "wt-override"); | ||
705 | |||
706 | if (l2_wt_override) { | ||
707 | val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; | ||
708 | mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; | ||
709 | } | ||
710 | |||
711 | *aux_val &= ~mask; | ||
712 | *aux_val |= val; | ||
713 | *aux_mask &= ~mask; | ||
714 | } | ||
715 | |||
716 | static const struct l2x0_of_data pl310_data = { | ||
717 | .setup = pl310_of_setup, | ||
718 | .save = pl310_save, | ||
719 | .outer_cache = { | ||
720 | .resume = pl310_resume, | ||
721 | .inv_range = l2x0_inv_range, | ||
722 | .clean_range = l2x0_clean_range, | ||
723 | .flush_range = l2x0_flush_range, | ||
724 | .sync = l2x0_cache_sync, | ||
725 | .flush_all = l2x0_flush_all, | ||
726 | .inv_all = l2x0_inv_all, | ||
727 | .disable = l2x0_disable, | ||
728 | .set_debug = pl310_set_debug, | ||
729 | }, | ||
730 | }; | ||
731 | |||
732 | static const struct l2x0_of_data l2x0_data = { | ||
733 | .setup = l2x0_of_setup, | ||
734 | .save = NULL, | ||
735 | .outer_cache = { | ||
736 | .resume = l2x0_resume, | ||
737 | .inv_range = l2x0_inv_range, | ||
738 | .clean_range = l2x0_clean_range, | ||
739 | .flush_range = l2x0_flush_range, | ||
740 | .sync = l2x0_cache_sync, | ||
741 | .flush_all = l2x0_flush_all, | ||
742 | .inv_all = l2x0_inv_all, | ||
743 | .disable = l2x0_disable, | ||
744 | }, | ||
745 | }; | ||
746 | |||
747 | static const struct l2x0_of_data aurora_with_outer_data = { | ||
748 | .setup = aurora_of_setup, | ||
749 | .save = aurora_save, | ||
750 | .outer_cache = { | ||
751 | .resume = aurora_resume, | ||
752 | .inv_range = aurora_inv_range, | ||
753 | .clean_range = aurora_clean_range, | ||
754 | .flush_range = aurora_flush_range, | ||
755 | .sync = l2x0_cache_sync, | ||
756 | .flush_all = l2x0_flush_all, | ||
757 | .inv_all = l2x0_inv_all, | ||
758 | .disable = l2x0_disable, | ||
759 | }, | ||
760 | }; | ||
761 | |||
762 | static const struct l2x0_of_data aurora_no_outer_data = { | ||
763 | .setup = aurora_of_setup, | ||
764 | .save = aurora_save, | ||
765 | .outer_cache = { | ||
766 | .resume = aurora_resume, | ||
767 | }, | ||
768 | }; | ||
769 | |||
770 | static const struct of_device_id l2x0_ids[] __initconst = { | ||
771 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, | ||
772 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, | ||
773 | { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, | ||
774 | { .compatible = "marvell,aurora-system-cache", | ||
775 | .data = (void *)&aurora_no_outer_data}, | ||
776 | { .compatible = "marvell,aurora-outer-cache", | ||
777 | .data = (void *)&aurora_with_outer_data}, | ||
778 | {} | ||
779 | }; | ||
780 | |||
781 | int __init l2x0_of_init(u32 aux_val, u32 aux_mask) | ||
782 | { | ||
783 | struct device_node *np; | ||
784 | const struct l2x0_of_data *data; | ||
785 | struct resource res; | ||
786 | |||
787 | np = of_find_matching_node(NULL, l2x0_ids); | ||
788 | if (!np) | ||
789 | return -ENODEV; | ||
790 | |||
791 | if (of_address_to_resource(np, 0, &res)) | ||
792 | return -ENODEV; | ||
793 | |||
794 | l2x0_base = ioremap(res.start, resource_size(&res)); | ||
795 | if (!l2x0_base) | ||
796 | return -ENOMEM; | ||
797 | |||
798 | l2x0_saved_regs.phy_base = res.start; | ||
799 | |||
800 | data = of_match_node(l2x0_ids, np)->data; | ||
801 | |||
802 | /* L2 configuration can only be changed if the cache is disabled */ | ||
803 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { | ||
804 | if (data->setup) | ||
805 | data->setup(np, &aux_val, &aux_mask); | ||
806 | |||
807 | /* For the Aurora cache in no-outer mode, select the | ||
808 | * correct mode using the coprocessor. */ | ||
809 | if (data == &aurora_no_outer_data) | ||
810 | aurora_broadcast_l2_commands(); | ||
811 | } | 412 | } |
812 | 413 | ||
813 | if (data->save) | 414 | outer_cache.inv_range = l2x0_inv_range; |
814 | data->save(); | 415 | outer_cache.clean_range = l2x0_clean_range; |
815 | 416 | outer_cache.flush_range = l2x0_flush_range; | |
816 | of_init = true; | 417 | outer_cache.sync = l2x0_cache_sync; |
817 | l2x0_init(l2x0_base, aux_val, aux_mask); | 418 | outer_cache.flush_all = l2x0_flush_all; |
419 | outer_cache.inv_all = l2x0_inv_all; | ||
420 | outer_cache.disable = l2x0_disable; | ||
421 | outer_cache.set_debug = l2x0_set_debug; | ||
818 | 422 | ||
819 | memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); | 423 | pr_info_once("%s cache controller enabled\n", type); |
820 | 424 | pr_info_once("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", | |
821 | return 0; | 425 | l2x0_ways, l2x0_cache_id, aux, l2x0_size); |
822 | } | 426 | } |
823 | #endif | ||
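With CONFIG_OF, a platform only calls l2x0_of_init() with the aux value and mask it wants to force; the base address, latencies, and controller type all come from the matched device-tree node. A minimal sketch of the usual call, where passing 0 and ~0UL keeps the hardware defaults (the board hook name is hypothetical):

	/* Sketch: typical machine-init call with the cache node in the DT. */
	static void __init myboard_init_machine(void)
	{
		if (l2x0_of_init(0, ~0UL))
			pr_warn("l2x0: no usable cache node in the device tree\n");
	}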
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c index 1be0f4e5e6e..50868651890 100644 --- a/arch/arm/mm/cache-tauros2.c +++ b/arch/arm/mm/cache-tauros2.c | |||
@@ -15,11 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/of.h> | ||
19 | #include <linux/of_address.h> | ||
20 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
21 | #include <asm/cp15.h> | ||
22 | #include <asm/cputype.h> | ||
23 | #include <asm/hardware/cache-tauros2.h> | 19 | #include <asm/hardware/cache-tauros2.h> |
24 | 20 | ||
25 | 21 | ||
@@ -111,26 +107,6 @@ static void tauros2_flush_range(unsigned long start, unsigned long end) | |||
111 | 107 | ||
112 | dsb(); | 108 | dsb(); |
113 | } | 109 | } |
114 | |||
115 | static void tauros2_disable(void) | ||
116 | { | ||
117 | __asm__ __volatile__ ( | ||
118 | "mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t" | ||
119 | "mrc p15, 0, %0, c1, c0, 0\n\t" | ||
120 | "bic %0, %0, #(1 << 26)\n\t" | ||
121 | "mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t" | ||
122 | : : "r" (0x0)); | ||
123 | } | ||
124 | |||
125 | static void tauros2_resume(void) | ||
126 | { | ||
127 | __asm__ __volatile__ ( | ||
128 | "mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t" | ||
129 | "mrc p15, 0, %0, c1, c0, 0\n\t" | ||
130 | "orr %0, %0, #(1 << 26)\n\t" | ||
131 | "mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t" | ||
132 | : : "r" (0x0)); | ||
133 | } | ||
134 | #endif | 110 | #endif |
135 | 111 | ||
136 | static inline u32 __init read_extra_features(void) | 112 | static inline u32 __init read_extra_features(void) |
@@ -147,8 +123,25 @@ static inline void __init write_extra_features(u32 u) | |||
147 | __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u)); | 123 | __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u)); |
148 | } | 124 | } |
149 | 125 | ||
126 | static void __init disable_l2_prefetch(void) | ||
127 | { | ||
128 | u32 u; | ||
129 | |||
130 | /* | ||
131 | * Read the CPU Extra Features register and set the | ||
132 | * Disable L2 Prefetch bit if it is not already set. | ||
133 | */ | ||
134 | u = read_extra_features(); | ||
135 | if (!(u & 0x01000000)) { | ||
136 | printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n"); | ||
137 | write_extra_features(u | 0x01000000); | ||
138 | } | ||
139 | } | ||
140 | |||
150 | static inline int __init cpuid_scheme(void) | 141 | static inline int __init cpuid_scheme(void) |
151 | { | 142 | { |
143 | extern int processor_id; | ||
144 | |||
152 | return !!((processor_id & 0x000f0000) == 0x000f0000); | 145 | return !!((processor_id & 0x000f0000) == 0x000f0000); |
153 | } | 146 | } |
154 | 147 | ||
@@ -175,36 +168,12 @@ static inline void __init write_actlr(u32 actlr) | |||
175 | __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr)); | 168 | __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr)); |
176 | } | 169 | } |
177 | 170 | ||
178 | static void enable_extra_feature(unsigned int features) | 171 | void __init tauros2_init(void) |
179 | { | 172 | { |
180 | u32 u; | 173 | extern int processor_id; |
181 | 174 | char *mode; | |
182 | u = read_extra_features(); | ||
183 | |||
184 | if (features & CACHE_TAUROS2_PREFETCH_ON) | ||
185 | u &= ~0x01000000; | ||
186 | else | ||
187 | u |= 0x01000000; | ||
188 | printk(KERN_INFO "Tauros2: %s L2 prefetch.\n", | ||
189 | (features & CACHE_TAUROS2_PREFETCH_ON) | ||
190 | ? "Enabling" : "Disabling"); | ||
191 | |||
192 | if (features & CACHE_TAUROS2_LINEFILL_BURST8) | ||
193 | u |= 0x00100000; | ||
194 | else | ||
195 | u &= ~0x00100000; | ||
196 | printk(KERN_INFO "Tauros2: %s line fill burst8.\n", | ||
197 | (features & CACHE_TAUROS2_LINEFILL_BURST8) | ||
198 | ? "Enabling" : "Disabling"); | ||
199 | |||
200 | write_extra_features(u); | ||
201 | } | ||
202 | |||
203 | static void __init tauros2_internal_init(unsigned int features) | ||
204 | { | ||
205 | char *mode = NULL; | ||
206 | 175 | ||
207 | enable_extra_feature(features); | 176 | disable_l2_prefetch(); |
208 | 177 | ||
209 | #ifdef CONFIG_CPU_32v5 | 178 | #ifdef CONFIG_CPU_32v5 |
210 | if ((processor_id & 0xff0f0000) == 0x56050000) { | 179 | if ((processor_id & 0xff0f0000) == 0x56050000) { |
@@ -224,8 +193,6 @@ static void __init tauros2_internal_init(unsigned int features) | |||
224 | outer_cache.inv_range = tauros2_inv_range; | 193 | outer_cache.inv_range = tauros2_inv_range; |
225 | outer_cache.clean_range = tauros2_clean_range; | 194 | outer_cache.clean_range = tauros2_clean_range; |
226 | outer_cache.flush_range = tauros2_flush_range; | 195 | outer_cache.flush_range = tauros2_flush_range; |
227 | outer_cache.disable = tauros2_disable; | ||
228 | outer_cache.resume = tauros2_resume; | ||
229 | } | 196 | } |
230 | #endif | 197 | #endif |
231 | 198 | ||
@@ -251,8 +218,6 @@ static void __init tauros2_internal_init(unsigned int features) | |||
251 | outer_cache.inv_range = tauros2_inv_range; | 218 | outer_cache.inv_range = tauros2_inv_range; |
252 | outer_cache.clean_range = tauros2_clean_range; | 219 | outer_cache.clean_range = tauros2_clean_range; |
253 | outer_cache.flush_range = tauros2_flush_range; | 220 | outer_cache.flush_range = tauros2_flush_range; |
254 | outer_cache.disable = tauros2_disable; | ||
255 | outer_cache.resume = tauros2_resume; | ||
256 | } | 221 | } |
257 | #endif | 222 | #endif |
258 | 223 | ||
@@ -296,34 +261,3 @@ static void __init tauros2_internal_init(unsigned int features) | |||
296 | printk(KERN_INFO "Tauros2: L2 cache support initialised " | 261 | printk(KERN_INFO "Tauros2: L2 cache support initialised " |
297 | "in %s mode.\n", mode); | 262 | "in %s mode.\n", mode); |
298 | } | 263 | } |
299 | |||
300 | #ifdef CONFIG_OF | ||
301 | static const struct of_device_id tauros2_ids[] __initconst = { | ||
302 | { .compatible = "marvell,tauros2-cache"}, | ||
303 | {} | ||
304 | }; | ||
305 | #endif | ||
306 | |||
307 | void __init tauros2_init(unsigned int features) | ||
308 | { | ||
309 | #ifdef CONFIG_OF | ||
310 | struct device_node *node; | ||
311 | int ret; | ||
312 | unsigned int f; | ||
313 | |||
314 | node = of_find_matching_node(NULL, tauros2_ids); | ||
315 | if (!node) { | ||
316 | pr_info("marvell,tauros2-cache not found, disabling it\n"); | ||
317 | return; | ||
318 | } | ||
319 | |||
320 | ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f); | ||
321 | if (ret) { | ||
322 | pr_info("marvell,tauros2-cache-features property not found, " | ||
323 | "disabling extra features\n"); | ||
324 | features = 0; | ||
325 | } else | ||
326 | features = f; | ||
327 | #endif | ||
328 | tauros2_internal_init(features); | ||
329 | } | ||
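Before this change (left column), platforms passed feature flags into tauros2_init() or let the CONFIG_OF path read them from the marvell,tauros2-cache-features property; afterwards the function takes no arguments and always disables L2 prefetch. A sketch of the old-style call a board file would have made (the hook name is hypothetical; the flags appear in the removed code above):

	/* Old-style (left column) initialisation from a board file. */
	static void __init myboard_init_early(void)
	{
		tauros2_init(CACHE_TAUROS2_PREFETCH_ON |
			     CACHE_TAUROS2_LINEFILL_BURST8);
	}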
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 8a3fadece8d..c2301f22610 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S | |||
@@ -78,7 +78,6 @@ ENTRY(v3_coherent_kern_range) | |||
78 | * - end - virtual end address | 78 | * - end - virtual end address |
79 | */ | 79 | */ |
80 | ENTRY(v3_coherent_user_range) | 80 | ENTRY(v3_coherent_user_range) |
81 | mov r0, #0 | ||
82 | mov pc, lr | 81 | mov pc, lr |
83 | 82 | ||
84 | /* | 83 | /* |
@@ -128,9 +127,6 @@ ENTRY(v3_dma_map_area) | |||
128 | ENDPROC(v3_dma_unmap_area) | 127 | ENDPROC(v3_dma_unmap_area) |
129 | ENDPROC(v3_dma_map_area) | 128 | ENDPROC(v3_dma_map_area) |
130 | 129 | ||
131 | .globl v3_flush_kern_cache_louis | ||
132 | .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all | ||
133 | |||
134 | __INITDATA | 130 | __INITDATA |
135 | 131 | ||
136 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 132 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index 43e5d77be67..fd9bb7addc8 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S | |||
@@ -88,7 +88,6 @@ ENTRY(v4_coherent_kern_range) | |||
88 | * - end - virtual end address | 88 | * - end - virtual end address |
89 | */ | 89 | */ |
90 | ENTRY(v4_coherent_user_range) | 90 | ENTRY(v4_coherent_user_range) |
91 | mov r0, #0 | ||
92 | mov pc, lr | 91 | mov pc, lr |
93 | 92 | ||
94 | /* | 93 | /* |
@@ -140,9 +139,6 @@ ENTRY(v4_dma_map_area) | |||
140 | ENDPROC(v4_dma_unmap_area) | 139 | ENDPROC(v4_dma_unmap_area) |
141 | ENDPROC(v4_dma_map_area) | 140 | ENDPROC(v4_dma_map_area) |
142 | 141 | ||
143 | .globl v4_flush_kern_cache_louis | ||
144 | .equ v4_flush_kern_cache_louis, v4_flush_kern_cache_all | ||
145 | |||
146 | __INITDATA | 142 | __INITDATA |
147 | 143 | ||
148 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 144 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index cd494532140..4f2c14151cc 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S | |||
@@ -167,9 +167,9 @@ ENTRY(v4wb_coherent_user_range) | |||
167 | add r0, r0, #CACHE_DLINESIZE | 167 | add r0, r0, #CACHE_DLINESIZE |
168 | cmp r0, r1 | 168 | cmp r0, r1 |
169 | blo 1b | 169 | blo 1b |
170 | mov r0, #0 | 170 | mov ip, #0 |
171 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | 171 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
172 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 172 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
173 | mov pc, lr | 173 | mov pc, lr |
174 | 174 | ||
175 | 175 | ||
@@ -251,9 +251,6 @@ ENTRY(v4wb_dma_unmap_area) | |||
251 | mov pc, lr | 251 | mov pc, lr |
252 | ENDPROC(v4wb_dma_unmap_area) | 252 | ENDPROC(v4wb_dma_unmap_area) |
253 | 253 | ||
254 | .globl v4wb_flush_kern_cache_louis | ||
255 | .equ v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all | ||
256 | |||
257 | __INITDATA | 254 | __INITDATA |
258 | 255 | ||
259 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 256 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 11e5e5838bc..4d7b467631c 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S | |||
@@ -125,7 +125,6 @@ ENTRY(v4wt_coherent_user_range) | |||
125 | add r0, r0, #CACHE_DLINESIZE | 125 | add r0, r0, #CACHE_DLINESIZE |
126 | cmp r0, r1 | 126 | cmp r0, r1 |
127 | blo 1b | 127 | blo 1b |
128 | mov r0, #0 | ||
129 | mov pc, lr | 128 | mov pc, lr |
130 | 129 | ||
131 | /* | 130 | /* |
@@ -196,9 +195,6 @@ ENTRY(v4wt_dma_map_area) | |||
196 | ENDPROC(v4wt_dma_unmap_area) | 195 | ENDPROC(v4wt_dma_unmap_area) |
197 | ENDPROC(v4wt_dma_map_area) | 196 | ENDPROC(v4wt_dma_map_area) |
198 | 197 | ||
199 | .globl v4wt_flush_kern_cache_louis | ||
200 | .equ v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all | ||
201 | |||
202 | __INITDATA | 198 | __INITDATA |
203 | 199 | ||
204 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 200 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index d8fd4d4bd3d..2edb6f67f69 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/errno.h> | ||
16 | #include <asm/unwind.h> | 15 | #include <asm/unwind.h> |
17 | 16 | ||
18 | #include "proc-macros.S" | 17 | #include "proc-macros.S" |
@@ -136,6 +135,7 @@ ENTRY(v6_coherent_user_range) | |||
136 | 1: | 135 | 1: |
137 | USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line | 136 | USER( mcr p15, 0, r0, c7, c10, 1 ) @ clean D line |
138 | add r0, r0, #CACHE_LINE_SIZE | 137 | add r0, r0, #CACHE_LINE_SIZE |
138 | 2: | ||
139 | cmp r0, r1 | 139 | cmp r0, r1 |
140 | blo 1b | 140 | blo 1b |
141 | #endif | 141 | #endif |
@@ -154,11 +154,13 @@ ENTRY(v6_coherent_user_range) | |||
154 | 154 | ||
155 | /* | 155 | /* |
156 | * Fault handling for the cache operation above. If the virtual address in r0 | 156 | * Fault handling for the cache operation above. If the virtual address in r0 |
157 | * isn't mapped, fail with -EFAULT. | 157 | * isn't mapped, just try the next page. |
158 | */ | 158 | */ |
159 | 9001: | 159 | 9001: |
160 | mov r0, #-EFAULT | 160 | mov r0, r0, lsr #12 |
161 | mov pc, lr | 161 | mov r0, r0, lsl #12 |
162 | add r0, r0, #4096 | ||
163 | b 2b | ||
162 | UNWIND(.fnend ) | 164 | UNWIND(.fnend ) |
163 | ENDPROC(v6_coherent_user_range) | 165 | ENDPROC(v6_coherent_user_range) |
164 | ENDPROC(v6_coherent_kern_range) | 166 | ENDPROC(v6_coherent_kern_range) |
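The replacement fixup no longer fails with -EFAULT: it rounds the faulting address down to its page (lsr #12 then lsl #12), steps past it, and branches back to the loop at label 2. The same logic in C, with the 4 KiB page size the assembly hard-codes:

	/* C rendering of the 9001: fixup above (4 KiB pages assumed). */
	static unsigned long skip_faulting_page(unsigned long addr)
	{
		addr &= ~4095UL;    /* lsr #12 ; lsl #12       */
		return addr + 4096; /* resume on the next page */
	}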
@@ -270,6 +272,11 @@ v6_dma_clean_range: | |||
270 | * - end - virtual end address of region | 272 | * - end - virtual end address of region |
271 | */ | 273 | */ |
272 | ENTRY(v6_dma_flush_range) | 274 | ENTRY(v6_dma_flush_range) |
275 | #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT | ||
276 | sub r2, r1, r0 | ||
277 | cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT | ||
278 | bhi v6_dma_flush_dcache_all | ||
279 | #endif | ||
273 | #ifdef CONFIG_DMA_CACHE_RWFO | 280 | #ifdef CONFIG_DMA_CACHE_RWFO |
274 | ldrb r2, [r0] @ read for ownership | 281 | ldrb r2, [r0] @ read for ownership |
275 | strb r2, [r0] @ write for ownership | 282 | strb r2, [r0] @ write for ownership |
@@ -292,6 +299,18 @@ ENTRY(v6_dma_flush_range) | |||
292 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 299 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
293 | mov pc, lr | 300 | mov pc, lr |
294 | 301 | ||
302 | #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT | ||
303 | v6_dma_flush_dcache_all: | ||
304 | mov r0, #0 | ||
305 | #ifdef HARVARD_CACHE | ||
306 | mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate | ||
307 | #else | ||
308 | mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate | ||
309 | #endif | ||
310 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | ||
311 | mov pc, lr | ||
312 | #endif | ||
313 | |||
295 | /* | 314 | /* |
296 | * dma_map_area(start, size, dir) | 315 | * dma_map_area(start, size, dir) |
297 | * - start - kernel virtual start address | 316 | * - start - kernel virtual start address |
@@ -326,9 +345,6 @@ ENTRY(v6_dma_unmap_area) | |||
326 | mov pc, lr | 345 | mov pc, lr |
327 | ENDPROC(v6_dma_unmap_area) | 346 | ENDPROC(v6_dma_unmap_area) |
328 | 347 | ||
329 | .globl v6_flush_kern_cache_louis | ||
330 | .equ v6_flush_kern_cache_louis, v6_flush_kern_cache_all | ||
331 | |||
332 | __INITDATA | 348 | __INITDATA |
333 | 349 | ||
334 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 350 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
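The CONFIG_CACHE_FLUSH_RANGE_LIMIT check added above trades precision for speed: past the threshold, one whole-D-cache clean+invalidate by set/way is cheaper than walking the range line by line. The same decision in C (both callees below are hypothetical stand-ins for the assembly paths):

	/* C rendering of the range-limit check in v6_dma_flush_range(). */
	static void dma_flush_range_sketch(unsigned long start, unsigned long end)
	{
		if (end - start > CONFIG_CACHE_FLUSH_RANGE_LIMIT)
			flush_whole_dcache();      /* v6_dma_flush_dcache_all */
		else
			flush_lines(start, end);   /* per-line clean+inval    */
	}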
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 7539ec27506..ea33896449b 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <asm/assembler.h> | 15 | #include <asm/assembler.h> |
16 | #include <asm/errno.h> | ||
17 | #include <asm/unwind.h> | 16 | #include <asm/unwind.h> |
18 | 17 | ||
19 | #include "proc-macros.S" | 18 | #include "proc-macros.S" |
@@ -33,48 +32,29 @@ ENTRY(v7_flush_icache_all) | |||
33 | mov pc, lr | 32 | mov pc, lr |
34 | ENDPROC(v7_flush_icache_all) | 33 | ENDPROC(v7_flush_icache_all) |
35 | 34 | ||
36 | /* | ||
37 | * v7_flush_dcache_louis() | ||
38 | * | ||
39 | * Flush the D-cache up to the Level of Unification Inner Shareable | ||
40 | * | ||
41 | * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) | ||
42 | */ | ||
43 | |||
44 | ENTRY(v7_flush_dcache_louis) | ||
45 | dmb @ ensure ordering with previous memory accesses | ||
46 | mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr | ||
47 | ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr | ||
48 | ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr | ||
49 | ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 | ||
50 | ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 | ||
51 | moveq pc, lr @ return if level == 0 | ||
52 | mov r10, #0 @ r10 (starting level) = 0 | ||
53 | b flush_levels @ start flushing cache levels | ||
54 | ENDPROC(v7_flush_dcache_louis) | ||
55 | |||
56 | /* | 35 | /* |
57 | * v7_flush_dcache_all() | 36 | * v7_op_dcache_all op |
58 | * | 37 | * |
59 | * Flush the whole D-cache. | 38 | * op=c14, Flush the whole D-cache. |
39 | * op=c10, Clean the whole D-cache. | ||
60 | * | 40 | * |
61 | * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) | 41 | * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) |
62 | * | 42 | * |
63 | * - mm - mm_struct describing address space | 43 | * - mm - mm_struct describing address space |
64 | */ | 44 | */ |
65 | ENTRY(v7_flush_dcache_all) | 45 | .macro v7_op_dcache_all op @ op=c10 clean, op=c14 flush |
66 | dmb @ ensure ordering with previous memory accesses | 46 | dmb @ ensure ordering with previous memory accesses |
67 | mrc p15, 1, r0, c0, c0, 1 @ read clidr | 47 | mrc p15, 1, r0, c0, c0, 1 @ read clidr |
68 | ands r3, r0, #0x7000000 @ extract loc from clidr | 48 | ands r3, r0, #0x7000000 @ extract loc from clidr |
69 | mov r3, r3, lsr #23 @ left align loc bit field | 49 | mov r3, r3, lsr #23 @ left align loc bit field |
70 | beq finished @ if loc is 0, then no need to clean | 50 | beq 1005f @ if loc is 0, then no need to clean |
71 | mov r10, #0 @ start clean at cache level 0 | 51 | mov r10, #0 @ start clean at cache level 0 |
72 | flush_levels: | 52 | 1001: |
73 | add r2, r10, r10, lsr #1 @ work out 3x current cache level | 53 | add r2, r10, r10, lsr #1 @ work out 3x current cache level |
74 | mov r1, r0, lsr r2 @ extract cache type bits from clidr | 54 | mov r1, r0, lsr r2 @ extract cache type bits from clidr |
75 | and r1, r1, #7 @ mask of the bits for current cache only | 55 | and r1, r1, #7 @ mask of the bits for current cache only |
76 | cmp r1, #2 @ see what cache we have at this level | 56 | cmp r1, #2 @ see what cache we have at this level |
77 | blt skip @ skip if no cache, or just i-cache | 57 | blt 1004f @ skip if no cache, or just i-cache |
78 | #ifdef CONFIG_PREEMPT | 58 | #ifdef CONFIG_PREEMPT |
79 | save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic | 59 | save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic |
80 | #endif | 60 | #endif |
@@ -91,32 +71,40 @@ flush_levels: | |||
91 | clz r5, r4 @ find bit position of way size increment | 71 | clz r5, r4 @ find bit position of way size increment |
92 | ldr r7, =0x7fff | 72 | ldr r7, =0x7fff |
93 | ands r7, r7, r1, lsr #13 @ extract max number of the index size | 73 | ands r7, r7, r1, lsr #13 @ extract max number of the index size |
94 | loop1: | 74 | 1002: |
95 | mov r9, r4 @ create working copy of max way size | 75 | mov r9, r4 @ create working copy of max way size |
96 | loop2: | 76 | 1003: |
97 | ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 | 77 | ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 |
98 | THUMB( lsl r6, r9, r5 ) | 78 | THUMB( lsl r6, r9, r5 ) |
99 | THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 | 79 | THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 |
100 | ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 | 80 | ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 |
101 | THUMB( lsl r6, r7, r2 ) | 81 | THUMB( lsl r6, r7, r2 ) |
102 | THUMB( orr r11, r11, r6 ) @ factor index number into r11 | 82 | THUMB( orr r11, r11, r6 ) @ factor index number into r11 |
103 | mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way | 83 | mcr p15, 0, r11, c7, \op, 2 @ op=c10/c14, clean/flush by set/way |
104 | subs r9, r9, #1 @ decrement the way | 84 | subs r9, r9, #1 @ decrement the way |
105 | bge loop2 | 85 | bge 1003b |
106 | subs r7, r7, #1 @ decrement the index | 86 | subs r7, r7, #1 @ decrement the index |
107 | bge loop1 | 87 | bge 1002b |
108 | skip: | 88 | 1004: |
109 | add r10, r10, #2 @ increment cache number | 89 | add r10, r10, #2 @ increment cache number |
110 | cmp r3, r10 | 90 | cmp r3, r10 |
111 | bgt flush_levels | 91 | bgt 1001b |
112 | finished: | 92 | 1005: |
113 | mov r10, #0 @ switch back to cache level 0 | 93 | mov r10, #0 @ switch back to cache level 0
114 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr | 94 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr |
115 | dsb | 95 | dsb |
116 | isb | 96 | isb |
117 | mov pc, lr | 97 | mov pc, lr |
98 | .endm | ||
99 | |||
100 | ENTRY(v7_flush_dcache_all) | ||
101 | v7_op_dcache_all c14 | ||
118 | ENDPROC(v7_flush_dcache_all) | 102 | ENDPROC(v7_flush_dcache_all) |
119 | 103 | ||
104 | ENTRY(v7_clean_dcache_all) | ||
105 | v7_op_dcache_all c10 | ||
106 | ENDPROC(v7_clean_dcache_all) | ||
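Both expansions of the macro build the set/way operand the same way: r10 already holds the level doubled (bits [3:1]), the way index is shifted into the top bits by the clz-derived amount in r5, and the set index sits above the line-offset bits in r2. A C sketch of that operand, per the ARMv7 DCCSW/DCCISW encoding:

	/* Sketch of the operand assembled into r11 above; way_shift is
	 * clz(way count), set_shift is the CCSIDR line-length offset. */
	static u32 set_way_operand(u32 level, u32 way, u32 set,
				   u32 way_shift, u32 set_shift)
	{
		return (level << 1) | (way << way_shift) | (set << set_shift);
	}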
107 | |||
120 | /* | 108 | /* |
121 | * v7_flush_cache_all() | 109 | * v7_flush_cache_all() |
122 | * | 110 | * |
@@ -140,23 +128,23 @@ ENTRY(v7_flush_kern_cache_all) | |||
140 | mov pc, lr | 128 | mov pc, lr |
141 | ENDPROC(v7_flush_kern_cache_all) | 129 | ENDPROC(v7_flush_kern_cache_all) |
142 | 130 | ||
143 | /* | 131 | /* |
144 | * v7_flush_kern_cache_louis(void) | 132 | * v7_clean_kern_cache_all() |
145 | * | ||
146 | * Flush the data cache up to Level of Unification Inner Shareable. | ||
147 | * Invalidate the I-cache to the point of unification. | ||
148 | */ | 133 | */ |
149 | ENTRY(v7_flush_kern_cache_louis) | 134 | ENTRY(v7_clean_kern_cache_all) |
150 | ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) | 135 | ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) |
151 | THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) | 136 | THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) |
152 | bl v7_flush_dcache_louis | 137 | bl v7_clean_dcache_all |
153 | mov r0, #0 | 138 | mov r0, #0 |
154 | ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable | 139 | #ifdef CONFIG_SMP |
155 | ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate | 140 | mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable |
141 | #else | ||
142 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | ||
143 | #endif | ||
156 | ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) | 144 | ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) |
157 | THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) | 145 | THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) |
158 | mov pc, lr | 146 | mov pc, lr |
159 | ENDPROC(v7_flush_kern_cache_louis) | 147 | ENDPROC(v7_clean_kern_cache_all) |
160 | 148 | ||
161 | /* | 149 | /* |
162 | * v7_flush_cache_all() | 150 | * v7_flush_cache_all() |
@@ -237,6 +225,7 @@ ENTRY(v7_coherent_user_range) | |||
237 | add r12, r12, r2 | 225 | add r12, r12, r2 |
238 | cmp r12, r1 | 226 | cmp r12, r1 |
239 | blo 2b | 227 | blo 2b |
228 | 3: | ||
240 | mov r0, #0 | 229 | mov r0, #0 |
241 | ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable | 230 | ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable |
242 | ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB | 231 | ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB |
@@ -246,14 +235,13 @@ ENTRY(v7_coherent_user_range) | |||
246 | 235 | ||
247 | /* | 236 | /* |
248 | * Fault handling for the cache operation above. If the virtual address in r0 | 237 | * Fault handling for the cache operation above. If the virtual address in r0 |
249 | * isn't mapped, fail with -EFAULT. | 238 | * isn't mapped, just try the next page. |
250 | */ | 239 | */ |
251 | 9001: | 240 | 9001: |
252 | #ifdef CONFIG_ARM_ERRATA_775420 | 241 | mov r12, r12, lsr #12 |
253 | dsb | 242 | mov r12, r12, lsl #12 |
254 | #endif | 243 | add r12, r12, #4096 |
255 | mov r0, #-EFAULT | 244 | b 3b |
256 | mov pc, lr | ||
257 | UNWIND(.fnend ) | 245 | UNWIND(.fnend ) |
258 | ENDPROC(v7_coherent_kern_range) | 246 | ENDPROC(v7_coherent_kern_range) |
259 | ENDPROC(v7_coherent_user_range) | 247 | ENDPROC(v7_coherent_user_range) |
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 6c3edeb66e7..5a32020471e 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <asm/cp15.h> | 21 | #include <asm/system.h> |
22 | #include <asm/cputype.h> | 22 | #include <asm/cputype.h> |
23 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
24 | 24 | ||
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index bc4a5e9ebb7..b0ee9ba3cfa 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -2,9 +2,6 @@ | |||
2 | * linux/arch/arm/mm/context.c | 2 | * linux/arch/arm/mm/context.c |
3 | * | 3 | * |
4 | * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. | 4 | * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. |
5 | * Copyright (C) 2012 ARM Limited | ||
6 | * | ||
7 | * Author: Will Deacon <will.deacon@arm.com> | ||
8 | * | 5 | * |
9 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 7 | * it under the terms of the GNU General Public License version 2 as |
@@ -17,200 +14,144 @@ | |||
17 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
18 | 15 | ||
19 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
20 | #include <asm/smp_plat.h> | ||
21 | #include <asm/thread_notify.h> | ||
22 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
23 | 18 | ||
19 | static DEFINE_SPINLOCK(cpu_asid_lock); | ||
20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; | ||
21 | #ifdef CONFIG_SMP | ||
22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); | ||
23 | #endif | ||
24 | |||
24 | /* | 25 | /* |
25 | * On ARMv6, we have the following structure in the Context ID: | 26 | * We fork()ed a process, and we need a new context for the child |
26 | * | 27 | * to run in. We reserve version 0 for initial tasks so we will |
27 | * 31 7 0 | 28 | * always allocate an ASID. The ASID 0 is reserved for the TTBR |
28 | * +-------------------------+-----------+ | 29 | * register changing sequence. |
29 | * | process ID | ASID | | ||
30 | * +-------------------------+-----------+ | ||
31 | * | context ID | | ||
32 | * +-------------------------------------+ | ||
33 | * | ||
34 | * The ASID is used to tag entries in the CPU caches and TLBs. | ||
35 | * The context ID is used by debuggers and trace logic, and | ||
36 | * should be unique within all running processes. | ||
37 | */ | 30 | */ |
38 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) | 31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
39 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) | ||
40 | |||
41 | #define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1) | ||
42 | #define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK) | ||
43 | |||
44 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | ||
45 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | ||
46 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | ||
47 | |||
48 | static DEFINE_PER_CPU(atomic64_t, active_asids); | ||
49 | static DEFINE_PER_CPU(u64, reserved_asids); | ||
50 | static cpumask_t tlb_flush_pending; | ||
51 | |||
52 | #ifdef CONFIG_ARM_LPAE | ||
53 | static void cpu_set_reserved_ttbr0(void) | ||
54 | { | 32 | { |
55 | unsigned long ttbl = __pa(swapper_pg_dir); | 33 | mm->context.id = 0; |
56 | unsigned long ttbh = 0; | 34 | spin_lock_init(&mm->context.id_lock); |
57 | |||
58 | /* | ||
59 | * Set TTBR0 to swapper_pg_dir which contains only global entries. The | ||
60 | * ASID is set to 0. | ||
61 | */ | ||
62 | asm volatile( | ||
63 | " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" | ||
64 | : | ||
65 | : "r" (ttbl), "r" (ttbh)); | ||
66 | isb(); | ||
67 | } | ||
68 | #else | ||
69 | static void cpu_set_reserved_ttbr0(void) | ||
70 | { | ||
71 | u32 ttb; | ||
72 | /* Copy TTBR1 into TTBR0 */ | ||
73 | asm volatile( | ||
74 | " mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n" | ||
75 | " mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n" | ||
76 | : "=r" (ttb)); | ||
77 | isb(); | ||
78 | } | 35 | } |
79 | #endif | ||
80 | 36 | ||
81 | #ifdef CONFIG_PID_IN_CONTEXTIDR | 37 | static void flush_context(void) |
82 | static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, | ||
83 | void *t) | ||
84 | { | 38 | { |
85 | u32 contextidr; | 39 | /* set the reserved ASID before flushing the TLB */ |
86 | pid_t pid; | 40 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); |
87 | struct thread_info *thread = t; | ||
88 | |||
89 | if (cmd != THREAD_NOTIFY_SWITCH) | ||
90 | return NOTIFY_DONE; | ||
91 | |||
92 | pid = task_pid_nr(thread->task) << ASID_BITS; | ||
93 | asm volatile( | ||
94 | " mrc p15, 0, %0, c13, c0, 1\n" | ||
95 | " and %0, %0, %2\n" | ||
96 | " orr %0, %0, %1\n" | ||
97 | " mcr p15, 0, %0, c13, c0, 1\n" | ||
98 | : "=r" (contextidr), "+r" (pid) | ||
99 | : "I" (~ASID_MASK)); | ||
100 | isb(); | 41 | isb(); |
101 | 42 | local_flush_tlb_all(); | |
102 | return NOTIFY_OK; | 43 | if (icache_is_vivt_asid_tagged()) { |
103 | } | ||
104 | |||
105 | static struct notifier_block contextidr_notifier_block = { | ||
106 | .notifier_call = contextidr_notifier, | ||
107 | }; | ||
108 | |||
109 | static int __init contextidr_notifier_init(void) | ||
110 | { | ||
111 | return thread_register_notifier(&contextidr_notifier_block); | ||
112 | } | ||
113 | arch_initcall(contextidr_notifier_init); | ||
114 | #endif | ||
115 | |||
116 | static void flush_context(unsigned int cpu) | ||
117 | { | ||
118 | int i; | ||
119 | u64 asid; | ||
120 | |||
121 | /* Update the list of reserved ASIDs and the ASID bitmap. */ | ||
122 | bitmap_clear(asid_map, 0, NUM_USER_ASIDS); | ||
123 | for_each_possible_cpu(i) { | ||
124 | if (i == cpu) { | ||
125 | asid = 0; | ||
126 | } else { | ||
127 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); | ||
128 | __set_bit(ASID_TO_IDX(asid), asid_map); | ||
129 | } | ||
130 | per_cpu(reserved_asids, i) = asid; | ||
131 | } | ||
132 | |||
133 | /* Queue a TLB invalidate and flush the I-cache if necessary. */ | ||
134 | if (!tlb_ops_need_broadcast()) | ||
135 | cpumask_set_cpu(cpu, &tlb_flush_pending); | ||
136 | else | ||
137 | cpumask_setall(&tlb_flush_pending); | ||
138 | |||
139 | if (icache_is_vivt_asid_tagged()) | ||
140 | __flush_icache_all(); | 44 | __flush_icache_all(); |
45 | dsb(); | ||
46 | } | ||
141 | } | 47 | } |
142 | 48 | ||
143 | static int is_reserved_asid(u64 asid) | 49 | #ifdef CONFIG_SMP |
144 | { | ||
145 | int cpu; | ||
146 | for_each_possible_cpu(cpu) | ||
147 | if (per_cpu(reserved_asids, cpu) == asid) | ||
148 | return 1; | ||
149 | return 0; | ||
150 | } | ||
151 | 50 | ||
152 | static void new_context(struct mm_struct *mm, unsigned int cpu) | 51 | static void set_mm_context(struct mm_struct *mm, unsigned int asid) |
153 | { | 52 | { |
154 | u64 asid = mm->context.id; | 53 | unsigned long flags; |
155 | u64 generation = atomic64_read(&asid_generation); | ||
156 | 54 | ||
157 | if (asid != 0 && is_reserved_asid(asid)) { | 55 | /* |
158 | /* | 56 | * Locking needed for multi-threaded applications where the |
159 | * Our current ASID was active during a rollover, we can | 57 | * same mm->context.id could be set from different CPUs during |
160 | * continue to use it and this was just a false alarm. | 58 | * the broadcast. This function is also called via IPI so the |
161 | */ | 59 | * mm->context.id_lock has to be IRQ-safe. |
162 | asid = generation | (asid & ~ASID_MASK); | 60 | */ |
163 | } else { | 61 | spin_lock_irqsave(&mm->context.id_lock, flags); |
62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { | ||
164 | /* | 63 | /* |
165 | * Allocate a free ASID. If we can't find one, take a | 64 | * Old version of ASID found. Set the new one and |
166 | * note of the currently active ASIDs and mark the TLBs | 65 | * reset mm_cpumask(mm). |
167 | * as requiring flushes. | ||
168 | */ | 66 | */ |
169 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | 67 | mm->context.id = asid; |
170 | if (asid == NUM_USER_ASIDS) { | ||
171 | generation = atomic64_add_return(ASID_FIRST_VERSION, | ||
172 | &asid_generation); | ||
173 | flush_context(cpu); | ||
174 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | ||
175 | } | ||
176 | __set_bit(asid, asid_map); | ||
177 | asid = generation | IDX_TO_ASID(asid); | ||
178 | cpumask_clear(mm_cpumask(mm)); | 68 | cpumask_clear(mm_cpumask(mm)); |
179 | } | 69 | } |
70 | spin_unlock_irqrestore(&mm->context.id_lock, flags); | ||
180 | 71 | ||
181 | mm->context.id = asid; | 72 | /* |
73 | * Set the mm_cpumask(mm) bit for the current CPU. | ||
74 | */ | ||
75 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
182 | } | 76 | } |
183 | 77 | ||
184 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk) | 78 | /* |
79 | * Reset the ASID on the current CPU. This function call is broadcast | ||
80 | * from the CPU handling the ASID rollover and holding cpu_asid_lock. | ||
81 | */ | ||
82 | static void reset_context(void *info) | ||
185 | { | 83 | { |
186 | unsigned long flags; | 84 | unsigned int asid; |
187 | unsigned int cpu = smp_processor_id(); | 85 | unsigned int cpu = smp_processor_id(); |
188 | 86 | struct mm_struct *mm = per_cpu(current_mm, cpu); | |
189 | if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)) | ||
190 | __check_vmalloc_seq(mm); | ||
191 | 87 | ||
192 | /* | 88 | /* |
193 | * Required during context switch to avoid speculative page table | 89 | * Check if a current_mm was set on this CPU as it might still |
194 | * walking with the wrong TTBR. | 90 | * be in the early booting stages and using the reserved ASID. |
195 | */ | 91 | */ |
196 | cpu_set_reserved_ttbr0(); | 92 | if (!mm) |
93 | return; | ||
94 | |||
95 | smp_rmb(); | ||
96 | asid = cpu_last_asid + cpu + 1; | ||
97 | |||
98 | flush_context(); | ||
99 | set_mm_context(mm, asid); | ||
100 | |||
101 | /* set the new ASID */ | ||
102 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); | ||
103 | isb(); | ||
104 | } | ||
105 | |||
106 | #else | ||
197 | 107 | ||
198 | if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) | 108 | static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) |
199 | && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id)) | 109 | { |
200 | goto switch_mm_fastpath; | 110 | mm->context.id = asid; |
111 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | ||
112 | } | ||
201 | 113 | ||
202 | raw_spin_lock_irqsave(&cpu_asid_lock, flags); | 114 | #endif |
203 | /* Check that our ASID belongs to the current generation. */ | ||
204 | if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS) | ||
205 | new_context(mm, cpu); | ||
206 | 115 | ||
207 | atomic64_set(&per_cpu(active_asids, cpu), mm->context.id); | 116 | void __new_context(struct mm_struct *mm) |
208 | cpumask_set_cpu(cpu, mm_cpumask(mm)); | 117 | { |
118 | unsigned int asid; | ||
209 | 119 | ||
210 | if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) | 120 | spin_lock(&cpu_asid_lock); |
211 | local_flush_tlb_all(); | 121 | #ifdef CONFIG_SMP |
212 | raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); | 122 | /* |
123 | * Check the ASID again, in case the change was broadcast from | ||
124 | * another CPU before we acquired the lock. | ||
125 | */ | ||
126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | ||
127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
128 | spin_unlock(&cpu_asid_lock); | ||
129 | return; | ||
130 | } | ||
131 | #endif | ||
132 | /* | ||
133 | * At this point, it is guaranteed that the current mm (with | ||
134 | * an old ASID) isn't active on any other CPU since the ASIDs | ||
135 | * are changed simultaneously via IPI. | ||
136 | */ | ||
137 | asid = ++cpu_last_asid; | ||
138 | if (asid == 0) | ||
139 | asid = cpu_last_asid = ASID_FIRST_VERSION; | ||
140 | |||
141 | /* | ||
142 | * If we've used up all our ASIDs, we need | ||
143 | * to start a new version and flush the TLB. | ||
144 | */ | ||
145 | if (unlikely((asid & ~ASID_MASK) == 0)) { | ||
146 | asid = cpu_last_asid + smp_processor_id() + 1; | ||
147 | flush_context(); | ||
148 | #ifdef CONFIG_SMP | ||
149 | smp_wmb(); | ||
150 | smp_call_function(reset_context, NULL, 1); | ||
151 | #endif | ||
152 | cpu_last_asid += NR_CPUS; | ||
153 | } | ||
213 | 154 | ||
214 | switch_mm_fastpath: | 155 | set_mm_context(mm, asid); |
215 | cpu_switch_mm(mm->pgd, mm); | 156 | spin_unlock(&cpu_asid_lock); |
216 | } | 157 | } |
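Both versions of this file key off the same test: the bits of mm->context.id above ASID_BITS act as a generation counter, and an mm whose generation no longer matches the current one must take the slow path and get a fresh ASID. A standalone sketch of that predicate (ASID_BITS value assumed; 8 is typical for classic ARM):

	#define ASID_BITS 8	/* assumption: low 8 bits hold the hardware ASID */

	/* Nonzero when mm's ASID was issued in an older generation and a
	 * new one must be allocated under cpu_asid_lock. */
	static int asid_is_stale(unsigned long context_id, unsigned long current_gen)
	{
		return ((context_id ^ current_gen) >> ASID_BITS) != 0;
	}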
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c index d130a5ece5d..d2852e1635b 100644 --- a/arch/arm/mm/copypage-fa.c +++ b/arch/arm/mm/copypage-fa.c | |||
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from, | |||
44 | { | 44 | { |
45 | void *kto, *kfrom; | 45 | void *kto, *kfrom; |
46 | 46 | ||
47 | kto = kmap_atomic(to); | 47 | kto = kmap_atomic(to, KM_USER0); |
48 | kfrom = kmap_atomic(from); | 48 | kfrom = kmap_atomic(from, KM_USER1); |
49 | fa_copy_user_page(kto, kfrom); | 49 | fa_copy_user_page(kto, kfrom); |
50 | kunmap_atomic(kfrom); | 50 | kunmap_atomic(kfrom, KM_USER1); |
51 | kunmap_atomic(kto); | 51 | kunmap_atomic(kto, KM_USER0); |
52 | } | 52 | } |
53 | 53 | ||
54 | /* | 54 | /* |
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from, | |||
58 | */ | 58 | */ |
59 | void fa_clear_user_highpage(struct page *page, unsigned long vaddr) | 59 | void fa_clear_user_highpage(struct page *page, unsigned long vaddr) |
60 | { | 60 | { |
61 | void *ptr, *kaddr = kmap_atomic(page); | 61 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
62 | asm volatile("\ | 62 | asm volatile("\ |
63 | mov r1, %2 @ 1\n\ | 63 | mov r1, %2 @ 1\n\ |
64 | mov r2, #0 @ 1\n\ | 64 | mov r2, #0 @ 1\n\ |
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
77 | : "=r" (ptr) | 77 | : "=r" (ptr) |
78 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 78 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
79 | : "r1", "r2", "r3", "ip", "lr"); | 79 | : "r1", "r2", "r3", "ip", "lr"); |
80 | kunmap_atomic(kaddr); | 80 | kunmap_atomic(kaddr, KM_USER0); |
81 | } | 81 | } |
82 | 82 | ||
83 | struct cpu_user_fns fa_user_fns __initdata = { | 83 | struct cpu_user_fns fa_user_fns __initdata = { |
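The only change in this file is the kmap_atomic() calling convention: the patch restores the older API that names an explicit KM_USERn slot, where mainline had moved to the stacked, argument-free form. Side by side (illustrative; which variant compiles depends on the kernel headers in use):

	/* Stacked API (newer): slots are implicit, unmap in reverse order. */
	kto   = kmap_atomic(to);
	kfrom = kmap_atomic(from);
	fa_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom);
	kunmap_atomic(kto);

	/* Fixed-slot API (older, restored here): the caller names the slot. */
	kto   = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	fa_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);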
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 49ee0c1a720..ac163de7dc0 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c | |||
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, | |||
72 | { | 72 | { |
73 | void *kto, *kfrom; | 73 | void *kto, *kfrom; |
74 | 74 | ||
75 | kto = kmap_atomic(to); | 75 | kto = kmap_atomic(to, KM_USER0); |
76 | kfrom = kmap_atomic(from); | 76 | kfrom = kmap_atomic(from, KM_USER1); |
77 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | 77 | flush_cache_page(vma, vaddr, page_to_pfn(from)); |
78 | feroceon_copy_user_page(kto, kfrom); | 78 | feroceon_copy_user_page(kto, kfrom); |
79 | kunmap_atomic(kfrom); | 79 | kunmap_atomic(kfrom, KM_USER1); |
80 | kunmap_atomic(kto); | 80 | kunmap_atomic(kto, KM_USER0); |
81 | } | 81 | } |
82 | 82 | ||
83 | void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) | 83 | void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) |
84 | { | 84 | { |
85 | void *ptr, *kaddr = kmap_atomic(page); | 85 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
86 | asm volatile ("\ | 86 | asm volatile ("\ |
87 | mov r1, %2 \n\ | 87 | mov r1, %2 \n\ |
88 | mov r2, #0 \n\ | 88 | mov r2, #0 \n\ |
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
102 | : "=r" (ptr) | 102 | : "=r" (ptr) |
103 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 103 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
104 | : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); | 104 | : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); |
105 | kunmap_atomic(kaddr); | 105 | kunmap_atomic(kaddr, KM_USER0); |
106 | } | 106 | } |
107 | 107 | ||
108 | struct cpu_user_fns feroceon_user_fns __initdata = { | 108 | struct cpu_user_fns feroceon_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 1267e64133b..b8061519ce7 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
@@ -23,10 +23,14 @@ | |||
23 | 23 | ||
24 | #include "mm.h" | 24 | #include "mm.h" |
25 | 25 | ||
26 | /* | ||
27 | * 0xffff8000 to 0xffffffff is reserved for any ARM | ||
28 | * architecture-specific hacks for copying pages efficiently. | ||
29 | */ | ||
26 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ | 30 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ |
27 | L_PTE_MT_MINICACHE) | 31 | L_PTE_MT_MINICACHE) |
28 | 32 | ||
29 | static DEFINE_RAW_SPINLOCK(minicache_lock); | 33 | static DEFINE_SPINLOCK(minicache_lock); |
30 | 34 | ||
31 | /* | 35 | /* |
32 | * ARMv4 mini-dcache optimised copy_user_highpage | 36 | * ARMv4 mini-dcache optimised copy_user_highpage |
@@ -67,20 +71,21 @@ mc_copy_user_page(void *from, void *to) | |||
67 | void v4_mc_copy_user_highpage(struct page *to, struct page *from, | 71 | void v4_mc_copy_user_highpage(struct page *to, struct page *from, |
68 | unsigned long vaddr, struct vm_area_struct *vma) | 72 | unsigned long vaddr, struct vm_area_struct *vma) |
69 | { | 73 | { |
70 | void *kto = kmap_atomic(to); | 74 | void *kto = kmap_atomic(to, KM_USER1); |
71 | 75 | ||
72 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) | 76 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
73 | __flush_dcache_page(page_mapping(from), from); | 77 | __flush_dcache_page(page_mapping(from), from); |
74 | 78 | ||
75 | raw_spin_lock(&minicache_lock); | 79 | spin_lock(&minicache_lock); |
76 | 80 | ||
77 | set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); | 81 | set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); |
82 | flush_tlb_kernel_page(0xffff8000); | ||
78 | 83 | ||
79 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); | 84 | mc_copy_user_page((void *)0xffff8000, kto); |
80 | 85 | ||
81 | raw_spin_unlock(&minicache_lock); | 86 | spin_unlock(&minicache_lock); |
82 | 87 | ||
83 | kunmap_atomic(kto); | 88 | kunmap_atomic(kto, KM_USER1); |
84 | } | 89 | } |
85 | 90 | ||
86 | /* | 91 | /* |
@@ -88,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, | |||
88 | */ | 93 | */ |
89 | void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | 94 | void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
90 | { | 95 | { |
91 | void *ptr, *kaddr = kmap_atomic(page); | 96 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
92 | asm volatile("\ | 97 | asm volatile("\ |
93 | mov r1, %2 @ 1\n\ | 98 | mov r1, %2 @ 1\n\ |
94 | mov r2, #0 @ 1\n\ | 99 | mov r2, #0 @ 1\n\ |
@@ -106,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
106 | : "=r" (ptr) | 111 | : "=r" (ptr) |
107 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 112 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
108 | : "r1", "r2", "r3", "ip", "lr"); | 113 | : "r1", "r2", "r3", "ip", "lr"); |
109 | kunmap_atomic(kaddr); | 114 | kunmap_atomic(kaddr, KM_USER0); |
110 | } | 115 | } |
111 | 116 | ||
112 | struct cpu_user_fns v4_mc_user_fns __initdata = { | 117 | struct cpu_user_fns v4_mc_user_fns __initdata = { |
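The substantive change in this file is replacing the combined set_top_pte() helper with its two constituent steps. The restored pattern, distilled (address and pgprot as in the hunk above; the explicit TLB flush is what keeps the reused window coherent):

	/* Map the source page at the fixed minicache window... */
	set_pte_ext(TOP_PTE(0xffff8000),
		    pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	/* ...and drop any stale TLB entry for the window before using it;
	 * without this the CPU may still translate to the previous page. */
	flush_tlb_kernel_page(0xffff8000);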
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 067d0fdd630..cb589cbb2b6 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c | |||
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, | |||
52 | { | 52 | { |
53 | void *kto, *kfrom; | 53 | void *kto, *kfrom; |
54 | 54 | ||
55 | kto = kmap_atomic(to); | 55 | kto = kmap_atomic(to, KM_USER0); |
56 | kfrom = kmap_atomic(from); | 56 | kfrom = kmap_atomic(from, KM_USER1); |
57 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | 57 | flush_cache_page(vma, vaddr, page_to_pfn(from)); |
58 | v4wb_copy_user_page(kto, kfrom); | 58 | v4wb_copy_user_page(kto, kfrom); |
59 | kunmap_atomic(kfrom); | 59 | kunmap_atomic(kfrom, KM_USER1); |
60 | kunmap_atomic(kto); | 60 | kunmap_atomic(kto, KM_USER0); |
61 | } | 61 | } |
62 | 62 | ||
63 | /* | 63 | /* |
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, | |||
67 | */ | 67 | */ |
68 | void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) | 68 | void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) |
69 | { | 69 | { |
70 | void *ptr, *kaddr = kmap_atomic(page); | 70 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
71 | asm volatile("\ | 71 | asm volatile("\ |
72 | mov r1, %2 @ 1\n\ | 72 | mov r1, %2 @ 1\n\ |
73 | mov r2, #0 @ 1\n\ | 73 | mov r2, #0 @ 1\n\ |
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
86 | : "=r" (ptr) | 86 | : "=r" (ptr) |
87 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 87 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
88 | : "r1", "r2", "r3", "ip", "lr"); | 88 | : "r1", "r2", "r3", "ip", "lr"); |
89 | kunmap_atomic(kaddr); | 89 | kunmap_atomic(kaddr, KM_USER0); |
90 | } | 90 | } |
91 | 91 | ||
92 | struct cpu_user_fns v4wb_user_fns __initdata = { | 92 | struct cpu_user_fns v4wb_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index b85c5da2e51..30c7d048a32 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c | |||
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, | |||
48 | { | 48 | { |
49 | void *kto, *kfrom; | 49 | void *kto, *kfrom; |
50 | 50 | ||
51 | kto = kmap_atomic(to); | 51 | kto = kmap_atomic(to, KM_USER0); |
52 | kfrom = kmap_atomic(from); | 52 | kfrom = kmap_atomic(from, KM_USER1); |
53 | v4wt_copy_user_page(kto, kfrom); | 53 | v4wt_copy_user_page(kto, kfrom); |
54 | kunmap_atomic(kfrom); | 54 | kunmap_atomic(kfrom, KM_USER1); |
55 | kunmap_atomic(kto); | 55 | kunmap_atomic(kto, KM_USER0); |
56 | } | 56 | } |
57 | 57 | ||
58 | /* | 58 | /* |
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, | |||
62 | */ | 62 | */ |
63 | void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) | 63 | void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) |
64 | { | 64 | { |
65 | void *ptr, *kaddr = kmap_atomic(page); | 65 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
66 | asm volatile("\ | 66 | asm volatile("\ |
67 | mov r1, %2 @ 1\n\ | 67 | mov r1, %2 @ 1\n\ |
68 | mov r2, #0 @ 1\n\ | 68 | mov r2, #0 @ 1\n\ |
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
79 | : "=r" (ptr) | 79 | : "=r" (ptr) |
80 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 80 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
81 | : "r1", "r2", "r3", "ip", "lr"); | 81 | : "r1", "r2", "r3", "ip", "lr"); |
82 | kunmap_atomic(kaddr); | 82 | kunmap_atomic(kaddr, KM_USER0); |
83 | } | 83 | } |
84 | 84 | ||
85 | struct cpu_user_fns v4wt_user_fns __initdata = { | 85 | struct cpu_user_fns v4wt_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index b9bcc9d7917..63cca009713 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -24,7 +24,10 @@ | |||
24 | #error FIX ME | 24 | #error FIX ME |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | static DEFINE_RAW_SPINLOCK(v6_lock); | 27 | #define from_address (0xffff8000) |
28 | #define to_address (0xffffc000) | ||
29 | |||
30 | static DEFINE_SPINLOCK(v6_lock); | ||
28 | 31 | ||
29 | /* | 32 | /* |
30 | * Copy the user page. No aliasing to deal with so we can just | 33 | * Copy the user page. No aliasing to deal with so we can just |
@@ -35,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
35 | { | 38 | { |
36 | void *kto, *kfrom; | 39 | void *kto, *kfrom; |
37 | 40 | ||
38 | kfrom = kmap_atomic(from); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
39 | kto = kmap_atomic(to); | 42 | kto = kmap_atomic(to, KM_USER1); |
40 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
41 | kunmap_atomic(kto); | 44 | kunmap_atomic(kto, KM_USER1); |
42 | kunmap_atomic(kfrom); | 45 | kunmap_atomic(kfrom, KM_USER0); |
43 | } | 46 | } |
44 | 47 | ||
45 | /* | 48 | /* |
@@ -48,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
48 | */ | 51 | */ |
49 | static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) | 52 | static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) |
50 | { | 53 | { |
51 | void *kaddr = kmap_atomic(page); | 54 | void *kaddr = kmap_atomic(page, KM_USER0); |
52 | clear_page(kaddr); | 55 | clear_page(kaddr); |
53 | kunmap_atomic(kaddr); | 56 | kunmap_atomic(kaddr, KM_USER0); |
54 | } | 57 | } |
55 | 58 | ||
56 | /* | 59 | /* |
@@ -85,17 +88,20 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
85 | * Now copy the page using the same cache colour as the | 88 | * Now copy the page using the same cache colour as the |
86 | * page's ultimate destination. | 89 | * page's ultimate destination.
87 | */ | 90 | */ |
88 | raw_spin_lock(&v6_lock); | 91 | spin_lock(&v6_lock); |
92 | |||
93 | set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); | ||
94 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); | ||
89 | 95 | ||
90 | kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); | 96 | kfrom = from_address + (offset << PAGE_SHIFT); |
91 | kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); | 97 | kto = to_address + (offset << PAGE_SHIFT); |
92 | 98 | ||
93 | set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL)); | 99 | flush_tlb_kernel_page(kfrom); |
94 | set_top_pte(kto, mk_pte(to, PAGE_KERNEL)); | 100 | flush_tlb_kernel_page(kto); |
95 | 101 | ||
96 | copy_page((void *)kto, (void *)kfrom); | 102 | copy_page((void *)kto, (void *)kfrom); |
97 | 103 | ||
98 | raw_spin_unlock(&v6_lock); | 104 | spin_unlock(&v6_lock); |
99 | } | 105 | } |
100 | 106 | ||
101 | /* | 107 | /* |
@@ -105,7 +111,8 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
105 | */ | 111 | */ |
106 | static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) | 112 | static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) |
107 | { | 113 | { |
108 | unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); | 114 | unsigned int offset = CACHE_COLOUR(vaddr); |
115 | unsigned long to = to_address + (offset << PAGE_SHIFT); | ||
109 | 116 | ||
110 | /* FIXME: not highmem safe */ | 117 | /* FIXME: not highmem safe */ |
111 | discard_old_kernel_data(page_address(page)); | 118 | discard_old_kernel_data(page_address(page)); |
@@ -114,12 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad | |||
114 | * Now clear the page using the same cache colour as | 121 | * Now clear the page using the same cache colour as |
115 | * the page's ultimate destination. | 122 | * the page's ultimate destination.
116 | */ | 123 | */ |
117 | raw_spin_lock(&v6_lock); | 124 | spin_lock(&v6_lock); |
118 | 125 | ||
119 | set_top_pte(to, mk_pte(page, PAGE_KERNEL)); | 126 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); |
127 | flush_tlb_kernel_page(to); | ||
120 | clear_page((void *)to); | 128 | clear_page((void *)to); |
121 | 129 | ||
122 | raw_spin_unlock(&v6_lock); | 130 | spin_unlock(&v6_lock); |
123 | } | 131 | } |
124 | 132 | ||
125 | struct cpu_user_fns v6_user_fns __initdata = { | 133 | struct cpu_user_fns v6_user_fns __initdata = { |
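On an aliasing VIPT cache the kernel must pick an alias window whose virtual colour matches the user mapping, which is what the CACHE_COLOUR() arithmetic above does. A self-contained sketch of the computation (the 4-colour assumption matches a 16KB cache with 4KB pages and is illustrative only):

	#include <stdio.h>

	#define PAGE_SHIFT   12
	#define NUM_COLOURS  4	/* assumption: cache way size = 4 pages */
	#define CACHE_COLOUR(vaddr) (((vaddr) >> PAGE_SHIFT) & (NUM_COLOURS - 1))

	int main(void)
	{
		unsigned long to_address = 0xffffc000;	/* window base from the file */
		unsigned long user_vaddr = 0x40003000;	/* example user address */
		unsigned long kto = to_address +
			(CACHE_COLOUR(user_vaddr) << PAGE_SHIFT);

		/* colour 3 -> window page 3: kto == 0xfffff000 */
		printf("colour=%lu kto=%#lx\n", CACHE_COLOUR(user_vaddr), kto);
		return 0;
	}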
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 03a2042aced..f9cde0702f1 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c | |||
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, | |||
75 | { | 75 | { |
76 | void *kto, *kfrom; | 76 | void *kto, *kfrom; |
77 | 77 | ||
78 | kto = kmap_atomic(to); | 78 | kto = kmap_atomic(to, KM_USER0); |
79 | kfrom = kmap_atomic(from); | 79 | kfrom = kmap_atomic(from, KM_USER1); |
80 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | 80 | flush_cache_page(vma, vaddr, page_to_pfn(from)); |
81 | xsc3_mc_copy_user_page(kto, kfrom); | 81 | xsc3_mc_copy_user_page(kto, kfrom); |
82 | kunmap_atomic(kfrom); | 82 | kunmap_atomic(kfrom, KM_USER1); |
83 | kunmap_atomic(kto); | 83 | kunmap_atomic(kto, KM_USER0); |
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, | |||
90 | */ | 90 | */ |
91 | void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | 91 | void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
92 | { | 92 | { |
93 | void *ptr, *kaddr = kmap_atomic(page); | 93 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
94 | asm volatile ("\ | 94 | asm volatile ("\ |
95 | mov r1, %2 \n\ | 95 | mov r1, %2 \n\ |
96 | mov r2, #0 \n\ | 96 | mov r2, #0 \n\ |
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
105 | : "=r" (ptr) | 105 | : "=r" (ptr) |
106 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 106 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
107 | : "r1", "r2", "r3"); | 107 | : "r1", "r2", "r3"); |
108 | kunmap_atomic(kaddr); | 108 | kunmap_atomic(kaddr, KM_USER0); |
109 | } | 109 | } |
110 | 110 | ||
111 | struct cpu_user_fns xsc3_mc_user_fns __initdata = { | 111 | struct cpu_user_fns xsc3_mc_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 0fb85025344..649bbcd325b 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
@@ -23,10 +23,16 @@ | |||
23 | 23 | ||
24 | #include "mm.h" | 24 | #include "mm.h" |
25 | 25 | ||
26 | /* | ||
27 | * 0xffff8000 to 0xffffffff is reserved for any ARM | ||
28 | * architecture-specific hacks for copying pages efficiently. | ||
29 | */ | ||
30 | #define COPYPAGE_MINICACHE 0xffff8000 | ||
31 | |||
26 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ | 32 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ |
27 | L_PTE_MT_MINICACHE) | 33 | L_PTE_MT_MINICACHE) |
28 | 34 | ||
29 | static DEFINE_RAW_SPINLOCK(minicache_lock); | 35 | static DEFINE_SPINLOCK(minicache_lock); |
30 | 36 | ||
31 | /* | 37 | /* |
32 | * XScale mini-dcache optimised copy_user_highpage | 38 | * XScale mini-dcache optimised copy_user_highpage |
@@ -87,20 +93,21 @@ mc_copy_user_page(void *from, void *to) | |||
87 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | 93 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, |
88 | unsigned long vaddr, struct vm_area_struct *vma) | 94 | unsigned long vaddr, struct vm_area_struct *vma) |
89 | { | 95 | { |
90 | void *kto = kmap_atomic(to); | 96 | void *kto = kmap_atomic(to, KM_USER1); |
91 | 97 | ||
92 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) | 98 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
93 | __flush_dcache_page(page_mapping(from), from); | 99 | __flush_dcache_page(page_mapping(from), from); |
94 | 100 | ||
95 | raw_spin_lock(&minicache_lock); | 101 | spin_lock(&minicache_lock); |
96 | 102 | ||
97 | set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); | 103 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); |
104 | flush_tlb_kernel_page(COPYPAGE_MINICACHE); | ||
98 | 105 | ||
99 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); | 106 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); |
100 | 107 | ||
101 | raw_spin_unlock(&minicache_lock); | 108 | spin_unlock(&minicache_lock); |
102 | 109 | ||
103 | kunmap_atomic(kto); | 110 | kunmap_atomic(kto, KM_USER1); |
104 | } | 111 | } |
105 | 112 | ||
106 | /* | 113 | /* |
@@ -109,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | |||
109 | void | 116 | void |
110 | xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | 117 | xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
111 | { | 118 | { |
112 | void *ptr, *kaddr = kmap_atomic(page); | 119 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); |
113 | asm volatile( | 120 | asm volatile( |
114 | "mov r1, %2 \n\ | 121 | "mov r1, %2 \n\ |
115 | mov r2, #0 \n\ | 122 | mov r2, #0 \n\ |
@@ -126,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
126 | : "=r" (ptr) | 133 | : "=r" (ptr) |
127 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 134 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
128 | : "r1", "r2", "r3", "ip"); | 135 | : "r1", "r2", "r3", "ip"); |
129 | kunmap_atomic(kaddr); | 136 | kunmap_atomic(kaddr, KM_USER0); |
130 | } | 137 | } |
131 | 138 | ||
132 | struct cpu_user_fns xscale_mc_user_fns __initdata = { | 139 | struct cpu_user_fns xscale_mc_user_fns __initdata = { |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 6b2fb87c869..9cd5334019e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -17,146 +17,16 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/dma-contiguous.h> | ||
21 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
22 | #include <linux/memblock.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/iommu.h> | ||
25 | #include <linux/io.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <linux/sizes.h> | ||
28 | 21 | ||
29 | #include <asm/memory.h> | 22 | #include <asm/memory.h> |
30 | #include <asm/highmem.h> | 23 | #include <asm/highmem.h> |
31 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
32 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
33 | #include <asm/mach/arch.h> | 26 | #include <asm/sizes.h> |
34 | #include <asm/dma-iommu.h> | ||
35 | #include <asm/mach/map.h> | ||
36 | #include <asm/system_info.h> | ||
37 | #include <asm/dma-contiguous.h> | ||
38 | 27 | ||
39 | #include "mm.h" | 28 | #include "mm.h" |
40 | 29 | ||
41 | /* | ||
42 | * The DMA API is built upon the notion of "buffer ownership". A buffer | ||
43 | * is either exclusively owned by the CPU (and therefore may be accessed | ||
44 | * by it) or exclusively owned by the DMA device. These helper functions | ||
45 | * represent the transitions between these two ownership states. | ||
46 | * | ||
47 | * Note, however, that on later ARMs, this notion does not work due to | ||
48 | * speculative prefetches. We model our approach on the assumption that | ||
49 | * the CPU does do speculative prefetches, which means we clean caches | ||
50 | * before transfers and delay cache invalidation until transfer completion. | ||
51 | * | ||
52 | */ | ||
53 | static void __dma_page_cpu_to_dev(struct page *, unsigned long, | ||
54 | size_t, enum dma_data_direction); | ||
55 | static void __dma_page_dev_to_cpu(struct page *, unsigned long, | ||
56 | size_t, enum dma_data_direction); | ||
57 | |||
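The ownership rules in the comment above are what the streaming API enforces for drivers. A hedged driver-side sketch of the handshake (start_transfer() is a hypothetical device-specific helper; error handling elided):

	/* CPU -> device: dma_map_page() performs the needed cache maintenance. */
	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);

	start_transfer(dev, handle, len);	/* hypothetical: device DMAs into page */

	/* device -> CPU: after this the CPU safely sees what the device wrote. */
	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);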
58 | /** | ||
59 | * arm_dma_map_page - map a portion of a page for streaming DMA | ||
60 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
61 | * @page: page that buffer resides in | ||
62 | * @offset: offset into page for start of buffer | ||
63 | * @size: size of buffer to map | ||
64 | * @dir: DMA transfer direction | ||
65 | * | ||
66 | * Ensure that any data held in the cache is appropriately discarded | ||
67 | * or written back. | ||
68 | * | ||
69 | * The device owns this memory once this call has completed. The CPU | ||
70 | * can regain ownership by calling dma_unmap_page(). | ||
71 | */ | ||
72 | static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, | ||
73 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
74 | struct dma_attrs *attrs) | ||
75 | { | ||
76 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
77 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
78 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | ||
79 | } | ||
80 | |||
81 | static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, | ||
82 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
83 | struct dma_attrs *attrs) | ||
84 | { | ||
85 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() | ||
90 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | ||
91 | * @handle: DMA address of buffer | ||
92 | * @size: size of buffer (same as passed to dma_map_page) | ||
93 | * @dir: DMA transfer direction (same as passed to dma_map_page) | ||
94 | * | ||
95 | * Unmap a page streaming mode DMA translation. The handle and size | ||
96 | * must match what was provided in the previous dma_map_page() call. | ||
97 | * All other usages are undefined. | ||
98 | * | ||
99 | * After this call, reads by the CPU to the buffer are guaranteed to see | ||
100 | * whatever the device wrote there. | ||
101 | */ | ||
102 | static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, | ||
103 | size_t size, enum dma_data_direction dir, | ||
104 | struct dma_attrs *attrs) | ||
105 | { | ||
106 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
107 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), | ||
108 | handle & ~PAGE_MASK, size, dir); | ||
109 | } | ||
110 | |||
111 | static void arm_dma_sync_single_for_cpu(struct device *dev, | ||
112 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
113 | { | ||
114 | unsigned int offset = handle & (PAGE_SIZE - 1); | ||
115 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); | ||
116 | __dma_page_dev_to_cpu(page, offset, size, dir); | ||
117 | } | ||
118 | |||
119 | static void arm_dma_sync_single_for_device(struct device *dev, | ||
120 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
121 | { | ||
122 | unsigned int offset = handle & (PAGE_SIZE - 1); | ||
123 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); | ||
124 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
125 | } | ||
126 | |||
127 | struct dma_map_ops arm_dma_ops = { | ||
128 | .alloc = arm_dma_alloc, | ||
129 | .free = arm_dma_free, | ||
130 | .mmap = arm_dma_mmap, | ||
131 | .get_sgtable = arm_dma_get_sgtable, | ||
132 | .map_page = arm_dma_map_page, | ||
133 | .unmap_page = arm_dma_unmap_page, | ||
134 | .map_sg = arm_dma_map_sg, | ||
135 | .unmap_sg = arm_dma_unmap_sg, | ||
136 | .sync_single_for_cpu = arm_dma_sync_single_for_cpu, | ||
137 | .sync_single_for_device = arm_dma_sync_single_for_device, | ||
138 | .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, | ||
139 | .sync_sg_for_device = arm_dma_sync_sg_for_device, | ||
140 | .set_dma_mask = arm_dma_set_mask, | ||
141 | }; | ||
142 | EXPORT_SYMBOL(arm_dma_ops); | ||
143 | |||
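This hunk removes the dma_map_ops indirection (left column) in favour of the older direct-call API (right column). For reference, a simplified sketch of how a generic driver call dispatched through the removed table, assuming the get_dma_ops() accessor of that era:

	static inline dma_addr_t dma_map_page_sketch(struct device *dev,
			struct page *page, unsigned long offset, size_t size,
			enum dma_data_direction dir)
	{
		struct dma_map_ops *ops = get_dma_ops(dev);	/* arm_dma_ops by default */

		/* lands in arm_dma_map_page(), which does the cache maintenance */
		return ops->map_page(dev, page, offset, size, dir, NULL);
	}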
144 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | ||
145 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs); | ||
146 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, | ||
147 | dma_addr_t handle, struct dma_attrs *attrs); | ||
148 | |||
149 | struct dma_map_ops arm_coherent_dma_ops = { | ||
150 | .alloc = arm_coherent_dma_alloc, | ||
151 | .free = arm_coherent_dma_free, | ||
152 | .mmap = arm_dma_mmap, | ||
153 | .get_sgtable = arm_dma_get_sgtable, | ||
154 | .map_page = arm_coherent_dma_map_page, | ||
155 | .map_sg = arm_dma_map_sg, | ||
156 | .set_dma_mask = arm_dma_set_mask, | ||
157 | }; | ||
158 | EXPORT_SYMBOL(arm_coherent_dma_ops); | ||
159 | |||
160 | static u64 get_coherent_dma_mask(struct device *dev) | 30 | static u64 get_coherent_dma_mask(struct device *dev) |
161 | { | 31 | { |
162 | u64 mask = (u64)arm_dma_limit; | 32 | u64 mask = (u64)arm_dma_limit; |
@@ -184,21 +54,6 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
184 | return mask; | 54 | return mask; |
185 | } | 55 | } |
186 | 56 | ||
187 | static void __dma_clear_buffer(struct page *page, size_t size) | ||
188 | { | ||
189 | void *ptr; | ||
190 | /* | ||
191 | * Ensure that the allocated pages are zeroed, and that any data | ||
192 | * lurking in the kernel direct-mapped region is invalidated. | ||
193 | */ | ||
194 | ptr = page_address(page); | ||
195 | if (ptr) { | ||
196 | memset(ptr, 0, size); | ||
197 | dmac_flush_range(ptr, ptr + size); | ||
198 | outer_flush_range(__pa(ptr), __pa(ptr) + size); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* | 57 | /* |
203 | * Allocate a DMA buffer for 'dev' of size 'size' using the | 58 | * Allocate a DMA buffer for 'dev' of size 'size' using the |
204 | * specified gfp mask. Note that 'size' must be page aligned. | 59 | * specified gfp mask. Note that 'size' must be page aligned. |
@@ -207,6 +62,23 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf | |||
207 | { | 62 | { |
208 | unsigned long order = get_order(size); | 63 | unsigned long order = get_order(size); |
209 | struct page *page, *p, *e; | 64 | struct page *page, *p, *e; |
65 | void *ptr; | ||
66 | u64 mask = get_coherent_dma_mask(dev); | ||
67 | |||
68 | #ifdef CONFIG_DMA_API_DEBUG | ||
69 | u64 limit = (mask + 1) & ~mask; | ||
70 | if (limit && size >= limit) { | ||
71 | dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", | ||
72 | size, mask); | ||
73 | return NULL; | ||
74 | } | ||
75 | #endif | ||
76 | |||
77 | if (!mask) | ||
78 | return NULL; | ||
79 | |||
80 | if (mask < 0xffffffffULL) | ||
81 | gfp |= GFP_DMA; | ||
210 | 82 | ||
211 | page = alloc_pages(gfp, order); | 83 | page = alloc_pages(gfp, order); |
212 | if (!page) | 84 | if (!page) |
@@ -219,7 +91,14 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf | |||
219 | for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) | 91 | for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) |
220 | __free_page(p); | 92 | __free_page(p); |
221 | 93 | ||
222 | __dma_clear_buffer(page, size); | 94 | /* |
95 | * Ensure that the allocated pages are zeroed, and that any data | ||
96 | * lurking in the kernel direct-mapped region is invalidated. | ||
97 | */ | ||
98 | ptr = page_address(page); | ||
99 | memset(ptr, 0, size); | ||
100 | dmac_flush_range(ptr, ptr + size); | ||
101 | outer_flush_range(__pa(ptr), __pa(ptr) + size); | ||
223 | 102 | ||
224 | return page; | 103 | return page; |
225 | } | 104 | } |
@@ -238,417 +117,222 @@ static void __dma_free_buffer(struct page *page, size_t size) | |||
238 | } | 117 | } |
239 | 118 | ||
240 | #ifdef CONFIG_MMU | 119 | #ifdef CONFIG_MMU |
241 | #ifdef CONFIG_HUGETLB_PAGE | 120 | /* Sanity check size */ |
242 | #error ARM Coherent DMA allocator does not (yet) support huge TLB | 121 | #if (CONSISTENT_DMA_SIZE % SZ_2M) |
122 | #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" | ||
243 | #endif | 123 | #endif |
244 | 124 | ||
245 | static void *__alloc_from_contiguous(struct device *dev, size_t size, | 125 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) |
246 | pgprot_t prot, struct page **ret_page); | 126 | #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) |
247 | 127 | #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) | |
248 | static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, | ||
249 | pgprot_t prot, struct page **ret_page, | ||
250 | const void *caller); | ||
251 | |||
252 | static void * | ||
253 | __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, | ||
254 | const void *caller) | ||
255 | { | ||
256 | struct vm_struct *area; | ||
257 | unsigned long addr; | ||
258 | |||
259 | /* | ||
260 | * DMA allocation can be mapped to user space, so let's | ||
261 | * set VM_USERMAP flags too. | ||
262 | */ | ||
263 | area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, | ||
264 | caller); | ||
265 | if (!area) | ||
266 | return NULL; | ||
267 | addr = (unsigned long)area->addr; | ||
268 | area->phys_addr = __pfn_to_phys(page_to_pfn(page)); | ||
269 | |||
270 | if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { | ||
271 | vunmap((void *)addr); | ||
272 | return NULL; | ||
273 | } | ||
274 | return (void *)addr; | ||
275 | } | ||
276 | |||
277 | static void __dma_free_remap(void *cpu_addr, size_t size) | ||
278 | { | ||
279 | unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; | ||
280 | struct vm_struct *area = find_vm_area(cpu_addr); | ||
281 | if (!area || (area->flags & flags) != flags) { | ||
282 | WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); | ||
283 | return; | ||
284 | } | ||
285 | unmap_kernel_range((unsigned long)cpu_addr, size); | ||
286 | vunmap(cpu_addr); | ||
287 | } | ||
288 | |||
289 | #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K | ||
290 | |||
291 | struct dma_pool { | ||
292 | size_t size; | ||
293 | spinlock_t lock; | ||
294 | unsigned long *bitmap; | ||
295 | unsigned long nr_pages; | ||
296 | void *vaddr; | ||
297 | struct page **pages; | ||
298 | }; | ||
299 | |||
300 | static struct dma_pool atomic_pool = { | ||
301 | .size = DEFAULT_DMA_COHERENT_POOL_SIZE, | ||
302 | }; | ||
303 | |||
304 | static int __init early_coherent_pool(char *p) | ||
305 | { | ||
306 | atomic_pool.size = memparse(p, &p); | ||
307 | return 0; | ||
308 | } | ||
309 | early_param("coherent_pool", early_coherent_pool); | ||
310 | |||
311 | void __init init_dma_coherent_pool_size(unsigned long size) | ||
312 | { | ||
313 | /* | ||
314 | * Catch any attempt to set the pool size too late. | ||
315 | */ | ||
316 | BUG_ON(atomic_pool.vaddr); | ||
317 | |||
318 | /* | ||
319 | * Set architecture specific coherent pool size only if | ||
320 | * it has not been changed by kernel command line parameter. | ||
321 | */ | ||
322 | if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) | ||
323 | atomic_pool.size = size; | ||
324 | } | ||
325 | 128 | ||
326 | /* | 129 | /* |
327 | * Initialise the coherent pool for atomic allocations. | 130 | * These are the page tables (2MB each) covering uncached, DMA consistent allocations |
328 | */ | 131 | */ |
329 | static int __init atomic_pool_init(void) | 132 | static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; |
330 | { | ||
331 | struct dma_pool *pool = &atomic_pool; | ||
332 | pgprot_t prot = pgprot_dmacoherent(pgprot_kernel); | ||
333 | unsigned long nr_pages = pool->size >> PAGE_SHIFT; | ||
334 | unsigned long *bitmap; | ||
335 | struct page *page; | ||
336 | struct page **pages; | ||
337 | void *ptr; | ||
338 | int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); | ||
339 | 133 | ||
340 | bitmap = kzalloc(bitmap_size, GFP_KERNEL); | 134 | #include "vmregion.h" |
341 | if (!bitmap) | ||
342 | goto no_bitmap; | ||
343 | 135 | ||
344 | pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); | 136 | static struct arm_vmregion_head consistent_head = { |
345 | if (!pages) | 137 | .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), |
346 | goto no_pages; | 138 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), |
139 | .vm_start = CONSISTENT_BASE, | ||
140 | .vm_end = CONSISTENT_END, | ||
141 | }; | ||
347 | 142 | ||
348 | if (IS_ENABLED(CONFIG_CMA)) | 143 | #ifdef CONFIG_HUGETLB_PAGE |
349 | ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); | 144 | #error ARM Coherent DMA allocator does not (yet) support huge TLB |
350 | else | 145 | #endif |
351 | ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, | ||
352 | &page, NULL); | ||
353 | if (ptr) { | ||
354 | int i; | ||
355 | |||
356 | for (i = 0; i < nr_pages; i++) | ||
357 | pages[i] = page + i; | ||
358 | |||
359 | spin_lock_init(&pool->lock); | ||
360 | pool->vaddr = ptr; | ||
361 | pool->pages = pages; | ||
362 | pool->bitmap = bitmap; | ||
363 | pool->nr_pages = nr_pages; | ||
364 | pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", | ||
365 | (unsigned)pool->size / 1024); | ||
366 | return 0; | ||
367 | } | ||
368 | 146 | ||
369 | kfree(pages); | ||
370 | no_pages: | ||
371 | kfree(bitmap); | ||
372 | no_bitmap: | ||
373 | pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", | ||
374 | (unsigned)pool->size / 1024); | ||
375 | return -ENOMEM; | ||
376 | } | ||
377 | /* | 147 | /* |
378 | * CMA is activated by core_initcall, so we must be called after it. | 148 | * Initialise the consistent memory allocation. |
379 | */ | 149 | */ |
380 | postcore_initcall(atomic_pool_init); | 150 | static int __init consistent_init(void) |
381 | |||
382 | struct dma_contig_early_reserve { | ||
383 | phys_addr_t base; | ||
384 | unsigned long size; | ||
385 | }; | ||
386 | |||
387 | static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata; | ||
388 | |||
389 | static int dma_mmu_remap_num __initdata; | ||
390 | |||
391 | void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) | ||
392 | { | 151 | { |
393 | dma_mmu_remap[dma_mmu_remap_num].base = base; | 152 | int ret = 0; |
394 | dma_mmu_remap[dma_mmu_remap_num].size = size; | 153 | pgd_t *pgd; |
395 | dma_mmu_remap_num++; | 154 | pud_t *pud; |
396 | } | 155 | pmd_t *pmd; |
156 | pte_t *pte; | ||
157 | int i = 0; | ||
158 | u32 base = CONSISTENT_BASE; | ||
397 | 159 | ||
398 | void __init dma_contiguous_remap(void) | 160 | do { |
399 | { | 161 | pgd = pgd_offset(&init_mm, base); |
400 | int i; | ||
401 | for (i = 0; i < dma_mmu_remap_num; i++) { | ||
402 | phys_addr_t start = dma_mmu_remap[i].base; | ||
403 | phys_addr_t end = start + dma_mmu_remap[i].size; | ||
404 | struct map_desc map; | ||
405 | unsigned long addr; | ||
406 | |||
407 | if (end > arm_lowmem_limit) | ||
408 | end = arm_lowmem_limit; | ||
409 | if (start >= end) | ||
410 | continue; | ||
411 | 162 | ||
412 | map.pfn = __phys_to_pfn(start); | 163 | pud = pud_alloc(&init_mm, pgd, base); |
413 | map.virtual = __phys_to_virt(start); | 164 | if (!pud) { |
414 | map.length = end - start; | 165 | printk(KERN_ERR "%s: no pud tables\n", __func__); |
415 | map.type = MT_MEMORY_DMA_READY; | 166 | ret = -ENOMEM; |
167 | break; | ||
168 | } | ||
416 | 169 | ||
417 | /* | 170 | pmd = pmd_alloc(&init_mm, pud, base); |
418 | * Clear previous low-memory mapping | 171 | if (!pmd) { |
419 | */ | 172 | printk(KERN_ERR "%s: no pmd tables\n", __func__); |
420 | for (addr = __phys_to_virt(start); addr < __phys_to_virt(end); | 173 | ret = -ENOMEM; |
421 | addr += PMD_SIZE) | 174 | break; |
422 | pmd_clear(pmd_off_k(addr)); | 175 | } |
176 | WARN_ON(!pmd_none(*pmd)); | ||
423 | 177 | ||
424 | iotable_init(&map, 1); | 178 | pte = pte_alloc_kernel(pmd, base); |
425 | } | 179 | if (!pte) { |
426 | } | 180 | printk(KERN_ERR "%s: no pte tables\n", __func__); |
181 | ret = -ENOMEM; | ||
182 | break; | ||
183 | } | ||
427 | 184 | ||
428 | static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr, | 185 | consistent_pte[i++] = pte; |
429 | void *data) | 186 | base += (1 << PGDIR_SHIFT); |
430 | { | 187 | } while (base < CONSISTENT_END); |
431 | struct page *page = virt_to_page(addr); | ||
432 | pgprot_t prot = *(pgprot_t *)data; | ||
433 | 188 | ||
434 | set_pte_ext(pte, mk_pte(page, prot), 0); | 189 | return ret; |
435 | return 0; | ||
436 | } | 190 | } |
437 | 191 | ||
438 | static void __dma_remap(struct page *page, size_t size, pgprot_t prot) | 192 | core_initcall(consistent_init); |
439 | { | ||
440 | unsigned long start = (unsigned long) page_address(page); | ||
441 | unsigned end = start + size; | ||
442 | |||
443 | apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot); | ||
444 | dsb(); | ||
445 | flush_tlb_kernel_range(start, end); | ||
446 | } | ||
447 | 193 | ||
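The CONSISTENT_OFFSET()/CONSISTENT_PTE_INDEX() macros split a consistent-region address into a PTE-table index and an entry offset; consistent_init() above pre-allocates one PTE table per 2MB PGDIR span. The lookup, spelled out (a fragment; names as defined earlier in this file):

	/* For an address inside [CONSISTENT_BASE, CONSISTENT_END): */
	idx = CONSISTENT_PTE_INDEX(addr);			/* which 2MB-span PTE table */
	off = CONSISTENT_OFFSET(addr) & (PTRS_PER_PTE - 1);	/* entry inside it */
	pte = consistent_pte[idx] + off;			/* the PTE backing 'addr' */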
448 | static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp, | 194 | static void * |
449 | pgprot_t prot, struct page **ret_page, | 195 | __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) |
450 | const void *caller) | ||
451 | { | 196 | { |
452 | struct page *page; | 197 | struct arm_vmregion *c; |
453 | void *ptr; | 198 | size_t align; |
454 | page = __dma_alloc_buffer(dev, size, gfp); | 199 | int bit; |
455 | if (!page) | ||
456 | return NULL; | ||
457 | 200 | ||
458 | ptr = __dma_alloc_remap(page, size, gfp, prot, caller); | 201 | if (!consistent_pte[0]) { |
459 | if (!ptr) { | 202 | printk(KERN_ERR "%s: not initialised\n", __func__); |
460 | __dma_free_buffer(page, size); | 203 | dump_stack(); |
461 | return NULL; | ||
462 | } | ||
463 | |||
464 | *ret_page = page; | ||
465 | return ptr; | ||
466 | } | ||
467 | |||
468 | static void *__alloc_from_pool(size_t size, struct page **ret_page) | ||
469 | { | ||
470 | struct dma_pool *pool = &atomic_pool; | ||
471 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
472 | unsigned int pageno; | ||
473 | unsigned long flags; | ||
474 | void *ptr = NULL; | ||
475 | unsigned long align_mask; | ||
476 | |||
477 | if (!pool->vaddr) { | ||
478 | WARN(1, "coherent pool not initialised!\n"); | ||
479 | return NULL; | 204 | return NULL; |
480 | } | 205 | } |
481 | 206 | ||
482 | /* | 207 | /* |
483 | * Align the region allocation - allocations from pool are rather | 208 | * Align the virtual region allocation - maximum alignment is |
484 | * small, so align them to their order in pages, minimum is a page | 209 | * a section size, minimum is a page size. This helps reduce |
485 | * size. This helps reduce fragmentation of the DMA space. | 210 | * fragmentation of the DMA space, and also prevents allocations |
211 | * smaller than a section from crossing a section boundary. | ||
486 | */ | 212 | */ |
487 | align_mask = (1 << get_order(size)) - 1; | 213 | bit = fls(size - 1); |
488 | 214 | if (bit > SECTION_SHIFT) | |
489 | spin_lock_irqsave(&pool->lock, flags); | 215 | bit = SECTION_SHIFT; |
490 | pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, | 216 | align = 1 << bit; |
491 | 0, count, align_mask); | ||
492 | if (pageno < pool->nr_pages) { | ||
493 | bitmap_set(pool->bitmap, pageno, count); | ||
494 | ptr = pool->vaddr + PAGE_SIZE * pageno; | ||
495 | *ret_page = pool->pages[pageno]; | ||
496 | } else { | ||
497 | pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" | ||
498 | "Please increase it with coherent_pool= kernel parameter!\n", | ||
499 | (unsigned)pool->size / 1024); | ||
500 | } | ||
501 | spin_unlock_irqrestore(&pool->lock, flags); | ||
502 | 217 | ||
503 | return ptr; | 218 | /* |
504 | } | 219 | * Allocate a virtual address in the consistent mapping region. |
505 | 220 | */ | |
506 | static bool __in_atomic_pool(void *start, size_t size) | 221 | c = arm_vmregion_alloc(&consistent_head, align, size, |
507 | { | 222 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); |
508 | struct dma_pool *pool = &atomic_pool; | 223 | if (c) { |
509 | void *end = start + size; | 224 | pte_t *pte; |
510 | void *pool_start = pool->vaddr; | 225 | int idx = CONSISTENT_PTE_INDEX(c->vm_start); |
511 | void *pool_end = pool->vaddr + pool->size; | 226 | u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); |
512 | 227 | ||
513 | if (start < pool_start || start >= pool_end) | 228 | pte = consistent_pte[idx] + off; |
514 | return false; | 229 | c->vm_pages = page; |
515 | 230 | ||
516 | if (end <= pool_end) | 231 | do { |
517 | return true; | 232 | BUG_ON(!pte_none(*pte)); |
518 | 233 | ||
519 | WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", | 234 | set_pte_ext(pte, mk_pte(page, prot), 0); |
520 | start, end - 1, pool_start, pool_end - 1); | 235 | page++; |
521 | 236 | pte++; | |
522 | return false; | 237 | off++; |
523 | } | 238 | if (off >= PTRS_PER_PTE) { |
524 | 239 | off = 0; | |
525 | static int __free_from_pool(void *start, size_t size) | 240 | pte = consistent_pte[++idx]; |
526 | { | 241 | } |
527 | struct dma_pool *pool = &atomic_pool; | 242 | } while (size -= PAGE_SIZE); |
528 | unsigned long pageno, count; | ||
529 | unsigned long flags; | ||
530 | |||
531 | if (!__in_atomic_pool(start, size)) | ||
532 | return 0; | ||
533 | |||
534 | pageno = (start - pool->vaddr) >> PAGE_SHIFT; | ||
535 | count = size >> PAGE_SHIFT; | ||
536 | 243 | ||
537 | spin_lock_irqsave(&pool->lock, flags); | 244 | dsb(); |
538 | bitmap_clear(pool->bitmap, pageno, count); | ||
539 | spin_unlock_irqrestore(&pool->lock, flags); | ||
540 | 245 | ||
541 | return 1; | 246 | return (void *)c->vm_start; |
247 | } | ||
248 | return NULL; | ||
542 | } | 249 | } |
543 | 250 | ||
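The alignment computed in __dma_alloc_remap() rounds the request up to its own power-of-two order but never past a section, so small allocations cannot straddle a section boundary. Worked through for a 24KB request (SECTION_SHIFT is 20, i.e. 1MB sections, on classic ARM):

	size  = 24 * 1024;		/* 24KB request */
	bit   = fls(size - 1);		/* fls(0x5fff) == 15 */
	if (bit > SECTION_SHIFT)	/* 15 <= 20: no capping needed */
		bit = SECTION_SHIFT;
	align = 1 << bit;		/* 32KB alignment for a 24KB allocation */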
544 | static void *__alloc_from_contiguous(struct device *dev, size_t size, | 251 | static void __dma_free_remap(void *cpu_addr, size_t size) |
545 | pgprot_t prot, struct page **ret_page) | ||
546 | { | 252 | { |
547 | unsigned long order = get_order(size); | 253 | struct arm_vmregion *c; |
548 | size_t count = size >> PAGE_SHIFT; | 254 | unsigned long addr; |
549 | struct page *page; | 255 | pte_t *ptep; |
256 | int idx; | ||
257 | u32 off; | ||
258 | |||
259 | c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); | ||
260 | if (!c) { | ||
261 | printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", | ||
262 | __func__, cpu_addr); | ||
263 | dump_stack(); | ||
264 | return; | ||
265 | } | ||
550 | 266 | ||
551 | page = dma_alloc_from_contiguous(dev, count, order); | 267 | if ((c->vm_end - c->vm_start) != size) { |
552 | if (!page) | 268 | printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", |
553 | return NULL; | 269 | __func__, c->vm_end - c->vm_start, size); |
270 | dump_stack(); | ||
271 | size = c->vm_end - c->vm_start; | ||
272 | } | ||
554 | 273 | ||
555 | __dma_clear_buffer(page, size); | 274 | idx = CONSISTENT_PTE_INDEX(c->vm_start); |
556 | __dma_remap(page, size, prot); | 275 | off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); |
276 | ptep = consistent_pte[idx] + off; | ||
277 | addr = c->vm_start; | ||
278 | do { | ||
279 | pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); | ||
280 | |||
281 | ptep++; | ||
282 | addr += PAGE_SIZE; | ||
283 | off++; | ||
284 | if (off >= PTRS_PER_PTE) { | ||
285 | off = 0; | ||
286 | ptep = consistent_pte[++idx]; | ||
287 | } | ||
557 | 288 | ||
558 | *ret_page = page; | 289 | if (pte_none(pte) || !pte_present(pte)) |
559 | return page_address(page); | 290 | printk(KERN_CRIT "%s: bad page in kernel page table\n", |
560 | } | 291 | __func__); |
292 | } while (size -= PAGE_SIZE); | ||
561 | 293 | ||
562 | static void __free_from_contiguous(struct device *dev, struct page *page, | 294 | flush_tlb_kernel_range(c->vm_start, c->vm_end); |
563 | size_t size) | ||
564 | { | ||
565 | __dma_remap(page, size, pgprot_kernel); | ||
566 | dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT); | ||
567 | } | ||
568 | 295 | ||
569 | static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot) | 296 | arm_vmregion_free(&consistent_head, c); |
570 | { | ||
571 | prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ? | ||
572 | pgprot_writecombine(prot) : | ||
573 | pgprot_dmacoherent(prot); | ||
574 | return prot; | ||
575 | } | 297 | } |
576 | 298 | ||
577 | #define nommu() 0 | ||
578 | |||
579 | #else /* !CONFIG_MMU */ | 299 | #else /* !CONFIG_MMU */ |
580 | 300 | ||
581 | #define nommu() 1 | 301 | #define __dma_alloc_remap(page, size, gfp, prot) page_address(page) |
582 | 302 | #define __dma_free_remap(addr, size) do { } while (0) | |
583 | #define __get_dma_pgprot(attrs, prot) __pgprot(0) | ||
584 | #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL | ||
585 | #define __alloc_from_pool(size, ret_page) NULL | ||
586 | #define __alloc_from_contiguous(dev, size, prot, ret) NULL | ||
587 | #define __free_from_pool(cpu_addr, size) 0 | ||
588 | #define __free_from_contiguous(dev, page, size) do { } while (0) | ||
589 | #define __dma_free_remap(cpu_addr, size) do { } while (0) | ||
590 | 303 | ||
591 | #endif /* CONFIG_MMU */ | 304 | #endif /* CONFIG_MMU */ |
592 | 305 | ||
593 | static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp, | 306 | static void * |
594 | struct page **ret_page) | 307 | __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, |
308 | pgprot_t prot) | ||
595 | { | 309 | { |
596 | struct page *page; | 310 | struct page *page; |
597 | page = __dma_alloc_buffer(dev, size, gfp); | ||
598 | if (!page) | ||
599 | return NULL; | ||
600 | |||
601 | *ret_page = page; | ||
602 | return page_address(page); | ||
603 | } | ||
604 | |||
605 | |||
606 | |||
607 | static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | ||
608 | gfp_t gfp, pgprot_t prot, bool is_coherent, const void *caller) | ||
609 | { | ||
610 | u64 mask = get_coherent_dma_mask(dev); | ||
611 | struct page *page = NULL; | ||
612 | void *addr; | 311 | void *addr; |
613 | 312 | ||
614 | #ifdef CONFIG_DMA_API_DEBUG | 313 | /* Following is a work-around (a.k.a. hack) to prevent pages |
615 | u64 limit = (mask + 1) & ~mask; | ||
616 | if (limit && size >= limit) { | ||
617 | dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", | ||
618 | size, mask); | ||
619 | return NULL; | ||
620 | } | ||
621 | #endif | ||
622 | |||
623 | if (!mask) | ||
624 | return NULL; | ||
625 | |||
626 | if (mask < 0xffffffffULL) | ||
627 | gfp |= GFP_DMA; | ||
628 | |||
629 | /* | ||
630 | * Following is a work-around (a.k.a. hack) to prevent pages | ||
631 | * with __GFP_COMP being passed to split_page() which cannot | 314 | * with __GFP_COMP being passed to split_page() which cannot |
632 | * handle them. The real problem is that this flag probably | 315 | * handle them. The real problem is that this flag probably |
633 | * should be 0 on ARM as it is not supported on this | 316 | * should be 0 on ARM as it is not supported on this |
634 | * platform; see CONFIG_HUGETLBFS. | 317 | * platform--see CONFIG_HUGETLB_PAGE. */ |
635 | */ | ||
636 | gfp &= ~(__GFP_COMP); | 318 | gfp &= ~(__GFP_COMP); |
637 | 319 | ||
638 | *handle = DMA_ERROR_CODE; | 320 | *handle = ~0; |
639 | size = PAGE_ALIGN(size); | 321 | size = PAGE_ALIGN(size); |
640 | 322 | ||
641 | if (is_coherent || nommu()) | 323 | page = __dma_alloc_buffer(dev, size, gfp); |
642 | addr = __alloc_simple_buffer(dev, size, gfp, &page); | 324 | if (!page) |
643 | else if (gfp & GFP_ATOMIC) | 325 | return NULL; |
644 | addr = __alloc_from_pool(size, &page); | 326 | |
645 | else if (!IS_ENABLED(CONFIG_CMA)) | 327 | if (!arch_is_coherent()) |
646 | addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); | 328 | addr = __dma_alloc_remap(page, size, gfp, prot); |
647 | else | 329 | else |
648 | addr = __alloc_from_contiguous(dev, size, prot, &page); | 330 | addr = page_address(page); |
649 | 331 | ||
650 | if (addr) | 332 | if (addr) |
651 | *handle = pfn_to_dma(dev, page_to_pfn(page)); | 333 | *handle = pfn_to_dma(dev, page_to_pfn(page)); |
334 | else | ||
335 | __dma_free_buffer(page, size); | ||
652 | 336 | ||
653 | return addr; | 337 | return addr; |
654 | } | 338 | } |
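Before allocating, __dma_alloc derives the gfp zone from the device's coherent mask: a device that cannot address the full 32-bit physical space must be served from ZONE_DMA. The decision, isolated (mask value illustrative):

	u64 mask = get_coherent_dma_mask(dev);	/* e.g. 0x03ffffff for a 26-bit device */

	if (!mask)				/* no usable mask: fail the allocation */
		return NULL;
	if (mask < 0xffffffffULL)		/* 26-bit < 32-bit: restrict the zone */
		gfp |= GFP_DMA;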
@@ -657,118 +341,136 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | |||
657 | * Allocate DMA-coherent memory space and return both the kernel remapped | 341 | * Allocate DMA-coherent memory space and return both the kernel remapped |
658 | * virtual and bus address for that space. | 342 | * virtual and bus address for that space. |
659 | */ | 343 | */ |
660 | void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, | 344 | void * |
661 | gfp_t gfp, struct dma_attrs *attrs) | 345 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) |
662 | { | 346 | { |
663 | pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); | ||
664 | void *memory; | 347 | void *memory; |
665 | 348 | ||
666 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) | 349 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) |
667 | return memory; | 350 | return memory; |
668 | 351 | ||
669 | return __dma_alloc(dev, size, handle, gfp, prot, false, | 352 | return __dma_alloc(dev, size, handle, gfp, |
670 | __builtin_return_address(0)); | 353 | pgprot_dmacoherent(pgprot_kernel)); |
671 | } | 354 | } |
355 | EXPORT_SYMBOL(dma_alloc_coherent); | ||
672 | 356 | ||
673 | static void *arm_coherent_dma_alloc(struct device *dev, size_t size, | 357 | /* |
674 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | 358 | * Allocate a writecombining region, in much the same way as |
359 | * dma_alloc_coherent above. | ||
360 | */ | ||
361 | void * | ||
362 | dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) | ||
675 | { | 363 | { |
676 | pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); | 364 | return __dma_alloc(dev, size, handle, gfp, |
677 | void *memory; | 365 | pgprot_writecombine(pgprot_kernel)); |
678 | |||
679 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) | ||
680 | return memory; | ||
681 | |||
682 | return __dma_alloc(dev, size, handle, gfp, prot, true, | ||
683 | __builtin_return_address(0)); | ||
684 | } | 366 | } |
367 | EXPORT_SYMBOL(dma_alloc_writecombine); | ||
685 | 368 | ||
686 | /* | 369 | static int dma_mmap(struct device *dev, struct vm_area_struct *vma, |
687 | * Create userspace mapping for the DMA-coherent memory. | 370 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
688 | */ | ||
689 | int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, | ||
690 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
691 | struct dma_attrs *attrs) | ||
692 | { | 371 | { |
693 | int ret = -ENXIO; | 372 | int ret = -ENXIO; |
694 | #ifdef CONFIG_MMU | 373 | #ifdef CONFIG_MMU |
695 | unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 374 | unsigned long user_size, kern_size; |
696 | unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | 375 | struct arm_vmregion *c; |
697 | unsigned long pfn = dma_to_pfn(dev, dma_addr); | ||
698 | unsigned long off = vma->vm_pgoff; | ||
699 | 376 | ||
700 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | 377 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
701 | 378 | ||
702 | if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) | 379 | c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); |
703 | return ret; | 380 | if (c) { |
381 | unsigned long off = vma->vm_pgoff; | ||
704 | 382 | ||
705 | if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { | 383 | kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT; |
706 | ret = remap_pfn_range(vma, vma->vm_start, | 384 | |
707 | pfn + off, | 385 | if (off < kern_size && |
708 | vma->vm_end - vma->vm_start, | 386 | user_size <= (kern_size - off)) { |
709 | vma->vm_page_prot); | 387 | ret = remap_pfn_range(vma, vma->vm_start, |
388 | page_to_pfn(c->vm_pages) + off, | ||
389 | user_size << PAGE_SHIFT, | ||
390 | vma->vm_page_prot); | ||
391 | } | ||
710 | } | 392 | } |
711 | #endif /* CONFIG_MMU */ | 393 | #endif /* CONFIG_MMU */ |
712 | 394 | ||
713 | return ret; | 395 | return ret; |
714 | } | 396 | } |
715 | 397 | ||
398 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
399 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
400 | { | ||
401 | vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); | ||
402 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
403 | } | ||
404 | EXPORT_SYMBOL(dma_mmap_coherent); | ||
405 | |||
406 | int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma, | ||
407 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | ||
408 | { | ||
409 | vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); | ||
410 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | ||
411 | } | ||
412 | EXPORT_SYMBOL(dma_mmap_writecombine); | ||
413 | |||
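dma_mmap_coherent()/dma_mmap_writecombine() exist so a driver can hand a coherent buffer to user space from its mmap file operation. A hedged usage sketch (struct mydev and its fields are hypothetical):

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydev *md = file->private_data;	/* hypothetical driver state */

		/* md->vaddr/md->dma_handle came from dma_alloc_coherent() */
		return dma_mmap_coherent(md->dev, vma, md->vaddr,
					 md->dma_handle, md->size);
	}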
716 | /* | 414 | /* |
717 | * Free a buffer as defined by the above mapping. | 415 | * Free a page as defined by the above mapping.
416 | * Must not be called with IRQs disabled. | ||
718 | */ | 417 | */ |
719 | static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 418 | void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) |
720 | dma_addr_t handle, struct dma_attrs *attrs, | ||
721 | bool is_coherent) | ||
722 | { | 419 | { |
723 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 420 | WARN_ON(irqs_disabled()); |
724 | 421 | ||
725 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) | 422 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) |
726 | return; | 423 | return; |
727 | 424 | ||
728 | size = PAGE_ALIGN(size); | 425 | size = PAGE_ALIGN(size); |
729 | 426 | ||
730 | if (is_coherent || nommu()) { | 427 | if (!arch_is_coherent()) |
731 | __dma_free_buffer(page, size); | ||
732 | } else if (__free_from_pool(cpu_addr, size)) { | ||
733 | return; | ||
734 | } else if (!IS_ENABLED(CONFIG_CMA)) { | ||
735 | __dma_free_remap(cpu_addr, size); | 428 | __dma_free_remap(cpu_addr, size); |
736 | __dma_free_buffer(page, size); | ||
737 | } else { | ||
738 | /* | ||
739 | * Non-atomic allocations cannot be freed with IRQs disabled | ||
740 | */ | ||
741 | WARN_ON(irqs_disabled()); | ||
742 | __free_from_contiguous(dev, page, size); | ||
743 | } | ||
744 | } | ||
745 | 429 | ||
746 | void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, | 430 | __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size); |
747 | dma_addr_t handle, struct dma_attrs *attrs) | ||
748 | { | ||
749 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); | ||
750 | } | 431 | } |
432 | EXPORT_SYMBOL(dma_free_coherent); | ||
751 | 433 | ||
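The usual pairing with the allocation side looks like the sketch below; the desc_ring/example_* names and the 4096-byte size are illustrative, and the free must run with interrupts enabled, as the WARN_ON above enforces.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver state. */
static void *desc_ring;
static dma_addr_t desc_dma;

static int example_alloc(struct device *dev)
{
	desc_ring = dma_alloc_coherent(dev, 4096, &desc_dma, GFP_KERNEL);
	return desc_ring ? 0 : -ENOMEM;
}

static void example_free(struct device *dev)
{
	/* Process context only: the WARN_ON(irqs_disabled()) above
	 * fires if this runs with interrupts off. */
	dma_free_coherent(dev, 4096, desc_ring, desc_dma);
}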
752 | static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, | 434 | /* |
753 | dma_addr_t handle, struct dma_attrs *attrs) | 435 | * Make an area consistent for devices. |
436 | * Note: Drivers should NOT use this function directly, as it will break | ||
437 | * platforms with CONFIG_DMABOUNCE. | ||
438 | * Use the driver DMA support - see dma-mapping.h (dma_sync_*) | ||
439 | */ | ||
440 | void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, | ||
441 | enum dma_data_direction dir) | ||
754 | { | 442 | { |
755 | __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); | 443 | unsigned long paddr; |
444 | |||
445 | BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); | ||
446 | |||
447 | dmac_map_area(kaddr, size, dir); | ||
448 | |||
449 | paddr = __pa(kaddr); | ||
450 | if (dir == DMA_FROM_DEVICE) { | ||
451 | outer_inv_range(paddr, paddr + size); | ||
452 | } else { | ||
453 | outer_clean_range(paddr, paddr + size); | ||
454 | } | ||
455 | /* FIXME: non-speculating: flush on bidirectional mappings? */ | ||
756 | } | 456 | } |
457 | EXPORT_SYMBOL(___dma_single_cpu_to_dev); | ||
757 | 458 | ||
758 | int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, | 459 | void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, |
759 | void *cpu_addr, dma_addr_t handle, size_t size, | 460 | enum dma_data_direction dir) |
760 | struct dma_attrs *attrs) | ||
761 | { | 461 | { |
762 | struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); | 462 | BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); |
763 | int ret; | ||
764 | 463 | ||
765 | ret = sg_alloc_table(sgt, 1, GFP_KERNEL); | 464 | /* FIXME: non-speculating: not required */ |
766 | if (unlikely(ret)) | 465 | /* don't bother invalidating if DMA to device */ |
767 | return ret; | 466 | if (dir != DMA_TO_DEVICE) { |
467 | unsigned long paddr = __pa(kaddr); | ||
468 | outer_inv_range(paddr, paddr + size); | ||
469 | } | ||
768 | 470 | ||
769 | sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); | 471 | dmac_unmap_area(kaddr, size, dir); |
770 | return 0; | ||
771 | } | 472 | } |
473 | EXPORT_SYMBOL(___dma_single_dev_to_cpu); | ||
772 | 474 | ||
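Drivers reach these helpers only indirectly, through the dma_sync_single_for_{cpu,device} wrappers. A hedged sketch of the intended ownership hand-off for a streaming buffer (example_rx is a made-up name):

#include <linux/dma-mapping.h>

/* Hypothetical round trip over a streaming buffer the device fills. */
static void example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, handle))
		return;

	/* ... device DMAs into the buffer ... */

	/* Hand ownership back to the CPU: on this kernel this reaches
	 * ___dma_single_dev_to_cpu() and invalidates stale cache lines. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* ... CPU reads buf ... */

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
}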
773 | static void dma_cache_maint_page(struct page *page, unsigned long offset, | 475 | static void dma_cache_maint_page(struct page *page, unsigned long offset, |
774 | size_t size, enum dma_data_direction dir, | 476 | size_t size, enum dma_data_direction dir, |
@@ -814,13 +516,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
814 | } while (left); | 516 | } while (left); |
815 | } | 517 | } |
816 | 518 | ||
817 | /* | 519 | void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, |
818 | * Make an area consistent for devices. | ||
819 | * Note: Drivers should NOT use this function directly, as it will break | ||
820 | * platforms with CONFIG_DMABOUNCE. | ||
821 | * Use the driver DMA support - see dma-mapping.h (dma_sync_*) | ||
822 | */ | ||
823 | static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, | ||
824 | size_t size, enum dma_data_direction dir) | 520 | size_t size, enum dma_data_direction dir) |
825 | { | 521 | { |
826 | unsigned long paddr; | 522 | unsigned long paddr; |
@@ -835,8 +531,9 @@ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, | |||
835 | } | 531 | } |
836 | /* FIXME: non-speculating: flush on bidirectional mappings? */ | 532 | /* FIXME: non-speculating: flush on bidirectional mappings? */ |
837 | } | 533 | } |
534 | EXPORT_SYMBOL(___dma_page_cpu_to_dev); | ||
838 | 535 | ||
839 | static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | 536 | void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, |
840 | size_t size, enum dma_data_direction dir) | 537 | size_t size, enum dma_data_direction dir) |
841 | { | 538 | { |
842 | unsigned long paddr = page_to_phys(page) + off; | 539 | unsigned long paddr = page_to_phys(page) + off; |
@@ -854,9 +551,10 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
854 | if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) | 551 | if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) |
855 | set_bit(PG_dcache_clean, &page->flags); | 552 | set_bit(PG_dcache_clean, &page->flags); |
856 | } | 553 | } |
554 | EXPORT_SYMBOL(___dma_page_dev_to_cpu); | ||
857 | 555 | ||
858 | /** | 556 | /** |
859 | * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA | 557 | * dma_map_sg - map a set of SG buffers for streaming mode DMA |
860 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 558 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
861 | * @sg: list of buffers | 559 | * @sg: list of buffers |
862 | * @nents: number of buffers to map | 560 | * @nents: number of buffers to map |
@@ -871,32 +569,32 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
871 | * Device ownership issues as mentioned for dma_map_single are the same | 569 | * Device ownership issues as mentioned for dma_map_single are the same |
872 | * here. | 570 | * here. |
873 | */ | 571 | */ |
874 | int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | 572 | int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, |
875 | enum dma_data_direction dir, struct dma_attrs *attrs) | 573 | enum dma_data_direction dir) |
876 | { | 574 | { |
877 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
878 | struct scatterlist *s; | 575 | struct scatterlist *s; |
879 | int i, j; | 576 | int i, j; |
880 | 577 | ||
578 | BUG_ON(!valid_dma_direction(dir)); | ||
579 | |||
881 | for_each_sg(sg, s, nents, i) { | 580 | for_each_sg(sg, s, nents, i) { |
882 | #ifdef CONFIG_NEED_SG_DMA_LENGTH | 581 | s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, |
883 | s->dma_length = s->length; | 582 | s->length, dir); |
884 | #endif | ||
885 | s->dma_address = ops->map_page(dev, sg_page(s), s->offset, | ||
886 | s->length, dir, attrs); | ||
887 | if (dma_mapping_error(dev, s->dma_address)) | 583 | if (dma_mapping_error(dev, s->dma_address)) |
888 | goto bad_mapping; | 584 | goto bad_mapping; |
889 | } | 585 | } |
586 | debug_dma_map_sg(dev, sg, nents, nents, dir); | ||
890 | return nents; | 587 | return nents; |
891 | 588 | ||
892 | bad_mapping: | 589 | bad_mapping: |
893 | for_each_sg(sg, s, i, j) | 590 | for_each_sg(sg, s, i, j) |
894 | ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); | 591 | __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); |
895 | return 0; | 592 | return 0; |
896 | } | 593 | } |
594 | EXPORT_SYMBOL(dma_map_sg); | ||
897 | 595 | ||
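A typical caller, sketched under the assumption of an already-built scatterlist (example_map_sg is a hypothetical name):

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static int example_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;	/* partial mappings were already undone */

	/* Program the device with each DMA segment. */
	for_each_sg(sgl, s, count, i)
		pr_debug("seg %d: 0x%llx + %u\n", i,
			 (unsigned long long)sg_dma_address(s),
			 sg_dma_len(s));

	/* Later: dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	 * always pass the original nents, not the returned count. */
	return 0;
}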
898 | /** | 596 | /** |
899 | * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | 597 | * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg |
900 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 598 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
901 | * @sg: list of buffers | 599 | * @sg: list of buffers |
902 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) | 600 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) |
@@ -905,55 +603,70 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
905 | * Unmap a set of streaming mode DMA translations. Again, CPU access | 603 | * Unmap a set of streaming mode DMA translations. Again, CPU access |
906 | * rules concerning calls here are the same as for dma_unmap_single(). | 604 | * rules concerning calls here are the same as for dma_unmap_single(). |
907 | */ | 605 | */ |
908 | void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | 606 | void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, |
909 | enum dma_data_direction dir, struct dma_attrs *attrs) | 607 | enum dma_data_direction dir) |
910 | { | 608 | { |
911 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
912 | struct scatterlist *s; | 609 | struct scatterlist *s; |
913 | |||
914 | int i; | 610 | int i; |
915 | 611 | ||
612 | debug_dma_unmap_sg(dev, sg, nents, dir); | ||
613 | |||
916 | for_each_sg(sg, s, nents, i) | 614 | for_each_sg(sg, s, nents, i) |
917 | ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); | 615 | __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); |
918 | } | 616 | } |
617 | EXPORT_SYMBOL(dma_unmap_sg); | ||
919 | 618 | ||
920 | /** | 619 | /** |
921 | * arm_dma_sync_sg_for_cpu | 620 | * dma_sync_sg_for_cpu |
922 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 621 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
923 | * @sg: list of buffers | 622 | * @sg: list of buffers |
924 | * @nents: number of buffers to map (returned from dma_map_sg) | 623 | * @nents: number of buffers to map (returned from dma_map_sg) |
925 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 624 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) |
926 | */ | 625 | */ |
927 | void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | 626 | void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, |
928 | int nents, enum dma_data_direction dir) | 627 | int nents, enum dma_data_direction dir) |
929 | { | 628 | { |
930 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
931 | struct scatterlist *s; | 629 | struct scatterlist *s; |
932 | int i; | 630 | int i; |
933 | 631 | ||
934 | for_each_sg(sg, s, nents, i) | 632 | for_each_sg(sg, s, nents, i) { |
935 | ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, | 633 | if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, |
936 | dir); | 634 | sg_dma_len(s), dir)) |
635 | continue; | ||
636 | |||
637 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | ||
638 | s->length, dir); | ||
639 | } | ||
640 | |||
641 | debug_dma_sync_sg_for_cpu(dev, sg, nents, dir); | ||
937 | } | 642 | } |
643 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | ||
938 | 644 | ||
939 | /** | 645 | /** |
940 | * arm_dma_sync_sg_for_device | 646 | * dma_sync_sg_for_device |
941 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 647 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
942 | * @sg: list of buffers | 648 | * @sg: list of buffers |
943 | * @nents: number of buffers to map (returned from dma_map_sg) | 649 | * @nents: number of buffers to map (returned from dma_map_sg) |
944 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 650 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) |
945 | */ | 651 | */ |
946 | void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | 652 | void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, |
947 | int nents, enum dma_data_direction dir) | 653 | int nents, enum dma_data_direction dir) |
948 | { | 654 | { |
949 | struct dma_map_ops *ops = get_dma_ops(dev); | ||
950 | struct scatterlist *s; | 655 | struct scatterlist *s; |
951 | int i; | 656 | int i; |
952 | 657 | ||
953 | for_each_sg(sg, s, nents, i) | 658 | for_each_sg(sg, s, nents, i) { |
954 | ops->sync_single_for_device(dev, sg_dma_address(s), s->length, | 659 | if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0, |
955 | dir); | 660 | sg_dma_len(s), dir)) |
661 | continue; | ||
662 | |||
663 | __dma_page_cpu_to_dev(sg_page(s), s->offset, | ||
664 | s->length, dir); | ||
665 | } | ||
666 | |||
667 | debug_dma_sync_sg_for_device(dev, sg, nents, dir); | ||
956 | } | 668 | } |
669 | EXPORT_SYMBOL(dma_sync_sg_for_device); | ||
957 | 670 | ||
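When a mapped scatterlist is reused across transfers, the two sync calls bracket the CPU's window of access. A minimal sketch, with hypothetical names:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void example_reuse(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	/* Return buffer ownership to the CPU after a transfer. */
	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);

	/* ... the CPU may inspect or rewrite the data here ... */

	/* Hand the buffers back to the device before the next transfer;
	 * unless dmabounce intervenes, this cleans/invalidates caches
	 * via __dma_page_cpu_to_dev() above. */
	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
}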
958 | /* | 671 | /* |
959 | * Return whether the given device DMA address mask can be supported | 672 | * Return whether the given device DMA address mask can be supported |
@@ -969,15 +682,18 @@ int dma_supported(struct device *dev, u64 mask) | |||
969 | } | 682 | } |
970 | EXPORT_SYMBOL(dma_supported); | 683 | EXPORT_SYMBOL(dma_supported); |
971 | 684 | ||
972 | int arm_dma_set_mask(struct device *dev, u64 dma_mask) | 685 | int dma_set_mask(struct device *dev, u64 dma_mask) |
973 | { | 686 | { |
974 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | 687 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) |
975 | return -EIO; | 688 | return -EIO; |
976 | 689 | ||
690 | #ifndef CONFIG_DMABOUNCE | ||
977 | *dev->dma_mask = dma_mask; | 691 | *dev->dma_mask = dma_mask; |
692 | #endif | ||
978 | 693 | ||
979 | return 0; | 694 | return 0; |
980 | } | 695 | } |
696 | EXPORT_SYMBOL(dma_set_mask); | ||
981 | 697 | ||
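Callers normally issue this once at probe time. A short sketch (example_probe is a made-up name); note that with CONFIG_DMABOUNCE the mask write above is compiled out, so the device keeps the mask the bounce code assigned:

#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	int ret = dma_set_mask(dev, DMA_BIT_MASK(32));

	if (ret)
		return ret;	/* -EIO: the mask cannot be supported */

	/* ... continue device setup ... */
	return 0;
}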
982 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 698 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
983 | 699 | ||
@@ -987,859 +703,3 @@ static int __init dma_debug_do_init(void) | |||
987 | return 0; | 703 | return 0; |
988 | } | 704 | } |
989 | fs_initcall(dma_debug_do_init); | 705 | fs_initcall(dma_debug_do_init); |
990 | |||
991 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | ||
992 | |||
993 | /* IOMMU */ | ||
994 | |||
995 | static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, | ||
996 | size_t size) | ||
997 | { | ||
998 | unsigned int order = get_order(size); | ||
999 | unsigned int align = 0; | ||
1000 | unsigned int count, start; | ||
1001 | unsigned long flags; | ||
1002 | |||
1003 | count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) + | ||
1004 | (1 << mapping->order) - 1) >> mapping->order; | ||
1005 | |||
1006 | if (order > mapping->order) | ||
1007 | align = (1 << (order - mapping->order)) - 1; | ||
1008 | |||
1009 | spin_lock_irqsave(&mapping->lock, flags); | ||
1010 | start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0, | ||
1011 | count, align); | ||
1012 | if (start > mapping->bits) { | ||
1013 | spin_unlock_irqrestore(&mapping->lock, flags); | ||
1014 | return DMA_ERROR_CODE; | ||
1015 | } | ||
1016 | |||
1017 | bitmap_set(mapping->bitmap, start, count); | ||
1018 | spin_unlock_irqrestore(&mapping->lock, flags); | ||
1019 | |||
1020 | return mapping->base + (start << (mapping->order + PAGE_SHIFT)); | ||
1021 | } | ||
1022 | |||
1023 | static inline void __free_iova(struct dma_iommu_mapping *mapping, | ||
1024 | dma_addr_t addr, size_t size) | ||
1025 | { | ||
1026 | unsigned int start = (addr - mapping->base) >> | ||
1027 | (mapping->order + PAGE_SHIFT); | ||
1028 | unsigned int count = ((size >> PAGE_SHIFT) + | ||
1029 | (1 << mapping->order) - 1) >> mapping->order; | ||
1030 | unsigned long flags; | ||
1031 | |||
1032 | spin_lock_irqsave(&mapping->lock, flags); | ||
1033 | bitmap_clear(mapping->bitmap, start, count); | ||
1034 | spin_unlock_irqrestore(&mapping->lock, flags); | ||
1035 | } | ||
1036 | |||
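The bitmap arithmetic in __alloc_iova()/__free_iova() allocates IO addresses in units of 2^order pages, so each bitmap bit covers one such unit. A standalone worked example of the rounding, using assumed values PAGE_SHIFT = 12 and order = 2:

#include <stdio.h>

int main(void)
{
	unsigned long size = 40960;	/* a 40 KiB request            */
	unsigned int page_shift = 12;	/* 4 KiB pages                 */
	unsigned int order = 2;		/* allocator unit = 16 KiB     */
	unsigned long pages, bits;

	/* PAGE_ALIGN(size) >> PAGE_SHIFT, as in __alloc_iova() */
	pages = (size + (1UL << page_shift) - 1) >> page_shift;
	/* round up to whole 2^order units tracked by the bitmap */
	bits = (pages + (1UL << order) - 1) >> order;

	printf("pages=%lu bitmap bits=%lu\n", pages, bits); /* 10 and 3 */
	return 0;
}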
1037 | static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, | ||
1038 | gfp_t gfp, struct dma_attrs *attrs) | ||
1039 | { | ||
1040 | struct page **pages; | ||
1041 | int count = size >> PAGE_SHIFT; | ||
1042 | int array_size = count * sizeof(struct page *); | ||
1043 | int i = 0; | ||
1044 | |||
1045 | if (array_size <= PAGE_SIZE) | ||
1046 | pages = kzalloc(array_size, gfp); | ||
1047 | else | ||
1048 | pages = vzalloc(array_size); | ||
1049 | if (!pages) | ||
1050 | return NULL; | ||
1051 | |||
1052 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) | ||
1053 | { | ||
1054 | unsigned long order = get_order(size); | ||
1055 | struct page *page; | ||
1056 | |||
1057 | page = dma_alloc_from_contiguous(dev, count, order); | ||
1058 | if (!page) | ||
1059 | goto error; | ||
1060 | |||
1061 | __dma_clear_buffer(page, size); | ||
1062 | |||
1063 | for (i = 0; i < count; i++) | ||
1064 | pages[i] = page + i; | ||
1065 | |||
1066 | return pages; | ||
1067 | } | ||
1068 | |||
1069 | while (count) { | ||
1070 | int j, order = __fls(count); | ||
1071 | |||
1072 | pages[i] = alloc_pages(gfp | __GFP_NOWARN, order); | ||
1073 | while (!pages[i] && order) | ||
1074 | pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order); | ||
1075 | if (!pages[i]) | ||
1076 | goto error; | ||
1077 | |||
1078 | if (order) { | ||
1079 | split_page(pages[i], order); | ||
1080 | j = 1 << order; | ||
1081 | while (--j) | ||
1082 | pages[i + j] = pages[i] + j; | ||
1083 | } | ||
1084 | |||
1085 | __dma_clear_buffer(pages[i], PAGE_SIZE << order); | ||
1086 | i += 1 << order; | ||
1087 | count -= 1 << order; | ||
1088 | } | ||
1089 | |||
1090 | return pages; | ||
1091 | error: | ||
1092 | while (i--) | ||
1093 | if (pages[i]) | ||
1094 | __free_pages(pages[i], 0); | ||
1095 | if (array_size <= PAGE_SIZE) | ||
1096 | kfree(pages); | ||
1097 | else | ||
1098 | vfree(pages); | ||
1099 | return NULL; | ||
1100 | } | ||
1101 | |||
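The allocation loop above carves the request greedily from the highest power-of-two chunk downwards (retrying with smaller orders when alloc_pages() fails, which this sketch omits). A standalone illustration of the chunk sizes chosen for a 13-page buffer:

#include <stdio.h>

/* Index of the highest set bit, mirroring the kernel's __fls(). */
static unsigned int fls_ul(unsigned long x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long count = 13;	/* pages still to allocate */

	while (count) {
		unsigned int order = fls_ul(count);

		printf("alloc_pages(order=%u) -> %lu pages\n",
		       order, 1UL << order);	/* prints 8, 4, 1 */
		count -= 1UL << order;
	}
	return 0;
}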
1102 | static int __iommu_free_buffer(struct device *dev, struct page **pages, | ||
1103 | size_t size, struct dma_attrs *attrs) | ||
1104 | { | ||
1105 | int count = size >> PAGE_SHIFT; | ||
1106 | int array_size = count * sizeof(struct page *); | ||
1107 | int i; | ||
1108 | |||
1109 | if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) { | ||
1110 | dma_release_from_contiguous(dev, pages[0], count); | ||
1111 | } else { | ||
1112 | for (i = 0; i < count; i++) | ||
1113 | if (pages[i]) | ||
1114 | __free_pages(pages[i], 0); | ||
1115 | } | ||
1116 | |||
1117 | if (array_size <= PAGE_SIZE) | ||
1118 | kfree(pages); | ||
1119 | else | ||
1120 | vfree(pages); | ||
1121 | return 0; | ||
1122 | } | ||
1123 | |||
1124 | /* | ||
1125 | * Create a CPU mapping for the specified pages | ||
1126 | */ | ||
1127 | static void * | ||
1128 | __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, | ||
1129 | const void *caller) | ||
1130 | { | ||
1131 | unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
1132 | struct vm_struct *area; | ||
1133 | unsigned long p; | ||
1134 | |||
1135 | area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, | ||
1136 | caller); | ||
1137 | if (!area) | ||
1138 | return NULL; | ||
1139 | |||
1140 | area->pages = pages; | ||
1141 | area->nr_pages = nr_pages; | ||
1142 | p = (unsigned long)area->addr; | ||
1143 | |||
1144 | for (i = 0; i < nr_pages; i++) { | ||
1145 | phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); | ||
1146 | if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) | ||
1147 | goto err; | ||
1148 | p += PAGE_SIZE; | ||
1149 | } | ||
1150 | return area->addr; | ||
1151 | err: | ||
1152 | unmap_kernel_range((unsigned long)area->addr, size); | ||
1153 | vunmap(area->addr); | ||
1154 | return NULL; | ||
1155 | } | ||
1156 | |||
1157 | /* | ||
1158 | * Create a mapping in device IO address space for specified pages | ||
1159 | */ | ||
1160 | static dma_addr_t | ||
1161 | __iommu_create_mapping(struct device *dev, struct page **pages, size_t size) | ||
1162 | { | ||
1163 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1164 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
1165 | dma_addr_t dma_addr, iova; | ||
1166 | int i, ret = DMA_ERROR_CODE; | ||
1167 | |||
1168 | dma_addr = __alloc_iova(mapping, size); | ||
1169 | if (dma_addr == DMA_ERROR_CODE) | ||
1170 | return dma_addr; | ||
1171 | |||
1172 | iova = dma_addr; | ||
1173 | for (i = 0; i < count; ) { | ||
1174 | unsigned int next_pfn = page_to_pfn(pages[i]) + 1; | ||
1175 | phys_addr_t phys = page_to_phys(pages[i]); | ||
1176 | unsigned int len, j; | ||
1177 | |||
1178 | for (j = i + 1; j < count; j++, next_pfn++) | ||
1179 | if (page_to_pfn(pages[j]) != next_pfn) | ||
1180 | break; | ||
1181 | |||
1182 | len = (j - i) << PAGE_SHIFT; | ||
1183 | ret = iommu_map(mapping->domain, iova, phys, len, 0); | ||
1184 | if (ret < 0) | ||
1185 | goto fail; | ||
1186 | iova += len; | ||
1187 | i = j; | ||
1188 | } | ||
1189 | return dma_addr; | ||
1190 | fail: | ||
1191 | iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); | ||
1192 | __free_iova(mapping, dma_addr, size); | ||
1193 | return DMA_ERROR_CODE; | ||
1194 | } | ||
1195 | |||
1196 | static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) | ||
1197 | { | ||
1198 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1199 | |||
1200 | /* | ||
1201 | * add optional in-page offset from iova to size and align | ||
1202 | * result to page size | ||
1203 | */ | ||
1204 | size = PAGE_ALIGN((iova & ~PAGE_MASK) + size); | ||
1205 | iova &= PAGE_MASK; | ||
1206 | |||
1207 | iommu_unmap(mapping->domain, iova, size); | ||
1208 | __free_iova(mapping, iova, size); | ||
1209 | return 0; | ||
1210 | } | ||
1211 | |||
1212 | static struct page **__atomic_get_pages(void *addr) | ||
1213 | { | ||
1214 | struct dma_pool *pool = &atomic_pool; | ||
1215 | struct page **pages = pool->pages; | ||
1216 | int offs = (addr - pool->vaddr) >> PAGE_SHIFT; | ||
1217 | |||
1218 | return pages + offs; | ||
1219 | } | ||
1220 | |||
1221 | static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) | ||
1222 | { | ||
1223 | struct vm_struct *area; | ||
1224 | |||
1225 | if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) | ||
1226 | return __atomic_get_pages(cpu_addr); | ||
1227 | |||
1228 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | ||
1229 | return cpu_addr; | ||
1230 | |||
1231 | area = find_vm_area(cpu_addr); | ||
1232 | if (area && (area->flags & VM_ARM_DMA_CONSISTENT)) | ||
1233 | return area->pages; | ||
1234 | return NULL; | ||
1235 | } | ||
1236 | |||
1237 | static void *__iommu_alloc_atomic(struct device *dev, size_t size, | ||
1238 | dma_addr_t *handle) | ||
1239 | { | ||
1240 | struct page *page; | ||
1241 | void *addr; | ||
1242 | |||
1243 | addr = __alloc_from_pool(size, &page); | ||
1244 | if (!addr) | ||
1245 | return NULL; | ||
1246 | |||
1247 | *handle = __iommu_create_mapping(dev, &page, size); | ||
1248 | if (*handle == DMA_ERROR_CODE) | ||
1249 | goto err_mapping; | ||
1250 | |||
1251 | return addr; | ||
1252 | |||
1253 | err_mapping: | ||
1254 | __free_from_pool(addr, size); | ||
1255 | return NULL; | ||
1256 | } | ||
1257 | |||
1258 | static void __iommu_free_atomic(struct device *dev, struct page **pages, | ||
1259 | dma_addr_t handle, size_t size) | ||
1260 | { | ||
1261 | __iommu_remove_mapping(dev, handle, size); | ||
1262 | __free_from_pool(page_address(pages[0]), size); | ||
1263 | } | ||
1264 | |||
1265 | static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, | ||
1266 | dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) | ||
1267 | { | ||
1268 | pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel); | ||
1269 | struct page **pages; | ||
1270 | void *addr = NULL; | ||
1271 | |||
1272 | *handle = DMA_ERROR_CODE; | ||
1273 | size = PAGE_ALIGN(size); | ||
1274 | |||
1275 | if (gfp & GFP_ATOMIC) | ||
1276 | return __iommu_alloc_atomic(dev, size, handle); | ||
1277 | |||
1278 | pages = __iommu_alloc_buffer(dev, size, gfp, attrs); | ||
1279 | if (!pages) | ||
1280 | return NULL; | ||
1281 | |||
1282 | *handle = __iommu_create_mapping(dev, pages, size); | ||
1283 | if (*handle == DMA_ERROR_CODE) | ||
1284 | goto err_buffer; | ||
1285 | |||
1286 | if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) | ||
1287 | return pages; | ||
1288 | |||
1289 | addr = __iommu_alloc_remap(pages, size, gfp, prot, | ||
1290 | __builtin_return_address(0)); | ||
1291 | if (!addr) | ||
1292 | goto err_mapping; | ||
1293 | |||
1294 | return addr; | ||
1295 | |||
1296 | err_mapping: | ||
1297 | __iommu_remove_mapping(dev, *handle, size); | ||
1298 | err_buffer: | ||
1299 | __iommu_free_buffer(dev, pages, size, attrs); | ||
1300 | return NULL; | ||
1301 | } | ||
1302 | |||
1303 | static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, | ||
1304 | void *cpu_addr, dma_addr_t dma_addr, size_t size, | ||
1305 | struct dma_attrs *attrs) | ||
1306 | { | ||
1307 | unsigned long uaddr = vma->vm_start; | ||
1308 | unsigned long usize = vma->vm_end - vma->vm_start; | ||
1309 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | ||
1310 | |||
1311 | vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); | ||
1312 | |||
1313 | if (!pages) | ||
1314 | return -ENXIO; | ||
1315 | |||
1316 | do { | ||
1317 | int ret = vm_insert_page(vma, uaddr, *pages++); | ||
1318 | if (ret) { | ||
1319 | pr_err("Remapping memory failed: %d\n", ret); | ||
1320 | return ret; | ||
1321 | } | ||
1322 | uaddr += PAGE_SIZE; | ||
1323 | usize -= PAGE_SIZE; | ||
1324 | } while (usize > 0); | ||
1325 | |||
1326 | return 0; | ||
1327 | } | ||
1328 | |||
1329 | /* | ||
1330 | * Free a buffer as defined by the above mapping. | ||
1331 | * Must not be called with IRQs disabled. | ||
1332 | */ | ||
1333 | void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, | ||
1334 | dma_addr_t handle, struct dma_attrs *attrs) | ||
1335 | { | ||
1336 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | ||
1337 | size = PAGE_ALIGN(size); | ||
1338 | |||
1339 | if (!pages) { | ||
1340 | WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); | ||
1341 | return; | ||
1342 | } | ||
1343 | |||
1344 | if (__in_atomic_pool(cpu_addr, size)) { | ||
1345 | __iommu_free_atomic(dev, pages, handle, size); | ||
1346 | return; | ||
1347 | } | ||
1348 | |||
1349 | if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { | ||
1350 | unmap_kernel_range((unsigned long)cpu_addr, size); | ||
1351 | vunmap(cpu_addr); | ||
1352 | } | ||
1353 | |||
1354 | __iommu_remove_mapping(dev, handle, size); | ||
1355 | __iommu_free_buffer(dev, pages, size, attrs); | ||
1356 | } | ||
1357 | |||
1358 | static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
1359 | void *cpu_addr, dma_addr_t dma_addr, | ||
1360 | size_t size, struct dma_attrs *attrs) | ||
1361 | { | ||
1362 | unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
1363 | struct page **pages = __iommu_get_pages(cpu_addr, attrs); | ||
1364 | |||
1365 | if (!pages) | ||
1366 | return -ENXIO; | ||
1367 | |||
1368 | return sg_alloc_table_from_pages(sgt, pages, count, 0, size, | ||
1369 | GFP_KERNEL); | ||
1370 | } | ||
1371 | |||
1372 | /* | ||
1373 | * Map a part of the scatter-gather list into contiguous io address space | ||
1374 | */ | ||
1375 | static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, | ||
1376 | size_t size, dma_addr_t *handle, | ||
1377 | enum dma_data_direction dir, struct dma_attrs *attrs, | ||
1378 | bool is_coherent) | ||
1379 | { | ||
1380 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1381 | dma_addr_t iova, iova_base; | ||
1382 | int ret = 0; | ||
1383 | unsigned int count; | ||
1384 | struct scatterlist *s; | ||
1385 | |||
1386 | size = PAGE_ALIGN(size); | ||
1387 | *handle = DMA_ERROR_CODE; | ||
1388 | |||
1389 | iova_base = iova = __alloc_iova(mapping, size); | ||
1390 | if (iova == DMA_ERROR_CODE) | ||
1391 | return -ENOMEM; | ||
1392 | |||
1393 | for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { | ||
1394 | phys_addr_t phys = page_to_phys(sg_page(s)); | ||
1395 | unsigned int len = PAGE_ALIGN(s->offset + s->length); | ||
1396 | |||
1397 | if (!is_coherent && | ||
1398 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1399 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | ||
1400 | |||
1401 | ret = iommu_map(mapping->domain, iova, phys, len, 0); | ||
1402 | if (ret < 0) | ||
1403 | goto fail; | ||
1404 | count += len >> PAGE_SHIFT; | ||
1405 | iova += len; | ||
1406 | } | ||
1407 | *handle = iova_base; | ||
1408 | |||
1409 | return 0; | ||
1410 | fail: | ||
1411 | iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE); | ||
1412 | __free_iova(mapping, iova_base, size); | ||
1413 | return ret; | ||
1414 | } | ||
1415 | |||
1416 | static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
1417 | enum dma_data_direction dir, struct dma_attrs *attrs, | ||
1418 | bool is_coherent) | ||
1419 | { | ||
1420 | struct scatterlist *s = sg, *dma = sg, *start = sg; | ||
1421 | int i, count = 0; | ||
1422 | unsigned int offset = s->offset; | ||
1423 | unsigned int size = s->offset + s->length; | ||
1424 | unsigned int max = dma_get_max_seg_size(dev); | ||
1425 | |||
1426 | for (i = 1; i < nents; i++) { | ||
1427 | s = sg_next(s); | ||
1428 | |||
1429 | s->dma_address = DMA_ERROR_CODE; | ||
1430 | s->dma_length = 0; | ||
1431 | |||
1432 | if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { | ||
1433 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, | ||
1434 | dir, attrs, is_coherent) < 0) | ||
1435 | goto bad_mapping; | ||
1436 | |||
1437 | dma->dma_address += offset; | ||
1438 | dma->dma_length = size - offset; | ||
1439 | |||
1440 | size = offset = s->offset; | ||
1441 | start = s; | ||
1442 | dma = sg_next(dma); | ||
1443 | count += 1; | ||
1444 | } | ||
1445 | size += s->length; | ||
1446 | } | ||
1447 | if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs, | ||
1448 | is_coherent) < 0) | ||
1449 | goto bad_mapping; | ||
1450 | |||
1451 | dma->dma_address += offset; | ||
1452 | dma->dma_length = size - offset; | ||
1453 | |||
1454 | return count+1; | ||
1455 | |||
1456 | bad_mapping: | ||
1457 | for_each_sg(sg, s, count, i) | ||
1458 | __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s)); | ||
1459 | return 0; | ||
1460 | } | ||
1461 | |||
1462 | /** | ||
1463 | * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA | ||
1464 | * @dev: valid struct device pointer | ||
1465 | * @sg: list of buffers | ||
1466 | * @nents: number of buffers to map | ||
1467 | * @dir: DMA transfer direction | ||
1468 | * | ||
1469 | * Map a set of i/o coherent buffers described by scatterlist in streaming | ||
1470 | * mode for DMA. The scatter gather list elements are merged together (if | ||
1471 | * possible) and tagged with the appropriate dma address and length. They are | ||
1472 | * obtained via sg_dma_{address,length}. | ||
1473 | */ | ||
1474 | int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg, | ||
1475 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | ||
1476 | { | ||
1477 | return __iommu_map_sg(dev, sg, nents, dir, attrs, true); | ||
1478 | } | ||
1479 | |||
1480 | /** | ||
1481 | * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA | ||
1482 | * @dev: valid struct device pointer | ||
1483 | * @sg: list of buffers | ||
1484 | * @nents: number of buffers to map | ||
1485 | * @dir: DMA transfer direction | ||
1486 | * | ||
1487 | * Map a set of buffers described by scatterlist in streaming mode for DMA. | ||
1488 | * The scatter gather list elements are merged together (if possible) and | ||
1489 | * tagged with the appropriate dma address and length. They are obtained via | ||
1490 | * sg_dma_{address,length}. | ||
1491 | */ | ||
1492 | int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, | ||
1493 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | ||
1494 | { | ||
1495 | return __iommu_map_sg(dev, sg, nents, dir, attrs, false); | ||
1496 | } | ||
1497 | |||
1498 | static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
1499 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs, | ||
1500 | bool is_coherent) | ||
1501 | { | ||
1502 | struct scatterlist *s; | ||
1503 | int i; | ||
1504 | |||
1505 | for_each_sg(sg, s, nents, i) { | ||
1506 | if (sg_dma_len(s)) | ||
1507 | __iommu_remove_mapping(dev, sg_dma_address(s), | ||
1508 | sg_dma_len(s)); | ||
1509 | if (!is_coherent && | ||
1510 | !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1511 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | ||
1512 | s->length, dir); | ||
1513 | } | ||
1514 | } | ||
1515 | |||
1516 | /** | ||
1517 | * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | ||
1518 | * @dev: valid struct device pointer | ||
1519 | * @sg: list of buffers | ||
1520 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) | ||
1521 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | ||
1522 | * | ||
1523 | * Unmap a set of streaming mode DMA translations. Again, CPU access | ||
1524 | * rules concerning calls here are the same as for dma_unmap_single(). | ||
1525 | */ | ||
1526 | void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, | ||
1527 | int nents, enum dma_data_direction dir, struct dma_attrs *attrs) | ||
1528 | { | ||
1529 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, true); | ||
1530 | } | ||
1531 | |||
1532 | /** | ||
1533 | * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | ||
1534 | * @dev: valid struct device pointer | ||
1535 | * @sg: list of buffers | ||
1536 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) | ||
1537 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | ||
1538 | * | ||
1539 | * Unmap a set of streaming mode DMA translations. Again, CPU access | ||
1540 | * rules concerning calls here are the same as for dma_unmap_single(). | ||
1541 | */ | ||
1542 | void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
1543 | enum dma_data_direction dir, struct dma_attrs *attrs) | ||
1544 | { | ||
1545 | __iommu_unmap_sg(dev, sg, nents, dir, attrs, false); | ||
1546 | } | ||
1547 | |||
1548 | /** | ||
1549 | * arm_iommu_sync_sg_for_cpu | ||
1550 | * @dev: valid struct device pointer | ||
1551 | * @sg: list of buffers | ||
1552 | * @nents: number of buffers to map (returned from dma_map_sg) | ||
1553 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | ||
1554 | */ | ||
1555 | void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
1556 | int nents, enum dma_data_direction dir) | ||
1557 | { | ||
1558 | struct scatterlist *s; | ||
1559 | int i; | ||
1560 | |||
1561 | for_each_sg(sg, s, nents, i) | ||
1562 | __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir); | ||
1563 | |||
1564 | } | ||
1565 | |||
1566 | /** | ||
1567 | * arm_iommu_sync_sg_for_device | ||
1568 | * @dev: valid struct device pointer | ||
1569 | * @sg: list of buffers | ||
1570 | * @nents: number of buffers to map (returned from dma_map_sg) | ||
1571 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | ||
1572 | */ | ||
1573 | void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
1574 | int nents, enum dma_data_direction dir) | ||
1575 | { | ||
1576 | struct scatterlist *s; | ||
1577 | int i; | ||
1578 | |||
1579 | for_each_sg(sg, s, nents, i) | ||
1580 | __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir); | ||
1581 | } | ||
1582 | |||
1583 | |||
1584 | /** | ||
1585 | * arm_coherent_iommu_map_page | ||
1586 | * @dev: valid struct device pointer | ||
1587 | * @page: page that buffer resides in | ||
1588 | * @offset: offset into page for start of buffer | ||
1589 | * @size: size of buffer to map | ||
1590 | * @dir: DMA transfer direction | ||
1591 | * | ||
1592 | * Coherent IOMMU aware version of arm_dma_map_page() | ||
1593 | */ | ||
1594 | static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page, | ||
1595 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
1596 | struct dma_attrs *attrs) | ||
1597 | { | ||
1598 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1599 | dma_addr_t dma_addr; | ||
1600 | int ret, len = PAGE_ALIGN(size + offset); | ||
1601 | |||
1602 | dma_addr = __alloc_iova(mapping, len); | ||
1603 | if (dma_addr == DMA_ERROR_CODE) | ||
1604 | return dma_addr; | ||
1605 | |||
1606 | ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0); | ||
1607 | if (ret < 0) | ||
1608 | goto fail; | ||
1609 | |||
1610 | return dma_addr + offset; | ||
1611 | fail: | ||
1612 | __free_iova(mapping, dma_addr, len); | ||
1613 | return DMA_ERROR_CODE; | ||
1614 | } | ||
1615 | |||
1616 | /** | ||
1617 | * arm_iommu_map_page | ||
1618 | * @dev: valid struct device pointer | ||
1619 | * @page: page that buffer resides in | ||
1620 | * @offset: offset into page for start of buffer | ||
1621 | * @size: size of buffer to map | ||
1622 | * @dir: DMA transfer direction | ||
1623 | * | ||
1624 | * IOMMU aware version of arm_dma_map_page() | ||
1625 | */ | ||
1626 | static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page, | ||
1627 | unsigned long offset, size_t size, enum dma_data_direction dir, | ||
1628 | struct dma_attrs *attrs) | ||
1629 | { | ||
1630 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1631 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
1632 | |||
1633 | return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs); | ||
1634 | } | ||
1635 | |||
1636 | /** | ||
1637 | * arm_coherent_iommu_unmap_page | ||
1638 | * @dev: valid struct device pointer | ||
1639 | * @handle: DMA address of buffer | ||
1640 | * @size: size of buffer (same as passed to dma_map_page) | ||
1641 | * @dir: DMA transfer direction (same as passed to dma_map_page) | ||
1642 | * | ||
1643 | * Coherent IOMMU aware version of arm_dma_unmap_page() | ||
1644 | */ | ||
1645 | static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle, | ||
1646 | size_t size, enum dma_data_direction dir, | ||
1647 | struct dma_attrs *attrs) | ||
1648 | { | ||
1649 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1650 | dma_addr_t iova = handle & PAGE_MASK; | ||
1651 | int offset = handle & ~PAGE_MASK; | ||
1652 | int len = PAGE_ALIGN(size + offset); | ||
1653 | |||
1654 | if (!iova) | ||
1655 | return; | ||
1656 | |||
1657 | iommu_unmap(mapping->domain, iova, len); | ||
1658 | __free_iova(mapping, iova, len); | ||
1659 | } | ||
1660 | |||
1661 | /** | ||
1662 | * arm_iommu_unmap_page | ||
1663 | * @dev: valid struct device pointer | ||
1664 | * @handle: DMA address of buffer | ||
1665 | * @size: size of buffer (same as passed to dma_map_page) | ||
1666 | * @dir: DMA transfer direction (same as passed to dma_map_page) | ||
1667 | * | ||
1668 | * IOMMU aware version of arm_dma_unmap_page() | ||
1669 | */ | ||
1670 | static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle, | ||
1671 | size_t size, enum dma_data_direction dir, | ||
1672 | struct dma_attrs *attrs) | ||
1673 | { | ||
1674 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1675 | dma_addr_t iova = handle & PAGE_MASK; | ||
1676 | struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); | ||
1677 | int offset = handle & ~PAGE_MASK; | ||
1678 | int len = PAGE_ALIGN(size + offset); | ||
1679 | |||
1680 | if (!iova) | ||
1681 | return; | ||
1682 | |||
1683 | if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
1684 | __dma_page_dev_to_cpu(page, offset, size, dir); | ||
1685 | |||
1686 | iommu_unmap(mapping->domain, iova, len); | ||
1687 | __free_iova(mapping, iova, len); | ||
1688 | } | ||
1689 | |||
1690 | static void arm_iommu_sync_single_for_cpu(struct device *dev, | ||
1691 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
1692 | { | ||
1693 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1694 | dma_addr_t iova = handle & PAGE_MASK; | ||
1695 | struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); | ||
1696 | unsigned int offset = handle & ~PAGE_MASK; | ||
1697 | |||
1698 | if (!iova) | ||
1699 | return; | ||
1700 | |||
1701 | __dma_page_dev_to_cpu(page, offset, size, dir); | ||
1702 | } | ||
1703 | |||
1704 | static void arm_iommu_sync_single_for_device(struct device *dev, | ||
1705 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
1706 | { | ||
1707 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | ||
1708 | dma_addr_t iova = handle & PAGE_MASK; | ||
1709 | struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova)); | ||
1710 | unsigned int offset = handle & ~PAGE_MASK; | ||
1711 | |||
1712 | if (!iova) | ||
1713 | return; | ||
1714 | |||
1715 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
1716 | } | ||
1717 | |||
1718 | struct dma_map_ops iommu_ops = { | ||
1719 | .alloc = arm_iommu_alloc_attrs, | ||
1720 | .free = arm_iommu_free_attrs, | ||
1721 | .mmap = arm_iommu_mmap_attrs, | ||
1722 | .get_sgtable = arm_iommu_get_sgtable, | ||
1723 | |||
1724 | .map_page = arm_iommu_map_page, | ||
1725 | .unmap_page = arm_iommu_unmap_page, | ||
1726 | .sync_single_for_cpu = arm_iommu_sync_single_for_cpu, | ||
1727 | .sync_single_for_device = arm_iommu_sync_single_for_device, | ||
1728 | |||
1729 | .map_sg = arm_iommu_map_sg, | ||
1730 | .unmap_sg = arm_iommu_unmap_sg, | ||
1731 | .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu, | ||
1732 | .sync_sg_for_device = arm_iommu_sync_sg_for_device, | ||
1733 | }; | ||
1734 | |||
1735 | struct dma_map_ops iommu_coherent_ops = { | ||
1736 | .alloc = arm_iommu_alloc_attrs, | ||
1737 | .free = arm_iommu_free_attrs, | ||
1738 | .mmap = arm_iommu_mmap_attrs, | ||
1739 | .get_sgtable = arm_iommu_get_sgtable, | ||
1740 | |||
1741 | .map_page = arm_coherent_iommu_map_page, | ||
1742 | .unmap_page = arm_coherent_iommu_unmap_page, | ||
1743 | |||
1744 | .map_sg = arm_coherent_iommu_map_sg, | ||
1745 | .unmap_sg = arm_coherent_iommu_unmap_sg, | ||
1746 | }; | ||
1747 | |||
1748 | /** | ||
1749 | * arm_iommu_create_mapping | ||
1750 | * @bus: pointer to the bus holding the client device (for IOMMU calls) | ||
1751 | * @base: start address of the valid IO address space | ||
1752 | * @size: size of the valid IO address space | ||
1753 | * @order: granularity of the IO address allocations | ||
1754 | * | ||
1755 | * Creates a mapping structure which holds information about used/unused | ||
1756 | * IO address ranges; this is required to perform memory allocation and | ||
1757 | * mapping with IOMMU aware functions. | ||
1758 | * | ||
1759 | * The client device needs to be attached to the mapping with | ||
1760 | * the arm_iommu_attach_device() function. | ||
1761 | */ | ||
1762 | struct dma_iommu_mapping * | ||
1763 | arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size, | ||
1764 | int order) | ||
1765 | { | ||
1766 | unsigned int count = size >> (PAGE_SHIFT + order); | ||
1767 | unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long); | ||
1768 | struct dma_iommu_mapping *mapping; | ||
1769 | int err = -ENOMEM; | ||
1770 | |||
1771 | if (!count) | ||
1772 | return ERR_PTR(-EINVAL); | ||
1773 | |||
1774 | mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL); | ||
1775 | if (!mapping) | ||
1776 | goto err; | ||
1777 | |||
1778 | mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | ||
1779 | if (!mapping->bitmap) | ||
1780 | goto err2; | ||
1781 | |||
1782 | mapping->base = base; | ||
1783 | mapping->bits = BITS_PER_BYTE * bitmap_size; | ||
1784 | mapping->order = order; | ||
1785 | spin_lock_init(&mapping->lock); | ||
1786 | |||
1787 | mapping->domain = iommu_domain_alloc(bus); | ||
1788 | if (!mapping->domain) | ||
1789 | goto err3; | ||
1790 | |||
1791 | kref_init(&mapping->kref); | ||
1792 | return mapping; | ||
1793 | err3: | ||
1794 | kfree(mapping->bitmap); | ||
1795 | err2: | ||
1796 | kfree(mapping); | ||
1797 | err: | ||
1798 | return ERR_PTR(err); | ||
1799 | } | ||
1800 | |||
1801 | static void release_iommu_mapping(struct kref *kref) | ||
1802 | { | ||
1803 | struct dma_iommu_mapping *mapping = | ||
1804 | container_of(kref, struct dma_iommu_mapping, kref); | ||
1805 | |||
1806 | iommu_domain_free(mapping->domain); | ||
1807 | kfree(mapping->bitmap); | ||
1808 | kfree(mapping); | ||
1809 | } | ||
1810 | |||
1811 | void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) | ||
1812 | { | ||
1813 | if (mapping) | ||
1814 | kref_put(&mapping->kref, release_iommu_mapping); | ||
1815 | } | ||
1816 | |||
1817 | /** | ||
1818 | * arm_iommu_attach_device | ||
1819 | * @dev: valid struct device pointer | ||
1820 | * @mapping: io address space mapping structure (returned from | ||
1821 | * arm_iommu_create_mapping) | ||
1822 | * | ||
1823 | * Attaches the specified io address space mapping to the provided device. | ||
1824 | * This replaces the dma operations (dma_map_ops pointer) with the | ||
1825 | * IOMMU aware version. More than one client might be attached to | ||
1826 | * the same io address space mapping. | ||
1827 | */ | ||
1828 | int arm_iommu_attach_device(struct device *dev, | ||
1829 | struct dma_iommu_mapping *mapping) | ||
1830 | { | ||
1831 | int err; | ||
1832 | |||
1833 | err = iommu_attach_device(mapping->domain, dev); | ||
1834 | if (err) | ||
1835 | return err; | ||
1836 | |||
1837 | kref_get(&mapping->kref); | ||
1838 | dev->archdata.mapping = mapping; | ||
1839 | set_dma_ops(dev, &iommu_ops); | ||
1840 | |||
1841 | pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); | ||
1842 | return 0; | ||
1843 | } | ||
1844 | |||
1845 | #endif | ||
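Platform code enables all of the above by creating a mapping and attaching devices to it. A hedged sketch, assuming asm/dma-iommu.h declares these helpers on this kernel and using made-up base/size values:

#include <asm/dma-iommu.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* 0x80000000/0x08000000 (128 MiB) are illustrative values;
	 * order = 0 allocates IO addresses one page at a time. */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, 0x08000000, 0);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret)
		arm_iommu_release_mapping(mapping);

	/* On success every dma_* call on dev now goes through iommu_ops. */
	return ret;
}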
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 2a5907b5c8d..7cab7917942 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
9 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
10 | */ | 10 | */ |
11 | #include <linux/module.h> | ||
11 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
12 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
13 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
@@ -134,6 +135,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, | |||
134 | { | 135 | { |
135 | struct mm_struct *mm = vma->vm_mm; | 136 | struct mm_struct *mm = vma->vm_mm; |
136 | struct vm_area_struct *mpnt; | 137 | struct vm_area_struct *mpnt; |
138 | struct prio_tree_iter iter; | ||
137 | unsigned long offset; | 139 | unsigned long offset; |
138 | pgoff_t pgoff; | 140 | pgoff_t pgoff; |
139 | int aliases = 0; | 141 | int aliases = 0; |
@@ -146,7 +148,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, | |||
146 | * cache coherency. | 148 | * cache coherency. |
147 | */ | 149 | */ |
148 | flush_dcache_mmap_lock(mapping); | 150 | flush_dcache_mmap_lock(mapping); |
149 | vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { | 151 | vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { |
150 | /* | 152 | /* |
151 | * If this VMA is not in our MM, we can ignore it. | 153 | * If this VMA is not in our MM, we can ignore it. |
152 | * Note that we intentionally mask out the VMA | 154 | * Note that we intentionally mask out the VMA |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5dbf13f954f..3b5ea68acbb 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -20,14 +20,25 @@ | |||
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <linux/perf_event.h> | 21 | #include <linux/perf_event.h> |
22 | 22 | ||
23 | #include <asm/exception.h> | 23 | #include <asm/system.h> |
24 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
25 | #include <asm/system_misc.h> | ||
26 | #include <asm/system_info.h> | ||
27 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
28 | 26 | ||
29 | #include "fault.h" | 27 | #include "fault.h" |
30 | 28 | ||
29 | /* | ||
30 | * Fault status register encodings. We steal bit 31 for our own purposes. | ||
31 | */ | ||
32 | #define FSR_LNX_PF (1 << 31) | ||
33 | #define FSR_WRITE (1 << 11) | ||
34 | #define FSR_FS4 (1 << 10) | ||
35 | #define FSR_FS3_0 (15) | ||
36 | |||
37 | static inline int fsr_fs(unsigned int fsr) | ||
38 | { | ||
39 | return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; | ||
40 | } | ||
41 | |||
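fsr_fs() folds FSR bits 3:0 together with bit 10 (FS[4], shifted down to bit 4) into a 0-31 index used to look up fsr_info[] further down. A standalone check of the arithmetic, with two assumed FSR values:

#include <stdio.h>

#define FSR_FS4   (1 << 10)
#define FSR_FS3_0 (15)

static int fsr_fs(unsigned int fsr)
{
	return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
}

int main(void)
{
	/* 0x005 -> index 5:  "section translation fault"  */
	/* 0x406 -> index 22: "imprecise external abort"   */
	printf("%d %d\n", fsr_fs(0x005), fsr_fs(0x406));
	return 0;
}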
31 | #ifdef CONFIG_MMU | 42 | #ifdef CONFIG_MMU |
32 | 43 | ||
33 | #ifdef CONFIG_KPROBES | 44 | #ifdef CONFIG_KPROBES |
@@ -111,10 +122,8 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
111 | 122 | ||
112 | pte = pte_offset_map(pmd, addr); | 123 | pte = pte_offset_map(pmd, addr); |
113 | printk(", *pte=%08llx", (long long)pte_val(*pte)); | 124 | printk(", *pte=%08llx", (long long)pte_val(*pte)); |
114 | #ifndef CONFIG_ARM_LPAE | ||
115 | printk(", *ppte=%08llx", | 125 | printk(", *ppte=%08llx", |
116 | (long long)pte_val(pte[PTE_HWTABLE_PTRS])); | 126 | (long long)pte_val(pte[PTE_HWTABLE_PTRS])); |
117 | #endif | ||
118 | pte_unmap(pte); | 127 | pte_unmap(pte); |
119 | } while(0); | 128 | } while(0); |
120 | 129 | ||
@@ -165,8 +174,7 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, | |||
165 | struct siginfo si; | 174 | struct siginfo si; |
166 | 175 | ||
167 | #ifdef CONFIG_DEBUG_USER | 176 | #ifdef CONFIG_DEBUG_USER |
168 | if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || | 177 | if (user_debug & UDBG_SEGV) { |
169 | ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { | ||
170 | printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", | 178 | printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", |
171 | tsk->comm, sig, addr, fsr); | 179 | tsk->comm, sig, addr, fsr); |
172 | show_pte(tsk->mm, addr); | 180 | show_pte(tsk->mm, addr); |
@@ -222,7 +230,7 @@ static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma) | |||
222 | 230 | ||
223 | static int __kprobes | 231 | static int __kprobes |
224 | __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, | 232 | __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, |
225 | unsigned int flags, struct task_struct *tsk) | 233 | struct task_struct *tsk) |
226 | { | 234 | { |
227 | struct vm_area_struct *vma; | 235 | struct vm_area_struct *vma; |
228 | int fault; | 236 | int fault; |
@@ -244,12 +252,21 @@ good_area: | |||
244 | goto out; | 252 | goto out; |
245 | } | 253 | } |
246 | 254 | ||
247 | return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags); | 255 | /* |
256 | * If for any reason at all we couldn't handle the fault, make | ||
257 | * sure we exit gracefully rather than endlessly redo the fault. | ||
258 | */ | ||
259 | fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0); | ||
260 | if (unlikely(fault & VM_FAULT_ERROR)) | ||
261 | return fault; | ||
262 | if (fault & VM_FAULT_MAJOR) | ||
263 | tsk->maj_flt++; | ||
264 | else | ||
265 | tsk->min_flt++; | ||
266 | return fault; | ||
248 | 267 | ||
249 | check_stack: | 268 | check_stack: |
250 | /* Don't allow expansion below FIRST_USER_ADDRESS */ | 269 | if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr)) |
251 | if (vma->vm_flags & VM_GROWSDOWN && | ||
252 | addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr)) | ||
253 | goto good_area; | 270 | goto good_area; |
254 | out: | 271 | out: |
255 | return fault; | 272 | return fault; |
@@ -261,9 +278,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
261 | struct task_struct *tsk; | 278 | struct task_struct *tsk; |
262 | struct mm_struct *mm; | 279 | struct mm_struct *mm; |
263 | int fault, sig, code; | 280 | int fault, sig, code; |
264 | int write = fsr & FSR_WRITE; | ||
265 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | | ||
266 | (write ? FAULT_FLAG_WRITE : 0); | ||
267 | 281 | ||
268 | if (notify_page_fault(regs, fsr)) | 282 | if (notify_page_fault(regs, fsr)) |
269 | return 0; | 283 | return 0; |
@@ -290,7 +304,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
290 | if (!down_read_trylock(&mm->mmap_sem)) { | 304 | if (!down_read_trylock(&mm->mmap_sem)) { |
291 | if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) | 305 | if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc)) |
292 | goto no_context; | 306 | goto no_context; |
293 | retry: | ||
294 | down_read(&mm->mmap_sem); | 307 | down_read(&mm->mmap_sem); |
295 | } else { | 308 | } else { |
296 | /* | 309 | /* |
@@ -306,42 +319,14 @@ retry: | |||
306 | #endif | 319 | #endif |
307 | } | 320 | } |
308 | 321 | ||
309 | fault = __do_page_fault(mm, addr, fsr, flags, tsk); | 322 | fault = __do_page_fault(mm, addr, fsr, tsk); |
310 | 323 | up_read(&mm->mmap_sem); | |
311 | /* If we need to retry but a fatal signal is pending, handle the | ||
312 | * signal first. We do not need to release the mmap_sem because | ||
313 | * it would already be released in __lock_page_or_retry in | ||
314 | * mm/filemap.c. */ | ||
315 | if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) | ||
316 | return 0; | ||
317 | |||
318 | /* | ||
319 | * Major/minor page fault accounting is only done on the | ||
320 | * initial attempt. If we go through a retry, it is extremely | ||
321 | * likely that the page will be found in page cache at that point. | ||
322 | */ | ||
323 | 324 | ||
324 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); | 325 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); |
325 | if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) { | 326 | if (fault & VM_FAULT_MAJOR) |
326 | if (fault & VM_FAULT_MAJOR) { | 327 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr); |
327 | tsk->maj_flt++; | 328 | else if (fault & VM_FAULT_MINOR) |
328 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, | 329 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr); |
329 | regs, addr); | ||
330 | } else { | ||
331 | tsk->min_flt++; | ||
332 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, | ||
333 | regs, addr); | ||
334 | } | ||
335 | if (fault & VM_FAULT_RETRY) { | ||
336 | /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk | ||
337 | * of starvation. */ | ||
338 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
339 | flags |= FAULT_FLAG_TRIED; | ||
340 | goto retry; | ||
341 | } | ||
342 | } | ||
343 | |||
344 | up_read(&mm->mmap_sem); | ||
345 | 330 | ||
346 | /* | 331 | /* |
347 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR | 332 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |
@@ -433,6 +418,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |||
433 | 418 | ||
434 | index = pgd_index(addr); | 419 | index = pgd_index(addr); |
435 | 420 | ||
421 | /* | ||
422 | * FIXME: CP15 C1 is write only on ARMv3 architectures. | ||
423 | */ | ||
436 | pgd = cpu_get_pgd() + index; | 424 | pgd = cpu_get_pgd() + index; |
437 | pgd_k = init_mm.pgd + index; | 425 | pgd_k = init_mm.pgd + index; |
438 | 426 | ||
@@ -452,12 +440,6 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |||
452 | pmd = pmd_offset(pud, addr); | 440 | pmd = pmd_offset(pud, addr); |
453 | pmd_k = pmd_offset(pud_k, addr); | 441 | pmd_k = pmd_offset(pud_k, addr); |
454 | 442 | ||
455 | #ifdef CONFIG_ARM_LPAE | ||
456 | /* | ||
457 | * Only one hardware entry per PMD with LPAE. | ||
458 | */ | ||
459 | index = 0; | ||
460 | #else | ||
461 | /* | 443 | /* |
462 | * On ARM one Linux PGD entry contains two hardware entries (see page | 444 | * On ARM one Linux PGD entry contains two hardware entries (see page |
463 | * tables layout in pgtable.h). We normally guarantee that we always | 445 | * tables layout in pgtable.h). We normally guarantee that we always |
@@ -467,7 +449,6 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |||
467 | * for the first of pair. | 449 | * for the first of pair. |
468 | */ | 450 | */ |
469 | index = (addr >> SECTION_SHIFT) & 1; | 451 | index = (addr >> SECTION_SHIFT) & 1; |
470 | #endif | ||
471 | if (pmd_none(pmd_k[index])) | 452 | if (pmd_none(pmd_k[index])) |
472 | goto bad_area; | 453 | goto bad_area; |
473 | 454 | ||
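
A short worked example of the pair-selection arithmetic kept above (SECTION_SHIFT is 20 on these 2-level configurations, i.e. 1 MB sections):

	/* Each 2 MB Linux PGD slot holds two 1 MB hardware section
	 * entries; address bit 20 selects which half to inspect. */
	unsigned long addr = 0x40123456;
	unsigned int index = (addr >> SECTION_SHIFT) & 1;	/* bit 20 set -> 1 */
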
@@ -507,20 +488,55 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
507 | return 1; | 488 | return 1; |
508 | } | 489 | } |
509 | 490 | ||
510 | struct fsr_info { | 491 | static struct fsr_info { |
511 | int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | 492 | int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs); |
512 | int sig; | 493 | int sig; |
513 | int code; | 494 | int code; |
514 | const char *name; | 495 | const char *name; |
496 | } fsr_info[] = { | ||
497 | /* | ||
498 | * The following are the standard ARMv3 and ARMv4 aborts. ARMv5 | ||
499 | * defines these to be "precise" aborts. | ||
500 | */ | ||
501 | { do_bad, SIGSEGV, 0, "vector exception" }, | ||
502 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
503 | { do_bad, SIGKILL, 0, "terminal exception" }, | ||
504 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
505 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
506 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
507 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
508 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
509 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
510 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
511 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
512 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
513 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
514 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
515 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
516 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
517 | /* | ||
518 | * The following are "imprecise" aborts, which are signalled by bit | ||
519 | * 10 of the FSR, and may not be recoverable. These are only | ||
520 | * supported if the CPU abort handler supports bit 10. | ||
521 | */ | ||
522 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
523 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
524 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
525 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
526 | { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ | ||
527 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
528 | { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ | ||
529 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
530 | { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ | ||
531 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
532 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
533 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
534 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
535 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
536 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
537 | { do_bad, SIGBUS, 0, "unknown 31" } | ||
515 | }; | 538 | }; |
516 | 539 | ||
517 | /* FSR definition */ | ||
518 | #ifdef CONFIG_ARM_LPAE | ||
519 | #include "fsr-3level.c" | ||
520 | #else | ||
521 | #include "fsr-2level.c" | ||
522 | #endif | ||
523 | |||
524 | void __init | 540 | void __init |
525 | hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), | 541 | hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), |
526 | int sig, int code, const char *name) | 542 | int sig, int code, const char *name) |
@@ -556,6 +572,42 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
556 | arm_notify_die("", regs, &info, fsr, 0); | 572 | arm_notify_die("", regs, &info, fsr, 0); |
557 | } | 573 | } |
558 | 574 | ||
575 | |||
576 | static struct fsr_info ifsr_info[] = { | ||
577 | { do_bad, SIGBUS, 0, "unknown 0" }, | ||
578 | { do_bad, SIGBUS, 0, "unknown 1" }, | ||
579 | { do_bad, SIGBUS, 0, "debug event" }, | ||
580 | { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, | ||
581 | { do_bad, SIGBUS, 0, "unknown 4" }, | ||
582 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
583 | { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, | ||
584 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
585 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
586 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
587 | { do_bad, SIGBUS, 0, "unknown 10" }, | ||
588 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
589 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
590 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
591 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
592 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
593 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
594 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
595 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
596 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
597 | { do_bad, SIGBUS, 0, "unknown 20" }, | ||
598 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
599 | { do_bad, SIGBUS, 0, "unknown 22" }, | ||
600 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
601 | { do_bad, SIGBUS, 0, "unknown 24" }, | ||
602 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
603 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
604 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
605 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
606 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
607 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
608 | { do_bad, SIGBUS, 0, "unknown 31" }, | ||
609 | }; | ||
610 | |||
559 | void __init | 611 | void __init |
560 | hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), | 612 | hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), |
561 | int sig, int code, const char *name) | 613 | int sig, int code, const char *name) |
@@ -588,7 +640,6 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) | |||
588 | arm_notify_die("", regs, &info, ifsr, 0); | 640 | arm_notify_die("", regs, &info, ifsr, 0); |
589 | } | 641 | } |
590 | 642 | ||
591 | #ifndef CONFIG_ARM_LPAE | ||
592 | static int __init exceptions_init(void) | 643 | static int __init exceptions_init(void) |
593 | { | 644 | { |
594 | if (cpu_architecture() >= CPU_ARCH_ARMv6) { | 645 | if (cpu_architecture() >= CPU_ARCH_ARMv6) { |
@@ -611,4 +662,3 @@ static int __init exceptions_init(void) | |||
611 | } | 662 | } |
612 | 663 | ||
613 | arch_initcall(exceptions_init); | 664 | arch_initcall(exceptions_init); |
614 | #endif | ||
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index cf08bdfbe0d..49e9e3804de 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h | |||
@@ -1,28 +1,3 @@ | |||
1 | #ifndef __ARCH_ARM_FAULT_H | ||
2 | #define __ARCH_ARM_FAULT_H | ||
3 | |||
4 | /* | ||
5 | * Fault status register encodings. We steal bit 31 for our own purposes. | ||
6 | */ | ||
7 | #define FSR_LNX_PF (1 << 31) | ||
8 | #define FSR_WRITE (1 << 11) | ||
9 | #define FSR_FS4 (1 << 10) | ||
10 | #define FSR_FS3_0 (15) | ||
11 | #define FSR_FS5_0 (0x3f) | ||
12 | |||
13 | #ifdef CONFIG_ARM_LPAE | ||
14 | static inline int fsr_fs(unsigned int fsr) | ||
15 | { | ||
16 | return fsr & FSR_FS5_0; | ||
17 | } | ||
18 | #else | ||
19 | static inline int fsr_fs(unsigned int fsr) | ||
20 | { | ||
21 | return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; | ||
22 | } | ||
23 | #endif | ||
24 | |||
25 | void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); | 1 | void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); |
26 | unsigned long search_exception_table(unsigned long addr); | ||
27 | 2 | ||
28 | #endif /* __ARCH_ARM_FAULT_H */ | 3 | unsigned long search_exception_table(unsigned long addr); |
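
The encodings deleted above combined FS[3:0] (FSR bits 3:0) with FS[4] (bit 10). A worked example of the 2-level decode, using the masks from the deleted header (the 0x405 value is invented for illustration):

	#define FSR_FS4		(1 << 10)
	#define FSR_FS3_0	(15)

	static inline int fsr_fs(unsigned int fsr)
	{
		return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
	}

	/* e.g. fsr = 0x405: low nibble 5, bit 10 set -> 5 | 0x10 = 21,
	 * which indexes "unknown 21" in the 32-entry fsr_info[] table. */
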
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1c8f7f56417..1a8d4aa821b 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -16,18 +16,22 @@ | |||
16 | #include <asm/cachetype.h> | 16 | #include <asm/cachetype.h> |
17 | #include <asm/highmem.h> | 17 | #include <asm/highmem.h> |
18 | #include <asm/smp_plat.h> | 18 | #include <asm/smp_plat.h> |
19 | #include <asm/system.h> | ||
19 | #include <asm/tlbflush.h> | 20 | #include <asm/tlbflush.h> |
20 | 21 | ||
21 | #include "mm.h" | 22 | #include "mm.h" |
22 | 23 | ||
23 | #ifdef CONFIG_CPU_CACHE_VIPT | 24 | #ifdef CONFIG_CPU_CACHE_VIPT |
24 | 25 | ||
26 | #define ALIAS_FLUSH_START 0xffff4000 | ||
27 | |||
25 | static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | 28 | static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) |
26 | { | 29 | { |
27 | unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); | 30 | unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); |
28 | const int zero = 0; | 31 | const int zero = 0; |
29 | 32 | ||
30 | set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL)); | 33 | set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); |
34 | flush_tlb_kernel_page(to); | ||
31 | 35 | ||
32 | asm( "mcrr p15, 0, %1, %0, c14\n" | 36 | asm( "mcrr p15, 0, %1, %0, c14\n" |
33 | " mcr p15, 0, %2, c7, c10, 4" | 37 | " mcr p15, 0, %2, c7, c10, 4" |
@@ -38,12 +42,13 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | |||
38 | 42 | ||
39 | static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) | 43 | static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) |
40 | { | 44 | { |
41 | unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); | 45 | unsigned long colour = CACHE_COLOUR(vaddr); |
42 | unsigned long offset = vaddr & (PAGE_SIZE - 1); | 46 | unsigned long offset = vaddr & (PAGE_SIZE - 1); |
43 | unsigned long to; | 47 | unsigned long to; |
44 | 48 | ||
45 | set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL)); | 49 | set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); |
46 | to = va + offset; | 50 | to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset; |
51 | flush_tlb_kernel_page(to); | ||
47 | flush_icache_range(to, to + len); | 52 | flush_icache_range(to, to + len); |
48 | } | 53 | } |
49 | 54 | ||
@@ -196,6 +201,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p | |||
196 | { | 201 | { |
197 | struct mm_struct *mm = current->active_mm; | 202 | struct mm_struct *mm = current->active_mm; |
198 | struct vm_area_struct *mpnt; | 203 | struct vm_area_struct *mpnt; |
204 | struct prio_tree_iter iter; | ||
199 | pgoff_t pgoff; | 205 | pgoff_t pgoff; |
200 | 206 | ||
201 | /* | 207 | /* |
@@ -207,7 +213,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p | |||
207 | pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); | 213 | pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); |
208 | 214 | ||
209 | flush_dcache_mmap_lock(mapping); | 215 | flush_dcache_mmap_lock(mapping); |
210 | vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { | 216 | vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) { |
211 | unsigned long offset; | 217 | unsigned long offset; |
212 | 218 | ||
213 | /* | 219 | /* |
@@ -230,6 +236,8 @@ void __sync_icache_dcache(pte_t pteval) | |||
230 | struct page *page; | 236 | struct page *page; |
231 | struct address_space *mapping; | 237 | struct address_space *mapping; |
232 | 238 | ||
239 | if (!pte_present_user(pteval)) | ||
240 | return; | ||
233 | if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) | 241 | if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) |
234 | /* only flush non-aliasing VIPT caches for exec mappings */ | 242 | /* only flush non-aliasing VIPT caches for exec mappings */ |
235 | return; | 243 | return; |
diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c deleted file mode 100644 index 18ca74c0f34..00000000000 --- a/arch/arm/mm/fsr-2level.c +++ /dev/null | |||
@@ -1,78 +0,0 @@ | |||
1 | static struct fsr_info fsr_info[] = { | ||
2 | /* | ||
3 | * The following are the standard ARMv3 and ARMv4 aborts. ARMv5 | ||
4 | * defines these to be "precise" aborts. | ||
5 | */ | ||
6 | { do_bad, SIGSEGV, 0, "vector exception" }, | ||
7 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
8 | { do_bad, SIGKILL, 0, "terminal exception" }, | ||
9 | { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, | ||
10 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
11 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
12 | { do_bad, SIGBUS, 0, "external abort on linefetch" }, | ||
13 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
14 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
15 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
16 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
17 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
18 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
19 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
20 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
21 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
22 | /* | ||
23 | * The following are "imprecise" aborts, which are signalled by bit | ||
24 | * 10 of the FSR, and may not be recoverable. These are only | ||
25 | * supported if the CPU abort handler supports bit 10. | ||
26 | */ | ||
27 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
28 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
29 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
30 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
31 | { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ | ||
32 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
33 | { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ | ||
34 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
35 | { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ | ||
36 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
37 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
38 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
39 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
40 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
41 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
42 | { do_bad, SIGBUS, 0, "unknown 31" }, | ||
43 | }; | ||
44 | |||
45 | static struct fsr_info ifsr_info[] = { | ||
46 | { do_bad, SIGBUS, 0, "unknown 0" }, | ||
47 | { do_bad, SIGBUS, 0, "unknown 1" }, | ||
48 | { do_bad, SIGBUS, 0, "debug event" }, | ||
49 | { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, | ||
50 | { do_bad, SIGBUS, 0, "unknown 4" }, | ||
51 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, | ||
52 | { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, | ||
53 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, | ||
54 | { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, | ||
55 | { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, | ||
56 | { do_bad, SIGBUS, 0, "unknown 10" }, | ||
57 | { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, | ||
58 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
59 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, | ||
60 | { do_bad, SIGBUS, 0, "external abort on translation" }, | ||
61 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, | ||
62 | { do_bad, SIGBUS, 0, "unknown 16" }, | ||
63 | { do_bad, SIGBUS, 0, "unknown 17" }, | ||
64 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
65 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
66 | { do_bad, SIGBUS, 0, "unknown 20" }, | ||
67 | { do_bad, SIGBUS, 0, "unknown 21" }, | ||
68 | { do_bad, SIGBUS, 0, "unknown 22" }, | ||
69 | { do_bad, SIGBUS, 0, "unknown 23" }, | ||
70 | { do_bad, SIGBUS, 0, "unknown 24" }, | ||
71 | { do_bad, SIGBUS, 0, "unknown 25" }, | ||
72 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
73 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
74 | { do_bad, SIGBUS, 0, "unknown 28" }, | ||
75 | { do_bad, SIGBUS, 0, "unknown 29" }, | ||
76 | { do_bad, SIGBUS, 0, "unknown 30" }, | ||
77 | { do_bad, SIGBUS, 0, "unknown 31" }, | ||
78 | }; | ||
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c deleted file mode 100644 index 05a4e943183..00000000000 --- a/arch/arm/mm/fsr-3level.c +++ /dev/null | |||
@@ -1,68 +0,0 @@ | |||
1 | static struct fsr_info fsr_info[] = { | ||
2 | { do_bad, SIGBUS, 0, "unknown 0" }, | ||
3 | { do_bad, SIGBUS, 0, "unknown 1" }, | ||
4 | { do_bad, SIGBUS, 0, "unknown 2" }, | ||
5 | { do_bad, SIGBUS, 0, "unknown 3" }, | ||
6 | { do_bad, SIGBUS, 0, "reserved translation fault" }, | ||
7 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, | ||
8 | { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, | ||
9 | { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, | ||
10 | { do_bad, SIGBUS, 0, "reserved access flag fault" }, | ||
11 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, | ||
12 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, | ||
13 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, | ||
14 | { do_bad, SIGBUS, 0, "reserved permission fault" }, | ||
15 | { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, | ||
16 | { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, | ||
17 | { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, | ||
18 | { do_bad, SIGBUS, 0, "synchronous external abort" }, | ||
19 | { do_bad, SIGBUS, 0, "asynchronous external abort" }, | ||
20 | { do_bad, SIGBUS, 0, "unknown 18" }, | ||
21 | { do_bad, SIGBUS, 0, "unknown 19" }, | ||
22 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
23 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
24 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
25 | { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, | ||
26 | { do_bad, SIGBUS, 0, "synchronous parity error" }, | ||
27 | { do_bad, SIGBUS, 0, "asynchronous parity error" }, | ||
28 | { do_bad, SIGBUS, 0, "unknown 26" }, | ||
29 | { do_bad, SIGBUS, 0, "unknown 27" }, | ||
30 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, | ||
31 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, | ||
32 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, | ||
33 | { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, | ||
34 | { do_bad, SIGBUS, 0, "unknown 32" }, | ||
35 | { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, | ||
36 | { do_bad, SIGBUS, 0, "debug event" }, | ||
37 | { do_bad, SIGBUS, 0, "unknown 35" }, | ||
38 | { do_bad, SIGBUS, 0, "unknown 36" }, | ||
39 | { do_bad, SIGBUS, 0, "unknown 37" }, | ||
40 | { do_bad, SIGBUS, 0, "unknown 38" }, | ||
41 | { do_bad, SIGBUS, 0, "unknown 39" }, | ||
42 | { do_bad, SIGBUS, 0, "unknown 40" }, | ||
43 | { do_bad, SIGBUS, 0, "unknown 41" }, | ||
44 | { do_bad, SIGBUS, 0, "unknown 42" }, | ||
45 | { do_bad, SIGBUS, 0, "unknown 43" }, | ||
46 | { do_bad, SIGBUS, 0, "unknown 44" }, | ||
47 | { do_bad, SIGBUS, 0, "unknown 45" }, | ||
48 | { do_bad, SIGBUS, 0, "unknown 46" }, | ||
49 | { do_bad, SIGBUS, 0, "unknown 47" }, | ||
50 | { do_bad, SIGBUS, 0, "unknown 48" }, | ||
51 | { do_bad, SIGBUS, 0, "unknown 49" }, | ||
52 | { do_bad, SIGBUS, 0, "unknown 50" }, | ||
53 | { do_bad, SIGBUS, 0, "unknown 51" }, | ||
54 | { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, | ||
55 | { do_bad, SIGBUS, 0, "unknown 53" }, | ||
56 | { do_bad, SIGBUS, 0, "unknown 54" }, | ||
57 | { do_bad, SIGBUS, 0, "unknown 55" }, | ||
58 | { do_bad, SIGBUS, 0, "unknown 56" }, | ||
59 | { do_bad, SIGBUS, 0, "unknown 57" }, | ||
60 | { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, | ||
61 | { do_bad, SIGBUS, 0, "unknown 59" }, | ||
62 | { do_bad, SIGBUS, 0, "unknown 60" }, | ||
63 | { do_bad, SIGBUS, 0, "unknown 61" }, | ||
64 | { do_bad, SIGBUS, 0, "unknown 62" }, | ||
65 | { do_bad, SIGBUS, 0, "unknown 63" }, | ||
66 | }; | ||
67 | |||
68 | #define ifsr_info fsr_info | ||
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 21b9e1bf9b7..807c0573abb 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -36,7 +36,7 @@ void kunmap(struct page *page) | |||
36 | } | 36 | } |
37 | EXPORT_SYMBOL(kunmap); | 37 | EXPORT_SYMBOL(kunmap); |
38 | 38 | ||
39 | void *kmap_atomic(struct page *page) | 39 | void *__kmap_atomic(struct page *page) |
40 | { | 40 | { |
41 | unsigned int idx; | 41 | unsigned int idx; |
42 | unsigned long vaddr; | 42 | unsigned long vaddr; |
@@ -69,18 +69,19 @@ void *kmap_atomic(struct page *page) | |||
69 | * With debugging enabled, kunmap_atomic forces that entry to 0. | 69 | * With debugging enabled, kunmap_atomic forces that entry to 0. |
70 | * Make sure it was indeed properly unmapped. | 70 | * Make sure it was indeed properly unmapped. |
71 | */ | 71 | */ |
72 | BUG_ON(!pte_none(get_top_pte(vaddr))); | 72 | BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); |
73 | #endif | 73 | #endif |
74 | set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0); | ||
74 | /* | 75 | /* |
75 | * When debugging is off, kunmap_atomic leaves the previous mapping | 76 | * When debugging is off, kunmap_atomic leaves the previous mapping |
76 | * in place, so the contained TLB flush ensures the TLB is updated | 77 | * in place, so this TLB flush ensures the TLB is updated with the |
77 | * with the new mapping. | 78 | * new mapping. |
78 | */ | 79 | */ |
79 | set_top_pte(vaddr, mk_pte(page, kmap_prot)); | 80 | local_flush_tlb_kernel_page(vaddr); |
80 | 81 | ||
81 | return (void *)vaddr; | 82 | return (void *)vaddr; |
82 | } | 83 | } |
83 | EXPORT_SYMBOL(kmap_atomic); | 84 | EXPORT_SYMBOL(__kmap_atomic); |
84 | 85 | ||
85 | void __kunmap_atomic(void *kvaddr) | 86 | void __kunmap_atomic(void *kvaddr) |
86 | { | 87 | { |
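
The per-CPU fixmap arithmetic behind __kmap_atomic() above, sketched in isolation (same identifiers as the kernel code):

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();	/* per-CPU slot */
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);	/* make the new PTE visible */
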
@@ -95,7 +96,8 @@ void __kunmap_atomic(void *kvaddr) | |||
95 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | 96 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); |
96 | #ifdef CONFIG_DEBUG_HIGHMEM | 97 | #ifdef CONFIG_DEBUG_HIGHMEM |
97 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 98 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
98 | set_top_pte(vaddr, __pte(0)); | 99 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); |
100 | local_flush_tlb_kernel_page(vaddr); | ||
99 | #else | 101 | #else |
100 | (void) idx; /* to kill a warning */ | 102 | (void) idx; /* to kill a warning */ |
101 | #endif | 103 | #endif |
@@ -119,9 +121,10 @@ void *kmap_atomic_pfn(unsigned long pfn) | |||
119 | idx = type + KM_TYPE_NR * smp_processor_id(); | 121 | idx = type + KM_TYPE_NR * smp_processor_id(); |
120 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 122 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
121 | #ifdef CONFIG_DEBUG_HIGHMEM | 123 | #ifdef CONFIG_DEBUG_HIGHMEM |
122 | BUG_ON(!pte_none(get_top_pte(vaddr))); | 124 | BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); |
123 | #endif | 125 | #endif |
124 | set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); | 126 | set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); |
127 | local_flush_tlb_kernel_page(vaddr); | ||
125 | 128 | ||
126 | return (void *)vaddr; | 129 | return (void *)vaddr; |
127 | } | 130 | } |
@@ -129,9 +132,11 @@ void *kmap_atomic_pfn(unsigned long pfn) | |||
129 | struct page *kmap_atomic_to_page(const void *ptr) | 132 | struct page *kmap_atomic_to_page(const void *ptr) |
130 | { | 133 | { |
131 | unsigned long vaddr = (unsigned long)ptr; | 134 | unsigned long vaddr = (unsigned long)ptr; |
135 | pte_t *pte; | ||
132 | 136 | ||
133 | if (vaddr < FIXADDR_START) | 137 | if (vaddr < FIXADDR_START) |
134 | return virt_to_page(ptr); | 138 | return virt_to_page(ptr); |
135 | 139 | ||
136 | return pte_page(get_top_pte(vaddr)); | 140 | pte = TOP_PTE(vaddr); |
141 | return pte_page(*pte); | ||
137 | } | 142 | } |
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 99db769307e..2be9139a4ef 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c | |||
@@ -1,39 +1,9 @@ | |||
1 | #include <linux/kernel.h> | 1 | #include <linux/kernel.h> |
2 | 2 | ||
3 | #include <asm/cputype.h> | 3 | #include <asm/cputype.h> |
4 | #include <asm/idmap.h> | ||
5 | #include <asm/pgalloc.h> | 4 | #include <asm/pgalloc.h> |
6 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
7 | #include <asm/sections.h> | ||
8 | #include <asm/system_info.h> | ||
9 | 6 | ||
10 | pgd_t *idmap_pgd; | ||
11 | |||
12 | #ifdef CONFIG_ARM_LPAE | ||
13 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, | ||
14 | unsigned long prot) | ||
15 | { | ||
16 | pmd_t *pmd; | ||
17 | unsigned long next; | ||
18 | |||
19 | if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) { | ||
20 | pmd = pmd_alloc_one(&init_mm, addr); | ||
21 | if (!pmd) { | ||
22 | pr_warning("Failed to allocate identity pmd.\n"); | ||
23 | return; | ||
24 | } | ||
25 | pud_populate(&init_mm, pud, pmd); | ||
26 | pmd += pmd_index(addr); | ||
27 | } else | ||
28 | pmd = pmd_offset(pud, addr); | ||
29 | |||
30 | do { | ||
31 | next = pmd_addr_end(addr, end); | ||
32 | *pmd = __pmd((addr & PMD_MASK) | prot); | ||
33 | flush_pmd_entry(pmd); | ||
34 | } while (pmd++, addr = next, addr != end); | ||
35 | } | ||
36 | #else /* !CONFIG_ARM_LPAE */ | ||
37 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, | 7 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, |
38 | unsigned long prot) | 8 | unsigned long prot) |
39 | { | 9 | { |
@@ -45,7 +15,6 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, | |||
45 | pmd[1] = __pmd(addr); | 15 | pmd[1] = __pmd(addr); |
46 | flush_pmd_entry(pmd); | 16 | flush_pmd_entry(pmd); |
47 | } | 17 | } |
48 | #endif /* CONFIG_ARM_LPAE */ | ||
49 | 18 | ||
50 | static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | 19 | static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, |
51 | unsigned long prot) | 20 | unsigned long prot) |
@@ -59,11 +28,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | |||
59 | } while (pud++, addr = next, addr != end); | 28 | } while (pud++, addr = next, addr != end); |
60 | } | 29 | } |
61 | 30 | ||
62 | static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) | 31 | void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) |
63 | { | 32 | { |
64 | unsigned long prot, next; | 33 | unsigned long prot, next; |
65 | 34 | ||
66 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; | 35 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; |
67 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | 36 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) |
68 | prot |= PMD_BIT4; | 37 | prot |= PMD_BIT4; |
69 | 38 | ||
@@ -74,47 +43,48 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e | |||
74 | } while (pgd++, addr = next, addr != end); | 43 | } while (pgd++, addr = next, addr != end); |
75 | } | 44 | } |
76 | 45 | ||
77 | extern char __idmap_text_start[], __idmap_text_end[]; | 46 | #ifdef CONFIG_SMP |
78 | 47 | static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) | |
79 | static int __init init_static_idmap(void) | ||
80 | { | 48 | { |
81 | phys_addr_t idmap_start, idmap_end; | 49 | pmd_t *pmd = pmd_offset(pud, addr); |
82 | 50 | pmd_clear(pmd); | |
83 | idmap_pgd = pgd_alloc(&init_mm); | 51 | } |
84 | if (!idmap_pgd) | ||
85 | return -ENOMEM; | ||
86 | 52 | ||
87 | /* Add an identity mapping for the physical address of the section. */ | 53 | static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) |
88 | idmap_start = virt_to_phys((void *)__idmap_text_start); | 54 | { |
89 | idmap_end = virt_to_phys((void *)__idmap_text_end); | 55 | pud_t *pud = pud_offset(pgd, addr); |
56 | unsigned long next; | ||
90 | 57 | ||
91 | pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", | 58 | do { |
92 | (long long)idmap_start, (long long)idmap_end); | 59 | next = pud_addr_end(addr, end); |
93 | identity_mapping_add(idmap_pgd, idmap_start, idmap_end); | 60 | idmap_del_pmd(pud, addr, next); |
61 | } while (pud++, addr = next, addr != end); | ||
62 | } | ||
94 | 63 | ||
95 | /* Flush L1 for the hardware to see this page table content */ | 64 | void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) |
96 | flush_cache_louis(); | 65 | { |
66 | unsigned long next; | ||
97 | 67 | ||
98 | return 0; | 68 | pgd += pgd_index(addr); |
69 | do { | ||
70 | next = pgd_addr_end(addr, end); | ||
71 | idmap_del_pud(pgd, addr, next); | ||
72 | } while (pgd++, addr = next, addr != end); | ||
99 | } | 73 | } |
100 | early_initcall(init_static_idmap); | 74 | #endif |
101 | 75 | ||
102 | /* | 76 | /* |
103 | * In order to soft-boot, we need to switch to a 1:1 mapping for the | 77 | * In order to soft-boot, we need to insert a 1:1 mapping in place of |
104 | * cpu_reset functions. This will then ensure that we have predictable | 78 | * the user-mode pages. This will then ensure that we have predictable |
105 | * results when turning off the mmu. | 79 | * results when turning the mmu off |
106 | */ | 80 | */ |
107 | void setup_mm_for_reboot(void) | 81 | void setup_mm_for_reboot(char mode) |
108 | { | 82 | { |
109 | /* Switch to the identity mapping. */ | ||
110 | cpu_switch_mm(idmap_pgd, &init_mm); | ||
111 | |||
112 | #ifdef CONFIG_CPU_HAS_ASID | ||
113 | /* | 83 | /* |
114 | * We don't have a clean ASID for the identity mapping, which | 84 | * We need access to user-mode page tables here. For kernel threads |
115 | * may clash with virtual addresses of the previous page tables | 85 | * we don't have any user-mode mappings so we use the context that we |
116 | * and therefore potentially in the TLB. | 86 | * "borrowed". |
117 | */ | 87 | */ |
88 | identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE); | ||
118 | local_flush_tlb_all(); | 89 | local_flush_tlb_all(); |
119 | #endif | ||
120 | } | 90 | } |
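
What identity_mapping_add() above establishes, as a hedged sketch (the helper name is invented, and it is simplified to one 1 MB entry per iteration, where the real code fills a hardware pair per 2 MB PGD slot):

	static void identity_map_sketch(pgd_t *pgd, unsigned long addr,
					unsigned long end)
	{
		unsigned long prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

		for (; addr < end; addr += SECTION_SIZE) {
			pud_t *pud = pud_offset(pgd + pgd_index(addr), addr);
			pmd_t *pmd = pmd_offset(pud, addr);

			*pmd = __pmd((addr & SECTION_MASK) | prot);	/* VA == PA */
			flush_pmd_entry(pmd);
		}
	}
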
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index ad722f1208a..f8037ba338a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -13,21 +13,19 @@ | |||
13 | #include <linux/init.h> | 13 | #include <linux/init.h> |
14 | #include <linux/bootmem.h> | 14 | #include <linux/bootmem.h> |
15 | #include <linux/mman.h> | 15 | #include <linux/mman.h> |
16 | #include <linux/export.h> | ||
17 | #include <linux/nodemask.h> | 16 | #include <linux/nodemask.h> |
18 | #include <linux/initrd.h> | 17 | #include <linux/initrd.h> |
19 | #include <linux/of_fdt.h> | 18 | #include <linux/of_fdt.h> |
20 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
21 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
22 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
23 | #include <linux/dma-contiguous.h> | 22 | #include <linux/sort.h> |
24 | #include <linux/sizes.h> | ||
25 | 23 | ||
26 | #include <asm/mach-types.h> | 24 | #include <asm/mach-types.h> |
27 | #include <asm/memblock.h> | ||
28 | #include <asm/prom.h> | 25 | #include <asm/prom.h> |
29 | #include <asm/sections.h> | 26 | #include <asm/sections.h> |
30 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
28 | #include <asm/sizes.h> | ||
31 | #include <asm/tlb.h> | 29 | #include <asm/tlb.h> |
32 | #include <asm/fixmap.h> | 30 | #include <asm/fixmap.h> |
33 | 31 | ||
@@ -135,18 +133,30 @@ void show_mem(unsigned int filter) | |||
135 | } | 133 | } |
136 | 134 | ||
137 | static void __init find_limits(unsigned long *min, unsigned long *max_low, | 135 | static void __init find_limits(unsigned long *min, unsigned long *max_low, |
138 | unsigned long *max_high) | 136 | unsigned long *max_high) |
139 | { | 137 | { |
140 | struct meminfo *mi = &meminfo; | 138 | struct meminfo *mi = &meminfo; |
141 | int i; | 139 | int i; |
142 | 140 | ||
143 | /* This assumes the meminfo array is properly sorted */ | 141 | *min = -1UL; |
144 | *min = bank_pfn_start(&mi->bank[0]); | 142 | *max_low = *max_high = 0; |
145 | for_each_bank (i, mi) | 143 | |
146 | if (mi->bank[i].highmem) | 144 | for_each_bank (i, mi) { |
147 | break; | 145 | struct membank *bank = &mi->bank[i]; |
148 | *max_low = bank_pfn_end(&mi->bank[i - 1]); | 146 | unsigned long start, end; |
149 | *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); | 147 | |
148 | start = bank_pfn_start(bank); | ||
149 | end = bank_pfn_end(bank); | ||
150 | |||
151 | if (*min > start) | ||
152 | *min = start; | ||
153 | if (*max_high < end) | ||
154 | *max_high = end; | ||
155 | if (bank->highmem) | ||
156 | continue; | ||
157 | if (*max_low < end) | ||
158 | *max_low = end; | ||
159 | } | ||
150 | } | 160 | } |
151 | 161 | ||
152 | static void __init arm_bootmem_init(unsigned long start_pfn, | 162 | static void __init arm_bootmem_init(unsigned long start_pfn, |
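
The rewritten find_limits() above no longer assumes meminfo is sorted. A worked example with invented banks:

	/* bank0: pfn 0x60000..0x70000, lowmem
	 * bank1: pfn 0x80000..0x90000, highmem
	 * After the scan: *min = 0x60000, *max_low = 0x70000,
	 * *max_high = 0x90000 - the same result in any bank order,
	 * which is exactly what the replaced version could not guarantee. */
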
@@ -212,7 +222,7 @@ EXPORT_SYMBOL(arm_dma_zone_size); | |||
212 | * allocations. This must be the smallest DMA mask in the system, | 222 | * allocations. This must be the smallest DMA mask in the system, |
213 | * so a successful GFP_DMA allocation will always satisfy this. | 223 | * so a successful GFP_DMA allocation will always satisfy this. |
214 | */ | 224 | */ |
215 | phys_addr_t arm_dma_limit; | 225 | u32 arm_dma_limit; |
216 | 226 | ||
217 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 227 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
218 | unsigned long dma_size) | 228 | unsigned long dma_size) |
@@ -227,17 +237,6 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | |||
227 | } | 237 | } |
228 | #endif | 238 | #endif |
229 | 239 | ||
230 | void __init setup_dma_zone(struct machine_desc *mdesc) | ||
231 | { | ||
232 | #ifdef CONFIG_ZONE_DMA | ||
233 | if (mdesc->dma_zone_size) { | ||
234 | arm_dma_zone_size = mdesc->dma_zone_size; | ||
235 | arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; | ||
236 | } else | ||
237 | arm_dma_limit = 0xffffffff; | ||
238 | #endif | ||
239 | } | ||
240 | |||
241 | static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | 240 | static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, |
242 | unsigned long max_high) | 241 | unsigned long max_high) |
243 | { | 242 | { |
@@ -285,9 +284,12 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | |||
285 | * Adjust the sizes according to any special requirements for | 284 | * Adjust the sizes according to any special requirements for |
286 | * this machine type. | 285 | * this machine type. |
287 | */ | 286 | */ |
288 | if (arm_dma_zone_size) | 287 | if (arm_dma_zone_size) { |
289 | arm_adjust_dma_zone(zone_size, zhole_size, | 288 | arm_adjust_dma_zone(zone_size, zhole_size, |
290 | arm_dma_zone_size >> PAGE_SHIFT); | 289 | arm_dma_zone_size >> PAGE_SHIFT); |
290 | arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; | ||
291 | } else | ||
292 | arm_dma_limit = 0xffffffff; | ||
291 | #endif | 293 | #endif |
292 | 294 | ||
293 | free_area_init_node(0, zone_size, min, zhole_size); | 295 | free_area_init_node(0, zone_size, min, zhole_size); |
@@ -302,11 +304,11 @@ EXPORT_SYMBOL(pfn_valid); | |||
302 | #endif | 304 | #endif |
303 | 305 | ||
304 | #ifndef CONFIG_SPARSEMEM | 306 | #ifndef CONFIG_SPARSEMEM |
305 | static void __init arm_memory_present(void) | 307 | static void arm_memory_present(void) |
306 | { | 308 | { |
307 | } | 309 | } |
308 | #else | 310 | #else |
309 | static void __init arm_memory_present(void) | 311 | static void arm_memory_present(void) |
310 | { | 312 | { |
311 | struct memblock_region *reg; | 313 | struct memblock_region *reg; |
312 | 314 | ||
@@ -316,25 +318,20 @@ static void __init arm_memory_present(void) | |||
316 | } | 318 | } |
317 | #endif | 319 | #endif |
318 | 320 | ||
319 | static bool arm_memblock_steal_permitted = true; | 321 | static int __init meminfo_cmp(const void *_a, const void *_b) |
320 | |||
321 | phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align) | ||
322 | { | 322 | { |
323 | phys_addr_t phys; | 323 | const struct membank *a = _a, *b = _b; |
324 | 324 | long cmp = bank_pfn_start(a) - bank_pfn_start(b); | |
325 | BUG_ON(!arm_memblock_steal_permitted); | 325 | return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; |
326 | |||
327 | phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); | ||
328 | memblock_free(phys, size); | ||
329 | memblock_remove(phys, size); | ||
330 | |||
331 | return phys; | ||
332 | } | 326 | } |
333 | 327 | ||
334 | void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | 328 | void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) |
335 | { | 329 | { |
336 | int i; | 330 | int i; |
337 | 331 | ||
332 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); | ||
333 | |||
334 | memblock_init(); | ||
338 | for (i = 0; i < mi->nr_banks; i++) | 335 | for (i = 0; i < mi->nr_banks; i++) |
339 | memblock_add(mi->bank[i].start, mi->bank[i].size); | 336 | memblock_add(mi->bank[i].start, mi->bank[i].size); |
340 | 337 | ||
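
One detail of meminfo_cmp() above worth noting: sort() expects an int from its comparator, so the long difference is clamped to -1/0/1 instead of being returned raw (which could truncate on large physical addresses). An invented example:

	struct membank a = { .start = 0x80000000, .size = SZ_256M };
	struct membank b = { .start = 0x20000000, .size = SZ_256M };
	/* bank_pfn_start(&a) > bank_pfn_start(&b), so meminfo_cmp(&a, &b)
	 * returns 1 and sort() places b's bank first. */
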
@@ -373,14 +370,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
373 | if (mdesc->reserve) | 370 | if (mdesc->reserve) |
374 | mdesc->reserve(); | 371 | mdesc->reserve(); |
375 | 372 | ||
376 | /* | 373 | memblock_analyze(); |
377 | * reserve memory for DMA contigouos allocations, | ||
378 | * must come from DMA area inside low memory | ||
379 | */ | ||
380 | dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit)); | ||
381 | |||
382 | arm_memblock_steal_permitted = false; | ||
383 | memblock_allow_resize(); | ||
384 | memblock_dump_all(); | 374 | memblock_dump_all(); |
385 | } | 375 | } |
386 | 376 | ||
@@ -412,6 +402,8 @@ void __init bootmem_init(void) | |||
412 | */ | 402 | */ |
413 | arm_bootmem_free(min, max_low, max_high); | 403 | arm_bootmem_free(min, max_low, max_high); |
414 | 404 | ||
405 | high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1; | ||
406 | |||
415 | /* | 407 | /* |
416 | * This doesn't seem to be used by the Linux memory manager any | 408 | * This doesn't seem to be used by the Linux memory manager any |
417 | * more, but is used by ll_rw_block. If we can get rid of it, we | 409 | * more, but is used by ll_rw_block. If we can get rid of it, we |
@@ -668,14 +660,15 @@ void __init mem_init(void) | |||
668 | " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" | 660 | " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n" |
669 | #endif | 661 | #endif |
670 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" | 662 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
663 | #ifdef CONFIG_MMU | ||
664 | " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
665 | #endif | ||
671 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | 666 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" |
672 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | 667 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" |
673 | #ifdef CONFIG_HIGHMEM | 668 | #ifdef CONFIG_HIGHMEM |
674 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | 669 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" |
675 | #endif | 670 | #endif |
676 | #ifdef CONFIG_MODULES | ||
677 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | 671 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" |
678 | #endif | ||
679 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | 672 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" |
680 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | 673 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" |
681 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" | 674 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" |
@@ -688,15 +681,16 @@ void __init mem_init(void) | |||
688 | MLK(ITCM_OFFSET, (unsigned long) itcm_end), | 681 | MLK(ITCM_OFFSET, (unsigned long) itcm_end), |
689 | #endif | 682 | #endif |
690 | MLK(FIXADDR_START, FIXADDR_TOP), | 683 | MLK(FIXADDR_START, FIXADDR_TOP), |
684 | #ifdef CONFIG_MMU | ||
685 | MLM(CONSISTENT_BASE, CONSISTENT_END), | ||
686 | #endif | ||
691 | MLM(VMALLOC_START, VMALLOC_END), | 687 | MLM(VMALLOC_START, VMALLOC_END), |
692 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | 688 | MLM(PAGE_OFFSET, (unsigned long)high_memory), |
693 | #ifdef CONFIG_HIGHMEM | 689 | #ifdef CONFIG_HIGHMEM |
694 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * | 690 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * |
695 | (PAGE_SIZE)), | 691 | (PAGE_SIZE)), |
696 | #endif | 692 | #endif |
697 | #ifdef CONFIG_MODULES | ||
698 | MLM(MODULES_VADDR, MODULES_END), | 693 | MLM(MODULES_VADDR, MODULES_END), |
699 | #endif | ||
700 | 694 | ||
701 | MLK_ROUNDUP(_text, _etext), | 695 | MLK_ROUNDUP(_text, _etext), |
702 | MLK_ROUNDUP(__init_begin, __init_end), | 696 | MLK_ROUNDUP(__init_begin, __init_end), |
@@ -712,6 +706,9 @@ void __init mem_init(void) | |||
712 | * be detected at build time already. | 706 | * be detected at build time already. |
713 | */ | 707 | */ |
714 | #ifdef CONFIG_MMU | 708 | #ifdef CONFIG_MMU |
709 | BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
710 | BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
711 | |||
715 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); | 712 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); |
716 | BUG_ON(TASK_SIZE > MODULES_VADDR); | 713 | BUG_ON(TASK_SIZE > MODULES_VADDR); |
717 | #endif | 714 | #endif |
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c index 4614208369f..430df1a5978 100644 --- a/arch/arm/mm/iomap.c +++ b/arch/arm/mm/iomap.c | |||
@@ -32,6 +32,30 @@ EXPORT_SYMBOL(pcibios_min_io); | |||
32 | unsigned long pcibios_min_mem = 0x01000000; | 32 | unsigned long pcibios_min_mem = 0x01000000; |
33 | EXPORT_SYMBOL(pcibios_min_mem); | 33 | EXPORT_SYMBOL(pcibios_min_mem); |
34 | 34 | ||
35 | unsigned int pci_flags = PCI_REASSIGN_ALL_RSRC; | ||
36 | EXPORT_SYMBOL(pci_flags); | ||
37 | |||
38 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | ||
39 | { | ||
40 | resource_size_t start = pci_resource_start(dev, bar); | ||
41 | resource_size_t len = pci_resource_len(dev, bar); | ||
42 | unsigned long flags = pci_resource_flags(dev, bar); | ||
43 | |||
44 | if (!len || !start) | ||
45 | return NULL; | ||
46 | if (maxlen && len > maxlen) | ||
47 | len = maxlen; | ||
48 | if (flags & IORESOURCE_IO) | ||
49 | return ioport_map(start, len); | ||
50 | if (flags & IORESOURCE_MEM) { | ||
51 | if (flags & IORESOURCE_CACHEABLE) | ||
52 | return ioremap(start, len); | ||
53 | return ioremap_nocache(start, len); | ||
54 | } | ||
55 | return NULL; | ||
56 | } | ||
57 | EXPORT_SYMBOL(pci_iomap); | ||
58 | |||
35 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr) | 59 | void pci_iounmap(struct pci_dev *dev, void __iomem *addr) |
36 | { | 60 | { |
37 | if ((unsigned long)addr >= VMALLOC_START && | 61 | if ((unsigned long)addr >= VMALLOC_START && |
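
A hedged usage sketch for the pci_iomap() added above, as a driver probe routine might call it (the BAR number and register offset are invented):

	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* maxlen 0: whole BAR 0 */
	if (!regs)
		return -ENOMEM;
	writel(1, regs + 0x10);		/* hypothetical device register */
	/* ... use the device ... */
	pci_iounmap(pdev, regs);
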
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 88fd86cf3d9..ab506272b2d 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -25,20 +25,23 @@ | |||
25 | #include <linux/mm.h> | 25 | #include <linux/mm.h> |
26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/sizes.h> | ||
29 | 28 | ||
30 | #include <asm/cp15.h> | ||
31 | #include <asm/cputype.h> | 29 | #include <asm/cputype.h> |
32 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
33 | #include <asm/mmu_context.h> | 31 | #include <asm/mmu_context.h> |
34 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
35 | #include <asm/tlbflush.h> | 33 | #include <asm/tlbflush.h> |
36 | #include <asm/system_info.h> | 34 | #include <asm/sizes.h> |
37 | 35 | ||
38 | #include <asm/mach/map.h> | 36 | #include <asm/mach/map.h> |
39 | #include <asm/mach/pci.h> | ||
40 | #include "mm.h" | 37 | #include "mm.h" |
41 | 38 | ||
39 | /* | ||
40 | * Used by ioremap() and iounmap() code to mark (super)section-mapped | ||
41 | * I/O regions in vm_struct->flags field. | ||
42 | */ | ||
43 | #define VM_ARM_SECTION_MAPPING 0x80000000 | ||
44 | |||
42 | int ioremap_page(unsigned long virt, unsigned long phys, | 45 | int ioremap_page(unsigned long virt, unsigned long phys, |
43 | const struct mem_type *mtype) | 46 | const struct mem_type *mtype) |
44 | { | 47 | { |
@@ -47,21 +50,21 @@ int ioremap_page(unsigned long virt, unsigned long phys, | |||
47 | } | 50 | } |
48 | EXPORT_SYMBOL(ioremap_page); | 51 | EXPORT_SYMBOL(ioremap_page); |
49 | 52 | ||
50 | void __check_vmalloc_seq(struct mm_struct *mm) | 53 | void __check_kvm_seq(struct mm_struct *mm) |
51 | { | 54 | { |
52 | unsigned int seq; | 55 | unsigned int seq; |
53 | 56 | ||
54 | do { | 57 | do { |
55 | seq = init_mm.context.vmalloc_seq; | 58 | seq = init_mm.context.kvm_seq; |
56 | memcpy(pgd_offset(mm, VMALLOC_START), | 59 | memcpy(pgd_offset(mm, VMALLOC_START), |
57 | pgd_offset_k(VMALLOC_START), | 60 | pgd_offset_k(VMALLOC_START), |
58 | sizeof(pgd_t) * (pgd_index(VMALLOC_END) - | 61 | sizeof(pgd_t) * (pgd_index(VMALLOC_END) - |
59 | pgd_index(VMALLOC_START))); | 62 | pgd_index(VMALLOC_START))); |
60 | mm->context.vmalloc_seq = seq; | 63 | mm->context.kvm_seq = seq; |
61 | } while (seq != init_mm.context.vmalloc_seq); | 64 | } while (seq != init_mm.context.kvm_seq); |
62 | } | 65 | } |
63 | 66 | ||
64 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) | 67 | #ifndef CONFIG_SMP |
65 | /* | 68 | /* |
66 | * Section support is unsafe on SMP - If you iounmap and ioremap a region, | 69 | * Section support is unsafe on SMP - If you iounmap and ioremap a region, |
67 | * the other CPUs will not see this change until their next context switch. | 70 | * the other CPUs will not see this change until their next context switch. |
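
The do/while in __check_kvm_seq() above pairs with the writer in unmap_area_sections() further down in this file; the protocol in isolation:

	/* writer (unmap side): clear the PMD, then bump the counter */
	pmd_clear(pmdp);
	init_mm.context.kvm_seq++;

	/* reader: copy the vmalloc PGD range until the counter is stable,
	 * so a copy that raced with an unmap is simply redone */
	do {
		seq = init_mm.context.kvm_seq;
		/* memcpy of the pgd entries covering VMALLOC_START..END */
	} while (seq != init_mm.context.kvm_seq);
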
@@ -76,26 +79,23 @@ static void unmap_area_sections(unsigned long virt, unsigned long size) | |||
76 | { | 79 | { |
77 | unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); | 80 | unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); |
78 | pgd_t *pgd; | 81 | pgd_t *pgd; |
79 | pud_t *pud; | ||
80 | pmd_t *pmdp; | ||
81 | 82 | ||
82 | flush_cache_vunmap(addr, end); | 83 | flush_cache_vunmap(addr, end); |
83 | pgd = pgd_offset_k(addr); | 84 | pgd = pgd_offset_k(addr); |
84 | pud = pud_offset(pgd, addr); | ||
85 | pmdp = pmd_offset(pud, addr); | ||
86 | do { | 85 | do { |
87 | pmd_t pmd = *pmdp; | 86 | pmd_t pmd, *pmdp = pmd_offset(pgd, addr); |
88 | 87 | ||
88 | pmd = *pmdp; | ||
89 | if (!pmd_none(pmd)) { | 89 | if (!pmd_none(pmd)) { |
90 | /* | 90 | /* |
91 | * Clear the PMD from the page table, and | 91 | * Clear the PMD from the page table, and |
92 | * increment the vmalloc sequence so others | 92 | * increment the kvm sequence so others |
93 | * notice this change. | 93 | * notice this change. |
94 | * | 94 | * |
95 | * Note: this is still racy on SMP machines. | 95 | * Note: this is still racy on SMP machines. |
96 | */ | 96 | */ |
97 | pmd_clear(pmdp); | 97 | pmd_clear(pmdp); |
98 | init_mm.context.vmalloc_seq++; | 98 | init_mm.context.kvm_seq++; |
99 | 99 | ||
100 | /* | 100 | /* |
101 | * Free the page table, if there was one. | 101 | * Free the page table, if there was one. |
@@ -104,16 +104,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size) | |||
104 | pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); | 104 | pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); |
105 | } | 105 | } |
106 | 106 | ||
107 | addr += PMD_SIZE; | 107 | addr += PGDIR_SIZE; |
108 | pmdp += 2; | 108 | pgd++; |
109 | } while (addr < end); | 109 | } while (addr < end); |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Ensure that the active_mm is up to date - we want to | 112 | * Ensure that the active_mm is up to date - we want to |
113 | * catch any use-after-iounmap cases. | 113 | * catch any use-after-iounmap cases. |
114 | */ | 114 | */ |
115 | if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq) | 115 | if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq) |
116 | __check_vmalloc_seq(current->active_mm); | 116 | __check_kvm_seq(current->active_mm); |
117 | 117 | ||
118 | flush_tlb_kernel_range(virt, end); | 118 | flush_tlb_kernel_range(virt, end); |
119 | } | 119 | } |
@@ -124,8 +124,6 @@ remap_area_sections(unsigned long virt, unsigned long pfn, | |||
124 | { | 124 | { |
125 | unsigned long addr = virt, end = virt + size; | 125 | unsigned long addr = virt, end = virt + size; |
126 | pgd_t *pgd; | 126 | pgd_t *pgd; |
127 | pud_t *pud; | ||
128 | pmd_t *pmd; | ||
129 | 127 | ||
130 | /* | 128 | /* |
131 | * Remove and free any PTE-based mapping, and | 129 | * Remove and free any PTE-based mapping, and |
@@ -134,17 +132,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn, | |||
134 | unmap_area_sections(virt, size); | 132 | unmap_area_sections(virt, size); |
135 | 133 | ||
136 | pgd = pgd_offset_k(addr); | 134 | pgd = pgd_offset_k(addr); |
137 | pud = pud_offset(pgd, addr); | ||
138 | pmd = pmd_offset(pud, addr); | ||
139 | do { | 135 | do { |
136 | pmd_t *pmd = pmd_offset(pgd, addr); | ||
137 | |||
140 | pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); | 138 | pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); |
141 | pfn += SZ_1M >> PAGE_SHIFT; | 139 | pfn += SZ_1M >> PAGE_SHIFT; |
142 | pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); | 140 | pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); |
143 | pfn += SZ_1M >> PAGE_SHIFT; | 141 | pfn += SZ_1M >> PAGE_SHIFT; |
144 | flush_pmd_entry(pmd); | 142 | flush_pmd_entry(pmd); |
145 | 143 | ||
146 | addr += PMD_SIZE; | 144 | addr += PGDIR_SIZE; |
147 | pmd += 2; | 145 | pgd++; |
148 | } while (addr < end); | 146 | } while (addr < end); |
149 | 147 | ||
150 | return 0; | 148 | return 0; |
@@ -156,8 +154,6 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, | |||
156 | { | 154 | { |
157 | unsigned long addr = virt, end = virt + size; | 155 | unsigned long addr = virt, end = virt + size; |
158 | pgd_t *pgd; | 156 | pgd_t *pgd; |
159 | pud_t *pud; | ||
160 | pmd_t *pmd; | ||
161 | 157 | ||
162 | /* | 158 | /* |
163 | * Remove and free any PTE-based mapping, and | 159 | * Remove and free any PTE-based mapping, and |
@@ -166,8 +162,6 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, | |||
166 | unmap_area_sections(virt, size); | 162 | unmap_area_sections(virt, size); |
167 | 163 | ||
168 | pgd = pgd_offset_k(virt); | 164 | pgd = pgd_offset_k(virt); |
169 | pud = pud_offset(pgd, addr); | ||
170 | pmd = pmd_offset(pud, addr); | ||
171 | do { | 165 | do { |
172 | unsigned long super_pmd_val, i; | 166 | unsigned long super_pmd_val, i; |
173 | 167 | ||
@@ -176,12 +170,14 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, | |||
176 | super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; | 170 | super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; |
177 | 171 | ||
178 | for (i = 0; i < 8; i++) { | 172 | for (i = 0; i < 8; i++) { |
173 | pmd_t *pmd = pmd_offset(pgd, addr); | ||
174 | |||
179 | pmd[0] = __pmd(super_pmd_val); | 175 | pmd[0] = __pmd(super_pmd_val); |
180 | pmd[1] = __pmd(super_pmd_val); | 176 | pmd[1] = __pmd(super_pmd_val); |
181 | flush_pmd_entry(pmd); | 177 | flush_pmd_entry(pmd); |
182 | 178 | ||
183 | addr += PMD_SIZE; | 179 | addr += PGDIR_SIZE; |
184 | pmd += 2; | 180 | pgd++; |
185 | } | 181 | } |
186 | 182 | ||
187 | pfn += SUPERSECTION_SIZE >> PAGE_SHIFT; | 183 | pfn += SUPERSECTION_SIZE >> PAGE_SHIFT; |
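
How super_pmd_val above is assembled, sketched together with the lines the hunk elides (reconstructed from context, so treat it as an approximation):

	super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
			PMD_SECT_SUPER;
	/* PA[35:32] of a >4 GB target live in PMD bits [23:20] */
	super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
	/* the same value is then written to all 8 pmd pairs (16 hardware
	 * entries) covering the 16 MB supersection */
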
@@ -199,13 +195,17 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
199 | unsigned long addr; | 195 | unsigned long addr; |
200 | struct vm_struct * area; | 196 | struct vm_struct * area; |
201 | 197 | ||
202 | #ifndef CONFIG_ARM_LPAE | ||
203 | /* | 198 | /* |
204 | * High mappings must be supersection aligned | 199 | * High mappings must be supersection aligned |
205 | */ | 200 | */ |
206 | if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) | 201 | if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) |
207 | return NULL; | 202 | return NULL; |
208 | #endif | 203 | |
204 | /* | ||
205 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ | ||
206 | */ | ||
207 | if (WARN_ON(pfn_valid(pfn))) | ||
208 | return NULL; | ||
209 | 209 | ||
210 | type = get_mem_type(mtype); | 210 | type = get_mem_type(mtype); |
211 | if (!type) | 211 | if (!type) |
@@ -216,41 +216,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
216 | */ | 216 | */ |
217 | size = PAGE_ALIGN(offset + size); | 217 | size = PAGE_ALIGN(offset + size); |
218 | 218 | ||
219 | /* | ||
220 | * Try to reuse one of the static mapping whenever possible. | ||
221 | */ | ||
222 | read_lock(&vmlist_lock); | ||
223 | for (area = vmlist; area; area = area->next) { | ||
224 | if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) | ||
225 | break; | ||
226 | if (!(area->flags & VM_ARM_STATIC_MAPPING)) | ||
227 | continue; | ||
228 | if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) | ||
229 | continue; | ||
230 | if (__phys_to_pfn(area->phys_addr) > pfn || | ||
231 | __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) | ||
232 | continue; | ||
233 | /* we can drop the lock here as we know *area is static */ | ||
234 | read_unlock(&vmlist_lock); | ||
235 | addr = (unsigned long)area->addr; | ||
236 | addr += __pfn_to_phys(pfn) - area->phys_addr; | ||
237 | return (void __iomem *) (offset + addr); | ||
238 | } | ||
239 | read_unlock(&vmlist_lock); | ||
240 | |||
241 | /* | ||
242 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ | ||
243 | */ | ||
244 | if (WARN_ON(pfn_valid(pfn))) | ||
245 | return NULL; | ||
246 | |||
247 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | 219 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
248 | if (!area) | 220 | if (!area) |
249 | return NULL; | 221 | return NULL; |
250 | addr = (unsigned long)area->addr; | 222 | addr = (unsigned long)area->addr; |
251 | area->phys_addr = __pfn_to_phys(pfn); | ||
252 | 223 | ||
253 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) | 224 | #ifndef CONFIG_SMP |
254 | if (DOMAIN_IO == 0 && | 225 | if (DOMAIN_IO == 0 && |
255 | (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || | 226 | (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || |
256 | cpu_is_xsc3()) && pfn >= 0x100000 && | 227 | cpu_is_xsc3()) && pfn >= 0x100000 && |
@@ -310,91 +281,40 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
310 | } | 281 | } |
311 | EXPORT_SYMBOL(__arm_ioremap_pfn); | 282 | EXPORT_SYMBOL(__arm_ioremap_pfn); |
312 | 283 | ||
313 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, | ||
314 | unsigned int, void *) = | ||
315 | __arm_ioremap_caller; | ||
316 | |||
317 | void __iomem * | 284 | void __iomem * |
318 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | 285 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) |
319 | { | 286 | { |
320 | return arch_ioremap_caller(phys_addr, size, mtype, | ||
321 | __builtin_return_address(0)); | ||
322 | } | ||
323 | EXPORT_SYMBOL(__arm_ioremap); | ||
324 | |||
325 | /* | ||
326 | * Remap an arbitrary physical address space into the kernel virtual | ||
327 | * address space as memory. Needed when the kernel wants to execute | ||
328 | * code in external memory. This is needed for reprogramming source | ||
329 | * clocks that would affect normal memory for example. Please see | ||
330 | * CONFIG_GENERIC_ALLOCATOR for allocating external memory. | ||
331 | */ | ||
332 | void __iomem * | ||
333 | __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) | ||
334 | { | ||
335 | unsigned int mtype; | ||
336 | |||
337 | if (cached) | ||
338 | mtype = MT_MEMORY; | ||
339 | else | ||
340 | mtype = MT_MEMORY_NONCACHED; | ||
341 | |||
342 | return __arm_ioremap_caller(phys_addr, size, mtype, | 287 | return __arm_ioremap_caller(phys_addr, size, mtype, |
343 | __builtin_return_address(0)); | 288 | __builtin_return_address(0)); |
344 | } | 289 | } |
290 | EXPORT_SYMBOL(__arm_ioremap); | ||
345 | 291 | ||
346 | void __iounmap(volatile void __iomem *io_addr) | 292 | void __iounmap(volatile void __iomem *io_addr) |
347 | { | 293 | { |
348 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); | 294 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); |
349 | struct vm_struct *vm; | 295 | #ifndef CONFIG_SMP |
296 | struct vm_struct **p, *tmp; | ||
350 | 297 | ||
351 | read_lock(&vmlist_lock); | 298 | /* |
352 | for (vm = vmlist; vm; vm = vm->next) { | 299 | * If this is a section based mapping we need to handle it |
353 | if (vm->addr > addr) | 300 | * specially as the VM subsystem does not know how to handle |
354 | break; | 301 | * such a beast. We need the lock here because we need to clear
355 | if (!(vm->flags & VM_IOREMAP)) | 302 | * all the mappings before the area can be reclaimed |
356 | continue; | 303 | * by someone else. |
357 | /* If this is a static mapping we must leave it alone */ | 304 | */ |
358 | if ((vm->flags & VM_ARM_STATIC_MAPPING) && | 305 | write_lock(&vmlist_lock); |
359 | (vm->addr <= addr) && (vm->addr + vm->size > addr)) { | 306 | for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { |
360 | read_unlock(&vmlist_lock); | 307 | if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { |
361 | return; | 308 | if (tmp->flags & VM_ARM_SECTION_MAPPING) { |
362 | } | 309 | unmap_area_sections((unsigned long)tmp->addr, |
363 | #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) | 310 | tmp->size); |
364 | /* | 311 | } |
365 | * If this is a section based mapping we need to handle it | ||
366 | * specially as the VM subsystem does not know how to handle | ||
367 | * such a beast. | ||
368 | */ | ||
369 | if ((vm->addr == addr) && | ||
370 | (vm->flags & VM_ARM_SECTION_MAPPING)) { | ||
371 | unmap_area_sections((unsigned long)vm->addr, vm->size); | ||
372 | break; | 312 | break; |
373 | } | 313 | } |
374 | #endif | ||
375 | } | 314 | } |
376 | read_unlock(&vmlist_lock); | 315 | write_unlock(&vmlist_lock); |
316 | #endif | ||
377 | 317 | ||
378 | vunmap(addr); | 318 | vunmap(addr); |
379 | } | 319 | } |
380 | 320 | EXPORT_SYMBOL(__iounmap); | |
381 | void (*arch_iounmap)(volatile void __iomem *) = __iounmap; | ||
382 | |||
383 | void __arm_iounmap(volatile void __iomem *io_addr) | ||
384 | { | ||
385 | arch_iounmap(io_addr); | ||
386 | } | ||
387 | EXPORT_SYMBOL(__arm_iounmap); | ||
388 | |||
389 | #ifdef CONFIG_PCI | ||
390 | int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr) | ||
391 | { | ||
392 | BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT); | ||
393 | |||
394 | return ioremap_page_range(PCI_IO_VIRT_BASE + offset, | ||
395 | PCI_IO_VIRT_BASE + offset + SZ_64K, | ||
396 | phys_addr, | ||
397 | __pgprot(get_mem_type(MT_DEVICE)->prot_pte)); | ||
398 | } | ||
399 | EXPORT_SYMBOL_GPL(pci_ioremap_io); | ||
400 | #endif | ||
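Illustrative aside: a minimal sketch of how a host-bridge driver might call the pci_ioremap_io() helper shown above. EXAMPLE_IO_PHYS and the probe function are invented for the example, and the fragment is only meaningful inside a kernel tree that carries this helper.

#include <linux/io.h>
#include <asm/mach/pci.h>

#define EXAMPLE_IO_PHYS	0x90000000UL	/* made-up bus I/O aperture */

static int __init example_bridge_map_io(void)
{
	/* window 0 of the fixed PCI_IO_VIRT_BASE region, SZ_64K long */
	return pci_ioremap_io(0, EXAMPLE_IO_PHYS);
}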
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index a8ee92da354..010566799c8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -3,31 +3,7 @@ | |||
3 | /* the upper-most page table pointer */ | 3 | /* the upper-most page table pointer */ |
4 | extern pmd_t *top_pmd; | 4 | extern pmd_t *top_pmd; |
5 | 5 | ||
6 | /* | 6 | #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) |
7 | * 0xffff8000 to 0xffffffff is reserved for any ARM architecture | ||
8 | * specific hacks for copying pages efficiently, while 0xffff4000 | ||
9 | * is reserved for VIPT aliasing flushing by generic code. | ||
10 | * | ||
11 | * Note that we don't allow VIPT aliasing caches with SMP. | ||
12 | */ | ||
13 | #define COPYPAGE_MINICACHE 0xffff8000 | ||
14 | #define COPYPAGE_V6_FROM 0xffff8000 | ||
15 | #define COPYPAGE_V6_TO 0xffffc000 | ||
16 | /* PFN alias flushing, for VIPT caches */ | ||
17 | #define FLUSH_ALIAS_START 0xffff4000 | ||
18 | |||
19 | static inline void set_top_pte(unsigned long va, pte_t pte) | ||
20 | { | ||
21 | pte_t *ptep = pte_offset_kernel(top_pmd, va); | ||
22 | set_pte_ext(ptep, pte, 0); | ||
23 | local_flush_tlb_kernel_page(va); | ||
24 | } | ||
25 | |||
26 | static inline pte_t get_top_pte(unsigned long va) | ||
27 | { | ||
28 | pte_t *ptep = pte_offset_kernel(top_pmd, va); | ||
29 | return *ptep; | ||
30 | } | ||
31 | 7 | ||
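Illustrative aside: a minimal sketch, assuming the usual kernel headers, of how copypage-style code could use the set_top_pte()/get_top_pte() helpers above instead of open-coding TOP_PTE() plus a TLB flush; the function name is invented.

static void *example_map_src(struct page *from)
{
	/* install the PTE and flush the TLB entry in one call */
	set_top_pte(COPYPAGE_V6_FROM, mk_pte(from, PAGE_KERNEL));
	return (void *)COPYPAGE_V6_FROM;
}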
32 | static inline pmd_t *pmd_off_k(unsigned long virt) | 8 | static inline pmd_t *pmd_off_k(unsigned long virt) |
33 | { | 9 | { |
@@ -36,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt) | |||
36 | 12 | ||
37 | struct mem_type { | 13 | struct mem_type { |
38 | pteval_t prot_pte; | 14 | pteval_t prot_pte; |
39 | pmdval_t prot_l1; | 15 | unsigned int prot_l1; |
40 | pmdval_t prot_sect; | 16 | unsigned int prot_sect; |
41 | unsigned int domain; | 17 | unsigned int domain; |
42 | }; | 18 | }; |
43 | 19 | ||
@@ -45,36 +21,13 @@ const struct mem_type *get_mem_type(unsigned int type); | |||
45 | 21 | ||
46 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); | 22 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); |
47 | 23 | ||
48 | /* | ||
49 | * ARM specific vm_struct->flags bits. | ||
50 | */ | ||
51 | |||
52 | /* (super)section-mapped I/O regions used by ioremap()/iounmap() */ | ||
53 | #define VM_ARM_SECTION_MAPPING 0x80000000 | ||
54 | |||
55 | /* permanent static mappings from iotable_init() */ | ||
56 | #define VM_ARM_STATIC_MAPPING 0x40000000 | ||
57 | |||
58 | /* empty mapping */ | ||
59 | #define VM_ARM_EMPTY_MAPPING 0x20000000 | ||
60 | |||
61 | /* mapping type (attributes) for permanent static mappings */ | ||
62 | #define VM_ARM_MTYPE(mt) ((mt) << 20) | ||
63 | #define VM_ARM_MTYPE_MASK (0x1f << 20) | ||
64 | |||
65 | /* consistent regions used by dma_alloc_attrs() */ | ||
66 | #define VM_ARM_DMA_CONSISTENT 0x20000000 | ||
67 | |||
68 | #endif | 24 | #endif |
69 | 25 | ||
70 | #ifdef CONFIG_ZONE_DMA | 26 | #ifdef CONFIG_ZONE_DMA |
71 | extern phys_addr_t arm_dma_limit; | 27 | extern u32 arm_dma_limit; |
72 | #else | 28 | #else |
73 | #define arm_dma_limit ((phys_addr_t)~0) | 29 | #define arm_dma_limit ((u32)~0) |
74 | #endif | 30 | #endif |
75 | 31 | ||
76 | extern phys_addr_t arm_lowmem_limit; | ||
77 | |||
78 | void __init bootmem_init(void); | 32 | void __init bootmem_init(void); |
79 | void arm_mm_memblock_reserve(void); | 33 | void arm_mm_memblock_reserve(void); |
80 | void dma_contiguous_remap(void); | ||
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 10062ceadd1..74be05f3e03 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c | |||
@@ -9,39 +9,13 @@ | |||
9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
10 | #include <linux/personality.h> | 10 | #include <linux/personality.h> |
11 | #include <linux/random.h> | 11 | #include <linux/random.h> |
12 | #include <asm/cachetype.h> | 12 | #include <asm/cputype.h> |
13 | #include <asm/system.h> | ||
13 | 14 | ||
14 | #define COLOUR_ALIGN(addr,pgoff) \ | 15 | #define COLOUR_ALIGN(addr,pgoff) \ |
15 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ | 16 | ((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \ |
16 | (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) | 17 | (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1))) |
17 | 18 | ||
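Illustrative aside: a stand-alone model of the COLOUR_ALIGN() arithmetic, assuming SHMLBA is 16 KiB (its usual value on aliasing-VIPT ARM). A requested address of 0x40001000 with page offset 3 is pushed up to the next 16 KiB set and given cache colour 3.

#include <stdio.h>

#define PAGE_SHIFT 12
#define SHMLBA     (4UL << PAGE_SHIFT)	/* assumed 16 KiB */

#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long a = COLOUR_ALIGN(0x40001000UL, 3UL);

	/* prints 0x40007000, i.e. colour 3 within its 16 KiB set */
	printf("%#lx (colour %lu)\n", a, (a & (SHMLBA - 1)) >> PAGE_SHIFT);
	return 0;
}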
18 | /* gap between mmap and stack */ | ||
19 | #define MIN_GAP (128*1024*1024UL) | ||
20 | #define MAX_GAP ((TASK_SIZE)/6*5) | ||
21 | |||
22 | static int mmap_is_legacy(void) | ||
23 | { | ||
24 | if (current->personality & ADDR_COMPAT_LAYOUT) | ||
25 | return 1; | ||
26 | |||
27 | if (rlimit(RLIMIT_STACK) == RLIM_INFINITY) | ||
28 | return 1; | ||
29 | |||
30 | return sysctl_legacy_va_layout; | ||
31 | } | ||
32 | |||
33 | static unsigned long mmap_base(unsigned long rnd) | ||
34 | { | ||
35 | unsigned long gap = rlimit(RLIMIT_STACK); | ||
36 | |||
37 | if (gap < MIN_GAP) | ||
38 | gap = MIN_GAP; | ||
39 | else if (gap > MAX_GAP) | ||
40 | gap = MAX_GAP; | ||
41 | |||
42 | return PAGE_ALIGN(TASK_SIZE - gap - rnd); | ||
43 | } | ||
44 | |||
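Illustrative aside: a stand-alone model of the mmap_base() gap clamping shown above, assuming a 3 GiB TASK_SIZE and no randomization; with the common 8 MiB stack limit the gap is raised to MIN_GAP.

#include <stdio.h>

#define PAGE_SIZE  4096UL
#define TASK_SIZE  0xC0000000UL			/* assumed 3 GiB split */
#define MIN_GAP    (128UL * 1024 * 1024)
#define MAX_GAP    (TASK_SIZE / 6 * 5)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static unsigned long mmap_base(unsigned long stack_limit, unsigned long rnd)
{
	unsigned long gap = stack_limit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;		/* keep room under the stack */
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
	/* 8 MiB < MIN_GAP, so the base lands 128 MiB below TASK_SIZE */
	printf("base = %#lx\n", mmap_base(8UL << 20, 0));
	return 0;
}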
45 | /* | 19 | /* |
46 | * We need to ensure that shared mappings are correctly aligned to | 20 | * We need to ensure that shared mappings are correctly aligned to |
47 | * avoid aliasing issues with VIPT caches. We need to ensure that | 21 | * avoid aliasing issues with VIPT caches. We need to ensure that |
@@ -57,16 +31,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
57 | { | 31 | { |
58 | struct mm_struct *mm = current->mm; | 32 | struct mm_struct *mm = current->mm; |
59 | struct vm_area_struct *vma; | 33 | struct vm_area_struct *vma; |
60 | int do_align = 0; | 34 | unsigned long start_addr; |
61 | int aliasing = cache_is_vipt_aliasing(); | 35 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) |
62 | struct vm_unmapped_area_info info; | 36 | unsigned int cache_type; |
37 | int do_align = 0, aliasing = 0; | ||
63 | 38 | ||
64 | /* | 39 | /* |
65 | * We only need to do colour alignment if either the I or D | 40 | * We only need to do colour alignment if either the I or D |
66 | * caches alias. | 41 | * caches alias. This is indicated by bits 9 and 21 of the |
42 | * cache type register. | ||
67 | */ | 43 | */ |
68 | if (aliasing) | 44 | cache_type = read_cpuid_cachetype(); |
69 | do_align = filp || (flags & MAP_SHARED); | 45 | if (cache_type != read_cpuid_id()) { |
46 | aliasing = (cache_type | cache_type >> 12) & (1 << 11); | ||
47 | if (aliasing) | ||
48 | do_align = filp || flags & MAP_SHARED; | ||
49 | } | ||
50 | #else | ||
51 | #define do_align 0 | ||
52 | #define aliasing 0 | ||
53 | #endif | ||
70 | 54 | ||
71 | /* | 55 | /* |
72 | * We enforce the MAP_FIXED case. | 56 | * We enforce the MAP_FIXED case. |
@@ -92,108 +76,58 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
92 | (!vma || addr + len <= vma->vm_start)) | 76 | (!vma || addr + len <= vma->vm_start)) |
93 | return addr; | 77 | return addr; |
94 | } | 78 | } |
95 | 79 | if (len > mm->cached_hole_size) { | |
96 | info.flags = 0; | 80 | start_addr = addr = mm->free_area_cache; |
97 | info.length = len; | 81 | } else { |
98 | info.low_limit = mm->mmap_base; | 82 | start_addr = addr = TASK_UNMAPPED_BASE; |
99 | info.high_limit = TASK_SIZE; | 83 | mm->cached_hole_size = 0; |
100 | info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | ||
101 | info.align_offset = pgoff << PAGE_SHIFT; | ||
102 | return vm_unmapped_area(&info); | ||
103 | } | ||
104 | |||
105 | unsigned long | ||
106 | arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | ||
107 | const unsigned long len, const unsigned long pgoff, | ||
108 | const unsigned long flags) | ||
109 | { | ||
110 | struct vm_area_struct *vma; | ||
111 | struct mm_struct *mm = current->mm; | ||
112 | unsigned long addr = addr0; | ||
113 | int do_align = 0; | ||
114 | int aliasing = cache_is_vipt_aliasing(); | ||
115 | struct vm_unmapped_area_info info; | ||
116 | |||
117 | /* | ||
118 | * We only need to do colour alignment if either the I or D | ||
119 | * caches alias. | ||
120 | */ | ||
121 | if (aliasing) | ||
122 | do_align = filp || (flags & MAP_SHARED); | ||
123 | |||
124 | /* requested length too big for entire address space */ | ||
125 | if (len > TASK_SIZE) | ||
126 | return -ENOMEM; | ||
127 | |||
128 | if (flags & MAP_FIXED) { | ||
129 | if (aliasing && flags & MAP_SHARED && | ||
130 | (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) | ||
131 | return -EINVAL; | ||
132 | return addr; | ||
133 | } | ||
134 | |||
135 | /* requesting a specific address */ | ||
136 | if (addr) { | ||
137 | if (do_align) | ||
138 | addr = COLOUR_ALIGN(addr, pgoff); | ||
139 | else | ||
140 | addr = PAGE_ALIGN(addr); | ||
141 | vma = find_vma(mm, addr); | ||
142 | if (TASK_SIZE - len >= addr && | ||
143 | (!vma || addr + len <= vma->vm_start)) | ||
144 | return addr; | ||
145 | } | ||
146 | |||
147 | info.flags = VM_UNMAPPED_AREA_TOPDOWN; | ||
148 | info.length = len; | ||
149 | info.low_limit = PAGE_SIZE; | ||
150 | info.high_limit = mm->mmap_base; | ||
151 | info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; | ||
152 | info.align_offset = pgoff << PAGE_SHIFT; | ||
153 | addr = vm_unmapped_area(&info); | ||
154 | |||
155 | /* | ||
156 | * A failed mmap() very likely causes application failure, | ||
157 | * so fall back to the bottom-up function here. This scenario | ||
158 | * can happen with large stack limits and large mmap() | ||
159 | * allocations. | ||
160 | */ | ||
161 | if (addr & ~PAGE_MASK) { | ||
162 | VM_BUG_ON(addr != -ENOMEM); | ||
163 | info.flags = 0; | ||
164 | info.low_limit = mm->mmap_base; | ||
165 | info.high_limit = TASK_SIZE; | ||
166 | addr = vm_unmapped_area(&info); | ||
167 | } | 84 | } |
168 | |||
169 | return addr; | ||
170 | } | ||
171 | |||
172 | void arch_pick_mmap_layout(struct mm_struct *mm) | ||
173 | { | ||
174 | unsigned long random_factor = 0UL; | ||
175 | |||
176 | /* 8 bits of randomness in 20 address space bits */ | 85 | /* 8 bits of randomness in 20 address space bits */ |
177 | if ((current->flags & PF_RANDOMIZE) && | 86 | if ((current->flags & PF_RANDOMIZE) && |
178 | !(current->personality & ADDR_NO_RANDOMIZE)) | 87 | !(current->personality & ADDR_NO_RANDOMIZE)) |
179 | random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT; | 88 | addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; |
180 | 89 | ||
181 | if (mmap_is_legacy()) { | 90 | full_search: |
182 | mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; | 91 | if (do_align) |
183 | mm->get_unmapped_area = arch_get_unmapped_area; | 92 | addr = COLOUR_ALIGN(addr, pgoff); |
184 | mm->unmap_area = arch_unmap_area; | 93 | else |
185 | } else { | 94 | addr = PAGE_ALIGN(addr); |
186 | mm->mmap_base = mmap_base(random_factor); | 95 | |
187 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | 96 | for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { |
188 | mm->unmap_area = arch_unmap_area_topdown; | 97 | /* At this point: (!vma || addr < vma->vm_end). */ |
98 | if (TASK_SIZE - len < addr) { | ||
99 | /* | ||
100 | * Start a new search - just in case we missed | ||
101 | * some holes. | ||
102 | */ | ||
103 | if (start_addr != TASK_UNMAPPED_BASE) { | ||
104 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
105 | mm->cached_hole_size = 0; | ||
106 | goto full_search; | ||
107 | } | ||
108 | return -ENOMEM; | ||
109 | } | ||
110 | if (!vma || addr + len <= vma->vm_start) { | ||
111 | /* | ||
112 | * Remember the place where we stopped the search: | ||
113 | */ | ||
114 | mm->free_area_cache = addr + len; | ||
115 | return addr; | ||
116 | } | ||
117 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
118 | mm->cached_hole_size = vma->vm_start - addr; | ||
119 | addr = vma->vm_end; | ||
120 | if (do_align) | ||
121 | addr = COLOUR_ALIGN(addr, pgoff); | ||
189 | } | 122 | } |
190 | } | 123 | } |
191 | 124 | ||
125 | |||
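Illustrative aside: a stand-alone model of the legacy first-fit walk in the right-hand column above; the free_area_cache / cached_hole_size caching is omitted and the VMA list is a plain sorted array.

#include <stdio.h>

struct vma { unsigned long start, end; };

static unsigned long first_fit(const struct vma *v, int n,
			       unsigned long addr, unsigned long len,
			       unsigned long task_size)
{
	for (int i = 0; i < n; i++) {
		if (task_size - len < addr)
			return -1UL;		/* -ENOMEM in the kernel */
		if (addr + len <= v[i].start)
			return addr;		/* hole before this VMA */
		if (v[i].end > addr)
			addr = v[i].end;	/* skip past the VMA */
	}
	return task_size - len >= addr ? addr : -1UL;
}

int main(void)
{
	const struct vma v[] = { { 0x40000000, 0x40100000 },
				 { 0x40200000, 0x40300000 } };

	/* a 4 KiB request lands in the hole between the two mappings */
	printf("%#lx\n", first_fit(v, 2, 0x40000000, 0x1000, 0xC0000000));
	return 0;
}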
192 | /* | 126 | /* |
193 | * You really shouldn't be using read() or write() on /dev/mem. This | 127 | * You really shouldn't be using read() or write() on /dev/mem. This |
194 | * might go away in the future. | 128 | * might go away in the future. |
195 | */ | 129 | */ |
196 | int valid_phys_addr_range(phys_addr_t addr, size_t size) | 130 | int valid_phys_addr_range(unsigned long addr, size_t size) |
197 | { | 131 | { |
198 | if (addr < PHYS_OFFSET) | 132 | if (addr < PHYS_OFFSET) |
199 | return 0; | 133 | return 0; |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9f0610243bd..4fa9c246ae9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -15,23 +15,19 @@ | |||
15 | #include <linux/nodemask.h> | 15 | #include <linux/nodemask.h> |
16 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/vmalloc.h> | ||
19 | #include <linux/sizes.h> | ||
20 | 18 | ||
21 | #include <asm/cp15.h> | ||
22 | #include <asm/cputype.h> | 19 | #include <asm/cputype.h> |
23 | #include <asm/sections.h> | 20 | #include <asm/sections.h> |
24 | #include <asm/cachetype.h> | 21 | #include <asm/cachetype.h> |
25 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
23 | #include <asm/sizes.h> | ||
26 | #include <asm/smp_plat.h> | 24 | #include <asm/smp_plat.h> |
27 | #include <asm/tlb.h> | 25 | #include <asm/tlb.h> |
28 | #include <asm/highmem.h> | 26 | #include <asm/highmem.h> |
29 | #include <asm/system_info.h> | ||
30 | #include <asm/traps.h> | 27 | #include <asm/traps.h> |
31 | 28 | ||
32 | #include <asm/mach/arch.h> | 29 | #include <asm/mach/arch.h> |
33 | #include <asm/mach/map.h> | 30 | #include <asm/mach/map.h> |
34 | #include <asm/mach/pci.h> | ||
35 | 31 | ||
36 | #include "mm.h" | 32 | #include "mm.h" |
37 | 33 | ||
@@ -64,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel); | |||
64 | struct cachepolicy { | 60 | struct cachepolicy { |
65 | const char policy[16]; | 61 | const char policy[16]; |
66 | unsigned int cr_mask; | 62 | unsigned int cr_mask; |
67 | pmdval_t pmd; | 63 | unsigned int pmd; |
68 | pteval_t pte; | 64 | pteval_t pte; |
69 | }; | 65 | }; |
70 | 66 | ||
@@ -154,7 +150,6 @@ static int __init early_nowrite(char *__unused) | |||
154 | } | 150 | } |
155 | early_param("nowb", early_nowrite); | 151 | early_param("nowb", early_nowrite); |
156 | 152 | ||
157 | #ifndef CONFIG_ARM_LPAE | ||
158 | static int __init early_ecc(char *p) | 153 | static int __init early_ecc(char *p) |
159 | { | 154 | { |
160 | if (memcmp(p, "on", 2) == 0) | 155 | if (memcmp(p, "on", 2) == 0) |
@@ -164,7 +159,6 @@ static int __init early_ecc(char *p) | |||
164 | return 0; | 159 | return 0; |
165 | } | 160 | } |
166 | early_param("ecc", early_ecc); | 161 | early_param("ecc", early_ecc); |
167 | #endif | ||
168 | 162 | ||
169 | static int __init noalign_setup(char *__unused) | 163 | static int __init noalign_setup(char *__unused) |
170 | { | 164 | { |
@@ -217,7 +211,7 @@ static struct mem_type mem_types[] = { | |||
217 | .prot_l1 = PMD_TYPE_TABLE, | 211 | .prot_l1 = PMD_TYPE_TABLE, |
218 | .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB, | 212 | .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB, |
219 | .domain = DOMAIN_IO, | 213 | .domain = DOMAIN_IO, |
220 | }, | 214 | }, |
221 | [MT_DEVICE_WC] = { /* ioremap_wc */ | 215 | [MT_DEVICE_WC] = { /* ioremap_wc */ |
222 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC, | 216 | .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC, |
223 | .prot_l1 = PMD_TYPE_TABLE, | 217 | .prot_l1 = PMD_TYPE_TABLE, |
@@ -234,12 +228,10 @@ static struct mem_type mem_types[] = { | |||
234 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | 228 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, |
235 | .domain = DOMAIN_KERNEL, | 229 | .domain = DOMAIN_KERNEL, |
236 | }, | 230 | }, |
237 | #ifndef CONFIG_ARM_LPAE | ||
238 | [MT_MINICLEAN] = { | 231 | [MT_MINICLEAN] = { |
239 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, | 232 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, |
240 | .domain = DOMAIN_KERNEL, | 233 | .domain = DOMAIN_KERNEL, |
241 | }, | 234 | }, |
242 | #endif | ||
243 | [MT_LOW_VECTORS] = { | 235 | [MT_LOW_VECTORS] = { |
244 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 236 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
245 | L_PTE_RDONLY, | 237 | L_PTE_RDONLY, |
@@ -281,19 +273,6 @@ static struct mem_type mem_types[] = { | |||
281 | .prot_l1 = PMD_TYPE_TABLE, | 273 | .prot_l1 = PMD_TYPE_TABLE, |
282 | .domain = DOMAIN_KERNEL, | 274 | .domain = DOMAIN_KERNEL, |
283 | }, | 275 | }, |
284 | [MT_MEMORY_SO] = { | ||
285 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | ||
286 | L_PTE_MT_UNCACHED, | ||
287 | .prot_l1 = PMD_TYPE_TABLE, | ||
288 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | | ||
289 | PMD_SECT_UNCACHED | PMD_SECT_XN, | ||
290 | .domain = DOMAIN_KERNEL, | ||
291 | }, | ||
292 | [MT_MEMORY_DMA_READY] = { | ||
293 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, | ||
294 | .prot_l1 = PMD_TYPE_TABLE, | ||
295 | .domain = DOMAIN_KERNEL, | ||
296 | }, | ||
297 | }; | 276 | }; |
298 | 277 | ||
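Illustrative aside: a hedged sketch of a lookup against the mem_types[] table above; MT_DEVICE is a real index, but the wrapper function is invented and only meaningful in-kernel.

static pgprot_t example_device_prot(void)
{
	const struct mem_type *mt = get_mem_type(MT_DEVICE);

	/* PTE protection bits ioremap() would use for this type */
	return __pgprot(mt->prot_pte);
}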
299 | const struct mem_type *get_mem_type(unsigned int type) | 278 | const struct mem_type *get_mem_type(unsigned int type) |
@@ -309,7 +288,7 @@ static void __init build_mem_type_table(void) | |||
309 | { | 288 | { |
310 | struct cachepolicy *cp; | 289 | struct cachepolicy *cp; |
311 | unsigned int cr = get_cr(); | 290 | unsigned int cr = get_cr(); |
312 | pteval_t user_pgprot, kern_pgprot, vecs_pgprot; | 291 | unsigned int user_pgprot, kern_pgprot, vecs_pgprot; |
313 | int cpu_arch = cpu_architecture(); | 292 | int cpu_arch = cpu_architecture(); |
314 | int i; | 293 | int i; |
315 | 294 | ||
@@ -423,10 +402,25 @@ static void __init build_mem_type_table(void) | |||
423 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; | 402 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; |
424 | 403 | ||
425 | /* | 404 | /* |
405 | * Only use write-through for non-SMP systems | ||
406 | */ | ||
407 | if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) | ||
408 | vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; | ||
409 | |||
410 | /* | ||
411 | * Enable CPU-specific coherency if supported. | ||
412 | * (Only available on XSC3 at the moment.) | ||
413 | */ | ||
414 | if (arch_is_coherent() && cpu_is_xsc3()) { | ||
415 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | ||
416 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | ||
417 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | ||
418 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | ||
419 | } | ||
420 | /* | ||
426 | * ARMv6 and above have extended page tables. | 421 | * ARMv6 and above have extended page tables. |
427 | */ | 422 | */ |
428 | if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { | 423 | if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { |
429 | #ifndef CONFIG_ARM_LPAE | ||
430 | /* | 424 | /* |
431 | * Mark cache clean areas and XIP ROM read only | 425 | * Mark cache clean areas and XIP ROM read only |
432 | * from SVC mode and no access from userspace. | 426 | * from SVC mode and no access from userspace. |
@@ -434,7 +428,6 @@ static void __init build_mem_type_table(void) | |||
434 | mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 428 | mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
435 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 429 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
436 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 430 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
437 | #endif | ||
438 | 431 | ||
439 | if (is_smp()) { | 432 | if (is_smp()) { |
440 | /* | 433 | /* |
@@ -450,7 +443,6 @@ static void __init build_mem_type_table(void) | |||
450 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | 443 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; |
451 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 444 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
452 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | 445 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; |
453 | mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; | ||
454 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | 446 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; |
455 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | 447 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; |
456 | } | 448 | } |
@@ -474,21 +466,8 @@ static void __init build_mem_type_table(void) | |||
474 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; | 466 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; |
475 | } | 467 | } |
476 | 468 | ||
477 | #ifdef CONFIG_ARM_LPAE | ||
478 | /* | ||
479 | * Do not generate access flag faults for the kernel mappings. | ||
480 | */ | ||
481 | for (i = 0; i < ARRAY_SIZE(mem_types); i++) { | ||
482 | mem_types[i].prot_pte |= PTE_EXT_AF; | ||
483 | if (mem_types[i].prot_sect) | ||
484 | mem_types[i].prot_sect |= PMD_SECT_AF; | ||
485 | } | ||
486 | kern_pgprot |= PTE_EXT_AF; | ||
487 | vecs_pgprot |= PTE_EXT_AF; | ||
488 | #endif | ||
489 | |||
490 | for (i = 0; i < 16; i++) { | 469 | for (i = 0; i < 16; i++) { |
491 | pteval_t v = pgprot_val(protection_map[i]); | 470 | unsigned long v = pgprot_val(protection_map[i]); |
492 | protection_map[i] = __pgprot(v | user_pgprot); | 471 | protection_map[i] = __pgprot(v | user_pgprot); |
493 | } | 472 | } |
494 | 473 | ||
@@ -503,7 +482,6 @@ static void __init build_mem_type_table(void) | |||
503 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 482 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
504 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; | 483 | mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; |
505 | mem_types[MT_MEMORY].prot_pte |= kern_pgprot; | 484 | mem_types[MT_MEMORY].prot_pte |= kern_pgprot; |
506 | mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; | ||
507 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; | 485 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; |
508 | mem_types[MT_ROM].prot_sect |= cp->pmd; | 486 | mem_types[MT_ROM].prot_sect |= cp->pmd; |
509 | 487 | ||
@@ -543,18 +521,13 @@ EXPORT_SYMBOL(phys_mem_access_prot); | |||
543 | 521 | ||
544 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) | 522 | #define vectors_base() (vectors_high() ? 0xffff0000 : 0) |
545 | 523 | ||
546 | static void __init *early_alloc_aligned(unsigned long sz, unsigned long align) | 524 | static void __init *early_alloc(unsigned long sz) |
547 | { | 525 | { |
548 | void *ptr = __va(memblock_alloc(sz, align)); | 526 | void *ptr = __va(memblock_alloc(sz, sz)); |
549 | memset(ptr, 0, sz); | 527 | memset(ptr, 0, sz); |
550 | return ptr; | 528 | return ptr; |
551 | } | 529 | } |
552 | 530 | ||
553 | static void __init *early_alloc(unsigned long sz) | ||
554 | { | ||
555 | return early_alloc_aligned(sz, sz); | ||
556 | } | ||
557 | |||
558 | static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) | 531 | static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) |
559 | { | 532 | { |
560 | if (pmd_none(*pmd)) { | 533 | if (pmd_none(*pmd)) { |
@@ -581,6 +554,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, | |||
581 | const struct mem_type *type) | 554 | const struct mem_type *type) |
582 | { | 555 | { |
583 | pmd_t *pmd = pmd_offset(pud, addr); | 556 | pmd_t *pmd = pmd_offset(pud, addr); |
557 | unsigned long pages_2m = 0, pages_4k = 0; | ||
558 | unsigned long stash_phys = phys; | ||
584 | 559 | ||
585 | /* | 560 | /* |
586 | * Try a section mapping - end, addr and phys must all be aligned | 561 | * Try a section mapping - end, addr and phys must all be aligned |
@@ -588,13 +563,13 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, | |||
588 | * L1 entries, whereas PGDs refer to a group of L1 entries making | 563 | * L1 entries, whereas PGDs refer to a group of L1 entries making |
589 | * up one logical pointer to an L2 table. | 564 | * up one logical pointer to an L2 table. |
590 | */ | 565 | */ |
591 | if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) { | 566 | if (((addr | end | phys) & ~SECTION_MASK) == 0) { |
592 | pmd_t *p = pmd; | 567 | pmd_t *p = pmd; |
593 | 568 | ||
594 | #ifndef CONFIG_ARM_LPAE | 569 | pages_2m = (end - addr) >> (PGDIR_SHIFT); |
570 | |||
595 | if (addr & SECTION_SIZE) | 571 | if (addr & SECTION_SIZE) |
596 | pmd++; | 572 | pmd++; |
597 | #endif | ||
598 | 573 | ||
599 | do { | 574 | do { |
600 | *pmd = __pmd(phys | type->prot_sect); | 575 | *pmd = __pmd(phys | type->prot_sect); |
@@ -603,16 +578,22 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, | |||
603 | 578 | ||
604 | flush_pmd_entry(p); | 579 | flush_pmd_entry(p); |
605 | } else { | 580 | } else { |
581 | pages_4k = (end - addr) >> PAGE_SHIFT; | ||
606 | /* | 582 | /* |
607 | * No need to loop; PTEs aren't interested in the | 583 | * No need to loop; PTEs aren't interested in the
608 | * individual L1 entries. | 584 | * individual L1 entries. |
609 | */ | 585 | */ |
610 | alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); | 586 | alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); |
611 | } | 587 | } |
588 | |||
589 | if ((stash_phys >= PHYS_OFFSET) && (stash_phys < lowmem_limit)) { | ||
590 | update_page_count(PG_LEVEL_2M, pages_2m); | ||
591 | update_page_count(PG_LEVEL_4K, pages_4k); | ||
592 | } | ||
612 | } | 593 | } |
613 | 594 | ||
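Illustrative aside: a stand-alone check of the section-eligibility test in alloc_init_section(), using the classic 1 MiB ARM section size; a mapping qualifies only when the virtual start, virtual end and physical base are all section-aligned.

#include <stdio.h>

#define SECTION_SHIFT 20
#define SECTION_SIZE  (1UL << SECTION_SHIFT)	/* 1 MiB */
#define SECTION_MASK  (~(SECTION_SIZE - 1))

static int can_use_sections(unsigned long addr, unsigned long end,
			    unsigned long phys)
{
	return ((addr | end | phys) & ~SECTION_MASK) == 0;
}

int main(void)
{
	/* fully aligned: section mappings are used */
	printf("%d\n", can_use_sections(0xC0000000, 0xC0200000, 0x80000000));
	/* misaligned end: falls back to 4 KiB pages */
	printf("%d\n", can_use_sections(0xC0000000, 0xC0180000, 0x80000000));
	return 0;
}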
614 | static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | 595 | static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, |
615 | unsigned long end, unsigned long phys, const struct mem_type *type) | 596 | unsigned long phys, const struct mem_type *type) |
616 | { | 597 | { |
617 | pud_t *pud = pud_offset(pgd, addr); | 598 | pud_t *pud = pud_offset(pgd, addr); |
618 | unsigned long next; | 599 | unsigned long next; |
@@ -624,7 +605,6 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
624 | } while (pud++, addr = next, addr != end); | 605 | } while (pud++, addr = next, addr != end); |
625 | } | 606 | } |
626 | 607 | ||
627 | #ifndef CONFIG_ARM_LPAE | ||
628 | static void __init create_36bit_mapping(struct map_desc *md, | 608 | static void __init create_36bit_mapping(struct map_desc *md, |
629 | const struct mem_type *type) | 609 | const struct mem_type *type) |
630 | { | 610 | { |
@@ -684,7 +664,6 @@ static void __init create_36bit_mapping(struct map_desc *md, | |||
684 | pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; | 664 | pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; |
685 | } while (addr != end); | 665 | } while (addr != end); |
686 | } | 666 | } |
687 | #endif /* !CONFIG_ARM_LPAE */ | ||
688 | 667 | ||
689 | /* | 668 | /* |
690 | * Create the page directory entries and any necessary | 669 | * Create the page directory entries and any necessary |
@@ -708,16 +687,14 @@ static void __init create_mapping(struct map_desc *md) | |||
708 | } | 687 | } |
709 | 688 | ||
710 | if ((md->type == MT_DEVICE || md->type == MT_ROM) && | 689 | if ((md->type == MT_DEVICE || md->type == MT_ROM) && |
711 | md->virtual >= PAGE_OFFSET && | 690 | md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { |
712 | (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { | ||
713 | printk(KERN_WARNING "BUG: mapping for 0x%08llx" | 691 | printk(KERN_WARNING "BUG: mapping for 0x%08llx" |
714 | " at 0x%08lx out of vmalloc space\n", | 692 | " at 0x%08lx overlaps vmalloc space\n", |
715 | (long long)__pfn_to_phys((u64)md->pfn), md->virtual); | 693 | (long long)__pfn_to_phys((u64)md->pfn), md->virtual); |
716 | } | 694 | } |
717 | 695 | ||
718 | type = &mem_types[md->type]; | 696 | type = &mem_types[md->type]; |
719 | 697 | ||
720 | #ifndef CONFIG_ARM_LPAE | ||
721 | /* | 698 | /* |
722 | * Catch 36-bit addresses | 699 | * Catch 36-bit addresses |
723 | */ | 700 | */ |
@@ -725,7 +702,6 @@ static void __init create_mapping(struct map_desc *md) | |||
725 | create_36bit_mapping(md, type); | 702 | create_36bit_mapping(md, type); |
726 | return; | 703 | return; |
727 | } | 704 | } |
728 | #endif | ||
729 | 705 | ||
730 | addr = md->virtual & PAGE_MASK; | 706 | addr = md->virtual & PAGE_MASK; |
731 | phys = __pfn_to_phys(md->pfn); | 707 | phys = __pfn_to_phys(md->pfn); |
@@ -755,150 +731,18 @@ static void __init create_mapping(struct map_desc *md) | |||
755 | */ | 731 | */ |
756 | void __init iotable_init(struct map_desc *io_desc, int nr) | 732 | void __init iotable_init(struct map_desc *io_desc, int nr) |
757 | { | 733 | { |
758 | struct map_desc *md; | 734 | int i; |
759 | struct vm_struct *vm; | ||
760 | |||
761 | if (!nr) | ||
762 | return; | ||
763 | |||
764 | vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); | ||
765 | |||
766 | for (md = io_desc; nr; md++, nr--) { | ||
767 | create_mapping(md); | ||
768 | vm->addr = (void *)(md->virtual & PAGE_MASK); | ||
769 | vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); | ||
770 | vm->phys_addr = __pfn_to_phys(md->pfn); | ||
771 | vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; | ||
772 | vm->flags |= VM_ARM_MTYPE(md->type); | ||
773 | vm->caller = iotable_init; | ||
774 | vm_area_add_early(vm++); | ||
775 | } | ||
776 | } | ||
777 | |||
778 | void __init vm_reserve_area_early(unsigned long addr, unsigned long size, | ||
779 | void *caller) | ||
780 | { | ||
781 | struct vm_struct *vm; | ||
782 | |||
783 | vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); | ||
784 | vm->addr = (void *)addr; | ||
785 | vm->size = size; | ||
786 | vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; | ||
787 | vm->caller = caller; | ||
788 | vm_area_add_early(vm); | ||
789 | } | ||
790 | |||
791 | #ifndef CONFIG_ARM_LPAE | ||
792 | |||
793 | /* | ||
794 | * The Linux PMD is made of two consecutive section entries covering 2MB | ||
795 | * (see definition in include/asm/pgtable-2level.h). However a call to | ||
796 | * create_mapping() may optimize static mappings by using individual | ||
797 | * 1MB section mappings. This leaves the actual PMD potentially half | ||
798 | * initialized if the top or bottom section entry isn't used, leaving it | ||
799 | * open to problems if a subsequent ioremap() or vmalloc() tries to use | ||
800 | * the virtual space left free by that unused section entry. | ||
801 | * | ||
802 | * Let's avoid the issue by inserting dummy vm entries covering the unused | ||
803 | * PMD halves once the static mappings are in place. | ||
804 | */ | ||
805 | |||
806 | static void __init pmd_empty_section_gap(unsigned long addr) | ||
807 | { | ||
808 | vm_reserve_area_early(addr, SECTION_SIZE, pmd_empty_section_gap); | ||
809 | } | ||
810 | |||
811 | static void __init fill_pmd_gaps(void) | ||
812 | { | ||
813 | struct vm_struct *vm; | ||
814 | unsigned long addr, next = 0; | ||
815 | pmd_t *pmd; | ||
816 | |||
817 | /* we're still single threaded hence no lock needed here */ | ||
818 | for (vm = vmlist; vm; vm = vm->next) { | ||
819 | if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) | ||
820 | continue; | ||
821 | addr = (unsigned long)vm->addr; | ||
822 | if (addr < next) | ||
823 | continue; | ||
824 | |||
825 | /* | ||
826 | * Check if this vm starts on an odd section boundary. | ||
827 | * If so and the first section entry for this PMD is free | ||
828 | * then we block the corresponding virtual address. | ||
829 | */ | ||
830 | if ((addr & ~PMD_MASK) == SECTION_SIZE) { | ||
831 | pmd = pmd_off_k(addr); | ||
832 | if (pmd_none(*pmd)) | ||
833 | pmd_empty_section_gap(addr & PMD_MASK); | ||
834 | } | ||
835 | |||
836 | /* | ||
837 | * Then check if this vm ends on an odd section boundary. | ||
838 | * If so and the second section entry for this PMD is empty | ||
839 | * then we block the corresponding virtual address. | ||
840 | */ | ||
841 | addr += vm->size; | ||
842 | if ((addr & ~PMD_MASK) == SECTION_SIZE) { | ||
843 | pmd = pmd_off_k(addr) + 1; | ||
844 | if (pmd_none(*pmd)) | ||
845 | pmd_empty_section_gap(addr); | ||
846 | } | ||
847 | |||
848 | /* no need to look at any vm entry until we hit the next PMD */ | ||
849 | next = (addr + PMD_SIZE - 1) & PMD_MASK; | ||
850 | } | ||
851 | } | ||
852 | |||
853 | #else | ||
854 | #define fill_pmd_gaps() do { } while (0) | ||
855 | #endif | ||
856 | |||
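Illustrative aside: a stand-alone model of the boundary test fill_pmd_gaps() applies, following the comment above: with a 2 MiB Linux PMD made of two 1 MiB sections, a mapping edge on an odd section boundary leaves half a PMD unused.

#include <stdio.h>

#define SECTION_SIZE 0x100000UL			/* 1 MiB */
#define PMD_SIZE     (2 * SECTION_SIZE)		/* 2 MiB Linux PMD */
#define PMD_MASK     (~(PMD_SIZE - 1))

static int on_odd_section(unsigned long addr)
{
	return (addr & ~PMD_MASK) == SECTION_SIZE;
}

int main(void)
{
	printf("%d\n", on_odd_section(0xFE100000UL));	/* 1: gap to plug */
	printf("%d\n", on_odd_section(0xFE200000UL));	/* 0: PMD fully used */
	return 0;
}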
857 | #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H) | ||
858 | static void __init pci_reserve_io(void) | ||
859 | { | ||
860 | struct vm_struct *vm; | ||
861 | unsigned long addr; | ||
862 | |||
863 | /* we're still single threaded hence no lock needed here */ | ||
864 | for (vm = vmlist; vm; vm = vm->next) { | ||
865 | if (!(vm->flags & VM_ARM_STATIC_MAPPING)) | ||
866 | continue; | ||
867 | addr = (unsigned long)vm->addr; | ||
868 | addr &= ~(SZ_2M - 1); | ||
869 | if (addr == PCI_IO_VIRT_BASE) | ||
870 | return; | ||
871 | |||
872 | } | ||
873 | vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io); | ||
874 | } | ||
875 | #else | ||
876 | #define pci_reserve_io() do { } while (0) | ||
877 | #endif | ||
878 | |||
879 | #ifdef CONFIG_DEBUG_LL | ||
880 | void __init debug_ll_io_init(void) | ||
881 | { | ||
882 | struct map_desc map; | ||
883 | 735 | ||
884 | debug_ll_addr(&map.pfn, &map.virtual); | 736 | for (i = 0; i < nr; i++) |
885 | if (!map.pfn || !map.virtual) | 737 | create_mapping(io_desc + i); |
886 | return; | ||
887 | map.pfn = __phys_to_pfn(map.pfn); | ||
888 | map.virtual &= PAGE_MASK; | ||
889 | map.length = PAGE_SIZE; | ||
890 | map.type = MT_DEVICE; | ||
891 | create_mapping(&map); | ||
892 | } | 738 | } |
893 | #endif | ||
894 | 739 | ||
895 | static void * __initdata vmalloc_min = | 740 | static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); |
896 | (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); | ||
897 | 741 | ||
898 | /* | 742 | /* |
899 | * vmalloc=size forces the vmalloc area to be exactly 'size' | 743 | * vmalloc=size forces the vmalloc area to be exactly 'size' |
900 | * bytes. This can be used to increase (or decrease) the vmalloc | 744 | * bytes. This can be used to increase (or decrease) the vmalloc |
901 | * area - the default is 240m. | 745 | * area - the default is 128m. |
902 | */ | 746 | */ |
903 | static int __init early_vmalloc(char *arg) | 747 | static int __init early_vmalloc(char *arg) |
904 | { | 748 | { |
@@ -923,7 +767,7 @@ static int __init early_vmalloc(char *arg) | |||
923 | } | 767 | } |
924 | early_param("vmalloc", early_vmalloc); | 768 | early_param("vmalloc", early_vmalloc); |
925 | 769 | ||
926 | phys_addr_t arm_lowmem_limit __initdata = 0; | 770 | phys_addr_t lowmem_limit; |
927 | 771 | ||
928 | void __init sanity_check_meminfo(void) | 772 | void __init sanity_check_meminfo(void) |
929 | { | 773 | { |
@@ -933,9 +777,6 @@ void __init sanity_check_meminfo(void) | |||
933 | struct membank *bank = &meminfo.bank[j]; | 777 | struct membank *bank = &meminfo.bank[j]; |
934 | *bank = meminfo.bank[i]; | 778 | *bank = meminfo.bank[i]; |
935 | 779 | ||
936 | if (bank->start > ULONG_MAX) | ||
937 | highmem = 1; | ||
938 | |||
939 | #ifdef CONFIG_HIGHMEM | 780 | #ifdef CONFIG_HIGHMEM |
940 | if (__va(bank->start) >= vmalloc_min || | 781 | if (__va(bank->start) >= vmalloc_min || |
941 | __va(bank->start) < (void *)PAGE_OFFSET) | 782 | __va(bank->start) < (void *)PAGE_OFFSET) |
@@ -947,7 +788,7 @@ void __init sanity_check_meminfo(void) | |||
947 | * Split those memory banks which are partially overlapping | 788 | * Split those memory banks which are partially overlapping |
948 | * the vmalloc area greatly simplifying things later. | 789 | * the vmalloc area greatly simplifying things later. |
949 | */ | 790 | */ |
950 | if (!highmem && __va(bank->start) < vmalloc_min && | 791 | if (__va(bank->start) < vmalloc_min && |
951 | bank->size > vmalloc_min - __va(bank->start)) { | 792 | bank->size > vmalloc_min - __va(bank->start)) { |
952 | if (meminfo.nr_banks >= NR_BANKS) { | 793 | if (meminfo.nr_banks >= NR_BANKS) { |
953 | printk(KERN_CRIT "NR_BANKS too low, " | 794 | printk(KERN_CRIT "NR_BANKS too low, " |
@@ -968,17 +809,6 @@ void __init sanity_check_meminfo(void) | |||
968 | bank->highmem = highmem; | 809 | bank->highmem = highmem; |
969 | 810 | ||
970 | /* | 811 | /* |
971 | * Highmem banks not allowed with !CONFIG_HIGHMEM. | ||
972 | */ | ||
973 | if (highmem) { | ||
974 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
975 | "(!CONFIG_HIGHMEM).\n", | ||
976 | (unsigned long long)bank->start, | ||
977 | (unsigned long long)bank->start + bank->size - 1); | ||
978 | continue; | ||
979 | } | ||
980 | |||
981 | /* | ||
982 | * Check whether this memory bank would entirely overlap | 812 | * Check whether this memory bank would entirely overlap |
983 | * the vmalloc area. | 813 | * the vmalloc area. |
984 | */ | 814 | */ |
@@ -995,8 +825,8 @@ void __init sanity_check_meminfo(void) | |||
995 | * Check whether this memory bank would partially overlap | 825 | * Check whether this memory bank would partially overlap |
996 | * the vmalloc area. | 826 | * the vmalloc area. |
997 | */ | 827 | */ |
998 | if (__va(bank->start + bank->size - 1) >= vmalloc_min || | 828 | if (__va(bank->start + bank->size) > vmalloc_min || |
999 | __va(bank->start + bank->size - 1) <= __va(bank->start)) { | 829 | __va(bank->start + bank->size) < __va(bank->start)) { |
1000 | unsigned long newsize = vmalloc_min - __va(bank->start); | 830 | unsigned long newsize = vmalloc_min - __va(bank->start); |
1001 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 831 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " |
1002 | "to -%.8llx (vmalloc region overlap).\n", | 832 | "to -%.8llx (vmalloc region overlap).\n", |
@@ -1006,8 +836,8 @@ void __init sanity_check_meminfo(void) | |||
1006 | bank->size = newsize; | 836 | bank->size = newsize; |
1007 | } | 837 | } |
1008 | #endif | 838 | #endif |
1009 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) | 839 | if (!bank->highmem && bank->start + bank->size > lowmem_limit) |
1010 | arm_lowmem_limit = bank->start + bank->size; | 840 | lowmem_limit = bank->start + bank->size; |
1011 | 841 | ||
1012 | j++; | 842 | j++; |
1013 | } | 843 | } |
@@ -1032,8 +862,7 @@ void __init sanity_check_meminfo(void) | |||
1032 | } | 862 | } |
1033 | #endif | 863 | #endif |
1034 | meminfo.nr_banks = j; | 864 | meminfo.nr_banks = j; |
1035 | high_memory = __va(arm_lowmem_limit - 1) + 1; | 865 | memblock_set_current_limit(lowmem_limit); |
1036 | memblock_set_current_limit(arm_lowmem_limit); | ||
1037 | } | 866 | } |
1038 | 867 | ||
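Illustrative aside: a stand-alone model of the partial-overlap truncation in sanity_check_meminfo(), with invented addresses; a 1 GiB bank whose direct mapping would cross vmalloc_min is clipped to the directly mappable part.

#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0xC0000000UL;  /* assumed __va(PHYS_OFFSET) */
	unsigned long vmalloc_min = 0xEF800000UL;  /* assumed vmalloc start */
	unsigned long bank_va = page_offset;       /* simplified __va(start) */
	unsigned long size = 0x40000000UL;         /* 1 GiB bank */

	if (bank_va < vmalloc_min && size > vmalloc_min - bank_va)
		size = vmalloc_min - bank_va;      /* truncate at the limit */

	printf("kept %#lx bytes\n", size);         /* 0x2f800000, ~760 MiB */
	return 0;
}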
1039 | static inline void prepare_page_table(void) | 868 | static inline void prepare_page_table(void) |
@@ -1044,40 +873,32 @@ static inline void prepare_page_table(void) | |||
1044 | /* | 873 | /* |
1045 | * Clear out all the mappings below the kernel image. | 874 | * Clear out all the mappings below the kernel image. |
1046 | */ | 875 | */ |
1047 | for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE) | 876 | for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) |
1048 | pmd_clear(pmd_off_k(addr)); | 877 | pmd_clear(pmd_off_k(addr)); |
1049 | 878 | ||
1050 | #ifdef CONFIG_XIP_KERNEL | 879 | #ifdef CONFIG_XIP_KERNEL |
1051 | /* The XIP kernel is mapped in the module area -- skip over it */ | 880 | /* The XIP kernel is mapped in the module area -- skip over it */ |
1052 | addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK; | 881 | addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK; |
1053 | #endif | 882 | #endif |
1054 | for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE) | 883 | for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) |
1055 | pmd_clear(pmd_off_k(addr)); | 884 | pmd_clear(pmd_off_k(addr)); |
1056 | 885 | ||
1057 | /* | 886 | /* |
1058 | * Find the end of the first block of lowmem. | 887 | * Find the end of the first block of lowmem. |
1059 | */ | 888 | */ |
1060 | end = memblock.memory.regions[0].base + memblock.memory.regions[0].size; | 889 | end = memblock.memory.regions[0].base + memblock.memory.regions[0].size; |
1061 | if (end >= arm_lowmem_limit) | 890 | if (end >= lowmem_limit) |
1062 | end = arm_lowmem_limit; | 891 | end = lowmem_limit; |
1063 | 892 | ||
1064 | /* | 893 | /* |
1065 | * Clear out all the kernel space mappings, except for the first | 894 | * Clear out all the kernel space mappings, except for the first |
1066 | * memory bank, up to the vmalloc region. | 895 | * memory bank, up to the end of the vmalloc region. |
1067 | */ | 896 | */ |
1068 | for (addr = __phys_to_virt(end); | 897 | for (addr = __phys_to_virt(end); |
1069 | addr < VMALLOC_START; addr += PMD_SIZE) | 898 | addr < VMALLOC_END; addr += PGDIR_SIZE) |
1070 | pmd_clear(pmd_off_k(addr)); | 899 | pmd_clear(pmd_off_k(addr)); |
1071 | } | 900 | } |
1072 | 901 | ||
1073 | #ifdef CONFIG_ARM_LPAE | ||
1074 | /* the first page is reserved for pgd */ | ||
1075 | #define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \ | ||
1076 | PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t)) | ||
1077 | #else | ||
1078 | #define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) | ||
1079 | #endif | ||
1080 | |||
1081 | /* | 902 | /* |
1082 | * Reserve the special regions of memory | 903 | * Reserve the special regions of memory |
1083 | */ | 904 | */ |
@@ -1087,7 +908,7 @@ void __init arm_mm_memblock_reserve(void) | |||
1087 | * Reserve the page tables. These are already in use, | 908 | * Reserve the page tables. These are already in use, |
1088 | * and can only be in node 0. | 909 | * and can only be in node 0. |
1089 | */ | 910 | */ |
1090 | memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE); | 911 | memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); |
1091 | 912 | ||
1092 | #ifdef CONFIG_SA1111 | 913 | #ifdef CONFIG_SA1111 |
1093 | /* | 914 | /* |
@@ -1099,8 +920,8 @@ void __init arm_mm_memblock_reserve(void) | |||
1099 | } | 920 | } |
1100 | 921 | ||
1101 | /* | 922 | /* |
1102 | * Set up the device mappings. Since we clear out the page tables for all | 923 | * Set up the device mappings. Since we clear out the page tables for all
1103 | * mappings above VMALLOC_START, we will remove any debug device mappings. | 924 | * mappings above VMALLOC_END, we will remove any debug device mappings. |
1104 | * This means you have to be careful how you debug this function, or any | 925 | * This means you have to be careful how you debug this function, or any |
1105 | * called function. This means you can't use any function or debugging | 926 | * called function. This means you can't use any function or debugging |
1106 | * method which may touch any device, otherwise the kernel _will_ crash. | 927 | * method which may touch any device, otherwise the kernel _will_ crash. |
@@ -1109,16 +930,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1109 | { | 930 | { |
1110 | struct map_desc map; | 931 | struct map_desc map; |
1111 | unsigned long addr; | 932 | unsigned long addr; |
1112 | void *vectors; | ||
1113 | 933 | ||
1114 | /* | 934 | /* |
1115 | * Allocate the vector page early. | 935 | * Allocate the vector page early. |
1116 | */ | 936 | */ |
1117 | vectors = early_alloc(PAGE_SIZE); | 937 | vectors_page = early_alloc(PAGE_SIZE); |
1118 | |||
1119 | early_trap_init(vectors); | ||
1120 | 938 | ||
1121 | for (addr = VMALLOC_START; addr; addr += PMD_SIZE) | 939 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) |
1122 | pmd_clear(pmd_off_k(addr)); | 940 | pmd_clear(pmd_off_k(addr)); |
1123 | 941 | ||
1124 | /* | 942 | /* |
@@ -1156,7 +974,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1156 | * location (0xffff0000). If we aren't using high-vectors, also | 974 | * location (0xffff0000). If we aren't using high-vectors, also |
1157 | * create a mapping at the low-vectors virtual address. | 975 | * create a mapping at the low-vectors virtual address. |
1158 | */ | 976 | */ |
1159 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); | 977 | map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); |
1160 | map.virtual = 0xffff0000; | 978 | map.virtual = 0xffff0000; |
1161 | map.length = PAGE_SIZE; | 979 | map.length = PAGE_SIZE; |
1162 | map.type = MT_HIGH_VECTORS; | 980 | map.type = MT_HIGH_VECTORS; |
@@ -1173,10 +991,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1173 | */ | 991 | */ |
1174 | if (mdesc->map_io) | 992 | if (mdesc->map_io) |
1175 | mdesc->map_io(); | 993 | mdesc->map_io(); |
1176 | fill_pmd_gaps(); | ||
1177 | |||
1178 | /* Reserve fixed i/o space in VMALLOC region */ | ||
1179 | pci_reserve_io(); | ||
1180 | 994 | ||
1181 | /* | 995 | /* |
1182 | * Finally flush the caches and tlb to ensure that we're in a | 996 | * Finally flush the caches and tlb to ensure that we're in a |
@@ -1206,8 +1020,8 @@ static void __init map_lowmem(void) | |||
1206 | phys_addr_t end = start + reg->size; | 1020 | phys_addr_t end = start + reg->size; |
1207 | struct map_desc map; | 1021 | struct map_desc map; |
1208 | 1022 | ||
1209 | if (end > arm_lowmem_limit) | 1023 | if (end > lowmem_limit) |
1210 | end = arm_lowmem_limit; | 1024 | end = lowmem_limit; |
1211 | if (start >= end) | 1025 | if (start >= end) |
1212 | break; | 1026 | break; |
1213 | 1027 | ||
@@ -1228,12 +1042,11 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1228 | { | 1042 | { |
1229 | void *zero_page; | 1043 | void *zero_page; |
1230 | 1044 | ||
1231 | memblock_set_current_limit(arm_lowmem_limit); | 1045 | memblock_set_current_limit(lowmem_limit); |
1232 | 1046 | ||
1233 | build_mem_type_table(); | 1047 | build_mem_type_table(); |
1234 | prepare_page_table(); | 1048 | prepare_page_table(); |
1235 | map_lowmem(); | 1049 | map_lowmem(); |
1236 | dma_contiguous_remap(); | ||
1237 | devicemaps_init(mdesc); | 1050 | devicemaps_init(mdesc); |
1238 | kmap_init(); | 1051 | kmap_init(); |
1239 | 1052 | ||
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index d51225f90ae..941a98c9e8a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
15 | #include <asm/setup.h> | 15 | #include <asm/setup.h> |
16 | #include <asm/traps.h> | ||
17 | #include <asm/mach/arch.h> | 16 | #include <asm/mach/arch.h> |
18 | 17 | ||
19 | #include "mm.h" | 18 | #include "mm.h" |
@@ -30,8 +29,6 @@ void __init arm_mm_memblock_reserve(void) | |||
30 | 29 | ||
31 | void __init sanity_check_meminfo(void) | 30 | void __init sanity_check_meminfo(void) |
32 | { | 31 | { |
33 | phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | ||
34 | high_memory = __va(end - 1) + 1; | ||
35 | } | 32 | } |
36 | 33 | ||
37 | /* | 34 | /* |
@@ -40,14 +37,13 @@ void __init sanity_check_meminfo(void) | |||
40 | */ | 37 | */ |
41 | void __init paging_init(struct machine_desc *mdesc) | 38 | void __init paging_init(struct machine_desc *mdesc) |
42 | { | 39 | { |
43 | early_trap_init((void *)CONFIG_VECTORS_BASE); | ||
44 | bootmem_init(); | 40 | bootmem_init(); |
45 | } | 41 | } |
46 | 42 | ||
47 | /* | 43 | /* |
48 | * We don't need to do anything here for nommu machines. | 44 | * We don't need to do anything here for nommu machines. |
49 | */ | 45 | */ |
50 | void setup_mm_for_reboot(void) | 46 | void setup_mm_for_reboot(char mode) |
51 | { | 47 | { |
52 | } | 48 | } |
53 | 49 | ||
@@ -88,17 +84,13 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | |||
88 | } | 84 | } |
89 | EXPORT_SYMBOL(__arm_ioremap); | 85 | EXPORT_SYMBOL(__arm_ioremap); |
90 | 86 | ||
91 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); | ||
92 | |||
93 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | 87 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, |
94 | unsigned int mtype, void *caller) | 88 | unsigned int mtype, void *caller) |
95 | { | 89 | { |
96 | return __arm_ioremap(phys_addr, size, mtype); | 90 | return __arm_ioremap(phys_addr, size, mtype); |
97 | } | 91 | } |
98 | 92 | ||
99 | void (*arch_iounmap)(volatile void __iomem *); | 93 | void __iounmap(volatile void __iomem *addr) |
100 | |||
101 | void __arm_iounmap(volatile void __iomem *addr) | ||
102 | { | 94 | { |
103 | } | 95 | } |
104 | EXPORT_SYMBOL(__arm_iounmap); | 96 | EXPORT_SYMBOL(__iounmap); |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 0acb089d0f7..3e9503bb7bf 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -10,22 +10,29 @@ | |||
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <linux/slab.h> | ||
14 | 13 | ||
15 | #include <asm/cp15.h> | ||
16 | #include <asm/pgalloc.h> | 14 | #include <asm/pgalloc.h> |
17 | #include <asm/page.h> | 15 | #include <asm/page.h> |
18 | #include <asm/tlbflush.h> | 16 | #include <asm/tlbflush.h> |
19 | 17 | ||
20 | #include "mm.h" | 18 | #include "mm.h" |
21 | 19 | ||
22 | #ifdef CONFIG_ARM_LPAE | 20 | DEFINE_SPINLOCK(pgd_lock); |
23 | #define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL) | 21 | LIST_HEAD(pgd_list); |
24 | #define __pgd_free(pgd) kfree(pgd) | 22 | |
25 | #else | 23 | static inline void pgd_list_add(pgd_t *pgd) |
26 | #define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2) | 24 | { |
27 | #define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) | 25 | struct page *page = virt_to_page(pgd); |
28 | #endif | 26 | |
27 | list_add(&page->lru, &pgd_list); | ||
28 | } | ||
29 | |||
30 | static inline void pgd_list_del(pgd_t *pgd) | ||
31 | { | ||
32 | struct page *page = virt_to_page(pgd); | ||
33 | |||
34 | list_del(&page->lru); | ||
35 | } | ||
29 | 36 | ||
30 | /* | 37 | /* |
31 | * need to get a 16k page for level 1 | 38 | * need to get a 16k page for level 1 |
@@ -36,13 +43,15 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
36 | pud_t *new_pud, *init_pud; | 43 | pud_t *new_pud, *init_pud; |
37 | pmd_t *new_pmd, *init_pmd; | 44 | pmd_t *new_pmd, *init_pmd; |
38 | pte_t *new_pte, *init_pte; | 45 | pte_t *new_pte, *init_pte; |
46 | unsigned long flags; | ||
39 | 47 | ||
40 | new_pgd = __pgd_alloc(); | 48 | new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2); |
41 | if (!new_pgd) | 49 | if (!new_pgd) |
42 | goto no_pgd; | 50 | goto no_pgd; |
43 | 51 | ||
44 | memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); | 52 | memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); |
45 | 53 | ||
54 | spin_lock_irqsave(&pgd_lock, flags); | ||
46 | /* | 55 | /* |
47 | * Copy over the kernel and IO PGD entries | 56 | * Copy over the kernel and IO PGD entries |
48 | */ | 57 | */ |
@@ -50,27 +59,17 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
50 | memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, | 59 | memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, |
51 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | 60 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
52 | 61 | ||
62 | #if !defined(CONFIG_CPU_CACHE_V7) || !defined(CONFIG_SMP) | ||
53 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | 63 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); |
54 | |||
55 | #ifdef CONFIG_ARM_LPAE | ||
56 | /* | ||
57 | * Allocate PMD table for modules and pkmap mappings. | ||
58 | */ | ||
59 | new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), | ||
60 | MODULES_VADDR); | ||
61 | if (!new_pud) | ||
62 | goto no_pud; | ||
63 | |||
64 | new_pmd = pmd_alloc(mm, new_pud, 0); | ||
65 | if (!new_pmd) | ||
66 | goto no_pmd; | ||
67 | #endif | 64 | #endif |
68 | 65 | ||
66 | pgd_list_add(new_pgd); | ||
67 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
68 | |||
69 | if (!vectors_high()) { | 69 | if (!vectors_high()) { |
70 | /* | 70 | /* |
71 | * On ARM, first page must always be allocated since it | 71 | * On ARM, first page must always be allocated since it |
72 | * contains the machine vectors. The vectors are always high | 72 | * contains the machine vectors. |
73 | * with LPAE. | ||
74 | */ | 73 | */ |
75 | new_pud = pud_alloc(mm, new_pgd, 0); | 74 | new_pud = pud_alloc(mm, new_pgd, 0); |
76 | if (!new_pud) | 75 | if (!new_pud) |
@@ -99,7 +98,10 @@ no_pte: | |||
99 | no_pmd: | 98 | no_pmd: |
100 | pud_free(mm, new_pud); | 99 | pud_free(mm, new_pud); |
101 | no_pud: | 100 | no_pud: |
102 | __pgd_free(new_pgd); | 101 | spin_lock_irqsave(&pgd_lock, flags); |
102 | pgd_list_del(new_pgd); | ||
103 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
104 | free_pages((unsigned long)new_pgd, 2); | ||
103 | no_pgd: | 105 | no_pgd: |
104 | return NULL; | 106 | return NULL; |
105 | } | 107 | } |
@@ -110,10 +112,15 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) | |||
110 | pud_t *pud; | 112 | pud_t *pud; |
111 | pmd_t *pmd; | 113 | pmd_t *pmd; |
112 | pgtable_t pte; | 114 | pgtable_t pte; |
115 | unsigned long flags; | ||
113 | 116 | ||
114 | if (!pgd_base) | 117 | if (!pgd_base) |
115 | return; | 118 | return; |
116 | 119 | ||
120 | spin_lock_irqsave(&pgd_lock, flags); | ||
121 | pgd_list_del(pgd_base); | ||
122 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
123 | |||
117 | pgd = pgd_base + pgd_index(0); | 124 | pgd = pgd_base + pgd_index(0); |
118 | if (pgd_none_or_clear_bad(pgd)) | 125 | if (pgd_none_or_clear_bad(pgd)) |
119 | goto no_pgd; | 126 | goto no_pgd; |
@@ -136,24 +143,5 @@ no_pud: | |||
136 | pgd_clear(pgd); | 143 | pgd_clear(pgd); |
137 | pud_free(mm, pud); | 144 | pud_free(mm, pud); |
138 | no_pgd: | 145 | no_pgd: |
139 | #ifdef CONFIG_ARM_LPAE | 146 | free_pages((unsigned long) pgd_base, 2); |
140 | /* | ||
141 | * Free modules/pkmap or identity pmd tables. | ||
142 | */ | ||
143 | for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) { | ||
144 | if (pgd_none_or_clear_bad(pgd)) | ||
145 | continue; | ||
146 | if (pgd_val(*pgd) & L_PGD_SWAPPER) | ||
147 | continue; | ||
148 | pud = pud_offset(pgd, 0); | ||
149 | if (pud_none_or_clear_bad(pud)) | ||
150 | continue; | ||
151 | pmd = pmd_offset(pud, 0); | ||
152 | pud_clear(pud); | ||
153 | pmd_free(mm, pmd); | ||
154 | pgd_clear(pgd); | ||
155 | pud_free(mm, pud); | ||
156 | } | ||
157 | #endif | ||
158 | __pgd_free(pgd_base); | ||
159 | } | 147 | } |
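Illustrative aside: a stand-alone model of the zero-then-copy split in pgd_alloc() above, using the classic 2-level layout (2 MiB per pgd entry, 2048 entries) and a simplified 3 GiB TASK_SIZE; user entries start zeroed while kernel and IO entries are inherited from init_mm's page directory.

#include <stdio.h>
#include <string.h>

#define PTRS_PER_PGD      2048
#define PGDIR_SIZE        0x200000UL		/* 2 MiB per entry */
#define TASK_SIZE         0xC0000000UL		/* simplified split */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)	/* 1536 */

int main(void)
{
	static unsigned long init_pgd[PTRS_PER_PGD] = { [2000] = 0x1234 };
	static unsigned long new_pgd[PTRS_PER_PGD];

	memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(new_pgd[0]));
	memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(new_pgd[0]));

	/* a kernel-region entry survives into the new page directory */
	printf("entry 2000 = %#lx\n", new_pgd[2000]);
	return 0;
}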
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 2bb61e703d6..67469665d47 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -95,7 +95,6 @@ ENTRY(cpu_arm1020_proc_fin) | |||
95 | * loc: location to jump to for soft reset | 95 | * loc: location to jump to for soft reset |
96 | */ | 96 | */ |
97 | .align 5 | 97 | .align 5 |
98 | .pushsection .idmap.text, "ax" | ||
99 | ENTRY(cpu_arm1020_reset) | 98 | ENTRY(cpu_arm1020_reset) |
100 | mov ip, #0 | 99 | mov ip, #0 |
101 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 100 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -108,8 +107,6 @@ ENTRY(cpu_arm1020_reset) | |||
108 | bic ip, ip, #0x1100 @ ...i...s........ | 107 | bic ip, ip, #0x1100 @ ...i...s........ |
109 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 108 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
110 | mov pc, r0 | 109 | mov pc, r0 |
111 | ENDPROC(cpu_arm1020_reset) | ||
112 | .popsection | ||
113 | 110 | ||
114 | /* | 111 | /* |
115 | * cpu_arm1020_do_idle() | 112 | * cpu_arm1020_do_idle() |
@@ -241,7 +238,6 @@ ENTRY(arm1020_coherent_user_range) | |||
241 | cmp r0, r1 | 238 | cmp r0, r1 |
242 | blo 1b | 239 | blo 1b |
243 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 240 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
244 | mov r0, #0 | ||
245 | mov pc, lr | 241 | mov pc, lr |
246 | 242 | ||
247 | /* | 243 | /* |
@@ -368,9 +364,6 @@ ENTRY(arm1020_dma_unmap_area) | |||
368 | mov pc, lr | 364 | mov pc, lr |
369 | ENDPROC(arm1020_dma_unmap_area) | 365 | ENDPROC(arm1020_dma_unmap_area) |
370 | 366 | ||
371 | .globl arm1020_flush_kern_cache_louis | ||
372 | .equ arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all | ||
373 | |||
374 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 367 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
375 | define_cache_functions arm1020 | 368 | define_cache_functions arm1020 |
376 | 369 | ||
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 8f96aa40f51..4251421c0ed 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -95,7 +95,6 @@ ENTRY(cpu_arm1020e_proc_fin) | |||
95 | * loc: location to jump to for soft reset | 95 | * loc: location to jump to for soft reset |
96 | */ | 96 | */ |
97 | .align 5 | 97 | .align 5 |
98 | .pushsection .idmap.text, "ax" | ||
99 | ENTRY(cpu_arm1020e_reset) | 98 | ENTRY(cpu_arm1020e_reset) |
100 | mov ip, #0 | 99 | mov ip, #0 |
101 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 100 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -108,8 +107,6 @@ ENTRY(cpu_arm1020e_reset) | |||
108 | bic ip, ip, #0x1100 @ ...i...s........ | 107 | bic ip, ip, #0x1100 @ ...i...s........ |
109 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 108 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
110 | mov pc, r0 | 109 | mov pc, r0 |
111 | ENDPROC(cpu_arm1020e_reset) | ||
112 | .popsection | ||
113 | 110 | ||
114 | /* | 111 | /* |
115 | * cpu_arm1020e_do_idle() | 112 | * cpu_arm1020e_do_idle() |
@@ -235,7 +232,6 @@ ENTRY(arm1020e_coherent_user_range) | |||
235 | cmp r0, r1 | 232 | cmp r0, r1 |
236 | blo 1b | 233 | blo 1b |
237 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 234 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
238 | mov r0, #0 | ||
239 | mov pc, lr | 235 | mov pc, lr |
240 | 236 | ||
241 | /* | 237 | /* |
@@ -354,9 +350,6 @@ ENTRY(arm1020e_dma_unmap_area) | |||
354 | mov pc, lr | 350 | mov pc, lr |
355 | ENDPROC(arm1020e_dma_unmap_area) | 351 | ENDPROC(arm1020e_dma_unmap_area) |
356 | 352 | ||
357 | .globl arm1020e_flush_kern_cache_louis | ||
358 | .equ arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all | ||
359 | |||
360 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 353 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
361 | define_cache_functions arm1020e | 354 | define_cache_functions arm1020e |
362 | 355 | ||
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 8ebe4a469a2..d283cf3d06e 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -84,7 +84,6 @@ ENTRY(cpu_arm1022_proc_fin) | |||
84 | * loc: location to jump to for soft reset | 84 | * loc: location to jump to for soft reset |
85 | */ | 85 | */ |
86 | .align 5 | 86 | .align 5 |
87 | .pushsection .idmap.text, "ax" | ||
88 | ENTRY(cpu_arm1022_reset) | 87 | ENTRY(cpu_arm1022_reset) |
89 | mov ip, #0 | 88 | mov ip, #0 |
90 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 89 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -97,8 +96,6 @@ ENTRY(cpu_arm1022_reset) | |||
97 | bic ip, ip, #0x1100 @ ...i...s........ | 96 | bic ip, ip, #0x1100 @ ...i...s........ |
98 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 97 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
99 | mov pc, r0 | 98 | mov pc, r0 |
100 | ENDPROC(cpu_arm1022_reset) | ||
101 | .popsection | ||
102 | 99 | ||
103 | /* | 100 | /* |
104 | * cpu_arm1022_do_idle() | 101 | * cpu_arm1022_do_idle() |
@@ -224,7 +221,6 @@ ENTRY(arm1022_coherent_user_range) | |||
224 | cmp r0, r1 | 221 | cmp r0, r1 |
225 | blo 1b | 222 | blo 1b |
226 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 223 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
227 | mov r0, #0 | ||
228 | mov pc, lr | 224 | mov pc, lr |
229 | 225 | ||
230 | /* | 226 | /* |
@@ -343,9 +339,6 @@ ENTRY(arm1022_dma_unmap_area) | |||
343 | mov pc, lr | 339 | mov pc, lr |
344 | ENDPROC(arm1022_dma_unmap_area) | 340 | ENDPROC(arm1022_dma_unmap_area) |
345 | 341 | ||
346 | .globl arm1022_flush_kern_cache_louis | ||
347 | .equ arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all | ||
348 | |||
349 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 342 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
350 | define_cache_functions arm1022 | 343 | define_cache_functions arm1022 |
351 | 344 | ||
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 093fc7e520c..678a1ceafed 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -84,7 +84,6 @@ ENTRY(cpu_arm1026_proc_fin) | |||
84 | * loc: location to jump to for soft reset | 84 | * loc: location to jump to for soft reset |
85 | */ | 85 | */ |
86 | .align 5 | 86 | .align 5 |
87 | .pushsection .idmap.text, "ax" | ||
88 | ENTRY(cpu_arm1026_reset) | 87 | ENTRY(cpu_arm1026_reset) |
89 | mov ip, #0 | 88 | mov ip, #0 |
90 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 89 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -97,8 +96,6 @@ ENTRY(cpu_arm1026_reset) | |||
97 | bic ip, ip, #0x1100 @ ...i...s........ | 96 | bic ip, ip, #0x1100 @ ...i...s........ |
98 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 97 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
99 | mov pc, r0 | 98 | mov pc, r0 |
100 | ENDPROC(cpu_arm1026_reset) | ||
101 | .popsection | ||
102 | 99 | ||
103 | /* | 100 | /* |
104 | * cpu_arm1026_do_idle() | 101 | * cpu_arm1026_do_idle() |
@@ -218,7 +215,6 @@ ENTRY(arm1026_coherent_user_range) | |||
218 | cmp r0, r1 | 215 | cmp r0, r1 |
219 | blo 1b | 216 | blo 1b |
220 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 217 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
221 | mov r0, #0 | ||
222 | mov pc, lr | 218 | mov pc, lr |
223 | 219 | ||
224 | /* | 220 | /* |
@@ -337,9 +333,6 @@ ENTRY(arm1026_dma_unmap_area) | |||
337 | mov pc, lr | 333 | mov pc, lr |
338 | ENDPROC(arm1026_dma_unmap_area) | 334 | ENDPROC(arm1026_dma_unmap_area) |
339 | 335 | ||
340 | .globl arm1026_flush_kern_cache_louis | ||
341 | .equ arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all | ||
342 | |||
343 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 336 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
344 | define_cache_functions arm1026 | 337 | define_cache_functions arm1026 |
345 | 338 | ||
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 0ac908c7ade..55f4e290665 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
@@ -101,7 +101,6 @@ ENTRY(cpu_arm720_set_pte_ext) | |||
101 | * Params : r0 = address to jump to | 101 | * Params : r0 = address to jump to |
102 | * Notes : This sets up everything for a reset | 102 | * Notes : This sets up everything for a reset |
103 | */ | 103 | */ |
104 | .pushsection .idmap.text, "ax" | ||
105 | ENTRY(cpu_arm720_reset) | 104 | ENTRY(cpu_arm720_reset) |
106 | mov ip, #0 | 105 | mov ip, #0 |
107 | mcr p15, 0, ip, c7, c7, 0 @ invalidate cache | 106 | mcr p15, 0, ip, c7, c7, 0 @ invalidate cache |
@@ -113,8 +112,6 @@ ENTRY(cpu_arm720_reset) | |||
113 | bic ip, ip, #0x2100 @ ..v....s........ | 112 | bic ip, ip, #0x2100 @ ..v....s........ |
114 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 113 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
115 | mov pc, r0 | 114 | mov pc, r0 |
116 | ENDPROC(cpu_arm720_reset) | ||
117 | .popsection | ||
118 | 115 | ||
119 | __CPUINIT | 116 | __CPUINIT |
120 | 117 | ||
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index dc5de5d53f2..4506be3adda 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
@@ -49,7 +49,6 @@ ENTRY(cpu_arm740_proc_fin) | |||
49 | * Params : r0 = address to jump to | 49 | * Params : r0 = address to jump to |
50 | * Notes : This sets up everything for a reset | 50 | * Notes : This sets up everything for a reset |
51 | */ | 51 | */ |
52 | .pushsection .idmap.text, "ax" | ||
53 | ENTRY(cpu_arm740_reset) | 52 | ENTRY(cpu_arm740_reset) |
54 | mov ip, #0 | 53 | mov ip, #0 |
55 | mcr p15, 0, ip, c7, c0, 0 @ invalidate cache | 54 | mcr p15, 0, ip, c7, c0, 0 @ invalidate cache |
@@ -57,8 +56,6 @@ ENTRY(cpu_arm740_reset) | |||
57 | bic ip, ip, #0x0000000c @ ............wc.. | 56 | bic ip, ip, #0x0000000c @ ............wc.. |
58 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 57 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
59 | mov pc, r0 | 58 | mov pc, r0 |
60 | ENDPROC(cpu_arm740_reset) | ||
61 | .popsection | ||
62 | 59 | ||
63 | __CPUINIT | 60 | __CPUINIT |
64 | 61 | ||
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 6ddea3e464b..7e0e1fe4ed4 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
@@ -45,11 +45,8 @@ ENTRY(cpu_arm7tdmi_proc_fin) | |||
45 | * Params : loc(r0) address to jump to | 45 | * Params : loc(r0) address to jump to |
46 | * Purpose : Sets up everything for a reset and jump to the location for soft reset. | 46 | * Purpose : Sets up everything for a reset and jump to the location for soft reset. |
47 | */ | 47 | */ |
48 | .pushsection .idmap.text, "ax" | ||
49 | ENTRY(cpu_arm7tdmi_reset) | 48 | ENTRY(cpu_arm7tdmi_reset) |
50 | mov pc, r0 | 49 | mov pc, r0 |
51 | ENDPROC(cpu_arm7tdmi_reset) | ||
52 | .popsection | ||
53 | 50 | ||
54 | __CPUINIT | 51 | __CPUINIT |
55 | 52 | ||
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2c3b9421ab5..2e6849b41f6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -85,7 +85,6 @@ ENTRY(cpu_arm920_proc_fin) | |||
85 | * loc: location to jump to for soft reset | 85 | * loc: location to jump to for soft reset |
86 | */ | 86 | */ |
87 | .align 5 | 87 | .align 5 |
88 | .pushsection .idmap.text, "ax" | ||
89 | ENTRY(cpu_arm920_reset) | 88 | ENTRY(cpu_arm920_reset) |
90 | mov ip, #0 | 89 | mov ip, #0 |
91 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 90 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -98,8 +97,6 @@ ENTRY(cpu_arm920_reset) | |||
98 | bic ip, ip, #0x1100 @ ...i...s........ | 97 | bic ip, ip, #0x1100 @ ...i...s........ |
99 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 98 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
100 | mov pc, r0 | 99 | mov pc, r0 |
101 | ENDPROC(cpu_arm920_reset) | ||
102 | .popsection | ||
103 | 100 | ||
104 | /* | 101 | /* |
105 | * cpu_arm920_do_idle() | 102 | * cpu_arm920_do_idle() |
@@ -210,7 +207,6 @@ ENTRY(arm920_coherent_user_range) | |||
210 | cmp r0, r1 | 207 | cmp r0, r1 |
211 | blo 1b | 208 | blo 1b |
212 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 209 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
213 | mov r0, #0 | ||
214 | mov pc, lr | 210 | mov pc, lr |
215 | 211 | ||
216 | /* | 212 | /* |
@@ -319,9 +315,6 @@ ENTRY(arm920_dma_unmap_area) | |||
319 | mov pc, lr | 315 | mov pc, lr |
320 | ENDPROC(arm920_dma_unmap_area) | 316 | ENDPROC(arm920_dma_unmap_area) |
321 | 317 | ||
322 | .globl arm920_flush_kern_cache_louis | ||
323 | .equ arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all | ||
324 | |||
325 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 318 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
326 | define_cache_functions arm920 | 319 | define_cache_functions arm920 |
327 | #endif | 320 | #endif |
@@ -386,26 +379,31 @@ ENTRY(cpu_arm920_set_pte_ext) | |||
386 | 379 | ||
387 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 380 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
388 | .globl cpu_arm920_suspend_size | 381 | .globl cpu_arm920_suspend_size |
389 | .equ cpu_arm920_suspend_size, 4 * 3 | 382 | .equ cpu_arm920_suspend_size, 4 * 4 |
390 | #ifdef CONFIG_PM_SLEEP | 383 | #ifdef CONFIG_PM_SLEEP |
391 | ENTRY(cpu_arm920_do_suspend) | 384 | ENTRY(cpu_arm920_do_suspend) |
392 | stmfd sp!, {r4 - r6, lr} | 385 | stmfd sp!, {r4 - r7, lr} |
393 | mrc p15, 0, r4, c13, c0, 0 @ PID | 386 | mrc p15, 0, r4, c13, c0, 0 @ PID |
394 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | 387 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID |
395 | mrc p15, 0, r6, c1, c0, 0 @ Control register | 388 | mrc p15, 0, r6, c2, c0, 0 @ TTB address |
396 | stmia r0, {r4 - r6} | 389 | mrc p15, 0, r7, c1, c0, 0 @ Control register |
397 | ldmfd sp!, {r4 - r6, pc} | 390 | stmia r0, {r4 - r7} |
391 | ldmfd sp!, {r4 - r7, pc} | ||
398 | ENDPROC(cpu_arm920_do_suspend) | 392 | ENDPROC(cpu_arm920_do_suspend) |
399 | 393 | ||
400 | ENTRY(cpu_arm920_do_resume) | 394 | ENTRY(cpu_arm920_do_resume) |
401 | mov ip, #0 | 395 | mov ip, #0 |
402 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs | 396 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs |
403 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches | 397 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches |
404 | ldmia r0, {r4 - r6} | 398 | ldmia r0, {r4 - r7} |
405 | mcr p15, 0, r4, c13, c0, 0 @ PID | 399 | mcr p15, 0, r4, c13, c0, 0 @ PID |
406 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | 400 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID |
407 | mcr p15, 0, r1, c2, c0, 0 @ TTB address | 401 | mcr p15, 0, r6, c2, c0, 0 @ TTB address |
408 | mov r0, r6 @ control register | 402 | mov r0, r7 @ control register |
403 | mov r2, r6, lsr #14 @ get TTB0 base | ||
404 | mov r2, r2, lsl #14 | ||
405 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
406 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | ||
409 | b cpu_resume_mmu | 407 | b cpu_resume_mmu |
410 | ENDPROC(cpu_arm920_do_resume) | 408 | ENDPROC(cpu_arm920_do_resume) |
411 | #endif | 409 | #endif |
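This hunk (and the matching arm926 and sa1100 hunks below) grows the suspend save area from three words to four so the translation table base is captured at suspend time rather than passed to resume in r1. A sketch of the save-area layout implied by the stmia r0, {r4 - r7} order — the struct and field names are illustrative, not kernel types:

    #include <stdint.h>

    struct arm920_suspend_regs {     /* one word per stmia/ldmia slot */
        uint32_t fcse_pid;           /* r4: c13, c0, 0  PID           */
        uint32_t domain;             /* r5: c3,  c0, 0  Domain ID     */
        uint32_t ttb;                /* r6: c2,  c0, 0  TTB address   */
        uint32_t control;            /* r7: c1,  c0, 0  Control reg   */
    };

    /* must stay in sync with: .equ cpu_arm920_suspend_size, 4 * 4 */
    _Static_assert(sizeof(struct arm920_suspend_regs) == 4 * 4,
                   "suspend size and register list out of sync");

cpu_arm920_do_resume() reloads the same four words with ldmia and writes them back in the same order, so the .equ size, the register list, and the field order all have to move together.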
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 4464c49d744..490e1883385 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -87,7 +87,6 @@ ENTRY(cpu_arm922_proc_fin) | |||
87 | * loc: location to jump to for soft reset | 87 | * loc: location to jump to for soft reset |
88 | */ | 88 | */ |
89 | .align 5 | 89 | .align 5 |
90 | .pushsection .idmap.text, "ax" | ||
91 | ENTRY(cpu_arm922_reset) | 90 | ENTRY(cpu_arm922_reset) |
92 | mov ip, #0 | 91 | mov ip, #0 |
93 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 92 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -100,8 +99,6 @@ ENTRY(cpu_arm922_reset) | |||
100 | bic ip, ip, #0x1100 @ ...i...s........ | 99 | bic ip, ip, #0x1100 @ ...i...s........ |
101 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 100 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
102 | mov pc, r0 | 101 | mov pc, r0 |
103 | ENDPROC(cpu_arm922_reset) | ||
104 | .popsection | ||
105 | 102 | ||
106 | /* | 103 | /* |
107 | * cpu_arm922_do_idle() | 104 | * cpu_arm922_do_idle() |
@@ -212,7 +209,6 @@ ENTRY(arm922_coherent_user_range) | |||
212 | cmp r0, r1 | 209 | cmp r0, r1 |
213 | blo 1b | 210 | blo 1b |
214 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 211 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
215 | mov r0, #0 | ||
216 | mov pc, lr | 212 | mov pc, lr |
217 | 213 | ||
218 | /* | 214 | /* |
@@ -321,9 +317,6 @@ ENTRY(arm922_dma_unmap_area) | |||
321 | mov pc, lr | 317 | mov pc, lr |
322 | ENDPROC(arm922_dma_unmap_area) | 318 | ENDPROC(arm922_dma_unmap_area) |
323 | 319 | ||
324 | .globl arm922_flush_kern_cache_louis | ||
325 | .equ arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all | ||
326 | |||
327 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 320 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
328 | define_cache_functions arm922 | 321 | define_cache_functions arm922 |
329 | #endif | 322 | #endif |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 281eb9b9c1d..51d494be057 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -108,7 +108,6 @@ ENTRY(cpu_arm925_proc_fin) | |||
108 | * loc: location to jump to for soft reset | 108 | * loc: location to jump to for soft reset |
109 | */ | 109 | */ |
110 | .align 5 | 110 | .align 5 |
111 | .pushsection .idmap.text, "ax" | ||
112 | ENTRY(cpu_arm925_reset) | 111 | ENTRY(cpu_arm925_reset) |
113 | /* Send software reset to MPU and DSP */ | 112 | /* Send software reset to MPU and DSP */ |
114 | mov ip, #0xff000000 | 113 | mov ip, #0xff000000 |
@@ -116,8 +115,6 @@ ENTRY(cpu_arm925_reset) | |||
116 | orr ip, ip, #0x0000ce00 | 115 | orr ip, ip, #0x0000ce00 |
117 | mov r4, #1 | 116 | mov r4, #1 |
118 | strh r4, [ip, #0x10] | 117 | strh r4, [ip, #0x10] |
119 | ENDPROC(cpu_arm925_reset) | ||
120 | .popsection | ||
121 | 118 | ||
122 | mov ip, #0 | 119 | mov ip, #0 |
123 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 120 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -258,7 +255,6 @@ ENTRY(arm925_coherent_user_range) | |||
258 | cmp r0, r1 | 255 | cmp r0, r1 |
259 | blo 1b | 256 | blo 1b |
260 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 257 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
261 | mov r0, #0 | ||
262 | mov pc, lr | 258 | mov pc, lr |
263 | 259 | ||
264 | /* | 260 | /* |
@@ -376,9 +372,6 @@ ENTRY(arm925_dma_unmap_area) | |||
376 | mov pc, lr | 372 | mov pc, lr |
377 | ENDPROC(arm925_dma_unmap_area) | 373 | ENDPROC(arm925_dma_unmap_area) |
378 | 374 | ||
379 | .globl arm925_flush_kern_cache_louis | ||
380 | .equ arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all | ||
381 | |||
382 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 375 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
383 | define_cache_functions arm925 | 376 | define_cache_functions arm925 |
384 | 377 | ||
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index f1803f7e297..cd8f79c3a28 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -77,7 +77,6 @@ ENTRY(cpu_arm926_proc_fin) | |||
77 | * loc: location to jump to for soft reset | 77 | * loc: location to jump to for soft reset |
78 | */ | 78 | */ |
79 | .align 5 | 79 | .align 5 |
80 | .pushsection .idmap.text, "ax" | ||
81 | ENTRY(cpu_arm926_reset) | 80 | ENTRY(cpu_arm926_reset) |
82 | mov ip, #0 | 81 | mov ip, #0 |
83 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 82 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -90,8 +89,6 @@ ENTRY(cpu_arm926_reset) | |||
90 | bic ip, ip, #0x1100 @ ...i...s........ | 89 | bic ip, ip, #0x1100 @ ...i...s........ |
91 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 90 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
92 | mov pc, r0 | 91 | mov pc, r0 |
93 | ENDPROC(cpu_arm926_reset) | ||
94 | .popsection | ||
95 | 92 | ||
96 | /* | 93 | /* |
97 | * cpu_arm926_do_idle() | 94 | * cpu_arm926_do_idle() |
@@ -221,7 +218,6 @@ ENTRY(arm926_coherent_user_range) | |||
221 | cmp r0, r1 | 218 | cmp r0, r1 |
222 | blo 1b | 219 | blo 1b |
223 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 220 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
224 | mov r0, #0 | ||
225 | mov pc, lr | 221 | mov pc, lr |
226 | 222 | ||
227 | /* | 223 | /* |
@@ -339,9 +335,6 @@ ENTRY(arm926_dma_unmap_area) | |||
339 | mov pc, lr | 335 | mov pc, lr |
340 | ENDPROC(arm926_dma_unmap_area) | 336 | ENDPROC(arm926_dma_unmap_area) |
341 | 337 | ||
342 | .globl arm926_flush_kern_cache_louis | ||
343 | .equ arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all | ||
344 | |||
345 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 338 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
346 | define_cache_functions arm926 | 339 | define_cache_functions arm926 |
347 | 340 | ||
@@ -401,26 +394,31 @@ ENTRY(cpu_arm926_set_pte_ext) | |||
401 | 394 | ||
402 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 395 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
403 | .globl cpu_arm926_suspend_size | 396 | .globl cpu_arm926_suspend_size |
404 | .equ cpu_arm926_suspend_size, 4 * 3 | 397 | .equ cpu_arm926_suspend_size, 4 * 4 |
405 | #ifdef CONFIG_PM_SLEEP | 398 | #ifdef CONFIG_PM_SLEEP |
406 | ENTRY(cpu_arm926_do_suspend) | 399 | ENTRY(cpu_arm926_do_suspend) |
407 | stmfd sp!, {r4 - r6, lr} | 400 | stmfd sp!, {r4 - r7, lr} |
408 | mrc p15, 0, r4, c13, c0, 0 @ PID | 401 | mrc p15, 0, r4, c13, c0, 0 @ PID |
409 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | 402 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID |
410 | mrc p15, 0, r6, c1, c0, 0 @ Control register | 403 | mrc p15, 0, r6, c2, c0, 0 @ TTB address |
411 | stmia r0, {r4 - r6} | 404 | mrc p15, 0, r7, c1, c0, 0 @ Control register |
412 | ldmfd sp!, {r4 - r6, pc} | 405 | stmia r0, {r4 - r7} |
406 | ldmfd sp!, {r4 - r7, pc} | ||
413 | ENDPROC(cpu_arm926_do_suspend) | 407 | ENDPROC(cpu_arm926_do_suspend) |
414 | 408 | ||
415 | ENTRY(cpu_arm926_do_resume) | 409 | ENTRY(cpu_arm926_do_resume) |
416 | mov ip, #0 | 410 | mov ip, #0 |
417 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs | 411 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs |
418 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches | 412 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches |
419 | ldmia r0, {r4 - r6} | 413 | ldmia r0, {r4 - r7} |
420 | mcr p15, 0, r4, c13, c0, 0 @ PID | 414 | mcr p15, 0, r4, c13, c0, 0 @ PID |
421 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | 415 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID |
422 | mcr p15, 0, r1, c2, c0, 0 @ TTB address | 416 | mcr p15, 0, r6, c2, c0, 0 @ TTB address |
423 | mov r0, r6 @ control register | 417 | mov r0, r7 @ control register |
418 | mov r2, r6, lsr #14 @ get TTB0 base | ||
419 | mov r2, r2, lsl #14 | ||
420 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
421 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | ||
424 | b cpu_resume_mmu | 422 | b cpu_resume_mmu |
425 | ENDPROC(cpu_arm926_do_resume) | 423 | ENDPROC(cpu_arm926_do_resume) |
426 | #endif | 424 | #endif |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8da189d4a40..ac750d50615 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -48,7 +48,6 @@ ENTRY(cpu_arm940_proc_fin) | |||
48 | * Params : r0 = address to jump to | 48 | * Params : r0 = address to jump to |
49 | * Notes : This sets up everything for a reset | 49 | * Notes : This sets up everything for a reset |
50 | */ | 50 | */ |
51 | .pushsection .idmap.text, "ax" | ||
52 | ENTRY(cpu_arm940_reset) | 51 | ENTRY(cpu_arm940_reset) |
53 | mov ip, #0 | 52 | mov ip, #0 |
54 | mcr p15, 0, ip, c7, c5, 0 @ flush I cache | 53 | mcr p15, 0, ip, c7, c5, 0 @ flush I cache |
@@ -59,8 +58,6 @@ ENTRY(cpu_arm940_reset) | |||
59 | bic ip, ip, #0x00001000 @ i-cache | 58 | bic ip, ip, #0x00001000 @ i-cache |
60 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 59 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
61 | mov pc, r0 | 60 | mov pc, r0 |
62 | ENDPROC(cpu_arm940_reset) | ||
63 | .popsection | ||
64 | 61 | ||
65 | /* | 62 | /* |
66 | * cpu_arm940_do_idle() | 63 | * cpu_arm940_do_idle() |
@@ -160,7 +157,7 @@ ENTRY(arm940_coherent_user_range) | |||
160 | * - size - region size | 157 | * - size - region size |
161 | */ | 158 | */ |
162 | ENTRY(arm940_flush_kern_dcache_area) | 159 | ENTRY(arm940_flush_kern_dcache_area) |
163 | mov r0, #0 | 160 | mov ip, #0 |
164 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments | 161 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments |
165 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries | 162 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries |
166 | 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index | 163 | 2: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index |
@@ -168,8 +165,8 @@ ENTRY(arm940_flush_kern_dcache_area) | |||
168 | bcs 2b @ entries 63 to 0 | 165 | bcs 2b @ entries 63 to 0 |
169 | subs r1, r1, #1 << 4 | 166 | subs r1, r1, #1 << 4 |
170 | bcs 1b @ segments 7 to 0 | 167 | bcs 1b @ segments 7 to 0 |
171 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | 168 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
172 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 169 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
173 | mov pc, lr | 170 | mov pc, lr |
174 | 171 | ||
175 | /* | 172 | /* |
@@ -267,9 +264,6 @@ ENTRY(arm940_dma_unmap_area) | |||
267 | mov pc, lr | 264 | mov pc, lr |
268 | ENDPROC(arm940_dma_unmap_area) | 265 | ENDPROC(arm940_dma_unmap_area) |
269 | 266 | ||
270 | .globl arm940_flush_kern_cache_louis | ||
271 | .equ arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all | ||
272 | |||
273 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 267 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
274 | define_cache_functions arm940 | 268 | define_cache_functions arm940 |
275 | 269 | ||
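arm940_flush_kern_dcache_area sweeps the whole data cache by set/way rather than by address: the outer loop steps a segment index held in bits [5:4], the inner loop an entry index in bits [31:26], and each iteration issues the clean/flush-by-index operation (mcr p15, 0, r3, c7, c14, 2). The loop shape in C, as a sketch — the callback stands in for the coprocessor write, and the counts come from the hunk's own "4 segments" / "64 entries" comments:

    #include <stdint.h>

    #define CACHE_DSEGMENTS 4
    #define CACHE_DENTRIES  64

    static void arm940_dcache_sweep(void (*clean_flush_index)(uint32_t))
    {
        for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)
            for (int entry = CACHE_DENTRIES - 1; entry >= 0; entry--)
                clean_flush_index(((uint32_t)seg << 4) |
                                  ((uint32_t)entry << 26));
    }

The register change in the hunk also matters: keeping the scratch zero in ip instead of r0 leaves the caller's argument register untouched for the final invalidate-I-cache and drain-write-buffer operations.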
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index f666cf34075..683af3a182b 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -55,7 +55,6 @@ ENTRY(cpu_arm946_proc_fin) | |||
55 | * Params : r0 = address to jump to | 55 | * Params : r0 = address to jump to |
56 | * Notes : This sets up everything for a reset | 56 | * Notes : This sets up everything for a reset |
57 | */ | 57 | */ |
58 | .pushsection .idmap.text, "ax" | ||
59 | ENTRY(cpu_arm946_reset) | 58 | ENTRY(cpu_arm946_reset) |
60 | mov ip, #0 | 59 | mov ip, #0 |
61 | mcr p15, 0, ip, c7, c5, 0 @ flush I cache | 60 | mcr p15, 0, ip, c7, c5, 0 @ flush I cache |
@@ -66,8 +65,6 @@ ENTRY(cpu_arm946_reset) | |||
66 | bic ip, ip, #0x00001000 @ i-cache | 65 | bic ip, ip, #0x00001000 @ i-cache |
67 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 66 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
68 | mov pc, r0 | 67 | mov pc, r0 |
69 | ENDPROC(cpu_arm946_reset) | ||
70 | .popsection | ||
71 | 68 | ||
72 | /* | 69 | /* |
73 | * cpu_arm946_do_idle() | 70 | * cpu_arm946_do_idle() |
@@ -190,7 +187,6 @@ ENTRY(arm946_coherent_user_range) | |||
190 | cmp r0, r1 | 187 | cmp r0, r1 |
191 | blo 1b | 188 | blo 1b |
192 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 189 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
193 | mov r0, #0 | ||
194 | mov pc, lr | 190 | mov pc, lr |
195 | 191 | ||
196 | /* | 192 | /* |
@@ -310,9 +306,6 @@ ENTRY(arm946_dma_unmap_area) | |||
310 | mov pc, lr | 306 | mov pc, lr |
311 | ENDPROC(arm946_dma_unmap_area) | 307 | ENDPROC(arm946_dma_unmap_area) |
312 | 308 | ||
313 | .globl arm946_flush_kern_cache_louis | ||
314 | .equ arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all | ||
315 | |||
316 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 309 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
317 | define_cache_functions arm946 | 310 | define_cache_functions arm946 |
318 | 311 | ||
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 8881391dfb9..2120f9e2af7 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
@@ -45,11 +45,8 @@ ENTRY(cpu_arm9tdmi_proc_fin) | |||
45 | * Params : loc(r0) address to jump to | 45 | * Params : loc(r0) address to jump to |
46 | * Purpose : Sets up everything for a reset and jump to the location for soft reset. | 46 | * Purpose : Sets up everything for a reset and jump to the location for soft reset. |
47 | */ | 47 | */ |
48 | .pushsection .idmap.text, "ax" | ||
49 | ENTRY(cpu_arm9tdmi_reset) | 48 | ENTRY(cpu_arm9tdmi_reset) |
50 | mov pc, r0 | 49 | mov pc, r0 |
51 | ENDPROC(cpu_arm9tdmi_reset) | ||
52 | .popsection | ||
53 | 50 | ||
54 | __CPUINIT | 51 | __CPUINIT |
55 | 52 | ||
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index d217e9795d7..4c7a5710472 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
23 | #include <asm/page.h> | 23 | #include <asm/page.h> |
24 | #include <asm/ptrace.h> | 24 | #include <asm/ptrace.h> |
25 | #include <asm/system.h> | ||
25 | 26 | ||
26 | #include "proc-macros.S" | 27 | #include "proc-macros.S" |
27 | 28 | ||
@@ -56,7 +57,6 @@ ENTRY(cpu_fa526_proc_fin) | |||
56 | * loc: location to jump to for soft reset | 57 | * loc: location to jump to for soft reset |
57 | */ | 58 | */ |
58 | .align 4 | 59 | .align 4 |
59 | .pushsection .idmap.text, "ax" | ||
60 | ENTRY(cpu_fa526_reset) | 60 | ENTRY(cpu_fa526_reset) |
61 | /* TODO: Use CP8 if possible... */ | 61 | /* TODO: Use CP8 if possible... */ |
62 | mov ip, #0 | 62 | mov ip, #0 |
@@ -73,8 +73,6 @@ ENTRY(cpu_fa526_reset) | |||
73 | nop | 73 | nop |
74 | nop | 74 | nop |
75 | mov pc, r0 | 75 | mov pc, r0 |
76 | ENDPROC(cpu_fa526_reset) | ||
77 | .popsection | ||
78 | 76 | ||
79 | /* | 77 | /* |
80 | * cpu_fa526_do_idle() | 78 | * cpu_fa526_do_idle() |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 4106b09e0c2..8a6c2f78c1c 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -98,7 +98,6 @@ ENTRY(cpu_feroceon_proc_fin) | |||
98 | * loc: location to jump to for soft reset | 98 | * loc: location to jump to for soft reset |
99 | */ | 99 | */ |
100 | .align 5 | 100 | .align 5 |
101 | .pushsection .idmap.text, "ax" | ||
102 | ENTRY(cpu_feroceon_reset) | 101 | ENTRY(cpu_feroceon_reset) |
103 | mov ip, #0 | 102 | mov ip, #0 |
104 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 103 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -111,8 +110,6 @@ ENTRY(cpu_feroceon_reset) | |||
111 | bic ip, ip, #0x1100 @ ...i...s........ | 110 | bic ip, ip, #0x1100 @ ...i...s........ |
112 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 111 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
113 | mov pc, r0 | 112 | mov pc, r0 |
114 | ENDPROC(cpu_feroceon_reset) | ||
115 | .popsection | ||
116 | 113 | ||
117 | /* | 114 | /* |
118 | * cpu_feroceon_do_idle() | 115 | * cpu_feroceon_do_idle() |
@@ -232,7 +229,6 @@ ENTRY(feroceon_coherent_user_range) | |||
232 | cmp r0, r1 | 229 | cmp r0, r1 |
233 | blo 1b | 230 | blo 1b |
234 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 231 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
235 | mov r0, #0 | ||
236 | mov pc, lr | 232 | mov pc, lr |
237 | 233 | ||
238 | /* | 234 | /* |
@@ -415,9 +411,6 @@ ENTRY(feroceon_dma_unmap_area) | |||
415 | mov pc, lr | 411 | mov pc, lr |
416 | ENDPROC(feroceon_dma_unmap_area) | 412 | ENDPROC(feroceon_dma_unmap_area) |
417 | 413 | ||
418 | .globl feroceon_flush_kern_cache_louis | ||
419 | .equ feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all | ||
420 | |||
421 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 414 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
422 | define_cache_functions feroceon | 415 | define_cache_functions feroceon |
423 | 416 | ||
@@ -434,7 +427,6 @@ ENDPROC(feroceon_dma_unmap_area) | |||
434 | range_alias flush_icache_all | 427 | range_alias flush_icache_all |
435 | range_alias flush_user_cache_all | 428 | range_alias flush_user_cache_all |
436 | range_alias flush_kern_cache_all | 429 | range_alias flush_kern_cache_all |
437 | range_alias flush_kern_cache_louis | ||
438 | range_alias flush_user_cache_range | 430 | range_alias flush_user_cache_range |
439 | range_alias coherent_kern_range | 431 | range_alias coherent_kern_range |
440 | range_alias coherent_user_range | 432 | range_alias coherent_user_range |
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index eb6aa73bc8b..87f8ee2ebf7 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -91,9 +91,8 @@ | |||
91 | #if L_PTE_SHARED != PTE_EXT_SHARED | 91 | #if L_PTE_SHARED != PTE_EXT_SHARED |
92 | #error PTE shared bit mismatch | 92 | #error PTE shared bit mismatch |
93 | #endif | 93 | #endif |
94 | #if !defined (CONFIG_ARM_LPAE) && \ | 94 | #if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ |
95 | (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ | 95 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED |
96 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED | ||
97 | #error Invalid Linux PTE bit settings | 96 | #error Invalid Linux PTE bit settings |
98 | #endif | 97 | #endif |
99 | #endif /* CONFIG_MMU */ | 98 | #endif /* CONFIG_MMU */ |
@@ -122,7 +121,7 @@ | |||
122 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH | 121 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH |
123 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK | 122 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK |
124 | .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED | 123 | .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED |
125 | .long 0x00 @ unused | 124 | .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB |
126 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) | 125 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) |
127 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC | 126 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC |
128 | .long 0x00 @ unused | 127 | .long 0x00 @ unused |
@@ -167,10 +166,6 @@ | |||
167 | tst r1, #L_PTE_YOUNG | 166 | tst r1, #L_PTE_YOUNG |
168 | tstne r1, #L_PTE_PRESENT | 167 | tstne r1, #L_PTE_PRESENT |
169 | moveq r3, #0 | 168 | moveq r3, #0 |
170 | #ifndef CONFIG_CPU_USE_DOMAINS | ||
171 | tstne r1, #L_PTE_NONE | ||
172 | movne r3, #0 | ||
173 | #endif | ||
174 | 169 | ||
175 | str r3, [r0] | 170 | str r3, [r0] |
176 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 171 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte |
@@ -303,7 +298,6 @@ ENTRY(\name\()_processor_functions) | |||
303 | ENTRY(\name\()_cache_fns) | 298 | ENTRY(\name\()_cache_fns) |
304 | .long \name\()_flush_icache_all | 299 | .long \name\()_flush_icache_all |
305 | .long \name\()_flush_kern_cache_all | 300 | .long \name\()_flush_kern_cache_all |
306 | .long \name\()_flush_kern_cache_louis | ||
307 | .long \name\()_flush_user_cache_all | 301 | .long \name\()_flush_user_cache_all |
308 | .long \name\()_flush_user_cache_range | 302 | .long \name\()_flush_user_cache_range |
309 | .long \name\()_coherent_kern_range | 303 | .long \name\()_coherent_kern_range |
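The table emitted by define_cache_functions is a bare run of .long entries, so its order must mirror struct cpu_cache_fns in <asm/cacheflush.h> exactly; this hunk removes the flush_kern_cache_louis slot from the table, which only works if the struct loses the corresponding member in the same patch. A hedged sketch of the post-patch ordering, showing only the slots visible in this hunk (the authoritative definition is the header):

    struct cpu_cache_fns_sketch {
        void (*flush_icache_all)(void);
        void (*flush_kern_all)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long start, unsigned long end,
                                 unsigned int flags);
        void (*coherent_kern_range)(unsigned long start, unsigned long end);
        /* ...remaining members continue in .long order... */
    };

Because nothing checks the correspondence at build time, removing one .long without the matching struct member would silently shift every later pointer onto the wrong function.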
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 82f9cdc751d..db52b0fb14a 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -69,7 +69,6 @@ ENTRY(cpu_mohawk_proc_fin) | |||
69 | * (same as arm926) | 69 | * (same as arm926) |
70 | */ | 70 | */ |
71 | .align 5 | 71 | .align 5 |
72 | .pushsection .idmap.text, "ax" | ||
73 | ENTRY(cpu_mohawk_reset) | 72 | ENTRY(cpu_mohawk_reset) |
74 | mov ip, #0 | 73 | mov ip, #0 |
75 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 74 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -80,8 +79,6 @@ ENTRY(cpu_mohawk_reset) | |||
80 | bic ip, ip, #0x1100 @ ...i...s........ | 79 | bic ip, ip, #0x1100 @ ...i...s........ |
81 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 80 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
82 | mov pc, r0 | 81 | mov pc, r0 |
83 | ENDPROC(cpu_mohawk_reset) | ||
84 | .popsection | ||
85 | 82 | ||
86 | /* | 83 | /* |
87 | * cpu_mohawk_do_idle() | 84 | * cpu_mohawk_do_idle() |
@@ -193,7 +190,6 @@ ENTRY(mohawk_coherent_user_range) | |||
193 | cmp r0, r1 | 190 | cmp r0, r1 |
194 | blo 1b | 191 | blo 1b |
195 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 192 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
196 | mov r0, #0 | ||
197 | mov pc, lr | 193 | mov pc, lr |
198 | 194 | ||
199 | /* | 195 | /* |
@@ -303,9 +299,6 @@ ENTRY(mohawk_dma_unmap_area) | |||
303 | mov pc, lr | 299 | mov pc, lr |
304 | ENDPROC(mohawk_dma_unmap_area) | 300 | ENDPROC(mohawk_dma_unmap_area) |
305 | 301 | ||
306 | .globl mohawk_flush_kern_cache_louis | ||
307 | .equ mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all | ||
308 | |||
309 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 302 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
310 | define_cache_functions mohawk | 303 | define_cache_functions mohawk |
311 | 304 | ||
@@ -348,41 +341,6 @@ ENTRY(cpu_mohawk_set_pte_ext) | |||
348 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 341 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
349 | mov pc, lr | 342 | mov pc, lr |
350 | 343 | ||
351 | .globl cpu_mohawk_suspend_size | ||
352 | .equ cpu_mohawk_suspend_size, 4 * 6 | ||
353 | #ifdef CONFIG_PM_SLEEP | ||
354 | ENTRY(cpu_mohawk_do_suspend) | ||
355 | stmfd sp!, {r4 - r9, lr} | ||
356 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | ||
357 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg | ||
358 | mrc p15, 0, r6, c13, c0, 0 @ PID | ||
359 | mrc p15, 0, r7, c3, c0, 0 @ domain ID | ||
360 | mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg | ||
361 | mrc p15, 0, r9, c1, c0, 0 @ control reg | ||
362 | bic r4, r4, #2 @ clear frequency change bit | ||
363 | stmia r0, {r4 - r9} @ store cp regs | ||
364 | ldmia sp!, {r4 - r9, pc} | ||
365 | ENDPROC(cpu_mohawk_do_suspend) | ||
366 | |||
367 | ENTRY(cpu_mohawk_do_resume) | ||
368 | ldmia r0, {r4 - r9} @ load cp regs | ||
369 | mov ip, #0 | ||
370 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB | ||
371 | mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer | ||
372 | mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer | ||
373 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | ||
374 | mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. | ||
375 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg | ||
376 | mcr p15, 0, r6, c13, c0, 0 @ PID | ||
377 | mcr p15, 0, r7, c3, c0, 0 @ domain ID | ||
378 | orr r1, r1, #0x18 @ cache the page table in L2 | ||
379 | mcr p15, 0, r1, c2, c0, 0 @ translation table base addr | ||
380 | mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg | ||
381 | mov r0, r9 @ control register | ||
382 | b cpu_resume_mmu | ||
383 | ENDPROC(cpu_mohawk_do_resume) | ||
384 | #endif | ||
385 | |||
386 | __CPUINIT | 344 | __CPUINIT |
387 | 345 | ||
388 | .type __mohawk_setup, #function | 346 | .type __mohawk_setup, #function |
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 775d70fba93..d50ada26edd 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
@@ -62,7 +62,6 @@ ENTRY(cpu_sa110_proc_fin) | |||
62 | * loc: location to jump to for soft reset | 62 | * loc: location to jump to for soft reset |
63 | */ | 63 | */ |
64 | .align 5 | 64 | .align 5 |
65 | .pushsection .idmap.text, "ax" | ||
66 | ENTRY(cpu_sa110_reset) | 65 | ENTRY(cpu_sa110_reset) |
67 | mov ip, #0 | 66 | mov ip, #0 |
68 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 67 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -75,8 +74,6 @@ ENTRY(cpu_sa110_reset) | |||
75 | bic ip, ip, #0x1100 @ ...i...s........ | 74 | bic ip, ip, #0x1100 @ ...i...s........ |
76 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 75 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
77 | mov pc, r0 | 76 | mov pc, r0 |
78 | ENDPROC(cpu_sa110_reset) | ||
79 | .popsection | ||
80 | 77 | ||
81 | /* | 78 | /* |
82 | * cpu_sa110_do_idle(type) | 79 | * cpu_sa110_do_idle(type) |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 3aa0da11fd8..69e7f2ef738 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -70,7 +70,6 @@ ENTRY(cpu_sa1100_proc_fin) | |||
70 | * loc: location to jump to for soft reset | 70 | * loc: location to jump to for soft reset |
71 | */ | 71 | */ |
72 | .align 5 | 72 | .align 5 |
73 | .pushsection .idmap.text, "ax" | ||
74 | ENTRY(cpu_sa1100_reset) | 73 | ENTRY(cpu_sa1100_reset) |
75 | mov ip, #0 | 74 | mov ip, #0 |
76 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches | 75 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches |
@@ -83,8 +82,6 @@ ENTRY(cpu_sa1100_reset) | |||
83 | bic ip, ip, #0x1100 @ ...i...s........ | 82 | bic ip, ip, #0x1100 @ ...i...s........ |
84 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 83 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
85 | mov pc, r0 | 84 | mov pc, r0 |
86 | ENDPROC(cpu_sa1100_reset) | ||
87 | .popsection | ||
88 | 85 | ||
89 | /* | 86 | /* |
90 | * cpu_sa1100_do_idle(type) | 87 | * cpu_sa1100_do_idle(type) |
@@ -171,19 +168,20 @@ ENTRY(cpu_sa1100_set_pte_ext) | |||
171 | mov pc, lr | 168 | mov pc, lr |
172 | 169 | ||
173 | .globl cpu_sa1100_suspend_size | 170 | .globl cpu_sa1100_suspend_size |
174 | .equ cpu_sa1100_suspend_size, 4 * 3 | 171 | .equ cpu_sa1100_suspend_size, 4 * 4
175 | #ifdef CONFIG_PM_SLEEP | 172 | #ifdef CONFIG_PM_SLEEP |
176 | ENTRY(cpu_sa1100_do_suspend) | 173 | ENTRY(cpu_sa1100_do_suspend) |
177 | stmfd sp!, {r4 - r6, lr} | 174 | stmfd sp!, {r4 - r7, lr} |
178 | mrc p15, 0, r4, c3, c0, 0 @ domain ID | 175 | mrc p15, 0, r4, c3, c0, 0 @ domain ID |
179 | mrc p15, 0, r5, c13, c0, 0 @ PID | 176 | mrc p15, 0, r5, c2, c0, 0 @ translation table base addr |
180 | mrc p15, 0, r6, c1, c0, 0 @ control reg | 177 | mrc p15, 0, r6, c13, c0, 0 @ PID |
181 | stmia r0, {r4 - r6} @ store cp regs | 178 | mrc p15, 0, r7, c1, c0, 0 @ control reg |
182 | ldmfd sp!, {r4 - r6, pc} | 179 | stmia r0, {r4 - r7} @ store cp regs |
180 | ldmfd sp!, {r4 - r7, pc} | ||
183 | ENDPROC(cpu_sa1100_do_suspend) | 181 | ENDPROC(cpu_sa1100_do_suspend) |
184 | 182 | ||
185 | ENTRY(cpu_sa1100_do_resume) | 183 | ENTRY(cpu_sa1100_do_resume) |
186 | ldmia r0, {r4 - r6} @ load cp regs | 184 | ldmia r0, {r4 - r7} @ load cp regs |
187 | mov ip, #0 | 185 | mov ip, #0 |
188 | mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs | 186 | mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs |
189 | mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache | 187 | mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache |
@@ -191,9 +189,13 @@ ENTRY(cpu_sa1100_do_resume) | |||
191 | mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB | 189 | mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB |
192 | 190 | ||
193 | mcr p15, 0, r4, c3, c0, 0 @ domain ID | 191 | mcr p15, 0, r4, c3, c0, 0 @ domain ID |
194 | mcr p15, 0, r1, c2, c0, 0 @ translation table base addr | 192 | mcr p15, 0, r5, c2, c0, 0 @ translation table base addr |
195 | mcr p15, 0, r5, c13, c0, 0 @ PID | 193 | mcr p15, 0, r6, c13, c0, 0 @ PID |
196 | mov r0, r6 @ control register | 194 | mov r0, r7 @ control register |
195 | mov r2, r5, lsr #14 @ get TTB0 base | ||
196 | mov r2, r2, lsl #14 | ||
197 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
198 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | ||
197 | b cpu_resume_mmu | 199 | b cpu_resume_mmu |
198 | ENDPROC(cpu_sa1100_do_resume) | 200 | ENDPROC(cpu_sa1100_do_resume) |
199 | #endif | 201 | #endif |
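The resume paths touched in this patch (sa1100 here, and arm920, arm926 and v6 elsewhere) all derive a page-table base for cpu_resume_mmu by shifting the saved TTB right then left by 14 bits, i.e. masking it down to 16 KB alignment, and pair it with section-mapping flags in r3. The arithmetic, as a sketch:

    #include <stdint.h>

    /* A classic ARM first-level table is 16 KB aligned: the base occupies
     * TTB[31:14] and the low bits carry table-walk attributes. */
    static inline uint32_t ttb0_base(uint32_t saved_ttb)
    {
        return (saved_ttb >> 14) << 14;   /* == saved_ttb & ~0x3fffu */
    }

r2 then carries this base and r3 the PMD_TYPE_SECT | ... flags, presumably so cpu_resume_mmu can build the temporary mapping it needs before the saved control register is written back.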
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 09c5233f4df..a923aa0fd00 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -55,7 +55,6 @@ ENTRY(cpu_v6_proc_fin) | |||
55 | * - loc - location to jump to for soft reset | 55 | * - loc - location to jump to for soft reset |
56 | */ | 56 | */ |
57 | .align 5 | 57 | .align 5 |
58 | .pushsection .idmap.text, "ax" | ||
59 | ENTRY(cpu_v6_reset) | 58 | ENTRY(cpu_v6_reset) |
60 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register | 59 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register |
61 | bic r1, r1, #0x1 @ ...............m | 60 | bic r1, r1, #0x1 @ ...............m |
@@ -63,8 +62,6 @@ ENTRY(cpu_v6_reset) | |||
63 | mov r1, #0 | 62 | mov r1, #0 |
64 | mcr p15, 0, r1, c7, c5, 4 @ ISB | 63 | mcr p15, 0, r1, c7, c5, 4 @ ISB |
65 | mov pc, r0 | 64 | mov pc, r0 |
66 | ENDPROC(cpu_v6_reset) | ||
67 | .popsection | ||
68 | 65 | ||
69 | /* | 66 | /* |
70 | * cpu_v6_do_idle() | 67 | * cpu_v6_do_idle() |
@@ -89,7 +86,7 @@ ENTRY(cpu_v6_dcache_clean_area) | |||
89 | mov pc, lr | 86 | mov pc, lr |
90 | 87 | ||
91 | /* | 88 | /* |
92 | * cpu_v6_switch_mm(pgd_phys, tsk) | 89 | * cpu_v6_switch_mm(pgd_phys, tsk)
93 | * | 90 | * |
94 | * Set the translation table base pointer to be pgd_phys | 91 | * Set the translation table base pointer to be pgd_phys |
95 | * | 92 | * |
@@ -107,12 +104,6 @@ ENTRY(cpu_v6_switch_mm) | |||
107 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 104 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
108 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer | 105 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer |
109 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | 106 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 |
110 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
111 | mrc p15, 0, r2, c13, c0, 1 @ read current context ID | ||
112 | bic r2, r2, #0xff @ extract the PID | ||
113 | and r1, r1, #0xff | ||
114 | orr r1, r1, r2 @ insert into new context ID | ||
115 | #endif | ||
116 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | 107 | mcr p15, 0, r1, c13, c0, 1 @ set context ID |
117 | #endif | 108 | #endif |
118 | mov pc, lr | 109 | mov pc, lr |
@@ -137,18 +128,20 @@ ENTRY(cpu_v6_set_pte_ext) | |||
137 | 128 | ||
138 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ | 129 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ |
139 | .globl cpu_v6_suspend_size | 130 | .globl cpu_v6_suspend_size |
140 | .equ cpu_v6_suspend_size, 4 * 6 | 131 | .equ cpu_v6_suspend_size, 4 * 8 |
141 | #ifdef CONFIG_PM_SLEEP | 132 | #ifdef CONFIG_PM_SLEEP |
142 | ENTRY(cpu_v6_do_suspend) | 133 | ENTRY(cpu_v6_do_suspend) |
143 | stmfd sp!, {r4 - r9, lr} | 134 | stmfd sp!, {r4 - r11, lr} |
144 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 135 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
145 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | 136 | mrc p15, 0, r5, c13, c0, 1 @ Context ID |
146 | mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 | 137 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
147 | mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register | 138 | mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0 |
148 | mrc p15, 0, r8, c1, c0, 2 @ co-processor access control | 139 | mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1 |
149 | mrc p15, 0, r9, c1, c0, 0 @ control register | 140 | mrc p15, 0, r9, c1, c0, 1 @ auxiliary control register |
150 | stmia r0, {r4 - r9} | 141 | mrc p15, 0, r10, c1, c0, 2 @ co-processor access control |
151 | ldmfd sp!, {r4- r9, pc} | 142 | mrc p15, 0, r11, c1, c0, 0 @ control register |
143 | stmia r0, {r4 - r11} | ||
144 | ldmfd sp!, {r4- r11, pc} | ||
152 | ENDPROC(cpu_v6_do_suspend) | 145 | ENDPROC(cpu_v6_do_suspend) |
153 | 146 | ||
154 | ENTRY(cpu_v6_do_resume) | 147 | ENTRY(cpu_v6_do_resume) |
@@ -157,21 +150,25 @@ ENTRY(cpu_v6_do_resume) | |||
157 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | 150 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
158 | mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache | 151 | mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache |
159 | mcr p15, 0, ip, c7, c10, 4 @ drain write buffer | 152 | mcr p15, 0, ip, c7, c10, 4 @ drain write buffer |
160 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID | 153 | ldmia r0, {r4 - r11} |
161 | ldmia r0, {r4 - r9} | ||
162 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 154 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
163 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | 155 | mcr p15, 0, r5, c13, c0, 1 @ Context ID |
164 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 156 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
165 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) | 157 | mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0 |
166 | mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 | 158 | mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1 |
167 | mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 | 159 | mcr p15, 0, r9, c1, c0, 1 @ auxiliary control register |
168 | mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register | 160 | mcr p15, 0, r10, c1, c0, 2 @ co-processor access control |
169 | mcr p15, 0, r8, c1, c0, 2 @ co-processor access control | ||
170 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | 161 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register |
171 | mcr p15, 0, ip, c7, c5, 4 @ ISB | 162 | mcr p15, 0, ip, c7, c5, 4 @ ISB |
172 | mov r0, r9 @ control register | 163 | mov r0, r11 @ control register |
164 | mov r2, r7, lsr #14 @ get TTB0 base | ||
165 | mov r2, r2, lsl #14 | ||
166 | ldr r3, cpu_resume_l1_flags | ||
173 | b cpu_resume_mmu | 167 | b cpu_resume_mmu |
174 | ENDPROC(cpu_v6_do_resume) | 168 | ENDPROC(cpu_v6_do_resume) |
169 | cpu_resume_l1_flags: | ||
170 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | ||
171 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | ||
175 | #endif | 172 | #endif |
176 | 173 | ||
177 | string cpu_v6_name, "ARMv6-compatible processor" | 174 | string cpu_v6_name, "ARMv6-compatible processor" |
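The cpu_v6_switch_mm hunk drops the CONFIG_PID_IN_CONTEXTIDR block, whose four instructions merged the incoming context's 8-bit ASID with the PID already parked in the upper bits of CONTEXTIDR. What the removed sequence computed, as a C sketch of the bit manipulation only:

    #include <stdint.h>

    /* CONTEXTIDR layout: PROCID in [31:8], ASID in [7:0]. */
    static inline uint32_t contextidr_merge(uint32_t current_cidr,
                                            uint32_t new_context_id)
    {
        uint32_t procid = current_cidr & ~0xffu;  /* bic r2, r2, #0xff */
        uint32_t asid   = new_context_id & 0xffu; /* and r1, r1, #0xff */
        return procid | asid;                     /* orr r1, r1, r2    */
    }

With the block gone, the mm's context ID is written to CONTEXTIDR verbatim, which is fine as long as nothing stashes a PID in the upper bits.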
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S deleted file mode 100644 index 6d98c13ab82..00000000000 --- a/arch/arm/mm/proc-v7-2level.S +++ /dev/null | |||
@@ -1,175 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/mm/proc-v7-2level.S | ||
3 | * | ||
4 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #define TTB_S (1 << 1) | ||
12 | #define TTB_RGN_NC (0 << 3) | ||
13 | #define TTB_RGN_OC_WBWA (1 << 3) | ||
14 | #define TTB_RGN_OC_WT (2 << 3) | ||
15 | #define TTB_RGN_OC_WB (3 << 3) | ||
16 | #define TTB_NOS (1 << 5) | ||
17 | #define TTB_IRGN_NC ((0 << 0) | (0 << 6)) | ||
18 | #define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) | ||
19 | #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) | ||
20 | #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) | ||
21 | |||
22 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | ||
23 | #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB | ||
24 | #define PMD_FLAGS_UP PMD_SECT_WB | ||
25 | |||
26 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | ||
27 | #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA | ||
28 | #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S | ||
29 | |||
30 | /* | ||
31 | * cpu_v7_switch_mm(pgd_phys, tsk) | ||
32 | * | ||
33 | * Set the translation table base pointer to be pgd_phys | ||
34 | * | ||
35 | * - pgd_phys - physical address of new TTB | ||
36 | * | ||
37 | * It is assumed that: | ||
38 | * - we are not using split page tables | ||
39 | */ | ||
40 | ENTRY(cpu_v7_switch_mm) | ||
41 | #ifdef CONFIG_MMU | ||
42 | mov r2, #0 | ||
43 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | ||
44 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) | ||
45 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | ||
46 | #ifdef CONFIG_ARM_ERRATA_430973 | ||
47 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
48 | #endif | ||
49 | #ifdef CONFIG_PID_IN_CONTEXTIDR | ||
50 | mrc p15, 0, r2, c13, c0, 1 @ read current context ID | ||
51 | lsr r2, r2, #8 @ extract the PID | ||
52 | bfi r1, r2, #8, #24 @ insert into new context ID | ||
53 | #endif | ||
54 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
55 | dsb | ||
56 | #endif | ||
57 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | ||
58 | isb | ||
59 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | ||
60 | isb | ||
61 | #endif | ||
62 | mov pc, lr | ||
63 | ENDPROC(cpu_v7_switch_mm) | ||
64 | |||
65 | /* | ||
66 | * cpu_v7_set_pte_ext(ptep, pte) | ||
67 | * | ||
68 | * Set a level 2 translation table entry. | ||
69 | * | ||
70 | * - ptep - pointer to level 2 translation table entry | ||
71 | * (hardware version is stored at +2048 bytes) | ||
72 | * - pte - PTE value to store | ||
73 | * - ext - value for extended PTE bits | ||
74 | */ | ||
75 | ENTRY(cpu_v7_set_pte_ext) | ||
76 | #ifdef CONFIG_MMU | ||
77 | str r1, [r0] @ linux version | ||
78 | |||
79 | bic r3, r1, #0x000003f0 | ||
80 | bic r3, r3, #PTE_TYPE_MASK | ||
81 | orr r3, r3, r2 | ||
82 | orr r3, r3, #PTE_EXT_AP0 | 2 | ||
83 | |||
84 | tst r1, #1 << 4 | ||
85 | orrne r3, r3, #PTE_EXT_TEX(1) | ||
86 | |||
87 | eor r1, r1, #L_PTE_DIRTY | ||
88 | tst r1, #L_PTE_RDONLY | L_PTE_DIRTY | ||
89 | orrne r3, r3, #PTE_EXT_APX | ||
90 | |||
91 | tst r1, #L_PTE_USER | ||
92 | orrne r3, r3, #PTE_EXT_AP1 | ||
93 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
94 | @ allow kernel read/write access to read-only user pages | ||
95 | tstne r3, #PTE_EXT_APX | ||
96 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | ||
97 | #endif | ||
98 | |||
99 | tst r1, #L_PTE_XN | ||
100 | orrne r3, r3, #PTE_EXT_XN | ||
101 | |||
102 | tst r1, #L_PTE_YOUNG | ||
103 | tstne r1, #L_PTE_VALID | ||
104 | #ifndef CONFIG_CPU_USE_DOMAINS | ||
105 | eorne r1, r1, #L_PTE_NONE | ||
106 | tstne r1, #L_PTE_NONE | ||
107 | #endif | ||
108 | moveq r3, #0 | ||
109 | |||
110 | ARM( str r3, [r0, #2048]! ) | ||
111 | THUMB( add r0, r0, #2048 ) | ||
112 | THUMB( str r3, [r0] ) | ||
113 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | ||
114 | #endif | ||
115 | mov pc, lr | ||
116 | ENDPROC(cpu_v7_set_pte_ext) | ||
117 | |||
118 | /* | ||
119 | * Memory region attributes with SCTLR.TRE=1 | ||
120 | * | ||
121 | * n = TEX[0],C,B | ||
122 | * TR = PRRR[2n+1:2n] - memory type | ||
123 | * IR = NMRR[2n+1:2n] - inner cacheable property | ||
124 | * OR = NMRR[2n+17:2n+16] - outer cacheable property | ||
125 | * | ||
126 | * n TR IR OR | ||
127 | * UNCACHED 000 00 | ||
128 | * BUFFERABLE 001 10 00 00 | ||
129 | * WRITETHROUGH 010 10 10 10 | ||
130 | * WRITEBACK 011 10 11 11 | ||
131 | * reserved 110 | ||
132 | * WRITEALLOC 111 10 01 01 | ||
133 | * DEV_SHARED 100 01 | ||
134 | * DEV_NONSHARED 100 01 | ||
135 | * DEV_WC 001 10 | ||
136 | * DEV_CACHED 011 10 | ||
137 | * | ||
138 | * Other attributes: | ||
139 | * | ||
140 | * DS0 = PRRR[16] = 0 - device shareable property | ||
141 | * DS1 = PRRR[17] = 1 - device shareable property | ||
142 | * NS0 = PRRR[18] = 0 - normal shareable property | ||
143 | * NS1 = PRRR[19] = 1 - normal shareable property | ||
144 | * NOS = PRRR[24+n] = 1 - not outer shareable | ||
145 | */ | ||
146 | .equ PRRR, 0xff0a81a8 | ||
147 | .equ NMRR, 0x40e040e0 | ||
148 | |||
149 | /* | ||
150 | * Macro for setting up the TTBRx and TTBCR registers. | ||
151 | * - \ttb0 and \ttb1 updated with the corresponding flags. | ||
152 | */ | ||
153 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | ||
154 | mcr p15, 0, \zero, c2, c0, 2 @ TTB control register | ||
155 | ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP) | ||
156 | ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP) | ||
157 | ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP) | ||
158 | ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP) | ||
159 | mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 | ||
160 | .endm | ||
161 | |||
162 | __CPUINIT | ||
163 | |||
164 | /* AT | ||
165 | * TFR EV X F I D LR S | ||
166 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM | ||
167 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | ||
168 | * 01 0 110 0011 1100 .111 1101 < we want | ||
169 | */ | ||
170 | .align 2 | ||
171 | .type v7_crval, #object | ||
172 | v7_crval: | ||
173 | crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c | ||
174 | |||
175 | .previous | ||
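The comment table in the deleted proc-v7-2level.S is worth decoding: with SCTLR.TRE=1, the index n = {TEX[0],C,B} selects a two-bit memory-type field in PRRR and two-bit inner/outer cacheability fields in NMRR (the outer field sits 16 bits higher). A short self-check against the file's .equ values:

    #include <stdint.h>
    #include <stdio.h>

    #define PRRR 0xff0a81a8u   /* values from the deleted file */
    #define NMRR 0x40e040e0u

    int main(void)
    {
        for (unsigned n = 0; n < 8; n++) {
            unsigned tr    = (PRRR >> (2 * n)) & 3;        /* memory type */
            unsigned inner = (NMRR >> (2 * n)) & 3;
            unsigned outer = (NMRR >> (2 * n + 16)) & 3;
            printf("n=%u TR=%u IR=%u OR=%u\n", n, tr, inner, outer);
        }
        return 0;
    }

Running this reproduces the rows in the comment above, e.g. n=3 (WRITEBACK) yields TR=10, IR=11, OR=11 in binary.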
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S deleted file mode 100644 index 7b56386f949..00000000000 --- a/arch/arm/mm/proc-v7-3level.S +++ /dev/null | |||
@@ -1,153 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/mm/proc-v7-3level.S | ||
3 | * | ||
4 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | ||
5 | * Copyright (C) 2011 ARM Ltd. | ||
6 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
7 | * based on arch/arm/mm/proc-v7-2level.S | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
21 | */ | ||
22 | |||
23 | #define TTB_IRGN_NC (0 << 8) | ||
24 | #define TTB_IRGN_WBWA (1 << 8) | ||
25 | #define TTB_IRGN_WT (2 << 8) | ||
26 | #define TTB_IRGN_WB (3 << 8) | ||
27 | #define TTB_RGN_NC (0 << 10) | ||
28 | #define TTB_RGN_OC_WBWA (1 << 10) | ||
29 | #define TTB_RGN_OC_WT (2 << 10) | ||
30 | #define TTB_RGN_OC_WB (3 << 10) | ||
31 | #define TTB_S (3 << 12) | ||
32 | #define TTB_EAE (1 << 31) | ||
33 | |||
34 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | ||
35 | #define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB) | ||
36 | #define PMD_FLAGS_UP (PMD_SECT_WB) | ||
37 | |||
38 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | ||
39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) | ||
40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) | ||
41 | |||
42 | /* | ||
43 | * cpu_v7_switch_mm(pgd_phys, tsk) | ||
44 | * | ||
45 | * Set the translation table base pointer to be pgd_phys (physical address of | ||
46 | * the new TTB). | ||
47 | */ | ||
48 | ENTRY(cpu_v7_switch_mm) | ||
49 | #ifdef CONFIG_MMU | ||
50 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | ||
51 | and r3, r1, #0xff | ||
52 | mov r3, r3, lsl #(48 - 32) @ ASID | ||
53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 | ||
54 | isb | ||
55 | #endif | ||
56 | mov pc, lr | ||
57 | ENDPROC(cpu_v7_switch_mm) | ||
58 | |||
59 | /* | ||
60 | * cpu_v7_set_pte_ext(ptep, pte) | ||
61 | * | ||
62 | * Set a level 2 translation table entry. | ||
63 | * - ptep - pointer to level 3 translation table entry | ||
64 | * - pte - PTE value to store (64-bit in r2 and r3) | ||
65 | */ | ||
66 | ENTRY(cpu_v7_set_pte_ext) | ||
67 | #ifdef CONFIG_MMU | ||
68 | tst r2, #L_PTE_VALID | ||
69 | beq 1f | ||
70 | tst r3, #1 << (57 - 32) @ L_PTE_NONE | ||
71 | bicne r2, #L_PTE_VALID | ||
72 | bne 1f | ||
73 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | ||
74 | orreq r2, #L_PTE_RDONLY | ||
75 | 1: strd r2, r3, [r0] | ||
76 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | ||
77 | #endif | ||
78 | mov pc, lr | ||
79 | ENDPROC(cpu_v7_set_pte_ext) | ||
80 | |||
81 | /* | ||
82 | * Memory region attributes for LPAE (defined in pgtable-3level.h): | ||
83 | * | ||
84 | * n = AttrIndx[2:0] | ||
85 | * | ||
86 | *                 n    MAIR | ||
87 | * UNCACHED        000  00000000 | ||
88 | * BUFFERABLE      001  01000100 | ||
89 | * DEV_WC          001  01000100 | ||
90 | * WRITETHROUGH    010  10101010 | ||
91 | * WRITEBACK       011  11101110 | ||
92 | * DEV_CACHED      011  11101110 | ||
93 | * DEV_SHARED      100  00000100 | ||
94 | * DEV_NONSHARED   100  00000100 | ||
95 | * unused          101 | ||
96 | * unused          110 | ||
97 | * WRITEALLOC      111  11111111 | ||
98 | */ | ||
99 | .equ PRRR, 0xeeaa4400 @ MAIR0 | ||
100 | .equ NMRR, 0xff000004 @ MAIR1 | ||
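The LPAE encoding is simpler than the TRE one: AttrIndx[2:0] selects one byte out of the MAIR0/MAIR1 pair. A small C sketch of the byte-select rule (for illustration only — the kernel never decodes these at run time):

    #include <stdint.h>

    #define MAIR0 0xeeaa4400u   /* attribute indices 0..3 */
    #define MAIR1 0xff000004u   /* attribute indices 4..7 */

    static unsigned mair_attr(unsigned n)   /* n = AttrIndx[2:0] */
    {
        uint64_t mair = ((uint64_t)MAIR1 << 32) | MAIR0;
        return (mair >> (8 * n)) & 0xff;
    }

    /* mair_attr(3) == 0xee (WRITEBACK), mair_attr(7) == 0xff (WRITEALLOC) */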
101 | |||
102 | /* | ||
103 | * Macro for setting up the TTBRx and TTBCR registers. | ||
104 | * - \ttbr1 updated. | ||
105 | */ | ||
106 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | ||
107 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address | ||
108 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) | ||
109 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register | ||
110 | orr \tmp, \tmp, #TTB_EAE | ||
111 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) | ||
112 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP) | ||
113 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) | ||
114 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) | ||
115 | /* | ||
116 | * TTBR0/TTBR1 split (PAGE_OFFSET): | ||
117 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | ||
118 | * 0x80000000: T0SZ = 0, T1SZ = 1 | ||
119 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
120 | * | ||
121 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
122 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
123 | * mapping set up in TTBR0. | ||
124 | */ | ||
125 | bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? | ||
126 | orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ | ||
127 | #if defined CONFIG_VMSPLIT_2G | ||
128 | /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ | ||
129 | add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries | ||
130 | #elif defined CONFIG_VMSPLIT_3G | ||
131 | /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ | ||
132 | add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd | ||
133 | #endif | ||
134 | /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ | ||
135 | 9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register | ||
136 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | ||
137 | .endm | ||
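The TTBCR.T1SZ value orred in above follows directly from the split table in the comment; as a sanity check (assumed arithmetic, not kernel source):

    /* T1SZ = (PAGE_OFFSET >> 30) - 1 for the supported splits */
    #define T1SZ(page_offset) (((page_offset) >> 30) - 1)

    /* T1SZ(0x80000000) == 1  (VMSPLIT_2G)
     * T1SZ(0xc0000000) == 2  (VMSPLIT_3G)  -- matches the table above */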
138 | |||
139 | __CPUINIT | ||
140 | |||
141 | /* | ||
142 | * AT | ||
143 | *  TFR   EV X F   IHD LR    S | ||
144 | * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM | ||
145 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | ||
146 | *   11    0 110    1  0011 1100 .111 1101 < we want | ||
147 | */ | ||
148 | .align 2 | ||
149 | .type v7_crval, #object | ||
150 | v7_crval: | ||
151 | crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c | ||
152 | |||
153 | .previous | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a3c015f8d5..38c78253f76 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -19,11 +19,24 @@ | |||
19 | 19 | ||
20 | #include "proc-macros.S" | 20 | #include "proc-macros.S" |
21 | 21 | ||
22 | #ifdef CONFIG_ARM_LPAE | 22 | #define TTB_S (1 << 1) |
23 | #include "proc-v7-3level.S" | 23 | #define TTB_RGN_NC (0 << 3) |
24 | #else | 24 | #define TTB_RGN_OC_WBWA (1 << 3) |
25 | #include "proc-v7-2level.S" | 25 | #define TTB_RGN_OC_WT (2 << 3) |
26 | #endif | 26 | #define TTB_RGN_OC_WB (3 << 3) |
27 | #define TTB_NOS (1 << 5) | ||
28 | #define TTB_IRGN_NC ((0 << 0) | (0 << 6)) | ||
29 | #define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) | ||
30 | #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) | ||
31 | #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) | ||
32 | |||
33 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | ||
34 | #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB | ||
35 | #define PMD_FLAGS_UP PMD_SECT_WB | ||
36 | |||
37 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | ||
38 | #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA | ||
39 | #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S | ||
27 | 40 | ||
28 | ENTRY(cpu_v7_proc_init) | 41 | ENTRY(cpu_v7_proc_init) |
29 | mov pc, lr | 42 | mov pc, lr |
@@ -50,16 +63,14 @@ ENDPROC(cpu_v7_proc_fin) | |||
50 | * caches disabled. | 63 | * caches disabled. |
51 | */ | 64 | */ |
52 | .align 5 | 65 | .align 5 |
53 | .pushsection .idmap.text, "ax" | ||
54 | ENTRY(cpu_v7_reset) | 66 | ENTRY(cpu_v7_reset) |
55 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register | 67 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register |
56 | bic r1, r1, #0x1 @ ...............m | 68 | bic r1, r1, #0x1 @ ...............m |
57 | THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) | 69 | THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) |
58 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU | 70 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU |
59 | isb | 71 | isb |
60 | bx r0 | 72 | mov pc, r0 |
61 | ENDPROC(cpu_v7_reset) | 73 | ENDPROC(cpu_v7_reset) |
62 | .popsection | ||
63 | 74 | ||
64 | /* | 75 | /* |
65 | * cpu_v7_do_idle() | 76 | * cpu_v7_do_idle() |
@@ -86,58 +97,380 @@ ENTRY(cpu_v7_dcache_clean_area) | |||
86 | mov pc, lr | 97 | mov pc, lr |
87 | ENDPROC(cpu_v7_dcache_clean_area) | 98 | ENDPROC(cpu_v7_dcache_clean_area) |
88 | 99 | ||
100 | /* | ||
101 | * cpu_v7_switch_mm(pgd_phys, tsk) | ||
102 | * | ||
103 | * Set the translation table base pointer to be pgd_phys | ||
104 | * | ||
105 | * - pgd_phys - physical address of new TTB | ||
106 | * | ||
107 | * It is assumed that: | ||
108 | * - we are not using split page tables | ||
109 | */ | ||
110 | ENTRY(cpu_v7_switch_mm) | ||
111 | #ifdef CONFIG_MMU | ||
112 | mov r2, #0 | ||
113 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | ||
114 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) | ||
115 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | ||
116 | #ifdef CONFIG_ARM_ERRATA_430973 | ||
117 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
118 | #endif | ||
119 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
120 | dsb | ||
121 | #endif | ||
122 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID | ||
123 | isb | ||
124 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | ||
125 | isb | ||
126 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
127 | dsb | ||
128 | #endif | ||
129 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | ||
130 | isb | ||
131 | #endif | ||
132 | mov pc, lr | ||
133 | ENDPROC(cpu_v7_switch_mm) | ||
134 | |||
135 | /* | ||
136 | * cpu_v7_set_pte_ext(ptep, pte) | ||
137 | * | ||
138 | * Set a level 2 translation table entry. | ||
139 | * | ||
140 | * - ptep - pointer to level 2 translation table entry | ||
141 | * (hardware version is stored at +2048 bytes) | ||
142 | * - pte - PTE value to store | ||
143 | * - ext - value for extended PTE bits | ||
144 | */ | ||
145 | ENTRY(cpu_v7_set_pte_ext) | ||
146 | #ifdef CONFIG_MMU | ||
147 | str r1, [r0] @ linux version | ||
148 | |||
149 | bic r3, r1, #0x000003f0 | ||
150 | bic r3, r3, #PTE_TYPE_MASK | ||
151 | orr r3, r3, r2 | ||
152 | orr r3, r3, #PTE_EXT_AP0 | 2 | ||
153 | |||
154 | tst r1, #1 << 4 | ||
155 | orrne r3, r3, #PTE_EXT_TEX(1) | ||
156 | |||
157 | eor r1, r1, #L_PTE_DIRTY | ||
158 | tst r1, #L_PTE_RDONLY | L_PTE_DIRTY | ||
159 | orrne r3, r3, #PTE_EXT_APX | ||
160 | |||
161 | tst r1, #L_PTE_USER | ||
162 | orrne r3, r3, #PTE_EXT_AP1 | ||
163 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
164 | @ allow kernel read/write access to read-only user pages | ||
165 | tstne r3, #PTE_EXT_APX | ||
166 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | ||
167 | #endif | ||
168 | |||
169 | tst r1, #L_PTE_XN | ||
170 | orrne r3, r3, #PTE_EXT_XN | ||
171 | |||
172 | tst r1, #L_PTE_YOUNG | ||
173 | tstne r1, #L_PTE_PRESENT | ||
174 | moveq r3, #0 | ||
175 | |||
176 | ARM( str r3, [r0, #2048]! ) | ||
177 | THUMB( add r0, r0, #2048 ) | ||
178 | THUMB( str r3, [r0] ) | ||
179 | mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3 | ||
180 | tst r3, #0xf << 20 @ check the coherent walk bits | ||
181 | mcreq p15, 0, r0, c7, c10, 1 @ flush_pte | ||
182 | #endif | ||
183 | mov pc, lr | ||
184 | ENDPROC(cpu_v7_set_pte_ext) | ||
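For readers less fluent in the conditional-execution idiom, the bit tests above translate roughly into the following C (a simplified sketch: the L_PTE_*/PTE_EXT_* names are the kernel's own constants, and the CONFIG_CPU_USE_DOMAINS special case is omitted):

    #include <stdint.h>

    uint32_t hw_pte(uint32_t lpte, uint32_t ext)
    {
        uint32_t h = (lpte & ~0x3f0u & ~PTE_TYPE_MASK) | ext | PTE_EXT_AP0 | 2;

        if (lpte & (1 << 4))
            h |= PTE_EXT_TEX(1);                    /* memory-type TEX bit */
        if ((lpte ^ L_PTE_DIRTY) & (L_PTE_RDONLY | L_PTE_DIRTY))
            h |= PTE_EXT_APX;                       /* read-only or clean  */
        if (lpte & L_PTE_USER)
            h |= PTE_EXT_AP1;
        if (lpte & L_PTE_XN)
            h |= PTE_EXT_XN;
        if (!(lpte & L_PTE_YOUNG) || !(lpte & L_PTE_PRESENT))
            h = 0;                                  /* fault entry         */
        return h;                                   /* stored at ptep + 2048 */
    }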
185 | |||
89 | string cpu_v7_name, "ARMv7 Processor" | 186 | string cpu_v7_name, "ARMv7 Processor" |
90 | .align | 187 | .align |
91 | 188 | ||
189 | /* | ||
190 | * Memory region attributes with SCTLR.TRE=1 | ||
191 | * | ||
192 | * n = TEX[0],C,B | ||
193 | * TR = PRRR[2n+1:2n] - memory type | ||
194 | * IR = NMRR[2n+1:2n] - inner cacheable property | ||
195 | * OR = NMRR[2n+17:2n+16] - outer cacheable property | ||
196 | * | ||
197 | *                 n    TR   IR   OR | ||
198 | * UNCACHED        000  00 | ||
199 | * BUFFERABLE      001  10   00   00 | ||
200 | * WRITETHROUGH    010  10   10   10 | ||
201 | * WRITEBACK       011  10   11   11 | ||
202 | * reserved        110 | ||
203 | * WRITEALLOC      111  10   01   01 | ||
204 | * DEV_SHARED      100  01 | ||
205 | * DEV_NONSHARED   100  01 | ||
206 | * DEV_WC          001  10 | ||
207 | * DEV_CACHED      011  10 | ||
208 | * | ||
209 | * Other attributes: | ||
210 | * | ||
211 | * DS0 = PRRR[16] = 0 - device shareable property | ||
212 | * DS1 = PRRR[17] = 1 - device shareable property | ||
213 | * NS0 = PRRR[18] = 0 - normal shareable property | ||
214 | * NS1 = PRRR[19] = 1 - normal shareable property | ||
215 | * NOS = PRRR[24+n] = 1 - not outer shareable | ||
216 | */ | ||
217 | .equ PRRR, 0xff0a89a8 | ||
218 | .equ NMRR, 0xc0e044e0 | ||
219 | |||
92 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ | 220 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ |
221 | .local cpu_v7_debug_suspend_size | ||
222 | #ifdef CONFIG_ARM_SAVE_DEBUG_CONTEXT | ||
223 | /* | ||
224 | * Debug context: | ||
225 | * 8 CP14 registers | ||
226 | * 16x2 CP14 breakpoint registers (maximum) | ||
227 | * 16x2 CP14 watchpoint registers (maximum) | ||
228 | */ | ||
229 | .equ cpu_v7_debug_suspend_size, (4 * (8 + (16 * 2) + (16 * 2))) | ||
230 | |||
231 | .macro save_brkpt cm | ||
232 | mrc p14, 0, r4, c0, \cm, 4 | ||
233 | mrc p14, 0, r5, c0, \cm, 5 | ||
234 | stmia r0!, {r4 - r5} | ||
235 | .endm | ||
236 | |||
237 | .macro restore_brkpt cm | ||
238 | ldmia r0!, {r4 - r5} | ||
239 | mcr p14, 0, r4, c0, \cm, 4 | ||
240 | mcr p14, 0, r5, c0, \cm, 5 | ||
241 | .endm | ||
242 | |||
243 | .macro save_wpt cm | ||
244 | mrc p14, 0, r4, c0, \cm, 6 | ||
245 | mrc p14, 0, r5, c0, \cm, 7 | ||
246 | stmia r0!, {r4 - r5} | ||
247 | .endm | ||
248 | |||
249 | .macro restore_wpt cm | ||
250 | ldmia r0!, {r4 - r5} | ||
251 | mcr p14, 0, r4, c0, \cm, 6 | ||
252 | mcr p14, 0, r5, c0, \cm, 7 | ||
253 | .endm | ||
254 | |||
255 | #else | ||
256 | .equ cpu_v7_debug_suspend_size, 0 | ||
257 | #endif | ||
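For reference, cpu_v7_debug_suspend_size works out to 4 × (8 + 32 + 32) = 288 bytes when CONFIG_ARM_SAVE_DEBUG_CONTEXT is set, so the total cpu_v7_suspend_size defined just below becomes 4 × 10 + 288 = 328 bytes per CPU (40 bytes otherwise).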
258 | |||
93 | .globl cpu_v7_suspend_size | 259 | .globl cpu_v7_suspend_size |
94 | .equ cpu_v7_suspend_size, 4 * 8 | 260 | .equ cpu_v7_suspend_size, (4 * 10) + cpu_v7_debug_suspend_size |
95 | #ifdef CONFIG_ARM_CPU_SUSPEND | 261 | #ifdef CONFIG_PM_SLEEP |
96 | ENTRY(cpu_v7_do_suspend) | 262 | ENTRY(cpu_v7_do_suspend) |
97 | stmfd sp!, {r4 - r10, lr} | 263 | stmfd sp!, {r0, r3 - r11, lr} |
264 | mrc p15, 0, r3, c15, c0, 1 @ diag | ||
98 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 265 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
99 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 266 | mrc p15, 0, r5, c13, c0, 1 @ Context ID |
100 | stmia r0!, {r4 - r5} | 267 | mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID |
268 | stmia r0!, {r3 - r6} | ||
101 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 269 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
102 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 | 270 | mrc p15, 0, r7, c2, c0, 0 @ TTB 0 |
103 | mrc p15, 0, r11, c2, c0, 2 @ TTB control register | 271 | mrc p15, 0, r8, c2, c0, 1 @ TTB 1 |
104 | mrc p15, 0, r8, c1, c0, 0 @ Control register | 272 | mrc p15, 0, r9, c1, c0, 0 @ Control register |
105 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register | 273 | mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register |
106 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control | 274 | mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control |
107 | stmia r0, {r6 - r11} | 275 | stmia r0!, {r6 - r11} |
108 | ldmfd sp!, {r4 - r10, pc} | 276 | |
277 | #ifdef CONFIG_ARM_SAVE_DEBUG_CONTEXT | ||
278 | /* Save CP14 debug controller context */ | ||
279 | |||
280 | mrc p14, 0, r4, c0, c2, 2 @ DBGDSCRext | ||
281 | mrc p14, 0, r5, c0, c6, 0 @ DBGWFAR | ||
282 | mrc p14, 0, r6, c0, c7, 0 @ DBGVCR | ||
283 | mrc p14, 0, r7, c7, c9, 6 @ DBGCLAIMCLR | ||
284 | stmia r0!, {r4-r7} | ||
285 | |||
286 | mrc p14, 0, r4, c0, c10, 0 @ DBGDSCCR | ||
287 | mrc p14, 0, r5, c0, c11, 0 @ DBGDSMCR | ||
288 | stmia r0!, {r4-r5} | ||
289 | |||
290 | tst r4, #(1 << 29) @ DBGDSCRext.TXfull | ||
291 | mrcne p14, 0, r4, c0, c3, 2 @ DBGDTRTXext | ||
292 | strne r4, [r0], #4 | ||
293 | |||
294 | tst r4, #(1 << 30) @ DBGDSCRext.RXfull | ||
295 | mrcne p14, 0, r4, c0, c0, 2 @ DBGDTRRXext | ||
296 | strne r4, [r0], #4 | ||
297 | |||
298 | mrc p14, 0, r8, c0, c0, 0 @ read IDR | ||
299 | mov r3, r8, lsr #24 | ||
300 | and r3, r3, #0xf @ r3 has the number of brkpt | ||
301 | rsb r3, r3, #0xf | ||
302 | |||
303 | /* r3 = (15 - #of brkpt) ; | ||
304 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
305 | */ | ||
306 | add r3, r3, r3, lsl #1 | ||
307 | sub r3, r3, #1 | ||
308 | add pc, pc, r3, lsl #2 | ||
309 | |||
310 | save_brkpt c15 | ||
311 | save_brkpt c14 | ||
312 | save_brkpt c13 | ||
313 | save_brkpt c12 | ||
314 | save_brkpt c11 | ||
315 | save_brkpt c10 | ||
316 | save_brkpt c9 | ||
317 | save_brkpt c8 | ||
318 | save_brkpt c7 | ||
319 | save_brkpt c6 | ||
320 | save_brkpt c5 | ||
321 | save_brkpt c4 | ||
322 | save_brkpt c3 | ||
323 | save_brkpt c2 | ||
324 | save_brkpt c1 | ||
325 | save_brkpt c0 | ||
326 | |||
327 | mov r3, r8, lsr #28 @ r3 has the number of wpt | ||
328 | rsb r3, r3, #0xf | ||
329 | |||
330 | /* r3 = (15 - #of wpt) ; | ||
331 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
332 | */ | ||
333 | add r3, r3, r3, lsl #1 | ||
334 | sub r3, r3, #1 | ||
335 | add pc, pc, r3, lsl #2 | ||
336 | |||
337 | save_wpt c15 | ||
338 | save_wpt c14 | ||
339 | save_wpt c13 | ||
340 | save_wpt c12 | ||
341 | save_wpt c11 | ||
342 | save_wpt c10 | ||
343 | save_wpt c9 | ||
344 | save_wpt c8 | ||
345 | save_wpt c7 | ||
346 | save_wpt c6 | ||
347 | save_wpt c5 | ||
348 | save_wpt c4 | ||
349 | save_wpt c3 | ||
350 | save_wpt c2 | ||
351 | save_wpt c1 | ||
352 | save_wpt c0 | ||
353 | #endif | ||
354 | ldmfd sp!, {r0, r3 - r11, pc} | ||
109 | ENDPROC(cpu_v7_do_suspend) | 355 | ENDPROC(cpu_v7_do_suspend) |
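The add pc, pc, r3, lsl #2 dispatch in both the breakpoint and watchpoint loops above relies on each save_* macro expanding to exactly three 4-byte instructions, and on the ARM convention that PC reads as the address of the current instruction plus 8. A small sketch of the arithmetic (illustrative only):

    /* r3 = 15 minus the implemented-slot count from the debug IDR
     * (see the comments in the code above) */
    unsigned dispatch_offset(unsigned r3)
    {
        return r3 * 12 - 4;          /* == (r3*3 - 1) << 2, as in the comment */
    }

    /* branch target = addr_of_add + 8 + dispatch_offset(r3)
     *               = addr_of_add + 4 + 12*r3
     * i.e. fall into the table r3 macro bodies in, skipping the
     * unimplemented high-numbered breakpoint/watchpoint pairs. */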
110 | 356 | ||
111 | ENTRY(cpu_v7_do_resume) | 357 | ENTRY(cpu_v7_do_resume) |
112 | mov ip, #0 | 358 | mov ip, #0 |
113 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | 359 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs |
114 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | 360 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
115 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID | 361 | ldmia r0!, {r3 - r6} |
116 | ldmia r0!, {r4 - r5} | 362 | #ifndef CONFIG_TRUSTED_FOUNDATIONS |
363 | mcr p15, 0, r3, c15, c0, 1 @ diag | ||
364 | #endif | ||
117 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 365 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
118 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 366 | mcr p15, 0, r5, c13, c0, 1 @ Context ID |
119 | ldmia r0, {r6 - r11} | 367 | mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID |
368 | ldmia r0!, {r6 - r11} | ||
120 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 369 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
121 | #ifndef CONFIG_ARM_LPAE | 370 | mcr p15, 0, r7, c2, c0, 0 @ TTB 0 |
122 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 371 | mcr p15, 0, r8, c2, c0, 1 @ TTB 1 |
123 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) | 372 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register |
124 | #endif | ||
125 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 | ||
126 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 | ||
127 | mcr p15, 0, r11, c2, c0, 2 @ TTB control register | ||
128 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register | 373 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register |
129 | teq r4, r9 @ Is it already set? | 374 | teq r4, r10 @ Is it already set? |
130 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it | 375 | mcrne p15, 0, r10, c1, c0, 1 @ No, so write it |
131 | mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control | 376 | mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control |
132 | ldr r4, =PRRR @ PRRR | 377 | ldr r4, =PRRR @ PRRR |
133 | ldr r5, =NMRR @ NMRR | 378 | ldr r5, =NMRR @ NMRR |
134 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR | 379 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR |
135 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR | 380 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR |
136 | isb | 381 | isb |
382 | |||
383 | #ifdef CONFIG_ARM_SAVE_DEBUG_CONTEXT | ||
384 | /* Restore CP14 debug controller context */ | ||
385 | |||
386 | ldmia r0!, {r2 - r5} | ||
387 | mcr p14, 0, r3, c0, c6, 0 @ DBGWFAR | ||
388 | mcr p14, 0, r4, c0, c7, 0 @ DBGVCR | ||
389 | mcr p14, 0, r5, c7, c8, 6 @ DBGCLAIMSET | ||
390 | |||
391 | ldmia r0!, {r4-r5} | ||
392 | mcr p14, 0, r4, c0, c10, 0 @ DBGDSCCR | ||
393 | mcr p14, 0, r5, c0, c11, 0 @ DBGDSMCR | ||
394 | |||
395 | tst r2, #(1 << 29) @ DBGDSCRext.TXfull | ||
396 | ldrne r4, [r0], #4 | ||
397 | mcrne p14, 0, r4, c0, c3, 2 @ DBGDTRTXext | ||
398 | |||
399 | tst r2, #(1 << 30) @ DBGDSCRext.RXfull | ||
400 | ldrne r4, [r0], #4 | ||
401 | mcrne p14, 0, r4, c0, c0, 2 @ DBGDTRRXext | ||
402 | |||
403 | mrc p14, 0, r8, c0, c0, 0 @ read IDR | ||
404 | mov r3, r8, lsr #24 | ||
405 | and r3, r3, #0xf @ r3 has the number of brkpt | ||
406 | rsb r3, r3, #0xf | ||
407 | |||
408 | /* r3 = (15 - #of brkpt) ; | ||
409 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
410 | */ | ||
411 | add r3, r3, r3, lsl #1 | ||
412 | sub r3, r3, #1 | ||
413 | add pc, pc, r3, lsl #2 | ||
414 | |||
415 | restore_brkpt c15 | ||
416 | restore_brkpt c14 | ||
417 | restore_brkpt c13 | ||
418 | restore_brkpt c12 | ||
419 | restore_brkpt c11 | ||
420 | restore_brkpt c10 | ||
421 | restore_brkpt c9 | ||
422 | restore_brkpt c8 | ||
423 | restore_brkpt c7 | ||
424 | restore_brkpt c6 | ||
425 | restore_brkpt c5 | ||
426 | restore_brkpt c4 | ||
427 | restore_brkpt c3 | ||
428 | restore_brkpt c2 | ||
429 | restore_brkpt c1 | ||
430 | restore_brkpt c0 | ||
431 | |||
432 | mov r3, r8, lsr #28 @ r3 has the number of wpt | ||
433 | rsb r3, r3, #0xf | ||
434 | |||
435 | /* r3 = (15 - #of wpt) ; | ||
436 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
437 | */ | ||
438 | add r3, r3, r3, lsl #1 | ||
439 | sub r3, r3, #1 | ||
440 | add pc, pc, r3, lsl #2 | ||
441 | |||
442 | start_restore_wpt: | ||
443 | restore_wpt c15 | ||
444 | restore_wpt c14 | ||
445 | restore_wpt c13 | ||
446 | restore_wpt c12 | ||
447 | restore_wpt c11 | ||
448 | restore_wpt c10 | ||
449 | restore_wpt c9 | ||
450 | restore_wpt c8 | ||
451 | restore_wpt c7 | ||
452 | restore_wpt c6 | ||
453 | restore_wpt c5 | ||
454 | restore_wpt c4 | ||
455 | restore_wpt c3 | ||
456 | restore_wpt c2 | ||
457 | restore_wpt c1 | ||
458 | restore_wpt c0 | ||
459 | isb | ||
460 | |||
461 | mcr p14, 0, r2, c0, c2, 2 @ DSCR | ||
462 | isb | ||
463 | #endif | ||
137 | dsb | 464 | dsb |
138 | mov r0, r8 @ control register | 465 | mov r0, r9 @ control register |
466 | mov r2, r7, lsr #14 @ get TTB0 base | ||
467 | mov r2, r2, lsl #14 | ||
468 | ldr r3, cpu_resume_l1_flags | ||
139 | b cpu_resume_mmu | 469 | b cpu_resume_mmu |
140 | ENDPROC(cpu_v7_do_resume) | 470 | ENDPROC(cpu_v7_do_resume) |
471 | cpu_resume_l1_flags: | ||
472 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | ||
473 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | ||
141 | #endif | 474 | #endif |
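The lsr #14/lsl #14 pair near the end of cpu_v7_do_resume is just a mask: TTBR0 carries flag bits in its low 14 bits, and the 16 KB-aligned table base is apparently what cpu_resume_mmu wants in r2, with the section flags supplied from cpu_resume_l1_flags in r3. Equivalent C (a sketch):

    #include <stdint.h>

    uint32_t ttb0_base(uint32_t ttbr0)
    {
        return (ttbr0 >> 14) << 14;   /* == ttbr0 & 0xffffc000 */
    }

The same mask-and-branch pattern recurs in the xsc3 and xscale resume paths later in this patch.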
142 | 475 | ||
143 | __CPUINIT | 476 | __CPUINIT |
@@ -157,7 +490,6 @@ __v7_ca5mp_setup: | |||
157 | __v7_ca9mp_setup: | 490 | __v7_ca9mp_setup: |
158 | mov r10, #(1 << 0) @ TLB ops broadcasting | 491 | mov r10, #(1 << 0) @ TLB ops broadcasting |
159 | b 1f | 492 | b 1f |
160 | __v7_ca7mp_setup: | ||
161 | __v7_ca15mp_setup: | 493 | __v7_ca15mp_setup: |
162 | mov r10, #0 | 494 | mov r10, #0 |
163 | 1: | 495 | 1: |
@@ -169,68 +501,10 @@ __v7_ca15mp_setup: | |||
169 | orreq r0, r0, r10 @ Enable CPU-specific SMP bits | 501 | orreq r0, r0, r10 @ Enable CPU-specific SMP bits |
170 | mcreq p15, 0, r0, c1, c0, 1 | 502 | mcreq p15, 0, r0, c1, c0, 1 |
171 | #endif | 503 | #endif |
172 | b __v7_setup | ||
173 | |||
174 | __v7_pj4b_setup: | ||
175 | #ifdef CONFIG_CPU_PJ4B | ||
176 | |||
177 | /* Auxiliary Debug Modes Control 1 Register */ | ||
178 | #define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ | ||
179 | #define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ | ||
180 | #define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */ | ||
181 | #define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ | ||
182 | |||
183 | /* Auxiliary Debug Modes Control 2 Register */ | ||
184 | #define PJ4B_FAST_LDR (1 << 23) /* Disable fast LDR */ | ||
185 | #define PJ4B_SNOOP_DATA (1 << 25) /* Do not interleave write and snoop data */ | ||
186 | #define PJ4B_CWF (1 << 27) /* Disable Critical Word First feature */ | ||
187 | #define PJ4B_OUTSDNG_NC (1 << 29) /* Disable outstanding non cacheable rqst */ | ||
188 | #define PJ4B_L1_REP_RR (1 << 30) /* L1 replacement - Strict round robin */ | ||
189 | #define PJ4B_AUX_DBG_CTRL2 (PJ4B_SNOOP_DATA | PJ4B_CWF |\ | ||
190 | PJ4B_OUTSDNG_NC | PJ4B_L1_REP_RR) | ||
191 | |||
192 | /* Auxiliary Functional Modes Control Register 0 */ | ||
193 | #define PJ4B_SMP_CFB (1 << 1) /* Set SMP mode. Join the coherency fabric */ | ||
194 | #define PJ4B_L1_PAR_CHK (1 << 2) /* Support L1 parity checking */ | ||
195 | #define PJ4B_BROADCAST_CACHE (1 << 8) /* Broadcast Cache and TLB maintenance */ | ||
196 | |||
197 | /* Auxiliary Debug Modes Control 0 Register */ | ||
198 | #define PJ4B_WFI_WFE (1 << 22) /* WFI/WFE - serve the DVM and back to idle */ | ||
199 | |||
200 | /* Auxiliary Debug Modes Control 1 Register */ | ||
201 | mrc p15, 1, r0, c15, c1, 1 | ||
202 | orr r0, r0, #PJ4B_CLEAN_LINE | ||
203 | orr r0, r0, #PJ4B_BCK_OFF_STREX | ||
204 | orr r0, r0, #PJ4B_INTER_PARITY | ||
205 | bic r0, r0, #PJ4B_STATIC_BP | ||
206 | mcr p15, 1, r0, c15, c1, 1 | ||
207 | |||
208 | /* Auxiliary Debug Modes Control 2 Register */ | ||
209 | mrc p15, 1, r0, c15, c1, 2 | ||
210 | bic r0, r0, #PJ4B_FAST_LDR | ||
211 | orr r0, r0, #PJ4B_AUX_DBG_CTRL2 | ||
212 | mcr p15, 1, r0, c15, c1, 2 | ||
213 | |||
214 | /* Auxiliary Functional Modes Control Register 0 */ | ||
215 | mrc p15, 1, r0, c15, c2, 0 | ||
216 | #ifdef CONFIG_SMP | ||
217 | orr r0, r0, #PJ4B_SMP_CFB | ||
218 | #endif | ||
219 | orr r0, r0, #PJ4B_L1_PAR_CHK | ||
220 | orr r0, r0, #PJ4B_BROADCAST_CACHE | ||
221 | mcr p15, 1, r0, c15, c2, 0 | ||
222 | |||
223 | /* Auxiliary Debug Modes Control 0 Register */ | ||
224 | mrc p15, 1, r0, c15, c1, 0 | ||
225 | orr r0, r0, #PJ4B_WFI_WFE | ||
226 | mcr p15, 1, r0, c15, c1, 0 | ||
227 | |||
228 | #endif /* CONFIG_CPU_PJ4B */ | ||
229 | |||
230 | __v7_setup: | 504 | __v7_setup: |
231 | adr r12, __v7_setup_stack @ the local stack | 505 | adr r12, __v7_setup_stack @ the local stack |
232 | stmia r12, {r0-r5, r7, r9, r11, lr} | 506 | stmia r12, {r0-r5, r7, r9, r11, lr} |
233 | bl v7_flush_dcache_louis | 507 | bl v7_flush_dcache_all |
234 | ldmia r12, {r0-r5, r7, r9, r11, lr} | 508 | ldmia r12, {r0-r5, r7, r9, r11, lr} |
235 | 509 | ||
236 | mrc p15, 0, r0, c0, c0, 0 @ read main ID register | 510 | mrc p15, 0, r0, c0, c0, 0 @ read main ID register |
@@ -246,8 +520,7 @@ __v7_setup: | |||
246 | ldr r10, =0x00000c08 @ Cortex-A8 primary part number | 520 | ldr r10, =0x00000c08 @ Cortex-A8 primary part number |
247 | teq r0, r10 | 521 | teq r0, r10 |
248 | bne 2f | 522 | bne 2f |
249 | #if defined(CONFIG_ARM_ERRATA_430973) && !defined(CONFIG_ARCH_MULTIPLATFORM) | 523 | #ifdef CONFIG_ARM_ERRATA_430973 |
250 | |||
251 | teq r5, #0x00100000 @ only present in r1p* | 524 | teq r5, #0x00100000 @ only present in r1p* |
252 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register | 525 | mrceq p15, 0, r10, c1, c0, 1 @ read aux control register |
253 | orreq r10, r10, #(1 << 6) @ set IBE to 1 | 526 | orreq r10, r10, #(1 << 6) @ set IBE to 1 |
@@ -273,6 +546,17 @@ __v7_setup: | |||
273 | 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number | 546 | 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number |
274 | teq r0, r10 | 547 | teq r0, r10 |
275 | bne 3f | 548 | bne 3f |
549 | #ifndef CONFIG_TRUSTED_FOUNDATIONS | ||
550 | cmp r6, #0x10 @ power ctrl reg added r1p0 | ||
551 | mrcge p15, 0, r10, c15, c0, 0 @ read power control register | ||
552 | orrge r10, r10, #1 @ enable dynamic clock gating | ||
553 | mcrge p15, 0, r10, c15, c0, 0 @ write power control register | ||
554 | #ifdef CONFIG_ARM_ERRATA_720791 | ||
555 | teq r5, #0x00100000 @ only present in r1p* | ||
556 | mrceq p15, 0, r10, c15, c0, 2 @ read "chicken power ctrl" reg | ||
557 | orreq r10, r10, #0x30 @ disable core clk gate on | ||
558 | mcreq p15, 0, r10, c15, c0, 2 @ instr-side waits | ||
559 | #endif | ||
276 | #ifdef CONFIG_ARM_ERRATA_742230 | 560 | #ifdef CONFIG_ARM_ERRATA_742230 |
277 | cmp r6, #0x22 @ only present up to r2p2 | 561 | cmp r6, #0x22 @ only present up to r2p2 |
278 | mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register | 562 | mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register |
@@ -289,18 +573,27 @@ __v7_setup: | |||
289 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 573 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
290 | #endif | 574 | #endif |
291 | #ifdef CONFIG_ARM_ERRATA_743622 | 575 | #ifdef CONFIG_ARM_ERRATA_743622 |
292 | teq r5, #0x00200000 @ only present in r2p* | 576 | teq r6, #0x20 @ present in r2p0 |
577 | teqne r6, #0x21 @ present in r2p1 | ||
578 | teqne r6, #0x22 @ present in r2p2 | ||
579 | teqne r6, #0x27 @ present in r2p7 | ||
580 | teqne r6, #0x29 @ present in r2p9 | ||
293 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register | 581 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register |
294 | orreq r10, r10, #1 << 6 @ set bit #6 | 582 | orreq r10, r10, #1 << 6 @ set bit #6 |
295 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 583 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
296 | #endif | 584 | #endif |
297 | #if defined(CONFIG_ARM_ERRATA_751472) && defined(CONFIG_SMP) | 585 | #ifdef CONFIG_ARM_ERRATA_751472 |
298 | ALT_SMP(cmp r6, #0x30) @ present prior to r3p0 | 586 | cmp r6, #0x30 @ present prior to r3p0 |
299 | ALT_UP_B(1f) | ||
300 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | 587 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register |
301 | orrlt r10, r10, #1 << 11 @ set bit #11 | 588 | orrlt r10, r10, #1 << 11 @ set bit #11 |
302 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | 589 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register |
303 | 1: | 590 | #endif |
591 | #ifdef CONFIG_ARM_ERRATA_752520 | ||
592 | cmp r6, #0x29 @ present prior to r2p9 | ||
593 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
594 | orrlt r10, r10, #1 << 20 @ set bit #20 | ||
595 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
596 | #endif | ||
304 | #endif | 597 | #endif |
305 | 598 | ||
306 | 3: mov r10, #0 | 599 | 3: mov r10, #0 |
@@ -308,24 +601,17 @@ __v7_setup: | |||
308 | dsb | 601 | dsb |
309 | #ifdef CONFIG_MMU | 602 | #ifdef CONFIG_MMU |
310 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs | 603 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs |
311 | v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup | 604 | mcr p15, 0, r10, c2, c0, 2 @ TTB control register |
605 | ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) | ||
606 | ALT_UP(orr r4, r4, #TTB_FLAGS_UP) | ||
607 | ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) | ||
608 | ALT_UP(orr r8, r8, #TTB_FLAGS_UP) | ||
609 | mcr p15, 0, r8, c2, c0, 1 @ load TTB1 | ||
312 | ldr r5, =PRRR @ PRRR | 610 | ldr r5, =PRRR @ PRRR |
313 | ldr r6, =NMRR @ NMRR | 611 | ldr r6, =NMRR @ NMRR |
314 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR | 612 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR |
315 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR | 613 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR |
316 | #endif | 614 | #endif |
317 | #ifndef CONFIG_ARM_THUMBEE | ||
318 | mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE | ||
319 | and r0, r0, #(0xf << 12) @ ThumbEE enabled field | ||
320 | teq r0, #(1 << 12) @ check if ThumbEE is present | ||
321 | bne 1f | ||
322 | mov r5, #0 | ||
323 | mcr p14, 6, r5, c1, c0, 0 @ Initialize TEEHBR to 0 | ||
324 | mrc p14, 6, r0, c0, c0, 0 @ load TEECR | ||
325 | orr r0, r0, #1 @ set the 1st bit in order to | ||
326 | mcr p14, 6, r0, c0, c0, 0 @ stop userspace TEEHBR access | ||
327 | 1: | ||
328 | #endif | ||
329 | adr r5, v7_crval | 615 | adr r5, v7_crval |
330 | ldmia r5, {r5, r6} | 616 | ldmia r5, {r5, r6} |
331 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 617 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
@@ -342,7 +628,16 @@ __v7_setup: | |||
342 | mov pc, lr @ return to head.S:__ret | 628 | mov pc, lr @ return to head.S:__ret |
343 | ENDPROC(__v7_setup) | 629 | ENDPROC(__v7_setup) |
344 | 630 | ||
345 | .align 2 | 631 | /* AT |
632 | *  TFR   EV X F   I D LR    S | ||
633 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM | ||
634 | * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced | ||
635 | *     1    0 110       0011 1100 .111 1101 < we want | ||
636 | */ | ||
637 | .type v7_crval, #object | ||
638 | v7_crval: | ||
639 | crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c | ||
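The crval triple above is consumed at the end of __v7_setup: the current SCTLR is read, the 'clear' bits are masked off and the 'mmuset' bits are orred in ('ucset' is the uncached/no-MMU variant). Roughly, as a sketch assuming the proc-macros.S semantics:

    #include <stdint.h>

    uint32_t sctlr_value(uint32_t sctlr, uint32_t clear, uint32_t set)
    {
        return (sctlr & ~clear) | set;   /* set = mmuset or ucset */
    }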
640 | |||
346 | __v7_setup_stack: | 641 | __v7_setup_stack: |
347 | .space 4 * 11 @ 11 registers | 642 | .space 4 * 11 @ 11 registers |
348 | 643 | ||
@@ -364,11 +659,11 @@ __v7_setup_stack: | |||
364 | */ | 659 | */ |
365 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 | 660 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 |
366 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ | 661 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
367 | PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) | 662 | PMD_FLAGS_SMP | \mm_mmuflags) |
368 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ | 663 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
369 | PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags) | 664 | PMD_FLAGS_UP | \mm_mmuflags) |
370 | .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \ | 665 | .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ |
371 | PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags | 666 | PMD_SECT_AP_READ | \io_mmuflags |
372 | W(b) \initfunc | 667 | W(b) \initfunc |
373 | .long cpu_arch_name | 668 | .long cpu_arch_name |
374 | .long cpu_elf_name | 669 | .long cpu_elf_name |
@@ -381,7 +676,6 @@ __v7_setup_stack: | |||
381 | .long v7_cache_fns | 676 | .long v7_cache_fns |
382 | .endm | 677 | .endm |
383 | 678 | ||
384 | #ifndef CONFIG_ARM_LPAE | ||
385 | /* | 679 | /* |
386 | * ARM Ltd. Cortex A5 processor. | 680 | * ARM Ltd. Cortex A5 processor. |
387 | */ | 681 | */ |
@@ -403,27 +697,6 @@ __v7_ca9mp_proc_info: | |||
403 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | 697 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info |
404 | 698 | ||
405 | /* | 699 | /* |
406 | * Marvell PJ4B processor. | ||
407 | */ | ||
408 | .type __v7_pj4b_proc_info, #object | ||
409 | __v7_pj4b_proc_info: | ||
410 | .long 0x562f5840 | ||
411 | .long 0xfffffff0 | ||
412 | __v7_proc __v7_pj4b_setup | ||
413 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info | ||
414 | #endif /* CONFIG_ARM_LPAE */ | ||
415 | |||
416 | /* | ||
417 | * ARM Ltd. Cortex A7 processor. | ||
418 | */ | ||
419 | .type __v7_ca7mp_proc_info, #object | ||
420 | __v7_ca7mp_proc_info: | ||
421 | .long 0x410fc070 | ||
422 | .long 0xff0ffff0 | ||
423 | __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV | ||
424 | .size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info | ||
425 | |||
426 | /* | ||
427 | * ARM Ltd. Cortex A15 processor. | 700 | * ARM Ltd. Cortex A15 processor. |
428 | */ | 701 | */ |
429 | .type __v7_ca15mp_proc_info, #object | 702 | .type __v7_ca15mp_proc_info, #object |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index eb93d6487f3..1a2021cedc7 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -105,7 +105,6 @@ ENTRY(cpu_xsc3_proc_fin) | |||
105 | * loc: location to jump to for soft reset | 105 | * loc: location to jump to for soft reset |
106 | */ | 106 | */ |
107 | .align 5 | 107 | .align 5 |
108 | .pushsection .idmap.text, "ax" | ||
109 | ENTRY(cpu_xsc3_reset) | 108 | ENTRY(cpu_xsc3_reset) |
110 | mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE | 109 | mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE |
111 | msr cpsr_c, r1 @ reset CPSR | 110 | msr cpsr_c, r1 @ reset CPSR |
@@ -120,8 +119,6 @@ ENTRY(cpu_xsc3_reset) | |||
120 | @ already containing those two last instructions to survive. | 119 | @ already containing those two last instructions to survive. |
121 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs | 120 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs |
122 | mov pc, r0 | 121 | mov pc, r0 |
123 | ENDPROC(cpu_xsc3_reset) | ||
124 | .popsection | ||
125 | 122 | ||
126 | /* | 123 | /* |
127 | * cpu_xsc3_do_idle() | 124 | * cpu_xsc3_do_idle() |
@@ -337,9 +334,6 @@ ENTRY(xsc3_dma_unmap_area) | |||
337 | mov pc, lr | 334 | mov pc, lr |
338 | ENDPROC(xsc3_dma_unmap_area) | 335 | ENDPROC(xsc3_dma_unmap_area) |
339 | 336 | ||
340 | .globl xsc3_flush_kern_cache_louis | ||
341 | .equ xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all | ||
342 | |||
343 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 337 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
344 | define_cache_functions xsc3 | 338 | define_cache_functions xsc3 |
345 | 339 | ||
@@ -381,7 +375,7 @@ cpu_xsc3_mt_table: | |||
381 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH | 375 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH |
382 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK | 376 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK |
383 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED | 377 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED |
384 | .long 0x00 @ unused | 378 | .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB (not present?) |
385 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) | 379 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) |
386 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?) | 380 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?) |
387 | .long 0x00 @ unused | 381 | .long 0x00 @ unused |
@@ -412,23 +406,24 @@ ENTRY(cpu_xsc3_set_pte_ext) | |||
412 | .align | 406 | .align |
413 | 407 | ||
414 | .globl cpu_xsc3_suspend_size | 408 | .globl cpu_xsc3_suspend_size |
415 | .equ cpu_xsc3_suspend_size, 4 * 6 | 409 | .equ cpu_xsc3_suspend_size, 4 * 7 |
416 | #ifdef CONFIG_PM_SLEEP | 410 | #ifdef CONFIG_PM_SLEEP |
417 | ENTRY(cpu_xsc3_do_suspend) | 411 | ENTRY(cpu_xsc3_do_suspend) |
418 | stmfd sp!, {r4 - r9, lr} | 412 | stmfd sp!, {r4 - r10, lr} |
419 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 413 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
420 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg | 414 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg |
421 | mrc p15, 0, r6, c13, c0, 0 @ PID | 415 | mrc p15, 0, r6, c13, c0, 0 @ PID |
422 | mrc p15, 0, r7, c3, c0, 0 @ domain ID | 416 | mrc p15, 0, r7, c3, c0, 0 @ domain ID |
423 | mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg | 417 | mrc p15, 0, r8, c2, c0, 0 @ translation table base addr |
424 | mrc p15, 0, r9, c1, c0, 0 @ control reg | 418 | mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg |
419 | mrc p15, 0, r10, c1, c0, 0 @ control reg | ||
425 | bic r4, r4, #2 @ clear frequency change bit | 420 | bic r4, r4, #2 @ clear frequency change bit |
426 | stmia r0, {r4 - r9} @ store cp regs | 421 | stmia r0, {r4 - r10} @ store cp regs |
427 | ldmia sp!, {r4 - r9, pc} | 422 | ldmia sp!, {r4 - r10, pc} |
428 | ENDPROC(cpu_xsc3_do_suspend) | 423 | ENDPROC(cpu_xsc3_do_suspend) |
429 | 424 | ||
430 | ENTRY(cpu_xsc3_do_resume) | 425 | ENTRY(cpu_xsc3_do_resume) |
431 | ldmia r0, {r4 - r9} @ load cp regs | 426 | ldmia r0, {r4 - r10} @ load cp regs |
432 | mov ip, #0 | 427 | mov ip, #0 |
433 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB | 428 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB |
434 | mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer | 429 | mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer |
@@ -438,10 +433,15 @@ ENTRY(cpu_xsc3_do_resume) | |||
438 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg | 433 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg |
439 | mcr p15, 0, r6, c13, c0, 0 @ PID | 434 | mcr p15, 0, r6, c13, c0, 0 @ PID |
440 | mcr p15, 0, r7, c3, c0, 0 @ domain ID | 435 | mcr p15, 0, r7, c3, c0, 0 @ domain ID |
441 | orr r1, r1, #0x18 @ cache the page table in L2 | 436 | mcr p15, 0, r8, c2, c0, 0 @ translation table base addr |
442 | mcr p15, 0, r1, c2, c0, 0 @ translation table base addr | 437 | mcr p15, 0, r9, c1, c0, 1 @ auxiliary control reg |
443 | mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg | 438 | |
444 | mov r0, r9 @ control register | 439 | @ temporarily map resume_turn_on_mmu into the page table, |
440 | @ otherwise prefetch abort occurs after MMU is turned on | ||
441 | mov r0, r10 @ control register | ||
442 | mov r2, r8, lsr #14 @ get TTB0 base | ||
443 | mov r2, r2, lsl #14 | ||
444 | ldr r3, =0x542e @ section flags | ||
445 | b cpu_resume_mmu | 445 | b cpu_resume_mmu |
446 | ENDPROC(cpu_xsc3_do_resume) | 446 | ENDPROC(cpu_xsc3_do_resume) |
447 | #endif | 447 | #endif |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 25510361aa1..b0fe4b1e233 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -142,7 +142,6 @@ ENTRY(cpu_xscale_proc_fin) | |||
142 | * Beware PXA270 erratum E7. | 142 | * Beware PXA270 erratum E7. |
143 | */ | 143 | */ |
144 | .align 5 | 144 | .align 5 |
145 | .pushsection .idmap.text, "ax" | ||
146 | ENTRY(cpu_xscale_reset) | 145 | ENTRY(cpu_xscale_reset) |
147 | mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE | 146 | mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE |
148 | msr cpsr_c, r1 @ reset CPSR | 147 | msr cpsr_c, r1 @ reset CPSR |
@@ -161,8 +160,6 @@ ENTRY(cpu_xscale_reset) | |||
161 | @ already containing those two last instructions to survive. | 160 | @ already containing those two last instructions to survive. |
162 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | 161 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs |
163 | mov pc, r0 | 162 | mov pc, r0 |
164 | ENDPROC(cpu_xscale_reset) | ||
165 | .popsection | ||
166 | 163 | ||
167 | /* | 164 | /* |
168 | * cpu_xscale_do_idle() | 165 | * cpu_xscale_do_idle() |
@@ -410,9 +407,6 @@ ENTRY(xscale_dma_unmap_area) | |||
410 | mov pc, lr | 407 | mov pc, lr |
411 | ENDPROC(xscale_dma_unmap_area) | 408 | ENDPROC(xscale_dma_unmap_area) |
412 | 409 | ||
413 | .globl xscale_flush_kern_cache_louis | ||
414 | .equ xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all | ||
415 | |||
416 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 410 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
417 | define_cache_functions xscale | 411 | define_cache_functions xscale |
418 | 412 | ||
@@ -442,7 +436,6 @@ ENDPROC(xscale_dma_unmap_area) | |||
442 | a0_alias flush_icache_all | 436 | a0_alias flush_icache_all |
443 | a0_alias flush_user_cache_all | 437 | a0_alias flush_user_cache_all |
444 | a0_alias flush_kern_cache_all | 438 | a0_alias flush_kern_cache_all |
445 | a0_alias flush_kern_cache_louis | ||
446 | a0_alias flush_user_cache_range | 439 | a0_alias flush_user_cache_range |
447 | a0_alias coherent_kern_range | 440 | a0_alias coherent_kern_range |
448 | a0_alias coherent_user_range | 441 | a0_alias coherent_user_range |
@@ -491,7 +484,7 @@ cpu_xscale_mt_table: | |||
491 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH | 484 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH |
492 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK | 485 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK |
493 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED | 486 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED |
494 | .long 0x00 @ unused | 487 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB |
495 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE | 488 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE |
496 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC | 489 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC |
497 | .long 0x00 @ unused | 490 | .long 0x00 @ unused |
@@ -527,23 +520,24 @@ ENTRY(cpu_xscale_set_pte_ext) | |||
527 | .align | 520 | .align |
528 | 521 | ||
529 | .globl cpu_xscale_suspend_size | 522 | .globl cpu_xscale_suspend_size |
530 | .equ cpu_xscale_suspend_size, 4 * 6 | 523 | .equ cpu_xscale_suspend_size, 4 * 7 |
531 | #ifdef CONFIG_PM_SLEEP | 524 | #ifdef CONFIG_PM_SLEEP |
532 | ENTRY(cpu_xscale_do_suspend) | 525 | ENTRY(cpu_xscale_do_suspend) |
533 | stmfd sp!, {r4 - r9, lr} | 526 | stmfd sp!, {r4 - r10, lr} |
534 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | 527 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
535 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg | 528 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg |
536 | mrc p15, 0, r6, c13, c0, 0 @ PID | 529 | mrc p15, 0, r6, c13, c0, 0 @ PID |
537 | mrc p15, 0, r7, c3, c0, 0 @ domain ID | 530 | mrc p15, 0, r7, c3, c0, 0 @ domain ID |
538 | mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg | 531 | mrc p15, 0, r8, c2, c0, 0 @ translation table base addr |
539 | mrc p15, 0, r9, c1, c0, 0 @ control reg | 532 | mrc p15, 0, r9, c1, c1, 0 @ auxiliary control reg |
533 | mrc p15, 0, r10, c1, c0, 0 @ control reg | ||
540 | bic r4, r4, #2 @ clear frequency change bit | 534 | bic r4, r4, #2 @ clear frequency change bit |
541 | stmia r0, {r4 - r9} @ store cp regs | 535 | stmia r0, {r4 - r10} @ store cp regs |
542 | ldmfd sp!, {r4 - r9, pc} | 536 | ldmfd sp!, {r4 - r10, pc} |
543 | ENDPROC(cpu_xscale_do_suspend) | 537 | ENDPROC(cpu_xscale_do_suspend) |
544 | 538 | ||
545 | ENTRY(cpu_xscale_do_resume) | 539 | ENTRY(cpu_xscale_do_resume) |
546 | ldmia r0, {r4 - r9} @ load cp regs | 540 | ldmia r0, {r4 - r10} @ load cp regs |
547 | mov ip, #0 | 541 | mov ip, #0 |
548 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | 542 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs |
549 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB | 543 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB |
@@ -551,9 +545,13 @@ ENTRY(cpu_xscale_do_resume) | |||
551 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg | 545 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg |
552 | mcr p15, 0, r6, c13, c0, 0 @ PID | 546 | mcr p15, 0, r6, c13, c0, 0 @ PID |
553 | mcr p15, 0, r7, c3, c0, 0 @ domain ID | 547 | mcr p15, 0, r7, c3, c0, 0 @ domain ID |
554 | mcr p15, 0, r1, c2, c0, 0 @ translation table base addr | 548 | mcr p15, 0, r8, c2, c0, 0 @ translation table base addr |
555 | mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg | 549 | mcr p15, 0, r9, c1, c1, 0 @ auxiliary control reg |
556 | mov r0, r9 @ control register | 550 | mov r0, r10 @ control register |
551 | mov r2, r8, lsr #14 @ get TTB0 base | ||
552 | mov r2, r2, lsl #14 | ||
553 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
554 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | ||
557 | b cpu_resume_mmu | 555 | b cpu_resume_mmu |
558 | ENDPROC(cpu_xscale_do_resume) | 556 | ENDPROC(cpu_xscale_do_resume) |
559 | #endif | 557 | #endif |
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index ea94765acf9..845f461f8ec 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S | |||
@@ -39,18 +39,10 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
39 | mov r0, r0, lsr #PAGE_SHIFT @ align address | 39 | mov r0, r0, lsr #PAGE_SHIFT @ align address |
40 | mov r1, r1, lsr #PAGE_SHIFT | 40 | mov r1, r1, lsr #PAGE_SHIFT |
41 | asid r3, r3 @ mask ASID | 41 | asid r3, r3 @ mask ASID |
42 | #ifdef CONFIG_ARM_ERRATA_720789 | ||
43 | ALT_SMP(W(mov) r3, #0 ) | ||
44 | ALT_UP(W(nop) ) | ||
45 | #endif | ||
46 | orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA | 42 | orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA |
47 | mov r1, r1, lsl #PAGE_SHIFT | 43 | mov r1, r1, lsl #PAGE_SHIFT |
48 | 1: | 44 | 1: |
49 | #ifdef CONFIG_ARM_ERRATA_720789 | ||
50 | ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) | ||
51 | #else | ||
52 | ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) | 45 | ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) |
53 | #endif | ||
54 | ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA | 46 | ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA |
55 | 47 | ||
56 | add r0, r0, #PAGE_SZ | 48 | add r0, r0, #PAGE_SZ |
@@ -75,11 +67,7 @@ ENTRY(v7wbi_flush_kern_tlb_range) | |||
75 | mov r0, r0, lsl #PAGE_SHIFT | 67 | mov r0, r0, lsl #PAGE_SHIFT |
76 | mov r1, r1, lsl #PAGE_SHIFT | 68 | mov r1, r1, lsl #PAGE_SHIFT |
77 | 1: | 69 | 1: |
78 | #ifdef CONFIG_ARM_ERRATA_720789 | ||
79 | ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable) | ||
80 | #else | ||
81 | ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) | 70 | ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) |
82 | #endif | ||
83 | ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA | 71 | ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA |
84 | add r0, r0, #PAGE_SZ | 72 | add r0, r0, #PAGE_SZ |
85 | cmp r0, r1 | 73 | cmp r0, r1 |
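After this change the per-page loop is the same on SMP and UP apart from the shareable/unified opcode. In outline, and assuming PAGE_SHIFT == 12, the operand packs the ASID into the low bits of the page-aligned MVA (tlbi_mva below is a hypothetical helper standing in for the mcr):

    #include <stdint.h>

    void flush_user_tlb_range(uint32_t start, uint32_t end, uint32_t asid)
    {
        uint32_t mva  = (start & ~0xfffu) | (asid & 0xff);  /* initial MVA  */
        uint32_t last = end & ~0xfffu;
        do {
            tlbi_mva(mva);            /* mcr p15, 0, mva, c8, c3/c7, 1 */
            mva += 0x1000;            /* PAGE_SZ */
        } while (mva < last);
    }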
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c index a631016e1f8..036fdbfdd62 100644 --- a/arch/arm/mm/vmregion.c +++ b/arch/arm/mm/vmregion.c | |||
@@ -1,8 +1,5 @@ | |||
1 | #include <linux/fs.h> | ||
2 | #include <linux/spinlock.h> | 1 | #include <linux/spinlock.h> |
3 | #include <linux/list.h> | 2 | #include <linux/list.h> |
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/seq_file.h> | ||
6 | #include <linux/slab.h> | 3 | #include <linux/slab.h> |
7 | 4 | ||
8 | #include "vmregion.h" | 5 | #include "vmregion.h" |
@@ -39,7 +36,7 @@ | |||
39 | 36 | ||
40 | struct arm_vmregion * | 37 | struct arm_vmregion * |
41 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | 38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, |
42 | size_t size, gfp_t gfp, const void *caller) | 39 | size_t size, gfp_t gfp) |
43 | { | 40 | { |
44 | unsigned long start = head->vm_start, addr = head->vm_end; | 41 | unsigned long start = head->vm_start, addr = head->vm_end; |
45 | unsigned long flags; | 42 | unsigned long flags; |
@@ -55,8 +52,6 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | |||
55 | if (!new) | 52 | if (!new) |
56 | goto out; | 53 | goto out; |
57 | 54 | ||
58 | new->caller = caller; | ||
59 | |||
60 | spin_lock_irqsave(&head->vm_lock, flags); | 55 | spin_lock_irqsave(&head->vm_lock, flags); |
61 | 56 | ||
62 | addr = rounddown(addr - size, align); | 57 | addr = rounddown(addr - size, align); |
@@ -134,72 +129,3 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) | |||
134 | 129 | ||
135 | kfree(c); | 130 | kfree(c); |
136 | } | 131 | } |
137 | |||
138 | #ifdef CONFIG_PROC_FS | ||
139 | static int arm_vmregion_show(struct seq_file *m, void *p) | ||
140 | { | ||
141 | struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); | ||
142 | |||
143 | seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, | ||
144 | c->vm_end - c->vm_start); | ||
145 | if (c->caller) | ||
146 | seq_printf(m, " %pS", (void *)c->caller); | ||
147 | seq_putc(m, '\n'); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) | ||
152 | { | ||
153 | struct arm_vmregion_head *h = m->private; | ||
154 | spin_lock_irq(&h->vm_lock); | ||
155 | return seq_list_start(&h->vm_list, *pos); | ||
156 | } | ||
157 | |||
158 | static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) | ||
159 | { | ||
160 | struct arm_vmregion_head *h = m->private; | ||
161 | return seq_list_next(p, &h->vm_list, pos); | ||
162 | } | ||
163 | |||
164 | static void arm_vmregion_stop(struct seq_file *m, void *p) | ||
165 | { | ||
166 | struct arm_vmregion_head *h = m->private; | ||
167 | spin_unlock_irq(&h->vm_lock); | ||
168 | } | ||
169 | |||
170 | static const struct seq_operations arm_vmregion_ops = { | ||
171 | .start = arm_vmregion_start, | ||
172 | .stop = arm_vmregion_stop, | ||
173 | .next = arm_vmregion_next, | ||
174 | .show = arm_vmregion_show, | ||
175 | }; | ||
176 | |||
177 | static int arm_vmregion_open(struct inode *inode, struct file *file) | ||
178 | { | ||
179 | struct arm_vmregion_head *h = PDE(inode)->data; | ||
180 | int ret = seq_open(file, &arm_vmregion_ops); | ||
181 | if (!ret) { | ||
182 | struct seq_file *m = file->private_data; | ||
183 | m->private = h; | ||
184 | } | ||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static const struct file_operations arm_vmregion_fops = { | ||
189 | .open = arm_vmregion_open, | ||
190 | .read = seq_read, | ||
191 | .llseek = seq_lseek, | ||
192 | .release = seq_release, | ||
193 | }; | ||
194 | |||
195 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | ||
196 | { | ||
197 | proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); | ||
198 | return 0; | ||
199 | } | ||
200 | #else | ||
201 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | #endif | ||
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h index 0f5a5f2a2c7..15e9f044db9 100644 --- a/arch/arm/mm/vmregion.h +++ b/arch/arm/mm/vmregion.h | |||
@@ -17,15 +17,13 @@ struct arm_vmregion { | |||
17 | struct list_head vm_list; | 17 | struct list_head vm_list; |
18 | unsigned long vm_start; | 18 | unsigned long vm_start; |
19 | unsigned long vm_end; | 19 | unsigned long vm_end; |
20 | struct page *vm_pages; | ||
20 | int vm_active; | 21 | int vm_active; |
21 | const void *caller; | ||
22 | }; | 22 | }; |
23 | 23 | ||
24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *); | 24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); |
25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); | 25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); |
26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); | 26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); |
27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); | 27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); |
28 | 28 | ||
29 | int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *); | ||
30 | |||
31 | #endif | 29 | #endif |
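With the caller-tracking field and the /proc plumbing gone, the surviving API is small. A hypothetical usage sketch (the head's vm_start/vm_end/vm_lock are assumed to be initialised elsewhere; names as in the header above):

    struct arm_vmregion *c = arm_vmregion_alloc(&head, PAGE_SIZE,      /* alignment */
                                                16 * PAGE_SIZE,        /* size      */
                                                GFP_KERNEL);
    if (c) {
        /* [c->vm_start, c->vm_end) is reserved until freed */
        arm_vmregion_free(&head, c);
    }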