Diffstat (limited to 'arch/arm/mm')
29 files changed, 764 insertions, 664 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ab5f7a21350b..d490f3773c01 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -10,8 +10,7 @@ config CPU_32
 
 # ARM610
 config CPU_ARM610
-	bool "Support ARM610 processor"
-	depends on ARCH_RPC
+	bool "Support ARM610 processor" if ARCH_RPC
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
@@ -43,8 +42,7 @@ config CPU_ARM7TDMI
 
 # ARM710
 config CPU_ARM710
-	bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
-	default y if ARCH_CLPS7500
+	bool "Support ARM710 processor" if ARCH_RPC
 	select CPU_32v3
 	select CPU_CACHE_V3
 	select CPU_CACHE_VIVT
@@ -63,8 +61,7 @@ config CPU_ARM710
 
 # ARM720T
 config CPU_ARM720T
-	bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
-	default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X
+	bool "Support ARM720T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
 	select CPU_PABRT_NOIFAR
@@ -114,9 +111,7 @@ config CPU_ARM9TDMI
 
 # ARM920T
 config CPU_ARM920T
-	bool "Support ARM920T processor"
-	depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200
-	default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200
+	bool "Support ARM920T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -138,8 +133,6 @@ config CPU_ARM920T
 # ARM922T
 config CPU_ARM922T
 	bool "Support ARM922T processor" if ARCH_INTEGRATOR
-	depends on ARCH_LH7A40X || ARCH_INTEGRATOR || ARCH_KS8695
-	default y if ARCH_LH7A40X || ARCH_KS8695
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -159,8 +152,6 @@ config CPU_ARM922T
 # ARM925T
 config CPU_ARM925T
 	bool "Support ARM925T processor" if ARCH_OMAP1
-	depends on ARCH_OMAP15XX
-	default y if ARCH_OMAP15XX
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -179,22 +170,7 @@ config CPU_ARM925T
 
 # ARM926T
 config CPU_ARM926T
-	bool "Support ARM926T processor"
-	depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || \
-		MACH_VERSATILE_AB || ARCH_OMAP730 || \
-		ARCH_OMAP16XX || MACH_REALVIEW_EB || \
-		ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
-		ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
-		ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
-		ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
-		ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
-	default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || \
-		ARCH_OMAP730 || ARCH_OMAP16XX || \
-		ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
-		ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
-		ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
-		ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
-		ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
+	bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
 	select CPU_PABRT_NOIFAR
@@ -247,8 +223,7 @@ config CPU_ARM946E
 
 # ARM1020 - needs validating
 config CPU_ARM1020
-	bool "Support ARM1020T (rev 0) processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -266,8 +241,7 @@ config CPU_ARM1020
 
 # ARM1020E - needs validating
 config CPU_ARM1020E
-	bool "Support ARM1020E processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1020E processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -280,8 +254,7 @@ config CPU_ARM1020E
 
 # ARM1022E
 config CPU_ARM1022
-	bool "Support ARM1022E processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1022E processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
 	select CPU_PABRT_NOIFAR
@@ -299,8 +272,7 @@ config CPU_ARM1022
 
 # ARM1026EJ-S
 config CPU_ARM1026
-	bool "Support ARM1026EJ-S processor"
-	depends on ARCH_INTEGRATOR
+	bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV5T	# But need Jazelle, but EV5TJ ignores bit 10
 	select CPU_PABRT_NOIFAR
@@ -317,8 +289,7 @@ config CPU_ARM1026
 
 # SA110
 config CPU_SA110
-	bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && ARCH_RPC
-	default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI
+	bool "Support StrongARM(R) SA-110 processor" if ARCH_RPC
 	select CPU_32v3 if ARCH_RPC
 	select CPU_32v4 if !ARCH_RPC
 	select CPU_ABRT_EV4
@@ -340,8 +311,6 @@ config CPU_SA110
 # SA1100
 config CPU_SA1100
 	bool
-	depends on ARCH_SA1100
-	default y
 	select CPU_32v4
 	select CPU_ABRT_EV4
 	select CPU_PABRT_NOIFAR
@@ -353,8 +322,6 @@ config CPU_SA1100
 # XScale
 config CPU_XSCALE
 	bool
-	depends on ARCH_IOP32X || ARCH_IOP33X || PXA25x || PXA27x || ARCH_IXP4XX || ARCH_IXP2000
-	default y
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_PABRT_NOIFAR
@@ -365,8 +332,6 @@ config CPU_XSCALE
 # XScale Core Version 3
 config CPU_XSC3
 	bool
-	depends on ARCH_IXP23XX || ARCH_IOP13XX || PXA3xx
-	default y
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_PABRT_NOIFAR
@@ -378,8 +343,6 @@ config CPU_XSC3
 # Feroceon
 config CPU_FEROCEON
 	bool
-	depends on ARCH_ORION5X || ARCH_LOKI || ARCH_KIRKWOOD || ARCH_MV78XX0
-	default y
 	select CPU_32v5
 	select CPU_ABRT_EV5T
 	select CPU_PABRT_NOIFAR
@@ -399,10 +362,7 @@ config CPU_FEROCEON_OLD_ID
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor"
-	depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
-	default y if ARCH_MX3
-	default y if ARCH_MSM
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_NOIFAR
@@ -427,8 +387,7 @@ config CPU_32v6K
 
 # ARMv7
 config CPU_V7
-	bool "Support ARM V7 processor"
-	depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP3
+	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
@@ -745,7 +704,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
-	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
+	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || REALVIEW_EB_A9MP
 	default y
 	select OUTER_CACHE
 	help
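The pattern applied throughout the Kconfig hunks above: a `depends on LIST` plus `default y if ...` pair is collapsed into a conditional prompt, `bool "..." if PLATFORM`. Only the named platform still sees the question; every other machine is expected to pull its CPU type in with `select` from its own Kconfig entry. A minimal sketch of the idiom (config names invented for illustration, not from the patch):

    # before: visibility and defaulting enumerated per platform
    config CPU_EXAMPLE
    	bool "Support EXAMPLE processor"
    	depends on ARCH_FOO || ARCH_BAR
    	default y if ARCH_BAR

    # after: only ARCH_FOO is asked; ARCH_BAR instead does
    # "select CPU_EXAMPLE" in its own machine entry
    config CPU_EXAMPLE
    	bool "Support EXAMPLE processor" if ARCH_FOO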
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 2d5884ce0435..3a398befed41 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -17,6 +17,7 @@
 #include <linux/string.h>
 #include <linux/proc_fs.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/uaccess.h>
 
 #include <asm/unaligned.h>
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 3b3639eb7ca5..8a4abebc478a 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -9,7 +9,6 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <mach/hardware.h>
 #include <asm/page.h>
 #include "proc-macros.S"
 
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 5786adf10040..3668611cb400 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -9,7 +9,6 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <mach/hardware.h>
 #include <asm/page.h>
 #include "proc-macros.S"
 
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 51a9b0b273b6..c54fa2cc40e6 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -13,7 +13,6 @@
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
-#include <mach/hardware.h>
 #include <asm/page.h>
 #include "proc-macros.S"
 
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index d19c2bec2b1f..be93ff02a98d 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -26,6 +26,7 @@
  *	- mm	- mm_struct describing address space
  */
 ENTRY(v7_flush_dcache_all)
+	dmb					@ ensure ordering with previous memory accesses
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
 	ands	r3, r0, #0x7000000		@ extract loc from clidr
 	mov	r3, r3, lsr #23			@ left align loc bit field
@@ -64,6 +65,7 @@ skip:
 finished:
 	mov	r10, #0				@ swith back to cache level 0
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	dsb
 	isb
 	mov	pc, lr
 ENDPROC(v7_flush_dcache_all)
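The two barriers added to v7_flush_dcache_all close ordering gaps around the set/way walk: the `dmb` makes data written before the call visible to the flush, and the `dsb` waits for the maintenance operations to finish before the final `isb` and return. A hedged C rendering of the same contract (helper name and elided body are illustrative, not from the patch):

    static inline void v7_flush_dcache_all_sketch(void)
    {
    	asm volatile("dmb" : : : "memory");	/* order prior stores before the walk */
    	/* ... clean+invalidate each cache level by set/way, as in the .S above ... */
    	asm volatile("dsb" : : : "memory");	/* wait for maintenance to complete */
    	asm volatile("isb");			/* resynchronise the pipeline */
    }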
diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
deleted file mode 100644
index 7eb0d320d240..000000000000
--- a/arch/arm/mm/copypage-feroceon.S
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-feroceon.S
- *
- *  Copyright (C) 2008 Marvell Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles copy_user_page and clear_user_page on Feroceon
- * more optimally than the generic implementations.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-
-ENTRY(feroceon_copy_user_page)
-	stmfd	sp!, {r4-r9, lr}
-	mov	ip, #PAGE_SZ
-1:	mov	lr, r1
-	ldmia	r1!, {r2 - r9}
-	pld	[lr, #32]
-	pld	[lr, #64]
-	pld	[lr, #96]
-	pld	[lr, #128]
-	pld	[lr, #160]
-	pld	[lr, #192]
-	pld	[lr, #224]
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	subs	ip, ip, #(32 * 8)
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
-	ldmfd	sp!, {r4-r9, pc}
-
-	.align	5
-
-ENTRY(feroceon_clear_user_page)
-	stmfd	sp!, {r4-r7, lr}
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-	mov	r4, #0
-	mov	r5, #0
-	mov	r6, #0
-	mov	r7, #0
-	mov	ip, #0
-	mov	lr, #0
-1:	stmia	r0, {r2-r7, ip, lr}
-	subs	r1, r1, #1
-	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
-	ldmfd	sp!, {r4-r7, pc}
-
-	__INITDATA
-
-	.type	feroceon_user_fns, #object
-ENTRY(feroceon_user_fns)
-	.long	feroceon_clear_user_page
-	.long	feroceon_copy_user_page
-	.size	feroceon_user_fns, . - feroceon_user_fns
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
new file mode 100644
index 000000000000..c3ba6a94da0c
--- /dev/null
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -0,0 +1,111 @@
+/*
+ *  linux/arch/arm/mm/copypage-feroceon.S
+ *
+ *  Copyright (C) 2008 Marvell Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles copy_user_highpage and clear_user_page on Feroceon
+ * more optimally than the generic implementations.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+static void __attribute__((naked))
+feroceon_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4-r9, lr}		\n\
+	mov	ip, %0				\n\
+1:	mov	lr, r1				\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	pld	[lr, #32]			\n\
+	pld	[lr, #64]			\n\
+	pld	[lr, #96]			\n\
+	pld	[lr, #128]			\n\
+	pld	[lr, #160]			\n\
+	pld	[lr, #192]			\n\
+	pld	[lr, #224]			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	ldmia	r1!, {r2 - r9}			\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	stmia	r0, {r2 - r9}			\n\
+	subs	ip, ip, #(32 * 8)		\n\
+	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	r0, r0, #32			\n\
+	bne	1b				\n\
+	mcr	p15, 0, ip, c7, c10, 4		@ drain WB\n\
+	ldmfd	sp!, {r4-r9, pc}"
+	:
+	: "I" (PAGE_SIZE));
+}
+
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	feroceon_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+	mov	r4, #0				\n\
+	mov	r5, #0				\n\
+	mov	r6, #0				\n\
+	mov	r7, #0				\n\
+	mov	ip, #0				\n\
+	mov	lr, #0				\n\
+1:	stmia	%0, {r2-r7, ip, lr}		\n\
+	subs	r1, r1, #1			\n\
+	mcr	p15, 0, %0, c7, c14, 1		@ clean and invalidate D line\n\
+	add	%0, %0, #32			\n\
+	bne	1b				\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns feroceon_user_fns __initdata = {
+	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
+	.cpu_copy_user_highpage = feroceon_copy_user_highpage,
+};
+
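The point of switching the interface from kernel virtual addresses to `struct page *` is highmem: the caller may not have a permanent kernel mapping for the page, so each implementation now kmaps and kunmaps the pages itself, which is only practical from C. The per-CPU table is then dispatched roughly as below — a sketch of how arch/arm's multi-user glue consumes these tables; the declarations are assumed and are not part of this diff:

    struct cpu_user_fns {
    	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
    	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
    			unsigned long vaddr);
    };

    extern struct cpu_user_fns cpu_user;	/* points at e.g. feroceon_user_fns */

    #define clear_user_highpage(page, vaddr) \
    	cpu_user.cpu_clear_user_highpage(page, vaddr)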
diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S
deleted file mode 100644
index 2ee394b11bcb..000000000000
--- a/arch/arm/mm/copypage-v3.S
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv3 optimised copy_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_copy_user_page)
-	stmfd	sp!, {r4, lr}			@	2
-	mov	r2, #PAGE_SZ/64			@	1
-	ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-1:	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmia	r1!, {r3, r4, ip, lr}		@	4+1
-	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmia	r1!, {r3, r4, ip, lr}		@	4
-	subs	r2, r2, #1			@	1
-	stmia	r0!, {r3, r4, ip, lr}		@	4
-	ldmneia	r1!, {r3, r4, ip, lr}		@	4
-	bne	1b				@	1
-	ldmfd	sp!, {r4, pc}			@	3
-
-	.align	5
-/*
- * ARMv3 optimised clear_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v3_user_fns, #object
-ENTRY(v3_user_fns)
-	.long	v3_clear_user_page
-	.long	v3_copy_user_page
-	.size	v3_user_fns, . - v3_user_fns
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
new file mode 100644
index 000000000000..70ed96c8af8e
--- /dev/null
+++ b/arch/arm/mm/copypage-v3.c
@@ -0,0 +1,81 @@
+/*
+ *  linux/arch/arm/mm/copypage-v3.c
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv3 optimised copy_user_highpage
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\n\
+	stmfd	sp!, {r4, lr}			@	2\n\
+	mov	r2, %2				@	1\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
+1:	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmia	%0!, {r3, r4, ip, lr}		@	4\n\
+	subs	r2, r2, #1			@	1\n\
+	stmia	%1!, {r3, r4, ip, lr}		@	4\n\
+	ldmneia	%0!, {r3, r4, ip, lr}		@	4\n\
+	bne	1b				@	1\n\
+	ldmfd	sp!, {r4, pc}			@	3"
+	:
+	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
+}
+
+void v3_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v3_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv3 optimised clear_user_page
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\n\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v3_user_fns __initdata = {
+	.cpu_clear_user_highpage = v3_clear_user_highpage,
+	.cpu_copy_user_highpage = v3_copy_user_highpage,
+};
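The copy half of each conversion keeps the old assembler body inside a C function marked `__attribute__((naked))`: the compiler emits no prologue or epilogue, the two pointers arrive in r0 and r1 per the ARM calling convention (which is why the asm may keep naming those registers directly), and the routine returns by loading pc itself. An illustrative skeleton of the shape, not a working copy loop:

    static void __attribute__((naked))
    example_copy_user_page(void *kto, const void *kfrom)
    {
    	/* no compiler prologue: r0 = kto, r1 = kfrom on entry */
    	asm volatile("\
    	stmfd	sp!, {r4, lr}		@ save what the loop clobbers\n\
    	@ ... copy loop using r0/r1, as in the files above ...\n\
    	ldmfd	sp!, {r4, pc}		@ restore and return");
    }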
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e2549344..bdb5fd983b15 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
 static void __attribute__((naked))
 mc_copy_user_page(void *from, void *to)
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * ARMv4 optimised clear_user_page
  */
-void __attribute__((naked))
-v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	asm volatile(
-	"str	lr, [sp, #-4]!\n\
-	mov	r1, %0				@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
 	mov	r3, #0				@ 1\n\
 	mov	ip, #0				@ 1\n\
 	mov	lr, #0				@ 1\n\
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
 	subs	r1, r1, #1			@ 1\n\
-	bne	1b				@ 1\n\
-	ldr	pc, [sp], #4"
-	:
-	: "I" (PAGE_SIZE / 64));
+	bne	1b				@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= v4_mc_clear_user_page,
-	.cpu_copy_user_page	= v4_mc_copy_user_page,
+	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
+	.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
 };
diff --git a/arch/arm/mm/copypage-v4wb.S b/arch/arm/mm/copypage-v4wb.S
deleted file mode 100644
index 83117354b1cd..000000000000
--- a/arch/arm/mm/copypage-v4wb.S
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address.  Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4wb_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wb_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wb_user_fns, #object
-ENTRY(v4wb_user_fns)
-	.long	v4wb_clear_user_page
-	.long	v4wb_copy_user_page
-	.size	v4wb_user_fns, . - v4wb_user_fns
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
new file mode 100644
index 000000000000..3ec93dab7656
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -0,0 +1,94 @@
+/*
+ *  linux/arch/arm/mm/copypage-v4wb.c
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction.  If your processor does not supply this, you have to write your
+ * own copy_user_highpage that does the right thing.
+ */
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %0				@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wb_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1		@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4		@ 1   drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wb_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
+	.cpu_copy_user_highpage = v4wb_copy_user_highpage,
+};
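Each write-back variant ends with `mcr p15, 0, rN, c7, c10, 4`, the ARMv4/v5 drain-write-buffer operation; without it the freshly written page could still be sitting in the write buffer when the caller goes on to map it for userspace. As a standalone helper it would look roughly like this (helper name assumed for illustration):

    static inline void drain_write_buffer(void)
    {
    	unsigned long zero = 0;

    	/* CP15 c7, c10, 4: stall until the write buffer has emptied */
    	asm volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (zero) : "memory");
    }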
diff --git a/arch/arm/mm/copypage-v4wt.S b/arch/arm/mm/copypage-v4wt.S
deleted file mode 100644
index e1f2af28d549..000000000000
--- a/arch/arm/mm/copypage-v4wt.S
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-v4.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- *
- *  This is for CPUs with a writethrough cache and 'flush ID cache' is
- *  the only supported cache operation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * Since we have writethrough caches, we don't have to worry about
- * dirty data in the cache.  However, we do have to ensure that
- * subsequent reads are up to date.
- */
-ENTRY(v4wt_copy_user_page)
-	stmfd	sp!, {r4, lr}			@ 2
-	mov	r2, #PAGE_SZ/64			@ 1
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-1:	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmia	r1!, {r3, r4, ip, lr}		@ 4
-	subs	r2, r2, #1			@ 1
-	stmia	r0!, {r3, r4, ip, lr}		@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}		@ 4
-	bne	1b				@ 1
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache
-	ldmfd	sp!, {r4, pc}			@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wt_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64			@ 1
-	mov	r2, #0				@ 1
-	mov	r3, #0				@ 1
-	mov	ip, #0				@ 1
-	mov	lr, #0				@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	stmia	r0!, {r2, r3, ip, lr}		@ 4
-	subs	r1, r1, #1			@ 1
-	bne	1b				@ 1
-	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wt_user_fns, #object
-ENTRY(v4wt_user_fns)
-	.long	v4wt_clear_user_page
-	.long	v4wt_copy_user_page
-	.size	v4wt_user_fns, . - v4wt_user_fns
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
new file mode 100644
index 000000000000..0f1188efae45
--- /dev/null
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -0,0 +1,88 @@
+/*
+ *  linux/arch/arm/mm/copypage-v4wt.S
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is for CPUs with a writethrough cache and 'flush ID cache' is
+ *  the only supported cache operation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * Since we have writethrough caches, we don't have to worry about
+ * dirty data in the cache.  However, we do have to ensure that
+ * subsequent reads are up to date.
+ */
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}			@ 2\n\
+	mov	r2, %0				@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+1:	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	subs	r2, r2, #1			@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}		@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}		@ 4\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache\n\
+	ldmfd	sp!, {r4, pc}			@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2				@ 1\n\
+	mov	r2, #0				@ 1\n\
+	mov	r3, #0				@ 1\n\
+	mov	ip, #0				@ 1\n\
+	mov	lr, #0				@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}		@ 4\n\
+	subs	r1, r1, #1			@ 1\n\
+	bne	1b				@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0		@ flush ID cache"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wt_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
+	.cpu_copy_user_highpage = v4wt_copy_user_highpage,
+};
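Note the recurring inline-asm shape in these clear_user_highpage conversions: the page address enters through a `"0"` constraint tied to a dummy output, so the asm is free to post-increment %0, and every scratch register is listed as a clobber rather than silently trashed. A reduced example of the same shape (simplified word-at-a-time loop; assumes PAGE_SIZE/4 is a valid immediate):

    static void example_clear_page(void *kaddr)
    {
    	void *ptr;

    	asm volatile("\
    	mov	r1, %2			@ word count\n\
    	mov	r2, #0			\n\
    1:	str	r2, [%0], #4		@ store zero, post-increment\n\
    	subs	r1, r1, #1		\n\
    	bne	1b"
    	: "=r" (ptr)			/* dummy output: %0 is modified */
    	: "0" (kaddr), "I" (PAGE_SIZE / 4)
    	: "r1", "r2", "cc");
    }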
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c0767580..4127a7bddfe5 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
@@ -33,41 +33,56 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from, KM_USER0);
+	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kfrom, KM_USER0);
 }
 
 /*
  * Clear the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
  */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
 	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
 	   : "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
 
 	/*
 	 * Now copy the page using the same cache colour as the
@@ -75,16 +90,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to = to_address + (offset << PAGE_SHIFT);
+	kfrom = from_address + (offset << PAGE_SHIFT);
+	kto = to_address + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	flush_tlb_kernel_page(kfrom);
+	flush_tlb_kernel_page(kto);
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
 	spin_unlock(&v6_lock);
 }
@@ -94,20 +109,13 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
  * so remap the kernel page into the same cache colour as the user
  * page.
  */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
@@ -115,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -123,15 +131,15 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
+	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
+		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;
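For the aliasing VIPT case the destination has to be written through a kernel alias of the same cache colour as the user mapping, or the user would keep seeing stale lines. `CACHE_COLOUR(vaddr)` picks that alias; with the usual ARMv6 parameters (values assumed here for illustration — the real definitions live in asm/shmparam.h) it is just the low bits of the virtual page number:

    /* assumed for illustration */
    #define SHMLBA			(4 * PAGE_SIZE)
    #define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)

    /* e.g. vaddr 0xbf003000 -> colour 3, so the copy goes through
     * TOP_PTE window slot 3 and lands in the same cache sets as the
     * user's mapping of the page. */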
diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S
deleted file mode 100644
index 9a2cb4332b4c..000000000000
--- a/arch/arm/mm/copypage-xsc3.S
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-xsc3.S
- *
- *  Copyright (C) 2004 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Adapted for 3rd gen XScale core, no more mini-dcache
- * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-/*
- * General note:
- * We don't really want write-allocate cache behaviour for these functions
- * since that will just eat through 8K of the cache.
- */
-
-	.text
-	.align	5
-/*
- * XSC3 optimised copy_user_page
- *  r0 = destination
- *  r1 = source
- *  r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- */
-ENTRY(xsc3_mc_copy_user_page)
-	stmfd	sp!, {r4, r5, lr}
-	mov	lr, #PAGE_SZ/64-1
-
-	pld	[r1, #0]
-	pld	[r1, #32]
-1:	pld	[r1, #64]
-	pld	[r1, #96]
-
-2:	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1		@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	subs	lr, lr, #1
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	bgt	1b
-	beq	2b
-
-	ldmfd	sp!, {r4, r5, pc}
-
-	.align	5
-/*
- * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
- */
-ENTRY(xsc3_mc_clear_user_page)
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	subs	r1, r1, #1
-	bne	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	.type	xsc3_mc_user_fns, #object
-ENTRY(xsc3_mc_user_fns)
-	.long	xsc3_mc_clear_user_page
-	.long	xsc3_mc_copy_user_page
-	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c new file mode 100644 index 000000000000..39a994542cad --- /dev/null +++ b/arch/arm/mm/copypage-xsc3.c | |||
@@ -0,0 +1,113 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mm/copypage-xsc3.c | ||
3 | * | ||
4 | * Copyright (C) 2004 Intel Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * Adapted for 3rd gen XScale core, no more mini-dcache | ||
11 | * Author: Matt Gilbert (matthew.m.gilbert@intel.com) | ||
12 | */ | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/highmem.h> | ||
15 | |||
16 | /* | ||
17 | * General note: | ||
18 | * We don't really want write-allocate cache behaviour for these functions | ||
19 | * since that will just eat through 8K of the cache. | ||
20 | */ | ||
21 | |||
22 | /* | ||
23 | * XSC3 optimised copy_user_highpage | ||
24 | * r0 = destination | ||
25 | * r1 = source | ||
26 | * | ||
27 | * The source page may have some clean entries in the cache already, but we | ||
28 | * can safely ignore them - break_cow() will flush them out of the cache | ||
29 | * if we eventually end up using our copied page. | ||
30 | * | ||
31 | */ | ||
32 | static void __attribute__((naked)) | ||
33 | xsc3_mc_copy_user_page(void *kto, const void *kfrom) | ||
34 | { | ||
35 | asm("\ | ||
36 | stmfd sp!, {r4, r5, lr} \n\ | ||
37 | mov lr, %0 \n\ | ||
38 | \n\ | ||
39 | pld [r1, #0] \n\ | ||
40 | pld [r1, #32] \n\ | ||
41 | 1: pld [r1, #64] \n\ | ||
42 | pld [r1, #96] \n\ | ||
43 | \n\ | ||
44 | 2: ldrd r2, [r1], #8 \n\ | ||
45 | mov ip, r0 \n\ | ||
46 | ldrd r4, [r1], #8 \n\ | ||
47 | mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ | ||
48 | strd r2, [r0], #8 \n\ | ||
49 | ldrd r2, [r1], #8 \n\ | ||
50 | strd r4, [r0], #8 \n\ | ||
51 | ldrd r4, [r1], #8 \n\ | ||
52 | strd r2, [r0], #8 \n\ | ||
53 | strd r4, [r0], #8 \n\ | ||
54 | ldrd r2, [r1], #8 \n\ | ||
55 | mov ip, r0 \n\ | ||
56 | ldrd r4, [r1], #8 \n\ | ||
57 | mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ | ||
58 | strd r2, [r0], #8 \n\ | ||
59 | ldrd r2, [r1], #8 \n\ | ||
60 | subs lr, lr, #1 \n\ | ||
61 | strd r4, [r0], #8 \n\ | ||
62 | ldrd r4, [r1], #8 \n\ | ||
63 | strd r2, [r0], #8 \n\ | ||
64 | strd r4, [r0], #8 \n\ | ||
65 | bgt 1b \n\ | ||
66 | beq 2b \n\ | ||
67 | \n\ | ||
68 | ldmfd sp!, {r4, r5, pc}" | ||
69 | : | ||
70 | : "I" (PAGE_SIZE / 64 - 1)); | ||
71 | } | ||
72 | |||
73 | void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, | ||
74 | unsigned long vaddr) | ||
75 | { | ||
76 | void *kto, *kfrom; | ||
77 | |||
78 | kto = kmap_atomic(to, KM_USER0); | ||
79 | kfrom = kmap_atomic(from, KM_USER1); | ||
80 | xsc3_mc_copy_user_page(kto, kfrom); | ||
81 | kunmap_atomic(kfrom, KM_USER1); | ||
82 | kunmap_atomic(kto, KM_USER0); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * XSC3 optimised clear_user_highpage | ||
87 | * page = page to clear | ||
88 | * vaddr = virtual user address of ultimate destination page | ||
89 | */ | ||
90 | void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | ||
91 | { | ||
92 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | ||
93 | asm volatile ("\ | ||
94 | mov r1, %2 \n\ | ||
95 | mov r2, #0 \n\ | ||
96 | mov r3, #0 \n\ | ||
97 | 1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ | ||
98 | strd r2, [%0], #8 \n\ | ||
99 | strd r2, [%0], #8 \n\ | ||
100 | strd r2, [%0], #8 \n\ | ||
101 | strd r2, [%0], #8 \n\ | ||
102 | subs r1, r1, #1 \n\ | ||
103 | bne 1b" | ||
104 | : "=r" (ptr) | ||
105 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | ||
106 | : "r1", "r2", "r3"); | ||
107 | kunmap_atomic(kaddr, KM_USER0); | ||
108 | } | ||
109 | |||
110 | struct cpu_user_fns xsc3_mc_user_fns __initdata = { | ||
111 | .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage, | ||
112 | .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage, | ||
113 | }; | ||
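The conversion above keeps the unrolled ldrd/strd loop in a naked C function: __attribute__((naked)) suppresses the compiler prologue/epilogue, so per the AAPCS kto arrives in r0 and kfrom in r1, just as the deleted ENTRY(xsc3_mc_copy_user_page) expected. A host-side model of the loop's trip count, a minimal sketch with illustrative names only:

    #include <assert.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* The asm moves 64 bytes per pass; "mov lr, %0" seeds the counter with
     * PAGE_SIZE/64 - 1 = 63, and the bgt/beq pair takes 63 looping passes
     * plus one final pass, covering all 4096 bytes. */
    static void model_copy(char *kto, const char *kfrom)
    {
        long lr = PAGE_SIZE / 64 - 1;

        do {
            memcpy(kto, kfrom, 64);     /* one unrolled ldrd/strd iteration */
            kto += 64;
            kfrom += 64;
        } while (lr-- > 0);
    }

    int main(void)
    {
        static char src[PAGE_SIZE] = { 1 }, dst[PAGE_SIZE];

        model_copy(dst, src);
        assert(memcmp(dst, src, PAGE_SIZE) == 0);
        return 0;
    }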
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index bad49331bbf9..d18f2397ee2d 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
@@ -15,8 +15,8 @@ | |||
15 | */ | 15 | */ |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/highmem.h> | ||
18 | 19 | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/pgtable.h> | 20 | #include <asm/pgtable.h> |
21 | #include <asm/tlbflush.h> | 21 | #include <asm/tlbflush.h> |
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
@@ -35,7 +35,7 @@ | |||
35 | static DEFINE_SPINLOCK(minicache_lock); | 35 | static DEFINE_SPINLOCK(minicache_lock); |
36 | 36 | ||
37 | /* | 37 | /* |
38 | * XScale mini-dcache optimised copy_user_page | 38 | * XScale mini-dcache optimised copy_user_highpage |
39 | * | 39 | * |
40 | * We flush the destination cache lines just before we write the data into the | 40 | * We flush the destination cache lines just before we write the data into the |
41 | * corresponding address. Since the Dcache is read-allocate, this removes the | 41 | * corresponding address. Since the Dcache is read-allocate, this removes the |
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to) | |||
90 | : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); | 90 | : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); |
91 | } | 91 | } |
92 | 92 | ||
93 | void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) | 93 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, |
94 | unsigned long vaddr) | ||
94 | { | 95 | { |
95 | struct page *page = virt_to_page(kfrom); | 96 | void *kto = kmap_atomic(to, KM_USER1); |
96 | 97 | ||
97 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | 98 | if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) |
98 | __flush_dcache_page(page_mapping(page), page); | 99 | __flush_dcache_page(page_mapping(from), from); |
99 | 100 | ||
100 | spin_lock(&minicache_lock); | 101 | spin_lock(&minicache_lock); |
101 | 102 | ||
102 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); | 103 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); |
103 | flush_tlb_kernel_page(COPYPAGE_MINICACHE); | 104 | flush_tlb_kernel_page(COPYPAGE_MINICACHE); |
104 | 105 | ||
105 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); | 106 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); |
106 | 107 | ||
107 | spin_unlock(&minicache_lock); | 108 | spin_unlock(&minicache_lock); |
109 | |||
110 | kunmap_atomic(kto, KM_USER1); | ||
108 | } | 111 | } |
109 | 112 | ||
110 | /* | 113 | /* |
111 | * XScale optimised clear_user_page | 114 | * XScale optimised clear_user_highpage
112 | */ | 115 | */ |
113 | void __attribute__((naked)) | 116 | void |
114 | xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) | 117 | xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
115 | { | 118 | { |
119 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | ||
116 | asm volatile( | 120 | asm volatile( |
117 | "mov r1, %0 \n\ | 121 | "mov r1, %2 \n\ |
118 | mov r2, #0 \n\ | 122 | mov r2, #0 \n\ |
119 | mov r3, #0 \n\ | 123 | mov r3, #0 \n\ |
120 | 1: mov ip, r0 \n\ | 124 | 1: mov ip, %0 \n\ |
121 | strd r2, [r0], #8 \n\ | 125 | strd r2, [%0], #8 \n\ |
122 | strd r2, [r0], #8 \n\ | 126 | strd r2, [%0], #8 \n\ |
123 | strd r2, [r0], #8 \n\ | 127 | strd r2, [%0], #8 \n\ |
124 | strd r2, [r0], #8 \n\ | 128 | strd r2, [%0], #8 \n\ |
125 | mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ | 129 | mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ |
126 | subs r1, r1, #1 \n\ | 130 | subs r1, r1, #1 \n\ |
127 | mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ | 131 | mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ |
128 | bne 1b \n\ | 132 | bne 1b" |
129 | mov pc, lr" | 133 | : "=r" (ptr) |
130 | : | 134 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
131 | : "I" (PAGE_SIZE / 32)); | 135 | : "r1", "r2", "r3", "ip"); |
136 | kunmap_atomic(kaddr, KM_USER0); | ||
132 | } | 137 | } |
133 | 138 | ||
134 | struct cpu_user_fns xscale_mc_user_fns __initdata = { | 139 | struct cpu_user_fns xscale_mc_user_fns __initdata = { |
135 | .cpu_clear_user_page = xscale_mc_clear_user_page, | 140 | .cpu_clear_user_highpage = xscale_mc_clear_user_highpage, |
136 | .cpu_copy_user_page = xscale_mc_copy_user_page, | 141 | .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, |
137 | }; | 142 | }; |
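Note the shape change in the clear routine: once the function gains real C statements (the kmap_atomic()/kunmap_atomic() pair), __attribute__((naked)) is no longer legal, so the body becomes ordinary extended inline asm. The advancing pointer is expressed as an output operand tied to its input register via the "0" constraint, and the scratch registers move into the clobber list. A stripped-down sketch of that pattern (illustrative; it additionally clobbers "cc" and "memory", which the subs and the stores touch):

    /* %0 starts as kaddr and is advanced by the post-indexed stores;
     * r1 is the word counter and r2 holds the zero value. */
    static void clear_page_model(void *kaddr)
    {
        void *ptr;

        asm volatile(
        "   mov     r1, %2              \n"
        "   mov     r2, #0              \n"
        "1: str     r2, [%0], #4        \n"
        "   subs    r1, r1, #1          \n"
        "   bne     1b                  \n"
        : "=r" (ptr)
        : "0" (kaddr), "I" (4096 / 4)
        : "r1", "r2", "cc", "memory");
    }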
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 22c9530e91e2..0455557a2899 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/kprobes.h> | 16 | #include <linux/kprobes.h> |
17 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
18 | #include <linux/page-flags.h> | ||
18 | 19 | ||
19 | #include <asm/system.h> | 20 | #include <asm/system.h> |
20 | #include <asm/pgtable.h> | 21 | #include <asm/pgtable.h> |
@@ -84,13 +85,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
84 | break; | 85 | break; |
85 | } | 86 | } |
86 | 87 | ||
87 | #ifndef CONFIG_HIGHMEM | ||
88 | /* We must not map this if we have highmem enabled */ | 88 | /* We must not map this if we have highmem enabled */ |
89 | if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) | ||
90 | break; | ||
91 | |||
89 | pte = pte_offset_map(pmd, addr); | 92 | pte = pte_offset_map(pmd, addr); |
90 | printk(", *pte=%08lx", pte_val(*pte)); | 93 | printk(", *pte=%08lx", pte_val(*pte)); |
91 | printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); | 94 | printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); |
92 | pte_unmap(pte); | 95 | pte_unmap(pte); |
93 | #endif | ||
94 | } while(0); | 96 | } while(0); |
95 | 97 | ||
96 | printk("\n"); | 98 | printk("\n"); |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 82c4b4217989..34df4d9d03a6 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/initrd.h> | 17 | #include <linux/initrd.h> |
18 | 18 | ||
19 | #include <asm/mach-types.h> | 19 | #include <asm/mach-types.h> |
20 | #include <asm/sections.h> | ||
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <asm/sizes.h> | 22 | #include <asm/sizes.h> |
22 | #include <asm/tlb.h> | 23 | #include <asm/tlb.h> |
@@ -64,10 +65,11 @@ static int __init parse_tag_initrd2(const struct tag *tag) | |||
64 | __tagtable(ATAG_INITRD2, parse_tag_initrd2); | 65 | __tagtable(ATAG_INITRD2, parse_tag_initrd2); |
65 | 66 | ||
66 | /* | 67 | /* |
67 | * This is used to pass memory configuration data from paging_init | 68 | * This keeps memory configuration data used by a couple of memory
68 | * to mem_init, and by show_mem() to skip holes in the memory map. | 69 | * initialization functions, as well as by show_mem() to skip
70 | * holes in the memory map. It is populated by arm_add_memory(). | ||
69 | */ | 71 | */ |
70 | static struct meminfo meminfo = { 0, }; | 72 | struct meminfo meminfo; |
71 | 73 | ||
72 | void show_mem(void) | 74 | void show_mem(void) |
73 | { | 75 | { |
@@ -128,7 +130,7 @@ find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages) | |||
128 | { | 130 | { |
129 | unsigned int start_pfn, i, bootmap_pfn; | 131 | unsigned int start_pfn, i, bootmap_pfn; |
130 | 132 | ||
131 | start_pfn = PAGE_ALIGN(__pa(&_end)) >> PAGE_SHIFT; | 133 | start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT; |
132 | bootmap_pfn = 0; | 134 | bootmap_pfn = 0; |
133 | 135 | ||
134 | for_each_nodebank(i, mi, node) { | 136 | for_each_nodebank(i, mi, node) { |
@@ -331,13 +333,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) | |||
331 | free_area_init_node(node, zone_size, start_pfn, zhole_size); | 333 | free_area_init_node(node, zone_size, start_pfn, zhole_size); |
332 | } | 334 | } |
333 | 335 | ||
334 | void __init bootmem_init(struct meminfo *mi) | 336 | void __init bootmem_init(void) |
335 | { | 337 | { |
338 | struct meminfo *mi = &meminfo; | ||
336 | unsigned long memend_pfn = 0; | 339 | unsigned long memend_pfn = 0; |
337 | int node, initrd_node; | 340 | int node, initrd_node; |
338 | 341 | ||
339 | memcpy(&meminfo, mi, sizeof(meminfo)); | ||
340 | |||
341 | /* | 342 | /* |
342 | * Locate which node contains the ramdisk image, if any. | 343 | * Locate which node contains the ramdisk image, if any. |
343 | */ | 344 | */ |
@@ -394,20 +395,22 @@ void __init bootmem_init(struct meminfo *mi) | |||
394 | max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; | 395 | max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; |
395 | } | 396 | } |
396 | 397 | ||
397 | static inline void free_area(unsigned long addr, unsigned long end, char *s) | 398 | static inline int free_area(unsigned long pfn, unsigned long end, char *s) |
398 | { | 399 | { |
399 | unsigned int size = (end - addr) >> 10; | 400 | unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); |
400 | 401 | ||
401 | for (; addr < end; addr += PAGE_SIZE) { | 402 | for (; pfn < end; pfn++) { |
402 | struct page *page = virt_to_page(addr); | 403 | struct page *page = pfn_to_page(pfn); |
403 | ClearPageReserved(page); | 404 | ClearPageReserved(page); |
404 | init_page_count(page); | 405 | init_page_count(page); |
405 | free_page(addr); | 406 | __free_page(page); |
406 | totalram_pages++; | 407 | pages++; |
407 | } | 408 | } |
408 | 409 | ||
409 | if (size && s) | 410 | if (size && s) |
410 | printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); | 411 | printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); |
412 | |||
413 | return pages; | ||
411 | } | 414 | } |
412 | 415 | ||
413 | static inline void | 416 | static inline void |
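The reworked free_area() counts in pfns and reports in KiB with a single shift: (end - pfn) pages times 2^PAGE_SHIFT bytes, divided by 1024, folds into << (PAGE_SHIFT - 10). A standalone check of that arithmetic (the PAGE_SHIFT value is an assumption for illustration):

    #include <stdio.h>

    int main(void)
    {
        const unsigned int page_shift = 12;       /* assume 4 KiB pages */
        unsigned long pfn = 0x100, end = 0x180;   /* 128 pages */

        /* pages -> bytes is << page_shift; bytes -> KiB is >> 10 */
        unsigned int size = (end - pfn) << (page_shift - 10);

        printf("%u KiB\n", size);                 /* prints "512 KiB" */
        return 0;
    }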
@@ -478,13 +481,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) | |||
478 | */ | 481 | */ |
479 | void __init mem_init(void) | 482 | void __init mem_init(void) |
480 | { | 483 | { |
481 | unsigned int codepages, datapages, initpages; | 484 | unsigned int codesize, datasize, initsize; |
482 | int i, node; | 485 | int i, node; |
483 | 486 | ||
484 | codepages = &_etext - &_text; | ||
485 | datapages = &_end - &__data_start; | ||
486 | initpages = &__init_end - &__init_begin; | ||
487 | |||
488 | #ifndef CONFIG_DISCONTIGMEM | 487 | #ifndef CONFIG_DISCONTIGMEM |
489 | max_mapnr = virt_to_page(high_memory) - mem_map; | 488 | max_mapnr = virt_to_page(high_memory) - mem_map; |
490 | #endif | 489 | #endif |
@@ -501,7 +500,8 @@ void __init mem_init(void) | |||
501 | 500 | ||
502 | #ifdef CONFIG_SA1111 | 501 | #ifdef CONFIG_SA1111 |
503 | /* now that our DMA memory is actually so designated, we can free it */ | 502 | /* now that our DMA memory is actually so designated, we can free it */ |
504 | free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL); | 503 | totalram_pages += free_area(PHYS_PFN_OFFSET, |
504 | __phys_to_pfn(__pa(swapper_pg_dir)), NULL); | ||
505 | #endif | 505 | #endif |
506 | 506 | ||
507 | /* | 507 | /* |
@@ -509,18 +509,21 @@ void __init mem_init(void) | |||
509 | * real number of pages we have in this system | 509 | * real number of pages we have in this system |
510 | */ | 510 | */ |
511 | printk(KERN_INFO "Memory:"); | 511 | printk(KERN_INFO "Memory:"); |
512 | |||
513 | num_physpages = 0; | 512 | num_physpages = 0; |
514 | for (i = 0; i < meminfo.nr_banks; i++) { | 513 | for (i = 0; i < meminfo.nr_banks; i++) { |
515 | num_physpages += bank_pfn_size(&meminfo.bank[i]); | 514 | num_physpages += bank_pfn_size(&meminfo.bank[i]); |
516 | printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); | 515 | printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); |
517 | } | 516 | } |
518 | |||
519 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | 517 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); |
518 | |||
519 | codesize = _etext - _text; | ||
520 | datasize = _end - _data; | ||
521 | initsize = __init_end - __init_begin; | ||
522 | |||
520 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " | 523 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " |
521 | "%dK data, %dK init)\n", | 524 | "%dK data, %dK init)\n", |
522 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 525 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), |
523 | codepages >> 10, datapages >> 10, initpages >> 10); | 526 | codesize >> 10, datasize >> 10, initsize >> 10); |
524 | 527 | ||
525 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 528 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { |
526 | extern int sysctl_overcommit_memory; | 529 | extern int sysctl_overcommit_memory; |
@@ -535,11 +538,10 @@ void __init mem_init(void) | |||
535 | 538 | ||
536 | void free_initmem(void) | 539 | void free_initmem(void) |
537 | { | 540 | { |
538 | if (!machine_is_integrator() && !machine_is_cintegrator()) { | 541 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
539 | free_area((unsigned long)(&__init_begin), | 542 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), |
540 | (unsigned long)(&__init_end), | 543 | __phys_to_pfn(__pa(__init_end)), |
541 | "init"); | 544 | "init"); |
542 | } | ||
543 | } | 545 | } |
544 | 546 | ||
545 | #ifdef CONFIG_BLK_DEV_INITRD | 547 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -549,7 +551,9 @@ static int keep_initrd; | |||
549 | void free_initrd_mem(unsigned long start, unsigned long end) | 551 | void free_initrd_mem(unsigned long start, unsigned long end) |
550 | { | 552 | { |
551 | if (!keep_initrd) | 553 | if (!keep_initrd) |
552 | free_area(start, end, "initrd"); | 554 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), |
555 | __phys_to_pfn(__pa(end)), | ||
556 | "initrd"); | ||
553 | } | 557 | } |
554 | 558 | ||
555 | static int __init keepinitrd_setup(char *__unused) | 559 | static int __init keepinitrd_setup(char *__unused) |
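All three call sites above now hand free_area() pfn ranges, converting kernel virtual addresses through __pa() and then __phys_to_pfn(). The plumbing reduces to the following (the PAGE_OFFSET/PHYS_OFFSET values are illustrative assumptions, not taken from this tree):

    #include <stdio.h>

    #define PAGE_OFFSET  0xC0000000UL   /* assumed 3G/1G user/kernel split */
    #define PHYS_OFFSET  0xA0000000UL   /* assumed platform RAM base */
    #define PAGE_SHIFT   12

    #define __pa(x)           ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
    #define __phys_to_pfn(p)  ((unsigned long)(p) >> PAGE_SHIFT)

    int main(void)
    {
        unsigned long va = 0xC0008000UL;                   /* lowmem address */
        printf("pfn = 0x%lx\n", __phys_to_pfn(__pa(va)));  /* 0xa0008 */
        return 0;
    }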
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5d9f53907b4e..95bbe112965e 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -32,7 +32,5 @@ struct meminfo; | |||
32 | struct pglist_data; | 32 | struct pglist_data; |
33 | 33 | ||
34 | void __init create_mapping(struct map_desc *md); | 34 | void __init create_mapping(struct map_desc *md); |
35 | void __init bootmem_init(struct meminfo *mi); | 35 | void __init bootmem_init(void); |
36 | void reserve_node_zero(struct pglist_data *pgdat); | 36 | void reserve_node_zero(struct pglist_data *pgdat); |
37 | |||
38 | extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end; | ||
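Dropping these ad-hoc extern declarations in favour of <asm/sections.h> is what lets init.c and mmu.c write _etext - _text instead of &_etext - &_text: the shared header declares the linker symbols as arrays, so plain subtraction yields a byte count. A sketch of the declaration style (not the verbatim header):

    /* Linker-provided section boundaries, declared as incomplete char
     * arrays; the symbol name itself decays to the section's address. */
    extern char _text[], _stext[], _etext[];
    extern char _data[], _end[];
    extern char __init_begin[], __init_end[];

    static unsigned long init_bytes(void)
    {
        return __init_end - __init_begin;   /* section size in bytes */
    }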
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 7f36c825718d..9b36c5cb5e9f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <asm/cputype.h> | 18 | #include <asm/cputype.h> |
19 | #include <asm/mach-types.h> | 19 | #include <asm/mach-types.h> |
20 | #include <asm/sections.h> | ||
20 | #include <asm/setup.h> | 21 | #include <asm/setup.h> |
21 | #include <asm/sizes.h> | 22 | #include <asm/sizes.h> |
22 | #include <asm/tlb.h> | 23 | #include <asm/tlb.h> |
@@ -646,61 +647,79 @@ static void __init early_vmalloc(char **arg) | |||
646 | "vmalloc area too small, limiting to %luMB\n", | 647 | "vmalloc area too small, limiting to %luMB\n", |
647 | vmalloc_reserve >> 20); | 648 | vmalloc_reserve >> 20); |
648 | } | 649 | } |
650 | |||
651 | if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { | ||
652 | vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); | ||
653 | printk(KERN_WARNING | ||
654 | "vmalloc area is too big, limiting to %luMB\n", | ||
655 | vmalloc_reserve >> 20); | ||
656 | } | ||
649 | } | 657 | } |
650 | __early_param("vmalloc=", early_vmalloc); | 658 | __early_param("vmalloc=", early_vmalloc); |
651 | 659 | ||
652 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) | 660 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) |
653 | 661 | ||
654 | static int __init check_membank_valid(struct membank *mb) | 662 | static void __init sanity_check_meminfo(void) |
655 | { | 663 | { |
656 | /* | 664 | int i, j; |
657 | * Check whether this memory region has non-zero size or | ||
658 | * invalid node number. | ||
659 | */ | ||
660 | if (mb->size == 0 || mb->node >= MAX_NUMNODES) | ||
661 | return 0; | ||
662 | |||
663 | /* | ||
664 | * Check whether this memory region would entirely overlap | ||
665 | * the vmalloc area. | ||
666 | */ | ||
667 | if (phys_to_virt(mb->start) >= VMALLOC_MIN) { | ||
668 | printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " | ||
669 | "(vmalloc region overlap).\n", | ||
670 | mb->start, mb->start + mb->size - 1); | ||
671 | return 0; | ||
672 | } | ||
673 | |||
674 | /* | ||
675 | * Check whether this memory region would partially overlap | ||
676 | * the vmalloc area. | ||
677 | */ | ||
678 | if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) || | ||
679 | phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) { | ||
680 | unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start); | ||
681 | |||
682 | printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " | ||
683 | "to -%.8lx (vmalloc region overlap).\n", | ||
684 | mb->start, mb->start + mb->size - 1, | ||
685 | mb->start + newsize - 1); | ||
686 | mb->size = newsize; | ||
687 | } | ||
688 | 665 | ||
689 | return 1; | 666 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { |
690 | } | 667 | struct membank *bank = &meminfo.bank[j]; |
668 | *bank = meminfo.bank[i]; | ||
691 | 669 | ||
692 | static void __init sanity_check_meminfo(struct meminfo *mi) | 670 | #ifdef CONFIG_HIGHMEM |
693 | { | 671 | /* |
694 | int i, j; | 672 | * Split those memory banks which partially overlap
673 | * the vmalloc area, greatly simplifying things later. | ||
674 | */ | ||
675 | if (__va(bank->start) < VMALLOC_MIN && | ||
676 | bank->size > VMALLOC_MIN - __va(bank->start)) { | ||
677 | if (meminfo.nr_banks >= NR_BANKS) { | ||
678 | printk(KERN_CRIT "NR_BANKS too low, " | ||
679 | "ignoring high memory\n"); | ||
680 | } else { | ||
681 | memmove(bank + 1, bank, | ||
682 | (meminfo.nr_banks - i) * sizeof(*bank)); | ||
683 | meminfo.nr_banks++; | ||
684 | i++; | ||
685 | bank[1].size -= VMALLOC_MIN - __va(bank->start); | ||
686 | bank[1].start = __pa(VMALLOC_MIN - 1) + 1; | ||
687 | j++; | ||
688 | } | ||
689 | bank->size = VMALLOC_MIN - __va(bank->start); | ||
690 | } | ||
691 | #else | ||
692 | /* | ||
693 | * Check whether this memory bank would entirely overlap | ||
694 | * the vmalloc area. | ||
695 | */ | ||
696 | if (__va(bank->start) >= VMALLOC_MIN) { | ||
697 | printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " | ||
698 | "(vmalloc region overlap).\n", | ||
699 | bank->start, bank->start + bank->size - 1); | ||
700 | continue; | ||
701 | } | ||
695 | 702 | ||
696 | for (i = 0, j = 0; i < mi->nr_banks; i++) { | 703 | /* |
697 | if (check_membank_valid(&mi->bank[i])) | 704 | * Check whether this memory bank would partially overlap |
698 | mi->bank[j++] = mi->bank[i]; | 705 | * the vmalloc area. |
706 | */ | ||
707 | if (__va(bank->start + bank->size) > VMALLOC_MIN || | ||
708 | __va(bank->start + bank->size) < __va(bank->start)) { | ||
709 | unsigned long newsize = VMALLOC_MIN - __va(bank->start); | ||
710 | printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " | ||
711 | "to -%.8lx (vmalloc region overlap).\n", | ||
712 | bank->start, bank->start + bank->size - 1, | ||
713 | bank->start + newsize - 1); | ||
714 | bank->size = newsize; | ||
715 | } | ||
716 | #endif | ||
717 | j++; | ||
699 | } | 718 | } |
700 | mi->nr_banks = j; | 719 | meminfo.nr_banks = j; |
701 | } | 720 | } |
702 | 721 | ||
703 | static inline void prepare_page_table(struct meminfo *mi) | 722 | static inline void prepare_page_table(void) |
704 | { | 723 | { |
705 | unsigned long addr; | 724 | unsigned long addr; |
706 | 725 | ||
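The CONFIG_HIGHMEM branch of the new sanity_check_meminfo() is the interesting one: a bank that starts below VMALLOC_MIN but runs past it is cut in two, and the highmem half starts at __pa(VMALLOC_MIN - 1) + 1 so that __pa() is only ever applied to an address inside the lowmem linear map. A minimal model of the split (struct and names are illustrative):

    struct bank { unsigned long start, size; };   /* physical start/size */

    /* Cut one straddling bank into a lowmem part and a highmem part;
     * lowmem_bytes is VMALLOC_MIN - __va(lo->start) in the real code. */
    static void split_bank(struct bank *lo, struct bank *hi,
                           unsigned long lowmem_bytes)
    {
        hi->start = lo->start + lowmem_bytes;   /* first highmem byte */
        hi->size  = lo->size - lowmem_bytes;
        lo->size  = lowmem_bytes;               /* lowmem part ends at the cut */
    }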
@@ -712,7 +731,7 @@ static inline void prepare_page_table(struct meminfo *mi) | |||
712 | 731 | ||
713 | #ifdef CONFIG_XIP_KERNEL | 732 | #ifdef CONFIG_XIP_KERNEL |
714 | /* The XIP kernel is mapped in the module area -- skip over it */ | 733 | /* The XIP kernel is mapped in the module area -- skip over it */ |
715 | addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK; | 734 | addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK; |
716 | #endif | 735 | #endif |
717 | for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) | 736 | for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) |
718 | pmd_clear(pmd_off_k(addr)); | 737 | pmd_clear(pmd_off_k(addr)); |
@@ -721,7 +740,7 @@ static inline void prepare_page_table(struct meminfo *mi) | |||
721 | * Clear out all the kernel space mappings, except for the first | 740 | * Clear out all the kernel space mappings, except for the first |
722 | * memory bank, up to the end of the vmalloc region. | 741 | * memory bank, up to the end of the vmalloc region. |
723 | */ | 742 | */ |
724 | for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size); | 743 | for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); |
725 | addr < VMALLOC_END; addr += PGDIR_SIZE) | 744 | addr < VMALLOC_END; addr += PGDIR_SIZE) |
726 | pmd_clear(pmd_off_k(addr)); | 745 | pmd_clear(pmd_off_k(addr)); |
727 | } | 746 | } |
@@ -738,10 +757,10 @@ void __init reserve_node_zero(pg_data_t *pgdat) | |||
738 | * Note that this can only be in node 0. | 757 | * Note that this can only be in node 0. |
739 | */ | 758 | */ |
740 | #ifdef CONFIG_XIP_KERNEL | 759 | #ifdef CONFIG_XIP_KERNEL |
741 | reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start, | 760 | reserve_bootmem_node(pgdat, __pa(_data), _end - _data, |
742 | BOOTMEM_DEFAULT); | 761 | BOOTMEM_DEFAULT); |
743 | #else | 762 | #else |
744 | reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext, | 763 | reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, |
745 | BOOTMEM_DEFAULT); | 764 | BOOTMEM_DEFAULT); |
746 | #endif | 765 | #endif |
747 | 766 | ||
@@ -808,7 +827,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
808 | * Allocate the vector page early. | 827 | * Allocate the vector page early. |
809 | */ | 828 | */ |
810 | vectors = alloc_bootmem_low_pages(PAGE_SIZE); | 829 | vectors = alloc_bootmem_low_pages(PAGE_SIZE); |
811 | BUG_ON(!vectors); | ||
812 | 830 | ||
813 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) | 831 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) |
814 | pmd_clear(pmd_off_k(addr)); | 832 | pmd_clear(pmd_off_k(addr)); |
@@ -820,7 +838,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
820 | #ifdef CONFIG_XIP_KERNEL | 838 | #ifdef CONFIG_XIP_KERNEL |
821 | map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); | 839 | map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); |
822 | map.virtual = MODULES_VADDR; | 840 | map.virtual = MODULES_VADDR; |
823 | map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; | 841 | map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; |
824 | map.type = MT_ROM; | 842 | map.type = MT_ROM; |
825 | create_mapping(&map); | 843 | create_mapping(&map); |
826 | #endif | 844 | #endif |
@@ -880,23 +898,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
880 | * paging_init() sets up the page tables, initialises the zone memory | 898 | * paging_init() sets up the page tables, initialises the zone memory |
881 | * maps, and sets up the zero page, bad page and bad page tables. | 899 | * maps, and sets up the zero page, bad page and bad page tables. |
882 | */ | 900 | */ |
883 | void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) | 901 | void __init paging_init(struct machine_desc *mdesc) |
884 | { | 902 | { |
885 | void *zero_page; | 903 | void *zero_page; |
886 | 904 | ||
887 | build_mem_type_table(); | 905 | build_mem_type_table(); |
888 | sanity_check_meminfo(mi); | 906 | sanity_check_meminfo(); |
889 | prepare_page_table(mi); | 907 | prepare_page_table(); |
890 | bootmem_init(mi); | 908 | bootmem_init(); |
891 | devicemaps_init(mdesc); | 909 | devicemaps_init(mdesc); |
892 | 910 | ||
893 | top_pmd = pmd_off_k(0xffff0000); | 911 | top_pmd = pmd_off_k(0xffff0000); |
894 | 912 | ||
895 | /* | 913 | /* |
896 | * allocate the zero page. Note that we count on this going ok. | 914 | * allocate the zero page. Note that this always succeeds and |
915 | * returns a zeroed result. | ||
897 | */ | 916 | */ |
898 | zero_page = alloc_bootmem_low_pages(PAGE_SIZE); | 917 | zero_page = alloc_bootmem_low_pages(PAGE_SIZE); |
899 | memzero(zero_page, PAGE_SIZE); | ||
900 | empty_zero_page = virt_to_page(zero_page); | 918 | empty_zero_page = virt_to_page(zero_page); |
901 | flush_dcache_page(empty_zero_page); | 919 | flush_dcache_page(empty_zero_page); |
902 | } | 920 | } |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 07b62b238979..ad7bacc693b2 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/io.h> | 10 | #include <linux/io.h> |
11 | 11 | ||
12 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
13 | #include <asm/sections.h> | ||
13 | #include <asm/page.h> | 14 | #include <asm/page.h> |
14 | #include <asm/mach/arch.h> | 15 | #include <asm/mach/arch.h> |
15 | 16 | ||
@@ -25,10 +26,10 @@ void __init reserve_node_zero(pg_data_t *pgdat) | |||
25 | * Note that this can only be in node 0. | 26 | * Note that this can only be in node 0. |
26 | */ | 27 | */ |
27 | #ifdef CONFIG_XIP_KERNEL | 28 | #ifdef CONFIG_XIP_KERNEL |
28 | reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start, | 29 | reserve_bootmem_node(pgdat, __pa(_data), _end - _data, |
29 | BOOTMEM_DEFAULT); | 30 | BOOTMEM_DEFAULT); |
30 | #else | 31 | #else |
31 | reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext, | 32 | reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext, |
32 | BOOTMEM_DEFAULT); | 33 | BOOTMEM_DEFAULT); |
33 | #endif | 34 | #endif |
34 | 35 | ||
@@ -41,27 +42,13 @@ void __init reserve_node_zero(pg_data_t *pgdat) | |||
41 | BOOTMEM_DEFAULT); | 42 | BOOTMEM_DEFAULT); |
42 | } | 43 | } |
43 | 44 | ||
44 | static void __init sanity_check_meminfo(struct meminfo *mi) | ||
45 | { | ||
46 | int i, j; | ||
47 | |||
48 | for (i = 0, j = 0; i < mi->nr_banks; i++) { | ||
49 | struct membank *mb = &mi->bank[i]; | ||
50 | |||
51 | if (mb->size != 0 && mb->node < MAX_NUMNODES) | ||
52 | mi->bank[j++] = mi->bank[i]; | ||
53 | } | ||
54 | mi->nr_banks = j; | ||
55 | } | ||
56 | |||
57 | /* | 45 | /* |
58 | * paging_init() sets up the page tables, initialises the zone memory | 46 | * paging_init() sets up the page tables, initialises the zone memory |
59 | * maps, and sets up the zero page, bad page and bad page tables. | 47 | * maps, and sets up the zero page, bad page and bad page tables. |
60 | */ | 48 | */ |
61 | void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) | 49 | void __init paging_init(struct machine_desc *mdesc) |
62 | { | 50 | { |
63 | sanity_check_meminfo(mi); | 51 | bootmem_init(); |
64 | bootmem_init(mi); | ||
65 | } | 52 | } |
66 | 53 | ||
67 | /* | 54 | /* |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index e0f19ab91163..2690146161ba 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -31,7 +31,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm) | |||
31 | if (!new_pgd) | 31 | if (!new_pgd) |
32 | goto no_pgd; | 32 | goto no_pgd; |
33 | 33 | ||
34 | memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t)); | 34 | memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t)); |
35 | 35 | ||
36 | /* | 36 | /* |
37 | * Copy over the kernel and IO PGD entries | 37 | * Copy over the kernel and IO PGD entries |
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 2b5ba396e3a6..4ad3bf291ad3 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c | |||
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(cpu_cache); | |||
33 | 33 | ||
34 | #ifdef CONFIG_MMU | 34 | #ifdef CONFIG_MMU |
35 | #ifndef MULTI_USER | 35 | #ifndef MULTI_USER |
36 | EXPORT_SYMBOL(__cpu_clear_user_page); | 36 | EXPORT_SYMBOL(__cpu_clear_user_highpage); |
37 | EXPORT_SYMBOL(__cpu_copy_user_page); | 37 | EXPORT_SYMBOL(__cpu_copy_user_highpage); |
38 | #else | 38 | #else |
39 | EXPORT_SYMBOL(cpu_user); | 39 | EXPORT_SYMBOL(cpu_user); |
40 | #endif | 40 | #endif |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 294943b85973..f0cc599facb7 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -71,6 +71,8 @@ ENTRY(cpu_v6_reset) | |||
71 | * IRQs are already disabled. | 71 | * IRQs are already disabled. |
72 | */ | 72 | */ |
73 | ENTRY(cpu_v6_do_idle) | 73 | ENTRY(cpu_v6_do_idle) |
74 | mov r1, #0 | ||
75 | mcr p15, 0, r1, c7, c10, 4 @ DWB - WFI may enter a low-power mode | ||
74 | mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt | 76 | mcr p15, 0, r1, c7, c0, 4 @ wait for interrupt |
75 | mov pc, lr | 77 | mov pc, lr |
76 | 78 | ||
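The two added instructions give cpu_v6_do_idle() well-defined store ordering: if WFI drops the core into a low-power state, writes still sitting in the write buffer must be drained first, and r1 must hold zero for the CP15 operations. In C-with-inline-asm form the sequence is roughly (a sketch using the same CP15 encodings as the patch):

    static inline void v6_idle(void)
    {
        unsigned long zero = 0;

        asm volatile(
        "   mcr p15, 0, %0, c7, c10, 4  @ drain write buffer (DWB)\n"
        "   mcr p15, 0, %0, c7, c0, 4   @ wait for interrupt\n"
        : : "r" (zero) : "memory");
    }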
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 4d3c0a73e7fb..d1ebec42521d 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -20,9 +20,17 @@ | |||
20 | 20 | ||
21 | #define TTB_C (1 << 0) | 21 | #define TTB_C (1 << 0) |
22 | #define TTB_S (1 << 1) | 22 | #define TTB_S (1 << 1) |
23 | #define TTB_RGN_NC (0 << 3) | ||
24 | #define TTB_RGN_OC_WBWA (1 << 3) | ||
23 | #define TTB_RGN_OC_WT (2 << 3) | 25 | #define TTB_RGN_OC_WT (2 << 3) |
24 | #define TTB_RGN_OC_WB (3 << 3) | 26 | #define TTB_RGN_OC_WB (3 << 3) |
25 | 27 | ||
28 | #ifndef CONFIG_SMP | ||
29 | #define TTB_FLAGS TTB_C|TTB_RGN_OC_WB @ mark PTWs cacheable, outer WB | ||
30 | #else | ||
31 | #define TTB_FLAGS TTB_C|TTB_S|TTB_RGN_OC_WBWA @ mark PTWs cacheable and shared, outer WBWA | ||
32 | #endif | ||
33 | |||
26 | ENTRY(cpu_v7_proc_init) | 34 | ENTRY(cpu_v7_proc_init) |
27 | mov pc, lr | 35 | mov pc, lr |
28 | ENDPROC(cpu_v7_proc_init) | 36 | ENDPROC(cpu_v7_proc_init) |
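TTB_FLAGS now varies by build: UP keeps cacheable, outer write-back page-table walks, while SMP additionally marks walks shared and upgrades the outer region to write-back write-allocate. Evaluating the bit fields numerically (values copied from the defines above):

    #include <stdio.h>

    #define TTB_C            (1 << 0)
    #define TTB_S            (1 << 1)
    #define TTB_RGN_OC_WBWA  (1 << 3)
    #define TTB_RGN_OC_WB    (3 << 3)

    int main(void)
    {
        printf("UP  TTB_FLAGS = 0x%02x\n", TTB_C | TTB_RGN_OC_WB);            /* 0x19 */
        printf("SMP TTB_FLAGS = 0x%02x\n", TTB_C | TTB_S | TTB_RGN_OC_WBWA);  /* 0x0b */
        return 0;
    }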
@@ -55,6 +63,7 @@ ENDPROC(cpu_v7_reset) | |||
55 | * IRQs are already disabled. | 63 | * IRQs are already disabled. |
56 | */ | 64 | */ |
57 | ENTRY(cpu_v7_do_idle) | 65 | ENTRY(cpu_v7_do_idle) |
66 | dsb @ WFI may enter a low-power mode | ||
58 | wfi | 67 | wfi |
59 | mov pc, lr | 68 | mov pc, lr |
60 | ENDPROC(cpu_v7_do_idle) | 69 | ENDPROC(cpu_v7_do_idle) |
@@ -85,7 +94,7 @@ ENTRY(cpu_v7_switch_mm) | |||
85 | #ifdef CONFIG_MMU | 94 | #ifdef CONFIG_MMU |
86 | mov r2, #0 | 95 | mov r2, #0 |
87 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 96 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id |
88 | orr r0, r0, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB | 97 | orr r0, r0, #TTB_FLAGS |
89 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID | 98 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID |
90 | isb | 99 | isb |
91 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | 100 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 |
@@ -162,6 +171,11 @@ cpu_v7_name: | |||
162 | * - cache type register is implemented | 171 | * - cache type register is implemented |
163 | */ | 172 | */ |
164 | __v7_setup: | 173 | __v7_setup: |
174 | #ifdef CONFIG_SMP | ||
175 | mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode | ||
176 | orr r0, r0, #(0x1 << 6) | ||
177 | mcr p15, 0, r0, c1, c0, 1 | ||
178 | #endif | ||
165 | adr r12, __v7_setup_stack @ the local stack | 179 | adr r12, __v7_setup_stack @ the local stack |
166 | stmia r12, {r0-r5, r7, r9, r11, lr} | 180 | stmia r12, {r0-r5, r7, r9, r11, lr} |
167 | bl v7_flush_dcache_all | 181 | bl v7_flush_dcache_all |
@@ -174,8 +188,7 @@ __v7_setup: | |||
174 | #ifdef CONFIG_MMU | 188 | #ifdef CONFIG_MMU |
175 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs | 189 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs |
176 | mcr p15, 0, r10, c2, c0, 2 @ TTB control register | 190 | mcr p15, 0, r10, c2, c0, 2 @ TTB control register |
177 | orr r4, r4, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB | 191 | orr r4, r4, #TTB_FLAGS |
178 | mcr p15, 0, r4, c2, c0, 0 @ load TTB0 | ||
179 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 | 192 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 |
180 | mov r10, #0x1f @ domains 0, 1 = manager | 193 | mov r10, #0x1f @ domains 0, 1 = manager |
181 | mcr p15, 0, r10, c3, c0, 0 @ load domain access register | 194 | mcr p15, 0, r10, c3, c0, 0 @ load domain access register |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 8f6cf56c11c0..33515c214b92 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -481,3 +481,28 @@ __xsc3_proc_info: | |||
481 | .long xsc3_mc_user_fns | 481 | .long xsc3_mc_user_fns |
482 | .long xsc3_cache_fns | 482 | .long xsc3_cache_fns |
483 | .size __xsc3_proc_info, . - __xsc3_proc_info | 483 | .size __xsc3_proc_info, . - __xsc3_proc_info |
484 | |||
485 | /* Note: PXA935 changed its implementor ID from Intel to Marvell */ | ||
486 | |||
487 | .type __xsc3_pxa935_proc_info,#object | ||
488 | __xsc3_pxa935_proc_info: | ||
489 | .long 0x56056000 | ||
490 | .long 0xffffe000 | ||
491 | .long PMD_TYPE_SECT | \ | ||
492 | PMD_SECT_BUFFERABLE | \ | ||
493 | PMD_SECT_CACHEABLE | \ | ||
494 | PMD_SECT_AP_WRITE | \ | ||
495 | PMD_SECT_AP_READ | ||
496 | .long PMD_TYPE_SECT | \ | ||
497 | PMD_SECT_AP_WRITE | \ | ||
498 | PMD_SECT_AP_READ | ||
499 | b __xsc3_setup | ||
500 | .long cpu_arch_name | ||
501 | .long cpu_elf_name | ||
502 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
503 | .long cpu_xsc3_name | ||
504 | .long xsc3_processor_functions | ||
505 | .long v4wbi_tlb_fns | ||
506 | .long xsc3_mc_user_fns | ||
507 | .long xsc3_cache_fns | ||
508 | .size __xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info | ||
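The new entry is needed because PXA935 reports Marvell (0x56) rather than Intel (0x69) in MIDR bits [31:24]; the boot-time lookup masks the CPU ID and compares it with each proc_info value. The matching rule, shown with a hypothetical PXA935 MIDR for illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int midr = 0x56056931;   /* hypothetical PXA935 CPU ID */
        unsigned int val  = 0x56056000;   /* from __xsc3_pxa935_proc_info */
        unsigned int mask = 0xffffe000;

        printf("match: %s\n", (midr & mask) == val ? "yes" : "no");
        printf("implementor: 0x%02x\n", midr >> 24);   /* 0x56 = Marvell */
        return 0;
    }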