Diffstat (limited to 'arch/arm/mm')
56 files changed, 2261 insertions, 882 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9264d814cd7a..5bd7c89a6045 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1,9 +1,5 @@
 comment "Processor Type"
 
-config CPU_32
-	bool
-	default y
-
 # Select CPU types depending on the architecture selected.  This selects
 # which CPUs we support in the kernel image, and the compiler instruction
 # optimiser behaviour.
@@ -388,7 +384,7 @@ config CPU_FEROCEON_OLD_ID
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_V6
@@ -403,7 +399,7 @@ config CPU_V6
 config CPU_32v6K
 	bool "Support ARM V6K processor extensions" if !SMP
 	depends on CPU_V6
-	default y if SMP && !ARCH_MX3
+	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
 	help
 	  Say Y here if your ARMv6 processor supports the 'K' extension.
 	  This enables the kernel to use some instructions not present
@@ -414,7 +410,7 @@ config CPU_32v6K
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K
+	select CPU_32v6K if !ARCH_OMAP2
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -740,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
 	bool
 
+config OUTER_CACHE_SYNC
+	bool
+	help
+	  The outer cache has a outer_cache_fns.sync function pointer
+	  that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
 	depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -758,12 +760,22 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
 	default y
 	select OUTER_CACHE
+	select OUTER_CACHE_SYNC
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_TAUROS2
+	bool "Enable the Tauros2 L2 cache controller"
+	depends on ARCH_DOVE
+	default y
+	select OUTER_CACHE
+	help
+	  This option enables the Tauros2 L2 cache controller (as
+	  found on PJ1/PJ4).
+
 config CACHE_XSC3L2
 	bool "Enable the L2 cache on XScale3"
 	depends on CPU_XSC3
@@ -774,5 +786,11 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
 	int
-	default 6 if ARCH_OMAP3
+	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
+
+config ARCH_HAS_BARRIERS
+	bool
+	help
+	  This option allows the use of custom mandatory barriers
+	  included via the mach/barriers.h file.
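Note: the new OUTER_CACHE_SYNC option advertises that an outer-cache driver fills in a sync hook next to the existing range operations, so a mandatory barrier can drain the outer write buffer without a full range flush. A rough user-space sketch of the idea follows; the struct shape mirrors the kernel's outer_cache_fns, but everything here is illustrative, not kernel code.

/* Illustrative sketch only: a user-space model of an ops table with an
 * optional sync hook, assuming nothing beyond what the Kconfig help
 * text above describes. */
#include <stdio.h>

struct outer_cache_fns {
        void (*inv_range)(unsigned long start, unsigned long end);
        void (*clean_range)(unsigned long start, unsigned long end);
        void (*flush_range)(unsigned long start, unsigned long end);
        void (*sync)(void);     /* optional: drain the write buffer */
};

static void demo_sync(void)
{
        printf("drain outer write buffer\n");
}

static struct outer_cache_fns outer_cache = { .sync = demo_sync };

/* A barrier path only needs the drain, so it checks for the hook and
 * does nothing when the driver did not provide one. */
static void outer_sync(void)
{
        if (outer_cache.sync)
                outer_cache.sync();
}

int main(void)
{
        outer_sync();
        return 0;
}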
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 055cb2aa8134..e8d34a80851c 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o ioremap.o mmap.o \
-				   pgd.o mmu.o
+				   pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
@@ -27,6 +27,9 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ)	+= abort-ev5tj.o
 obj-$(CONFIG_CPU_ABRT_EV6)	+= abort-ev6.o
 obj-$(CONFIG_CPU_ABRT_EV7)	+= abort-ev7.o
 
+AFLAGS_abort-ev6.o	:=-Wa,-march=armv6k
+AFLAGS_abort-ev7.o	:=-Wa,-march=armv7-a
+
 obj-$(CONFIG_CPU_PABRT_LEGACY)	+= pabort-legacy.o
 obj-$(CONFIG_CPU_PABRT_V6)	+= pabort-v6.o
 obj-$(CONFIG_CPU_PABRT_V7)	+= pabort-v7.o
@@ -39,6 +42,9 @@ obj-$(CONFIG_CPU_CACHE_V6)	+= cache-v6.o
 obj-$(CONFIG_CPU_CACHE_V7)	+= cache-v7.o
 obj-$(CONFIG_CPU_CACHE_FA)	+= cache-fa.o
 
+AFLAGS_cache-v6.o	:=-Wa,-march=armv6
+AFLAGS_cache-v7.o	:=-Wa,-march=armv7-a
+
 obj-$(CONFIG_CPU_COPY_V3)	+= copypage-v3.o
 obj-$(CONFIG_CPU_COPY_V4WT)	+= copypage-v4wt.o
 obj-$(CONFIG_CPU_COPY_V4WB)	+= copypage-v4wb.o
@@ -58,6 +64,9 @@ obj-$(CONFIG_CPU_TLB_V6)	+= tlb-v6.o
 obj-$(CONFIG_CPU_TLB_V7)	+= tlb-v7.o
 obj-$(CONFIG_CPU_TLB_FA)	+= tlb-fa.o
 
+AFLAGS_tlb-v6.o	:=-Wa,-march=armv6
+AFLAGS_tlb-v7.o	:=-Wa,-march=armv7-a
+
 obj-$(CONFIG_CPU_ARM610)	+= proc-arm6_7.o
 obj-$(CONFIG_CPU_ARM710)	+= proc-arm6_7.o
 obj-$(CONFIG_CPU_ARM7TDMI)	+= proc-arm7tdmi.o
@@ -84,7 +93,10 @@ obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
 
+AFLAGS_proc-v6.o	:=-Wa,-march=armv6
+AFLAGS_proc-v7.o	:=-Wa,-march=armv7-a
+
 obj-$(CONFIG_CACHE_FEROCEON_L2)	+= cache-feroceon-l2.o
 obj-$(CONFIG_CACHE_L2X0)	+= cache-l2x0.o
 obj-$(CONFIG_CACHE_XSC3L2)	+= cache-xsc3l2.o
-
+obj-$(CONFIG_CACHE_TAUROS2)	+= cache-tauros2.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..a2ab51fa73e2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/moduleparam.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -77,6 +78,8 @@ static unsigned long ai_dword;
 static unsigned long ai_multi;
 static int ai_usermode;
 
+core_param(alignment, ai_usermode, int, 0600);
+
 #define UM_WARN		(1 << 0)
 #define UM_FIXUP	(1 << 1)
 #define UM_SIGNAL	(1 << 2)
@@ -163,15 +166,15 @@ union offset_union {
  THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
  THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
-	"	.section .fixup,\"ax\"\n"		\
+	"	.pushsection .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
 	"3:	mov	%0, #1\n"			\
 	"	b	2b\n"				\
-	"	.previous\n"				\
-	"	.section __ex_table,\"a\"\n"		\
+	"	.popsection\n"				\
+	"	.pushsection __ex_table,\"a\"\n"	\
 	"	.align	3\n"				\
 	"	.long	1b, 3b\n"			\
-	"	.previous\n"				\
+	"	.popsection\n"				\
 	: "=r" (err), "=&r" (val), "=r" (addr)		\
 	: "0" (err), "2" (addr))
 
@@ -223,16 +226,16 @@ union offset_union {
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"2:	"ins"	%1, [%2]\n"			\
 	"3:\n"						\
-	"	.section .fixup,\"ax\"\n"		\
+	"	.pushsection .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
 	"4:	mov	%0, #1\n"			\
 	"	b	3b\n"				\
-	"	.previous\n"				\
-	"	.section __ex_table,\"a\"\n"		\
+	"	.popsection\n"				\
+	"	.pushsection __ex_table,\"a\"\n"	\
 	"	.align	3\n"				\
 	"	.long	1b, 4b\n"			\
 	"	.long	2b, 4b\n"			\
-	"	.previous\n"				\
+	"	.popsection\n"				\
 	: "=r" (err), "=&r" (v), "=&r" (a)		\
 	: "0" (err), "1" (v), "2" (a));			\
 	if (err)					\
@@ -263,18 +266,18 @@ union offset_union {
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"4:	"ins"	%1, [%2]\n"			\
 	"5:\n"						\
-	"	.section .fixup,\"ax\"\n"		\
+	"	.pushsection .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
 	"6:	mov	%0, #1\n"			\
 	"	b	5b\n"				\
-	"	.previous\n"				\
-	"	.section __ex_table,\"a\"\n"		\
+	"	.popsection\n"				\
+	"	.pushsection __ex_table,\"a\"\n"	\
 	"	.align	3\n"				\
 	"	.long	1b, 6b\n"			\
 	"	.long	2b, 6b\n"			\
 	"	.long	3b, 6b\n"			\
 	"	.long	4b, 6b\n"			\
-	"	.previous\n"				\
+	"	.popsection\n"				\
 	: "=r" (err), "=&r" (v), "=&r" (a)		\
 	: "0" (err), "1" (v), "2" (a));			\
 	if (err)					\
@@ -898,11 +901,7 @@ static int __init alignment_init(void)
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *res;
 
-	res = proc_mkdir("cpu", NULL);
-	if (!res)
-		return -ENOMEM;
-
-	res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+	res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
 	if (!res)
 		return -ENOMEM;
 
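Note: the fixup macros above emulate a faulting misaligned access with single-byte loads and stores, registering each byte access in the exception table so a fault branches to the recovery stub. A standalone sketch of just the byte-assembly part, in plain C with no exception handling (the function name is a stand-in, not kernel code):

/* Sketch: building a 32-bit little-endian value from an arbitrarily
 * aligned pointer one byte at a time.  Single-byte accesses can never
 * trip an alignment fault, which is the whole point of the fixup. */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_unaligned_le32(const uint8_t *p)
{
        return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
               ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
        uint8_t buf[8] = { 0, 0x78, 0x56, 0x34, 0x12, 0, 0, 0 };

        /* buf + 1 is deliberately misaligned. */
        printf("0x%08x\n", get_unaligned_le32(buf + 1)); /* 0x12345678 */
        return 0;
}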
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index b63a8f7b95cf..7148e53e6078 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -127,15 +127,16 @@ ENTRY(fa_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
  */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ -156,7 +157,7 @@ ENTRY(fa_flush_kern_dcache_page)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(fa_dma_inv_range)
+fa_dma_inv_range:
 	tst	r0, #CACHE_DLINESIZE - 1
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
@@ -179,7 +180,7 @@ ENTRY(fa_dma_inv_range)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(fa_dma_clean_range)
+fa_dma_clean_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -204,6 +205,30 @@ ENTRY(fa_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(fa_dma_map_area)
+	add	r1, r1, r0
+	cmp	r2, #DMA_TO_DEVICE
+	beq	fa_dma_clean_range
+	bcs	fa_dma_inv_range
+	b	fa_dma_flush_range
+ENDPROC(fa_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(fa_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(fa_dma_unmap_area)
+
 	__INITDATA
 
 	.type	fa_cache_fns, #object
@@ -213,8 +238,8 @@ ENTRY(fa_cache_fns)
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
-	.long	fa_dma_inv_range
-	.long	fa_dma_clean_range
+	.long	fa_flush_kern_dcache_area
+	.long	fa_dma_map_area
+	.long	fa_dma_unmap_area
 	.long	fa_dma_flush_range
 	.size	fa_cache_fns, . - fa_cache_fns
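Note: fa_dma_map_area selects the maintenance primitive from the DMA direction with a single cmp/beq/bcs chain. The same dispatch in C, assuming the usual kernel encoding (DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2); the helper names are stand-ins for the assembly entry points:

#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void clean_range(void) { puts("clean: write back before device reads"); }
static void inv_range(void)   { puts("invalidate: discard before device writes"); }
static void flush_range(void) { puts("clean+invalidate: both directions"); }

/* Mirrors the cmp/beq/bcs chain: eq -> clean, higher-or-same (i.e. the
 * remaining FROM_DEVICE case) -> invalidate, else -> flush. */
static void dma_map_area(enum dma_data_direction dir)
{
        if (dir == DMA_TO_DEVICE)
                clean_range();          /* beq fa_dma_clean_range */
        else if (dir > DMA_TO_DEVICE)
                inv_range();            /* bcs fa_dma_inv_range */
        else
                flush_range();          /* b   fa_dma_flush_range */
}

int main(void)
{
        dma_map_area(DMA_TO_DEVICE);
        dma_map_area(DMA_FROM_DEVICE);
        dma_map_area(DMA_BIDIRECTIONAL);
        return 0;
}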
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b480f1d3591f..21ad68ba22ba 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,69 +28,181 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 
-static inline void sync_writel(unsigned long val, unsigned long reg,
-			       unsigned long complete_mask)
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&l2x0_lock, flags);
-	writel(val, l2x0_base + reg);
 	/* wait for the operation to complete */
-	while (readl(l2x0_base + reg) & complete_mask)
+	while (readl(reg) & mask)
 		;
-	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void cache_sync(void)
 {
-	sync_writel(0, L2X0_CACHE_SYNC, 1);
+	void __iomem *base = l2x0_base;
+	writel(0, base + L2X0_CACHE_SYNC);
+	cache_wait(base + L2X0_CACHE_SYNC, 1);
+}
+
+static inline void l2x0_clean_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+static inline void l2x0_inv_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+
+#ifdef CONFIG_PL310_ERRATA_588369
+static void debug_writel(unsigned long val)
+{
+	extern void omap_smc1(u32 fn, u32 arg);
+
+	/*
+	 * Texas Instrument secure monitor api to modify the
+	 * PL310 Debug Control Register.
+	 */
+	omap_smc1(0x100, val);
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+
+	/* Clean by PA followed by Invalidate by PA */
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_LINE_PA);
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_INV_LINE_PA);
+}
+#else
+
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+	void __iomem *base = l2x0_base;
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+	writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+#endif
+
+static void l2x0_cache_sync(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static inline void l2x0_inv_all(void)
 {
+	unsigned long flags;
+
 	/* invalidate all ways */
-	sync_writel(0xff, L2X0_INV_WAY, 0xff);
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0xff, l2x0_base + L2X0_INV_WAY);
+	cache_wait(l2x0_base + L2X0_INV_WAY, 0xff);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(start, L2X0_CLEAN_INV_LINE_PA, 1);
+		debug_writel(0x03);
+		l2x0_flush_line(start);
+		debug_writel(0x00);
 		start += CACHE_LINE_SIZE;
 	}
 
 	if (end & (CACHE_LINE_SIZE - 1)) {
 		end &= ~(CACHE_LINE_SIZE - 1);
-		sync_writel(end, L2X0_CLEAN_INV_LINE_PA, 1);
+		debug_writel(0x03);
+		l2x0_flush_line(end);
+		debug_writel(0x00);
 	}
 
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			l2x0_inv_line(start);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		while (start < blk_end) {
+			l2x0_clean_line(start);
+			start += CACHE_LINE_SIZE;
+		}
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long addr;
+	void __iomem *base = l2x0_base;
+	unsigned long flags;
 
+	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
-	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
-		sync_writel(addr, L2X0_CLEAN_INV_LINE_PA, 1);
+	while (start < end) {
+		unsigned long blk_end = start + min(end - start, 4096UL);
+
+		debug_writel(0x03);
+		while (start < blk_end) {
+			l2x0_flush_line(start);
+			start += CACHE_LINE_SIZE;
+		}
+		debug_writel(0x00);
+
+		if (blk_end < end) {
+			spin_unlock_irqrestore(&l2x0_lock, flags);
+			spin_lock_irqsave(&l2x0_lock, flags);
+		}
+	}
+	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
@@ -99,22 +211,30 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
 	l2x0_base = base;
 
-	/* disable L2X0 */
-	writel(0, l2x0_base + L2X0_CTRL);
+	/*
+	 * Check if l2x0 controller is already enabled.
+	 * If you are booting from non-secure mode
+	 * accessing the below registers will fault.
+	 */
+	if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
+
+		/* l2x0 controller is disabled */
 
-	aux = readl(l2x0_base + L2X0_AUX_CTRL);
-	aux &= aux_mask;
-	aux |= aux_val;
-	writel(aux, l2x0_base + L2X0_AUX_CTRL);
+		aux = readl(l2x0_base + L2X0_AUX_CTRL);
+		aux &= aux_mask;
+		aux |= aux_val;
+		writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
-	l2x0_inv_all();
+		l2x0_inv_all();
 
-	/* enable L2X0 */
-	writel(1, l2x0_base + L2X0_CTRL);
+		/* enable L2X0 */
+		writel(1, l2x0_base + L2X0_CTRL);
+	}
 
 	outer_cache.inv_range = l2x0_inv_range;
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
+	outer_cache.sync = l2x0_cache_sync;
 
 	printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
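Note: the reworked range functions hold l2x0_lock only for 4 KiB worth of lines at a time, dropping and immediately retaking it between blocks so interrupt latency stays bounded on large ranges. A user-space sketch of that pattern, with a pthread mutex standing in for the spinlock (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

#define CACHE_LINE_SIZE 32UL
#define BLOCK           4096UL

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

static void op_range(unsigned long start, unsigned long end)
{
        pthread_mutex_lock(&lock);
        while (start < end) {
                unsigned long blk_end = start + min_ul(end - start, BLOCK);

                while (start < blk_end) {
                        /* one per-line maintenance operation goes here */
                        start += CACHE_LINE_SIZE;
                }

                if (blk_end < end) {    /* breathing space for contenders */
                        pthread_mutex_unlock(&lock);
                        pthread_mutex_lock(&lock);
                }
        }
        pthread_mutex_unlock(&lock);
        printf("processed up to %#lx\n", end);
}

int main(void)
{
        op_range(0x10000, 0x13000);     /* 12 KiB -> three blocks */
        return 0;
}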
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
new file mode 100644
index 000000000000..50868651890f
--- /dev/null
+++ b/arch/arm/mm/cache-tauros2.c
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * References:
+ * - PJ1 CPU Core Datasheet,
+ *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
+ * - PJ4 CPU Core Datasheet,
+ *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
+ */
+
+#include <linux/init.h>
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-tauros2.h>
+
+
+/*
+ * When Tauros2 is used on a CPU that supports the v7 hierarchical
+ * cache operations, the cache handling code in proc-v7.S takes care
+ * of everything, including handling DMA coherency.
+ *
+ * So, we only need to register outer cache operations here if we're
+ * being used on a pre-v7 CPU, and we only need to build support for
+ * outer cache operations into the kernel image if the kernel has been
+ * configured to support a pre-v7 CPU.
+ */
+#if __LINUX_ARM_ARCH__ < 7
+/*
+ * Low-level cache maintenance operations.
+ */
+static inline void tauros2_clean_pa(unsigned long addr)
+{
+	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
+}
+
+static inline void tauros2_clean_inv_pa(unsigned long addr)
+{
+	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
+}
+
+static inline void tauros2_inv_pa(unsigned long addr)
+{
+	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
+}
+
+
+/*
+ * Linux primitives.
+ *
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive.
+ */
+#define CACHE_LINE_SIZE		32
+
+static void tauros2_inv_range(unsigned long start, unsigned long end)
+{
+	/*
+	 * Clean and invalidate partial first cache line.
+	 */
+	if (start & (CACHE_LINE_SIZE - 1)) {
+		tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+	}
+
+	/*
+	 * Clean and invalidate partial last cache line.
+	 */
+	if (end & (CACHE_LINE_SIZE - 1)) {
+		tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+		end &= ~(CACHE_LINE_SIZE - 1);
+	}
+
+	/*
+	 * Invalidate all full cache lines between 'start' and 'end'.
+	 */
+	while (start < end) {
+		tauros2_inv_pa(start);
+		start += CACHE_LINE_SIZE;
+	}
+
+	dsb();
+}
+
+static void tauros2_clean_range(unsigned long start, unsigned long end)
+{
+	start &= ~(CACHE_LINE_SIZE - 1);
+	while (start < end) {
+		tauros2_clean_pa(start);
+		start += CACHE_LINE_SIZE;
+	}
+
+	dsb();
+}
+
+static void tauros2_flush_range(unsigned long start, unsigned long end)
+{
+	start &= ~(CACHE_LINE_SIZE - 1);
+	while (start < end) {
+		tauros2_clean_inv_pa(start);
+		start += CACHE_LINE_SIZE;
+	}
+
+	dsb();
+}
+#endif
+
+static inline u32 __init read_extra_features(void)
+{
+	u32 u;
+
+	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
+
+	return u;
+}
+
+static inline void __init write_extra_features(u32 u)
+{
+	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
+}
+
+static void __init disable_l2_prefetch(void)
+{
+	u32 u;
+
+	/*
+	 * Read the CPU Extra Features register and verify that the
+	 * Disable L2 Prefetch bit is set.
+	 */
+	u = read_extra_features();
+	if (!(u & 0x01000000)) {
+		printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n");
+		write_extra_features(u | 0x01000000);
+	}
+}
+
+static inline int __init cpuid_scheme(void)
+{
+	extern int processor_id;
+
+	return !!((processor_id & 0x000f0000) == 0x000f0000);
+}
+
+static inline u32 __init read_mmfr3(void)
+{
+	u32 mmfr3;
+
+	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));
+
+	return mmfr3;
+}
+
+static inline u32 __init read_actlr(void)
+{
+	u32 actlr;
+
+	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+
+	return actlr;
+}
+
+static inline void __init write_actlr(u32 actlr)
+{
+	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
+}
+
+void __init tauros2_init(void)
+{
+	extern int processor_id;
+	char *mode;
+
+	disable_l2_prefetch();
+
+#ifdef CONFIG_CPU_32v5
+	if ((processor_id & 0xff0f0000) == 0x56050000) {
+		u32 feat;
+
+		/*
+		 * v5 CPUs with Tauros2 have the L2 cache enable bit
+		 * located in the CPU Extra Features register.
+		 */
+		feat = read_extra_features();
+		if (!(feat & 0x00400000)) {
+			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+			write_extra_features(feat | 0x00400000);
+		}
+
+		mode = "ARMv5";
+		outer_cache.inv_range = tauros2_inv_range;
+		outer_cache.clean_range = tauros2_clean_range;
+		outer_cache.flush_range = tauros2_flush_range;
+	}
+#endif
+
+#ifdef CONFIG_CPU_32v6
+	/*
+	 * Check whether this CPU lacks support for the v7 hierarchical
+	 * cache ops.  (PJ4 is in its v6 personality mode if the MMFR3
+	 * register indicates no support for the v7 hierarchical cache
+	 * ops.)
+	 */
+	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
+		/*
+		 * When Tauros2 is used in an ARMv6 system, the L2
+		 * enable bit is in the ARMv6 ARM-mandated position
+		 * (bit [26] of the System Control Register).
+		 */
+		if (!(get_cr() & 0x04000000)) {
+			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+			adjust_cr(0x04000000, 0x04000000);
+		}
+
+		mode = "ARMv6";
+		outer_cache.inv_range = tauros2_inv_range;
+		outer_cache.clean_range = tauros2_clean_range;
+		outer_cache.flush_range = tauros2_flush_range;
+	}
+#endif
+
+#ifdef CONFIG_CPU_32v7
+	/*
+	 * Check whether this CPU has support for the v7 hierarchical
+	 * cache ops.  (PJ4 is in its v7 personality mode if the MMFR3
+	 * register indicates support for the v7 hierarchical cache
+	 * ops.)
+	 *
+	 * (Although strictly speaking there may exist CPUs that
+	 * implement the v7 cache ops but are only ARMv6 CPUs (due to
+	 * not complying with all of the other ARMv7 requirements),
+	 * there are no real-life examples of Tauros2 being used on
+	 * such CPUs as of yet.)
+	 */
+	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
+		u32 actlr;
+
+		/*
+		 * When Tauros2 is used in an ARMv7 system, the L2
+		 * enable bit is located in the Auxiliary System Control
+		 * Register (which is the only register allowed by the
+		 * ARMv7 spec to contain fine-grained cache control bits).
+		 */
+		actlr = read_actlr();
+		if (!(actlr & 0x00000002)) {
+			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+			write_actlr(actlr | 0x00000002);
+		}
+
+		mode = "ARMv7";
+	}
+#endif
+
+	if (mode == NULL) {
+		printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
+		return;
+	}
+
+	printk(KERN_INFO "Tauros2: L2 cache support initialised "
+			 "in %s mode.\n", mode);
+}
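Note: tauros2_inv_range must not plainly invalidate a partially covered cache line, since the other part of that line may hold live data belonging to an adjacent buffer; partial head and tail lines are cleaned and invalidated instead, and only full lines in between are discarded. The rounding logic in isolation, as a standalone C sketch (the helper names mimic, but are not, the kernel functions):

#include <stdio.h>

#define CACHE_LINE_SIZE 32UL

static void clean_inv_pa(unsigned long pa) { printf("clean+inv %#lx\n", pa); }
static void inv_pa(unsigned long pa)       { printf("inv       %#lx\n", pa); }

static void inv_range(unsigned long start, unsigned long end)
{
        if (start & (CACHE_LINE_SIZE - 1)) {
                clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
                start = (start | (CACHE_LINE_SIZE - 1)) + 1; /* round up */
        }
        if (end & (CACHE_LINE_SIZE - 1)) {
                clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
                end &= ~(CACHE_LINE_SIZE - 1);               /* round down */
        }
        while (start < end) {
                inv_pa(start);          /* full lines: safe to discard */
                start += CACHE_LINE_SIZE;
        }
}

int main(void)
{
        inv_range(0x1010, 0x10f0);      /* misaligned at both ends */
        return 0;
}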
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 8a4abebc478a..c2ff3c599fee 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -72,28 +72,15 @@ ENTRY(v3_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *page, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v3_flush_kern_dcache_page)
-	/* FALLTHROUGH */
-
-/*
- *	dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *
- *	- start	- virtual start address
- *	- end	- virtual end address
- */
-ENTRY(v3_dma_inv_range)
+ENTRY(v3_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -107,18 +94,29 @@ ENTRY(v3_dma_inv_range)
 ENTRY(v3_dma_flush_range)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c0, 0		@ flush ID cache
+	mov	pc, lr
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v3_dma_unmap_area)
+	teq	r2, #DMA_TO_DEVICE
+	bne	v3_dma_flush_range
 	/* FALLTHROUGH */
 
 /*
- *	dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *
- *	- start	- virtual start address
- *	- end	- virtual end address
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
 */
-ENTRY(v3_dma_clean_range)
+ENTRY(v3_dma_map_area)
 	mov	pc, lr
+ENDPROC(v3_dma_unmap_area)
+ENDPROC(v3_dma_map_area)
 
 	__INITDATA
 
@@ -129,8 +127,8 @@ ENTRY(v3_cache_fns)
 	.long	v3_flush_user_cache_range
 	.long	v3_coherent_kern_range
 	.long	v3_coherent_user_range
-	.long	v3_flush_kern_dcache_page
-	.long	v3_dma_inv_range
-	.long	v3_dma_clean_range
+	.long	v3_flush_kern_dcache_area
+	.long	v3_dma_map_area
+	.long	v3_dma_unmap_area
 	.long	v3_dma_flush_range
 	.size	v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 3668611cb400..4810f7e3e813 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -82,28 +82,15 @@ ENTRY(v4_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4_flush_kern_dcache_page)
-	/* FALLTHROUGH */
-
-/*
- *	dma_inv_range(start, end)
- *
- *	Invalidate (discard) the specified virtual address range.
- *	May not write back any entries.  If 'start' or 'end'
- *	are not cache line aligned, those lines must be written
- *	back.
- *
- *	- start	- virtual start address
- *	- end	- virtual end address
- */
-ENTRY(v4_dma_inv_range)
+ENTRY(v4_flush_kern_dcache_area)
 	/* FALLTHROUGH */
 
 /*
@@ -119,18 +106,29 @@ ENTRY(v4_dma_flush_range)
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
 #endif
+	mov	pc, lr
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4_dma_unmap_area)
+	teq	r2, #DMA_TO_DEVICE
+	bne	v4_dma_flush_range
 	/* FALLTHROUGH */
 
 /*
- *	dma_clean_range(start, end)
- *
- *	Clean (write back) the specified virtual address range.
- *
- *	- start	- virtual start address
- *	- end	- virtual end address
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
 */
-ENTRY(v4_dma_clean_range)
+ENTRY(v4_dma_map_area)
 	mov	pc, lr
+ENDPROC(v4_dma_unmap_area)
+ENDPROC(v4_dma_map_area)
 
 	__INITDATA
 
@@ -141,8 +139,8 @@ ENTRY(v4_cache_fns)
 	.long	v4_flush_user_cache_range
 	.long	v4_coherent_kern_range
 	.long	v4_coherent_user_range
-	.long	v4_flush_kern_dcache_page
-	.long	v4_dma_inv_range
-	.long	v4_dma_clean_range
+	.long	v4_flush_kern_dcache_area
+	.long	v4_dma_map_area
+	.long	v4_dma_unmap_area
 	.long	v4_dma_flush_range
 	.size	v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 2ebc1b3bf856..df8368afa102 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -114,15 +114,16 @@ ENTRY(v4wb_flush_user_cache_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wb_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v4wb_flush_kern_dcache_area)
+	add	r1, r0, r1
 	/* fall through */
 
 /*
@@ -172,7 +173,7 @@ ENTRY(v4wb_coherent_user_range)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(v4wb_dma_inv_range)
+v4wb_dma_inv_range:
 	tst	r0, #CACHE_DLINESIZE - 1
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
@@ -193,7 +194,7 @@ ENTRY(v4wb_dma_inv_range)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(v4wb_dma_clean_range)
+v4wb_dma_clean_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHE_DLINESIZE
@@ -215,6 +216,30 @@ ENTRY(v4wb_dma_clean_range)
 	.globl	v4wb_dma_flush_range
 	.set	v4wb_dma_flush_range, v4wb_coherent_kern_range
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wb_dma_map_area)
+	add	r1, r1, r0
+	cmp	r2, #DMA_TO_DEVICE
+	beq	v4wb_dma_clean_range
+	bcs	v4wb_dma_inv_range
+	b	v4wb_dma_flush_range
+ENDPROC(v4wb_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wb_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(v4wb_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v4wb_cache_fns, #object
@@ -224,8 +249,8 @@ ENTRY(v4wb_cache_fns)
 	.long	v4wb_flush_user_cache_range
 	.long	v4wb_coherent_kern_range
 	.long	v4wb_coherent_user_range
-	.long	v4wb_flush_kern_dcache_page
-	.long	v4wb_dma_inv_range
-	.long	v4wb_dma_clean_range
+	.long	v4wb_flush_kern_dcache_area
+	.long	v4wb_dma_map_area
+	.long	v4wb_dma_unmap_area
 	.long	v4wb_dma_flush_range
 	.size	v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index c54fa2cc40e6..45c70312f43b 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -117,17 +117,18 @@ ENTRY(v4wt_coherent_user_range)
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(void *page)
+ *	flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure no D cache aliasing occurs, either with itself or
  *	the I cache
  *
- *	- addr	- page aligned address
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v4wt_flush_kern_dcache_page)
+ENTRY(v4wt_flush_kern_dcache_area)
 	mov	r2, #0
 	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
-	add	r1, r0, #PAGE_SZ
+	add	r1, r0, r1
 	/* fallthrough */
 
 /*
@@ -141,23 +142,12 @@ ENTRY(v4wt_flush_kern_dcache_page)
  *	- start	- virtual start address
  *	- end	- virtual end address
  */
-ENTRY(v4wt_dma_inv_range)
+v4wt_dma_inv_range:
 	bic	r0, r0, #CACHE_DLINESIZE - 1
 1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
 	blo	1b
-	/* FALLTHROUGH */
-
-/*
- *	dma_clean_range(start, end)
- *
- *	Clean the specified virtual address range.
- *
- *	- start	- virtual start address
- *	- end	- virtual end address
- */
-ENTRY(v4wt_dma_clean_range)
 	mov	pc, lr
 
 /*
@@ -171,6 +161,29 @@ ENTRY(v4wt_dma_clean_range)
 	.globl	v4wt_dma_flush_range
 	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
 
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wt_dma_unmap_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_TO_DEVICE
+	bne	v4wt_dma_inv_range
+	/* FALLTHROUGH */
+
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v4wt_dma_map_area)
+	mov	pc, lr
+ENDPROC(v4wt_dma_unmap_area)
+ENDPROC(v4wt_dma_map_area)
+
 	__INITDATA
 
 	.type	v4wt_cache_fns, #object
@@ -180,8 +193,8 @@ ENTRY(v4wt_cache_fns)
 	.long	v4wt_flush_user_cache_range
 	.long	v4wt_coherent_kern_range
 	.long	v4wt_coherent_user_range
-	.long	v4wt_flush_kern_dcache_page
-	.long	v4wt_dma_inv_range
-	.long	v4wt_dma_clean_range
+	.long	v4wt_flush_kern_dcache_area
+	.long	v4wt_dma_map_area
+	.long	v4wt_dma_unmap_area
 	.long	v4wt_dma_flush_range
 	.size	v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 295e25dd6381..e46ecd847138 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -159,15 +159,16 @@ ENDPROC(v6_coherent_user_range)
 ENDPROC(v6_coherent_kern_range)
 
 /*
- *	v6_flush_kern_dcache_page(kaddr)
+ *	v6_flush_kern_dcache_area(void *addr, size_t size)
  *
  *	Ensure that the data held in the page kaddr is written back
  *	to the page in question.
  *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- region size
  */
-ENTRY(v6_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(v6_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
@@ -194,7 +195,7 @@ ENTRY(v6_flush_kern_dcache_page)
  *	- start - virtual start address of region
  *	- end - virtual end address of region
  */
-ENTRY(v6_dma_inv_range)
+v6_dma_inv_range:
 	tst	r0, #D_CACHE_LINE_SIZE - 1
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
@@ -210,6 +211,9 @@ ENTRY(v6_dma_inv_range)
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
+#ifdef CONFIG_SMP
+	str	r0, [r0]			@ write for ownership
+#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
 #else
@@ -227,9 +231,12 @@ ENTRY(v6_dma_inv_range)
  *	- start - virtual start address of region
  *	- end - virtual end address of region
  */
-ENTRY(v6_dma_clean_range)
+v6_dma_clean_range:
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
+#ifdef CONFIG_SMP
+	ldr	r2, [r0]			@ read for ownership
+#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
 #else
@@ -250,6 +257,10 @@ ENTRY(v6_dma_clean_range)
 ENTRY(v6_dma_flush_range)
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 1:
+#ifdef CONFIG_SMP
+	ldr	r2, [r0]			@ read for ownership
+	str	r2, [r0]			@ write for ownership
+#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 #else
@@ -262,6 +273,31 @@ ENTRY(v6_dma_flush_range)
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	mov	pc, lr
 
+/*
+ *	dma_map_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v6_dma_map_area)
+	add	r1, r1, r0
+	teq	r2, #DMA_FROM_DEVICE
+	beq	v6_dma_inv_range
+	teq	r2, #DMA_TO_DEVICE
+	beq	v6_dma_clean_range
+	b	v6_dma_flush_range
+ENDPROC(v6_dma_map_area)
+
+/*
+ *	dma_unmap_area(start, size, dir)
+ *	- start	- kernel virtual start address
+ *	- size	- size of region
+ *	- dir	- DMA direction
+ */
+ENTRY(v6_dma_unmap_area)
+	mov	pc, lr
+ENDPROC(v6_dma_unmap_area)
+
 	__INITDATA
 
 	.type	v6_cache_fns, #object
@@ -271,8 +307,8 @@ ENTRY(v6_cache_fns)
 	.long	v6_flush_user_cache_range
 	.long	v6_coherent_kern_range
 	.long	v6_coherent_user_range
-	.long	v6_flush_kern_dcache_page
-	.long	v6_dma_inv_range
-	.long	v6_dma_clean_range
+	.long	v6_flush_kern_dcache_area
+	.long	v6_dma_map_area
+	.long	v6_dma_unmap_area
 	.long	v6_dma_flush_range
 	.size	v6_cache_fns, . - v6_cache_fns
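Note: the new CONFIG_SMP loads and stores marked "read/write for ownership" pull each line into the executing CPU's cache in the right coherence state before the maintenance instruction runs, so the operation acts on the only live copy of the line. Plain C can only mimic the access pattern, not the coherence effect; a purely conceptual sketch, with hypothetical parameters:

#include <stddef.h>

/* Touch the first word of each line before the (elided) maintenance
 * operation; on a real v6 SMP system the mcr would follow each touch. */
static void dma_flush_range(volatile unsigned long *p, size_t nlines,
                            size_t words_per_line)
{
        for (size_t i = 0; i < nlines; i++) {
                unsigned long v = p[i * words_per_line]; /* read for ownership */
                p[i * words_per_line] = v;               /* write for ownership */
                /* clean & invalidate of this line would go here */
        }
}

int main(void)
{
        static unsigned long buf[64];
        /* 8 lines of 8 words each (32 bytes per line on 32-bit ARM) */
        dma_flush_range(buf, 8, 8);
        return 0;
}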
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index e1bd9759617f..06a90dcfc60a 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -167,7 +167,11 @@ ENTRY(v7_coherent_user_range) | |||
167 | cmp r0, r1 | 167 | cmp r0, r1 |
168 | blo 1b | 168 | blo 1b |
169 | mov r0, #0 | 169 | mov r0, #0 |
170 | #ifdef CONFIG_SMP | ||
171 | mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable | ||
172 | #else | ||
170 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB | 173 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB |
174 | #endif | ||
171 | dsb | 175 | dsb |
172 | isb | 176 | isb |
173 | mov pc, lr | 177 | mov pc, lr |
@@ -186,16 +190,17 @@ ENDPROC(v7_coherent_kern_range) | |||
186 | ENDPROC(v7_coherent_user_range) | 190 | ENDPROC(v7_coherent_user_range) |
187 | 191 | ||
188 | /* | 192 | /* |
189 | * v7_flush_kern_dcache_page(kaddr) | 193 | * v7_flush_kern_dcache_area(void *addr, size_t size) |
190 | * | 194 | * |
191 | * Ensure that the data held in the page kaddr is written back | 195 | * Ensure that the data held in the page kaddr is written back |
192 | * to the page in question. | 196 | * to the page in question. |
193 | * | 197 | * |
194 | * - kaddr - kernel address (guaranteed to be page aligned) | 198 | * - addr - kernel address |
199 | * - size - region size | ||
195 | */ | 200 | */ |
196 | ENTRY(v7_flush_kern_dcache_page) | 201 | ENTRY(v7_flush_kern_dcache_area) |
197 | dcache_line_size r2, r3 | 202 | dcache_line_size r2, r3 |
198 | add r1, r0, #PAGE_SZ | 203 | add r1, r0, r1 |
199 | 1: | 204 | 1: |
200 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line | 205 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line |
201 | add r0, r0, r2 | 206 | add r0, r0, r2 |
@@ -203,7 +208,7 @@ ENTRY(v7_flush_kern_dcache_page) | |||
203 | blo 1b | 208 | blo 1b |
204 | dsb | 209 | dsb |
205 | mov pc, lr | 210 | mov pc, lr |
206 | ENDPROC(v7_flush_kern_dcache_page) | 211 | ENDPROC(v7_flush_kern_dcache_area) |
207 | 212 | ||
208 | /* | 213 | /* |
209 | * v7_dma_inv_range(start,end) | 214 | * v7_dma_inv_range(start,end) |
@@ -215,7 +220,7 @@ ENDPROC(v7_flush_kern_dcache_page) | |||
215 | * - start - virtual start address of region | 220 | * - start - virtual start address of region |
216 | * - end - virtual end address of region | 221 | * - end - virtual end address of region |
217 | */ | 222 | */ |
218 | ENTRY(v7_dma_inv_range) | 223 | v7_dma_inv_range: |
219 | dcache_line_size r2, r3 | 224 | dcache_line_size r2, r3 |
220 | sub r3, r2, #1 | 225 | sub r3, r2, #1 |
221 | tst r0, r3 | 226 | tst r0, r3 |
@@ -239,7 +244,7 @@ ENDPROC(v7_dma_inv_range) | |||
239 | * - start - virtual start address of region | 244 | * - start - virtual start address of region |
240 | * - end - virtual end address of region | 245 | * - end - virtual end address of region |
241 | */ | 246 | */ |
242 | ENTRY(v7_dma_clean_range) | 247 | v7_dma_clean_range: |
243 | dcache_line_size r2, r3 | 248 | dcache_line_size r2, r3 |
244 | sub r3, r2, #1 | 249 | sub r3, r2, #1 |
245 | bic r0, r0, r3 | 250 | bic r0, r0, r3 |
@@ -270,6 +275,32 @@ ENTRY(v7_dma_flush_range) | |||
270 | mov pc, lr | 275 | mov pc, lr |
271 | ENDPROC(v7_dma_flush_range) | 276 | ENDPROC(v7_dma_flush_range) |
272 | 277 | ||
278 | /* | ||
279 | * dma_map_area(start, size, dir) | ||
280 | * - start - kernel virtual start address | ||
281 | * - size - size of region | ||
282 | * - dir - DMA direction | ||
283 | */ | ||
284 | ENTRY(v7_dma_map_area) | ||
285 | add r1, r1, r0 | ||
286 | teq r2, #DMA_FROM_DEVICE | ||
287 | beq v7_dma_inv_range | ||
288 | b v7_dma_clean_range | ||
289 | ENDPROC(v7_dma_map_area) | ||
290 | |||
291 | /* | ||
292 | * dma_unmap_area(start, size, dir) | ||
293 | * - start - kernel virtual start address | ||
294 | * - size - size of region | ||
295 | * - dir - DMA direction | ||
296 | */ | ||
297 | ENTRY(v7_dma_unmap_area) | ||
298 | add r1, r1, r0 | ||
299 | teq r2, #DMA_TO_DEVICE | ||
300 | bne v7_dma_inv_range | ||
301 | mov pc, lr | ||
302 | ENDPROC(v7_dma_unmap_area) | ||
303 | |||
273 | __INITDATA | 304 | __INITDATA |
274 | 305 | ||
275 | .type v7_cache_fns, #object | 306 | .type v7_cache_fns, #object |
@@ -279,8 +310,8 @@ ENTRY(v7_cache_fns) | |||
279 | .long v7_flush_user_cache_range | 310 | .long v7_flush_user_cache_range |
280 | .long v7_coherent_kern_range | 311 | .long v7_coherent_kern_range |
281 | .long v7_coherent_user_range | 312 | .long v7_coherent_user_range |
282 | .long v7_flush_kern_dcache_page | 313 | .long v7_flush_kern_dcache_area |
283 | .long v7_dma_inv_range | 314 | .long v7_dma_map_area |
284 | .long v7_dma_clean_range | 315 | .long v7_dma_unmap_area |
285 | .long v7_dma_flush_range | 316 | .long v7_dma_flush_range |
286 | .size v7_cache_fns, . - v7_cache_fns | 317 | .size v7_cache_fns, . - v7_cache_fns |
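On v7 the split differs from v6 in two ways visible above: map never needs the full flush (clean for TO_DEVICE and BIDIRECTIONAL, invalidate for FROM_DEVICE), and unmap invalidates again for everything except TO_DEVICE, because a v7 core may have speculatively refetched lines while the device owned the buffer. A standalone model of the pairing (illustrative only):

#include <stddef.h>
#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void inv_range(void *s, size_t n)   { printf("invalidate %p, %zu bytes\n", s, n); }
static void clean_range(void *s, size_t n) { printf("clean %p, %zu bytes\n", s, n); }

/* Mirrors v7_dma_map_area: clean for device reads, invalidate for device
 * writes; no bidirectional flush is needed since unmap invalidates anyway. */
static void v7_map(void *start, size_t size, enum dma_data_direction dir)
{
	if (dir == DMA_FROM_DEVICE)
		inv_range(start, size);
	else
		clean_range(start, size);
}

/* Mirrors v7_dma_unmap_area: nothing to do for TO_DEVICE, otherwise
 * invalidate so the CPU rereads what the device wrote. */
static void v7_unmap(void *start, size_t size, enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE)
		inv_range(start, size);
}

int main(void)
{
	char buf[128];
	v7_map(buf, sizeof(buf), DMA_FROM_DEVICE);
	/* ... device writes into buf via DMA ... */
	v7_unmap(buf, sizeof(buf), DMA_FROM_DEVICE);
	return 0;
}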
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 5d180cb0bd94..c3154928bccd 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c | |||
@@ -221,15 +221,14 @@ static int __init xsc3_l2_init(void) | |||
221 | if (!cpu_is_xsc3() || !xsc3_l2_present()) | 221 | if (!cpu_is_xsc3() || !xsc3_l2_present()) |
222 | return 0; | 222 | return 0; |
223 | 223 | ||
224 | if (!(get_cr() & CR_L2)) { | 224 | if (get_cr() & CR_L2) { |
225 | pr_info("XScale3 L2 cache enabled.\n"); | 225 | pr_info("XScale3 L2 cache enabled.\n"); |
226 | adjust_cr(CR_L2, CR_L2); | ||
227 | xsc3_l2_inv_all(); | 226 | xsc3_l2_inv_all(); |
228 | } | ||
229 | 227 | ||
230 | outer_cache.inv_range = xsc3_l2_inv_range; | 228 | outer_cache.inv_range = xsc3_l2_inv_range; |
231 | outer_cache.clean_range = xsc3_l2_clean_range; | 229 | outer_cache.clean_range = xsc3_l2_clean_range; |
232 | outer_cache.flush_range = xsc3_l2_flush_range; | 230 | outer_cache.flush_range = xsc3_l2_flush_range; |
231 | } | ||
233 | 232 | ||
234 | return 0; | 233 | return 0; |
235 | } | 234 | } |
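The xsc3l2 hunk is a behavioural fix: the old code force-enabled the L2 via adjust_cr() when it found it off, then registered the outer-cache hooks unconditionally; the new code never touches the enable bit (turning the L2 on once the MMU is live is presumably left to earlier boot code) and installs the hooks only when CR_L2 is already set. A compilable model of the fixed flow (the CR_L2 bit value and the simplified ops struct are assumptions):

#include <stdio.h>

#define CR_L2 (1u << 26)	/* assumed XScale3 L2-enable control bit */

struct outer_ops { const char *name; };

/* Model of the fixed xsc3_l2_init(): the L2 is never switched on here;
 * maintenance hooks are installed only if it is already enabled. */
static void l2_init(unsigned int control_reg, struct outer_ops *outer)
{
	if (control_reg & CR_L2) {
		printf("XScale3 L2 cache enabled.\n");
		outer->name = "xsc3_l2";	/* install inv/clean/flush hooks */
	}
	/* else: leave the L2 (and the outer_cache hooks) untouched */
}

int main(void)
{
	struct outer_ops outer = { "none" };
	l2_init(CR_L2, &outer);
	printf("outer cache ops: %s\n", outer.name);
	return 0;
}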
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a9e22e31eaa1..b0ee9ba3cfab 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -10,12 +10,17 @@ | |||
10 | #include <linux/init.h> | 10 | #include <linux/init.h> |
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/mm.h> | 12 | #include <linux/mm.h> |
13 | #include <linux/smp.h> | ||
14 | #include <linux/percpu.h> | ||
13 | 15 | ||
14 | #include <asm/mmu_context.h> | 16 | #include <asm/mmu_context.h> |
15 | #include <asm/tlbflush.h> | 17 | #include <asm/tlbflush.h> |
16 | 18 | ||
17 | static DEFINE_SPINLOCK(cpu_asid_lock); | 19 | static DEFINE_SPINLOCK(cpu_asid_lock); |
18 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; | 20 | unsigned int cpu_last_asid = ASID_FIRST_VERSION; |
21 | #ifdef CONFIG_SMP | ||
22 | DEFINE_PER_CPU(struct mm_struct *, current_mm); | ||
23 | #endif | ||
19 | 24 | ||
20 | /* | 25 | /* |
21 | * We fork()ed a process, and we need a new context for the child | 26 | * We fork()ed a process, and we need a new context for the child |
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION; | |||
26 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) | 31 | void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) |
27 | { | 32 | { |
28 | mm->context.id = 0; | 33 | mm->context.id = 0; |
34 | spin_lock_init(&mm->context.id_lock); | ||
29 | } | 35 | } |
30 | 36 | ||
37 | static void flush_context(void) | ||
38 | { | ||
39 | /* set the reserved ASID before flushing the TLB */ | ||
40 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); | ||
41 | isb(); | ||
42 | local_flush_tlb_all(); | ||
43 | if (icache_is_vivt_asid_tagged()) { | ||
44 | __flush_icache_all(); | ||
45 | dsb(); | ||
46 | } | ||
47 | } | ||
48 | |||
49 | #ifdef CONFIG_SMP | ||
50 | |||
51 | static void set_mm_context(struct mm_struct *mm, unsigned int asid) | ||
52 | { | ||
53 | unsigned long flags; | ||
54 | |||
55 | /* | ||
56 | * Locking needed for multi-threaded applications where the | ||
57 | * same mm->context.id could be set from different CPUs during | ||
58 | * the broadcast. This function is also called via IPI so the | ||
59 | * mm->context.id_lock has to be IRQ-safe. | ||
60 | */ | ||
61 | spin_lock_irqsave(&mm->context.id_lock, flags); | ||
62 | if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { | ||
63 | /* | ||
64 | * Old version of ASID found. Set the new one and | ||
65 | * reset mm_cpumask(mm). | ||
66 | */ | ||
67 | mm->context.id = asid; | ||
68 | cpumask_clear(mm_cpumask(mm)); | ||
69 | } | ||
70 | spin_unlock_irqrestore(&mm->context.id_lock, flags); | ||
71 | |||
72 | /* | ||
73 | * Set the mm_cpumask(mm) bit for the current CPU. | ||
74 | */ | ||
75 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
76 | } | ||
77 | |||
78 | /* | ||
79 | * Reset the ASID on the current CPU. This function call is broadcast | ||
80 | * from the CPU handling the ASID rollover and holding cpu_asid_lock. | ||
81 | */ | ||
82 | static void reset_context(void *info) | ||
83 | { | ||
84 | unsigned int asid; | ||
85 | unsigned int cpu = smp_processor_id(); | ||
86 | struct mm_struct *mm = per_cpu(current_mm, cpu); | ||
87 | |||
88 | /* | ||
89 | * Check if a current_mm was set on this CPU as it might still | ||
90 | * be in the early booting stages and using the reserved ASID. | ||
91 | */ | ||
92 | if (!mm) | ||
93 | return; | ||
94 | |||
95 | smp_rmb(); | ||
96 | asid = cpu_last_asid + cpu + 1; | ||
97 | |||
98 | flush_context(); | ||
99 | set_mm_context(mm, asid); | ||
100 | |||
101 | /* set the new ASID */ | ||
102 | asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); | ||
103 | isb(); | ||
104 | } | ||
105 | |||
106 | #else | ||
107 | |||
108 | static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) | ||
109 | { | ||
110 | mm->context.id = asid; | ||
111 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | ||
112 | } | ||
113 | |||
114 | #endif | ||
115 | |||
31 | void __new_context(struct mm_struct *mm) | 116 | void __new_context(struct mm_struct *mm) |
32 | { | 117 | { |
33 | unsigned int asid; | 118 | unsigned int asid; |
34 | 119 | ||
35 | spin_lock(&cpu_asid_lock); | 120 | spin_lock(&cpu_asid_lock); |
121 | #ifdef CONFIG_SMP | ||
122 | /* | ||
123 | * Check the ASID again, in case the change was broadcast from | ||
124 | * another CPU before we acquired the lock. | ||
125 | */ | ||
126 | if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { | ||
127 | cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); | ||
128 | spin_unlock(&cpu_asid_lock); | ||
129 | return; | ||
130 | } | ||
131 | #endif | ||
132 | /* | ||
133 | * At this point, it is guaranteed that the current mm (with | ||
134 | * an old ASID) isn't active on any other CPU since the ASIDs | ||
135 | * are changed simultaneously via IPI. | ||
136 | */ | ||
36 | asid = ++cpu_last_asid; | 137 | asid = ++cpu_last_asid; |
37 | if (asid == 0) | 138 | if (asid == 0) |
38 | asid = cpu_last_asid = ASID_FIRST_VERSION; | 139 | asid = cpu_last_asid = ASID_FIRST_VERSION; |
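The big context.c hunk above is the SMP half of the new ASID allocator: on rollover every CPU receives a reset_context() IPI, and set_mm_context() makes the update idempotent, since several CPUs may race to stamp the same mm. Only a caller that still sees an old-version ASID writes the new one, under the IRQ-safe per-mm lock. A reduced model of that version check (a pthread mutex stands in for the spinlock; illustrative only):

#include <pthread.h>

#define ASID_BITS 8

struct mm_model {
	pthread_mutex_t id_lock;
	unsigned long id;	/* version in the high bits, ASID below */
};

/* Model of set_mm_context(): the first updater whose version test fires
 * installs the new ASID; later racers see a current version and skip. */
static void set_mm_context(struct mm_model *mm,
			   unsigned long cpu_last_asid, unsigned long asid)
{
	pthread_mutex_lock(&mm->id_lock);
	if ((mm->id ^ cpu_last_asid) >> ASID_BITS)	/* old version? */
		mm->id = asid;
	pthread_mutex_unlock(&mm->id_lock);
}

int main(void)
{
	struct mm_model mm = { PTHREAD_MUTEX_INITIALIZER, 0 };
	set_mm_context(&mm, 1UL << ASID_BITS, (1UL << ASID_BITS) + 1);
	return mm.id != (1UL << ASID_BITS) + 1;	/* 0 on success */
}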
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm) | |||
42 | * to start a new version and flush the TLB. | 143 | * to start a new version and flush the TLB. |
43 | */ | 144 | */ |
44 | if (unlikely((asid & ~ASID_MASK) == 0)) { | 145 | if (unlikely((asid & ~ASID_MASK) == 0)) { |
45 | asid = ++cpu_last_asid; | 146 | asid = cpu_last_asid + smp_processor_id() + 1; |
46 | /* set the reserved ASID before flushing the TLB */ | 147 | flush_context(); |
47 | asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" | 148 | #ifdef CONFIG_SMP |
48 | : | 149 | smp_wmb(); |
49 | : "r" (0)); | 150 | smp_call_function(reset_context, NULL, 1); |
50 | isb(); | 151 | #endif |
51 | flush_tlb_all(); | 152 | cpu_last_asid += NR_CPUS; |
52 | if (icache_is_vivt_asid_tagged()) { | ||
53 | __flush_icache_all(); | ||
54 | dsb(); | ||
55 | } | ||
56 | } | 153 | } |
57 | spin_unlock(&cpu_asid_lock); | ||
58 | 154 | ||
59 | cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); | 155 | set_mm_context(mm, asid); |
60 | mm->context.id = asid; | 156 | spin_unlock(&cpu_asid_lock); |
61 | } | 157 | } |
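__new_context() itself now reserves a block of NR_CPUS ASIDs at each rollover, handing cpu_last_asid + cpu + 1 to each processor so no two CPUs can allocate the same ASID during the flush window. A standalone model of the arithmetic (ASID_BITS mirrors the ARM scheme; the TLB flush and IPI are reduced to a printf):

#include <stdio.h>

#define ASID_BITS	8
#define ASID_MASK	(~0UL << ASID_BITS)	/* high bits = version */
#define ASID_FIRST_VERSION (1UL << ASID_BITS)
#define NR_CPUS		4

static unsigned long cpu_last_asid = ASID_FIRST_VERSION;

/* Model of __new_context(): allocate the next ASID, starting a new
 * version (and reserving one ASID per CPU) when the low bits wrap. */
static unsigned long new_asid(int this_cpu)
{
	unsigned long asid = ++cpu_last_asid;

	if ((asid & ~ASID_MASK) == 0) {		/* rolled over */
		asid = cpu_last_asid + this_cpu + 1;
		printf("rollover: flush TLB, broadcast reset_context IPI\n");
		cpu_last_asid += NR_CPUS;
	}
	return asid;
}

int main(void)
{
	for (int i = 0; i < 260; i++)
		new_asid(0);
	printf("last asid %#lx (version %lu)\n",
	       cpu_last_asid, cpu_last_asid >> ASID_BITS);
	return 0;
}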
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 70997d5bee2d..5eb4fd93893d 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c | |||
@@ -68,12 +68,13 @@ feroceon_copy_user_page(void *kto, const void *kfrom) | |||
68 | } | 68 | } |
69 | 69 | ||
70 | void feroceon_copy_user_highpage(struct page *to, struct page *from, | 70 | void feroceon_copy_user_highpage(struct page *to, struct page *from, |
71 | unsigned long vaddr) | 71 | unsigned long vaddr, struct vm_area_struct *vma) |
72 | { | 72 | { |
73 | void *kto, *kfrom; | 73 | void *kto, *kfrom; |
74 | 74 | ||
75 | kto = kmap_atomic(to, KM_USER0); | 75 | kto = kmap_atomic(to, KM_USER0); |
76 | kfrom = kmap_atomic(from, KM_USER1); | 76 | kfrom = kmap_atomic(from, KM_USER1); |
77 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | ||
77 | feroceon_copy_user_page(kto, kfrom); | 78 | feroceon_copy_user_page(kto, kfrom); |
78 | kunmap_atomic(kfrom, KM_USER1); | 79 | kunmap_atomic(kfrom, KM_USER1); |
79 | kunmap_atomic(kto, KM_USER0); | 80 | kunmap_atomic(kto, KM_USER0); |
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index de9c06854ad7..f72303e1d804 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c | |||
@@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom) | |||
38 | } | 38 | } |
39 | 39 | ||
40 | void v3_copy_user_highpage(struct page *to, struct page *from, | 40 | void v3_copy_user_highpage(struct page *to, struct page *from, |
41 | unsigned long vaddr) | 41 | unsigned long vaddr, struct vm_area_struct *vma) |
42 | { | 42 | { |
43 | void *kto, *kfrom; | 43 | void *kto, *kfrom; |
44 | 44 | ||
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7370a7142b04..598c51ad5071 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
@@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to) | |||
69 | } | 69 | } |
70 | 70 | ||
71 | void v4_mc_copy_user_highpage(struct page *to, struct page *from, | 71 | void v4_mc_copy_user_highpage(struct page *to, struct page *from, |
72 | unsigned long vaddr) | 72 | unsigned long vaddr, struct vm_area_struct *vma) |
73 | { | 73 | { |
74 | void *kto = kmap_atomic(to, KM_USER1); | 74 | void *kto = kmap_atomic(to, KM_USER1); |
75 | 75 | ||
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9ab098414227..7c2eb55cd4a9 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c | |||
@@ -48,12 +48,13 @@ v4wb_copy_user_page(void *kto, const void *kfrom) | |||
48 | } | 48 | } |
49 | 49 | ||
50 | void v4wb_copy_user_highpage(struct page *to, struct page *from, | 50 | void v4wb_copy_user_highpage(struct page *to, struct page *from, |
51 | unsigned long vaddr) | 51 | unsigned long vaddr, struct vm_area_struct *vma) |
52 | { | 52 | { |
53 | void *kto, *kfrom; | 53 | void *kto, *kfrom; |
54 | 54 | ||
55 | kto = kmap_atomic(to, KM_USER0); | 55 | kto = kmap_atomic(to, KM_USER0); |
56 | kfrom = kmap_atomic(from, KM_USER1); | 56 | kfrom = kmap_atomic(from, KM_USER1); |
57 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | ||
57 | v4wb_copy_user_page(kto, kfrom); | 58 | v4wb_copy_user_page(kto, kfrom); |
58 | kunmap_atomic(kfrom, KM_USER1); | 59 | kunmap_atomic(kfrom, KM_USER1); |
59 | kunmap_atomic(kto, KM_USER0); | 60 | kunmap_atomic(kto, KM_USER0); |
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 300efafd6643..172e6a55458e 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c | |||
@@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) | |||
44 | } | 44 | } |
45 | 45 | ||
46 | void v4wt_copy_user_highpage(struct page *to, struct page *from, | 46 | void v4wt_copy_user_highpage(struct page *to, struct page *from, |
47 | unsigned long vaddr) | 47 | unsigned long vaddr, struct vm_area_struct *vma) |
48 | { | 48 | { |
49 | void *kto, *kfrom; | 49 | void *kto, *kfrom; |
50 | 50 | ||
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 4127a7bddfe5..f55fa1044f72 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -34,13 +34,14 @@ static DEFINE_SPINLOCK(v6_lock); | |||
34 | * attack the kernel's existing mapping of these pages. | 34 | * attack the kernel's existing mapping of these pages. |
35 | */ | 35 | */ |
36 | static void v6_copy_user_highpage_nonaliasing(struct page *to, | 36 | static void v6_copy_user_highpage_nonaliasing(struct page *to, |
37 | struct page *from, unsigned long vaddr) | 37 | struct page *from, unsigned long vaddr, struct vm_area_struct *vma) |
38 | { | 38 | { |
39 | void *kto, *kfrom; | 39 | void *kto, *kfrom; |
40 | 40 | ||
41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to, KM_USER1); |
43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
44 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); | ||
44 | kunmap_atomic(kto, KM_USER1); | 45 | kunmap_atomic(kto, KM_USER1); |
45 | kunmap_atomic(kfrom, KM_USER0); | 46 | kunmap_atomic(kfrom, KM_USER0); |
46 | } | 47 | } |
@@ -73,7 +74,7 @@ static void discard_old_kernel_data(void *kto) | |||
73 | * Copy the page, taking account of the cache colour. | 74 | * Copy the page, taking account of the cache colour. |
74 | */ | 75 | */ |
75 | static void v6_copy_user_highpage_aliasing(struct page *to, | 76 | static void v6_copy_user_highpage_aliasing(struct page *to, |
76 | struct page *from, unsigned long vaddr) | 77 | struct page *from, unsigned long vaddr, struct vm_area_struct *vma) |
77 | { | 78 | { |
78 | unsigned int offset = CACHE_COLOUR(vaddr); | 79 | unsigned int offset = CACHE_COLOUR(vaddr); |
79 | unsigned long kfrom, kto; | 80 | unsigned long kfrom, kto; |
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index bc4525f5ab23..747ad4140fc7 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c | |||
@@ -71,12 +71,13 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) | |||
71 | } | 71 | } |
72 | 72 | ||
73 | void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, | 73 | void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, |
74 | unsigned long vaddr) | 74 | unsigned long vaddr, struct vm_area_struct *vma) |
75 | { | 75 | { |
76 | void *kto, *kfrom; | 76 | void *kto, *kfrom; |
77 | 77 | ||
78 | kto = kmap_atomic(to, KM_USER0); | 78 | kto = kmap_atomic(to, KM_USER0); |
79 | kfrom = kmap_atomic(from, KM_USER1); | 79 | kfrom = kmap_atomic(from, KM_USER1); |
80 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | ||
80 | xsc3_mc_copy_user_page(kto, kfrom); | 81 | xsc3_mc_copy_user_page(kto, kfrom); |
81 | kunmap_atomic(kfrom, KM_USER1); | 82 | kunmap_atomic(kfrom, KM_USER1); |
82 | kunmap_atomic(kto, KM_USER0); | 83 | kunmap_atomic(kto, KM_USER0); |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 76824d3e966a..9920c0ae2096 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
@@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to) | |||
91 | } | 91 | } |
92 | 92 | ||
93 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | 93 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, |
94 | unsigned long vaddr) | 94 | unsigned long vaddr, struct vm_area_struct *vma) |
95 | { | 95 | { |
96 | void *kto = kmap_atomic(to, KM_USER1); | 96 | void *kto = kmap_atomic(to, KM_USER1); |
97 | 97 | ||
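All the copypage implementations above gain a vma argument; the write-back ones (feroceon, v4wb, xsc3) use it to flush_cache_page() the user alias of the source before copying, since on a VIVT cache dirty lines at the user address are invisible through the kernel alias the copy reads from (the v6 non-aliasing variant instead flushes the freshly written destination with __cpuc_flush_dcache_area()). The recurring shape, condensed (kernel-style sketch with a hypothetical cpu_ prefix, not compilable on its own):

/*
 * Shared shape of the fixed copy_user_highpage() helpers. 'vma' is the
 * user mapping of 'from'; flushing it first pushes any dirty VIVT cache
 * lines to RAM so the kernel-alias copy below reads current data.
 */
void cpu_copy_user_highpage(struct page *to, struct page *from,
			    unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto   = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	flush_cache_page(vma, vaddr, page_to_pfn(from));	/* new in this series */
	cpu_copy_user_page(kto, kfrom);				/* per-CPU copy loop */

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}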
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index b9590a7085ca..13fa536d82e6 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -11,7 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <linux/slab.h> | 14 | #include <linux/gfp.h> |
15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
@@ -29,9 +29,6 @@ | |||
29 | #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" | 29 | #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #define CONSISTENT_END (0xffe00000) | ||
33 | #define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) | ||
34 | |||
35 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) | 32 | #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) |
36 | #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) | 33 | #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) |
37 | #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) | 34 | #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) |
@@ -63,194 +60,152 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
63 | return mask; | 60 | return mask; |
64 | } | 61 | } |
65 | 62 | ||
66 | #ifdef CONFIG_MMU | ||
67 | /* | 63 | /* |
68 | * These are the page tables (2MB each) covering uncached, DMA consistent allocations | 64 | * Allocate a DMA buffer for 'dev' of size 'size' using the |
65 | * specified gfp mask. Note that 'size' must be page aligned. | ||
69 | */ | 66 | */ |
70 | static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; | 67 | static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp) |
71 | static DEFINE_SPINLOCK(consistent_lock); | 68 | { |
69 | unsigned long order = get_order(size); | ||
70 | struct page *page, *p, *e; | ||
71 | void *ptr; | ||
72 | u64 mask = get_coherent_dma_mask(dev); | ||
72 | 73 | ||
73 | /* | 74 | #ifdef CONFIG_DMA_API_DEBUG |
74 | * VM region handling support. | 75 | u64 limit = (mask + 1) & ~mask; |
75 | * | 76 | if (limit && size >= limit) { |
76 | * This should become something generic, handling VM region allocations for | 77 | dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n", |
77 | * vmalloc and similar (ioremap, module space, etc). | 78 | size, mask); |
78 | * | 79 | return NULL; |
79 | * I envisage vmalloc()'s supporting vm_struct becoming: | 80 | } |
80 | * | 81 | #endif |
81 | * struct vm_struct { | ||
82 | * struct vm_region region; | ||
83 | * unsigned long flags; | ||
84 | * struct page **pages; | ||
85 | * unsigned int nr_pages; | ||
86 | * unsigned long phys_addr; | ||
87 | * }; | ||
88 | * | ||
89 | * get_vm_area() would then call vm_region_alloc with an appropriate | ||
90 | * struct vm_region head (eg): | ||
91 | * | ||
92 | * struct vm_region vmalloc_head = { | ||
93 | * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), | ||
94 | * .vm_start = VMALLOC_START, | ||
95 | * .vm_end = VMALLOC_END, | ||
96 | * }; | ||
97 | * | ||
98 | * However, vmalloc_head.vm_start is variable (typically, it is dependent on | ||
99 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | ||
100 | * would have to initialise this each time prior to calling vm_region_alloc(). | ||
101 | */ | ||
102 | struct arm_vm_region { | ||
103 | struct list_head vm_list; | ||
104 | unsigned long vm_start; | ||
105 | unsigned long vm_end; | ||
106 | struct page *vm_pages; | ||
107 | int vm_active; | ||
108 | }; | ||
109 | 82 | ||
110 | static struct arm_vm_region consistent_head = { | 83 | if (!mask) |
111 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), | 84 | return NULL; |
112 | .vm_start = CONSISTENT_BASE, | ||
113 | .vm_end = CONSISTENT_END, | ||
114 | }; | ||
115 | 85 | ||
116 | static struct arm_vm_region * | 86 | if (mask < 0xffffffffULL) |
117 | arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp) | 87 | gfp |= GFP_DMA; |
118 | { | 88 | |
119 | unsigned long addr = head->vm_start, end = head->vm_end - size; | 89 | page = alloc_pages(gfp, order); |
120 | unsigned long flags; | 90 | if (!page) |
121 | struct arm_vm_region *c, *new; | 91 | return NULL; |
122 | |||
123 | new = kmalloc(sizeof(struct arm_vm_region), gfp); | ||
124 | if (!new) | ||
125 | goto out; | ||
126 | |||
127 | spin_lock_irqsave(&consistent_lock, flags); | ||
128 | |||
129 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
130 | if ((addr + size) < addr) | ||
131 | goto nospc; | ||
132 | if ((addr + size) <= c->vm_start) | ||
133 | goto found; | ||
134 | addr = c->vm_end; | ||
135 | if (addr > end) | ||
136 | goto nospc; | ||
137 | } | ||
138 | 92 | ||
139 | found: | ||
140 | /* | 93 | /* |
141 | * Insert this entry _before_ the one we found. | 94 | * Now split the huge page and free the excess pages |
142 | */ | 95 | */ |
143 | list_add_tail(&new->vm_list, &c->vm_list); | 96 | split_page(page, order); |
144 | new->vm_start = addr; | 97 | for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) |
145 | new->vm_end = addr + size; | 98 | __free_page(p); |
146 | new->vm_active = 1; | 99 | |
147 | 100 | /* | |
148 | spin_unlock_irqrestore(&consistent_lock, flags); | 101 | * Ensure that the allocated pages are zeroed, and that any data |
149 | return new; | 102 | * lurking in the kernel direct-mapped region is invalidated. |
150 | 103 | */ | |
151 | nospc: | 104 | ptr = page_address(page); |
152 | spin_unlock_irqrestore(&consistent_lock, flags); | 105 | memset(ptr, 0, size); |
153 | kfree(new); | 106 | dmac_flush_range(ptr, ptr + size); |
154 | out: | 107 | outer_flush_range(__pa(ptr), __pa(ptr) + size); |
155 | return NULL; | 108 | |
109 | return page; | ||
156 | } | 110 | } |
157 | 111 | ||
158 | static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr) | 112 | /* |
113 | * Free a DMA buffer. 'size' must be page aligned. | ||
114 | */ | ||
115 | static void __dma_free_buffer(struct page *page, size_t size) | ||
159 | { | 116 | { |
160 | struct arm_vm_region *c; | 117 | struct page *e = page + (size >> PAGE_SHIFT); |
161 | 118 | ||
162 | list_for_each_entry(c, &head->vm_list, vm_list) { | 119 | while (page < e) { |
163 | if (c->vm_active && c->vm_start == addr) | 120 | __free_page(page); |
164 | goto out; | 121 | page++; |
165 | } | 122 | } |
166 | c = NULL; | ||
167 | out: | ||
168 | return c; | ||
169 | } | 123 | } |
170 | 124 | ||
125 | #ifdef CONFIG_MMU | ||
126 | /* | ||
127 | * These are the page tables (2MB each) covering uncached, DMA consistent allocations | ||
128 | */ | ||
129 | static pte_t *consistent_pte[NUM_CONSISTENT_PTES]; | ||
130 | |||
131 | #include "vmregion.h" | ||
132 | |||
133 | static struct arm_vmregion_head consistent_head = { | ||
134 | .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock), | ||
135 | .vm_list = LIST_HEAD_INIT(consistent_head.vm_list), | ||
136 | .vm_start = CONSISTENT_BASE, | ||
137 | .vm_end = CONSISTENT_END, | ||
138 | }; | ||
139 | |||
171 | #ifdef CONFIG_HUGETLB_PAGE | 140 | #ifdef CONFIG_HUGETLB_PAGE |
172 | #error ARM Coherent DMA allocator does not (yet) support huge TLB | 141 | #error ARM Coherent DMA allocator does not (yet) support huge TLB |
173 | #endif | 142 | #endif |
174 | 143 | ||
175 | static void * | 144 | /* |
176 | __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | 145 | * Initialise the consistent memory allocation. |
177 | pgprot_t prot) | 146 | */ |
147 | static int __init consistent_init(void) | ||
178 | { | 148 | { |
179 | struct page *page; | 149 | int ret = 0; |
180 | struct arm_vm_region *c; | 150 | pgd_t *pgd; |
181 | unsigned long order; | 151 | pmd_t *pmd; |
182 | u64 mask = get_coherent_dma_mask(dev); | 152 | pte_t *pte; |
183 | u64 limit; | 153 | int i = 0; |
154 | u32 base = CONSISTENT_BASE; | ||
184 | 155 | ||
185 | if (!consistent_pte[0]) { | 156 | do { |
186 | printk(KERN_ERR "%s: not initialised\n", __func__); | 157 | pgd = pgd_offset(&init_mm, base); |
187 | dump_stack(); | 158 | pmd = pmd_alloc(&init_mm, pgd, base); |
188 | return NULL; | 159 | if (!pmd) { |
189 | } | 160 | printk(KERN_ERR "%s: no pmd tables\n", __func__); |
161 | ret = -ENOMEM; | ||
162 | break; | ||
163 | } | ||
164 | WARN_ON(!pmd_none(*pmd)); | ||
190 | 165 | ||
191 | if (!mask) | 166 | pte = pte_alloc_kernel(pmd, base); |
192 | goto no_page; | 167 | if (!pte) { |
168 | printk(KERN_ERR "%s: no pte tables\n", __func__); | ||
169 | ret = -ENOMEM; | ||
170 | break; | ||
171 | } | ||
193 | 172 | ||
194 | /* | 173 | consistent_pte[i++] = pte; |
195 | * Sanity check the allocation size. | 174 | base += (1 << PGDIR_SHIFT); |
196 | */ | 175 | } while (base < CONSISTENT_END); |
197 | size = PAGE_ALIGN(size); | ||
198 | limit = (mask + 1) & ~mask; | ||
199 | if ((limit && size >= limit) || | ||
200 | size >= (CONSISTENT_END - CONSISTENT_BASE)) { | ||
201 | printk(KERN_WARNING "coherent allocation too big " | ||
202 | "(requested %#x mask %#llx)\n", size, mask); | ||
203 | goto no_page; | ||
204 | } | ||
205 | 176 | ||
206 | order = get_order(size); | 177 | return ret; |
178 | } | ||
207 | 179 | ||
208 | if (mask < 0xffffffffULL) | 180 | core_initcall(consistent_init); |
209 | gfp |= GFP_DMA; | ||
210 | 181 | ||
211 | page = alloc_pages(gfp, order); | 182 | static void * |
212 | if (!page) | 183 | __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) |
213 | goto no_page; | 184 | { |
185 | struct arm_vmregion *c; | ||
214 | 186 | ||
215 | /* | 187 | if (!consistent_pte[0]) { |
216 | * Invalidate any data that might be lurking in the | 188 | printk(KERN_ERR "%s: not initialised\n", __func__); |
217 | * kernel direct-mapped region for device DMA. | 189 | dump_stack(); |
218 | */ | 190 | return NULL; |
219 | { | ||
220 | void *ptr = page_address(page); | ||
221 | memset(ptr, 0, size); | ||
222 | dmac_flush_range(ptr, ptr + size); | ||
223 | outer_flush_range(__pa(ptr), __pa(ptr) + size); | ||
224 | } | 191 | } |
225 | 192 | ||
226 | /* | 193 | /* |
227 | * Allocate a virtual address in the consistent mapping region. | 194 | * Allocate a virtual address in the consistent mapping region. |
228 | */ | 195 | */ |
229 | c = arm_vm_region_alloc(&consistent_head, size, | 196 | c = arm_vmregion_alloc(&consistent_head, size, |
230 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); | 197 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); |
231 | if (c) { | 198 | if (c) { |
232 | pte_t *pte; | 199 | pte_t *pte; |
233 | struct page *end = page + (1 << order); | ||
234 | int idx = CONSISTENT_PTE_INDEX(c->vm_start); | 200 | int idx = CONSISTENT_PTE_INDEX(c->vm_start); |
235 | u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); | 201 | u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); |
236 | 202 | ||
237 | pte = consistent_pte[idx] + off; | 203 | pte = consistent_pte[idx] + off; |
238 | c->vm_pages = page; | 204 | c->vm_pages = page; |
239 | 205 | ||
240 | split_page(page, order); | ||
241 | |||
242 | /* | ||
243 | * Set the "dma handle" | ||
244 | */ | ||
245 | *handle = page_to_dma(dev, page); | ||
246 | |||
247 | do { | 206 | do { |
248 | BUG_ON(!pte_none(*pte)); | 207 | BUG_ON(!pte_none(*pte)); |
249 | 208 | ||
250 | /* | ||
251 | * x86 does not mark the pages reserved... | ||
252 | */ | ||
253 | SetPageReserved(page); | ||
254 | set_pte_ext(pte, mk_pte(page, prot), 0); | 209 | set_pte_ext(pte, mk_pte(page, prot), 0); |
255 | page++; | 210 | page++; |
256 | pte++; | 211 | pte++; |
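The replacement __dma_alloc_buffer() above drops all vm-region bookkeeping from the allocation step: it allocates a power-of-two block, split_page()s it so the pages become individually freeable, returns the tail beyond the requested size immediately, then zeroes and flushes the rest out of the direct map. A userspace model of just the size/order arithmetic (get_order reimplemented; illustrative):

#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Smallest order such that (1 << order) pages cover 'size' bytes. */
static unsigned long get_order(size_t size)
{
	unsigned long order = 0;
	size_t pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	size_t size = 5UL << PAGE_SHIFT;	/* 5 pages requested */
	unsigned long order = get_order(size);	/* -> order 3 = 8 pages */
	size_t used = size >> PAGE_SHIFT;
	size_t total = 1UL << order;

	/* split_page() makes the 8 pages individually freeable, then the
	 * tail (pages 5..7 here) goes straight back to the allocator. */
	printf("order %lu: keep %zu pages, free %zu tail pages\n",
	       order, used, total - used);
	return 0;
}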
@@ -261,48 +216,90 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
261 | } | 216 | } |
262 | } while (size -= PAGE_SIZE); | 217 | } while (size -= PAGE_SIZE); |
263 | 218 | ||
264 | /* | ||
265 | * Free the otherwise unused pages. | ||
266 | */ | ||
267 | while (page < end) { | ||
268 | __free_page(page); | ||
269 | page++; | ||
270 | } | ||
271 | |||
272 | return (void *)c->vm_start; | 219 | return (void *)c->vm_start; |
273 | } | 220 | } |
274 | |||
275 | if (page) | ||
276 | __free_pages(page, order); | ||
277 | no_page: | ||
278 | *handle = ~0; | ||
279 | return NULL; | 221 | return NULL; |
280 | } | 222 | } |
223 | |||
224 | static void __dma_free_remap(void *cpu_addr, size_t size) | ||
225 | { | ||
226 | struct arm_vmregion *c; | ||
227 | unsigned long addr; | ||
228 | pte_t *ptep; | ||
229 | int idx; | ||
230 | u32 off; | ||
231 | |||
232 | c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); | ||
233 | if (!c) { | ||
234 | printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", | ||
235 | __func__, cpu_addr); | ||
236 | dump_stack(); | ||
237 | return; | ||
238 | } | ||
239 | |||
240 | if ((c->vm_end - c->vm_start) != size) { | ||
241 | printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", | ||
242 | __func__, c->vm_end - c->vm_start, size); | ||
243 | dump_stack(); | ||
244 | size = c->vm_end - c->vm_start; | ||
245 | } | ||
246 | |||
247 | idx = CONSISTENT_PTE_INDEX(c->vm_start); | ||
248 | off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); | ||
249 | ptep = consistent_pte[idx] + off; | ||
250 | addr = c->vm_start; | ||
251 | do { | ||
252 | pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); | ||
253 | |||
254 | ptep++; | ||
255 | addr += PAGE_SIZE; | ||
256 | off++; | ||
257 | if (off >= PTRS_PER_PTE) { | ||
258 | off = 0; | ||
259 | ptep = consistent_pte[++idx]; | ||
260 | } | ||
261 | |||
262 | if (pte_none(pte) || !pte_present(pte)) | ||
263 | printk(KERN_CRIT "%s: bad page in kernel page table\n", | ||
264 | __func__); | ||
265 | } while (size -= PAGE_SIZE); | ||
266 | |||
267 | flush_tlb_kernel_range(c->vm_start, c->vm_end); | ||
268 | |||
269 | arm_vmregion_free(&consistent_head, c); | ||
270 | } | ||
271 | |||
281 | #else /* !CONFIG_MMU */ | 272 | #else /* !CONFIG_MMU */ |
273 | |||
274 | #define __dma_alloc_remap(page, size, gfp, prot) page_address(page) | ||
275 | #define __dma_free_remap(addr, size) do { } while (0) | ||
276 | |||
277 | #endif /* CONFIG_MMU */ | ||
278 | |||
282 | static void * | 279 | static void * |
283 | __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | 280 | __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, |
284 | pgprot_t prot) | 281 | pgprot_t prot) |
285 | { | 282 | { |
286 | void *virt; | 283 | struct page *page; |
287 | u64 mask = get_coherent_dma_mask(dev); | 284 | void *addr; |
288 | 285 | ||
289 | if (!mask) | 286 | *handle = ~0; |
290 | goto error; | 287 | size = PAGE_ALIGN(size); |
291 | 288 | ||
292 | if (mask < 0xffffffffULL) | 289 | page = __dma_alloc_buffer(dev, size, gfp); |
293 | gfp |= GFP_DMA; | 290 | if (!page) |
294 | virt = kmalloc(size, gfp); | 291 | return NULL; |
295 | if (!virt) | ||
296 | goto error; | ||
297 | 292 | ||
298 | *handle = virt_to_dma(dev, virt); | 293 | if (!arch_is_coherent()) |
299 | return virt; | 294 | addr = __dma_alloc_remap(page, size, gfp, prot); |
295 | else | ||
296 | addr = page_address(page); | ||
300 | 297 | ||
301 | error: | 298 | if (addr) |
302 | *handle = ~0; | 299 | *handle = page_to_dma(dev, page); |
303 | return NULL; | 300 | |
301 | return addr; | ||
304 | } | 302 | } |
305 | #endif /* CONFIG_MMU */ | ||
306 | 303 | ||
307 | /* | 304 | /* |
308 | * Allocate DMA-coherent memory space and return both the kernel remapped | 305 | * Allocate DMA-coherent memory space and return both the kernel remapped |
@@ -316,19 +313,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf | |||
316 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) | 313 | if (dma_alloc_from_coherent(dev, size, handle, &memory)) |
317 | return memory; | 314 | return memory; |
318 | 315 | ||
319 | if (arch_is_coherent()) { | ||
320 | void *virt; | ||
321 | |||
322 | virt = kmalloc(size, gfp); | ||
323 | if (!virt) | ||
324 | return NULL; | ||
325 | *handle = virt_to_dma(dev, virt); | ||
326 | |||
327 | return virt; | ||
328 | } | ||
329 | |||
330 | return __dma_alloc(dev, size, handle, gfp, | 316 | return __dma_alloc(dev, size, handle, gfp, |
331 | pgprot_noncached(pgprot_kernel)); | 317 | pgprot_dmacoherent(pgprot_kernel)); |
332 | } | 318 | } |
333 | EXPORT_SYMBOL(dma_alloc_coherent); | 319 | EXPORT_SYMBOL(dma_alloc_coherent); |
334 | 320 | ||
@@ -349,15 +335,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
349 | { | 335 | { |
350 | int ret = -ENXIO; | 336 | int ret = -ENXIO; |
351 | #ifdef CONFIG_MMU | 337 | #ifdef CONFIG_MMU |
352 | unsigned long flags, user_size, kern_size; | 338 | unsigned long user_size, kern_size; |
353 | struct arm_vm_region *c; | 339 | struct arm_vmregion *c; |
354 | 340 | ||
355 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 341 | user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
356 | 342 | ||
357 | spin_lock_irqsave(&consistent_lock, flags); | 343 | c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); |
358 | c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); | ||
359 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
360 | |||
361 | if (c) { | 344 | if (c) { |
362 | unsigned long off = vma->vm_pgoff; | 345 | unsigned long off = vma->vm_pgoff; |
363 | 346 | ||
@@ -379,7 +362,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma, | |||
379 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | 362 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, |
380 | void *cpu_addr, dma_addr_t dma_addr, size_t size) | 363 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
381 | { | 364 | { |
382 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | 365 | vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot); |
383 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); | 366 | return dma_mmap(dev, vma, cpu_addr, dma_addr, size); |
384 | } | 367 | } |
385 | EXPORT_SYMBOL(dma_mmap_coherent); | 368 | EXPORT_SYMBOL(dma_mmap_coherent); |
@@ -396,221 +379,66 @@ EXPORT_SYMBOL(dma_mmap_writecombine); | |||
396 | * free a page as defined by the above mapping. | 379 | * free a page as defined by the above mapping. |
397 | * Must not be called with IRQs disabled. | 380 | * Must not be called with IRQs disabled. |
398 | */ | 381 | */ |
399 | #ifdef CONFIG_MMU | ||
400 | void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) | 382 | void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) |
401 | { | 383 | { |
402 | struct arm_vm_region *c; | ||
403 | unsigned long flags, addr; | ||
404 | pte_t *ptep; | ||
405 | int idx; | ||
406 | u32 off; | ||
407 | |||
408 | WARN_ON(irqs_disabled()); | 384 | WARN_ON(irqs_disabled()); |
409 | 385 | ||
410 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) | 386 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) |
411 | return; | 387 | return; |
412 | 388 | ||
413 | if (arch_is_coherent()) { | ||
414 | kfree(cpu_addr); | ||
415 | return; | ||
416 | } | ||
417 | |||
418 | size = PAGE_ALIGN(size); | 389 | size = PAGE_ALIGN(size); |
419 | 390 | ||
420 | spin_lock_irqsave(&consistent_lock, flags); | 391 | if (!arch_is_coherent()) |
421 | c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr); | 392 | __dma_free_remap(cpu_addr, size); |
422 | if (!c) | ||
423 | goto no_area; | ||
424 | |||
425 | c->vm_active = 0; | ||
426 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
427 | |||
428 | if ((c->vm_end - c->vm_start) != size) { | ||
429 | printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", | ||
430 | __func__, c->vm_end - c->vm_start, size); | ||
431 | dump_stack(); | ||
432 | size = c->vm_end - c->vm_start; | ||
433 | } | ||
434 | |||
435 | idx = CONSISTENT_PTE_INDEX(c->vm_start); | ||
436 | off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); | ||
437 | ptep = consistent_pte[idx] + off; | ||
438 | addr = c->vm_start; | ||
439 | do { | ||
440 | pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep); | ||
441 | unsigned long pfn; | ||
442 | |||
443 | ptep++; | ||
444 | addr += PAGE_SIZE; | ||
445 | off++; | ||
446 | if (off >= PTRS_PER_PTE) { | ||
447 | off = 0; | ||
448 | ptep = consistent_pte[++idx]; | ||
449 | } | ||
450 | |||
451 | if (!pte_none(pte) && pte_present(pte)) { | ||
452 | pfn = pte_pfn(pte); | ||
453 | |||
454 | if (pfn_valid(pfn)) { | ||
455 | struct page *page = pfn_to_page(pfn); | ||
456 | |||
457 | /* | ||
458 | * x86 does not mark the pages reserved... | ||
459 | */ | ||
460 | ClearPageReserved(page); | ||
461 | |||
462 | __free_page(page); | ||
463 | continue; | ||
464 | } | ||
465 | } | ||
466 | |||
467 | printk(KERN_CRIT "%s: bad page in kernel page table\n", | ||
468 | __func__); | ||
469 | } while (size -= PAGE_SIZE); | ||
470 | |||
471 | flush_tlb_kernel_range(c->vm_start, c->vm_end); | ||
472 | 393 | ||
473 | spin_lock_irqsave(&consistent_lock, flags); | 394 | __dma_free_buffer(dma_to_page(dev, handle), size); |
474 | list_del(&c->vm_list); | ||
475 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
476 | |||
477 | kfree(c); | ||
478 | return; | ||
479 | |||
480 | no_area: | ||
481 | spin_unlock_irqrestore(&consistent_lock, flags); | ||
482 | printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", | ||
483 | __func__, cpu_addr); | ||
484 | dump_stack(); | ||
485 | } | ||
486 | #else /* !CONFIG_MMU */ | ||
487 | void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) | ||
488 | { | ||
489 | if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) | ||
490 | return; | ||
491 | kfree(cpu_addr); | ||
492 | } | 395 | } |
493 | #endif /* CONFIG_MMU */ | ||
494 | EXPORT_SYMBOL(dma_free_coherent); | 396 | EXPORT_SYMBOL(dma_free_coherent); |
495 | 397 | ||
496 | /* | 398 | /* |
497 | * Initialise the consistent memory allocation. | ||
498 | */ | ||
499 | static int __init consistent_init(void) | ||
500 | { | ||
501 | int ret = 0; | ||
502 | #ifdef CONFIG_MMU | ||
503 | pgd_t *pgd; | ||
504 | pmd_t *pmd; | ||
505 | pte_t *pte; | ||
506 | int i = 0; | ||
507 | u32 base = CONSISTENT_BASE; | ||
508 | |||
509 | do { | ||
510 | pgd = pgd_offset(&init_mm, base); | ||
511 | pmd = pmd_alloc(&init_mm, pgd, base); | ||
512 | if (!pmd) { | ||
513 | printk(KERN_ERR "%s: no pmd tables\n", __func__); | ||
514 | ret = -ENOMEM; | ||
515 | break; | ||
516 | } | ||
517 | WARN_ON(!pmd_none(*pmd)); | ||
518 | |||
519 | pte = pte_alloc_kernel(pmd, base); | ||
520 | if (!pte) { | ||
521 | printk(KERN_ERR "%s: no pte tables\n", __func__); | ||
522 | ret = -ENOMEM; | ||
523 | break; | ||
524 | } | ||
525 | |||
526 | consistent_pte[i++] = pte; | ||
527 | base += (1 << PGDIR_SHIFT); | ||
528 | } while (base < CONSISTENT_END); | ||
529 | #endif /* !CONFIG_MMU */ | ||
530 | |||
531 | return ret; | ||
532 | } | ||
533 | |||
534 | core_initcall(consistent_init); | ||
535 | |||
536 | /* | ||
537 | * Make an area consistent for devices. | 399 | * Make an area consistent for devices. |
538 | * Note: Drivers should NOT use this function directly, as it will break | 400 | * Note: Drivers should NOT use this function directly, as it will break |
539 | * platforms with CONFIG_DMABOUNCE. | 401 | * platforms with CONFIG_DMABOUNCE. |
540 | * Use the driver DMA support - see dma-mapping.h (dma_sync_*) | 402 | * Use the driver DMA support - see dma-mapping.h (dma_sync_*) |
541 | */ | 403 | */ |
542 | void dma_cache_maint(const void *start, size_t size, int direction) | 404 | void ___dma_single_cpu_to_dev(const void *kaddr, size_t size, |
405 | enum dma_data_direction dir) | ||
543 | { | 406 | { |
544 | void (*inner_op)(const void *, const void *); | 407 | unsigned long paddr; |
545 | void (*outer_op)(unsigned long, unsigned long); | 408 | |
546 | 409 | BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); | |
547 | BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1)); | 410 | |
548 | 411 | dmac_map_area(kaddr, size, dir); | |
549 | switch (direction) { | ||
550 | case DMA_FROM_DEVICE: /* invalidate only */ | ||
551 | inner_op = dmac_inv_range; | ||
552 | outer_op = outer_inv_range; | ||
553 | break; | ||
554 | case DMA_TO_DEVICE: /* writeback only */ | ||
555 | inner_op = dmac_clean_range; | ||
556 | outer_op = outer_clean_range; | ||
557 | break; | ||
558 | case DMA_BIDIRECTIONAL: /* writeback and invalidate */ | ||
559 | inner_op = dmac_flush_range; | ||
560 | outer_op = outer_flush_range; | ||
561 | break; | ||
562 | default: | ||
563 | BUG(); | ||
564 | } | ||
565 | 412 | ||
566 | inner_op(start, start + size); | 413 | paddr = __pa(kaddr); |
567 | outer_op(__pa(start), __pa(start) + size); | 414 | if (dir == DMA_FROM_DEVICE) { |
415 | outer_inv_range(paddr, paddr + size); | ||
416 | } else { | ||
417 | outer_clean_range(paddr, paddr + size); | ||
418 | } | ||
419 | /* FIXME: non-speculating: flush on bidirectional mappings? */ | ||
568 | } | 420 | } |
569 | EXPORT_SYMBOL(dma_cache_maint); | 421 | EXPORT_SYMBOL(___dma_single_cpu_to_dev); |
570 | 422 | ||
571 | static void dma_cache_maint_contiguous(struct page *page, unsigned long offset, | 423 | void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, |
572 | size_t size, int direction) | 424 | enum dma_data_direction dir) |
573 | { | 425 | { |
574 | void *vaddr; | 426 | BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1)); |
575 | unsigned long paddr; | ||
576 | void (*inner_op)(const void *, const void *); | ||
577 | void (*outer_op)(unsigned long, unsigned long); | ||
578 | |||
579 | switch (direction) { | ||
580 | case DMA_FROM_DEVICE: /* invalidate only */ | ||
581 | inner_op = dmac_inv_range; | ||
582 | outer_op = outer_inv_range; | ||
583 | break; | ||
584 | case DMA_TO_DEVICE: /* writeback only */ | ||
585 | inner_op = dmac_clean_range; | ||
586 | outer_op = outer_clean_range; | ||
587 | break; | ||
588 | case DMA_BIDIRECTIONAL: /* writeback and invalidate */ | ||
589 | inner_op = dmac_flush_range; | ||
590 | outer_op = outer_flush_range; | ||
591 | break; | ||
592 | default: | ||
593 | BUG(); | ||
594 | } | ||
595 | 427 | ||
596 | if (!PageHighMem(page)) { | 428 | /* FIXME: non-speculating: not required */ |
597 | vaddr = page_address(page) + offset; | 429 | /* don't bother invalidating if DMA to device */ |
598 | inner_op(vaddr, vaddr + size); | 430 | if (dir != DMA_TO_DEVICE) { |
599 | } else { | 431 | unsigned long paddr = __pa(kaddr); |
600 | vaddr = kmap_high_get(page); | 432 | outer_inv_range(paddr, paddr + size); |
601 | if (vaddr) { | ||
602 | vaddr += offset; | ||
603 | inner_op(vaddr, vaddr + size); | ||
604 | kunmap_high(page); | ||
605 | } | ||
606 | } | 433 | } |
607 | 434 | ||
608 | paddr = page_to_phys(page) + offset; | 435 | dmac_unmap_area(kaddr, size, dir); |
609 | outer_op(paddr, paddr + size); | ||
610 | } | 436 | } |
437 | EXPORT_SYMBOL(___dma_single_dev_to_cpu); | ||
611 | 438 | ||
612 | void dma_cache_maint_page(struct page *page, unsigned long offset, | 439 | static void dma_cache_maint_page(struct page *page, unsigned long offset, |
613 | size_t size, int dir) | 440 | size_t size, enum dma_data_direction dir, |
441 | void (*op)(const void *, size_t, int)) | ||
614 | { | 442 | { |
615 | /* | 443 | /* |
616 | * A single sg entry may refer to multiple physically contiguous | 444 | * A single sg entry may refer to multiple physically contiguous |
@@ -621,20 +449,67 @@ void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
621 | size_t left = size; | 449 | size_t left = size; |
622 | do { | 450 | do { |
623 | size_t len = left; | 451 | size_t len = left; |
624 | if (PageHighMem(page) && len + offset > PAGE_SIZE) { | 452 | void *vaddr; |
625 | if (offset >= PAGE_SIZE) { | 453 | |
626 | page += offset / PAGE_SIZE; | 454 | if (PageHighMem(page)) { |
627 | offset %= PAGE_SIZE; | 455 | if (len + offset > PAGE_SIZE) { |
456 | if (offset >= PAGE_SIZE) { | ||
457 | page += offset / PAGE_SIZE; | ||
458 | offset %= PAGE_SIZE; | ||
459 | } | ||
460 | len = PAGE_SIZE - offset; | ||
461 | } | ||
462 | vaddr = kmap_high_get(page); | ||
463 | if (vaddr) { | ||
464 | vaddr += offset; | ||
465 | op(vaddr, len, dir); | ||
466 | kunmap_high(page); | ||
467 | } else if (cache_is_vipt()) { | ||
468 | pte_t saved_pte; | ||
469 | vaddr = kmap_high_l1_vipt(page, &saved_pte); | ||
470 | op(vaddr + offset, len, dir); | ||
471 | kunmap_high_l1_vipt(page, saved_pte); | ||
628 | } | 472 | } |
629 | len = PAGE_SIZE - offset; | 473 | } else { |
474 | vaddr = page_address(page) + offset; | ||
475 | op(vaddr, len, dir); | ||
630 | } | 476 | } |
631 | dma_cache_maint_contiguous(page, offset, len, dir); | ||
632 | offset = 0; | 477 | offset = 0; |
633 | page++; | 478 | page++; |
634 | left -= len; | 479 | left -= len; |
635 | } while (left); | 480 | } while (left); |
636 | } | 481 | } |
637 | EXPORT_SYMBOL(dma_cache_maint_page); | 482 | |
483 | void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, | ||
484 | size_t size, enum dma_data_direction dir) | ||
485 | { | ||
486 | unsigned long paddr; | ||
487 | |||
488 | dma_cache_maint_page(page, off, size, dir, dmac_map_area); | ||
489 | |||
490 | paddr = page_to_phys(page) + off; | ||
491 | if (dir == DMA_FROM_DEVICE) { | ||
492 | outer_inv_range(paddr, paddr + size); | ||
493 | } else { | ||
494 | outer_clean_range(paddr, paddr + size); | ||
495 | } | ||
496 | /* FIXME: non-speculating: flush on bidirectional mappings? */ | ||
497 | } | ||
498 | EXPORT_SYMBOL(___dma_page_cpu_to_dev); | ||
499 | |||
500 | void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, | ||
501 | size_t size, enum dma_data_direction dir) | ||
502 | { | ||
503 | unsigned long paddr = page_to_phys(page) + off; | ||
504 | |||
505 | /* FIXME: non-speculating: not required */ | ||
506 | /* don't bother invalidating if DMA to device */ | ||
507 | if (dir != DMA_TO_DEVICE) | ||
508 | outer_inv_range(paddr, paddr + size); | ||
509 | |||
510 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); | ||
511 | } | ||
512 | EXPORT_SYMBOL(___dma_page_dev_to_cpu); | ||
638 | 513 | ||
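The four exported helpers above encode the ownership model: cpu_to_dev maintains the inner (L1) cache before the outer (L2), so dirty L1 data reaches L2 before L2 is pushed to RAM, while dev_to_cpu invalidates outer before inner, so a stale L2 line cannot be refetched into L1 behind the invalidation. A reduced model of the ordering (printf stand-ins; the inner step on unmap is a no-op on cores that do not need it):

#include <stdio.h>

enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE };

static void inner(const char *what) { printf("L1: %s\n", what); }
static void outer(const char *what) { printf("L2: %s\n", what); }

/* Handing a buffer to the device: data moves outwards, so the inner
 * (L1) cache is maintained before the outer (L2) cache. */
static void page_cpu_to_dev(enum dma_data_direction dir)
{
	inner(dir == DMA_FROM_DEVICE ? "invalidate" : "clean");
	outer(dir == DMA_FROM_DEVICE ? "invalidate" : "clean");
}

/* Taking it back: data moves inwards, so the outer cache is
 * invalidated before the inner one (and not at all for TO_DEVICE). */
static void page_dev_to_cpu(enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer("invalidate");
		inner("invalidate");
	}
}

int main(void)
{
	page_cpu_to_dev(DMA_FROM_DEVICE);
	page_dev_to_cpu(DMA_FROM_DEVICE);
	return 0;
}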
639 | /** | 514 | /** |
640 | * dma_map_sg - map a set of SG buffers for streaming mode DMA | 515 | * dma_map_sg - map a set of SG buffers for streaming mode DMA |
@@ -708,8 +583,12 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
708 | int i; | 583 | int i; |
709 | 584 | ||
710 | for_each_sg(sg, s, nents, i) { | 585 | for_each_sg(sg, s, nents, i) { |
711 | dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, | 586 | if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, |
712 | sg_dma_len(s), dir); | 587 | sg_dma_len(s), dir)) |
588 | continue; | ||
589 | |||
590 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | ||
591 | s->length, dir); | ||
713 | } | 592 | } |
714 | } | 593 | } |
715 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | 594 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); |
@@ -732,9 +611,8 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
732 | sg_dma_len(s), dir)) | 611 | sg_dma_len(s), dir)) |
733 | continue; | 612 | continue; |
734 | 613 | ||
735 | if (!arch_is_coherent()) | 614 | __dma_page_cpu_to_dev(sg_page(s), s->offset, |
736 | dma_cache_maint_page(sg_page(s), s->offset, | 615 | s->length, dir); |
737 | s->length, dir); | ||
738 | } | 616 | } |
739 | } | 617 | } |
740 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 618 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
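Both scatterlist sync loops now try the dmabounce hook first and only fall back to the per-page helpers, so the driver-visible streaming API is unchanged. For reference, the usual calling sequence around a device-to-memory transfer (hypothetical, generic driver fragment):

/* Hypothetical driver fragment: stream 'nents' scatterlist entries
 * from a device into memory. */
static int rx_one(struct device *dev, struct scatterlist *sg, int nents)
{
	int count = dma_map_sg(dev, sg, nents, DMA_FROM_DEVICE);
	if (count == 0)
		return -ENOMEM;

	/* ... start the DMA over the 'count' mapped segments, wait ... */

	dma_sync_sg_for_cpu(dev, sg, nents, DMA_FROM_DEVICE);	/* CPU may look */
	/* ... inspect the data; to reuse the buffer for another transfer: */
	dma_sync_sg_for_device(dev, sg, nents, DMA_FROM_DEVICE);

	dma_unmap_sg(dev, sg, nents, DMA_FROM_DEVICE);
	return 0;
}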
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index d0d17b6a3703..0d414c28eb2c 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/pagemap.h> | 18 | #include <linux/pagemap.h> |
19 | #include <linux/gfp.h> | ||
19 | 20 | ||
20 | #include <asm/bugs.h> | 21 | #include <asm/bugs.h> |
21 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
@@ -23,6 +24,8 @@ | |||
23 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
24 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
25 | 26 | ||
27 | #include "mm.h" | ||
28 | |||
26 | static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; | 29 | static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; |
27 | 30 | ||
28 | /* | 31 | /* |
@@ -34,28 +37,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; | |||
34 | * Therefore those configurations which might call adjust_pte (those | 37 | * Therefore those configurations which might call adjust_pte (those |
35 | * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. | 38 | * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock. |
36 | */ | 39 | */ |
37 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address) | 40 | static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, |
41 | unsigned long pfn, pte_t *ptep) | ||
38 | { | 42 | { |
39 | pgd_t *pgd; | 43 | pte_t entry = *ptep; |
40 | pmd_t *pmd; | ||
41 | pte_t *pte, entry; | ||
42 | int ret; | 44 | int ret; |
43 | 45 | ||
44 | pgd = pgd_offset(vma->vm_mm, address); | ||
45 | if (pgd_none(*pgd)) | ||
46 | goto no_pgd; | ||
47 | if (pgd_bad(*pgd)) | ||
48 | goto bad_pgd; | ||
49 | |||
50 | pmd = pmd_offset(pgd, address); | ||
51 | if (pmd_none(*pmd)) | ||
52 | goto no_pmd; | ||
53 | if (pmd_bad(*pmd)) | ||
54 | goto bad_pmd; | ||
55 | |||
56 | pte = pte_offset_map(pmd, address); | ||
57 | entry = *pte; | ||
58 | |||
59 | /* | 46 | /* |
60 | * If this page is present, it's actually being shared. | 47 | * If this page is present, it's actually being shared. |
61 | */ | 48 | */ |
@@ -66,33 +53,55 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address) | |||
66 | * fault (ie, is old), we can safely ignore any issues. | 53 | * fault (ie, is old), we can safely ignore any issues. |
67 | */ | 54 | */ |
68 | if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { | 55 | if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) { |
69 | unsigned long pfn = pte_pfn(entry); | ||
70 | flush_cache_page(vma, address, pfn); | 56 | flush_cache_page(vma, address, pfn); |
71 | outer_flush_range((pfn << PAGE_SHIFT), | 57 | outer_flush_range((pfn << PAGE_SHIFT), |
72 | (pfn << PAGE_SHIFT) + PAGE_SIZE); | 58 | (pfn << PAGE_SHIFT) + PAGE_SIZE); |
73 | pte_val(entry) &= ~L_PTE_MT_MASK; | 59 | pte_val(entry) &= ~L_PTE_MT_MASK; |
74 | pte_val(entry) |= shared_pte_mask; | 60 | pte_val(entry) |= shared_pte_mask; |
75 | set_pte_at(vma->vm_mm, address, pte, entry); | 61 | set_pte_at(vma->vm_mm, address, ptep, entry); |
76 | flush_tlb_page(vma, address); | 62 | flush_tlb_page(vma, address); |
77 | } | 63 | } |
78 | pte_unmap(pte); | 64 | |
79 | return ret; | 65 | return ret; |
66 | } | ||
67 | |||
68 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | ||
69 | unsigned long pfn) | ||
70 | { | ||
71 | spinlock_t *ptl; | ||
72 | pgd_t *pgd; | ||
73 | pmd_t *pmd; | ||
74 | pte_t *pte; | ||
75 | int ret; | ||
80 | 76 | ||
81 | bad_pgd: | 77 | pgd = pgd_offset(vma->vm_mm, address); |
82 | pgd_ERROR(*pgd); | 78 | if (pgd_none_or_clear_bad(pgd)) |
83 | pgd_clear(pgd); | 79 | return 0; |
84 | no_pgd: | 80 | |
85 | return 0; | 81 | pmd = pmd_offset(pgd, address); |
86 | 82 | if (pmd_none_or_clear_bad(pmd)) | |
87 | bad_pmd: | 83 | return 0; |
88 | pmd_ERROR(*pmd); | 84 | |
89 | pmd_clear(pmd); | 85 | /* |
90 | no_pmd: | 86 | * This is called while another page table is mapped, so we |
91 | return 0; | 87 | * must use the nested version. This also means we need to |
88 | * open-code the spin-locking. | ||
89 | */ | ||
90 | ptl = pte_lockptr(vma->vm_mm, pmd); | ||
91 | pte = pte_offset_map_nested(pmd, address); | ||
92 | spin_lock(ptl); | ||
93 | |||
94 | ret = do_adjust_pte(vma, address, pfn, pte); | ||
95 | |||
96 | spin_unlock(ptl); | ||
97 | pte_unmap_nested(pte); | ||
98 | |||
99 | return ret; | ||
92 | } | 100 | } |
93 | 101 | ||
94 | static void | 102 | static void |
95 | make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) | 103 | make_coherent(struct address_space *mapping, struct vm_area_struct *vma, |
104 | unsigned long addr, pte_t *ptep, unsigned long pfn) | ||
96 | { | 105 | { |
97 | struct mm_struct *mm = vma->vm_mm; | 106 | struct mm_struct *mm = vma->vm_mm; |
98 | struct vm_area_struct *mpnt; | 107 | struct vm_area_struct *mpnt; |
@@ -120,11 +129,11 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne | |||
120 | if (!(mpnt->vm_flags & VM_MAYSHARE)) | 129 | if (!(mpnt->vm_flags & VM_MAYSHARE)) |
121 | continue; | 130 | continue; |
122 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; | 131 | offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; |
123 | aliases += adjust_pte(mpnt, mpnt->vm_start + offset); | 132 | aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn); |
124 | } | 133 | } |
125 | flush_dcache_mmap_unlock(mapping); | 134 | flush_dcache_mmap_unlock(mapping); |
126 | if (aliases) | 135 | if (aliases) |
127 | adjust_pte(vma, addr); | 136 | do_adjust_pte(vma, addr, pfn, ptep); |
128 | else | 137 | else |
129 | flush_cache_page(vma, addr, pfn); | 138 | flush_cache_page(vma, addr, pfn); |
130 | } | 139 | } |
@@ -142,16 +151,24 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne | |||
142 | * | 151 | * |
143 | * Note that the pte lock will be held. | 152 | * Note that the pte lock will be held. |
144 | */ | 153 | */ |
145 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | 154 | void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, |
155 | pte_t *ptep) | ||
146 | { | 156 | { |
147 | unsigned long pfn = pte_pfn(pte); | 157 | unsigned long pfn = pte_pfn(*ptep); |
148 | struct address_space *mapping; | 158 | struct address_space *mapping; |
149 | struct page *page; | 159 | struct page *page; |
150 | 160 | ||
151 | if (!pfn_valid(pfn)) | 161 | if (!pfn_valid(pfn)) |
152 | return; | 162 | return; |
153 | 163 | ||
164 | /* | ||
165 | * The zero page is never written to, so never has any dirty | ||
166 | * cache lines, and therefore never needs to be flushed. | ||
167 | */ | ||
154 | page = pfn_to_page(pfn); | 168 | page = pfn_to_page(pfn); |
169 | if (page == ZERO_PAGE(0)) | ||
170 | return; | ||
171 | |||
155 | mapping = page_mapping(page); | 172 | mapping = page_mapping(page); |
156 | #ifndef CONFIG_SMP | 173 | #ifndef CONFIG_SMP |
157 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | 174 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) |
@@ -159,7 +176,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte) | |||
159 | #endif | 176 | #endif |
160 | if (mapping) { | 177 | if (mapping) { |
161 | if (cache_is_vivt()) | 178 | if (cache_is_vivt()) |
162 | make_coherent(mapping, vma, addr, pfn); | 179 | make_coherent(mapping, vma, addr, ptep, pfn); |
163 | else if (vma->vm_flags & VM_EXEC) | 180 | else if (vma->vm_flags & VM_EXEC) |
164 | __flush_icache_all(); | 181 | __flush_icache_all(); |
165 | } | 182 | } |
@@ -198,9 +215,8 @@ void __init check_writebuffer_bugs(void) | |||
198 | page = alloc_page(GFP_KERNEL); | 215 | page = alloc_page(GFP_KERNEL); |
199 | if (page) { | 216 | if (page) { |
200 | unsigned long *p1, *p2; | 217 | unsigned long *p1, *p2; |
201 | pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG| | 218 | pgprot_t prot = __pgprot_modify(PAGE_KERNEL, |
202 | L_PTE_DIRTY|L_PTE_WRITE| | 219 | L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE); |
203 | L_PTE_MT_BUFFERABLE); | ||
204 | 220 | ||
205 | p1 = vmap(&page, 1, VM_IOREMAP, prot); | 221 | p1 = vmap(&page, 1, VM_IOREMAP, prot); |
206 | p2 = vmap(&page, 1, VM_IOREMAP, prot); | 222 | p2 = vmap(&page, 1, VM_IOREMAP, prot); |
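The rewritten prot computation leans on a mask-and-set helper instead of spelling out every L_PTE_* bit. On ARM of this vintage __pgprot_modify() is defined along these lines (shown for orientation; asm/pgtable.h is authoritative):

    #define __pgprot_modify(prot, mask, bits) \
            __pgprot((pgprot_val(prot) & ~(mask)) | (bits))

    /* Start from PAGE_KERNEL, clear the memory-type field, and select
     * the bufferable (normal, uncached) memory type. */
    pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
                                    L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);

Deriving the protection from PAGE_KERNEL keeps the test mapping in step with whatever base bits the kernel mapping uses, rather than duplicating them by hand.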
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 10e06801afb3..9d40c341e07e 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/page-flags.h> | 18 | #include <linux/page-flags.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <linux/perf_event.h> | ||
21 | 22 | ||
22 | #include <asm/system.h> | 23 | #include <asm/system.h> |
23 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
@@ -302,6 +303,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
302 | fault = __do_page_fault(mm, addr, fsr, tsk); | 303 | fault = __do_page_fault(mm, addr, fsr, tsk); |
303 | up_read(&mm->mmap_sem); | 304 | up_read(&mm->mmap_sem); |
304 | 305 | ||
306 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); | ||
307 | if (fault & VM_FAULT_MAJOR) | ||
308 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); | ||
309 | else if (fault & VM_FAULT_MINOR) | ||
310 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); | ||
311 | |||
305 | /* | 312 | /* |
306 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR | 313 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |
307 | */ | 314 | */ |
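With these hooks in place, the software page-fault counters become observable from userspace through the perf syscall. A minimal self-contained reader, error handling trimmed (a sketch, not part of the patch):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct perf_event_attr attr;
            long long count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_SOFTWARE;
            attr.config = PERF_COUNT_SW_PAGE_FAULTS;    /* or _MAJ / _MIN */

            /* Count faults of the calling thread, on any CPU. */
            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    return 1;

            /* ...fault-generating work would go here... */

            read(fd, &count, sizeof(count));
            printf("page faults: %lld\n", count);
            close(fd);
            return 0;
    }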
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 7f294f307c83..c6844cb9b508 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -13,6 +13,8 @@ | |||
13 | 13 | ||
14 | #include <asm/cacheflush.h> | 14 | #include <asm/cacheflush.h> |
15 | #include <asm/cachetype.h> | 15 | #include <asm/cachetype.h> |
16 | #include <asm/highmem.h> | ||
17 | #include <asm/smp_plat.h> | ||
16 | #include <asm/system.h> | 18 | #include <asm/system.h> |
17 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
18 | 20 | ||
@@ -35,14 +37,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | |||
35 | : | 37 | : |
36 | : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) | 38 | : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) |
37 | : "cc"); | 39 | : "cc"); |
38 | __flush_icache_all(); | ||
39 | } | 40 | } |
40 | 41 | ||
41 | void flush_cache_mm(struct mm_struct *mm) | 42 | void flush_cache_mm(struct mm_struct *mm) |
42 | { | 43 | { |
43 | if (cache_is_vivt()) { | 44 | if (cache_is_vivt()) { |
44 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) | 45 | vivt_flush_cache_mm(mm); |
45 | __cpuc_flush_user_all(); | ||
46 | return; | 46 | return; |
47 | } | 47 | } |
48 | 48 | ||
@@ -52,16 +52,13 @@ void flush_cache_mm(struct mm_struct *mm) | |||
52 | : | 52 | : |
53 | : "r" (0) | 53 | : "r" (0) |
54 | : "cc"); | 54 | : "cc"); |
55 | __flush_icache_all(); | ||
56 | } | 55 | } |
57 | } | 56 | } |
58 | 57 | ||
59 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | 58 | void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
60 | { | 59 | { |
61 | if (cache_is_vivt()) { | 60 | if (cache_is_vivt()) { |
62 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) | 61 | vivt_flush_cache_range(vma, start, end); |
63 | __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end), | ||
64 | vma->vm_flags); | ||
65 | return; | 62 | return; |
66 | } | 63 | } |
67 | 64 | ||
@@ -71,27 +68,41 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned | |||
71 | : | 68 | : |
72 | : "r" (0) | 69 | : "r" (0) |
73 | : "cc"); | 70 | : "cc"); |
74 | __flush_icache_all(); | ||
75 | } | 71 | } |
72 | |||
73 | if (vma->vm_flags & VM_EXEC) | ||
74 | __flush_icache_all(); | ||
76 | } | 75 | } |
77 | 76 | ||
78 | void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) | 77 | void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) |
79 | { | 78 | { |
80 | if (cache_is_vivt()) { | 79 | if (cache_is_vivt()) { |
81 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { | 80 | vivt_flush_cache_page(vma, user_addr, pfn); |
82 | unsigned long addr = user_addr & PAGE_MASK; | ||
83 | __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags); | ||
84 | } | ||
85 | return; | 81 | return; |
86 | } | 82 | } |
87 | 83 | ||
88 | if (cache_is_vipt_aliasing()) | 84 | if (cache_is_vipt_aliasing()) { |
89 | flush_pfn_alias(pfn, user_addr); | 85 | flush_pfn_alias(pfn, user_addr); |
86 | __flush_icache_all(); | ||
87 | } | ||
88 | |||
89 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) | ||
90 | __flush_icache_all(); | ||
90 | } | 91 | } |
92 | #else | ||
93 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) | ||
94 | #endif | ||
91 | 95 | ||
96 | #ifdef CONFIG_SMP | ||
97 | static void flush_ptrace_access_other(void *args) | ||
98 | { | ||
99 | __flush_icache_all(); | ||
100 | } | ||
101 | #endif | ||
102 | |||
103 | static | ||
92 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | 104 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, |
93 | unsigned long uaddr, void *kaddr, | 105 | unsigned long uaddr, void *kaddr, unsigned long len) |
94 | unsigned long len, int write) | ||
95 | { | 106 | { |
96 | if (cache_is_vivt()) { | 107 | if (cache_is_vivt()) { |
97 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { | 108 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { |
@@ -103,20 +114,42 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
103 | 114 | ||
104 | if (cache_is_vipt_aliasing()) { | 115 | if (cache_is_vipt_aliasing()) { |
105 | flush_pfn_alias(page_to_pfn(page), uaddr); | 116 | flush_pfn_alias(page_to_pfn(page), uaddr); |
117 | __flush_icache_all(); | ||
106 | return; | 118 | return; |
107 | } | 119 | } |
108 | 120 | ||
109 | /* VIPT non-aliasing cache */ | 121 | /* VIPT non-aliasing cache */ |
110 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) && | 122 | if (vma->vm_flags & VM_EXEC) { |
111 | vma->vm_flags & VM_EXEC) { | ||
112 | unsigned long addr = (unsigned long)kaddr; | 123 | unsigned long addr = (unsigned long)kaddr; |
113 | /* only flushing the kernel mapping on non-aliasing VIPT */ | ||
114 | __cpuc_coherent_kern_range(addr, addr + len); | 124 | __cpuc_coherent_kern_range(addr, addr + len); |
125 | #ifdef CONFIG_SMP | ||
126 | if (cache_ops_need_broadcast()) | ||
127 | smp_call_function(flush_ptrace_access_other, | ||
128 | NULL, 1); | ||
129 | #endif | ||
115 | } | 130 | } |
116 | } | 131 | } |
117 | #else | 132 | |
118 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) | 133 | /* |
134 | * Copy user data from/to a page which is mapped into a different | ||
135 | * process's address space. Really, we want to allow our "user | ||
136 | * space" model to handle this. | ||
137 | * | ||
138 | * Note that this code needs to run on the current CPU. | ||
139 | */ | ||
140 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
141 | unsigned long uaddr, void *dst, const void *src, | ||
142 | unsigned long len) | ||
143 | { | ||
144 | #ifdef CONFIG_SMP | ||
145 | preempt_disable(); | ||
146 | #endif | ||
147 | memcpy(dst, src, len); | ||
148 | flush_ptrace_access(vma, page, uaddr, dst, len); | ||
149 | #ifdef CONFIG_SMP | ||
150 | preempt_enable(); | ||
119 | #endif | 151 | #endif |
152 | } | ||
120 | 153 | ||
121 | void __flush_dcache_page(struct address_space *mapping, struct page *page) | 154 | void __flush_dcache_page(struct address_space *mapping, struct page *page) |
122 | { | 155 | { |
@@ -125,14 +158,20 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
125 | * page. This ensures that data in the physical page is mutually | 158 | * page. This ensures that data in the physical page is mutually |
126 | * coherent with the kernel's mapping. | 159 | * coherent with the kernel's mapping. |
127 | */ | 160 | */ |
128 | #ifdef CONFIG_HIGHMEM | 161 | if (!PageHighMem(page)) { |
129 | /* | 162 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
130 | * kmap_atomic() doesn't set the page virtual address, and | 163 | } else { |
131 | * kunmap_atomic() takes care of cache flushing already. | 164 | void *addr = kmap_high_get(page); |
132 | */ | 165 | if (addr) { |
133 | if (page_address(page)) | 166 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
134 | #endif | 167 | kunmap_high(page); |
135 | __cpuc_flush_dcache_page(page_address(page)); | 168 | } else if (cache_is_vipt()) { |
169 | pte_t saved_pte; | ||
170 | addr = kmap_high_l1_vipt(page, &saved_pte); | ||
171 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
172 | kunmap_high_l1_vipt(page, saved_pte); | ||
173 | } | ||
174 | } | ||
136 | 175 | ||
137 | /* | 176 | /* |
138 | * If this is a page cache page, and we have an aliasing VIPT cache, | 177 | * If this is a page cache page, and we have an aliasing VIPT cache, |
@@ -196,7 +235,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p | |||
196 | */ | 235 | */ |
197 | void flush_dcache_page(struct page *page) | 236 | void flush_dcache_page(struct page *page) |
198 | { | 237 | { |
199 | struct address_space *mapping = page_mapping(page); | 238 | struct address_space *mapping; |
239 | |||
240 | /* | ||
241 | * The zero page is never written to, so never has any dirty | ||
242 | * cache lines, and therefore never needs to be flushed. | ||
243 | */ | ||
244 | if (page == ZERO_PAGE(0)) | ||
245 | return; | ||
246 | |||
247 | mapping = page_mapping(page); | ||
200 | 248 | ||
201 | #ifndef CONFIG_SMP | 249 | #ifndef CONFIG_SMP |
202 | if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) | 250 | if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) |
@@ -242,6 +290,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l | |||
242 | * userspace address only. | 290 | * userspace address only. |
243 | */ | 291 | */ |
244 | flush_pfn_alias(pfn, vmaddr); | 292 | flush_pfn_alias(pfn, vmaddr); |
293 | __flush_icache_all(); | ||
245 | } | 294 | } |
246 | 295 | ||
247 | /* | 296 | /* |
@@ -249,5 +298,5 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l | |||
249 | * in this mapping of the page. FIXME: this is overkill | 298 | * in this mapping of the page. FIXME: this is overkill |
250 | * since we actually ask for a write-back and invalidate. | 299 | * since we actually ask for a write-back and invalidate. |
251 | */ | 300 | */ |
252 | __cpuc_flush_dcache_page(page_address(page)); | 301 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
253 | } | 302 | } |
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 30f82fb5918c..77b030f5ec09 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type) | |||
79 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); | 79 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); |
80 | 80 | ||
81 | if (kvaddr >= (void *)FIXADDR_START) { | 81 | if (kvaddr >= (void *)FIXADDR_START) { |
82 | __cpuc_flush_dcache_page((void *)vaddr); | 82 | if (cache_is_vivt()) |
83 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | ||
83 | #ifdef CONFIG_DEBUG_HIGHMEM | 84 | #ifdef CONFIG_DEBUG_HIGHMEM |
84 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 85 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
85 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); | 86 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); |
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr) | |||
124 | pte = TOP_PTE(vaddr); | 125 | pte = TOP_PTE(vaddr); |
125 | return pte_page(*pte); | 126 | return pte_page(*pte); |
126 | } | 127 | } |
128 | |||
129 | #ifdef CONFIG_CPU_CACHE_VIPT | ||
130 | |||
131 | #include <linux/percpu.h> | ||
132 | |||
133 | /* | ||
134 | * The VIVT cache of a highmem page is always flushed before the page | ||
135 | * is unmapped. Hence unmapped highmem pages need no cache maintenance | ||
136 | * in that case. | ||
137 | * | ||
138 | * However unmapped pages may still be cached with a VIPT cache, and | ||
139 | * it is not possible to perform cache maintenance on them using physical | ||
140 | * addresses unfortunately. So we have no choice but to set up a temporary | ||
141 | * virtual mapping for that purpose. | ||
142 | * | ||
143 | * Yet this VIPT cache maintenance may be triggered from DMA support | ||
144 | * functions which are possibly called from interrupt context. As we don't | ||
145 | * want to keep interrupt disabled all the time when such maintenance is | ||
146 | * taking place, we therefore allow for some reentrancy by preserving and | ||
147 | * restoring the previous fixmap entry before the interrupted context is | ||
148 | * resumed. If the reentrancy depth is 0 then there is no need to restore | ||
149 | * the previous fixmap, and leaving the current one in place allows it to | ||
150 | * be reused the next time without a TLB flush (common with DMA). | ||
151 | */ | ||
152 | |||
153 | static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); | ||
154 | |||
155 | void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) | ||
156 | { | ||
157 | unsigned int idx, cpu = smp_processor_id(); | ||
158 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
159 | unsigned long vaddr, flags; | ||
160 | pte_t pte, *ptep; | ||
161 | |||
162 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
163 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
164 | ptep = TOP_PTE(vaddr); | ||
165 | pte = mk_pte(page, kmap_prot); | ||
166 | |||
167 | if (!in_interrupt()) | ||
168 | preempt_disable(); | ||
169 | |||
170 | raw_local_irq_save(flags); | ||
171 | (*depth)++; | ||
172 | if (pte_val(*ptep) == pte_val(pte)) { | ||
173 | *saved_pte = pte; | ||
174 | } else { | ||
175 | *saved_pte = *ptep; | ||
176 | set_pte_ext(ptep, pte, 0); | ||
177 | local_flush_tlb_kernel_page(vaddr); | ||
178 | } | ||
179 | raw_local_irq_restore(flags); | ||
180 | |||
181 | return (void *)vaddr; | ||
182 | } | ||
183 | |||
184 | void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) | ||
185 | { | ||
186 | unsigned int idx, cpu = smp_processor_id(); | ||
187 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
188 | unsigned long vaddr, flags; | ||
189 | pte_t pte, *ptep; | ||
190 | |||
191 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
192 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
193 | ptep = TOP_PTE(vaddr); | ||
194 | pte = mk_pte(page, kmap_prot); | ||
195 | |||
196 | BUG_ON(pte_val(*ptep) != pte_val(pte)); | ||
197 | BUG_ON(*depth <= 0); | ||
198 | |||
199 | raw_local_irq_save(flags); | ||
200 | (*depth)--; | ||
201 | if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { | ||
202 | set_pte_ext(ptep, saved_pte, 0); | ||
203 | local_flush_tlb_kernel_page(vaddr); | ||
204 | } | ||
205 | raw_local_irq_restore(flags); | ||
206 | |||
207 | if (!in_interrupt()) | ||
208 | preempt_enable(); | ||
209 | } | ||
210 | |||
211 | #endif /* CONFIG_CPU_CACHE_VIPT */ | ||
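The save/restore discipline can be modelled without any kernel machinery: one slot per CPU, a depth counter, and the rule that a nested (interrupting) user restores the previous entry while the outermost user leaves its own entry cached for reuse. A runnable model, with the fixmap PTE reduced to a plain integer and a single CPU assumed:

    #include <stdio.h>

    static int slot;    /* models the per-CPU fixmap PTE */
    static int depth;   /* models kmap_high_l1_vipt_depth */

    static void map(int pte, int *saved)
    {
            depth++;
            *saved = slot;
            if (slot != pte)
                    slot = pte;     /* set_pte_ext + TLB flush in the kernel */
    }

    static void unmap(int pte, int saved)
    {
            depth--;
            /* Only a nested user must restore; the last user leaves its
             * entry in place so the next mapping may skip the TLB flush. */
            if (depth != 0 && pte != saved)
                    slot = saved;
    }

    int main(void)
    {
            int s1, s2;
            map(1, &s1);    /* outer mapping */
            map(2, &s2);    /* "interrupt" reuses the slot */
            unmap(2, s2);   /* depth > 0: restore pte 1 for the outer user */
            printf("after nested unmap: %d (expect 1)\n", slot);
            unmap(1, s1);   /* depth 0: entry 1 stays cached */
            printf("after outer unmap:  %d (expect 1)\n", slot);
            return 0;
    }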
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 52c40d155672..0ed29bfeba1c 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -17,12 +17,14 @@ | |||
17 | #include <linux/initrd.h> | 17 | #include <linux/initrd.h> |
18 | #include <linux/sort.h> | 18 | #include <linux/sort.h> |
19 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
20 | #include <linux/gfp.h> | ||
20 | 21 | ||
21 | #include <asm/mach-types.h> | 22 | #include <asm/mach-types.h> |
22 | #include <asm/sections.h> | 23 | #include <asm/sections.h> |
23 | #include <asm/setup.h> | 24 | #include <asm/setup.h> |
24 | #include <asm/sizes.h> | 25 | #include <asm/sizes.h> |
25 | #include <asm/tlb.h> | 26 | #include <asm/tlb.h> |
27 | #include <asm/fixmap.h> | ||
26 | 28 | ||
27 | #include <asm/mach/arch.h> | 29 | #include <asm/mach/arch.h> |
28 | #include <asm/mach/map.h> | 30 | #include <asm/mach/map.h> |
@@ -32,19 +34,21 @@ | |||
32 | static unsigned long phys_initrd_start __initdata = 0; | 34 | static unsigned long phys_initrd_start __initdata = 0; |
33 | static unsigned long phys_initrd_size __initdata = 0; | 35 | static unsigned long phys_initrd_size __initdata = 0; |
34 | 36 | ||
35 | static void __init early_initrd(char **p) | 37 | static int __init early_initrd(char *p) |
36 | { | 38 | { |
37 | unsigned long start, size; | 39 | unsigned long start, size; |
40 | char *endp; | ||
38 | 41 | ||
39 | start = memparse(*p, p); | 42 | start = memparse(p, &endp); |
40 | if (**p == ',') { | 43 | if (*endp == ',') { |
41 | size = memparse((*p) + 1, p); | 44 | size = memparse(endp + 1, NULL); |
42 | 45 | ||
43 | phys_initrd_start = start; | 46 | phys_initrd_start = start; |
44 | phys_initrd_size = size; | 47 | phys_initrd_size = size; |
45 | } | 48 | } |
49 | return 0; | ||
46 | } | 50 | } |
47 | __early_param("initrd=", early_initrd); | 51 | early_param("initrd", early_initrd); |
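The new-style handler receives the raw value string and walks it itself, so the start,size split is done with memparse() and an explicit end pointer. A standalone model of the parsing, with a memparse-like helper for the usual K/M/G suffixes (memparse_demo is a hypothetical stand-in; the real memparse lives in lib/cmdline.c):

    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal stand-in for the kernel's memparse(). */
    static unsigned long long memparse_demo(const char *p, char **retp)
    {
            char *end;
            unsigned long long v = strtoull(p, &end, 0);

            switch (*end) {
            case 'G': case 'g': v <<= 10;   /* fall through */
            case 'M': case 'm': v <<= 10;   /* fall through */
            case 'K': case 'k': v <<= 10; end++;
            }
            if (retp)
                    *retp = end;
            return v;
    }

    int main(void)
    {
            const char *arg = "0x60000000,8M";   /* initrd=<start>,<size> */
            char *endp;
            unsigned long long start = memparse_demo(arg, &endp);

            if (*endp == ',') {
                    unsigned long long size = memparse_demo(endp + 1, NULL);
                    printf("start=%#llx size=%llu\n", start, size);
            }
            return 0;
    }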
48 | 52 | ||
49 | static int __init parse_tag_initrd(const struct tag *tag) | 53 | static int __init parse_tag_initrd(const struct tag *tag) |
50 | { | 54 | { |
@@ -82,9 +86,6 @@ void show_mem(void) | |||
82 | printk("Mem-info:\n"); | 86 | printk("Mem-info:\n"); |
83 | show_free_areas(); | 87 | show_free_areas(); |
84 | for_each_online_node(node) { | 88 | for_each_online_node(node) { |
85 | pg_data_t *n = NODE_DATA(node); | ||
86 | struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; | ||
87 | |||
88 | for_each_nodebank (i,mi,node) { | 89 | for_each_nodebank (i,mi,node) { |
89 | struct membank *bank = &mi->bank[i]; | 90 | struct membank *bank = &mi->bank[i]; |
90 | unsigned int pfn1, pfn2; | 91 | unsigned int pfn1, pfn2; |
@@ -93,8 +94,8 @@ void show_mem(void) | |||
93 | pfn1 = bank_pfn_start(bank); | 94 | pfn1 = bank_pfn_start(bank); |
94 | pfn2 = bank_pfn_end(bank); | 95 | pfn2 = bank_pfn_end(bank); |
95 | 96 | ||
96 | page = map + pfn1; | 97 | page = pfn_to_page(pfn1); |
97 | end = map + pfn2; | 98 | end = pfn_to_page(pfn2 - 1) + 1; |
98 | 99 | ||
99 | do { | 100 | do { |
100 | total++; | 101 | total++; |
@@ -560,7 +561,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) | |||
560 | */ | 561 | */ |
561 | void __init mem_init(void) | 562 | void __init mem_init(void) |
562 | { | 563 | { |
563 | unsigned int codesize, datasize, initsize; | 564 | unsigned long reserved_pages, free_pages; |
564 | int i, node; | 565 | int i, node; |
565 | 566 | ||
566 | #ifndef CONFIG_DISCONTIGMEM | 567 | #ifndef CONFIG_DISCONTIGMEM |
@@ -596,6 +597,30 @@ void __init mem_init(void) | |||
596 | totalram_pages += totalhigh_pages; | 597 | totalram_pages += totalhigh_pages; |
597 | #endif | 598 | #endif |
598 | 599 | ||
600 | reserved_pages = free_pages = 0; | ||
601 | |||
602 | for_each_online_node(node) { | ||
603 | for_each_nodebank(i, &meminfo, node) { | ||
604 | struct membank *bank = &meminfo.bank[i]; | ||
605 | unsigned int pfn1, pfn2; | ||
606 | struct page *page, *end; | ||
607 | |||
608 | pfn1 = bank_pfn_start(bank); | ||
609 | pfn2 = bank_pfn_end(bank); | ||
610 | |||
611 | page = pfn_to_page(pfn1); | ||
612 | end = pfn_to_page(pfn2 - 1) + 1; | ||
613 | |||
614 | do { | ||
615 | if (PageReserved(page)) | ||
616 | reserved_pages++; | ||
617 | else if (!page_count(page)) | ||
618 | free_pages++; | ||
619 | page++; | ||
620 | } while (page < end); | ||
621 | } | ||
622 | } | ||
623 | |||
599 | /* | 624 | /* |
600 | * Since our memory may not be contiguous, calculate the | 625 | * Since our memory may not be contiguous, calculate the |
601 | * real number of pages we have in this system | 626 | * real number of pages we have in this system |
@@ -608,15 +633,70 @@ void __init mem_init(void) | |||
608 | } | 633 | } |
609 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | 634 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); |
610 | 635 | ||
611 | codesize = _etext - _text; | 636 | printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", |
612 | datasize = _end - _data; | 637 | nr_free_pages() << (PAGE_SHIFT-10), |
613 | initsize = __init_end - __init_begin; | 638 | free_pages << (PAGE_SHIFT-10), |
639 | reserved_pages << (PAGE_SHIFT-10), | ||
640 | totalhigh_pages << (PAGE_SHIFT-10)); | ||
641 | |||
642 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | ||
643 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | ||
644 | #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) | ||
614 | 645 | ||
615 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " | 646 | printk(KERN_NOTICE "Virtual kernel memory layout:\n" |
616 | "%dK data, %dK init, %luK highmem)\n", | 647 | " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" |
617 | nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, | 648 | " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" |
618 | datasize >> 10, initsize >> 10, | 649 | #ifdef CONFIG_MMU |
619 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); | 650 | " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" |
651 | #endif | ||
652 | " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
653 | " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
654 | #ifdef CONFIG_HIGHMEM | ||
655 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
656 | #endif | ||
657 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | ||
658 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
659 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
660 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n", | ||
661 | |||
662 | MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + | ||
663 | (PAGE_SIZE)), | ||
664 | MLK(FIXADDR_START, FIXADDR_TOP), | ||
665 | #ifdef CONFIG_MMU | ||
666 | MLM(CONSISTENT_BASE, CONSISTENT_END), | ||
667 | #endif | ||
668 | MLM(VMALLOC_START, VMALLOC_END), | ||
669 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | ||
670 | #ifdef CONFIG_HIGHMEM | ||
671 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * | ||
672 | (PAGE_SIZE)), | ||
673 | #endif | ||
674 | MLM(MODULES_VADDR, MODULES_END), | ||
675 | |||
676 | MLK_ROUNDUP(__init_begin, __init_end), | ||
677 | MLK_ROUNDUP(_text, _etext), | ||
678 | MLK_ROUNDUP(_data, _edata)); | ||
679 | |||
680 | #undef MLK | ||
681 | #undef MLM | ||
682 | #undef MLK_ROUNDUP | ||
683 | |||
684 | /* | ||
685 | * Check boundaries twice: Some fundamental inconsistencies can | ||
686 | * be detected at build time already. | ||
687 | */ | ||
688 | #ifdef CONFIG_MMU | ||
689 | BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
690 | BUG_ON(VMALLOC_END > CONSISTENT_BASE); | ||
691 | |||
692 | BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
693 | BUG_ON(TASK_SIZE > MODULES_VADDR); | ||
694 | #endif | ||
695 | |||
696 | #ifdef CONFIG_HIGHMEM | ||
697 | BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | ||
698 | BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); | ||
699 | #endif | ||
620 | 700 | ||
621 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 701 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { |
622 | extern int sysctl_overcommit_memory; | 702 | extern int sysctl_overcommit_memory; |
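The MLK/MLM/MLK_ROUNDUP helpers each expand one address range into the begin/end/size triple consumed by the big printk, sized in kB or MB respectively. A standalone illustration of the expansion (addresses are made up):

    #include <stdio.h>

    #define MLM(b, t) (b), (t), ((t) - (b)) >> 20

    int main(void)
    {
            unsigned long vmalloc_start = 0xc8000000UL;
            unsigned long vmalloc_end   = 0xd0000000UL;

            /* Each MLM() supplies three arguments to the format string. */
            printf("    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n",
                   MLM(vmalloc_start, vmalloc_end));
            return 0;
    }

which prints "    vmalloc : 0xc8000000 - 0xd0000000   (  128 MB)", matching the layout dump above.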
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 0ab75c60f7cf..28c8b950ef04 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm) | |||
139 | * which requires the new ioremap'd region to be referenced, the CPU will | 139 | * which requires the new ioremap'd region to be referenced, the CPU will |
140 | * reference the _old_ region. | 140 | * reference the _old_ region. |
141 | * | 141 | * |
142 | * Note that get_vm_area() allocates a guard 4K page, so we need to mask | 142 | * Note that get_vm_area_caller() allocates a guard 4K page, so we need to |
143 | * the size back to 1MB aligned or we will overflow in the loop below. | 143 | * mask the size back to 1MB aligned or we will overflow in the loop below. |
144 | */ | 144 | */ |
145 | static void unmap_area_sections(unsigned long virt, unsigned long size) | 145 | static void unmap_area_sections(unsigned long virt, unsigned long size) |
146 | { | 146 | { |
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, | |||
254 | } | 254 | } |
255 | #endif | 255 | #endif |
256 | 256 | ||
257 | 257 | void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |
258 | /* | 258 | unsigned long offset, size_t size, unsigned int mtype, void *caller) |
259 | * Remap an arbitrary physical address space into the kernel virtual | ||
260 | * address space. Needed when the kernel wants to access high addresses | ||
261 | * directly. | ||
262 | * | ||
263 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
264 | * have to convert them into an offset in a page-aligned mapping, but the | ||
265 | * caller shouldn't need to know that small detail. | ||
266 | * | ||
267 | * 'flags' are the extra L_PTE_ flags that you want to specify for this | ||
268 | * mapping. See <asm/pgtable.h> for more information. | ||
269 | */ | ||
270 | void __iomem * | ||
271 | __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
272 | unsigned int mtype) | ||
273 | { | 259 | { |
274 | const struct mem_type *type; | 260 | const struct mem_type *type; |
275 | int err; | 261 | int err; |
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
291 | */ | 277 | */ |
292 | size = PAGE_ALIGN(offset + size); | 278 | size = PAGE_ALIGN(offset + size); |
293 | 279 | ||
294 | area = get_vm_area(size, VM_IOREMAP); | 280 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
295 | if (!area) | 281 | if (!area) |
296 | return NULL; | 282 | return NULL; |
297 | addr = (unsigned long)area->addr; | 283 | addr = (unsigned long)area->addr; |
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
318 | flush_cache_vmap(addr, addr + size); | 304 | flush_cache_vmap(addr, addr + size); |
319 | return (void __iomem *) (offset + addr); | 305 | return (void __iomem *) (offset + addr); |
320 | } | 306 | } |
321 | EXPORT_SYMBOL(__arm_ioremap_pfn); | ||
322 | 307 | ||
323 | void __iomem * | 308 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, |
324 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | 309 | unsigned int mtype, void *caller) |
325 | { | 310 | { |
326 | unsigned long last_addr; | 311 | unsigned long last_addr; |
327 | unsigned long offset = phys_addr & ~PAGE_MASK; | 312 | unsigned long offset = phys_addr & ~PAGE_MASK; |
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | |||
334 | if (!size || last_addr < phys_addr) | 319 | if (!size || last_addr < phys_addr) |
335 | return NULL; | 320 | return NULL; |
336 | 321 | ||
337 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | 322 | return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, |
323 | caller); | ||
324 | } | ||
325 | |||
326 | /* | ||
327 | * Remap an arbitrary physical address space into the kernel virtual | ||
328 | * address space. Needed when the kernel wants to access high addresses | ||
329 | * directly. | ||
330 | * | ||
331 | * NOTE! We need to allow non-page-aligned mappings too: we will obviously | ||
332 | * have to convert them into an offset in a page-aligned mapping, but the | ||
333 | * caller shouldn't need to know that small detail. | ||
334 | */ | ||
335 | void __iomem * | ||
336 | __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | ||
337 | unsigned int mtype) | ||
338 | { | ||
339 | return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, | ||
340 | __builtin_return_address(0)); | ||
341 | } | ||
342 | EXPORT_SYMBOL(__arm_ioremap_pfn); | ||
343 | |||
344 | void __iomem * | ||
345 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | ||
346 | { | ||
347 | return __arm_ioremap_caller(phys_addr, size, mtype, | ||
348 | __builtin_return_address(0)); | ||
338 | } | 349 | } |
339 | EXPORT_SYMBOL(__arm_ioremap); | 350 | EXPORT_SYMBOL(__arm_ioremap); |
340 | 351 | ||
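The point of the *_caller() split is that the exported wrappers record who created the mapping, and get_vm_area_caller() later exposes that address via /proc/vmallocinfo. The capture trick is plain gcc and works anywhere; a userspace sketch (names hypothetical):

    #include <stdio.h>

    /* The wrapper captures its own return address and hands it down,
     * so the core routine can attribute the mapping to its caller. */
    static void do_map(unsigned long phys, void *caller)
    {
            printf("mapping %#lx for caller %p\n", phys, caller);
    }

    static void __attribute__((noinline)) my_ioremap(unsigned long phys)
    {
            do_map(phys, __builtin_return_address(0));
    }

    int main(void)
    {
            my_ioremap(0x10000000UL);
            return 0;
    }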
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index c4f6f05198e0..a888363398f8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -24,6 +24,8 @@ struct mem_type { | |||
24 | 24 | ||
25 | const struct mem_type *get_mem_type(unsigned int type); | 25 | const struct mem_type *get_mem_type(unsigned int type); |
26 | 26 | ||
27 | extern void __flush_dcache_page(struct address_space *mapping, struct page *page); | ||
28 | |||
27 | #endif | 29 | #endif |
28 | 30 | ||
29 | struct map_desc; | 31 | struct map_desc; |
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 2b7996401b0f..f5abc51c5a07 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c | |||
@@ -54,7 +54,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
54 | * We enforce the MAP_FIXED case. | 54 | * We enforce the MAP_FIXED case. |
55 | */ | 55 | */ |
56 | if (flags & MAP_FIXED) { | 56 | if (flags & MAP_FIXED) { |
57 | if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1)) | 57 | if (aliasing && flags & MAP_SHARED && |
58 | (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) | ||
58 | return -EINVAL; | 59 | return -EINVAL; |
59 | return addr; | 60 | return addr; |
60 | } | 61 | } |
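The stricter test rejects MAP_FIXED shared mappings whose cache colour disagrees with the file offset, not merely addresses unaligned to SHMLBA. Worked numbers, assuming 4K pages and a 16K SHMLBA (four colours, typical for aliasing ARM VIPT caches):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define SHMLBA     (4UL << PAGE_SHIFT)  /* 16K: four cache colours */

    static int colour_ok(unsigned long addr, unsigned long pgoff)
    {
            /* The address and the file offset must share a colour. */
            return ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) == 0;
    }

    int main(void)
    {
            /* SHMLBA-aligned address, pgoff 0: old and new checks agree. */
            printf("%d\n", colour_ok(0x40000000UL, 0));  /* 1 */
            /* Same aligned address, pgoff 1: aligned, yet wrong colour,
             * which only the new check catches. */
            printf("%d\n", colour_ok(0x40000000UL, 1));  /* 0 */
            /* Shifting the address by one page matches pgoff 1's colour. */
            printf("%d\n", colour_ok(0x40001000UL, 1));  /* 1 */
            return 0;
    }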
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ea67be0223ac..241c24a1c18f 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = { | |||
100 | * writebuffer to be turned off. (Note: the write | 100 | * writebuffer to be turned off. (Note: the write |
101 | * buffer should not be on and the cache off). | 101 | * buffer should not be on and the cache off). |
102 | */ | 102 | */ |
103 | static void __init early_cachepolicy(char **p) | 103 | static int __init early_cachepolicy(char *p) |
104 | { | 104 | { |
105 | int i; | 105 | int i; |
106 | 106 | ||
107 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { | 107 | for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { |
108 | int len = strlen(cache_policies[i].policy); | 108 | int len = strlen(cache_policies[i].policy); |
109 | 109 | ||
110 | if (memcmp(*p, cache_policies[i].policy, len) == 0) { | 110 | if (memcmp(p, cache_policies[i].policy, len) == 0) { |
111 | cachepolicy = i; | 111 | cachepolicy = i; |
112 | cr_alignment &= ~cache_policies[i].cr_mask; | 112 | cr_alignment &= ~cache_policies[i].cr_mask; |
113 | cr_no_alignment &= ~cache_policies[i].cr_mask; | 113 | cr_no_alignment &= ~cache_policies[i].cr_mask; |
114 | *p += len; | ||
115 | break; | 114 | break; |
116 | } | 115 | } |
117 | } | 116 | } |
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p) | |||
130 | } | 129 | } |
131 | flush_cache_all(); | 130 | flush_cache_all(); |
132 | set_cr(cr_alignment); | 131 | set_cr(cr_alignment); |
132 | return 0; | ||
133 | } | 133 | } |
134 | __early_param("cachepolicy=", early_cachepolicy); | 134 | early_param("cachepolicy", early_cachepolicy); |
135 | 135 | ||
136 | static void __init early_nocache(char **__unused) | 136 | static int __init early_nocache(char *__unused) |
137 | { | 137 | { |
138 | char *p = "buffered"; | 138 | char *p = "buffered"; |
139 | printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); | 139 | printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); |
140 | early_cachepolicy(&p); | 140 | early_cachepolicy(p); |
141 | return 0; | ||
141 | } | 142 | } |
142 | __early_param("nocache", early_nocache); | 143 | early_param("nocache", early_nocache); |
143 | 144 | ||
144 | static void __init early_nowrite(char **__unused) | 145 | static int __init early_nowrite(char *__unused) |
145 | { | 146 | { |
146 | char *p = "uncached"; | 147 | char *p = "uncached"; |
147 | printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); | 148 | printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); |
148 | early_cachepolicy(&p); | 149 | early_cachepolicy(p); |
150 | return 0; | ||
149 | } | 151 | } |
150 | __early_param("nowb", early_nowrite); | 152 | early_param("nowb", early_nowrite); |
151 | 153 | ||
152 | static void __init early_ecc(char **p) | 154 | static int __init early_ecc(char *p) |
153 | { | 155 | { |
154 | if (memcmp(*p, "on", 2) == 0) { | 156 | if (memcmp(p, "on", 2) == 0) |
155 | ecc_mask = PMD_PROTECTION; | 157 | ecc_mask = PMD_PROTECTION; |
156 | *p += 2; | 158 | else if (memcmp(p, "off", 3) == 0) |
157 | } else if (memcmp(*p, "off", 3) == 0) { | ||
158 | ecc_mask = 0; | 159 | ecc_mask = 0; |
159 | *p += 3; | 160 | return 0; |
160 | } | ||
161 | } | 161 | } |
162 | __early_param("ecc=", early_ecc); | 162 | early_param("ecc", early_ecc); |
163 | 163 | ||
164 | static int __init noalign_setup(char *__unused) | 164 | static int __init noalign_setup(char *__unused) |
165 | { | 165 | { |
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void) | |||
420 | user_pgprot |= L_PTE_SHARED; | 420 | user_pgprot |= L_PTE_SHARED; |
421 | kern_pgprot |= L_PTE_SHARED; | 421 | kern_pgprot |= L_PTE_SHARED; |
422 | vecs_pgprot |= L_PTE_SHARED; | 422 | vecs_pgprot |= L_PTE_SHARED; |
423 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; | ||
424 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; | ||
425 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | ||
426 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | ||
423 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 427 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
424 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | 428 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; |
425 | #endif | 429 | #endif |
@@ -453,8 +457,7 @@ static void __init build_mem_type_table(void) | |||
453 | 457 | ||
454 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); | 458 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); |
455 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | | 459 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | |
456 | L_PTE_DIRTY | L_PTE_WRITE | | 460 | L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); |
457 | L_PTE_EXEC | kern_pgprot); | ||
458 | 461 | ||
459 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 462 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
460 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 463 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
@@ -671,9 +674,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; | |||
671 | * bytes. This can be used to increase (or decrease) the vmalloc | 674 | * bytes. This can be used to increase (or decrease) the vmalloc |
672 | * area - the default is 128m. | 675 | * area - the default is 128m. |
673 | */ | 676 | */ |
674 | static void __init early_vmalloc(char **arg) | 677 | static int __init early_vmalloc(char *arg) |
675 | { | 678 | { |
676 | vmalloc_reserve = memparse(*arg, arg); | 679 | vmalloc_reserve = memparse(arg, NULL); |
677 | 680 | ||
678 | if (vmalloc_reserve < SZ_16M) { | 681 | if (vmalloc_reserve < SZ_16M) { |
679 | vmalloc_reserve = SZ_16M; | 682 | vmalloc_reserve = SZ_16M; |
@@ -688,8 +691,9 @@ static void __init early_vmalloc(char **arg) | |||
688 | "vmalloc area is too big, limiting to %luMB\n", | 691 | "vmalloc area is too big, limiting to %luMB\n", |
689 | vmalloc_reserve >> 20); | 692 | vmalloc_reserve >> 20); |
690 | } | 693 | } |
694 | return 0; | ||
691 | } | 695 | } |
692 | __early_param("vmalloc=", early_vmalloc); | 696 | early_param("vmalloc", early_vmalloc); |
693 | 697 | ||
694 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) | 698 | #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) |
695 | 699 | ||
@@ -881,7 +885,7 @@ void __init reserve_node_zero(pg_data_t *pgdat) | |||
881 | BOOTMEM_EXCLUSIVE); | 885 | BOOTMEM_EXCLUSIVE); |
882 | } | 886 | } |
883 | 887 | ||
884 | if (machine_is_treo680()) { | 888 | if (machine_is_treo680() || machine_is_centro()) { |
885 | reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, | 889 | reserve_bootmem_node(pgdat, 0xa0000000, 0x1000, |
886 | BOOTMEM_EXCLUSIVE); | 890 | BOOTMEM_EXCLUSIVE); |
887 | reserve_bootmem_node(pgdat, 0xa2000000, 0x1000, | 891 | reserve_bootmem_node(pgdat, 0xa2000000, 0x1000, |
@@ -1036,7 +1040,7 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1036 | */ | 1040 | */ |
1037 | zero_page = alloc_bootmem_low_pages(PAGE_SIZE); | 1041 | zero_page = alloc_bootmem_low_pages(PAGE_SIZE); |
1038 | empty_zero_page = virt_to_page(zero_page); | 1042 | empty_zero_page = virt_to_page(zero_page); |
1039 | flush_dcache_page(empty_zero_page); | 1043 | __flush_dcache_page(NULL, empty_zero_page); |
1040 | } | 1044 | } |
1041 | 1045 | ||
1042 | /* | 1046 | /* |
@@ -1050,10 +1054,12 @@ void setup_mm_for_reboot(char mode) | |||
1050 | pgd_t *pgd; | 1054 | pgd_t *pgd; |
1051 | int i; | 1055 | int i; |
1052 | 1056 | ||
1053 | if (current->mm && current->mm->pgd) | 1057 | /* |
1054 | pgd = current->mm->pgd; | 1058 | * We need access to user-mode page tables here. For kernel threads |
1055 | else | 1059 | * we don't have any user-mode mappings, so we use the context that we |
1056 | pgd = init_mm.pgd; | 1060 | * "borrowed". |
1061 | */ | ||
1062 | pgd = current->active_mm->pgd; | ||
1057 | 1063 | ||
1058 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; | 1064 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; |
1059 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | 1065 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) |
@@ -1068,4 +1074,6 @@ void setup_mm_for_reboot(char mode) | |||
1068 | pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); | 1074 | pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); |
1069 | flush_pmd_entry(pmd); | 1075 | flush_pmd_entry(pmd); |
1070 | } | 1076 | } |
1077 | |||
1078 | local_flush_tlb_all(); | ||
1071 | } | 1079 | } |
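The replacement works because kernel threads have no mm of their own: current->mm is NULL for them, while current->active_mm always names the page tables the CPU is actually running on (the "borrowed" context). Schematically, the old if/else collapses as follows (a sketch of the invariant, not new code):

    /* For a user task, active_mm == mm; for a kernel thread, mm is
     * NULL and active_mm is whatever mm was borrowed last. */
    struct mm_struct *mm = current->mm ? current->mm : current->active_mm;
    pgd_t *pgd = mm->pgd;   /* == current->active_mm->pgd either way */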
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 900811cc9130..33b327379f07 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -61,10 +61,19 @@ void setup_mm_for_reboot(char mode) | |||
61 | 61 | ||
62 | void flush_dcache_page(struct page *page) | 62 | void flush_dcache_page(struct page *page) |
63 | { | 63 | { |
64 | __cpuc_flush_dcache_page(page_address(page)); | 64 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); |
65 | } | 65 | } |
66 | EXPORT_SYMBOL(flush_dcache_page); | 66 | EXPORT_SYMBOL(flush_dcache_page); |
67 | 67 | ||
68 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | ||
69 | unsigned long uaddr, void *dst, const void *src, | ||
70 | unsigned long len) | ||
71 | { | ||
72 | memcpy(dst, src, len); | ||
73 | if (vma->vm_flags & VM_EXEC) | ||
74 | __cpuc_coherent_user_range(uaddr, uaddr + len); | ||
75 | } | ||
76 | |||
68 | void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, | 77 | void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, |
69 | size_t size, unsigned int mtype) | 78 | size_t size, unsigned int mtype) |
70 | { | 79 | { |
@@ -74,6 +83,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, | |||
74 | } | 83 | } |
75 | EXPORT_SYMBOL(__arm_ioremap_pfn); | 84 | EXPORT_SYMBOL(__arm_ioremap_pfn); |
76 | 85 | ||
86 | void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, | ||
87 | size_t size, unsigned int mtype, void *caller) | ||
88 | { | ||
89 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | ||
90 | } | ||
91 | |||
77 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 92 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, |
78 | unsigned int mtype) | 93 | unsigned int mtype) |
79 | { | 94 | { |
@@ -81,6 +96,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | |||
81 | } | 96 | } |
82 | EXPORT_SYMBOL(__arm_ioremap); | 97 | EXPORT_SYMBOL(__arm_ioremap); |
83 | 98 | ||
99 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | ||
100 | unsigned int mtype, void *caller) | ||
101 | { | ||
102 | return __arm_ioremap(phys_addr, size, mtype); | ||
103 | } | ||
104 | |||
84 | void __iounmap(volatile void __iomem *addr) | 105 | void __iounmap(volatile void __iomem *addr) |
85 | { | 106 | { |
86 | } | 107 | } |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 2690146161ba..be5f58e153bf 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/gfp.h> | ||
11 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
12 | 13 | ||
13 | #include <asm/pgalloc.h> | 14 | #include <asm/pgalloc.h> |
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index d9fb4b98c49f..72507c630ceb 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -231,17 +231,18 @@ ENTRY(arm1020_coherent_user_range) | |||
231 | mov pc, lr | 231 | mov pc, lr |
232 | 232 | ||
233 | /* | 233 | /* |
234 | * flush_kern_dcache_page(void *page) | 234 | * flush_kern_dcache_area(void *addr, size_t size) |
235 | * | 235 | * |
236 | * Ensure no D cache aliasing occurs, either with itself or | 236 | * Ensure no D cache aliasing occurs, either with itself or |
237 | * the I cache | 237 | * the I cache |
238 | * | 238 | * |
239 | * - page - page aligned address | 239 | * - addr - kernel address |
240 | * - size - region size | ||
240 | */ | 241 | */ |
241 | ENTRY(arm1020_flush_kern_dcache_page) | 242 | ENTRY(arm1020_flush_kern_dcache_area) |
242 | mov ip, #0 | 243 | mov ip, #0 |
243 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 244 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
244 | add r1, r0, #PAGE_SZ | 245 | add r1, r0, r1 |
245 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 246 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
246 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 247 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
247 | add r0, r0, #CACHE_DLINESIZE | 248 | add r0, r0, #CACHE_DLINESIZE |
@@ -264,7 +265,7 @@ ENTRY(arm1020_flush_kern_dcache_page) | |||
264 | * | 265 | * |
265 | * (same as v4wb) | 266 | * (same as v4wb) |
266 | */ | 267 | */ |
267 | ENTRY(arm1020_dma_inv_range) | 268 | arm1020_dma_inv_range: |
268 | mov ip, #0 | 269 | mov ip, #0 |
269 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 270 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
270 | tst r0, #CACHE_DLINESIZE - 1 | 271 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -294,7 +295,7 @@ ENTRY(arm1020_dma_inv_range) | |||
294 | * | 295 | * |
295 | * (same as v4wb) | 296 | * (same as v4wb) |
296 | */ | 297 | */ |
297 | ENTRY(arm1020_dma_clean_range) | 298 | arm1020_dma_clean_range: |
298 | mov ip, #0 | 299 | mov ip, #0 |
299 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 300 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
300 | bic r0, r0, #CACHE_DLINESIZE - 1 | 301 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -329,15 +330,39 @@ ENTRY(arm1020_dma_flush_range) | |||
329 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 330 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
330 | mov pc, lr | 331 | mov pc, lr |
331 | 332 | ||
333 | /* | ||
334 | * dma_map_area(start, size, dir) | ||
335 | * - start - kernel virtual start address | ||
336 | * - size - size of region | ||
337 | * - dir - DMA direction | ||
338 | */ | ||
339 | ENTRY(arm1020_dma_map_area) | ||
340 | add r1, r1, r0 | ||
341 | cmp r2, #DMA_TO_DEVICE | ||
342 | beq arm1020_dma_clean_range | ||
343 | bcs arm1020_dma_inv_range | ||
344 | b arm1020_dma_flush_range | ||
345 | ENDPROC(arm1020_dma_map_area) | ||
346 | |||
347 | /* | ||
348 | * dma_unmap_area(start, size, dir) | ||
349 | * - start - kernel virtual start address | ||
350 | * - size - size of region | ||
351 | * - dir - DMA direction | ||
352 | */ | ||
353 | ENTRY(arm1020_dma_unmap_area) | ||
354 | mov pc, lr | ||
355 | ENDPROC(arm1020_dma_unmap_area) | ||
356 | |||
332 | ENTRY(arm1020_cache_fns) | 357 | ENTRY(arm1020_cache_fns) |
333 | .long arm1020_flush_kern_cache_all | 358 | .long arm1020_flush_kern_cache_all |
334 | .long arm1020_flush_user_cache_all | 359 | .long arm1020_flush_user_cache_all |
335 | .long arm1020_flush_user_cache_range | 360 | .long arm1020_flush_user_cache_range |
336 | .long arm1020_coherent_kern_range | 361 | .long arm1020_coherent_kern_range |
337 | .long arm1020_coherent_user_range | 362 | .long arm1020_coherent_user_range |
338 | .long arm1020_flush_kern_dcache_page | 363 | .long arm1020_flush_kern_dcache_area |
339 | .long arm1020_dma_inv_range | 364 | .long arm1020_dma_map_area |
340 | .long arm1020_dma_clean_range | 365 | .long arm1020_dma_unmap_area |
341 | .long arm1020_dma_flush_range | 366 | .long arm1020_dma_flush_range |
342 | 367 | ||
343 | .align 5 | 368 | .align 5 |
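The three-way branch in arm1020_dma_map_area is a condition-code trick: after cmp r2, #DMA_TO_DEVICE, beq catches DMA_TO_DEVICE, bcs (unsigned greater-or-equal, minus the equal case already taken) catches DMA_FROM_DEVICE, and the fall-through handles DMA_BIDIRECTIONAL. Rendered in C, assuming the usual enum ordering (BIDIRECTIONAL = 0, TO_DEVICE = 1, FROM_DEVICE = 2) and with print stubs standing in for the cache routines named in the .S file:

    #include <stdio.h>

    enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE,
                              DMA_FROM_DEVICE, DMA_NONE };

    static void clean(unsigned long s, unsigned long e) { printf("clean %#lx-%#lx\n", s, e); }
    static void inv(unsigned long s, unsigned long e)   { printf("inv   %#lx-%#lx\n", s, e); }
    static void flush(unsigned long s, unsigned long e) { printf("flush %#lx-%#lx\n", s, e); }

    static void dma_map_area(unsigned long start, unsigned long size,
                             enum dma_data_direction dir)
    {
            unsigned long end = start + size;       /* add r1, r1, r0 */

            if (dir == DMA_TO_DEVICE)               /* beq */
                    clean(start, end);              /* writeback only */
            else if (dir >= DMA_TO_DEVICE)          /* bcs */
                    inv(start, end);                /* invalidate only */
            else                                    /* DMA_BIDIRECTIONAL */
                    flush(start, end);              /* clean+invalidate */
    }

    int main(void)
    {
            dma_map_area(0x1000, 0x100, DMA_TO_DEVICE);
            dma_map_area(0x1000, 0x100, DMA_FROM_DEVICE);
            dma_map_area(0x1000, 0x100, DMA_BIDIRECTIONAL);
            return 0;
    }

The same dispatch is repeated verbatim for arm1020e, arm1022 and arm1026 below; dma_unmap_area is an empty return on these cores, since any needed maintenance was already done at map time.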
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 7453b75dcea5..d27829805609 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -225,17 +225,18 @@ ENTRY(arm1020e_coherent_user_range) | |||
225 | mov pc, lr | 225 | mov pc, lr |
226 | 226 | ||
227 | /* | 227 | /* |
228 | * flush_kern_dcache_page(void *page) | 228 | * flush_kern_dcache_area(void *addr, size_t size) |
229 | * | 229 | * |
230 | * Ensure no D cache aliasing occurs, either with itself or | 230 | * Ensure no D cache aliasing occurs, either with itself or |
231 | * the I cache | 231 | * the I cache |
232 | * | 232 | * |
233 | * - page - page aligned address | 233 | * - addr - kernel address |
234 | * - size - region size | ||
234 | */ | 235 | */ |
235 | ENTRY(arm1020e_flush_kern_dcache_page) | 236 | ENTRY(arm1020e_flush_kern_dcache_area) |
236 | mov ip, #0 | 237 | mov ip, #0 |
237 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 238 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
238 | add r1, r0, #PAGE_SZ | 239 | add r1, r0, r1 |
239 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 240 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
240 | add r0, r0, #CACHE_DLINESIZE | 241 | add r0, r0, #CACHE_DLINESIZE |
241 | cmp r0, r1 | 242 | cmp r0, r1 |
@@ -257,7 +258,7 @@ ENTRY(arm1020e_flush_kern_dcache_page) | |||
257 | * | 258 | * |
258 | * (same as v4wb) | 259 | * (same as v4wb) |
259 | */ | 260 | */ |
260 | ENTRY(arm1020e_dma_inv_range) | 261 | arm1020e_dma_inv_range: |
261 | mov ip, #0 | 262 | mov ip, #0 |
262 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 263 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
263 | tst r0, #CACHE_DLINESIZE - 1 | 264 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -283,7 +284,7 @@ ENTRY(arm1020e_dma_inv_range) | |||
283 | * | 284 | * |
284 | * (same as v4wb) | 285 | * (same as v4wb) |
285 | */ | 286 | */ |
286 | ENTRY(arm1020e_dma_clean_range) | 287 | arm1020e_dma_clean_range: |
287 | mov ip, #0 | 288 | mov ip, #0 |
288 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 289 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
289 | bic r0, r0, #CACHE_DLINESIZE - 1 | 290 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -315,15 +316,39 @@ ENTRY(arm1020e_dma_flush_range) | |||
315 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 316 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
316 | mov pc, lr | 317 | mov pc, lr |
317 | 318 | ||
319 | /* | ||
320 | * dma_map_area(start, size, dir) | ||
321 | * - start - kernel virtual start address | ||
322 | * - size - size of region | ||
323 | * - dir - DMA direction | ||
324 | */ | ||
325 | ENTRY(arm1020e_dma_map_area) | ||
326 | add r1, r1, r0 | ||
327 | cmp r2, #DMA_TO_DEVICE | ||
328 | beq arm1020e_dma_clean_range | ||
329 | bcs arm1020e_dma_inv_range | ||
330 | b arm1020e_dma_flush_range | ||
331 | ENDPROC(arm1020e_dma_map_area) | ||
332 | |||
333 | /* | ||
334 | * dma_unmap_area(start, size, dir) | ||
335 | * - start - kernel virtual start address | ||
336 | * - size - size of region | ||
337 | * - dir - DMA direction | ||
338 | */ | ||
339 | ENTRY(arm1020e_dma_unmap_area) | ||
340 | mov pc, lr | ||
341 | ENDPROC(arm1020e_dma_unmap_area) | ||
342 | |||
318 | ENTRY(arm1020e_cache_fns) | 343 | ENTRY(arm1020e_cache_fns) |
319 | .long arm1020e_flush_kern_cache_all | 344 | .long arm1020e_flush_kern_cache_all |
320 | .long arm1020e_flush_user_cache_all | 345 | .long arm1020e_flush_user_cache_all |
321 | .long arm1020e_flush_user_cache_range | 346 | .long arm1020e_flush_user_cache_range |
322 | .long arm1020e_coherent_kern_range | 347 | .long arm1020e_coherent_kern_range |
323 | .long arm1020e_coherent_user_range | 348 | .long arm1020e_coherent_user_range |
324 | .long arm1020e_flush_kern_dcache_page | 349 | .long arm1020e_flush_kern_dcache_area |
325 | .long arm1020e_dma_inv_range | 350 | .long arm1020e_dma_map_area |
326 | .long arm1020e_dma_clean_range | 351 | .long arm1020e_dma_unmap_area |
327 | .long arm1020e_dma_flush_range | 352 | .long arm1020e_dma_flush_range |
328 | 353 | ||
329 | .align 5 | 354 | .align 5 |
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 8eb72d75a8b6..ce13e4a827de 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -214,17 +214,18 @@ ENTRY(arm1022_coherent_user_range) | |||
214 | mov pc, lr | 214 | mov pc, lr |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * flush_kern_dcache_page(void *page) | 217 | * flush_kern_dcache_area(void *addr, size_t size) |
218 | * | 218 | * |
219 | * Ensure no D cache aliasing occurs, either with itself or | 219 | * Ensure no D cache aliasing occurs, either with itself or |
220 | * the I cache | 220 | * the I cache |
221 | * | 221 | * |
222 | * - page - page aligned address | 222 | * - addr - kernel address |
223 | * - size - region size | ||
223 | */ | 224 | */ |
224 | ENTRY(arm1022_flush_kern_dcache_page) | 225 | ENTRY(arm1022_flush_kern_dcache_area) |
225 | mov ip, #0 | 226 | mov ip, #0 |
226 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 227 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
227 | add r1, r0, #PAGE_SZ | 228 | add r1, r0, r1 |
228 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 229 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
229 | add r0, r0, #CACHE_DLINESIZE | 230 | add r0, r0, #CACHE_DLINESIZE |
230 | cmp r0, r1 | 231 | cmp r0, r1 |
@@ -246,7 +247,7 @@ ENTRY(arm1022_flush_kern_dcache_page) | |||
246 | * | 247 | * |
247 | * (same as v4wb) | 248 | * (same as v4wb) |
248 | */ | 249 | */ |
249 | ENTRY(arm1022_dma_inv_range) | 250 | arm1022_dma_inv_range: |
250 | mov ip, #0 | 251 | mov ip, #0 |
251 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 252 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
252 | tst r0, #CACHE_DLINESIZE - 1 | 253 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -272,7 +273,7 @@ ENTRY(arm1022_dma_inv_range) | |||
272 | * | 273 | * |
273 | * (same as v4wb) | 274 | * (same as v4wb) |
274 | */ | 275 | */ |
275 | ENTRY(arm1022_dma_clean_range) | 276 | arm1022_dma_clean_range: |
276 | mov ip, #0 | 277 | mov ip, #0 |
277 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 278 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
278 | bic r0, r0, #CACHE_DLINESIZE - 1 | 279 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -304,15 +305,39 @@ ENTRY(arm1022_dma_flush_range) | |||
304 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 305 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
305 | mov pc, lr | 306 | mov pc, lr |
306 | 307 | ||
308 | /* | ||
309 | * dma_map_area(start, size, dir) | ||
310 | * - start - kernel virtual start address | ||
311 | * - size - size of region | ||
312 | * - dir - DMA direction | ||
313 | */ | ||
314 | ENTRY(arm1022_dma_map_area) | ||
315 | add r1, r1, r0 | ||
316 | cmp r2, #DMA_TO_DEVICE | ||
317 | beq arm1022_dma_clean_range | ||
318 | bcs arm1022_dma_inv_range | ||
319 | b arm1022_dma_flush_range | ||
320 | ENDPROC(arm1022_dma_map_area) | ||
321 | |||
322 | /* | ||
323 | * dma_unmap_area(start, size, dir) | ||
324 | * - start - kernel virtual start address | ||
325 | * - size - size of region | ||
326 | * - dir - DMA direction | ||
327 | */ | ||
328 | ENTRY(arm1022_dma_unmap_area) | ||
329 | mov pc, lr | ||
330 | ENDPROC(arm1022_dma_unmap_area) | ||
331 | |||
307 | ENTRY(arm1022_cache_fns) | 332 | ENTRY(arm1022_cache_fns) |
308 | .long arm1022_flush_kern_cache_all | 333 | .long arm1022_flush_kern_cache_all |
309 | .long arm1022_flush_user_cache_all | 334 | .long arm1022_flush_user_cache_all |
310 | .long arm1022_flush_user_cache_range | 335 | .long arm1022_flush_user_cache_range |
311 | .long arm1022_coherent_kern_range | 336 | .long arm1022_coherent_kern_range |
312 | .long arm1022_coherent_user_range | 337 | .long arm1022_coherent_user_range |
313 | .long arm1022_flush_kern_dcache_page | 338 | .long arm1022_flush_kern_dcache_area |
314 | .long arm1022_dma_inv_range | 339 | .long arm1022_dma_map_area |
315 | .long arm1022_dma_clean_range | 340 | .long arm1022_dma_unmap_area |
316 | .long arm1022_dma_flush_range | 341 | .long arm1022_dma_flush_range |
317 | 342 | ||
318 | .align 5 | 343 | .align 5 |
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 3b59f0d67139..636672a29c6d 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -208,17 +208,18 @@ ENTRY(arm1026_coherent_user_range) | |||
208 | mov pc, lr | 208 | mov pc, lr |
209 | 209 | ||
210 | /* | 210 | /* |
211 | * flush_kern_dcache_page(void *page) | 211 | * flush_kern_dcache_area(void *addr, size_t size) |
212 | * | 212 | * |
213 | * Ensure no D cache aliasing occurs, either with itself or | 213 | * Ensure no D cache aliasing occurs, either with itself or |
214 | * the I cache | 214 | * the I cache |
215 | * | 215 | * |
216 | * - page - page aligned address | 216 | * - addr - kernel address |
217 | * - size - region size | ||
217 | */ | 218 | */ |
218 | ENTRY(arm1026_flush_kern_dcache_page) | 219 | ENTRY(arm1026_flush_kern_dcache_area) |
219 | mov ip, #0 | 220 | mov ip, #0 |
220 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 221 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
221 | add r1, r0, #PAGE_SZ | 222 | add r1, r0, r1 |
222 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 223 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
223 | add r0, r0, #CACHE_DLINESIZE | 224 | add r0, r0, #CACHE_DLINESIZE |
224 | cmp r0, r1 | 225 | cmp r0, r1 |
@@ -240,7 +241,7 @@ ENTRY(arm1026_flush_kern_dcache_page) | |||
240 | * | 241 | * |
241 | * (same as v4wb) | 242 | * (same as v4wb) |
242 | */ | 243 | */ |
243 | ENTRY(arm1026_dma_inv_range) | 244 | arm1026_dma_inv_range: |
244 | mov ip, #0 | 245 | mov ip, #0 |
245 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 246 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
246 | tst r0, #CACHE_DLINESIZE - 1 | 247 | tst r0, #CACHE_DLINESIZE - 1 |
@@ -266,7 +267,7 @@ ENTRY(arm1026_dma_inv_range) | |||
266 | * | 267 | * |
267 | * (same as v4wb) | 268 | * (same as v4wb) |
268 | */ | 269 | */ |
269 | ENTRY(arm1026_dma_clean_range) | 270 | arm1026_dma_clean_range: |
270 | mov ip, #0 | 271 | mov ip, #0 |
271 | #ifndef CONFIG_CPU_DCACHE_DISABLE | 272 | #ifndef CONFIG_CPU_DCACHE_DISABLE |
272 | bic r0, r0, #CACHE_DLINESIZE - 1 | 273 | bic r0, r0, #CACHE_DLINESIZE - 1 |
@@ -298,15 +299,39 @@ ENTRY(arm1026_dma_flush_range) | |||
298 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 299 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
299 | mov pc, lr | 300 | mov pc, lr |
300 | 301 | ||
302 | /* | ||
303 | * dma_map_area(start, size, dir) | ||
304 | * - start - kernel virtual start address | ||
305 | * - size - size of region | ||
306 | * - dir - DMA direction | ||
307 | */ | ||
308 | ENTRY(arm1026_dma_map_area) | ||
309 | add r1, r1, r0 | ||
310 | cmp r2, #DMA_TO_DEVICE | ||
311 | beq arm1026_dma_clean_range | ||
312 | bcs arm1026_dma_inv_range | ||
313 | b arm1026_dma_flush_range | ||
314 | ENDPROC(arm1026_dma_map_area) | ||
315 | |||
316 | /* | ||
317 | * dma_unmap_area(start, size, dir) | ||
318 | * - start - kernel virtual start address | ||
319 | * - size - size of region | ||
320 | * - dir - DMA direction | ||
321 | */ | ||
322 | ENTRY(arm1026_dma_unmap_area) | ||
323 | mov pc, lr | ||
324 | ENDPROC(arm1026_dma_unmap_area) | ||
325 | |||
301 | ENTRY(arm1026_cache_fns) | 326 | ENTRY(arm1026_cache_fns) |
302 | .long arm1026_flush_kern_cache_all | 327 | .long arm1026_flush_kern_cache_all |
303 | .long arm1026_flush_user_cache_all | 328 | .long arm1026_flush_user_cache_all |
304 | .long arm1026_flush_user_cache_range | 329 | .long arm1026_flush_user_cache_range |
305 | .long arm1026_coherent_kern_range | 330 | .long arm1026_coherent_kern_range |
306 | .long arm1026_coherent_user_range | 331 | .long arm1026_coherent_user_range |
307 | .long arm1026_flush_kern_dcache_page | 332 | .long arm1026_flush_kern_dcache_area |
308 | .long arm1026_dma_inv_range | 333 | .long arm1026_dma_map_area |
309 | .long arm1026_dma_clean_range | 334 | .long arm1026_dma_unmap_area |
310 | .long arm1026_dma_flush_range | 335 | .long arm1026_dma_flush_range |
311 | 336 | ||
312 | .align 5 | 337 | .align 5 |
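The four-instruction dispatch that every new dma_map_area uses relies on the enum dma_data_direction encoding (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2): after a single cmp against DMA_TO_DEVICE, beq takes the clean-only path and bcs (unsigned higher-or-same, which here means strictly higher since equal was already taken) the invalidate path. A C sketch of the same logic; clean_range/invalidate_range/flush_range are placeholder names for the per-CPU range routines:

    static void dma_map_area_sketch(const char *start, size_t size, int dir)
    {
        const char *end = start + size;        /* add r1, r1, r0 */

        if (dir == DMA_TO_DEVICE)              /* beq <cpu>_dma_clean_range */
            clean_range(start, end);           /* write dirty lines back */
        else if (dir > DMA_TO_DEVICE)          /* bcs <cpu>_dma_inv_range */
            invalidate_range(start, end);      /* FROM_DEVICE: drop stale lines */
        else                                   /* b <cpu>_dma_flush_range */
            flush_range(start, end);           /* BIDIRECTIONAL: clean+invalidate */
    }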
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 3f9cd3d8f6d5..795dc615f43b 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
@@ -41,7 +41,7 @@ ENTRY(cpu_arm7_dcache_clean_area) | |||
41 | ENTRY(cpu_arm7_data_abort) | 41 | ENTRY(cpu_arm7_data_abort) |
42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
44 | ldr r8, [r0] @ read arm instruction | 44 | ldr r8, [r2] @ read arm instruction |
45 | tst r8, #1 << 20 @ L = 0 -> write? | 45 | tst r8, #1 << 20 @ L = 0 -> write? |
46 | orreq r1, r1, #1 << 11 @ yes. | 46 | orreq r1, r1, #1 << 11 @ yes. |
47 | and r7, r8, #15 << 24 | 47 | and r7, r8, #15 << 24 |
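The one-line proc-arm6_7.S change is a bug fix: the faulting instruction has to be loaded from the aborted PC, which the abort path passes in r2, not from the fault address that was just loaded into r0 from the FAR. In C terms, roughly (insn/fsr/pc names are illustrative):

    u32 insn = *(u32 *)pc;        /* ldr r8, [r2] -- was [r0], the FAR */
    if (!(insn & (1 << 20)))      /* L bit clear: a store */
        fsr |= 1 << 11;           /* mark the abort as a write */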
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2b7c197cc58d..8be81992645d 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -207,15 +207,16 @@ ENTRY(arm920_coherent_user_range) | |||
207 | mov pc, lr | 207 | mov pc, lr |
208 | 208 | ||
209 | /* | 209 | /* |
210 | * flush_kern_dcache_page(void *page) | 210 | * flush_kern_dcache_area(void *addr, size_t size) |
211 | * | 211 | * |
212 | * Ensure no D cache aliasing occurs, either with itself or | 212 | * Ensure no D cache aliasing occurs, either with itself or |
213 | * the I cache | 213 | * the I cache |
214 | * | 214 | * |
215 | * - addr - page aligned address | 215 | * - addr - kernel address |
216 | * - size - region size | ||
216 | */ | 217 | */ |
217 | ENTRY(arm920_flush_kern_dcache_page) | 218 | ENTRY(arm920_flush_kern_dcache_area) |
218 | add r1, r0, #PAGE_SZ | 219 | add r1, r0, r1 |
219 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 220 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
220 | add r0, r0, #CACHE_DLINESIZE | 221 | add r0, r0, #CACHE_DLINESIZE |
221 | cmp r0, r1 | 222 | cmp r0, r1 |
@@ -238,7 +239,7 @@ ENTRY(arm920_flush_kern_dcache_page) | |||
238 | * | 239 | * |
239 | * (same as v4wb) | 240 | * (same as v4wb) |
240 | */ | 241 | */ |
241 | ENTRY(arm920_dma_inv_range) | 242 | arm920_dma_inv_range: |
242 | tst r0, #CACHE_DLINESIZE - 1 | 243 | tst r0, #CACHE_DLINESIZE - 1 |
243 | bic r0, r0, #CACHE_DLINESIZE - 1 | 244 | bic r0, r0, #CACHE_DLINESIZE - 1 |
244 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 245 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -261,7 +262,7 @@ ENTRY(arm920_dma_inv_range) | |||
261 | * | 262 | * |
262 | * (same as v4wb) | 263 | * (same as v4wb) |
263 | */ | 264 | */ |
264 | ENTRY(arm920_dma_clean_range) | 265 | arm920_dma_clean_range: |
265 | bic r0, r0, #CACHE_DLINESIZE - 1 | 266 | bic r0, r0, #CACHE_DLINESIZE - 1 |
266 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 267 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
267 | add r0, r0, #CACHE_DLINESIZE | 268 | add r0, r0, #CACHE_DLINESIZE |
@@ -287,15 +288,39 @@ ENTRY(arm920_dma_flush_range) | |||
287 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 288 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
288 | mov pc, lr | 289 | mov pc, lr |
289 | 290 | ||
291 | /* | ||
292 | * dma_map_area(start, size, dir) | ||
293 | * - start - kernel virtual start address | ||
294 | * - size - size of region | ||
295 | * - dir - DMA direction | ||
296 | */ | ||
297 | ENTRY(arm920_dma_map_area) | ||
298 | add r1, r1, r0 | ||
299 | cmp r2, #DMA_TO_DEVICE | ||
300 | beq arm920_dma_clean_range | ||
301 | bcs arm920_dma_inv_range | ||
302 | b arm920_dma_flush_range | ||
303 | ENDPROC(arm920_dma_map_area) | ||
304 | |||
305 | /* | ||
306 | * dma_unmap_area(start, size, dir) | ||
307 | * - start - kernel virtual start address | ||
308 | * - size - size of region | ||
309 | * - dir - DMA direction | ||
310 | */ | ||
311 | ENTRY(arm920_dma_unmap_area) | ||
312 | mov pc, lr | ||
313 | ENDPROC(arm920_dma_unmap_area) | ||
314 | |||
290 | ENTRY(arm920_cache_fns) | 315 | ENTRY(arm920_cache_fns) |
291 | .long arm920_flush_kern_cache_all | 316 | .long arm920_flush_kern_cache_all |
292 | .long arm920_flush_user_cache_all | 317 | .long arm920_flush_user_cache_all |
293 | .long arm920_flush_user_cache_range | 318 | .long arm920_flush_user_cache_range |
294 | .long arm920_coherent_kern_range | 319 | .long arm920_coherent_kern_range |
295 | .long arm920_coherent_user_range | 320 | .long arm920_coherent_user_range |
296 | .long arm920_flush_kern_dcache_page | 321 | .long arm920_flush_kern_dcache_area |
297 | .long arm920_dma_inv_range | 322 | .long arm920_dma_map_area |
298 | .long arm920_dma_clean_range | 323 | .long arm920_dma_unmap_area |
299 | .long arm920_dma_flush_range | 324 | .long arm920_dma_flush_range |
300 | 325 | ||
301 | #endif | 326 | #endif |
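The recurring two-line change is the heart of the interface rename: flush_kern_dcache_page(void *page) always covered PAGE_SZ bytes, while flush_kern_dcache_area(void *addr, size_t size) takes the extent from its second argument, so `add r1, r0, #PAGE_SZ` becomes `add r1, r0, r1`. Sketched in C (clean_and_inv_dcache_line is a placeholder for the mcr c7, c14, 1 operation):

    void flush_kern_dcache_area_sketch(void *addr, size_t size)
    {
        char *p = addr;
        char *end = p + size;                  /* add r1, r0, r1 */

        do {
            clean_and_inv_dcache_line(p);      /* mcr p15, 0, r0, c7, c14, 1 */
            p += CACHE_DLINESIZE;              /* add r0, r0, #CACHE_DLINESIZE */
        } while (p < end);                     /* cmp r0, r1; blo 1b */
        /* the assembly then invalidates the I-cache and drains the WB */
    }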
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 06a1aa4e3398..c0ff8e4b1074 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -209,15 +209,16 @@ ENTRY(arm922_coherent_user_range) | |||
209 | mov pc, lr | 209 | mov pc, lr |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * flush_kern_dcache_page(void *page) | 212 | * flush_kern_dcache_area(void *addr, size_t size) |
213 | * | 213 | * |
214 | * Ensure no D cache aliasing occurs, either with itself or | 214 | * Ensure no D cache aliasing occurs, either with itself or |
215 | * the I cache | 215 | * the I cache |
216 | * | 216 | * |
217 | * - addr - page aligned address | 217 | * - addr - kernel address |
218 | * - size - region size | ||
218 | */ | 219 | */ |
219 | ENTRY(arm922_flush_kern_dcache_page) | 220 | ENTRY(arm922_flush_kern_dcache_area) |
220 | add r1, r0, #PAGE_SZ | 221 | add r1, r0, r1 |
221 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 222 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
222 | add r0, r0, #CACHE_DLINESIZE | 223 | add r0, r0, #CACHE_DLINESIZE |
223 | cmp r0, r1 | 224 | cmp r0, r1 |
@@ -240,7 +241,7 @@ ENTRY(arm922_flush_kern_dcache_page) | |||
240 | * | 241 | * |
241 | * (same as v4wb) | 242 | * (same as v4wb) |
242 | */ | 243 | */ |
243 | ENTRY(arm922_dma_inv_range) | 244 | arm922_dma_inv_range: |
244 | tst r0, #CACHE_DLINESIZE - 1 | 245 | tst r0, #CACHE_DLINESIZE - 1 |
245 | bic r0, r0, #CACHE_DLINESIZE - 1 | 246 | bic r0, r0, #CACHE_DLINESIZE - 1 |
246 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 247 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -263,7 +264,7 @@ ENTRY(arm922_dma_inv_range) | |||
263 | * | 264 | * |
264 | * (same as v4wb) | 265 | * (same as v4wb) |
265 | */ | 266 | */ |
266 | ENTRY(arm922_dma_clean_range) | 267 | arm922_dma_clean_range: |
267 | bic r0, r0, #CACHE_DLINESIZE - 1 | 268 | bic r0, r0, #CACHE_DLINESIZE - 1 |
268 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 269 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
269 | add r0, r0, #CACHE_DLINESIZE | 270 | add r0, r0, #CACHE_DLINESIZE |
@@ -289,15 +290,39 @@ ENTRY(arm922_dma_flush_range) | |||
289 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 290 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
290 | mov pc, lr | 291 | mov pc, lr |
291 | 292 | ||
293 | /* | ||
294 | * dma_map_area(start, size, dir) | ||
295 | * - start - kernel virtual start address | ||
296 | * - size - size of region | ||
297 | * - dir - DMA direction | ||
298 | */ | ||
299 | ENTRY(arm922_dma_map_area) | ||
300 | add r1, r1, r0 | ||
301 | cmp r2, #DMA_TO_DEVICE | ||
302 | beq arm922_dma_clean_range | ||
303 | bcs arm922_dma_inv_range | ||
304 | b arm922_dma_flush_range | ||
305 | ENDPROC(arm922_dma_map_area) | ||
306 | |||
307 | /* | ||
308 | * dma_unmap_area(start, size, dir) | ||
309 | * - start - kernel virtual start address | ||
310 | * - size - size of region | ||
311 | * - dir - DMA direction | ||
312 | */ | ||
313 | ENTRY(arm922_dma_unmap_area) | ||
314 | mov pc, lr | ||
315 | ENDPROC(arm922_dma_unmap_area) | ||
316 | |||
292 | ENTRY(arm922_cache_fns) | 317 | ENTRY(arm922_cache_fns) |
293 | .long arm922_flush_kern_cache_all | 318 | .long arm922_flush_kern_cache_all |
294 | .long arm922_flush_user_cache_all | 319 | .long arm922_flush_user_cache_all |
295 | .long arm922_flush_user_cache_range | 320 | .long arm922_flush_user_cache_range |
296 | .long arm922_coherent_kern_range | 321 | .long arm922_coherent_kern_range |
297 | .long arm922_coherent_user_range | 322 | .long arm922_coherent_user_range |
298 | .long arm922_flush_kern_dcache_page | 323 | .long arm922_flush_kern_dcache_area |
299 | .long arm922_dma_inv_range | 324 | .long arm922_dma_map_area |
300 | .long arm922_dma_clean_range | 325 | .long arm922_dma_unmap_area |
301 | .long arm922_dma_flush_range | 326 | .long arm922_dma_flush_range |
302 | 327 | ||
303 | #endif | 328 | #endif |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index cb53435a85ae..3c6cffe400f6 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -251,15 +251,16 @@ ENTRY(arm925_coherent_user_range) | |||
251 | mov pc, lr | 251 | mov pc, lr |
252 | 252 | ||
253 | /* | 253 | /* |
254 | * flush_kern_dcache_page(void *page) | 254 | * flush_kern_dcache_area(void *addr, size_t size) |
255 | * | 255 | * |
256 | * Ensure no D cache aliasing occurs, either with itself or | 256 | * Ensure no D cache aliasing occurs, either with itself or |
257 | * the I cache | 257 | * the I cache |
258 | * | 258 | * |
259 | * - addr - page aligned address | 259 | * - addr - kernel address |
260 | * - size - region size | ||
260 | */ | 261 | */ |
261 | ENTRY(arm925_flush_kern_dcache_page) | 262 | ENTRY(arm925_flush_kern_dcache_area) |
262 | add r1, r0, #PAGE_SZ | 263 | add r1, r0, r1 |
263 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 264 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
264 | add r0, r0, #CACHE_DLINESIZE | 265 | add r0, r0, #CACHE_DLINESIZE |
265 | cmp r0, r1 | 266 | cmp r0, r1 |
@@ -282,7 +283,7 @@ ENTRY(arm925_flush_kern_dcache_page) | |||
282 | * | 283 | * |
283 | * (same as v4wb) | 284 | * (same as v4wb) |
284 | */ | 285 | */ |
285 | ENTRY(arm925_dma_inv_range) | 286 | arm925_dma_inv_range: |
286 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 287 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
287 | tst r0, #CACHE_DLINESIZE - 1 | 288 | tst r0, #CACHE_DLINESIZE - 1 |
288 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 289 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -307,7 +308,7 @@ ENTRY(arm925_dma_inv_range) | |||
307 | * | 308 | * |
308 | * (same as v4wb) | 309 | * (same as v4wb) |
309 | */ | 310 | */ |
310 | ENTRY(arm925_dma_clean_range) | 311 | arm925_dma_clean_range: |
311 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 312 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
312 | bic r0, r0, #CACHE_DLINESIZE - 1 | 313 | bic r0, r0, #CACHE_DLINESIZE - 1 |
313 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 314 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -340,15 +341,39 @@ ENTRY(arm925_dma_flush_range) | |||
340 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 341 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
341 | mov pc, lr | 342 | mov pc, lr |
342 | 343 | ||
344 | /* | ||
345 | * dma_map_area(start, size, dir) | ||
346 | * - start - kernel virtual start address | ||
347 | * - size - size of region | ||
348 | * - dir - DMA direction | ||
349 | */ | ||
350 | ENTRY(arm925_dma_map_area) | ||
351 | add r1, r1, r0 | ||
352 | cmp r2, #DMA_TO_DEVICE | ||
353 | beq arm925_dma_clean_range | ||
354 | bcs arm925_dma_inv_range | ||
355 | b arm925_dma_flush_range | ||
356 | ENDPROC(arm925_dma_map_area) | ||
357 | |||
358 | /* | ||
359 | * dma_unmap_area(start, size, dir) | ||
360 | * - start - kernel virtual start address | ||
361 | * - size - size of region | ||
362 | * - dir - DMA direction | ||
363 | */ | ||
364 | ENTRY(arm925_dma_unmap_area) | ||
365 | mov pc, lr | ||
366 | ENDPROC(arm925_dma_unmap_area) | ||
367 | |||
343 | ENTRY(arm925_cache_fns) | 368 | ENTRY(arm925_cache_fns) |
344 | .long arm925_flush_kern_cache_all | 369 | .long arm925_flush_kern_cache_all |
345 | .long arm925_flush_user_cache_all | 370 | .long arm925_flush_user_cache_all |
346 | .long arm925_flush_user_cache_range | 371 | .long arm925_flush_user_cache_range |
347 | .long arm925_coherent_kern_range | 372 | .long arm925_coherent_kern_range |
348 | .long arm925_coherent_user_range | 373 | .long arm925_coherent_user_range |
349 | .long arm925_flush_kern_dcache_page | 374 | .long arm925_flush_kern_dcache_area |
350 | .long arm925_dma_inv_range | 375 | .long arm925_dma_map_area |
351 | .long arm925_dma_clean_range | 376 | .long arm925_dma_unmap_area |
352 | .long arm925_dma_flush_range | 377 | .long arm925_dma_flush_range |
353 | 378 | ||
354 | ENTRY(cpu_arm925_dcache_clean_area) | 379 | ENTRY(cpu_arm925_dcache_clean_area) |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 1c4848704bb3..75b707c9cce1 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -214,15 +214,16 @@ ENTRY(arm926_coherent_user_range) | |||
214 | mov pc, lr | 214 | mov pc, lr |
215 | 215 | ||
216 | /* | 216 | /* |
217 | * flush_kern_dcache_page(void *page) | 217 | * flush_kern_dcache_area(void *addr, size_t size) |
218 | * | 218 | * |
219 | * Ensure no D cache aliasing occurs, either with itself or | 219 | * Ensure no D cache aliasing occurs, either with itself or |
220 | * the I cache | 220 | * the I cache |
221 | * | 221 | * |
222 | * - addr - page aligned address | 222 | * - addr - kernel address |
223 | * - size - region size | ||
223 | */ | 224 | */ |
224 | ENTRY(arm926_flush_kern_dcache_page) | 225 | ENTRY(arm926_flush_kern_dcache_area) |
225 | add r1, r0, #PAGE_SZ | 226 | add r1, r0, r1 |
226 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 227 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
227 | add r0, r0, #CACHE_DLINESIZE | 228 | add r0, r0, #CACHE_DLINESIZE |
228 | cmp r0, r1 | 229 | cmp r0, r1 |
@@ -245,7 +246,7 @@ ENTRY(arm926_flush_kern_dcache_page) | |||
245 | * | 246 | * |
246 | * (same as v4wb) | 247 | * (same as v4wb) |
247 | */ | 248 | */ |
248 | ENTRY(arm926_dma_inv_range) | 249 | arm926_dma_inv_range: |
249 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 250 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
250 | tst r0, #CACHE_DLINESIZE - 1 | 251 | tst r0, #CACHE_DLINESIZE - 1 |
251 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 252 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -270,7 +271,7 @@ ENTRY(arm926_dma_inv_range) | |||
270 | * | 271 | * |
271 | * (same as v4wb) | 272 | * (same as v4wb) |
272 | */ | 273 | */ |
273 | ENTRY(arm926_dma_clean_range) | 274 | arm926_dma_clean_range: |
274 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 275 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
275 | bic r0, r0, #CACHE_DLINESIZE - 1 | 276 | bic r0, r0, #CACHE_DLINESIZE - 1 |
276 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 277 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -303,15 +304,39 @@ ENTRY(arm926_dma_flush_range) | |||
303 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 304 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
304 | mov pc, lr | 305 | mov pc, lr |
305 | 306 | ||
307 | /* | ||
308 | * dma_map_area(start, size, dir) | ||
309 | * - start - kernel virtual start address | ||
310 | * - size - size of region | ||
311 | * - dir - DMA direction | ||
312 | */ | ||
313 | ENTRY(arm926_dma_map_area) | ||
314 | add r1, r1, r0 | ||
315 | cmp r2, #DMA_TO_DEVICE | ||
316 | beq arm926_dma_clean_range | ||
317 | bcs arm926_dma_inv_range | ||
318 | b arm926_dma_flush_range | ||
319 | ENDPROC(arm926_dma_map_area) | ||
320 | |||
321 | /* | ||
322 | * dma_unmap_area(start, size, dir) | ||
323 | * - start - kernel virtual start address | ||
324 | * - size - size of region | ||
325 | * - dir - DMA direction | ||
326 | */ | ||
327 | ENTRY(arm926_dma_unmap_area) | ||
328 | mov pc, lr | ||
329 | ENDPROC(arm926_dma_unmap_area) | ||
330 | |||
306 | ENTRY(arm926_cache_fns) | 331 | ENTRY(arm926_cache_fns) |
307 | .long arm926_flush_kern_cache_all | 332 | .long arm926_flush_kern_cache_all |
308 | .long arm926_flush_user_cache_all | 333 | .long arm926_flush_user_cache_all |
309 | .long arm926_flush_user_cache_range | 334 | .long arm926_flush_user_cache_range |
310 | .long arm926_coherent_kern_range | 335 | .long arm926_coherent_kern_range |
311 | .long arm926_coherent_user_range | 336 | .long arm926_coherent_user_range |
312 | .long arm926_flush_kern_dcache_page | 337 | .long arm926_flush_kern_dcache_area |
313 | .long arm926_dma_inv_range | 338 | .long arm926_dma_map_area |
314 | .long arm926_dma_clean_range | 339 | .long arm926_dma_unmap_area |
315 | .long arm926_dma_flush_range | 340 | .long arm926_dma_flush_range |
316 | 341 | ||
317 | ENTRY(cpu_arm926_dcache_clean_area) | 342 | ENTRY(cpu_arm926_dcache_clean_area) |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 5b0f8464c8f2..1af1657819eb 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -141,14 +141,15 @@ ENTRY(arm940_coherent_user_range) | |||
141 | /* FALLTHROUGH */ | 141 | /* FALLTHROUGH */ |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * flush_kern_dcache_page(void *page) | 144 | * flush_kern_dcache_area(void *addr, size_t size) |
145 | * | 145 | * |
146 | * Ensure no D cache aliasing occurs, either with itself or | 146 | * Ensure no D cache aliasing occurs, either with itself or |
147 | * the I cache | 147 | * the I cache |
148 | * | 148 | * |
149 | * - addr - page aligned address | 149 | * - addr - kernel address |
150 | * - size - region size | ||
150 | */ | 151 | */ |
151 | ENTRY(arm940_flush_kern_dcache_page) | 152 | ENTRY(arm940_flush_kern_dcache_area) |
152 | mov ip, #0 | 153 | mov ip, #0 |
153 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments | 154 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments |
154 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries | 155 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries |
@@ -170,7 +171,7 @@ ENTRY(arm940_flush_kern_dcache_page) | |||
170 | * - start - virtual start address | 171 | * - start - virtual start address |
171 | * - end - virtual end address | 172 | * - end - virtual end address |
172 | */ | 173 | */ |
173 | ENTRY(arm940_dma_inv_range) | 174 | arm940_dma_inv_range: |
174 | mov ip, #0 | 175 | mov ip, #0 |
175 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments | 176 | mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments |
176 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries | 177 | 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries |
@@ -191,7 +192,7 @@ ENTRY(arm940_dma_inv_range) | |||
191 | * - start - virtual start address | 192 | * - start - virtual start address |
192 | * - end - virtual end address | 193 | * - end - virtual end address |
193 | */ | 194 | */ |
194 | ENTRY(arm940_dma_clean_range) | 195 | arm940_dma_clean_range: |
195 | ENTRY(cpu_arm940_dcache_clean_area) | 196 | ENTRY(cpu_arm940_dcache_clean_area) |
196 | mov ip, #0 | 197 | mov ip, #0 |
197 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 198 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -232,15 +233,39 @@ ENTRY(arm940_dma_flush_range) | |||
232 | mcr p15, 0, ip, c7, c10, 4 @ drain WB | 233 | mcr p15, 0, ip, c7, c10, 4 @ drain WB |
233 | mov pc, lr | 234 | mov pc, lr |
234 | 235 | ||
236 | /* | ||
237 | * dma_map_area(start, size, dir) | ||
238 | * - start - kernel virtual start address | ||
239 | * - size - size of region | ||
240 | * - dir - DMA direction | ||
241 | */ | ||
242 | ENTRY(arm940_dma_map_area) | ||
243 | add r1, r1, r0 | ||
244 | cmp r2, #DMA_TO_DEVICE | ||
245 | beq arm940_dma_clean_range | ||
246 | bcs arm940_dma_inv_range | ||
247 | b arm940_dma_flush_range | ||
248 | ENDPROC(arm940_dma_map_area) | ||
249 | |||
250 | /* | ||
251 | * dma_unmap_area(start, size, dir) | ||
252 | * - start - kernel virtual start address | ||
253 | * - size - size of region | ||
254 | * - dir - DMA direction | ||
255 | */ | ||
256 | ENTRY(arm940_dma_unmap_area) | ||
257 | mov pc, lr | ||
258 | ENDPROC(arm940_dma_unmap_area) | ||
259 | |||
235 | ENTRY(arm940_cache_fns) | 260 | ENTRY(arm940_cache_fns) |
236 | .long arm940_flush_kern_cache_all | 261 | .long arm940_flush_kern_cache_all |
237 | .long arm940_flush_user_cache_all | 262 | .long arm940_flush_user_cache_all |
238 | .long arm940_flush_user_cache_range | 263 | .long arm940_flush_user_cache_range |
239 | .long arm940_coherent_kern_range | 264 | .long arm940_coherent_kern_range |
240 | .long arm940_coherent_user_range | 265 | .long arm940_coherent_user_range |
241 | .long arm940_flush_kern_dcache_page | 266 | .long arm940_flush_kern_dcache_area |
242 | .long arm940_dma_inv_range | 267 | .long arm940_dma_map_area |
243 | .long arm940_dma_clean_range | 268 | .long arm940_dma_unmap_area |
244 | .long arm940_dma_flush_range | 269 | .long arm940_dma_flush_range |
245 | 270 | ||
246 | __INIT | 271 | __INIT |
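arm940 is the odd one out: it cannot clean by address, so its flush_kern_dcache_area walks the cache by index (4 segments x 64 entries) and effectively ignores the size argument, cleaning and invalidating the whole D-cache. Roughly, with clean_inv_dcache_index standing in for the index-based mcr:

    for (int seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--)        /* bits 5:4 */
        for (int idx = CACHE_DENTRIES - 1; idx >= 0; idx--)     /* bits 31:26 */
            clean_inv_dcache_index((idx << 26) | (seg << 4));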
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 40c0449a139b..1664b6aaff79 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -183,16 +183,17 @@ ENTRY(arm946_coherent_user_range) | |||
183 | mov pc, lr | 183 | mov pc, lr |
184 | 184 | ||
185 | /* | 185 | /* |
186 | * flush_kern_dcache_page(void *page) | 186 | * flush_kern_dcache_area(void *addr, size_t size) |
187 | * | 187 | * |
188 | * Ensure no D cache aliasing occurs, either with itself or | 188 | * Ensure no D cache aliasing occurs, either with itself or |
189 | * the I cache | 189 | * the I cache |
190 | * | 190 | * |
191 | * - addr - page aligned address | 191 | * - addr - kernel address |
192 | * - size - region size | ||
192 | * (same as arm926) | 193 | * (same as arm926) |
193 | */ | 194 | */ |
194 | ENTRY(arm946_flush_kern_dcache_page) | 195 | ENTRY(arm946_flush_kern_dcache_area) |
195 | add r1, r0, #PAGE_SZ | 196 | add r1, r0, r1 |
196 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 197 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
197 | add r0, r0, #CACHE_DLINESIZE | 198 | add r0, r0, #CACHE_DLINESIZE |
198 | cmp r0, r1 | 199 | cmp r0, r1 |
@@ -214,7 +215,7 @@ ENTRY(arm946_flush_kern_dcache_page) | |||
214 | * - end - virtual end address | 215 | * - end - virtual end address |
215 | * (same as arm926) | 216 | * (same as arm926) |
216 | */ | 217 | */ |
217 | ENTRY(arm946_dma_inv_range) | 218 | arm946_dma_inv_range: |
218 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 219 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
219 | tst r0, #CACHE_DLINESIZE - 1 | 220 | tst r0, #CACHE_DLINESIZE - 1 |
220 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 221 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -239,7 +240,7 @@ ENTRY(arm946_dma_inv_range) | |||
239 | * | 240 | * |
240 | * (same as arm926) | 241 | * (same as arm926) |
241 | */ | 242 | */ |
242 | ENTRY(arm946_dma_clean_range) | 243 | arm946_dma_clean_range: |
243 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 244 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
244 | bic r0, r0, #CACHE_DLINESIZE - 1 | 245 | bic r0, r0, #CACHE_DLINESIZE - 1 |
245 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 246 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -274,15 +275,39 @@ ENTRY(arm946_dma_flush_range) | |||
274 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 275 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
275 | mov pc, lr | 276 | mov pc, lr |
276 | 277 | ||
278 | /* | ||
279 | * dma_map_area(start, size, dir) | ||
280 | * - start - kernel virtual start address | ||
281 | * - size - size of region | ||
282 | * - dir - DMA direction | ||
283 | */ | ||
284 | ENTRY(arm946_dma_map_area) | ||
285 | add r1, r1, r0 | ||
286 | cmp r2, #DMA_TO_DEVICE | ||
287 | beq arm946_dma_clean_range | ||
288 | bcs arm946_dma_inv_range | ||
289 | b arm946_dma_flush_range | ||
290 | ENDPROC(arm946_dma_map_area) | ||
291 | |||
292 | /* | ||
293 | * dma_unmap_area(start, size, dir) | ||
294 | * - start - kernel virtual start address | ||
295 | * - size - size of region | ||
296 | * - dir - DMA direction | ||
297 | */ | ||
298 | ENTRY(arm946_dma_unmap_area) | ||
299 | mov pc, lr | ||
300 | ENDPROC(arm946_dma_unmap_area) | ||
301 | |||
277 | ENTRY(arm946_cache_fns) | 302 | ENTRY(arm946_cache_fns) |
278 | .long arm946_flush_kern_cache_all | 303 | .long arm946_flush_kern_cache_all |
279 | .long arm946_flush_user_cache_all | 304 | .long arm946_flush_user_cache_all |
280 | .long arm946_flush_user_cache_range | 305 | .long arm946_flush_user_cache_range |
281 | .long arm946_coherent_kern_range | 306 | .long arm946_coherent_kern_range |
282 | .long arm946_coherent_user_range | 307 | .long arm946_coherent_user_range |
283 | .long arm946_flush_kern_dcache_page | 308 | .long arm946_flush_kern_dcache_area |
284 | .long arm946_dma_inv_range | 309 | .long arm946_dma_map_area |
285 | .long arm946_dma_clean_range | 310 | .long arm946_dma_unmap_area |
286 | .long arm946_dma_flush_range | 311 | .long arm946_dma_flush_range |
287 | 312 | ||
288 | 313 | ||
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index d0d7795200fc..53e632343849 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -226,16 +226,17 @@ ENTRY(feroceon_coherent_user_range) | |||
226 | mov pc, lr | 226 | mov pc, lr |
227 | 227 | ||
228 | /* | 228 | /* |
229 | * flush_kern_dcache_page(void *page) | 229 | * flush_kern_dcache_area(void *addr, size_t size) |
230 | * | 230 | * |
231 | * Ensure no D cache aliasing occurs, either with itself or | 231 | * Ensure no D cache aliasing occurs, either with itself or |
232 | * the I cache | 232 | * the I cache |
233 | * | 233 | * |
234 | * - addr - page aligned address | 234 | * - addr - kernel address |
235 | * - size - region size | ||
235 | */ | 236 | */ |
236 | .align 5 | 237 | .align 5 |
237 | ENTRY(feroceon_flush_kern_dcache_page) | 238 | ENTRY(feroceon_flush_kern_dcache_area) |
238 | add r1, r0, #PAGE_SZ | 239 | add r1, r0, r1 |
239 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 240 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
240 | add r0, r0, #CACHE_DLINESIZE | 241 | add r0, r0, #CACHE_DLINESIZE |
241 | cmp r0, r1 | 242 | cmp r0, r1 |
@@ -246,7 +247,7 @@ ENTRY(feroceon_flush_kern_dcache_page) | |||
246 | mov pc, lr | 247 | mov pc, lr |
247 | 248 | ||
248 | .align 5 | 249 | .align 5 |
249 | ENTRY(feroceon_range_flush_kern_dcache_page) | 250 | ENTRY(feroceon_range_flush_kern_dcache_area) |
250 | mrs r2, cpsr | 251 | mrs r2, cpsr |
251 | add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive | 252 | add r1, r0, r1 |
253 | sub r1, r1, #CACHE_DLINESIZE @ top addr is inclusive | ||
252 | orr r3, r2, #PSR_I_BIT | 253 | orr r3, r2, #PSR_I_BIT |
@@ -273,7 +274,7 @@ ENTRY(feroceon_range_flush_kern_dcache_page) | |||
273 | * (same as v4wb) | 274 | * (same as v4wb) |
274 | */ | 275 | */ |
275 | .align 5 | 276 | .align 5 |
276 | ENTRY(feroceon_dma_inv_range) | 277 | feroceon_dma_inv_range: |
277 | tst r0, #CACHE_DLINESIZE - 1 | 278 | tst r0, #CACHE_DLINESIZE - 1 |
278 | bic r0, r0, #CACHE_DLINESIZE - 1 | 279 | bic r0, r0, #CACHE_DLINESIZE - 1 |
279 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 280 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -287,7 +288,7 @@ ENTRY(feroceon_dma_inv_range) | |||
287 | mov pc, lr | 288 | mov pc, lr |
288 | 289 | ||
289 | .align 5 | 290 | .align 5 |
290 | ENTRY(feroceon_range_dma_inv_range) | 291 | feroceon_range_dma_inv_range: |
291 | mrs r2, cpsr | 292 | mrs r2, cpsr |
292 | tst r0, #CACHE_DLINESIZE - 1 | 293 | tst r0, #CACHE_DLINESIZE - 1 |
293 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 294 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -313,7 +314,7 @@ ENTRY(feroceon_range_dma_inv_range) | |||
313 | * (same as v4wb) | 314 | * (same as v4wb) |
314 | */ | 315 | */ |
315 | .align 5 | 316 | .align 5 |
316 | ENTRY(feroceon_dma_clean_range) | 317 | feroceon_dma_clean_range: |
317 | bic r0, r0, #CACHE_DLINESIZE - 1 | 318 | bic r0, r0, #CACHE_DLINESIZE - 1 |
318 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 319 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
319 | add r0, r0, #CACHE_DLINESIZE | 320 | add r0, r0, #CACHE_DLINESIZE |
@@ -323,7 +324,7 @@ ENTRY(feroceon_dma_clean_range) | |||
323 | mov pc, lr | 324 | mov pc, lr |
324 | 325 | ||
325 | .align 5 | 326 | .align 5 |
326 | ENTRY(feroceon_range_dma_clean_range) | 327 | feroceon_range_dma_clean_range: |
327 | mrs r2, cpsr | 328 | mrs r2, cpsr |
328 | cmp r1, r0 | 329 | cmp r1, r0 |
329 | subne r1, r1, #1 @ top address is inclusive | 330 | subne r1, r1, #1 @ top address is inclusive |
@@ -366,15 +367,53 @@ ENTRY(feroceon_range_dma_flush_range) | |||
366 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 367 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
367 | mov pc, lr | 368 | mov pc, lr |
368 | 369 | ||
370 | /* | ||
371 | * dma_map_area(start, size, dir) | ||
372 | * - start - kernel virtual start address | ||
373 | * - size - size of region | ||
374 | * - dir - DMA direction | ||
375 | */ | ||
376 | ENTRY(feroceon_dma_map_area) | ||
377 | add r1, r1, r0 | ||
378 | cmp r2, #DMA_TO_DEVICE | ||
379 | beq feroceon_dma_clean_range | ||
380 | bcs feroceon_dma_inv_range | ||
381 | b feroceon_dma_flush_range | ||
382 | ENDPROC(feroceon_dma_map_area) | ||
383 | |||
384 | /* | ||
385 | * dma_map_area(start, size, dir) | ||
386 | * - start - kernel virtual start address | ||
387 | * - size - size of region | ||
388 | * - dir - DMA direction | ||
389 | */ | ||
390 | ENTRY(feroceon_range_dma_map_area) | ||
391 | add r1, r1, r0 | ||
392 | cmp r2, #DMA_TO_DEVICE | ||
393 | beq feroceon_range_dma_clean_range | ||
394 | bcs feroceon_range_dma_inv_range | ||
395 | b feroceon_range_dma_flush_range | ||
396 | ENDPROC(feroceon_range_dma_map_area) | ||
397 | |||
398 | /* | ||
399 | * dma_unmap_area(start, size, dir) | ||
400 | * - start - kernel virtual start address | ||
401 | * - size - size of region | ||
402 | * - dir - DMA direction | ||
403 | */ | ||
404 | ENTRY(feroceon_dma_unmap_area) | ||
405 | mov pc, lr | ||
406 | ENDPROC(feroceon_dma_unmap_area) | ||
407 | |||
369 | ENTRY(feroceon_cache_fns) | 408 | ENTRY(feroceon_cache_fns) |
370 | .long feroceon_flush_kern_cache_all | 409 | .long feroceon_flush_kern_cache_all |
371 | .long feroceon_flush_user_cache_all | 410 | .long feroceon_flush_user_cache_all |
372 | .long feroceon_flush_user_cache_range | 411 | .long feroceon_flush_user_cache_range |
373 | .long feroceon_coherent_kern_range | 412 | .long feroceon_coherent_kern_range |
374 | .long feroceon_coherent_user_range | 413 | .long feroceon_coherent_user_range |
375 | .long feroceon_flush_kern_dcache_page | 414 | .long feroceon_flush_kern_dcache_area |
376 | .long feroceon_dma_inv_range | 415 | .long feroceon_dma_map_area |
377 | .long feroceon_dma_clean_range | 416 | .long feroceon_dma_unmap_area |
378 | .long feroceon_dma_flush_range | 417 | .long feroceon_dma_flush_range |
379 | 418 | ||
380 | ENTRY(feroceon_range_cache_fns) | 419 | ENTRY(feroceon_range_cache_fns) |
@@ -383,9 +422,9 @@ ENTRY(feroceon_range_cache_fns) | |||
383 | .long feroceon_flush_user_cache_range | 422 | .long feroceon_flush_user_cache_range |
384 | .long feroceon_coherent_kern_range | 423 | .long feroceon_coherent_kern_range |
385 | .long feroceon_coherent_user_range | 424 | .long feroceon_coherent_user_range |
386 | .long feroceon_range_flush_kern_dcache_page | 425 | .long feroceon_range_flush_kern_dcache_area |
387 | .long feroceon_range_dma_inv_range | 426 | .long feroceon_range_dma_map_area |
388 | .long feroceon_range_dma_clean_range | 427 | .long feroceon_dma_unmap_area |
389 | .long feroceon_range_dma_flush_range | 428 | .long feroceon_range_dma_flush_range |
390 | 429 | ||
391 | .align 5 | 430 | .align 5 |
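Two details worth noting in the Feroceon conversion: dma_unmap_area is a bare `mov pc, lr` because on these write-back VIVT cores all maintenance happens at map time, and feroceon_range_cache_fns deliberately shares that same no-op; the range variants avoid the per-line loop by programming the cp15 range operation with IRQs briefly masked so the start/top pair cannot be split. A sketch of that pattern (set_dcache_clean_range is a placeholder for the paired p15, 5 mcrs):

    unsigned long flags;

    local_irq_save(flags);                     /* mrs r2, cpsr; orr/msr PSR_I_BIT */
    set_dcache_clean_range(start, end - 1);    /* start reg + inclusive top reg */
    local_irq_restore(flags);                  /* msr cpsr_c, r2 */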
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 52b5fd74fbb3..caa31154e7db 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -186,15 +186,16 @@ ENTRY(mohawk_coherent_user_range) | |||
186 | mov pc, lr | 186 | mov pc, lr |
187 | 187 | ||
188 | /* | 188 | /* |
189 | * flush_kern_dcache_page(void *page) | 189 | * flush_kern_dcache_area(void *addr, size_t size) |
190 | * | 190 | * |
191 | * Ensure no D cache aliasing occurs, either with itself or | 191 | * Ensure no D cache aliasing occurs, either with itself or |
192 | * the I cache | 192 | * the I cache |
193 | * | 193 | * |
194 | * - addr - page aligned address | 194 | * - addr - kernel address |
195 | * - size - region size | ||
195 | */ | 196 | */ |
196 | ENTRY(mohawk_flush_kern_dcache_page) | 197 | ENTRY(mohawk_flush_kern_dcache_area) |
197 | add r1, r0, #PAGE_SZ | 198 | add r1, r0, r1 |
198 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry | 199 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry |
199 | add r0, r0, #CACHE_DLINESIZE | 200 | add r0, r0, #CACHE_DLINESIZE |
200 | cmp r0, r1 | 201 | cmp r0, r1 |
@@ -217,7 +218,7 @@ ENTRY(mohawk_flush_kern_dcache_page) | |||
217 | * | 218 | * |
218 | * (same as v4wb) | 219 | * (same as v4wb) |
219 | */ | 220 | */ |
220 | ENTRY(mohawk_dma_inv_range) | 221 | mohawk_dma_inv_range: |
221 | tst r0, #CACHE_DLINESIZE - 1 | 222 | tst r0, #CACHE_DLINESIZE - 1 |
222 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 223 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
223 | tst r1, #CACHE_DLINESIZE - 1 | 224 | tst r1, #CACHE_DLINESIZE - 1 |
@@ -240,7 +241,7 @@ ENTRY(mohawk_dma_inv_range) | |||
240 | * | 241 | * |
241 | * (same as v4wb) | 242 | * (same as v4wb) |
242 | */ | 243 | */ |
243 | ENTRY(mohawk_dma_clean_range) | 244 | mohawk_dma_clean_range: |
244 | bic r0, r0, #CACHE_DLINESIZE - 1 | 245 | bic r0, r0, #CACHE_DLINESIZE - 1 |
245 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 246 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
246 | add r0, r0, #CACHE_DLINESIZE | 247 | add r0, r0, #CACHE_DLINESIZE |
@@ -267,15 +268,39 @@ ENTRY(mohawk_dma_flush_range) | |||
267 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 268 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
268 | mov pc, lr | 269 | mov pc, lr |
269 | 270 | ||
271 | /* | ||
272 | * dma_map_area(start, size, dir) | ||
273 | * - start - kernel virtual start address | ||
274 | * - size - size of region | ||
275 | * - dir - DMA direction | ||
276 | */ | ||
277 | ENTRY(mohawk_dma_map_area) | ||
278 | add r1, r1, r0 | ||
279 | cmp r2, #DMA_TO_DEVICE | ||
280 | beq mohawk_dma_clean_range | ||
281 | bcs mohawk_dma_inv_range | ||
282 | b mohawk_dma_flush_range | ||
283 | ENDPROC(mohawk_dma_map_area) | ||
284 | |||
285 | /* | ||
286 | * dma_unmap_area(start, size, dir) | ||
287 | * - start - kernel virtual start address | ||
288 | * - size - size of region | ||
289 | * - dir - DMA direction | ||
290 | */ | ||
291 | ENTRY(mohawk_dma_unmap_area) | ||
292 | mov pc, lr | ||
293 | ENDPROC(mohawk_dma_unmap_area) | ||
294 | |||
270 | ENTRY(mohawk_cache_fns) | 295 | ENTRY(mohawk_cache_fns) |
271 | .long mohawk_flush_kern_cache_all | 296 | .long mohawk_flush_kern_cache_all |
272 | .long mohawk_flush_user_cache_all | 297 | .long mohawk_flush_user_cache_all |
273 | .long mohawk_flush_user_cache_range | 298 | .long mohawk_flush_user_cache_range |
274 | .long mohawk_coherent_kern_range | 299 | .long mohawk_coherent_kern_range |
275 | .long mohawk_coherent_user_range | 300 | .long mohawk_coherent_user_range |
276 | .long mohawk_flush_kern_dcache_page | 301 | .long mohawk_flush_kern_dcache_area |
277 | .long mohawk_dma_inv_range | 302 | .long mohawk_dma_map_area |
278 | .long mohawk_dma_clean_range | 303 | .long mohawk_dma_unmap_area |
279 | .long mohawk_dma_flush_range | 304 | .long mohawk_dma_flush_range |
280 | 305 | ||
281 | ENTRY(cpu_mohawk_dcache_clean_area) | 306 | ENTRY(cpu_mohawk_dcache_clean_area) |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index ee7700242c19..5c47760c2064 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init) | |||
45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland | 45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland |
46 | mov pc, lr | 46 | mov pc, lr |
47 | 47 | ||
48 | .previous | 48 | .section .text |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * cpu_sa1100_proc_fin() | 51 | * cpu_sa1100_proc_fin() |
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index ac5c80062b70..3e6210b4d6d4 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c | |||
@@ -27,8 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all); | |||
27 | EXPORT_SYMBOL(__cpuc_flush_user_all); | 27 | EXPORT_SYMBOL(__cpuc_flush_user_all); |
28 | EXPORT_SYMBOL(__cpuc_flush_user_range); | 28 | EXPORT_SYMBOL(__cpuc_flush_user_range); |
29 | EXPORT_SYMBOL(__cpuc_coherent_kern_range); | 29 | EXPORT_SYMBOL(__cpuc_coherent_kern_range); |
30 | EXPORT_SYMBOL(__cpuc_flush_dcache_page); | 30 | EXPORT_SYMBOL(__cpuc_flush_dcache_area); |
31 | EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */ | ||
32 | #else | 31 | #else |
33 | EXPORT_SYMBOL(cpu_cache); | 32 | EXPORT_SYMBOL(cpu_cache); |
34 | #endif | 33 | #endif |
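The proc-syms.c hunk tracks the interface rename for modules: the page-based flush export becomes the area-based one, and dmac_inv_range disappears as an exported symbol because per-direction range maintenance is now reached through the map/unmap entry points. The surviving kernel-internal prototypes look roughly like this (following asm/cacheflush.h):

    extern void __cpuc_flush_dcache_area(void *addr, size_t size);
    extern void dmac_map_area(const void *start, size_t size, int dir);
    extern void dmac_unmap_area(const void *start, size_t size, int dir);
    extern void dmac_flush_range(const void *start, const void *end);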
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 70f75d2e3ead..7a5337ed7d68 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -59,8 +59,6 @@ ENTRY(cpu_v6_proc_fin) | |||
59 | * to what would be the reset vector. | 59 | * to what would be the reset vector. |
60 | * | 60 | * |
61 | * - loc - location to jump to for soft reset | 61 | * - loc - location to jump to for soft reset |
62 | * | ||
63 | * It is assumed that: | ||
64 | */ | 62 | */ |
65 | .align 5 | 63 | .align 5 |
66 | ENTRY(cpu_v6_reset) | 64 | ENTRY(cpu_v6_reset) |
@@ -130,9 +128,16 @@ ENTRY(cpu_v6_set_pte_ext) | |||
130 | 128 | ||
131 | 129 | ||
132 | 130 | ||
133 | 131 | .type cpu_v6_name, #object | |
134 | cpu_v6_name: | 132 | cpu_v6_name: |
135 | .asciz "ARMv6-compatible processor" | 133 | .asciz "ARMv6-compatible processor" |
134 | .size cpu_v6_name, . - cpu_v6_name | ||
135 | |||
136 | .type cpu_pj4_name, #object | ||
137 | cpu_pj4_name: | ||
138 | .asciz "Marvell PJ4 processor" | ||
139 | .size cpu_pj4_name, . - cpu_pj4_name | ||
140 | |||
136 | .align | 141 | .align |
137 | 142 | ||
138 | __INIT | 143 | __INIT |
@@ -241,3 +246,26 @@ __v6_proc_info: | |||
241 | .long v6_user_fns | 246 | .long v6_user_fns |
242 | .long v6_cache_fns | 247 | .long v6_cache_fns |
243 | .size __v6_proc_info, . - __v6_proc_info | 248 | .size __v6_proc_info, . - __v6_proc_info |
249 | |||
250 | .type __pj4_v6_proc_info, #object | ||
251 | __pj4_v6_proc_info: | ||
252 | .long 0x560f5810 | ||
253 | .long 0xff0ffff0 | ||
254 | .long PMD_TYPE_SECT | \ | ||
255 | PMD_SECT_AP_WRITE | \ | ||
256 | PMD_SECT_AP_READ | \ | ||
257 | PMD_FLAGS | ||
258 | .long PMD_TYPE_SECT | \ | ||
259 | PMD_SECT_XN | \ | ||
260 | PMD_SECT_AP_WRITE | \ | ||
261 | PMD_SECT_AP_READ | ||
262 | b __v6_setup | ||
263 | .long cpu_arch_name | ||
264 | .long cpu_elf_name | ||
265 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
266 | .long cpu_pj4_name | ||
267 | .long v6_processor_functions | ||
268 | .long v6wbi_tlb_fns | ||
269 | .long v6_user_fns | ||
270 | .long v6_cache_fns | ||
271 | .size __pj4_v6_proc_info, . - __pj4_v6_proc_info | ||
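The new __pj4_v6_proc_info record is matched the same way as every other .proc.info.init entry: early boot masks the main ID register and compares it against the stored value, then picks up the name and function tables from the record. Conceptually (read_cpuid_id is the usual helper; PJ4 simply reuses the stock v6 tables):

    u32 midr = read_cpuid_id();    /* mrc p15, 0, <Rd>, c0, c0, 0 */

    if ((midr & 0xff0ffff0) == 0x560f5810)
        pr_info("CPU: %s\n", "Marvell PJ4 processor");
        /* ...then v6_processor_functions, v6wbi_tlb_fns, v6_user_fns and
         * v6_cache_fns are used, exactly as for __v6_proc_info */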
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3a285218fd15..7aaf88a3b7aa 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -45,7 +45,14 @@ ENTRY(cpu_v7_proc_init) | |||
45 | ENDPROC(cpu_v7_proc_init) | 45 | ENDPROC(cpu_v7_proc_init) |
46 | 46 | ||
47 | ENTRY(cpu_v7_proc_fin) | 47 | ENTRY(cpu_v7_proc_fin) |
48 | mov pc, lr | 48 | stmfd sp!, {lr} |
49 | cpsid if @ disable interrupts | ||
50 | bl v7_flush_kern_cache_all | ||
51 | mrc p15, 0, r0, c1, c0, 0 @ ctrl register | ||
52 | bic r0, r0, #0x1000 @ ...i............ | ||
53 | bic r0, r0, #0x0006 @ .............ca. | ||
54 | mcr p15, 0, r0, c1, c0, 0 @ disable caches | ||
55 | ldmfd sp!, {pc} | ||
49 | ENDPROC(cpu_v7_proc_fin) | 56 | ENDPROC(cpu_v7_proc_fin) |
50 | 57 | ||
51 | /* | 58 | /* |
@@ -56,8 +63,6 @@ ENDPROC(cpu_v7_proc_fin) | |||
56 | * to what would be the reset vector. | 63 | * to what would be the reset vector. |
57 | * | 64 | * |
58 | * - loc - location to jump to for soft reset | 65 | * - loc - location to jump to for soft reset |
59 | * | ||
60 | * It is assumed that: | ||
61 | */ | 66 | */ |
62 | .align 5 | 67 | .align 5 |
63 | ENTRY(cpu_v7_reset) | 68 | ENTRY(cpu_v7_reset) |
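cpu_v7_proc_fin used to be a no-op; it now quiesces the core before handover: interrupts off, full cache flush, then the I, C and A bits cleared in the control register. In C, with read_sctlr/write_sctlr as placeholder accessors for the c1, c0, 0 register:

    u32 sctlr;

    local_irq_disable();           /* cpsid if */
    flush_cache_all();             /* bl v7_flush_kern_cache_all */
    sctlr = read_sctlr();          /* mrc p15, 0, r0, c1, c0, 0 */
    sctlr &= ~(1 << 12);           /* ...i............ : I-cache off */
    sctlr &= ~(1 << 2 | 1 << 1);   /* .............ca. : D-cache + alignment off */
    write_sctlr(sctlr);            /* mcr p15, 0, r0, c1, c0, 0 */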
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 2028f3702881..e5797f1c1db7 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -226,15 +226,16 @@ ENTRY(xsc3_coherent_user_range) | |||
226 | mov pc, lr | 226 | mov pc, lr |
227 | 227 | ||
228 | /* | 228 | /* |
229 | * flush_kern_dcache_page(void *page) | 229 | * flush_kern_dcache_area(void *addr, size_t size) |
230 | * | 230 | * |
231 | * Ensure no D cache aliasing occurs, either with itself or | 231 | * Ensure no D cache aliasing occurs, either with itself or |
232 | * the I cache. | 232 | * the I cache. |
233 | * | 233 | * |
234 | * - addr - page aligned address | 234 | * - addr - kernel address |
235 | * - size - region size | ||
235 | */ | 236 | */ |
236 | ENTRY(xsc3_flush_kern_dcache_page) | 237 | ENTRY(xsc3_flush_kern_dcache_area) |
237 | add r1, r0, #PAGE_SZ | 238 | add r1, r0, r1 |
238 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line | 239 | 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line |
239 | add r0, r0, #CACHELINESIZE | 240 | add r0, r0, #CACHELINESIZE |
240 | cmp r0, r1 | 241 | cmp r0, r1 |
@@ -256,7 +257,7 @@ ENTRY(xsc3_flush_kern_dcache_page) | |||
256 | * - start - virtual start address | 257 | * - start - virtual start address |
257 | * - end - virtual end address | 258 | * - end - virtual end address |
258 | */ | 259 | */ |
259 | ENTRY(xsc3_dma_inv_range) | 260 | xsc3_dma_inv_range: |
260 | tst r0, #CACHELINESIZE - 1 | 261 | tst r0, #CACHELINESIZE - 1 |
261 | bic r0, r0, #CACHELINESIZE - 1 | 262 | bic r0, r0, #CACHELINESIZE - 1 |
262 | mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line | 263 | mcrne p15, 0, r0, c7, c10, 1 @ clean L1 D line |
@@ -277,7 +278,7 @@ ENTRY(xsc3_dma_inv_range) | |||
277 | * - start - virtual start address | 278 | * - start - virtual start address |
278 | * - end - virtual end address | 279 | * - end - virtual end address |
279 | */ | 280 | */ |
280 | ENTRY(xsc3_dma_clean_range) | 281 | xsc3_dma_clean_range: |
281 | bic r0, r0, #CACHELINESIZE - 1 | 282 | bic r0, r0, #CACHELINESIZE - 1 |
282 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 283 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
283 | add r0, r0, #CACHELINESIZE | 284 | add r0, r0, #CACHELINESIZE |
@@ -303,15 +304,39 @@ ENTRY(xsc3_dma_flush_range) | |||
303 | mcr p15, 0, r0, c7, c10, 4 @ data write barrier | 304 | mcr p15, 0, r0, c7, c10, 4 @ data write barrier |
304 | mov pc, lr | 305 | mov pc, lr |
305 | 306 | ||
307 | /* | ||
308 | * dma_map_area(start, size, dir) | ||
309 | * - start - kernel virtual start address | ||
310 | * - size - size of region | ||
311 | * - dir - DMA direction | ||
312 | */ | ||
313 | ENTRY(xsc3_dma_map_area) | ||
314 | add r1, r1, r0 | ||
315 | cmp r2, #DMA_TO_DEVICE | ||
316 | beq xsc3_dma_clean_range | ||
317 | bcs xsc3_dma_inv_range | ||
318 | b xsc3_dma_flush_range | ||
319 | ENDPROC(xsc3_dma_map_area) | ||
320 | |||
321 | /* | ||
322 | * dma_unmap_area(start, size, dir) | ||
323 | * - start - kernel virtual start address | ||
324 | * - size - size of region | ||
325 | * - dir - DMA direction | ||
326 | */ | ||
327 | ENTRY(xsc3_dma_unmap_area) | ||
328 | mov pc, lr | ||
329 | ENDPROC(xsc3_dma_unmap_area) | ||
330 | |||
306 | ENTRY(xsc3_cache_fns) | 331 | ENTRY(xsc3_cache_fns) |
307 | .long xsc3_flush_kern_cache_all | 332 | .long xsc3_flush_kern_cache_all |
308 | .long xsc3_flush_user_cache_all | 333 | .long xsc3_flush_user_cache_all |
309 | .long xsc3_flush_user_cache_range | 334 | .long xsc3_flush_user_cache_range |
310 | .long xsc3_coherent_kern_range | 335 | .long xsc3_coherent_kern_range |
311 | .long xsc3_coherent_user_range | 336 | .long xsc3_coherent_user_range |
312 | .long xsc3_flush_kern_dcache_page | 337 | .long xsc3_flush_kern_dcache_area |
313 | .long xsc3_dma_inv_range | 338 | .long xsc3_dma_map_area |
314 | .long xsc3_dma_clean_range | 339 | .long xsc3_dma_unmap_area |
315 | .long xsc3_dma_flush_range | 340 | .long xsc3_dma_flush_range |
316 | 341 | ||
317 | ENTRY(cpu_xsc3_dcache_clean_area) | 342 | ENTRY(cpu_xsc3_dcache_clean_area) |
@@ -396,7 +421,7 @@ __xsc3_setup: | |||
396 | orr r4, r4, #0x18 @ cache the page table in L2 | 421 | orr r4, r4, #0x18 @ cache the page table in L2 |
397 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer | 422 | mcr p15, 0, r4, c2, c0, 0 @ load page table pointer |
398 | 423 | ||
399 | mov r0, #0 @ don't allow CP access | 424 | mov r0, #1 << 6 @ cp6 access for early sched_clock |
400 | mcr p15, 0, r0, c15, c1, 0 @ write CP access register | 425 | mcr p15, 0, r0, c15, c1, 0 @ write CP access register |
401 | 426 | ||
402 | mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg | 427 | mrc p15, 0, r0, c1, c0, 1 @ get auxiliary control reg |
@@ -406,6 +431,13 @@ __xsc3_setup: | |||
406 | 431 | ||
407 | adr r5, xsc3_crval | 432 | adr r5, xsc3_crval |
408 | ldmia r5, {r5, r6} | 433 | ldmia r5, {r5, r6} |
434 | |||
435 | #ifdef CONFIG_CACHE_XSC3L2 | ||
436 | mrc p15, 1, r0, c0, c0, 1 @ get L2 present information | ||
437 | ands r0, r0, #0xf8 | ||
438 | orrne r6, r6, #(1 << 26) @ enable L2 if present | ||
439 | #endif | ||
440 | |||
409 | mrc p15, 0, r0, c1, c0, 0 @ get control register | 441 | mrc p15, 0, r0, c1, c0, 0 @ get control register |
410 | bic r0, r0, r5 @ ..V. ..R. .... ..A. | 442 | bic r0, r0, r5 @ ..V. ..R. .... ..A. |
411 | orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) | 443 | orr r0, r0, r6 @ ..VI Z..S .... .C.M (mmu) |
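Two __xsc3_setup tweaks ride along here: CP6 access is opened up (the interrupt/timer unit sits behind cp6 and early sched_clock needs it), and when CONFIG_CACHE_XSC3L2 is set the L2 cache type register is probed so the L2-enable bit (bit 26) only goes into the control register value when an L2 actually exists. The probe, roughly, where read_l2ctype is a placeholder for the p15, 1, c0, c0, 1 read and ctrl_set stands for the r6 "set" value loaded from xsc3_crval:

    #ifdef CONFIG_CACHE_XSC3L2
        u32 l2ctype = read_l2ctype();  /* mrc p15, 1, r0, c0, c0, 1 */

        if (l2ctype & 0xf8)            /* size/associativity fields non-zero */
            ctrl_set |= 1 << 26;       /* enable L2 together with MMU/caches */
    #endif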
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index f056c283682d..63037e2162f2 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -284,15 +284,16 @@ ENTRY(xscale_coherent_user_range) | |||
284 | mov pc, lr | 284 | mov pc, lr |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * flush_kern_dcache_page(void *page) | 287 | * flush_kern_dcache_area(void *addr, size_t size) |
288 | * | 288 | * |
289 | * Ensure no D cache aliasing occurs, either with itself or | 289 | * Ensure no D cache aliasing occurs, either with itself or |
290 | * the I cache | 290 | * the I cache |
291 | * | 291 | * |
292 | * - addr - page aligned address | 292 | * - addr - kernel address |
293 | * - size - region size | ||
293 | */ | 294 | */ |
294 | ENTRY(xscale_flush_kern_dcache_page) | 295 | ENTRY(xscale_flush_kern_dcache_area) |
295 | add r1, r0, #PAGE_SZ | 296 | add r1, r0, r1 |
296 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 297 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
297 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry | 298 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry |
298 | add r0, r0, #CACHELINESIZE | 299 | add r0, r0, #CACHELINESIZE |
@@ -314,7 +315,7 @@ ENTRY(xscale_flush_kern_dcache_page) | |||
314 | * - start - virtual start address | 315 | * - start - virtual start address |
315 | * - end - virtual end address | 316 | * - end - virtual end address |
316 | */ | 317 | */ |
317 | ENTRY(xscale_dma_inv_range) | 318 | xscale_dma_inv_range: |
318 | tst r0, #CACHELINESIZE - 1 | 319 | tst r0, #CACHELINESIZE - 1 |
319 | bic r0, r0, #CACHELINESIZE - 1 | 320 | bic r0, r0, #CACHELINESIZE - 1 |
320 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry | 321 | mcrne p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -335,7 +336,7 @@ ENTRY(xscale_dma_inv_range) | |||
335 | * - start - virtual start address | 336 | * - start - virtual start address |
336 | * - end - virtual end address | 337 | * - end - virtual end address |
337 | */ | 338 | */ |
338 | ENTRY(xscale_dma_clean_range) | 339 | xscale_dma_clean_range: |
339 | bic r0, r0, #CACHELINESIZE - 1 | 340 | bic r0, r0, #CACHELINESIZE - 1 |
340 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 341 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
341 | add r0, r0, #CACHELINESIZE | 342 | add r0, r0, #CACHELINESIZE |
@@ -362,15 +363,52 @@ ENTRY(xscale_dma_flush_range) | |||
362 | mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer | 363 | mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer |
363 | mov pc, lr | 364 | mov pc, lr |
364 | 365 | ||
366 | /* | ||
367 | * dma_map_area(start, size, dir) | ||
368 | * - start - kernel virtual start address | ||
369 | * - size - size of region | ||
370 | * - dir - DMA direction | ||
371 | */ | ||
372 | ENTRY(xscale_dma_map_area) | ||
373 | add r1, r1, r0 | ||
374 | cmp r2, #DMA_TO_DEVICE | ||
375 | beq xscale_dma_clean_range | ||
376 | bcs xscale_dma_inv_range | ||
377 | b xscale_dma_flush_range | ||
378 | ENDPROC(xscale_dma_map_area) | ||
379 | |||
380 | /* | ||
381 | * dma_map_area(start, size, dir) | ||
382 | * - start - kernel virtual start address | ||
383 | * - size - size of region | ||
384 | * - dir - DMA direction | ||
385 | */ | ||
386 | ENTRY(xscale_dma_a0_map_area) | ||
387 | add r1, r1, r0 | ||
388 | teq r2, #DMA_TO_DEVICE | ||
389 | beq xscale_dma_clean_range | ||
390 | b xscale_dma_flush_range | ||
391 | ENDPROC(xscale_dma_a0_map_area) | ||
392 | |||
393 | /* | ||
394 | * dma_unmap_area(start, size, dir) | ||
395 | * - start - kernel virtual start address | ||
396 | * - size - size of region | ||
397 | * - dir - DMA direction | ||
398 | */ | ||
399 | ENTRY(xscale_dma_unmap_area) | ||
400 | mov pc, lr | ||
401 | ENDPROC(xscale_dma_unmap_area) | ||
402 | |||
365 | ENTRY(xscale_cache_fns) | 403 | ENTRY(xscale_cache_fns) |
366 | .long xscale_flush_kern_cache_all | 404 | .long xscale_flush_kern_cache_all |
367 | .long xscale_flush_user_cache_all | 405 | .long xscale_flush_user_cache_all |
368 | .long xscale_flush_user_cache_range | 406 | .long xscale_flush_user_cache_range |
369 | .long xscale_coherent_kern_range | 407 | .long xscale_coherent_kern_range |
370 | .long xscale_coherent_user_range | 408 | .long xscale_coherent_user_range |
371 | .long xscale_flush_kern_dcache_page | 409 | .long xscale_flush_kern_dcache_area |
372 | .long xscale_dma_inv_range | 410 | .long xscale_dma_map_area |
373 | .long xscale_dma_clean_range | 411 | .long xscale_dma_unmap_area |
374 | .long xscale_dma_flush_range | 412 | .long xscale_dma_flush_range |
375 | 413 | ||
376 | /* | 414 | /* |
@@ -392,9 +430,9 @@ ENTRY(xscale_80200_A0_A1_cache_fns) | |||
392 | .long xscale_flush_user_cache_range | 430 | .long xscale_flush_user_cache_range |
393 | .long xscale_coherent_kern_range | 431 | .long xscale_coherent_kern_range |
394 | .long xscale_coherent_user_range | 432 | .long xscale_coherent_user_range |
395 | .long xscale_flush_kern_dcache_page | 433 | .long xscale_flush_kern_dcache_area |
396 | .long xscale_dma_flush_range | 434 | .long xscale_dma_a0_map_area |
397 | .long xscale_dma_clean_range | 435 | .long xscale_dma_unmap_area |
398 | .long xscale_dma_flush_range | 436 | .long xscale_dma_flush_range |
399 | 437 | ||
400 | ENTRY(cpu_xscale_dcache_clean_area) | 438 | ENTRY(cpu_xscale_dcache_clean_area) |
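xscale_dma_a0_map_area exists because 80200 steppings A0/A1 cannot safely perform an invalidate-only D-cache operation; the old table pointed the inv slot at the flush routine, and the new dispatch keeps that behaviour by sending everything except DMA_TO_DEVICE down the clean+invalidate path. As a sketch, with clean_range/flush_range as placeholders as before:

    static void xscale_a0_map_sketch(const char *start, size_t size, int dir)
    {
        const char *end = start + size;

        if (dir == DMA_TO_DEVICE)      /* teq r2, #DMA_TO_DEVICE; beq */
            clean_range(start, end);
        else                           /* FROM_DEVICE and BIDIRECTIONAL */
            flush_range(start, end);   /* never invalidate-only on A0/A1 */
    }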
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index a26a605b73bd..f3f288a9546d 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S | |||
@@ -40,7 +40,6 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
40 | asid r3, r3 @ mask ASID | 40 | asid r3, r3 @ mask ASID |
41 | orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA | 41 | orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA |
42 | mov r1, r1, lsl #PAGE_SHIFT | 42 | mov r1, r1, lsl #PAGE_SHIFT |
43 | vma_vm_flags r2, r2 @ get vma->vm_flags | ||
44 | 1: | 43 | 1: |
45 | #ifdef CONFIG_SMP | 44 | #ifdef CONFIG_SMP |
46 | mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) | 45 | mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) |
@@ -51,7 +50,11 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
51 | cmp r0, r1 | 50 | cmp r0, r1 |
52 | blo 1b | 51 | blo 1b |
53 | mov ip, #0 | 52 | mov ip, #0 |
53 | #ifdef CONFIG_SMP | ||
54 | mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable | ||
55 | #else | ||
54 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB | 56 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB |
57 | #endif | ||
55 | dsb | 58 | dsb |
56 | mov pc, lr | 59 | mov pc, lr |
57 | ENDPROC(v7wbi_flush_user_tlb_range) | 60 | ENDPROC(v7wbi_flush_user_tlb_range) |
@@ -80,7 +83,11 @@ ENTRY(v7wbi_flush_kern_tlb_range) | |||
80 | cmp r0, r1 | 83 | cmp r0, r1 |
81 | blo 1b | 84 | blo 1b |
82 | mov r2, #0 | 85 | mov r2, #0 |
86 | #ifdef CONFIG_SMP | ||
87 | mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable | ||
88 | #else | ||
83 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 89 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
90 | #endif | ||
84 | dsb | 91 | dsb |
85 | isb | 92 | isb |
86 | mov pc, lr | 93 | mov pc, lr |
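The tlb-v7.S hunks switch the branch-predictor flush to the inner-shareable form on SMP so the maintenance is broadcast to the other cores, while UP keeps the local operation. In ARMv7 terms these are BPIALLIS versus BPIALL; a sketch of the selection the #ifdef makes:

    #ifdef CONFIG_SMP
        asm volatile("mcr p15, 0, %0, c7, c1, 6" : : "r" (0));  /* BPIALLIS */
    #else
        asm volatile("mcr p15, 0, %0, c7, c5, 6" : : "r" (0));  /* BPIALL   */
    #endif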
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c new file mode 100644 index 000000000000..19e09bdb1b8a --- /dev/null +++ b/arch/arm/mm/vmregion.c | |||
@@ -0,0 +1,131 @@ | |||
1 | #include <linux/spinlock.h> | ||
2 | #include <linux/list.h> | ||
3 | #include <linux/slab.h> | ||
4 | |||
5 | #include "vmregion.h" | ||
6 | |||
7 | /* | ||
8 | * VM region handling support. | ||
9 | * | ||
10 | * This should become something generic, handling VM region allocations for | ||
11 | * vmalloc and similar (ioremap, module space, etc). | ||
12 | * | ||
13 | * I envisage vmalloc()'s supporting vm_struct becoming: | ||
14 | * | ||
15 | * struct vm_struct { | ||
16 | * struct vmregion region; | ||
17 | * unsigned long flags; | ||
18 | * struct page **pages; | ||
19 | * unsigned int nr_pages; | ||
20 | * unsigned long phys_addr; | ||
21 | * }; | ||
22 | * | ||
23 | * get_vm_area() would then call vmregion_alloc with an appropriate | ||
24 | * struct vmregion head (eg): | ||
25 | * | ||
26 | * struct vmregion vmalloc_head = { | ||
27 | * .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list), | ||
28 | * .vm_start = VMALLOC_START, | ||
29 | * .vm_end = VMALLOC_END, | ||
30 | * }; | ||
31 | * | ||
32 | * However, vmalloc_head.vm_start is variable (typically, it is dependent on | ||
33 | * the amount of RAM found at boot time.) I would imagine that get_vm_area() | ||
34 | * would have to initialise this each time prior to calling vmregion_alloc(). | ||
35 | */ | ||
36 | |||
37 | struct arm_vmregion * | ||
38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp) | ||
39 | { | ||
40 | unsigned long addr = head->vm_start, end = head->vm_end - size; | ||
41 | unsigned long flags; | ||
42 | struct arm_vmregion *c, *new; | ||
43 | |||
44 | if (head->vm_end - head->vm_start < size) { | ||
45 | printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", | ||
46 | __func__, size); | ||
47 | goto out; | ||
48 | } | ||
49 | |||
50 | new = kmalloc(sizeof(struct arm_vmregion), gfp); | ||
51 | if (!new) | ||
52 | goto out; | ||
53 | |||
54 | spin_lock_irqsave(&head->vm_lock, flags); | ||
55 | |||
56 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
57 | if ((addr + size) < addr) | ||
58 | goto nospc; | ||
59 | if ((addr + size) <= c->vm_start) | ||
60 | goto found; | ||
61 | addr = c->vm_end; | ||
62 | if (addr > end) | ||
63 | goto nospc; | ||
64 | } | ||
65 | |||
66 | found: | ||
67 | /* | ||
68 | * Insert this entry _before_ the one we found. | ||
69 | */ | ||
70 | list_add_tail(&new->vm_list, &c->vm_list); | ||
71 | new->vm_start = addr; | ||
72 | new->vm_end = addr + size; | ||
73 | new->vm_active = 1; | ||
74 | |||
75 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
76 | return new; | ||
77 | |||
78 | nospc: | ||
79 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
80 | kfree(new); | ||
81 | out: | ||
82 | return NULL; | ||
83 | } | ||
84 | |||
85 | static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) | ||
86 | { | ||
87 | struct arm_vmregion *c; | ||
88 | |||
89 | list_for_each_entry(c, &head->vm_list, vm_list) { | ||
90 | if (c->vm_active && c->vm_start == addr) | ||
91 | goto out; | ||
92 | } | ||
93 | c = NULL; | ||
94 | out: | ||
95 | return c; | ||
96 | } | ||
97 | |||
98 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) | ||
99 | { | ||
100 | struct arm_vmregion *c; | ||
101 | unsigned long flags; | ||
102 | |||
103 | spin_lock_irqsave(&head->vm_lock, flags); | ||
104 | c = __arm_vmregion_find(head, addr); | ||
105 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
106 | return c; | ||
107 | } | ||
108 | |||
109 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) | ||
110 | { | ||
111 | struct arm_vmregion *c; | ||
112 | unsigned long flags; | ||
113 | |||
114 | spin_lock_irqsave(&head->vm_lock, flags); | ||
115 | c = __arm_vmregion_find(head, addr); | ||
116 | if (c) | ||
117 | c->vm_active = 0; | ||
118 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
119 | return c; | ||
120 | } | ||
121 | |||
122 | void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) | ||
123 | { | ||
124 | unsigned long flags; | ||
125 | |||
126 | spin_lock_irqsave(&head->vm_lock, flags); | ||
127 | list_del(&c->vm_list); | ||
128 | spin_unlock_irqrestore(&head->vm_lock, flags); | ||
129 | |||
130 | kfree(c); | ||
131 | } | ||
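vmregion.c factors the VM region allocator out of dma-mapping.c so it can be shared: a caller supplies a head describing a fixed virtual window and gets first-fit regions carved out of it. A usage sketch with made-up addresses (the real user is the DMA-coherent mapping code):

    static struct arm_vmregion_head consistent_head = {
        .vm_lock  = __SPIN_LOCK_UNLOCKED(consistent_head.vm_lock),
        .vm_list  = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start = 0xffc00000,        /* placeholder window */
        .vm_end   = 0xffe00000,
    };

    void example(void)
    {
        struct arm_vmregion *c;

        c = arm_vmregion_alloc(&consistent_head, SZ_1M, GFP_KERNEL);
        if (!c)
            return;
        /* ... map pages into [c->vm_start, c->vm_end) ... */

        /* tear-down: find_remove clears vm_active under the lock first,
         * so concurrent lookups stop seeing the region before it is freed */
        c = arm_vmregion_find_remove(&consistent_head, c->vm_start);
        if (c)
            arm_vmregion_free(&consistent_head, c);
    }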
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h new file mode 100644 index 000000000000..6b2cdbdf3a85 --- /dev/null +++ b/arch/arm/mm/vmregion.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef VMREGION_H | ||
2 | #define VMREGION_H | ||
3 | |||
4 | #include <linux/spinlock.h> | ||
5 | #include <linux/list.h> | ||
6 | |||
7 | struct page; | ||
8 | |||
9 | struct arm_vmregion_head { | ||
10 | spinlock_t vm_lock; | ||
11 | struct list_head vm_list; | ||
12 | unsigned long vm_start; | ||
13 | unsigned long vm_end; | ||
14 | }; | ||
15 | |||
16 | struct arm_vmregion { | ||
17 | struct list_head vm_list; | ||
18 | unsigned long vm_start; | ||
19 | unsigned long vm_end; | ||
20 | struct page *vm_pages; | ||
21 | int vm_active; | ||
22 | }; | ||
23 | |||
24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t); | ||
25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); | ||
26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); | ||
27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); | ||
28 | |||
29 | #endif | ||