author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-11 23:32:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-11 23:32:43 -0400
commit		5cea24c5899a81abf59706d69580dd5c734effa8
tree		c080ec6b1c6cf27b50f00b2980068fb563b6f7ec
parent		2fc07efa2241afe08de136c061b3baa103fb286c
parent		a0f0dd57f4a85310d9936f1770a0424b49fef876
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm
Pull second set of ARM updates from Russell King:
 "This is the second set of ARM updates for this merge window.

  Contained within are changes to allow the kernel to boot in hypervisor
  mode on CPUs supporting virtualization, and cache flushing support to
  the point of inner shareable unification, which are used by the
  suspend/resume code to avoid having to do a full cache flush.

  Also included is one fix for VFP code identified by Michael Olbrich."

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels
  ARM: 7549/1: HYP: fix boot on some ARM1136 cores
  ARM: 7542/1: mm: fix cache LoUIS API for xscale and feroceon
  ARM: mm: update __v7_setup() to the new LoUIS cache maintenance API
  ARM: kernel: update __cpu_disable to use cache LoUIS maintenance API
  ARM: kernel: update cpu_suspend code to use cache LoUIS operations
  ARM: mm: rename jump labels in v7_flush_dcache_all function
  ARM: mm: implement LoUIS API for cache maintenance ops
  ARM: virt: arch_timers: enable access to physical timers
  ARM: virt: Add CONFIG_ARM_VIRT_EXT option
  ARM: virt: Add boot-time diagnostics
  ARM: virt: Update documentation for hyp mode entry support
  ARM: zImage/virt: hyp mode entry support for the zImage loader
  ARM: virt: allow the kernel to be entered in HYP mode
  ARM: opcodes: add __ERET/__MSR_ELR_HYP instruction encoding
-rw-r--r--  Documentation/arm/Booting            |  22
-rw-r--r--  arch/arm/boot/compressed/.gitignore  |   1
-rw-r--r--  arch/arm/boot/compressed/Makefile    |   9
-rw-r--r--  arch/arm/boot/compressed/head.S      |  71
-rw-r--r--  arch/arm/include/asm/assembler.h     |  29
-rw-r--r--  arch/arm/include/asm/cacheflush.h    |  15
-rw-r--r--  arch/arm/include/asm/glue-cache.h    |   1
-rw-r--r--  arch/arm/include/asm/opcodes-virt.h  |  10
-rw-r--r--  arch/arm/include/asm/ptrace.h        |   1
-rw-r--r--  arch/arm/include/asm/vfpmacros.h     |   4
-rw-r--r--  arch/arm/include/asm/virt.h          |  69
-rw-r--r--  arch/arm/kernel/Makefile             |   2
-rw-r--r--  arch/arm/kernel/head.S               |  14
-rw-r--r--  arch/arm/kernel/hyp-stub.S           | 223
-rw-r--r--  arch/arm/kernel/setup.c              |  20
-rw-r--r--  arch/arm/kernel/smp.c                |   8
-rw-r--r--  arch/arm/kernel/suspend.c            |  17
-rw-r--r--  arch/arm/mm/Kconfig                  |  17
-rw-r--r--  arch/arm/mm/cache-fa.S               |   3
-rw-r--r--  arch/arm/mm/cache-v3.S               |   3
-rw-r--r--  arch/arm/mm/cache-v4.S               |   3
-rw-r--r--  arch/arm/mm/cache-v4wb.S             |   3
-rw-r--r--  arch/arm/mm/cache-v4wt.S             |   3
-rw-r--r--  arch/arm/mm/cache-v6.S               |   3
-rw-r--r--  arch/arm/mm/cache-v7.S               |  48
-rw-r--r--  arch/arm/mm/proc-arm1020.S           |   3
-rw-r--r--  arch/arm/mm/proc-arm1020e.S          |   3
-rw-r--r--  arch/arm/mm/proc-arm1022.S           |   3
-rw-r--r--  arch/arm/mm/proc-arm1026.S           |   3
-rw-r--r--  arch/arm/mm/proc-arm920.S            |   3
-rw-r--r--  arch/arm/mm/proc-arm922.S            |   3
-rw-r--r--  arch/arm/mm/proc-arm925.S            |   3
-rw-r--r--  arch/arm/mm/proc-arm926.S            |   3
-rw-r--r--  arch/arm/mm/proc-arm940.S            |   3
-rw-r--r--  arch/arm/mm/proc-arm946.S            |   3
-rw-r--r--  arch/arm/mm/proc-feroceon.S          |   4
-rw-r--r--  arch/arm/mm/proc-macros.S            |   1
-rw-r--r--  arch/arm/mm/proc-mohawk.S            |   3
-rw-r--r--  arch/arm/mm/proc-v7.S                |   2
-rw-r--r--  arch/arm/mm/proc-xsc3.S              |   3
-rw-r--r--  arch/arm/mm/proc-xscale.S            |   4
41 files changed, 623 insertions(+), 23 deletions(-)
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index a341d87d276e..0c1f475fdf36 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -154,13 +154,33 @@ In either case, the following conditions must be met:
 
 - CPU mode
   All forms of interrupts must be disabled (IRQs and FIQs)
-  The CPU must be in SVC mode. (A special exception exists for Angel)
+
+  For CPUs which do not include the ARM virtualization extensions, the
+  CPU must be in SVC mode. (A special exception exists for Angel)
+
+  CPUs which include support for the virtualization extensions can be
+  entered in HYP mode in order to enable the kernel to make full use of
+  these extensions. This is the recommended boot method for such CPUs,
+  unless the virtualisations are already in use by a pre-installed
+  hypervisor.
+
+  If the kernel is not entered in HYP mode for any reason, it must be
+  entered in SVC mode.
 
 - Caches, MMUs
   The MMU must be off.
   Instruction cache may be on or off.
   Data cache must be off.
 
+  If the kernel is entered in HYP mode, the above requirements apply to
+  the HYP mode configuration in addition to the ordinary PL1 (privileged
+  kernel modes) configuration. In addition, all traps into the
+  hypervisor must be disabled, and PL1 access must be granted for all
+  peripherals and CPU resources for which this is architecturally
+  possible. Except for entering in HYP mode, the system configuration
+  should be such that a kernel which does not include support for the
+  virtualization extensions can boot correctly without extra help.
+
 - The boot loader is expected to call the kernel image by jumping
   directly to the first instruction of the kernel image.
 
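The mode rule above reduces to a check on the low five bits of the CPSR. A
minimal C sketch, using the mode encodings this series adds to
arch/arm/include/asm/ptrace.h (illustration only, not part of the commit):

    #include <stdint.h>
    #include <stdio.h>

    #define MODE_MASK 0x0000001f
    #define SVC_MODE  0x00000013   /* from asm/ptrace.h */
    #define HYP_MODE  0x0000001a   /* added by this series */

    /* Returns 1 if a CPSR value satisfies the documented entry rule. */
    static int entry_mode_ok(uint32_t cpsr)
    {
            uint32_t mode = cpsr & MODE_MASK;

            return mode == SVC_MODE || mode == HYP_MODE;
    }

    int main(void)
    {
            printf("%d\n", entry_mode_ok(0x000001d3)); /* SVC, IRQ/FIQ masked: 1 */
            printf("%d\n", entry_mode_ok(0x000001da)); /* HYP: 1 */
            printf("%d\n", entry_mode_ok(0x000001d0)); /* USR: 0 */
            return 0;
    }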
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index d0d441c429ae..f79a08efe000 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,6 +1,7 @@
 ashldi3.S
 font.c
 lib1funcs.S
+hyp-stub.S
 piggy.gzip
 piggy.lzo
 piggy.lzma
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index bb267562e7ed..a517153a13ea 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -30,6 +30,10 @@ FONTC = $(srctree)/drivers/video/console/font_acorn_8x8.c
 OBJS		+= string.o
 CFLAGS_string.o	:= -Os
 
+ifeq ($(CONFIG_ARM_VIRT_EXT),y)
+OBJS		+= hyp-stub.o
+endif
+
 #
 # Architecture dependencies
 #
@@ -126,7 +130,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all
+asflags-y := -Wa,-march=all -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
@@ -198,3 +202,6 @@ $(obj)/font.c: $(FONTC)
 
 $(obj)/vmlinux.lds: $(obj)/vmlinux.lds.in arch/arm/boot/Makefile $(KCONFIG_CONFIG)
 	@sed "$(SEDFLAGS)" < $< > $@
+
+$(obj)/hyp-stub.S: $(srctree)/arch/$(SRCARCH)/kernel/hyp-stub.S
+	$(call cmd,shipped)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index bc67cbff3944..90275f036cd1 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -9,6 +9,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 /*
  * Debugging stuff
@@ -132,7 +133,12 @@ start:
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
 THUMB(		.thumb			)
-1:		mov	r7, r1			@ save architecture ID
+1:
+		mrs	r9, cpsr
+#ifdef CONFIG_ARM_VIRT_EXT
+		bl	__hyp_stub_install	@ get into SVC mode, reversibly
+#endif
+		mov	r7, r1			@ save architecture ID
 		mov	r8, r2			@ save atags pointer
 
 #ifndef __ARM_ARCH_2__
@@ -148,9 +154,9 @@ start:
 ARM(		swi	0x123456	)	@ angel_SWI_ARM
 THUMB(		svc	0xab		)	@ angel_SWI_THUMB
 not_angel:
-		mrs	r2, cpsr		@ turn off interrupts to
-		orr	r2, r2, #0xc0		@ prevent angel from running
-		msr	cpsr_c, r2
+		safe_svcmode_maskall r0
+		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
+						@ SPSR
 #else
 		teqp	pc, #0x0c000003		@ turn off interrupts
 #endif
@@ -350,6 +356,20 @@ dtb_check_done:
 		adr	r5, restart
 		bic	r5, r5, #31
 
+/* Relocate the hyp vector base if necessary */
+#ifdef CONFIG_ARM_VIRT_EXT
+		mrs	r0, spsr
+		and	r0, r0, #MODE_MASK
+		cmp	r0, #HYP_MODE
+		bne	1f
+
+		bl	__hyp_get_vectors
+		sub	r0, r0, r5
+		add	r0, r0, r10
+		bl	__hyp_set_vectors
+1:
+#endif
+
 		sub	r9, r6, r5		@ size to copy
 		add	r9, r9, #31		@ rounded up to a multiple
 		bic	r9, r9, #31		@ ... of 32 bytes
@@ -458,11 +478,29 @@ not_relocated:	mov	r0, #0
 		bl	decompress_kernel
 		bl	cache_clean_flush
 		bl	cache_off
-		mov	r0, #0			@ must be zero
 		mov	r1, r7			@ restore architecture number
 		mov	r2, r8			@ restore atags pointer
-ARM(		mov	pc, r4	)		@ call kernel
-THUMB(		bx	r4	)		@ entry point is always ARM
+
+#ifdef CONFIG_ARM_VIRT_EXT
+		mrs	r0, spsr		@ Get saved CPU boot mode
+		and	r0, r0, #MODE_MASK
+		cmp	r0, #HYP_MODE		@ if not booted in HYP mode...
+		bne	__enter_kernel		@ boot kernel directly
+
+		adr	r12, .L__hyp_reentry_vectors_offset
+		ldr	r0, [r12]
+		add	r0, r0, r12
+
+		bl	__hyp_set_vectors
+		__HVC(0)			@ otherwise bounce to hyp mode
+
+		b	.			@ should never be reached
+
+		.align	2
+.L__hyp_reentry_vectors_offset:	.long	__hyp_reentry_vectors - .
+#else
+		b	__enter_kernel
+#endif
 
 		.align	2
 		.type	LC0, #object
@@ -1196,6 +1234,25 @@ memdump:	mov	r12, r0
 #endif
 
 		.ltorg
+
+#ifdef CONFIG_ARM_VIRT_EXT
+.align 5
+__hyp_reentry_vectors:
+W(b)	.			@ reset
+W(b)	.			@ undef
+W(b)	.			@ svc
+W(b)	.			@ pabort
+W(b)	.			@ dabort
+W(b)	__enter_kernel		@ hyp
+W(b)	.			@ irq
+W(b)	.			@ fiq
+#endif /* CONFIG_ARM_VIRT_EXT */
+
+__enter_kernel:
+		mov	r0, #0			@ must be 0
+ARM(		mov	pc, r4	)		@ call kernel
+THUMB(		bx	r4	)		@ entry point is always ARM
+
 reloc_code_end:
 
 		.align
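The vector relocation above is plain pointer arithmetic: the decompressor
copies itself from the address in r5 to the address in r10, so the stub's
HVBAR must move by the same delta. A C sketch of the computation (hypothetical
helper name, for illustration):

    /* Mirrors: bl __hyp_get_vectors; sub r0, r0, r5; add r0, r0, r10;
     * bl __hyp_set_vectors -- shift the vector base by the relocation delta. */
    static unsigned long relocated_vectors(unsigned long vectors,
                                           unsigned long old_base, /* r5  */
                                           unsigned long new_base) /* r10 */
    {
            return vectors - old_base + new_base;
    }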
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c8b3bf4d825..2ef95813fce0 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -22,6 +22,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/domain.h>
+#include <asm/opcodes-virt.h>
 
 #define IOMEM(x)	(x)
 
@@ -240,6 +241,34 @@
 #endif
 
 /*
+ * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * you cannot return to the original mode.
+ *
+ * Beware, it also clobers LR.
+ */
+.macro safe_svcmode_maskall reg:req
+	mrs	\reg , cpsr
+	mov	lr , \reg
+	and	lr , lr , #MODE_MASK
+	cmp	lr , #HYP_MODE
+	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
+	bic	\reg , \reg , #MODE_MASK
+	orr	\reg , \reg , #SVC_MODE
+THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+	bne	1f
+	orr	\reg, \reg, #PSR_A_BIT
+	adr	lr, BSYM(2f)
+	msr	spsr_cxsf, \reg
+	__MSR_ELR_HYP(14)
+	__ERET
+1:	msr	cpsr_c, \reg
+2:
+.endm
+
+/*
  * STRT/LDRT access macros with ARM and Thumb-2 variants
  */
 #ifdef CONFIG_THUMB2_KERNEL
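As a reading aid, here is a C model (a sketch, not kernel code) of the PSR
value the macro constructs; the actual mode change additionally needs the
__MSR_ELR_HYP/__ERET pair when leaving HYP mode:

    #include <stdint.h>

    #define MODE_MASK  0x1f
    #define SVC_MODE   0x13
    #define HYP_MODE   0x1a
    #define PSR_F_BIT  0x40
    #define PSR_I_BIT  0x80
    #define PSR_A_BIT  0x100

    /* Target PSR computed by safe_svcmode_maskall for a given entry CPSR. */
    static uint32_t svc_target_psr(uint32_t cpsr, int *leaving_hyp)
    {
            uint32_t psr = cpsr;

            *leaving_hyp = (cpsr & MODE_MASK) == HYP_MODE;
            psr |= PSR_I_BIT | PSR_F_BIT;           /* mask IRQs and FIQs */
            psr = (psr & ~MODE_MASK) | SVC_MODE;    /* force SVC mode */
            if (*leaving_hyp)
                    psr |= PSR_A_BIT;               /* HYP exit also masks aborts */
            return psr;
    }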
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e4448e16046d..e1489c54cd12 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -49,6 +49,13 @@
  *
  *	Unconditionally clean and invalidate the entire cache.
  *
+ *	flush_kern_louis()
+ *
+ *		Flush data cache levels up to the level of unification
+ *		inner shareable and invalidate the I-cache.
+ *		Only needed from v7 onwards, falls back to flush_cache_all()
+ *		for all other processor versions.
+ *
  *	flush_user_all()
  *
  *		Clean and invalidate all user space cache entries
@@ -97,6 +104,7 @@
 struct cpu_cache_fns {
 	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
+	void (*flush_kern_louis)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
@@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 #define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
+#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
@@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_kern_louis(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -204,6 +214,11 @@ static inline void __flush_icache_all(void)
 	__flush_icache_preferred();
 }
 
+/*
+ * Flush caches up to Level of Unification Inner Shareable
+ */
+#define flush_cache_louis()		__cpuc_flush_kern_louis()
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
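The intended call-site pattern (a kernel-style sketch with a hypothetical
helper name; the real conversions are in the smp.c and suspend.c changes
below) is that CPU-local teardown paths trade the whole-hierarchy flush for
the cheaper LoUIS variant:

    #include <asm/cacheflush.h>

    /* Hypothetical teardown helper, not part of this commit. */
    static void cpu_local_cache_teardown(void)
    {
            /*
             * Push dirty lines out only as far as the cache level shared
             * with the other cores; flush_cache_all() would also walk the
             * levels beyond the unification point, which is unnecessary
             * (and much slower) for a single CPU going away.
             */
            flush_cache_louis();
    }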
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 4f8d2c0dc441..cca9f15704ed 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,7 @@
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all		__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_louis)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
index b85665a96f8e..efcfdf92d9d5 100644
--- a/arch/arm/include/asm/opcodes-virt.h
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -26,4 +26,14 @@
 	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
 )
 
+#define __ERET	__inst_arm_thumb32(					\
+	0xE160006E,							\
+	0xF3DE8F00							\
+)
+
+#define __MSR_ELR_HYP(regnum)	__inst_arm_thumb32(			\
+	0xE12EF300 | regnum,						\
+	0xF3808E30 | (regnum << 16)					\
+)
+
 #endif /* ! __ASM_ARM_OPCODES_VIRT_H */
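The macros fold the register number straight into the opcodes. A quick sanity
check of the two encodings produced by __MSR_ELR_HYP(14), i.e. "msr elr_hyp,
lr" (standalone illustration only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int regnum = 14;                       /* lr */
            uint32_t arm   = 0xE12EF300 | regnum;           /* ARM encoding */
            uint32_t thumb = 0xF3808E30 | (regnum << 16);   /* Thumb-2 encoding */

            printf("ARM   0x%08X\n", arm);    /* prints 0xE12EF30E */
            printf("Thumb 0x%08X\n", thumb);  /* prints 0xF38E8E30 */
            return 0;
    }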
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 44fe998269d9..142d6ae41231 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define IRQ_MODE	0x00000012
 #define SVC_MODE	0x00000013
 #define ABT_MODE	0x00000017
+#define HYP_MODE	0x0000001a
 #define UND_MODE	0x0000001b
 #define SYSTEM_MODE	0x0000001f
 #define MODE32_BIT	0x00000010
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index a7aadbd9a6dd..6a6f1e485f41 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -28,7 +28,7 @@
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
 	tst	\tmp, #HWCAP_VFPv3D16
-	ldceq	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
+	ldceql	p11, cr0, [\base],#32*4		    @ FLDMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		    @ step over unused register space
 #else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
@@ -52,7 +52,7 @@
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
 	tst	\tmp, #HWCAP_VFPv3D16
-	stceq	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
+	stceql	p11, cr0, [\base],#32*4		    @ FSTMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		    @ step over unused register space
 #else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
new file mode 100644
index 000000000000..86164df86cb4
--- /dev/null
+++ b/arch/arm/include/asm/virt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU.  The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here (the N flag, as it happens)
+ */
+#define BOOT_CPU_MODE_MISMATCH	(1<<31)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+void __hyp_set_vectors(unsigned long phys_vector_base);
+unsigned long __hyp_get_vectors(void);
+#else
+#define __boot_cpu_mode	(SVC_MODE)
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+	return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+		!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! VIRT_H */
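A hypothetical consumer of this header (sketch only; names invented for
illustration) would gate hypervisor installation on the recorded boot mode:

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <asm/virt.h>

    static int __init example_hyp_init(void)    /* hypothetical */
    {
            if (is_hyp_mode_mismatched())
                    return -EINVAL;     /* CPUs came up in different modes */
            if (!is_hyp_mode_available())
                    return -ENODEV;     /* no HYP entry: nothing to install */

            /* Safe to point HVBAR at our own vectors via __hyp_set_vectors(). */
            return 0;
    }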
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 5dfef9d97ed9..5bbec7b8183e 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -81,4 +81,6 @@ head-y := head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)		+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
+obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o
+
 extra-y := $(head-y) vmlinux.lds
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9874d0741191..4eee351f4668 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -83,8 +83,12 @@ ENTRY(stext)
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
 
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
-						@ and irqs disabled
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	@ ensure svc mode and all interrupts masked
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
 	movs	r10, r5				@ invalid processor (r5=0)?
@@ -326,7 +330,11 @@ ENTRY(secondary_startup)
 	 * the processor type - there is no need to check the machine type
 	 * as it has already been validated by the primary processor.
 	 */
-	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+#ifdef CONFIG_ARM_VIRT_EXT
+	bl	__hyp_stub_install
+#endif
+	safe_svcmode_maskall r9
+
 	mrc	p15, 0, r9, c0, c0		@ get processor id
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
new file mode 100644
index 000000000000..65b2417aebce
--- /dev/null
+++ b/arch/arm/kernel/hyp-stub.S
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/virt.h>
+
+#ifndef ZIMAGE
+/*
+ * For the kernel proper, we need to find out the CPU boot mode long after
+ * boot, so we need to store it in a writable variable.
+ *
+ * This is not in .bss, because we set it sufficiently early that the boot-time
+ * zeroing of .bss would clobber it.
+ */
+.data
+ENTRY(__boot_cpu_mode)
+	.long	0
+.text
+
+	/*
+	 * Save the primary CPU boot mode. Requires 3 scratch registers.
+	 */
+	.macro	store_primary_cpu_mode	reg1, reg2, reg3
+	mrs	\reg1, cpsr
+	and	\reg1, \reg1, #MODE_MASK
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	str	\reg1, [\reg2, \reg3]
+	.endm
+
+	/*
+	 * Compare the current mode with the one saved on the primary CPU.
+	 * If they don't match, record that fact. The Z bit indicates
+	 * if there's a match or not.
+	 * Requires 3 additionnal scratch registers.
+	 */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	adr	\reg2, .L__boot_cpu_mode_offset
+	ldr	\reg3, [\reg2]
+	ldr	\reg1, [\reg2, \reg3]
+	cmp	\mode, \reg1		@ matches primary CPU boot mode?
+	orrne	r7, r7, #BOOT_CPU_MODE_MISMATCH
+	strne	r7, [r5, r6]		@ record what happened and give up
+	.endm
+
+#else	/* ZIMAGE */
+
+	.macro	store_primary_cpu_mode	reg1:req, reg2:req, reg3:req
+	.endm
+
+/*
+ * The zImage loader only runs on one CPU, so we don't bother with mult-CPU
+ * consistency checking:
+ */
+	.macro	compare_cpu_mode_with_primary mode, reg1, reg2, reg3
+	cmp	\mode, \mode
+	.endm
+
+#endif /* ZIMAGE */
+
+/*
+ * Hypervisor stub installation functions.
+ *
+ * These must be called with the MMU and D-cache off.
+ * They are not ABI compliant and are only intended to be called from the kernel
+ * entry points in head.S.
+ */
+@ Call this from the primary CPU
+ENTRY(__hyp_stub_install)
+	store_primary_cpu_mode	r4, r5, r6
+ENDPROC(__hyp_stub_install)
+
+	@ fall through...
+
+@ Secondary CPUs should call here
+ENTRY(__hyp_stub_install_secondary)
+	mrs	r4, cpsr
+	and	r4, r4, #MODE_MASK
+
+	/*
+	 * If the secondary has booted with a different mode, give up
+	 * immediately.
+	 */
+	compare_cpu_mode_with_primary	r4, r5, r6, r7
+	bxne	lr
+
+	/*
+	 * Once we have given up on one CPU, we do not try to install the
+	 * stub hypervisor on the remaining ones: because the saved boot mode
+	 * is modified, it can't compare equal to the CPSR mode field any
+	 * more.
+	 *
+	 * Otherwise...
+	 */
+
+	cmp	r4, #HYP_MODE
+	bxne	lr			@ give up if the CPU is not in HYP mode
+
+/*
+ * Configure HSCTLR to set correct exception endianness/instruction set
+ * state etc.
+ * Turn off all traps
+ * Eventually, CPU-specific code might be needed -- assume not for now
+ *
+ * This code relies on the "eret" instruction to synchronize the
+ * various coprocessor accesses.
+ */
+	@ Now install the hypervisor stub:
+	adr	r7, __hyp_stub_vectors
+	mcr	p15, 4, r7, c12, c0, 0	@ set hypervisor vector base (HVBAR)
+
+	@ Disable all traps, so we don't get any nasty surprise
+	mov	r7, #0
+	mcr	p15, 4, r7, c1, c1, 0	@ HCR
+	mcr	p15, 4, r7, c1, c1, 2	@ HCPTR
+	mcr	p15, 4, r7, c1, c1, 3	@ HSTR
+
+THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	orr	r7, #(1 << 9)		@ HSCTLR.EE
+#endif
+	mcr	p15, 4, r7, c1, c0, 0	@ HSCTLR
+
+	mrc	p15, 4, r7, c1, c1, 1	@ HDCR
+	and	r7, #0x1f		@ Preserve HPMN
+	mcr	p15, 4, r7, c1, c1, 1	@ HDCR
+
+#if !defined(ZIMAGE) && defined(CONFIG_ARM_ARCH_TIMER)
+	@ make CNTP_* and CNTPCT accessible from PL1
+	mrc	p15, 0, r7, c0, c1, 1	@ ID_PFR1
+	lsr	r7, #16
+	and	r7, #0xf
+	cmp	r7, #1
+	bne	1f
+	mrc	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+	orr	r7, r7, #3		@ PL1PCEN | PL1PCTEN
+	mcr	p15, 4, r7, c14, c1, 0	@ CNTHCTL
+1:
+#endif
+
+	bic	r7, r4, #MODE_MASK
+	orr	r7, r7, #SVC_MODE
+THUMB(	orr	r7, r7, #PSR_T_BIT	)
+	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
+
+	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
+	__ERET				@ return, switching to SVC mode
+					@ The boot CPU mode is left in r4.
+ENDPROC(__hyp_stub_install_secondary)
+
+__hyp_stub_do_trap:
+	cmp	r0, #-1
+	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
+	mcrne	p15, 4, r0, c12, c0, 0	@ set HVBAR
+	__ERET
+ENDPROC(__hyp_stub_do_trap)
+
+/*
+ * __hyp_set_vectors: Call this after boot to set the initial hypervisor
+ * vectors as part of hypervisor installation.  On an SMP system, this should
+ * be called on each CPU.
+ *
+ * r0 must be the physical address of the new vector table (which must lie in
+ * the bottom 4GB of physical address space.
+ *
+ * r0 must be 32-byte aligned.
+ *
+ * Before calling this, you must check that the stub hypervisor is installed
+ * everywhere, by waiting for any secondary CPUs to be brought up and then
+ * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
+ *
+ * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
+ * something else went wrong... in such cases, trying to install a new
+ * hypervisor is unlikely to work as desired.
+ *
+ * When you call into your shiny new hypervisor, sp_hyp will contain junk,
+ * so you will need to set that to something sensible at the new hypervisor's
+ * initialisation entry point.
+ */
+ENTRY(__hyp_get_vectors)
+	mov	r0, #-1
+ENDPROC(__hyp_get_vectors)
+	@ fall through
+ENTRY(__hyp_set_vectors)
+	__HVC(0)
+	bx	lr
+ENDPROC(__hyp_set_vectors)
+
+#ifndef ZIMAGE
+.align 2
+.L__boot_cpu_mode_offset:
+	.long	__boot_cpu_mode - .
+#endif
+
+.align 5
+__hyp_stub_vectors:
+__hyp_stub_reset:	W(b)	.
+__hyp_stub_und:		W(b)	.
+__hyp_stub_svc:		W(b)	.
+__hyp_stub_pabort:	W(b)	.
+__hyp_stub_dabort:	W(b)	.
+__hyp_stub_trap:	W(b)	__hyp_stub_do_trap
+__hyp_stub_irq:		W(b)	.
+__hyp_stub_fiq:		W(b)	.
+ENDPROC(__hyp_stub_vectors)
+
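__hyp_stub_do_trap implements a two-operation protocol over HVC #0: r0 == -1
reads HVBAR back, any other value installs it. A C model of that dispatch
(sketch only; the register is really cp15 c12 accessed at PL2):

    static unsigned long fake_hvbar;    /* stands in for the real HVBAR */

    /* What the stub's trap handler does with the value arriving in r0. */
    static unsigned long hyp_stub_trap(unsigned long r0)
    {
            if (r0 == (unsigned long)-1)
                    return fake_hvbar;  /* __hyp_get_vectors */
            fake_hvbar = r0;            /* __hyp_set_vectors */
            return r0;
    }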
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index febafa0f552d..da1d1aa20ad9 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -53,6 +53,7 @@
 #include <asm/traps.h>
 #include <asm/unwind.h>
 #include <asm/memblock.h>
+#include <asm/virt.h>
 
 #include "atags.h"
 #include "tcm.h"
@@ -703,6 +704,21 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
 	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 }
 
+void __init hyp_mode_check(void)
+{
+#ifdef CONFIG_ARM_VIRT_EXT
+	if (is_hyp_mode_available()) {
+		pr_info("CPU: All CPU(s) started in HYP mode.\n");
+		pr_info("CPU: Virtualization extensions available.\n");
+	} else if (is_hyp_mode_mismatched()) {
+		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
+			__boot_cpu_mode & MODE_MASK);
+		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
+	} else
+		pr_info("CPU: All CPU(s) started in SVC mode.\n");
+#endif
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	struct machine_desc *mdesc;
@@ -748,6 +764,10 @@ void __init setup_arch(char **cmdline_p)
 		smp_init_cpus();
 	}
 #endif
+
+	if (!is_smp())
+		hyp_mode_check();
+
 	reserve_crashkernel();
 
 	tcm_init();
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index d100eacdb798..8e20754dd31d 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -43,6 +43,7 @@
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
 #include <asm/smp_plat.h>
+#include <asm/virt.h>
 #include <asm/mach/arch.h>
 
 /*
@@ -202,8 +203,11 @@ int __cpuinit __cpu_disable(void)
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
 	 * from the vm mask set of all processes.
+	 *
+	 * Caches are flushed to the Level of Unification Inner Shareable
+	 * to write-back dirty lines to unified caches shared by all CPUs.
 	 */
-	flush_cache_all();
+	flush_cache_louis();
 	local_flush_tlb_all();
 
 	clear_tasks_mm_cpumask(cpu);
@@ -355,6 +359,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 		num_online_cpus(),
 		bogosum / (500000/HZ),
 		(bogosum / (5000/HZ)) % 100);
+
+	hyp_mode_check();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 1794cc3b0f18..358bca3a995e 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -17,6 +17,8 @@ extern void cpu_resume_mmu(void);
  */
 void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 {
+	u32 *ctx = ptr;
+
 	*save_ptr = virt_to_phys(ptr);
 
 	/* This must correspond to the LDM in cpu_resume() assembly */
@@ -26,7 +28,20 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
 
 	cpu_do_suspend(ptr);
 
-	flush_cache_all();
+	flush_cache_louis();
+
+	/*
+	 * flush_cache_louis does not guarantee that
+	 * save_ptr and ptr are cleaned to main memory,
+	 * just up to the Level of Unification Inner Shareable.
+	 * Since the context pointer and context itself
+	 * are to be retrieved with the MMU off that
+	 * data must be cleaned from all cache levels
+	 * to main memory using "area" cache primitives.
+	 */
+	__cpuc_flush_dcache_area(ctx, ptrsz);
+	__cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
 	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
 	outer_clean_range(virt_to_phys(save_ptr),
 			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 101b9681c08c..c9a4963b5c3d 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -624,6 +624,23 @@ config ARM_THUMBEE
 	  Say Y here if you have a CPU with the ThumbEE extension and code to
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.
 
+config ARM_VIRT_EXT
+	bool "Native support for the ARM Virtualization Extensions"
+	depends on MMU && CPU_V7
+	help
+	  Enable the kernel to make use of the ARM Virtualization
+	  Extensions to install hypervisors without run-time firmware
+	  assistance.
+
+	  A compliant bootloader is required in order to make maximum
+	  use of this feature.  Refer to Documentation/arm/Booting for
+	  details.
+
+	  It is safe to enable this option even if the kernel may not be
+	  booted in HYP mode, may not have support for the
+	  virtualization extensions, or may be booted with a
+	  non-compliant bootloader.
+
 config SWP_EMULATE
 	bool "Emulate SWP/SWPB instructions"
 	depends on !CPU_USE_DOMAINS && CPU_V7
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 072016371093..e505befe51b5 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -240,6 +240,9 @@ ENTRY(fa_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(fa_dma_unmap_area)
 
+	.globl	fa_flush_kern_cache_louis
+	.equ	fa_flush_kern_cache_louis, fa_flush_kern_cache_all
+
 	__INITDATA
 
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 52e35f32eefb..8a3fadece8d3 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -128,6 +128,9 @@ ENTRY(v3_dma_map_area)
 ENDPROC(v3_dma_unmap_area)
 ENDPROC(v3_dma_map_area)
 
+	.globl	v3_flush_kern_cache_louis
+	.equ	v3_flush_kern_cache_louis, v3_flush_kern_cache_all
+
 	__INITDATA
 
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 022135d2b7e4..43e5d77be677 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -140,6 +140,9 @@ ENTRY(v4_dma_map_area)
 ENDPROC(v4_dma_unmap_area)
 ENDPROC(v4_dma_map_area)
 
+	.globl	v4_flush_kern_cache_louis
+	.equ	v4_flush_kern_cache_louis, v4_flush_kern_cache_all
+
 	__INITDATA
 
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index 8f1eeae340c8..cd4945321407 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -251,6 +251,9 @@ ENTRY(v4wb_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(v4wb_dma_unmap_area)
 
+	.globl	v4wb_flush_kern_cache_louis
+	.equ	v4wb_flush_kern_cache_louis, v4wb_flush_kern_cache_all
+
 	__INITDATA
 
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index b34a5f908a82..11e5e5838bc5 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -196,6 +196,9 @@ ENTRY(v4wt_dma_map_area)
 ENDPROC(v4wt_dma_unmap_area)
 ENDPROC(v4wt_dma_map_area)
 
+	.globl	v4wt_flush_kern_cache_louis
+	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all
+
 	__INITDATA
 
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 4b10760c56d6..d8fd4d4bd3d4 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -326,6 +326,9 @@ ENTRY(v6_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(v6_dma_unmap_area)
 
+	.globl	v6_flush_kern_cache_louis
+	.equ	v6_flush_kern_cache_louis, v6_flush_kern_cache_all
+
 	__INITDATA
 
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 3b172275262e..cd956647c21a 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -33,6 +33,24 @@ ENTRY(v7_flush_icache_all)
 	mov	pc, lr
 ENDPROC(v7_flush_icache_all)
 
+/*
+ *	v7_flush_dcache_louis()
+ *
+ *	Flush the D-cache up to the Level of Unification Inner Shareable
+ *
+ *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+
+ENTRY(v7_flush_dcache_louis)
+	dmb					@ ensure ordering with previous memory accesses
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
+	ands	r3, r0, #0xe00000		@ extract LoUIS from clidr
+	mov	r3, r3, lsr #20			@ r3 = LoUIS * 2
+	moveq	pc, lr				@ return if level == 0
+	mov	r10, #0				@ r10 (starting level) = 0
+	b	flush_levels			@ start flushing cache levels
+ENDPROC(v7_flush_dcache_louis)
+
 /*
  *	v7_flush_dcache_all()
  *
@@ -49,7 +67,7 @@ ENTRY(v7_flush_dcache_all)
 	mov	r3, r3, lsr #23			@ left align loc bit field
 	beq	finished			@ if loc is 0, then no need to clean
 	mov	r10, #0				@ start clean at cache level 0
-loop1:
+flush_levels:
 	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
 	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
 	and	r1, r1, #7			@ mask of the bits for current cache only
@@ -71,9 +89,9 @@ loop1:
 	clz	r5, r4				@ find bit position of way size increment
 	ldr	r7, =0x7fff
 	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
-loop2:
+loop1:
 	mov	r9, r4				@ create working copy of max way size
-loop3:
+loop2:
 	ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
 	THUMB(	lsl	r6, r9, r5		)
 	THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
@@ -82,13 +100,13 @@ loop3:
 	THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
 	subs	r9, r9, #1			@ decrement the way
-	bge	loop3
-	subs	r7, r7, #1			@ decrement the index
 	bge	loop2
+	subs	r7, r7, #1			@ decrement the index
+	bge	loop1
 skip:
 	add	r10, r10, #2			@ increment cache number
 	cmp	r3, r10
-	bgt	loop1
+	bgt	flush_levels
 finished:
 	mov	r10, #0				@ swith back to cache level 0
 	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
@@ -120,6 +138,24 @@ ENTRY(v7_flush_kern_cache_all)
 	mov	pc, lr
 ENDPROC(v7_flush_kern_cache_all)
 
+/*
+ *	v7_flush_kern_cache_louis(void)
+ *
+ *	Flush the data cache up to Level of Unification Inner Shareable.
+ *	Invalidate the I-cache to the point of unification.
+ */
+ENTRY(v7_flush_kern_cache_louis)
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
+	bl	v7_flush_dcache_louis
+	mov	r0, #0
+	ALT_SMP(mcr	p15, 0, r0, c7, c1, 0)	@ invalidate I-cache inner shareable
+	ALT_UP(mcr	p15, 0, r0, c7, c5, 0)	@ I+BTB cache invalidate
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
+	mov	pc, lr
+ENDPROC(v7_flush_kern_cache_louis)
+
 /*
  *	v7_flush_cache_all()
  *
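The loop bound used by v7_flush_dcache_louis comes from CLIDR[23:21], the
LoUIS field; it is shifted down by 20 rather than 21 because the set/way loop
counts cache levels in steps of two, matching the position of the level field
in CSSELR. The same arithmetic in C (sketch):

    #include <stdint.h>

    /* r3 in v7_flush_dcache_louis: LoUIS * 2, the "bgt flush_levels" bound. */
    static unsigned int louis_loop_bound(uint32_t clidr)
    {
            return (clidr & 0xe00000) >> 20;
    }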
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 0650bb87c1e3..2bb61e703d6c 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -368,6 +368,9 @@ ENTRY(arm1020_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm1020_dma_unmap_area)
 
+	.globl	arm1020_flush_kern_cache_louis
+	.equ	arm1020_flush_kern_cache_louis, arm1020_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm1020
 
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4188478325a6..8f96aa40f510 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -354,6 +354,9 @@ ENTRY(arm1020e_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm1020e_dma_unmap_area)
 
+	.globl	arm1020e_flush_kern_cache_louis
+	.equ	arm1020e_flush_kern_cache_louis, arm1020e_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm1020e
 
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 33c68824bff0..8ebe4a469a22 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -343,6 +343,9 @@ ENTRY(arm1022_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm1022_dma_unmap_area)
 
+	.globl	arm1022_flush_kern_cache_louis
+	.equ	arm1022_flush_kern_cache_louis, arm1022_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm1022
 
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index fbc1d5fc24dc..093fc7e520c3 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -337,6 +337,9 @@ ENTRY(arm1026_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm1026_dma_unmap_area)
 
+	.globl	arm1026_flush_kern_cache_louis
+	.equ	arm1026_flush_kern_cache_louis, arm1026_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm1026
 
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 1a8c138eb897..2c3b9421ab5e 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -319,6 +319,9 @@ ENTRY(arm920_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm920_dma_unmap_area)
 
+	.globl	arm920_flush_kern_cache_louis
+	.equ	arm920_flush_kern_cache_louis, arm920_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm920
 #endif
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 4c44d7e1c3ca..4464c49d7449 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -321,6 +321,9 @@ ENTRY(arm922_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm922_dma_unmap_area)
 
+	.globl	arm922_flush_kern_cache_louis
+	.equ	arm922_flush_kern_cache_louis, arm922_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm922
 #endif
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ec5b1180994f..281eb9b9c1d6 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -376,6 +376,9 @@ ENTRY(arm925_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm925_dma_unmap_area)
 
+	.globl	arm925_flush_kern_cache_louis
+	.equ	arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm925
 
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index c31e62c606c0..f1803f7e2972 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -339,6 +339,9 @@ ENTRY(arm926_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm926_dma_unmap_area)
 
+	.globl	arm926_flush_kern_cache_louis
+	.equ	arm926_flush_kern_cache_louis, arm926_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm926
 
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index a613a7dd7146..8da189d4a402 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -267,6 +267,9 @@ ENTRY(arm940_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm940_dma_unmap_area)
 
+	.globl	arm940_flush_kern_cache_louis
+	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm940
 
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 9f4f2999fdd0..f666cf34075a 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -310,6 +310,9 @@ ENTRY(arm946_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(arm946_dma_unmap_area)
 
+	.globl	arm946_flush_kern_cache_louis
+	.equ	arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions arm946
 
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index 23a8e4c7f2bd..4106b09e0c29 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -415,6 +415,9 @@ ENTRY(feroceon_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(feroceon_dma_unmap_area)
 
+	.globl	feroceon_flush_kern_cache_louis
+	.equ	feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions feroceon
 
@@ -431,6 +434,7 @@ ENDPROC(feroceon_dma_unmap_area)
 	range_alias flush_icache_all
 	range_alias flush_user_cache_all
 	range_alias flush_kern_cache_all
+	range_alias flush_kern_cache_louis
 	range_alias flush_user_cache_range
 	range_alias coherent_kern_range
 	range_alias coherent_user_range
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 2d8ff3ad86d3..b29a2265af01 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -299,6 +299,7 @@ ENTRY(\name\()_processor_functions)
 ENTRY(\name\()_cache_fns)
 	.long	\name\()_flush_icache_all
 	.long	\name\()_flush_kern_cache_all
+	.long	\name\()_flush_kern_cache_louis
 	.long	\name\()_flush_user_cache_all
 	.long	\name\()_flush_user_cache_range
 	.long	\name\()_coherent_kern_range
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index fbb2124a547d..82f9cdc751d6 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -303,6 +303,9 @@ ENTRY(mohawk_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(mohawk_dma_unmap_area)
 
+	.globl	mohawk_flush_kern_cache_louis
+	.equ	mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions mohawk
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index c2e2b66f72b5..846d279f3176 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -172,7 +172,7 @@ __v7_ca15mp_setup:
 __v7_setup:
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
-	bl	v7_flush_dcache_all
+	bl	v7_flush_dcache_louis
 	ldmia	r12, {r0-r5, r7, r9, r11, lr}
 
 	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index b0d57869da2d..eb93d6487f35 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -337,6 +337,9 @@ ENTRY(xsc3_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(xsc3_dma_unmap_area)
 
+	.globl	xsc3_flush_kern_cache_louis
+	.equ	xsc3_flush_kern_cache_louis, xsc3_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions xsc3
 
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 4ffebaa595ee..25510361aa18 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -410,6 +410,9 @@ ENTRY(xscale_dma_unmap_area)
 	mov	pc, lr
 ENDPROC(xscale_dma_unmap_area)
 
+	.globl	xscale_flush_kern_cache_louis
+	.equ	xscale_flush_kern_cache_louis, xscale_flush_kern_cache_all
+
 	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
 	define_cache_functions xscale
 
@@ -439,6 +442,7 @@ ENDPROC(xscale_dma_unmap_area)
 	a0_alias flush_icache_all
 	a0_alias flush_user_cache_all
 	a0_alias flush_kern_cache_all
+	a0_alias flush_kern_cache_louis
 	a0_alias flush_user_cache_range
 	a0_alias coherent_kern_range
 	a0_alias coherent_user_range