author		Linus Torvalds <torvalds@linux-foundation.org>	2012-10-11 23:32:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-11 23:32:43 -0400
commit		5cea24c5899a81abf59706d69580dd5c734effa8 (patch)
tree		c080ec6b1c6cf27b50f00b2980068fb563b6f7ec /arch/arm/include
parent		2fc07efa2241afe08de136c061b3baa103fb286c (diff)
parent		a0f0dd57f4a85310d9936f1770a0424b49fef876 (diff)
Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

Pull second set of ARM updates from Russell King:
 "This is the second set of ARM updates for this merge window.

  Contained within are changes to allow the kernel to boot in hypervisor
  mode on CPUs supporting virtualization, and cache flushing support to
  the point of inner shareable unification, which is used by the
  suspend/resume code to avoid having to do a full cache flush.

  Also included is one fix for VFP code identified by Michael Olbrich."

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm:
  ARM: vfp: fix saving d16-d31 vfp registers on v6+ kernels
  ARM: 7549/1: HYP: fix boot on some ARM1136 cores
  ARM: 7542/1: mm: fix cache LoUIS API for xscale and feroceon
  ARM: mm: update __v7_setup() to the new LoUIS cache maintenance API
  ARM: kernel: update __cpu_disable to use cache LoUIS maintenance API
  ARM: kernel: update cpu_suspend code to use cache LoUIS operations
  ARM: mm: rename jump labels in v7_flush_dcache_all function
  ARM: mm: implement LoUIS API for cache maintenance ops
  ARM: virt: arch_timers: enable access to physical timers
  ARM: virt: Add CONFIG_ARM_VIRT_EXT option
  ARM: virt: Add boot-time diagnostics
  ARM: virt: Update documentation for hyp mode entry support
  ARM: zImage/virt: hyp mode entry support for the zImage loader
  ARM: virt: allow the kernel to be entered in HYP mode
  ARM: opcodes: add __ERET/__MSR_ELR_HYP instruction encoding
Diffstat (limited to 'arch/arm/include')
-rw-r--r--	arch/arm/include/asm/assembler.h	29
-rw-r--r--	arch/arm/include/asm/cacheflush.h	15
-rw-r--r--	arch/arm/include/asm/glue-cache.h	1
-rw-r--r--	arch/arm/include/asm/opcodes-virt.h	10
-rw-r--r--	arch/arm/include/asm/ptrace.h	1
-rw-r--r--	arch/arm/include/asm/vfpmacros.h	4
-rw-r--r--	arch/arm/include/asm/virt.h	69
7 files changed, 127 insertions(+), 2 deletions(-)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 5c8b3bf4d825..2ef95813fce0 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -22,6 +22,7 @@
 
 #include <asm/ptrace.h>
 #include <asm/domain.h>
+#include <asm/opcodes-virt.h>
 
 #define IOMEM(x)	(x)
 
@@ -240,6 +241,34 @@
 #endif
 
 /*
+ * Helper macro to enter SVC mode cleanly and mask interrupts. reg is
+ * a scratch register for the macro to overwrite.
+ *
+ * This macro is intended for forcing the CPU into SVC mode at boot time.
+ * You cannot return to the original mode.
+ *
+ * Beware, it also clobbers LR.
+ */
+.macro safe_svcmode_maskall reg:req
+	mrs	\reg , cpsr
+	mov	lr , \reg
+	and	lr , lr , #MODE_MASK
+	cmp	lr , #HYP_MODE
+	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT
+	bic	\reg , \reg , #MODE_MASK
+	orr	\reg , \reg , #SVC_MODE
+THUMB(	orr	\reg , \reg , #PSR_T_BIT	)
+	bne	1f
+	orr	\reg, \reg, #PSR_A_BIT
+	adr	lr, BSYM(2f)
+	msr	spsr_cxsf, \reg
+	__MSR_ELR_HYP(14)
+	__ERET
+1:	msr	cpsr_c, \reg
+2:
+.endm
+
+/*
  * STRT/LDRT access macros with ARM and Thumb-2 variants
  */
 #ifdef CONFIG_THUMB2_KERNEL
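
The safe_svcmode_maskall macro added above reads the CPSR and, when the CPU was entered in HYP mode, can only drop to SVC through an exception return: it programs the target PSR into SPSR_hyp, points ELR_hyp (via LR, hence the clobber) at the local label 2:, and executes ERET; otherwise a plain msr cpsr_c write suffices. A minimal C model of the PSR value the macro builds in \reg, using the real constants from asm/ptrace.h but a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Real mode/PSR constants from asm/ptrace.h. */
#define MODE_MASK	0x0000001f
#define SVC_MODE	0x00000013
#define HYP_MODE	0x0000001a
#define PSR_F_BIT	0x00000040	/* FIQ mask */
#define PSR_I_BIT	0x00000080	/* IRQ mask */
#define PSR_A_BIT	0x00000100	/* imprecise abort mask */

/* Hypothetical helper mirroring the value safe_svcmode_maskall computes. */
static uint32_t svc_masked_psr(uint32_t cpsr, int *entered_hyp)
{
	*entered_hyp = (cpsr & MODE_MASK) == HYP_MODE;

	cpsr |= PSR_I_BIT | PSR_F_BIT;	/* orr: mask IRQ and FIQ */
	cpsr &= ~(uint32_t)MODE_MASK;	/* bic: drop the current mode... */
	cpsr |= SVC_MODE;		/* orr: ...and select SVC */
	if (*entered_hyp)
		cpsr |= PSR_A_BIT;	/* extra abort masking on the ERET path */
	return cpsr;
}

int main(void)
{
	int from_hyp;
	printf("from HYP: 0x%08X\n", svc_masked_psr(HYP_MODE, &from_hyp));
	printf("from SVC: 0x%08X\n", svc_masked_psr(SVC_MODE, &from_hyp));
	return 0;
}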
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e4448e16046d..e1489c54cd12 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -49,6 +49,13 @@
  *
  * Unconditionally clean and invalidate the entire cache.
  *
+ * flush_kern_louis()
+ *
+ *	Flush data cache levels up to the level of unification
+ *	inner shareable and invalidate the I-cache.
+ *	Only needed from v7 onwards, falls back to flush_cache_all()
+ *	for all other processor versions.
+ *
  * flush_user_all()
  *
  *	Clean and invalidate all user space cache entries
@@ -97,6 +104,7 @@
 struct cpu_cache_fns {
 	void (*flush_icache_all)(void);
 	void (*flush_kern_all)(void);
+	void (*flush_kern_louis)(void);
 	void (*flush_user_all)(void);
 	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
 
@@ -119,6 +127,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 #define __cpuc_flush_icache_all		cpu_cache.flush_icache_all
 #define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
+#define __cpuc_flush_kern_louis		cpu_cache.flush_kern_louis
 #define __cpuc_flush_user_all		cpu_cache.flush_user_all
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
@@ -139,6 +148,7 @@ extern struct cpu_cache_fns cpu_cache;
 
 extern void __cpuc_flush_icache_all(void);
 extern void __cpuc_flush_kern_all(void);
+extern void __cpuc_flush_kern_louis(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
@@ -204,6 +214,11 @@ static inline void __flush_icache_all(void)
 	__flush_icache_preferred();
 }
 
+/*
+ * Flush caches up to Level of Unification Inner Shareable
+ */
+#define flush_cache_louis()		__cpuc_flush_kern_louis()
+
 #define flush_cache_all()		__cpuc_flush_kern_all()
 
 static inline void vivt_flush_cache_mm(struct mm_struct *mm)
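
The point of the new entry, per the merge message, is that the suspend/resume and CPU hotplug paths only need their data visible to the other inner-shareable observers, not cleaned all the way to memory. A hedged sketch of that usage pattern (the function and its argument are illustrative, not the actual cpu_suspend/__cpu_disable code):

#include <linux/types.h>
#include <asm/cacheflush.h>

/*
 * Illustrative only: a CPU on its way down usually just needs its dirty
 * lines visible to the remaining inner-shareable CPUs, so the LoUIS
 * flush is sufficient; the full flush is only needed when cache state
 * below LoUIS will genuinely be lost.
 */
static void example_cpu_down_flush(bool losing_cache_state)
{
	if (losing_cache_state)
		flush_cache_all();	/* clean/invalidate the whole hierarchy */
	else
		flush_cache_louis();	/* clean/invalidate up to LoUIS only */
}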
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 4f8d2c0dc441..cca9f15704ed 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -132,6 +132,7 @@
 #ifndef MULTI_CACHE
 #define __cpuc_flush_icache_all	__glue(_CACHE,_flush_icache_all)
 #define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
+#define __cpuc_flush_kern_louis		__glue(_CACHE,_flush_kern_cache_louis)
 #define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
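
For single-cache (non-MULTI_CACHE) builds, __glue() token-pastes the configured cache type onto the function suffix at compile time, so a v7-only kernel resolves __cpuc_flush_kern_louis directly to v7_flush_kern_cache_louis with no indirection through cpu_cache. A standalone illustration of the two-level pasting pattern (the v7_ prefix follows the kernel's naming convention; the demo body is a toy):

#include <stdio.h>

/* Two-level paste, as in the kernel's __glue(), so _CACHE expands first. */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

#define _CACHE v7		/* chosen by the single-cache Kconfig */

static void v7_flush_kern_cache_louis(void)
{
	puts("v7 LoUIS flush");	/* stand-in for the real assembly routine */
}

#define __cpuc_flush_kern_louis	__glue(_CACHE, _flush_kern_cache_louis)

int main(void)
{
	__cpuc_flush_kern_louis();	/* resolves to v7_flush_kern_cache_louis() */
	return 0;
}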
diff --git a/arch/arm/include/asm/opcodes-virt.h b/arch/arm/include/asm/opcodes-virt.h
index b85665a96f8e..efcfdf92d9d5 100644
--- a/arch/arm/include/asm/opcodes-virt.h
+++ b/arch/arm/include/asm/opcodes-virt.h
@@ -26,4 +26,14 @@
 	0xF7E08000 | (((imm16) & 0xF000) << 4) | ((imm16) & 0x0FFF)	\
 )
 
+#define __ERET	__inst_arm_thumb32(					\
+	0xE160006E,							\
+	0xF3DE8F00							\
+)
+
+#define __MSR_ELR_HYP(regnum)	__inst_arm_thumb32(			\
+	0xE12EF300 | regnum,						\
+	0xF3808E30 | (regnum << 16)					\
+)
+
 #endif /* ! __ASM_ARM_OPCODES_VIRT_H */
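
Both encodings are emitted as raw instruction words via __inst_arm_thumb32() because assemblers predating the virtualization extensions reject the ERET and MSR ELR_hyp mnemonics. A quick standalone check of what __MSR_ELR_HYP(14) expands to in each instruction set (the wrapper macros here are local copies for the example):

#include <stdio.h>

/* Local copies of the two halves of __MSR_ELR_HYP() for inspection. */
#define ARM_MSR_ELR_HYP(regnum)		(0xE12EF300u | (regnum))
#define THUMB2_MSR_ELR_HYP(regnum)	(0xF3808E30u | ((regnum) << 16))

int main(void)
{
	/* The boot macro uses r14 (LR) to carry the SVC-mode entry point. */
	printf("ARM     MSR ELR_hyp, r14: 0x%08X\n", ARM_MSR_ELR_HYP(14));    /* 0xE12EF30E */
	printf("Thumb-2 MSR ELR_hyp, r14: 0x%08X\n", THUMB2_MSR_ELR_HYP(14)); /* 0xF38E8E30 */
	return 0;
}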
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 44fe998269d9..142d6ae41231 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -44,6 +44,7 @@
 #define IRQ_MODE	0x00000012
 #define SVC_MODE	0x00000013
 #define ABT_MODE	0x00000017
+#define HYP_MODE	0x0000001a
 #define UND_MODE	0x0000001b
 #define SYSTEM_MODE	0x0000001f
 #define MODE32_BIT	0x00000010
diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h
index a7aadbd9a6dd..6a6f1e485f41 100644
--- a/arch/arm/include/asm/vfpmacros.h
+++ b/arch/arm/include/asm/vfpmacros.h
@@ -28,7 +28,7 @@
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
 	tst	\tmp, #HWCAP_VFPv3D16
-	ldceq	p11, cr0, [\base],#32*4		@ FLDMIAD \base!, {d16-d31}
+	ldceql	p11, cr0, [\base],#32*4		@ FLDMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		@ step over unused register space
 #else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
@@ -52,7 +52,7 @@
 	ldr	\tmp, =elf_hwcap		@ may not have MVFR regs
 	ldr	\tmp, [\tmp, #0]
 	tst	\tmp, #HWCAP_VFPv3D16
-	stceq	p11, cr0, [\base],#32*4		@ FSTMIAD \base!, {d16-d31}
+	stceql	p11, cr0, [\base],#32*4		@ FSTMIAD \base!, {d16-d31}
 	addne	\base, \base, #32*4		@ step over unused register space
 #else
 	VFPFMRX	\tmp, MVFR0			@ Media and VFP Feature Register 0
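
The VFP fix is a single letter per site: ldceq/stceq become ldceql/stceql. As I read the assembler syntax, the trailing 'l' selects the long form of the coprocessor transfer (LDCL/STCL rather than LDC/STC), which is what the FLDMIAD/FSTMIAD comments require for moving the 64-bit d16-d31 registers; without it, v6+ kernels saved and restored the upper register bank incorrectly, the bug Michael Olbrich identified. The 128-byte pointer step is the same either way, as this small standalone check confirms (arithmetic only, not kernel code):

#include <stdio.h>

int main(void)
{
	/*
	 * d16-d31 is 16 double-precision registers of 8 bytes each,
	 * exactly the #32*4 (128-byte) step both macros apply to \base
	 * whether the block is transferred or skipped.
	 */
	int regs = 31 - 16 + 1;
	printf("%d regs * 8 bytes = %d bytes (= 32*4 words = %d bytes)\n",
	       regs, regs * 8, 32 * 4);
	return 0;
}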
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
new file mode 100644
index 000000000000..86164df86cb4
--- /dev/null
+++ b/arch/arm/include/asm/virt.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2012 Linaro Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef VIRT_H
+#define VIRT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Flag indicating that the kernel was not entered in the same mode on every
+ * CPU.  The zImage loader stashes this value in an SPSR, so we need an
+ * architecturally defined flag bit here (the N flag, as it happens).
+ */
+#define BOOT_CPU_MODE_MISMATCH	(1 << 31)
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_ARM_VIRT_EXT
+/*
+ * __boot_cpu_mode records what mode the primary CPU was booted in.
+ * A correctly-implemented bootloader must start all CPUs in the same mode:
+ * if it fails to do this, the flag BOOT_CPU_MODE_MISMATCH is set to indicate
+ * that some CPU(s) were booted in a different mode.
+ *
+ * This allows the kernel to flag an error when the secondaries have come up.
+ */
+extern int __boot_cpu_mode;
+
+void __hyp_set_vectors(unsigned long phys_vector_base);
+unsigned long __hyp_get_vectors(void);
+#else
+#define __boot_cpu_mode	(SVC_MODE)
+#endif
+
+#ifndef ZIMAGE
+void hyp_mode_check(void);
+
+/* Reports the availability of HYP mode */
+static inline bool is_hyp_mode_available(void)
+{
+	return ((__boot_cpu_mode & MODE_MASK) == HYP_MODE &&
+		!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH));
+}
+
+/* Check if the bootloader has booted CPUs in different modes */
+static inline bool is_hyp_mode_mismatched(void)
+{
+	return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* ! VIRT_H */
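
Taken together, virt.h is the C-level contract for the boot-mode handshake: the boot code records the mode the primary CPU was entered in, and anything that later wants the virtualization extensions checks it. A hedged sketch of a consumer (the init function is illustrative, not an actual kernel entry point; __hyp_set_vectors() is the real hook declared above):

#include <linux/errno.h>
#include <asm/virt.h>

/* Illustrative init hook, not an actual kernel entry point. */
static int example_hyp_subsystem_init(void)
{
	/* CPUs that came up in mixed modes cannot safely use HYP. */
	if (is_hyp_mode_mismatched())
		return -EINVAL;

	/* Booted in SVC, or no virtualization extensions: nothing to do. */
	if (!is_hyp_mode_available())
		return -ENODEV;

	/*
	 * Safe to install the HYP vectors, e.g. with __hyp_set_vectors(),
	 * and make use of the virtualization extensions from here on.
	 */
	return 0;
}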