aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2016-09-08 08:55:38 -0400
committerWill Deacon <will.deacon@arm.com>2016-09-09 06:43:50 -0400
commitadf7589997927b1d84a5d003027b866bbef61ef2 (patch)
treef3bb449f2bda4e8992983b5d87e7dab1415fa17e
parent1f3d8699be82583c713e2a1099c597a740ebaf4d (diff)
arm64: simplify sysreg manipulation
A while back we added {read,write}_sysreg accessors to handle accesses to system registers, without the usual boilerplate asm volatile, temporary variable, etc. This patch makes use of these across arm64 to make code shorter and clearer. For sequences with a trailing ISB, the existing isb() macro is also used so that asm blocks can be removed entirely. A few uses of inline assembly for msr/mrs are left as-is. Those manipulating sp_el0 for the current thread_info value have special clobber requirements. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/include/asm/hw_breakpoint.h13
-rw-r--r--arch/arm64/include/asm/mmu_context.h27
-rw-r--r--arch/arm64/include/asm/pgtable-hwdef.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h20
-rw-r--r--arch/arm64/include/asm/thread_info.h3
-rw-r--r--arch/arm64/kernel/cacheinfo.c8
-rw-r--r--arch/arm64/kernel/debug-monitors.c8
-rw-r--r--arch/arm64/kernel/process.c14
-rw-r--r--arch/arm64/kernel/sys_compat.c2
9 files changed, 44 insertions, 52 deletions
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 2487891dec46..9510ace570e2 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -18,6 +18,7 @@
18 18
19#include <asm/cputype.h> 19#include <asm/cputype.h>
20#include <asm/cpufeature.h> 20#include <asm/cpufeature.h>
21#include <asm/sysreg.h>
21#include <asm/virt.h> 22#include <asm/virt.h>
22 23
23#ifdef __KERNEL__ 24#ifdef __KERNEL__
@@ -98,18 +99,18 @@ static inline void decode_ctrl_reg(u32 reg,
98#define AARCH64_DBG_REG_WCR (AARCH64_DBG_REG_WVR + ARM_MAX_WRP) 99#define AARCH64_DBG_REG_WCR (AARCH64_DBG_REG_WVR + ARM_MAX_WRP)
99 100
100/* Debug register names. */ 101/* Debug register names. */
101#define AARCH64_DBG_REG_NAME_BVR "bvr" 102#define AARCH64_DBG_REG_NAME_BVR bvr
102#define AARCH64_DBG_REG_NAME_BCR "bcr" 103#define AARCH64_DBG_REG_NAME_BCR bcr
103#define AARCH64_DBG_REG_NAME_WVR "wvr" 104#define AARCH64_DBG_REG_NAME_WVR wvr
104#define AARCH64_DBG_REG_NAME_WCR "wcr" 105#define AARCH64_DBG_REG_NAME_WCR wcr
105 106
106/* Accessor macros for the debug registers. */ 107/* Accessor macros for the debug registers. */
107#define AARCH64_DBG_READ(N, REG, VAL) do {\ 108#define AARCH64_DBG_READ(N, REG, VAL) do {\
108 asm volatile("mrs %0, dbg" REG #N "_el1" : "=r" (VAL));\ 109 VAL = read_sysreg(dbg##REG##N##_el1);\
109} while (0) 110} while (0)
110 111
111#define AARCH64_DBG_WRITE(N, REG, VAL) do {\ 112#define AARCH64_DBG_WRITE(N, REG, VAL) do {\
112 asm volatile("msr dbg" REG #N "_el1, %0" :: "r" (VAL));\ 113 write_sysreg(VAL, dbg##REG##N##_el1);\
113} while (0) 114} while (0)
114 115
115struct task_struct; 116struct task_struct;
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index b1892a0dbcb0..e5c24b47dba5 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -27,16 +27,14 @@
27#include <asm-generic/mm_hooks.h> 27#include <asm-generic/mm_hooks.h>
28#include <asm/cputype.h> 28#include <asm/cputype.h>
29#include <asm/pgtable.h> 29#include <asm/pgtable.h>
30#include <asm/sysreg.h>
30#include <asm/tlbflush.h> 31#include <asm/tlbflush.h>
31 32
32#ifdef CONFIG_PID_IN_CONTEXTIDR 33#ifdef CONFIG_PID_IN_CONTEXTIDR
33static inline void contextidr_thread_switch(struct task_struct *next) 34static inline void contextidr_thread_switch(struct task_struct *next)
34{ 35{
35 asm( 36 write_sysreg(task_pid_nr(next), contextidr_el1);
36 " msr contextidr_el1, %0\n" 37 isb();
37 " isb"
38 :
39 : "r" (task_pid_nr(next)));
40} 38}
41#else 39#else
42static inline void contextidr_thread_switch(struct task_struct *next) 40static inline void contextidr_thread_switch(struct task_struct *next)
@@ -51,11 +49,8 @@ static inline void cpu_set_reserved_ttbr0(void)
51{ 49{
52 unsigned long ttbr = virt_to_phys(empty_zero_page); 50 unsigned long ttbr = virt_to_phys(empty_zero_page);
53 51
54 asm( 52 write_sysreg(ttbr, ttbr0_el1);
55 " msr ttbr0_el1, %0 // set TTBR0\n" 53 isb();
56 " isb"
57 :
58 : "r" (ttbr));
59} 54}
60 55
61/* 56/*
@@ -81,13 +76,11 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz)
81 if (!__cpu_uses_extended_idmap()) 76 if (!__cpu_uses_extended_idmap())
82 return; 77 return;
83 78
84 asm volatile ( 79 tcr = read_sysreg(tcr_el1);
85 " mrs %0, tcr_el1 ;" 80 tcr &= ~TCR_T0SZ_MASK;
86 " bfi %0, %1, %2, %3 ;" 81 tcr |= t0sz << TCR_T0SZ_OFFSET;
87 " msr tcr_el1, %0 ;" 82 write_sysreg(tcr, tcr_el1);
88 " isb" 83 isb();
89 : "=&r" (tcr)
90 : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
91} 84}
92 85
93#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)) 86#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS))
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index c3ae239db3ee..eb0c2bd90de9 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -208,6 +208,7 @@
208#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) 208#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET)
209#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) 209#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x))
210#define TCR_TxSZ_WIDTH 6 210#define TCR_TxSZ_WIDTH 6
211#define TCR_T0SZ_MASK (((UL(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)
211 212
212#define TCR_IRGN0_SHIFT 8 213#define TCR_IRGN0_SHIFT 8
213#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT) 214#define TCR_IRGN0_MASK (UL(3) << TCR_IRGN0_SHIFT)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 39fed2e56e98..e91aef2bb33d 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -253,16 +253,6 @@ asm(
253" .endm\n" 253" .endm\n"
254); 254);
255 255
256static inline void config_sctlr_el1(u32 clear, u32 set)
257{
258 u32 val;
259
260 asm volatile("mrs %0, sctlr_el1" : "=r" (val));
261 val &= ~clear;
262 val |= set;
263 asm volatile("msr sctlr_el1, %0" : : "r" (val));
264}
265
266/* 256/*
267 * Unlike read_cpuid, calls to read_sysreg are never expected to be 257 * Unlike read_cpuid, calls to read_sysreg are never expected to be
268 * optimized away or replaced with synthetic values. 258 * optimized away or replaced with synthetic values.
@@ -283,6 +273,16 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
283 : : "rZ" (__val)); \ 273 : : "rZ" (__val)); \
284} while (0) 274} while (0)
285 275
276static inline void config_sctlr_el1(u32 clear, u32 set)
277{
278 u32 val;
279
280 val = read_sysreg(sctlr_el1);
281 val &= ~clear;
282 val |= set;
283 write_sysreg(val, sctlr_el1);
284}
285
286#endif 286#endif
287 287
288#endif /* __ASM_SYSREG_H */ 288#endif /* __ASM_SYSREG_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index abd64bd1f6d9..e9ea5a6bd449 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -75,6 +75,9 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__;
75 75
76/* 76/*
77 * struct thread_info can be accessed directly via sp_el0. 77 * struct thread_info can be accessed directly via sp_el0.
78 *
79 * We don't use read_sysreg() as we want the compiler to cache the value where
80 * possible.
78 */ 81 */
79static inline struct thread_info *current_thread_info(void) 82static inline struct thread_info *current_thread_info(void)
80{ 83{
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
index b8629d52fba9..9617301f76b5 100644
--- a/arch/arm64/kernel/cacheinfo.c
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -39,7 +39,7 @@ static inline enum cache_type get_cache_type(int level)
39 39
40 if (level > MAX_CACHE_LEVEL) 40 if (level > MAX_CACHE_LEVEL)
41 return CACHE_TYPE_NOCACHE; 41 return CACHE_TYPE_NOCACHE;
42 asm volatile ("mrs %x0, clidr_el1" : "=r" (clidr)); 42 clidr = read_sysreg(clidr_el1);
43 return CLIDR_CTYPE(clidr, level); 43 return CLIDR_CTYPE(clidr, level);
44} 44}
45 45
@@ -55,11 +55,9 @@ u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
55 55
56 WARN_ON(preemptible()); 56 WARN_ON(preemptible());
57 57
58 /* Put value into CSSELR */ 58 write_sysreg(csselr, csselr_el1);
59 asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
60 isb(); 59 isb();
61 /* Read result out of CCSIDR */ 60 ccsidr = read_sysreg(ccsidr_el1);
62 asm volatile("mrs %x0, ccsidr_el1" : "=r" (ccsidr));
63 61
64 return ccsidr; 62 return ccsidr;
65} 63}
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index d97fdc1f6a38..73ae90ef434c 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -46,16 +46,14 @@ static void mdscr_write(u32 mdscr)
46{ 46{
47 unsigned long flags; 47 unsigned long flags;
48 local_dbg_save(flags); 48 local_dbg_save(flags);
49 asm volatile("msr mdscr_el1, %0" :: "r" (mdscr)); 49 write_sysreg(mdscr, mdscr_el1);
50 local_dbg_restore(flags); 50 local_dbg_restore(flags);
51} 51}
52NOKPROBE_SYMBOL(mdscr_write); 52NOKPROBE_SYMBOL(mdscr_write);
53 53
54static u32 mdscr_read(void) 54static u32 mdscr_read(void)
55{ 55{
56 u32 mdscr; 56 return read_sysreg(mdscr_el1);
57 asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
58 return mdscr;
59} 57}
60NOKPROBE_SYMBOL(mdscr_read); 58NOKPROBE_SYMBOL(mdscr_read);
61 59
@@ -134,7 +132,7 @@ NOKPROBE_SYMBOL(disable_debug_monitors);
134 */ 132 */
135static int clear_os_lock(unsigned int cpu) 133static int clear_os_lock(unsigned int cpu)
136{ 134{
137 asm volatile("msr oslar_el1, %0" : : "r" (0)); 135 write_sysreg(0, oslar_el1);
138 isb(); 136 isb();
139 return 0; 137 return 0;
140} 138}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 6cd2612236dc..a4f5f766af08 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -202,7 +202,7 @@ void show_regs(struct pt_regs * regs)
202 202
203static void tls_thread_flush(void) 203static void tls_thread_flush(void)
204{ 204{
205 asm ("msr tpidr_el0, xzr"); 205 write_sysreg(0, tpidr_el0);
206 206
207 if (is_compat_task()) { 207 if (is_compat_task()) {
208 current->thread.tp_value = 0; 208 current->thread.tp_value = 0;
@@ -213,7 +213,7 @@ static void tls_thread_flush(void)
213 * with a stale shadow state during context switch. 213 * with a stale shadow state during context switch.
214 */ 214 */
215 barrier(); 215 barrier();
216 asm ("msr tpidrro_el0, xzr"); 216 write_sysreg(0, tpidrro_el0);
217 } 217 }
218} 218}
219 219
@@ -253,7 +253,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
253 * Read the current TLS pointer from tpidr_el0 as it may be 253 * Read the current TLS pointer from tpidr_el0 as it may be
254 * out-of-sync with the saved value. 254 * out-of-sync with the saved value.
255 */ 255 */
256 asm("mrs %0, tpidr_el0" : "=r" (*task_user_tls(p))); 256 *task_user_tls(p) = read_sysreg(tpidr_el0);
257 257
258 if (stack_start) { 258 if (stack_start) {
259 if (is_compat_thread(task_thread_info(p))) 259 if (is_compat_thread(task_thread_info(p)))
@@ -289,17 +289,15 @@ static void tls_thread_switch(struct task_struct *next)
289{ 289{
290 unsigned long tpidr, tpidrro; 290 unsigned long tpidr, tpidrro;
291 291
292 asm("mrs %0, tpidr_el0" : "=r" (tpidr)); 292 tpidr = read_sysreg(tpidr_el0);
293 *task_user_tls(current) = tpidr; 293 *task_user_tls(current) = tpidr;
294 294
295 tpidr = *task_user_tls(next); 295 tpidr = *task_user_tls(next);
296 tpidrro = is_compat_thread(task_thread_info(next)) ? 296 tpidrro = is_compat_thread(task_thread_info(next)) ?
297 next->thread.tp_value : 0; 297 next->thread.tp_value : 0;
298 298
299 asm( 299 write_sysreg(tpidr, tpidr_el0);
300 " msr tpidr_el0, %0\n" 300 write_sysreg(tpidrro, tpidrro_el0);
301 " msr tpidrro_el0, %1"
302 : : "r" (tpidr), "r" (tpidrro));
303} 301}
304 302
305/* Restore the UAO state depending on next's addr_limit */ 303/* Restore the UAO state depending on next's addr_limit */
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 28c511b06edf..abaf582fc7a8 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -94,7 +94,7 @@ long compat_arm_syscall(struct pt_regs *regs)
94 * See comment in tls_thread_flush. 94 * See comment in tls_thread_flush.
95 */ 95 */
96 barrier(); 96 barrier();
97 asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); 97 write_sysreg(regs->regs[0], tpidrro_el0);
98 return 0; 98 return 0;
99 99
100 default: 100 default: