author     Jiang Liu <liuj97@gmail.com>	2014-01-07 09:17:09 -0500
committer  Catalin Marinas <catalin.marinas@arm.com>	2014-01-08 10:21:29 -0500
commit     ae16480785de1da84f21d1698f304a52f9790c49 (patch)
tree       dc5ecc5f4c146c805321d1c226d47b4add3337d0 /arch/arm64
parent     b11a64a48ccc7ca0ceb33544206934fbd3cdbb22 (diff)
arm64: introduce interfaces to hotpatch kernel and module code
Introduce three interfaces to patch kernel and module code:

  aarch64_insn_patch_text_nosync():
	patch code without synchronization; it is the caller's
	responsibility to synchronize all CPUs if needed.
  aarch64_insn_patch_text_sync():
	patch code and always synchronize with stop_machine().
  aarch64_insn_patch_text():
	patch code and synchronize with stop_machine() if needed.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Jiang Liu <liuj97@gmail.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
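For illustration only (not part of this patch), here is a minimal sketch of how a caller might use the new interface to put an A64 NOP back at a patch site. The helper name "example_patch_nop" and the "patch_site" argument are hypothetical; 0xd503201f is the NOP/HINT encoding matched by aarch64_insn_is_nop() in the code below.

#include <asm/insn.h>

/* Hypothetical helper, shown only to illustrate the new interface. */
static int example_patch_nop(void *patch_site)
{
	void *addrs[] = { patch_site };
	u32 insns[] = { 0xd503201f };	/* A64 NOP (HINT #0) */

	/*
	 * For a single hotpatch-safe instruction this takes the cheap
	 * nosync-plus-IPI path; otherwise it falls back to stop_machine().
	 */
	return aarch64_insn_patch_text(addrs, insns, 1);
}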
Diffstat (limited to 'arch/arm64')
-rw-r--r--	arch/arm64/include/asm/insn.h	10
-rw-r--r--	arch/arm64/kernel/insn.c	119
2 files changed, 128 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 1bdc44c27456..bf8085fdc140 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -18,6 +18,9 @@
 #define __ASM_INSN_H
 #include <linux/types.h>
 
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE	4
+
 /*
  * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  * Section C3.1 "A64 instruction index by encoding":
@@ -70,8 +73,13 @@ __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
 
 bool aarch64_insn_is_nop(u32 insn);
 
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
 enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
-
 bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
 
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
 #endif	/* __ASM_INSN_H */
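As a sketch of the header interface above (again not part of this patch), a hypothetical helper such as "example_can_patch_live" could read the current instruction and ask whether replacing it with a given "new_insn" would be safe without stopping the machine; both names are placeholders for illustration.

#include <asm/insn.h>

/* Hypothetical: can "site" be patched to "new_insn" without stop_machine()? */
static bool example_can_patch_live(void *site, u32 new_insn)
{
	u32 old_insn;

	if (aarch64_insn_read(site, &old_insn))
		return false;

	return aarch64_insn_hotpatch_safe(old_insn, new_insn);
}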
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 56a2498ab3e3..b9dac57e580a 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -16,6 +16,10 @@
  */
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
 #include <asm/insn.h>
 
 static int aarch64_insn_encoding_class[] = {
@@ -60,6 +64,28 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
 	}
 }
 
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+	int ret;
+	u32 val;
+
+	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+	if (!ret)
+		*insnp = le32_to_cpu(val);
+
+	return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+	insn = cpu_to_le32(insn);
+	return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+}
+
 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
 {
 	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
@@ -89,3 +115,96 @@ bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
 	return __aarch64_insn_hotpatch_safe(old_insn) &&
 	       __aarch64_insn_hotpatch_safe(new_insn);
 }
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+	u32 *tp = addr;
+	int ret;
+
+	/* A64 instructions must be word aligned */
+	if ((uintptr_t)tp & 0x3)
+		return -EINVAL;
+
+	ret = aarch64_insn_write(tp, insn);
+	if (ret == 0)
+		flush_icache_range((uintptr_t)tp,
+				   (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+	return ret;
+}
+
+struct aarch64_insn_patch {
+	void		**text_addrs;
+	u32		*new_insns;
+	int		insn_cnt;
+	atomic_t	cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+	int i, ret = 0;
+	struct aarch64_insn_patch *pp = arg;
+
+	/* The first CPU becomes master */
+	if (atomic_inc_return(&pp->cpu_count) == 1) {
+		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+							     pp->new_insns[i]);
+		/*
+		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
+		 * which ends with a "dsb; isb" pair guaranteeing global
+		 * visibility.
+		 */
+		atomic_set(&pp->cpu_count, -1);
+	} else {
+		while (atomic_read(&pp->cpu_count) != -1)
+			cpu_relax();
+		isb();
+	}
+
+	return ret;
+}
+
+int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+{
+	struct aarch64_insn_patch patch = {
+		.text_addrs = addrs,
+		.new_insns = insns,
+		.insn_cnt = cnt,
+		.cpu_count = ATOMIC_INIT(0),
+	};
+
+	if (cnt <= 0)
+		return -EINVAL;
+
+	return stop_machine(aarch64_insn_patch_text_cb, &patch,
+			    cpu_online_mask);
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+	int ret;
+	u32 insn;
+
+	/* Unsafe to patch multiple instructions without synchronization */
+	if (cnt == 1) {
+		ret = aarch64_insn_read(addrs[0], &insn);
+		if (ret)
+			return ret;
+
+		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
+			/*
+			 * The ARMv8 architecture doesn't guarantee all CPUs
+			 * see the new instruction after returning from
+			 * aarch64_insn_patch_text_nosync(). So send IPIs to
+			 * all other CPUs to achieve instruction
+			 * synchronization.
+			 */
+			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
+			kick_all_cpus_sync();
+			return ret;
+		}
+	}
+
+	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
+}
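Finally, a sketch (not part of this patch) of the batch case: with more than one instruction, aarch64_insn_patch_text() always routes through aarch64_insn_patch_text_sync(), so all online CPUs rendezvous in aarch64_insn_patch_text_cb() under stop_machine() before execution resumes. The helper "example_patch_pair" and its arguments are hypothetical.

#include <asm/insn.h>

/* Hypothetical helper: patch two sites atomically with respect to execution. */
static int example_patch_pair(void *site_a, u32 insn_a,
			      void *site_b, u32 insn_b)
{
	void *addrs[] = { site_a, site_b };
	u32 insns[] = { insn_a, insn_b };

	/* cnt > 1, so this always takes the stop_machine() path. */
	return aarch64_insn_patch_text(addrs, insns, 2);
}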