Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kernel/kprobes/core.c   | 20
-rw-r--r--   arch/x86/kernel/kprobes/ftrace.c |  2
-rw-r--r--   arch/x86/kernel/kprobes/opt.c    | 24
3 files changed, 23 insertions, 23 deletions
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9b80aec1ea1a..bd717137ae77 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -159,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-int __kprobes can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes)
 {
 	kprobe_opcode_t opcode;
 	kprobe_opcode_t *orig_opcodes = opcodes;
@@ -260,7 +260,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
 }
 
 /* Check if paddr is at an instruction boundary */
-static int __kprobes can_probe(unsigned long paddr)
+static int can_probe(unsigned long paddr)
 {
 	unsigned long addr, __addr, offset = 0;
 	struct insn insn;
@@ -299,7 +299,7 @@ static int __kprobes can_probe(unsigned long paddr)
 /*
  * Returns non-zero if opcode modifies the interrupt flag.
  */
-static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
+static int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	/* Skip prefixes */
 	insn = skip_prefixes(insn);
@@ -322,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
  * If not, return null.
  * Only applicable to 64-bit x86.
  */
-int __kprobes __copy_instruction(u8 *dest, u8 *src)
+int __copy_instruction(u8 *dest, u8 *src)
 {
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
@@ -365,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
 	return insn.length;
 }
 
-static int __kprobes arch_copy_kprobe(struct kprobe *p)
+static int arch_copy_kprobe(struct kprobe *p)
 {
 	int ret;
 
@@ -392,7 +392,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p)
 	return 0;
 }
 
-int __kprobes arch_prepare_kprobe(struct kprobe *p)
+int arch_prepare_kprobe(struct kprobe *p)
 {
 	if (alternatives_text_reserved(p->addr, p->addr))
 		return -EINVAL;
@@ -407,17 +407,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 	return arch_copy_kprobe(p);
 }
 
-void __kprobes arch_arm_kprobe(struct kprobe *p)
+void arch_arm_kprobe(struct kprobe *p)
 {
 	text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1);
 }
 
-void __kprobes arch_disarm_kprobe(struct kprobe *p)
+void arch_disarm_kprobe(struct kprobe *p)
 {
 	text_poke(p->addr, &p->opcode, 1);
 }
 
-void __kprobes arch_remove_kprobe(struct kprobe *p)
+void arch_remove_kprobe(struct kprobe *p)
 {
 	if (p->ainsn.insn) {
 		free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1));
@@ -1060,7 +1060,7 @@ int __init arch_init_kprobes(void)
 	return 0;
 }
 
-int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+int arch_trampoline_kprobe(struct kprobe *p)
 {
 	return 0;
 }
diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c
index 23ef5c556f06..dcaa1310ccfd 100644
--- a/arch/x86/kernel/kprobes/ftrace.c
+++ b/arch/x86/kernel/kprobes/ftrace.c
@@ -85,7 +85,7 @@ end:
 	local_irq_restore(flags);
 }
 
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
 {
 	p->ainsn.insn = NULL;
 	p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 898160b42e43..fba7fb075e8a 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -77,7 +77,7 @@ found:
 }
 
 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
-static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
 {
 #ifdef CONFIG_X86_64
 	*addr++ = 0x48;
@@ -169,7 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_
 	local_irq_restore(flags);
 }
 
-static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+static int copy_optimized_instructions(u8 *dest, u8 *src)
 {
 	int len = 0, ret;
 
@@ -189,7 +189,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
 }
 
 /* Check whether insn is indirect jump */
-static int __kprobes insn_is_indirect_jump(struct insn *insn)
+static int insn_is_indirect_jump(struct insn *insn)
 {
 	return ((insn->opcode.bytes[0] == 0xff &&
 		(X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
@@ -224,7 +224,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
 }
 
 /* Decode whole function to ensure any instructions don't jump into target */
-static int __kprobes can_optimize(unsigned long paddr)
+static int can_optimize(unsigned long paddr)
 {
 	unsigned long addr, size = 0, offset = 0;
 	struct insn insn;
@@ -275,7 +275,7 @@ static int __kprobes can_optimize(unsigned long paddr)
 }
 
 /* Check optimized_kprobe can actually be optimized. */
-int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+int arch_check_optimized_kprobe(struct optimized_kprobe *op)
 {
 	int i;
 	struct kprobe *p;
@@ -290,15 +290,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
 }
 
 /* Check the addr is within the optimized instructions. */
-int __kprobes
-arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+				 unsigned long addr)
 {
 	return ((unsigned long)op->kp.addr <= addr &&
 		(unsigned long)op->kp.addr + op->optinsn.size > addr);
 }
 
 /* Free optimized instruction slot */
-static __kprobes
+static
 void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
 {
 	if (op->optinsn.insn) {
@@ -308,7 +308,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
 	}
 }
 
-void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 {
 	__arch_remove_optimized_kprobe(op, 1);
 }
@@ -318,7 +318,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
 * Target instructions MUST be relocatable (checked inside)
 * This is called when new aggr(opt)probe is allocated or reused.
 */
-int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 {
 	u8 *buf;
 	int ret;
@@ -372,7 +372,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
 * Replace breakpoints (int3) with relative jumps.
 * Caller must call with locking kprobe_mutex and text_mutex.
 */
-void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+void arch_optimize_kprobes(struct list_head *oplist)
 {
 	struct optimized_kprobe *op, *tmp;
 	u8 insn_buf[RELATIVEJUMP_SIZE];
@@ -398,7 +398,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
 }
 
 /* Replace a relative jump with a breakpoint (int3). */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
 	u8 insn_buf[RELATIVEJUMP_SIZE];
 