author     Catalin Marinas <catalin.marinas@arm.com>   2016-07-21 13:20:41 -0400
committer  Catalin Marinas <catalin.marinas@arm.com>   2016-07-21 13:20:41 -0400
commit     a95b0644b38c16c40b753224671b919b9af0b73c (patch)
tree       62f4b54f35dd3a99d6a3c2337bddcf1c72c32c8c
parent     e75118a7b581b19b08282c7819c1ec6f68b91b79 (diff)
parent     f7e35c5ba4322838ce84b23a2f1a6d6b7f0b57ec (diff)
Merge branch 'for-next/kprobes' into for-next/core
* kprobes:
arm64: kprobes: Add KASAN instrumentation around stack accesses
arm64: kprobes: Cleanup jprobe_return
arm64: kprobes: Fix overflow when saving stack
arm64: kprobes: WARN if attempting to step with PSTATE.D=1
kprobes: Add arm64 case in kprobe example module
arm64: Add kernel return probes support (kretprobes)
arm64: Add trampoline code for kretprobes
arm64: kprobes instruction simulation support
arm64: Treat all entry code as non-kprobe-able
arm64: Blacklist non-kprobe-able symbol
arm64: Kprobes with single stepping support
arm64: add conditional instruction simulation support
arm64: Add more test functions to insn.c
arm64: Add HAVE_REGS_AND_STACK_ACCESS_API feature
27 files changed, 1781 insertions(+), 10 deletions(-)
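One of the patches in this series ("kprobes: Add arm64 case in kprobe example module") extends the generic sample client to print arm64 registers. For orientation before the diffs, below is a minimal sketch of what a kprobes client module looks like once this merge is in place; the probed symbol ("do_fork") and the printed fields are illustrative assumptions, and the canonical example lives under samples/kprobes/ in the tree.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Hypothetical probe target; any symbol not blacklisted by this series will do. */
static struct kprobe kp = {
        .symbol_name    = "do_fork",
};

/* Called just before the probed instruction is single-stepped or simulated. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        pr_info("pre: addr = %p, pc = 0x%llx, pstate = 0x%llx\n",
                p->addr, regs->pc, regs->pstate);
        return 0;
}

/* Called once the stepped/simulated instruction has completed. */
static void handler_post(struct kprobe *p, struct pt_regs *regs,
                         unsigned long flags)
{
        pr_info("post: addr = %p, pstate = 0x%llx\n", p->addr, regs->pstate);
}

static int __init kprobe_example_init(void)
{
        kp.pre_handler = handler_pre;
        kp.post_handler = handler_post;
        return register_kprobe(&kp);
}

static void __exit kprobe_example_exit(void)
{
        unregister_kprobe(&kp);
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");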
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 51622ba7c4a6..d3c0c23703b6 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
| @@ -121,7 +121,6 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
| 121 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) | 121 | #define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0)) |
| 122 | 122 | ||
| 123 | extern int regs_query_register_offset(const char *name); | 123 | extern int regs_query_register_offset(const char *name); |
| 124 | extern const char *regs_query_register_name(unsigned int offset); | ||
| 125 | extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr); | 124 | extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr); |
| 126 | extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | 125 | extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, |
| 127 | unsigned int n); | 126 | unsigned int n); |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b196bf99320..ac4746f454ed 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
| @@ -86,8 +86,11 @@ config ARM64 | |||
| 86 | select HAVE_PERF_EVENTS | 86 | select HAVE_PERF_EVENTS |
| 87 | select HAVE_PERF_REGS | 87 | select HAVE_PERF_REGS |
| 88 | select HAVE_PERF_USER_STACK_DUMP | 88 | select HAVE_PERF_USER_STACK_DUMP |
| 89 | select HAVE_REGS_AND_STACK_ACCESS_API | ||
| 89 | select HAVE_RCU_TABLE_FREE | 90 | select HAVE_RCU_TABLE_FREE |
| 90 | select HAVE_SYSCALL_TRACEPOINTS | 91 | select HAVE_SYSCALL_TRACEPOINTS |
| 92 | select HAVE_KPROBES | ||
| 93 | select HAVE_KRETPROBES if HAVE_KPROBES | ||
| 91 | select IOMMU_DMA if IOMMU_SUPPORT | 94 | select IOMMU_DMA if IOMMU_SUPPORT |
| 92 | select IRQ_DOMAIN | 95 | select IRQ_DOMAIN |
| 93 | select IRQ_FORCED_THREADING | 96 | select IRQ_FORCED_THREADING |
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7c876c..4b6b3f72a215 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
| @@ -66,6 +66,11 @@ | |||
| 66 | 66 | ||
| 67 | #define CACHE_FLUSH_IS_SAFE 1 | 67 | #define CACHE_FLUSH_IS_SAFE 1 |
| 68 | 68 | ||
| 69 | /* kprobes BRK opcodes with ESR encoding */ | ||
| 70 | #define BRK64_ESR_MASK 0xFFFF | ||
| 71 | #define BRK64_ESR_KPROBES 0x0004 | ||
| 72 | #define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5)) | ||
| 73 | |||
| 69 | /* AArch32 */ | 74 | /* AArch32 */ |
| 70 | #define DBG_ESR_EVT_BKPT 0x4 | 75 | #define DBG_ESR_EVT_BKPT 0x4 |
| 71 | #define DBG_ESR_EVT_VECC 0x5 | 76 | #define DBG_ESR_EVT_VECC 0x5 |
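The three definitions added to debug-monitors.h give kprobes its own BRK immediate, so brk_handler() (see the debug-monitors.c hunk further down) can tell a kprobe breakpoint apart from kgdb's by the ESR comment field. As a worked example of the encoding, assuming AARCH64_BREAK_MON is the monitor BRK base opcode 0xd4200000 defined elsewhere in this header:

/*
 * BRK #imm16 places imm16 in instruction bits [20:5], and the same value
 * is reported in the ESR_ELx ISS field when the breakpoint traps.
 *
 *   BRK64_OPCODE_KPROBES = AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5)
 *                        = 0xd4200000        | (0x0004 << 5)
 *                        = 0xd4200080             i.e. "brk #0x4"
 *
 * brk_handler() recovers the immediate with (esr & BRK64_ESR_MASK) and
 * compares it against BRK64_ESR_KPROBES to route the exception.
 */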
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 30e50eb54a67..1dbaa901d7e5 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
| @@ -120,6 +120,29 @@ enum aarch64_insn_register { | |||
| 120 | AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */ | 120 | AARCH64_INSN_REG_SP = 31 /* Stack pointer: as load/store base reg */ |
| 121 | }; | 121 | }; |
| 122 | 122 | ||
| 123 | enum aarch64_insn_special_register { | ||
| 124 | AARCH64_INSN_SPCLREG_SPSR_EL1 = 0xC200, | ||
| 125 | AARCH64_INSN_SPCLREG_ELR_EL1 = 0xC201, | ||
| 126 | AARCH64_INSN_SPCLREG_SP_EL0 = 0xC208, | ||
| 127 | AARCH64_INSN_SPCLREG_SPSEL = 0xC210, | ||
| 128 | AARCH64_INSN_SPCLREG_CURRENTEL = 0xC212, | ||
| 129 | AARCH64_INSN_SPCLREG_DAIF = 0xDA11, | ||
| 130 | AARCH64_INSN_SPCLREG_NZCV = 0xDA10, | ||
| 131 | AARCH64_INSN_SPCLREG_FPCR = 0xDA20, | ||
| 132 | AARCH64_INSN_SPCLREG_DSPSR_EL0 = 0xDA28, | ||
| 133 | AARCH64_INSN_SPCLREG_DLR_EL0 = 0xDA29, | ||
| 134 | AARCH64_INSN_SPCLREG_SPSR_EL2 = 0xE200, | ||
| 135 | AARCH64_INSN_SPCLREG_ELR_EL2 = 0xE201, | ||
| 136 | AARCH64_INSN_SPCLREG_SP_EL1 = 0xE208, | ||
| 137 | AARCH64_INSN_SPCLREG_SPSR_INQ = 0xE218, | ||
| 138 | AARCH64_INSN_SPCLREG_SPSR_ABT = 0xE219, | ||
| 139 | AARCH64_INSN_SPCLREG_SPSR_UND = 0xE21A, | ||
| 140 | AARCH64_INSN_SPCLREG_SPSR_FIQ = 0xE21B, | ||
| 141 | AARCH64_INSN_SPCLREG_SPSR_EL3 = 0xF200, | ||
| 142 | AARCH64_INSN_SPCLREG_ELR_EL3 = 0xF201, | ||
| 143 | AARCH64_INSN_SPCLREG_SP_EL2 = 0xF210 | ||
| 144 | }; | ||
| 145 | |||
| 123 | enum aarch64_insn_variant { | 146 | enum aarch64_insn_variant { |
| 124 | AARCH64_INSN_VARIANT_32BIT, | 147 | AARCH64_INSN_VARIANT_32BIT, |
| 125 | AARCH64_INSN_VARIANT_64BIT | 148 | AARCH64_INSN_VARIANT_64BIT |
| @@ -223,8 +246,15 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \ | |||
| 223 | static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ | 246 | static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \ |
| 224 | { return (val); } | 247 | { return (val); } |
| 225 | 248 | ||
| 249 | __AARCH64_INSN_FUNCS(adr_adrp, 0x1F000000, 0x10000000) | ||
| 250 | __AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000) | ||
| 226 | __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) | 251 | __AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800) |
| 227 | __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) | 252 | __AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800) |
| 253 | __AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000) | ||
| 254 | __AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000) | ||
| 255 | __AARCH64_INSN_FUNCS(exclusive, 0x3F800000, 0x08000000) | ||
| 256 | __AARCH64_INSN_FUNCS(load_ex, 0x3F400000, 0x08400000) | ||
| 257 | __AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000) | ||
| 228 | __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) | 258 | __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000) |
| 229 | __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) | 259 | __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000) |
| 230 | __AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) | 260 | __AARCH64_INSN_FUNCS(stp_pre, 0x7FC00000, 0x29800000) |
| @@ -273,10 +303,15 @@ __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) | |||
| 273 | __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) | 303 | __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) |
| 274 | __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) | 304 | __AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003) |
| 275 | __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) | 305 | __AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000) |
| 306 | __AARCH64_INSN_FUNCS(exception, 0xFF000000, 0xD4000000) | ||
| 276 | __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) | 307 | __AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F) |
| 277 | __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) | 308 | __AARCH64_INSN_FUNCS(br, 0xFFFFFC1F, 0xD61F0000) |
| 278 | __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) | 309 | __AARCH64_INSN_FUNCS(blr, 0xFFFFFC1F, 0xD63F0000) |
| 279 | __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) | 310 | __AARCH64_INSN_FUNCS(ret, 0xFFFFFC1F, 0xD65F0000) |
| 311 | __AARCH64_INSN_FUNCS(eret, 0xFFFFFFFF, 0xD69F03E0) | ||
| 312 | __AARCH64_INSN_FUNCS(mrs, 0xFFF00000, 0xD5300000) | ||
| 313 | __AARCH64_INSN_FUNCS(msr_imm, 0xFFF8F01F, 0xD500401F) | ||
| 314 | __AARCH64_INSN_FUNCS(msr_reg, 0xFFF00000, 0xD5100000) | ||
| 280 | 315 | ||
| 281 | #undef __AARCH64_INSN_FUNCS | 316 | #undef __AARCH64_INSN_FUNCS |
| 282 | 317 | ||
| @@ -286,6 +321,8 @@ bool aarch64_insn_is_branch_imm(u32 insn); | |||
| 286 | int aarch64_insn_read(void *addr, u32 *insnp); | 321 | int aarch64_insn_read(void *addr, u32 *insnp); |
| 287 | int aarch64_insn_write(void *addr, u32 insn); | 322 | int aarch64_insn_write(void *addr, u32 insn); |
| 288 | enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); | 323 | enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn); |
| 324 | bool aarch64_insn_uses_literal(u32 insn); | ||
| 325 | bool aarch64_insn_is_branch(u32 insn); | ||
| 289 | u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); | 326 | u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn); |
| 290 | u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, | 327 | u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, |
| 291 | u32 insn, u64 imm); | 328 | u32 insn, u64 imm); |
| @@ -367,9 +404,13 @@ bool aarch32_insn_is_wide(u32 insn); | |||
| 367 | #define A32_RT_OFFSET 12 | 404 | #define A32_RT_OFFSET 12 |
| 368 | #define A32_RT2_OFFSET 0 | 405 | #define A32_RT2_OFFSET 0 |
| 369 | 406 | ||
| 407 | u32 aarch64_insn_extract_system_reg(u32 insn); | ||
| 370 | u32 aarch32_insn_extract_reg_num(u32 insn, int offset); | 408 | u32 aarch32_insn_extract_reg_num(u32 insn, int offset); |
| 371 | u32 aarch32_insn_mcr_extract_opc2(u32 insn); | 409 | u32 aarch32_insn_mcr_extract_opc2(u32 insn); |
| 372 | u32 aarch32_insn_mcr_extract_crm(u32 insn); | 410 | u32 aarch32_insn_mcr_extract_crm(u32 insn); |
| 411 | |||
| 412 | typedef bool (pstate_check_t)(unsigned long); | ||
| 413 | extern pstate_check_t * const aarch32_opcode_cond_checks[16]; | ||
| 373 | #endif /* __ASSEMBLY__ */ | 414 | #endif /* __ASSEMBLY__ */ |
| 374 | 415 | ||
| 375 | #endif /* __ASM_INSN_H */ | 416 | #endif /* __ASM_INSN_H */ |
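The new special-register enum values are the 16-bit o0:op1:CRn:CRm:op2 fields that aarch64_insn_extract_system_reg() pulls out of an MRS/MSR encoding (bits [20:5], hence the 0x1FFFE0 mask in the insn.c hunk below). A standalone sketch of how one value is derived; the bit layout here is my reading of the A64 system-register encoding, so treat it as an assumption to verify against the ARM ARM:

#include <stdint.h>
#include <stdio.h>

/* Pack o0, op1, CRn, CRm, op2 the way bits [20:5] of MRS/MSR lay them out. */
static uint32_t sysreg_field(uint32_t o0, uint32_t op1, uint32_t crn,
                             uint32_t crm, uint32_t op2)
{
        return (1u << 15) | (o0 << 14) | (op1 << 11) | (crn << 7) |
               (crm << 3) | op2;
}

int main(void)
{
        /* DAIF is S3_3_C4_C2_1: op0=3 (o0=1), op1=3, CRn=4, CRm=2, op2=1 */
        printf("DAIF -> 0x%04X\n", (unsigned)sysreg_field(1, 3, 4, 2, 1));
        return 0;               /* prints 0xDA11, matching SPCLREG_DAIF */
}

This is what lets decode-insn.c whitelist MRS reads of DAIF while rejecting every other MRS during single-stepping.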
diff --git a/arch/arm64/include/asm/kprobes.h b/arch/arm64/include/asm/kprobes.h
new file mode 100644
index 000000000000..61b49150dfa3
--- /dev/null
+++ b/arch/arm64/include/asm/kprobes.h
| @@ -0,0 +1,62 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/include/asm/kprobes.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Linaro Limited | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef _ARM_KPROBES_H | ||
| 17 | #define _ARM_KPROBES_H | ||
| 18 | |||
| 19 | #include <linux/types.h> | ||
| 20 | #include <linux/ptrace.h> | ||
| 21 | #include <linux/percpu.h> | ||
| 22 | |||
| 23 | #define __ARCH_WANT_KPROBES_INSN_SLOT | ||
| 24 | #define MAX_INSN_SIZE 1 | ||
| 25 | #define MAX_STACK_SIZE 128 | ||
| 26 | |||
| 27 | #define flush_insn_slot(p) do { } while (0) | ||
| 28 | #define kretprobe_blacklist_size 0 | ||
| 29 | |||
| 30 | #include <asm/probes.h> | ||
| 31 | |||
| 32 | struct prev_kprobe { | ||
| 33 | struct kprobe *kp; | ||
| 34 | unsigned int status; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* Single step context for kprobe */ | ||
| 38 | struct kprobe_step_ctx { | ||
| 39 | unsigned long ss_pending; | ||
| 40 | unsigned long match_addr; | ||
| 41 | }; | ||
| 42 | |||
| 43 | /* per-cpu kprobe control block */ | ||
| 44 | struct kprobe_ctlblk { | ||
| 45 | unsigned int kprobe_status; | ||
| 46 | unsigned long saved_irqflag; | ||
| 47 | struct prev_kprobe prev_kprobe; | ||
| 48 | struct kprobe_step_ctx ss_ctx; | ||
| 49 | struct pt_regs jprobe_saved_regs; | ||
| 50 | char jprobes_stack[MAX_STACK_SIZE]; | ||
| 51 | }; | ||
| 52 | |||
| 53 | void arch_remove_kprobe(struct kprobe *); | ||
| 54 | int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); | ||
| 55 | int kprobe_exceptions_notify(struct notifier_block *self, | ||
| 56 | unsigned long val, void *data); | ||
| 57 | int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr); | ||
| 58 | int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr); | ||
| 59 | void kretprobe_trampoline(void); | ||
| 60 | void __kprobes *trampoline_probe_handler(struct pt_regs *regs); | ||
| 61 | |||
| 62 | #endif /* _ARM_KPROBES_H */ | ||
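kprobes.h also declares kretprobe_trampoline() and trampoline_probe_handler(), the arch-side hooks behind the "Add kernel return probes support (kretprobes)" patch in this merge. A minimal return-probe client, again with the probed symbol as an illustrative assumption:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Report the return value of a (hypothetical) probed function. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
        pr_info("%s returned 0x%lx\n", ri->rp->kp.symbol_name,
                regs_return_value(regs));
        return 0;
}

static struct kretprobe my_kretprobe = {
        .handler        = ret_handler,
        .kp.symbol_name = "do_fork",    /* assumed target */
        .maxactive      = 20,           /* concurrent instances to track */
};

static int __init kret_example_init(void)
{
        return register_kretprobe(&my_kretprobe);
}

static void __exit kret_example_exit(void)
{
        unregister_kretprobe(&my_kretprobe);
}

module_init(kret_example_init);
module_exit(kret_example_exit);
MODULE_LICENSE("GPL");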
diff --git a/arch/arm64/include/asm/probes.h b/arch/arm64/include/asm/probes.h
new file mode 100644
index 000000000000..5af574d632fa
--- /dev/null
+++ b/arch/arm64/include/asm/probes.h
| @@ -0,0 +1,35 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/include/asm/probes.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Linaro Limited | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | #ifndef _ARM_PROBES_H | ||
| 16 | #define _ARM_PROBES_H | ||
| 17 | |||
| 18 | #include <asm/opcodes.h> | ||
| 19 | |||
| 20 | struct kprobe; | ||
| 21 | struct arch_specific_insn; | ||
| 22 | |||
| 23 | typedef u32 kprobe_opcode_t; | ||
| 24 | typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *); | ||
| 25 | |||
| 26 | /* architecture specific copy of original instruction */ | ||
| 27 | struct arch_specific_insn { | ||
| 28 | kprobe_opcode_t *insn; | ||
| 29 | pstate_check_t *pstate_cc; | ||
| 30 | kprobes_handler_t *handler; | ||
| 31 | /* restore address after step xol */ | ||
| 32 | unsigned long restore; | ||
| 33 | }; | ||
| 34 | |||
| 35 | #endif | ||
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 10e6f1d7269c..3fd15fdf4181 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
| @@ -73,6 +73,7 @@ | |||
| 73 | #define COMPAT_PT_DATA_ADDR 0x10004 | 73 | #define COMPAT_PT_DATA_ADDR 0x10004 |
| 74 | #define COMPAT_PT_TEXT_END_ADDR 0x10008 | 74 | #define COMPAT_PT_TEXT_END_ADDR 0x10008 |
| 75 | #ifndef __ASSEMBLY__ | 75 | #ifndef __ASSEMBLY__ |
| 76 | #include <linux/bug.h> | ||
| 76 | 77 | ||
| 77 | /* sizeof(struct user) for AArch32 */ | 78 | /* sizeof(struct user) for AArch32 */ |
| 78 | #define COMPAT_USER_SZ 296 | 79 | #define COMPAT_USER_SZ 296 |
| @@ -118,6 +119,8 @@ struct pt_regs { | |||
| 118 | u64 syscallno; | 119 | u64 syscallno; |
| 119 | }; | 120 | }; |
| 120 | 121 | ||
| 122 | #define MAX_REG_OFFSET offsetof(struct pt_regs, pstate) | ||
| 123 | |||
| 121 | #define arch_has_single_step() (1) | 124 | #define arch_has_single_step() (1) |
| 122 | 125 | ||
| 123 | #ifdef CONFIG_COMPAT | 126 | #ifdef CONFIG_COMPAT |
| @@ -143,9 +146,59 @@ struct pt_regs { | |||
| 143 | #define fast_interrupts_enabled(regs) \ | 146 | #define fast_interrupts_enabled(regs) \ |
| 144 | (!((regs)->pstate & PSR_F_BIT)) | 147 | (!((regs)->pstate & PSR_F_BIT)) |
| 145 | 148 | ||
| 146 | #define user_stack_pointer(regs) \ | 149 | #define GET_USP(regs) \ |
| 147 | (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) | 150 | (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) |
| 148 | 151 | ||
| 152 | #define SET_USP(ptregs, value) \ | ||
| 153 | (!compat_user_mode(ptregs) ? ((ptregs)->sp = value) : ((ptregs)->compat_sp = value)) | ||
| 154 | |||
| 155 | extern int regs_query_register_offset(const char *name); | ||
| 156 | extern const char *regs_query_register_name(unsigned int offset); | ||
| 157 | extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, | ||
| 158 | unsigned int n); | ||
| 159 | |||
| 160 | /** | ||
| 161 | * regs_get_register() - get register value from its offset | ||
| 162 | * @regs: pt_regs from which register value is gotten | ||
| 163 | * @offset: offset of the register. | ||
| 164 | * | ||
| 165 | * regs_get_register returns the value of the register located at @offset within @regs. | ||
| 166 | * The @offset is the offset of the register in struct pt_regs. | ||
| 167 | * If @offset is bigger than MAX_REG_OFFSET, this returns 0. | ||
| 168 | */ | ||
| 169 | static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset) | ||
| 170 | { | ||
| 171 | u64 val = 0; | ||
| 172 | |||
| 173 | WARN_ON(offset & 7); | ||
| 174 | |||
| 175 | offset >>= 3; | ||
| 176 | switch (offset) { | ||
| 177 | case 0 ... 30: | ||
| 178 | val = regs->regs[offset]; | ||
| 179 | break; | ||
| 180 | case offsetof(struct pt_regs, sp) >> 3: | ||
| 181 | val = regs->sp; | ||
| 182 | break; | ||
| 183 | case offsetof(struct pt_regs, pc) >> 3: | ||
| 184 | val = regs->pc; | ||
| 185 | break; | ||
| 186 | case offsetof(struct pt_regs, pstate) >> 3: | ||
| 187 | val = regs->pstate; | ||
| 188 | break; | ||
| 189 | default: | ||
| 190 | val = 0; | ||
| 191 | } | ||
| 192 | |||
| 193 | return val; | ||
| 194 | } | ||
| 195 | |||
| 196 | /* Valid only for Kernel mode traps. */ | ||
| 197 | static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) | ||
| 198 | { | ||
| 199 | return regs->sp; | ||
| 200 | } | ||
| 201 | |||
| 149 | static inline unsigned long regs_return_value(struct pt_regs *regs) | 202 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
| 150 | { | 203 | { |
| 151 | return regs->regs[0]; | 204 | return regs->regs[0]; |
| @@ -155,8 +208,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) | |||
| 155 | struct task_struct; | 208 | struct task_struct; |
| 156 | int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); | 209 | int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task); |
| 157 | 210 | ||
| 158 | #define instruction_pointer(regs) ((unsigned long)(regs)->pc) | 211 | #define GET_IP(regs) ((unsigned long)(regs)->pc) |
| 212 | #define SET_IP(regs, value) ((regs)->pc = ((u64) (value))) | ||
| 213 | |||
| 214 | #define GET_FP(ptregs) ((unsigned long)(ptregs)->regs[29]) | ||
| 215 | #define SET_FP(ptregs, value) ((ptregs)->regs[29] = ((u64) (value))) | ||
| 216 | |||
| 217 | #include <asm-generic/ptrace.h> | ||
| 159 | 218 | ||
| 219 | #undef profile_pc | ||
| 160 | extern unsigned long profile_pc(struct pt_regs *regs); | 220 | extern unsigned long profile_pc(struct pt_regs *regs); |
| 161 | 221 | ||
| 162 | #endif /* __ASSEMBLY__ */ | 222 | #endif /* __ASSEMBLY__ */ |
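Together with the regs_query_register_offset()/regs_query_register_name() implementations in ptrace.c (outside this excerpt), these helpers are what HAVE_REGS_AND_STACK_ACCESS_API advertises to kprobe-based tracers. A rough usage sketch from a probe handler; the register-name string is an assumption about the lookup table in ptrace.c:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>

/* Sketch: fetch x2 and the kernel stack pointer at the probe site. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        int off = regs_query_register_offset("x2");     /* assumed name */

        if (off >= 0)
                pr_info("x2 = 0x%llx, sp = 0x%lx\n",
                        regs_get_register(regs, off),
                        kernel_stack_pointer(regs));
        return 0;
}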
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 9ef8ec681e30..0b9e49a92c47 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
| @@ -26,8 +26,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE | |||
| 26 | $(call if_changed,objcopy) | 26 | $(call if_changed,objcopy) |
| 27 | 27 | ||
| 28 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 28 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
| 29 | sys_compat.o entry32.o \ | 29 | sys_compat.o entry32.o |
| 30 | ../../arm/kernel/opcodes.o | ||
| 31 | arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o | 30 | arm64-obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o |
| 32 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o | 31 | arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o |
| 33 | arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o | 32 | arm64-obj-$(CONFIG_ARM64_MODULE_PLTS) += module-plts.o |
| @@ -49,7 +48,7 @@ arm64-obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate-asm.o | |||
| 49 | arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ | 48 | arm64-obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o \ |
| 50 | cpu-reset.o | 49 | cpu-reset.o |
| 51 | 50 | ||
| 52 | obj-y += $(arm64-obj-y) vdso/ | 51 | obj-y += $(arm64-obj-y) vdso/ probes/ |
| 53 | obj-m += $(arm64-obj-m) | 52 | obj-m += $(arm64-obj-m) |
| 54 | head-y := head.o | 53 | head-y := head.o |
| 55 | extra-y += $(head-y) vmlinux.lds | 54 | extra-y += $(head-y) vmlinux.lds |
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 2dc44406a7ad..78f368039c79 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
| 28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
| 29 | #include <linux/arm-smccc.h> | 29 | #include <linux/arm-smccc.h> |
| 30 | #include <linux/kprobes.h> | ||
| 30 | 31 | ||
| 31 | #include <asm/checksum.h> | 32 | #include <asm/checksum.h> |
| 32 | 33 | ||
| @@ -68,6 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit); | |||
| 68 | 69 | ||
| 69 | #ifdef CONFIG_FUNCTION_TRACER | 70 | #ifdef CONFIG_FUNCTION_TRACER |
| 70 | EXPORT_SYMBOL(_mcount); | 71 | EXPORT_SYMBOL(_mcount); |
| 72 | NOKPROBE_SYMBOL(_mcount); | ||
| 71 | #endif | 73 | #endif |
| 72 | 74 | ||
| 73 | /* arm-smccc */ | 75 | /* arm-smccc */ |
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index fab5603f57ea..5f72475e2e3b 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
| @@ -344,6 +344,21 @@ static int emulate_swpX(unsigned int address, unsigned int *data, | |||
| 344 | return res; | 344 | return res; |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | #define ARM_OPCODE_CONDITION_UNCOND 0xf | ||
| 348 | |||
| 349 | static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr) | ||
| 350 | { | ||
| 351 | u32 cc_bits = opcode >> 28; | ||
| 352 | |||
| 353 | if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) { | ||
| 354 | if ((*aarch32_opcode_cond_checks[cc_bits])(psr)) | ||
| 355 | return ARM_OPCODE_CONDTEST_PASS; | ||
| 356 | else | ||
| 357 | return ARM_OPCODE_CONDTEST_FAIL; | ||
| 358 | } | ||
| 359 | return ARM_OPCODE_CONDTEST_UNCOND; | ||
| 360 | } | ||
| 361 | |||
| 347 | /* | 362 | /* |
| 348 | * swp_handler logs the id of calling process, dissects the instruction, sanity | 363 | * swp_handler logs the id of calling process, dissects the instruction, sanity |
| 349 | * checks the memory location, calls emulate_swpX for the actual operation and | 364 | * checks the memory location, calls emulate_swpX for the actual operation and |
| @@ -358,7 +373,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr) | |||
| 358 | 373 | ||
| 359 | type = instr & TYPE_SWPB; | 374 | type = instr & TYPE_SWPB; |
| 360 | 375 | ||
| 361 | switch (arm_check_condition(instr, regs->pstate)) { | 376 | switch (aarch32_check_condition(instr, regs->pstate)) { |
| 362 | case ARM_OPCODE_CONDTEST_PASS: | 377 | case ARM_OPCODE_CONDTEST_PASS: |
| 363 | break; | 378 | break; |
| 364 | case ARM_OPCODE_CONDTEST_FAIL: | 379 | case ARM_OPCODE_CONDTEST_FAIL: |
| @@ -440,7 +455,7 @@ static int cp15barrier_handler(struct pt_regs *regs, u32 instr) | |||
| 440 | { | 455 | { |
| 441 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); | 456 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc); |
| 442 | 457 | ||
| 443 | switch (arm_check_condition(instr, regs->pstate)) { | 458 | switch (aarch32_check_condition(instr, regs->pstate)) { |
| 444 | case ARM_OPCODE_CONDTEST_PASS: | 459 | case ARM_OPCODE_CONDTEST_PASS: |
| 445 | break; | 460 | break; |
| 446 | case ARM_OPCODE_CONDTEST_FAIL: | 461 | case ARM_OPCODE_CONDTEST_FAIL: |
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 334a1cd7a942..93ff5922debb 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
| @@ -51,6 +51,17 @@ int main(void) | |||
| 51 | DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); | 51 | DEFINE(S_X5, offsetof(struct pt_regs, regs[5])); |
| 52 | DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); | 52 | DEFINE(S_X6, offsetof(struct pt_regs, regs[6])); |
| 53 | DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); | 53 | DEFINE(S_X7, offsetof(struct pt_regs, regs[7])); |
| 54 | DEFINE(S_X8, offsetof(struct pt_regs, regs[8])); | ||
| 55 | DEFINE(S_X10, offsetof(struct pt_regs, regs[10])); | ||
| 56 | DEFINE(S_X12, offsetof(struct pt_regs, regs[12])); | ||
| 57 | DEFINE(S_X14, offsetof(struct pt_regs, regs[14])); | ||
| 58 | DEFINE(S_X16, offsetof(struct pt_regs, regs[16])); | ||
| 59 | DEFINE(S_X18, offsetof(struct pt_regs, regs[18])); | ||
| 60 | DEFINE(S_X20, offsetof(struct pt_regs, regs[20])); | ||
| 61 | DEFINE(S_X22, offsetof(struct pt_regs, regs[22])); | ||
| 62 | DEFINE(S_X24, offsetof(struct pt_regs, regs[24])); | ||
| 63 | DEFINE(S_X26, offsetof(struct pt_regs, regs[26])); | ||
| 64 | DEFINE(S_X28, offsetof(struct pt_regs, regs[28])); | ||
| 54 | DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); | 65 | DEFINE(S_LR, offsetof(struct pt_regs, regs[30])); |
| 55 | DEFINE(S_SP, offsetof(struct pt_regs, sp)); | 66 | DEFINE(S_SP, offsetof(struct pt_regs, sp)); |
| 56 | #ifdef CONFIG_COMPAT | 67 | #ifdef CONFIG_COMPAT |
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f17134d39e6b..91fff48d0f57 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/hardirq.h> | 23 | #include <linux/hardirq.h> |
| 24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
| 25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
| 26 | #include <linux/kprobes.h> | ||
| 26 | #include <linux/stat.h> | 27 | #include <linux/stat.h> |
| 27 | #include <linux/uaccess.h> | 28 | #include <linux/uaccess.h> |
| 28 | 29 | ||
| @@ -48,6 +49,7 @@ static void mdscr_write(u32 mdscr) | |||
| 48 | asm volatile("msr mdscr_el1, %0" :: "r" (mdscr)); | 49 | asm volatile("msr mdscr_el1, %0" :: "r" (mdscr)); |
| 49 | local_dbg_restore(flags); | 50 | local_dbg_restore(flags); |
| 50 | } | 51 | } |
| 52 | NOKPROBE_SYMBOL(mdscr_write); | ||
| 51 | 53 | ||
| 52 | static u32 mdscr_read(void) | 54 | static u32 mdscr_read(void) |
| 53 | { | 55 | { |
| @@ -55,6 +57,7 @@ static u32 mdscr_read(void) | |||
| 55 | asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr)); | 57 | asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr)); |
| 56 | return mdscr; | 58 | return mdscr; |
| 57 | } | 59 | } |
| 60 | NOKPROBE_SYMBOL(mdscr_read); | ||
| 58 | 61 | ||
| 59 | /* | 62 | /* |
| 60 | * Allow root to disable self-hosted debug from userspace. | 63 | * Allow root to disable self-hosted debug from userspace. |
| @@ -103,6 +106,7 @@ void enable_debug_monitors(enum dbg_active_el el) | |||
| 103 | mdscr_write(mdscr); | 106 | mdscr_write(mdscr); |
| 104 | } | 107 | } |
| 105 | } | 108 | } |
| 109 | NOKPROBE_SYMBOL(enable_debug_monitors); | ||
| 106 | 110 | ||
| 107 | void disable_debug_monitors(enum dbg_active_el el) | 111 | void disable_debug_monitors(enum dbg_active_el el) |
| 108 | { | 112 | { |
| @@ -123,6 +127,7 @@ void disable_debug_monitors(enum dbg_active_el el) | |||
| 123 | mdscr_write(mdscr); | 127 | mdscr_write(mdscr); |
| 124 | } | 128 | } |
| 125 | } | 129 | } |
| 130 | NOKPROBE_SYMBOL(disable_debug_monitors); | ||
| 126 | 131 | ||
| 127 | /* | 132 | /* |
| 128 | * OS lock clearing. | 133 | * OS lock clearing. |
| @@ -167,11 +172,13 @@ static void set_regs_spsr_ss(struct pt_regs *regs) | |||
| 167 | { | 172 | { |
| 168 | regs->pstate |= DBG_SPSR_SS; | 173 | regs->pstate |= DBG_SPSR_SS; |
| 169 | } | 174 | } |
| 175 | NOKPROBE_SYMBOL(set_regs_spsr_ss); | ||
| 170 | 176 | ||
| 171 | static void clear_regs_spsr_ss(struct pt_regs *regs) | 177 | static void clear_regs_spsr_ss(struct pt_regs *regs) |
| 172 | { | 178 | { |
| 173 | regs->pstate &= ~DBG_SPSR_SS; | 179 | regs->pstate &= ~DBG_SPSR_SS; |
| 174 | } | 180 | } |
| 181 | NOKPROBE_SYMBOL(clear_regs_spsr_ss); | ||
| 175 | 182 | ||
| 176 | /* EL1 Single Step Handler hooks */ | 183 | /* EL1 Single Step Handler hooks */ |
| 177 | static LIST_HEAD(step_hook); | 184 | static LIST_HEAD(step_hook); |
| @@ -215,6 +222,7 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr) | |||
| 215 | 222 | ||
| 216 | return retval; | 223 | return retval; |
| 217 | } | 224 | } |
| 225 | NOKPROBE_SYMBOL(call_step_hook); | ||
| 218 | 226 | ||
| 219 | static void send_user_sigtrap(int si_code) | 227 | static void send_user_sigtrap(int si_code) |
| 220 | { | 228 | { |
| @@ -256,6 +264,10 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
| 256 | */ | 264 | */ |
| 257 | user_rewind_single_step(current); | 265 | user_rewind_single_step(current); |
| 258 | } else { | 266 | } else { |
| 267 | #ifdef CONFIG_KPROBES | ||
| 268 | if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED) | ||
| 269 | return 0; | ||
| 270 | #endif | ||
| 259 | if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED) | 271 | if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED) |
| 260 | return 0; | 272 | return 0; |
| 261 | 273 | ||
| @@ -269,6 +281,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr, | |||
| 269 | 281 | ||
| 270 | return 0; | 282 | return 0; |
| 271 | } | 283 | } |
| 284 | NOKPROBE_SYMBOL(single_step_handler); | ||
| 272 | 285 | ||
| 273 | /* | 286 | /* |
| 274 | * Breakpoint handler is re-entrant as another breakpoint can | 287 | * Breakpoint handler is re-entrant as another breakpoint can |
| @@ -306,19 +319,28 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) | |||
| 306 | 319 | ||
| 307 | return fn ? fn(regs, esr) : DBG_HOOK_ERROR; | 320 | return fn ? fn(regs, esr) : DBG_HOOK_ERROR; |
| 308 | } | 321 | } |
| 322 | NOKPROBE_SYMBOL(call_break_hook); | ||
| 309 | 323 | ||
| 310 | static int brk_handler(unsigned long addr, unsigned int esr, | 324 | static int brk_handler(unsigned long addr, unsigned int esr, |
| 311 | struct pt_regs *regs) | 325 | struct pt_regs *regs) |
| 312 | { | 326 | { |
| 313 | if (user_mode(regs)) { | 327 | if (user_mode(regs)) { |
| 314 | send_user_sigtrap(TRAP_BRKPT); | 328 | send_user_sigtrap(TRAP_BRKPT); |
| 315 | } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { | 329 | } |
| 316 | pr_warning("Unexpected kernel BRK exception at EL1\n"); | 330 | #ifdef CONFIG_KPROBES |
| 331 | else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) { | ||
| 332 | if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED) | ||
| 333 | return -EFAULT; | ||
| 334 | } | ||
| 335 | #endif | ||
| 336 | else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { | ||
| 337 | pr_warn("Unexpected kernel BRK exception at EL1\n"); | ||
| 317 | return -EFAULT; | 338 | return -EFAULT; |
| 318 | } | 339 | } |
| 319 | 340 | ||
| 320 | return 0; | 341 | return 0; |
| 321 | } | 342 | } |
| 343 | NOKPROBE_SYMBOL(brk_handler); | ||
| 322 | 344 | ||
| 323 | int aarch32_break_handler(struct pt_regs *regs) | 345 | int aarch32_break_handler(struct pt_regs *regs) |
| 324 | { | 346 | { |
| @@ -355,6 +377,7 @@ int aarch32_break_handler(struct pt_regs *regs) | |||
| 355 | send_user_sigtrap(TRAP_BRKPT); | 377 | send_user_sigtrap(TRAP_BRKPT); |
| 356 | return 0; | 378 | return 0; |
| 357 | } | 379 | } |
| 380 | NOKPROBE_SYMBOL(aarch32_break_handler); | ||
| 358 | 381 | ||
| 359 | static int __init debug_traps_init(void) | 382 | static int __init debug_traps_init(void) |
| 360 | { | 383 | { |
| @@ -376,6 +399,7 @@ void user_rewind_single_step(struct task_struct *task) | |||
| 376 | if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) | 399 | if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) |
| 377 | set_regs_spsr_ss(task_pt_regs(task)); | 400 | set_regs_spsr_ss(task_pt_regs(task)); |
| 378 | } | 401 | } |
| 402 | NOKPROBE_SYMBOL(user_rewind_single_step); | ||
| 379 | 403 | ||
| 380 | void user_fastforward_single_step(struct task_struct *task) | 404 | void user_fastforward_single_step(struct task_struct *task) |
| 381 | { | 405 | { |
| @@ -391,6 +415,7 @@ void kernel_enable_single_step(struct pt_regs *regs) | |||
| 391 | mdscr_write(mdscr_read() | DBG_MDSCR_SS); | 415 | mdscr_write(mdscr_read() | DBG_MDSCR_SS); |
| 392 | enable_debug_monitors(DBG_ACTIVE_EL1); | 416 | enable_debug_monitors(DBG_ACTIVE_EL1); |
| 393 | } | 417 | } |
| 418 | NOKPROBE_SYMBOL(kernel_enable_single_step); | ||
| 394 | 419 | ||
| 395 | void kernel_disable_single_step(void) | 420 | void kernel_disable_single_step(void) |
| 396 | { | 421 | { |
| @@ -398,12 +423,14 @@ void kernel_disable_single_step(void) | |||
| 398 | mdscr_write(mdscr_read() & ~DBG_MDSCR_SS); | 423 | mdscr_write(mdscr_read() & ~DBG_MDSCR_SS); |
| 399 | disable_debug_monitors(DBG_ACTIVE_EL1); | 424 | disable_debug_monitors(DBG_ACTIVE_EL1); |
| 400 | } | 425 | } |
| 426 | NOKPROBE_SYMBOL(kernel_disable_single_step); | ||
| 401 | 427 | ||
| 402 | int kernel_active_single_step(void) | 428 | int kernel_active_single_step(void) |
| 403 | { | 429 | { |
| 404 | WARN_ON(!irqs_disabled()); | 430 | WARN_ON(!irqs_disabled()); |
| 405 | return mdscr_read() & DBG_MDSCR_SS; | 431 | return mdscr_read() & DBG_MDSCR_SS; |
| 406 | } | 432 | } |
| 433 | NOKPROBE_SYMBOL(kernel_active_single_step); | ||
| 407 | 434 | ||
| 408 | /* ptrace API */ | 435 | /* ptrace API */ |
| 409 | void user_enable_single_step(struct task_struct *task) | 436 | void user_enable_single_step(struct task_struct *task) |
| @@ -411,8 +438,10 @@ void user_enable_single_step(struct task_struct *task) | |||
| 411 | set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); | 438 | set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); |
| 412 | set_regs_spsr_ss(task_pt_regs(task)); | 439 | set_regs_spsr_ss(task_pt_regs(task)); |
| 413 | } | 440 | } |
| 441 | NOKPROBE_SYMBOL(user_enable_single_step); | ||
| 414 | 442 | ||
| 415 | void user_disable_single_step(struct task_struct *task) | 443 | void user_disable_single_step(struct task_struct *task) |
| 416 | { | 444 | { |
| 417 | clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); | 445 | clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); |
| 418 | } | 446 | } |
| 447 | NOKPROBE_SYMBOL(user_disable_single_step); | ||
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 3eca5d34f7a6..a03eb49cfeeb 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
| @@ -242,6 +242,7 @@ tsk .req x28 // current thread_info | |||
| 242 | /* | 242 | /* |
| 243 | * Exception vectors. | 243 | * Exception vectors. |
| 244 | */ | 244 | */ |
| 245 | .pushsection ".entry.text", "ax" | ||
| 245 | 246 | ||
| 246 | .align 11 | 247 | .align 11 |
| 247 | ENTRY(vectors) | 248 | ENTRY(vectors) |
| @@ -784,6 +785,8 @@ __ni_sys_trace: | |||
| 784 | bl do_ni_syscall | 785 | bl do_ni_syscall |
| 785 | b __sys_trace_return | 786 | b __sys_trace_return |
| 786 | 787 | ||
| 788 | .popsection // .entry.text | ||
| 789 | |||
| 787 | /* | 790 | /* |
| 788 | * Special system call wrappers. | 791 | * Special system call wrappers. |
| 789 | */ | 792 | */ |
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index ce21aa88263f..26a6bf77d272 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/cpu_pm.h> | 24 | #include <linux/cpu_pm.h> |
| 25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
| 26 | #include <linux/hw_breakpoint.h> | 26 | #include <linux/hw_breakpoint.h> |
| 27 | #include <linux/kprobes.h> | ||
| 27 | #include <linux/perf_event.h> | 28 | #include <linux/perf_event.h> |
| 28 | #include <linux/ptrace.h> | 29 | #include <linux/ptrace.h> |
| 29 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
| @@ -127,6 +128,7 @@ static u64 read_wb_reg(int reg, int n) | |||
| 127 | 128 | ||
| 128 | return val; | 129 | return val; |
| 129 | } | 130 | } |
| 131 | NOKPROBE_SYMBOL(read_wb_reg); | ||
| 130 | 132 | ||
| 131 | static void write_wb_reg(int reg, int n, u64 val) | 133 | static void write_wb_reg(int reg, int n, u64 val) |
| 132 | { | 134 | { |
| @@ -140,6 +142,7 @@ static void write_wb_reg(int reg, int n, u64 val) | |||
| 140 | } | 142 | } |
| 141 | isb(); | 143 | isb(); |
| 142 | } | 144 | } |
| 145 | NOKPROBE_SYMBOL(write_wb_reg); | ||
| 143 | 146 | ||
| 144 | /* | 147 | /* |
| 145 | * Convert a breakpoint privilege level to the corresponding exception | 148 | * Convert a breakpoint privilege level to the corresponding exception |
| @@ -157,6 +160,7 @@ static enum dbg_active_el debug_exception_level(int privilege) | |||
| 157 | return -EINVAL; | 160 | return -EINVAL; |
| 158 | } | 161 | } |
| 159 | } | 162 | } |
| 163 | NOKPROBE_SYMBOL(debug_exception_level); | ||
| 160 | 164 | ||
| 161 | enum hw_breakpoint_ops { | 165 | enum hw_breakpoint_ops { |
| 162 | HW_BREAKPOINT_INSTALL, | 166 | HW_BREAKPOINT_INSTALL, |
| @@ -575,6 +579,7 @@ static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable) | |||
| 575 | write_wb_reg(reg, i, ctrl); | 579 | write_wb_reg(reg, i, ctrl); |
| 576 | } | 580 | } |
| 577 | } | 581 | } |
| 582 | NOKPROBE_SYMBOL(toggle_bp_registers); | ||
| 578 | 583 | ||
| 579 | /* | 584 | /* |
| 580 | * Debug exception handlers. | 585 | * Debug exception handlers. |
| @@ -654,6 +659,7 @@ unlock: | |||
| 654 | 659 | ||
| 655 | return 0; | 660 | return 0; |
| 656 | } | 661 | } |
| 662 | NOKPROBE_SYMBOL(breakpoint_handler); | ||
| 657 | 663 | ||
| 658 | static int watchpoint_handler(unsigned long addr, unsigned int esr, | 664 | static int watchpoint_handler(unsigned long addr, unsigned int esr, |
| 659 | struct pt_regs *regs) | 665 | struct pt_regs *regs) |
| @@ -756,6 +762,7 @@ unlock: | |||
| 756 | 762 | ||
| 757 | return 0; | 763 | return 0; |
| 758 | } | 764 | } |
| 765 | NOKPROBE_SYMBOL(watchpoint_handler); | ||
| 759 | 766 | ||
| 760 | /* | 767 | /* |
| 761 | * Handle single-step exception. | 768 | * Handle single-step exception. |
| @@ -813,6 +820,7 @@ int reinstall_suspended_bps(struct pt_regs *regs) | |||
| 813 | 820 | ||
| 814 | return !handled_exception; | 821 | return !handled_exception; |
| 815 | } | 822 | } |
| 823 | NOKPROBE_SYMBOL(reinstall_suspended_bps); | ||
| 816 | 824 | ||
| 817 | /* | 825 | /* |
| 818 | * Context-switcher for restoring suspended breakpoints. | 826 | * Context-switcher for restoring suspended breakpoints. |
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 368c08290dd8..63f9432d05e8 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
| 31 | #include <asm/debug-monitors.h> | 31 | #include <asm/debug-monitors.h> |
| 32 | #include <asm/fixmap.h> | 32 | #include <asm/fixmap.h> |
| 33 | #include <asm/opcodes.h> | ||
| 33 | #include <asm/insn.h> | 34 | #include <asm/insn.h> |
| 34 | 35 | ||
| 35 | #define AARCH64_INSN_SF_BIT BIT(31) | 36 | #define AARCH64_INSN_SF_BIT BIT(31) |
| @@ -162,6 +163,32 @@ static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn) | |||
| 162 | aarch64_insn_is_nop(insn); | 163 | aarch64_insn_is_nop(insn); |
| 163 | } | 164 | } |
| 164 | 165 | ||
| 166 | bool __kprobes aarch64_insn_uses_literal(u32 insn) | ||
| 167 | { | ||
| 168 | /* ldr/ldrsw (literal), prfm */ | ||
| 169 | |||
| 170 | return aarch64_insn_is_ldr_lit(insn) || | ||
| 171 | aarch64_insn_is_ldrsw_lit(insn) || | ||
| 172 | aarch64_insn_is_adr_adrp(insn) || | ||
| 173 | aarch64_insn_is_prfm_lit(insn); | ||
| 174 | } | ||
| 175 | |||
| 176 | bool __kprobes aarch64_insn_is_branch(u32 insn) | ||
| 177 | { | ||
| 178 | /* b, bl, cb*, tb*, b.cond, br, blr */ | ||
| 179 | |||
| 180 | return aarch64_insn_is_b(insn) || | ||
| 181 | aarch64_insn_is_bl(insn) || | ||
| 182 | aarch64_insn_is_cbz(insn) || | ||
| 183 | aarch64_insn_is_cbnz(insn) || | ||
| 184 | aarch64_insn_is_tbz(insn) || | ||
| 185 | aarch64_insn_is_tbnz(insn) || | ||
| 186 | aarch64_insn_is_ret(insn) || | ||
| 187 | aarch64_insn_is_br(insn) || | ||
| 188 | aarch64_insn_is_blr(insn) || | ||
| 189 | aarch64_insn_is_bcond(insn); | ||
| 190 | } | ||
| 191 | |||
| 165 | /* | 192 | /* |
| 166 | * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a | 193 | * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a |
| 167 | * Section B2.6.5 "Concurrent modification and execution of instructions": | 194 | * Section B2.6.5 "Concurrent modification and execution of instructions": |
| @@ -1175,6 +1202,14 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset) | |||
| 1175 | BUG(); | 1202 | BUG(); |
| 1176 | } | 1203 | } |
| 1177 | 1204 | ||
| 1205 | /* | ||
| 1206 | * Extract the Op/CR data from a msr/mrs instruction. | ||
| 1207 | */ | ||
| 1208 | u32 aarch64_insn_extract_system_reg(u32 insn) | ||
| 1209 | { | ||
| 1210 | return (insn & 0x1FFFE0) >> 5; | ||
| 1211 | } | ||
| 1212 | |||
| 1178 | bool aarch32_insn_is_wide(u32 insn) | 1213 | bool aarch32_insn_is_wide(u32 insn) |
| 1179 | { | 1214 | { |
| 1180 | return insn >= 0xe800; | 1215 | return insn >= 0xe800; |
| @@ -1200,3 +1235,101 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn) | |||
| 1200 | { | 1235 | { |
| 1201 | return insn & CRM_MASK; | 1236 | return insn & CRM_MASK; |
| 1202 | } | 1237 | } |
| 1238 | |||
| 1239 | static bool __kprobes __check_eq(unsigned long pstate) | ||
| 1240 | { | ||
| 1241 | return (pstate & PSR_Z_BIT) != 0; | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | static bool __kprobes __check_ne(unsigned long pstate) | ||
| 1245 | { | ||
| 1246 | return (pstate & PSR_Z_BIT) == 0; | ||
| 1247 | } | ||
| 1248 | |||
| 1249 | static bool __kprobes __check_cs(unsigned long pstate) | ||
| 1250 | { | ||
| 1251 | return (pstate & PSR_C_BIT) != 0; | ||
| 1252 | } | ||
| 1253 | |||
| 1254 | static bool __kprobes __check_cc(unsigned long pstate) | ||
| 1255 | { | ||
| 1256 | return (pstate & PSR_C_BIT) == 0; | ||
| 1257 | } | ||
| 1258 | |||
| 1259 | static bool __kprobes __check_mi(unsigned long pstate) | ||
| 1260 | { | ||
| 1261 | return (pstate & PSR_N_BIT) != 0; | ||
| 1262 | } | ||
| 1263 | |||
| 1264 | static bool __kprobes __check_pl(unsigned long pstate) | ||
| 1265 | { | ||
| 1266 | return (pstate & PSR_N_BIT) == 0; | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | static bool __kprobes __check_vs(unsigned long pstate) | ||
| 1270 | { | ||
| 1271 | return (pstate & PSR_V_BIT) != 0; | ||
| 1272 | } | ||
| 1273 | |||
| 1274 | static bool __kprobes __check_vc(unsigned long pstate) | ||
| 1275 | { | ||
| 1276 | return (pstate & PSR_V_BIT) == 0; | ||
| 1277 | } | ||
| 1278 | |||
| 1279 | static bool __kprobes __check_hi(unsigned long pstate) | ||
| 1280 | { | ||
| 1281 | pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ | ||
| 1282 | return (pstate & PSR_C_BIT) != 0; | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | static bool __kprobes __check_ls(unsigned long pstate) | ||
| 1286 | { | ||
| 1287 | pstate &= ~(pstate >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ | ||
| 1288 | return (pstate & PSR_C_BIT) == 0; | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | static bool __kprobes __check_ge(unsigned long pstate) | ||
| 1292 | { | ||
| 1293 | pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
| 1294 | return (pstate & PSR_N_BIT) == 0; | ||
| 1295 | } | ||
| 1296 | |||
| 1297 | static bool __kprobes __check_lt(unsigned long pstate) | ||
| 1298 | { | ||
| 1299 | pstate ^= (pstate << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
| 1300 | return (pstate & PSR_N_BIT) != 0; | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | static bool __kprobes __check_gt(unsigned long pstate) | ||
| 1304 | { | ||
| 1305 | /*PSR_N_BIT ^= PSR_V_BIT */ | ||
| 1306 | unsigned long temp = pstate ^ (pstate << 3); | ||
| 1307 | |||
| 1308 | temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ | ||
| 1309 | return (temp & PSR_N_BIT) == 0; | ||
| 1310 | } | ||
| 1311 | |||
| 1312 | static bool __kprobes __check_le(unsigned long pstate) | ||
| 1313 | { | ||
| 1314 | /*PSR_N_BIT ^= PSR_V_BIT */ | ||
| 1315 | unsigned long temp = pstate ^ (pstate << 3); | ||
| 1316 | |||
| 1317 | temp |= (pstate << 1); /*PSR_N_BIT |= PSR_Z_BIT */ | ||
| 1318 | return (temp & PSR_N_BIT) != 0; | ||
| 1319 | } | ||
| 1320 | |||
| 1321 | static bool __kprobes __check_al(unsigned long pstate) | ||
| 1322 | { | ||
| 1323 | return true; | ||
| 1324 | } | ||
| 1325 | |||
| 1326 | /* | ||
| 1327 | * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that | ||
| 1328 | * it behaves identically to 0b1110 ("al"). | ||
| 1329 | */ | ||
| 1330 | pstate_check_t * const aarch32_opcode_cond_checks[16] = { | ||
| 1331 | __check_eq, __check_ne, __check_cs, __check_cc, | ||
| 1332 | __check_mi, __check_pl, __check_vs, __check_vc, | ||
| 1333 | __check_hi, __check_ls, __check_ge, __check_lt, | ||
| 1334 | __check_gt, __check_le, __check_al, __check_al | ||
| 1335 | }; | ||
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index b5f063e5eff7..8c57f6496e56 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
| 23 | #include <linux/kdebug.h> | 23 | #include <linux/kdebug.h> |
| 24 | #include <linux/kgdb.h> | 24 | #include <linux/kgdb.h> |
| 25 | #include <linux/kprobes.h> | ||
| 25 | #include <asm/traps.h> | 26 | #include <asm/traps.h> |
| 26 | 27 | ||
| 27 | struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { | 28 | struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { |
| @@ -230,6 +231,7 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr) | |||
| 230 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | 231 | kgdb_handle_exception(1, SIGTRAP, 0, regs); |
| 231 | return 0; | 232 | return 0; |
| 232 | } | 233 | } |
| 234 | NOKPROBE_SYMBOL(kgdb_brk_fn); | ||
| 233 | 235 | ||
| 234 | static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) | 236 | static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) |
| 235 | { | 237 | { |
| @@ -238,12 +240,14 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr) | |||
| 238 | 240 | ||
| 239 | return 0; | 241 | return 0; |
| 240 | } | 242 | } |
| 243 | NOKPROBE_SYMBOL(kgdb_compiled_brk_fn); | ||
| 241 | 244 | ||
| 242 | static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) | 245 | static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr) |
| 243 | { | 246 | { |
| 244 | kgdb_handle_exception(1, SIGTRAP, 0, regs); | 247 | kgdb_handle_exception(1, SIGTRAP, 0, regs); |
| 245 | return 0; | 248 | return 0; |
| 246 | } | 249 | } |
| 250 | NOKPROBE_SYMBOL(kgdb_step_brk_fn); | ||
| 247 | 251 | ||
| 248 | static struct break_hook kgdb_brkpt_hook = { | 252 | static struct break_hook kgdb_brkpt_hook = { |
| 249 | .esr_mask = 0xffffffff, | 253 | .esr_mask = 0xffffffff, |
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
new file mode 100644
index 000000000000..ce06312e3d34
--- /dev/null
+++ b/arch/arm64/kernel/probes/Makefile
| @@ -0,0 +1,3 @@ | |||
| 1 | obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o \ | ||
| 2 | kprobes_trampoline.o \ | ||
| 3 | simulate-insn.o | ||
diff --git a/arch/arm64/kernel/probes/decode-insn.c b/arch/arm64/kernel/probes/decode-insn.c
new file mode 100644
index 000000000000..37e47a9d617e
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.c
| @@ -0,0 +1,174 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/kernel/probes/decode-insn.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Linaro Limited. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/kprobes.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <asm/kprobes.h> | ||
| 20 | #include <asm/insn.h> | ||
| 21 | #include <asm/sections.h> | ||
| 22 | |||
| 23 | #include "decode-insn.h" | ||
| 24 | #include "simulate-insn.h" | ||
| 25 | |||
| 26 | static bool __kprobes aarch64_insn_is_steppable(u32 insn) | ||
| 27 | { | ||
| 28 | /* | ||
| 29 | * Branch instructions will write a new value into the PC which is | ||
| 30 | * likely to be relative to the XOL address and therefore invalid. | ||
| 31 | * Deliberate generation of an exception during stepping is also not | ||
| 32 | * currently safe. Lastly, MSR instructions can do any number of nasty | ||
| 33 | * things we can't handle during single-stepping. | ||
| 34 | */ | ||
| 35 | if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) { | ||
| 36 | if (aarch64_insn_is_branch(insn) || | ||
| 37 | aarch64_insn_is_msr_imm(insn) || | ||
| 38 | aarch64_insn_is_msr_reg(insn) || | ||
| 39 | aarch64_insn_is_exception(insn) || | ||
| 40 | aarch64_insn_is_eret(insn)) | ||
| 41 | return false; | ||
| 42 | |||
| 43 | /* | ||
| 44 | * The MRS instruction may not return a correct value when | ||
| 45 | * executing in the single-stepping environment. We do make one | ||
| 46 | * exception, for reading the DAIF bits. | ||
| 47 | */ | ||
| 48 | if (aarch64_insn_is_mrs(insn)) | ||
| 49 | return aarch64_insn_extract_system_reg(insn) | ||
| 50 | != AARCH64_INSN_SPCLREG_DAIF; | ||
| 51 | |||
| 52 | /* | ||
| 53 | * The HINT instruction is problematic when single-stepping, | ||
| 54 | * except for the NOP case. | ||
| 55 | */ | ||
| 56 | if (aarch64_insn_is_hint(insn)) | ||
| 57 | return aarch64_insn_is_nop(insn); | ||
| 58 | |||
| 59 | return true; | ||
| 60 | } | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Instructions which load PC relative literals are not going to work | ||
| 64 | * when executed from an XOL slot. Instructions doing an exclusive | ||
| 65 | * load/store are not going to complete successfully when single-step | ||
| 66 | * exception handling happens in the middle of the sequence. | ||
| 67 | */ | ||
| 68 | if (aarch64_insn_uses_literal(insn) || | ||
| 69 | aarch64_insn_is_exclusive(insn)) | ||
| 70 | return false; | ||
| 71 | |||
| 72 | return true; | ||
| 73 | } | ||
| 74 | |||
| 75 | /* Return: | ||
| 76 | * INSN_REJECTED If instruction is one not allowed to kprobe, | ||
| 77 | * INSN_GOOD If instruction is supported and uses instruction slot, | ||
| 78 | * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. | ||
| 79 | */ | ||
| 80 | static enum kprobe_insn __kprobes | ||
| 81 | arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
| 82 | { | ||
| 83 | /* | ||
| 84 | * Instructions reading or modifying the PC won't work from the XOL | ||
| 85 | * slot. | ||
| 86 | */ | ||
| 87 | if (aarch64_insn_is_steppable(insn)) | ||
| 88 | return INSN_GOOD; | ||
| 89 | |||
| 90 | if (aarch64_insn_is_bcond(insn)) { | ||
| 91 | asi->handler = simulate_b_cond; | ||
| 92 | } else if (aarch64_insn_is_cbz(insn) || | ||
| 93 | aarch64_insn_is_cbnz(insn)) { | ||
| 94 | asi->handler = simulate_cbz_cbnz; | ||
| 95 | } else if (aarch64_insn_is_tbz(insn) || | ||
| 96 | aarch64_insn_is_tbnz(insn)) { | ||
| 97 | asi->handler = simulate_tbz_tbnz; | ||
| 98 | } else if (aarch64_insn_is_adr_adrp(insn)) { | ||
| 99 | asi->handler = simulate_adr_adrp; | ||
| 100 | } else if (aarch64_insn_is_b(insn) || | ||
| 101 | aarch64_insn_is_bl(insn)) { | ||
| 102 | asi->handler = simulate_b_bl; | ||
| 103 | } else if (aarch64_insn_is_br(insn) || | ||
| 104 | aarch64_insn_is_blr(insn) || | ||
| 105 | aarch64_insn_is_ret(insn)) { | ||
| 106 | asi->handler = simulate_br_blr_ret; | ||
| 107 | } else if (aarch64_insn_is_ldr_lit(insn)) { | ||
| 108 | asi->handler = simulate_ldr_literal; | ||
| 109 | } else if (aarch64_insn_is_ldrsw_lit(insn)) { | ||
| 110 | asi->handler = simulate_ldrsw_literal; | ||
| 111 | } else { | ||
| 112 | /* | ||
| 113 | * Instruction cannot be stepped out-of-line and we don't | ||
| 114 | * (yet) simulate it. | ||
| 115 | */ | ||
| 116 | return INSN_REJECTED; | ||
| 117 | } | ||
| 118 | |||
| 119 | return INSN_GOOD_NO_SLOT; | ||
| 120 | } | ||
| 121 | |||
| 122 | static bool __kprobes | ||
| 123 | is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end) | ||
| 124 | { | ||
| 125 | while (scan_start > scan_end) { | ||
| 126 | /* | ||
| 127 | * atomic region starts from exclusive load and ends with | ||
| 128 | * exclusive store. | ||
| 129 | */ | ||
| 130 | if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start))) | ||
| 131 | return false; | ||
| 132 | else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start))) | ||
| 133 | return true; | ||
| 134 | scan_start--; | ||
| 135 | } | ||
| 136 | |||
| 137 | return false; | ||
| 138 | } | ||
| 139 | |||
| 140 | enum kprobe_insn __kprobes | ||
| 141 | arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi) | ||
| 142 | { | ||
| 143 | enum kprobe_insn decoded; | ||
| 144 | kprobe_opcode_t insn = le32_to_cpu(*addr); | ||
| 145 | kprobe_opcode_t *scan_start = addr - 1; | ||
| 146 | kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE; | ||
| 147 | #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) | ||
| 148 | struct module *mod; | ||
| 149 | #endif | ||
| 150 | |||
| 151 | if (addr >= (kprobe_opcode_t *)_text && | ||
| 152 | scan_end < (kprobe_opcode_t *)_text) | ||
| 153 | scan_end = (kprobe_opcode_t *)_text; | ||
| 154 | #if defined(CONFIG_MODULES) && defined(MODULES_VADDR) | ||
| 155 | else { | ||
| 156 | preempt_disable(); | ||
| 157 | mod = __module_address((unsigned long)addr); | ||
| 158 | if (mod && within_module_init((unsigned long)addr, mod) && | ||
| 159 | !within_module_init((unsigned long)scan_end, mod)) | ||
| 160 | scan_end = (kprobe_opcode_t *)mod->init_layout.base; | ||
| 161 | else if (mod && within_module_core((unsigned long)addr, mod) && | ||
| 162 | !within_module_core((unsigned long)scan_end, mod)) | ||
| 163 | scan_end = (kprobe_opcode_t *)mod->core_layout.base; | ||
| 164 | preempt_enable(); | ||
| 165 | } | ||
| 166 | #endif | ||
| 167 | decoded = arm_probe_decode_insn(insn, asi); | ||
| 168 | |||
| 169 | if (decoded == INSN_REJECTED || | ||
| 170 | is_probed_address_atomic(scan_start, scan_end)) | ||
| 171 | return INSN_REJECTED; | ||
| 172 | |||
| 173 | return decoded; | ||
| 174 | } | ||
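To make the three outcomes concrete: a plain ldr x0, [x1] is steppable and returns INSN_GOOD (it runs out of line from the XOL slot); b.cond, cbz/cbnz, tbz/tbnz, adr/adrp, and literal loads return INSN_GOOD_NO_SLOT and are simulated by the handlers wired up above; an msr, eret, exclusive ldxr/stxr, or any instruction falling inside a load-exclusive/store-exclusive sequence is refused with INSN_REJECTED.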
diff --git a/arch/arm64/kernel/probes/decode-insn.h b/arch/arm64/kernel/probes/decode-insn.h
new file mode 100644
index 000000000000..d438289646a6
--- /dev/null
+++ b/arch/arm64/kernel/probes/decode-insn.h
| @@ -0,0 +1,35 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/kernel/probes/decode-insn.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Linaro Limited. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef _ARM_KERNEL_KPROBES_ARM64_H | ||
| 17 | #define _ARM_KERNEL_KPROBES_ARM64_H | ||
| 18 | |||
| 19 | /* | ||
| 20 | * ARM strongly recommends a limit of 128 bytes between LoadExcl and | ||
| 21 | * StoreExcl instructions in a single thread of execution. So keep the | ||
| 22 | * max atomic context size as 32. | ||
| 23 | */ | ||
| 24 | #define MAX_ATOMIC_CONTEXT_SIZE (128 / sizeof(kprobe_opcode_t)) | ||
| 25 | |||
| 26 | enum kprobe_insn { | ||
| 27 | INSN_REJECTED, | ||
| 28 | INSN_GOOD_NO_SLOT, | ||
| 29 | INSN_GOOD, | ||
| 30 | }; | ||
| 31 | |||
| 32 | enum kprobe_insn __kprobes | ||
| 33 | arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi); | ||
| 34 | |||
| 35 | #endif /* _ARM_KERNEL_KPROBES_ARM64_H */ | ||
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
new file mode 100644
index 000000000000..bf9768588288
--- /dev/null
+++ b/arch/arm64/kernel/probes/kprobes.c
| @@ -0,0 +1,686 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/kernel/probes/kprobes.c | ||
| 3 | * | ||
| 4 | * Kprobes support for ARM64 | ||
| 5 | * | ||
| 6 | * Copyright (C) 2013 Linaro Limited. | ||
| 7 | * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 16 | * General Public License for more details. | ||
| 17 | * | ||
| 18 | */ | ||
| 19 | #include <linux/kasan.h> | ||
| 20 | #include <linux/kernel.h> | ||
| 21 | #include <linux/kprobes.h> | ||
| 22 | #include <linux/module.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | #include <linux/stop_machine.h> | ||
| 25 | #include <linux/stringify.h> | ||
| 26 | #include <asm/traps.h> | ||
| 27 | #include <asm/ptrace.h> | ||
| 28 | #include <asm/cacheflush.h> | ||
| 29 | #include <asm/debug-monitors.h> | ||
| 30 | #include <asm/system_misc.h> | ||
| 31 | #include <asm/insn.h> | ||
| 32 | #include <asm/uaccess.h> | ||
| 33 | #include <asm/irq.h> | ||
| 34 | #include <asm-generic/sections.h> | ||
| 35 | |||
| 36 | #include "decode-insn.h" | ||
| 37 | |||
| 38 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | ||
| 39 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | ||
| 40 | |||
| 41 | static void __kprobes | ||
| 42 | post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *); | ||
| 43 | |||
| 44 | static inline unsigned long min_stack_size(unsigned long addr) | ||
| 45 | { | ||
| 46 | unsigned long size; | ||
| 47 | |||
| 48 | if (on_irq_stack(addr, raw_smp_processor_id())) | ||
| 49 | size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr; | ||
| 50 | else | ||
| 51 | size = (unsigned long)current_thread_info() + THREAD_START_SP - addr; | ||
| 52 | |||
| 53 | return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack)); | ||
| 54 | } | ||
| 55 | |||
| 56 | static void __kprobes arch_prepare_ss_slot(struct kprobe *p) | ||
| 57 | { | ||
| 58 | /* prepare insn slot */ | ||
| 59 | p->ainsn.insn[0] = cpu_to_le32(p->opcode); | ||
| 60 | |||
| 61 | flush_icache_range((uintptr_t) (p->ainsn.insn), | ||
| 62 | (uintptr_t) (p->ainsn.insn) + | ||
| 63 | MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | ||
| 64 | |||
| 65 | /* | ||
| 66 | * The return address needs to be restored after stepping out-of-line (xol). | ||
| 67 | */ | ||
| 68 | p->ainsn.restore = (unsigned long) p->addr + | ||
| 69 | sizeof(kprobe_opcode_t); | ||
| 70 | } | ||
| 71 | |||
| 72 | static void __kprobes arch_prepare_simulate(struct kprobe *p) | ||
| 73 | { | ||
| 74 | /* This instruction is not executed out-of-line (xol). No need to adjust the PC */ | ||
| 75 | p->ainsn.restore = 0; | ||
| 76 | } | ||
| 77 | |||
| 78 | static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs) | ||
| 79 | { | ||
| 80 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 81 | |||
| 82 | if (p->ainsn.handler) | ||
| 83 | p->ainsn.handler((u32)p->opcode, (long)p->addr, regs); | ||
| 84 | |||
| 85 | /* single step simulated, now go for post processing */ | ||
| 86 | post_kprobe_handler(kcb, regs); | ||
| 87 | } | ||
| 88 | |||
| 89 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | ||
| 90 | { | ||
| 91 | unsigned long probe_addr = (unsigned long)p->addr; | ||
| 92 | extern char __start_rodata[]; | ||
| 93 | extern char __end_rodata[]; | ||
| 94 | |||
| 95 | if (probe_addr & 0x3) | ||
| 96 | return -EINVAL; | ||
| 97 | |||
| 98 | /* copy instruction */ | ||
| 99 | p->opcode = le32_to_cpu(*p->addr); | ||
| 100 | |||
| 101 | if (in_exception_text(probe_addr)) | ||
| 102 | return -EINVAL; | ||
| 103 | if (probe_addr >= (unsigned long) __start_rodata && | ||
| 104 | probe_addr <= (unsigned long) __end_rodata) | ||
| 105 | return -EINVAL; | ||
| 106 | |||
| 107 | /* decode instruction */ | ||
| 108 | switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) { | ||
| 109 | case INSN_REJECTED: /* insn not supported */ | ||
| 110 | return -EINVAL; | ||
| 111 | |||
| 112 | case INSN_GOOD_NO_SLOT: /* insn need simulation */ | ||
| 113 | p->ainsn.insn = NULL; | ||
| 114 | break; | ||
| 115 | |||
| 116 | case INSN_GOOD: /* instruction uses slot */ | ||
| 117 | p->ainsn.insn = get_insn_slot(); | ||
| 118 | if (!p->ainsn.insn) | ||
| 119 | return -ENOMEM; | ||
| 120 | break; | ||
| 121 | } | ||
| 122 | |||
| 123 | /* prepare the instruction */ | ||
| 124 | if (p->ainsn.insn) | ||
| 125 | arch_prepare_ss_slot(p); | ||
| 126 | else | ||
| 127 | arch_prepare_simulate(p); | ||
| 128 | |||
| 129 | return 0; | ||
| 130 | } | ||
| 131 | |||
| 132 | static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode) | ||
| 133 | { | ||
| 134 | void *addrs[1]; | ||
| 135 | u32 insns[1]; | ||
| 136 | |||
| 137 | addrs[0] = (void *)addr; | ||
| 138 | insns[0] = (u32)opcode; | ||
| 139 | |||
| 140 | return aarch64_insn_patch_text(addrs, insns, 1); | ||
| 141 | } | ||
| 142 | |||
| 143 | /* arm kprobe: install breakpoint in text */ | ||
| 144 | void __kprobes arch_arm_kprobe(struct kprobe *p) | ||
| 145 | { | ||
| 146 | patch_text(p->addr, BRK64_OPCODE_KPROBES); | ||
| 147 | } | ||
| 148 | |||
| 149 | /* disarm kprobe: remove breakpoint from text */ | ||
| 150 | void __kprobes arch_disarm_kprobe(struct kprobe *p) | ||
| 151 | { | ||
| 152 | patch_text(p->addr, p->opcode); | ||
| 153 | } | ||
| 154 | |||
| 155 | void __kprobes arch_remove_kprobe(struct kprobe *p) | ||
| 156 | { | ||
| 157 | if (p->ainsn.insn) { | ||
| 158 | free_insn_slot(p->ainsn.insn, 0); | ||
| 159 | p->ainsn.insn = NULL; | ||
| 160 | } | ||
| 161 | } | ||
| 162 | |||
| 163 | static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
| 164 | { | ||
| 165 | kcb->prev_kprobe.kp = kprobe_running(); | ||
| 166 | kcb->prev_kprobe.status = kcb->kprobe_status; | ||
| 167 | } | ||
| 168 | |||
| 169 | static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
| 170 | { | ||
| 171 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); | ||
| 172 | kcb->kprobe_status = kcb->prev_kprobe.status; | ||
| 173 | } | ||
| 174 | |||
| 175 | static void __kprobes set_current_kprobe(struct kprobe *p) | ||
| 176 | { | ||
| 177 | __this_cpu_write(current_kprobe, p); | ||
| 178 | } | ||
| 179 | |||
| 180 | /* | ||
| 181 | * The D-flag (Debug mask) is set (masked) upon debug exception entry. | ||
| 182 | * Kprobes needs to clear (unmask) the D-flag -ONLY- in the case of a recursive | ||
| 183 | * probe, i.e. when a probe is hit from kprobe handler context while | ||
| 184 | * executing the pre/post handlers. In this case we return with the | ||
| 185 | * D-flag cleared so that single-stepping can be carried out. | ||
| 186 | * | ||
| 187 | * Leave D-flag set in all other cases. | ||
| 188 | */ | ||
| 189 | static void __kprobes | ||
| 190 | spsr_set_debug_flag(struct pt_regs *regs, int mask) | ||
| 191 | { | ||
| 192 | unsigned long spsr = regs->pstate; | ||
| 193 | |||
| 194 | if (mask) | ||
| 195 | spsr |= PSR_D_BIT; | ||
| 196 | else | ||
| 197 | spsr &= ~PSR_D_BIT; | ||
| 198 | |||
| 199 | regs->pstate = spsr; | ||
| 200 | } | ||
| 201 | |||
| 202 | /* | ||
| 203 | * Interrupts need to be disabled before single-step mode is set, and not | ||
| 204 | * reenabled until after single-step mode ends. | ||
| 205 | * Without disabling interrupts on the local CPU, there is a chance that an | ||
| 206 | * interrupt occurs between the exception return and the start of the | ||
| 207 | * out-of-line single-step, which would result in wrongly single-stepping | ||
| 208 | * into the interrupt handler. | ||
| 209 | */ | ||
| 210 | static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb, | ||
| 211 | struct pt_regs *regs) | ||
| 212 | { | ||
| 213 | kcb->saved_irqflag = regs->pstate; | ||
| 214 | regs->pstate |= PSR_I_BIT; | ||
| 215 | } | ||
| 216 | |||
| 217 | static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb, | ||
| 218 | struct pt_regs *regs) | ||
| 219 | { | ||
| 220 | if (kcb->saved_irqflag & PSR_I_BIT) | ||
| 221 | regs->pstate |= PSR_I_BIT; | ||
| 222 | else | ||
| 223 | regs->pstate &= ~PSR_I_BIT; | ||
| 224 | } | ||
| 225 | |||
| 226 | static void __kprobes | ||
| 227 | set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr) | ||
| 228 | { | ||
| 229 | kcb->ss_ctx.ss_pending = true; | ||
| 230 | kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t); | ||
| 231 | } | ||
| 232 | |||
| 233 | static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb) | ||
| 234 | { | ||
| 235 | kcb->ss_ctx.ss_pending = false; | ||
| 236 | kcb->ss_ctx.match_addr = 0; | ||
| 237 | } | ||
| 238 | |||
| 239 | static void __kprobes setup_singlestep(struct kprobe *p, | ||
| 240 | struct pt_regs *regs, | ||
| 241 | struct kprobe_ctlblk *kcb, int reenter) | ||
| 242 | { | ||
| 243 | unsigned long slot; | ||
| 244 | |||
| 245 | if (reenter) { | ||
| 246 | save_previous_kprobe(kcb); | ||
| 247 | set_current_kprobe(p); | ||
| 248 | kcb->kprobe_status = KPROBE_REENTER; | ||
| 249 | } else { | ||
| 250 | kcb->kprobe_status = KPROBE_HIT_SS; | ||
| 251 | } | ||
| 252 | |||
| 253 | |||
| 254 | if (p->ainsn.insn) { | ||
| 255 | /* prepare for single stepping */ | ||
| 256 | slot = (unsigned long)p->ainsn.insn; | ||
| 257 | |||
| 258 | set_ss_context(kcb, slot); /* mark pending ss */ | ||
| 259 | |||
| 260 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
| 261 | spsr_set_debug_flag(regs, 0); | ||
| 262 | else | ||
| 263 | WARN_ON(regs->pstate & PSR_D_BIT); | ||
| 264 | |||
| 265 | /* IRQs and single stepping do not mix well. */ | ||
| 266 | kprobes_save_local_irqflag(kcb, regs); | ||
| 267 | kernel_enable_single_step(regs); | ||
| 268 | instruction_pointer_set(regs, slot); | ||
| 269 | } else { | ||
| 270 | /* insn simulation */ | ||
| 271 | arch_simulate_insn(p, regs); | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 275 | static int __kprobes reenter_kprobe(struct kprobe *p, | ||
| 276 | struct pt_regs *regs, | ||
| 277 | struct kprobe_ctlblk *kcb) | ||
| 278 | { | ||
| 279 | switch (kcb->kprobe_status) { | ||
| 280 | case KPROBE_HIT_SSDONE: | ||
| 281 | case KPROBE_HIT_ACTIVE: | ||
| 282 | kprobes_inc_nmissed_count(p); | ||
| 283 | setup_singlestep(p, regs, kcb, 1); | ||
| 284 | break; | ||
| 285 | case KPROBE_HIT_SS: | ||
| 286 | case KPROBE_REENTER: | ||
| 287 | pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr); | ||
| 288 | dump_kprobe(p); | ||
| 289 | BUG(); | ||
| 290 | break; | ||
| 291 | default: | ||
| 292 | WARN_ON(1); | ||
| 293 | return 0; | ||
| 294 | } | ||
| 295 | |||
| 296 | return 1; | ||
| 297 | } | ||
| 298 | |||
| 299 | static void __kprobes | ||
| 300 | post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs) | ||
| 301 | { | ||
| 302 | struct kprobe *cur = kprobe_running(); | ||
| 303 | |||
| 304 | if (!cur) | ||
| 305 | return; | ||
| 306 | |||
| 307 | /* return addr restore if non-branching insn */ | ||
| 308 | if (cur->ainsn.restore != 0) | ||
| 309 | instruction_pointer_set(regs, cur->ainsn.restore); | ||
| 310 | |||
| 311 | /* restore back original saved kprobe variables and continue */ | ||
| 312 | if (kcb->kprobe_status == KPROBE_REENTER) { | ||
| 313 | restore_previous_kprobe(kcb); | ||
| 314 | return; | ||
| 315 | } | ||
| 316 | /* call post handler */ | ||
| 317 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
| 318 | if (cur->post_handler) { | ||
| 319 | /* The post_handler can hit a breakpoint and single step | ||
| 320 | * again, so we enable the D-flag for the recursive exception. | ||
| 321 | */ | ||
| 322 | cur->post_handler(cur, regs, 0); | ||
| 323 | } | ||
| 324 | |||
| 325 | reset_current_kprobe(); | ||
| 326 | } | ||
| 327 | |||
| 328 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) | ||
| 329 | { | ||
| 330 | struct kprobe *cur = kprobe_running(); | ||
| 331 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 332 | |||
| 333 | switch (kcb->kprobe_status) { | ||
| 334 | case KPROBE_HIT_SS: | ||
| 335 | case KPROBE_REENTER: | ||
| 336 | /* | ||
| 337 | * We are here because the instruction being single | ||
| 338 | * stepped caused a page fault. We reset the current | ||
| 339 | * kprobe, point the ip back to the probe address | ||
| 340 | * and allow the page fault handler to continue as a | ||
| 341 | * normal page fault. | ||
| 342 | */ | ||
| 343 | instruction_pointer_set(regs, (unsigned long) cur->addr); | ||
| 344 | if (!instruction_pointer(regs)) | ||
| 345 | BUG(); | ||
| 346 | |||
| 347 | kernel_disable_single_step(); | ||
| 348 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
| 349 | spsr_set_debug_flag(regs, 1); | ||
| 350 | |||
| 351 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
| 352 | restore_previous_kprobe(kcb); | ||
| 353 | else | ||
| 354 | reset_current_kprobe(); | ||
| 355 | |||
| 356 | break; | ||
| 357 | case KPROBE_HIT_ACTIVE: | ||
| 358 | case KPROBE_HIT_SSDONE: | ||
| 359 | /* | ||
| 360 | * We increment the nmissed count for accounting; | ||
| 361 | * we could also use the npre/npostfault counts to account for | ||
| 362 | * these specific fault cases. | ||
| 363 | */ | ||
| 364 | kprobes_inc_nmissed_count(cur); | ||
| 365 | |||
| 366 | /* | ||
| 367 | * We come here because instructions in the pre/post | ||
| 368 | * handler caused the page fault. This could happen | ||
| 369 | * if the handler tries to access user space via | ||
| 370 | * copy_from_user(), get_user() etc. Let the | ||
| 371 | * user-specified handler try to fix it first. | ||
| 372 | */ | ||
| 373 | if (cur->fault_handler && cur->fault_handler(cur, regs, fsr)) | ||
| 374 | return 1; | ||
| 375 | |||
| 376 | /* | ||
| 377 | * In case the user-specified fault handler returned | ||
| 378 | * zero, try to fix up. | ||
| 379 | */ | ||
| 380 | if (fixup_exception(regs)) | ||
| 381 | return 1; | ||
| 382 | } | ||
| 383 | return 0; | ||
| 384 | } | ||
| 385 | |||
| 386 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | ||
| 387 | unsigned long val, void *data) | ||
| 388 | { | ||
| 389 | return NOTIFY_DONE; | ||
| 390 | } | ||
| 391 | |||
| 392 | static void __kprobes kprobe_handler(struct pt_regs *regs) | ||
| 393 | { | ||
| 394 | struct kprobe *p, *cur_kprobe; | ||
| 395 | struct kprobe_ctlblk *kcb; | ||
| 396 | unsigned long addr = instruction_pointer(regs); | ||
| 397 | |||
| 398 | kcb = get_kprobe_ctlblk(); | ||
| 399 | cur_kprobe = kprobe_running(); | ||
| 400 | |||
| 401 | p = get_kprobe((kprobe_opcode_t *) addr); | ||
| 402 | |||
| 403 | if (p) { | ||
| 404 | if (cur_kprobe) { | ||
| 405 | if (reenter_kprobe(p, regs, kcb)) | ||
| 406 | return; | ||
| 407 | } else { | ||
| 408 | /* Probe hit */ | ||
| 409 | set_current_kprobe(p); | ||
| 410 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
| 411 | |||
| 412 | /* | ||
| 413 | * If we have no pre-handler or it returned 0, we | ||
| 414 | * continue with normal processing. If we have a | ||
| 415 | * pre-handler and it returned non-zero, it prepped | ||
| 416 | * for calling the break_handler below on re-entry, | ||
| 417 | * so get out doing nothing more here. | ||
| 418 | * | ||
| 419 | * The pre_handler can hit a breakpoint and single step through | ||
| 420 | * it before returning, so keep the PSTATE D-flag enabled until | ||
| 421 | * the pre_handler returns. | ||
| 422 | */ | ||
| 423 | if (!p->pre_handler || !p->pre_handler(p, regs)) { | ||
| 424 | setup_singlestep(p, regs, kcb, 0); | ||
| 425 | return; | ||
| 426 | } | ||
| 427 | } | ||
| 428 | } else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) == | ||
| 429 | BRK64_OPCODE_KPROBES) && cur_kprobe) { | ||
| 430 | /* We probably hit a jprobe. Call its break handler. */ | ||
| 431 | if (cur_kprobe->break_handler && | ||
| 432 | cur_kprobe->break_handler(cur_kprobe, regs)) { | ||
| 433 | setup_singlestep(cur_kprobe, regs, kcb, 0); | ||
| 434 | return; | ||
| 435 | } | ||
| 436 | } | ||
| 437 | /* | ||
| 438 | * The breakpoint instruction was removed right | ||
| 439 | * after we hit it. Another cpu has removed | ||
| 440 | * either a probepoint or a debugger breakpoint | ||
| 441 | * at this address. In either case, no further | ||
| 442 | * handling of this interrupt is appropriate. | ||
| 443 | * Return to the original instruction and continue. | ||
| 444 | */ | ||
| 445 | } | ||
| 446 | |||
| 447 | static int __kprobes | ||
| 448 | kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr) | ||
| 449 | { | ||
| 450 | if ((kcb->ss_ctx.ss_pending) | ||
| 451 | && (kcb->ss_ctx.match_addr == addr)) { | ||
| 452 | clear_ss_context(kcb); /* clear pending ss */ | ||
| 453 | return DBG_HOOK_HANDLED; | ||
| 454 | } | ||
| 455 | /* not ours, kprobes should ignore it */ | ||
| 456 | return DBG_HOOK_ERROR; | ||
| 457 | } | ||
| 458 | |||
| 459 | int __kprobes | ||
| 460 | kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr) | ||
| 461 | { | ||
| 462 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 463 | int retval; | ||
| 464 | |||
| 465 | /* return error if this is not our step */ | ||
| 466 | retval = kprobe_ss_hit(kcb, instruction_pointer(regs)); | ||
| 467 | |||
| 468 | if (retval == DBG_HOOK_HANDLED) { | ||
| 469 | kprobes_restore_local_irqflag(kcb, regs); | ||
| 470 | kernel_disable_single_step(); | ||
| 471 | |||
| 472 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
| 473 | spsr_set_debug_flag(regs, 1); | ||
| 474 | |||
| 475 | post_kprobe_handler(kcb, regs); | ||
| 476 | } | ||
| 477 | |||
| 478 | return retval; | ||
| 479 | } | ||
| 480 | |||
| 481 | int __kprobes | ||
| 482 | kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr) | ||
| 483 | { | ||
| 484 | kprobe_handler(regs); | ||
| 485 | return DBG_HOOK_HANDLED; | ||
| 486 | } | ||
| 487 | |||
| 488 | int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | ||
| 489 | { | ||
| 490 | struct jprobe *jp = container_of(p, struct jprobe, kp); | ||
| 491 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 492 | long stack_ptr = kernel_stack_pointer(regs); | ||
| 493 | |||
| 494 | kcb->jprobe_saved_regs = *regs; | ||
| 495 | /* | ||
| 496 | * As Linus pointed out, gcc assumes that the callee | ||
| 497 | * owns the argument space and could overwrite it, e.g. | ||
| 498 | * tailcall optimization. So, to be absolutely safe | ||
| 499 | * we also save and restore enough stack bytes to cover | ||
| 500 | * the argument area. | ||
| 501 | */ | ||
| 502 | kasan_disable_current(); | ||
| 503 | memcpy(kcb->jprobes_stack, (void *)stack_ptr, | ||
| 504 | min_stack_size(stack_ptr)); | ||
| 505 | kasan_enable_current(); | ||
| 506 | |||
| 507 | instruction_pointer_set(regs, (unsigned long) jp->entry); | ||
| 508 | preempt_disable(); | ||
| 509 | pause_graph_tracing(); | ||
| 510 | return 1; | ||
| 511 | } | ||
| 512 | |||
| 513 | void __kprobes jprobe_return(void) | ||
| 514 | { | ||
| 515 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 516 | |||
| 517 | /* | ||
| 518 | * The jprobe handler returns by entering a break exception, | ||
| 519 | * encoded the same as a kprobe, but with the following conditions: | ||
| 520 | * - a special PC to distinguish it from the other kprobes. | ||
| 521 | * - the stack address restored to the originally saved pt_regs. | ||
| 522 | */ | ||
| 523 | asm volatile(" mov sp, %0 \n" | ||
| 524 | "jprobe_return_break: brk %1 \n" | ||
| 525 | : | ||
| 526 | : "r" (kcb->jprobe_saved_regs.sp), | ||
| 527 | "I" (BRK64_ESR_KPROBES) | ||
| 528 | : "memory"); | ||
| 529 | |||
| 530 | unreachable(); | ||
| 531 | } | ||
| 532 | |||
| 533 | int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | ||
| 534 | { | ||
| 535 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
| 536 | long stack_addr = kcb->jprobe_saved_regs.sp; | ||
| 537 | long orig_sp = kernel_stack_pointer(regs); | ||
| 538 | struct jprobe *jp = container_of(p, struct jprobe, kp); | ||
| 539 | extern const char jprobe_return_break[]; | ||
| 540 | |||
| 541 | if (instruction_pointer(regs) != (u64) jprobe_return_break) | ||
| 542 | return 0; | ||
| 543 | |||
| 544 | if (orig_sp != stack_addr) { | ||
| 545 | struct pt_regs *saved_regs = | ||
| 546 | (struct pt_regs *)kcb->jprobe_saved_regs.sp; | ||
| 547 | pr_err("current sp %lx does not match saved sp %lx\n", | ||
| 548 | orig_sp, stack_addr); | ||
| 549 | pr_err("Saved registers for jprobe %p\n", jp); | ||
| 550 | show_regs(saved_regs); | ||
| 551 | pr_err("Current registers\n"); | ||
| 552 | show_regs(regs); | ||
| 553 | BUG(); | ||
| 554 | } | ||
| 555 | unpause_graph_tracing(); | ||
| 556 | *regs = kcb->jprobe_saved_regs; | ||
| 557 | kasan_disable_current(); | ||
| 558 | memcpy((void *)stack_addr, kcb->jprobes_stack, | ||
| 559 | min_stack_size(stack_addr)); | ||
| 560 | kasan_enable_current(); | ||
| 561 | preempt_enable_no_resched(); | ||
| 562 | return 1; | ||
| 563 | } | ||
| 564 | |||
| 565 | bool arch_within_kprobe_blacklist(unsigned long addr) | ||
| 566 | { | ||
| 567 | extern char __idmap_text_start[], __idmap_text_end[]; | ||
| 568 | extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; | ||
| 569 | |||
| 570 | if ((addr >= (unsigned long)__kprobes_text_start && | ||
| 571 | addr < (unsigned long)__kprobes_text_end) || | ||
| 572 | (addr >= (unsigned long)__entry_text_start && | ||
| 573 | addr < (unsigned long)__entry_text_end) || | ||
| 574 | (addr >= (unsigned long)__idmap_text_start && | ||
| 575 | addr < (unsigned long)__idmap_text_end) || | ||
| 576 | !!search_exception_tables(addr)) | ||
| 577 | return true; | ||
| 578 | |||
| 579 | if (!is_kernel_in_hyp_mode()) { | ||
| 580 | if ((addr >= (unsigned long)__hyp_text_start && | ||
| 581 | addr < (unsigned long)__hyp_text_end) || | ||
| 582 | (addr >= (unsigned long)__hyp_idmap_text_start && | ||
| 583 | addr < (unsigned long)__hyp_idmap_text_end)) | ||
| 584 | return true; | ||
| 585 | } | ||
| 586 | |||
| 587 | return false; | ||
| 588 | } | ||
| 589 | |||
| 590 | void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs) | ||
| 591 | { | ||
| 592 | struct kretprobe_instance *ri = NULL; | ||
| 593 | struct hlist_head *head, empty_rp; | ||
| 594 | struct hlist_node *tmp; | ||
| 595 | unsigned long flags, orig_ret_address = 0; | ||
| 596 | unsigned long trampoline_address = | ||
| 597 | (unsigned long)&kretprobe_trampoline; | ||
| 598 | kprobe_opcode_t *correct_ret_addr = NULL; | ||
| 599 | |||
| 600 | INIT_HLIST_HEAD(&empty_rp); | ||
| 601 | kretprobe_hash_lock(current, &head, &flags); | ||
| 602 | |||
| 603 | /* | ||
| 604 | * It is possible to have multiple instances associated with a given | ||
| 605 | * task either because multiple functions in the call path have | ||
| 606 | * return probes installed on them, and/or more than one | ||
| 607 | * return probe was registered for a target function. | ||
| 608 | * | ||
| 609 | * We can handle this because: | ||
| 610 | * - instances are always pushed into the head of the list | ||
| 611 | * - when multiple return probes are registered for the same | ||
| 612 | * function, the (chronologically) first instance's ret_addr | ||
| 613 | * will be the real return address, and all the rest will | ||
| 614 | * point to kretprobe_trampoline. | ||
| 615 | */ | ||
| 616 | hlist_for_each_entry_safe(ri, tmp, head, hlist) { | ||
| 617 | if (ri->task != current) | ||
| 618 | /* another task is sharing our hash bucket */ | ||
| 619 | continue; | ||
| 620 | |||
| 621 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
| 622 | |||
| 623 | if (orig_ret_address != trampoline_address) | ||
| 624 | /* | ||
| 625 | * This is the real return address. Any other | ||
| 626 | * instances associated with this task are for | ||
| 627 | * other calls deeper on the call stack | ||
| 628 | */ | ||
| 629 | break; | ||
| 630 | } | ||
| 631 | |||
| 632 | kretprobe_assert(ri, orig_ret_address, trampoline_address); | ||
| 633 | |||
| 634 | correct_ret_addr = ri->ret_addr; | ||
| 635 | hlist_for_each_entry_safe(ri, tmp, head, hlist) { | ||
| 636 | if (ri->task != current) | ||
| 637 | /* another task is sharing our hash bucket */ | ||
| 638 | continue; | ||
| 639 | |||
| 640 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
| 641 | if (ri->rp && ri->rp->handler) { | ||
| 642 | __this_cpu_write(current_kprobe, &ri->rp->kp); | ||
| 643 | get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE; | ||
| 644 | ri->ret_addr = correct_ret_addr; | ||
| 645 | ri->rp->handler(ri, regs); | ||
| 646 | __this_cpu_write(current_kprobe, NULL); | ||
| 647 | } | ||
| 648 | |||
| 649 | recycle_rp_inst(ri, &empty_rp); | ||
| 650 | |||
| 651 | if (orig_ret_address != trampoline_address) | ||
| 652 | /* | ||
| 653 | * This is the real return address. Any other | ||
| 654 | * instances associated with this task are for | ||
| 655 | * other calls deeper on the call stack | ||
| 656 | */ | ||
| 657 | break; | ||
| 658 | } | ||
| 659 | |||
| 660 | kretprobe_hash_unlock(current, &flags); | ||
| 661 | |||
| 662 | hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { | ||
| 663 | hlist_del(&ri->hlist); | ||
| 664 | kfree(ri); | ||
| 665 | } | ||
| 666 | return (void *)orig_ret_address; | ||
| 667 | } | ||
| 668 | |||
| 669 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | ||
| 670 | struct pt_regs *regs) | ||
| 671 | { | ||
| 672 | ri->ret_addr = (kprobe_opcode_t *)regs->regs[30]; | ||
| 673 | |||
| 674 | /* replace return addr (x30) with trampoline */ | ||
| 675 | regs->regs[30] = (long)&kretprobe_trampoline; | ||
| 676 | } | ||
| 677 | |||
| 678 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | ||
| 679 | { | ||
| 680 | return 0; | ||
| 681 | } | ||
| 682 | |||
| 683 | int __init arch_init_kprobes(void) | ||
| 684 | { | ||
| 685 | return 0; | ||
| 686 | } | ||
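With HAVE_KPROBES and HAVE_KRETPROBES now selected, the generic `<linux/kprobes.h>` API becomes usable on arm64. A minimal client-module sketch in the style of samples/kprobes; the probed symbol and messages are illustrative, only the registration calls and handler signatures are the standard API:

```c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Probed symbol is illustrative; any non-blacklisted kernel function works. */
static struct kprobe kp = {
	.symbol_name	= "_do_fork",
};

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre: pc = 0x%lx, pstate = 0x%lx\n",
		(long)regs->pc, (long)regs->pstate);
	return 0;	/* 0: continue with single-step/simulation */
}

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* At function return, x0 holds the return value. */
	pr_info("ret: x0 = 0x%llx\n", regs->regs[0]);
	return 0;
}

static struct kretprobe rp = {
	.kp.symbol_name	= "_do_fork",
	.handler	= ret_handler,
	.maxactive	= 16,
};

static int __init probe_init(void)
{
	int ret;

	kp.pre_handler = handler_pre;
	ret = register_kprobe(&kp);
	if (ret < 0)
		return ret;

	ret = register_kretprobe(&rp);
	if (ret < 0)
		unregister_kprobe(&kp);
	return ret;
}

static void __exit probe_exit(void)
{
	unregister_kretprobe(&rp);
	unregister_kprobe(&kp);
}

module_init(probe_init);
module_exit(probe_exit);
MODULE_LICENSE("GPL");
```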
diff --git a/arch/arm64/kernel/probes/kprobes_trampoline.S b/arch/arm64/kernel/probes/kprobes_trampoline.S new file mode 100644 index 000000000000..5d6e7f14638c --- /dev/null +++ b/arch/arm64/kernel/probes/kprobes_trampoline.S | |||
| @@ -0,0 +1,81 @@ | |||
| 1 | /* | ||
| 2 | * trampoline entry and return code for kretprobes. | ||
| 3 | */ | ||
| 4 | |||
| 5 | #include <linux/linkage.h> | ||
| 6 | #include <asm/asm-offsets.h> | ||
| 7 | #include <asm/assembler.h> | ||
| 8 | |||
| 9 | .text | ||
| 10 | |||
| 11 | .macro save_all_base_regs | ||
| 12 | stp x0, x1, [sp, #S_X0] | ||
| 13 | stp x2, x3, [sp, #S_X2] | ||
| 14 | stp x4, x5, [sp, #S_X4] | ||
| 15 | stp x6, x7, [sp, #S_X6] | ||
| 16 | stp x8, x9, [sp, #S_X8] | ||
| 17 | stp x10, x11, [sp, #S_X10] | ||
| 18 | stp x12, x13, [sp, #S_X12] | ||
| 19 | stp x14, x15, [sp, #S_X14] | ||
| 20 | stp x16, x17, [sp, #S_X16] | ||
| 21 | stp x18, x19, [sp, #S_X18] | ||
| 22 | stp x20, x21, [sp, #S_X20] | ||
| 23 | stp x22, x23, [sp, #S_X22] | ||
| 24 | stp x24, x25, [sp, #S_X24] | ||
| 25 | stp x26, x27, [sp, #S_X26] | ||
| 26 | stp x28, x29, [sp, #S_X28] | ||
| 27 | add x0, sp, #S_FRAME_SIZE | ||
| 28 | stp lr, x0, [sp, #S_LR] | ||
| 29 | /* | ||
| 30 | * Construct a useful saved PSTATE | ||
| 31 | */ | ||
| 32 | mrs x0, nzcv | ||
| 33 | mrs x1, daif | ||
| 34 | orr x0, x0, x1 | ||
| 35 | mrs x1, CurrentEL | ||
| 36 | orr x0, x0, x1 | ||
| 37 | mrs x1, SPSel | ||
| 38 | orr x0, x0, x1 | ||
| 39 | stp xzr, x0, [sp, #S_PC] | ||
| 40 | .endm | ||
| 41 | |||
| 42 | .macro restore_all_base_regs | ||
| 43 | ldr x0, [sp, #S_PSTATE] | ||
| 44 | and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT) | ||
| 45 | msr nzcv, x0 | ||
| 46 | ldp x0, x1, [sp, #S_X0] | ||
| 47 | ldp x2, x3, [sp, #S_X2] | ||
| 48 | ldp x4, x5, [sp, #S_X4] | ||
| 49 | ldp x6, x7, [sp, #S_X6] | ||
| 50 | ldp x8, x9, [sp, #S_X8] | ||
| 51 | ldp x10, x11, [sp, #S_X10] | ||
| 52 | ldp x12, x13, [sp, #S_X12] | ||
| 53 | ldp x14, x15, [sp, #S_X14] | ||
| 54 | ldp x16, x17, [sp, #S_X16] | ||
| 55 | ldp x18, x19, [sp, #S_X18] | ||
| 56 | ldp x20, x21, [sp, #S_X20] | ||
| 57 | ldp x22, x23, [sp, #S_X22] | ||
| 58 | ldp x24, x25, [sp, #S_X24] | ||
| 59 | ldp x26, x27, [sp, #S_X26] | ||
| 60 | ldp x28, x29, [sp, #S_X28] | ||
| 61 | .endm | ||
| 62 | |||
| 63 | ENTRY(kretprobe_trampoline) | ||
| 64 | sub sp, sp, #S_FRAME_SIZE | ||
| 65 | |||
| 66 | save_all_base_regs | ||
| 67 | |||
| 68 | mov x0, sp | ||
| 69 | bl trampoline_probe_handler | ||
| 70 | /* | ||
| 71 | * Replace trampoline address in lr with actual orig_ret_addr return | ||
| 72 | * address. | ||
| 73 | */ | ||
| 74 | mov lr, x0 | ||
| 75 | |||
| 76 | restore_all_base_regs | ||
| 77 | |||
| 78 | add sp, sp, #S_FRAME_SIZE | ||
| 79 | ret | ||
| 80 | |||
| 81 | ENDPROC(kretprobe_trampoline) | ||
diff --git a/arch/arm64/kernel/probes/simulate-insn.c b/arch/arm64/kernel/probes/simulate-insn.c new file mode 100644 index 000000000000..8977ce9d009d --- /dev/null +++ b/arch/arm64/kernel/probes/simulate-insn.c | |||
| @@ -0,0 +1,217 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/kernel/probes/simulate-insn.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Linaro Limited. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/kprobes.h> | ||
| 18 | |||
| 19 | #include "simulate-insn.h" | ||
| 20 | |||
| 21 | #define sign_extend(x, signbit) \ | ||
| 22 | ((x) | (0 - ((x) & (1 << (signbit))))) | ||
| 23 | |||
| 24 | #define bbl_displacement(insn) \ | ||
| 25 | sign_extend(((insn) & 0x3ffffff) << 2, 27) | ||
| 26 | |||
| 27 | #define bcond_displacement(insn) \ | ||
| 28 | sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) | ||
| 29 | |||
| 30 | #define cbz_displacement(insn) \ | ||
| 31 | sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) | ||
| 32 | |||
| 33 | #define tbz_displacement(insn) \ | ||
| 34 | sign_extend(((insn >> 5) & 0x3fff) << 2, 15) | ||
| 35 | |||
| 36 | #define ldr_displacement(insn) \ | ||
| 37 | sign_extend(((insn >> 5) & 0x7ffff) << 2, 20) | ||
| 38 | |||
| 39 | static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val) | ||
| 40 | { | ||
| 41 | if (reg < 31) | ||
| 42 | regs->regs[reg] = val; | ||
| 43 | } | ||
| 44 | |||
| 45 | static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val) | ||
| 46 | { | ||
| 47 | if (reg < 31) | ||
| 48 | regs->regs[reg] = lower_32_bits(val); | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline u64 get_x_reg(struct pt_regs *regs, int reg) | ||
| 52 | { | ||
| 53 | if (reg < 31) | ||
| 54 | return regs->regs[reg]; | ||
| 55 | else | ||
| 56 | return 0; | ||
| 57 | } | ||
| 58 | |||
| 59 | static inline u32 get_w_reg(struct pt_regs *regs, int reg) | ||
| 60 | { | ||
| 61 | if (reg < 31) | ||
| 62 | return lower_32_bits(regs->regs[reg]); | ||
| 63 | else | ||
| 64 | return 0; | ||
| 65 | } | ||
| 66 | |||
| 67 | static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs) | ||
| 68 | { | ||
| 69 | int xn = opcode & 0x1f; | ||
| 70 | |||
| 71 | return (opcode & (1 << 31)) ? | ||
| 72 | (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0); | ||
| 73 | } | ||
| 74 | |||
| 75 | static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs) | ||
| 76 | { | ||
| 77 | int xn = opcode & 0x1f; | ||
| 78 | |||
| 79 | return (opcode & (1 << 31)) ? | ||
| 80 | (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0); | ||
| 81 | } | ||
| 82 | |||
| 83 | static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs) | ||
| 84 | { | ||
| 85 | int xn = opcode & 0x1f; | ||
| 86 | int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f); | ||
| 87 | |||
| 88 | return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0; | ||
| 89 | } | ||
| 90 | |||
| 91 | static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs) | ||
| 92 | { | ||
| 93 | int xn = opcode & 0x1f; | ||
| 94 | int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f); | ||
| 95 | |||
| 96 | return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0; | ||
| 97 | } | ||
| 98 | |||
| 99 | /* | ||
| 100 | * instruction simulation functions | ||
| 101 | */ | ||
| 102 | void __kprobes | ||
| 103 | simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs) | ||
| 104 | { | ||
| 105 | long imm, xn, val; | ||
| 106 | |||
| 107 | xn = opcode & 0x1f; | ||
| 108 | imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3); | ||
| 109 | imm = sign_extend(imm, 20); | ||
| 110 | if (opcode & 0x80000000) | ||
| 111 | val = (imm<<12) + (addr & 0xfffffffffffff000); | ||
| 112 | else | ||
| 113 | val = imm + addr; | ||
| 114 | |||
| 115 | set_x_reg(regs, xn, val); | ||
| 116 | |||
| 117 | instruction_pointer_set(regs, instruction_pointer(regs) + 4); | ||
| 118 | } | ||
| 119 | |||
| 120 | void __kprobes | ||
| 121 | simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs) | ||
| 122 | { | ||
| 123 | int disp = bbl_displacement(opcode); | ||
| 124 | |||
| 125 | /* Link register is x30 */ | ||
| 126 | if (opcode & (1 << 31)) | ||
| 127 | set_x_reg(regs, 30, addr + 4); | ||
| 128 | |||
| 129 | instruction_pointer_set(regs, addr + disp); | ||
| 130 | } | ||
| 131 | |||
| 132 | void __kprobes | ||
| 133 | simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs) | ||
| 134 | { | ||
| 135 | int disp = 4; | ||
| 136 | |||
| 137 | if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff)) | ||
| 138 | disp = bcond_displacement(opcode); | ||
| 139 | |||
| 140 | instruction_pointer_set(regs, addr + disp); | ||
| 141 | } | ||
| 142 | |||
| 143 | void __kprobes | ||
| 144 | simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs) | ||
| 145 | { | ||
| 146 | int xn = (opcode >> 5) & 0x1f; | ||
| 147 | |||
| 148 | /* update pc first in case we're doing a "blr lr" */ | ||
| 149 | instruction_pointer_set(regs, get_x_reg(regs, xn)); | ||
| 150 | |||
| 151 | /* Link register is x30 */ | ||
| 152 | if (((opcode >> 21) & 0x3) == 1) | ||
| 153 | set_x_reg(regs, 30, addr + 4); | ||
| 154 | } | ||
| 155 | |||
| 156 | void __kprobes | ||
| 157 | simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs) | ||
| 158 | { | ||
| 159 | int disp = 4; | ||
| 160 | |||
| 161 | if (opcode & (1 << 24)) { | ||
| 162 | if (check_cbnz(opcode, regs)) | ||
| 163 | disp = cbz_displacement(opcode); | ||
| 164 | } else { | ||
| 165 | if (check_cbz(opcode, regs)) | ||
| 166 | disp = cbz_displacement(opcode); | ||
| 167 | } | ||
| 168 | instruction_pointer_set(regs, addr + disp); | ||
| 169 | } | ||
| 170 | |||
| 171 | void __kprobes | ||
| 172 | simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs) | ||
| 173 | { | ||
| 174 | int disp = 4; | ||
| 175 | |||
| 176 | if (opcode & (1 << 24)) { | ||
| 177 | if (check_tbnz(opcode, regs)) | ||
| 178 | disp = tbz_displacement(opcode); | ||
| 179 | } else { | ||
| 180 | if (check_tbz(opcode, regs)) | ||
| 181 | disp = tbz_displacement(opcode); | ||
| 182 | } | ||
| 183 | instruction_pointer_set(regs, addr + disp); | ||
| 184 | } | ||
| 185 | |||
| 186 | void __kprobes | ||
| 187 | simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs) | ||
| 188 | { | ||
| 189 | u64 *load_addr; | ||
| 190 | int xn = opcode & 0x1f; | ||
| 191 | int disp; | ||
| 192 | |||
| 193 | disp = ldr_displacement(opcode); | ||
| 194 | load_addr = (u64 *) (addr + disp); | ||
| 195 | |||
| 196 | if (opcode & (1 << 30)) /* x0-x30 */ | ||
| 197 | set_x_reg(regs, xn, *load_addr); | ||
| 198 | else /* w0-w30 */ | ||
| 199 | set_w_reg(regs, xn, *load_addr); | ||
| 200 | |||
| 201 | instruction_pointer_set(regs, instruction_pointer(regs) + 4); | ||
| 202 | } | ||
| 203 | |||
| 204 | void __kprobes | ||
| 205 | simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs) | ||
| 206 | { | ||
| 207 | s32 *load_addr; | ||
| 208 | int xn = opcode & 0x1f; | ||
| 209 | int disp; | ||
| 210 | |||
| 211 | disp = ldr_displacement(opcode); | ||
| 212 | load_addr = (s32 *) (addr + disp); | ||
| 213 | |||
| 214 | set_x_reg(regs, xn, *load_addr); | ||
| 215 | |||
| 216 | instruction_pointer_set(regs, instruction_pointer(regs) + 4); | ||
| 217 | } | ||
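The displacement macros above shift the encoded immediate into place and sign-extend it by hand. A standalone host-side check of the arithmetic (copies of the macros, not kernel code): the encoding 0x17ffffff is `b .-4`, i.e. imm26 = 0x3ffffff, and the macros recover a byte displacement of -4.

```c
#include <stdio.h>
#include <stdint.h>

/* Copies of the macros above, evaluated on a host just to check the math. */
#define sign_extend(x, signbit) \
	((x) | (0 - ((x) & (1 << (signbit)))))

#define bbl_displacement(insn) \
	sign_extend(((insn) & 0x3ffffff) << 2, 27)

int main(void)
{
	uint32_t insn = 0x17ffffff;	/* "b .-4": opcode 000101, imm26 = 0x3ffffff */
	int disp = bbl_displacement(insn);

	printf("displacement = %d bytes\n", disp);	/* prints -4 */
	return 0;
}
```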
diff --git a/arch/arm64/kernel/probes/simulate-insn.h b/arch/arm64/kernel/probes/simulate-insn.h new file mode 100644 index 000000000000..050bde683c2d --- /dev/null +++ b/arch/arm64/kernel/probes/simulate-insn.h | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | /* | ||
| 2 | * arch/arm64/kernel/probes/simulate-insn.h | ||
| 3 | * | ||
| 4 | * Copyright (C) 2013 Linaro Limited | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
| 13 | * General Public License for more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H | ||
| 17 | #define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H | ||
| 18 | |||
| 19 | void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs); | ||
| 20 | void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs); | ||
| 21 | void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs); | ||
| 22 | void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs); | ||
| 23 | void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs); | ||
| 24 | void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs); | ||
| 25 | void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs); | ||
| 26 | void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs); | ||
| 27 | |||
| 28 | #endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */ | ||
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 3f6cd5c5234f..030c1d5aa46d 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
| @@ -48,6 +48,107 @@ | |||
| 48 | #define CREATE_TRACE_POINTS | 48 | #define CREATE_TRACE_POINTS |
| 49 | #include <trace/events/syscalls.h> | 49 | #include <trace/events/syscalls.h> |
| 50 | 50 | ||
| 51 | struct pt_regs_offset { | ||
| 52 | const char *name; | ||
| 53 | int offset; | ||
| 54 | }; | ||
| 55 | |||
| 56 | #define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)} | ||
| 57 | #define REG_OFFSET_END {.name = NULL, .offset = 0} | ||
| 58 | #define GPR_OFFSET_NAME(r) \ | ||
| 59 | {.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])} | ||
| 60 | |||
| 61 | static const struct pt_regs_offset regoffset_table[] = { | ||
| 62 | GPR_OFFSET_NAME(0), | ||
| 63 | GPR_OFFSET_NAME(1), | ||
| 64 | GPR_OFFSET_NAME(2), | ||
| 65 | GPR_OFFSET_NAME(3), | ||
| 66 | GPR_OFFSET_NAME(4), | ||
| 67 | GPR_OFFSET_NAME(5), | ||
| 68 | GPR_OFFSET_NAME(6), | ||
| 69 | GPR_OFFSET_NAME(7), | ||
| 70 | GPR_OFFSET_NAME(8), | ||
| 71 | GPR_OFFSET_NAME(9), | ||
| 72 | GPR_OFFSET_NAME(10), | ||
| 73 | GPR_OFFSET_NAME(11), | ||
| 74 | GPR_OFFSET_NAME(12), | ||
| 75 | GPR_OFFSET_NAME(13), | ||
| 76 | GPR_OFFSET_NAME(14), | ||
| 77 | GPR_OFFSET_NAME(15), | ||
| 78 | GPR_OFFSET_NAME(16), | ||
| 79 | GPR_OFFSET_NAME(17), | ||
| 80 | GPR_OFFSET_NAME(18), | ||
| 81 | GPR_OFFSET_NAME(19), | ||
| 82 | GPR_OFFSET_NAME(20), | ||
| 83 | GPR_OFFSET_NAME(21), | ||
| 84 | GPR_OFFSET_NAME(22), | ||
| 85 | GPR_OFFSET_NAME(23), | ||
| 86 | GPR_OFFSET_NAME(24), | ||
| 87 | GPR_OFFSET_NAME(25), | ||
| 88 | GPR_OFFSET_NAME(26), | ||
| 89 | GPR_OFFSET_NAME(27), | ||
| 90 | GPR_OFFSET_NAME(28), | ||
| 91 | GPR_OFFSET_NAME(29), | ||
| 92 | GPR_OFFSET_NAME(30), | ||
| 93 | {.name = "lr", .offset = offsetof(struct pt_regs, regs[30])}, | ||
| 94 | REG_OFFSET_NAME(sp), | ||
| 95 | REG_OFFSET_NAME(pc), | ||
| 96 | REG_OFFSET_NAME(pstate), | ||
| 97 | REG_OFFSET_END, | ||
| 98 | }; | ||
| 99 | |||
| 100 | /** | ||
| 101 | * regs_query_register_offset() - query register offset from its name | ||
| 102 | * @name: the name of a register | ||
| 103 | * | ||
| 104 | * regs_query_register_offset() returns the offset of a register in struct | ||
| 105 | * pt_regs from its name. If the name is invalid, this returns -EINVAL. | ||
| 106 | */ | ||
| 107 | int regs_query_register_offset(const char *name) | ||
| 108 | { | ||
| 109 | const struct pt_regs_offset *roff; | ||
| 110 | |||
| 111 | for (roff = regoffset_table; roff->name != NULL; roff++) | ||
| 112 | if (!strcmp(roff->name, name)) | ||
| 113 | return roff->offset; | ||
| 114 | return -EINVAL; | ||
| 115 | } | ||
| 116 | |||
| 117 | /** | ||
| 118 | * regs_within_kernel_stack() - check the address in the stack | ||
| 119 | * @regs: pt_regs which contains kernel stack pointer. | ||
| 120 | * @addr: address which is checked. | ||
| 121 | * | ||
| 122 | * regs_within_kernel_stack() checks whether @addr is within the kernel stack page(s). | ||
| 123 | * If @addr is within the kernel stack, it returns true. If not, returns false. | ||
| 124 | */ | ||
| 125 | static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr) | ||
| 126 | { | ||
| 127 | return ((addr & ~(THREAD_SIZE - 1)) == | ||
| 128 | (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) || | ||
| 129 | on_irq_stack(addr, raw_smp_processor_id()); | ||
| 130 | } | ||
| 131 | |||
| 132 | /** | ||
| 133 | * regs_get_kernel_stack_nth() - get Nth entry of the stack | ||
| 134 | * @regs: pt_regs which contains kernel stack pointer. | ||
| 135 | * @n: stack entry number. | ||
| 136 | * | ||
| 137 | * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which | ||
| 138 | * is specified by @regs. If the @n th entry is NOT in the kernel stack, | ||
| 139 | * this returns 0. | ||
| 140 | */ | ||
| 141 | unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n) | ||
| 142 | { | ||
| 143 | unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs); | ||
| 144 | |||
| 145 | addr += n; | ||
| 146 | if (regs_within_kernel_stack(regs, (unsigned long)addr)) | ||
| 147 | return *addr; | ||
| 148 | else | ||
| 149 | return 0; | ||
| 150 | } | ||
| 151 | |||
| 51 | /* | 152 | /* |
| 52 | * TODO: does not yet catch signals sent when the child dies. | 153 | * TODO: does not yet catch signals sent when the child dies. |
| 53 | * in exit.c or in signal.c. | 154 | * in exit.c or in signal.c. |
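The accessors added above back the new HAVE_REGS_AND_STACK_ACCESS_API selection. A rough sketch of how a probe handler might consume them; the helper name and the choice of "x0" are made up, and only regs_query_register_offset() and regs_get_kernel_stack_nth() come from the patch itself:

```c
#include <linux/kernel.h>
#include <asm/ptrace.h>

/* Hypothetical helper for use inside a kprobe handler. */
static void dump_probe_context(struct pt_regs *regs)
{
	int off = regs_query_register_offset("x0");

	/* Same idea as the generic regs_get_register() accessor. */
	if (off >= 0)
		pr_info("x0 = 0x%llx\n", *(u64 *)((char *)regs + off));

	/* Nth entry at the kernel SP; returns 0 if outside the stack. */
	pr_info("stack[0] = 0x%lx, stack[1] = 0x%lx\n",
		regs_get_kernel_stack_nth(regs, 0),
		regs_get_kernel_stack_nth(regs, 1));
}
```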
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 0de7be4f1a9d..89d6e177ecbd 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
| @@ -118,9 +118,11 @@ SECTIONS | |||
| 118 | __exception_text_end = .; | 118 | __exception_text_end = .; |
| 119 | IRQENTRY_TEXT | 119 | IRQENTRY_TEXT |
| 120 | SOFTIRQENTRY_TEXT | 120 | SOFTIRQENTRY_TEXT |
| 121 | ENTRY_TEXT | ||
| 121 | TEXT_TEXT | 122 | TEXT_TEXT |
| 122 | SCHED_TEXT | 123 | SCHED_TEXT |
| 123 | LOCK_TEXT | 124 | LOCK_TEXT |
| 125 | KPROBES_TEXT | ||
| 124 | HYPERVISOR_TEXT | 126 | HYPERVISOR_TEXT |
| 125 | IDMAP_TEXT | 127 | IDMAP_TEXT |
| 126 | HIBERNATE_TEXT | 128 | HIBERNATE_TEXT |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index fc5a34a72c6d..4ebda515a016 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -41,6 +41,28 @@ | |||
| 41 | 41 | ||
| 42 | static const char *fault_name(unsigned int esr); | 42 | static const char *fault_name(unsigned int esr); |
| 43 | 43 | ||
| 44 | #ifdef CONFIG_KPROBES | ||
| 45 | static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) | ||
| 46 | { | ||
| 47 | int ret = 0; | ||
| 48 | |||
| 49 | /* kprobe_running() needs smp_processor_id() */ | ||
| 50 | if (!user_mode(regs)) { | ||
| 51 | preempt_disable(); | ||
| 52 | if (kprobe_running() && kprobe_fault_handler(regs, esr)) | ||
| 53 | ret = 1; | ||
| 54 | preempt_enable(); | ||
| 55 | } | ||
| 56 | |||
| 57 | return ret; | ||
| 58 | } | ||
| 59 | #else | ||
| 60 | static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr) | ||
| 61 | { | ||
| 62 | return 0; | ||
| 63 | } | ||
| 64 | #endif | ||
| 65 | |||
| 44 | /* | 66 | /* |
| 45 | * Dump out the page tables associated with 'addr' in mm 'mm'. | 67 | * Dump out the page tables associated with 'addr' in mm 'mm'. |
| 46 | */ | 68 | */ |
| @@ -262,6 +284,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, | |||
| 262 | unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; | 284 | unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC; |
| 263 | unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 285 | unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
| 264 | 286 | ||
| 287 | if (notify_page_fault(regs, esr)) | ||
| 288 | return 0; | ||
| 289 | |||
| 265 | tsk = current; | 290 | tsk = current; |
| 266 | mm = tsk->mm; | 291 | mm = tsk->mm; |
| 267 | 292 | ||
| @@ -632,6 +657,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr, | |||
| 632 | 657 | ||
| 633 | return rv; | 658 | return rv; |
| 634 | } | 659 | } |
| 660 | NOKPROBE_SYMBOL(do_debug_exception); | ||
| 635 | 661 | ||
| 636 | #ifdef CONFIG_ARM64_PAN | 662 | #ifdef CONFIG_ARM64_PAN |
| 637 | void cpu_enable_pan(void *__unused) | 663 | void cpu_enable_pan(void *__unused) |
diff --git a/samples/kprobes/kprobe_example.c b/samples/kprobes/kprobe_example.c index ed0ca0c07242..f3b61b4ee09c 100644 --- a/samples/kprobes/kprobe_example.c +++ b/samples/kprobes/kprobe_example.c | |||
| @@ -46,6 +46,11 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs) | |||
| 46 | " ex1 = 0x%lx\n", | 46 | " ex1 = 0x%lx\n", |
| 47 | p->symbol_name, p->addr, regs->pc, regs->ex1); | 47 | p->symbol_name, p->addr, regs->pc, regs->ex1); |
| 48 | #endif | 48 | #endif |
| 49 | #ifdef CONFIG_ARM64 | ||
| 50 | pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx," | ||
| 51 | " pstate = 0x%lx\n", | ||
| 52 | p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate); | ||
| 53 | #endif | ||
| 49 | 54 | ||
| 50 | /* A dump_stack() here will give a stack backtrace */ | 55 | /* A dump_stack() here will give a stack backtrace */ |
| 51 | return 0; | 56 | return 0; |
| @@ -71,6 +76,10 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs, | |||
| 71 | printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n", | 76 | printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n", |
| 72 | p->symbol_name, p->addr, regs->ex1); | 77 | p->symbol_name, p->addr, regs->ex1); |
| 73 | #endif | 78 | #endif |
| 79 | #ifdef CONFIG_ARM64 | ||
| 80 | pr_info("<%s> post_handler: p->addr = 0x%p, pstate = 0x%lx\n", | ||
| 81 | p->symbol_name, p->addr, (long)regs->pstate); | ||
| 82 | #endif | ||
| 74 | } | 83 | } |
| 75 | 84 | ||
| 76 | /* | 85 | /* |