diff options
| author | Will Deacon <will.deacon@arm.com> | 2012-03-05 06:49:33 -0500 |
|---|---|---|
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2012-09-17 08:42:14 -0400 |
| commit | 478fcb2cdb2351dcfc3fb23f42d76f4436ee4149 (patch) | |
| tree | f5d0be182e1ca20a1c539d1632cc27d4d910d5db | |
| parent | 53631b54c8704fe5de435582c82ddbc0bfabf06a (diff) | |
arm64: Debugging support
This patch adds ptrace, debug monitors and hardware breakpoints support.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
| -rw-r--r-- | arch/arm64/include/asm/debug-monitors.h | 88 | ||||
| -rw-r--r-- | arch/arm64/include/asm/hw_breakpoint.h | 137 | ||||
| -rw-r--r-- | arch/arm64/kernel/debug-monitors.c | 288 | ||||
| -rw-r--r-- | arch/arm64/kernel/hw_breakpoint.c | 880 | ||||
| -rw-r--r-- | arch/arm64/kernel/ptrace.c | 1126 | ||||
| -rw-r--r-- | include/linux/elf.h | 3 |
6 files changed, 2522 insertions, 0 deletions
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h new file mode 100644 index 000000000000..7eaa0b302493 --- /dev/null +++ b/arch/arm64/include/asm/debug-monitors.h | |||
| @@ -0,0 +1,88 @@ | |||
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_DEBUG_MONITORS_H
#define __ASM_DEBUG_MONITORS_H

#ifdef __KERNEL__

/* Extract the debug event class from an ESR value (bits [29:27]). */
#define DBG_ESR_EVT(x)		(((x) >> 27) & 0x7)

/* AArch64 debug event values */
#define DBG_ESR_EVT_HWBP	0x0	/* hardware breakpoint */
#define DBG_ESR_EVT_HWSS	0x1	/* hardware single-step */
#define DBG_ESR_EVT_HWWP	0x2	/* hardware watchpoint */
#define DBG_ESR_EVT_BRK		0x6	/* BRK instruction */

/*
 * Exception level at which the debug monitors are active.
 * NOTE(review): this enum sits outside the !__ASSEMBLY__ guard below,
 * which would break an assembly include of this header — TODO confirm
 * no .S file pulls this in.
 */
enum debug_el {
	DBG_ACTIVE_EL0 = 0,
	DBG_ACTIVE_EL1,
};

/* AArch32 debug events */
#define DBG_ESR_EVT_BKPT	0x4
#define DBG_ESR_EVT_VECC	0x5

/* Breakpoint instruction encodings used by AArch32 tasks. */
#define AARCH32_BREAK_ARM	0x07f001f0
#define AARCH32_BREAK_THUMB	0xde01
#define AARCH32_BREAK_THUMB2_LO	0xf7f0
#define AARCH32_BREAK_THUMB2_HI	0xa000

#ifndef __ASSEMBLY__
struct task_struct;

/* Save the DAIF flags in 'flags' and mask debug exceptions (D bit). */
#define local_dbg_save(flags)						\
	do {								\
		typecheck(unsigned long, flags);			\
		asm volatile(						\
		"mrs %0, daif // local_dbg_save\n"			\
		"msr daifset, #8"					\
		: "=r" (flags) : : "memory");				\
	} while (0)

/* Restore a DAIF value previously captured by local_dbg_save(). */
#define local_dbg_restore(flags)					\
	do {								\
		typecheck(unsigned long, flags);			\
		asm volatile(						\
		"msr daif, %0 // local_dbg_restore\n"			\
		: : "r" (flags) : "memory");				\
	} while (0)

#define DBG_ARCH_ID_RESERVED	0	/* In case of ptrace ABI updates. */

u8 debug_monitors_arch(void);

void enable_debug_monitors(enum debug_el el);
void disable_debug_monitors(enum debug_el el);

void user_rewind_single_step(struct task_struct *task);
void user_fastforward_single_step(struct task_struct *task);

void kernel_enable_single_step(struct pt_regs *regs);
void kernel_disable_single_step(void);
int kernel_active_single_step(void);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
int reinstall_suspended_bps(struct pt_regs *regs);
#else
static inline int reinstall_suspended_bps(struct pt_regs *regs)
{
	return -ENODEV;
}
#endif

#endif	/* __ASSEMBLY__ */
#endif	/* __KERNEL__ */
#endif	/* __ASM_DEBUG_MONITORS_H */
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h new file mode 100644 index 000000000000..d064047612b1 --- /dev/null +++ b/arch/arm64/include/asm/hw_breakpoint.h | |||
| @@ -0,0 +1,137 @@ | |||
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_HW_BREAKPOINT_H
#define __ASM_HW_BREAKPOINT_H

#ifdef __KERNEL__

/* Unpacked view of a DBGBCR/DBGWCR control register. */
struct arch_hw_breakpoint_ctrl {
	u32 __reserved	: 19,
	len		: 8,	/* byte-address-select mask */
	type		: 2,	/* execute / load / store */
	privilege	: 2,	/* EL at which the bp fires */
	enabled		: 1;
};

struct arch_hw_breakpoint {
	u64 address;	/* programmed address (BVR/WVR) */
	u64 trigger;	/* address that caused the last hit */
	struct arch_hw_breakpoint_ctrl ctrl;
};

/* Pack an unpacked control structure back into register layout. */
static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
{
	return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
		ctrl.enabled;
}

/* Unpack a control register value, field by field from bit 0 up. */
static inline void decode_ctrl_reg(u32 reg,
				   struct arch_hw_breakpoint_ctrl *ctrl)
{
	ctrl->enabled	= reg & 0x1;
	reg >>= 1;
	ctrl->privilege	= reg & 0x3;
	reg >>= 2;
	ctrl->type	= reg & 0x3;
	reg >>= 2;
	ctrl->len	= reg & 0xff;
}

/* Breakpoint */
#define ARM_BREAKPOINT_EXECUTE	0

/* Watchpoints */
#define ARM_BREAKPOINT_LOAD	1
#define ARM_BREAKPOINT_STORE	2
#define AARCH64_ESR_ACCESS_MASK	(1 << 6)

/* Privilege Levels */
#define AARCH64_BREAKPOINT_EL1	1
#define AARCH64_BREAKPOINT_EL0	2

/* Lengths (byte-address-select masks) */
#define ARM_BREAKPOINT_LEN_1	0x1
#define ARM_BREAKPOINT_LEN_2	0x3
#define ARM_BREAKPOINT_LEN_4	0xf
#define ARM_BREAKPOINT_LEN_8	0xff

/* Kernel stepping */
#define ARM_KERNEL_STEP_NONE	0
#define ARM_KERNEL_STEP_ACTIVE	1
#define ARM_KERNEL_STEP_SUSPEND	2

/*
 * Limits.
 * Changing these will require modifications to the register accessors.
 */
#define ARM_MAX_BRP		16
#define ARM_MAX_WRP		16
#define ARM_MAX_HBP_SLOTS	(ARM_MAX_BRP + ARM_MAX_WRP)

/* Virtual debug register bases. */
#define AARCH64_DBG_REG_BVR	0
#define AARCH64_DBG_REG_BCR	(AARCH64_DBG_REG_BVR + ARM_MAX_BRP)
#define AARCH64_DBG_REG_WVR	(AARCH64_DBG_REG_BCR + ARM_MAX_BRP)
#define AARCH64_DBG_REG_WCR	(AARCH64_DBG_REG_WVR + ARM_MAX_WRP)

/* Debug register names. */
#define AARCH64_DBG_REG_NAME_BVR	"bvr"
#define AARCH64_DBG_REG_NAME_BCR	"bcr"
#define AARCH64_DBG_REG_NAME_WVR	"wvr"
#define AARCH64_DBG_REG_NAME_WCR	"wcr"

/* Accessor macros for the debug registers. */
#define AARCH64_DBG_READ(N, REG, VAL) do {\
	asm volatile("mrs %0, dbg" REG #N "_el1" : "=r" (VAL));\
} while (0)

#define AARCH64_DBG_WRITE(N, REG, VAL) do {\
	asm volatile("msr dbg" REG #N "_el1, %0" :: "r" (VAL));\
} while (0)

struct task_struct;
struct notifier_block;
struct perf_event;
struct pmu;

extern int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
				  int *gen_len, int *gen_type);
extern int arch_check_bp_in_kernelspace(struct perf_event *bp);
extern int arch_validate_hwbkpt_settings(struct perf_event *bp);
extern int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
					   unsigned long val, void *data);

extern int arch_install_hw_breakpoint(struct perf_event *bp);
extern void arch_uninstall_hw_breakpoint(struct perf_event *bp);
extern void hw_breakpoint_pmu_read(struct perf_event *bp);
extern int hw_breakpoint_slots(int type);

#ifdef CONFIG_HAVE_HW_BREAKPOINT
extern void hw_breakpoint_thread_switch(struct task_struct *next);
extern void ptrace_hw_copy_thread(struct task_struct *task);
#else
static inline void hw_breakpoint_thread_switch(struct task_struct *next)
{
}
static inline void ptrace_hw_copy_thread(struct task_struct *task)
{
}
#endif

extern struct pmu perf_ops_bp;

#endif	/* __KERNEL__ */
#endif	/* __ASM_HW_BREAKPOINT_H */
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c new file mode 100644 index 000000000000..0c3ba9f51376 --- /dev/null +++ b/arch/arm64/kernel/debug-monitors.c | |||
| @@ -0,0 +1,288 @@ | |||
| 1 | /* | ||
| 2 | * ARMv8 single-step debug support and mdscr context switching. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2012 ARM Limited | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 17 | * | ||
| 18 | * Author: Will Deacon <will.deacon@arm.com> | ||
| 19 | */ | ||
| 20 | |||
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/stat.h>
#include <linux/string.h>

#include <asm/debug-monitors.h>
#include <asm/local.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
| 32 | |||
| 33 | /* Low-level stepping controls. */ | ||
| 34 | #define DBG_MDSCR_SS (1 << 0) | ||
| 35 | #define DBG_SPSR_SS (1 << 21) | ||
| 36 | |||
| 37 | /* MDSCR_EL1 enabling bits */ | ||
| 38 | #define DBG_MDSCR_KDE (1 << 13) | ||
| 39 | #define DBG_MDSCR_MDE (1 << 15) | ||
| 40 | #define DBG_MDSCR_MASK ~(DBG_MDSCR_KDE | DBG_MDSCR_MDE) | ||
| 41 | |||
| 42 | /* Determine debug architecture. */ | ||
| 43 | u8 debug_monitors_arch(void) | ||
| 44 | { | ||
| 45 | return read_cpuid(ID_AA64DFR0_EL1) & 0xf; | ||
| 46 | } | ||
| 47 | |||
| 48 | /* | ||
| 49 | * MDSCR access routines. | ||
| 50 | */ | ||
| 51 | static void mdscr_write(u32 mdscr) | ||
| 52 | { | ||
| 53 | unsigned long flags; | ||
| 54 | local_dbg_save(flags); | ||
| 55 | asm volatile("msr mdscr_el1, %0" :: "r" (mdscr)); | ||
| 56 | local_dbg_restore(flags); | ||
| 57 | } | ||
| 58 | |||
| 59 | static u32 mdscr_read(void) | ||
| 60 | { | ||
| 61 | u32 mdscr; | ||
| 62 | asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr)); | ||
| 63 | return mdscr; | ||
| 64 | } | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Allow root to disable self-hosted debug from userspace. | ||
| 68 | * This is useful if you want to connect an external JTAG debugger. | ||
| 69 | */ | ||
| 70 | static u32 debug_enabled = 1; | ||
| 71 | |||
| 72 | static int create_debug_debugfs_entry(void) | ||
| 73 | { | ||
| 74 | debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled); | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | fs_initcall(create_debug_debugfs_entry); | ||
| 78 | |||
| 79 | static int __init early_debug_disable(char *buf) | ||
| 80 | { | ||
| 81 | debug_enabled = 0; | ||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | early_param("nodebugmon", early_debug_disable); | ||
| 86 | |||
| 87 | /* | ||
| 88 | * Keep track of debug users on each core. | ||
| 89 | * The ref counts are per-cpu so we use a local_t type. | ||
| 90 | */ | ||
| 91 | static DEFINE_PER_CPU(local_t, mde_ref_count); | ||
| 92 | static DEFINE_PER_CPU(local_t, kde_ref_count); | ||
| 93 | |||
| 94 | void enable_debug_monitors(enum debug_el el) | ||
| 95 | { | ||
| 96 | u32 mdscr, enable = 0; | ||
| 97 | |||
| 98 | WARN_ON(preemptible()); | ||
| 99 | |||
| 100 | if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1) | ||
| 101 | enable = DBG_MDSCR_MDE; | ||
| 102 | |||
| 103 | if (el == DBG_ACTIVE_EL1 && | ||
| 104 | local_inc_return(&__get_cpu_var(kde_ref_count)) == 1) | ||
| 105 | enable |= DBG_MDSCR_KDE; | ||
| 106 | |||
| 107 | if (enable && debug_enabled) { | ||
| 108 | mdscr = mdscr_read(); | ||
| 109 | mdscr |= enable; | ||
| 110 | mdscr_write(mdscr); | ||
| 111 | } | ||
| 112 | } | ||
| 113 | |||
| 114 | void disable_debug_monitors(enum debug_el el) | ||
| 115 | { | ||
| 116 | u32 mdscr, disable = 0; | ||
| 117 | |||
| 118 | WARN_ON(preemptible()); | ||
| 119 | |||
| 120 | if (local_dec_and_test(&__get_cpu_var(mde_ref_count))) | ||
| 121 | disable = ~DBG_MDSCR_MDE; | ||
| 122 | |||
| 123 | if (el == DBG_ACTIVE_EL1 && | ||
| 124 | local_dec_and_test(&__get_cpu_var(kde_ref_count))) | ||
| 125 | disable &= ~DBG_MDSCR_KDE; | ||
| 126 | |||
| 127 | if (disable) { | ||
| 128 | mdscr = mdscr_read(); | ||
| 129 | mdscr &= disable; | ||
| 130 | mdscr_write(mdscr); | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | /* | ||
| 135 | * OS lock clearing. | ||
| 136 | */ | ||
| 137 | static void clear_os_lock(void *unused) | ||
| 138 | { | ||
| 139 | asm volatile("msr mdscr_el1, %0" : : "r" (0)); | ||
| 140 | isb(); | ||
| 141 | asm volatile("msr oslar_el1, %0" : : "r" (0)); | ||
| 142 | isb(); | ||
| 143 | } | ||
| 144 | |||
| 145 | static int __cpuinit os_lock_notify(struct notifier_block *self, | ||
| 146 | unsigned long action, void *data) | ||
| 147 | { | ||
| 148 | int cpu = (unsigned long)data; | ||
| 149 | if (action == CPU_ONLINE) | ||
| 150 | smp_call_function_single(cpu, clear_os_lock, NULL, 1); | ||
| 151 | return NOTIFY_OK; | ||
| 152 | } | ||
| 153 | |||
| 154 | static struct notifier_block __cpuinitdata os_lock_nb = { | ||
| 155 | .notifier_call = os_lock_notify, | ||
| 156 | }; | ||
| 157 | |||
| 158 | static int __cpuinit debug_monitors_init(void) | ||
| 159 | { | ||
| 160 | /* Clear the OS lock. */ | ||
| 161 | smp_call_function(clear_os_lock, NULL, 1); | ||
| 162 | clear_os_lock(NULL); | ||
| 163 | |||
| 164 | /* Register hotplug handler. */ | ||
| 165 | register_cpu_notifier(&os_lock_nb); | ||
| 166 | return 0; | ||
| 167 | } | ||
| 168 | postcore_initcall(debug_monitors_init); | ||
| 169 | |||
| 170 | /* | ||
| 171 | * Single step API and exception handling. | ||
| 172 | */ | ||
| 173 | static void set_regs_spsr_ss(struct pt_regs *regs) | ||
| 174 | { | ||
| 175 | unsigned long spsr; | ||
| 176 | |||
| 177 | spsr = regs->pstate; | ||
| 178 | spsr &= ~DBG_SPSR_SS; | ||
| 179 | spsr |= DBG_SPSR_SS; | ||
| 180 | regs->pstate = spsr; | ||
| 181 | } | ||
| 182 | |||
| 183 | static void clear_regs_spsr_ss(struct pt_regs *regs) | ||
| 184 | { | ||
| 185 | unsigned long spsr; | ||
| 186 | |||
| 187 | spsr = regs->pstate; | ||
| 188 | spsr &= ~DBG_SPSR_SS; | ||
| 189 | regs->pstate = spsr; | ||
| 190 | } | ||
| 191 | |||
| 192 | static int single_step_handler(unsigned long addr, unsigned int esr, | ||
| 193 | struct pt_regs *regs) | ||
| 194 | { | ||
| 195 | siginfo_t info; | ||
| 196 | |||
| 197 | /* | ||
| 198 | * If we are stepping a pending breakpoint, call the hw_breakpoint | ||
| 199 | * handler first. | ||
| 200 | */ | ||
| 201 | if (!reinstall_suspended_bps(regs)) | ||
| 202 | return 0; | ||
| 203 | |||
| 204 | if (user_mode(regs)) { | ||
| 205 | info.si_signo = SIGTRAP; | ||
| 206 | info.si_errno = 0; | ||
| 207 | info.si_code = TRAP_HWBKPT; | ||
| 208 | info.si_addr = (void __user *)instruction_pointer(regs); | ||
| 209 | force_sig_info(SIGTRAP, &info, current); | ||
| 210 | |||
| 211 | /* | ||
| 212 | * ptrace will disable single step unless explicitly | ||
| 213 | * asked to re-enable it. For other clients, it makes | ||
| 214 | * sense to leave it enabled (i.e. rewind the controls | ||
| 215 | * to the active-not-pending state). | ||
| 216 | */ | ||
| 217 | user_rewind_single_step(current); | ||
| 218 | } else { | ||
| 219 | /* TODO: route to KGDB */ | ||
| 220 | pr_warning("Unexpected kernel single-step exception at EL1\n"); | ||
| 221 | /* | ||
| 222 | * Re-enable stepping since we know that we will be | ||
| 223 | * returning to regs. | ||
| 224 | */ | ||
| 225 | set_regs_spsr_ss(regs); | ||
| 226 | } | ||
| 227 | |||
| 228 | return 0; | ||
| 229 | } | ||
| 230 | |||
| 231 | static int __init single_step_init(void) | ||
| 232 | { | ||
| 233 | hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP, | ||
| 234 | TRAP_HWBKPT, "single-step handler"); | ||
| 235 | return 0; | ||
| 236 | } | ||
| 237 | arch_initcall(single_step_init); | ||
| 238 | |||
| 239 | /* Re-enable single step for syscall restarting. */ | ||
| 240 | void user_rewind_single_step(struct task_struct *task) | ||
| 241 | { | ||
| 242 | /* | ||
| 243 | * If single step is active for this thread, then set SPSR.SS | ||
| 244 | * to 1 to avoid returning to the active-pending state. | ||
| 245 | */ | ||
| 246 | if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) | ||
| 247 | set_regs_spsr_ss(task_pt_regs(task)); | ||
| 248 | } | ||
| 249 | |||
| 250 | void user_fastforward_single_step(struct task_struct *task) | ||
| 251 | { | ||
| 252 | if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP)) | ||
| 253 | clear_regs_spsr_ss(task_pt_regs(task)); | ||
| 254 | } | ||
| 255 | |||
| 256 | /* Kernel API */ | ||
| 257 | void kernel_enable_single_step(struct pt_regs *regs) | ||
| 258 | { | ||
| 259 | WARN_ON(!irqs_disabled()); | ||
| 260 | set_regs_spsr_ss(regs); | ||
| 261 | mdscr_write(mdscr_read() | DBG_MDSCR_SS); | ||
| 262 | enable_debug_monitors(DBG_ACTIVE_EL1); | ||
| 263 | } | ||
| 264 | |||
| 265 | void kernel_disable_single_step(void) | ||
| 266 | { | ||
| 267 | WARN_ON(!irqs_disabled()); | ||
| 268 | mdscr_write(mdscr_read() & ~DBG_MDSCR_SS); | ||
| 269 | disable_debug_monitors(DBG_ACTIVE_EL1); | ||
| 270 | } | ||
| 271 | |||
| 272 | int kernel_active_single_step(void) | ||
| 273 | { | ||
| 274 | WARN_ON(!irqs_disabled()); | ||
| 275 | return mdscr_read() & DBG_MDSCR_SS; | ||
| 276 | } | ||
| 277 | |||
| 278 | /* ptrace API */ | ||
| 279 | void user_enable_single_step(struct task_struct *task) | ||
| 280 | { | ||
| 281 | set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); | ||
| 282 | set_regs_spsr_ss(task_pt_regs(task)); | ||
| 283 | } | ||
| 284 | |||
| 285 | void user_disable_single_step(struct task_struct *task) | ||
| 286 | { | ||
| 287 | clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP); | ||
| 288 | } | ||
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c new file mode 100644 index 000000000000..5ab825c59db9 --- /dev/null +++ b/arch/arm64/kernel/hw_breakpoint.c | |||
| @@ -0,0 +1,880 @@ | |||
| 1 | /* | ||
| 2 | * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility, | ||
| 3 | * using the CPU's debug registers. | ||
| 4 | * | ||
| 5 | * Copyright (C) 2012 ARM Limited | ||
| 6 | * Author: Will Deacon <will.deacon@arm.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | * | ||
| 17 | * You should have received a copy of the GNU General Public License | ||
| 18 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 19 | */ | ||
| 20 | |||
| 21 | #define pr_fmt(fmt) "hw-breakpoint: " fmt | ||
| 22 | |||
| 23 | #include <linux/errno.h> | ||
| 24 | #include <linux/hw_breakpoint.h> | ||
| 25 | #include <linux/perf_event.h> | ||
| 26 | #include <linux/ptrace.h> | ||
| 27 | #include <linux/smp.h> | ||
| 28 | |||
| 29 | #include <asm/compat.h> | ||
| 30 | #include <asm/current.h> | ||
| 31 | #include <asm/debug-monitors.h> | ||
| 32 | #include <asm/hw_breakpoint.h> | ||
| 33 | #include <asm/kdebug.h> | ||
| 34 | #include <asm/traps.h> | ||
| 35 | #include <asm/cputype.h> | ||
| 36 | #include <asm/system_misc.h> | ||
| 37 | |||
| 38 | /* Breakpoint currently in use for each BRP. */ | ||
| 39 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); | ||
| 40 | |||
| 41 | /* Watchpoint currently in use for each WRP. */ | ||
| 42 | static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]); | ||
| 43 | |||
| 44 | /* Currently stepping a per-CPU kernel breakpoint. */ | ||
| 45 | static DEFINE_PER_CPU(int, stepping_kernel_bp); | ||
| 46 | |||
| 47 | /* Number of BRP/WRP registers on this CPU. */ | ||
| 48 | static int core_num_brps; | ||
| 49 | static int core_num_wrps; | ||
| 50 | |||
| 51 | /* Determine number of BRP registers available. */ | ||
| 52 | static int get_num_brps(void) | ||
| 53 | { | ||
| 54 | return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1; | ||
| 55 | } | ||
| 56 | |||
| 57 | /* Determine number of WRP registers available. */ | ||
| 58 | static int get_num_wrps(void) | ||
| 59 | { | ||
| 60 | return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1; | ||
| 61 | } | ||
| 62 | |||
| 63 | int hw_breakpoint_slots(int type) | ||
| 64 | { | ||
| 65 | /* | ||
| 66 | * We can be called early, so don't rely on | ||
| 67 | * our static variables being initialised. | ||
| 68 | */ | ||
| 69 | switch (type) { | ||
| 70 | case TYPE_INST: | ||
| 71 | return get_num_brps(); | ||
| 72 | case TYPE_DATA: | ||
| 73 | return get_num_wrps(); | ||
| 74 | default: | ||
| 75 | pr_warning("unknown slot type: %d\n", type); | ||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | } | ||
| 79 | |||
| 80 | #define READ_WB_REG_CASE(OFF, N, REG, VAL) \ | ||
| 81 | case (OFF + N): \ | ||
| 82 | AARCH64_DBG_READ(N, REG, VAL); \ | ||
| 83 | break | ||
| 84 | |||
| 85 | #define WRITE_WB_REG_CASE(OFF, N, REG, VAL) \ | ||
| 86 | case (OFF + N): \ | ||
| 87 | AARCH64_DBG_WRITE(N, REG, VAL); \ | ||
| 88 | break | ||
| 89 | |||
| 90 | #define GEN_READ_WB_REG_CASES(OFF, REG, VAL) \ | ||
| 91 | READ_WB_REG_CASE(OFF, 0, REG, VAL); \ | ||
| 92 | READ_WB_REG_CASE(OFF, 1, REG, VAL); \ | ||
| 93 | READ_WB_REG_CASE(OFF, 2, REG, VAL); \ | ||
| 94 | READ_WB_REG_CASE(OFF, 3, REG, VAL); \ | ||
| 95 | READ_WB_REG_CASE(OFF, 4, REG, VAL); \ | ||
| 96 | READ_WB_REG_CASE(OFF, 5, REG, VAL); \ | ||
| 97 | READ_WB_REG_CASE(OFF, 6, REG, VAL); \ | ||
| 98 | READ_WB_REG_CASE(OFF, 7, REG, VAL); \ | ||
| 99 | READ_WB_REG_CASE(OFF, 8, REG, VAL); \ | ||
| 100 | READ_WB_REG_CASE(OFF, 9, REG, VAL); \ | ||
| 101 | READ_WB_REG_CASE(OFF, 10, REG, VAL); \ | ||
| 102 | READ_WB_REG_CASE(OFF, 11, REG, VAL); \ | ||
| 103 | READ_WB_REG_CASE(OFF, 12, REG, VAL); \ | ||
| 104 | READ_WB_REG_CASE(OFF, 13, REG, VAL); \ | ||
| 105 | READ_WB_REG_CASE(OFF, 14, REG, VAL); \ | ||
| 106 | READ_WB_REG_CASE(OFF, 15, REG, VAL) | ||
| 107 | |||
| 108 | #define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL) \ | ||
| 109 | WRITE_WB_REG_CASE(OFF, 0, REG, VAL); \ | ||
| 110 | WRITE_WB_REG_CASE(OFF, 1, REG, VAL); \ | ||
| 111 | WRITE_WB_REG_CASE(OFF, 2, REG, VAL); \ | ||
| 112 | WRITE_WB_REG_CASE(OFF, 3, REG, VAL); \ | ||
| 113 | WRITE_WB_REG_CASE(OFF, 4, REG, VAL); \ | ||
| 114 | WRITE_WB_REG_CASE(OFF, 5, REG, VAL); \ | ||
| 115 | WRITE_WB_REG_CASE(OFF, 6, REG, VAL); \ | ||
| 116 | WRITE_WB_REG_CASE(OFF, 7, REG, VAL); \ | ||
| 117 | WRITE_WB_REG_CASE(OFF, 8, REG, VAL); \ | ||
| 118 | WRITE_WB_REG_CASE(OFF, 9, REG, VAL); \ | ||
| 119 | WRITE_WB_REG_CASE(OFF, 10, REG, VAL); \ | ||
| 120 | WRITE_WB_REG_CASE(OFF, 11, REG, VAL); \ | ||
| 121 | WRITE_WB_REG_CASE(OFF, 12, REG, VAL); \ | ||
| 122 | WRITE_WB_REG_CASE(OFF, 13, REG, VAL); \ | ||
| 123 | WRITE_WB_REG_CASE(OFF, 14, REG, VAL); \ | ||
| 124 | WRITE_WB_REG_CASE(OFF, 15, REG, VAL) | ||
| 125 | |||
| 126 | static u64 read_wb_reg(int reg, int n) | ||
| 127 | { | ||
| 128 | u64 val = 0; | ||
| 129 | |||
| 130 | switch (reg + n) { | ||
| 131 | GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val); | ||
| 132 | GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val); | ||
| 133 | GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val); | ||
| 134 | GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val); | ||
| 135 | default: | ||
| 136 | pr_warning("attempt to read from unknown breakpoint register %d\n", n); | ||
| 137 | } | ||
| 138 | |||
| 139 | return val; | ||
| 140 | } | ||
| 141 | |||
| 142 | static void write_wb_reg(int reg, int n, u64 val) | ||
| 143 | { | ||
| 144 | switch (reg + n) { | ||
| 145 | GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val); | ||
| 146 | GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val); | ||
| 147 | GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val); | ||
| 148 | GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val); | ||
| 149 | default: | ||
| 150 | pr_warning("attempt to write to unknown breakpoint register %d\n", n); | ||
| 151 | } | ||
| 152 | isb(); | ||
| 153 | } | ||
| 154 | |||
| 155 | /* | ||
| 156 | * Convert a breakpoint privilege level to the corresponding exception | ||
| 157 | * level. | ||
| 158 | */ | ||
| 159 | static enum debug_el debug_exception_level(int privilege) | ||
| 160 | { | ||
| 161 | switch (privilege) { | ||
| 162 | case AARCH64_BREAKPOINT_EL0: | ||
| 163 | return DBG_ACTIVE_EL0; | ||
| 164 | case AARCH64_BREAKPOINT_EL1: | ||
| 165 | return DBG_ACTIVE_EL1; | ||
| 166 | default: | ||
| 167 | pr_warning("invalid breakpoint privilege level %d\n", privilege); | ||
| 168 | return -EINVAL; | ||
| 169 | } | ||
| 170 | } | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Install a perf counter breakpoint. | ||
| 174 | */ | ||
| 175 | int arch_install_hw_breakpoint(struct perf_event *bp) | ||
| 176 | { | ||
| 177 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
| 178 | struct perf_event **slot, **slots; | ||
| 179 | struct debug_info *debug_info = ¤t->thread.debug; | ||
| 180 | int i, max_slots, ctrl_reg, val_reg, reg_enable; | ||
| 181 | u32 ctrl; | ||
| 182 | |||
| 183 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { | ||
| 184 | /* Breakpoint */ | ||
| 185 | ctrl_reg = AARCH64_DBG_REG_BCR; | ||
| 186 | val_reg = AARCH64_DBG_REG_BVR; | ||
| 187 | slots = __get_cpu_var(bp_on_reg); | ||
| 188 | max_slots = core_num_brps; | ||
| 189 | reg_enable = !debug_info->bps_disabled; | ||
| 190 | } else { | ||
| 191 | /* Watchpoint */ | ||
| 192 | ctrl_reg = AARCH64_DBG_REG_WCR; | ||
| 193 | val_reg = AARCH64_DBG_REG_WVR; | ||
| 194 | slots = __get_cpu_var(wp_on_reg); | ||
| 195 | max_slots = core_num_wrps; | ||
| 196 | reg_enable = !debug_info->wps_disabled; | ||
| 197 | } | ||
| 198 | |||
| 199 | for (i = 0; i < max_slots; ++i) { | ||
| 200 | slot = &slots[i]; | ||
| 201 | |||
| 202 | if (!*slot) { | ||
| 203 | *slot = bp; | ||
| 204 | break; | ||
| 205 | } | ||
| 206 | } | ||
| 207 | |||
| 208 | if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) | ||
| 209 | return -ENOSPC; | ||
| 210 | |||
| 211 | /* Ensure debug monitors are enabled at the correct exception level. */ | ||
| 212 | enable_debug_monitors(debug_exception_level(info->ctrl.privilege)); | ||
| 213 | |||
| 214 | /* Setup the address register. */ | ||
| 215 | write_wb_reg(val_reg, i, info->address); | ||
| 216 | |||
| 217 | /* Setup the control register. */ | ||
| 218 | ctrl = encode_ctrl_reg(info->ctrl); | ||
| 219 | write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1); | ||
| 220 | |||
| 221 | return 0; | ||
| 222 | } | ||
| 223 | |||
| 224 | void arch_uninstall_hw_breakpoint(struct perf_event *bp) | ||
| 225 | { | ||
| 226 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
| 227 | struct perf_event **slot, **slots; | ||
| 228 | int i, max_slots, base; | ||
| 229 | |||
| 230 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { | ||
| 231 | /* Breakpoint */ | ||
| 232 | base = AARCH64_DBG_REG_BCR; | ||
| 233 | slots = __get_cpu_var(bp_on_reg); | ||
| 234 | max_slots = core_num_brps; | ||
| 235 | } else { | ||
| 236 | /* Watchpoint */ | ||
| 237 | base = AARCH64_DBG_REG_WCR; | ||
| 238 | slots = __get_cpu_var(wp_on_reg); | ||
| 239 | max_slots = core_num_wrps; | ||
| 240 | } | ||
| 241 | |||
| 242 | /* Remove the breakpoint. */ | ||
| 243 | for (i = 0; i < max_slots; ++i) { | ||
| 244 | slot = &slots[i]; | ||
| 245 | |||
| 246 | if (*slot == bp) { | ||
| 247 | *slot = NULL; | ||
| 248 | break; | ||
| 249 | } | ||
| 250 | } | ||
| 251 | |||
| 252 | if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) | ||
| 253 | return; | ||
| 254 | |||
| 255 | /* Reset the control register. */ | ||
| 256 | write_wb_reg(base, i, 0); | ||
| 257 | |||
| 258 | /* Release the debug monitors for the correct exception level. */ | ||
| 259 | disable_debug_monitors(debug_exception_level(info->ctrl.privilege)); | ||
| 260 | } | ||
| 261 | |||
| 262 | static int get_hbp_len(u8 hbp_len) | ||
| 263 | { | ||
| 264 | unsigned int len_in_bytes = 0; | ||
| 265 | |||
| 266 | switch (hbp_len) { | ||
| 267 | case ARM_BREAKPOINT_LEN_1: | ||
| 268 | len_in_bytes = 1; | ||
| 269 | break; | ||
| 270 | case ARM_BREAKPOINT_LEN_2: | ||
| 271 | len_in_bytes = 2; | ||
| 272 | break; | ||
| 273 | case ARM_BREAKPOINT_LEN_4: | ||
| 274 | len_in_bytes = 4; | ||
| 275 | break; | ||
| 276 | case ARM_BREAKPOINT_LEN_8: | ||
| 277 | len_in_bytes = 8; | ||
| 278 | break; | ||
| 279 | } | ||
| 280 | |||
| 281 | return len_in_bytes; | ||
| 282 | } | ||
| 283 | |||
| 284 | /* | ||
| 285 | * Check whether bp virtual address is in kernel space. | ||
| 286 | */ | ||
| 287 | int arch_check_bp_in_kernelspace(struct perf_event *bp) | ||
| 288 | { | ||
| 289 | unsigned int len; | ||
| 290 | unsigned long va; | ||
| 291 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
| 292 | |||
| 293 | va = info->address; | ||
| 294 | len = get_hbp_len(info->ctrl.len); | ||
| 295 | |||
| 296 | return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); | ||
| 297 | } | ||
| 298 | |||
| 299 | /* | ||
| 300 | * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. | ||
| 301 | * Hopefully this will disappear when ptrace can bypass the conversion | ||
| 302 | * to generic breakpoint descriptions. | ||
| 303 | */ | ||
| 304 | int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, | ||
| 305 | int *gen_len, int *gen_type) | ||
| 306 | { | ||
| 307 | /* Type */ | ||
| 308 | switch (ctrl.type) { | ||
| 309 | case ARM_BREAKPOINT_EXECUTE: | ||
| 310 | *gen_type = HW_BREAKPOINT_X; | ||
| 311 | break; | ||
| 312 | case ARM_BREAKPOINT_LOAD: | ||
| 313 | *gen_type = HW_BREAKPOINT_R; | ||
| 314 | break; | ||
| 315 | case ARM_BREAKPOINT_STORE: | ||
| 316 | *gen_type = HW_BREAKPOINT_W; | ||
| 317 | break; | ||
| 318 | case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: | ||
| 319 | *gen_type = HW_BREAKPOINT_RW; | ||
| 320 | break; | ||
| 321 | default: | ||
| 322 | return -EINVAL; | ||
| 323 | } | ||
| 324 | |||
| 325 | /* Len */ | ||
| 326 | switch (ctrl.len) { | ||
| 327 | case ARM_BREAKPOINT_LEN_1: | ||
| 328 | *gen_len = HW_BREAKPOINT_LEN_1; | ||
| 329 | break; | ||
| 330 | case ARM_BREAKPOINT_LEN_2: | ||
| 331 | *gen_len = HW_BREAKPOINT_LEN_2; | ||
| 332 | break; | ||
| 333 | case ARM_BREAKPOINT_LEN_4: | ||
| 334 | *gen_len = HW_BREAKPOINT_LEN_4; | ||
| 335 | break; | ||
| 336 | case ARM_BREAKPOINT_LEN_8: | ||
| 337 | *gen_len = HW_BREAKPOINT_LEN_8; | ||
| 338 | break; | ||
| 339 | default: | ||
| 340 | return -EINVAL; | ||
| 341 | } | ||
| 342 | |||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 346 | /* | ||
| 347 | * Construct an arch_hw_breakpoint from a perf_event. | ||
| 348 | */ | ||
| 349 | static int arch_build_bp_info(struct perf_event *bp) | ||
| 350 | { | ||
| 351 | struct arch_hw_breakpoint *info = counter_arch_bp(bp); | ||
| 352 | |||
| 353 | /* Type */ | ||
| 354 | switch (bp->attr.bp_type) { | ||
| 355 | case HW_BREAKPOINT_X: | ||
| 356 | info->ctrl.type = ARM_BREAKPOINT_EXECUTE; | ||
| 357 | break; | ||
| 358 | case HW_BREAKPOINT_R: | ||
| 359 | info->ctrl.type = ARM_BREAKPOINT_LOAD; | ||
| 360 | break; | ||
| 361 | case HW_BREAKPOINT_W: | ||
| 362 | info->ctrl.type = ARM_BREAKPOINT_STORE; | ||
| 363 | break; | ||
| 364 | case HW_BREAKPOINT_RW: | ||
| 365 | info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; | ||
| 366 | break; | ||
| 367 | default: | ||
| 368 | return -EINVAL; | ||
| 369 | } | ||
| 370 | |||
| 371 | /* Len */ | ||
| 372 | switch (bp->attr.bp_len) { | ||
| 373 | case HW_BREAKPOINT_LEN_1: | ||
| 374 | info->ctrl.len = ARM_BREAKPOINT_LEN_1; | ||
| 375 | break; | ||
| 376 | case HW_BREAKPOINT_LEN_2: | ||
| 377 | info->ctrl.len = ARM_BREAKPOINT_LEN_2; | ||
| 378 | break; | ||
| 379 | case HW_BREAKPOINT_LEN_4: | ||
| 380 | info->ctrl.len = ARM_BREAKPOINT_LEN_4; | ||
| 381 | break; | ||
| 382 | case HW_BREAKPOINT_LEN_8: | ||
| 383 | info->ctrl.len = ARM_BREAKPOINT_LEN_8; | ||
| 384 | break; | ||
| 385 | default: | ||
| 386 | return -EINVAL; | ||
| 387 | } | ||
| 388 | |||
| 389 | /* | ||
| 390 | * On AArch64, we only permit breakpoints of length 4, whereas | ||
| 391 | * AArch32 also requires breakpoints of length 2 for Thumb. | ||
| 392 | * Watchpoints can be of length 1, 2, 4 or 8 bytes. | ||
| 393 | */ | ||
| 394 | if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { | ||
| 395 | if (is_compat_task()) { | ||
| 396 | if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && | ||
| 397 | info->ctrl.len != ARM_BREAKPOINT_LEN_4) | ||
| 398 | return -EINVAL; | ||
| 399 | } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) { | ||
| 400 | /* | ||
| 401 | * FIXME: Some tools (I'm looking at you perf) assume | ||
| 402 | * that breakpoints should be sizeof(long). This | ||
| 403 | * is nonsense. For now, we fix up the parameter | ||
| 404 | * but we should probably return -EINVAL instead. | ||
| 405 | */ | ||
| 406 | info->ctrl.len = ARM_BREAKPOINT_LEN_4; | ||
| 407 | } | ||
| 408 | } | ||
| 409 | |||
| 410 | /* Address */ | ||
| 411 | info->address = bp->attr.bp_addr; | ||
| 412 | |||
| 413 | /* | ||
| 414 | * Privilege | ||
| 415 | * Note that we disallow combined EL0/EL1 breakpoints because | ||
| 416 | * that would complicate the stepping code. | ||
| 417 | */ | ||
| 418 | if (arch_check_bp_in_kernelspace(bp)) | ||
| 419 | info->ctrl.privilege = AARCH64_BREAKPOINT_EL1; | ||
| 420 | else | ||
| 421 | info->ctrl.privilege = AARCH64_BREAKPOINT_EL0; | ||
| 422 | |||
| 423 | /* Enabled? */ | ||
| 424 | info->ctrl.enabled = !bp->attr.disabled; | ||
| 425 | |||
| 426 | return 0; | ||
| 427 | } | ||
| 428 | |||
/*
 * Validate the arch-specific HW Breakpoint register settings.
 * Returns 0 if the breakpoint/watchpoint is acceptable, -EINVAL otherwise.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret;
	u64 alignment_mask, offset;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	/*
	 * Check address alignment.
	 * We don't do any clever alignment correction for watchpoints
	 * because using 64-bit unaligned addresses is deprecated for
	 * AArch64.
	 *
	 * AArch32 tasks expect some simple alignment fixups, so emulate
	 * that here.
	 */
	if (is_compat_task()) {
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
			alignment_mask = 0x7;
		else
			alignment_mask = 0x3;
		offset = info->address & alignment_mask;
		switch (offset) {
		case 0:
			/* Aligned */
			break;
		case 1:
			/* Allow single byte watchpoint. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
				break;
			/* Fall through. */
		case 2:
			/* Allow halfword watchpoints and breakpoints. */
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
				break;
			/* Fall through: anything else is misaligned. */
		default:
			return -EINVAL;
		}

		/*
		 * Program the aligned address into the register; shifting
		 * ctrl.len (the byte-address-select mask) by the offset
		 * keeps the watch on the originally requested bytes.
		 */
		info->address &= ~alignment_mask;
		info->ctrl.len <<= offset;
	} else {
		if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE)
			alignment_mask = 0x3;
		else
			alignment_mask = 0x7;
		if (info->address & alignment_mask)
			return -EINVAL;
	}

	/*
	 * Disallow per-task kernel breakpoints since these would
	 * complicate the stepping code.
	 */
	if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target)
		return -EINVAL;

	return 0;
}
| 494 | |||
| 495 | /* | ||
| 496 | * Enable/disable all of the breakpoints active at the specified | ||
| 497 | * exception level at the register level. | ||
| 498 | * This is used when single-stepping after a breakpoint exception. | ||
| 499 | */ | ||
| 500 | static void toggle_bp_registers(int reg, enum debug_el el, int enable) | ||
| 501 | { | ||
| 502 | int i, max_slots, privilege; | ||
| 503 | u32 ctrl; | ||
| 504 | struct perf_event **slots; | ||
| 505 | |||
| 506 | switch (reg) { | ||
| 507 | case AARCH64_DBG_REG_BCR: | ||
| 508 | slots = __get_cpu_var(bp_on_reg); | ||
| 509 | max_slots = core_num_brps; | ||
| 510 | break; | ||
| 511 | case AARCH64_DBG_REG_WCR: | ||
| 512 | slots = __get_cpu_var(wp_on_reg); | ||
| 513 | max_slots = core_num_wrps; | ||
| 514 | break; | ||
| 515 | default: | ||
| 516 | return; | ||
| 517 | } | ||
| 518 | |||
| 519 | for (i = 0; i < max_slots; ++i) { | ||
| 520 | if (!slots[i]) | ||
| 521 | continue; | ||
| 522 | |||
| 523 | privilege = counter_arch_bp(slots[i])->ctrl.privilege; | ||
| 524 | if (debug_exception_level(privilege) != el) | ||
| 525 | continue; | ||
| 526 | |||
| 527 | ctrl = read_wb_reg(reg, i); | ||
| 528 | if (enable) | ||
| 529 | ctrl |= 0x1; | ||
| 530 | else | ||
| 531 | ctrl &= ~0x1; | ||
| 532 | write_wb_reg(reg, i, ctrl); | ||
| 533 | } | ||
| 534 | } | ||
| 535 | |||
/*
 * Debug exception handlers.
 */
/*
 * Hardware breakpoint exception handler.
 * Matches the faulting PC against every per-cpu breakpoint slot,
 * reports hits to perf, then arranges single-stepping over the
 * breakpointed instruction when no overflow handler claims the event.
 * Always returns 0 (the exception is considered handled).
 */
static int breakpoint_handler(unsigned long unused, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step;
	u32 ctrl_reg;
	u64 addr, val;
	struct perf_event *bp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
	addr = instruction_pointer(regs);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_brps; ++i) {
		/* Slot entries are protected by RCU. */
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_BVR, i);
		if (val != (addr & ~0x3))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & 0x3)) & ctrl.len))
			goto unlock;

		counter_arch_bp(bp)->trigger = addr;
		perf_bp_event(bp, regs);

		/* Do we need to handle the stepping? */
		if (!bp->overflow_handler)
			step = 1;
unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	if (user_mode(regs)) {
		/* Suspend EL0 breakpoints while we step over this one. */
		debug_info->bps_disabled = 1;
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0);

		/* If we're already stepping a watchpoint, just return. */
		if (debug_info->wps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		/* Kernel-mode hit: track the step state per-cpu. */
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
		kernel_step = &__get_cpu_var(stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
| 614 | |||
/*
 * Hardware watchpoint exception handler.
 * Matches the faulting data address against every per-cpu watchpoint
 * slot (honouring AArch32 alignment emulation), checks the access type
 * against the event, reports hits to perf, then arranges stepping over
 * the access when no overflow handler claims the event.
 * Always returns 0 (the exception is considered handled).
 */
static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
{
	int i, step = 0, *kernel_step, access;
	u32 ctrl_reg;
	u64 val, alignment_mask;
	struct perf_event *wp, **slots;
	struct debug_info *debug_info;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
	debug_info = &current->thread.debug;

	for (i = 0; i < core_num_wrps; ++i) {
		/* Slot entries are protected by RCU. */
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		/* AArch32 watchpoints are either 4 or 8 bytes aligned. */
		if (is_compat_task()) {
			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
				alignment_mask = 0x7;
			else
				alignment_mask = 0x3;
		} else {
			alignment_mask = 0x7;
		}

		/* Check if the watchpoint value matches. */
		val = read_wb_reg(AARCH64_DBG_REG_WVR, i);
		if (val != (addr & ~alignment_mask))
			goto unlock;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if (!((1 << (addr & alignment_mask)) & ctrl.len))
			goto unlock;

		/*
		 * Check that the access type matches.
		 * 0 => load, otherwise => store
		 */
		access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W :
			 HW_BREAKPOINT_R;
		if (!(access & hw_breakpoint_type(wp)))
			goto unlock;

		info->trigger = addr;
		perf_bp_event(wp, regs);

		/* Do we need to handle the stepping? */
		if (!wp->overflow_handler)
			step = 1;

unlock:
		rcu_read_unlock();
	}

	if (!step)
		return 0;

	/*
	 * We always disable EL0 watchpoints because the kernel can
	 * cause these to fire via an unprivileged access.
	 */
	toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0);

	if (user_mode(regs)) {
		debug_info->wps_disabled = 1;

		/* If we're already stepping a breakpoint, just return. */
		if (debug_info->bps_disabled)
			return 0;

		if (test_thread_flag(TIF_SINGLESTEP))
			debug_info->suspended_step = 1;
		else
			user_enable_single_step(current);
	} else {
		/* Kernel-mode hit: track the step state per-cpu. */
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
		kernel_step = &__get_cpu_var(stepping_kernel_bp);

		if (*kernel_step != ARM_KERNEL_STEP_NONE)
			return 0;

		if (kernel_active_single_step()) {
			*kernel_step = ARM_KERNEL_STEP_SUSPEND;
		} else {
			*kernel_step = ARM_KERNEL_STEP_ACTIVE;
			kernel_enable_single_step(regs);
		}
	}

	return 0;
}
| 716 | |||
/*
 * Handle single-step exception.
 */
int reinstall_suspended_bps(struct pt_regs *regs)
{
	struct debug_info *debug_info = &current->thread.debug;
	int handled_exception = 0, *kernel_step;

	kernel_step = &__get_cpu_var(stepping_kernel_bp);

	/*
	 * Called from single-step exception handler.
	 * Return 0 if execution can resume, 1 if a SIGTRAP should be
	 * reported.
	 */
	if (user_mode(regs)) {
		/* Re-arm any EL0 breakpoints suspended by breakpoint_handler(). */
		if (debug_info->bps_disabled) {
			debug_info->bps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		/* Likewise for EL0 watchpoints suspended by watchpoint_handler(). */
		if (debug_info->wps_disabled) {
			debug_info->wps_disabled = 0;
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);
			handled_exception = 1;
		}

		if (handled_exception) {
			if (debug_info->suspended_step) {
				/* The task was already single-stepping (e.g. via
				 * ptrace) before the breakpoint fired. */
				debug_info->suspended_step = 0;
				/* Allow exception handling to fall-through. */
				handled_exception = 0;
			} else {
				user_disable_single_step(current);
			}
		}
	} else if (*kernel_step != ARM_KERNEL_STEP_NONE) {
		/* Re-arm EL1 breakpoints/watchpoints on this cpu. */
		toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1);
		toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1);

		if (!debug_info->wps_disabled)
			toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1);

		if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) {
			kernel_disable_single_step();
			handled_exception = 1;
		} else {
			/* Someone else was stepping; let them see the exception. */
			handled_exception = 0;
		}

		*kernel_step = ARM_KERNEL_STEP_NONE;
	}

	return !handled_exception;
}
| 773 | |||
| 774 | /* | ||
| 775 | * Context-switcher for restoring suspended breakpoints. | ||
| 776 | */ | ||
| 777 | void hw_breakpoint_thread_switch(struct task_struct *next) | ||
| 778 | { | ||
| 779 | /* | ||
| 780 | * current next | ||
| 781 | * disabled: 0 0 => The usual case, NOTIFY_DONE | ||
| 782 | * 0 1 => Disable the registers | ||
| 783 | * 1 0 => Enable the registers | ||
| 784 | * 1 1 => NOTIFY_DONE. per-task bps will | ||
| 785 | * get taken care of by perf. | ||
| 786 | */ | ||
| 787 | |||
| 788 | struct debug_info *current_debug_info, *next_debug_info; | ||
| 789 | |||
| 790 | current_debug_info = ¤t->thread.debug; | ||
| 791 | next_debug_info = &next->thread.debug; | ||
| 792 | |||
| 793 | /* Update breakpoints. */ | ||
| 794 | if (current_debug_info->bps_disabled != next_debug_info->bps_disabled) | ||
| 795 | toggle_bp_registers(AARCH64_DBG_REG_BCR, | ||
| 796 | DBG_ACTIVE_EL0, | ||
| 797 | !next_debug_info->bps_disabled); | ||
| 798 | |||
| 799 | /* Update watchpoints. */ | ||
| 800 | if (current_debug_info->wps_disabled != next_debug_info->wps_disabled) | ||
| 801 | toggle_bp_registers(AARCH64_DBG_REG_WCR, | ||
| 802 | DBG_ACTIVE_EL0, | ||
| 803 | !next_debug_info->wps_disabled); | ||
| 804 | } | ||
| 805 | |||
/*
 * CPU initialisation.
 * Clears the control and value registers of every breakpoint and
 * watchpoint slot on the calling CPU. The unused parameter exists so
 * this can be used as an smp_call_function() callback.
 */
static void reset_ctrl_regs(void *unused)
{
	int i;

	for (i = 0; i < core_num_brps; ++i) {
		write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
		write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
		write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
	}
}
| 823 | |||
| 824 | static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self, | ||
| 825 | unsigned long action, | ||
| 826 | void *hcpu) | ||
| 827 | { | ||
| 828 | int cpu = (long)hcpu; | ||
| 829 | if (action == CPU_ONLINE) | ||
| 830 | smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1); | ||
| 831 | return NOTIFY_OK; | ||
| 832 | } | ||
| 833 | |||
/* CPU-hotplug notifier used to reset debug registers on newly-online CPUs. */
static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
	.notifier_call = hw_breakpoint_reset_notify,
};
| 837 | |||
| 838 | /* | ||
| 839 | * One-time initialisation. | ||
| 840 | */ | ||
| 841 | static int __init arch_hw_breakpoint_init(void) | ||
| 842 | { | ||
| 843 | core_num_brps = get_num_brps(); | ||
| 844 | core_num_wrps = get_num_wrps(); | ||
| 845 | |||
| 846 | pr_info("found %d breakpoint and %d watchpoint registers.\n", | ||
| 847 | core_num_brps, core_num_wrps); | ||
| 848 | |||
| 849 | /* | ||
| 850 | * Reset the breakpoint resources. We assume that a halting | ||
| 851 | * debugger will leave the world in a nice state for us. | ||
| 852 | */ | ||
| 853 | smp_call_function(reset_ctrl_regs, NULL, 1); | ||
| 854 | reset_ctrl_regs(NULL); | ||
| 855 | |||
| 856 | /* Register debug fault handlers. */ | ||
| 857 | hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, | ||
| 858 | TRAP_HWBKPT, "hw-breakpoint handler"); | ||
| 859 | hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP, | ||
| 860 | TRAP_HWBKPT, "hw-watchpoint handler"); | ||
| 861 | |||
| 862 | /* Register hotplug notifier. */ | ||
| 863 | register_cpu_notifier(&hw_breakpoint_reset_nb); | ||
| 864 | |||
| 865 | return 0; | ||
| 866 | } | ||
| 867 | arch_initcall(arch_hw_breakpoint_init); | ||
| 868 | |||
/* Deliberately empty: breakpoint events have no counter state to read. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}
| 872 | |||
/*
 * Dummy function to register with die_notifier.
 * Unconditionally returns NOTIFY_DONE; all real work happens in the
 * debug fault handlers registered above.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c new file mode 100644 index 000000000000..490f7537a270 --- /dev/null +++ b/arch/arm64/kernel/ptrace.c | |||
| @@ -0,0 +1,1126 @@ | |||
| 1 | /* | ||
| 2 | * Based on arch/arm/kernel/ptrace.c | ||
| 3 | * | ||
| 4 | * By Ross Biro 1/23/92 | ||
| 5 | * edited by Linus Torvalds | ||
| 6 | * ARM modifications Copyright (C) 2000 Russell King | ||
| 7 | * Copyright (C) 2012 ARM Ltd. | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License | ||
| 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 20 | */ | ||
| 21 | |||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/sched.h> | ||
| 24 | #include <linux/mm.h> | ||
| 25 | #include <linux/smp.h> | ||
| 26 | #include <linux/ptrace.h> | ||
| 27 | #include <linux/user.h> | ||
| 28 | #include <linux/security.h> | ||
| 29 | #include <linux/init.h> | ||
| 30 | #include <linux/signal.h> | ||
| 31 | #include <linux/uaccess.h> | ||
| 32 | #include <linux/perf_event.h> | ||
| 33 | #include <linux/hw_breakpoint.h> | ||
| 34 | #include <linux/regset.h> | ||
| 35 | #include <linux/tracehook.h> | ||
| 36 | #include <linux/elf.h> | ||
| 37 | |||
| 38 | #include <asm/compat.h> | ||
| 39 | #include <asm/debug-monitors.h> | ||
| 40 | #include <asm/pgtable.h> | ||
| 41 | #include <asm/traps.h> | ||
| 42 | #include <asm/system_misc.h> | ||
| 43 | |||
| 44 | /* | ||
| 45 | * TODO: does not yet catch signals sent when the child dies. | ||
| 46 | * in exit.c or in signal.c. | ||
| 47 | */ | ||
| 48 | |||
/*
 * Called by kernel/ptrace.c when detaching..
 * Deliberately empty: no per-task debug state is torn down here.
 */
void ptrace_disable(struct task_struct *child)
{
}
| 55 | |||
| 56 | /* | ||
| 57 | * Handle hitting a breakpoint. | ||
| 58 | */ | ||
| 59 | static int ptrace_break(struct pt_regs *regs) | ||
| 60 | { | ||
| 61 | siginfo_t info = { | ||
| 62 | .si_signo = SIGTRAP, | ||
| 63 | .si_errno = 0, | ||
| 64 | .si_code = TRAP_BRKPT, | ||
| 65 | .si_addr = (void __user *)instruction_pointer(regs), | ||
| 66 | }; | ||
| 67 | |||
| 68 | force_sig_info(SIGTRAP, &info, current); | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
/* Break-instruction trap handler: addr and esr are unused; just raise SIGTRAP. */
static int arm64_break_trap(unsigned long addr, unsigned int esr,
			    struct pt_regs *regs)
{
	return ptrace_break(regs);
}
| 77 | |||
| 78 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
| 79 | /* | ||
| 80 | * Handle hitting a HW-breakpoint. | ||
| 81 | */ | ||
| 82 | static void ptrace_hbptriggered(struct perf_event *bp, | ||
| 83 | struct perf_sample_data *data, | ||
| 84 | struct pt_regs *regs) | ||
| 85 | { | ||
| 86 | struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); | ||
| 87 | siginfo_t info = { | ||
| 88 | .si_signo = SIGTRAP, | ||
| 89 | .si_errno = 0, | ||
| 90 | .si_code = TRAP_HWBKPT, | ||
| 91 | .si_addr = (void __user *)(bkpt->trigger), | ||
| 92 | }; | ||
| 93 | |||
| 94 | #ifdef CONFIG_COMPAT | ||
| 95 | int i; | ||
| 96 | |||
| 97 | if (!is_compat_task()) | ||
| 98 | goto send_sig; | ||
| 99 | |||
| 100 | for (i = 0; i < ARM_MAX_BRP; ++i) { | ||
| 101 | if (current->thread.debug.hbp_break[i] == bp) { | ||
| 102 | info.si_errno = (i << 1) + 1; | ||
| 103 | break; | ||
| 104 | } | ||
| 105 | } | ||
| 106 | for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) { | ||
| 107 | if (current->thread.debug.hbp_watch[i] == bp) { | ||
| 108 | info.si_errno = -((i << 1) + 1); | ||
| 109 | break; | ||
| 110 | } | ||
| 111 | } | ||
| 112 | |||
| 113 | send_sig: | ||
| 114 | #endif | ||
| 115 | force_sig_info(SIGTRAP, &info, current); | ||
| 116 | } | ||
| 117 | |||
| 118 | /* | ||
| 119 | * Unregister breakpoints from this task and reset the pointers in | ||
| 120 | * the thread_struct. | ||
| 121 | */ | ||
| 122 | void flush_ptrace_hw_breakpoint(struct task_struct *tsk) | ||
| 123 | { | ||
| 124 | int i; | ||
| 125 | struct thread_struct *t = &tsk->thread; | ||
| 126 | |||
| 127 | for (i = 0; i < ARM_MAX_BRP; i++) { | ||
| 128 | if (t->debug.hbp_break[i]) { | ||
| 129 | unregister_hw_breakpoint(t->debug.hbp_break[i]); | ||
| 130 | t->debug.hbp_break[i] = NULL; | ||
| 131 | } | ||
| 132 | } | ||
| 133 | |||
| 134 | for (i = 0; i < ARM_MAX_WRP; i++) { | ||
| 135 | if (t->debug.hbp_watch[i]) { | ||
| 136 | unregister_hw_breakpoint(t->debug.hbp_watch[i]); | ||
| 137 | t->debug.hbp_watch[i] = NULL; | ||
| 138 | } | ||
| 139 | } | ||
| 140 | } | ||
| 141 | |||
| 142 | void ptrace_hw_copy_thread(struct task_struct *tsk) | ||
| 143 | { | ||
| 144 | memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); | ||
| 145 | } | ||
| 146 | |||
| 147 | static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, | ||
| 148 | struct task_struct *tsk, | ||
| 149 | unsigned long idx) | ||
| 150 | { | ||
| 151 | struct perf_event *bp = ERR_PTR(-EINVAL); | ||
| 152 | |||
| 153 | switch (note_type) { | ||
| 154 | case NT_ARM_HW_BREAK: | ||
| 155 | if (idx < ARM_MAX_BRP) | ||
| 156 | bp = tsk->thread.debug.hbp_break[idx]; | ||
| 157 | break; | ||
| 158 | case NT_ARM_HW_WATCH: | ||
| 159 | if (idx < ARM_MAX_WRP) | ||
| 160 | bp = tsk->thread.debug.hbp_watch[idx]; | ||
| 161 | break; | ||
| 162 | } | ||
| 163 | |||
| 164 | return bp; | ||
| 165 | } | ||
| 166 | |||
| 167 | static int ptrace_hbp_set_event(unsigned int note_type, | ||
| 168 | struct task_struct *tsk, | ||
| 169 | unsigned long idx, | ||
| 170 | struct perf_event *bp) | ||
| 171 | { | ||
| 172 | int err = -EINVAL; | ||
| 173 | |||
| 174 | switch (note_type) { | ||
| 175 | case NT_ARM_HW_BREAK: | ||
| 176 | if (idx < ARM_MAX_BRP) { | ||
| 177 | tsk->thread.debug.hbp_break[idx] = bp; | ||
| 178 | err = 0; | ||
| 179 | } | ||
| 180 | break; | ||
| 181 | case NT_ARM_HW_WATCH: | ||
| 182 | if (idx < ARM_MAX_WRP) { | ||
| 183 | tsk->thread.debug.hbp_watch[idx] = bp; | ||
| 184 | err = 0; | ||
| 185 | } | ||
| 186 | break; | ||
| 187 | } | ||
| 188 | |||
| 189 | return err; | ||
| 190 | } | ||
| 191 | |||
| 192 | static struct perf_event *ptrace_hbp_create(unsigned int note_type, | ||
| 193 | struct task_struct *tsk, | ||
| 194 | unsigned long idx) | ||
| 195 | { | ||
| 196 | struct perf_event *bp; | ||
| 197 | struct perf_event_attr attr; | ||
| 198 | int err, type; | ||
| 199 | |||
| 200 | switch (note_type) { | ||
| 201 | case NT_ARM_HW_BREAK: | ||
| 202 | type = HW_BREAKPOINT_X; | ||
| 203 | break; | ||
| 204 | case NT_ARM_HW_WATCH: | ||
| 205 | type = HW_BREAKPOINT_RW; | ||
| 206 | break; | ||
| 207 | default: | ||
| 208 | return ERR_PTR(-EINVAL); | ||
| 209 | } | ||
| 210 | |||
| 211 | ptrace_breakpoint_init(&attr); | ||
| 212 | |||
| 213 | /* | ||
| 214 | * Initialise fields to sane defaults | ||
| 215 | * (i.e. values that will pass validation). | ||
| 216 | */ | ||
| 217 | attr.bp_addr = 0; | ||
| 218 | attr.bp_len = HW_BREAKPOINT_LEN_4; | ||
| 219 | attr.bp_type = type; | ||
| 220 | attr.disabled = 1; | ||
| 221 | |||
| 222 | bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); | ||
| 223 | if (IS_ERR(bp)) | ||
| 224 | return bp; | ||
| 225 | |||
| 226 | err = ptrace_hbp_set_event(note_type, tsk, idx, bp); | ||
| 227 | if (err) | ||
| 228 | return ERR_PTR(err); | ||
| 229 | |||
| 230 | return bp; | ||
| 231 | } | ||
| 232 | |||
| 233 | static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, | ||
| 234 | struct arch_hw_breakpoint_ctrl ctrl, | ||
| 235 | struct perf_event_attr *attr) | ||
| 236 | { | ||
| 237 | int err, len, type; | ||
| 238 | |||
| 239 | err = arch_bp_generic_fields(ctrl, &len, &type); | ||
| 240 | if (err) | ||
| 241 | return err; | ||
| 242 | |||
| 243 | switch (note_type) { | ||
| 244 | case NT_ARM_HW_BREAK: | ||
| 245 | if ((type & HW_BREAKPOINT_X) != type) | ||
| 246 | return -EINVAL; | ||
| 247 | break; | ||
| 248 | case NT_ARM_HW_WATCH: | ||
| 249 | if ((type & HW_BREAKPOINT_RW) != type) | ||
| 250 | return -EINVAL; | ||
| 251 | break; | ||
| 252 | default: | ||
| 253 | return -EINVAL; | ||
| 254 | } | ||
| 255 | |||
| 256 | attr->bp_len = len; | ||
| 257 | attr->bp_type = type; | ||
| 258 | attr->disabled = !ctrl.enabled; | ||
| 259 | |||
| 260 | return 0; | ||
| 261 | } | ||
| 262 | |||
| 263 | static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) | ||
| 264 | { | ||
| 265 | u8 num; | ||
| 266 | u32 reg = 0; | ||
| 267 | |||
| 268 | switch (note_type) { | ||
| 269 | case NT_ARM_HW_BREAK: | ||
| 270 | num = hw_breakpoint_slots(TYPE_INST); | ||
| 271 | break; | ||
| 272 | case NT_ARM_HW_WATCH: | ||
| 273 | num = hw_breakpoint_slots(TYPE_DATA); | ||
| 274 | break; | ||
| 275 | default: | ||
| 276 | return -EINVAL; | ||
| 277 | } | ||
| 278 | |||
| 279 | reg |= debug_monitors_arch(); | ||
| 280 | reg <<= 8; | ||
| 281 | reg |= num; | ||
| 282 | |||
| 283 | *info = reg; | ||
| 284 | return 0; | ||
| 285 | } | ||
| 286 | |||
| 287 | static int ptrace_hbp_get_ctrl(unsigned int note_type, | ||
| 288 | struct task_struct *tsk, | ||
| 289 | unsigned long idx, | ||
| 290 | u32 *ctrl) | ||
| 291 | { | ||
| 292 | struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); | ||
| 293 | |||
| 294 | if (IS_ERR(bp)) | ||
| 295 | return PTR_ERR(bp); | ||
| 296 | |||
| 297 | *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; | ||
| 298 | return 0; | ||
| 299 | } | ||
| 300 | |||
| 301 | static int ptrace_hbp_get_addr(unsigned int note_type, | ||
| 302 | struct task_struct *tsk, | ||
| 303 | unsigned long idx, | ||
| 304 | u64 *addr) | ||
| 305 | { | ||
| 306 | struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); | ||
| 307 | |||
| 308 | if (IS_ERR(bp)) | ||
| 309 | return PTR_ERR(bp); | ||
| 310 | |||
| 311 | *addr = bp ? bp->attr.bp_addr : 0; | ||
| 312 | return 0; | ||
| 313 | } | ||
| 314 | |||
/*
 * Fetch the event in slot @idx, lazily creating a disabled one if the
 * slot is empty. Propagates ERR_PTR values from lookup or creation.
 */
static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
							struct task_struct *tsk,
							unsigned long idx)
{
	struct perf_event *bp;

	bp = ptrace_hbp_get_event(note_type, tsk, idx);
	if (bp)
		return bp;	/* Includes ERR_PTR from a bad type/index. */

	return ptrace_hbp_create(note_type, tsk, idx);
}
| 326 | |||
| 327 | static int ptrace_hbp_set_ctrl(unsigned int note_type, | ||
| 328 | struct task_struct *tsk, | ||
| 329 | unsigned long idx, | ||
| 330 | u32 uctrl) | ||
| 331 | { | ||
| 332 | int err; | ||
| 333 | struct perf_event *bp; | ||
| 334 | struct perf_event_attr attr; | ||
| 335 | struct arch_hw_breakpoint_ctrl ctrl; | ||
| 336 | |||
| 337 | bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); | ||
| 338 | if (IS_ERR(bp)) { | ||
| 339 | err = PTR_ERR(bp); | ||
| 340 | return err; | ||
| 341 | } | ||
| 342 | |||
| 343 | attr = bp->attr; | ||
| 344 | decode_ctrl_reg(uctrl, &ctrl); | ||
| 345 | err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); | ||
| 346 | if (err) | ||
| 347 | return err; | ||
| 348 | |||
| 349 | return modify_user_hw_breakpoint(bp, &attr); | ||
| 350 | } | ||
| 351 | |||
| 352 | static int ptrace_hbp_set_addr(unsigned int note_type, | ||
| 353 | struct task_struct *tsk, | ||
| 354 | unsigned long idx, | ||
| 355 | u64 addr) | ||
| 356 | { | ||
| 357 | int err; | ||
| 358 | struct perf_event *bp; | ||
| 359 | struct perf_event_attr attr; | ||
| 360 | |||
| 361 | bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); | ||
| 362 | if (IS_ERR(bp)) { | ||
| 363 | err = PTR_ERR(bp); | ||
| 364 | return err; | ||
| 365 | } | ||
| 366 | |||
| 367 | attr = bp->attr; | ||
| 368 | attr.bp_addr = addr; | ||
| 369 | err = modify_user_hw_breakpoint(bp, &attr); | ||
| 370 | return err; | ||
| 371 | } | ||
| 372 | |||
/*
 * Field sizes of the user_hwdebug_state layout exported by the
 * hw_break_{get,set} regset handlers: a u32 resource-info word
 * (PTRACE_HBP_REG_OFF is its size, i.e. the offset of the first pair),
 * followed by (u64 address, u32 control) pairs.
 */
#define PTRACE_HBP_ADDR_SZ sizeof(u64)
#define PTRACE_HBP_CTRL_SZ sizeof(u32)
#define PTRACE_HBP_REG_OFF sizeof(u32)
| 376 | |||
/*
 * Regset "get" handler shared by NT_ARM_HW_BREAK and NT_ARM_HW_WATCH
 * (regset->core_note_type selects which). User-visible layout: one u32
 * of resource info, then a (u64 address, u32 control) pair per slot.
 */
static int hw_break_get(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			void *kbuf, void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
	u32 info, ctrl;
	u64 addr;

	/* Resource info */
	ret = ptrace_hbp_get_resource_info(note_type, &info);
	if (ret)
		return ret;

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		/* copyout advances pos/count; offset tracks the layout. */
		ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr,
					  offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
		if (ret)
			return ret;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl,
					  offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;
		idx++;
	}

	return 0;
}
| 421 | |||
/*
 * Regset "set" handler shared by NT_ARM_HW_BREAK and NT_ARM_HW_WATCH.
 * Mirrors hw_break_get(): skips the read-only resource-info word, then
 * programs each (address, control) pair in order. Slots are updated as
 * they are parsed, so an error mid-way leaves earlier slots modified.
 */
static int hw_break_set(struct task_struct *target,
			const struct user_regset *regset,
			unsigned int pos, unsigned int count,
			const void *kbuf, const void __user *ubuf)
{
	unsigned int note_type = regset->core_note_type;
	int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit;
	u32 ctrl;
	u64 addr;

	/* Resource info */
	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4);
	if (ret)
		return ret;

	/* (address, ctrl) registers */
	limit = regset->n * regset->size;
	while (count && offset < limit) {
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
					 offset, offset + PTRACE_HBP_ADDR_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
		if (ret)
			return ret;
		offset += PTRACE_HBP_ADDR_SZ;

		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
					 offset, offset + PTRACE_HBP_CTRL_SZ);
		if (ret)
			return ret;
		ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
		if (ret)
			return ret;
		offset += PTRACE_HBP_CTRL_SZ;
		idx++;
	}

	return 0;
}
| 462 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
| 463 | |||
| 464 | static int gpr_get(struct task_struct *target, | ||
| 465 | const struct user_regset *regset, | ||
| 466 | unsigned int pos, unsigned int count, | ||
| 467 | void *kbuf, void __user *ubuf) | ||
| 468 | { | ||
| 469 | struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; | ||
| 470 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); | ||
| 471 | } | ||
| 472 | |||
| 473 | static int gpr_set(struct task_struct *target, const struct user_regset *regset, | ||
| 474 | unsigned int pos, unsigned int count, | ||
| 475 | const void *kbuf, const void __user *ubuf) | ||
| 476 | { | ||
| 477 | int ret; | ||
| 478 | struct user_pt_regs newregs; | ||
| 479 | |||
| 480 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); | ||
| 481 | if (ret) | ||
| 482 | return ret; | ||
| 483 | |||
| 484 | if (!valid_user_regs(&newregs)) | ||
| 485 | return -EINVAL; | ||
| 486 | |||
| 487 | task_pt_regs(target)->user_regs = newregs; | ||
| 488 | return 0; | ||
| 489 | } | ||
| 490 | |||
| 491 | /* | ||
| 492 | * TODO: update fp accessors for lazy context switching (sync/flush hwstate) | ||
| 493 | */ | ||
| 494 | static int fpr_get(struct task_struct *target, const struct user_regset *regset, | ||
| 495 | unsigned int pos, unsigned int count, | ||
| 496 | void *kbuf, void __user *ubuf) | ||
| 497 | { | ||
| 498 | struct user_fpsimd_state *uregs; | ||
| 499 | uregs = &target->thread.fpsimd_state.user_fpsimd; | ||
| 500 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); | ||
| 501 | } | ||
| 502 | |||
| 503 | static int fpr_set(struct task_struct *target, const struct user_regset *regset, | ||
| 504 | unsigned int pos, unsigned int count, | ||
| 505 | const void *kbuf, const void __user *ubuf) | ||
| 506 | { | ||
| 507 | int ret; | ||
| 508 | struct user_fpsimd_state newstate; | ||
| 509 | |||
| 510 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); | ||
| 511 | if (ret) | ||
| 512 | return ret; | ||
| 513 | |||
| 514 | target->thread.fpsimd_state.user_fpsimd = newstate; | ||
| 515 | return ret; | ||
| 516 | } | ||
| 517 | |||
| 518 | static int tls_get(struct task_struct *target, const struct user_regset *regset, | ||
| 519 | unsigned int pos, unsigned int count, | ||
| 520 | void *kbuf, void __user *ubuf) | ||
| 521 | { | ||
| 522 | unsigned long *tls = &target->thread.tp_value; | ||
| 523 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1); | ||
| 524 | } | ||
| 525 | |||
| 526 | static int tls_set(struct task_struct *target, const struct user_regset *regset, | ||
| 527 | unsigned int pos, unsigned int count, | ||
| 528 | const void *kbuf, const void __user *ubuf) | ||
| 529 | { | ||
| 530 | int ret; | ||
| 531 | unsigned long tls; | ||
| 532 | |||
| 533 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); | ||
| 534 | if (ret) | ||
| 535 | return ret; | ||
| 536 | |||
| 537 | target->thread.tp_value = tls; | ||
| 538 | return ret; | ||
| 539 | } | ||
| 540 | |||
/* Index of each native regset within aarch64_regsets[]. */
enum aarch64_regset {
	REGSET_GPR,
	REGSET_FPR,
	REGSET_TLS,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	REGSET_HW_BREAK,
	REGSET_HW_WATCH,
#endif
};
| 550 | |||
/*
 * Native (AArch64) regset table: describes the note type, element count,
 * element size/alignment and accessor pair for each register class
 * exposed via PTRACE_GETREGSET/SETREGSET and ELF core dumps.
 */
static const struct user_regset aarch64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u64),
		.size = sizeof(u64),
		.align = sizeof(u64),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fpsimd_state) / sizeof(u32),
		/*
		 * We pretend we have 32-bit registers because the fpsr and
		 * fpcr are 32-bits wide.
		 */
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpr_get,
		.set = fpr_set
	},
	[REGSET_TLS] = {
		.core_note_type = NT_ARM_TLS,
		.n = 1,
		.size = sizeof(void *),
		.align = sizeof(void *),
		.get = tls_get,
		.set = tls_set,
	},
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	[REGSET_HW_BREAK] = {
		.core_note_type = NT_ARM_HW_BREAK,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
	[REGSET_HW_WATCH] = {
		.core_note_type = NT_ARM_HW_WATCH,
		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = hw_break_get,
		.set = hw_break_set,
	},
#endif
};
| 599 | |||
/* Regset view handed to the generic ptrace/coredump code for native tasks. */
static const struct user_regset_view user_aarch64_view = {
	.name = "aarch64", .e_machine = EM_AARCH64,
	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
};
| 604 | |||
| 605 | #ifdef CONFIG_COMPAT | ||
| 606 | #include <linux/compat.h> | ||
| 607 | |||
/* Index of each compat (AArch32) regset within aarch32_regsets[]. */
enum compat_regset {
	REGSET_COMPAT_GPR,
	REGSET_COMPAT_VFP,
};
| 612 | |||
| 613 | static int compat_gpr_get(struct task_struct *target, | ||
| 614 | const struct user_regset *regset, | ||
| 615 | unsigned int pos, unsigned int count, | ||
| 616 | void *kbuf, void __user *ubuf) | ||
| 617 | { | ||
| 618 | int ret = 0; | ||
| 619 | unsigned int i, start, num_regs; | ||
| 620 | |||
| 621 | /* Calculate the number of AArch32 registers contained in count */ | ||
| 622 | num_regs = count / regset->size; | ||
| 623 | |||
| 624 | /* Convert pos into an register number */ | ||
| 625 | start = pos / regset->size; | ||
| 626 | |||
| 627 | if (start + num_regs > regset->n) | ||
| 628 | return -EIO; | ||
| 629 | |||
| 630 | for (i = 0; i < num_regs; ++i) { | ||
| 631 | unsigned int idx = start + i; | ||
| 632 | void *reg; | ||
| 633 | |||
| 634 | switch (idx) { | ||
| 635 | case 15: | ||
| 636 | reg = (void *)&task_pt_regs(target)->pc; | ||
| 637 | break; | ||
| 638 | case 16: | ||
| 639 | reg = (void *)&task_pt_regs(target)->pstate; | ||
| 640 | break; | ||
| 641 | case 17: | ||
| 642 | reg = (void *)&task_pt_regs(target)->orig_x0; | ||
| 643 | break; | ||
| 644 | default: | ||
| 645 | reg = (void *)&task_pt_regs(target)->regs[idx]; | ||
| 646 | } | ||
| 647 | |||
| 648 | ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t)); | ||
| 649 | |||
| 650 | if (ret) | ||
| 651 | break; | ||
| 652 | else | ||
| 653 | ubuf += sizeof(compat_ulong_t); | ||
| 654 | } | ||
| 655 | |||
| 656 | return ret; | ||
| 657 | } | ||
| 658 | |||
| 659 | static int compat_gpr_set(struct task_struct *target, | ||
| 660 | const struct user_regset *regset, | ||
| 661 | unsigned int pos, unsigned int count, | ||
| 662 | const void *kbuf, const void __user *ubuf) | ||
| 663 | { | ||
| 664 | struct pt_regs newregs; | ||
| 665 | int ret = 0; | ||
| 666 | unsigned int i, start, num_regs; | ||
| 667 | |||
| 668 | /* Calculate the number of AArch32 registers contained in count */ | ||
| 669 | num_regs = count / regset->size; | ||
| 670 | |||
| 671 | /* Convert pos into an register number */ | ||
| 672 | start = pos / regset->size; | ||
| 673 | |||
| 674 | if (start + num_regs > regset->n) | ||
| 675 | return -EIO; | ||
| 676 | |||
| 677 | newregs = *task_pt_regs(target); | ||
| 678 | |||
| 679 | for (i = 0; i < num_regs; ++i) { | ||
| 680 | unsigned int idx = start + i; | ||
| 681 | void *reg; | ||
| 682 | |||
| 683 | switch (idx) { | ||
| 684 | case 15: | ||
| 685 | reg = (void *)&newregs.pc; | ||
| 686 | break; | ||
| 687 | case 16: | ||
| 688 | reg = (void *)&newregs.pstate; | ||
| 689 | break; | ||
| 690 | case 17: | ||
| 691 | reg = (void *)&newregs.orig_x0; | ||
| 692 | break; | ||
| 693 | default: | ||
| 694 | reg = (void *)&newregs.regs[idx]; | ||
| 695 | } | ||
| 696 | |||
| 697 | ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t)); | ||
| 698 | |||
| 699 | if (ret) | ||
| 700 | goto out; | ||
| 701 | else | ||
| 702 | ubuf += sizeof(compat_ulong_t); | ||
| 703 | } | ||
| 704 | |||
| 705 | if (valid_user_regs(&newregs.user_regs)) | ||
| 706 | *task_pt_regs(target) = newregs; | ||
| 707 | else | ||
| 708 | ret = -EINVAL; | ||
| 709 | |||
| 710 | out: | ||
| 711 | return ret; | ||
| 712 | } | ||
| 713 | |||
| 714 | static int compat_vfp_get(struct task_struct *target, | ||
| 715 | const struct user_regset *regset, | ||
| 716 | unsigned int pos, unsigned int count, | ||
| 717 | void *kbuf, void __user *ubuf) | ||
| 718 | { | ||
| 719 | struct user_fpsimd_state *uregs; | ||
| 720 | compat_ulong_t fpscr; | ||
| 721 | int ret; | ||
| 722 | |||
| 723 | uregs = &target->thread.fpsimd_state.user_fpsimd; | ||
| 724 | |||
| 725 | /* | ||
| 726 | * The VFP registers are packed into the fpsimd_state, so they all sit | ||
| 727 | * nicely together for us. We just need to create the fpscr separately. | ||
| 728 | */ | ||
| 729 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, | ||
| 730 | VFP_STATE_SIZE - sizeof(compat_ulong_t)); | ||
| 731 | |||
| 732 | if (count && !ret) { | ||
| 733 | fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | | ||
| 734 | (uregs->fpcr & VFP_FPSCR_CTRL_MASK); | ||
| 735 | ret = put_user(fpscr, (compat_ulong_t *)ubuf); | ||
| 736 | } | ||
| 737 | |||
| 738 | return ret; | ||
| 739 | } | ||
| 740 | |||
| 741 | static int compat_vfp_set(struct task_struct *target, | ||
| 742 | const struct user_regset *regset, | ||
| 743 | unsigned int pos, unsigned int count, | ||
| 744 | const void *kbuf, const void __user *ubuf) | ||
| 745 | { | ||
| 746 | struct user_fpsimd_state *uregs; | ||
| 747 | compat_ulong_t fpscr; | ||
| 748 | int ret; | ||
| 749 | |||
| 750 | if (pos + count > VFP_STATE_SIZE) | ||
| 751 | return -EIO; | ||
| 752 | |||
| 753 | uregs = &target->thread.fpsimd_state.user_fpsimd; | ||
| 754 | |||
| 755 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, | ||
| 756 | VFP_STATE_SIZE - sizeof(compat_ulong_t)); | ||
| 757 | |||
| 758 | if (count && !ret) { | ||
| 759 | ret = get_user(fpscr, (compat_ulong_t *)ubuf); | ||
| 760 | uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; | ||
| 761 | uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; | ||
| 762 | } | ||
| 763 | |||
| 764 | return ret; | ||
| 765 | } | ||
| 766 | |||
/* Compat (AArch32) regset table: GPRs and VFP state. */
static const struct user_regset aarch32_regsets[] = {
	[REGSET_COMPAT_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = COMPAT_ELF_NGREG,
		.size = sizeof(compat_elf_greg_t),
		.align = sizeof(compat_elf_greg_t),
		.get = compat_gpr_get,
		.set = compat_gpr_set
	},
	[REGSET_COMPAT_VFP] = {
		.core_note_type = NT_ARM_VFP,
		.n = VFP_STATE_SIZE / sizeof(compat_ulong_t),
		.size = sizeof(compat_ulong_t),
		.align = sizeof(compat_ulong_t),
		.get = compat_vfp_get,
		.set = compat_vfp_set
	},
};
| 785 | |||
/* Regset view used for 32-bit (compat) tracees. */
static const struct user_regset_view user_aarch32_view = {
	.name = "aarch32", .e_machine = EM_ARM,
	.regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets)
};
| 790 | |||
| 791 | int aarch32_break_trap(struct pt_regs *regs) | ||
| 792 | { | ||
| 793 | unsigned int instr; | ||
| 794 | bool bp = false; | ||
| 795 | void __user *pc = (void __user *)instruction_pointer(regs); | ||
| 796 | |||
| 797 | if (compat_thumb_mode(regs)) { | ||
| 798 | /* get 16-bit Thumb instruction */ | ||
| 799 | get_user(instr, (u16 __user *)pc); | ||
| 800 | if (instr == AARCH32_BREAK_THUMB2_LO) { | ||
| 801 | /* get second half of 32-bit Thumb-2 instruction */ | ||
| 802 | get_user(instr, (u16 __user *)(pc + 2)); | ||
| 803 | bp = instr == AARCH32_BREAK_THUMB2_HI; | ||
| 804 | } else { | ||
| 805 | bp = instr == AARCH32_BREAK_THUMB; | ||
| 806 | } | ||
| 807 | } else { | ||
| 808 | /* 32-bit ARM instruction */ | ||
| 809 | get_user(instr, (u32 __user *)pc); | ||
| 810 | bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; | ||
| 811 | } | ||
| 812 | |||
| 813 | if (bp) | ||
| 814 | return ptrace_break(regs); | ||
| 815 | return 1; | ||
| 816 | } | ||
| 817 | |||
/*
 * PTRACE_PEEKUSR for compat tasks: read one 32-bit word of the fake
 * "user area" at byte offset @off into *@ret.
 *
 * Magic offsets PT_TEXT_ADDR / PT_DATA_ADDR / PT_TEXT_END_ADDR report the
 * tracee's code/data layout; offsets within the GPR area are serviced by
 * the aarch32 regset; other offsets below COMPAT_USER_SZ read as zero.
 */
static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off,
				   compat_ulong_t __user *ret)
{
	compat_ulong_t tmp;

	/* Offsets must be word-aligned. */
	if (off & 3)
		return -EIO;

	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(compat_elf_gregset_t))
		return copy_regset_to_user(tsk, &user_aarch32_view,
					   REGSET_COMPAT_GPR, off,
					   sizeof(compat_ulong_t), ret);
	else if (off >= COMPAT_USER_SZ)
		return -EIO;
	else
		tmp = 0;

	return put_user(tmp, ret);
}
| 843 | |||
| 844 | static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, | ||
| 845 | compat_ulong_t val) | ||
| 846 | { | ||
| 847 | int ret; | ||
| 848 | |||
| 849 | if (off & 3 || off >= COMPAT_USER_SZ) | ||
| 850 | return -EIO; | ||
| 851 | |||
| 852 | if (off >= sizeof(compat_elf_gregset_t)) | ||
| 853 | return 0; | ||
| 854 | |||
| 855 | ret = copy_regset_from_user(tsk, &user_aarch32_view, | ||
| 856 | REGSET_COMPAT_GPR, off, | ||
| 857 | sizeof(compat_ulong_t), | ||
| 858 | &val); | ||
| 859 | return ret; | ||
| 860 | } | ||
| 861 | |||
| 862 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | ||
| 863 | |||
| 864 | /* | ||
| 865 | * Convert a virtual register number into an index for a thread_info | ||
| 866 | * breakpoint array. Breakpoints are identified using positive numbers | ||
| 867 | * whilst watchpoints are negative. The registers are laid out as pairs | ||
| 868 | * of (address, control), each pair mapping to a unique hw_breakpoint struct. | ||
| 869 | * Register 0 is reserved for describing resource information. | ||
| 870 | */ | ||
| 871 | static int compat_ptrace_hbp_num_to_idx(compat_long_t num) | ||
| 872 | { | ||
| 873 | return (abs(num) - 1) >> 1; | ||
| 874 | } | ||
| 875 | |||
| 876 | static int compat_ptrace_hbp_get_resource_info(u32 *kdata) | ||
| 877 | { | ||
| 878 | u8 num_brps, num_wrps, debug_arch, wp_len; | ||
| 879 | u32 reg = 0; | ||
| 880 | |||
| 881 | num_brps = hw_breakpoint_slots(TYPE_INST); | ||
| 882 | num_wrps = hw_breakpoint_slots(TYPE_DATA); | ||
| 883 | |||
| 884 | debug_arch = debug_monitors_arch(); | ||
| 885 | wp_len = 8; | ||
| 886 | reg |= debug_arch; | ||
| 887 | reg <<= 8; | ||
| 888 | reg |= wp_len; | ||
| 889 | reg <<= 8; | ||
| 890 | reg |= num_wrps; | ||
| 891 | reg <<= 8; | ||
| 892 | reg |= num_brps; | ||
| 893 | |||
| 894 | *kdata = reg; | ||
| 895 | return 0; | ||
| 896 | } | ||
| 897 | |||
| 898 | static int compat_ptrace_hbp_get(unsigned int note_type, | ||
| 899 | struct task_struct *tsk, | ||
| 900 | compat_long_t num, | ||
| 901 | u32 *kdata) | ||
| 902 | { | ||
| 903 | u64 addr = 0; | ||
| 904 | u32 ctrl = 0; | ||
| 905 | |||
| 906 | int err, idx = compat_ptrace_hbp_num_to_idx(num);; | ||
| 907 | |||
| 908 | if (num & 1) { | ||
| 909 | err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr); | ||
| 910 | *kdata = (u32)addr; | ||
| 911 | } else { | ||
| 912 | err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl); | ||
| 913 | *kdata = ctrl; | ||
| 914 | } | ||
| 915 | |||
| 916 | return err; | ||
| 917 | } | ||
| 918 | |||
| 919 | static int compat_ptrace_hbp_set(unsigned int note_type, | ||
| 920 | struct task_struct *tsk, | ||
| 921 | compat_long_t num, | ||
| 922 | u32 *kdata) | ||
| 923 | { | ||
| 924 | u64 addr; | ||
| 925 | u32 ctrl; | ||
| 926 | |||
| 927 | int err, idx = compat_ptrace_hbp_num_to_idx(num); | ||
| 928 | |||
| 929 | if (num & 1) { | ||
| 930 | addr = *kdata; | ||
| 931 | err = ptrace_hbp_set_addr(note_type, tsk, idx, addr); | ||
| 932 | } else { | ||
| 933 | ctrl = *kdata; | ||
| 934 | err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl); | ||
| 935 | } | ||
| 936 | |||
| 937 | return err; | ||
| 938 | } | ||
| 939 | |||
/*
 * PTRACE_GETHBPREGS: read one virtual hw-debug register. Negative @num
 * selects a watchpoint register, 0 the resource-info word, positive
 * numbers a breakpoint register.
 *
 * set_fs(KERNEL_DS) widens the address limit so the regset-backed
 * helpers may target the kernel variable &kdata; the value is copied
 * out with put_user() only after the limit is restored.
 */
static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* Watchpoint */
	if (num < 0) {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
	/* Resource info */
	} else if (num == 0) {
		ret = compat_ptrace_hbp_get_resource_info(&kdata);
	/* Breakpoint */
	} else {
		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
	}
	set_fs(old_fs);

	if (!ret)
		ret = put_user(kdata, data);

	return ret;
}
| 965 | |||
/*
 * PTRACE_SETHBPREGS: write one virtual hw-debug register. Register 0
 * (resource info) is read-only and silently accepted. As in the get
 * path, set_fs(KERNEL_DS) lets the helpers read from the kernel
 * variable &kdata after it has been filled via get_user().
 */
static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
				    compat_ulong_t __user *data)
{
	int ret;
	u32 kdata = 0;
	mm_segment_t old_fs = get_fs();

	if (num == 0)
		return 0;

	ret = get_user(kdata, data);
	if (ret)
		return ret;

	set_fs(KERNEL_DS);
	if (num < 0)
		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
	else
		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
	set_fs(old_fs);

	return ret;
}
| 989 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | ||
| 990 | |||
/*
 * Arch-specific dispatch for ptrace requests issued by/for 32-bit
 * (compat) tasks. Legacy AArch32 requests (PEEKUSR, GETREGS, VFP and
 * hw-breakpoint accessors) are translated onto the aarch32 regset view;
 * everything else falls through to the generic compat handler.
 */
long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
			compat_ulong_t caddr, compat_ulong_t cdata)
{
	unsigned long addr = caddr;
	unsigned long data = cdata;
	void __user *datap = compat_ptr(data);
	int ret;

	switch (request) {
	case PTRACE_PEEKUSR:
		ret = compat_ptrace_read_user(child, addr, datap);
		break;

	case PTRACE_POKEUSR:
		ret = compat_ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_GPR,
					  0, sizeof(compat_elf_gregset_t),
					  datap);
		break;

	case PTRACE_SETREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_GPR,
					    0, sizeof(compat_elf_gregset_t),
					    datap);
		break;

	case PTRACE_GET_THREAD_AREA:
		/* AArch32 reads the TLS value via ptrace, not a register. */
		ret = put_user((compat_ulong_t)child->thread.tp_value,
			       (compat_ulong_t __user *)datap);
		break;

	case PTRACE_SET_SYSCALL:
		task_pt_regs(child)->syscallno = data;
		ret = 0;
		break;

	case COMPAT_PTRACE_GETVFPREGS:
		ret = copy_regset_to_user(child,
					  &user_aarch32_view,
					  REGSET_COMPAT_VFP,
					  0, VFP_STATE_SIZE,
					  datap);
		break;

	case COMPAT_PTRACE_SETVFPREGS:
		ret = copy_regset_from_user(child,
					    &user_aarch32_view,
					    REGSET_COMPAT_VFP,
					    0, VFP_STATE_SIZE,
					    datap);
		break;

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	case PTRACE_GETHBPREGS:
		ret = compat_ptrace_gethbpregs(child, addr, datap);
		break;

	case PTRACE_SETHBPREGS:
		ret = compat_ptrace_sethbpregs(child, addr, datap);
		break;
#endif

	default:
		ret = compat_ptrace_request(child, request, addr,
					    data);
		break;
	}

	return ret;
}
| 1068 | #endif /* CONFIG_COMPAT */ | ||
| 1069 | |||
/*
 * Select the regset view matching the tracee's personality: the AArch32
 * view for compat threads, the native AArch64 view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_COMPAT
	if (is_compat_thread(task_thread_info(task)))
		return &user_aarch32_view;
#endif
	return &user_aarch64_view;
}
| 1078 | |||
/*
 * Native (AArch64) ptrace entry point: every request is handled by the
 * generic regset-based code, so no arch-specific cases are needed.
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	return ptrace_request(child, request, addr, data);
}
| 1084 | |||
| 1085 | |||
/*
 * Install the BRK debug-fault handler used to turn software breakpoints
 * into SIGTRAP/TRAP_BRKPT for the tracer.
 *
 * NOTE(review): this registers arm64_break_trap, which is not defined in
 * this file — confirm it is provided elsewhere (the compat handler here
 * is named aarch32_break_trap).
 */
static int __init ptrace_break_init(void)
{
	hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP,
			      TRAP_BRKPT, "ptrace BRK handler");
	return 0;
}
core_initcall(ptrace_break_init);
| 1093 | |||
| 1094 | |||
/*
 * Syscall entry/exit tracing hook, called from the syscall assembly.
 * @dir is 0 on entry and non-zero on exit.
 *
 * A scratch register (ip/r12 for compat tasks, x7 for native tasks) is
 * temporarily overwritten with @dir so the tracer can distinguish entry
 * from exit, and restored before returning. Returns the (possibly
 * rewritten) syscall number; the entry hook can cancel the syscall by
 * forcing it to ~0UL.
 */
asmlinkage int syscall_trace(int dir, struct pt_regs *regs)
{
	unsigned long saved_reg;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return regs->syscallno;

	if (is_compat_task()) {
		/* AArch32 uses ip (r12) for scratch */
		saved_reg = regs->regs[12];
		regs->regs[12] = dir;
	} else {
		/*
		 * Save X7. X7 is used to denote syscall entry/exit:
		 * X7 = 0 -> entry, = 1 -> exit
		 */
		saved_reg = regs->regs[7];
		regs->regs[7] = dir;
	}

	if (dir)
		tracehook_report_syscall_exit(regs, 0);
	else if (tracehook_report_syscall_entry(regs))
		regs->syscallno = ~0UL;	/* tracer denied the syscall */

	if (is_compat_task())
		regs->regs[12] = saved_reg;
	else
		regs->regs[7] = saved_reg;

	return regs->syscallno;
}
diff --git a/include/linux/elf.h b/include/linux/elf.h index 999b4f52e8e5..1e935e4c6328 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h | |||
| @@ -388,6 +388,9 @@ typedef struct elf64_shdr { | |||
| 388 | #define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */ | 388 | #define NT_S390_LAST_BREAK 0x306 /* s390 breaking event address */ |
| 389 | #define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ | 389 | #define NT_S390_SYSTEM_CALL 0x307 /* s390 system call restart data */ |
| 390 | #define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ | 390 | #define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */ |
| 391 | #define NT_ARM_TLS 0x401 /* ARM TLS register */ | ||
| 392 | #define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */ | ||
| 393 | #define NT_ARM_HW_WATCH 0x403 /* ARM hardware watchpoint registers */ | ||
| 391 | 394 | ||
| 392 | 395 | ||
| 393 | /* Note header in a PT_NOTE section */ | 396 | /* Note header in a PT_NOTE section */ |
