aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--arch/arm/kernel/Makefile8
-rw-r--r--arch/arm/kernel/armksyms.c3
-rw-r--r--arch/arm/kernel/bios32.c2
-rw-r--r--arch/arm/kernel/calls.S2
-rw-r--r--arch/arm/kernel/cpu_pm.c116
-rw-r--r--arch/arm/kernel/debug.S10
-rw-r--r--arch/arm/kernel/devtree.c14
-rw-r--r--arch/arm/kernel/elf.c17
-rw-r--r--arch/arm/kernel/entry-armv.S525
-rw-r--r--arch/arm/kernel/entry-header.S31
-rw-r--r--arch/arm/kernel/etm.c473
-rw-r--r--arch/arm/kernel/head-nommu.S8
-rw-r--r--arch/arm/kernel/head.S10
-rw-r--r--arch/arm/kernel/hw_breakpoint.c12
-rw-r--r--arch/arm/kernel/irq.c70
-rw-r--r--arch/arm/kernel/iwmmxt.S6
-rw-r--r--arch/arm/kernel/kprobes-arm.c999
-rw-r--r--arch/arm/kernel/kprobes-common.c577
-rw-r--r--arch/arm/kernel/kprobes-decode.c1670
-rw-r--r--arch/arm/kernel/kprobes-thumb.c1462
-rw-r--r--arch/arm/kernel/kprobes.c222
-rw-r--r--arch/arm/kernel/kprobes.h420
-rw-r--r--arch/arm/kernel/leds.c27
-rw-r--r--arch/arm/kernel/module.c34
-rw-r--r--arch/arm/kernel/perf_event.c10
-rw-r--r--arch/arm/kernel/perf_event_v6.c30
-rw-r--r--arch/arm/kernel/perf_event_v7.c378
-rw-r--r--arch/arm/kernel/perf_event_xscale.c18
-rw-r--r--arch/arm/kernel/pmu.c105
-rw-r--r--arch/arm/kernel/process.c133
-rw-r--r--arch/arm/kernel/ptrace.c33
-rw-r--r--arch/arm/kernel/relocate_kernel.S3
-rw-r--r--arch/arm/kernel/setup.c128
-rw-r--r--arch/arm/kernel/sleep.S83
-rw-r--r--arch/arm/kernel/smp.c101
-rw-r--r--arch/arm/kernel/smp_scu.c12
-rw-r--r--arch/arm/kernel/smp_twd.c90
-rw-r--r--arch/arm/kernel/swp_emulate.c2
-rw-r--r--arch/arm/kernel/tcm.c68
-rw-r--r--arch/arm/kernel/traps.c23
-rw-r--r--arch/arm/kernel/vmlinux.lds.S141
41 files changed, 5517 insertions, 2559 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a5b31af5c2b..b6f3c9f80ea 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -37,7 +37,12 @@ obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
37obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 37obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
38obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 38obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
39obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 39obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
40obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o 40obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o
41ifdef CONFIG_THUMB2_KERNEL
42obj-$(CONFIG_KPROBES) += kprobes-thumb.o
43else
44obj-$(CONFIG_KPROBES) += kprobes-arm.o
45endif
41obj-$(CONFIG_ATAGS_PROC) += atags.o 46obj-$(CONFIG_ATAGS_PROC) += atags.o
42obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o 47obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
43obj-$(CONFIG_ARM_THUMBEE) += thumbee.o 48obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
@@ -60,6 +65,7 @@ obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
60obj-$(CONFIG_IWMMXT) += iwmmxt.o 65obj-$(CONFIG_IWMMXT) += iwmmxt.o
61obj-$(CONFIG_CPU_HAS_PMU) += pmu.o 66obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
62obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o 67obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
68obj-y += cpu_pm.o
63AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt 69AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
64 70
65ifneq ($(CONFIG_ARCH_EBSA110),y) 71ifneq ($(CONFIG_ARCH_EBSA110),y)
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index acca35aebe2..aeef960ff79 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -112,9 +112,6 @@ EXPORT_SYMBOL(__put_user_4);
112EXPORT_SYMBOL(__put_user_8); 112EXPORT_SYMBOL(__put_user_8);
113#endif 113#endif
114 114
115 /* crypto hash */
116EXPORT_SYMBOL(sha_transform);
117
118 /* gcc lib functions */ 115 /* gcc lib functions */
119EXPORT_SYMBOL(__ashldi3); 116EXPORT_SYMBOL(__ashldi3);
120EXPORT_SYMBOL(__ashrdi3); 117EXPORT_SYMBOL(__ashrdi3);
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index e4ee050aad7..d6df359408f 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -476,7 +476,7 @@ static u8 __devinit pcibios_swizzle(struct pci_dev *dev, u8 *pin)
476/* 476/*
477 * Map a slot/pin to an IRQ. 477 * Map a slot/pin to an IRQ.
478 */ 478 */
479static int pcibios_map_irq(struct pci_dev *dev, u8 slot, u8 pin) 479static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
480{ 480{
481 struct pci_sys_data *sys = dev->sysdata; 481 struct pci_sys_data *sys = dev->sysdata;
482 int irq = -1; 482 int irq = -1;
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 6a4f83eeb6b..c0de805e4ea 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -178,7 +178,7 @@
178 CALL(sys_ni_syscall) /* vm86 */ 178 CALL(sys_ni_syscall) /* vm86 */
179 CALL(sys_ni_syscall) /* was sys_query_module */ 179 CALL(sys_ni_syscall) /* was sys_query_module */
180 CALL(sys_poll) 180 CALL(sys_poll)
181 CALL(sys_nfsservctl) 181 CALL(sys_ni_syscall) /* was nfsservctl */
182/* 170 */ CALL(sys_setresgid16) 182/* 170 */ CALL(sys_setresgid16)
183 CALL(sys_getresgid16) 183 CALL(sys_getresgid16)
184 CALL(sys_prctl) 184 CALL(sys_prctl)
diff --git a/arch/arm/kernel/cpu_pm.c b/arch/arm/kernel/cpu_pm.c
new file mode 100644
index 00000000000..748af1f1f43
--- /dev/null
+++ b/arch/arm/kernel/cpu_pm.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright (C) 2011 Google, Inc.
3 *
4 * Author:
5 * Colin Cross <ccross@android.com>
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/notifier.h>
21#include <linux/spinlock.h>
22
23#include <asm/cpu_pm.h>
24
25static DEFINE_RWLOCK(cpu_pm_notifier_lock);
26static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
27
28int cpu_pm_register_notifier(struct notifier_block *nb)
29{
30 unsigned long flags;
31 int ret;
32
33 write_lock_irqsave(&cpu_pm_notifier_lock, flags);
34 ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
35 write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
36
37 return ret;
38}
39EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
40
41int cpu_pm_unregister_notifier(struct notifier_block *nb)
42{
43 unsigned long flags;
44 int ret;
45
46 write_lock_irqsave(&cpu_pm_notifier_lock, flags);
47 ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
48 write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
49
50 return ret;
51}
52EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
53
54static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
55{
56 int ret;
57
58 ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
59 nr_to_call, nr_calls);
60
61 return notifier_to_errno(ret);
62}
63
64int cpu_pm_enter(void)
65{
66 int nr_calls;
67 int ret = 0;
68
69 read_lock(&cpu_pm_notifier_lock);
70 ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
71 if (ret)
72 cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
73 read_unlock(&cpu_pm_notifier_lock);
74
75 return ret;
76}
77EXPORT_SYMBOL_GPL(cpu_pm_enter);
78
79int cpu_pm_exit(void)
80{
81 int ret;
82
83 read_lock(&cpu_pm_notifier_lock);
84 ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
85 read_unlock(&cpu_pm_notifier_lock);
86
87 return ret;
88}
89EXPORT_SYMBOL_GPL(cpu_pm_exit);
90
91int cpu_complex_pm_enter(void)
92{
93 int nr_calls;
94 int ret = 0;
95
96 read_lock(&cpu_pm_notifier_lock);
97 ret = cpu_pm_notify(CPU_COMPLEX_PM_ENTER, -1, &nr_calls);
98 if (ret)
99 cpu_pm_notify(CPU_COMPLEX_PM_ENTER_FAILED, nr_calls - 1, NULL);
100 read_unlock(&cpu_pm_notifier_lock);
101
102 return ret;
103}
104EXPORT_SYMBOL_GPL(cpu_complex_pm_enter);
105
106int cpu_complex_pm_exit(void)
107{
108 int ret;
109
110 read_lock(&cpu_pm_notifier_lock);
111 ret = cpu_pm_notify(CPU_COMPLEX_PM_EXIT, -1, NULL);
112 read_unlock(&cpu_pm_notifier_lock);
113
114 return ret;
115}
116EXPORT_SYMBOL_GPL(cpu_complex_pm_exit);
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index bcd66e00bdb..9126592867f 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -154,6 +154,11 @@ ENDPROC(printhex2)
154 .ltorg 154 .ltorg
155 155
156ENTRY(printascii) 156ENTRY(printascii)
157#if defined(CONFIG_DEBUG_ICEDCC) && defined(CONFIG_SMP)
158 mrc p15, 0, r3, c0, c0, 5
159 ands r3, r3, #3
160 movne pc, lr
161#endif
157 addruart_current r3, r1, r2 162 addruart_current r3, r1, r2
158 b 2f 163 b 2f
1591: waituart r2, r3 1641: waituart r2, r3
@@ -170,6 +175,11 @@ ENTRY(printascii)
170ENDPROC(printascii) 175ENDPROC(printascii)
171 176
172ENTRY(printch) 177ENTRY(printch)
178#if defined(CONFIG_DEBUG_ICEDCC) && defined(CONFIG_SMP)
179 mrc p15, 0, r3, c0, c0, 5
180 ands r3, r3, #3
181 movne pc, lr
182#endif
173 addruart_current r3, r1, r2 183 addruart_current r3, r1, r2
174 mov r1, r0 184 mov r1, r0
175 mov r0, #0 185 mov r0, #0
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 0cdd7b456cb..1a33e9d6bb1 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -132,17 +132,3 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
132 132
133 return mdesc_best; 133 return mdesc_best;
134} 134}
135
136/**
137 * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
138 *
139 * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
140 * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
141 * supported.
142 */
143unsigned int irq_create_of_mapping(struct device_node *controller,
144 const u32 *intspec, unsigned int intsize)
145{
146 return intspec[0];
147}
148EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/arm/kernel/elf.c b/arch/arm/kernel/elf.c
index 9b05c6a0dce..d4a0da1e48f 100644
--- a/arch/arm/kernel/elf.c
+++ b/arch/arm/kernel/elf.c
@@ -40,22 +40,15 @@ EXPORT_SYMBOL(elf_check_arch);
40void elf_set_personality(const struct elf32_hdr *x) 40void elf_set_personality(const struct elf32_hdr *x)
41{ 41{
42 unsigned int eflags = x->e_flags; 42 unsigned int eflags = x->e_flags;
43 unsigned int personality = current->personality & ~PER_MASK; 43 unsigned int personality = PER_LINUX_32BIT;
44
45 /*
46 * We only support Linux ELF executables, so always set the
47 * personality to LINUX.
48 */
49 personality |= PER_LINUX;
50 44
51 /* 45 /*
52 * APCS-26 is only valid for OABI executables 46 * APCS-26 is only valid for OABI executables
53 */ 47 */
54 if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN && 48 if ((eflags & EF_ARM_EABI_MASK) == EF_ARM_EABI_UNKNOWN) {
55 (eflags & EF_ARM_APCS_26)) 49 if (eflags & EF_ARM_APCS_26)
56 personality &= ~ADDR_LIMIT_32BIT; 50 personality = PER_LINUX;
57 else 51 }
58 personality |= ADDR_LIMIT_32BIT;
59 52
60 set_personality(personality); 53 set_personality(personality);
61 54
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd51ca..4f8e30f183b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -29,21 +29,53 @@
29#include <asm/entry-macro-multi.S> 29#include <asm/entry-macro-multi.S>
30 30
31/* 31/*
32 * Interrupt handling. Preserves r7, r8, r9 32 * Interrupt handling.
33 */ 33 */
34 .macro irq_handler 34 .macro irq_handler
35#ifdef CONFIG_MULTI_IRQ_HANDLER 35#ifdef CONFIG_MULTI_IRQ_HANDLER
36 ldr r5, =handle_arch_irq 36 ldr r1, =handle_arch_irq
37 mov r0, sp 37 mov r0, sp
38 ldr r5, [r5] 38 ldr r1, [r1]
39 adr lr, BSYM(9997f) 39 adr lr, BSYM(9997f)
40 teq r5, #0 40 teq r1, #0
41 movne pc, r5 41 movne pc, r1
42#endif 42#endif
43 arch_irq_handler_default 43 arch_irq_handler_default
449997: 449997:
45 .endm 45 .endm
46 46
47 .macro pabt_helper
48 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
49#ifdef MULTI_PABORT
50 ldr ip, .LCprocfns
51 mov lr, pc
52 ldr pc, [ip, #PROCESSOR_PABT_FUNC]
53#else
54 bl CPU_PABORT_HANDLER
55#endif
56 .endm
57
58 .macro dabt_helper
59
60 @
61 @ Call the processor-specific abort handler:
62 @
63 @ r2 - pt_regs
64 @ r4 - aborted context pc
65 @ r5 - aborted context psr
66 @
67 @ The abort handler must return the aborted address in r0, and
68 @ the fault status register in r1. r9 must be preserved.
69 @
70#ifdef MULTI_DABORT
71 ldr ip, .LCprocfns
72 mov lr, pc
73 ldr pc, [ip, #PROCESSOR_DABT_FUNC]
74#else
75 bl CPU_DABORT_HANDLER
76#endif
77 .endm
78
47#ifdef CONFIG_KPROBES 79#ifdef CONFIG_KPROBES
48 .section .kprobes.text,"ax",%progbits 80 .section .kprobes.text,"ax",%progbits
49#else 81#else
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
126 SPFIX( subeq sp, sp, #4 ) 158 SPFIX( subeq sp, sp, #4 )
127 stmia sp, {r1 - r12} 159 stmia sp, {r1 - r12}
128 160
129 ldmia r0, {r1 - r3} 161 ldmia r0, {r3 - r5}
130 add r5, sp, #S_SP - 4 @ here for interlock avoidance 162 add r7, sp, #S_SP - 4 @ here for interlock avoidance
131 mov r4, #-1 @ "" "" "" "" 163 mov r6, #-1 @ "" "" "" ""
132 add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) 164 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
133 SPFIX( addeq r0, r0, #4 ) 165 SPFIX( addeq r2, r2, #4 )
134 str r1, [sp, #-4]! @ save the "real" r0 copied 166 str r3, [sp, #-4]! @ save the "real" r0 copied
135 @ from the exception stack 167 @ from the exception stack
136 168
137 mov r1, lr 169 mov r3, lr
138 170
139 @ 171 @
140 @ We are now ready to fill in the remaining blanks on the stack: 172 @ We are now ready to fill in the remaining blanks on the stack:
141 @ 173 @
142 @ r0 - sp_svc 174 @ r2 - sp_svc
143 @ r1 - lr_svc 175 @ r3 - lr_svc
144 @ r2 - lr_<exception>, already fixed up for correct return/restart 176 @ r4 - lr_<exception>, already fixed up for correct return/restart
145 @ r3 - spsr_<exception> 177 @ r5 - spsr_<exception>
146 @ r4 - orig_r0 (see pt_regs definition in ptrace.h) 178 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
147 @ 179 @
148 stmia r5, {r0 - r4} 180 stmia r7, {r2 - r6}
181
182#ifdef CONFIG_TRACE_IRQFLAGS
183 bl trace_hardirqs_off
184#endif
149 .endm 185 .endm
150 186
151 .align 5 187 .align 5
152__dabt_svc: 188__dabt_svc:
153 svc_entry 189 svc_entry
154
155 @
156 @ get ready to re-enable interrupts if appropriate
157 @
158 mrs r9, cpsr
159 tst r3, #PSR_I_BIT
160 biceq r9, r9, #PSR_I_BIT
161
162 @
163 @ Call the processor-specific abort handler:
164 @
165 @ r2 - aborted context pc
166 @ r3 - aborted context cpsr
167 @
168 @ The abort handler must return the aborted address in r0, and
169 @ the fault status register in r1. r9 must be preserved.
170 @
171#ifdef MULTI_DABORT
172 ldr r4, .LCprocfns
173 mov lr, pc
174 ldr pc, [r4, #PROCESSOR_DABT_FUNC]
175#else
176 bl CPU_DABORT_HANDLER
177#endif
178
179 @
180 @ set desired IRQ state, then call main handler
181 @
182 debug_entry r1
183 msr cpsr_c, r9
184 mov r2, sp 190 mov r2, sp
185 bl do_DataAbort 191 dabt_helper
186 192
187 @ 193 @
188 @ IRQs off again before pulling preserved data off the stack 194 @ IRQs off again before pulling preserved data off the stack
189 @ 195 @
190 disable_irq_notrace 196 disable_irq_notrace
191 197
192 @ 198#ifdef CONFIG_TRACE_IRQFLAGS
193 @ restore SPSR and restart the instruction 199 tst r5, #PSR_I_BIT
194 @ 200 bleq trace_hardirqs_on
195 ldr r2, [sp, #S_PSR] 201 tst r5, #PSR_I_BIT
196 svc_exit r2 @ return from exception 202 blne trace_hardirqs_off
203#endif
204 svc_exit r5 @ return from exception
197 UNWIND(.fnend ) 205 UNWIND(.fnend )
198ENDPROC(__dabt_svc) 206ENDPROC(__dabt_svc)
199 207
200 .align 5 208 .align 5
201__irq_svc: 209__irq_svc:
202 svc_entry 210 svc_entry
211 irq_handler
203 212
204#ifdef CONFIG_TRACE_IRQFLAGS
205 bl trace_hardirqs_off
206#endif
207#ifdef CONFIG_PREEMPT 213#ifdef CONFIG_PREEMPT
208 get_thread_info tsk 214 get_thread_info tsk
209 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count 215 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
210 add r7, r8, #1 @ increment it
211 str r7, [tsk, #TI_PREEMPT]
212#endif
213
214 irq_handler
215#ifdef CONFIG_PREEMPT
216 str r8, [tsk, #TI_PREEMPT] @ restore preempt count
217 ldr r0, [tsk, #TI_FLAGS] @ get flags 216 ldr r0, [tsk, #TI_FLAGS] @ get flags
218 teq r8, #0 @ if preempt count != 0 217 teq r8, #0 @ if preempt count != 0
219 movne r0, #0 @ force flags to 0 218 movne r0, #0 @ force flags to 0
220 tst r0, #_TIF_NEED_RESCHED 219 tst r0, #_TIF_NEED_RESCHED
221 blne svc_preempt 220 blne svc_preempt
222#endif 221#endif
223 ldr r4, [sp, #S_PSR] @ irqs are already disabled 222
224#ifdef CONFIG_TRACE_IRQFLAGS 223#ifdef CONFIG_TRACE_IRQFLAGS
225 tst r4, #PSR_I_BIT 224 @ The parent context IRQs must have been enabled to get here in
226 bleq trace_hardirqs_on 225 @ the first place, so there's no point checking the PSR I bit.
226 bl trace_hardirqs_on
227#endif 227#endif
228 svc_exit r4 @ return from exception 228 svc_exit r5 @ return from exception
229 UNWIND(.fnend ) 229 UNWIND(.fnend )
230ENDPROC(__irq_svc) 230ENDPROC(__irq_svc)
231 231
@@ -251,7 +251,6 @@ __und_svc:
251#else 251#else
252 svc_entry 252 svc_entry
253#endif 253#endif
254
255 @ 254 @
256 @ call emulation code, which returns using r9 if it has emulated 255 @ call emulation code, which returns using r9 if it has emulated
257 @ the instruction, or the more conventional lr if we are to treat 256 @ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
260 @ r0 - instruction 259 @ r0 - instruction
261 @ 260 @
262#ifndef CONFIG_THUMB2_KERNEL 261#ifndef CONFIG_THUMB2_KERNEL
263 ldr r0, [r2, #-4] 262 ldr r0, [r4, #-4]
264#else 263#else
265 ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 264 ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
266 and r9, r0, #0xf800 265 and r9, r0, #0xf800
267 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 266 cmp r9, #0xe800 @ 32-bit instruction if xx >= 0
268 ldrhhs r9, [r2] @ bottom 16 bits 267 ldrhhs r9, [r4] @ bottom 16 bits
269 orrhs r0, r9, r0, lsl #16 268 orrhs r0, r9, r0, lsl #16
270#endif 269#endif
271 adr r9, BSYM(1f) 270 adr r9, BSYM(1f)
271 mov r2, r4
272 bl call_fpe 272 bl call_fpe
273 273
274 mov r0, sp @ struct pt_regs *regs 274 mov r0, sp @ struct pt_regs *regs
@@ -282,45 +282,35 @@ __und_svc:
282 @ 282 @
283 @ restore SPSR and restart the instruction 283 @ restore SPSR and restart the instruction
284 @ 284 @
285 ldr r2, [sp, #S_PSR] @ Get SVC cpsr 285 ldr r5, [sp, #S_PSR] @ Get SVC cpsr
286 svc_exit r2 @ return from exception 286#ifdef CONFIG_TRACE_IRQFLAGS
287 tst r5, #PSR_I_BIT
288 bleq trace_hardirqs_on
289 tst r5, #PSR_I_BIT
290 blne trace_hardirqs_off
291#endif
292 svc_exit r5 @ return from exception
287 UNWIND(.fnend ) 293 UNWIND(.fnend )
288ENDPROC(__und_svc) 294ENDPROC(__und_svc)
289 295
290 .align 5 296 .align 5
291__pabt_svc: 297__pabt_svc:
292 svc_entry 298 svc_entry
293
294 @
295 @ re-enable interrupts if appropriate
296 @
297 mrs r9, cpsr
298 tst r3, #PSR_I_BIT
299 biceq r9, r9, #PSR_I_BIT
300
301 mov r0, r2 @ pass address of aborted instruction.
302#ifdef MULTI_PABORT
303 ldr r4, .LCprocfns
304 mov lr, pc
305 ldr pc, [r4, #PROCESSOR_PABT_FUNC]
306#else
307 bl CPU_PABORT_HANDLER
308#endif
309 debug_entry r1
310 msr cpsr_c, r9 @ Maybe enable interrupts
311 mov r2, sp @ regs 299 mov r2, sp @ regs
312 bl do_PrefetchAbort @ call abort handler 300 pabt_helper
313 301
314 @ 302 @
315 @ IRQs off again before pulling preserved data off the stack 303 @ IRQs off again before pulling preserved data off the stack
316 @ 304 @
317 disable_irq_notrace 305 disable_irq_notrace
318 306
319 @ 307#ifdef CONFIG_TRACE_IRQFLAGS
320 @ restore SPSR and restart the instruction 308 tst r5, #PSR_I_BIT
321 @ 309 bleq trace_hardirqs_on
322 ldr r2, [sp, #S_PSR] 310 tst r5, #PSR_I_BIT
323 svc_exit r2 @ return from exception 311 blne trace_hardirqs_off
312#endif
313 svc_exit r5 @ return from exception
324 UNWIND(.fnend ) 314 UNWIND(.fnend )
325ENDPROC(__pabt_svc) 315ENDPROC(__pabt_svc)
326 316
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
351 ARM( stmib sp, {r1 - r12} ) 341 ARM( stmib sp, {r1 - r12} )
352 THUMB( stmia sp, {r0 - r12} ) 342 THUMB( stmia sp, {r0 - r12} )
353 343
354 ldmia r0, {r1 - r3} 344 ldmia r0, {r3 - r5}
355 add r0, sp, #S_PC @ here for interlock avoidance 345 add r0, sp, #S_PC @ here for interlock avoidance
356 mov r4, #-1 @ "" "" "" "" 346 mov r6, #-1 @ "" "" "" ""
357 347
358 str r1, [sp] @ save the "real" r0 copied 348 str r3, [sp] @ save the "real" r0 copied
359 @ from the exception stack 349 @ from the exception stack
360 350
361 @ 351 @
362 @ We are now ready to fill in the remaining blanks on the stack: 352 @ We are now ready to fill in the remaining blanks on the stack:
363 @ 353 @
364 @ r2 - lr_<exception>, already fixed up for correct return/restart 354 @ r4 - lr_<exception>, already fixed up for correct return/restart
365 @ r3 - spsr_<exception> 355 @ r5 - spsr_<exception>
366 @ r4 - orig_r0 (see pt_regs definition in ptrace.h) 356 @ r6 - orig_r0 (see pt_regs definition in ptrace.h)
367 @ 357 @
368 @ Also, separately save sp_usr and lr_usr 358 @ Also, separately save sp_usr and lr_usr
369 @ 359 @
370 stmia r0, {r2 - r4} 360 stmia r0, {r4 - r6}
371 ARM( stmdb r0, {sp, lr}^ ) 361 ARM( stmdb r0, {sp, lr}^ )
372 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) 362 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC )
373 363
@@ -380,10 +370,14 @@ ENDPROC(__pabt_svc)
380 @ Clear FP to mark the first stack frame 370 @ Clear FP to mark the first stack frame
381 @ 371 @
382 zero_fp 372 zero_fp
373
374#ifdef CONFIG_IRQSOFF_TRACER
375 bl trace_hardirqs_off
376#endif
383 .endm 377 .endm
384 378
385 .macro kuser_cmpxchg_check 379 .macro kuser_cmpxchg_check
386#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) 380#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
387#ifndef CONFIG_MMU 381#ifndef CONFIG_MMU
388#warning "NPTL on non MMU needs fixing" 382#warning "NPTL on non MMU needs fixing"
389#else 383#else
@@ -391,8 +385,8 @@ ENDPROC(__pabt_svc)
391 @ if it was interrupted in a critical region. Here we 385 @ if it was interrupted in a critical region. Here we
392 @ perform a quick test inline since it should be false 386 @ perform a quick test inline since it should be false
393 @ 99.9999% of the time. The rest is done out of line. 387 @ 99.9999% of the time. The rest is done out of line.
394 cmp r2, #TASK_SIZE 388 cmp r4, #TASK_SIZE
395 blhs kuser_cmpxchg_fixup 389 blhs kuser_cmpxchg64_fixup
396#endif 390#endif
397#endif 391#endif
398 .endm 392 .endm
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
401__dabt_usr: 395__dabt_usr:
402 usr_entry 396 usr_entry
403 kuser_cmpxchg_check 397 kuser_cmpxchg_check
404
405 @
406 @ Call the processor-specific abort handler:
407 @
408 @ r2 - aborted context pc
409 @ r3 - aborted context cpsr
410 @
411 @ The abort handler must return the aborted address in r0, and
412 @ the fault status register in r1.
413 @
414#ifdef MULTI_DABORT
415 ldr r4, .LCprocfns
416 mov lr, pc
417 ldr pc, [r4, #PROCESSOR_DABT_FUNC]
418#else
419 bl CPU_DABORT_HANDLER
420#endif
421
422 @
423 @ IRQs on, then call the main handler
424 @
425 debug_entry r1
426 enable_irq
427 mov r2, sp 398 mov r2, sp
428 adr lr, BSYM(ret_from_exception) 399 dabt_helper
429 b do_DataAbort 400 b ret_from_exception
430 UNWIND(.fnend ) 401 UNWIND(.fnend )
431ENDPROC(__dabt_usr) 402ENDPROC(__dabt_usr)
432 403
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
434__irq_usr: 405__irq_usr:
435 usr_entry 406 usr_entry
436 kuser_cmpxchg_check 407 kuser_cmpxchg_check
437
438#ifdef CONFIG_IRQSOFF_TRACER
439 bl trace_hardirqs_off
440#endif
441
442 get_thread_info tsk
443#ifdef CONFIG_PREEMPT
444 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
445 add r7, r8, #1 @ increment it
446 str r7, [tsk, #TI_PREEMPT]
447#endif
448
449 irq_handler 408 irq_handler
450#ifdef CONFIG_PREEMPT 409 get_thread_info tsk
451 ldr r0, [tsk, #TI_PREEMPT]
452 str r8, [tsk, #TI_PREEMPT]
453 teq r0, r7
454 ARM( strne r0, [r0, -r0] )
455 THUMB( movne r0, #0 )
456 THUMB( strne r0, [r0] )
457#endif
458
459 mov why, #0 410 mov why, #0
460 b ret_to_user_from_irq 411 b ret_to_user_from_irq
461 UNWIND(.fnend ) 412 UNWIND(.fnend )
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
467__und_usr: 418__und_usr:
468 usr_entry 419 usr_entry
469 420
421 mov r2, r4
422 mov r3, r5
423
470 @ 424 @
471 @ fall through to the emulation code, which returns using r9 if 425 @ fall through to the emulation code, which returns using r9 if
472 @ it has emulated the instruction, or the more conventional lr 426 @ it has emulated the instruction, or the more conventional lr
@@ -496,7 +450,7 @@ __und_usr:
496 blo __und_usr_unknown 450 blo __und_usr_unknown
4973: ldrht r0, [r4] 4513: ldrht r0, [r4]
498 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4 452 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
499 orr r0, r0, r5, lsl #16 453 orr r0, r0, r5, lsl #16
500#else 454#else
501 b __und_usr_unknown 455 b __und_usr_unknown
502#endif 456#endif
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
682 .align 5 636 .align 5
683__pabt_usr: 637__pabt_usr:
684 usr_entry 638 usr_entry
685
686 mov r0, r2 @ pass address of aborted instruction.
687#ifdef MULTI_PABORT
688 ldr r4, .LCprocfns
689 mov lr, pc
690 ldr pc, [r4, #PROCESSOR_PABT_FUNC]
691#else
692 bl CPU_PABORT_HANDLER
693#endif
694 debug_entry r1
695 enable_irq @ Enable interrupts
696 mov r2, sp @ regs 639 mov r2, sp @ regs
697 bl do_PrefetchAbort @ call abort handler 640 pabt_helper
698 UNWIND(.fnend ) 641 UNWIND(.fnend )
699 /* fall through */ 642 /* fall through */
700/* 643/*
@@ -758,31 +701,12 @@ ENDPROC(__switch_to)
758/* 701/*
759 * User helpers. 702 * User helpers.
760 * 703 *
761 * These are segment of kernel provided user code reachable from user space
762 * at a fixed address in kernel memory. This is used to provide user space
763 * with some operations which require kernel help because of unimplemented
764 * native feature and/or instructions in many ARM CPUs. The idea is for
765 * this code to be executed directly in user mode for best efficiency but
766 * which is too intimate with the kernel counter part to be left to user
767 * libraries. In fact this code might even differ from one CPU to another
768 * depending on the available instruction set and restrictions like on
769 * SMP systems. In other words, the kernel reserves the right to change
770 * this code as needed without warning. Only the entry points and their
771 * results are guaranteed to be stable.
772 *
773 * Each segment is 32-byte aligned and will be moved to the top of the high 704 * Each segment is 32-byte aligned and will be moved to the top of the high
774 * vector page. New segments (if ever needed) must be added in front of 705 * vector page. New segments (if ever needed) must be added in front of
775 * existing ones. This mechanism should be used only for things that are 706 * existing ones. This mechanism should be used only for things that are
776 * really small and justified, and not be abused freely. 707 * really small and justified, and not be abused freely.
777 * 708 *
778 * User space is expected to implement those things inline when optimizing 709 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
779 * for a processor that has the necessary native support, but only if such
780 * resulting binaries are already to be incompatible with earlier ARM
781 * processors due to the use of unsupported instructions other than what
782 * is provided here. In other words don't make binaries unable to run on
783 * earlier processors just for the sake of not using these kernel helpers
784 * if your compiled code is not going to use the new instructions for other
785 * purpose.
786 */ 710 */
787 THUMB( .arm ) 711 THUMB( .arm )
788 712
@@ -799,96 +723,103 @@ ENDPROC(__switch_to)
799__kuser_helper_start: 723__kuser_helper_start:
800 724
801/* 725/*
802 * Reference prototype: 726 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
803 * 727 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
804 * void __kernel_memory_barrier(void)
805 *
806 * Input:
807 *
808 * lr = return address
809 *
810 * Output:
811 *
812 * none
813 *
814 * Clobbered:
815 *
816 * none
817 *
818 * Definition and user space usage example:
819 *
820 * typedef void (__kernel_dmb_t)(void);
821 * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
822 *
823 * Apply any needed memory barrier to preserve consistency with data modified
824 * manually and __kuser_cmpxchg usage.
825 *
826 * This could be used as follows:
827 *
828 * #define __kernel_dmb() \
829 * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
830 * : : : "r0", "lr","cc" )
831 */ 728 */
832 729
833__kuser_memory_barrier: @ 0xffff0fa0 730__kuser_cmpxchg64: @ 0xffff0f60
731
732#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
733
734 /*
735 * Poor you. No fast solution possible...
736 * The kernel itself must perform the operation.
737 * A special ghost syscall is used for that (see traps.c).
738 */
739 stmfd sp!, {r7, lr}
740 ldr r7, 1f @ it's 20 bits
741 swi __ARM_NR_cmpxchg64
742 ldmfd sp!, {r7, pc}
7431: .word __ARM_NR_cmpxchg64
744
745#elif defined(CONFIG_CPU_32v6K)
746
747 stmfd sp!, {r4, r5, r6, r7}
748 ldrd r4, r5, [r0] @ load old val
749 ldrd r6, r7, [r1] @ load new val
750 smp_dmb arm
7511: ldrexd r0, r1, [r2] @ load current val
752 eors r3, r0, r4 @ compare with oldval (1)
753 eoreqs r3, r1, r5 @ compare with oldval (2)
754 strexdeq r3, r6, r7, [r2] @ store newval if eq
755 teqeq r3, #1 @ success?
756 beq 1b @ if no then retry
834 smp_dmb arm 757 smp_dmb arm
758 rsbs r0, r3, #0 @ set returned val and C flag
759 ldmfd sp!, {r4, r5, r6, r7}
760 bx lr
761
762#elif !defined(CONFIG_SMP)
763
764#ifdef CONFIG_MMU
765
766 /*
767 * The only thing that can break atomicity in this cmpxchg64
768 * implementation is either an IRQ or a data abort exception
769 * causing another process/thread to be scheduled in the middle of
770 * the critical sequence. The same strategy as for cmpxchg is used.
771 */
772 stmfd sp!, {r4, r5, r6, lr}
773 ldmia r0, {r4, r5} @ load old val
774 ldmia r1, {r6, lr} @ load new val
7751: ldmia r2, {r0, r1} @ load current val
776 eors r3, r0, r4 @ compare with oldval (1)
777 eoreqs r3, r1, r5 @ compare with oldval (2)
7782: stmeqia r2, {r6, lr} @ store newval if eq
779 rsbs r0, r3, #0 @ set return val and C flag
780 ldmfd sp!, {r4, r5, r6, pc}
781
782 .text
783kuser_cmpxchg64_fixup:
784 @ Called from kuser_cmpxchg_fixup.
785 @ r4 = address of interrupted insn (must be preserved).
786 @ sp = saved regs. r7 and r8 are clobbered.
787 @ 1b = first critical insn, 2b = last critical insn.
788 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
789 mov r7, #0xffff0fff
790 sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
791 subs r8, r4, r7
792 rsbcss r8, r8, #(2b - 1b)
793 strcs r7, [sp, #S_PC]
794#if __LINUX_ARM_ARCH__ < 6
795 bcc kuser_cmpxchg32_fixup
796#endif
797 mov pc, lr
798 .previous
799
800#else
801#warning "NPTL on non MMU needs fixing"
802 mov r0, #-1
803 adds r0, r0, #0
835 usr_ret lr 804 usr_ret lr
805#endif
806
807#else
808#error "incoherent kernel configuration"
809#endif
810
811 /* pad to next slot */
812 .rept (16 - (. - __kuser_cmpxchg64)/4)
813 .word 0
814 .endr
836 815
837 .align 5 816 .align 5
838 817
839/* 818__kuser_memory_barrier: @ 0xffff0fa0
840 * Reference prototype: 819 smp_dmb arm
841 * 820 usr_ret lr
842 * int __kernel_cmpxchg(int oldval, int newval, int *ptr) 821
843 * 822 .align 5
844 * Input:
845 *
846 * r0 = oldval
847 * r1 = newval
848 * r2 = ptr
849 * lr = return address
850 *
851 * Output:
852 *
853 * r0 = returned value (zero or non-zero)
854 * C flag = set if r0 == 0, clear if r0 != 0
855 *
856 * Clobbered:
857 *
858 * r3, ip, flags
859 *
860 * Definition and user space usage example:
861 *
862 * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
863 * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
864 *
865 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
866 * Return zero if *ptr was changed or non-zero if no exchange happened.
867 * The C flag is also set if *ptr was changed to allow for assembly
868 * optimization in the calling code.
869 *
870 * Notes:
871 *
872 * - This routine already includes memory barriers as needed.
873 *
874 * For example, a user space atomic_add implementation could look like this:
875 *
876 * #define atomic_add(ptr, val) \
877 * ({ register unsigned int *__ptr asm("r2") = (ptr); \
878 * register unsigned int __result asm("r1"); \
879 * asm volatile ( \
880 * "1: @ atomic_add\n\t" \
881 * "ldr r0, [r2]\n\t" \
882 * "mov r3, #0xffff0fff\n\t" \
883 * "add lr, pc, #4\n\t" \
884 * "add r1, r0, %2\n\t" \
885 * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
886 * "bcc 1b" \
887 * : "=&r" (__result) \
888 * : "r" (__ptr), "rIL" (val) \
889 * : "r0","r3","ip","lr","cc","memory" ); \
890 * __result; })
891 */
892 823
893__kuser_cmpxchg: @ 0xffff0fc0 824__kuser_cmpxchg: @ 0xffff0fc0
894 825
@@ -925,15 +856,15 @@ __kuser_cmpxchg: @ 0xffff0fc0
925 usr_ret lr 856 usr_ret lr
926 857
927 .text 858 .text
928kuser_cmpxchg_fixup: 859kuser_cmpxchg32_fixup:
929 @ Called from kuser_cmpxchg_check macro. 860 @ Called from kuser_cmpxchg_check macro.
930 @ r2 = address of interrupted insn (must be preserved). 861 @ r4 = address of interrupted insn (must be preserved).
931 @ sp = saved regs. r7 and r8 are clobbered. 862 @ sp = saved regs. r7 and r8 are clobbered.
932 @ 1b = first critical insn, 2b = last critical insn. 863 @ 1b = first critical insn, 2b = last critical insn.
933 @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. 864 @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
934 mov r7, #0xffff0fff 865 mov r7, #0xffff0fff
935 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) 866 sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
936 subs r8, r2, r7 867 subs r8, r4, r7
937 rsbcss r8, r8, #(2b - 1b) 868 rsbcss r8, r8, #(2b - 1b)
938 strcs r7, [sp, #S_PC] 869 strcs r7, [sp, #S_PC]
939 mov pc, lr 870 mov pc, lr
@@ -963,39 +894,6 @@ kuser_cmpxchg_fixup:
963 894
964 .align 5 895 .align 5
965 896
966/*
967 * Reference prototype:
968 *
969 * int __kernel_get_tls(void)
970 *
971 * Input:
972 *
973 * lr = return address
974 *
975 * Output:
976 *
977 * r0 = TLS value
978 *
979 * Clobbered:
980 *
981 * none
982 *
983 * Definition and user space usage example:
984 *
985 * typedef int (__kernel_get_tls_t)(void);
986 * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
987 *
988 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
989 *
990 * This could be used as follows:
991 *
992 * #define __kernel_get_tls() \
993 * ({ register unsigned int __val asm("r0"); \
994 * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
995 * : "=r" (__val) : : "lr","cc" ); \
996 * __val; })
997 */
998
999__kuser_get_tls: @ 0xffff0fe0 897__kuser_get_tls: @ 0xffff0fe0
1000 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init 898 ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
1001 usr_ret lr 899 usr_ret lr
@@ -1004,19 +902,6 @@ __kuser_get_tls: @ 0xffff0fe0
1004 .word 0 @ 0xffff0ff0 software TLS value, then 902 .word 0 @ 0xffff0ff0 software TLS value, then
1005 .endr @ pad up to __kuser_helper_version 903 .endr @ pad up to __kuser_helper_version
1006 904
1007/*
1008 * Reference declaration:
1009 *
1010 * extern unsigned int __kernel_helper_version;
1011 *
1012 * Definition and user space usage example:
1013 *
1014 * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
1015 *
1016 * User space may read this to determine the curent number of helpers
1017 * available.
1018 */
1019
1020__kuser_helper_version: @ 0xffff0ffc 905__kuser_helper_version: @ 0xffff0ffc
1021 .word ((__kuser_helper_end - __kuser_helper_start) >> 5) 906 .word ((__kuser_helper_end - __kuser_helper_start) >> 5)
1022 907
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 051166c2a93..9a8531eadd3 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -121,15 +121,13 @@
121 .endm 121 .endm
122#else /* CONFIG_THUMB2_KERNEL */ 122#else /* CONFIG_THUMB2_KERNEL */
123 .macro svc_exit, rpsr 123 .macro svc_exit, rpsr
124 ldr lr, [sp, #S_SP] @ top of the stack
125 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
124 clrex @ clear the exclusive monitor 126 clrex @ clear the exclusive monitor
125 ldr r0, [sp, #S_SP] @ top of the stack 127 stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context
126 ldr r1, [sp, #S_PC] @ return address
127 tst r0, #4 @ orig stack 8-byte aligned?
128 stmdb r0, {r1, \rpsr} @ rfe context
129 ldmia sp, {r0 - r12} 128 ldmia sp, {r0 - r12}
130 ldr lr, [sp, #S_LR] 129 mov sp, lr
131 addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned 130 ldr lr, [sp], #4
132 addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned
133 rfeia sp! 131 rfeia sp!
134 .endm 132 .endm
135 133
@@ -165,25 +163,6 @@
165 .endm 163 .endm
166#endif /* !CONFIG_THUMB2_KERNEL */ 164#endif /* !CONFIG_THUMB2_KERNEL */
167 165
168 @
169 @ Debug exceptions are taken as prefetch or data aborts.
170 @ We must disable preemption during the handler so that
171 @ we can access the debug registers safely.
172 @
173 .macro debug_entry, fsr
174#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
175 ldr r4, =0x40f @ mask out fsr.fs
176 and r5, r4, \fsr
177 cmp r5, #2 @ debug exception
178 bne 1f
179 get_thread_info r10
180 ldr r6, [r10, #TI_PREEMPT] @ get preempt count
181 add r11, r6, #1 @ increment it
182 str r11, [r10, #TI_PREEMPT]
1831:
184#endif
185 .endm
186
187/* 166/*
188 * These are the registers used in the syscall handler, and allow us to 167 * These are the registers used in the syscall handler, and allow us to
189 * have in theory up to 7 arguments to a function - r0 to r6. 168 * have in theory up to 7 arguments to a function - r0 to r6.
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 1bec8b5f22f..496b8b84e45 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -15,6 +15,7 @@
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/slab.h>
18#include <linux/sysrq.h> 19#include <linux/sysrq.h>
19#include <linux/device.h> 20#include <linux/device.h>
20#include <linux/clk.h> 21#include <linux/clk.h>
@@ -36,26 +37,36 @@ MODULE_AUTHOR("Alexander Shishkin");
36struct tracectx { 37struct tracectx {
37 unsigned int etb_bufsz; 38 unsigned int etb_bufsz;
38 void __iomem *etb_regs; 39 void __iomem *etb_regs;
39 void __iomem *etm_regs; 40 void __iomem **etm_regs;
41 int etm_regs_count;
40 unsigned long flags; 42 unsigned long flags;
41 int ncmppairs; 43 int ncmppairs;
42 int etm_portsz; 44 int etm_portsz;
45 u32 etb_fc;
46 unsigned long range_start;
47 unsigned long range_end;
48 unsigned long data_range_start;
49 unsigned long data_range_end;
50 bool dump_initial_etb;
43 struct device *dev; 51 struct device *dev;
44 struct clk *emu_clk; 52 struct clk *emu_clk;
45 struct mutex mutex; 53 struct mutex mutex;
46}; 54};
47 55
48static struct tracectx tracer; 56static struct tracectx tracer = {
57 .range_start = (unsigned long)_stext,
58 .range_end = (unsigned long)_etext,
59};
49 60
50static inline bool trace_isrunning(struct tracectx *t) 61static inline bool trace_isrunning(struct tracectx *t)
51{ 62{
52 return !!(t->flags & TRACER_RUNNING); 63 return !!(t->flags & TRACER_RUNNING);
53} 64}
54 65
55static int etm_setup_address_range(struct tracectx *t, int n, 66static int etm_setup_address_range(struct tracectx *t, int id, int n,
56 unsigned long start, unsigned long end, int exclude, int data) 67 unsigned long start, unsigned long end, int exclude, int data)
57{ 68{
58 u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_NSONLY | \ 69 u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
59 ETMAAT_NOVALCMP; 70 ETMAAT_NOVALCMP;
60 71
61 if (n < 1 || n > t->ncmppairs) 72 if (n < 1 || n > t->ncmppairs)
@@ -71,95 +82,155 @@ static int etm_setup_address_range(struct tracectx *t, int n,
71 flags |= ETMAAT_IEXEC; 82 flags |= ETMAAT_IEXEC;
72 83
73 /* first comparator for the range */ 84 /* first comparator for the range */
74 etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2)); 85 etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
75 etm_writel(t, start, ETMR_COMP_VAL(n * 2)); 86 etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
76 87
77 /* second comparator is right next to it */ 88 /* second comparator is right next to it */
78 etm_writel(t, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1)); 89 etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
79 etm_writel(t, end, ETMR_COMP_VAL(n * 2 + 1)); 90 etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
80 91
81 flags = exclude ? ETMTE_INCLEXCL : 0; 92 if (data) {
82 etm_writel(t, flags | (1 << n), ETMR_TRACEENCTRL); 93 flags = exclude ? ETMVDC3_EXCLONLY : 0;
94 if (exclude)
95 n += 8;
96 etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
97 } else {
98 flags = exclude ? ETMTE_INCLEXCL : 0;
99 etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
100 }
83 101
84 return 0; 102 return 0;
85} 103}
86 104
87static int trace_start(struct tracectx *t) 105static int trace_start_etm(struct tracectx *t, int id)
88{ 106{
89 u32 v; 107 u32 v;
90 unsigned long timeout = TRACER_TIMEOUT; 108 unsigned long timeout = TRACER_TIMEOUT;
91 109
92 etb_unlock(t);
93
94 etb_writel(t, 0, ETBR_FORMATTERCTRL);
95 etb_writel(t, 1, ETBR_CTRL);
96
97 etb_lock(t);
98
99 /* configure etm */
100 v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz); 110 v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
101 111
102 if (t->flags & TRACER_CYCLE_ACC) 112 if (t->flags & TRACER_CYCLE_ACC)
103 v |= ETMCTRL_CYCLEACCURATE; 113 v |= ETMCTRL_CYCLEACCURATE;
104 114
105 etm_unlock(t); 115 if (t->flags & TRACER_TRACE_DATA)
116 v |= ETMCTRL_DATA_DO_ADDR;
117
118 etm_unlock(t, id);
106 119
107 etm_writel(t, v, ETMR_CTRL); 120 etm_writel(t, id, v, ETMR_CTRL);
108 121
109 while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) 122 while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
110 ; 123 ;
111 if (!timeout) { 124 if (!timeout) {
112 dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); 125 dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
113 etm_lock(t); 126 etm_lock(t, id);
114 return -EFAULT; 127 return -EFAULT;
115 } 128 }
116 129
117 etm_setup_address_range(t, 1, (unsigned long)_stext, 130 if (t->range_start || t->range_end)
118 (unsigned long)_etext, 0, 0); 131 etm_setup_address_range(t, id, 1,
119 etm_writel(t, 0, ETMR_TRACEENCTRL2); 132 t->range_start, t->range_end, 0, 0);
120 etm_writel(t, 0, ETMR_TRACESSCTRL); 133 else
121 etm_writel(t, 0x6f, ETMR_TRACEENEVT); 134 etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
135
136 etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
137 etm_writel(t, id, 0, ETMR_TRACESSCTRL);
138 etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
139
140 etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
141 etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
142
143 if (t->data_range_start || t->data_range_end)
144 etm_setup_address_range(t, id, 2, t->data_range_start,
145 t->data_range_end, 0, 1);
146 else
147 etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
148
149 etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
122 150
123 v &= ~ETMCTRL_PROGRAM; 151 v &= ~ETMCTRL_PROGRAM;
124 v |= ETMCTRL_PORTSEL; 152 v |= ETMCTRL_PORTSEL;
125 153
126 etm_writel(t, v, ETMR_CTRL); 154 etm_writel(t, id, v, ETMR_CTRL);
127 155
128 timeout = TRACER_TIMEOUT; 156 timeout = TRACER_TIMEOUT;
129 while (etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout) 157 while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
130 ; 158 ;
131 if (!timeout) { 159 if (!timeout) {
132 dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n"); 160 dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
133 etm_lock(t); 161 etm_lock(t, id);
134 return -EFAULT; 162 return -EFAULT;
135 } 163 }
136 164
137 etm_lock(t); 165 etm_lock(t, id);
166 return 0;
167}
168
169static int trace_start(struct tracectx *t)
170{
171 int ret;
172 int id;
173 u32 etb_fc = t->etb_fc;
174
175 etb_unlock(t);
176
177 t->dump_initial_etb = false;
178 etb_writel(t, 0, ETBR_WRITEADDR);
179 etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
180 etb_writel(t, 1, ETBR_CTRL);
181
182 etb_lock(t);
183
184 /* configure etm(s) */
185 for (id = 0; id < t->etm_regs_count; id++) {
186 ret = trace_start_etm(t, id);
187 if (ret)
188 return ret;
189 }
138 190
139 t->flags |= TRACER_RUNNING; 191 t->flags |= TRACER_RUNNING;
140 192
141 return 0; 193 return 0;
142} 194}
143 195
144static int trace_stop(struct tracectx *t) 196static int trace_stop_etm(struct tracectx *t, int id)
145{ 197{
146 unsigned long timeout = TRACER_TIMEOUT; 198 unsigned long timeout = TRACER_TIMEOUT;
147 199
148 etm_unlock(t); 200 etm_unlock(t, id);
149 201
150 etm_writel(t, 0x440, ETMR_CTRL); 202 etm_writel(t, id, 0x441, ETMR_CTRL);
151 while (!(etm_readl(t, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout) 203 while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
152 ; 204 ;
153 if (!timeout) { 205 if (!timeout) {
154 dev_dbg(t->dev, "Waiting for progbit to assert timed out\n"); 206 dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
155 etm_lock(t); 207 etm_lock(t, id);
156 return -EFAULT; 208 return -EFAULT;
157 } 209 }
158 210
159 etm_lock(t); 211 etm_lock(t, id);
212 return 0;
213}
214
215static int trace_stop(struct tracectx *t)
216{
217 int id;
218 int ret;
219 unsigned long timeout = TRACER_TIMEOUT;
220 u32 etb_fc = t->etb_fc;
221
222 for (id = 0; id < t->etm_regs_count; id++) {
223 ret = trace_stop_etm(t, id);
224 if (ret)
225 return ret;
226 }
160 227
161 etb_unlock(t); 228 etb_unlock(t);
162 etb_writel(t, ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL); 229 if (etb_fc) {
230 etb_fc |= ETBFF_STOPFL;
231 etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
232 }
233 etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
163 234
164 timeout = TRACER_TIMEOUT; 235 timeout = TRACER_TIMEOUT;
165 while (etb_readl(t, ETBR_FORMATTERCTRL) & 236 while (etb_readl(t, ETBR_FORMATTERCTRL) &
@@ -184,24 +255,15 @@ static int trace_stop(struct tracectx *t)
184static int etb_getdatalen(struct tracectx *t) 255static int etb_getdatalen(struct tracectx *t)
185{ 256{
186 u32 v; 257 u32 v;
187 int rp, wp; 258 int wp;
188 259
189 v = etb_readl(t, ETBR_STATUS); 260 v = etb_readl(t, ETBR_STATUS);
190 261
191 if (v & 1) 262 if (v & 1)
192 return t->etb_bufsz; 263 return t->etb_bufsz;
193 264
194 rp = etb_readl(t, ETBR_READADDR);
195 wp = etb_readl(t, ETBR_WRITEADDR); 265 wp = etb_readl(t, ETBR_WRITEADDR);
196 266 return wp;
197 if (rp > wp) {
198 etb_writel(t, 0, ETBR_READADDR);
199 etb_writel(t, 0, ETBR_WRITEADDR);
200
201 return 0;
202 }
203
204 return wp - rp;
205} 267}
206 268
207/* sysrq+v will always stop the running trace and leave it at that */ 269/* sysrq+v will always stop the running trace and leave it at that */
@@ -234,21 +296,18 @@ static void etm_dump(void)
234 printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM))); 296 printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
235 printk(KERN_INFO "\n--- ETB buffer end ---\n"); 297 printk(KERN_INFO "\n--- ETB buffer end ---\n");
236 298
237 /* deassert the overflow bit */
238 etb_writel(t, 1, ETBR_CTRL);
239 etb_writel(t, 0, ETBR_CTRL);
240
241 etb_writel(t, 0, ETBR_TRIGGERCOUNT);
242 etb_writel(t, 0, ETBR_READADDR);
243 etb_writel(t, 0, ETBR_WRITEADDR);
244
245 etb_lock(t); 299 etb_lock(t);
246} 300}
247 301
248static void sysrq_etm_dump(int key) 302static void sysrq_etm_dump(int key)
249{ 303{
304 if (!mutex_trylock(&tracer.mutex)) {
305 printk(KERN_INFO "Tracing hardware busy\n");
306 return;
307 }
250 dev_dbg(tracer.dev, "Dumping ETB buffer\n"); 308 dev_dbg(tracer.dev, "Dumping ETB buffer\n");
251 etm_dump(); 309 etm_dump();
310 mutex_unlock(&tracer.mutex);
252} 311}
253 312
254static struct sysrq_key_op sysrq_etm_op = { 313static struct sysrq_key_op sysrq_etm_op = {
@@ -275,6 +334,10 @@ static ssize_t etb_read(struct file *file, char __user *data,
275 struct tracectx *t = file->private_data; 334 struct tracectx *t = file->private_data;
276 u32 first = 0; 335 u32 first = 0;
277 u32 *buf; 336 u32 *buf;
337 int wpos;
338 int skip;
339 long wlength;
340 loff_t pos = *ppos;
278 341
279 mutex_lock(&t->mutex); 342 mutex_lock(&t->mutex);
280 343
@@ -286,31 +349,39 @@ static ssize_t etb_read(struct file *file, char __user *data,
286 etb_unlock(t); 349 etb_unlock(t);
287 350
288 total = etb_getdatalen(t); 351 total = etb_getdatalen(t);
352 if (total == 0 && t->dump_initial_etb)
353 total = t->etb_bufsz;
289 if (total == t->etb_bufsz) 354 if (total == t->etb_bufsz)
290 first = etb_readl(t, ETBR_WRITEADDR); 355 first = etb_readl(t, ETBR_WRITEADDR);
291 356
357 if (pos > total * 4) {
358 skip = 0;
359 wpos = total;
360 } else {
361 skip = (int)pos % 4;
362 wpos = (int)pos / 4;
363 }
364 total -= wpos;
365 first = (first + wpos) % t->etb_bufsz;
366
292 etb_writel(t, first, ETBR_READADDR); 367 etb_writel(t, first, ETBR_READADDR);
293 368
294 length = min(total * 4, (int)len); 369 wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
295 buf = vmalloc(length); 370 length = min(total * 4 - skip, (int)len);
371 buf = vmalloc(wlength * 4);
296 372
297 dev_dbg(t->dev, "ETB buffer length: %d\n", total); 373 dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
374 length, pos, wlength, first);
375 dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
298 dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS)); 376 dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
299 for (i = 0; i < length / 4; i++) 377 for (i = 0; i < wlength; i++)
300 buf[i] = etb_readl(t, ETBR_READMEM); 378 buf[i] = etb_readl(t, ETBR_READMEM);
301 379
302 /* the only way to deassert overflow bit in ETB status is this */
303 etb_writel(t, 1, ETBR_CTRL);
304 etb_writel(t, 0, ETBR_CTRL);
305
306 etb_writel(t, 0, ETBR_WRITEADDR);
307 etb_writel(t, 0, ETBR_READADDR);
308 etb_writel(t, 0, ETBR_TRIGGERCOUNT);
309
310 etb_lock(t); 380 etb_lock(t);
311 381
312 length -= copy_to_user(data, buf, length); 382 length -= copy_to_user(data, (u8 *)buf + skip, length);
313 vfree(buf); 383 vfree(buf);
384 *ppos = pos + length;
314 385
315out: 386out:
316 mutex_unlock(&t->mutex); 387 mutex_unlock(&t->mutex);
@@ -347,28 +418,17 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
347 if (ret) 418 if (ret)
348 goto out; 419 goto out;
349 420
421 mutex_lock(&t->mutex);
350 t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); 422 t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
351 if (!t->etb_regs) { 423 if (!t->etb_regs) {
352 ret = -ENOMEM; 424 ret = -ENOMEM;
353 goto out_release; 425 goto out_release;
354 } 426 }
355 427
428 t->dev = &dev->dev;
429 t->dump_initial_etb = true;
356 amba_set_drvdata(dev, t); 430 amba_set_drvdata(dev, t);
357 431
358 etb_miscdev.parent = &dev->dev;
359
360 ret = misc_register(&etb_miscdev);
361 if (ret)
362 goto out_unmap;
363
364 t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
365 if (IS_ERR(t->emu_clk)) {
366 dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
367 return -EFAULT;
368 }
369
370 clk_enable(t->emu_clk);
371
372 etb_unlock(t); 432 etb_unlock(t);
373 t->etb_bufsz = etb_readl(t, ETBR_DEPTH); 433 t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
374 dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz); 434 dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
@@ -377,6 +437,20 @@ static int __devinit etb_probe(struct amba_device *dev, const struct amba_id *id
377 etb_writel(t, 0, ETBR_CTRL); 437 etb_writel(t, 0, ETBR_CTRL);
378 etb_writel(t, 0x1000, ETBR_FORMATTERCTRL); 438 etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
379 etb_lock(t); 439 etb_lock(t);
440 mutex_unlock(&t->mutex);
441
442 etb_miscdev.parent = &dev->dev;
443
444 ret = misc_register(&etb_miscdev);
445 if (ret)
446 goto out_unmap;
447
448 /* Get optional clock. Currently used to select clock source on omap3 */
449 t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
450 if (IS_ERR(t->emu_clk))
451 dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
452 else
453 clk_enable(t->emu_clk);
380 454
381 dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n"); 455 dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
382 456
@@ -384,10 +458,13 @@ out:
384 return ret; 458 return ret;
385 459
386out_unmap: 460out_unmap:
461 mutex_lock(&t->mutex);
387 amba_set_drvdata(dev, NULL); 462 amba_set_drvdata(dev, NULL);
388 iounmap(t->etb_regs); 463 iounmap(t->etb_regs);
464 t->etb_regs = NULL;
389 465
390out_release: 466out_release:
467 mutex_unlock(&t->mutex);
391 amba_release_regions(dev); 468 amba_release_regions(dev);
392 469
393 return ret; 470 return ret;
@@ -402,8 +479,10 @@ static int etb_remove(struct amba_device *dev)
402 iounmap(t->etb_regs); 479 iounmap(t->etb_regs);
403 t->etb_regs = NULL; 480 t->etb_regs = NULL;
404 481
405 clk_disable(t->emu_clk); 482 if (!IS_ERR(t->emu_clk)) {
406 clk_put(t->emu_clk); 483 clk_disable(t->emu_clk);
484 clk_put(t->emu_clk);
485 }
407 486
408 amba_release_regions(dev); 487 amba_release_regions(dev);
409 488
@@ -447,7 +526,10 @@ static ssize_t trace_running_store(struct kobject *kobj,
447 return -EINVAL; 526 return -EINVAL;
448 527
449 mutex_lock(&tracer.mutex); 528 mutex_lock(&tracer.mutex);
450 ret = value ? trace_start(&tracer) : trace_stop(&tracer); 529 if (!tracer.etb_regs)
530 ret = -ENODEV;
531 else
532 ret = value ? trace_start(&tracer) : trace_stop(&tracer);
451 mutex_unlock(&tracer.mutex); 533 mutex_unlock(&tracer.mutex);
452 534
453 return ret ? : n; 535 return ret ? : n;
@@ -462,36 +544,50 @@ static ssize_t trace_info_show(struct kobject *kobj,
462{ 544{
463 u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st; 545 u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
464 int datalen; 546 int datalen;
547 int id;
548 int ret;
465 549
466 etb_unlock(&tracer); 550 mutex_lock(&tracer.mutex);
467 datalen = etb_getdatalen(&tracer); 551 if (tracer.etb_regs) {
468 etb_wa = etb_readl(&tracer, ETBR_WRITEADDR); 552 etb_unlock(&tracer);
469 etb_ra = etb_readl(&tracer, ETBR_READADDR); 553 datalen = etb_getdatalen(&tracer);
470 etb_st = etb_readl(&tracer, ETBR_STATUS); 554 etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
471 etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL); 555 etb_ra = etb_readl(&tracer, ETBR_READADDR);
472 etb_lock(&tracer); 556 etb_st = etb_readl(&tracer, ETBR_STATUS);
473 557 etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
474 etm_unlock(&tracer); 558 etb_lock(&tracer);
475 etm_ctrl = etm_readl(&tracer, ETMR_CTRL); 559 } else {
476 etm_st = etm_readl(&tracer, ETMR_STATUS); 560 etb_wa = etb_ra = etb_st = etb_fc = ~0;
477 etm_lock(&tracer); 561 datalen = -1;
562 }
478 563
479 return sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n" 564 ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
480 "ETBR_WRITEADDR:\t%08x\n" 565 "ETBR_WRITEADDR:\t%08x\n"
481 "ETBR_READADDR:\t%08x\n" 566 "ETBR_READADDR:\t%08x\n"
482 "ETBR_STATUS:\t%08x\n" 567 "ETBR_STATUS:\t%08x\n"
483 "ETBR_FORMATTERCTRL:\t%08x\n" 568 "ETBR_FORMATTERCTRL:\t%08x\n",
484 "ETMR_CTRL:\t%08x\n"
485 "ETMR_STATUS:\t%08x\n",
486 datalen, 569 datalen,
487 tracer.ncmppairs, 570 tracer.ncmppairs,
488 etb_wa, 571 etb_wa,
489 etb_ra, 572 etb_ra,
490 etb_st, 573 etb_st,
491 etb_fc, 574 etb_fc
575 );
576
577 for (id = 0; id < tracer.etm_regs_count; id++) {
578 etm_unlock(&tracer, id);
579 etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
580 etm_st = etm_readl(&tracer, id, ETMR_STATUS);
581 etm_lock(&tracer, id);
582 ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
583 "ETMR_STATUS:\t%08x\n",
492 etm_ctrl, 584 etm_ctrl,
493 etm_st 585 etm_st
494 ); 586 );
587 }
588 mutex_unlock(&tracer.mutex);
589
590 return ret;
495} 591}
496 592
497static struct kobj_attribute trace_info_attr = 593static struct kobj_attribute trace_info_attr =
@@ -530,42 +626,121 @@ static ssize_t trace_mode_store(struct kobject *kobj,
530static struct kobj_attribute trace_mode_attr = 626static struct kobj_attribute trace_mode_attr =
531 __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store); 627 __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
532 628
629static ssize_t trace_range_show(struct kobject *kobj,
630 struct kobj_attribute *attr,
631 char *buf)
632{
633 return sprintf(buf, "%08lx %08lx\n",
634 tracer.range_start, tracer.range_end);
635}
636
637static ssize_t trace_range_store(struct kobject *kobj,
638 struct kobj_attribute *attr,
639 const char *buf, size_t n)
640{
641 unsigned long range_start, range_end;
642
643 if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
644 return -EINVAL;
645
646 mutex_lock(&tracer.mutex);
647 tracer.range_start = range_start;
648 tracer.range_end = range_end;
649 mutex_unlock(&tracer.mutex);
650
651 return n;
652}
653
654
655static struct kobj_attribute trace_range_attr =
656 __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
657
658static ssize_t trace_data_range_show(struct kobject *kobj,
659 struct kobj_attribute *attr,
660 char *buf)
661{
662 unsigned long range_start;
663 u64 range_end;
664 mutex_lock(&tracer.mutex);
665 range_start = tracer.data_range_start;
666 range_end = tracer.data_range_end;
667 if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
668 range_end = 0x100000000ULL;
669 mutex_unlock(&tracer.mutex);
670 return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
671}
672
673static ssize_t trace_data_range_store(struct kobject *kobj,
674 struct kobj_attribute *attr,
675 const char *buf, size_t n)
676{
677 unsigned long range_start;
678 u64 range_end;
679
680 if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
681 return -EINVAL;
682
683 mutex_lock(&tracer.mutex);
684 tracer.data_range_start = range_start;
685 tracer.data_range_end = (unsigned long)range_end;
686 if (range_end)
687 tracer.flags |= TRACER_TRACE_DATA;
688 else
689 tracer.flags &= ~TRACER_TRACE_DATA;
690 mutex_unlock(&tracer.mutex);
691
692 return n;
693}
694
695
696static struct kobj_attribute trace_data_range_attr =
697 __ATTR(trace_data_range, 0644,
698 trace_data_range_show, trace_data_range_store);
699
533static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id) 700static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id)
534{ 701{
535 struct tracectx *t = &tracer; 702 struct tracectx *t = &tracer;
536 int ret = 0; 703 int ret = 0;
704 void __iomem **new_regs;
705 int new_count;
537 706
538 if (t->etm_regs) { 707 mutex_lock(&t->mutex);
539 dev_dbg(&dev->dev, "ETM already initialized\n"); 708 new_count = t->etm_regs_count + 1;
540 ret = -EBUSY; 709 new_regs = krealloc(t->etm_regs,
710 sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
711
712 if (!new_regs) {
713 dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
714 ret = -ENOMEM;
541 goto out; 715 goto out;
542 } 716 }
717 t->etm_regs = new_regs;
543 718
544 ret = amba_request_regions(dev, NULL); 719 ret = amba_request_regions(dev, NULL);
545 if (ret) 720 if (ret)
546 goto out; 721 goto out;
547 722
548 t->etm_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res)); 723 t->etm_regs[t->etm_regs_count] =
549 if (!t->etm_regs) { 724 ioremap_nocache(dev->res.start, resource_size(&dev->res));
725 if (!t->etm_regs[t->etm_regs_count]) {
550 ret = -ENOMEM; 726 ret = -ENOMEM;
551 goto out_release; 727 goto out_release;
552 } 728 }
553 729
554 amba_set_drvdata(dev, t); 730 amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
555 731
556 mutex_init(&t->mutex); 732 t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA;
557 t->dev = &dev->dev;
558 t->flags = TRACER_CYCLE_ACC;
559 t->etm_portsz = 1; 733 t->etm_portsz = 1;
560 734
561 etm_unlock(t); 735 etm_unlock(t, t->etm_regs_count);
562 (void)etm_readl(t, ETMMR_PDSR); 736 (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
563 /* dummy first read */ 737 /* dummy first read */
564 (void)etm_readl(&tracer, ETMMR_OSSRR); 738 (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
565 739
566 t->ncmppairs = etm_readl(t, ETMR_CONFCODE) & 0xf; 740 t->ncmppairs = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE) & 0xf;
567 etm_writel(t, 0x440, ETMR_CTRL); 741 etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
568 etm_lock(t); 742 etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
743 etm_lock(t, t->etm_regs_count);
569 744
570 ret = sysfs_create_file(&dev->dev.kobj, 745 ret = sysfs_create_file(&dev->dev.kobj,
571 &trace_running_attr.attr); 746 &trace_running_attr.attr);
@@ -581,36 +756,68 @@ static int __devinit etm_probe(struct amba_device *dev, const struct amba_id *id
581 if (ret) 756 if (ret)
582 dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n"); 757 dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
583 758
584 dev_dbg(t->dev, "ETM AMBA driver initialized.\n"); 759 ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
760 if (ret)
761 dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
762
763 ret = sysfs_create_file(&dev->dev.kobj, &trace_data_range_attr.attr);
764 if (ret)
765 dev_dbg(&dev->dev,
766 "Failed to create trace_data_range in sysfs\n");
767
768 dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
769
770 /* Enable formatter if there are multiple trace sources */
771 if (new_count > 1)
772 t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
773
774 t->etm_regs_count = new_count;
585 775
586out: 776out:
777 mutex_unlock(&t->mutex);
587 return ret; 778 return ret;
588 779
589out_unmap: 780out_unmap:
590 amba_set_drvdata(dev, NULL); 781 amba_set_drvdata(dev, NULL);
591 iounmap(t->etm_regs); 782 iounmap(t->etm_regs[t->etm_regs_count]);
592 783
593out_release: 784out_release:
594 amba_release_regions(dev); 785 amba_release_regions(dev);
595 786
787 mutex_unlock(&t->mutex);
596 return ret; 788 return ret;
597} 789}
598 790
599static int etm_remove(struct amba_device *dev) 791static int etm_remove(struct amba_device *dev)
600{ 792{
601 struct tracectx *t = amba_get_drvdata(dev); 793 int i;
794 struct tracectx *t = &tracer;
795 void __iomem *etm_regs = amba_get_drvdata(dev);
796
797 sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
798 sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
799 sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
800 sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
801 sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
602 802
603 amba_set_drvdata(dev, NULL); 803 amba_set_drvdata(dev, NULL);
604 804
605 iounmap(t->etm_regs); 805 mutex_lock(&t->mutex);
606 t->etm_regs = NULL; 806 for (i = 0; i < t->etm_regs_count; i++)
807 if (t->etm_regs[i] == etm_regs)
808 break;
809 for (; i < t->etm_regs_count - 1; i++)
810 t->etm_regs[i] = t->etm_regs[i + 1];
811 t->etm_regs_count--;
812 if (!t->etm_regs_count) {
813 kfree(t->etm_regs);
814 t->etm_regs = NULL;
815 }
816 mutex_unlock(&t->mutex);
607 817
818 iounmap(etm_regs);
608 amba_release_regions(dev); 819 amba_release_regions(dev);
609 820
610 sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
611 sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
612 sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
613
614 return 0; 821 return 0;
615} 822}
616 823
@@ -619,6 +826,10 @@ static struct amba_id etm_ids[] = {
619 .id = 0x0003b921, 826 .id = 0x0003b921,
620 .mask = 0x0007ffff, 827 .mask = 0x0007ffff,
621 }, 828 },
829 {
830 .id = 0x0003b950,
831 .mask = 0x0007ffff,
832 },
622 { 0, 0 }, 833 { 0, 0 },
623}; 834};
624 835
@@ -636,6 +847,8 @@ static int __init etm_init(void)
636{ 847{
637 int retval; 848 int retval;
638 849
850 mutex_init(&tracer.mutex);
851
639 retval = amba_driver_register(&etb_driver); 852 retval = amba_driver_register(&etb_driver);
640 if (retval) { 853 if (retval) {
641 printk(KERN_ERR "Failed to register etb\n"); 854 printk(KERN_ERR "Failed to register etb\n");
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b1e0ad9ec3..d46f25968be 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,8 +32,16 @@
32 * numbers for r1. 32 * numbers for r1.
33 * 33 *
34 */ 34 */
35 .arm
36
35 __HEAD 37 __HEAD
36ENTRY(stext) 38ENTRY(stext)
39
40 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
41 THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
42 THUMB( .thumb ) @ switch to Thumb now.
43 THUMB(1: )
44
37 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 45 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
38 @ and irqs disabled 46 @ and irqs disabled
39#ifndef CONFIG_CPU_CP15 47#ifndef CONFIG_CPU_CP15
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0ebb2..fed957a6ff3 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,8 +71,16 @@
71 * crap here - that's what the boot loader (or in extreme, well justified 71 * crap here - that's what the boot loader (or in extreme, well justified
72 * circumstances, zImage) is for. 72 * circumstances, zImage) is for.
73 */ 73 */
74 .arm
75
74 __HEAD 76 __HEAD
75ENTRY(stext) 77ENTRY(stext)
78
79 THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM.
80 THUMB( bx r9 ) @ If this is a Thumb-2 kernel,
81 THUMB( .thumb ) @ switch to Thumb now.
82 THUMB(1: )
83
76 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode 84 setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
77 @ and irqs disabled 85 @ and irqs disabled
78 mrc p15, 0, r9, c0, c0 @ get processor id 86 mrc p15, 0, r9, c0, c0 @ get processor id
@@ -348,7 +356,7 @@ __secondary_data:
348 * r13 = *virtual* address to jump to upon completion 356 * r13 = *virtual* address to jump to upon completion
349 */ 357 */
350__enable_mmu: 358__enable_mmu:
351#ifdef CONFIG_ALIGNMENT_TRAP 359#if defined(CONFIG_ALIGNMENT_TRAP) && __LINUX_ARM_ARCH__ < 6
352 orr r0, r0, #CR_A 360 orr r0, r0, #CR_A
353#else 361#else
354 bic r0, r0, #CR_A 362 bic r0, r0, #CR_A
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 87acc25d7a3..a927ca1f556 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -796,7 +796,7 @@ unlock:
796 796
797/* 797/*
798 * Called from either the Data Abort Handler [watchpoint] or the 798 * Called from either the Data Abort Handler [watchpoint] or the
799 * Prefetch Abort Handler [breakpoint] with preemption disabled. 799 * Prefetch Abort Handler [breakpoint] with interrupts disabled.
800 */ 800 */
801static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, 801static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
802 struct pt_regs *regs) 802 struct pt_regs *regs)
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
804 int ret = 0; 804 int ret = 0;
805 u32 dscr; 805 u32 dscr;
806 806
807 /* We must be called with preemption disabled. */ 807 preempt_disable();
808 WARN_ON(preemptible()); 808
809 if (interrupts_enabled(regs))
810 local_irq_enable();
809 811
810 /* We only handle watchpoints and hardware breakpoints. */ 812 /* We only handle watchpoints and hardware breakpoints. */
811 ARM_DBG_READ(c1, 0, dscr); 813 ARM_DBG_READ(c1, 0, dscr);
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
824 ret = 1; /* Unhandled fault. */ 826 ret = 1; /* Unhandled fault. */
825 } 827 }
826 828
827 /*
828 * Re-enable preemption after it was disabled in the
829 * low-level exception handling code.
830 */
831 preempt_enable(); 829 preempt_enable();
832 830
833 return ret; 831 return ret;
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad03fcc..de3dcab8610 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -67,12 +67,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
67} 67}
68 68
69/* 69/*
70 * do_IRQ handles all hardware IRQ's. Decoded IRQs should not 70 * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
71 * come via this function. Instead, they should provide their 71 * not come via this function. Instead, they should provide their
72 * own 'handler' 72 * own 'handler'. Used by platform code implementing C-based 1st
73 * level decoding.
73 */ 74 */
74asmlinkage void __exception_irq_entry 75void handle_IRQ(unsigned int irq, struct pt_regs *regs)
75asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
76{ 76{
77 struct pt_regs *old_regs = set_irq_regs(regs); 77 struct pt_regs *old_regs = set_irq_regs(regs);
78 78
@@ -97,6 +97,15 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
97 set_irq_regs(old_regs); 97 set_irq_regs(old_regs);
98} 98}
99 99
100/*
101 * asm_do_IRQ is the interface to be used from assembly code.
102 */
103asmlinkage void __exception_irq_entry
104asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
105{
106 handle_IRQ(irq, regs);
107}
108
100void set_irq_flags(unsigned int irq, unsigned int iflags) 109void set_irq_flags(unsigned int irq, unsigned int iflags)
101{ 110{
102 unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 111 unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
@@ -131,54 +140,63 @@ int __init arch_probe_nr_irqs(void)
131 140
132#ifdef CONFIG_HOTPLUG_CPU 141#ifdef CONFIG_HOTPLUG_CPU
133 142
134static bool migrate_one_irq(struct irq_data *d) 143static bool migrate_one_irq(struct irq_desc *desc)
135{ 144{
136 unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); 145 struct irq_data *d = irq_desc_get_irq_data(desc);
146 const struct cpumask *affinity = d->affinity;
147 struct irq_chip *c;
137 bool ret = false; 148 bool ret = false;
138 149
139 if (cpu >= nr_cpu_ids) { 150 /*
140 cpu = cpumask_any(cpu_online_mask); 151 * If this is a per-CPU interrupt, or the affinity does not
152 * include this CPU, then we have nothing to do.
153 */
154 if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
155 return false;
156
157 if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
158 affinity = cpu_online_mask;
141 ret = true; 159 ret = true;
142 } 160 }
143 161
144 pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); 162 c = irq_data_get_irq_chip(d);
145 163 if (c->irq_set_affinity)
146 d->chip->irq_set_affinity(d, cpumask_of(cpu), true); 164 c->irq_set_affinity(d, affinity, true);
165 else
166 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
147 167
148 return ret; 168 return ret;
149} 169}
150 170
151/* 171/*
152 * The CPU has been marked offline. Migrate IRQs off this CPU. If 172 * The current CPU has been marked offline. Migrate IRQs off this CPU.
153 * the affinity settings do not allow other CPUs, force them onto any 173 * If the affinity settings do not allow other CPUs, force them onto any
154 * available CPU. 174 * available CPU.
175 *
176 * Note: we must iterate over all IRQs, whether they have an attached
177 * action structure or not, as we need to get chained interrupts too.
155 */ 178 */
156void migrate_irqs(void) 179void migrate_irqs(void)
157{ 180{
158 unsigned int i, cpu = smp_processor_id(); 181 unsigned int i;
159 struct irq_desc *desc; 182 struct irq_desc *desc;
160 unsigned long flags; 183 unsigned long flags;
161 184
162 local_irq_save(flags); 185 local_irq_save(flags);
163 186
164 for_each_irq_desc(i, desc) { 187 for_each_irq_desc(i, desc) {
165 struct irq_data *d = &desc->irq_data;
166 bool affinity_broken = false; 188 bool affinity_broken = false;
167 189
168 raw_spin_lock(&desc->lock); 190 if (!desc)
169 do { 191 continue;
170 if (desc->action == NULL)
171 break;
172
173 if (d->node != cpu)
174 break;
175 192
176 affinity_broken = migrate_one_irq(d); 193 raw_spin_lock(&desc->lock);
177 } while (0); 194 affinity_broken = migrate_one_irq(desc);
178 raw_spin_unlock(&desc->lock); 195 raw_spin_unlock(&desc->lock);
179 196
180 if (affinity_broken && printk_ratelimit()) 197 if (affinity_broken && printk_ratelimit())
181 pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); 198 pr_warning("IRQ%u no longer affine to CPU%u\n", i,
199 smp_processor_id());
182 } 200 }
183 201
184 local_irq_restore(flags); 202 local_irq_restore(flags);
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index 7fa3bb0d239..a08783823b3 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -195,10 +195,10 @@ ENTRY(iwmmxt_task_disable)
195 195
196 @ enable access to CP0 and CP1 196 @ enable access to CP0 and CP1
197 XSC(mrc p15, 0, r4, c15, c1, 0) 197 XSC(mrc p15, 0, r4, c15, c1, 0)
198 XSC(orr r4, r4, #0xf) 198 XSC(orr r4, r4, #0x3)
199 XSC(mcr p15, 0, r4, c15, c1, 0) 199 XSC(mcr p15, 0, r4, c15, c1, 0)
200 PJ4(mrc p15, 0, r4, c1, c0, 2) 200 PJ4(mrc p15, 0, r4, c1, c0, 2)
201 PJ4(orr r4, r4, #0x3) 201 PJ4(orr r4, r4, #0xf)
202 PJ4(mcr p15, 0, r4, c1, c0, 2) 202 PJ4(mcr p15, 0, r4, c1, c0, 2)
203 203
204 mov r0, #0 @ nothing to load 204 mov r0, #0 @ nothing to load
@@ -313,7 +313,7 @@ ENTRY(iwmmxt_task_switch)
313 teq r2, r3 @ next task owns it? 313 teq r2, r3 @ next task owns it?
314 movne pc, lr @ no: leave Concan disabled 314 movne pc, lr @ no: leave Concan disabled
315 315
3161: @ flip Conan access 3161: @ flip Concan access
317 XSC(eor r1, r1, #0x3) 317 XSC(eor r1, r1, #0x3)
318 XSC(mcr p15, 0, r1, c15, c1, 0) 318 XSC(mcr p15, 0, r1, c15, c1, 0)
319 PJ4(eor r1, r1, #0xf) 319 PJ4(eor r1, r1, #0xf)
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
new file mode 100644
index 00000000000..79203ee1d03
--- /dev/null
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -0,0 +1,999 @@
1/*
2 * arch/arm/kernel/kprobes-decode.c
3 *
4 * Copyright (C) 2006, 2007 Motorola Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16/*
17 * We do not have hardware single-stepping on ARM, This
18 * effort is further complicated by the ARM not having a
19 * "next PC" register. Instructions that change the PC
20 * can't be safely single-stepped in a MP environment, so
21 * we have a lot of work to do:
22 *
23 * In the prepare phase:
24 * *) If it is an instruction that does anything
25 * with the CPU mode, we reject it for a kprobe.
26 * (This is out of laziness rather than need. The
27 * instructions could be simulated.)
28 *
29 * *) Otherwise, decode the instruction rewriting its
30 * registers to take fixed, ordered registers and
31 * setting a handler for it to run the instruction.
32 *
33 * In the execution phase by an instruction's handler:
34 *
35 * *) If the PC is written to by the instruction, the
36 * instruction must be fully simulated in software.
37 *
38 * *) Otherwise, a modified form of the instruction is
39 * directly executed. Its handler calls the
40 * instruction in insn[0]. In insn[1] is a
41 * "mov pc, lr" to return.
42 *
43 * Before calling, load up the reordered registers
44 * from the original instruction's registers. If one
45 * of the original input registers is the PC, compute
46 * and adjust the appropriate input register.
47 *
48 * After call completes, copy the output registers to
49 * the original instruction's original registers.
50 *
51 * We don't use a real breakpoint instruction since that
52 * would have us in the kernel go from SVC mode to SVC
53 * mode losing the link register. Instead we use an
54 * undefined instruction. To simplify processing, the
55 * undefined instruction used for kprobes must be reserved
56 * exclusively for kprobes use.
57 *
58 * TODO: ifdef out some instruction decoding based on architecture.
59 */
60
61#include <linux/kernel.h>
62#include <linux/kprobes.h>
63
64#include "kprobes.h"
65
66#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
67
68#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
69
70#if __LINUX_ARM_ARCH__ >= 6
71#define BLX(reg) "blx "reg" \n\t"
72#else
73#define BLX(reg) "mov lr, pc \n\t" \
74 "mov pc, "reg" \n\t"
75#endif
76
77/*
78 * To avoid the complications of mimicing single-stepping on a
79 * processor without a Next-PC or a single-step mode, and to
80 * avoid having to deal with the side-effects of boosting, we
81 * simulate or emulate (almost) all ARM instructions.
82 *
83 * "Simulation" is where the instruction's behavior is duplicated in
84 * C code. "Emulation" is where the original instruction is rewritten
85 * and executed, often by altering its registers.
86 *
87 * By having all behavior of the kprobe'd instruction completed before
88 * returning from the kprobe_handler(), all locks (scheduler and
89 * interrupt) can safely be released. There is no need for secondary
90 * breakpoints, no race with MP or preemptable kernels, nor having to
91 * clean up resources counts at a later time impacting overall system
92 * performance. By rewriting the instruction, only the minimum registers
93 * need to be loaded and saved back optimizing performance.
94 *
95 * Calling the insnslot_*_rwflags version of a function doesn't hurt
96 * anything even when the CPSR flags aren't updated by the
97 * instruction. It's just a little slower in return for saving
98 * a little space by not having a duplicate function that doesn't
99 * update the flags. (The same optimization can be said for
100 * instructions that do or don't perform register writeback)
101 * Also, instructions can either read the flags, only write the
102 * flags, or read and write the flags. To save combinations
103 * rather than for sheer performance, flag functions just assume
104 * read and write of flags.
105 */
106
107static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
108{
109 kprobe_opcode_t insn = p->opcode;
110 long iaddr = (long)p->addr;
111 int disp = branch_displacement(insn);
112
113 if (insn & (1 << 24))
114 regs->ARM_lr = iaddr + 4;
115
116 regs->ARM_pc = iaddr + 8 + disp;
117}
118
119static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
120{
121 kprobe_opcode_t insn = p->opcode;
122 long iaddr = (long)p->addr;
123 int disp = branch_displacement(insn);
124
125 regs->ARM_lr = iaddr + 4;
126 regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
127 regs->ARM_cpsr |= PSR_T_BIT;
128}
129
130static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
131{
132 kprobe_opcode_t insn = p->opcode;
133 int rm = insn & 0xf;
134 long rmv = regs->uregs[rm];
135
136 if (insn & (1 << 5))
137 regs->ARM_lr = (long)p->addr + 4;
138
139 regs->ARM_pc = rmv & ~0x1;
140 regs->ARM_cpsr &= ~PSR_T_BIT;
141 if (rmv & 0x1)
142 regs->ARM_cpsr |= PSR_T_BIT;
143}
144
145static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
146{
147 kprobe_opcode_t insn = p->opcode;
148 int rd = (insn >> 12) & 0xf;
149 unsigned long mask = 0xf8ff03df; /* Mask out execution state */
150 regs->uregs[rd] = regs->ARM_cpsr & mask;
151}
152
153static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
154{
155 regs->uregs[12] = regs->uregs[13];
156}
157
158static void __kprobes
159emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
160{
161 kprobe_opcode_t insn = p->opcode;
162 unsigned long pc = (unsigned long)p->addr + 8;
163 int rt = (insn >> 12) & 0xf;
164 int rn = (insn >> 16) & 0xf;
165 int rm = insn & 0xf;
166
167 register unsigned long rtv asm("r0") = regs->uregs[rt];
168 register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
169 register unsigned long rnv asm("r2") = (rn == 15) ? pc
170 : regs->uregs[rn];
171 register unsigned long rmv asm("r3") = regs->uregs[rm];
172
173 __asm__ __volatile__ (
174 BLX("%[fn]")
175 : "=r" (rtv), "=r" (rt2v), "=r" (rnv)
176 : "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
177 [fn] "r" (p->ainsn.insn_fn)
178 : "lr", "memory", "cc"
179 );
180
181 regs->uregs[rt] = rtv;
182 regs->uregs[rt+1] = rt2v;
183 if (is_writeback(insn))
184 regs->uregs[rn] = rnv;
185}
186
187static void __kprobes
188emulate_ldr(struct kprobe *p, struct pt_regs *regs)
189{
190 kprobe_opcode_t insn = p->opcode;
191 unsigned long pc = (unsigned long)p->addr + 8;
192 int rt = (insn >> 12) & 0xf;
193 int rn = (insn >> 16) & 0xf;
194 int rm = insn & 0xf;
195
196 register unsigned long rtv asm("r0");
197 register unsigned long rnv asm("r2") = (rn == 15) ? pc
198 : regs->uregs[rn];
199 register unsigned long rmv asm("r3") = regs->uregs[rm];
200
201 __asm__ __volatile__ (
202 BLX("%[fn]")
203 : "=r" (rtv), "=r" (rnv)
204 : "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
205 : "lr", "memory", "cc"
206 );
207
208 if (rt == 15)
209 load_write_pc(rtv, regs);
210 else
211 regs->uregs[rt] = rtv;
212
213 if (is_writeback(insn))
214 regs->uregs[rn] = rnv;
215}
216
217static void __kprobes
218emulate_str(struct kprobe *p, struct pt_regs *regs)
219{
220 kprobe_opcode_t insn = p->opcode;
221 unsigned long rtpc = (unsigned long)p->addr + str_pc_offset;
222 unsigned long rnpc = (unsigned long)p->addr + 8;
223 int rt = (insn >> 12) & 0xf;
224 int rn = (insn >> 16) & 0xf;
225 int rm = insn & 0xf;
226
227 register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
228 : regs->uregs[rt];
229 register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
230 : regs->uregs[rn];
231 register unsigned long rmv asm("r3") = regs->uregs[rm];
232
233 __asm__ __volatile__ (
234 BLX("%[fn]")
235 : "=r" (rnv)
236 : "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
237 : "lr", "memory", "cc"
238 );
239
240 if (is_writeback(insn))
241 regs->uregs[rn] = rnv;
242}
243
244static void __kprobes
245emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs)
246{
247 kprobe_opcode_t insn = p->opcode;
248 unsigned long pc = (unsigned long)p->addr + 8;
249 int rd = (insn >> 12) & 0xf;
250 int rn = (insn >> 16) & 0xf;
251 int rm = insn & 0xf;
252 int rs = (insn >> 8) & 0xf;
253
254 register unsigned long rdv asm("r0") = regs->uregs[rd];
255 register unsigned long rnv asm("r2") = (rn == 15) ? pc
256 : regs->uregs[rn];
257 register unsigned long rmv asm("r3") = (rm == 15) ? pc
258 : regs->uregs[rm];
259 register unsigned long rsv asm("r1") = regs->uregs[rs];
260 unsigned long cpsr = regs->ARM_cpsr;
261
262 __asm__ __volatile__ (
263 "msr cpsr_fs, %[cpsr] \n\t"
264 BLX("%[fn]")
265 "mrs %[cpsr], cpsr \n\t"
266 : "=r" (rdv), [cpsr] "=r" (cpsr)
267 : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
268 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
269 : "lr", "memory", "cc"
270 );
271
272 if (rd == 15)
273 alu_write_pc(rdv, regs);
274 else
275 regs->uregs[rd] = rdv;
276 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
277}
278
279static void __kprobes
280emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
281{
282 kprobe_opcode_t insn = p->opcode;
283 int rd = (insn >> 12) & 0xf;
284 int rn = (insn >> 16) & 0xf;
285 int rm = insn & 0xf;
286
287 register unsigned long rdv asm("r0") = regs->uregs[rd];
288 register unsigned long rnv asm("r2") = regs->uregs[rn];
289 register unsigned long rmv asm("r3") = regs->uregs[rm];
290 unsigned long cpsr = regs->ARM_cpsr;
291
292 __asm__ __volatile__ (
293 "msr cpsr_fs, %[cpsr] \n\t"
294 BLX("%[fn]")
295 "mrs %[cpsr], cpsr \n\t"
296 : "=r" (rdv), [cpsr] "=r" (cpsr)
297 : "0" (rdv), "r" (rnv), "r" (rmv),
298 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
299 : "lr", "memory", "cc"
300 );
301
302 regs->uregs[rd] = rdv;
303 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
304}
305
306static void __kprobes
307emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
308{
309 kprobe_opcode_t insn = p->opcode;
310 int rd = (insn >> 16) & 0xf;
311 int rn = (insn >> 12) & 0xf;
312 int rm = insn & 0xf;
313 int rs = (insn >> 8) & 0xf;
314
315 register unsigned long rdv asm("r2") = regs->uregs[rd];
316 register unsigned long rnv asm("r0") = regs->uregs[rn];
317 register unsigned long rmv asm("r3") = regs->uregs[rm];
318 register unsigned long rsv asm("r1") = regs->uregs[rs];
319 unsigned long cpsr = regs->ARM_cpsr;
320
321 __asm__ __volatile__ (
322 "msr cpsr_fs, %[cpsr] \n\t"
323 BLX("%[fn]")
324 "mrs %[cpsr], cpsr \n\t"
325 : "=r" (rdv), [cpsr] "=r" (cpsr)
326 : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
327 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
328 : "lr", "memory", "cc"
329 );
330
331 regs->uregs[rd] = rdv;
332 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
333}
334
335static void __kprobes
336emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs)
337{
338 kprobe_opcode_t insn = p->opcode;
339 int rd = (insn >> 12) & 0xf;
340 int rm = insn & 0xf;
341
342 register unsigned long rdv asm("r0") = regs->uregs[rd];
343 register unsigned long rmv asm("r3") = regs->uregs[rm];
344
345 __asm__ __volatile__ (
346 BLX("%[fn]")
347 : "=r" (rdv)
348 : "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
349 : "lr", "memory", "cc"
350 );
351
352 regs->uregs[rd] = rdv;
353}
354
355static void __kprobes
356emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
357{
358 kprobe_opcode_t insn = p->opcode;
359 int rdlo = (insn >> 12) & 0xf;
360 int rdhi = (insn >> 16) & 0xf;
361 int rn = insn & 0xf;
362 int rm = (insn >> 8) & 0xf;
363
364 register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
365 register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
366 register unsigned long rnv asm("r3") = regs->uregs[rn];
367 register unsigned long rmv asm("r1") = regs->uregs[rm];
368 unsigned long cpsr = regs->ARM_cpsr;
369
370 __asm__ __volatile__ (
371 "msr cpsr_fs, %[cpsr] \n\t"
372 BLX("%[fn]")
373 "mrs %[cpsr], cpsr \n\t"
374 : "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
375 : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
376 "2" (cpsr), [fn] "r" (p->ainsn.insn_fn)
377 : "lr", "memory", "cc"
378 );
379
380 regs->uregs[rdlo] = rdlov;
381 regs->uregs[rdhi] = rdhiv;
382 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
383}
384
385/*
386 * For the instruction masking and comparisons in all the "space_*"
387 * functions below, Do _not_ rearrange the order of tests unless
388 * you're very, very sure of what you are doing. For the sake of
389 * efficiency, the masks for some tests sometimes assume other test
390 * have been done prior to them so the number of patterns to test
391 * for an instruction set can be as broad as possible to reduce the
392 * number of tests needed.
393 */
394
395static const union decode_item arm_1111_table[] = {
396 /* Unconditional instructions */
397
398 /* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */
399 /* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */
400 /* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */
401 /* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */
402 DECODE_SIMULATE (0xfe300000, 0xf4100000, kprobe_simulate_nop),
403
404 /* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */
405 /* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */
406 /* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */
407 /* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */
408 DECODE_SIMULATE (0xfe300010, 0xf6100000, kprobe_simulate_nop),
409
410 /* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */
411 DECODE_SIMULATE (0xfe000000, 0xfa000000, simulate_blx1),
412
413 /* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
414 /* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
415 /* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
416 /* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
417
418 /* Coprocessor instructions... */
419 /* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */
420 /* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */
421 /* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
422 /* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
423 /* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
424 /* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
425 /* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
426
427 /* Other unallocated instructions... */
428 DECODE_END
429};
430
431static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = {
432 /* Miscellaneous instructions */
433
434 /* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
435 DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs,
436 REGS(0, NOPC, 0, 0, 0)),
437
438 /* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
439 DECODE_SIMULATE (0x0ff000f0, 0x01200010, simulate_blx2bx),
440
441 /* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
442 DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx,
443 REGS(0, 0, 0, 0, NOPC)),
444
445 /* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
446 DECODE_EMULATEX (0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc,
447 REGS(0, NOPC, 0, 0, NOPC)),
448
449 /* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */
450 /* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */
451 /* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */
452 /* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */
453 DECODE_EMULATEX (0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc,
454 REGS(NOPC, NOPC, 0, 0, NOPC)),
455
456 /* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
457 /* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
458 /* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
459 /* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
460 /* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
461 /* And unallocated instructions... */
462 DECODE_END
463};
464
465static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = {
466 /* Halfword multiply and multiply-accumulate */
467
468 /* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
469 DECODE_EMULATEX (0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
470 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
471
472 /* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
473 DECODE_OR (0x0ff000b0, 0x012000a0),
474 /* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
475 DECODE_EMULATEX (0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc,
476 REGS(NOPC, 0, NOPC, 0, NOPC)),
477
478 /* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */
479 DECODE_OR (0x0ff00090, 0x01000080),
480 /* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */
481 DECODE_EMULATEX (0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc,
482 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
483
484 DECODE_END
485};
486
487static const union decode_item arm_cccc_0000_____1001_table[] = {
488 /* Multiply and multiply-accumulate */
489
490 /* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */
491 /* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */
492 DECODE_EMULATEX (0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc,
493 REGS(NOPC, 0, NOPC, 0, NOPC)),
494
495 /* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */
496 /* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */
497 DECODE_OR (0x0fe000f0, 0x00200090),
498 /* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */
499 DECODE_EMULATEX (0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc,
500 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
501
502 /* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */
503 DECODE_OR (0x0ff000f0, 0x00400090),
504 /* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */
505 /* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */
506 /* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */
507 /* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */
508 /* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */
509 /* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */
510 /* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */
511 /* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */
512 DECODE_EMULATEX (0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
513 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
514
515 DECODE_END
516};
517
518static const union decode_item arm_cccc_0001_____1001_table[] = {
519 /* Synchronization primitives */
520
521 /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
522 DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
523 REGS(NOPC, NOPC, 0, 0, NOPC)),
524
525 /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
526 /* And unallocated instructions... */
527 DECODE_END
528};
529
530static const union decode_item arm_cccc_000x_____1xx1_table[] = {
531 /* Extra load/store instructions */
532
533 /* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */
534 /* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */
535 /* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */
536 /* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */
537 /* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */
538 DECODE_REJECT (0x0f200090, 0x00200090),
539
540 /* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */
541 DECODE_REJECT (0x0e10e0d0, 0x0000e0d0),
542
543 /* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */
544 /* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */
545 DECODE_EMULATEX (0x0e5000d0, 0x000000d0, emulate_ldrdstrd,
546 REGS(NOPCWB, NOPCX, 0, 0, NOPC)),
547
548 /* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */
549 /* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */
550 DECODE_EMULATEX (0x0e5000d0, 0x004000d0, emulate_ldrdstrd,
551 REGS(NOPCWB, NOPCX, 0, 0, 0)),
552
553 /* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */
554 DECODE_EMULATEX (0x0e5000f0, 0x000000b0, emulate_str,
555 REGS(NOPCWB, NOPC, 0, 0, NOPC)),
556
557 /* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */
558 /* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */
559 /* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */
560 DECODE_EMULATEX (0x0e500090, 0x00100090, emulate_ldr,
561 REGS(NOPCWB, NOPC, 0, 0, NOPC)),
562
563 /* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */
564 DECODE_EMULATEX (0x0e5000f0, 0x004000b0, emulate_str,
565 REGS(NOPCWB, NOPC, 0, 0, 0)),
566
567 /* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */
568 /* LDRSB (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */
569 /* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */
570 DECODE_EMULATEX (0x0e500090, 0x00500090, emulate_ldr,
571 REGS(NOPCWB, NOPC, 0, 0, 0)),
572
573 DECODE_END
574};
575
576static const union decode_item arm_cccc_000x_table[] = {
577 /* Data-processing (register) */
578
579 /* <op>S PC, ... cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */
580 DECODE_REJECT (0x0e10f000, 0x0010f000),
581
582 /* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */
583 DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, simulate_mov_ipsp),
584
585 /* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */
586 /* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */
587 /* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */
588 /* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */
589 DECODE_EMULATEX (0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags,
590 REGS(ANY, 0, 0, 0, ANY)),
591
592 /* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */
593 /* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */
594 DECODE_EMULATEX (0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags,
595 REGS(0, ANY, 0, 0, ANY)),
596
597 /* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */
598 /* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */
599 /* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */
600 /* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */
601 /* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */
602 /* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */
603 /* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */
604 /* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */
605 /* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */
606 /* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */
607 DECODE_EMULATEX (0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags,
608 REGS(ANY, ANY, 0, 0, ANY)),
609
610 /* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */
611 /* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */
612 /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
613 /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
614 DECODE_EMULATEX (0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags,
615 REGS(ANY, 0, NOPC, 0, ANY)),
616
617 /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
618 /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
619 DECODE_EMULATEX (0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags,
620 REGS(0, ANY, NOPC, 0, ANY)),
621
622 /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
623 /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
624 /* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */
625 /* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */
626 /* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */
627 /* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */
628 /* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */
629 /* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */
630 /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
631 /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
632 DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
633 REGS(ANY, ANY, NOPC, 0, ANY)),
634
635 DECODE_END
636};
637
/*
 * Decode table for the ARM "data-processing (immediate)" space
 * (cccc 001x ...). Entries are tried in order by kprobe_decode_insn();
 * the comment above each entry shows the encoding(s) its mask/value match.
 */
static const union decode_item arm_cccc_001x_table[] = {
	/* Data-processing (immediate) */

	/* MOVW			cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
	/* MOVT			cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc,
						 REGS(0, NOPC, 0, 0, 0)),

	/* YIELD		cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
	DECODE_OR	(0x0fff00ff, 0x03200001),
	/* SEV			cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
	DECODE_EMULATE	(0x0fff00ff, 0x03200004, kprobe_emulate_none),
	/* NOP			cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
	/* WFE			cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
	/* WFI			cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
	DECODE_SIMULATE	(0x0fff00fc, 0x03200000, kprobe_simulate_nop),
	/* DBG			cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
	/* unallocated hints	cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
	/* MSR (immediate)	cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0x0fb00000, 0x03200000),

	/* <op>S PC, ...	cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */
	/* Flag-setting ops writing PC change mode state; not probeable. */
	DECODE_REJECT	(0x0e10f000, 0x0210f000),

	/* TST (immediate)	cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */
	/* TEQ (immediate)	cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */
	/* CMP (immediate)	cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */
	/* CMN (immediate)	cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags,
						 REGS(ANY, 0, 0, 0, 0)),

	/* MOV (immediate)	cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */
	/* MVN (immediate)	cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags,
						 REGS(0, ANY, 0, 0, 0)),

	/* AND (immediate)	cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */
	/* EOR (immediate)	cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */
	/* SUB (immediate)	cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */
	/* RSB (immediate)	cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */
	/* ADD (immediate)	cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */
	/* ADC (immediate)	cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */
	/* SBC (immediate)	cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */
	/* RSC (immediate)	cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */
	/* ORR (immediate)	cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */
	/* BIC (immediate)	cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags,
						 REGS(ANY, ANY, 0, 0, 0)),

	DECODE_END
};
689
/*
 * Decode table for ARM media instructions in the cccc 0110 ... xxx1 space
 * (SIMD add/sub, saturation, byte reversal and extension instructions).
 */
static const union decode_item arm_cccc_0110_____xxx1_table[] = {
	/* Media instructions */

	/* SEL			cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */
	DECODE_EMULATEX	(0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc,
						 REGS(NOPC, NOPC, 0, 0, NOPC)),

	/* SSAT			cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */
	/* USAT			cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */
	DECODE_OR(0x0fa00030, 0x06a00010),
	/* SSAT16		cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */
	/* USAT16		cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */
	DECODE_EMULATEX	(0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* REV			cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
	/* REV16		cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
	/* RBIT			cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
	/* REVSH		cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
	DECODE_EMULATEX	(0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* Undefined/unallocated encodings must be rejected before the broad
	 * parallel add/sub match below can claim them. */
	/* ???			cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */
	DECODE_REJECT	(0x0fb00010, 0x06000010),
	/* ???			cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */
	DECODE_REJECT	(0x0f8000f0, 0x060000b0),
	/* ???			cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */
	DECODE_REJECT	(0x0f8000f0, 0x060000d0),
	/* SADD16		cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */
	/* SADDSUBX		cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */
	/* SSUBADDX		cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */
	/* SSUB16		cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */
	/* SADD8		cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */
	/* SSUB8		cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */
	/* QADD16		cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */
	/* QADDSUBX		cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */
	/* QSUBADDX		cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */
	/* QSUB16		cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */
	/* QADD8		cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */
	/* QSUB8		cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */
	/* SHADD16		cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */
	/* SHADDSUBX		cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */
	/* SHSUBADDX		cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */
	/* SHSUB16		cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */
	/* SHADD8		cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */
	/* SHSUB8		cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */
	/* UADD16		cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */
	/* UADDSUBX		cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */
	/* USUBADDX		cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */
	/* USUB16		cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */
	/* UADD8		cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */
	/* USUB8		cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */
	/* UQADD16		cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */
	/* UQADDSUBX		cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */
	/* UQSUBADDX		cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */
	/* UQSUB16		cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */
	/* UQADD8		cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */
	/* UQSUB8		cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */
	/* UHADD16		cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */
	/* UHADDSUBX		cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */
	/* UHSUBADDX		cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */
	/* UHSUB16		cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */
	/* UHADD8		cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */
	/* UHSUB8		cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */
	DECODE_EMULATEX	(0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc,
						 REGS(NOPC, NOPC, 0, 0, NOPC)),

	/* PKHBT		cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */
	/* PKHTB		cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */
	DECODE_EMULATEX	(0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc,
						 REGS(NOPC, NOPC, 0, 0, NOPC)),

	/* ???			cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */
	/* ???			cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */
	DECODE_REJECT	(0x0fb000f0, 0x06900070),

	/* SXTB16		cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */
	/* SXTB			cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */
	/* SXTH			cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */
	/* UXTB16		cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */
	/* UXTB			cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */
	/* UXTH			cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */
	DECODE_EMULATEX	(0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* SXTAB16		cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */
	/* SXTAB		cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */
	/* SXTAH		cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */
	/* UXTAB16		cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */
	/* UXTAB		cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */
	/* UXTAH		cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */
	DECODE_EMULATEX	(0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc,
						 REGS(NOPCX, NOPC, 0, 0, NOPC)),

	DECODE_END
};
786
/*
 * Decode table for ARM media instructions in the cccc 0111 ... xxx1 space
 * (signed multiplies, sum-of-absolute-differences, bit-field instructions).
 */
static const union decode_item arm_cccc_0111_____xxx1_table[] = {
	/* Media instructions */

	/* UNDEFINED		cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
	DECODE_REJECT	(0x0ff000f0, 0x07f000f0),

	/* SMLALD		cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
	/* SMLSLD		cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
	DECODE_EMULATEX	(0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	/* SMUAD		cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */
	/* SMUSD		cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */
	DECODE_OR	(0x0ff0f090, 0x0700f010),
	/* SMMUL		cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */
	DECODE_OR	(0x0ff0f0d0, 0x0750f010),
	/* USAD8		cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
	DECODE_EMULATEX	(0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc,
						 REGS(NOPC, 0, NOPC, 0, NOPC)),

	/* SMLAD		cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */
	/* SMLSD		cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */
	DECODE_OR	(0x0ff00090, 0x07000010),
	/* SMMLA		cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */
	DECODE_OR	(0x0ff000d0, 0x07500010),
	/* USADA8		cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
	DECODE_EMULATEX	(0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc,
						 REGS(NOPC, NOPCX, NOPC, 0, NOPC)),

	/* SMMLS		cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */
	DECODE_EMULATEX	(0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc,
						 REGS(NOPC, NOPC, NOPC, 0, NOPC)),

	/* SBFX			cccc 0111 101x xxxx xxxx xxxx x101 xxxx */
	/* UBFX			cccc 0111 111x xxxx xxxx xxxx x101 xxxx */
	DECODE_EMULATEX	(0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc,
						 REGS(0, NOPC, 0, 0, NOPC)),

	/* BFC			cccc 0111 110x xxxx xxxx xxxx x001 1111 */
	DECODE_EMULATEX	(0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc,
						 REGS(0, NOPC, 0, 0, 0)),

	/* BFI			cccc 0111 110x xxxx xxxx xxxx x001 xxxx */
	DECODE_EMULATEX	(0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc,
						 REGS(0, NOPC, 0, 0, NOPCX)),

	DECODE_END
};
835
/*
 * Decode table for word/unsigned-byte load and store instructions.
 * PC-relative byte accesses and the user-mode translated accesses
 * (xxxT forms) are rejected up front.
 */
static const union decode_item arm_cccc_01xx_table[] = {
	/* Load/store word and unsigned byte */

	/* LDRB/STRB pc,[...]	cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0x0c40f000, 0x0440f000),

	/* STRT			cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
	/* LDRT			cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
	/* STRBT		cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
	/* LDRBT		cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0x0d200000, 0x04200000),

	/* STR (immediate)	cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */
	/* STRB (immediate)	cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0e100000, 0x04000000, emulate_str,
						 REGS(NOPCWB, ANY, 0, 0, 0)),

	/* LDR (immediate)	cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */
	/* LDRB (immediate)	cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0e100000, 0x04100000, emulate_ldr,
						 REGS(NOPCWB, ANY, 0, 0, 0)),

	/* STR (register)	cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */
	/* STRB (register)	cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0e100000, 0x06000000, emulate_str,
						 REGS(NOPCWB, ANY, 0, 0, NOPC)),

	/* LDR (register)	cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */
	/* LDRB (register)	cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0x0e100000, 0x06100000, emulate_ldr,
						 REGS(NOPCWB, ANY, 0, 0, NOPC)),

	DECODE_END
};
870
/*
 * Decode table for LDM/STM. Plain forms go through the custom decoder
 * kprobe_decode_ldmstm(); the user-register and exception-return forms
 * fall through to DECODE_END and are therefore rejected.
 */
static const union decode_item arm_cccc_100x_table[] = {
	/* Block data transfer instructions */

	/* LDM			cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
	/* STM			cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
	DECODE_CUSTOM	(0x0e400000, 0x08000000, kprobe_decode_ldmstm),

	/* STM (user registers)	cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDM (user registers)	cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */
	/* LDM (exception ret)	cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
	DECODE_END
};
883
/*
 * Top-level decode table for the ARM instruction set. Each DECODE_TABLE
 * entry dispatches a major encoding group to its own sub-table; the
 * remaining entries handle branches directly and reject everything that
 * touches coprocessors, supervisor calls or banked state.
 */
const union decode_item kprobe_decode_arm_table[] = {
	/*
	 * Unconditional instructions
	 *			1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xf0000000, 0xf0000000, arm_1111_table),

	/*
	 * Miscellaneous instructions
	 *			cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx
	 */
	DECODE_TABLE	(0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table),

	/*
	 * Halfword multiply and multiply-accumulate
	 *			cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx
	 */
	DECODE_TABLE	(0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table),

	/*
	 * Multiply and multiply-accumulate
	 *			cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx
	 */
	DECODE_TABLE	(0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table),

	/*
	 * Synchronization primitives
	 *			cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx
	 */
	DECODE_TABLE	(0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table),

	/*
	 * Extra load/store instructions
	 *			cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx
	 */
	DECODE_TABLE	(0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table),

	/*
	 * Data-processing (register)
	 *			cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx
	 * Data-processing (register-shifted register)
	 *			cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx
	 */
	DECODE_TABLE	(0x0e000000, 0x00000000, arm_cccc_000x_table),

	/*
	 * Data-processing (immediate)
	 *			cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0x0e000000, 0x02000000, arm_cccc_001x_table),

	/*
	 * Media instructions
	 *			cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx
	 */
	DECODE_TABLE	(0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table),
	DECODE_TABLE	(0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table),

	/*
	 * Load/store word and unsigned byte
	 *			cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0x0c000000, 0x04000000, arm_cccc_01xx_table),

	/*
	 * Block data transfer instructions
	 *			cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0x0e000000, 0x08000000, arm_cccc_100x_table),

	/* B			cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
	/* BL			cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
	DECODE_SIMULATE	(0x0e000000, 0x0a000000, simulate_bbl),

	/*
	 * Supervisor Call, and coprocessor instructions
	 */

	/* MCRR			cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */
	/* MRRC			cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */
	/* LDC			cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
	/* STC			cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
	/* CDP			cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
	/* MCR			cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
	/* MRC			cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
	/* SVC			cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0x0c000000, 0x0c000000),

	DECODE_END
};
974
975static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
976{
977 regs->ARM_pc += 4;
978 p->ainsn.insn_handler(p, regs);
979}
980
/* Return:
 *	INSN_REJECTED     If instruction is one not allowed to kprobe,
 *	INSN_GOOD         If instruction is supported and uses instruction slot,
 *	INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
 *
 * For instructions we don't want to kprobe (INSN_REJECTED return result):
 *   These are generally ones that modify the processor state making
 *   them "hard" to simulate such as switches processor modes or
 *   make accesses in alternate modes.  Any of these could be simulated
 *   if the work was put into it, but low return considering they
 *   should also be very rare.
 */
enum kprobe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	asi->insn_singlestep = arm_singlestep;
	/* The condition field is instruction bits 31-28. */
	asi->insn_check_cc = kprobe_condition_checks[insn>>28];
	/* thumb=false: decode as an ARM-state instruction. */
	return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false);
}
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
new file mode 100644
index 00000000000..a5394fb4e4e
--- /dev/null
+++ b/arch/arm/kernel/kprobes-common.c
@@ -0,0 +1,577 @@
1/*
2 * arch/arm/kernel/kprobes-common.c
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
7 * Copyright (C) 2006, 2007 Motorola Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/kprobes.h>
16
17#include "kprobes.h"
18
19
#ifndef find_str_pc_offset

/*
 * For STR and STM instructions, an ARM core may choose to use either
 * a +8 or a +12 displacement from the current instruction's address.
 * Whichever value is chosen for a given core, it must be the same for
 * both instructions and may not change. This function measures it.
 */

/* Measured PC offset (8 or 12) used when an STR/STM stores the PC. */
int str_pc_offset;

void __init find_str_pc_offset(void)
{
	int addr, scratch, ret;

	/* Store the PC to memory and subtract the store instruction's own
	 * address to observe which displacement this core implements. */
	__asm__ (
		"sub	%[ret], pc, #4		\n\t"
		"str	pc, %[addr]		\n\t"
		"ldr	%[scr], %[addr]		\n\t"
		"sub	%[ret], %[scr], %[ret]	\n\t"
		: [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));

	str_pc_offset = ret;
}

#endif /* !find_str_pc_offset */
46
47
48#ifndef test_load_write_pc_interworking
49
50bool load_write_pc_interworks;
51
52void __init test_load_write_pc_interworking(void)
53{
54 int arch = cpu_architecture();
55 BUG_ON(arch == CPU_ARCH_UNKNOWN);
56 load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
57}
58
59#endif /* !test_load_write_pc_interworking */
60
61
62#ifndef test_alu_write_pc_interworking
63
64bool alu_write_pc_interworks;
65
66void __init test_alu_write_pc_interworking(void)
67{
68 int arch = cpu_architecture();
69 BUG_ON(arch == CPU_ARCH_UNKNOWN);
70 alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
71}
72
73#endif /* !test_alu_write_pc_interworking */
74
75
/*
 * Boot-time initialisation: run the probes above to record the
 * CPU-specific behaviours used by the instruction simulation code.
 */
void __init arm_kprobe_decode_init(void)
{
	find_str_pc_offset();
	test_load_write_pc_interworking();
	test_alu_write_pc_interworking();
}
82
83
/*
 * Condition-code check functions. Each takes a CPSR value and returns
 * non-zero when its condition holds, using the N, Z, C and V flag bits
 * (PSR_N_BIT, PSR_Z_BIT, PSR_C_BIT, PSR_V_BIT).
 */
static unsigned long __kprobes __check_eq(unsigned long cpsr)
{
	return cpsr & PSR_Z_BIT;
}

static unsigned long __kprobes __check_ne(unsigned long cpsr)
{
	return (~cpsr) & PSR_Z_BIT;
}

static unsigned long __kprobes __check_cs(unsigned long cpsr)
{
	return cpsr & PSR_C_BIT;
}

static unsigned long __kprobes __check_cc(unsigned long cpsr)
{
	return (~cpsr) & PSR_C_BIT;
}

static unsigned long __kprobes __check_mi(unsigned long cpsr)
{
	return cpsr & PSR_N_BIT;
}

static unsigned long __kprobes __check_pl(unsigned long cpsr)
{
	return (~cpsr) & PSR_N_BIT;
}

static unsigned long __kprobes __check_vs(unsigned long cpsr)
{
	return cpsr & PSR_V_BIT;
}

static unsigned long __kprobes __check_vc(unsigned long cpsr)
{
	return (~cpsr) & PSR_V_BIT;
}

static unsigned long __kprobes __check_hi(unsigned long cpsr)
{
	/* HI = C set AND Z clear; the shift moves Z into the C position. */
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return cpsr & PSR_C_BIT;
}

static unsigned long __kprobes __check_ls(unsigned long cpsr)
{
	/* LS = NOT(C set AND Z clear). */
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return (~cpsr) & PSR_C_BIT;
}

static unsigned long __kprobes __check_ge(unsigned long cpsr)
{
	/* GE = (N == V); the shift aligns V with the N bit position. */
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return (~cpsr) & PSR_N_BIT;
}

static unsigned long __kprobes __check_lt(unsigned long cpsr)
{
	/* LT = (N != V). */
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return cpsr & PSR_N_BIT;
}

static unsigned long __kprobes __check_gt(unsigned long cpsr)
{
	/* GT = (N == V) AND Z clear. */
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);			 /* PSR_N_BIT |= PSR_Z_BIT */
	return (~temp) & PSR_N_BIT;
}

static unsigned long __kprobes __check_le(unsigned long cpsr)
{
	/* LE = (N != V) OR Z set. */
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);			 /* PSR_N_BIT |= PSR_Z_BIT */
	return temp & PSR_N_BIT;
}

static unsigned long __kprobes __check_al(unsigned long cpsr)
{
	/* AL always executes, regardless of flags. */
	return true;
}

/*
 * Check functions indexed by an instruction's condition field (bits 31-28).
 * Entry 15 (the 0b1111 "unconditional" space) shares the always-true check.
 */
kprobe_check_cc * const kprobe_condition_checks[16] = {
	&__check_eq, &__check_ne, &__check_cs, &__check_cc,
	&__check_mi, &__check_pl, &__check_vs, &__check_vc,
	&__check_hi, &__check_ls, &__check_ge, &__check_lt,
	&__check_gt, &__check_le, &__check_al, &__check_al
};
173
174
/* Handler for instructions simulated as having no effect (e.g. NOP/WFE/WFI). */
void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs)
{
}

/*
 * Handler for instructions emulated by simply executing the copy placed in
 * the instruction slot; no register state needs transferring around the call.
 */
void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs)
{
	p->ainsn.insn_fn();
}
183
/*
 * Simulate an LDM/STM instruction in software. Decodes the L (load), W
 * (writeback), U (up/increment) and P (pre-index) bits from the probed
 * opcode, computes the lowest transfer address, transfers the registers
 * named in the bottom 16 bits, and optionally writes the updated base
 * address back to Rn.
 */
static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rn = (insn >> 16) & 0xf;		/* base register number */
	int lbit = insn & (1 << 20);		/* L: load (LDM) vs store (STM) */
	int wbit = insn & (1 << 21);		/* W: writeback to base */
	int ubit = insn & (1 << 23);		/* U: increment vs decrement */
	int pbit = insn & (1 << 24);		/* P: pre- vs post-adjust */
	long *addr = (long *)regs->uregs[rn];
	int reg_bit_vector;
	int reg_count;

	/* Count set bits in the register list (number of words transferred). */
	reg_count = 0;
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		reg_bit_vector &= (reg_bit_vector - 1);
		++reg_count;
	}

	/* Derive the lowest transfer address from the U and P bits. */
	if (!ubit)
		addr -= reg_count;
	addr += (!pbit == !ubit);

	/* Transfer registers in ascending register-number order. */
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		int reg = __ffs(reg_bit_vector);
		reg_bit_vector &= (reg_bit_vector - 1);
		if (lbit)
			regs->uregs[reg] = *addr++;
		else
			*addr++ = regs->uregs[reg];
	}

	/* Writeback: undo the initial adjustment of the (now advanced)
	 * address to obtain the architecturally updated base value. */
	if (wbit) {
		if (!ubit)
			addr -= reg_count;
		addr -= (!pbit == !ubit);
		regs->uregs[rn] = (long)addr;
	}
}
224
/*
 * Simulate an STM whose register list includes the PC. The stored PC value
 * must match what the hardware would store, i.e. the probed instruction's
 * address plus the core-specific str_pc_offset measured at boot.
 */
static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
{
	regs->ARM_pc = (long)p->addr + str_pc_offset;
	simulate_ldm1stm1(p, regs);
	/* Restore PC to the next instruction after the probed one. */
	regs->ARM_pc = (long)p->addr + 4;
}

/*
 * Simulate an LDM whose register list includes the PC: perform the block
 * transfer, then complete the PC write with interworking semantics.
 */
static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs)
{
	simulate_ldm1stm1(p, regs);
	load_write_pc(regs->ARM_pc, regs);
}
237
/*
 * Emulate an instruction which only uses registers in the range r0-r12 and
 * does not depend on or update the flags. Loads r0-r12 from *regs, calls
 * the instruction copy in the slot (p->ainsn.insn_fn), then stores r0-r12
 * back. The regs pointer (r1) and r11 are preserved on the stack across
 * the call; the pre-ARMv6 path synthesises BLX with an explicit lr setup.
 */
static void __kprobes
emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
{
	register void *rregs asm("r1") = regs;
	register void *rfn asm("lr") = p->ainsn.insn_fn;

	__asm__ __volatile__ (
		"stmdb	sp!, {%[regs], r11}	\n\t"
		"ldmia	%[regs], {r0-r12}	\n\t"
#if __LINUX_ARM_ARCH__ >= 6
		"blx	%[fn]			\n\t"
#else
		"str	%[fn], [sp, #-4]!	\n\t"
		"adr	lr, 1f			\n\t"
		"ldr	pc, [sp], #4		\n\t"
		"1:				\n\t"
#endif
		"ldr	lr, [sp], #4		\n\t" /* lr = regs */
		"stmia	lr, {r0-r12}		\n\t"
		"ldr	r11, [sp], #4		\n\t"
		: [regs] "=r" (rregs), [fn] "=r" (rfn)
		: "0" (rregs), "1" (rfn)
		: "r0", "r2", "r3", "r4", "r5", "r6", "r7",
		  "r8", "r9", "r10", "r12", "memory", "cc"
		);
}
264
/*
 * Emulate an instruction whose registers were renumbered down by 2 (see
 * kprobe_decode_ldmstm): offsetting the register file by two slots makes
 * the probed R2..R14 appear as r0..r12 to the generic emulator above.
 */
static void __kprobes
emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs)
{
	emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2));
}

/*
 * Emulate an LDM renumbered down by 3 (probed R3..R15 appear as r0..r12),
 * then complete the resulting PC write with interworking semantics.
 */
static void __kprobes
emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs)
{
	emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3));
	load_write_pc(regs->ARM_pc, regs);
}
277
278enum kprobe_insn __kprobes
279kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
280{
281 kprobe_insn_handler_t *handler = 0;
282 unsigned reglist = insn & 0xffff;
283 int is_ldm = insn & 0x100000;
284 int rn = (insn >> 16) & 0xf;
285
286 if (rn <= 12 && (reglist & 0xe000) == 0) {
287 /* Instruction only uses registers in the range R0..R12 */
288 handler = emulate_generic_r0_12_noflags;
289
290 } else if (rn >= 2 && (reglist & 0x8003) == 0) {
291 /* Instruction only uses registers in the range R2..R14 */
292 rn -= 2;
293 reglist >>= 2;
294 handler = emulate_generic_r2_14_noflags;
295
296 } else if (rn >= 3 && (reglist & 0x0007) == 0) {
297 /* Instruction only uses registers in the range R3..R15 */
298 if (is_ldm && (reglist & 0x8000)) {
299 rn -= 3;
300 reglist >>= 3;
301 handler = emulate_ldm_r3_15;
302 }
303 }
304
305 if (handler) {
306 /* We can emulate the instruction in (possibly) modified form */
307 asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
308 asi->insn_handler = handler;
309 return INSN_GOOD;
310 }
311
312 /* Fallback to slower simulation... */
313 if (reglist & 0x8000)
314 handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
315 else
316 handler = simulate_ldm1stm1;
317 asi->insn_handler = handler;
318 return INSN_GOOD_NO_SLOT;
319}
320
321
322/*
323 * Prepare an instruction slot to receive an instruction for emulating.
324 * This is done by placing a subroutine return after the location where the
325 * instruction will be placed. We also modify ARM instructions to be
326 * unconditional as the condition code will already be checked before any
327 * emulation handler is called.
328 */
/*
 * Prepare the instruction slot: place a subroutine-return instruction
 * after the location the probed instruction's copy will occupy, and make
 * ARM instructions unconditional (conditions are checked separately
 * before the emulation handler runs). Returns the (possibly modified)
 * instruction.
 */
static kprobe_opcode_t __kprobes
prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
								bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
	if (thumb) {
		u16 *thumb_insn = (u16 *)asi->insn;
		/* Returns placed after both possible instruction widths. */
		thumb_insn[1] = 0x4770; /* Thumb bx lr */
		thumb_insn[2] = 0x4770; /* Thumb bx lr */
		return insn;
	}
	asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
#else
	asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
#endif
	/* Make an ARM instruction unconditional by forcing the condition
	 * field (bits 31-28) to 0xe (AL). */
	if (insn < 0xe0000000)
		insn = (insn | 0xe0000000) & ~0x10000000;
	return insn;
}
349
350/*
351 * Write a (probably modified) instruction into the slot previously prepared by
352 * prepare_emulated_insn
353 */
/*
 * Write a (probably modified) instruction into the slot previously prepared
 * by prepare_emulated_insn. Thumb instructions are written as one or two
 * halfwords depending on their width; ARM instructions as a single word.
 */
static void __kprobes
set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
								bool thumb)
{
#ifdef CONFIG_THUMB2_KERNEL
	if (thumb) {
		u16 *ip = (u16 *)asi->insn;
		if (is_wide_instruction(insn))
			*ip++ = insn >> 16;	/* first halfword of a 32-bit insn */
		*ip++ = insn;
		return;
	}
#endif
	asi->insn[0] = insn;
}
369
/*
 * When we modify the register numbers encoded in an instruction to be emulated,
 * the new values come from this define. For ARM and 32-bit Thumb instructions
 * this gives...
 *
 *	bit position	  16  12   8   4   0
 *	---------------+---+---+---+---+---+
 *	register	 r2  r0  r1  --  r3
 */
#define INSN_NEW_BITS		0x00020103

/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
#define INSN_SAMEAS16_BITS	0x22222222
383
384/*
385 * Validate and modify each of the registers encoded in an instruction.
386 *
387 * Each nibble in regs contains a value from enum decode_reg_type. For each
388 * non-zero value, the corresponding nibble in pinsn is validated and modified
389 * according to the type.
390 */
/*
 * Validate and modify each of the registers encoded in an instruction.
 *
 * Each nibble in regs contains a value from enum decode_reg_type. For each
 * non-zero value, the corresponding nibble in pinsn is validated and modified
 * according to the type. Returns false (reject) if any register fails its
 * check; on success the instruction's register fields have been replaced
 * with the fixed numbering from INSN_NEW_BITS.
 */
static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs)
{
	kprobe_opcode_t insn = *pinsn;
	kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */

	for (; regs != 0; regs >>= 4, mask <<= 4) {

		kprobe_opcode_t new_bits = INSN_NEW_BITS;

		switch (regs & 0xf) {

		case REG_TYPE_NONE:
			/* Nibble not a register, skip to next */
			continue;

		case REG_TYPE_ANY:
			/* Any register is allowed */
			break;

		case REG_TYPE_SAMEAS16:
			/* Replace register with same as at bit position 16 */
			new_bits = INSN_SAMEAS16_BITS;
			break;

		case REG_TYPE_SP:
			/* Only allow SP (R13) */
			/* 0xdddddddd repeats 0xd (13) in every nibble so the
			 * XOR-and-mask tests just the nibble selected by mask. */
			if ((insn ^ 0xdddddddd) & mask)
				goto reject;
			break;

		case REG_TYPE_PC:
			/* Only allow PC (R15) */
			if ((insn ^ 0xffffffff) & mask)
				goto reject;
			break;

		case REG_TYPE_NOSP:
			/* Reject SP (R13) */
			if (((insn ^ 0xdddddddd) & mask) == 0)
				goto reject;
			break;

		case REG_TYPE_NOSPPC:
		case REG_TYPE_NOSPPCX:
			/* Reject SP and PC (R13 and R15) */
			/* The extra 0xdddddddd mask ignores bit 1 of each
			 * nibble, so 0xd matches both 0xd and 0xf. */
			if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
				goto reject;
			break;

		case REG_TYPE_NOPCWB:
			if (!is_writeback(insn))
				break; /* No writeback, so any register is OK */
			/* fall through... */
		case REG_TYPE_NOPC:
		case REG_TYPE_NOPCX:
			/* Reject PC (R15) */
			if (((insn ^ 0xffffffff) & mask) == 0)
				goto reject;
			break;
		}

		/* Replace value of nibble with new register number... */
		insn &= ~mask;
		insn |= new_bits & mask;
	}

	*pinsn = insn;
	return true;

reject:
	return false;
}
463
/*
 * Size of each decode table entry, indexed by its decode_type. Used by
 * kprobe_decode_insn() to step from one variable-sized entry to the next.
 */
static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
	[DECODE_TYPE_TABLE]	= sizeof(struct decode_table),
	[DECODE_TYPE_CUSTOM]	= sizeof(struct decode_custom),
	[DECODE_TYPE_SIMULATE]	= sizeof(struct decode_simulate),
	[DECODE_TYPE_EMULATE]	= sizeof(struct decode_emulate),
	[DECODE_TYPE_OR]	= sizeof(struct decode_or),
	[DECODE_TYPE_REJECT]	= sizeof(struct decode_reject)
};
472
473/*
474 * kprobe_decode_insn operates on data tables in order to decode an ARM
475 * architecture instruction onto which a kprobe has been placed.
476 *
477 * These instruction decoding tables are a concatenation of entries each
478 * of which consist of one of the following structs:
479 *
480 * decode_table
481 * decode_custom
482 * decode_simulate
483 * decode_emulate
484 * decode_or
485 * decode_reject
486 *
487 * Each of these starts with a struct decode_header which has the following
488 * fields:
489 *
490 * type_regs
491 * mask
492 * value
493 *
494 * The least significant DECODE_TYPE_BITS of type_regs contains a value
495 * from enum decode_type, this indicates which of the decode_* structs
496 * the entry contains. The value DECODE_TYPE_END indicates the end of the
497 * table.
498 *
499 * When the table is parsed, each entry is checked in turn to see if it
500 * matches the instruction to be decoded using the test:
501 *
502 * (insn & mask) == value
503 *
504 * If no match is found before the end of the table is reached then decoding
505 * fails with INSN_REJECTED.
506 *
507 * When a match is found, decode_regs() is called to validate and modify each
508 * of the registers encoded in the instruction; the data it uses to do this
509 * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
510 * to fail with INSN_REJECTED.
511 *
512 * Once the instruction has passed the above tests, further processing
513 * depends on the type of the table entry's decode struct.
514 *
515 */
/*
 * Walk a decode table (see the block comment above) to classify the probed
 * instruction. Returns INSN_GOOD, INSN_GOOD_NO_SLOT or INSN_REJECTED and
 * fills in the asi handler fields accordingly.
 */
int __kprobes
kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
		   const union decode_item *table, bool thumb)
{
	const struct decode_header *h = (struct decode_header *)table;
	const struct decode_header *next;
	bool matched = false;

	insn = prepare_emulated_insn(insn, asi, thumb);

	for (;; h = next) {
		enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
		u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;

		/* Reaching the table's end without a match means reject. */
		if (type == DECODE_TYPE_END)
			return INSN_REJECTED;

		/* Entries are variable-sized; advance by this entry's size. */
		next = (struct decode_header *)
				((uintptr_t)h + decode_struct_sizes[type]);

		/* A preceding DECODE_OR match (matched==true) skips the
		 * mask/value test for this entry. */
		if (!matched && (insn & h->mask.bits) != h->value.bits)
			continue;

		if (!decode_regs(&insn, regs))
			return INSN_REJECTED;

		switch (type) {

		case DECODE_TYPE_TABLE: {
			/* Descend into a sub-table. */
			struct decode_table *d = (struct decode_table *)h;
			next = (struct decode_header *)d->table.table;
			break;
		}

		case DECODE_TYPE_CUSTOM: {
			/* Hand off to an instruction-specific decoder. */
			struct decode_custom *d = (struct decode_custom *)h;
			return (*d->decoder.decoder)(insn, asi);
		}

		case DECODE_TYPE_SIMULATE: {
			struct decode_simulate *d = (struct decode_simulate *)h;
			asi->insn_handler = d->handler.handler;
			return INSN_GOOD_NO_SLOT;
		}

		case DECODE_TYPE_EMULATE: {
			struct decode_emulate *d = (struct decode_emulate *)h;
			asi->insn_handler = d->handler.handler;
			set_emulated_insn(insn, asi, thumb);
			return INSN_GOOD;
		}

		case DECODE_TYPE_OR:
			/* Remember the match; the next entry's action applies. */
			matched = true;
			break;

		case DECODE_TYPE_REJECT:
		default:
			return INSN_REJECTED;
		}
	}
}
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
deleted file mode 100644
index 15eeff6aea0..00000000000
--- a/arch/arm/kernel/kprobes-decode.c
+++ /dev/null
@@ -1,1670 +0,0 @@
1/*
2 * arch/arm/kernel/kprobes-decode.c
3 *
4 * Copyright (C) 2006, 2007 Motorola Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16/*
 * We do not have hardware single-stepping on ARM.  This
 * effort is further complicated by the ARM not having a
19 * "next PC" register. Instructions that change the PC
20 * can't be safely single-stepped in a MP environment, so
21 * we have a lot of work to do:
22 *
23 * In the prepare phase:
24 * *) If it is an instruction that does anything
25 * with the CPU mode, we reject it for a kprobe.
26 * (This is out of laziness rather than need. The
27 * instructions could be simulated.)
28 *
29 * *) Otherwise, decode the instruction rewriting its
30 * registers to take fixed, ordered registers and
31 * setting a handler for it to run the instruction.
32 *
33 * In the execution phase by an instruction's handler:
34 *
35 * *) If the PC is written to by the instruction, the
36 * instruction must be fully simulated in software.
37 *
38 * *) Otherwise, a modified form of the instruction is
39 * directly executed. Its handler calls the
40 * instruction in insn[0]. In insn[1] is a
41 * "mov pc, lr" to return.
42 *
43 * Before calling, load up the reordered registers
44 * from the original instruction's registers. If one
45 * of the original input registers is the PC, compute
46 * and adjust the appropriate input register.
47 *
48 * After call completes, copy the output registers to
49 * the original instruction's original registers.
50 *
51 * We don't use a real breakpoint instruction since that
52 * would have us in the kernel go from SVC mode to SVC
53 * mode losing the link register. Instead we use an
54 * undefined instruction. To simplify processing, the
55 * undefined instruction used for kprobes must be reserved
56 * exclusively for kprobes use.
57 *
58 * TODO: ifdef out some instruction decoding based on architecture.
59 */
60
61#include <linux/kernel.h>
62#include <linux/kprobes.h>
63
/* Sign-extend x from bit position 'signbit' (x's high bits must be clear). */
#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))

/* 24-bit branch offset field, shifted to bytes and sign-extended. */
#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)

/* True if the 4-bit register field at 'bitpos' encodes r15 (PC). */
#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos))

/*
 * Test if load/store instructions writeback the address register.
 * if P (bit 24) == 0 or W (bit 21) == 1
 */
#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)

/* CPSR flag (f) and status (s) field mask, as written by "msr cpsr_fs". */
#define PSR_fs (PSR_f|PSR_s)

#define KPROBE_RETURN_INSTRUCTION	0xe1a0f00e	/* mov pc, lr */

/* Signatures of the code executed in an instruction slot. */
typedef long (insn_0arg_fn_t)(void);
typedef long (insn_1arg_fn_t)(long);
typedef long (insn_2arg_fn_t)(long, long);
typedef long (insn_3arg_fn_t)(long, long, long);
typedef long (insn_4arg_fn_t)(long, long, long, long);
typedef long long (insn_llret_0arg_fn_t)(void);
typedef long long (insn_llret_3arg_fn_t)(long, long, long);
typedef long long (insn_llret_4arg_fn_t)(long, long, long, long);

/* A 64-bit value viewed as the r0/r1 register pair (endian-aware). */
union reg_pair {
	long long dr;
#ifdef __LITTLE_ENDIAN
	struct { long r0, r1; };
#else
	struct { long r1, r0; };
#endif
};
97
98/*
99 * For STR and STM instructions, an ARM core may choose to use either
100 * a +8 or a +12 displacement from the current instruction's address.
101 * Whichever value is chosen for a given core, it must be the same for
102 * both instructions and may not change. This function measures it.
103 */
104
/* Measured at boot: the +8 or +12 PC displacement used by "str pc". */
static int str_pc_offset;

/*
 * Store the PC with "str pc" and subtract the instruction's own address
 * to discover whether this core uses a +8 or a +12 displacement (see the
 * comment above); the result is cached in str_pc_offset.
 */
static void __init find_str_pc_offset(void)
{
	int addr, scratch, ret;

	__asm__ (
		"sub	%[ret], pc, #4		\n\t"	/* ret = address of the str below */
		"str	pc, %[addr]		\n\t"
		"ldr	%[scr], %[addr]		\n\t"
		"sub	%[ret], %[scr], %[ret]	\n\t"	/* stored PC - str address */
		: [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));

	str_pc_offset = ret;
}
120
121/*
122 * The insnslot_?arg_r[w]flags() functions below are to keep the
123 * msr -> *fn -> mrs instruction sequences indivisible so that
124 * the state of the CPSR flags aren't inadvertently modified
125 * just before or just after the call.
126 */
127
/*
 * Call the instruction slot *fn with 0..4 arguments marshalled into
 * r0..r3, with the CPSR f/s fields loaded from 'cpsr' immediately
 * beforehand.  The msr and the call are kept in one asm block so the
 * flags cannot be disturbed in between (see comment above).  The slot
 * returns via KPROBE_RETURN_INSTRUCTION, so lr is clobbered.
 */
static inline long __kprobes
insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn)
{
	register long ret asm("r0");

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret)
		: [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	return ret;
}

/* As insnslot_0arg_rflags(), but the slot returns a 64-bit r0/r1 pair. */
static inline long long __kprobes
insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn)
{
	register long ret0 asm("r0");
	register long ret1 asm("r1");
	union reg_pair fnr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret0), "=r" (ret1)
		: [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	fnr.r0 = ret0;
	fnr.r1 = ret1;
	return fnr.dr;
}

/* One argument in r0. */
static inline long __kprobes
insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long ret asm("r0");

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret)
		: "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	return ret;
}

/* Two arguments in r0/r1. */
static inline long __kprobes
insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long ret asm("r0");

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret)
		: "0" (rr0), "r" (rr1),
		  [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	return ret;
}

/* Three arguments in r0..r2. */
static inline long __kprobes
insnslot_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_3arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long rr2 asm("r2") = r2;
	register long ret asm("r0");

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret)
		: "0" (rr0), "r" (rr1), "r" (rr2),
		  [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	return ret;
}

/* Three arguments in r0..r2; 64-bit result returned in r0/r1. */
static inline long long __kprobes
insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr,
			   insn_llret_3arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long rr2 asm("r2") = r2;
	register long ret0 asm("r0");
	register long ret1 asm("r1");
	union reg_pair fnr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret0), "=r" (ret1)
		: "0" (rr0), "r" (rr1), "r" (rr2),
		  [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	fnr.r0 = ret0;
	fnr.r1 = ret1;
	return fnr.dr;
}

/* Four arguments in r0..r3. */
static inline long __kprobes
insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr,
		     insn_4arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long rr2 asm("r2") = r2;
	register long rr3 asm("r3") = r3;
	register long ret asm("r0");

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		: "=r" (ret)
		: "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
		  [cpsr] "r" (cpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	return ret;
}
266
/*
 * The *_rwflags variants additionally read the CPSR back after the call
 * (mrs) and merge the updated f/s fields into *cpsr, for instructions
 * that write flags.  Otherwise identical to the *_rflags helpers above.
 */
static inline long __kprobes
insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long ret asm("r0");
	long oldcpsr = *cpsr;
	long newcpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		"mrs	%[newcpsr], cpsr	\n\t"
		: "=r" (ret), [newcpsr] "=r" (newcpsr)
		: "0" (rr0), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	/* Keep only the f/s fields the slot may have changed. */
	*cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
	return ret;
}

/* Two arguments in r0/r1; flags read back into *cpsr. */
static inline long __kprobes
insnslot_2arg_rwflags(long r0, long r1, long *cpsr, insn_2arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long ret asm("r0");
	long oldcpsr = *cpsr;
	long newcpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		"mrs	%[newcpsr], cpsr	\n\t"
		: "=r" (ret), [newcpsr] "=r" (newcpsr)
		: "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	*cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
	return ret;
}

/* Three arguments in r0..r2; flags read back into *cpsr. */
static inline long __kprobes
insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr,
		      insn_3arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long rr2 asm("r2") = r2;
	register long ret asm("r0");
	long oldcpsr = *cpsr;
	long newcpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		"mrs	%[newcpsr], cpsr	\n\t"
		: "=r" (ret), [newcpsr] "=r" (newcpsr)
		: "0" (rr0), "r" (rr1), "r" (rr2),
		  [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	*cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
	return ret;
}

/* Four arguments in r0..r3; flags read back into *cpsr. */
static inline long __kprobes
insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
		      insn_4arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long rr2 asm("r2") = r2;
	register long rr3 asm("r3") = r3;
	register long ret asm("r0");
	long oldcpsr = *cpsr;
	long newcpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		"mrs	%[newcpsr], cpsr	\n\t"
		: "=r" (ret), [newcpsr] "=r" (newcpsr)
		: "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
		  [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	*cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
	return ret;
}

/* Four arguments in r0..r3; 64-bit r0/r1 result; flags read back. */
static inline long long __kprobes
insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
			    insn_llret_4arg_fn_t *fn)
{
	register long rr0 asm("r0") = r0;
	register long rr1 asm("r1") = r1;
	register long rr2 asm("r2") = r2;
	register long rr3 asm("r3") = r3;
	register long ret0 asm("r0");
	register long ret1 asm("r1");
	long oldcpsr = *cpsr;
	long newcpsr;
	union reg_pair fnr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t"
		"mov	lr, pc			\n\t"
		"mov	pc, %[fn]		\n\t"
		"mrs	%[newcpsr], cpsr	\n\t"
		: "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr)
		: "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
		  [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
		: "lr", "cc"
	);
	*cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
	fnr.r0 = ret0;
	fnr.r1 = ret1;
	return fnr.dr;
}
390
391/*
 * To avoid the complications of mimicking single-stepping on a
393 * processor without a Next-PC or a single-step mode, and to
394 * avoid having to deal with the side-effects of boosting, we
395 * simulate or emulate (almost) all ARM instructions.
396 *
397 * "Simulation" is where the instruction's behavior is duplicated in
398 * C code. "Emulation" is where the original instruction is rewritten
399 * and executed, often by altering its registers.
400 *
401 * By having all behavior of the kprobe'd instruction completed before
402 * returning from the kprobe_handler(), all locks (scheduler and
403 * interrupt) can safely be released. There is no need for secondary
404 * breakpoints, no race with MP or preemptable kernels, nor having to
405 * clean up resources counts at a later time impacting overall system
406 * performance. By rewriting the instruction, only the minimum registers
407 * need to be loaded and saved back optimizing performance.
408 *
409 * Calling the insnslot_*_rwflags version of a function doesn't hurt
410 * anything even when the CPSR flags aren't updated by the
411 * instruction. It's just a little slower in return for saving
412 * a little space by not having a duplicate function that doesn't
413 * update the flags. (The same optimization can be said for
414 * instructions that do or don't perform register writeback)
415 * Also, instructions can either read the flags, only write the
416 * flags, or read and write the flags. To save combinations
417 * rather than for sheer performance, flag functions just assume
418 * read and write of flags.
419 */
420
421static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
422{
423 kprobe_opcode_t insn = p->opcode;
424 long iaddr = (long)p->addr;
425 int disp = branch_displacement(insn);
426
427 if (insn & (1 << 24))
428 regs->ARM_lr = iaddr + 4;
429
430 regs->ARM_pc = iaddr + 8 + disp;
431}
432
433static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
434{
435 kprobe_opcode_t insn = p->opcode;
436 long iaddr = (long)p->addr;
437 int disp = branch_displacement(insn);
438
439 regs->ARM_lr = iaddr + 4;
440 regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
441 regs->ARM_cpsr |= PSR_T_BIT;
442}
443
444static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
445{
446 kprobe_opcode_t insn = p->opcode;
447 int rm = insn & 0xf;
448 long rmv = regs->uregs[rm];
449
450 if (insn & (1 << 5))
451 regs->ARM_lr = (long)p->addr + 4;
452
453 regs->ARM_pc = rmv & ~0x1;
454 regs->ARM_cpsr &= ~PSR_T_BIT;
455 if (rmv & 0x1)
456 regs->ARM_cpsr |= PSR_T_BIT;
457}
458
459static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
460{
461 kprobe_opcode_t insn = p->opcode;
462 int rd = (insn >> 12) & 0xf;
463 unsigned long mask = 0xf8ff03df; /* Mask out execution state */
464 regs->uregs[rd] = regs->ARM_cpsr & mask;
465}
466
/*
 * Simulate LDM/STM in C.  The L (load), W (writeback), U (up) and
 * P (pre-index) bits select direction and addressing mode; the low
 * 16 bits are the register list.
 */
static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rn = (insn >> 16) & 0xf;
	int lbit = insn & (1 << 20);
	int wbit = insn & (1 << 21);
	int ubit = insn & (1 << 23);
	int pbit = insn & (1 << 24);
	long *addr = (long *)regs->uregs[rn];
	int reg_bit_vector;
	int reg_count;

	/* Population count of the register list. */
	reg_count = 0;
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		reg_bit_vector &= (reg_bit_vector - 1);
		++reg_count;
	}

	/* Compute the lowest address touched, per the U and P bits. */
	if (!ubit)
		addr -= reg_count;
	addr += (!pbit == !ubit);

	/* Transfer registers in ascending order of register number. */
	reg_bit_vector = insn & 0xffff;
	while (reg_bit_vector) {
		int reg = __ffs(reg_bit_vector);
		reg_bit_vector &= (reg_bit_vector - 1);
		if (lbit)
			regs->uregs[reg] = *addr++;
		else
			*addr++ = regs->uregs[reg];
	}

	/* Writeback: undo the adjustments to recover the final Rn. */
	if (wbit) {
		if (!ubit)
			addr -= reg_count;
		addr -= (!pbit == !ubit);
		regs->uregs[rn] = (long)addr;
	}
}

/*
 * STM with PC in the register list: temporarily substitute the value
 * this core would store for PC (measured in str_pc_offset), then
 * restore the PC to the following instruction.
 */
static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
{
	regs->ARM_pc = (long)p->addr + str_pc_offset;
	simulate_ldm1stm1(p, regs);
	regs->ARM_pc = (long)p->addr + 4;
}

/* Simulate "mov ip, sp" (r12 = r13). */
static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
{
	regs->uregs[12] = regs->uregs[13];
}
519
/*
 * Emulate LDRD by running the rewritten instruction in the slot.
 * The double-word result lands in r2/r3 and more than four values
 * cross the boundary, hence the hand-written asm marshalling.
 */
static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs)
{
	insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long ppc = (long)p->addr + 8;	/* value PC reads as */
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;  /* rm may be invalid, don't care. */
	long rmv = (rm == 15) ? ppc : regs->uregs[rm];
	long rnv = (rn == 15) ? ppc : regs->uregs[rn];

	/* Not following the C calling convention here, so need asm(). */
	__asm__ __volatile__ (
		"ldr	r0, %[rn]	\n\t"
		"ldr	r1, %[rm]	\n\t"
		"msr	cpsr_fs, %[cpsr]\n\t"
		"mov	lr, pc		\n\t"
		"mov	pc, %[i_fn]	\n\t"
		"str	r0, %[rn]	\n\t"	/* in case of writeback */
		"str	r2, %[rd0]	\n\t"
		"str	r3, %[rd1]	\n\t"
		: [rn]  "+m" (rnv),
		  [rd0] "=m" (regs->uregs[rd]),
		  [rd1] "=m" (regs->uregs[rd+1])
		: [rm]  "m" (rmv),
		  [cpsr] "r" (regs->ARM_cpsr),
		  [i_fn] "r" (i_fn)
		: "r0", "r1", "r2", "r3", "lr", "cc"
	);
	if (is_writeback(insn))
		regs->uregs[rn] = rnv;
}

/* Emulate STRD via the 4-arg slot; handle base-register writeback. */
static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs)
{
	insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long ppc = (long)p->addr + 8;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;
	long rnv = (rn == 15) ? ppc : regs->uregs[rn];
	/* rm/rmv may be invalid, don't care. */
	long rmv = (rm == 15) ? ppc : regs->uregs[rm];
	long rnv_wb;

	rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd],
				      regs->uregs[rd+1],
				      regs->ARM_cpsr, i_fn);
	if (is_writeback(insn))
		regs->uregs[rn] = rnv_wb;
}

/*
 * Emulate LDR; the slot returns both the (possibly written-back) base
 * and the loaded value.  A load into PC interworks on ARMv5+.
 */
static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
{
	insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long ppc = (long)p->addr + 8;
	union reg_pair fnr;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;
	long rdv;
	long rnv = (rn == 15) ? ppc : regs->uregs[rn];
	long rmv = (rm == 15) ? ppc : regs->uregs[rm];
	long cpsr = regs->ARM_cpsr;

	fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
	if (rn != 15)
		regs->uregs[rn] = fnr.r0;  /* Save Rn in case of writeback. */
	rdv = fnr.r1;

	if (rd == 15) {
#if __LINUX_ARM_ARCH__ >= 5
		/* ARMv5+: "ldr pc" interworks on bit 0 of the value. */
		cpsr &= ~PSR_T_BIT;
		if (rdv & 0x1)
			cpsr |= PSR_T_BIT;
		regs->ARM_cpsr = cpsr;
		rdv &= ~0x1;
#else
		rdv &= ~0x2;
#endif
	}
	regs->uregs[rd] = rdv;
}

/* Emulate STR; storing PC uses the measured str_pc_offset. */
static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
{
	insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long iaddr = (long)p->addr;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;
	long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
	long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
	long rmv = regs->uregs[rm];  /* rm/rmv may be invalid, don't care. */
	long rnv_wb;

	rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
	if (rn != 15)
		regs->uregs[rn] = rnv_wb;  /* Save Rn in case of writeback. */
}
623
/*
 * The emulate_* handlers below each run a rewritten instruction from
 * the slot, marshalling the probed instruction's original registers
 * into the fixed r0..r3 positions the prep_* routines encoded.  The
 * names describe which bit fields hold which registers.
 */

/* Saturating op: Rd = op(Rm); may set the Q flag (rw flags). */
static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs)
{
	insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rm = insn & 0xf;
	long rmv = regs->uregs[rm];

	/* Writes Q flag */
	regs->uregs[rd] = insnslot_1arg_rwflags(rmv, &regs->ARM_cpsr, i_fn);
}

/* SEL: Rd = sel(Rn, Rm); reads the GE bits from the CPSR. */
static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs)
{
	insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;
	long rnv = regs->uregs[rn];
	long rmv = regs->uregs[rm];

	/* Reads GE bits */
	regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn);
}

/* Run the slot with no register arguments and discard the result. */
static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs)
{
	insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0];

	insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
}

/* Instruction has no observable effect to emulate. */
static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs)
{
}

/* Rd (bits 12-15) is both source and destination. */
static void __kprobes
emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs)
{
	insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	long rdv = regs->uregs[rd];

	regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn);
}

/* Rd (bits 12-15) modified using Rn (bits 0-3). */
static void __kprobes
emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs)
{
	insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rn = insn & 0xf;
	long rdv = regs->uregs[rd];
	long rnv = regs->uregs[rn];

	regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn);
}

/* Rd (bits 12-15) = op(Rm (bits 0-3)). */
static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs)
{
	insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rm = insn & 0xf;
	long rmv = regs->uregs[rm];

	regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn);
}

/* Rd (12-15) = op(Rn (16-19), Rm (0-3)); reads and writes flags. */
static void __kprobes
emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;
	long rnv = regs->uregs[rn];
	long rmv = regs->uregs[rm];

	regs->uregs[rd] =
		insnslot_2arg_rwflags(rnv, rmv, &regs->ARM_cpsr, i_fn);
}

/* Rd (16-19) = op(Rn (12-15), Rs (8-11), Rm (0-3)); rw flags. */
static void __kprobes
emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 16) & 0xf;
	int rn = (insn >> 12) & 0xf;
	int rs = (insn >> 8) & 0xf;
	int rm = insn & 0xf;
	long rnv = regs->uregs[rn];
	long rsv = regs->uregs[rs];
	long rmv = regs->uregs[rm];

	regs->uregs[rd] =
		insnslot_3arg_rwflags(rnv, rsv, rmv, &regs->ARM_cpsr, i_fn);
}

/* Rd (16-19) = op(Rs (8-11), Rm (0-3)); rw flags. */
static void __kprobes
emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 16) & 0xf;
	int rs = (insn >> 8) & 0xf;
	int rm = insn & 0xf;
	long rsv = regs->uregs[rs];
	long rmv = regs->uregs[rm];

	regs->uregs[rd] =
		insnslot_2arg_rwflags(rsv, rmv, &regs->ARM_cpsr, i_fn);
}

/* Long multiply-accumulate: RdHi:RdLo updated from Rs and Rm; rw flags. */
static void __kprobes
emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	union reg_pair fnr;
	int rdhi = (insn >> 16) & 0xf;
	int rdlo = (insn >> 12) & 0xf;
	int rs = (insn >> 8) & 0xf;
	int rm = insn & 0xf;
	long rsv = regs->uregs[rs];
	long rmv = regs->uregs[rm];

	fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi],
					     regs->uregs[rdlo], rsv, rmv,
					     &regs->ARM_cpsr, i_fn);
	regs->uregs[rdhi] = fnr.r0;
	regs->uregs[rdlo] = fnr.r1;
}

/* ALU with immediate operand; Rn==15 reads as the probed PC+8. */
static void __kprobes
emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];

	regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
}

/* As above, but the instruction also writes flags. */
static void __kprobes
emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];

	regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
}

/* Compare/test with immediate: no Rd, only flags are updated. */
static void __kprobes
emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs)
{
	insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	int rn = (insn >> 16) & 0xf;
	long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];

	insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
}

/* Register-form ALU, flags read only. */
static void __kprobes
emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long ppc = (long)p->addr + 8;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;	/* rn/rnv/rs/rsv may be */
	int rs = (insn >> 8) & 0xf;	/* invalid, don't care. */
	int rm = insn & 0xf;
	long rnv = (rn == 15) ? ppc : regs->uregs[rn];
	long rmv = (rm == 15) ? ppc : regs->uregs[rm];
	long rsv = regs->uregs[rs];

	regs->uregs[rd] =
		insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn);
}

/* Register-form ALU, flags read and written. */
static void __kprobes
emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long ppc = (long)p->addr + 8;
	int rd = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;	/* rn/rnv/rs/rsv may be */
	int rs = (insn >> 8) & 0xf;	/* invalid, don't care. */
	int rm = insn & 0xf;
	long rnv = (rn == 15) ? ppc : regs->uregs[rn];
	long rmv = (rm == 15) ? ppc : regs->uregs[rm];
	long rsv = regs->uregs[rs];

	regs->uregs[rd] =
		insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
}

/* Register-form compare/test: no Rd, only flags are updated. */
static void __kprobes
emulate_alu_tests(struct kprobe *p, struct pt_regs *regs)
{
	insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
	kprobe_opcode_t insn = p->opcode;
	long ppc = (long)p->addr + 8;
	int rn = (insn >> 16) & 0xf;
	int rs = (insn >> 8) & 0xf;	/* rs/rsv may be invalid, don't care. */
	int rm = insn & 0xf;
	long rnv = (rn == 15) ? ppc : regs->uregs[rn];
	long rmv = (rm == 15) ? ppc : regs->uregs[rm];
	long rsv = regs->uregs[rs];

	insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
}
849
/*
 * The prep_emulate_* routines rewrite the probed instruction to use
 * fixed low registers (matching what the corresponding emulate_*
 * handler will marshal into r0..r3), reject PC-destination cases,
 * store the rewritten opcode in the slot and select the handler.
 */

/* LDR/STR family: Rn -> r0, Rd -> r1, register-offset Rm -> r2. */
static enum kprobe_insn __kprobes
prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/* True when the offset is a register rather than an immediate. */
	int not_imm = (insn & (1 << 26)) ? (insn & (1 << 25))
					 : (~insn & (1 << 22));

	if (is_writeback(insn) && is_r15(insn, 16))
		return INSN_REJECTED;	/* Writeback to PC */

	insn &= 0xfff00fff;
	insn |= 0x00001000;	/* Rn = r0, Rd = r1 */
	if (not_imm) {
		insn &= ~0xf;
		insn |= 2;	/* Rm = r2 */
	}
	asi->insn[0] = insn;
	/* L bit (20) distinguishes loads from stores. */
	asi->insn_handler = (insn & (1 << 20)) ? emulate_ldr : emulate_str;
	return INSN_GOOD;
}

/* Rd-only modify: Rd -> r0. */
static enum kprobe_insn __kprobes
prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	if (is_r15(insn, 12))
		return INSN_REJECTED;	/* Rd is PC */

	insn &= 0xffff0fff;	/* Rd = r0 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12_modify;
	return INSN_GOOD;
}

/* Rd modified with Rn: Rd -> r0, Rn -> r1. */
static enum kprobe_insn __kprobes
prep_emulate_rd12rn0_modify(kprobe_opcode_t insn,
			    struct arch_specific_insn *asi)
{
	if (is_r15(insn, 12))
		return INSN_REJECTED;	/* Rd is PC */

	insn &= 0xffff0ff0;	/* Rd = r0 */
	insn |= 0x00000001;	/* Rn = r1 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12rn0_modify;
	return INSN_GOOD;
}

/* Rd = op(Rm): both remapped to r0. */
static enum kprobe_insn __kprobes
prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	if (is_r15(insn, 12))
		return INSN_REJECTED;	/* Rd is PC */

	insn &= 0xffff0ff0;	/* Rd = r0, Rm = r0 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12rm0;
	return INSN_GOOD;
}

/* Rd = op(Rn, Rm): Rd/Rn -> r0, Rm -> r1. */
static enum kprobe_insn __kprobes
prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn,
				struct arch_specific_insn *asi)
{
	if (is_r15(insn, 12))
		return INSN_REJECTED;	/* Rd is PC */

	insn &= 0xfff00ff0;	/* Rd = r0, Rn = r0 */
	insn |= 0x00000001;	/* Rm = r1 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd12rn16rm0_rwflags;
	return INSN_GOOD;
}

/* Rd = op(Rs, Rm): Rd/Rs -> r0, Rm -> r1. */
static enum kprobe_insn __kprobes
prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn,
			       struct arch_specific_insn *asi)
{
	if (is_r15(insn, 16))
		return INSN_REJECTED;	/* Rd is PC */

	insn &= 0xfff0f0f0;	/* Rd = r0, Rs = r0 */
	insn |= 0x00000001;	/* Rm = r1 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd16rs8rm0_rwflags;
	return INSN_GOOD;
}

/* Rd = op(Rn, Rs, Rm): Rd/Rn -> r0, Rs -> r1, Rm -> r2. */
static enum kprobe_insn __kprobes
prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn,
				   struct arch_specific_insn *asi)
{
	if (is_r15(insn, 16))
		return INSN_REJECTED;	/* Rd is PC */

	insn &= 0xfff000f0;	/* Rd = r0, Rn = r0 */
	insn |= 0x00000102;	/* Rs = r1, Rm = r2 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags;
	return INSN_GOOD;
}

/* Long multiply: RdHi -> r0, RdLo -> r1, Rs -> r2, Rm -> r3. */
static enum kprobe_insn __kprobes
prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
				       struct arch_specific_insn *asi)
{
	if (is_r15(insn, 16) || is_r15(insn, 12))
		return INSN_REJECTED;	/* RdHi or RdLo is PC */

	insn &= 0xfff000f0;	/* RdHi = r0, RdLo = r1 */
	insn |= 0x00001203;	/* Rs = r2, Rm = r3 */
	asi->insn[0] = insn;
	asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags;
	return INSN_GOOD;
}
963
964/*
965 * For the instruction masking and comparisons in all the "space_*"
 * functions below, do _not_ rearrange the order of tests unless
 * you're very, very sure of what you are doing.  For the sake of
 * efficiency, the masks for some tests sometimes assume other tests
 * have been done prior to them so the number of patterns to test
970 * for an instruction set can be as broad as possible to reduce the
971 * number of tests needed.
972 */
973
/*
 * Decode the unconditional (cond == 1111) instruction space.  Only
 * hints (treated as NOPs) and BLX(1) are handled; everything else in
 * this space is rejected.
 */
static enum kprobe_insn __kprobes
space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */
	/* PLDI        : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */
	/* PLDW        : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */
	/* PLD         : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */
	if ((insn & 0xfe300000) == 0xf4100000) {
		asi->insn_handler = emulate_nop;
		return INSN_GOOD_NO_SLOT;
	}

	/* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */
	if ((insn & 0xfe000000) == 0xfa000000) {
		asi->insn_handler = simulate_blx1;
		return INSN_GOOD_NO_SLOT;
	}

	/* CPS   : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
	/* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */

	/* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
	/* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */

	/* Coprocessor instructions... */
	/* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
	/* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
	/* LDC2  : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
	/* STC2  : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
	/* CDP2  : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
	/* MCR2  : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
	/* MRC2  : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */

	return INSN_REJECTED;
}
1009
/*
 * Decode the ARM data-processing and miscellaneous instruction space:
 * cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx.
 * Fills in asi with either a simulation handler (no slot used) or a
 * register-remapped copy of the instruction for execution in the slot.
 * Ordering of the checks matters: the miscellaneous encodings (op1 =
 * 10xx0) must be filtered out before the generic ALU handling at the
 * bottom.
 */
static enum kprobe_insn __kprobes
space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */
	if ((insn & 0x0f900010) == 0x01000000) {

		/* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
		if ((insn & 0x0ff000f0) == 0x01000000) {
			if (is_r15(insn, 12))
				return INSN_REJECTED;	/* Rd is PC */
			asi->insn_handler = simulate_mrs;
			return INSN_GOOD_NO_SLOT;
		}

		/* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
		if ((insn & 0x0ff00090) == 0x01400080)
			return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
								      asi);

		/* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
		/* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
		if ((insn & 0x0ff000b0) == 0x012000a0 ||
		    (insn & 0x0ff00090) == 0x01600080)
			return prep_emulate_rd16rs8rm0_wflags(insn, asi);

		/* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */
		/* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */
		if ((insn & 0x0ff00090) == 0x01000080 ||
		    (insn & 0x0ff000b0) == 0x01200080)
			return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);

		/* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
		/* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
		/* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */

		/* Other instruction encodings aren't yet defined */
		return INSN_REJECTED;
	}

	/* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */
	else if ((insn & 0x0f900090) == 0x01000010) {

		/* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
		/* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
		if ((insn & 0x0ff000d0) == 0x01200010) {
			/* Rm == 15 would branch to the probe address */
			if ((insn & 0x0ff000ff) == 0x0120003f)
				return INSN_REJECTED; /* BLX pc */
			asi->insn_handler = simulate_blx2bx;
			return INSN_GOOD_NO_SLOT;
		}

		/* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
		if ((insn & 0x0ff000f0) == 0x01600010)
			return prep_emulate_rd12rm0(insn, asi);

		/* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */
		/* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */
		/* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */
		/* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */
		if ((insn & 0x0f9000f0) == 0x01000050)
			return prep_emulate_rd12rn16rm0_wflags(insn, asi);

		/* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
		/* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */

		/* Other instruction encodings aren't yet defined */
		return INSN_REJECTED;
	}

	/* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */
	else if ((insn & 0x0f0000f0) == 0x00000090) {

		/* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */
		/* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */
		/* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */
		/* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */
		/* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */
		/* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */
		/* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */
		/* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */
		/* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */
		/* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */
		/* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */
		/* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */
		/* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */
		/* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */
		/* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */
		/* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */
		/* Select helper by operand layout (see table above) */
		if ((insn & 0x00d00000) == 0x00500000)
			return INSN_REJECTED;	/* undef encodings 0101/0111 */
		else if ((insn & 0x00e00000) == 0x00000000)
			return prep_emulate_rd16rs8rm0_wflags(insn, asi);
		else if ((insn & 0x00a00000) == 0x00200000)
			return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
		else
			return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
								      asi);
	}

	/* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */
	else if ((insn & 0x0e000090) == 0x00000090) {

		/* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */
		/* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */
		/* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */
		/* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */
		/* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */
		/* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */
		/* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */
		/* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */
		/* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */
		/* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */
		/* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */
		/* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */
		/* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */

		/* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */
		/* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */
		/* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */
		/* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */
		/* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */
		/* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */
		if ((insn & 0x0f0000f0) == 0x01000090) {
			if ((insn & 0x0fb000f0) == 0x01000090) {
				/* SWP/SWPB */
				return prep_emulate_rd12rn16rm0_wflags(insn,
									asi);
			} else {
				/* STREX/LDREX variants and unallocated space */
				return INSN_REJECTED;
			}

		} else if ((insn & 0x0e1000d0) == 0x00000d0) {
			/* STRD/LDRD */
			if ((insn & 0x0000e000) == 0x0000e000)
				return INSN_REJECTED;	/* Rd is LR or PC */
			if (is_writeback(insn) && is_r15(insn, 16))
				return INSN_REJECTED;	/* Writeback to PC */

			/* Remap registers so the slot copy uses r0-r2 */
			insn &= 0xfff00fff;
			insn |= 0x00002000;	/* Rn = r0, Rd = r2 */
			if (!(insn & (1 << 22))) {
				/* Register index */
				insn &= ~0xf;
				insn |= 1;	/* Rm = r1 */
			}
			asi->insn[0] = insn;
			asi->insn_handler =
				(insn & (1 << 5)) ? emulate_strd : emulate_ldrd;
			return INSN_GOOD;
		}

		/* LDRH/STRH/LDRSB/LDRSH */
		if (is_r15(insn, 12))
			return INSN_REJECTED;	/* Rd is PC */
		return prep_emulate_ldr_str(insn, asi);
	}

	/* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx xxxx */

	/*
	 * ALU op with S bit and Rd == 15 :
	 *	cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx
	 */
	if ((insn & 0x0e10f000) == 0x0010f000)
		return INSN_REJECTED;	/* exception-return forms */

	/*
	 * "mov ip, sp" is the most common kprobe'd instruction by far.
	 * Check and optimize for it explicitly.
	 */
	if (insn == 0xe1a0c00d) {
		asi->insn_handler = simulate_mov_ipsp;
		return INSN_GOOD_NO_SLOT;
	}

	/*
	 * Data processing: Immediate-shift / Register-shift
	 * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx
	 * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx
	 * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx
	 * *S (bit 20) updates condition codes
	 * ADC/SBC/RSC reads the C flag
	 */
	insn &= 0xfff00ff0;	/* Rn = r0, Rd = r0 */
	insn |= 0x00000001;	/* Rm = r1 */
	if (insn & 0x010) {
		insn &= 0xfffff0ff;	/* register shift */
		insn |= 0x00000200;	/* Rs = r2 */
	}
	asi->insn[0] = insn;

	if ((insn & 0x0f900000) == 0x01100000) {
		/*
		 * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx
		 * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx
		 * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx
		 * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx
		 */
		asi->insn_handler = emulate_alu_tests;
	} else {
		/* ALU ops which write to Rd */
		asi->insn_handler = (insn & (1 << 20)) ?	/* S-bit */
				emulate_alu_rwflags : emulate_alu_rflags;
	}
	return INSN_GOOD;
}
1217
/*
 * Decode the data-processing-with-immediate space:
 * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx.
 * MOVW/MOVT and the hint instructions are carved out first; what is
 * left is handled as a generic ALU-with-immediate operation with Rn
 * and Rd remapped to r0 for execution in the slot.
 */
static enum kprobe_insn __kprobes
space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
	/* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
	if ((insn & 0x0fb00000) == 0x03000000)
		return prep_emulate_rd12_modify(insn, asi);

	/* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
	if ((insn & 0x0fff0000) == 0x03200000) {
		unsigned op2 = insn & 0x000000ff;
		if (op2 == 0x01 || op2 == 0x04) {
			/* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
			/* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
			asi->insn[0] = insn;
			asi->insn_handler = emulate_none;
			return INSN_GOOD;
		} else if (op2 <= 0x03) {
			/* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
			/* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
			/* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
			/*
			 * We make WFE and WFI true NOPs to avoid stalls due
			 * to missing events whilst processing the probe.
			 */
			asi->insn_handler = emulate_nop;
			return INSN_GOOD_NO_SLOT;
		}
		/* For DBG and unallocated hints it's safest to reject them */
		return INSN_REJECTED;
	}

	/*
	 * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
	 * ALU op with S bit and Rd == 15 :
	 *	 cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
	 */
	if ((insn & 0x0fb00000) == 0x03200000 ||	/* MSR */
	    (insn & 0x0e10f000) == 0x0210f000)		/* ALU s-bit, R15  */
		return INSN_REJECTED;

	/*
	 * Data processing: 32-bit Immediate
	 * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
	 * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx
	 * *S (bit 20) updates condition codes
	 * ADC/SBC/RSC reads the C flag
	 */
	insn &= 0xfff00fff;	/* Rn = r0 and Rd = r0 */
	asi->insn[0] = insn;

	if ((insn & 0x0f900000) == 0x03100000) {
		/*
		 * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx
		 * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx
		 * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx
		 * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx
		 */
		asi->insn_handler = emulate_alu_tests_imm;
	} else {
		/* ALU ops which write to Rd */
		asi->insn_handler = (insn & (1 << 20)) ?	/* S-bit */
				emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
	}
	return INSN_GOOD;
}
1284
/*
 * Decode the media instruction space with op1 = 0110 and bit 4 set:
 * parallel add/subtract, pack/saturate/reverse and extend forms.
 * Encoding checks proceed from most-specific to least-specific mask.
 */
static enum kprobe_insn __kprobes
space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */
	if ((insn & 0x0ff000f0) == 0x068000b0) {
		if (is_r15(insn, 12))
			return INSN_REJECTED;	/* Rd is PC */
		insn &= 0xfff00ff0;	/* Rd = r0, Rn = r0 */
		insn |= 0x00000001;	/* Rm = r1 */
		asi->insn[0] = insn;
		asi->insn_handler = emulate_sel;
		return INSN_GOOD;
	}

	/* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */
	/* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */
	/* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */
	/* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */
	if ((insn & 0x0fa00030) == 0x06a00010 ||
	    (insn & 0x0fb000f0) == 0x06a00030) {
		if (is_r15(insn, 12))
			return INSN_REJECTED;	/* Rd is PC */
		insn &= 0xffff0ff0;	/* Rd = r0, Rm = r0 */
		asi->insn[0] = insn;
		asi->insn_handler = emulate_sat;
		return INSN_GOOD;
	}

	/* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
	/* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
	/* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
	/* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
	if ((insn & 0x0ff00070) == 0x06b00030 ||
	    (insn & 0x0ff00070) == 0x06f00030)
		return prep_emulate_rd12rm0(insn, asi);

	/* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */
	/* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */
	/* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */
	/* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */
	/* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */
	/* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */
	/* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */
	/* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */
	/* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */
	/* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */
	/* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */
	/* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */
	/* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */
	/* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */
	/* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */
	/* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */
	/* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */
	/* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */
	/* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */
	/* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */
	/* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */
	/* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */
	/* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */
	/* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */
	/* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */
	/* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */
	/* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */
	/* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */
	/* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */
	/* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */
	/* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */
	/* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */
	/* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */
	/* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */
	/* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */
	/* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */
	/* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */
	/* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */
	/* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */
	/* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */
	/* ??? : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */
	/* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */
	/* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */
	/* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */
	/* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */
	/* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */
	/* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */
	/* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */
	/* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */
	/* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */
	if ((insn & 0x0f800010) == 0x06000010) {
		/* Reject the "???" rows in the table above */
		if ((insn & 0x00300000) == 0x00000000 ||
		    (insn & 0x000000e0) == 0x000000a0 ||
		    (insn & 0x000000e0) == 0x000000c0)
			return INSN_REJECTED;	/* Unallocated space */
		return prep_emulate_rd12rn16rm0_wflags(insn, asi);
	}

	/* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */
	/* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */
	if ((insn & 0x0ff00030) == 0x06800010)
		return prep_emulate_rd12rn16rm0_wflags(insn, asi);

	/* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */
	/* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */
	/* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */
	/* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */
	/* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */
	/* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */
	/* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */
	/* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */
	/* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */
	/* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */
	/* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */
	/* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */
	/* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */
	/* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */
	if ((insn & 0x0f8000f0) == 0x06800070) {
		if ((insn & 0x00300000) == 0x00100000)
			return INSN_REJECTED;	/* Unallocated space */

		/* Rn == 15 selects the extend-only (no accumulate) form */
		if ((insn & 0x000f0000) == 0x000f0000)
			return prep_emulate_rd12rm0(insn, asi);
		else
			return prep_emulate_rd12rn16rm0_wflags(insn, asi);
	}

	/* Other instruction encodings aren't yet defined */
	return INSN_REJECTED;
}
1411
1412static enum kprobe_insn __kprobes
1413space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1414{
1415 /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
1416 if ((insn & 0x0ff000f0) == 0x03f000f0)
1417 return INSN_REJECTED;
1418
1419 /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
1420 /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
1421 if ((insn & 0x0ff00090) == 0x07400010)
1422 return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
1423
1424 /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */
1425 /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */
1426 /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */
1427 /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */
1428 /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */
1429 /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */
1430 /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */
1431 /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */
1432 if ((insn & 0x0ff00090) == 0x07000010 ||
1433 (insn & 0x0ff000d0) == 0x07500010 ||
1434 (insn & 0x0ff000f0) == 0x07800010) {
1435
1436 if ((insn & 0x0000f000) == 0x0000f000)
1437 return prep_emulate_rd16rs8rm0_wflags(insn, asi);
1438 else
1439 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
1440 }
1441
1442 /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */
1443 if ((insn & 0x0ff000d0) == 0x075000d0)
1444 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
1445
1446 /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */
1447 /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */
1448 if ((insn & 0x0fa00070) == 0x07a00050)
1449 return prep_emulate_rd12rm0(insn, asi);
1450
1451 /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */
1452 /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */
1453 if ((insn & 0x0fe00070) == 0x07c00010) {
1454
1455 if ((insn & 0x0000000f) == 0x0000000f)
1456 return prep_emulate_rd12_modify(insn, asi);
1457 else
1458 return prep_emulate_rd12rn0_modify(insn, asi);
1459 }
1460
1461 return INSN_REJECTED;
1462}
1463
1464static enum kprobe_insn __kprobes
1465space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1466{
1467 /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */
1468 /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */
1469 /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
1470 /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
1471 /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */
1472 /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */
1473 /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
1474 /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
1475
1476 if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12))
1477 return INSN_REJECTED; /* LDRB into PC */
1478
1479 return prep_emulate_ldr_str(insn, asi);
1480}
1481
1482static enum kprobe_insn __kprobes
1483space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1484{
1485 /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */
1486 /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
1487 if ((insn & 0x0e708000) == 0x85000000 ||
1488 (insn & 0x0e508000) == 0x85010000)
1489 return INSN_REJECTED;
1490
1491 /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
1492 /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
1493 asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */
1494 simulate_stm1_pc : simulate_ldm1stm1;
1495 return INSN_GOOD_NO_SLOT;
1496}
1497
1498static enum kprobe_insn __kprobes
1499space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1500{
1501 /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
1502 /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
1503 asi->insn_handler = simulate_bbl;
1504 return INSN_GOOD_NO_SLOT;
1505}
1506
1507static enum kprobe_insn __kprobes
1508space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1509{
1510 /* Coprocessor instructions... */
1511 /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
1512 /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
1513 /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
1514 /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
1515 /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
1516 /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
1517 /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
1518
1519 /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
1520
1521 return INSN_REJECTED;
1522}
1523
/* Condition EQ: true (non-zero) when the Z flag is set. */
static unsigned long __kprobes __check_eq(unsigned long cpsr)
{
	return cpsr & PSR_Z_BIT;
}
1528
/* Condition NE: true when the Z flag is clear. */
static unsigned long __kprobes __check_ne(unsigned long cpsr)
{
	return (~cpsr) & PSR_Z_BIT;
}
1533
/* Condition CS/HS: true when the C flag is set. */
static unsigned long __kprobes __check_cs(unsigned long cpsr)
{
	return cpsr & PSR_C_BIT;
}
1538
/* Condition CC/LO: true when the C flag is clear. */
static unsigned long __kprobes __check_cc(unsigned long cpsr)
{
	return (~cpsr) & PSR_C_BIT;
}
1543
/* Condition MI: true when the N flag is set. */
static unsigned long __kprobes __check_mi(unsigned long cpsr)
{
	return cpsr & PSR_N_BIT;
}
1548
/* Condition PL: true when the N flag is clear. */
static unsigned long __kprobes __check_pl(unsigned long cpsr)
{
	return (~cpsr) & PSR_N_BIT;
}
1553
/* Condition VS: true when the V flag is set. */
static unsigned long __kprobes __check_vs(unsigned long cpsr)
{
	return cpsr & PSR_V_BIT;
}
1558
/* Condition VC: true when the V flag is clear. */
static unsigned long __kprobes __check_vc(unsigned long cpsr)
{
	return (~cpsr) & PSR_V_BIT;
}
1563
/*
 * Condition HI: C set and Z clear.
 * Z sits one bit above C in the PSR, so shifting right by one moves Z
 * into C's position; the &= ~ then clears C whenever Z was set.
 */
static unsigned long __kprobes __check_hi(unsigned long cpsr)
{
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return cpsr & PSR_C_BIT;
}
1569
/*
 * Condition LS: C clear or Z set (the complement of HI, using the
 * same shift trick as __check_hi and inverting the result).
 */
static unsigned long __kprobes __check_ls(unsigned long cpsr)
{
	cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
	return (~cpsr) & PSR_C_BIT;
}
1575
/*
 * Condition GE: N == V.
 * V is three bits below N in the PSR, so XORing cpsr with itself
 * shifted left by three folds V into N's bit position.
 */
static unsigned long __kprobes __check_ge(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return (~cpsr) & PSR_N_BIT;
}
1581
/* Condition LT: N != V (same folding trick as __check_ge). */
static unsigned long __kprobes __check_lt(unsigned long cpsr)
{
	cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	return cpsr & PSR_N_BIT;
}
1587
/*
 * Condition GT: Z clear and N == V.
 * First fold V into N's position (N ^ V), then fold Z in as well
 * (Z is one bit below N); the condition holds when the folded bit
 * is zero, hence the final inversion.
 */
static unsigned long __kprobes __check_gt(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);			 /* PSR_N_BIT |= PSR_Z_BIT */
	return (~temp) & PSR_N_BIT;
}
1594
/* Condition LE: Z set or N != V (complement of __check_gt). */
static unsigned long __kprobes __check_le(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
	temp |= (cpsr << 1);			 /* PSR_N_BIT |= PSR_Z_BIT */
	return temp & PSR_N_BIT;
}
1601
/* Condition AL: always true; the flags are ignored. */
static unsigned long __kprobes __check_al(unsigned long cpsr)
{
	return true;
}
1606
/*
 * Condition checkers indexed by the 4-bit condition field (instruction
 * bits 31-28).  Slot 15 (the 0b1111 encoding) maps to __check_al:
 * those instructions are unconditional and are decoded separately by
 * space_1111().
 */
static kprobe_check_cc * const condition_checks[16] = {
	&__check_eq, &__check_ne, &__check_cs, &__check_cc,
	&__check_mi, &__check_pl, &__check_vs, &__check_vc,
	&__check_hi, &__check_ls, &__check_ge, &__check_lt,
	&__check_gt, &__check_le, &__check_al, &__check_al
};
1613
1614/* Return:
1615 * INSN_REJECTED If instruction is one not allowed to kprobe,
1616 * INSN_GOOD If instruction is supported and uses instruction slot,
1617 * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
1618 *
1619 * For instructions we don't want to kprobe (INSN_REJECTED return result):
 * These are generally ones that modify the processor state, making
 * them "hard" to simulate — for example, instructions that switch
 * processor mode or make accesses in alternate modes.  Any of these
 * could be simulated if the work were put in, but the return would be
 * low given that they should also be very rare.
1625 */
1626enum kprobe_insn __kprobes
1627arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1628{
1629 asi->insn_check_cc = condition_checks[insn>>28];
1630 asi->insn[1] = KPROBE_RETURN_INSTRUCTION;
1631
1632 if ((insn & 0xf0000000) == 0xf0000000)
1633
1634 return space_1111(insn, asi);
1635
1636 else if ((insn & 0x0e000000) == 0x00000000)
1637
1638 return space_cccc_000x(insn, asi);
1639
1640 else if ((insn & 0x0e000000) == 0x02000000)
1641
1642 return space_cccc_001x(insn, asi);
1643
1644 else if ((insn & 0x0f000010) == 0x06000010)
1645
1646 return space_cccc_0110__1(insn, asi);
1647
1648 else if ((insn & 0x0f000010) == 0x07000010)
1649
1650 return space_cccc_0111__1(insn, asi);
1651
1652 else if ((insn & 0x0c000000) == 0x04000000)
1653
1654 return space_cccc_01xx(insn, asi);
1655
1656 else if ((insn & 0x0e000000) == 0x08000000)
1657
1658 return space_cccc_100x(insn, asi);
1659
1660 else if ((insn & 0x0e000000) == 0x0a000000)
1661
1662 return space_cccc_101x(insn, asi);
1663
1664 return space_cccc_11xx(insn, asi);
1665}
1666
/*
 * One-time initialisation for the ARM kprobes decoder: determines the
 * PC offset observed by str instructions on this CPU (see
 * find_str_pc_offset()).
 */
void __init arm_kprobe_decode_init(void)
{
	find_str_pc_offset();
}
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
new file mode 100644
index 00000000000..902ca59e8b1
--- /dev/null
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -0,0 +1,1462 @@
1/*
2 * arch/arm/kernel/kprobes-thumb.c
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/kprobes.h>
13
14#include "kprobes.h"
15
16
/*
 * True if current instruction is in an IT block.
 * The mask 0x06000c00 selects CPSR bits [26:25] and [11:10], which
 * hold part of the ITSTATE field; ITSTATE is zero when not in an
 * IT block.
 */
#define in_it_block(cpsr)	((cpsr & 0x06000c00) != 0x00000000)

/*
 * Return the condition code to check for the currently executing instruction.
 * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if
 * in_it_block returns true.
 */
#define current_cond(cpsr)	((cpsr >> 12) & 0xf)
28
29/*
30 * Return the PC value for a probe in thumb code.
31 * This is the address of the probed instruction plus 4.
32 * We subtract one because the address will have bit zero set to indicate
33 * a pointer to thumb code.
34 */
35static inline unsigned long __kprobes thumb_probe_pc(struct kprobe *p)
36{
37 return (unsigned long)p->addr - 1 + 4;
38}
39
/*
 * Simulate the 32-bit Thumb table branch instructions TBB/TBH.
 * The branch table is addressed by Rn (the probe PC when Rn is r15)
 * and indexed by Rm; each entry is a forward halfword count, so the
 * new PC is the probe PC plus twice the fetched entry.  Bit 4 of the
 * encoding distinguishes halfword (TBH) from byte (TBB) entries.
 */
static void __kprobes
t32_simulate_table_branch(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p);
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn];
	unsigned long rmv = regs->uregs[rm];
	unsigned int halfwords;

	if (insn & 0x10) /* TBH */
		halfwords = ((u16 *)rnv)[rmv];
	else /* TBB */
		halfwords = ((u8 *)rnv)[rmv];

	regs->ARM_pc = pc + 2 * halfwords;
}
59
60static void __kprobes
61t32_simulate_mrs(struct kprobe *p, struct pt_regs *regs)
62{
63 kprobe_opcode_t insn = p->opcode;
64 int rd = (insn >> 8) & 0xf;
65 unsigned long mask = 0xf8ff03df; /* Mask out execution state */
66 regs->uregs[rd] = regs->ARM_cpsr & mask;
67}
68
/*
 * Simulate the 32-bit Thumb conditional branch.  The branch offset is
 * scattered across the encoding (imm11, imm6, J1, J2 and a sign bit);
 * the pieces are reassembled below into a signed halfword count.
 */
static void __kprobes
t32_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p);

	long offset = insn & 0x7ff;		/* imm11 */
	offset += (insn & 0x003f0000) >> 5;	/* imm6 */
	offset += (insn & 0x00002000) << 4;	/* J1 */
	offset += (insn & 0x00000800) << 7;	/* J2 */
	offset -= (insn & 0x04000000) >> 7;	/* Apply sign bit */

	regs->ARM_pc = pc + (offset * 2);
}
83
84static enum kprobe_insn __kprobes
85t32_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
86{
87 int cc = (insn >> 22) & 0xf;
88 asi->insn_check_cc = kprobe_condition_checks[cc];
89 asi->insn_handler = t32_simulate_cond_branch;
90 return INSN_GOOD_NO_SLOT;
91}
92
/*
 * Simulate the 32-bit Thumb unconditional branches B, BL and BLX.
 * The offset pieces (imm11, imm10, J1, J2, sign) are reassembled into
 * a signed halfword count; when the sign bit is clear J1/J2 must be
 * inverted per the encoding.  For BL/BLX the link register is set to
 * the return address (with the Thumb bit from p->addr preserved), and
 * BLX additionally switches to ARM state with a word-aligned PC.
 */
static void __kprobes
t32_simulate_branch(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p);

	long offset = insn & 0x7ff;		/* imm11 */
	offset += (insn & 0x03ff0000) >> 5;	/* imm10 */
	offset += (insn & 0x00002000) << 9;	/* J1 */
	offset += (insn & 0x00000800) << 10;	/* J2 */
	if (insn & 0x04000000)
		offset -= 0x00800000; /* Apply sign bit */
	else
		offset ^= 0x00600000; /* Invert J1 and J2 */

	if (insn & (1 << 14)) {
		/* BL or BLX */
		regs->ARM_lr = (unsigned long)p->addr + 4;
		if (!(insn & (1 << 12))) {
			/* BLX so switch to ARM mode */
			regs->ARM_cpsr &= ~PSR_T_BIT;
			pc &= ~3;
		}
	}

	regs->ARM_pc = pc + (offset * 2);
}
120
/*
 * Simulate the 32-bit Thumb PC-relative (literal) loads:
 * LDR/LDRB/LDRSB/LDRH/LDRSH Rt, [pc, #+/-imm12].
 * The base is the word-aligned probe PC; bit 23 selects add/subtract,
 * bits 22-21 the access size and bit 24 signedness.  An LDR into r15
 * performs an interworking branch via bx_write_pc().
 */
static void __kprobes
t32_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long addr = thumb_probe_pc(p) & ~3;
	int rt = (insn >> 12) & 0xf;
	unsigned long rtv;

	long offset = insn & 0xfff;
	if (insn & 0x00800000)
		addr += offset;
	else
		addr -= offset;

	if (insn & 0x00400000) {
		/* LDR */
		rtv = *(unsigned long *)addr;
		if (rt == 15) {
			bx_write_pc(rtv, regs);
			return;
		}
	} else if (insn & 0x00200000) {
		/* LDRH */
		if (insn & 0x01000000)
			rtv = *(s16 *)addr;
		else
			rtv = *(u16 *)addr;
	} else {
		/* LDRB */
		if (insn & 0x01000000)
			rtv = *(s8 *)addr;
		else
			rtv = *(u8 *)addr;
	}

	regs->uregs[rt] = rtv;
}
158
/*
 * Decode a 32-bit Thumb LDM/STM using the shared ARM/Thumb helper,
 * then swap the two halfwords of the (possibly modified) slot
 * instruction so it is laid out in the memory order Thumb-2 expects.
 */
static enum kprobe_insn __kprobes
t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);

	/* Fixup modified instruction to have halfwords in correct order...*/
	insn = asi->insn[0];
	((u16 *)asi->insn)[0] = insn >> 16;
	((u16 *)asi->insn)[1] = insn & 0xffff;

	return ret;
}
171
/*
 * Emulate 32-bit Thumb LDRD/STRD by branching to the instruction copy
 * in the slot (p->ainsn.insn_fn).  The operand values are pinned to
 * r0-r2 via register asm variables — presumably matching the register
 * remapping applied to the slot copy at decode time (TODO: confirm
 * against the decode tables) — and the results are written back to the
 * saved register state afterwards.  When Rn is PC the word-aligned
 * probe PC is used as the base, and no base writeback is performed.
 */
static void __kprobes
t32_emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p) & ~3;
	int rt1 = (insn >> 12) & 0xf;
	int rt2 = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;

	register unsigned long rt1v asm("r0") = regs->uregs[rt1];
	register unsigned long rt2v asm("r1") = regs->uregs[rt2];
	register unsigned long rnv asm("r2") = (rn == 15) ? pc
							  : regs->uregs[rn];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
		: "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
	);

	if (rn != 15)
		regs->uregs[rn] = rnv; /* Writeback base register */
	regs->uregs[rt1] = rt1v;
	regs->uregs[rt2] = rt2v;
}
198
/*
 * Emulate a 32-bit Thumb single load/store by branching to the slot
 * copy.  Rt, Rn and Rm values are pinned to r0, r2 and r3 for the slot
 * instruction; Rn is unconditionally written back (covers the
 * writeback addressing modes).  A load into r15 is completed with an
 * interworking branch via bx_write_pc() — the comment notes this can
 * only happen for loads, as stores of PC are not allowed here.
 */
static void __kprobes
t32_emulate_ldrstr(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rt = (insn >> 12) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rtv asm("r0") = regs->uregs[rt];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rtv), "=r" (rnv)
		: "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rn] = rnv; /* Writeback base register */
	if (rt == 15) /* Can't be true for a STR as they aren't allowed */
		bx_write_pc(rtv, regs);
	else
		regs->uregs[rt] = rtv;
}
224
/*
 * Emulate a 32-bit Thumb ALU instruction of the form "op Rd, Rn, Rm"
 * that both reads and writes the flags.  The saved CPSR flag bits are
 * loaded into the hardware CPSR before calling the slot copy, and the
 * resulting APSR bits are merged back into the saved CPSR afterwards.
 */
static void __kprobes
t32_emulate_rd8rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];
	unsigned long cpsr = regs->ARM_cpsr;

	__asm__ __volatile__ (
		"msr    cpsr_fs, %[cpsr]        \n\t"
		"blx    %[fn]                   \n\t"
		"mrs    %[cpsr], cpsr           \n\t"
		: "=r" (rdv), [cpsr] "=r" (cpsr)
		: "0" (rdv), "r" (rnv), "r" (rmv),
		  "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}
251
/*
 * Emulate a 32-bit Thumb instruction writing Rd (bits 11-8) whose
 * source operand is the PC (e.g. ADR-style forms): the word-aligned
 * probe PC is supplied in r2 to the slot copy.  Flags are not
 * transferred.
 */
static void __kprobes
t32_emulate_rd8pc16_noflags(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p);
	int rd = (insn >> 8) & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = pc & ~3;

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rdv)
		: "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}
271
/*
 * Emulate an instruction with Rd in bits 8-11 and Rn in bits 16-19
 * which does not affect the flags (e.g. ADDW/SUBW/MOVW/MOVT/BFI).
 * Operands go in via r1/r2 and the result is read back from r1.
 */
static void __kprobes
t32_emulate_rd8rn16_noflags(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rd = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;

	register unsigned long rdv asm("r1") = regs->uregs[rd];
	register unsigned long rnv asm("r2") = regs->uregs[rn];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rdv)
		: "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rd] = rdv;
}
291
/*
 * Emulate a long-multiply style instruction with RdLo in bits 12-15,
 * RdHi in bits 8-11, Rn in bits 16-19 and Rm in bits 0-3, leaving the
 * flags untouched.  All four operands are marshalled into r0-r3; the
 * two result registers are read back from r0/r1.
 */
static void __kprobes
t32_emulate_rdlo12rdhi8rn16rm0_noflags(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	int rdlo = (insn >> 12) & 0xf;
	int rdhi = (insn >> 8) & 0xf;
	int rn = (insn >> 16) & 0xf;
	int rm = insn & 0xf;

	register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
	register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
	register unsigned long rnv asm("r2") = regs->uregs[rn];
	register unsigned long rmv asm("r3") = regs->uregs[rm];

	__asm__ __volatile__ (
		"blx    %[fn]"
		: "=r" (rdlov), "=r" (rdhiv)
		: "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
		  [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
	);

	regs->uregs[rdlo] = rdlov;
	regs->uregs[rdhi] = rdhiv;
}
317
/*
 * These emulation encodings are functionally equivalent...
 * (Rd at bits 8-11 and Ra at bits 12-15 occupy the same fields as
 * RdHi/RdLo in the long-multiply helper, so the same marshalling works.)
 */
#define t32_emulate_rd8rn16rm0ra12_noflags \
		t32_emulate_rdlo12rdhi8rn16rm0_noflags
321
static const union decode_item t32_table_1110_100x_x0xx[] = {
	/* Load/store multiple instructions */
	/* Entries are tried in order: the REJECTs filter out forms the
	 * final catch-all handler must never see. */

	/* Rn is PC		1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe4f0000, 0xe80f0000),

	/* SRS			1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */
	/* RFE			1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffc00000, 0xe8000000),
	/* SRS			1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */
	/* RFE			1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffc00000, 0xe9800000),

	/* STM Rn, {...pc}	1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe508000, 0xe8008000),
	/* LDM Rn, {...lr,pc}	1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe50c000, 0xe810c000),
	/* LDM/STM Rn, {...sp}	1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe402000, 0xe8002000),

	/* STMIA		1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDMIA		1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */
	/* STMDB		1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDMDB		1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_CUSTOM	(0xfe400000, 0xe8000000, t32_decode_ldmstm),

	DECODE_END
};
350
static const union decode_item t32_table_1110_100x_x1xx[] = {
	/* Load/store dual, load/store exclusive, table branch */

	/* STRD (immediate)	1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */
	/* LDRD (immediate)	1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */
	DECODE_OR	(0xff600000, 0xe8600000),
	/* STRD (immediate)	1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */
	/* LDRD (immediate)	1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xff400000, 0xe9400000, t32_emulate_ldrdstrd,
						 REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)),

	/* TBB			1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */
	/* TBH			1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */
	DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, t32_simulate_table_branch,
						 REGS(NOSP, 0, 0, 0, NOSPPC)),

	/* The exclusive monitor cannot be emulated out of line, so these
	 * fall through to DECODE_END and are not probed: */
	/* STREX		1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */
	/* LDREX		1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */
	/* STREXB		1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */
	/* STREXH		1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */
	/* STREXD		1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */
	/* LDREXB		1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */
	/* LDREXH		1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */
	/* LDREXD		1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */
	/* And unallocated instructions...				*/
	DECODE_END
};
378
static const union decode_item t32_table_1110_101x[] = {
	/* Data-processing (shifted register) */

	/* TST			1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */
	/* TEQ			1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xff700f00, 0xea100f00, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, 0, 0, NOSPPC)),

	/* CMN			1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */
	DECODE_OR	(0xfff00f00, 0xeb100f00),
	/* CMP			1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xfff00f00, 0xebb00f00, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOPC, 0, 0, 0, NOSPPC)),

	/* MOV			1110 1010 010x 1111 xxxx xxxx xxxx xxxx */
	/* MVN			1110 1010 011x 1111 xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xffcf0000, 0xea4f0000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(0, 0, NOSPPC, 0, NOSPPC)),

	/* ???			1110 1010 101x xxxx xxxx xxxx xxxx xxxx */
	/* ???			1110 1010 111x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffa00000, 0xeaa00000),
	/* ???			1110 1011 001x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffe00000, 0xeb200000),
	/* ???			1110 1011 100x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffe00000, 0xeb800000),
	/* ???			1110 1011 111x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xffe00000, 0xebe00000),

	/* ADD/SUB SP, SP, Rm, LSL #0..3 */
	/*			1110 1011 x0xx 1101 x000 1101 xx00 xxxx */
	DECODE_EMULATEX	(0xff4f7f30, 0xeb0d0d00, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(SP, 0, SP, 0, NOSPPC)),

	/* ADD/SUB SP, SP, Rm, shift */
	/*			1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */
	DECODE_REJECT	(0xff4f0f00, 0xeb0d0d00),

	/* ADD/SUB Rd, SP, Rm, shift */
	/*			1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xff4f0000, 0xeb0d0000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(SP, 0, NOPC, 0, NOSPPC)),

	/* AND			1110 1010 000x xxxx xxxx xxxx xxxx xxxx */
	/* BIC			1110 1010 001x xxxx xxxx xxxx xxxx xxxx */
	/* ORR			1110 1010 010x xxxx xxxx xxxx xxxx xxxx */
	/* ORN			1110 1010 011x xxxx xxxx xxxx xxxx xxxx */
	/* EOR			1110 1010 100x xxxx xxxx xxxx xxxx xxxx */
	/* PKH			1110 1010 110x xxxx xxxx xxxx xxxx xxxx */
	/* ADD			1110 1011 000x xxxx xxxx xxxx xxxx xxxx */
	/* ADC			1110 1011 010x xxxx xxxx xxxx xxxx xxxx */
	/* SBC			1110 1011 011x xxxx xxxx xxxx xxxx xxxx */
	/* SUB			1110 1011 101x xxxx xxxx xxxx xxxx xxxx */
	/* RSB			1110 1011 110x xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfe000000, 0xea000000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),

	DECODE_END
};
438
static const union decode_item t32_table_1111_0x0x___0[] = {
	/* Data-processing (modified immediate) */

	/* TST			1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */
	/* TEQ			1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xfb708f00, 0xf0100f00, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, 0, 0, 0)),

	/* CMN			1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */
	DECODE_OR	(0xfbf08f00, 0xf1100f00),
	/* CMP			1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */
	DECODE_EMULATEX	(0xfbf08f00, 0xf1b00f00, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOPC, 0, 0, 0, 0)),

	/* MOV			1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */
	/* MVN			1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbcf8000, 0xf04f0000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/* ???			1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf0a00000),
	/* ???			1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */
	/* ???			1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbc08000, 0xf0c00000),
	/* ???			1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf1200000),
	/* ???			1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf1800000),
	/* ???			1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfbe08000, 0xf1e00000),

	/* ADD Rd, SP, #imm	1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */
	/* SUB Rd, SP, #imm	1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb4f8000, 0xf10d0000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(SP, 0, NOPC, 0, 0)),

	/* AND			1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */
	/* BIC			1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */
	/* ORR			1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */
	/* ORN			1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */
	/* EOR			1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */
	/* ADD			1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */
	/* ADC			1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */
	/* SBC			1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */
	/* SUB			1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */
	/* RSB			1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfa008000, 0xf0000000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),

	DECODE_END
};
490
static const union decode_item t32_table_1111_0x1x___0[] = {
	/* Data-processing (plain binary immediate) */

	/* ADDW Rd, PC, #imm	1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */
	DECODE_OR	(0xfbff8000, 0xf20f0000),
	/* SUBW Rd, PC, #imm	1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbff8000, 0xf2af0000, t32_emulate_rd8pc16_noflags,
						 REGS(PC, 0, NOSPPC, 0, 0)),

	/* ADDW SP, SP, #imm	1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */
	DECODE_OR	(0xfbff8f00, 0xf20d0d00),
	/* SUBW SP, SP, #imm	1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */
	DECODE_EMULATEX	(0xfbff8f00, 0xf2ad0d00, t32_emulate_rd8rn16_noflags,
						 REGS(SP, 0, SP, 0, 0)),

	/* ADDW			1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_OR	(0xfbf08000, 0xf2000000),
	/* SUBW			1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbf08000, 0xf2a00000, t32_emulate_rd8rn16_noflags,
						 REGS(NOPCX, 0, NOSPPC, 0, 0)),

	/* MOVW			1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */
	/* MOVT			1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb708000, 0xf2400000, t32_emulate_rd8rn16_noflags,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/* SSAT16		1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */
	/* SSAT			1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */
	/* USAT16		1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */
	/* USAT			1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb508000, 0xf3000000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),

	/* SBFX			1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */
	/* UBFX			1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfb708000, 0xf3400000, t32_emulate_rd8rn16_noflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, 0)),

	/* BFC			1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbff8000, 0xf36f0000, t32_emulate_rd8rn16_noflags,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/* BFI			1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfbf08000, 0xf3600000, t32_emulate_rd8rn16_noflags,
						 REGS(NOSPPCX, 0, NOSPPC, 0, 0)),

	DECODE_END
};
539
static const union decode_item t32_table_1111_0xxx___1[] = {
	/* Branches and miscellaneous control */

	/* YIELD		1111 0011 1010 xxxx 10x0 x000 0000 0001 */
	DECODE_OR	(0xfff0d7ff, 0xf3a08001),
	/* SEV			1111 0011 1010 xxxx 10x0 x000 0000 0100 */
	DECODE_EMULATE	(0xfff0d7ff, 0xf3a08004, kprobe_emulate_none),
	/* NOP			1111 0011 1010 xxxx 10x0 x000 0000 0000 */
	/* WFE			1111 0011 1010 xxxx 10x0 x000 0000 0010 */
	/* WFI			1111 0011 1010 xxxx 10x0 x000 0000 0011 */
	DECODE_SIMULATE	(0xfff0d7fc, 0xf3a08000, kprobe_simulate_nop),

	/* MRS Rd, CPSR		1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */
	DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, t32_simulate_mrs,
						 REGS(0, 0, NOSPPC, 0, 0)),

	/*
	 * Unsupported instructions
	 *			1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx
	 *
	 * MSR			1111 0011 100x xxxx 10x0 xxxx xxxx xxxx
	 * DBG hint		1111 0011 1010 xxxx 10x0 x000 1111 xxxx
	 * Unallocated hints	1111 0011 1010 xxxx 10x0 x000 xxxx xxxx
	 * CPS			1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx
	 * CLREX/DSB/DMB/ISB	1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx
	 * BXJ			1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx
	 * SUBS PC,LR,#<imm8>	1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx
	 * MRS Rd, SPSR		1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx
	 * SMC			1111 0111 1111 xxxx 1000 xxxx xxxx xxxx
	 * UNDEFINED		1111 0111 1111 xxxx 1010 xxxx xxxx xxxx
	 * ???			1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx
	 */
	DECODE_REJECT	(0xfb80d000, 0xf3808000),

	/* Bcc			1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */
	DECODE_CUSTOM	(0xf800d000, 0xf0008000, t32_decode_cond_branch),

	/* BLX			1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */
	DECODE_OR	(0xf800d001, 0xf000c000),
	/* B			1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */
	/* BL			1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */
	DECODE_SIMULATE	(0xf8009000, 0xf0009000, t32_simulate_branch),

	DECODE_END
};
585
static const union decode_item t32_table_1111_100x_x0x1__1111[] = {
	/* Memory hints (preload instructions, treated as NOPs here) */

	/* PLD (literal)	1111 1000 x001 1111 1111 xxxx xxxx xxxx */
	/* PLI (literal)	1111 1001 x001 1111 1111 xxxx xxxx xxxx */
	DECODE_SIMULATE	(0xfe7ff000, 0xf81ff000, kprobe_simulate_nop),

	/* PLD{W} (immediate)	1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */
	DECODE_OR	(0xffd0f000, 0xf890f000),
	/* PLD{W} (immediate)	1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */
	DECODE_OR	(0xffd0ff00, 0xf810fc00),
	/* PLI (immediate)	1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */
	DECODE_OR	(0xfff0f000, 0xf990f000),
	/* PLI (immediate)	1111 1001 0001 xxxx 1111 1100 xxxx xxxx */
	DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, kprobe_simulate_nop,
						 REGS(NOPCX, 0, 0, 0, 0)),

	/* PLD{W} (register)	1111 1000 00x1 xxxx 1111 0000 00xx xxxx */
	DECODE_OR	(0xffd0ffc0, 0xf810f000),
	/* PLI (register)	1111 1001 0001 xxxx 1111 0000 00xx xxxx */
	DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, kprobe_simulate_nop,
						 REGS(NOPCX, 0, 0, 0, NOSPPC)),

	/* Other unallocated instructions...				*/
	DECODE_END
};
612
static const union decode_item t32_table_1111_100x[] = {
	/* Store/Load single data item */

	/* ???			1111 100x x11x xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfe600000, 0xf8600000),

	/* ???			1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xfff00000, 0xf9500000),

	/* ???			1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */
	DECODE_REJECT	(0xfe800d00, 0xf8000800),

	/* STRBT		1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */
	/* STRHT		1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */
	/* STRT			1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */
	/* LDRBT		1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */
	/* LDRSBT		1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */
	/* LDRHT		1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */
	/* LDRSHT		1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */
	/* LDRT			1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */
	DECODE_REJECT	(0xfe800f00, 0xf8000e00),

	/* STR{,B,H} Rn,[PC...]	1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */
	DECODE_REJECT	(0xff1f0000, 0xf80f0000),

	/* STR{,B,H} PC,[Rn...]	1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */
	DECODE_REJECT	(0xff10f000, 0xf800f000),

	/* LDR (literal)	1111 1000 x101 1111 xxxx xxxx xxxx xxxx */
	DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, t32_simulate_ldr_literal,
						 REGS(PC, ANY, 0, 0, 0)),

	/* STR (immediate)	1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */
	/* LDR (immediate)	1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */
	DECODE_OR	(0xffe00800, 0xf8400800),
	/* STR (immediate)	1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */
	/* LDR (immediate)	1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xffe00000, 0xf8c00000, t32_emulate_ldrstr,
						 REGS(NOPCX, ANY, 0, 0, 0)),

	/* STR (register)	1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
	/* LDR (register)	1111 1000 0101 xxxx xxxx 0000 00xx xxxx */
	DECODE_EMULATEX	(0xffe00fc0, 0xf8400000, t32_emulate_ldrstr,
						 REGS(NOPCX, ANY, 0, 0, NOSPPC)),

	/* LDRB (literal)	1111 1000 x001 1111 xxxx xxxx xxxx xxxx */
	/* LDRSB (literal)	1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
	/* LDRH (literal)	1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
	/* LDRSH (literal)	1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
	/* NOTE(review): handler is a simulate routine but is registered
	 * via DECODE_EMULATEX (cf. the DECODE_SIMULATEX used for LDR
	 * (literal) above) — confirm this is intentional. */
	DECODE_EMULATEX	(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
						 REGS(PC, NOSPPCX, 0, 0, 0)),

	/* STRB (immediate)	1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
	/* STRH (immediate)	1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRB (immediate)	1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRSB (immediate)	1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRH (immediate)	1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */
	/* LDRSH (immediate)	1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */
	DECODE_OR	(0xfec00800, 0xf8000800),
	/* STRB (immediate)	1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */
	/* STRH (immediate)	1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */
	/* LDRB (immediate)	1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */
	/* LDRSB (immediate)	1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */
	/* LDRH (immediate)	1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */
	/* LDRSH (immediate)	1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */
	DECODE_EMULATEX	(0xfec00000, 0xf8800000, t32_emulate_ldrstr,
						 REGS(NOPCX, NOSPPCX, 0, 0, 0)),

	/* STRB (register)	1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
	/* STRH (register)	1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
	/* LDRB (register)	1111 1000 0001 xxxx xxxx 0000 00xx xxxx */
	/* LDRSB (register)	1111 1001 0001 xxxx xxxx 0000 00xx xxxx */
	/* LDRH (register)	1111 1000 0011 xxxx xxxx 0000 00xx xxxx */
	/* LDRSH (register)	1111 1001 0011 xxxx xxxx 0000 00xx xxxx */
	DECODE_EMULATEX	(0xfe800fc0, 0xf8000000, t32_emulate_ldrstr,
						 REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)),

	/* Other unallocated instructions...				*/
	DECODE_END
};
693
static const union decode_item t32_table_1111_1010___1111[] = {
	/* Data-processing (register) */

	/* ???			1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */
	DECODE_REJECT	(0xffe0f080, 0xfa60f080),

	/* SXTH			1111 1010 0000 1111 1111 xxxx 1xxx xxxx */
	/* UXTH			1111 1010 0001 1111 1111 xxxx 1xxx xxxx */
	/* SXTB16		1111 1010 0010 1111 1111 xxxx 1xxx xxxx */
	/* UXTB16		1111 1010 0011 1111 1111 xxxx 1xxx xxxx */
	/* SXTB			1111 1010 0100 1111 1111 xxxx 1xxx xxxx */
	/* UXTB			1111 1010 0101 1111 1111 xxxx 1xxx xxxx */
	DECODE_EMULATEX	(0xff8ff080, 0xfa0ff080, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(0, 0, NOSPPC, 0, NOSPPC)),


	/* ???			1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */
	DECODE_REJECT	(0xff80f0b0, 0xfa80f030),
	/* ???			1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */
	DECODE_REJECT	(0xffb0f080, 0xfab0f000),

	/* SADD16		1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */
	/* SASX			1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */
	/* SSAX			1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */
	/* SSUB16		1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */
	/* SADD8		1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */
	/* SSUB8		1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */

	/* QADD16		1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */
	/* QASX			1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */
	/* QSAX			1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */
	/* QSUB16		1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */
	/* QADD8		1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */
	/* QSUB8		1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */

	/* SHADD16		1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */
	/* SHASX		1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */
	/* SHSAX		1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */
	/* SHSUB16		1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */
	/* SHADD8		1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */
	/* SHSUB8		1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */

	/* UADD16		1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */
	/* UASX			1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */
	/* USAX			1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */
	/* USUB16		1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */
	/* UADD8		1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */
	/* USUB8		1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */

	/* UQADD16		1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */
	/* UQASX		1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */
	/* UQSAX		1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */
	/* UQSUB16		1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */
	/* UQADD8		1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */
	/* UQSUB8		1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */

	/* UHADD16		1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */
	/* UHASX		1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */
	/* UHSAX		1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */
	/* UHSUB16		1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */
	/* UHADD8		1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */
	/* UHSUB8		1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */
	DECODE_OR	(0xff80f080, 0xfa80f000),

	/* SXTAH		1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */
	/* UXTAH		1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */
	/* SXTAB16		1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */
	/* UXTAB16		1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */
	/* SXTAB		1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */
	/* UXTAB		1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */
	DECODE_OR	(0xff80f080, 0xfa00f080),

	/* QADD			1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */
	/* QDADD		1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */
	/* QSUB			1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */
	/* QDSUB		1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */
	DECODE_OR	(0xfff0f0c0, 0xfa80f080),

	/* SEL			1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
	DECODE_OR	(0xfff0f0f0, 0xfaa0f080),

	/* LSL			1111 1010 000x xxxx 1111 xxxx 0000 xxxx */
	/* LSR			1111 1010 001x xxxx 1111 xxxx 0000 xxxx */
	/* ASR			1111 1010 010x xxxx 1111 xxxx 0000 xxxx */
	/* ROR			1111 1010 011x xxxx 1111 xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff80f0f0, 0xfa00f000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),

	/* CLZ			1111 1010 1011 xxxx 1111 xxxx 1000 xxxx */
	DECODE_OR	(0xfff0f0f0, 0xfab0f080),

	/* REV			1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */
	/* REV16		1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */
	/* RBIT			1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */
	/* REVSH		1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */
	DECODE_EMULATEX	(0xfff0f0c0, 0xfa90f080, t32_emulate_rd8rn16_noflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)),

	/* Other unallocated instructions...				*/
	DECODE_END
};
795
static const union decode_item t32_table_1111_1011_0[] = {
	/* Multiply, multiply accumulate, and absolute difference */

	/* ???			1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */
	DECODE_REJECT	(0xfff0f0f0, 0xfb00f010),
	/* ???			1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */
	DECODE_REJECT	(0xfff0f0f0, 0xfb70f010),

	/* SMULxy		1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */
	DECODE_OR	(0xfff0f0c0, 0xfb10f000),
	/* MUL			1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */
	/* SMUAD{X}		1111 1011 0010 xxxx 1111 xxxx 000x xxxx */
	/* SMULWy		1111 1011 0011 xxxx 1111 xxxx 000x xxxx */
	/* SMUSD{X}		1111 1011 0100 xxxx 1111 xxxx 000x xxxx */
	/* SMMUL{R}		1111 1011 0101 xxxx 1111 xxxx 000x xxxx */
	/* USAD8		1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff80f0e0, 0xfb00f000, t32_emulate_rd8rn16rm0_rwflags,
						 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),

	/* ???			1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */
	DECODE_REJECT	(0xfff000f0, 0xfb700010),

	/* SMLAxy		1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */
	DECODE_OR	(0xfff000c0, 0xfb100000),
	/* MLA			1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */
	/* MLS			1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */
	/* SMLAD{X}		1111 1011 0010 xxxx xxxx xxxx 000x xxxx */
	/* SMLAWy		1111 1011 0011 xxxx xxxx xxxx 000x xxxx */
	/* SMLSD{X}		1111 1011 0100 xxxx xxxx xxxx 000x xxxx */
	/* SMMLA{R}		1111 1011 0101 xxxx xxxx xxxx 000x xxxx */
	/* SMMLS{R}		1111 1011 0110 xxxx xxxx xxxx 000x xxxx */
	/* USADA8		1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff8000c0, 0xfb000000, t32_emulate_rd8rn16rm0ra12_noflags,
						 REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)),

	/* Other unallocated instructions...				*/
	DECODE_END
};
834
static const union decode_item t32_table_1111_1011_1[] = {
	/* Long multiply, long multiply accumulate, and divide */

	/* UMAAL		1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */
	DECODE_OR	(0xfff000f0, 0xfbe00060),
	/* SMLALxy		1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */
	DECODE_OR	(0xfff000c0, 0xfbc00080),
	/* SMLALD{X}		1111 1011 1100 xxxx xxxx xxxx 110x xxxx */
	/* SMLSLD{X}		1111 1011 1101 xxxx xxxx xxxx 110x xxxx */
	DECODE_OR	(0xffe000e0, 0xfbc000c0),
	/* SMULL		1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */
	/* UMULL		1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */
	/* SMLAL		1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */
	/* UMLAL		1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */
	DECODE_EMULATEX	(0xff9000f0, 0xfb800000, t32_emulate_rdlo12rdhi8rn16rm0_noflags,
						 REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)),

	/* SDIV and UDIV fall through to DECODE_END and are not probed: */
	/* SDIV			1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */
	/* UDIV			1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */
	/* Other unallocated instructions...				*/
	DECODE_END
};
857
/*
 * Top-level decode table for 32-bit Thumb instructions.  Each entry
 * dispatches a major opcode group to its sub-table; anything that falls
 * through to the final DECODE_END (including the coprocessor space noted
 * at the bottom) is not probed.
 */
const union decode_item kprobe_decode_thumb32_table[] = {

	/*
	 * Load/store multiple instructions
	 *			1110 100x x0xx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx),

	/*
	 * Load/store dual, load/store exclusive, table branch
	 *			1110 100x x1xx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx),

	/*
	 * Data-processing (shifted register)
	 *			1110 101x xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe000000, 0xea000000, t32_table_1110_101x),

	/*
	 * Coprocessor instructions
	 *			1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_REJECT	(0xfc000000, 0xec000000),

	/*
	 * Data-processing (modified immediate)
	 *			1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfa008000, 0xf0000000, t32_table_1111_0x0x___0),

	/*
	 * Data-processing (plain binary immediate)
	 *			1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfa008000, 0xf2000000, t32_table_1111_0x1x___0),

	/*
	 * Branches and miscellaneous control
	 *			1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xf8008000, 0xf0008000, t32_table_1111_0xxx___1),

	/*
	 * Advanced SIMD element or structure load/store instructions
	 *			1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_REJECT	(0xff100000, 0xf9000000),

	/*
	 * Memory hints
	 *			1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111),

	/*
	 * Store single data item
	 *			1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx
	 * Load single data items
	 *			1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xfe000000, 0xf8000000, t32_table_1111_100x),

	/*
	 * Data-processing (register)
	 *			1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xff00f000, 0xfa00f000, t32_table_1111_1010___1111),

	/*
	 * Multiply, multiply accumulate, and absolute difference
	 *			1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xff800000, 0xfb000000, t32_table_1111_1011_0),

	/*
	 * Long multiply, long multiply accumulate, and divide
	 *			1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xff800000, 0xfb800000, t32_table_1111_1011_1),

	/*
	 * Coprocessor instructions
	 *			1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx
	 */
	DECODE_END
};
946
/*
 * Simulate the 16-bit BX/BLX (register) instructions.  Rm is in bits
 * 3-6; an Rm of PC uses the probe's PC value.  For BLX (bit 7 set) LR
 * is set to the address of the following instruction (p->addr + 2).
 * NOTE(review): no Thumb bit is ORed into LR here — presumably p->addr
 * already carries it; confirm against how probe addresses are stored.
 */
static void __kprobes
t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p);
	int rm = (insn >> 3) & 0xf;
	unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];

	if (insn & (1 << 7)) /* BLX ? */
		regs->ARM_lr = (unsigned long)p->addr + 2;

	bx_write_pc(rmv, regs);	/* Interworking branch to target */
}
960
961static void __kprobes
962t16_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
963{
964 kprobe_opcode_t insn = p->opcode;
965 unsigned long* base = (unsigned long *)(thumb_probe_pc(p) & ~3);
966 long index = insn & 0xff;
967 int rt = (insn >> 8) & 0x7;
968 regs->uregs[rt] = base[index];
969}
970
971static void __kprobes
972t16_simulate_ldrstr_sp_relative(struct kprobe *p, struct pt_regs *regs)
973{
974 kprobe_opcode_t insn = p->opcode;
975 unsigned long* base = (unsigned long *)regs->ARM_sp;
976 long index = insn & 0xff;
977 int rt = (insn >> 8) & 0x7;
978 if (insn & 0x800) /* LDR */
979 regs->uregs[rt] = base[index];
980 else /* STR */
981 base[index] = regs->uregs[rt];
982}
983
984static void __kprobes
985t16_simulate_reladr(struct kprobe *p, struct pt_regs *regs)
986{
987 kprobe_opcode_t insn = p->opcode;
988 unsigned long base = (insn & 0x800) ? regs->ARM_sp
989 : (thumb_probe_pc(p) & ~3);
990 long offset = insn & 0xff;
991 int rt = (insn >> 8) & 0x7;
992 regs->uregs[rt] = base + offset * 4;
993}
994
995static void __kprobes
996t16_simulate_add_sp_imm(struct kprobe *p, struct pt_regs *regs)
997{
998 kprobe_opcode_t insn = p->opcode;
999 long imm = insn & 0x7f;
1000 if (insn & 0x80) /* SUB */
1001 regs->ARM_sp -= imm * 4;
1002 else /* ADD */
1003 regs->ARM_sp += imm * 4;
1004}
1005
1006static void __kprobes
1007t16_simulate_cbz(struct kprobe *p, struct pt_regs *regs)
1008{
1009 kprobe_opcode_t insn = p->opcode;
1010 int rn = insn & 0x7;
1011 kprobe_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
1012 if (nonzero & 0x800) {
1013 long i = insn & 0x200;
1014 long imm5 = insn & 0xf8;
1015 unsigned long pc = thumb_probe_pc(p);
1016 regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
1017 }
1018}
1019
1020static void __kprobes
1021t16_simulate_it(struct kprobe *p, struct pt_regs *regs)
1022{
1023 /*
1024 * The 8 IT state bits are split into two parts in CPSR:
1025 * ITSTATE<1:0> are in CPSR<26:25>
1026 * ITSTATE<7:2> are in CPSR<15:10>
1027 * The new IT state is in the lower byte of insn.
1028 */
1029 kprobe_opcode_t insn = p->opcode;
1030 unsigned long cpsr = regs->ARM_cpsr;
1031 cpsr &= ~PSR_IT_MASK;
1032 cpsr |= (insn & 0xfc) << 8;
1033 cpsr |= (insn & 0x03) << 25;
1034 regs->ARM_cpsr = cpsr;
1035}
1036
1037static void __kprobes
1038t16_singlestep_it(struct kprobe *p, struct pt_regs *regs)
1039{
1040 regs->ARM_pc += 2;
1041 t16_simulate_it(p, regs);
1042}
1043
/*
 * Decode hook for IT: the instruction is always simulated (no slot is
 * needed), using the dedicated single-step handler which both advances
 * the PC and updates ITSTATE.
 */
static enum kprobe_insn __kprobes
t16_decode_it(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	asi->insn_singlestep = t16_singlestep_it;
	return INSN_GOOD_NO_SLOT;
}
1050
1051static void __kprobes
1052t16_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
1053{
1054 kprobe_opcode_t insn = p->opcode;
1055 unsigned long pc = thumb_probe_pc(p);
1056 long offset = insn & 0x7f;
1057 offset -= insn & 0x80; /* Apply sign bit */
1058 regs->ARM_pc = pc + (offset * 2);
1059}
1060
/*
 * Decode hook for B<cc>: extract the condition field (bits 8-11),
 * install the matching condition-check function and the branch
 * simulation handler.  No instruction slot is required.
 */
static enum kprobe_insn __kprobes
t16_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	int cc = (insn >> 8) & 0xf;
	asi->insn_check_cc = kprobe_condition_checks[cc];
	asi->insn_handler = t16_simulate_cond_branch;
	return INSN_GOOD_NO_SLOT;
}
1069
1070static void __kprobes
1071t16_simulate_branch(struct kprobe *p, struct pt_regs *regs)
1072{
1073 kprobe_opcode_t insn = p->opcode;
1074 unsigned long pc = thumb_probe_pc(p);
1075 long offset = insn & 0x3ff;
1076 offset -= insn & 0x400; /* Apply sign bit */
1077 regs->ARM_pc = pc + (offset * 2);
1078}
1079
/*
 * Execute the probed instruction (the slot copy at ainsn.insn_fn) with
 * the task's r0-r7 and flags loaded from pt_regs, then store the
 * resulting r0-r7 back.  Returns the CPSR to resume with: the original
 * value with only the APSR flag bits replaced by those the instruction
 * produced.
 */
static unsigned long __kprobes
t16_emulate_loregs(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long oldcpsr = regs->ARM_cpsr;
	unsigned long newcpsr;

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[oldcpsr]	\n\t" /* restore task flags */
		"ldmia	%[regs], {r0-r7}	\n\t" /* load task r0-r7 */
		"blx	%[fn]			\n\t" /* run slot instruction */
		"stmia	%[regs], {r0-r7}	\n\t" /* store results */
		"mrs	%[newcpsr], cpsr	\n\t" /* capture new flags */
		: [newcpsr] "=r" (newcpsr)
		: [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
		  [fn] "r" (p->ainsn.insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
		  "lr", "memory", "cc"
		);

	return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK);
}
1101
/* Emulate via t16_emulate_loregs and always write back the updated flags. */
static void __kprobes
t16_emulate_loregs_rwflags(struct kprobe *p, struct pt_regs *regs)
{
	regs->ARM_cpsr = t16_emulate_loregs(p, regs);
}
1107
/*
 * As t16_emulate_loregs_rwflags, except the new flags are discarded when
 * inside an IT block (these 16-bit encodings only set flags when executed
 * unconditionally, i.e. outside an IT block).
 */
static void __kprobes
t16_emulate_loregs_noitrwflags(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long cpsr = t16_emulate_loregs(p, regs);
	if (!in_it_block(cpsr))
		regs->ARM_cpsr = cpsr;
}
1115
/*
 * Emulate ADD/CMP/MOV with high registers.  The slot instruction was
 * rewritten by t16_decode_hiregs to use Rdn=r1, Rm=r0, so the real
 * operand values (including a substituted PC value for r15) are
 * marshalled through those two pinned registers.
 */
static void __kprobes
t16_emulate_hiregs(struct kprobe *p, struct pt_regs *regs)
{
	kprobe_opcode_t insn = p->opcode;
	unsigned long pc = thumb_probe_pc(p);
	int rdn = (insn & 0x7) | ((insn & 0x80) >> 4);	/* Rdn = DN:Rdn<2:0> */
	int rm = (insn >> 3) & 0xf;

	register unsigned long rdnv asm("r1");
	register unsigned long rmv asm("r0");
	unsigned long cpsr = regs->ARM_cpsr;

	/* Substitute the architectural PC value when either operand is r15 */
	rdnv = (rdn == 15) ? pc : regs->uregs[rdn];
	rmv = (rm == 15) ? pc : regs->uregs[rm];

	__asm__ __volatile__ (
		"msr	cpsr_fs, %[cpsr]	\n\t" /* restore task flags */
		"blx	%[fn]			\n\t" /* run slot instruction */
		"mrs	%[cpsr], cpsr		\n\t" /* capture new flags */
		: "=r" (rdnv), [cpsr] "=r" (cpsr)
		: "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
		: "lr", "memory", "cc"
		);

	if (rdn == 15)
		rdnv &= ~1;	/* clear Thumb bit from a PC-destined result */

	regs->uregs[rdn] = rdnv;
	regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
}
1146
static enum kprobe_insn __kprobes
t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	insn &= ~0x00ff;	/* Clear register fields: DN, Rm<3:0>, Rdn<2:0> */
	insn |= 0x001;		/* Set Rdn = R1 and Rm = R0 */
	((u16 *)asi->insn)[0] = insn;
	asi->insn_handler = t16_emulate_hiregs;
	return INSN_GOOD;
}
1156
/*
 * Run the "STMDB R9!, {registers}" slot instruction built by
 * t16_decode_push, with r9 holding the task's SP and r8 holding LR
 * (LR sits in the R8 position of the register list), then write the
 * decremented SP back to pt_regs.
 */
static void __kprobes
t16_emulate_push(struct kprobe *p, struct pt_regs *regs)
{
	__asm__ __volatile__ (
		"ldr	r9, [%[regs], #13*4]	\n\t" /* r9 = task sp */
		"ldr	r8, [%[regs], #14*4]	\n\t" /* r8 = task lr */
		"ldmia	%[regs], {r0-r7}	\n\t" /* load task r0-r7 */
		"blx	%[fn]			\n\t" /* run slot STMDB */
		"str	r9, [%[regs], #13*4]	\n\t" /* write back new sp */
		:
		: [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
		  "lr", "memory", "cc"
		);
}
1172
static enum kprobe_insn __kprobes
t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/*
	 * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}"
	 * and call it with R9=SP and LR in the register list represented
	 * by R8.  (Bit 8 of the PUSH encoding, the 'M' bit, lands in the
	 * R8 position of the Thumb-2 register list.)
	 */
	((u16 *)asi->insn)[0] = 0xe929;		/* 1st half STMDB R9!,{} */
	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
	asi->insn_handler = t16_emulate_push;
	return INSN_GOOD;
}
1186
/*
 * Run the "LDMIA R9!, {registers}" slot instruction built by
 * t16_decode_pop for a POP whose register list does NOT include PC:
 * r9 carries the task's SP; the loaded r0-r7 and the incremented SP
 * are written back to pt_regs.
 */
static void __kprobes
t16_emulate_pop_nopc(struct kprobe *p, struct pt_regs *regs)
{
	__asm__ __volatile__ (
		"ldr	r9, [%[regs], #13*4]	\n\t" /* r9 = task sp */
		"ldmia	%[regs], {r0-r7}	\n\t" /* load task r0-r7 */
		"blx	%[fn]			\n\t" /* run slot LDMIA */
		"stmia	%[regs], {r0-r7}	\n\t" /* store results */
		"str	r9, [%[regs], #13*4]	\n\t" /* write back new sp */
		:
		: [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
		  "lr", "memory", "cc"
		);
}
1202
/*
 * As t16_emulate_pop_nopc, but the POP's register list includes PC,
 * which the slot instruction loads into r8 (the R8 list position);
 * bx_write_pc() then performs the interworking branch to that value.
 */
static void __kprobes
t16_emulate_pop_pc(struct kprobe *p, struct pt_regs *regs)
{
	register unsigned long pc asm("r8");

	__asm__ __volatile__ (
		"ldr	r9, [%[regs], #13*4]	\n\t" /* r9 = task sp */
		"ldmia	%[regs], {r0-r7}	\n\t" /* load task r0-r7 */
		"blx	%[fn]			\n\t" /* run slot LDMIA */
		"stmia	%[regs], {r0-r7}	\n\t" /* store results */
		"str	r9, [%[regs], #13*4]	\n\t" /* write back new sp */
		: "=r" (pc)
		: [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
		: "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
		  "lr", "memory", "cc"
		);

	bx_write_pc(pc, regs);
}
1222
static enum kprobe_insn __kprobes
t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/*
	 * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}"
	 * and call it with R9=SP and PC in the register list represented
	 * by R8.  (Bit 8 of the POP encoding, the 'P' bit, lands in the
	 * R8 position of the Thumb-2 register list.)
	 */
	((u16 *)asi->insn)[0] = 0xe8b9;		/* 1st half LDMIA R9!,{} */
	((u16 *)asi->insn)[1] = insn & 0x1ff;	/* 2nd half (register list) */
	asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
					 : t16_emulate_pop_nopc;
	return INSN_GOOD;
}
1237
static const union decode_item t16_table_1011[] = {
	/* Miscellaneous 16-bit instructions */
	/*
	 * Entries are matched in order, so rejects must come before any
	 * overlapping emulate/simulate entry (e.g. 0xba80 before 0xb000).
	 */

	/* ADD (SP plus immediate)	1011 0000 0xxx xxxx */
	/* SUB (SP minus immediate)	1011 0000 1xxx xxxx */
	DECODE_SIMULATE	(0xff00, 0xb000, t16_simulate_add_sp_imm),

	/* CBZ				1011 00x1 xxxx xxxx */
	/* CBNZ				1011 10x1 xxxx xxxx */
	DECODE_SIMULATE	(0xf500, 0xb100, t16_simulate_cbz),

	/* SXTH				1011 0010 00xx xxxx */
	/* SXTB				1011 0010 01xx xxxx */
	/* UXTH				1011 0010 10xx xxxx */
	/* UXTB				1011 0010 11xx xxxx */
	/* REV				1011 1010 00xx xxxx */
	/* REV16			1011 1010 01xx xxxx */
	/* ???				1011 1010 10xx xxxx */
	/* REVSH			1011 1010 11xx xxxx */
	DECODE_REJECT	(0xffc0, 0xba80),
	DECODE_EMULATE	(0xf500, 0xb000, t16_emulate_loregs_rwflags),

	/* PUSH				1011 010x xxxx xxxx */
	DECODE_CUSTOM	(0xfe00, 0xb400, t16_decode_push),
	/* POP				1011 110x xxxx xxxx */
	DECODE_CUSTOM	(0xfe00, 0xbc00, t16_decode_pop),

	/*
	 * If-Then, and hints
	 *				1011 1111 xxxx xxxx
	 */

	/* YIELD			1011 1111 0001 0000 */
	DECODE_OR	(0xffff, 0xbf10),
	/* SEV				1011 1111 0100 0000 */
	DECODE_EMULATE	(0xffff, 0xbf40, kprobe_emulate_none),
	/* NOP				1011 1111 0000 0000 */
	/* WFE				1011 1111 0010 0000 */
	/* WFI				1011 1111 0011 0000 */
	DECODE_SIMULATE	(0xffcf, 0xbf00, kprobe_simulate_nop),
	/* Unassigned hints		1011 1111 xxxx 0000 */
	DECODE_REJECT	(0xff0f, 0xbf00),
	/* IT				1011 1111 xxxx xxxx */
	DECODE_CUSTOM	(0xff00, 0xbf00, t16_decode_it),

	/* SETEND			1011 0110 010x xxxx */
	/* CPS				1011 0110 011x xxxx */
	/* BKPT				1011 1110 xxxx xxxx */
	/* And unallocated instructions... */
	DECODE_END
};
1289
const union decode_item kprobe_decode_thumb16_table[] = {
	/*
	 * Top-level decoding table for 16-bit Thumb instructions.
	 * Entries are matched in order, so rejects and more-specific
	 * patterns must precede overlapping general entries.
	 */

	/*
	 * Shift (immediate), add, subtract, move, and compare
	 *				00xx xxxx xxxx xxxx
	 */

	/* CMP (immediate)		0010 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xf800, 0x2800, t16_emulate_loregs_rwflags),

	/* ADD (register)		0001 100x xxxx xxxx */
	/* SUB (register)		0001 101x xxxx xxxx */
	/* LSL (immediate)		0000 0xxx xxxx xxxx */
	/* LSR (immediate)		0000 1xxx xxxx xxxx */
	/* ASR (immediate)		0001 0xxx xxxx xxxx */
	/* ADD (immediate, Thumb)	0001 110x xxxx xxxx */
	/* SUB (immediate, Thumb)	0001 111x xxxx xxxx */
	/* MOV (immediate)		0010 0xxx xxxx xxxx */
	/* ADD (immediate, Thumb)	0011 0xxx xxxx xxxx */
	/* SUB (immediate, Thumb)	0011 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xc000, 0x0000, t16_emulate_loregs_noitrwflags),

	/*
	 * 16-bit Thumb data-processing instructions
	 *				0100 00xx xxxx xxxx
	 */

	/* TST (register)		0100 0010 00xx xxxx */
	DECODE_EMULATE	(0xffc0, 0x4200, t16_emulate_loregs_rwflags),
	/* CMP (register)		0100 0010 10xx xxxx */
	/* CMN (register)		0100 0010 11xx xxxx */
	DECODE_EMULATE	(0xff80, 0x4280, t16_emulate_loregs_rwflags),
	/* AND (register)		0100 0000 00xx xxxx */
	/* EOR (register)		0100 0000 01xx xxxx */
	/* LSL (register)		0100 0000 10xx xxxx */
	/* LSR (register)		0100 0000 11xx xxxx */
	/* ASR (register)		0100 0001 00xx xxxx */
	/* ADC (register)		0100 0001 01xx xxxx */
	/* SBC (register)		0100 0001 10xx xxxx */
	/* ROR (register)		0100 0001 11xx xxxx */
	/* RSB (immediate)		0100 0010 01xx xxxx */
	/* ORR (register)		0100 0011 00xx xxxx */
	/* MUL				0100 0011 01xx xxxx */
	/* BIC (register)		0100 0011 10xx xxxx */
	/* MVN (register)		0100 0011 11xx xxxx */
	DECODE_EMULATE	(0xfc00, 0x4000, t16_emulate_loregs_noitrwflags),

	/*
	 * Special data instructions and branch and exchange
	 *				0100 01xx xxxx xxxx
	 */

	/* BLX pc			0100 0111 1111 1xxx */
	DECODE_REJECT	(0xfff8, 0x47f8),

	/* BX (register)		0100 0111 0xxx xxxx */
	/* BLX (register)		0100 0111 1xxx xxxx */
	DECODE_SIMULATE	(0xff00, 0x4700, t16_simulate_bxblx),

	/* ADD pc, pc			0100 0100 1111 1111 */
	DECODE_REJECT	(0xffff, 0x44ff),

	/* ADD (register)		0100 0100 xxxx xxxx */
	/* CMP (register)		0100 0101 xxxx xxxx */
	/* MOV (register)		0100 0110 xxxx xxxx */
	DECODE_CUSTOM	(0xfc00, 0x4400, t16_decode_hiregs),

	/*
	 * Load from Literal Pool
	 * LDR (literal)		0100 1xxx xxxx xxxx
	 */
	DECODE_SIMULATE	(0xf800, 0x4800, t16_simulate_ldr_literal),

	/*
	 * 16-bit Thumb Load/store instructions
	 *				0101 xxxx xxxx xxxx
	 *				011x xxxx xxxx xxxx
	 *				100x xxxx xxxx xxxx
	 */

	/* STR (register)		0101 000x xxxx xxxx */
	/* STRH (register)		0101 001x xxxx xxxx */
	/* STRB (register)		0101 010x xxxx xxxx */
	/* LDRSB (register)		0101 011x xxxx xxxx */
	/* LDR (register)		0101 100x xxxx xxxx */
	/* LDRH (register)		0101 101x xxxx xxxx */
	/* LDRB (register)		0101 110x xxxx xxxx */
	/* LDRSH (register)		0101 111x xxxx xxxx */
	/* STR (immediate, Thumb)	0110 0xxx xxxx xxxx */
	/* LDR (immediate, Thumb)	0110 1xxx xxxx xxxx */
	/* STRB (immediate, Thumb)	0111 0xxx xxxx xxxx */
	/* LDRB (immediate, Thumb)	0111 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xc000, 0x4000, t16_emulate_loregs_rwflags),
	/* STRH (immediate, Thumb)	1000 0xxx xxxx xxxx */
	/* LDRH (immediate, Thumb)	1000 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xf000, 0x8000, t16_emulate_loregs_rwflags),
	/* STR (immediate, Thumb)	1001 0xxx xxxx xxxx */
	/* LDR (immediate, Thumb)	1001 1xxx xxxx xxxx */
	DECODE_SIMULATE	(0xf000, 0x9000, t16_simulate_ldrstr_sp_relative),

	/*
	 * Generate PC-/SP-relative address
	 * ADR (literal)		1010 0xxx xxxx xxxx
	 * ADD (SP plus immediate)	1010 1xxx xxxx xxxx
	 */
	DECODE_SIMULATE	(0xf000, 0xa000, t16_simulate_reladr),

	/*
	 * Miscellaneous 16-bit instructions
	 *				1011 xxxx xxxx xxxx
	 */
	DECODE_TABLE	(0xf000, 0xb000, t16_table_1011),

	/* STM				1100 0xxx xxxx xxxx */
	/* LDM				1100 1xxx xxxx xxxx */
	DECODE_EMULATE	(0xf000, 0xc000, t16_emulate_loregs_rwflags),

	/*
	 * Conditional branch, and Supervisor Call
	 */

	/* Permanently UNDEFINED	1101 1110 xxxx xxxx */
	/* SVC				1101 1111 xxxx xxxx */
	DECODE_REJECT	(0xfe00, 0xde00),

	/* Conditional branch		1101 xxxx xxxx xxxx */
	DECODE_CUSTOM	(0xf000, 0xd000, t16_decode_cond_branch),

	/*
	 * Unconditional branch
	 * B				1110 0xxx xxxx xxxx
	 */
	DECODE_SIMULATE	(0xf800, 0xe000, t16_simulate_branch),

	DECODE_END
};
1426
1427static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
1428{
1429 if (unlikely(in_it_block(cpsr)))
1430 return kprobe_condition_checks[current_cond(cpsr)](cpsr);
1431 return true;
1432}
1433
/*
 * Generic single-step for a 16-bit Thumb instruction: advance PC past
 * the opcode, run the handler, then advance any in-progress IT state.
 */
static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->ARM_pc += 2;
	p->ainsn.insn_handler(p, regs);
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
}
1440
/*
 * Generic single-step for a 32-bit Thumb instruction: advance PC past
 * the opcode, run the handler, then advance any in-progress IT state.
 */
static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	regs->ARM_pc += 4;
	p->ainsn.insn_handler(p, regs);
	regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
}
1447
/*
 * Decode a 16-bit Thumb instruction for probing.  Installs the default
 * single-step and condition-check callbacks (a custom decoder in the
 * table may override insn_singlestep) and dispatches to the common
 * table-driven decoder.
 */
enum kprobe_insn __kprobes
thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	asi->insn_singlestep = thumb16_singlestep;
	asi->insn_check_cc = thumb_check_cc;
	return kprobe_decode_insn(insn, asi, kprobe_decode_thumb16_table, true);
}
1455
/*
 * Decode a 32-bit Thumb instruction for probing.  Installs the default
 * single-step and condition-check callbacks and dispatches to the common
 * table-driven decoder with the 32-bit decode table.
 */
enum kprobe_insn __kprobes
thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	asi->insn_singlestep = thumb32_singlestep;
	asi->insn_check_cc = thumb_check_cc;
	return kprobe_decode_insn(insn, asi, kprobe_decode_thumb32_table, true);
}
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 1656c87501c..129c1163248 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -28,14 +28,16 @@
28#include <asm/traps.h> 28#include <asm/traps.h>
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30 30
31#include "kprobes.h"
32
31#define MIN_STACK_SIZE(addr) \ 33#define MIN_STACK_SIZE(addr) \
32 min((unsigned long)MAX_STACK_SIZE, \ 34 min((unsigned long)MAX_STACK_SIZE, \
33 (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) 35 (unsigned long)current_thread_info() + THREAD_START_SP - (addr))
34 36
35#define flush_insns(addr, cnt) \ 37#define flush_insns(addr, size) \
36 flush_icache_range((unsigned long)(addr), \ 38 flush_icache_range((unsigned long)(addr), \
37 (unsigned long)(addr) + \ 39 (unsigned long)(addr) + \
38 sizeof(kprobe_opcode_t) * (cnt)) 40 (size))
39 41
40/* Used as a marker in ARM_pc to note when we're in a jprobe. */ 42/* Used as a marker in ARM_pc to note when we're in a jprobe. */
41#define JPROBE_MAGIC_ADDR 0xffffffff 43#define JPROBE_MAGIC_ADDR 0xffffffff
@@ -49,16 +51,35 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
49 kprobe_opcode_t insn; 51 kprobe_opcode_t insn;
50 kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; 52 kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
51 unsigned long addr = (unsigned long)p->addr; 53 unsigned long addr = (unsigned long)p->addr;
54 bool thumb;
55 kprobe_decode_insn_t *decode_insn;
52 int is; 56 int is;
53 57
54 if (addr & 0x3 || in_exception_text(addr)) 58 if (in_exception_text(addr))
55 return -EINVAL; 59 return -EINVAL;
56 60
61#ifdef CONFIG_THUMB2_KERNEL
62 thumb = true;
63 addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
64 insn = ((u16 *)addr)[0];
65 if (is_wide_instruction(insn)) {
66 insn <<= 16;
67 insn |= ((u16 *)addr)[1];
68 decode_insn = thumb32_kprobe_decode_insn;
69 } else
70 decode_insn = thumb16_kprobe_decode_insn;
71#else /* !CONFIG_THUMB2_KERNEL */
72 thumb = false;
73 if (addr & 0x3)
74 return -EINVAL;
57 insn = *p->addr; 75 insn = *p->addr;
76 decode_insn = arm_kprobe_decode_insn;
77#endif
78
58 p->opcode = insn; 79 p->opcode = insn;
59 p->ainsn.insn = tmp_insn; 80 p->ainsn.insn = tmp_insn;
60 81
61 switch (arm_kprobe_decode_insn(insn, &p->ainsn)) { 82 switch ((*decode_insn)(insn, &p->ainsn)) {
62 case INSN_REJECTED: /* not supported */ 83 case INSN_REJECTED: /* not supported */
63 return -EINVAL; 84 return -EINVAL;
64 85
@@ -68,7 +89,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
68 return -ENOMEM; 89 return -ENOMEM;
69 for (is = 0; is < MAX_INSN_SIZE; ++is) 90 for (is = 0; is < MAX_INSN_SIZE; ++is)
70 p->ainsn.insn[is] = tmp_insn[is]; 91 p->ainsn.insn[is] = tmp_insn[is];
71 flush_insns(p->ainsn.insn, MAX_INSN_SIZE); 92 flush_insns(p->ainsn.insn,
93 sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
94 p->ainsn.insn_fn = (kprobe_insn_fn_t *)
95 ((uintptr_t)p->ainsn.insn | thumb);
72 break; 96 break;
73 97
74 case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ 98 case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
@@ -79,24 +103,88 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
79 return 0; 103 return 0;
80} 104}
81 105
106#ifdef CONFIG_THUMB2_KERNEL
107
108/*
109 * For a 32-bit Thumb breakpoint spanning two memory words we need to take
110 * special precautions to insert the breakpoint atomically, especially on SMP
111 * systems. This is achieved by calling this arming function using stop_machine.
112 */
113static int __kprobes set_t32_breakpoint(void *addr)
114{
115 ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
116 ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
117 flush_insns(addr, 2*sizeof(u16));
118 return 0;
119}
120
121void __kprobes arch_arm_kprobe(struct kprobe *p)
122{
123 uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
124
125 if (!is_wide_instruction(p->opcode)) {
126 *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
127 flush_insns(addr, sizeof(u16));
128 } else if (addr & 2) {
129 /* A 32-bit instruction spanning two words needs special care */
130 stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
131 } else {
132 /* Word aligned 32-bit instruction can be written atomically */
133 u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
134#ifndef __ARMEB__ /* Swap halfwords for little-endian */
135 bkp = (bkp >> 16) | (bkp << 16);
136#endif
137 *(u32 *)addr = bkp;
138 flush_insns(addr, sizeof(u32));
139 }
140}
141
142#else /* !CONFIG_THUMB2_KERNEL */
143
82void __kprobes arch_arm_kprobe(struct kprobe *p) 144void __kprobes arch_arm_kprobe(struct kprobe *p)
83{ 145{
84 *p->addr = KPROBE_BREAKPOINT_INSTRUCTION; 146 kprobe_opcode_t insn = p->opcode;
85 flush_insns(p->addr, 1); 147 kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
148 if (insn >= 0xe0000000)
149 brkp |= 0xe0000000; /* Unconditional instruction */
150 else
151 brkp |= insn & 0xf0000000; /* Copy condition from insn */
152 *p->addr = brkp;
153 flush_insns(p->addr, sizeof(p->addr[0]));
86} 154}
87 155
156#endif /* !CONFIG_THUMB2_KERNEL */
157
88/* 158/*
89 * The actual disarming is done here on each CPU and synchronized using 159 * The actual disarming is done here on each CPU and synchronized using
90 * stop_machine. This synchronization is necessary on SMP to avoid removing 160 * stop_machine. This synchronization is necessary on SMP to avoid removing
91 * a probe between the moment the 'Undefined Instruction' exception is raised 161 * a probe between the moment the 'Undefined Instruction' exception is raised
92 * and the moment the exception handler reads the faulting instruction from 162 * and the moment the exception handler reads the faulting instruction from
93 * memory. 163 * memory. It is also needed to atomically set the two half-words of a 32-bit
164 * Thumb breakpoint.
94 */ 165 */
95int __kprobes __arch_disarm_kprobe(void *p) 166int __kprobes __arch_disarm_kprobe(void *p)
96{ 167{
97 struct kprobe *kp = p; 168 struct kprobe *kp = p;
169#ifdef CONFIG_THUMB2_KERNEL
170 u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
171 kprobe_opcode_t insn = kp->opcode;
172 unsigned int len;
173
174 if (is_wide_instruction(insn)) {
175 ((u16 *)addr)[0] = insn>>16;
176 ((u16 *)addr)[1] = insn;
177 len = 2*sizeof(u16);
178 } else {
179 ((u16 *)addr)[0] = insn;
180 len = sizeof(u16);
181 }
182 flush_insns(addr, len);
183
184#else /* !CONFIG_THUMB2_KERNEL */
98 *kp->addr = kp->opcode; 185 *kp->addr = kp->opcode;
99 flush_insns(kp->addr, 1); 186 flush_insns(kp->addr, sizeof(kp->addr[0]));
187#endif
100 return 0; 188 return 0;
101} 189}
102 190
@@ -130,12 +218,24 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
130 __get_cpu_var(current_kprobe) = p; 218 __get_cpu_var(current_kprobe) = p;
131} 219}
132 220
133static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, 221static void __kprobes
134 struct kprobe_ctlblk *kcb) 222singlestep_skip(struct kprobe *p, struct pt_regs *regs)
135{ 223{
224#ifdef CONFIG_THUMB2_KERNEL
225 regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
226 if (is_wide_instruction(p->opcode))
227 regs->ARM_pc += 4;
228 else
229 regs->ARM_pc += 2;
230#else
136 regs->ARM_pc += 4; 231 regs->ARM_pc += 4;
137 if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) 232#endif
138 p->ainsn.insn_handler(p, regs); 233}
234
235static inline void __kprobes
236singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
237{
238 p->ainsn.insn_singlestep(p, regs);
139} 239}
140 240
141/* 241/*
@@ -149,11 +249,23 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
149{ 249{
150 struct kprobe *p, *cur; 250 struct kprobe *p, *cur;
151 struct kprobe_ctlblk *kcb; 251 struct kprobe_ctlblk *kcb;
152 kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc;
153 252
154 kcb = get_kprobe_ctlblk(); 253 kcb = get_kprobe_ctlblk();
155 cur = kprobe_running(); 254 cur = kprobe_running();
156 p = get_kprobe(addr); 255
256#ifdef CONFIG_THUMB2_KERNEL
257 /*
258 * First look for a probe which was registered using an address with
259 * bit 0 set, this is the usual situation for pointers to Thumb code.
260 * If not found, fallback to looking for one with bit 0 clear.
261 */
262 p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
263 if (!p)
264 p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
265
266#else /* ! CONFIG_THUMB2_KERNEL */
267 p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
268#endif
157 269
158 if (p) { 270 if (p) {
159 if (cur) { 271 if (cur) {
@@ -173,7 +285,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
173 /* impossible cases */ 285 /* impossible cases */
174 BUG(); 286 BUG();
175 } 287 }
176 } else { 288 } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
289 /* Probe hit and conditional execution check ok. */
177 set_current_kprobe(p); 290 set_current_kprobe(p);
178 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 291 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
179 292
@@ -193,6 +306,13 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
193 } 306 }
194 reset_current_kprobe(); 307 reset_current_kprobe();
195 } 308 }
309 } else {
310 /*
311 * Probe hit but conditional execution check failed,
312 * so just skip the instruction and continue as if
313 * nothing had happened.
314 */
315 singlestep_skip(p, regs);
196 } 316 }
197 } else if (cur) { 317 } else if (cur) {
198 /* We probably hit a jprobe. Call its break handler. */ 318 /* We probably hit a jprobe. Call its break handler. */
@@ -300,7 +420,11 @@ void __naked __kprobes kretprobe_trampoline(void)
300 "bl trampoline_handler \n\t" 420 "bl trampoline_handler \n\t"
301 "mov lr, r0 \n\t" 421 "mov lr, r0 \n\t"
302 "ldmia sp!, {r0 - r11} \n\t" 422 "ldmia sp!, {r0 - r11} \n\t"
423#ifdef CONFIG_THUMB2_KERNEL
424 "bx lr \n\t"
425#else
303 "mov pc, lr \n\t" 426 "mov pc, lr \n\t"
427#endif
304 : : : "memory"); 428 : : : "memory");
305} 429}
306 430
@@ -378,11 +502,22 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
378 struct jprobe *jp = container_of(p, struct jprobe, kp); 502 struct jprobe *jp = container_of(p, struct jprobe, kp);
379 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 503 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
380 long sp_addr = regs->ARM_sp; 504 long sp_addr = regs->ARM_sp;
505 long cpsr;
381 506
382 kcb->jprobe_saved_regs = *regs; 507 kcb->jprobe_saved_regs = *regs;
383 memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); 508 memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
384 regs->ARM_pc = (long)jp->entry; 509 regs->ARM_pc = (long)jp->entry;
385 regs->ARM_cpsr |= PSR_I_BIT; 510
511 cpsr = regs->ARM_cpsr | PSR_I_BIT;
512#ifdef CONFIG_THUMB2_KERNEL
513 /* Set correct Thumb state in cpsr */
514 if (regs->ARM_pc & 1)
515 cpsr |= PSR_T_BIT;
516 else
517 cpsr &= ~PSR_T_BIT;
518#endif
519 regs->ARM_cpsr = cpsr;
520
386 preempt_disable(); 521 preempt_disable();
387 return 1; 522 return 1;
388} 523}
@@ -404,7 +539,12 @@ void __kprobes jprobe_return(void)
404 * This is to prevent any simulated instruction from writing 539 * This is to prevent any simulated instruction from writing
405 * over the regs when they are accessing the stack. 540 * over the regs when they are accessing the stack.
406 */ 541 */
542#ifdef CONFIG_THUMB2_KERNEL
543 "sub r0, %0, %1 \n\t"
544 "mov sp, r0 \n\t"
545#else
407 "sub sp, %0, %1 \n\t" 546 "sub sp, %0, %1 \n\t"
547#endif
408 "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" 548 "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
409 "str %0, [sp, %2] \n\t" 549 "str %0, [sp, %2] \n\t"
410 "str r0, [sp, %3] \n\t" 550 "str r0, [sp, %3] \n\t"
@@ -415,15 +555,28 @@ void __kprobes jprobe_return(void)
415 * Return to the context saved by setjmp_pre_handler 555 * Return to the context saved by setjmp_pre_handler
416 * and restored by longjmp_break_handler. 556 * and restored by longjmp_break_handler.
417 */ 557 */
558#ifdef CONFIG_THUMB2_KERNEL
559 "ldr lr, [sp, %2] \n\t" /* lr = saved sp */
560 "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */
561 "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */
562 "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */
563 /* rfe context */
564 "ldmia sp, {r0 - r12} \n\t"
565 "mov sp, lr \n\t"
566 "ldr lr, [sp], #4 \n\t"
567 "rfeia sp! \n\t"
568#else
418 "ldr r0, [sp, %4] \n\t" 569 "ldr r0, [sp, %4] \n\t"
419 "msr cpsr_cxsf, r0 \n\t" 570 "msr cpsr_cxsf, r0 \n\t"
420 "ldmia sp, {r0 - pc} \n\t" 571 "ldmia sp, {r0 - pc} \n\t"
572#endif
421 : 573 :
422 : "r" (kcb->jprobe_saved_regs.ARM_sp), 574 : "r" (kcb->jprobe_saved_regs.ARM_sp),
423 "I" (sizeof(struct pt_regs) * 2), 575 "I" (sizeof(struct pt_regs) * 2),
424 "J" (offsetof(struct pt_regs, ARM_sp)), 576 "J" (offsetof(struct pt_regs, ARM_sp)),
425 "J" (offsetof(struct pt_regs, ARM_pc)), 577 "J" (offsetof(struct pt_regs, ARM_pc)),
426 "J" (offsetof(struct pt_regs, ARM_cpsr)) 578 "J" (offsetof(struct pt_regs, ARM_cpsr)),
579 "J" (offsetof(struct pt_regs, ARM_lr))
427 : "memory", "cc"); 580 : "memory", "cc");
428} 581}
429 582
@@ -460,17 +613,44 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
460 return 0; 613 return 0;
461} 614}
462 615
463static struct undef_hook kprobes_break_hook = { 616#ifdef CONFIG_THUMB2_KERNEL
617
618static struct undef_hook kprobes_thumb16_break_hook = {
619 .instr_mask = 0xffff,
620 .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
621 .cpsr_mask = MODE_MASK,
622 .cpsr_val = SVC_MODE,
623 .fn = kprobe_trap_handler,
624};
625
626static struct undef_hook kprobes_thumb32_break_hook = {
464 .instr_mask = 0xffffffff, 627 .instr_mask = 0xffffffff,
465 .instr_val = KPROBE_BREAKPOINT_INSTRUCTION, 628 .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
466 .cpsr_mask = MODE_MASK, 629 .cpsr_mask = MODE_MASK,
467 .cpsr_val = SVC_MODE, 630 .cpsr_val = SVC_MODE,
468 .fn = kprobe_trap_handler, 631 .fn = kprobe_trap_handler,
469}; 632};
470 633
634#else /* !CONFIG_THUMB2_KERNEL */
635
636static struct undef_hook kprobes_arm_break_hook = {
637 .instr_mask = 0x0fffffff,
638 .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
639 .cpsr_mask = MODE_MASK,
640 .cpsr_val = SVC_MODE,
641 .fn = kprobe_trap_handler,
642};
643
644#endif /* !CONFIG_THUMB2_KERNEL */
645
471int __init arch_init_kprobes() 646int __init arch_init_kprobes()
472{ 647{
473 arm_kprobe_decode_init(); 648 arm_kprobe_decode_init();
474 register_undef_hook(&kprobes_break_hook); 649#ifdef CONFIG_THUMB2_KERNEL
650 register_undef_hook(&kprobes_thumb16_break_hook);
651 register_undef_hook(&kprobes_thumb32_break_hook);
652#else
653 register_undef_hook(&kprobes_arm_break_hook);
654#endif
475 return 0; 655 return 0;
476} 656}
diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h
new file mode 100644
index 00000000000..a6aeda0a6c7
--- /dev/null
+++ b/arch/arm/kernel/kprobes.h
@@ -0,0 +1,420 @@
1/*
2 * arch/arm/kernel/kprobes.h
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * Some contents moved here from arch/arm/include/asm/kprobes.h which is
7 * Copyright (C) 2006, 2007 Motorola Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#ifndef _ARM_KERNEL_KPROBES_H
20#define _ARM_KERNEL_KPROBES_H
21
22/*
23 * These undefined instructions must be unique and
24 * reserved solely for kprobes' use.
25 */
26#define KPROBE_ARM_BREAKPOINT_INSTRUCTION 0x07f001f8
27#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18
28#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018
29
30
31enum kprobe_insn {
32 INSN_REJECTED,
33 INSN_GOOD,
34 INSN_GOOD_NO_SLOT
35};
36
37typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t,
38 struct arch_specific_insn *);
39
40#ifdef CONFIG_THUMB2_KERNEL
41
42enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t,
43 struct arch_specific_insn *);
44enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t,
45 struct arch_specific_insn *);
46
47#else /* !CONFIG_THUMB2_KERNEL */
48
49enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
50 struct arch_specific_insn *);
51#endif
52
53void __init arm_kprobe_decode_init(void);
54
55extern kprobe_check_cc * const kprobe_condition_checks[16];
56
57
58#if __LINUX_ARM_ARCH__ >= 7
59
60/* str_pc_offset is architecturally defined from ARMv7 onwards */
61#define str_pc_offset 8
62#define find_str_pc_offset()
63
64#else /* __LINUX_ARM_ARCH__ < 7 */
65
66/* We need a run-time check to determine str_pc_offset */
67extern int str_pc_offset;
68void __init find_str_pc_offset(void);
69
70#endif
71
72
73/*
74 * Update ITSTATE after normal execution of an IT block instruction.
75 *
76 * The 8 IT state bits are split into two parts in CPSR:
77 * ITSTATE<1:0> are in CPSR<26:25>
78 * ITSTATE<7:2> are in CPSR<15:10>
79 */
80static inline unsigned long it_advance(unsigned long cpsr)
81 {
82 if ((cpsr & 0x06000400) == 0) {
83 /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
84 cpsr &= ~PSR_IT_MASK;
85 } else {
86 /* We need to shift left ITSTATE<4:0> */
87 const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
88 unsigned long it = cpsr & mask;
89 it <<= 1;
90 it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
91 it &= mask;
92 cpsr &= ~mask;
93 cpsr |= it;
94 }
95 return cpsr;
96}
97
98static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
99{
100 long cpsr = regs->ARM_cpsr;
101 if (pcv & 0x1) {
102 cpsr |= PSR_T_BIT;
103 pcv &= ~0x1;
104 } else {
105 cpsr &= ~PSR_T_BIT;
 106		pcv &= ~0x2; /* Avoid UNPREDICTABLE address alignment */
107 }
108 regs->ARM_cpsr = cpsr;
109 regs->ARM_pc = pcv;
110}
111
112
113#if __LINUX_ARM_ARCH__ >= 6
114
115/* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */
116#define load_write_pc_interworks true
117#define test_load_write_pc_interworking()
118
119#else /* __LINUX_ARM_ARCH__ < 6 */
120
121/* We need run-time testing to determine if load_write_pc() should interwork. */
122extern bool load_write_pc_interworks;
123void __init test_load_write_pc_interworking(void);
124
125#endif
126
127static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs)
128{
129 if (load_write_pc_interworks)
130 bx_write_pc(pcv, regs);
131 else
132 regs->ARM_pc = pcv;
133}
134
135
136#if __LINUX_ARM_ARCH__ >= 7
137
138#define alu_write_pc_interworks true
139#define test_alu_write_pc_interworking()
140
141#elif __LINUX_ARM_ARCH__ <= 5
142
143/* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */
144#define alu_write_pc_interworks false
145#define test_alu_write_pc_interworking()
146
147#else /* __LINUX_ARM_ARCH__ == 6 */
148
149/* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */
150extern bool alu_write_pc_interworks;
151void __init test_alu_write_pc_interworking(void);
152
153#endif /* __LINUX_ARM_ARCH__ == 6 */
154
155static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs)
156{
157 if (alu_write_pc_interworks)
158 bx_write_pc(pcv, regs);
159 else
160 regs->ARM_pc = pcv;
161}
162
163
164void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs);
165void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs);
166
167enum kprobe_insn __kprobes
168kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi);
169
170/*
171 * Test if load/store instructions writeback the address register.
172 * if P (bit 24) == 0 or W (bit 21) == 1
173 */
174#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
175
176/*
177 * The following definitions and macros are used to build instruction
178 * decoding tables for use by kprobe_decode_insn.
179 *
180 * These tables are a concatenation of entries each of which consist of one of
181 * the decode_* structs. All of the fields in every type of decode structure
182 * are of the union type decode_item, therefore the entire decode table can be
183 * viewed as an array of these and declared like:
184 *
185 * static const union decode_item table_name[] = {};
186 *
187 * In order to construct each entry in the table, macros are used to
188 * initialise a number of sequential decode_item values in a layout which
 189 * matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct
190 * decode_simulate by initialising four decode_item objects like this...
191 *
192 * {.bits = _type},
193 * {.bits = _mask},
194 * {.bits = _value},
195 * {.handler = _handler},
196 *
197 * Initialising a specified member of the union means that the compiler
198 * will produce a warning if the argument is of an incorrect type.
199 *
200 * Below is a list of each of the macros used to initialise entries and a
201 * description of the action performed when that entry is matched to an
202 * instruction. A match is found when (instruction & mask) == value.
203 *
204 * DECODE_TABLE(mask, value, table)
205 * Instruction decoding jumps to parsing the new sub-table 'table'.
206 *
207 * DECODE_CUSTOM(mask, value, decoder)
 208 *	The custom function 'decoder' is called to complete the decoding
 209 *	of an instruction.
210 *
211 * DECODE_SIMULATE(mask, value, handler)
212 * Set the probes instruction handler to 'handler', this will be used
213 * to simulate the instruction when the probe is hit. Decoding returns
214 * with INSN_GOOD_NO_SLOT.
215 *
216 * DECODE_EMULATE(mask, value, handler)
217 * Set the probes instruction handler to 'handler', this will be used
218 * to emulate the instruction when the probe is hit. The modified
219 * instruction (see below) is placed in the probes instruction slot so it
220 * may be called by the emulation code. Decoding returns with INSN_GOOD.
221 *
222 * DECODE_REJECT(mask, value)
223 * Instruction decoding fails with INSN_REJECTED
224 *
225 * DECODE_OR(mask, value)
226 * This allows the mask/value test of multiple table entries to be
227 * logically ORed. Once an 'or' entry is matched the decoding action to
228 * be performed is that of the next entry which isn't an 'or'. E.g.
229 *
230 * DECODE_OR (mask1, value1)
231 * DECODE_OR (mask2, value2)
232 * DECODE_SIMULATE (mask3, value3, simulation_handler)
233 *
234 * This means that if any of the three mask/value pairs match the
235 * instruction being decoded, then 'simulation_handler' will be used
236 * for it.
237 *
238 * Both the SIMULATE and EMULATE macros have a second form which take an
239 * additional 'regs' argument.
240 *
241 * DECODE_SIMULATEX(mask, value, handler, regs)
242 * DECODE_EMULATEX (mask, value, handler, regs)
243 *
244 * These are used to specify what kind of CPU register is encoded in each of the
245 * least significant 5 nibbles of the instruction being decoded. The regs value
246 * is specified using the REGS macro, this takes any of the REG_TYPE_* values
247 * from enum decode_reg_type as arguments; only the '*' part of the name is
248 * given. E.g.
249 *
250 * REGS(0, ANY, NOPC, 0, ANY)
251 *
252 * This indicates an instruction is encoded like:
253 *
254 * bits 19..16 ignore
255 * bits 15..12 any register allowed here
256 * bits 11.. 8 any register except PC allowed here
257 * bits 7.. 4 ignore
258 * bits 3.. 0 any register allowed here
259 *
260 * This register specification is checked after a decode table entry is found to
261 * match an instruction (through the mask/value test). Any invalid register then
262 * found in the instruction will cause decoding to fail with INSN_REJECTED. In
263 * the above example this would happen if bits 11..8 of the instruction were
264 * 1111, indicating R15 or PC.
265 *
266 * As well as checking for legal combinations of registers, this data is also
 267 * used to modify the registers encoded in the instructions so that
 268 * emulation routines can use it. (See decode_regs() and INSN_NEW_BITS.)
269 *
270 * Here is a real example which matches ARM instructions of the form
271 * "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>"
272 *
273 * DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
274 * REGS(ANY, ANY, NOPC, 0, ANY)),
275 * ^ ^ ^ ^
276 * Rn Rd Rs Rm
277 *
278 * Decoding the instruction "AND R4, R5, R6, ASL R15" will be rejected because
279 * Rs == R15
280 *
281 * Decoding the instruction "AND R4, R5, R6, ASL R7" will be accepted and the
282 * instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into
283 * the kprobes instruction slot. This can then be called later by the handler
284 * function emulate_rd12rn16rm0rs8_rwflags in order to simulate the instruction.
285 */
286
287enum decode_type {
288 DECODE_TYPE_END,
289 DECODE_TYPE_TABLE,
290 DECODE_TYPE_CUSTOM,
291 DECODE_TYPE_SIMULATE,
292 DECODE_TYPE_EMULATE,
293 DECODE_TYPE_OR,
294 DECODE_TYPE_REJECT,
295 NUM_DECODE_TYPES /* Must be last enum */
296};
297
298#define DECODE_TYPE_BITS 4
299#define DECODE_TYPE_MASK ((1 << DECODE_TYPE_BITS) - 1)
300
301enum decode_reg_type {
302 REG_TYPE_NONE = 0, /* Not a register, ignore */
303 REG_TYPE_ANY, /* Any register allowed */
304 REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */
305 REG_TYPE_SP, /* Register must be SP */
306 REG_TYPE_PC, /* Register must be PC */
307 REG_TYPE_NOSP, /* Register must not be SP */
308 REG_TYPE_NOSPPC, /* Register must not be SP or PC */
309 REG_TYPE_NOPC, /* Register must not be PC */
310 REG_TYPE_NOPCWB, /* No PC if load/store write-back flag also set */
311
312 /* The following types are used when the encoding for PC indicates
 313	 * another instruction form. This distinction only matters for test
314 * case coverage checks.
315 */
316 REG_TYPE_NOPCX, /* Register must not be PC */
317 REG_TYPE_NOSPPCX, /* Register must not be SP or PC */
318
319 /* Alias to allow '0' arg to be used in REGS macro. */
320 REG_TYPE_0 = REG_TYPE_NONE
321};
322
323#define REGS(r16, r12, r8, r4, r0) \
324 ((REG_TYPE_##r16) << 16) + \
325 ((REG_TYPE_##r12) << 12) + \
326 ((REG_TYPE_##r8) << 8) + \
327 ((REG_TYPE_##r4) << 4) + \
328 (REG_TYPE_##r0)
329
330union decode_item {
331 u32 bits;
332 const union decode_item *table;
333 kprobe_insn_handler_t *handler;
334 kprobe_decode_insn_t *decoder;
335};
336
337
338#define DECODE_END \
339 {.bits = DECODE_TYPE_END}
340
341
342struct decode_header {
343 union decode_item type_regs;
344 union decode_item mask;
345 union decode_item value;
346};
347
348#define DECODE_HEADER(_type, _mask, _value, _regs) \
349 {.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)}, \
350 {.bits = (_mask)}, \
351 {.bits = (_value)}
352
353
354struct decode_table {
355 struct decode_header header;
356 union decode_item table;
357};
358
359#define DECODE_TABLE(_mask, _value, _table) \
360 DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0), \
361 {.table = (_table)}
362
363
364struct decode_custom {
365 struct decode_header header;
366 union decode_item decoder;
367};
368
369#define DECODE_CUSTOM(_mask, _value, _decoder) \
370 DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0), \
371 {.decoder = (_decoder)}
372
373
374struct decode_simulate {
375 struct decode_header header;
376 union decode_item handler;
377};
378
379#define DECODE_SIMULATEX(_mask, _value, _handler, _regs) \
380 DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs), \
381 {.handler = (_handler)}
382
383#define DECODE_SIMULATE(_mask, _value, _handler) \
384 DECODE_SIMULATEX(_mask, _value, _handler, 0)
385
386
387struct decode_emulate {
388 struct decode_header header;
389 union decode_item handler;
390};
391
392#define DECODE_EMULATEX(_mask, _value, _handler, _regs) \
393 DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs), \
394 {.handler = (_handler)}
395
396#define DECODE_EMULATE(_mask, _value, _handler) \
397 DECODE_EMULATEX(_mask, _value, _handler, 0)
398
399
400struct decode_or {
401 struct decode_header header;
402};
403
404#define DECODE_OR(_mask, _value) \
405 DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0)
406
407
408struct decode_reject {
409 struct decode_header header;
410};
411
412#define DECODE_REJECT(_mask, _value) \
413 DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
414
415
416int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
417 const union decode_item *table, bool thumb16);
418
419
420#endif /* _ARM_KERNEL_KPROBES_H */
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 0f107dcb034..136e8376a3e 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -9,6 +9,8 @@
9 */ 9 */
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/notifier.h>
13#include <linux/cpu.h>
12#include <linux/sysdev.h> 14#include <linux/sysdev.h>
13#include <linux/syscore_ops.h> 15#include <linux/syscore_ops.h>
14 16
@@ -101,6 +103,25 @@ static struct syscore_ops leds_syscore_ops = {
101 .resume = leds_resume, 103 .resume = leds_resume,
102}; 104};
103 105
106static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
107 void *data)
108{
109 switch (val) {
110 case IDLE_START:
111 leds_event(led_idle_start);
112 break;
113 case IDLE_END:
114 leds_event(led_idle_end);
115 break;
116 }
117
118 return 0;
119}
120
121static struct notifier_block leds_idle_nb = {
122 .notifier_call = leds_idle_notifier,
123};
124
104static int __init leds_init(void) 125static int __init leds_init(void)
105{ 126{
106 int ret; 127 int ret;
@@ -109,8 +130,12 @@ static int __init leds_init(void)
109 ret = sysdev_register(&leds_device); 130 ret = sysdev_register(&leds_device);
110 if (ret == 0) 131 if (ret == 0)
111 ret = sysdev_create_file(&leds_device, &attr_event); 132 ret = sysdev_create_file(&leds_device, &attr_event);
112 if (ret == 0) 133
134 if (ret == 0) {
113 register_syscore_ops(&leds_syscore_ops); 135 register_syscore_ops(&leds_syscore_ops);
136 idle_notifier_register(&leds_idle_nb);
137 }
138
114 return ret; 139 return ret;
115} 140}
116 141
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 016d6a0830a..09326b62780 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -43,25 +43,7 @@ void *module_alloc(unsigned long size)
43 GFP_KERNEL, PAGE_KERNEL_EXEC, -1, 43 GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
44 __builtin_return_address(0)); 44 __builtin_return_address(0));
45} 45}
46#else /* CONFIG_MMU */ 46#endif
47void *module_alloc(unsigned long size)
48{
49 return size == 0 ? NULL : vmalloc(size);
50}
51#endif /* !CONFIG_MMU */
52
53void module_free(struct module *module, void *region)
54{
55 vfree(region);
56}
57
58int module_frob_arch_sections(Elf_Ehdr *hdr,
59 Elf_Shdr *sechdrs,
60 char *secstrings,
61 struct module *mod)
62{
63 return 0;
64}
65 47
66int 48int
67apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, 49apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
@@ -107,6 +89,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
107 break; 89 break;
108 90
109 case R_ARM_ABS32: 91 case R_ARM_ABS32:
92 case R_ARM_TARGET1:
110 *(u32 *)loc += sym->st_value; 93 *(u32 *)loc += sym->st_value;
111 break; 94 break;
112 95
@@ -265,15 +248,6 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
265 return 0; 248 return 0;
266} 249}
267 250
268int
269apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
270 unsigned int symindex, unsigned int relsec, struct module *module)
271{
272 printk(KERN_ERR "module %s: ADD RELOCATION unsupported\n",
273 module->name);
274 return -ENOEXEC;
275}
276
277struct mod_unwind_map { 251struct mod_unwind_map {
278 const Elf_Shdr *unw_sec; 252 const Elf_Shdr *unw_sec;
279 const Elf_Shdr *txt_sec; 253 const Elf_Shdr *txt_sec;
@@ -350,7 +324,11 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
350#endif 324#endif
351 s = find_mod_section(hdr, sechdrs, ".alt.smp.init"); 325 s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
352 if (s && !is_smp()) 326 if (s && !is_smp())
327#ifdef CONFIG_SMP_ON_UP
353 fixup_smp((void *)s->sh_addr, s->sh_size); 328 fixup_smp((void *)s->sh_addr, s->sh_size);
329#else
330 return -EINVAL;
331#endif
354 return 0; 332 return 0;
355} 333}
356 334
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 2b5b1421596..53c9c2610cb 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void)
435 if (irq >= 0) 435 if (irq >= 0)
436 free_irq(irq, NULL); 436 free_irq(irq, NULL);
437 } 437 }
438 release_pmu(pmu_device); 438 release_pmu(ARM_PMU_DEVICE_CPU);
439 pmu_device = NULL; 439 pmu_device = NULL;
440 } 440 }
441 441
@@ -454,7 +454,7 @@ armpmu_release_hardware(void)
454 } 454 }
455 armpmu->stop(); 455 armpmu->stop();
456 456
457 release_pmu(pmu_device); 457 release_pmu(ARM_PMU_DEVICE_CPU);
458 pmu_device = NULL; 458 pmu_device = NULL;
459} 459}
460 460
@@ -662,6 +662,12 @@ init_hw_perf_events(void)
662 case 0xC090: /* Cortex-A9 */ 662 case 0xC090: /* Cortex-A9 */
663 armpmu = armv7_a9_pmu_init(); 663 armpmu = armv7_a9_pmu_init();
664 break; 664 break;
665 case 0xC050: /* Cortex-A5 */
666 armpmu = armv7_a5_pmu_init();
667 break;
668 case 0xC0F0: /* Cortex-A15 */
669 armpmu = armv7_a15_pmu_init();
670 break;
665 } 671 }
666 /* Intel CPUs [xscale]. */ 672 /* Intel CPUs [xscale]. */
667 } else if (0x69 == implementor) { 673 } else if (0x69 == implementor) {
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index f1e8dd94afe..dd7f3b9f4cb 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -173,6 +173,20 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
173 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 173 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
174 }, 174 },
175 }, 175 },
176 [C(NODE)] = {
177 [C(OP_READ)] = {
178 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
179 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
180 },
181 [C(OP_WRITE)] = {
182 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
183 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
184 },
185 [C(OP_PREFETCH)] = {
186 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
187 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
188 },
189 },
176}; 190};
177 191
178enum armv6mpcore_perf_types { 192enum armv6mpcore_perf_types {
@@ -310,6 +324,20 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
310 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 324 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
311 }, 325 },
312 }, 326 },
327 [C(NODE)] = {
328 [C(OP_READ)] = {
329 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
330 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
331 },
332 [C(OP_WRITE)] = {
333 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
334 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
335 },
336 [C(OP_PREFETCH)] = {
337 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
338 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
339 },
340 },
313}; 341};
314 342
315static inline unsigned long 343static inline unsigned long
@@ -479,7 +507,7 @@ armv6pmu_handle_irq(int irq_num,
479 if (!armpmu_event_set_period(event, hwc, idx)) 507 if (!armpmu_event_set_period(event, hwc, idx))
480 continue; 508 continue;
481 509
482 if (perf_event_overflow(event, 0, &data, regs)) 510 if (perf_event_overflow(event, &data, regs))
483 armpmu->disable(hwc, idx); 511 armpmu->disable(hwc, idx);
484 } 512 }
485 513
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686afb5..6be3e2e4d83 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,17 +17,23 @@
17 */ 17 */
18 18
19#ifdef CONFIG_CPU_V7 19#ifdef CONFIG_CPU_V7
20/* Common ARMv7 event types */ 20/*
21 * Common ARMv7 event types
22 *
23 * Note: An implementation may not be able to count all of these events
24 * but the encodings are considered to be `reserved' in the case that
25 * they are not available.
26 */
21enum armv7_perf_types { 27enum armv7_perf_types {
22 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, 28 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
23 ARMV7_PERFCTR_IFETCH_MISS = 0x01, 29 ARMV7_PERFCTR_IFETCH_MISS = 0x01,
24 ARMV7_PERFCTR_ITLB_MISS = 0x02, 30 ARMV7_PERFCTR_ITLB_MISS = 0x02,
25 ARMV7_PERFCTR_DCACHE_REFILL = 0x03, 31 ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
26 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, 32 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
27 ARMV7_PERFCTR_DTLB_REFILL = 0x05, 33 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
28 ARMV7_PERFCTR_DREAD = 0x06, 34 ARMV7_PERFCTR_DREAD = 0x06,
29 ARMV7_PERFCTR_DWRITE = 0x07, 35 ARMV7_PERFCTR_DWRITE = 0x07,
30 36 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
31 ARMV7_PERFCTR_EXC_TAKEN = 0x09, 37 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
32 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, 38 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
33 ARMV7_PERFCTR_CID_WRITE = 0x0B, 39 ARMV7_PERFCTR_CID_WRITE = 0x0B,
@@ -39,21 +45,30 @@ enum armv7_perf_types {
39 */ 45 */
40 ARMV7_PERFCTR_PC_WRITE = 0x0C, 46 ARMV7_PERFCTR_PC_WRITE = 0x0C,
41 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, 47 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
48 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
42 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, 49 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
50
51 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
43 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, 52 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
44 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, 53 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
45 54 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
46 ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, 55 ARMV7_PERFCTR_MEM_ACCESS = 0x13,
56 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
57 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
58 ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
59 ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
60 ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
61 ARMV7_PERFCTR_BUS_ACCESS = 0x19,
62 ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
63 ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
64 ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
65 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
47 66
48 ARMV7_PERFCTR_CPU_CYCLES = 0xFF 67 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
49}; 68};
50 69
51/* ARMv7 Cortex-A8 specific event types */ 70/* ARMv7 Cortex-A8 specific event types */
52enum armv7_a8_perf_types { 71enum armv7_a8_perf_types {
53 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
54
55 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
56
57 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, 72 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
58 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, 73 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
59 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, 74 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
@@ -138,6 +153,39 @@ enum armv7_a9_perf_types {
138 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 153 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
139}; 154};
140 155
156/* ARMv7 Cortex-A5 specific event types */
157enum armv7_a5_perf_types {
158 ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
159 ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
160
161 ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
162 ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
163 ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
164 ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
165 ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
166 ARMV7_PERFCTR_READ_ALLOC = 0xc5,
167
168 ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
169};
170
171/* ARMv7 Cortex-A15 specific event types */
172enum armv7_a15_perf_types {
173 ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
174 ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
175 ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
176 ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
177
178 ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
179 ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
180
181 ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
182 ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
183 ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
184 ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
185
186 ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
187};
188
141/* 189/*
142 * Cortex-A8 HW events mapping 190 * Cortex-A8 HW events mapping
143 * 191 *
@@ -207,11 +255,6 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
207 }, 255 },
208 }, 256 },
209 [C(DTLB)] = { 257 [C(DTLB)] = {
210 /*
211 * Only ITLB misses and DTLB refills are supported.
212 * If users want the DTLB refills misses a raw counter
213 * must be used.
214 */
215 [C(OP_READ)] = { 258 [C(OP_READ)] = {
216 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 259 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
217 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, 260 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -255,6 +298,20 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
255 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 298 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
256 }, 299 },
257 }, 300 },
301 [C(NODE)] = {
302 [C(OP_READ)] = {
303 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
304 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
305 },
306 [C(OP_WRITE)] = {
307 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
308 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
309 },
310 [C(OP_PREFETCH)] = {
311 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
312 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
313 },
314 },
258}; 315};
259 316
260/* 317/*
@@ -264,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
264 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, 321 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
265 [PERF_COUNT_HW_INSTRUCTIONS] = 322 [PERF_COUNT_HW_INSTRUCTIONS] =
266 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, 323 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
267 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, 324 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
268 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, 325 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
269 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, 326 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
270 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, 327 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
271 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, 328 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
@@ -323,11 +380,6 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
323 }, 380 },
324 }, 381 },
325 [C(DTLB)] = { 382 [C(DTLB)] = {
326 /*
327 * Only ITLB misses and DTLB refills are supported.
328 * If users want the DTLB refills misses a raw counter
329 * must be used.
330 */
331 [C(OP_READ)] = { 383 [C(OP_READ)] = {
332 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 384 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
333 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, 385 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -371,6 +423,256 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
371 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 423 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
372 }, 424 },
373 }, 425 },
426 [C(NODE)] = {
427 [C(OP_READ)] = {
428 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
429 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
430 },
431 [C(OP_WRITE)] = {
432 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
433 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
434 },
435 [C(OP_PREFETCH)] = {
436 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
437 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
438 },
439 },
440};
441
442/*
443 * Cortex-A5 HW events mapping
444 */
445static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
446 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
447 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
448 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
449 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
450 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
451 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
452 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
453};
454
455static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
456 [PERF_COUNT_HW_CACHE_OP_MAX]
457 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
458 [C(L1D)] = {
459 [C(OP_READ)] = {
460 [C(RESULT_ACCESS)]
461 = ARMV7_PERFCTR_DCACHE_ACCESS,
462 [C(RESULT_MISS)]
463 = ARMV7_PERFCTR_DCACHE_REFILL,
464 },
465 [C(OP_WRITE)] = {
466 [C(RESULT_ACCESS)]
467 = ARMV7_PERFCTR_DCACHE_ACCESS,
468 [C(RESULT_MISS)]
469 = ARMV7_PERFCTR_DCACHE_REFILL,
470 },
471 [C(OP_PREFETCH)] = {
472 [C(RESULT_ACCESS)]
473 = ARMV7_PERFCTR_PREFETCH_LINEFILL,
474 [C(RESULT_MISS)]
475 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
476 },
477 },
478 [C(L1I)] = {
479 [C(OP_READ)] = {
480 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
481 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
482 },
483 [C(OP_WRITE)] = {
484 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
485 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
486 },
487 /*
488 * The prefetch counters don't differentiate between the I
489 * side and the D side.
490 */
491 [C(OP_PREFETCH)] = {
492 [C(RESULT_ACCESS)]
493 = ARMV7_PERFCTR_PREFETCH_LINEFILL,
494 [C(RESULT_MISS)]
495 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
496 },
497 },
498 [C(LL)] = {
499 [C(OP_READ)] = {
500 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
501 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
502 },
503 [C(OP_WRITE)] = {
504 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
505 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
506 },
507 [C(OP_PREFETCH)] = {
508 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
509 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
510 },
511 },
512 [C(DTLB)] = {
513 [C(OP_READ)] = {
514 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
515 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
516 },
517 [C(OP_WRITE)] = {
518 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
519 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
520 },
521 [C(OP_PREFETCH)] = {
522 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
523 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
524 },
525 },
526 [C(ITLB)] = {
527 [C(OP_READ)] = {
528 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
529 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
530 },
531 [C(OP_WRITE)] = {
532 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
533 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
534 },
535 [C(OP_PREFETCH)] = {
536 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
537 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
538 },
539 },
540 [C(BPU)] = {
541 [C(OP_READ)] = {
542 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
543 [C(RESULT_MISS)]
544 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
545 },
546 [C(OP_WRITE)] = {
547 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
548 [C(RESULT_MISS)]
549 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
550 },
551 [C(OP_PREFETCH)] = {
552 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
553 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
554 },
555 },
556};
557
558/*
559 * Cortex-A15 HW events mapping
560 */
561static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
562 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
563 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
564 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
565 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
566 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
567 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
568 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
569};
570
571static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
572 [PERF_COUNT_HW_CACHE_OP_MAX]
573 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
574 [C(L1D)] = {
575 [C(OP_READ)] = {
576 [C(RESULT_ACCESS)]
577 = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
578 [C(RESULT_MISS)]
579 = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
580 },
581 [C(OP_WRITE)] = {
582 [C(RESULT_ACCESS)]
583 = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
584 [C(RESULT_MISS)]
585 = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
586 },
587 [C(OP_PREFETCH)] = {
588 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
589 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
590 },
591 },
592 [C(L1I)] = {
593 /*
594 * Not all performance counters differentiate between read
595 * and write accesses/misses so we're not always strictly
596 * correct, but it's the best we can do. Writes and reads get
597 * combined in these cases.
598 */
599 [C(OP_READ)] = {
600 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
601 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
602 },
603 [C(OP_WRITE)] = {
604 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
605 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
606 },
607 [C(OP_PREFETCH)] = {
608 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
609 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
610 },
611 },
612 [C(LL)] = {
613 [C(OP_READ)] = {
614 [C(RESULT_ACCESS)]
615 = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
616 [C(RESULT_MISS)]
617 = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
618 },
619 [C(OP_WRITE)] = {
620 [C(RESULT_ACCESS)]
621 = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
622 [C(RESULT_MISS)]
623 = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
624 },
625 [C(OP_PREFETCH)] = {
626 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
627 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
628 },
629 },
630 [C(DTLB)] = {
631 [C(OP_READ)] = {
632 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
633 [C(RESULT_MISS)]
634 = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
635 },
636 [C(OP_WRITE)] = {
637 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
638 [C(RESULT_MISS)]
639 = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
640 },
641 [C(OP_PREFETCH)] = {
642 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
643 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
644 },
645 },
646 [C(ITLB)] = {
647 [C(OP_READ)] = {
648 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
649 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
650 },
651 [C(OP_WRITE)] = {
652 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
653 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
654 },
655 [C(OP_PREFETCH)] = {
656 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
657 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
658 },
659 },
660 [C(BPU)] = {
661 [C(OP_READ)] = {
662 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
663 [C(RESULT_MISS)]
664 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
665 },
666 [C(OP_WRITE)] = {
667 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
668 [C(RESULT_MISS)]
669 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
670 },
671 [C(OP_PREFETCH)] = {
672 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
673 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
674 },
675 },
374}; 676};
375 677
376/* 678/*
@@ -787,7 +1089,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
787 if (!armpmu_event_set_period(event, hwc, idx)) 1089 if (!armpmu_event_set_period(event, hwc, idx))
788 continue; 1090 continue;
789 1091
790 if (perf_event_overflow(event, 0, &data, regs)) 1092 if (perf_event_overflow(event, &data, regs))
791 armpmu->disable(hwc, idx); 1093 armpmu->disable(hwc, idx);
792 } 1094 }
793 1095
@@ -905,6 +1207,26 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
905 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1207 armv7pmu.num_events = armv7_read_num_pmnc_events();
906 return &armv7pmu; 1208 return &armv7pmu;
907} 1209}
1210
1211static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1212{
1213 armv7pmu.id = ARM_PERF_PMU_ID_CA5;
1214 armv7pmu.name = "ARMv7 Cortex-A5";
1215 armv7pmu.cache_map = &armv7_a5_perf_cache_map;
1216 armv7pmu.event_map = &armv7_a5_perf_map;
1217 armv7pmu.num_events = armv7_read_num_pmnc_events();
1218 return &armv7pmu;
1219}
1220
1221static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1222{
1223 armv7pmu.id = ARM_PERF_PMU_ID_CA15;
1224 armv7pmu.name = "ARMv7 Cortex-A15";
1225 armv7pmu.cache_map = &armv7_a15_perf_cache_map;
1226 armv7pmu.event_map = &armv7_a15_perf_map;
1227 armv7pmu.num_events = armv7_read_num_pmnc_events();
1228 return &armv7pmu;
1229}
908#else 1230#else
909static const struct arm_pmu *__init armv7_a8_pmu_init(void) 1231static const struct arm_pmu *__init armv7_a8_pmu_init(void)
910{ 1232{
@@ -915,4 +1237,14 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
915{ 1237{
916 return NULL; 1238 return NULL;
917} 1239}
1240
1241static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1242{
1243 return NULL;
1244}
1245
1246static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1247{
1248 return NULL;
1249}
918#endif /* CONFIG_CPU_V7 */ 1250#endif /* CONFIG_CPU_V7 */
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 39affbe4fdb..3c4397491d0 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -144,6 +144,20 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
144 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, 144 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
145 }, 145 },
146 }, 146 },
147 [C(NODE)] = {
148 [C(OP_READ)] = {
149 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
150 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
151 },
152 [C(OP_WRITE)] = {
153 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
154 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
155 },
156 [C(OP_PREFETCH)] = {
157 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
158 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
159 },
160 },
147}; 161};
148 162
149#define XSCALE_PMU_ENABLE 0x001 163#define XSCALE_PMU_ENABLE 0x001
@@ -251,7 +265,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
251 if (!armpmu_event_set_period(event, hwc, idx)) 265 if (!armpmu_event_set_period(event, hwc, idx))
252 continue; 266 continue;
253 267
254 if (perf_event_overflow(event, 0, &data, regs)) 268 if (perf_event_overflow(event, &data, regs))
255 armpmu->disable(hwc, idx); 269 armpmu->disable(hwc, idx);
256 } 270 }
257 271
@@ -583,7 +597,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
583 if (!armpmu_event_set_period(event, hwc, idx)) 597 if (!armpmu_event_set_period(event, hwc, idx))
584 continue; 598 continue;
585 599
586 if (perf_event_overflow(event, 0, &data, regs)) 600 if (perf_event_overflow(event, &data, regs))
587 armpmu->disable(hwc, idx); 601 armpmu->disable(hwc, idx);
588 } 602 }
589 603
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c79eec1926..c53474fe84d 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -17,6 +17,7 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/of_device.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21 22
22#include <asm/pmu.h> 23#include <asm/pmu.h>
@@ -25,51 +26,103 @@ static volatile long pmu_lock;
25 26
26static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; 27static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
27 28
28static int __devinit pmu_device_probe(struct platform_device *pdev) 29static int __devinit pmu_register(struct platform_device *pdev,
30 enum arm_pmu_type type)
29{ 31{
30 32 if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
31 if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
32 pr_warning("received registration request for unknown " 33 pr_warning("received registration request for unknown "
33 "device %d\n", pdev->id); 34 "PMU device type %d\n", type);
34 return -EINVAL; 35 return -EINVAL;
35 } 36 }
36 37
37 if (pmu_devices[pdev->id]) 38 if (pmu_devices[type]) {
38 pr_warning("registering new PMU device type %d overwrites " 39 pr_warning("rejecting duplicate registration of PMU device "
39 "previous registration!\n", pdev->id); 40 "type %d.", type);
40 else 41 return -ENOSPC;
41 pr_info("registered new PMU device of type %d\n", 42 }
42 pdev->id);
43 43
44 pmu_devices[pdev->id] = pdev; 44 pr_info("registered new PMU device of type %d\n", type);
45 pmu_devices[type] = pdev;
45 return 0; 46 return 0;
46} 47}
47 48
48static struct platform_driver pmu_driver = { 49#define OF_MATCH_PMU(_name, _type) { \
50 .compatible = _name, \
51 .data = (void *)_type, \
52}
53
54#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
55
56static struct of_device_id armpmu_of_device_ids[] = {
57 OF_MATCH_CPU("arm,cortex-a9-pmu"),
58 OF_MATCH_CPU("arm,cortex-a8-pmu"),
59 OF_MATCH_CPU("arm,arm1136-pmu"),
60 OF_MATCH_CPU("arm,arm1176-pmu"),
61 {},
62};
63
64#define PLAT_MATCH_PMU(_name, _type) { \
65 .name = _name, \
66 .driver_data = _type, \
67}
68
69#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
70
71static struct platform_device_id armpmu_plat_device_ids[] = {
72 PLAT_MATCH_CPU("arm-pmu"),
73 {},
74};
75
76enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
77{
78 const struct of_device_id *of_id;
79 const struct platform_device_id *pdev_id;
80
81 /* provided by of_device_id table */
82 if (pdev->dev.of_node) {
83 of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
84 BUG_ON(!of_id);
85 return (enum arm_pmu_type)of_id->data;
86 }
87
88 /* Provided by platform_device_id table */
89 pdev_id = platform_get_device_id(pdev);
90 BUG_ON(!pdev_id);
91 return pdev_id->driver_data;
92}
93
94static int __devinit armpmu_device_probe(struct platform_device *pdev)
95{
96 return pmu_register(pdev, armpmu_device_type(pdev));
97}
98
99static struct platform_driver armpmu_driver = {
49 .driver = { 100 .driver = {
50 .name = "arm-pmu", 101 .name = "arm-pmu",
102 .of_match_table = armpmu_of_device_ids,
51 }, 103 },
52 .probe = pmu_device_probe, 104 .probe = armpmu_device_probe,
105 .id_table = armpmu_plat_device_ids,
53}; 106};
54 107
55static int __init register_pmu_driver(void) 108static int __init register_pmu_driver(void)
56{ 109{
57 return platform_driver_register(&pmu_driver); 110 return platform_driver_register(&armpmu_driver);
58} 111}
59device_initcall(register_pmu_driver); 112device_initcall(register_pmu_driver);
60 113
61struct platform_device * 114struct platform_device *
62reserve_pmu(enum arm_pmu_type device) 115reserve_pmu(enum arm_pmu_type type)
63{ 116{
64 struct platform_device *pdev; 117 struct platform_device *pdev;
65 118
66 if (test_and_set_bit_lock(device, &pmu_lock)) { 119 if (test_and_set_bit_lock(type, &pmu_lock)) {
67 pdev = ERR_PTR(-EBUSY); 120 pdev = ERR_PTR(-EBUSY);
68 } else if (pmu_devices[device] == NULL) { 121 } else if (pmu_devices[type] == NULL) {
69 clear_bit_unlock(device, &pmu_lock); 122 clear_bit_unlock(type, &pmu_lock);
70 pdev = ERR_PTR(-ENODEV); 123 pdev = ERR_PTR(-ENODEV);
71 } else { 124 } else {
72 pdev = pmu_devices[device]; 125 pdev = pmu_devices[type];
73 } 126 }
74 127
75 return pdev; 128 return pdev;
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
77EXPORT_SYMBOL_GPL(reserve_pmu); 130EXPORT_SYMBOL_GPL(reserve_pmu);
78 131
79int 132int
80release_pmu(struct platform_device *pdev) 133release_pmu(enum arm_pmu_type type)
81{ 134{
82 if (WARN_ON(pdev != pmu_devices[pdev->id])) 135 if (WARN_ON(!pmu_devices[type]))
83 return -EINVAL; 136 return -EINVAL;
84 clear_bit_unlock(pdev->id, &pmu_lock); 137 clear_bit_unlock(type, &pmu_lock);
85 return 0; 138 return 0;
86} 139}
87EXPORT_SYMBOL_GPL(release_pmu); 140EXPORT_SYMBOL_GPL(release_pmu);
@@ -129,17 +182,17 @@ init_cpu_pmu(void)
129} 182}
130 183
131int 184int
132init_pmu(enum arm_pmu_type device) 185init_pmu(enum arm_pmu_type type)
133{ 186{
134 int err = 0; 187 int err = 0;
135 188
136 switch (device) { 189 switch (type) {
137 case ARM_PMU_DEVICE_CPU: 190 case ARM_PMU_DEVICE_CPU:
138 err = init_cpu_pmu(); 191 err = init_cpu_pmu();
139 break; 192 break;
140 default: 193 default:
141 pr_warning("attempt to initialise unknown device %d\n", 194 pr_warning("attempt to initialise PMU of unknown "
142 device); 195 "type %d\n", type);
143 err = -EINVAL; 196 err = -EINVAL;
144 } 197 }
145 198
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 5e1e5419722..3a2da7788cc 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -30,9 +30,10 @@
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/random.h> 31#include <linux/random.h>
32#include <linux/hw_breakpoint.h> 32#include <linux/hw_breakpoint.h>
33#include <linux/cpuidle.h>
34#include <linux/console.h>
33 35
34#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
35#include <asm/leds.h>
36#include <asm/processor.h> 37#include <asm/processor.h>
37#include <asm/system.h> 38#include <asm/system.h>
38#include <asm/thread_notify.h> 39#include <asm/thread_notify.h>
@@ -62,6 +63,18 @@ static volatile int hlt_counter;
62 63
63#include <mach/system.h> 64#include <mach/system.h>
64 65
66#ifdef CONFIG_SMP
67void arch_trigger_all_cpu_backtrace(void)
68{
69 smp_send_all_cpu_backtrace();
70}
71#else
72void arch_trigger_all_cpu_backtrace(void)
73{
74 dump_stack();
75}
76#endif
77
65void disable_hlt(void) 78void disable_hlt(void)
66{ 79{
67 hlt_counter++; 80 hlt_counter++;
@@ -91,12 +104,33 @@ static int __init hlt_setup(char *__unused)
91__setup("nohlt", nohlt_setup); 104__setup("nohlt", nohlt_setup);
92__setup("hlt", hlt_setup); 105__setup("hlt", hlt_setup);
93 106
94void arm_machine_restart(char mode, const char *cmd) 107#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
108void arm_machine_flush_console(void)
95{ 109{
96 /* Disable interrupts first */ 110 printk("\n");
111 pr_emerg("Restarting %s\n", linux_banner);
112 if (console_trylock()) {
113 console_unlock();
114 return;
115 }
116
117 mdelay(50);
118
97 local_irq_disable(); 119 local_irq_disable();
98 local_fiq_disable(); 120 if (!console_trylock())
121 pr_emerg("arm_restart: Console was locked! Busting\n");
122 else
123 pr_emerg("arm_restart: Console was locked!\n");
124 console_unlock();
125}
126#else
127void arm_machine_flush_console(void)
128{
129}
130#endif
99 131
132void arm_machine_restart(char mode, const char *cmd)
133{
100 /* 134 /*
101 * Tell the mm system that we are going to reboot - 135 * Tell the mm system that we are going to reboot -
102 * we may need it to insert some 1:1 mappings so that 136 * we may need it to insert some 1:1 mappings so that
@@ -182,8 +216,8 @@ void cpu_idle(void)
182 216
183 /* endless idle loop with no priority at all */ 217 /* endless idle loop with no priority at all */
184 while (1) { 218 while (1) {
219 idle_notifier_call_chain(IDLE_START);
185 tick_nohz_stop_sched_tick(1); 220 tick_nohz_stop_sched_tick(1);
186 leds_event(led_idle_start);
187 while (!need_resched()) { 221 while (!need_resched()) {
188#ifdef CONFIG_HOTPLUG_CPU 222#ifdef CONFIG_HOTPLUG_CPU
189 if (cpu_is_offline(smp_processor_id())) 223 if (cpu_is_offline(smp_processor_id()))
@@ -191,12 +225,16 @@ void cpu_idle(void)
191#endif 225#endif
192 226
193 local_irq_disable(); 227 local_irq_disable();
228#ifdef CONFIG_PL310_ERRATA_769419
229 wmb();
230#endif
194 if (hlt_counter) { 231 if (hlt_counter) {
195 local_irq_enable(); 232 local_irq_enable();
196 cpu_relax(); 233 cpu_relax();
197 } else { 234 } else {
198 stop_critical_timings(); 235 stop_critical_timings();
199 pm_idle(); 236 if (cpuidle_idle_call())
237 pm_idle();
200 start_critical_timings(); 238 start_critical_timings();
201 /* 239 /*
202 * This will eventually be removed - pm_idle 240 * This will eventually be removed - pm_idle
@@ -207,8 +245,8 @@ void cpu_idle(void)
207 local_irq_enable(); 245 local_irq_enable();
208 } 246 }
209 } 247 }
210 leds_event(led_idle_end);
211 tick_nohz_restart_sched_tick(); 248 tick_nohz_restart_sched_tick();
249 idle_notifier_call_chain(IDLE_END);
212 preempt_enable_no_resched(); 250 preempt_enable_no_resched();
213 schedule(); 251 schedule();
214 preempt_disable(); 252 preempt_disable();
@@ -247,10 +285,89 @@ void machine_power_off(void)
247 285
248void machine_restart(char *cmd) 286void machine_restart(char *cmd)
249{ 287{
288 /* Flush the console to make sure all the relevant messages make it
289 * out to the console drivers */
290 arm_machine_flush_console();
291
292 /* Disable interrupts first */
293 local_irq_disable();
294 local_fiq_disable();
295
250 machine_shutdown(); 296 machine_shutdown();
251 arm_pm_restart(reboot_mode, cmd); 297 arm_pm_restart(reboot_mode, cmd);
252} 298}
253 299
300/*
301 * dump a block of kernel memory from around the given address
302 */
303static void show_data(unsigned long addr, int nbytes, const char *name)
304{
305 int i, j;
306 int nlines;
307 u32 *p;
308
309 /*
310 * don't attempt to dump non-kernel addresses or
311 * values that are probably just small negative numbers
312 */
313 if (addr < PAGE_OFFSET || addr > -256UL)
314 return;
315
316 printk("\n%s: %#lx:\n", name, addr);
317
318 /*
319 * round address down to a 32 bit boundary
320 * and always dump a multiple of 32 bytes
321 */
322 p = (u32 *)(addr & ~(sizeof(u32) - 1));
323 nbytes += (addr & (sizeof(u32) - 1));
324 nlines = (nbytes + 31) / 32;
325
326
327 for (i = 0; i < nlines; i++) {
328 /*
329 * just display low 16 bits of address to keep
330 * each line of the dump < 80 characters
331 */
332 printk("%04lx ", (unsigned long)p & 0xffff);
333 for (j = 0; j < 8; j++) {
334 u32 data;
335 if (probe_kernel_address(p, data)) {
336 printk(" ********");
337 } else {
338 printk(" %08x", data);
339 }
340 ++p;
341 }
342 printk("\n");
343 }
344}
345
346static void show_extra_register_data(struct pt_regs *regs, int nbytes)
347{
348 mm_segment_t fs;
349
350 fs = get_fs();
351 set_fs(KERNEL_DS);
352 show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
353 show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
354 show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
355 show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
356 show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
357 show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
358 show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
359 show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
360 show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
361 show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
362 show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
363 show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
364 show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
365 show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
366 show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
367 show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
368 set_fs(fs);
369}
370
254void __show_regs(struct pt_regs *regs) 371void __show_regs(struct pt_regs *regs)
255{ 372{
256 unsigned long flags; 373 unsigned long flags;
@@ -310,6 +427,8 @@ void __show_regs(struct pt_regs *regs)
310 printk("Control: %08x%s\n", ctrl, buf); 427 printk("Control: %08x%s\n", ctrl, buf);
311 } 428 }
312#endif 429#endif
430
431 show_extra_register_data(regs, 128);
313} 432}
314 433
315void show_regs(struct pt_regs * regs) 434void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 97260060bf2..2491f3b406b 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -228,34 +228,12 @@ static struct undef_hook thumb_break_hook = {
228 .fn = break_trap, 228 .fn = break_trap,
229}; 229};
230 230
231static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
232{
233 unsigned int instr2;
234 void __user *pc;
235
236 /* Check the second half of the instruction. */
237 pc = (void __user *)(instruction_pointer(regs) + 2);
238
239 if (processor_mode(regs) == SVC_MODE) {
240 instr2 = *(u16 *) pc;
241 } else {
242 get_user(instr2, (u16 __user *)pc);
243 }
244
245 if (instr2 == 0xa000) {
246 ptrace_break(current, regs);
247 return 0;
248 } else {
249 return 1;
250 }
251}
252
253static struct undef_hook thumb2_break_hook = { 231static struct undef_hook thumb2_break_hook = {
254 .instr_mask = 0xffff, 232 .instr_mask = 0xffffffff,
255 .instr_val = 0xf7f0, 233 .instr_val = 0xf7f0a000,
256 .cpsr_mask = PSR_T_BIT, 234 .cpsr_mask = PSR_T_BIT,
257 .cpsr_val = PSR_T_BIT, 235 .cpsr_val = PSR_T_BIT,
258 .fn = thumb2_break_trap, 236 .fn = break_trap,
259}; 237};
260 238
261static int __init ptrace_break_init(void) 239static int __init ptrace_break_init(void)
@@ -396,7 +374,7 @@ static long ptrace_hbp_idx_to_num(int idx)
396/* 374/*
397 * Handle hitting a HW-breakpoint. 375 * Handle hitting a HW-breakpoint.
398 */ 376 */
399static void ptrace_hbptriggered(struct perf_event *bp, int unused, 377static void ptrace_hbptriggered(struct perf_event *bp,
400 struct perf_sample_data *data, 378 struct perf_sample_data *data,
401 struct pt_regs *regs) 379 struct pt_regs *regs)
402{ 380{
@@ -479,7 +457,8 @@ static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
479 attr.bp_type = type; 457 attr.bp_type = type;
480 attr.disabled = 1; 458 attr.disabled = 1;
481 459
482 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk); 460 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
461 tsk);
483} 462}
484 463
485static int ptrace_gethbpregs(struct task_struct *tsk, long num, 464static int ptrace_gethbpregs(struct task_struct *tsk, long num,
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 9cf4cbf8f95..d0cdedf4864 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -57,7 +57,8 @@ relocate_new_kernel:
57 mov r0,#0 57 mov r0,#0
58 ldr r1,kexec_mach_type 58 ldr r1,kexec_mach_type
59 ldr r2,kexec_boot_atags 59 ldr r2,kexec_boot_atags
60 mov pc,lr 60 ARM( mov pc, lr )
61 THUMB( bx lr )
61 62
62 .align 63 .align
63 64
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index acbb447ac6b..5af79bd6a8b 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -280,18 +280,19 @@ static void __init cacheid_init(void)
280 if (arch >= CPU_ARCH_ARMv6) { 280 if (arch >= CPU_ARCH_ARMv6) {
281 if ((cachetype & (7 << 29)) == 4 << 29) { 281 if ((cachetype & (7 << 29)) == 4 << 29) {
282 /* ARMv7 register format */ 282 /* ARMv7 register format */
283 arch = CPU_ARCH_ARMv7;
283 cacheid = CACHEID_VIPT_NONALIASING; 284 cacheid = CACHEID_VIPT_NONALIASING;
284 if ((cachetype & (3 << 14)) == 1 << 14) 285 if ((cachetype & (3 << 14)) == 1 << 14)
285 cacheid |= CACHEID_ASID_TAGGED; 286 cacheid |= CACHEID_ASID_TAGGED;
286 else if (cpu_has_aliasing_icache(CPU_ARCH_ARMv7))
287 cacheid |= CACHEID_VIPT_I_ALIASING;
288 } else if (cachetype & (1 << 23)) {
289 cacheid = CACHEID_VIPT_ALIASING;
290 } else { 287 } else {
291 cacheid = CACHEID_VIPT_NONALIASING; 288 arch = CPU_ARCH_ARMv6;
292 if (cpu_has_aliasing_icache(CPU_ARCH_ARMv6)) 289 if (cachetype & (1 << 23))
293 cacheid |= CACHEID_VIPT_I_ALIASING; 290 cacheid = CACHEID_VIPT_ALIASING;
291 else
292 cacheid = CACHEID_VIPT_NONALIASING;
294 } 293 }
294 if (cpu_has_aliasing_icache(arch))
295 cacheid |= CACHEID_VIPT_I_ALIASING;
295 } else { 296 } else {
296 cacheid = CACHEID_VIVT; 297 cacheid = CACHEID_VIVT;
297 } 298 }
@@ -343,54 +344,6 @@ static void __init feat_v6_fixup(void)
343 elf_hwcap &= ~HWCAP_TLS; 344 elf_hwcap &= ~HWCAP_TLS;
344} 345}
345 346
346static void __init setup_processor(void)
347{
348 struct proc_info_list *list;
349
350 /*
351 * locate processor in the list of supported processor
352 * types. The linker builds this table for us from the
353 * entries in arch/arm/mm/proc-*.S
354 */
355 list = lookup_processor_type(read_cpuid_id());
356 if (!list) {
357 printk("CPU configuration botched (ID %08x), unable "
358 "to continue.\n", read_cpuid_id());
359 while (1);
360 }
361
362 cpu_name = list->cpu_name;
363
364#ifdef MULTI_CPU
365 processor = *list->proc;
366#endif
367#ifdef MULTI_TLB
368 cpu_tlb = *list->tlb;
369#endif
370#ifdef MULTI_USER
371 cpu_user = *list->user;
372#endif
373#ifdef MULTI_CACHE
374 cpu_cache = *list->cache;
375#endif
376
377 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
378 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
379 proc_arch[cpu_architecture()], cr_alignment);
380
381 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
382 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
383 elf_hwcap = list->elf_hwcap;
384#ifndef CONFIG_ARM_THUMB
385 elf_hwcap &= ~HWCAP_THUMB;
386#endif
387
388 feat_v6_fixup();
389
390 cacheid_init();
391 cpu_proc_init();
392}
393
394/* 347/*
395 * cpu_init - initialise one CPU. 348 * cpu_init - initialise one CPU.
396 * 349 *
@@ -406,6 +359,8 @@ void cpu_init(void)
406 BUG(); 359 BUG();
407 } 360 }
408 361
362 cpu_proc_init();
363
409 /* 364 /*
410 * Define the placement constraint for the inline asm directive below. 365 * Define the placement constraint for the inline asm directive below.
411 * In Thumb-2, msr with an immediate value is not allowed. 366 * In Thumb-2, msr with an immediate value is not allowed.
@@ -442,6 +397,54 @@ void cpu_init(void)
442 : "r14"); 397 : "r14");
443} 398}
444 399
400static void __init setup_processor(void)
401{
402 struct proc_info_list *list;
403
404 /*
405 * locate processor in the list of supported processor
406 * types. The linker builds this table for us from the
407 * entries in arch/arm/mm/proc-*.S
408 */
409 list = lookup_processor_type(read_cpuid_id());
410 if (!list) {
411 printk("CPU configuration botched (ID %08x), unable "
412 "to continue.\n", read_cpuid_id());
413 while (1);
414 }
415
416 cpu_name = list->cpu_name;
417
418#ifdef MULTI_CPU
419 processor = *list->proc;
420#endif
421#ifdef MULTI_TLB
422 cpu_tlb = *list->tlb;
423#endif
424#ifdef MULTI_USER
425 cpu_user = *list->user;
426#endif
427#ifdef MULTI_CACHE
428 cpu_cache = *list->cache;
429#endif
430
431 printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
432 cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
433 proc_arch[cpu_architecture()], cr_alignment);
434
435 sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
436 sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
437 elf_hwcap = list->elf_hwcap;
438#ifndef CONFIG_ARM_THUMB
439 elf_hwcap &= ~HWCAP_THUMB;
440#endif
441
442 feat_v6_fixup();
443
444 cacheid_init();
445 cpu_init();
446}
447
445void __init dump_machine_table(void) 448void __init dump_machine_table(void)
446{ 449{
447 struct machine_desc *p; 450 struct machine_desc *p;
@@ -887,6 +890,12 @@ void __init setup_arch(char **cmdline_p)
887 machine_desc = mdesc; 890 machine_desc = mdesc;
888 machine_name = mdesc->name; 891 machine_name = mdesc->name;
889 892
893#ifdef CONFIG_ZONE_DMA
894 if (mdesc->dma_zone_size) {
895 extern unsigned long arm_dma_zone_size;
896 arm_dma_zone_size = mdesc->dma_zone_size;
897 }
898#endif
890 if (mdesc->soft_reboot) 899 if (mdesc->soft_reboot)
891 reboot_setup("s"); 900 reboot_setup("s");
892 901
@@ -915,7 +924,6 @@ void __init setup_arch(char **cmdline_p)
915#endif 924#endif
916 reserve_crashkernel(); 925 reserve_crashkernel();
917 926
918 cpu_init();
919 tcm_init(); 927 tcm_init();
920 928
921#ifdef CONFIG_MULTI_IRQ_HANDLER 929#ifdef CONFIG_MULTI_IRQ_HANDLER
@@ -979,6 +987,10 @@ static const char *hwcap_str[] = {
979 "neon", 987 "neon",
980 "vfpv3", 988 "vfpv3",
981 "vfpv3d16", 989 "vfpv3d16",
990 "tls",
991 "vfpv4",
992 "idiva",
993 "idivt",
982 NULL 994 NULL
983}; 995};
984 996
@@ -990,7 +1002,11 @@ static int c_show(struct seq_file *m, void *v)
990 cpu_name, read_cpuid_id() & 15, elf_platform); 1002 cpu_name, read_cpuid_id() & 15, elf_platform);
991 1003
992#if defined(CONFIG_SMP) 1004#if defined(CONFIG_SMP)
1005# if defined(CONFIG_REPORT_PRESENT_CPUS)
1006 for_each_present_cpu(i) {
1007# else
993 for_each_online_cpu(i) { 1008 for_each_online_cpu(i) {
1009# endif
994 /* 1010 /*
995 * glibc reads /proc/cpuinfo to determine the number of 1011 * glibc reads /proc/cpuinfo to determine the number of
996 * online processors, looking for lines beginning with 1012 * online processors, looking for lines beginning with
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 6398ead9d1c..e87f5f24301 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -10,64 +10,61 @@
10/* 10/*
11 * Save CPU state for a suspend 11 * Save CPU state for a suspend
12 * r1 = v:p offset 12 * r1 = v:p offset
13 * r3 = virtual return function 13 * r2 = suspend function arg0
14 * Note: sp is decremented to allocate space for CPU state on stack 14 * r3 = suspend function
15 * r0-r3,r9,r10,lr corrupted
16 */ 15 */
17ENTRY(cpu_suspend) 16ENTRY(__cpu_suspend)
18 mov r9, lr 17 stmfd sp!, {r4 - r11, lr}
19#ifdef MULTI_CPU 18#ifdef MULTI_CPU
20 ldr r10, =processor 19 ldr r10, =processor
21 mov r2, sp @ current virtual SP 20 ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
22 ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
23 ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function 21 ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function
24 sub sp, sp, r0 @ allocate CPU state on stack 22#else
25 mov r0, sp @ save pointer 23 ldr r5, =cpu_suspend_size
24 ldr ip, =cpu_do_resume
25#endif
26 mov r6, sp @ current virtual SP
27 sub sp, sp, r5 @ allocate CPU state on stack
28 mov r0, sp @ save pointer to CPU save block
26 add ip, ip, r1 @ convert resume fn to phys 29 add ip, ip, r1 @ convert resume fn to phys
27 stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn 30 stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn
28 ldr r3, =sleep_save_sp 31 ldr r5, =sleep_save_sp
29 add r2, sp, r1 @ convert SP to phys 32 add r6, sp, r1 @ convert SP to phys
33 stmfd sp!, {r2, r3} @ save suspend func arg and pointer
30#ifdef CONFIG_SMP 34#ifdef CONFIG_SMP
31 ALT_SMP(mrc p15, 0, lr, c0, c0, 5) 35 ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
32 ALT_UP(mov lr, #0) 36 ALT_UP(mov lr, #0)
33 and lr, lr, #15 37 and lr, lr, #15
34 str r2, [r3, lr, lsl #2] @ save phys SP 38 str r6, [r5, lr, lsl #2] @ save phys SP
35#else 39#else
36 str r2, [r3] @ save phys SP 40 str r6, [r5] @ save phys SP
37#endif 41#endif
42#ifdef MULTI_CPU
38 mov lr, pc 43 mov lr, pc
39 ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state 44 ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
40#else 45#else
41 mov r2, sp @ current virtual SP
42 ldr r0, =cpu_suspend_size
43 sub sp, sp, r0 @ allocate CPU state on stack
44 mov r0, sp @ save pointer
45 stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn
46 ldr r3, =sleep_save_sp
47 add r2, sp, r1 @ convert SP to phys
48#ifdef CONFIG_SMP
49 ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
50 ALT_UP(mov lr, #0)
51 and lr, lr, #15
52 str r2, [r3, lr, lsl #2] @ save phys SP
53#else
54 str r2, [r3] @ save phys SP
55#endif
56 bl cpu_do_suspend 46 bl cpu_do_suspend
57#endif 47#endif
58 48
59 @ flush data cache 49 @ flush data cache
60#ifdef MULTI_CACHE 50#ifdef MULTI_CACHE
61 ldr r10, =cpu_cache 51 ldr r10, =cpu_cache
62 mov lr, r9 52 mov lr, pc
63 ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] 53 ldr pc, [r10, #CACHE_FLUSH_KERN_ALL]
64#else 54#else
65 mov lr, r9 55 bl __cpuc_flush_kern_all
66 b __cpuc_flush_kern_all
67#endif 56#endif
68ENDPROC(cpu_suspend) 57 adr lr, BSYM(cpu_suspend_abort)
58 ldmfd sp!, {r0, pc} @ call suspend fn
59ENDPROC(__cpu_suspend)
69 .ltorg 60 .ltorg
70 61
62cpu_suspend_abort:
63 ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn
64 mov sp, r2
65 ldmfd sp!, {r4 - r11, pc}
66ENDPROC(cpu_suspend_abort)
67
71/* 68/*
72 * r0 = control register value 69 * r0 = control register value
73 * r1 = v:p offset (preserved by cpu_do_resume) 70 * r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,8 @@ ENDPROC(cpu_resume_turn_mmu_on)
97cpu_resume_after_mmu: 94cpu_resume_after_mmu:
98 str r5, [r2, r4, lsl #2] @ restore old mapping 95 str r5, [r2, r4, lsl #2] @ restore old mapping
99 mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache 96 mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache
100 mov pc, lr 97 mov r0, #0 @ return zero on success
98 ldmfd sp!, {r4 - r11, pc}
101ENDPROC(cpu_resume_after_mmu) 99ENDPROC(cpu_resume_after_mmu)
102 100
103/* 101/*
@@ -120,20 +118,11 @@ ENTRY(cpu_resume)
120 ldr r0, sleep_save_sp @ stack phys addr 118 ldr r0, sleep_save_sp @ stack phys addr
121#endif 119#endif
122 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off 120 setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
123#ifdef MULTI_CPU 121 @ load v:p, stack, resume fn
124 @ load v:p, stack, return fn, resume fn 122 ARM( ldmia r0!, {r1, sp, pc} )
125 ARM( ldmia r0!, {r1, sp, lr, pc} ) 123THUMB( ldmia r0!, {r1, r2, r3} )
126THUMB( ldmia r0!, {r1, r2, r3, r4} )
127THUMB( mov sp, r2 ) 124THUMB( mov sp, r2 )
128THUMB( mov lr, r3 ) 125THUMB( bx r3 )
129THUMB( bx r4 )
130#else
131 @ load v:p, stack, return fn
132 ARM( ldmia r0!, {r1, sp, lr} )
133THUMB( ldmia r0!, {r1, r2, lr} )
134THUMB( mov sp, r2 )
135 b cpu_do_resume
136#endif
137ENDPROC(cpu_resume) 126ENDPROC(cpu_resume)
138 127
139sleep_save_sp: 128sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5a574296ace..a07ca050112 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -27,7 +27,7 @@
27#include <linux/clockchips.h> 27#include <linux/clockchips.h>
28#include <linux/completion.h> 28#include <linux/completion.h>
29 29
30#include <asm/atomic.h> 30#include <linux/atomic.h>
31#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
32#include <asm/cpu.h> 32#include <asm/cpu.h>
33#include <asm/cputype.h> 33#include <asm/cputype.h>
@@ -55,6 +55,7 @@ enum ipi_msg_type {
55 IPI_CALL_FUNC, 55 IPI_CALL_FUNC,
56 IPI_CALL_FUNC_SINGLE, 56 IPI_CALL_FUNC_SINGLE,
57 IPI_CPU_STOP, 57 IPI_CPU_STOP,
58 IPI_CPU_BACKTRACE,
58}; 59};
59 60
60int __cpuinit __cpu_up(unsigned int cpu) 61int __cpuinit __cpu_up(unsigned int cpu)
@@ -303,17 +304,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
303 */ 304 */
304 platform_secondary_init(cpu); 305 platform_secondary_init(cpu);
305 306
306 /*
307 * Enable local interrupts.
308 */
309 notify_cpu_starting(cpu); 307 notify_cpu_starting(cpu);
310 local_irq_enable();
311 local_fiq_enable();
312
313 /*
314 * Setup the percpu timer for this CPU.
315 */
316 percpu_timer_setup();
317 308
318 calibrate_delay(); 309 calibrate_delay();
319 310
@@ -325,8 +316,14 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
325 * before we continue. 316 * before we continue.
326 */ 317 */
327 set_cpu_online(cpu, true); 318 set_cpu_online(cpu, true);
328 while (!cpu_active(cpu)) 319
329 cpu_relax(); 320 /*
321 * Setup the percpu timer for this CPU.
322 */
323 percpu_timer_setup();
324
325 local_irq_enable();
326 local_fiq_enable();
330 327
331 /* 328 /*
332 * OK, it's off to the idle thread for us 329 * OK, it's off to the idle thread for us
@@ -367,8 +364,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
367 */ 364 */
368 if (max_cpus > ncores) 365 if (max_cpus > ncores)
369 max_cpus = ncores; 366 max_cpus = ncores;
370 367 if (ncores > 1 && max_cpus) {
371 if (max_cpus > 1) {
372 /* 368 /*
373 * Enable the local timer or broadcast device for the 369 * Enable the local timer or broadcast device for the
374 * boot CPU, but only if we have more than one CPU. 370 * boot CPU, but only if we have more than one CPU.
@@ -376,6 +372,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
376 percpu_timer_setup(); 372 percpu_timer_setup();
377 373
378 /* 374 /*
375 * Initialise the present map, which describes the set of CPUs
376 * actually populated at the present time. A platform should
377 * re-initialize the map in platform_smp_prepare_cpus() if
378 * present != possible (e.g. physical hotplug).
379 */
380 init_cpu_present(&cpu_possible_map);
381
382 /*
379 * Initialise the SCU if there are more than one CPU 383 * Initialise the SCU if there are more than one CPU
380 * and let them know where to start. 384 * and let them know where to start.
381 */ 385 */
@@ -407,6 +411,7 @@ static const char *ipi_types[NR_IPI] = {
407 S(IPI_CALL_FUNC, "Function call interrupts"), 411 S(IPI_CALL_FUNC, "Function call interrupts"),
408 S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), 412 S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
409 S(IPI_CPU_STOP, "CPU stop interrupts"), 413 S(IPI_CPU_STOP, "CPU stop interrupts"),
414 S(IPI_CPU_BACKTRACE, "CPU backtrace"),
410}; 415};
411 416
412void show_ipi_list(struct seq_file *p, int prec) 417void show_ipi_list(struct seq_file *p, int prec)
@@ -447,9 +452,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
447static void ipi_timer(void) 452static void ipi_timer(void)
448{ 453{
449 struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); 454 struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
450 irq_enter();
451 evt->event_handler(evt); 455 evt->event_handler(evt);
452 irq_exit();
453} 456}
454 457
455#ifdef CONFIG_LOCAL_TIMERS 458#ifdef CONFIG_LOCAL_TIMERS
@@ -557,6 +560,58 @@ static void ipi_cpu_stop(unsigned int cpu)
557 cpu_relax(); 560 cpu_relax();
558} 561}
559 562
563static cpumask_t backtrace_mask;
564static DEFINE_RAW_SPINLOCK(backtrace_lock);
565
566/* "in progress" flag of arch_trigger_all_cpu_backtrace */
567static unsigned long backtrace_flag;
568
569void smp_send_all_cpu_backtrace(void)
570{
571 unsigned int this_cpu = smp_processor_id();
572 int i;
573
574 if (test_and_set_bit(0, &backtrace_flag))
575 /*
576 * If there is already a trigger_all_cpu_backtrace() in progress
577 * (backtrace_flag == 1), don't output double cpu dump infos.
578 */
579 return;
580
581 cpumask_copy(&backtrace_mask, cpu_online_mask);
582 cpu_clear(this_cpu, backtrace_mask);
583
584 pr_info("Backtrace for cpu %d (current):\n", this_cpu);
585 dump_stack();
586
587 pr_info("\nsending IPI to all other CPUs:\n");
588 smp_cross_call(&backtrace_mask, IPI_CPU_BACKTRACE);
589
590 /* Wait for up to 10 seconds for all other CPUs to do the backtrace */
591 for (i = 0; i < 10 * 1000; i++) {
592 if (cpumask_empty(&backtrace_mask))
593 break;
594 mdelay(1);
595 }
596
597 clear_bit(0, &backtrace_flag);
598 smp_mb__after_clear_bit();
599}
600
601/*
602 * ipi_cpu_backtrace - handle IPI from smp_send_all_cpu_backtrace()
603 */
604static void ipi_cpu_backtrace(unsigned int cpu, struct pt_regs *regs)
605{
606 if (cpu_isset(cpu, backtrace_mask)) {
607 raw_spin_lock(&backtrace_lock);
608 pr_warning("IPI backtrace for cpu %d\n", cpu);
609 show_regs(regs);
610 raw_spin_unlock(&backtrace_lock);
611 cpu_clear(cpu, backtrace_mask);
612 }
613}
614
560/* 615/*
561 * Main handler for inter-processor interrupts 616 * Main handler for inter-processor interrupts
562 */ 617 */
@@ -570,7 +625,9 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
570 625
571 switch (ipinr) { 626 switch (ipinr) {
572 case IPI_TIMER: 627 case IPI_TIMER:
628 irq_enter();
573 ipi_timer(); 629 ipi_timer();
630 irq_exit();
574 break; 631 break;
575 632
576 case IPI_RESCHEDULE: 633 case IPI_RESCHEDULE:
@@ -580,15 +637,25 @@ asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
580 break; 637 break;
581 638
582 case IPI_CALL_FUNC: 639 case IPI_CALL_FUNC:
640 irq_enter();
583 generic_smp_call_function_interrupt(); 641 generic_smp_call_function_interrupt();
642 irq_exit();
584 break; 643 break;
585 644
586 case IPI_CALL_FUNC_SINGLE: 645 case IPI_CALL_FUNC_SINGLE:
646 irq_enter();
587 generic_smp_call_function_single_interrupt(); 647 generic_smp_call_function_single_interrupt();
648 irq_exit();
588 break; 649 break;
589 650
590 case IPI_CPU_STOP: 651 case IPI_CPU_STOP:
652 irq_enter();
591 ipi_cpu_stop(cpu); 653 ipi_cpu_stop(cpu);
654 irq_exit();
655 break;
656
657 case IPI_CPU_BACKTRACE:
658 ipi_cpu_backtrace(cpu, regs);
592 break; 659 break;
593 660
594 default: 661 default:
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index a1e757c3439..7fcddb75c87 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -13,6 +13,7 @@
13 13
14#include <asm/smp_scu.h> 14#include <asm/smp_scu.h>
15#include <asm/cacheflush.h> 15#include <asm/cacheflush.h>
16#include <asm/cputype.h>
16 17
17#define SCU_CTRL 0x00 18#define SCU_CTRL 0x00
18#define SCU_CONFIG 0x04 19#define SCU_CONFIG 0x04
@@ -20,6 +21,7 @@
20#define SCU_INVALIDATE 0x0c 21#define SCU_INVALIDATE 0x0c
21#define SCU_FPGA_REVISION 0x10 22#define SCU_FPGA_REVISION 0x10
22 23
24#ifdef CONFIG_SMP
23/* 25/*
24 * Get the number of CPU cores from the SCU configuration 26 * Get the number of CPU cores from the SCU configuration
25 */ 27 */
@@ -36,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base)
36{ 38{
37 u32 scu_ctrl; 39 u32 scu_ctrl;
38 40
41#ifdef CONFIG_ARM_ERRATA_764369
42 /* Cortex-A9 only */
43 if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) {
44 scu_ctrl = __raw_readl(scu_base + 0x30);
45 if (!(scu_ctrl & 1))
46 __raw_writel(scu_ctrl | 0x1, scu_base + 0x30);
47 }
48#endif
49
39 scu_ctrl = __raw_readl(scu_base + SCU_CTRL); 50 scu_ctrl = __raw_readl(scu_base + SCU_CTRL);
40 /* already enabled? */ 51 /* already enabled? */
41 if (scu_ctrl & 1) 52 if (scu_ctrl & 1)
@@ -50,6 +61,7 @@ void __init scu_enable(void __iomem *scu_base)
50 */ 61 */
51 flush_cache_all(); 62 flush_cache_all();
52} 63}
64#endif
53 65
54/* 66/*
55 * Set the executing CPUs power mode as defined. This will be in 67 * Set the executing CPUs power mode as defined. This will be in
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 2c277d40cee..47e44f21f60 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -10,13 +10,17 @@
10 */ 10 */
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/clk.h>
14#include <linux/cpufreq.h>
13#include <linux/delay.h> 15#include <linux/delay.h>
14#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/err.h>
15#include <linux/smp.h> 18#include <linux/smp.h>
16#include <linux/jiffies.h> 19#include <linux/jiffies.h>
17#include <linux/clockchips.h> 20#include <linux/clockchips.h>
18#include <linux/irq.h> 21#include <linux/irq.h>
19#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/percpu.h>
20 24
21#include <asm/smp_twd.h> 25#include <asm/smp_twd.h>
22#include <asm/hardware/gic.h> 26#include <asm/hardware/gic.h>
@@ -24,7 +28,9 @@
24/* set up by the platform code */ 28/* set up by the platform code */
25void __iomem *twd_base; 29void __iomem *twd_base;
26 30
31static struct clk *twd_clk;
27static unsigned long twd_timer_rate; 32static unsigned long twd_timer_rate;
33static DEFINE_PER_CPU(struct clock_event_device *, twd_ce);
28 34
29static void twd_set_mode(enum clock_event_mode mode, 35static void twd_set_mode(enum clock_event_mode mode,
30 struct clock_event_device *clk) 36 struct clock_event_device *clk)
@@ -80,6 +86,48 @@ int twd_timer_ack(void)
80 return 0; 86 return 0;
81} 87}
82 88
89/*
90 * Updates clockevent frequency when the cpu frequency changes.
91 * Called on the cpu that is changing frequency with interrupts disabled.
92 */
93static void twd_update_frequency(void *data)
94{
95 twd_timer_rate = clk_get_rate(twd_clk);
96
97 clockevents_update_freq(__get_cpu_var(twd_ce), twd_timer_rate);
98}
99
100static int twd_cpufreq_transition(struct notifier_block *nb,
101 unsigned long state, void *data)
102{
103 struct cpufreq_freqs *freqs = data;
104
105 /*
106 * The twd clock events must be reprogrammed to account for the new
107 * frequency. The timer is local to a cpu, so cross-call to the
108 * changing cpu.
109 */
110 if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
111 smp_call_function_single(freqs->cpu, twd_update_frequency,
112 NULL, 1);
113
114 return NOTIFY_OK;
115}
116
117static struct notifier_block twd_cpufreq_nb = {
118 .notifier_call = twd_cpufreq_transition,
119};
120
121static int twd_cpufreq_init(void)
122{
123 if (!IS_ERR_OR_NULL(twd_clk))
124 return cpufreq_register_notifier(&twd_cpufreq_nb,
125 CPUFREQ_TRANSITION_NOTIFIER);
126
127 return 0;
128}
129core_initcall(twd_cpufreq_init);
130
83static void __cpuinit twd_calibrate_rate(void) 131static void __cpuinit twd_calibrate_rate(void)
84{ 132{
85 unsigned long count; 133 unsigned long count;
@@ -119,12 +167,41 @@ static void __cpuinit twd_calibrate_rate(void)
119 } 167 }
120} 168}
121 169
170static struct clk *twd_get_clock(void)
171{
172 struct clk *clk;
173 int err;
174
175 clk = clk_get_sys("smp_twd", NULL);
176 if (IS_ERR(clk)) {
177 pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk));
178 return clk;
179 }
180
181 err = clk_enable(clk);
182 if (err) {
183 pr_err("smp_twd: clock failed to enable: %d\n", err);
184 clk_put(clk);
185 return ERR_PTR(err);
186 }
187
188 return clk;
189}
190
122/* 191/*
123 * Setup the local clock events for a CPU. 192 * Setup the local clock events for a CPU.
124 */ 193 */
125void __cpuinit twd_timer_setup(struct clock_event_device *clk) 194void __cpuinit twd_timer_setup(struct clock_event_device *clk)
126{ 195{
127 twd_calibrate_rate(); 196 if (!twd_clk)
197 twd_clk = twd_get_clock();
198
199 if (!IS_ERR_OR_NULL(twd_clk))
200 twd_timer_rate = clk_get_rate(twd_clk);
201 else
202 twd_calibrate_rate();
203
204 __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
128 205
129 clk->name = "local_timer"; 206 clk->name = "local_timer";
130 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | 207 clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -132,13 +209,12 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
132 clk->rating = 350; 209 clk->rating = 350;
133 clk->set_mode = twd_set_mode; 210 clk->set_mode = twd_set_mode;
134 clk->set_next_event = twd_set_next_event; 211 clk->set_next_event = twd_set_next_event;
135 clk->shift = 20; 212
136 clk->mult = div_sc(twd_timer_rate, NSEC_PER_SEC, clk->shift); 213 __get_cpu_var(twd_ce) = clk;
137 clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk); 214
138 clk->min_delta_ns = clockevent_delta2ns(0xf, clk); 215 clockevents_config_and_register(clk, twd_timer_rate,
216 0xf, 0xffffffff);
139 217
140 /* Make sure our local interrupt controller has this enabled */ 218 /* Make sure our local interrupt controller has this enabled */
141 gic_enable_ppi(clk->irq); 219 gic_enable_ppi(clk->irq);
142
143 clockevents_register_device(clk);
144} 220}
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 40ee7e5045e..5f452f8fde0 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -183,7 +183,7 @@ static int swp_handler(struct pt_regs *regs, unsigned int instr)
183 unsigned int address, destreg, data, type; 183 unsigned int address, destreg, data, type;
184 unsigned int res = 0; 184 unsigned int res = 0;
185 185
186 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc); 186 perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->ARM_pc);
187 187
188 if (current->pid != previous_pid) { 188 if (current->pid != previous_pid) {
189 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n", 189 pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index f5cf660eefc..30e302d33e0 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -19,6 +19,8 @@
19#include "tcm.h" 19#include "tcm.h"
20 20
21static struct gen_pool *tcm_pool; 21static struct gen_pool *tcm_pool;
22static bool dtcm_present;
23static bool itcm_present;
22 24
23/* TCM section definitions from the linker */ 25/* TCM section definitions from the linker */
24extern char __itcm_start, __sitcm_text, __eitcm_text; 26extern char __itcm_start, __sitcm_text, __eitcm_text;
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len)
90} 92}
91EXPORT_SYMBOL(tcm_free); 93EXPORT_SYMBOL(tcm_free);
92 94
95bool tcm_dtcm_present(void)
96{
97 return dtcm_present;
98}
99EXPORT_SYMBOL(tcm_dtcm_present);
100
101bool tcm_itcm_present(void)
102{
103 return itcm_present;
104}
105EXPORT_SYMBOL(tcm_itcm_present);
106
93static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, 107static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
94 u32 *offset) 108 u32 *offset)
95{ 109{
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
134 (tcm_region & 1) ? "" : "not "); 148 (tcm_region & 1) ? "" : "not ");
135 } 149 }
136 150
151 /* Not much fun you can do with a size 0 bank */
152 if (tcm_size == 0)
153 return 0;
154
137 /* Force move the TCM bank to where we want it, enable */ 155 /* Force move the TCM bank to where we want it, enable */
138 tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; 156 tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
139 157
@@ -165,12 +183,20 @@ void __init tcm_init(void)
165 u32 tcm_status = read_cpuid_tcmstatus(); 183 u32 tcm_status = read_cpuid_tcmstatus();
166 u8 dtcm_banks = (tcm_status >> 16) & 0x03; 184 u8 dtcm_banks = (tcm_status >> 16) & 0x03;
167 u8 itcm_banks = (tcm_status & 0x03); 185 u8 itcm_banks = (tcm_status & 0x03);
186 size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
187 size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
168 char *start; 188 char *start;
169 char *end; 189 char *end;
170 char *ram; 190 char *ram;
171 int ret; 191 int ret;
172 int i; 192 int i;
173 193
194 /* Values greater than 2 for D/ITCM banks are "reserved" */
195 if (dtcm_banks > 2)
196 dtcm_banks = 0;
197 if (itcm_banks > 2)
198 itcm_banks = 0;
199
174 /* Setup DTCM if present */ 200 /* Setup DTCM if present */
175 if (dtcm_banks > 0) { 201 if (dtcm_banks > 0) {
176 for (i = 0; i < dtcm_banks; i++) { 202 for (i = 0; i < dtcm_banks; i++) {
@@ -178,6 +204,13 @@ void __init tcm_init(void)
178 if (ret) 204 if (ret)
179 return; 205 return;
180 } 206 }
207 /* This means you compiled more code than fits into DTCM */
208 if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
209 pr_info("CPU DTCM: %u bytes of code compiled to "
210 "DTCM but only %lu bytes of DTCM present\n",
211 dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
212 goto no_dtcm;
213 }
181 dtcm_res.end = dtcm_end - 1; 214 dtcm_res.end = dtcm_end - 1;
182 request_resource(&iomem_resource, &dtcm_res); 215 request_resource(&iomem_resource, &dtcm_res);
183 dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; 216 dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -186,12 +219,16 @@ void __init tcm_init(void)
186 start = &__sdtcm_data; 219 start = &__sdtcm_data;
187 end = &__edtcm_data; 220 end = &__edtcm_data;
188 ram = &__dtcm_start; 221 ram = &__dtcm_start;
189 /* This means you compiled more code than fits into DTCM */ 222 memcpy(start, ram, dtcm_code_sz);
190 BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); 223 pr_debug("CPU DTCM: copied data from %p - %p\n",
191 memcpy(start, ram, (end-start)); 224 start, end);
192 pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); 225 dtcm_present = true;
226 } else if (dtcm_code_sz) {
227 pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
228 "DTCM banks present in CPU\n", dtcm_code_sz);
193 } 229 }
194 230
231no_dtcm:
195 /* Setup ITCM if present */ 232 /* Setup ITCM if present */
196 if (itcm_banks > 0) { 233 if (itcm_banks > 0) {
197 for (i = 0; i < itcm_banks; i++) { 234 for (i = 0; i < itcm_banks; i++) {
@@ -199,6 +236,13 @@ void __init tcm_init(void)
199 if (ret) 236 if (ret)
200 return; 237 return;
201 } 238 }
239 /* This means you compiled more code than fits into ITCM */
240 if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
241 pr_info("CPU ITCM: %u bytes of code compiled to "
242 "ITCM but only %lu bytes of ITCM present\n",
243 itcm_code_sz, (itcm_end - ITCM_OFFSET));
244 return;
245 }
202 itcm_res.end = itcm_end - 1; 246 itcm_res.end = itcm_end - 1;
203 request_resource(&iomem_resource, &itcm_res); 247 request_resource(&iomem_resource, &itcm_res);
204 itcm_iomap[0].length = itcm_end - ITCM_OFFSET; 248 itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -207,10 +251,13 @@ void __init tcm_init(void)
207 start = &__sitcm_text; 251 start = &__sitcm_text;
208 end = &__eitcm_text; 252 end = &__eitcm_text;
209 ram = &__itcm_start; 253 ram = &__itcm_start;
210 /* This means you compiled more code than fits into ITCM */ 254 memcpy(start, ram, itcm_code_sz);
211 BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); 255 pr_debug("CPU ITCM: copied code from %p - %p\n",
212 memcpy(start, ram, (end-start)); 256 start, end);
213 pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); 257 itcm_present = true;
258 } else if (itcm_code_sz) {
259 pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
260 "ITCM banks present in CPU\n", itcm_code_sz);
214 } 261 }
215} 262}
216 263
@@ -221,7 +268,6 @@ void __init tcm_init(void)
221 */ 268 */
222static int __init setup_tcm_pool(void) 269static int __init setup_tcm_pool(void)
223{ 270{
224 u32 tcm_status = read_cpuid_tcmstatus();
225 u32 dtcm_pool_start = (u32) &__edtcm_data; 271 u32 dtcm_pool_start = (u32) &__edtcm_data;
226 u32 itcm_pool_start = (u32) &__eitcm_text; 272 u32 itcm_pool_start = (u32) &__eitcm_text;
227 int ret; 273 int ret;
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void)
236 pr_debug("Setting up TCM memory pool\n"); 282 pr_debug("Setting up TCM memory pool\n");
237 283
238 /* Add the rest of DTCM to the TCM pool */ 284 /* Add the rest of DTCM to the TCM pool */
239 if (tcm_status & (0x03 << 16)) { 285 if (dtcm_present) {
240 if (dtcm_pool_start < dtcm_end) { 286 if (dtcm_pool_start < dtcm_end) {
241 ret = gen_pool_add(tcm_pool, dtcm_pool_start, 287 ret = gen_pool_add(tcm_pool, dtcm_pool_start,
242 dtcm_end - dtcm_pool_start, -1); 288 dtcm_end - dtcm_pool_start, -1);
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void)
253 } 299 }
254 300
255 /* Add the rest of ITCM to the TCM pool */ 301 /* Add the rest of ITCM to the TCM pool */
256 if (tcm_status & 0x03) { 302 if (itcm_present) {
257 if (itcm_pool_start < itcm_end) { 303 if (itcm_pool_start < itcm_end) {
258 ret = gen_pool_add(tcm_pool, itcm_pool_start, 304 ret = gen_pool_add(tcm_pool, itcm_pool_start,
259 itcm_end - itcm_pool_start, -1); 305 itcm_end - itcm_pool_start, -1);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6807cb1e76d..4ef9f0d04e5 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -25,7 +25,7 @@
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27 27
28#include <asm/atomic.h> 28#include <linux/atomic.h>
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/unistd.h> 31#include <asm/unistd.h>
@@ -355,9 +355,24 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
355 pc = (void __user *)instruction_pointer(regs); 355 pc = (void __user *)instruction_pointer(regs);
356 356
357 if (processor_mode(regs) == SVC_MODE) { 357 if (processor_mode(regs) == SVC_MODE) {
358 instr = *(u32 *) pc; 358#ifdef CONFIG_THUMB2_KERNEL
359 if (thumb_mode(regs)) {
360 instr = ((u16 *)pc)[0];
361 if (is_wide_instruction(instr)) {
362 instr <<= 16;
363 instr |= ((u16 *)pc)[1];
364 }
365 } else
366#endif
367 instr = *(u32 *) pc;
359 } else if (thumb_mode(regs)) { 368 } else if (thumb_mode(regs)) {
360 get_user(instr, (u16 __user *)pc); 369 get_user(instr, (u16 __user *)pc);
370 if (is_wide_instruction(instr)) {
371 unsigned int instr2;
372 get_user(instr2, (u16 __user *)pc+1);
373 instr <<= 16;
374 instr |= instr2;
375 }
361 } else { 376 } else {
362 get_user(instr, (u32 __user *)pc); 377 get_user(instr, (u32 __user *)pc);
363 } 378 }
@@ -451,7 +466,9 @@ do_cache_op(unsigned long start, unsigned long end, int flags)
451 if (end > vma->vm_end) 466 if (end > vma->vm_end)
452 end = vma->vm_end; 467 end = vma->vm_end;
453 468
454 flush_cache_user_range(vma, start, end); 469 up_read(&mm->mmap_sem);
470 flush_cache_user_range(start, end);
471 return;
455 } 472 }
456 up_read(&mm->mmap_sem); 473 up_read(&mm->mmap_sem);
457} 474}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e5287f21bad..4e66f62b8d4 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -23,8 +23,10 @@
23 23
24#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) 24#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
25#define ARM_EXIT_KEEP(x) x 25#define ARM_EXIT_KEEP(x) x
26#define ARM_EXIT_DISCARD(x)
26#else 27#else
27#define ARM_EXIT_KEEP(x) 28#define ARM_EXIT_KEEP(x)
29#define ARM_EXIT_DISCARD(x) x
28#endif 30#endif
29 31
30OUTPUT_ARCH(arm) 32OUTPUT_ARCH(arm)
@@ -38,58 +40,12 @@ jiffies = jiffies_64 + 4;
38 40
39SECTIONS 41SECTIONS
40{ 42{
41#ifdef CONFIG_XIP_KERNEL
42 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
43#else
44 . = PAGE_OFFSET + TEXT_OFFSET;
45#endif
46
47 .init : { /* Init code and data */
48 _stext = .;
49 _sinittext = .;
50 HEAD_TEXT
51 INIT_TEXT
52 ARM_EXIT_KEEP(EXIT_TEXT)
53 _einittext = .;
54 ARM_CPU_DISCARD(PROC_INFO)
55 __arch_info_begin = .;
56 *(.arch.info.init)
57 __arch_info_end = .;
58 __tagtable_begin = .;
59 *(.taglist.init)
60 __tagtable_end = .;
61#ifdef CONFIG_SMP_ON_UP
62 __smpalt_begin = .;
63 *(.alt.smp.init)
64 __smpalt_end = .;
65#endif
66
67 __pv_table_begin = .;
68 *(.pv_table)
69 __pv_table_end = .;
70
71 INIT_SETUP(16)
72
73 INIT_CALLS
74 CON_INITCALL
75 SECURITY_INITCALL
76 INIT_RAM_FS
77
78#ifndef CONFIG_XIP_KERNEL
79 __init_begin = _stext;
80 INIT_DATA
81 ARM_EXIT_KEEP(EXIT_DATA)
82#endif
83 }
84
85 PERCPU_SECTION(32)
86
87#ifndef CONFIG_XIP_KERNEL
88 . = ALIGN(PAGE_SIZE);
89 __init_end = .;
90#endif
91
92 /* 43 /*
44 * XXX: The linker does not define how output sections are
45 * assigned to input sections when there are multiple statements
46 * matching the same input section name. There is no documented
47 * order of matching.
48 *
93 * unwind exit sections must be discarded before the rest of the 49 * unwind exit sections must be discarded before the rest of the
94 * unwind sections get included. 50 * unwind sections get included.
95 */ 51 */
@@ -98,6 +54,9 @@ SECTIONS
98 *(.ARM.extab.exit.text) 54 *(.ARM.extab.exit.text)
99 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) 55 ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text))
100 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) 56 ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text))
57 ARM_EXIT_DISCARD(EXIT_TEXT)
58 ARM_EXIT_DISCARD(EXIT_DATA)
59 EXIT_CALL
101#ifndef CONFIG_HOTPLUG 60#ifndef CONFIG_HOTPLUG
102 *(.ARM.exidx.devexit.text) 61 *(.ARM.exidx.devexit.text)
103 *(.ARM.extab.devexit.text) 62 *(.ARM.extab.devexit.text)
@@ -106,10 +65,24 @@ SECTIONS
106 *(.fixup) 65 *(.fixup)
107 *(__ex_table) 66 *(__ex_table)
108#endif 67#endif
68#ifndef CONFIG_SMP_ON_UP
69 *(.alt.smp.init)
70#endif
71 *(.discard)
72 *(.discard.*)
109 } 73 }
110 74
75#ifdef CONFIG_XIP_KERNEL
76 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
77#else
78 . = PAGE_OFFSET + TEXT_OFFSET;
79#endif
80 .head.text : {
81 _text = .;
82 HEAD_TEXT
83 }
111 .text : { /* Real text segment */ 84 .text : { /* Real text segment */
112 _text = .; /* Text and read-only data */ 85 _stext = .; /* Text and read-only data */
113 __exception_text_start = .; 86 __exception_text_start = .;
114 *(.exception.text) 87 *(.exception.text)
115 __exception_text_end = .; 88 __exception_text_end = .;
@@ -122,8 +95,6 @@ SECTIONS
122 *(.fixup) 95 *(.fixup)
123#endif 96#endif
124 *(.gnu.warning) 97 *(.gnu.warning)
125 *(.rodata)
126 *(.rodata.*)
127 *(.glue_7) 98 *(.glue_7)
128 *(.glue_7t) 99 *(.glue_7t)
129 . = ALIGN(4); 100 . = ALIGN(4);
@@ -152,10 +123,63 @@ SECTIONS
152 123
153 _etext = .; /* End of text and rodata section */ 124 _etext = .; /* End of text and rodata section */
154 125
126#ifndef CONFIG_XIP_KERNEL
127 . = ALIGN(PAGE_SIZE);
128 __init_begin = .;
129#endif
130
131 INIT_TEXT_SECTION(8)
132 .exit.text : {
133 ARM_EXIT_KEEP(EXIT_TEXT)
134 }
135 .init.proc.info : {
136 ARM_CPU_DISCARD(PROC_INFO)
137 }
138 .init.arch.info : {
139 __arch_info_begin = .;
140 *(.arch.info.init)
141 __arch_info_end = .;
142 }
143 .init.tagtable : {
144 __tagtable_begin = .;
145 *(.taglist.init)
146 __tagtable_end = .;
147 }
148#ifdef CONFIG_SMP_ON_UP
149 .init.smpalt : {
150 __smpalt_begin = .;
151 *(.alt.smp.init)
152 __smpalt_end = .;
153 }
154#endif
155 .init.pv_table : {
156 __pv_table_begin = .;
157 *(.pv_table)
158 __pv_table_end = .;
159 }
160 .init.data : {
161#ifndef CONFIG_XIP_KERNEL
162 INIT_DATA
163#endif
164 INIT_SETUP(16)
165 INIT_CALLS
166 CON_INITCALL
167 SECURITY_INITCALL
168 INIT_RAM_FS
169 }
170#ifndef CONFIG_XIP_KERNEL
171 .exit.data : {
172 ARM_EXIT_KEEP(EXIT_DATA)
173 }
174#endif
175
176 PERCPU_SECTION(32)
177
155#ifdef CONFIG_XIP_KERNEL 178#ifdef CONFIG_XIP_KERNEL
156 __data_loc = ALIGN(4); /* location in binary */ 179 __data_loc = ALIGN(4); /* location in binary */
157 . = PAGE_OFFSET + TEXT_OFFSET; 180 . = PAGE_OFFSET + TEXT_OFFSET;
158#else 181#else
182 __init_end = .;
159 . = ALIGN(THREAD_SIZE); 183 . = ALIGN(THREAD_SIZE);
160 __data_loc = .; 184 __data_loc = .;
161#endif 185#endif
@@ -267,15 +291,6 @@ SECTIONS
267 291
268 STABS_DEBUG 292 STABS_DEBUG
269 .comment 0 : { *(.comment) } 293 .comment 0 : { *(.comment) }
270
271 /* Default discards */
272 DISCARDS
273
274#ifndef CONFIG_SMP_ON_UP
275 /DISCARD/ : {
276 *(.alt.smp.init)
277 }
278#endif
279} 294}
280 295
281/* 296/*