author	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 15:02:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 15:02:27 -0400
commit	1fdb24e969110fafea36d3b393bea438f702c87f (patch)
tree	47a1dfef8a259e7922285315f8a02d31b4efe2f1 /arch/arm/kernel
parent	f362f98e7c445643d27c610bb7a86b79727b592e (diff)
parent	531a6a941745e1e045dd2a6bd09e1dc01247a5f3 (diff)
Merge branch 'devel-stable' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm
* 'devel-stable' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm: (178 commits)
  ARM: 7139/1: fix compilation with CONFIG_ARM_ATAG_DTB_COMPAT and large TEXT_OFFSET
  ARM: gic, local timers: use the request_percpu_irq() interface
  ARM: gic: consolidate PPI handling
  ARM: switch from NO_MACH_MEMORY_H to NEED_MACH_MEMORY_H
  ARM: mach-s5p64x0: remove mach/memory.h
  ARM: mach-s3c64xx: remove mach/memory.h
  ARM: plat-mxc: remove mach/memory.h
  ARM: mach-prima2: remove mach/memory.h
  ARM: mach-zynq: remove mach/memory.h
  ARM: mach-bcmring: remove mach/memory.h
  ARM: mach-davinci: remove mach/memory.h
  ARM: mach-pxa: remove mach/memory.h
  ARM: mach-ixp4xx: remove mach/memory.h
  ARM: mach-h720x: remove mach/memory.h
  ARM: mach-vt8500: remove mach/memory.h
  ARM: mach-s5pc100: remove mach/memory.h
  ARM: mach-tegra: remove mach/memory.h
  ARM: plat-tcc: remove mach/memory.h
  ARM: mach-mmp: remove mach/memory.h
  ARM: mach-cns3xxx: remove mach/memory.h
  ...

Fix up mostly pretty trivial conflicts in:
 - arch/arm/Kconfig
 - arch/arm/include/asm/localtimer.h
 - arch/arm/kernel/Makefile
 - arch/arm/mach-shmobile/board-ap4evb.c
 - arch/arm/mach-u300/core.c
 - arch/arm/mm/dma-mapping.c
 - arch/arm/mm/proc-v7.S
 - arch/arm/plat-omap/Kconfig
largely due to some CONFIG option renaming (ie CONFIG_PM_SLEEP -> CONFIG_ARM_CPU_SUSPEND for the arm-specific suspend code etc) and addition of NEED_MACH_MEMORY_H next to HAVE_IDE.
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/Makefile              |    9
-rw-r--r--  arch/arm/kernel/debug.S               |    4
-rw-r--r--  arch/arm/kernel/head.S                |    4
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c       |  275
-rw-r--r--  arch/arm/kernel/irq.c                 |    3
-rw-r--r--  arch/arm/kernel/kprobes-arm.c         |    4
-rw-r--r--  arch/arm/kernel/kprobes-test-arm.c    | 1323
-rw-r--r--  arch/arm/kernel/kprobes-test-thumb.c  | 1187
-rw-r--r--  arch/arm/kernel/kprobes-test.c        | 1748
-rw-r--r--  arch/arm/kernel/kprobes-test.h        |  392
-rw-r--r--  arch/arm/kernel/kprobes-thumb.c       |    7
-rw-r--r--  arch/arm/kernel/kprobes.h             |    8
-rw-r--r--  arch/arm/kernel/perf_event.c          |  475
-rw-r--r--  arch/arm/kernel/perf_event_v6.c       |   87
-rw-r--r--  arch/arm/kernel/perf_event_v7.c       |  395
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c   |   90
-rw-r--r--  arch/arm/kernel/pmu.c                 |  182
-rw-r--r--  arch/arm/kernel/setup.c               |   21
-rw-r--r--  arch/arm/kernel/sleep.S               |   85
-rw-r--r--  arch/arm/kernel/smp.c                 |   38
-rw-r--r--  arch/arm/kernel/smp_twd.c             |   47
-rw-r--r--  arch/arm/kernel/suspend.c             |   72
22 files changed, 5562 insertions(+), 894 deletions(-)
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 68036eece340..16eed6aebfa4 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -29,7 +29,7 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
 obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
-obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o
+obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
 obj-$(CONFIG_HAVE_SCHED_CLOCK)	+= sched_clock.o
 obj-$(CONFIG_SMP)		+= smp.o smp_tlb.o
 obj-$(CONFIG_HAVE_ARM_SCU)	+= smp_scu.o
@@ -43,6 +43,13 @@ obj-$(CONFIG_KPROBES) += kprobes-thumb.o
 else
 obj-$(CONFIG_KPROBES)		+= kprobes-arm.o
 endif
+obj-$(CONFIG_ARM_KPROBES_TEST)	+= test-kprobes.o
+test-kprobes-objs		:= kprobes-test.o
+ifdef CONFIG_THUMB2_KERNEL
+test-kprobes-objs		+= kprobes-test-thumb.o
+else
+test-kprobes-objs		+= kprobes-test-arm.o
+endif
 obj-$(CONFIG_ATAGS_PROC)	+= atags.o
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S
index 0f852d082fcf..204e2160cfcc 100644
--- a/arch/arm/kernel/debug.S
+++ b/arch/arm/kernel/debug.S
@@ -22,7 +22,7 @@
 #if defined(CONFIG_DEBUG_ICEDCC)
 		@@ debug using ARM EmbeddedICE DCC channel
 
-		.macro	addruart, rp, rv
+		.macro	addruart, rp, rv, tmp
 		.endm
 
 #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) || defined(CONFIG_CPU_V7)
@@ -106,7 +106,7 @@
 
 #ifdef CONFIG_MMU
 		.macro	addruart_current, rx, tmp1, tmp2
-		addruart	\tmp1, \tmp2
+		addruart	\tmp1, \tmp2, \rx
 		mrc	p15, 0, \rx, c1, c0
 		tst	\rx, #1
 		moveq	\rx, \tmp1
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 239703dbdf4f..566c54c2a1fe 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -99,7 +99,7 @@ ENTRY(stext)
 	sub	r4, r3, r4			@ (PHYS_OFFSET - PAGE_OFFSET)
 	add	r8, r8, r4			@ PHYS_OFFSET
 #else
-	ldr	r8, =PLAT_PHYS_OFFSET
+	ldr	r8, =PHYS_OFFSET		@ always constant in this case
 #endif
 
 	/*
@@ -238,7 +238,7 @@ __create_page_tables:
 	 * This allows debug messages to be output
 	 * via a serial console before paging_init.
 	 */
-	addruart r7, r3
+	addruart r7, r3, r0
 
 	mov	r3, r3, lsr #SECTION_SHIFT
 	mov	r3, r3, lsl #PMD_ORDER
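
The PHYS_OFFSET arithmetic in the head.S hunks above is easier to follow in C. Below is a minimal userspace sketch of the fixed-offset linear mapping the boot code is priming r8 for, assuming a constant PHYS_OFFSET/PAGE_OFFSET pair (i.e. no runtime phys/virt patching); the constants are illustrative, not taken from any particular platform.

/* Sketch of the ARM linear-map translation head.S sets up.
 * r4 in head.S holds (PHYS_OFFSET - PAGE_OFFSET); adding it to a
 * kernel virtual address yields the physical address loaded into r8.
 * PAGE_OFFSET/PHYS_OFFSET values here are examples only.
 */
#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL	/* typical 3G/1G split */
#define PHYS_OFFSET 0x80000000UL	/* example RAM base */

static unsigned long virt_to_phys(unsigned long va)
{
	return va - PAGE_OFFSET + PHYS_OFFSET;
}

static unsigned long phys_to_virt(unsigned long pa)
{
	return pa - PHYS_OFFSET + PAGE_OFFSET;
}

int main(void)
{
	printf("0x%lx\n", virt_to_phys(0xC0008000UL)); /* -> 0x80008000 */
	printf("0x%lx\n", phys_to_virt(0x80008000UL)); /* -> 0xC0008000 */
	return 0;
}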
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index a927ca1f5566..814a52a9dc39 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -45,7 +45,6 @@ static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
 
 /* Number of BRP/WRP registers on this CPU. */
 static int core_num_brps;
-static int core_num_reserved_brps;
 static int core_num_wrps;
 
 /* Debug architecture version. */
@@ -137,10 +136,11 @@ static u8 get_debug_arch(void)
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
-	    "CPUID feature registers not supported. "
-	    "Assuming v6 debug is present.\n"))
+	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
+		pr_warning("CPUID feature registers not supported. "
+			   "Assuming v6 debug is present.\n");
 		return ARM_DEBUG_ARCH_V6;
+	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -154,10 +154,21 @@ u8 arch_get_debug_arch(void)
 static int debug_arch_supported(void)
 {
 	u8 arch = get_debug_arch();
-	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+
+	/* We don't support the memory-mapped interface. */
+	return (arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14) ||
+		arch >= ARM_DEBUG_ARCH_V7_1;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrp_resources(void)
+{
+	u32 didr;
+	ARM_DBG_READ(c0, 0, didr);
+	return ((didr >> 28) & 0xf) + 1;
 }
 
-/* Determine number of BRP register available. */
+/* Determine number of BRP registers available. */
 static int get_num_brp_resources(void)
 {
 	u32 didr;
@@ -176,9 +187,10 @@ static int core_has_mismatch_brps(void)
 static int get_num_wrps(void)
 {
 	/*
-	 * FIXME: When a watchpoint fires, the only way to work out which
-	 * watchpoint it was is by disassembling the faulting instruction
-	 * and working out the address of the memory access.
+	 * On debug architectures prior to 7.1, when a watchpoint fires, the
+	 * only way to work out which watchpoint it was is by disassembling
+	 * the faulting instruction and working out the address of the memory
+	 * access.
 	 *
 	 * Furthermore, we can only do this if the watchpoint was precise
 	 * since imprecise watchpoints prevent us from calculating register
@@ -192,36 +204,17 @@ static int get_num_wrps(void)
 	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
 	 * that it is set on some implementations].
 	 */
+	if (get_debug_arch() < ARM_DEBUG_ARCH_V7_1)
+		return 1;
 
-#if 0
-	int wrps;
-	u32 didr;
-	ARM_DBG_READ(c0, 0, didr);
-	wrps = ((didr >> 28) & 0xf) + 1;
-#endif
-	int wrps = 1;
-
-	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
-		wrps = get_num_brp_resources() - 1;
-
-	return wrps;
-}
-
-/* We reserve one breakpoint for each watchpoint. */
-static int get_num_reserved_brps(void)
-{
-	if (core_has_mismatch_brps())
-		return get_num_wrps();
-	return 0;
-}
+	return get_num_wrp_resources();
 }
 
 /* Determine number of usable BRPs available. */
 static int get_num_brps(void)
 {
 	int brps = get_num_brp_resources();
-	if (core_has_mismatch_brps())
-		brps -= get_num_reserved_brps();
-	return brps;
+	return core_has_mismatch_brps() ? brps - 1 : brps;
 }
 
 /*
@@ -239,7 +232,7 @@ static int enable_monitor_mode(void)
 
 	/* Ensure that halting mode is disabled. */
 	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN,
-		      "halting debug mode enabled. Unable to access hardware resources.\n")) {
+		"halting debug mode enabled. Unable to access hardware resources.\n")) {
 		ret = -EPERM;
 		goto out;
 	}
@@ -255,6 +248,7 @@ static int enable_monitor_mode(void)
 		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	case ARM_DEBUG_ARCH_V7_ECP14:
+	case ARM_DEBUG_ARCH_V7_1:
 		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
 		break;
 	default:
@@ -346,24 +340,10 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		val_base = ARM_BASE_BVR;
 		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
 		max_slots = core_num_brps;
-		if (info->step_ctrl.enabled) {
-			/* Override the breakpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		}
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled) {
-			/* Install into the reserved breakpoint region. */
-			ctrl_base = ARM_BASE_BCR + core_num_brps;
-			val_base = ARM_BASE_BVR + core_num_brps;
-			/* Override the watchpoint data with the step data. */
-			addr = info->trigger & ~0x3;
-			ctrl = encode_ctrl_reg(info->step_ctrl);
-		} else {
-			ctrl_base = ARM_BASE_WCR;
-			val_base = ARM_BASE_WVR;
-		}
+		ctrl_base = ARM_BASE_WCR;
+		val_base = ARM_BASE_WVR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -382,6 +362,17 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		goto out;
 	}
 
+	/* Override the breakpoint data with the step data. */
+	if (info->step_ctrl.enabled) {
+		addr = info->trigger & ~0x3;
+		ctrl = encode_ctrl_reg(info->step_ctrl);
+		if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE) {
+			i = 0;
+			ctrl_base = ARM_BASE_BCR + core_num_brps;
+			val_base = ARM_BASE_BVR + core_num_brps;
+		}
+	}
+
 	/* Setup the address register. */
 	write_wb_reg(val_base + i, addr);
 
@@ -405,10 +396,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
-		if (info->step_ctrl.enabled)
-			base = ARM_BASE_BCR + core_num_brps;
-		else
-			base = ARM_BASE_WCR;
+		base = ARM_BASE_WCR;
 		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 		max_slots = core_num_wrps;
 	}
@@ -426,6 +414,13 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot\n"))
 		return;
 
+	/* Ensure that we disable the mismatch breakpoint. */
+	if (info->ctrl.type != ARM_BREAKPOINT_EXECUTE &&
+	    info->step_ctrl.enabled) {
+		i = 0;
+		base = ARM_BASE_BCR + core_num_brps;
+	}
+
 	/* Reset the control register. */
 	write_wb_reg(base + i, 0);
 }
@@ -632,10 +627,9 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
 	 * we can use the mismatch feature as a poor-man's hardware
 	 * single-step, but this only works for per-task breakpoints.
 	 */
-	if (WARN_ONCE(!bp->overflow_handler &&
-	    (arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
-	     || !bp->hw.bp_target),
-	    "overflow handler required but none found\n")) {
+	if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
+	    !core_has_mismatch_brps() || !bp->hw.bp_target)) {
+		pr_warning("overflow handler required but none found\n");
 		ret = -EINVAL;
 	}
 out:
@@ -666,34 +660,62 @@ static void disable_single_step(struct perf_event *bp)
 	arch_install_hw_breakpoint(bp);
 }
 
-static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
+static void watchpoint_handler(unsigned long addr, unsigned int fsr,
+			       struct pt_regs *regs)
 {
-	int i;
+	int i, access;
+	u32 val, ctrl_reg, alignment_mask;
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;
+	struct arch_hw_breakpoint_ctrl ctrl;
 
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	/* Without a disassembler, we can only handle 1 watchpoint. */
-	BUG_ON(core_num_wrps > 1);
-
 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
 		wp = slots[i];
 
-		if (wp == NULL) {
-			rcu_read_unlock();
-			continue;
-		}
+		if (wp == NULL)
+			goto unlock;
 
+		info = counter_arch_bp(wp);
 		/*
-		 * The DFAR is an unknown value. Since we only allow a
-		 * single watchpoint, we can set the trigger to the lowest
-		 * possible faulting address.
+		 * The DFAR is an unknown value on debug architectures prior
+		 * to 7.1. Since we only allow a single watchpoint on these
+		 * older CPUs, we can set the trigger to the lowest possible
+		 * faulting address.
 		 */
-		info = counter_arch_bp(wp);
-		info->trigger = wp->attr.bp_addr;
+		if (debug_arch < ARM_DEBUG_ARCH_V7_1) {
+			BUG_ON(i > 0);
+			info->trigger = wp->attr.bp_addr;
+		} else {
+			if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
+				alignment_mask = 0x7;
+			else
+				alignment_mask = 0x3;
+
+			/* Check if the watchpoint value matches. */
+			val = read_wb_reg(ARM_BASE_WVR + i);
+			if (val != (addr & ~alignment_mask))
+				goto unlock;
+
+			/* Possible match, check the byte address select. */
+			ctrl_reg = read_wb_reg(ARM_BASE_WCR + i);
+			decode_ctrl_reg(ctrl_reg, &ctrl);
+			if (!((1 << (addr & alignment_mask)) & ctrl.len))
+				goto unlock;
+
+			/* Check that the access type matches. */
+			access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
+				 HW_BREAKPOINT_R;
+			if (!(access & hw_breakpoint_type(wp)))
+				goto unlock;
+
+			/* We have a winner. */
+			info->trigger = addr;
+		}
+
 		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
 		perf_bp_event(wp, regs);
 
@@ -705,6 +727,7 @@ static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
 		if (!wp->overflow_handler)
 			enable_single_step(wp, instruction_pointer(regs));
 
+unlock:
 		rcu_read_unlock();
 	}
 }
@@ -717,7 +740,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 
 	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
 
-	for (i = 0; i < core_num_reserved_brps; ++i) {
+	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
 
 		wp = slots[i];
@@ -820,7 +843,7 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
 		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 	case ARM_ENTRY_SYNC_WATCHPOINT:
-		watchpoint_handler(addr, regs);
+		watchpoint_handler(addr, fsr, regs);
 		break;
 	default:
 		ret = 1; /* Unhandled fault. */
@@ -834,11 +857,31 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *info)
+static cpumask_t debug_err_mask;
+
+static int debug_reg_trap(struct pt_regs *regs, unsigned int instr)
 {
-	int i, cpu = smp_processor_id();
+	int cpu = smp_processor_id();
+
+	pr_warning("Debug register access (0x%x) caused undefined instruction on CPU %d\n",
+		   instr, cpu);
+
+	/* Set the error flag for this CPU and skip the faulting instruction. */
+	cpumask_set_cpu(cpu, &debug_err_mask);
+	instruction_pointer(regs) += 4;
+	return 0;
+}
+
+static struct undef_hook debug_reg_hook = {
+	.instr_mask	= 0x0fe80f10,
+	.instr_val	= 0x0e000e10,
+	.fn		= debug_reg_trap,
+};
+
+static void reset_ctrl_regs(void *unused)
+{
+	int i, raw_num_brps, err = 0, cpu = smp_processor_id();
 	u32 dbg_power;
-	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
@@ -848,38 +891,57 @@ static void reset_ctrl_regs(void *info)
 	 * Access Register to avoid taking undefined instruction exceptions
 	 * later on.
 	 */
-	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
+	switch (debug_arch) {
+	case ARM_DEBUG_ARCH_V6:
+	case ARM_DEBUG_ARCH_V6_1:
+		/* ARMv6 cores just need to reset the registers. */
+		goto reset_regs;
+	case ARM_DEBUG_ARCH_V7_ECP14:
 		/*
 		 * Ensure sticky power-down is clear (i.e. debug logic is
 		 * powered up).
 		 */
 		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
-		if ((dbg_power & 0x1) == 0) {
-			pr_warning("CPU %d debug is powered down!\n", cpu);
-			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
-			return;
-		}
-
+		if ((dbg_power & 0x1) == 0)
+			err = -EPERM;
+		break;
+	case ARM_DEBUG_ARCH_V7_1:
 		/*
-		 * Unconditionally clear the lock by writing a value
-		 * other than 0xC5ACCE55 to the access register.
+		 * Ensure the OS double lock is clear.
 		 */
-		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
-		isb();
+		asm volatile("mrc p14, 0, %0, c1, c3, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 1)
+			err = -EPERM;
+		break;
+	}
 
-		/*
-		 * Clear any configured vector-catch events before
-		 * enabling monitor mode.
-		 */
-		asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
-		isb();
+	if (err) {
+		pr_warning("CPU %d debug is powered down!\n", cpu);
+		cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
+		return;
 	}
 
+	/*
+	 * Unconditionally clear the lock by writing a value
+	 * other than 0xC5ACCE55 to the access register.
+	 */
+	asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
+	isb();
+
+	/*
+	 * Clear any configured vector-catch events before
+	 * enabling monitor mode.
+	 */
+	asm volatile("mcr p14, 0, %0, c0, c7, 0" : : "r" (0));
+	isb();
+
+reset_regs:
 	if (enable_monitor_mode())
 		return;
 
 	/* We must also reset any reserved registers. */
-	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
+	raw_num_brps = get_num_brp_resources();
+	for (i = 0; i < raw_num_brps; ++i) {
 		write_wb_reg(ARM_BASE_BCR + i, 0UL);
 		write_wb_reg(ARM_BASE_BVR + i, 0UL);
 	}
@@ -895,6 +957,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
 {
 	if (action == CPU_ONLINE)
 		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
+
 	return NOTIFY_OK;
 }
 
@@ -905,7 +968,6 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
-	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
@@ -916,28 +978,31 @@ static int __init arch_hw_breakpoint_init(void)
 
 	/* Determine how many BRPs/WRPs are available. */
 	core_num_brps = get_num_brps();
-	core_num_reserved_brps = get_num_reserved_brps();
 	core_num_wrps = get_num_wrps();
 
-	pr_info("found %d breakpoint and %d watchpoint registers.\n",
-		core_num_brps + core_num_reserved_brps, core_num_wrps);
-
-	if (core_num_reserved_brps)
-		pr_info("%d breakpoint(s) reserved for watchpoint "
-				"single-step.\n", core_num_reserved_brps);
+	/*
+	 * We need to tread carefully here because DBGSWENABLE may be
+	 * driven low on this core and there isn't an architected way to
+	 * determine that.
+	 */
+	register_undef_hook(&debug_reg_hook);
 
 	/*
 	 * Reset the breakpoint resources. We assume that a halting
 	 * debugger will leave the world in a nice state for us.
 	 */
-	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
-	if (!cpumask_empty(&cpumask)) {
+	on_each_cpu(reset_ctrl_regs, NULL, 1);
+	unregister_undef_hook(&debug_reg_hook);
+	if (!cpumask_empty(&debug_err_mask)) {
 		core_num_brps = 0;
-		core_num_reserved_brps = 0;
 		core_num_wrps = 0;
 		return 0;
 	}
 
+	pr_info("found %d " "%s" "breakpoint and %d watchpoint registers.\n",
+		core_num_brps, core_has_mismatch_brps() ? "(+1 reserved) " :
+		"", core_num_wrps);
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
 		max_watchpoint_len = 4;
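
The v7.1 watchpoint-matching logic added to watchpoint_handler() above performs three checks before claiming a fault. A standalone sketch of the same checks follows, with plain parameters standing in for the kernel's WVR/WCR register accessors and arch_hw_breakpoint state; the constants are illustrative, not the kernel's definitions.

/* Userspace sketch of the v7.1 watchpoint match from watchpoint_handler().
 * wvr  - watched address from the value register (WVR)
 * bas  - byte-address-select bits decoded from the control register (WCR)
 * type - armed access type (read, write, or both)
 */
#include <stdbool.h>
#include <stdio.h>

#define HW_BREAKPOINT_R 1
#define HW_BREAKPOINT_W 2

static bool watchpoint_matches(unsigned long addr, bool write_fault,
			       unsigned long wvr, unsigned bas, int type,
			       unsigned alignment_mask /* 0x3 or 0x7 */)
{
	/* 1. The faulting address must fall in the watched word/dword. */
	if (wvr != (addr & ~(unsigned long)alignment_mask))
		return false;
	/* 2. The byte within it must be selected by the BAS bits. */
	if (!((1u << (addr & alignment_mask)) & bas))
		return false;
	/* 3. The access direction must be one we armed for. */
	int access = write_fault ? HW_BREAKPOINT_W : HW_BREAKPOINT_R;
	return (access & type) != 0;
}

int main(void)
{
	/* 4-byte watchpoint on 0x1000, all bytes selected, writes only:
	 * a write to 0x1002 matches (bit 2 of the BAS), a read does not. */
	printf("%d\n", watchpoint_matches(0x1002, true, 0x1000, 0xf,
					  HW_BREAKPOINT_W, 0x3)); /* 1 */
	printf("%d\n", watchpoint_matches(0x1002, false, 0x1000, 0xf,
					  HW_BREAKPOINT_W, 0x3)); /* 0 */
	return 0;
}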
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 53919b230e8b..7cb29261249a 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -59,9 +59,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 #ifdef CONFIG_SMP
 	show_ipi_list(p, prec);
 #endif
-#ifdef CONFIG_LOCAL_TIMERS
-	show_local_irqs(p, prec);
-#endif
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
index 79203ee1d039..9fe8910308af 100644
--- a/arch/arm/kernel/kprobes-arm.c
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -60,6 +60,7 @@
 
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
+#include <linux/module.h>
 
 #include "kprobes.h"
 
@@ -971,6 +972,9 @@ const union decode_item kprobe_decode_arm_table[] = {
 
 	DECODE_END
 };
+#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
+EXPORT_SYMBOL_GPL(kprobe_decode_arm_table);
+#endif
 
 static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
 {
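
The export above exists so the test code in the following files can walk kprobe_decode_arm_table when built as a module. Those files generate their cases almost entirely from stringifying macros (TEST, TEST_RR and friends from kprobes-test.h, plus the TEST_ARM_TO_THUMB_INTERWORK_* helpers defined below). A minimal illustration of the stringification idiom they rely on, using a hypothetical MINI_TEST macro rather than the real harness:

/* Illustration of the stringification trick used by the TEST_* macros:
 * "add r0, r" #reg ", #4" glues a register number into an asm template
 * via # and adjacent string-literal concatenation. MINI_TEST is made up
 * for this sketch; the real macros also marshal argument values and
 * pass/fail verdicts through kprobes-test.h.
 */
#include <stdio.h>

#define MINI_TEST(code1, reg, code2) \
	puts(code1 #reg code2);

int main(void)
{
	/* Expands to the single string "add r0, r7, #4". */
	MINI_TEST("add r0, r", 7, ", #4")
	return 0;
}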
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
new file mode 100644
index 000000000000..fc82de8bdcce
--- /dev/null
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -0,0 +1,1323 @@
+/*
+ * arch/arm/kernel/kprobes-test-arm.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include "kprobes-test.h"
+
+
+#define TEST_ISA "32"
+
+#define TEST_ARM_TO_THUMB_INTERWORK_R(code1, reg, val, code2) \
+	TESTCASE_START(code1 #reg code2) \
+	TEST_ARG_REG(reg, val) \
+	TEST_ARG_REG(14, 99f) \
+	TEST_ARG_END("") \
+	"50: nop \n\t" \
+	"1: "code1 #reg code2" \n\t" \
+	" bx lr \n\t" \
+	".thumb \n\t" \
+	"3: adr lr, 2f \n\t" \
+	" bx lr \n\t" \
+	".arm \n\t" \
+	"2: nop \n\t" \
+	TESTCASE_END
+
+#define TEST_ARM_TO_THUMB_INTERWORK_P(code1, reg, val, code2) \
+	TESTCASE_START(code1 #reg code2) \
+	TEST_ARG_PTR(reg, val) \
+	TEST_ARG_REG(14, 99f) \
+	TEST_ARG_MEM(15, 3f+1) \
+	TEST_ARG_END("") \
+	"50: nop \n\t" \
+	"1: "code1 #reg code2" \n\t" \
+	" bx lr \n\t" \
+	".thumb \n\t" \
+	"3: adr lr, 2f \n\t" \
+	" bx lr \n\t" \
+	".arm \n\t" \
+	"2: nop \n\t" \
+	TESTCASE_END
+
+
+void kprobe_arm_test_cases(void)
+{
+	kprobe_test_flags = 0;
+
+	TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)")
+
+#define _DATA_PROCESSING_DNM(op,s,val) \
+	TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "") \
+	TEST_RR( op "ne" s " r1, r",1, VAL1,", r",2, val, ", lsl #3") \
+	TEST_RR( op "cs" s " r2, r",3, VAL1,", r",2, val, ", lsr #4") \
+	TEST_RR( op "cc" s " r3, r",3, VAL1,", r",2, val, ", asr #5") \
+	TEST_RR( op "mi" s " r4, r",5, VAL1,", r",2, N(val),", asr #6") \
+	TEST_RR( op "pl" s " r5, r",5, VAL1,", r",2, val, ", ror #7") \
+	TEST_RR( op "vs" s " r6, r",7, VAL1,", r",2, val, ", rrx") \
+	TEST_R( op "vc" s " r6, r",7, VAL1,", pc, lsl #3") \
+	TEST_R( op "vc" s " r6, r",7, VAL1,", sp, lsr #4") \
+	TEST_R( op "vc" s " r6, pc, r",7, VAL1,", asr #5") \
+	TEST_R( op "vc" s " r6, sp, r",7, VAL1,", ror #6") \
+	TEST_RRR( op "hi" s " r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\
+	TEST_RRR( op "ls" s " r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\
+	TEST_RRR( op "ge" s " r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\
+	TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
+	TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\
+	TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\
+	TEST_RR( op s " r12, pc" ", r",14,val, ", ror r",14,7,"")\
+	TEST_RR( op s " r14, r",0, val, ", pc" ", lsl r",14,8,"")\
+	TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \
+	TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \
+	TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \
+	TEST( op s " r4, pc" ", #0x00005a00")
+
+#define DATA_PROCESSING_DNM(op,val) \
+	_DATA_PROCESSING_DNM(op,"",val) \
+	_DATA_PROCESSING_DNM(op,"s",val)
+
+#define DATA_PROCESSING_NM(op,val) \
+	TEST_RR( op "ne r",1, VAL1,", r",2, val, "") \
+	TEST_RR( op "eq r",1, VAL1,", r",2, val, ", lsl #3") \
+	TEST_RR( op "cc r",3, VAL1,", r",2, val, ", lsr #4") \
+	TEST_RR( op "cs r",3, VAL1,", r",2, val, ", asr #5") \
+	TEST_RR( op "pl r",5, VAL1,", r",2, N(val),", asr #6") \
+	TEST_RR( op "mi r",5, VAL1,", r",2, val, ", ror #7") \
+	TEST_RR( op "vc r",7, VAL1,", r",2, val, ", rrx") \
+	TEST_R ( op "vs r",7, VAL1,", pc, lsl #3") \
+	TEST_R ( op "vs r",7, VAL1,", sp, lsr #4") \
+	TEST_R( op "vs pc, r",7, VAL1,", asr #5") \
+	TEST_R( op "vs sp, r",7, VAL1,", ror #6") \
+	TEST_RRR( op "ls r",9, VAL1,", r",14,val, ", lsl r",0, 3,"") \
+	TEST_RRR( op "hi r",9, VAL1,", r",14,val, ", lsr r",7, 4,"") \
+	TEST_RRR( op "lt r",11,VAL1,", r",14,val, ", asr r",7, 5,"") \
+	TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \
+	TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \
+	TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \
+	TEST_RR( op " pc" ", r",14,val, ", ror r",14,7,"") \
+	TEST_RR( op " r",0, val, ", pc" ", lsl r",14,8,"") \
+	TEST_R( op "eq r",11,VAL1,", #0xf5") \
+	TEST_R( op "ne r",0, VAL1,", #0xf5000000") \
+	TEST_R( op " r",8, VAL2,", #0x000af000")
+
+#define _DATA_PROCESSING_DM(op,s,val) \
+	TEST_R( op "eq" s " r0, r",1, val, "") \
+	TEST_R( op "ne" s " r1, r",1, val, ", lsl #3") \
+	TEST_R( op "cs" s " r2, r",3, val, ", lsr #4") \
+	TEST_R( op "cc" s " r3, r",3, val, ", asr #5") \
+	TEST_R( op "mi" s " r4, r",5, N(val),", asr #6") \
+	TEST_R( op "pl" s " r5, r",5, val, ", ror #7") \
+	TEST_R( op "vs" s " r6, r",10,val, ", rrx") \
+	TEST( op "vs" s " r7, pc, lsl #3") \
+	TEST( op "vs" s " r7, sp, lsr #4") \
+	TEST_RR( op "vc" s " r8, r",7, val, ", lsl r",0, 3,"") \
+	TEST_RR( op "hi" s " r9, r",9, val, ", lsr r",7, 4,"") \
+	TEST_RR( op "ls" s " r10, r",9, val, ", asr r",7, 5,"") \
+	TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \
+	TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \
+	TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \
+	TEST_R( op "le" s " r14, pc" ", lsl r",14,8,"") \
+	TEST( op "eq" s " r0, #0xf5") \
+	TEST( op "ne" s " r11, #0xf5000000") \
+	TEST( op s " r7, #0x000af000") \
+	TEST( op s " r4, #0x00005a00")
+
+#define DATA_PROCESSING_DM(op,val) \
+	_DATA_PROCESSING_DM(op,"",val) \
+	_DATA_PROCESSING_DM(op,"s",val)
+
136 DATA_PROCESSING_DNM("and",0xf00f00ff)
137 DATA_PROCESSING_DNM("eor",0xf00f00ff)
138 DATA_PROCESSING_DNM("sub",VAL2)
139 DATA_PROCESSING_DNM("rsb",VAL2)
140 DATA_PROCESSING_DNM("add",VAL2)
141 DATA_PROCESSING_DNM("adc",VAL2)
142 DATA_PROCESSING_DNM("sbc",VAL2)
143 DATA_PROCESSING_DNM("rsc",VAL2)
144 DATA_PROCESSING_NM("tst",0xf00f00ff)
145 DATA_PROCESSING_NM("teq",0xf00f00ff)
146 DATA_PROCESSING_NM("cmp",VAL2)
147 DATA_PROCESSING_NM("cmn",VAL2)
148 DATA_PROCESSING_DNM("orr",0xf00f00ff)
149 DATA_PROCESSING_DM("mov",VAL2)
150 DATA_PROCESSING_DNM("bic",0xf00f00ff)
151 DATA_PROCESSING_DM("mvn",VAL2)
152
153 TEST("mov ip, sp") /* This has special case emulation code */
154
155 TEST_SUPPORTED("mov pc, #0x1000");
156 TEST_SUPPORTED("mov sp, #0x1000");
157 TEST_SUPPORTED("cmp pc, #0x1000");
158 TEST_SUPPORTED("cmp sp, #0x1000");
159
160 /* Data-processing with PC as shift*/
161 TEST_UNSUPPORTED(".word 0xe15c0f1e @ cmp r12, r14, asl pc")
162 TEST_UNSUPPORTED(".word 0xe1a0cf1e @ mov r12, r14, asl pc")
163 TEST_UNSUPPORTED(".word 0xe08caf1e @ add r10, r12, r14, asl pc")
164
165 /* Data-processing with PC as shift*/
166 TEST_UNSUPPORTED("movs pc, r1")
167 TEST_UNSUPPORTED("movs pc, r1, lsl r2")
168 TEST_UNSUPPORTED("movs pc, #0x10000")
169 TEST_UNSUPPORTED("adds pc, lr, r1")
170 TEST_UNSUPPORTED("adds pc, lr, r1, lsl r2")
171 TEST_UNSUPPORTED("adds pc, lr, #4")
172
173 /* Data-processing with SP as target */
174 TEST("add sp, sp, #16")
175 TEST("sub sp, sp, #8")
176 TEST("bic sp, sp, #0x20")
177 TEST("orr sp, sp, #0x20")
178 TEST_PR( "add sp, r",10,0,", r",11,4,"")
179 TEST_PRR("add sp, r",10,0,", r",11,4,", asl r",12,1,"")
180 TEST_P( "mov sp, r",10,0,"")
181 TEST_PR( "mov sp, r",10,0,", asl r",12,0,"")
182
183 /* Data-processing with PC as target */
184 TEST_BF( "add pc, pc, #2f-1b-8")
185 TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
186 TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
187 TEST_BF_R ("mov pc, r",0,2f,"")
188 TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
189 TEST_BB( "sub pc, pc, #1b-2b+8")
190#if __LINUX_ARM_ARCH__ >= 6
191 TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before ARMv6 */
192#endif
193 TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
194 TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
195 TEST_RR( "add pc, pc, r",10,-2,", asl r",11,1,"")
196#ifdef CONFIG_THUMB2_KERNEL
197 TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"")
198 TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8")
199#endif
200 TEST_GROUP("Miscellaneous instructions")
+
+	TEST("mrs r0, cpsr")
+	TEST("mrspl r7, cpsr")
+	TEST("mrs r14, cpsr")
+	TEST_UNSUPPORTED(".word 0xe10ff000 @ mrs r15, cpsr")
+	TEST_UNSUPPORTED("mrs r0, spsr")
+	TEST_UNSUPPORTED("mrs lr, spsr")
+
+	TEST_UNSUPPORTED("msr cpsr, r0")
+	TEST_UNSUPPORTED("msr cpsr_f, lr")
+	TEST_UNSUPPORTED("msr spsr, r0")
+
+	TEST_BF_R("bx r",0,2f,"")
+	TEST_BB_R("bx r",7,2f,"")
+	TEST_BF_R("bxeq r",14,2f,"")
+
+	TEST_R("clz r0, r",0, 0x0,"")
+	TEST_R("clzeq r7, r",14,0x1,"")
+	TEST_R("clz lr, r",7, 0xffffffff,"")
+	TEST( "clz r4, sp")
+	TEST_UNSUPPORTED(".word 0x016fff10 @ clz pc, r0")
+	TEST_UNSUPPORTED(".word 0x016f0f1f @ clz r0, pc")
+
+#if __LINUX_ARM_ARCH__ >= 6
+	TEST_UNSUPPORTED("bxj r0")
+#endif
+
+	TEST_BF_R("blx r",0,2f,"")
+	TEST_BB_R("blx r",7,2f,"")
+	TEST_BF_R("blxeq r",14,2f,"")
+	TEST_UNSUPPORTED(".word 0x0120003f @ blx pc")
+
+	TEST_RR( "qadd r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR( "qaddvs lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R( "qadd lr, r",9, VAL2,", r13")
+	TEST_RR( "qsub r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR( "qsubvs lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R( "qsub lr, r",9, VAL2,", r13")
+	TEST_RR( "qdadd r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR( "qdaddvs lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R( "qdadd lr, r",9, VAL2,", r13")
+	TEST_RR( "qdsub r0, r",1, VAL1,", r",2, VAL2,"")
+	TEST_RR( "qdsubvs lr, r",9, VAL2,", r",8, VAL1,"")
+	TEST_R( "qdsub lr, r",9, VAL2,", r13")
+	TEST_UNSUPPORTED(".word 0xe101f050 @ qadd pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe121f050 @ qsub pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe141f050 @ qdadd pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe161f050 @ qdsub pc, r0, r1")
+	TEST_UNSUPPORTED(".word 0xe16f2050 @ qdsub r2, r0, pc")
+	TEST_UNSUPPORTED(".word 0xe161205f @ qdsub r2, pc, r1")
+
+	TEST_UNSUPPORTED("bkpt 0xffff")
+	TEST_UNSUPPORTED("bkpt 0x0000")
+
+	TEST_UNSUPPORTED(".word 0xe1600070 @ smc #0")
+
+	TEST_GROUP("Halfword multiply and multiply-accumulate")
+
+	TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+	TEST_RRR( "smlabbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR( "smlabb lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f3281 @ smlabb pc, r1, r2, r3")
+	TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+	TEST_RRR( "smlatbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR( "smlatb lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f32a1 @ smlatb pc, r1, r2, r3")
+	TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+	TEST_RRR( "smlabtge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR( "smlabt lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f32c1 @ smlabt pc, r1, r2, r3")
+	TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+	TEST_RRR( "smlattge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+	TEST_RR( "smlatt lr, r",1, VAL2,", r",2, VAL3,", r13")
+	TEST_UNSUPPORTED(".word 0xe10f32e1 @ smlatt pc, r1, r2, r3")
+
276 TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
277 TEST_RRR( "smlawbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
278 TEST_RR( "smlawb lr, r",1, VAL2,", r",2, VAL3,", r13")
279 TEST_UNSUPPORTED(".word 0xe12f3281 @ smlawb pc, r1, r2, r3")
280 TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
281 TEST_RRR( "smlawtge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
282 TEST_RR( "smlawt lr, r",1, VAL2,", r",2, VAL3,", r13")
283 TEST_UNSUPPORTED(".word 0xe12f32c1 @ smlawt pc, r1, r2, r3")
284 TEST_UNSUPPORTED(".word 0xe12032cf @ smlawt r0, pc, r2, r3")
285 TEST_UNSUPPORTED(".word 0xe1203fc1 @ smlawt r0, r1, pc, r3")
286 TEST_UNSUPPORTED(".word 0xe120f2c1 @ smlawt r0, r1, r2, pc")
287
288 TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"")
289 TEST_RR( "smulwbge r7, r",8, VAL3,", r",9, VAL1,"")
290 TEST_R( "smulwb lr, r",1, VAL2,", r13")
291 TEST_UNSUPPORTED(".word 0xe12f02a1 @ smulwb pc, r1, r2")
292 TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"")
293 TEST_RR( "smulwtge r7, r",8, VAL3,", r",9, VAL1,"")
294 TEST_R( "smulwt lr, r",1, VAL2,", r13")
295 TEST_UNSUPPORTED(".word 0xe12f02e1 @ smulwt pc, r1, r2")
296
297 TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
298 TEST_RRRR( "smlalbble r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
299 TEST_RRR( "smlalbb r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
300 TEST_UNSUPPORTED(".word 0xe14f1382 @ smlalbb pc, r1, r2, r3")
301 TEST_UNSUPPORTED(".word 0xe141f382 @ smlalbb r1, pc, r2, r3")
302 TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
303 TEST_RRRR( "smlaltble r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
304 TEST_RRR( "smlaltb r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
305 TEST_UNSUPPORTED(".word 0xe14f13a2 @ smlaltb pc, r1, r2, r3")
306 TEST_UNSUPPORTED(".word 0xe141f3a2 @ smlaltb r1, pc, r2, r3")
307 TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
308 TEST_RRRR( "smlalbtle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
309 TEST_RRR( "smlalbt r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
310 TEST_UNSUPPORTED(".word 0xe14f13c2 @ smlalbt pc, r1, r2, r3")
311 TEST_UNSUPPORTED(".word 0xe141f3c2 @ smlalbt r1, pc, r2, r3")
312 TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
313 TEST_RRRR( "smlalttle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
314 TEST_RRR( "smlaltt r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
315 TEST_UNSUPPORTED(".word 0xe14f13e2 @ smlalbb pc, r1, r2, r3")
316 TEST_UNSUPPORTED(".word 0xe140f3e2 @ smlalbb r0, pc, r2, r3")
317 TEST_UNSUPPORTED(".word 0xe14013ef @ smlalbb r0, r1, pc, r3")
318 TEST_UNSUPPORTED(".word 0xe1401fe2 @ smlalbb r0, r1, r2, pc")
319
320 TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"")
321 TEST_RR( "smulbbge r7, r",8, VAL3,", r",9, VAL1,"")
322 TEST_R( "smulbb lr, r",1, VAL2,", r13")
323 TEST_UNSUPPORTED(".word 0xe16f0281 @ smulbb pc, r1, r2")
324 TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"")
325 TEST_RR( "smultbge r7, r",8, VAL3,", r",9, VAL1,"")
326 TEST_R( "smultb lr, r",1, VAL2,", r13")
327 TEST_UNSUPPORTED(".word 0xe16f02a1 @ smultb pc, r1, r2")
328 TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"")
329 TEST_RR( "smulbtge r7, r",8, VAL3,", r",9, VAL1,"")
330 TEST_R( "smulbt lr, r",1, VAL2,", r13")
331 TEST_UNSUPPORTED(".word 0xe16f02c1 @ smultb pc, r1, r2")
332 TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"")
333 TEST_RR( "smulttge r7, r",8, VAL3,", r",9, VAL1,"")
334 TEST_R( "smultt lr, r",1, VAL2,", r13")
335 TEST_UNSUPPORTED(".word 0xe16f02e1 @ smultt pc, r1, r2")
336 TEST_UNSUPPORTED(".word 0xe16002ef @ smultt r0, pc, r2")
337 TEST_UNSUPPORTED(".word 0xe1600fe1 @ smultt r0, r1, pc")
338
339 TEST_GROUP("Multiply and multiply-accumulate")
340
341 TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"")
342 TEST_RR( "mulls r7, r",8, VAL2,", r",9, VAL2,"")
343 TEST_R( "mul lr, r",4, VAL3,", r13")
344 TEST_UNSUPPORTED(".word 0xe00f0291 @ mul pc, r1, r2")
345 TEST_UNSUPPORTED(".word 0xe000029f @ mul r0, pc, r2")
346 TEST_UNSUPPORTED(".word 0xe0000f91 @ mul r0, r1, pc")
347 TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"")
348 TEST_RR( "mullss r7, r",8, VAL2,", r",9, VAL2,"")
349 TEST_R( "muls lr, r",4, VAL3,", r13")
350 TEST_UNSUPPORTED(".word 0xe01f0291 @ muls pc, r1, r2")
351
352 TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
353 TEST_RRR( "mlahi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
354 TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13")
355 TEST_UNSUPPORTED(".word 0xe02f3291 @ mla pc, r1, r2, r3")
356 TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
357 TEST_RRR( "mlahis r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
358 TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13")
359 TEST_UNSUPPORTED(".word 0xe03f3291 @ mlas pc, r1, r2, r3")
360
361#if __LINUX_ARM_ARCH__ >= 6
362 TEST_RR( "umaal r0, r1, r",2, VAL1,", r",3, VAL2,"")
363 TEST_RR( "umaalls r7, r8, r",9, VAL2,", r",10, VAL1,"")
364 TEST_R( "umaal lr, r12, r",11,VAL3,", r13")
365 TEST_UNSUPPORTED(".word 0xe041f392 @ umaal pc, r1, r2, r3")
366 TEST_UNSUPPORTED(".word 0xe04f0392 @ umaal r0, pc, r2, r3")
367 TEST_UNSUPPORTED(".word 0xe0500090 @ undef")
368 TEST_UNSUPPORTED(".word 0xe05fff9f @ undef")
369
370 TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
371 TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
372 TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13")
373 TEST_UNSUPPORTED(".word 0xe06f3291 @ mls pc, r1, r2, r3")
374 TEST_UNSUPPORTED(".word 0xe060329f @ mls r0, pc, r2, r3")
375 TEST_UNSUPPORTED(".word 0xe0603f91 @ mls r0, r1, pc, r3")
376 TEST_UNSUPPORTED(".word 0xe060f291 @ mls r0, r1, r2, pc")
377#endif
378
379 TEST_UNSUPPORTED(".word 0xe0700090 @ undef")
380 TEST_UNSUPPORTED(".word 0xe07fff9f @ undef")
381
382 TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"")
383 TEST_RR( "umullls r7, r8, r",9, VAL2,", r",10, VAL1,"")
384 TEST_R( "umull lr, r12, r",11,VAL3,", r13")
385 TEST_UNSUPPORTED(".word 0xe081f392 @ umull pc, r1, r2, r3")
386 TEST_UNSUPPORTED(".word 0xe08f1392 @ umull r1, pc, r2, r3")
387 TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"")
388 TEST_RR( "umulllss r7, r8, r",9, VAL2,", r",10, VAL1,"")
389 TEST_R( "umulls lr, r12, r",11,VAL3,", r13")
390 TEST_UNSUPPORTED(".word 0xe091f392 @ umulls pc, r1, r2, r3")
391 TEST_UNSUPPORTED(".word 0xe09f1392 @ umulls r1, pc, r2, r3")
392
393 TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
394 TEST_RRRR( "umlalle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
395 TEST_RRR( "umlal r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
396 TEST_UNSUPPORTED(".word 0xe0af1392 @ umlal pc, r1, r2, r3")
397 TEST_UNSUPPORTED(".word 0xe0a1f392 @ umlal r1, pc, r2, r3")
398 TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
399 TEST_RRRR( "umlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
400 TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
401 TEST_UNSUPPORTED(".word 0xe0bf1392 @ umlals pc, r1, r2, r3")
402 TEST_UNSUPPORTED(".word 0xe0b1f392 @ umlals r1, pc, r2, r3")
403
404 TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"")
405 TEST_RR( "smullls r7, r8, r",9, VAL2,", r",10, VAL1,"")
406 TEST_R( "smull lr, r12, r",11,VAL3,", r13")
407 TEST_UNSUPPORTED(".word 0xe0c1f392 @ smull pc, r1, r2, r3")
408 TEST_UNSUPPORTED(".word 0xe0cf1392 @ smull r1, pc, r2, r3")
409 TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"")
410 TEST_RR( "smulllss r7, r8, r",9, VAL2,", r",10, VAL1,"")
411 TEST_R( "smulls lr, r12, r",11,VAL3,", r13")
412 TEST_UNSUPPORTED(".word 0xe0d1f392 @ smulls pc, r1, r2, r3")
413 TEST_UNSUPPORTED(".word 0xe0df1392 @ smulls r1, pc, r2, r3")
414
415 TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
416 TEST_RRRR( "smlalle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
417 TEST_RRR( "smlal r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
418 TEST_UNSUPPORTED(".word 0xe0ef1392 @ smlal pc, r1, r2, r3")
419 TEST_UNSUPPORTED(".word 0xe0e1f392 @ smlal r1, pc, r2, r3")
420 TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
421 TEST_RRRR( "smlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
422 TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
423 TEST_UNSUPPORTED(".word 0xe0ff1392 @ smlals pc, r1, r2, r3")
424 TEST_UNSUPPORTED(".word 0xe0f0f392 @ smlals r0, pc, r2, r3")
425 TEST_UNSUPPORTED(".word 0xe0f0139f @ smlals r0, r1, pc, r3")
426 TEST_UNSUPPORTED(".word 0xe0f01f92 @ smlals r0, r1, r2, pc")
427
428 TEST_GROUP("Synchronization primitives")
429
430 /*
431 * Use hard coded constants for SWP instructions to avoid warnings
432 * about deprecated instructions.
433 */
434 TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]")
435 TEST_R( ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]")
436 TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]")
437 TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
438 TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
439 TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
440 TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]")
441 TEST_R( ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]")
442 TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")
443
444 TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */
445 TEST_UNSUPPORTED(".word 0xe1200090") /* Unallocated space */
446 TEST_UNSUPPORTED(".word 0xe1300090") /* Unallocated space */
447 TEST_UNSUPPORTED(".word 0xe1500090") /* Unallocated space */
448 TEST_UNSUPPORTED(".word 0xe1600090") /* Unallocated space */
449 TEST_UNSUPPORTED(".word 0xe1700090") /* Unallocated space */
450#if __LINUX_ARM_ARCH__ >= 6
451 TEST_UNSUPPORTED("ldrex r2, [sp]")
452 TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]")
453 TEST_UNSUPPORTED("ldrexd r2, r3, [sp]")
454 TEST_UNSUPPORTED("strexb r0, r2, [sp]")
455 TEST_UNSUPPORTED("ldrexb r2, [sp]")
456 TEST_UNSUPPORTED("strexh r0, r2, [sp]")
457 TEST_UNSUPPORTED("ldrexh r2, [sp]")
458#endif
459 TEST_GROUP("Extra load/store instructions")
460
461 TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]")
462 TEST_RPR( "streqh r",14,VAL2,", [r",13,0, ", r",12, 48,"]")
463 TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!")
464 TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
465 TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
466 TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"")
467 TEST_UNSUPPORTED(".word 0xe1afc0ba @ strh r12, [pc, r10]!")
468 TEST_UNSUPPORTED(".word 0xe089f0bb @ strh pc, [r9], r11")
469 TEST_UNSUPPORTED(".word 0xe089a0bf @ strh r10, [r9], pc")
470
471 TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]")
472 TEST_PR( "ldrcsh r14, [r",13,0, ", r",12, 48,"]")
473 TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!")
474 TEST_PR( "ldrcch r12, [r",11,48,", -r",10,24,"]!")
475 TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"")
476 TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"")
477 TEST_UNSUPPORTED(".word 0xe1bfc0ba @ ldrh r12, [pc, r10]!")
478 TEST_UNSUPPORTED(".word 0xe099f0bb @ ldrh pc, [r9], r11")
479 TEST_UNSUPPORTED(".word 0xe099a0bf @ ldrh r10, [r9], pc")
480
481 TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]")
482 TEST_RP( "strmih r",14,VAL2,", [r",13,0, ", #2]")
483 TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!")
484 TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!")
485 TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48")
486 TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48")
487 TEST_UNSUPPORTED(".word 0xe1efc3b0 @ strh r12, [pc, #48]!")
488 TEST_UNSUPPORTED(".word 0xe0c9f3b0 @ strh pc, [r9], #48")
489
490 TEST_P( "ldrh r0, [r",0, 24,", #-2]")
491 TEST_P( "ldrvsh r14, [r",13,0, ", #2]")
492 TEST_P( "ldrh r1, [r",2, 24,", #4]!")
493 TEST_P( "ldrvch r12, [r",11,24,", #-4]!")
494 TEST_P( "ldrh r2, [r",3, 24,"], #48")
495 TEST_P( "ldrh r10, [r",9, 64,"], #-48")
496 TEST( "ldrh r0, [pc, #0]")
497 TEST_UNSUPPORTED(".word 0xe1ffc3b0 @ ldrh r12, [pc, #48]!")
498 TEST_UNSUPPORTED(".word 0xe0d9f3b0 @ ldrh pc, [r9], #48")
499
500 TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]")
501 TEST_PR( "ldrhisb r14, [r",13,0,", r",12, 48,"]")
502 TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!")
503 TEST_PR( "ldrlssb r12, [r",11,48,", -r",10,24,"]!")
504 TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"")
505 TEST_PR( "ldrsb r10, [r",9, 48,"], -r",11,24,"")
506 TEST_UNSUPPORTED(".word 0xe1bfc0da @ ldrsb r12, [pc, r10]!")
507 TEST_UNSUPPORTED(".word 0xe099f0db @ ldrsb pc, [r9], r11")
508
509 TEST_P( "ldrsb r0, [r",0, 24,", #-1]")
510 TEST_P( "ldrgesb r14, [r",13,0, ", #1]")
511 TEST_P( "ldrsb r1, [r",2, 24,", #4]!")
512 TEST_P( "ldrltsb r12, [r",11,24,", #-4]!")
513 TEST_P( "ldrsb r2, [r",3, 24,"], #48")
514 TEST_P( "ldrsb r10, [r",9, 64,"], #-48")
515 TEST( "ldrsb r0, [pc, #0]")
516 TEST_UNSUPPORTED(".word 0xe1ffc3d0 @ ldrsb r12, [pc, #48]!")
517 TEST_UNSUPPORTED(".word 0xe0d9f3d0 @ ldrsb pc, [r9], #48")
518
519 TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]")
520 TEST_PR( "ldrgtsh r14, [r",13,0, ", r",12, 48,"]")
521 TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!")
522 TEST_PR( "ldrlesh r12, [r",11,48,", -r",10,24,"]!")
523 TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"")
524 TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"")
525 TEST_UNSUPPORTED(".word 0xe1bfc0fa @ ldrsh r12, [pc, r10]!")
526 TEST_UNSUPPORTED(".word 0xe099f0fb @ ldrsh pc, [r9], r11")
527
528 TEST_P( "ldrsh r0, [r",0, 24,", #-1]")
529 TEST_P( "ldreqsh r14, [r",13,0 ,", #1]")
530 TEST_P( "ldrsh r1, [r",2, 24,", #4]!")
531 TEST_P( "ldrnesh r12, [r",11,24,", #-4]!")
532 TEST_P( "ldrsh r2, [r",3, 24,"], #48")
533 TEST_P( "ldrsh r10, [r",9, 64,"], #-48")
534 TEST( "ldrsh r0, [pc, #0]")
535 TEST_UNSUPPORTED(".word 0xe1ffc3f0 @ ldrsh r12, [pc, #48]!")
536 TEST_UNSUPPORTED(".word 0xe0d9f3f0 @ ldrsh pc, [r9], #48")
537
538#if __LINUX_ARM_ARCH__ >= 7
539 TEST_UNSUPPORTED("strht r1, [r2], r3")
540 TEST_UNSUPPORTED("ldrht r1, [r2], r3")
541 TEST_UNSUPPORTED("strht r1, [r2], #48")
542 TEST_UNSUPPORTED("ldrht r1, [r2], #48")
543 TEST_UNSUPPORTED("ldrsbt r1, [r2], r3")
544 TEST_UNSUPPORTED("ldrsbt r1, [r2], #48")
545 TEST_UNSUPPORTED("ldrsht r1, [r2], r3")
546 TEST_UNSUPPORTED("ldrsht r1, [r2], #48")
547#endif
548
549 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
550 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
551 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
552 TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
553 TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"")
554 TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
555 TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
556
557 TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]")
558 TEST_PR( "ldrmid r8, [r",13,0, ", r",12,48,"]")
559 TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!")
560 TEST_PR( "ldrpld r6, [r",11,48,", -r",10,24,"]!")
561 TEST_PR( "ldrd r2, [r",5, 24,"], r",4,48,"")
562 TEST_PR( "ldrd r10, [r",9,48,"], -r",7,24,"")
563 TEST_UNSUPPORTED(".word 0xe1afc0da @ ldrd r12, [pc, r10]!")
564 TEST_UNSUPPORTED(".word 0xe089f0db @ ldrd pc, [r9], r11")
565 TEST_UNSUPPORTED(".word 0xe089e0db @ ldrd lr, [r9], r11")
566 TEST_UNSUPPORTED(".word 0xe089c0df @ ldrd r12, [r9], pc")
567
568 TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]")
569 TEST_RP( "strvsd r",8, VAL2,", [r",13,0, ", #8]")
570 TEST_RP( "strd r",4, VAL1,", [r",2, 24,", #16]!")
571 TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!")
572 TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48")
573 TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48")
574 TEST_UNSUPPORTED(".word 0xe1efc3f0 @ strd r12, [pc, #48]!")
575
576 TEST_P( "ldrd r0, [r",0, 24,", #-8]")
577 TEST_P( "ldrhid r8, [r",13,0, ", #8]")
578 TEST_P( "ldrd r4, [r",2, 24,", #16]!")
579 TEST_P( "ldrlsd r6, [r",11,24,", #-16]!")
580 TEST_P( "ldrd r2, [r",5, 24,"], #48")
581 TEST_P( "ldrd r10, [r",9,6,"], #-48")
582 TEST_UNSUPPORTED(".word 0xe1efc3d0 @ ldrd r12, [pc, #48]!")
583 TEST_UNSUPPORTED(".word 0xe0c9f3d0 @ ldrd pc, [r9], #48")
584 TEST_UNSUPPORTED(".word 0xe0c9e3d0 @ ldrd lr, [r9], #48")
585
586 TEST_GROUP("Miscellaneous")
587
588#if __LINUX_ARM_ARCH__ >= 7
589 TEST("movw r0, #0")
590 TEST("movw r0, #0xffff")
591 TEST("movw lr, #0xffff")
592 TEST_UNSUPPORTED(".word 0xe300f000 @ movw pc, #0")
593 TEST_R("movt r",0, VAL1,", #0")
594 TEST_R("movt r",0, VAL2,", #0xffff")
595 TEST_R("movt r",14,VAL1,", #0xffff")
596 TEST_UNSUPPORTED(".word 0xe340f000 @ movt pc, #0")
597#endif
598
599 TEST_UNSUPPORTED("msr cpsr, 0x13")
600 TEST_UNSUPPORTED("msr cpsr_f, 0xf0000000")
601 TEST_UNSUPPORTED("msr spsr, 0x13")
602
603#if __LINUX_ARM_ARCH__ >= 7
604 TEST_SUPPORTED("yield")
605 TEST("sev")
606 TEST("nop")
607 TEST("wfi")
608 TEST_SUPPORTED("wfe")
609 TEST_UNSUPPORTED("dbg #0")
610#endif
611
612 TEST_GROUP("Load/store word and unsigned byte")
613
614#define LOAD_STORE(byte) \
615 TEST_RP( "str"byte" r",0, VAL1,", [r",1, 24,", #-2]") \
616 TEST_RP( "str"byte" r",14,VAL2,", [r",13,0, ", #2]") \
617 TEST_RP( "str"byte" r",1, VAL1,", [r",2, 24,", #4]!") \
618 TEST_RP( "str"byte" r",12,VAL2,", [r",11,24,", #-4]!") \
619 TEST_RP( "str"byte" r",2, VAL1,", [r",3, 24,"], #48") \
620 TEST_RP( "str"byte" r",10,VAL2,", [r",9, 64,"], #-48") \
621 TEST_RPR("str"byte" r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") \
622 TEST_RPR("str"byte" r",14,VAL2,", [r",13,0, ", r",12, 48,"]") \
623 TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") \
624 TEST_RPR("str"byte" r",12,VAL2,", [r",11,48,", -r",10,24,"]!") \
625 TEST_RPR("str"byte" r",2, VAL1,", [r",3, 24,"], r",4, 48,"") \
626 TEST_RPR("str"byte" r",10,VAL2,", [r",9, 48,"], -r",11,24,"") \
627 TEST_RPR("str"byte" r",0, VAL1,", [r",1, 24,", r",2, 32,", asl #1]")\
628 TEST_RPR("str"byte" r",14,VAL2,", [r",13,0, ", r",12, 32,", lsr #2]")\
629 TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 32,", asr #3]!")\
630 TEST_RPR("str"byte" r",12,VAL2,", [r",11,24,", r",10, 4,", ror #31]!")\
631 TEST_P( "ldr"byte" r0, [r",0, 24,", #-2]") \
632 TEST_P( "ldr"byte" r14, [r",13,0, ", #2]") \
633 TEST_P( "ldr"byte" r1, [r",2, 24,", #4]!") \
634 TEST_P( "ldr"byte" r12, [r",11,24,", #-4]!") \
635 TEST_P( "ldr"byte" r2, [r",3, 24,"], #48") \
636 TEST_P( "ldr"byte" r10, [r",9, 64,"], #-48") \
637 TEST_PR( "ldr"byte" r0, [r",0, 48,", -r",2, 24,"]") \
638 TEST_PR( "ldr"byte" r14, [r",13,0, ", r",12, 48,"]") \
639 TEST_PR( "ldr"byte" r1, [r",2, 24,", r",3, 48,"]!") \
640 TEST_PR( "ldr"byte" r12, [r",11,48,", -r",10,24,"]!") \
641 TEST_PR( "ldr"byte" r2, [r",3, 24,"], r",4, 48,"") \
642 TEST_PR( "ldr"byte" r10, [r",9, 48,"], -r",11,24,"") \
643 TEST_PR( "ldr"byte" r0, [r",0, 24,", r",2, 32,", asl #1]") \
644 TEST_PR( "ldr"byte" r14, [r",13,0, ", r",12, 32,", lsr #2]") \
645 TEST_PR( "ldr"byte" r1, [r",2, 24,", r",3, 32,", asr #3]!") \
646 TEST_PR( "ldr"byte" r12, [r",11,24,", r",10, 4,", ror #31]!") \
647 TEST( "ldr"byte" r0, [pc, #0]") \
648 TEST_R( "ldr"byte" r12, [pc, r",14,0,"]")
649
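	/*
	 * Illustrative expansion: LOAD_STORE("b") turns the first line of the
	 * macro above into
	 *	TEST_RP( "strb r",0, VAL1,", [r",1, 24,", #-2]")
	 * i.e. a test of "strb r0, [r1, #-2]" with r0 holding VAL1 and r1 set
	 * up by the harness to point 24 bytes into the test memory area.
	 */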
650 LOAD_STORE("")
651 TEST_P( "str pc, [r",0,0,", #15*4]")
652 TEST_R( "str pc, [sp, r",2,15*4,"]")
653 TEST_BF( "ldr pc, [sp, #15*4]")
654 TEST_BF_R("ldr pc, [sp, r",2,15*4,"]")
655
656 TEST_P( "str sp, [r",0,0,", #13*4]")
657 TEST_R( "str sp, [sp, r",2,13*4,"]")
658 TEST_BF( "ldr sp, [sp, #13*4]")
659 TEST_BF_R("ldr sp, [sp, r",2,13*4,"]")
660
661#ifdef CONFIG_THUMB2_KERNEL
662 TEST_ARM_TO_THUMB_INTERWORK_P("ldr pc, [r",0,0,", #15*4]")
663#endif
664 TEST_UNSUPPORTED(".word 0xe5af6008 @ str r6, [pc, #8]!")
665 TEST_UNSUPPORTED(".word 0xe7af6008 @ str r6, [pc, r8]!")
666 TEST_UNSUPPORTED(".word 0xe5bf6008 @ ldr r6, [pc, #8]!")
667 TEST_UNSUPPORTED(".word 0xe7bf6008 @ ldr r6, [pc, r8]!")
668 TEST_UNSUPPORTED(".word 0xe788600f @ str r6, [r8, pc]")
669 TEST_UNSUPPORTED(".word 0xe798600f @ ldr r6, [r8, pc]")
670
671 LOAD_STORE("b")
672 TEST_UNSUPPORTED(".word 0xe5f7f008 @ ldrb pc, [r7, #8]!")
673 TEST_UNSUPPORTED(".word 0xe7f7f008 @ ldrb pc, [r7, r8]!")
674 TEST_UNSUPPORTED(".word 0xe5ef6008 @ strb r6, [pc, #8]!")
675 TEST_UNSUPPORTED(".word 0xe7ef6008 @ strb r6, [pc, r3]!")
676 TEST_UNSUPPORTED(".word 0xe5ff6008 @ ldrb r6, [pc, #8]!")
677 TEST_UNSUPPORTED(".word 0xe7ff6008 @ ldrb r6, [pc, r3]!")
678
679 TEST_UNSUPPORTED("ldrt r0, [r1], #4")
680 TEST_UNSUPPORTED("ldrt r1, [r2], r3")
681 TEST_UNSUPPORTED("strt r2, [r3], #4")
682 TEST_UNSUPPORTED("strt r3, [r4], r5")
683 TEST_UNSUPPORTED("ldrbt r4, [r5], #4")
684 TEST_UNSUPPORTED("ldrbt r5, [r6], r7")
685 TEST_UNSUPPORTED("strbt r6, [r7], #4")
686 TEST_UNSUPPORTED("strbt r7, [r8], r9")
687
688#if __LINUX_ARM_ARCH__ >= 7
689 TEST_GROUP("Parallel addition and subtraction, signed")
690
691 TEST_UNSUPPORTED(".word 0xe6000010") /* Unallocated space */
692 TEST_UNSUPPORTED(".word 0xe60fffff") /* Unallocated space */
693
694 TEST_RR( "sadd16 r0, r",0, HH1,", r",1, HH2,"")
695 TEST_RR( "sadd16 r14, r",12,HH2,", r",10,HH1,"")
696 TEST_UNSUPPORTED(".word 0xe61cff1a @ sadd16 pc, r12, r10")
697 TEST_RR( "sasx r0, r",0, HH1,", r",1, HH2,"")
698 TEST_RR( "sasx r14, r",12,HH2,", r",10,HH1,"")
699 TEST_UNSUPPORTED(".word 0xe61cff3a @ sasx pc, r12, r10")
700 TEST_RR( "ssax r0, r",0, HH1,", r",1, HH2,"")
701 TEST_RR( "ssax r14, r",12,HH2,", r",10,HH1,"")
702 TEST_UNSUPPORTED(".word 0xe61cff5a @ ssax pc, r12, r10")
703 TEST_RR( "ssub16 r0, r",0, HH1,", r",1, HH2,"")
704 TEST_RR( "ssub16 r14, r",12,HH2,", r",10,HH1,"")
705 TEST_UNSUPPORTED(".word 0xe61cff7a @ ssub16 pc, r12, r10")
706 TEST_RR( "sadd8 r0, r",0, HH1,", r",1, HH2,"")
707 TEST_RR( "sadd8 r14, r",12,HH2,", r",10,HH1,"")
708 TEST_UNSUPPORTED(".word 0xe61cff9a @ sadd8 pc, r12, r10")
709 TEST_UNSUPPORTED(".word 0xe61000b0") /* Unallocated space */
710 TEST_UNSUPPORTED(".word 0xe61fffbf") /* Unallocated space */
711 TEST_UNSUPPORTED(".word 0xe61000d0") /* Unallocated space */
712 TEST_UNSUPPORTED(".word 0xe61fffdf") /* Unallocated space */
713 TEST_RR( "ssub8 r0, r",0, HH1,", r",1, HH2,"")
714 TEST_RR( "ssub8 r14, r",12,HH2,", r",10,HH1,"")
715 TEST_UNSUPPORTED(".word 0xe61cfffa @ ssub8 pc, r12, r10")
716
717 TEST_RR( "qadd16 r0, r",0, HH1,", r",1, HH2,"")
718 TEST_RR( "qadd16 r14, r",12,HH2,", r",10,HH1,"")
719 TEST_UNSUPPORTED(".word 0xe62cff1a @ qadd16 pc, r12, r10")
720 TEST_RR( "qasx r0, r",0, HH1,", r",1, HH2,"")
721 TEST_RR( "qasx r14, r",12,HH2,", r",10,HH1,"")
722 TEST_UNSUPPORTED(".word 0xe62cff3a @ qasx pc, r12, r10")
723 TEST_RR( "qsax r0, r",0, HH1,", r",1, HH2,"")
724 TEST_RR( "qsax r14, r",12,HH2,", r",10,HH1,"")
725 TEST_UNSUPPORTED(".word 0xe62cff5a @ qsax pc, r12, r10")
726 TEST_RR( "qsub16 r0, r",0, HH1,", r",1, HH2,"")
727 TEST_RR( "qsub16 r14, r",12,HH2,", r",10,HH1,"")
728 TEST_UNSUPPORTED(".word 0xe62cff7a @ qsub16 pc, r12, r10")
729 TEST_RR( "qadd8 r0, r",0, HH1,", r",1, HH2,"")
730 TEST_RR( "qadd8 r14, r",12,HH2,", r",10,HH1,"")
731 TEST_UNSUPPORTED(".word 0xe62cff9a @ qadd8 pc, r12, r10")
732 TEST_UNSUPPORTED(".word 0xe62000b0") /* Unallocated space */
733 TEST_UNSUPPORTED(".word 0xe62fffbf") /* Unallocated space */
734 TEST_UNSUPPORTED(".word 0xe62000d0") /* Unallocated space */
735 TEST_UNSUPPORTED(".word 0xe62fffdf") /* Unallocated space */
736 TEST_RR( "qsub8 r0, r",0, HH1,", r",1, HH2,"")
737 TEST_RR( "qsub8 r14, r",12,HH2,", r",10,HH1,"")
738 TEST_UNSUPPORTED(".word 0xe62cfffa @ qsub8 pc, r12, r10")
739
740 TEST_RR( "shadd16 r0, r",0, HH1,", r",1, HH2,"")
741 TEST_RR( "shadd16 r14, r",12,HH2,", r",10,HH1,"")
742 TEST_UNSUPPORTED(".word 0xe63cff1a @ shadd16 pc, r12, r10")
743 TEST_RR( "shasx r0, r",0, HH1,", r",1, HH2,"")
744 TEST_RR( "shasx r14, r",12,HH2,", r",10,HH1,"")
745 TEST_UNSUPPORTED(".word 0xe63cff3a @ shasx pc, r12, r10")
746 TEST_RR( "shsax r0, r",0, HH1,", r",1, HH2,"")
747 TEST_RR( "shsax r14, r",12,HH2,", r",10,HH1,"")
748 TEST_UNSUPPORTED(".word 0xe63cff5a @ shsax pc, r12, r10")
749 TEST_RR( "shsub16 r0, r",0, HH1,", r",1, HH2,"")
750 TEST_RR( "shsub16 r14, r",12,HH2,", r",10,HH1,"")
751 TEST_UNSUPPORTED(".word 0xe63cff7a @ shsub16 pc, r12, r10")
752 TEST_RR( "shadd8 r0, r",0, HH1,", r",1, HH2,"")
753 TEST_RR( "shadd8 r14, r",12,HH2,", r",10,HH1,"")
754 TEST_UNSUPPORTED(".word 0xe63cff9a @ shadd8 pc, r12, r10")
755 TEST_UNSUPPORTED(".word 0xe63000b0") /* Unallocated space */
756 TEST_UNSUPPORTED(".word 0xe63fffbf") /* Unallocated space */
757 TEST_UNSUPPORTED(".word 0xe63000d0") /* Unallocated space */
758 TEST_UNSUPPORTED(".word 0xe63fffdf") /* Unallocated space */
759 TEST_RR( "shsub8 r0, r",0, HH1,", r",1, HH2,"")
760 TEST_RR( "shsub8 r14, r",12,HH2,", r",10,HH1,"")
761 TEST_UNSUPPORTED(".word 0xe63cfffa @ shsub8 pc, r12, r10")
762
763 TEST_GROUP("Parallel addition and subtraction, unsigned")
764
765 TEST_UNSUPPORTED(".word 0xe6400010") /* Unallocated space */
766 TEST_UNSUPPORTED(".word 0xe64fffff") /* Unallocated space */
767
768 TEST_RR( "uadd16 r0, r",0, HH1,", r",1, HH2,"")
769 TEST_RR( "uadd16 r14, r",12,HH2,", r",10,HH1,"")
770 TEST_UNSUPPORTED(".word 0xe65cff1a @ uadd16 pc, r12, r10")
771 TEST_RR( "uasx r0, r",0, HH1,", r",1, HH2,"")
772 TEST_RR( "uasx r14, r",12,HH2,", r",10,HH1,"")
773 TEST_UNSUPPORTED(".word 0xe65cff3a @ uasx pc, r12, r10")
774 TEST_RR( "usax r0, r",0, HH1,", r",1, HH2,"")
775 TEST_RR( "usax r14, r",12,HH2,", r",10,HH1,"")
776 TEST_UNSUPPORTED(".word 0xe65cff5a @ usax pc, r12, r10")
777 TEST_RR( "usub16 r0, r",0, HH1,", r",1, HH2,"")
778 TEST_RR( "usub16 r14, r",12,HH2,", r",10,HH1,"")
779 TEST_UNSUPPORTED(".word 0xe65cff7a @ usub16 pc, r12, r10")
780 TEST_RR( "uadd8 r0, r",0, HH1,", r",1, HH2,"")
781 TEST_RR( "uadd8 r14, r",12,HH2,", r",10,HH1,"")
782 TEST_UNSUPPORTED(".word 0xe65cff9a @ uadd8 pc, r12, r10")
783 TEST_UNSUPPORTED(".word 0xe65000b0") /* Unallocated space */
784 TEST_UNSUPPORTED(".word 0xe65fffbf") /* Unallocated space */
785 TEST_UNSUPPORTED(".word 0xe65000d0") /* Unallocated space */
786 TEST_UNSUPPORTED(".word 0xe65fffdf") /* Unallocated space */
787 TEST_RR( "usub8 r0, r",0, HH1,", r",1, HH2,"")
788 TEST_RR( "usub8 r14, r",12,HH2,", r",10,HH1,"")
789 TEST_UNSUPPORTED(".word 0xe65cfffa @ usub8 pc, r12, r10")
790
791 TEST_RR( "uqadd16 r0, r",0, HH1,", r",1, HH2,"")
792 TEST_RR( "uqadd16 r14, r",12,HH2,", r",10,HH1,"")
793 TEST_UNSUPPORTED(".word 0xe66cff1a @ uqadd16 pc, r12, r10")
794 TEST_RR( "uqasx r0, r",0, HH1,", r",1, HH2,"")
795 TEST_RR( "uqasx r14, r",12,HH2,", r",10,HH1,"")
796 TEST_UNSUPPORTED(".word 0xe66cff3a @ uqasx pc, r12, r10")
797 TEST_RR( "uqsax r0, r",0, HH1,", r",1, HH2,"")
798 TEST_RR( "uqsax r14, r",12,HH2,", r",10,HH1,"")
799 TEST_UNSUPPORTED(".word 0xe66cff5a @ uqsax pc, r12, r10")
800 TEST_RR( "uqsub16 r0, r",0, HH1,", r",1, HH2,"")
801 TEST_RR( "uqsub16 r14, r",12,HH2,", r",10,HH1,"")
802 TEST_UNSUPPORTED(".word 0xe66cff7a @ uqsub16 pc, r12, r10")
803 TEST_RR( "uqadd8 r0, r",0, HH1,", r",1, HH2,"")
804 TEST_RR( "uqadd8 r14, r",12,HH2,", r",10,HH1,"")
805 TEST_UNSUPPORTED(".word 0xe66cff9a @ uqadd8 pc, r12, r10")
806 TEST_UNSUPPORTED(".word 0xe66000b0") /* Unallocated space */
807 TEST_UNSUPPORTED(".word 0xe66fffbf") /* Unallocated space */
808 TEST_UNSUPPORTED(".word 0xe66000d0") /* Unallocated space */
809 TEST_UNSUPPORTED(".word 0xe66fffdf") /* Unallocated space */
810 TEST_RR( "uqsub8 r0, r",0, HH1,", r",1, HH2,"")
811 TEST_RR( "uqsub8 r14, r",12,HH2,", r",10,HH1,"")
812 TEST_UNSUPPORTED(".word 0xe66cfffa @ uqsub8 pc, r12, r10")
813
814 TEST_RR( "uhadd16 r0, r",0, HH1,", r",1, HH2,"")
815 TEST_RR( "uhadd16 r14, r",12,HH2,", r",10,HH1,"")
816 TEST_UNSUPPORTED(".word 0xe67cff1a @ uhadd16 pc, r12, r10")
817 TEST_RR( "uhasx r0, r",0, HH1,", r",1, HH2,"")
818 TEST_RR( "uhasx r14, r",12,HH2,", r",10,HH1,"")
819 TEST_UNSUPPORTED(".word 0xe67cff3a @ uhasx pc, r12, r10")
820 TEST_RR( "uhsax r0, r",0, HH1,", r",1, HH2,"")
821 TEST_RR( "uhsax r14, r",12,HH2,", r",10,HH1,"")
822 TEST_UNSUPPORTED(".word 0xe67cff5a @ uhsax pc, r12, r10")
823 TEST_RR( "uhsub16 r0, r",0, HH1,", r",1, HH2,"")
824 TEST_RR( "uhsub16 r14, r",12,HH2,", r",10,HH1,"")
825 TEST_UNSUPPORTED(".word 0xe67cff7a @ uhsub16 pc, r12, r10")
826 TEST_RR( "uhadd8 r0, r",0, HH1,", r",1, HH2,"")
827 TEST_RR( "uhadd8 r14, r",12,HH2,", r",10,HH1,"")
828 TEST_UNSUPPORTED(".word 0xe67cff9a @ uhadd8 pc, r12, r10")
829 TEST_UNSUPPORTED(".word 0xe67000b0") /* Unallocated space */
830 TEST_UNSUPPORTED(".word 0xe67fffbf") /* Unallocated space */
831 TEST_UNSUPPORTED(".word 0xe67000d0") /* Unallocated space */
832 TEST_UNSUPPORTED(".word 0xe67fffdf") /* Unallocated space */
833 TEST_RR( "uhsub8 r0, r",0, HH1,", r",1, HH2,"")
834 TEST_RR( "uhsub8 r14, r",12,HH2,", r",10,HH1,"")
835 TEST_UNSUPPORTED(".word 0xe67cfffa @ uhsub8 pc, r12, r10")
836 TEST_UNSUPPORTED(".word 0xe67feffa @ uhsub8 r14, pc, r10")
837 TEST_UNSUPPORTED(".word 0xe67cefff @ uhsub8 r14, r12, pc")
838#endif /* __LINUX_ARM_ARCH__ >= 7 */
839
840#if __LINUX_ARM_ARCH__ >= 6
841 TEST_GROUP("Packing, unpacking, saturation, and reversal")
842
843 TEST_RR( "pkhbt r0, r",0, HH1,", r",1, HH2,"")
844 TEST_RR( "pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2")
845 TEST_UNSUPPORTED(".word 0xe68cf11a @ pkhbt pc, r12, r10, lsl #2")
846 TEST_RR( "pkhtb r0, r",0, HH1,", r",1, HH2,"")
847 TEST_RR( "pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2")
848 TEST_UNSUPPORTED(".word 0xe68cf15a @ pkhtb pc, r12, r10, asr #2")
849 TEST_UNSUPPORTED(".word 0xe68fe15a @ pkhtb r14, pc, r10, asr #2")
850 TEST_UNSUPPORTED(".word 0xe68ce15f @ pkhtb r14, r12, pc, asr #2")
851 TEST_UNSUPPORTED(".word 0xe6900010") /* Unallocated space */
852 TEST_UNSUPPORTED(".word 0xe69fffdf") /* Unallocated space */
853
854 TEST_R( "ssat r0, #24, r",0, VAL1,"")
855 TEST_R( "ssat r14, #24, r",12, VAL2,"")
856 TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8")
857 TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8")
858 TEST_UNSUPPORTED(".word 0xe6b7f01c @ ssat pc, #24, r12")
859
860 TEST_R( "usat r0, #24, r",0, VAL1,"")
861 TEST_R( "usat r14, #24, r",12, VAL2,"")
862 TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8")
863 TEST_R( "usat r14, #24, r",12, VAL2,", asr #8")
864 TEST_UNSUPPORTED(".word 0xe6f7f01c @ usat pc, #24, r12")
865
866 TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"")
867 TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
868 TEST_R( "sxtb16 r8, r",7, HH1,"")
869 TEST_UNSUPPORTED(".word 0xe68cf47a @ sxtab16 pc,r12, r10, ror #8")
870
871 TEST_RR( "sel r0, r",0, VAL1,", r",1, VAL2,"")
872 TEST_RR( "sel r14, r",12,VAL1,", r",10, VAL2,"")
873 TEST_UNSUPPORTED(".word 0xe68cffba @ sel pc, r12, r10")
874 TEST_UNSUPPORTED(".word 0xe68fefba @ sel r14, pc, r10")
875 TEST_UNSUPPORTED(".word 0xe68cefbf @ sel r14, r12, pc")
876
877 TEST_R( "ssat16 r0, #12, r",0, HH1,"")
878 TEST_R( "ssat16 r14, #12, r",12, HH2,"")
879 TEST_UNSUPPORTED(".word 0xe6abff3c @ ssat16 pc, #12, r12")
880
881 TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"")
882 TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
883 TEST_R( "sxtb r8, r",7, HH1,"")
884 TEST_UNSUPPORTED(".word 0xe6acf47a @ sxtab pc,r12, r10, ror #8")
885
886 TEST_R( "rev r0, r",0, VAL1,"")
887 TEST_R( "rev r14, r",12, VAL2,"")
888 TEST_UNSUPPORTED(".word 0xe6bfff3c @ rev pc, r12")
889
890 TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"")
891 TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
892 TEST_R( "sxth r8, r",7, HH1,"")
893 TEST_UNSUPPORTED(".word 0xe6bcf47a @ sxtah pc,r12, r10, ror #8")
894
895 TEST_R( "rev16 r0, r",0, VAL1,"")
896 TEST_R( "rev16 r14, r",12, VAL2,"")
897 TEST_UNSUPPORTED(".word 0xe6bfffbc @ rev16 pc, r12")
898
899 TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"")
900 TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
901 TEST_R( "uxtb16 r8, r",7, HH1,"")
902 TEST_UNSUPPORTED(".word 0xe6ccf47a @ uxtab16 pc,r12, r10, ror #8")
903
904 TEST_R( "usat16 r0, #12, r",0, HH1,"")
905 TEST_R( "usat16 r14, #12, r",12, HH2,"")
906 TEST_UNSUPPORTED(".word 0xe6ecff3c @ usat16 pc, #12, r12")
907 TEST_UNSUPPORTED(".word 0xe6ecef3f @ usat16 r14, #12, pc")
908
909 TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"")
910 TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
911 TEST_R( "uxtb r8, r",7, HH1,"")
912 TEST_UNSUPPORTED(".word 0xe6ecf47a @ uxtab pc,r12, r10, ror #8")
913
914#if __LINUX_ARM_ARCH__ >= 7
915 TEST_R( "rbit r0, r",0, VAL1,"")
916 TEST_R( "rbit r14, r",12, VAL2,"")
917 TEST_UNSUPPORTED(".word 0xe6ffff3c @ rbit pc, r12")
918#endif
919
920 TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"")
921 TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
922 TEST_R( "uxth r8, r",7, HH1,"")
923 TEST_UNSUPPORTED(".word 0xe6fff077 @ uxth pc, r7")
924 TEST_UNSUPPORTED(".word 0xe6ff807f @ uxth r8, pc")
925 TEST_UNSUPPORTED(".word 0xe6fcf47a @ uxtah pc, r12, r10, ror #8")
926 TEST_UNSUPPORTED(".word 0xe6fce47f @ uxtah r14, r12, pc, ror #8")
927
928 TEST_R( "revsh r0, r",0, VAL1,"")
929 TEST_R( "revsh r14, r",12, VAL2,"")
930 TEST_UNSUPPORTED(".word 0xe6ffff3c @ revsh pc, r12")
931 TEST_UNSUPPORTED(".word 0xe6ffef3f @ revsh r14, pc")
932
933 TEST_UNSUPPORTED(".word 0xe6900070") /* Unallocated space */
934 TEST_UNSUPPORTED(".word 0xe69fff7f") /* Unallocated space */
935
936 TEST_UNSUPPORTED(".word 0xe6d00070") /* Unallocated space */
937 TEST_UNSUPPORTED(".word 0xe6dfff7f") /* Unallocated space */
938#endif /* __LINUX_ARM_ARCH__ >= 6 */
939
940#if __LINUX_ARM_ARCH__ >= 6
941 TEST_GROUP("Signed multiplies")
942
943 TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
944 TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
945 TEST_UNSUPPORTED(".word 0xe70f8a1c @ smlad pc, r12, r10, r8")
946 TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
947 TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
948 TEST_UNSUPPORTED(".word 0xe70f8a3c @ smladx pc, r12, r10, r8")
949
950 TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"")
951 TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"")
952 TEST_UNSUPPORTED(".word 0xe70ffa1c @ smuad pc, r12, r10")
953 TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"")
954 TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"")
955 TEST_UNSUPPORTED(".word 0xe70ffa3c @ smuadx pc, r12, r10")
956
957 TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
958 TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
959 TEST_UNSUPPORTED(".word 0xe70f8a5c @ smlsd pc, r12, r10, r8")
960 TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
961 TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
962 TEST_UNSUPPORTED(".word 0xe70f8a7c @ smlsdx pc, r12, r10, r8")
963
964 TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"")
965 TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"")
966 TEST_UNSUPPORTED(".word 0xe70ffa5c @ smusd pc, r12, r10")
967 TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"")
968 TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"")
969 TEST_UNSUPPORTED(".word 0xe70ffa7c @ smusdx pc, r12, r10")
970
971 TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
972 TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
973 TEST_UNSUPPORTED(".word 0xe74af819 @ smlald pc, r10, r9, r8")
974 TEST_UNSUPPORTED(".word 0xe74fb819 @ smlald r11, pc, r9, r8")
975 TEST_UNSUPPORTED(".word 0xe74ab81f @ smlald r11, r10, pc, r8")
976 TEST_UNSUPPORTED(".word 0xe74abf19 @ smlald r11, r10, r9, pc")
977
978 TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
979 TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
980 TEST_UNSUPPORTED(".word 0xe74af839 @ smlaldx pc, r10, r9, r8")
981 TEST_UNSUPPORTED(".word 0xe74fb839 @ smlaldx r11, pc, r9, r8")
982
983 TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
984 TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
985 TEST_UNSUPPORTED(".word 0xe75f8a1c @ smmla pc, r12, r10, r8")
986 TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
987 TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
988 TEST_UNSUPPORTED(".word 0xe75f8a3c @ smmlar pc, r12, r10, r8")
989
990 TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"")
991 TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"")
992 TEST_UNSUPPORTED(".word 0xe75ffa1c @ smmul pc, r12, r10")
993 TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"")
994 TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"")
995 TEST_UNSUPPORTED(".word 0xe75ffa3c @ smmulr pc, r12, r10")
996
997 TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
998 TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
999 TEST_UNSUPPORTED(".word 0xe75f8adc @ smmls pc, r12, r10, r8")
1000 TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
1001 TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
1002 TEST_UNSUPPORTED(".word 0xe75f8afc @ smmlsr pc, r12, r10, r8")
1003 TEST_UNSUPPORTED(".word 0xe75e8aff @ smmlsr r14, pc, r10, r8")
1004 TEST_UNSUPPORTED(".word 0xe75e8ffc @ smmlsr r14, r12, pc, r8")
1005 TEST_UNSUPPORTED(".word 0xe75efafc @ smmlsr r14, r12, r10, pc")
1006
1007 TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"")
1008 TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"")
1009 TEST_UNSUPPORTED(".word 0xe75ffa1c @ usad8 pc, r12, r10")
1010 TEST_UNSUPPORTED(".word 0xe75efa1f @ usad8 r14, pc, r10")
1011 TEST_UNSUPPORTED(".word 0xe75eff1c @ usad8 r14, r12, pc")
1012
1013 TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"")
1014 TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
1015 TEST_UNSUPPORTED(".word 0xe78f8a1c @ usada8 pc, r12, r10, r8")
1016 TEST_UNSUPPORTED(".word 0xe78e8a1f @ usada8 r14, pc, r10, r8")
1017 TEST_UNSUPPORTED(".word 0xe78e8f1c @ usada8 r14, r12, pc, r8")
1018#endif /* __LINUX_ARM_ARCH__ >= 6 */
1019
1020#if __LINUX_ARM_ARCH__ >= 7
1021 TEST_GROUP("Bit Field")
1022
1023 TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31")
1024 TEST_R( "sbfxeq r14, r",12, VAL2,", #8, #16")
1025 TEST_R( "sbfx r4, r",10, VAL1,", #16, #15")
1026 TEST_UNSUPPORTED(".word 0xe7aff45c @ sbfx pc, r12, #8, #16")
1027
1028 TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31")
1029 TEST_R( "ubfxcs r14, r",12, VAL2,", #8, #16")
1030 TEST_R( "ubfx r4, r",10, VAL1,", #16, #15")
1031 TEST_UNSUPPORTED(".word 0xe7eff45c @ ubfx pc, r12, #8, #16")
1032 TEST_UNSUPPORTED(".word 0xe7efc45f @ ubfx r12, pc, #8, #16")
1033
1034 TEST_R( "bfc r",0, VAL1,", #4, #20")
1035 TEST_R( "bfcvs r",14,VAL2,", #4, #20")
1036 TEST_R( "bfc r",7, VAL1,", #0, #31")
1037 TEST_R( "bfc r",8, VAL2,", #0, #31")
1038 TEST_UNSUPPORTED(".word 0xe7def01f @ bfc pc, #0, #31")
1039
1040 TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31")
1041 TEST_RR( "bfipl r",12,VAL1,", r",14 , VAL2,", #4, #20")
1042 TEST_UNSUPPORTED(".word 0xe7d7f21e @ bfi pc, r14, #4, #20")
1043
1044 TEST_UNSUPPORTED(".word 0x07f000f0") /* Permanently UNDEFINED */
1045 TEST_UNSUPPORTED(".word 0x07ffffff") /* Permanently UNDEFINED */
1046#endif /* __LINUX_ARM_ARCH__ >= 7 */
1047
1048 TEST_GROUP("Branch, branch with link, and block data transfer")
1049
1050 TEST_P( "stmda r",0, 16*4,", {r0}")
1051 TEST_P( "stmeqda r",4, 16*4,", {r0-r15}")
1052 TEST_P( "stmneda r",8, 16*4,"!, {r8-r15}")
1053 TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1054 TEST_P( "stmda r",13,0, "!, {pc}")
1055
1056 TEST_P( "ldmda r",0, 16*4,", {r0}")
1057 TEST_BF_P("ldmcsda r",4, 15*4,", {r0-r15}")
1058 TEST_BF_P("ldmccda r",7, 15*4,"!, {r8-r15}")
1059 TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1060 TEST_BF_P("ldmda r",14,15*4,"!, {pc}")
1061
1062 TEST_P( "stmia r",0, 16*4,", {r0}")
1063 TEST_P( "stmmiia r",4, 16*4,", {r0-r15}")
1064 TEST_P( "stmplia r",8, 16*4,"!, {r8-r15}")
1065 TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1066 TEST_P( "stmia r",14,0, "!, {pc}")
1067
1068 TEST_P( "ldmia r",0, 16*4,", {r0}")
1069 TEST_BF_P("ldmvsia r",4, 0, ", {r0-r15}")
1070 TEST_BF_P("ldmvcia r",7, 8*4, "!, {r8-r15}")
1071 TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1072 TEST_BF_P("ldmia r",14,15*4,"!, {pc}")
1073
1074 TEST_P( "stmdb r",0, 16*4,", {r0}")
1075 TEST_P( "stmhidb r",4, 16*4,", {r0-r15}")
1076 TEST_P( "stmlsdb r",8, 16*4,"!, {r8-r15}")
1077 TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1078 TEST_P( "stmdb r",13,4, "!, {pc}")
1079
1080 TEST_P( "ldmdb r",0, 16*4,", {r0}")
1081 TEST_BF_P("ldmgedb r",4, 16*4,", {r0-r15}")
1082 TEST_BF_P("ldmltdb r",7, 16*4,"!, {r8-r15}")
1083 TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1084 TEST_BF_P("ldmdb r",14,16*4,"!, {pc}")
1085
1086 TEST_P( "stmib r",0, 16*4,", {r0}")
1087 TEST_P( "stmgtib r",4, 16*4,", {r0-r15}")
1088 TEST_P( "stmleib r",8, 16*4,"!, {r8-r15}")
1089 TEST_P( "stmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1090 TEST_P( "stmib r",13,-4, "!, {pc}")
1091
1092 TEST_P( "ldmib r",0, 16*4,", {r0}")
1093 TEST_BF_P("ldmeqib r",4, -4,", {r0-r15}")
1094 TEST_BF_P("ldmneib r",7, 7*4,"!, {r8-r15}")
1095 TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
1096 TEST_BF_P("ldmib r",14,14*4,"!, {pc}")
1097
1098 TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}")
1099 TEST_P( "stmeqdb r",13,16*4,"!, {r3-r12}")
1100 TEST_P( "stmnedb r",2, 16*4,", {r3-r12,lr}")
1101 TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}")
1102 TEST_P( "stmdb r",0, 16*4,", {r0-r12}")
1103 TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}")
1104
1105 TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
1106 TEST_P( "ldmccia r",13,5*4, "!, {r3-r12}")
1107 TEST_BF_P("ldmcsia r",2, 5*4, "!, {r3-r12,pc}")
1108 TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
1109 TEST_P( "ldmia r",0, 16*4,", {r0-r12}")
1110 TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}")
1111
1112#ifdef CONFIG_THUMB2_KERNEL
1113 TEST_ARM_TO_THUMB_INTERWORK_P("ldmplia r",0,15*4,", {pc}")
1114 TEST_ARM_TO_THUMB_INTERWORK_P("ldmmiia r",13,0,", {r0-r15}")
1115#endif
1116 TEST_BF("b 2f")
1117 TEST_BF("bl 2f")
1118 TEST_BB("b 2b")
1119 TEST_BB("bl 2b")
1120
1121 TEST_BF("beq 2f")
1122 TEST_BF("bleq 2f")
1123 TEST_BB("bne 2b")
1124 TEST_BB("blne 2b")
1125
1126 TEST_BF("bgt 2f")
1127 TEST_BF("blgt 2f")
1128 TEST_BB("blt 2b")
1129 TEST_BB("bllt 2b")
1130
1131 TEST_GROUP("Supervisor Call, and coprocessor instructions")
1132
1133 /*
1134 * We can't really test these by executing them, so all
1135 * we can do is check that probes are, or are not, allowed.
1136 * At the moment none are allowed...
1137 */
1138#define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code)
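	/*
	 * TEST_COPROCESSOR() is therefore just an alias for TEST_UNSUPPORTED():
	 * each case below only checks that the decoder rejects a probe on the
	 * instruction; no coprocessor access is ever executed.
	 */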
1139
1140#define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \
1141 TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]") \
1142 TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]") \
1143 TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]!") \
1144 TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]!") \
1145 TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #4") \
1146 TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #-4") \
1147 TEST_COPROCESSOR("stc"two" 0, cr0, [r13], {1}") \
1148 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]") \
1149 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]") \
1150 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]!") \
1151 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]!") \
1152 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #4") \
1153 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #-4") \
1154 TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], {1}") \
1155 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]") \
1156 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]") \
1157 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]!") \
1158 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]!") \
1159 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #4") \
1160 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #-4") \
1161 TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], {1}") \
1162 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]") \
1163 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]") \
1164 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]!") \
1165 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]!") \
1166 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #4") \
1167 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #-4") \
1168 TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], {1}") \
1169 \
1170 TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #4]") \
1171 TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #-4]") \
1172 TEST_UNSUPPORTED(".word 0x"cc"daf0001 @ stc"two" 0, cr0, [r15, #4]!") \
1173 TEST_UNSUPPORTED(".word 0x"cc"d2f0001 @ stc"two" 0, cr0, [r15, #-4]!") \
1174 TEST_UNSUPPORTED(".word 0x"cc"caf0001 @ stc"two" 0, cr0, [r15], #4") \
1175 TEST_UNSUPPORTED(".word 0x"cc"c2f0001 @ stc"two" 0, cr0, [r15], #-4") \
1176 TEST_COPROCESSOR( "stc"two" 0, cr0, [r15], {1}") \
1177 TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #4]") \
1178 TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #-4]") \
1179 TEST_UNSUPPORTED(".word 0x"cc"def0001 @ stc"two"l 0, cr0, [r15, #4]!") \
1180 TEST_UNSUPPORTED(".word 0x"cc"d6f0001 @ stc"two"l 0, cr0, [r15, #-4]!") \
1181 TEST_UNSUPPORTED(".word 0x"cc"cef0001 @ stc"two"l 0, cr0, [r15], #4") \
1182 TEST_UNSUPPORTED(".word 0x"cc"c6f0001 @ stc"two"l 0, cr0, [r15], #-4") \
1183 TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15], {1}") \
1184 TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #4]") \
1185 TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #-4]") \
1186 TEST_UNSUPPORTED(".word 0x"cc"dbf0001 @ ldc"two" 0, cr0, [r15, #4]!") \
1187 TEST_UNSUPPORTED(".word 0x"cc"d3f0001 @ ldc"two" 0, cr0, [r15, #-4]!") \
1188 TEST_UNSUPPORTED(".word 0x"cc"cbf0001 @ ldc"two" 0, cr0, [r15], #4") \
1189 TEST_UNSUPPORTED(".word 0x"cc"c3f0001 @ ldc"two" 0, cr0, [r15], #-4") \
1190 TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15], {1}") \
1191 TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #4]") \
1192 TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #-4]") \
1193 TEST_UNSUPPORTED(".word 0x"cc"dff0001 @ ldc"two"l 0, cr0, [r15, #4]!") \
1194 TEST_UNSUPPORTED(".word 0x"cc"d7f0001 @ ldc"two"l 0, cr0, [r15, #-4]!") \
1195 TEST_UNSUPPORTED(".word 0x"cc"cff0001 @ ldc"two"l 0, cr0, [r15], #4") \
1196 TEST_UNSUPPORTED(".word 0x"cc"c7f0001 @ ldc"two"l 0, cr0, [r15], #-4") \
1197 TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15], {1}")
1198
1199#define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \
1200 \
1201 TEST_COPROCESSOR( "mcrr"two" 0, 15, r0, r14, cr0") \
1202 TEST_COPROCESSOR( "mcrr"two" 15, 0, r14, r0, cr15") \
1203 TEST_UNSUPPORTED(".word 0x"cc"c4f00f0 @ mcrr"two" 0, 15, r0, r15, cr0") \
1204 TEST_UNSUPPORTED(".word 0x"cc"c40ff0f @ mcrr"two" 15, 0, r15, r0, cr15") \
1205 TEST_COPROCESSOR( "mrrc"two" 0, 15, r0, r14, cr0") \
1206 TEST_COPROCESSOR( "mrrc"two" 15, 0, r14, r0, cr15") \
1207 TEST_UNSUPPORTED(".word 0x"cc"c5f00f0 @ mrrc"two" 0, 15, r0, r15, cr0") \
1208 TEST_UNSUPPORTED(".word 0x"cc"c50ff0f @ mrrc"two" 15, 0, r15, r0, cr15") \
1209 TEST_COPROCESSOR( "cdp"two" 15, 15, cr15, cr15, cr15, 7") \
1210 TEST_COPROCESSOR( "cdp"two" 0, 0, cr0, cr0, cr0, 0") \
1211 TEST_COPROCESSOR( "mcr"two" 15, 7, r15, cr15, cr15, 7") \
1212 TEST_COPROCESSOR( "mcr"two" 0, 0, r0, cr0, cr0, 0") \
1213 TEST_COPROCESSOR( "mrc"two" 15, 7, r15, cr15, cr15, 7") \
1214 TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0")
1215
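	/*
	 * The "two" argument selects the stc2/ldc2/mcr2/... forms and "cc" is
	 * the matching condition nibble for the hand-coded .word encodings:
	 * ("","e") gives the conditional encodings (0xe... = AL) and
	 * ("2","f") the unconditional ones (0xf...).
	 */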
1216 COPROCESSOR_INSTRUCTIONS_ST_LD("","e")
1217 COPROCESSOR_INSTRUCTIONS_MC_MR("","e")
1218 TEST_UNSUPPORTED("svc 0")
1219 TEST_UNSUPPORTED("svc 0xffffff")
1222
1223 TEST_GROUP("Unconditional instruction")
1224
1225#if __LINUX_ARM_ARCH__ >= 6
1226 TEST_UNSUPPORTED("srsda sp, 0x13")
1227 TEST_UNSUPPORTED("srsdb sp, 0x13")
1228 TEST_UNSUPPORTED("srsia sp, 0x13")
1229 TEST_UNSUPPORTED("srsib sp, 0x13")
1230 TEST_UNSUPPORTED("srsda sp!, 0x13")
1231 TEST_UNSUPPORTED("srsdb sp!, 0x13")
1232 TEST_UNSUPPORTED("srsia sp!, 0x13")
1233 TEST_UNSUPPORTED("srsib sp!, 0x13")
1234
1235 TEST_UNSUPPORTED("rfeda sp")
1236 TEST_UNSUPPORTED("rfedb sp")
1237 TEST_UNSUPPORTED("rfeia sp")
1238 TEST_UNSUPPORTED("rfeib sp")
1239 TEST_UNSUPPORTED("rfeda sp!")
1240 TEST_UNSUPPORTED("rfedb sp!")
1241 TEST_UNSUPPORTED("rfeia sp!")
1242 TEST_UNSUPPORTED("rfeib sp!")
1243 TEST_UNSUPPORTED(".word 0xf81d0a00 @ rfeda pc")
1244 TEST_UNSUPPORTED(".word 0xf91d0a00 @ rfedb pc")
1245 TEST_UNSUPPORTED(".word 0xf89d0a00 @ rfeia pc")
1246 TEST_UNSUPPORTED(".word 0xf99d0a00 @ rfeib pc")
1247 TEST_UNSUPPORTED(".word 0xf83d0a00 @ rfeda pc!")
1248 TEST_UNSUPPORTED(".word 0xf93d0a00 @ rfedb pc!")
1249 TEST_UNSUPPORTED(".word 0xf8bd0a00 @ rfeia pc!")
1250 TEST_UNSUPPORTED(".word 0xf9bd0a00 @ rfeib pc!")
1251#endif /* __LINUX_ARM_ARCH__ >= 6 */
1252
1253#if __LINUX_ARM_ARCH__ >= 6
1254 TEST_X( "blx __dummy_thumb_subroutine_even",
1255 ".thumb \n\t"
1256 ".space 4 \n\t"
1257 ".type __dummy_thumb_subroutine_even, %%function \n\t"
1258 "__dummy_thumb_subroutine_even: \n\t"
1259 "mov r0, pc \n\t"
1260 "bx lr \n\t"
1261 ".arm \n\t"
1262 )
1263 TEST( "blx __dummy_thumb_subroutine_even")
1264
1265 TEST_X( "blx __dummy_thumb_subroutine_odd",
1266 ".thumb \n\t"
1267 ".space 2 \n\t"
1268 ".type __dummy_thumb_subroutine_odd, %%function \n\t"
1269 "__dummy_thumb_subroutine_odd: \n\t"
1270 "mov r0, pc \n\t"
1271 "bx lr \n\t"
1272 ".arm \n\t"
1273 )
1274 TEST( "blx __dummy_thumb_subroutine_odd")
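	/*
	 * The .space 4 and .space 2 padding above places the two dummy Thumb
	 * subroutines at different halfword alignments, so blx is exercised
	 * with both an even and an odd offset to its target.
	 */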
1275#endif /* __LINUX_ARM_ARCH__ >= 6 */
1276
1277 COPROCESSOR_INSTRUCTIONS_ST_LD("2","f")
1278#if __LINUX_ARM_ARCH__ >= 6
1279 COPROCESSOR_INSTRUCTIONS_MC_MR("2","f")
1280#endif
1281
1282 TEST_GROUP("Miscellaneous instructions, memory hints, and Advanced SIMD instructions")
1283
1284#if __LINUX_ARM_ARCH__ >= 6
1285 TEST_UNSUPPORTED("cps 0x13")
1286 TEST_UNSUPPORTED("cpsie i")
1287 TEST_UNSUPPORTED("cpsid i")
1288 TEST_UNSUPPORTED("cpsie i,0x13")
1289 TEST_UNSUPPORTED("cpsid i,0x13")
1290 TEST_UNSUPPORTED("setend le")
1291 TEST_UNSUPPORTED("setend be")
1292#endif
1293
1294#if __LINUX_ARM_ARCH__ >= 7
1295 TEST_P("pli [r",0,0b,", #16]")
1296 TEST( "pli [pc, #0]")
1297 TEST_RR("pli [r",12,0b,", r",0, 16,"]")
1298 TEST_RR("pli [r",0, 0b,", -r",12,16,", lsl #4]")
1299#endif
1300
1301#if __LINUX_ARM_ARCH__ >= 5
1302 TEST_P("pld [r",0,32,", #-16]")
1303 TEST( "pld [pc, #0]")
1304 TEST_PR("pld [r",7, 24, ", r",0, 16,"]")
1305 TEST_PR("pld [r",8, 24, ", -r",12,16,", lsl #4]")
1306#endif
1307
1308#if __LINUX_ARM_ARCH__ >= 7
1309 TEST_SUPPORTED( ".word 0xf590f000 @ pldw [r0, #0]")
1310 TEST_SUPPORTED( ".word 0xf797f000 @ pldw [r7, r0]")
1311 TEST_SUPPORTED( ".word 0xf798f18c @ pldw [r8, r12, lsl #3]")
1312#endif
1313
1314#if __LINUX_ARM_ARCH__ >= 7
1315 TEST_UNSUPPORTED("clrex")
1316 TEST_UNSUPPORTED("dsb")
1317 TEST_UNSUPPORTED("dmb")
1318 TEST_UNSUPPORTED("isb")
1319#endif
1320
1321 verbose("\n");
1322}
1323
diff --git a/arch/arm/kernel/kprobes-test-thumb.c b/arch/arm/kernel/kprobes-test-thumb.c
new file mode 100644
index 000000000000..5e726c31c45a
--- /dev/null
+++ b/arch/arm/kernel/kprobes-test-thumb.c
@@ -0,0 +1,1187 @@
1/*
2 * arch/arm/kernel/kprobes-test-thumb.c
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13
14#include "kprobes-test.h"
15
16
17#define TEST_ISA "16"
18
19#define DONT_TEST_IN_ITBLOCK(tests) \
20 kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK; \
21 tests \
22 kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK;
23
24#define CONDITION_INSTRUCTIONS(cc_pos, tests) \
25 kprobe_test_cc_position = cc_pos; \
26 DONT_TEST_IN_ITBLOCK(tests) \
27 kprobe_test_cc_position = 0;
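/*
 * DONT_TEST_IN_ITBLOCK() temporarily sets TEST_FLAG_NO_ITBLOCK so the
 * harness skips its IT block variants of the enclosed tests, and
 * CONDITION_INSTRUCTIONS() additionally tells it where the condition
 * code field sits (cc_pos) in the enclosed instructions.
 */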
28
29#define TEST_ITBLOCK(code) \
30 kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK; \
31 TESTCASE_START(code) \
32 TEST_ARG_END("") \
33 "50: nop \n\t" \
34 "1: "code" \n\t" \
35 " mov r1, #0x11 \n\t" \
36 " mov r2, #0x22 \n\t" \
37 " mov r3, #0x33 \n\t" \
38 "2: nop \n\t" \
39 TESTCASE_END \
40 kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK;
41
42#define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2) \
43 TESTCASE_START(code1 #reg code2) \
44 TEST_ARG_PTR(reg, val) \
45 TEST_ARG_REG(14, 99f+1) \
46 TEST_ARG_MEM(15, 3f) \
47 TEST_ARG_END("") \
48 " nop \n\t" /* To align 1f */ \
49 "50: nop \n\t" \
50 "1: "code1 #reg code2" \n\t" \
51 " bx lr \n\t" \
52 ".arm \n\t" \
53 "3: adr lr, 2f+1 \n\t" \
54 " bx lr \n\t" \
55 ".thumb \n\t" \
56 "2: nop \n\t" \
57 TESTCASE_END
58
59
60void kprobe_thumb16_test_cases(void)
61{
62 kprobe_test_flags = TEST_FLAG_NARROW_INSTR;
63
64 TEST_GROUP("Shift (immediate), add, subtract, move, and compare")
65
66 TEST_R( "lsls r7, r",0,VAL1,", #5")
67 TEST_R( "lsls r0, r",7,VAL2,", #11")
68 TEST_R( "lsrs r7, r",0,VAL1,", #5")
69 TEST_R( "lsrs r0, r",7,VAL2,", #11")
70 TEST_R( "asrs r7, r",0,VAL1,", #5")
71 TEST_R( "asrs r0, r",7,VAL2,", #11")
72 TEST_RR( "adds r2, r",0,VAL1,", r",7,VAL2,"")
73 TEST_RR( "adds r5, r",7,VAL2,", r",0,VAL2,"")
74 TEST_RR( "subs r2, r",0,VAL1,", r",7,VAL2,"")
75 TEST_RR( "subs r5, r",7,VAL2,", r",0,VAL2,"")
76 TEST_R( "adds r7, r",0,VAL1,", #5")
77 TEST_R( "adds r0, r",7,VAL2,", #2")
78 TEST_R( "subs r7, r",0,VAL1,", #5")
79 TEST_R( "subs r0, r",7,VAL2,", #2")
80 TEST( "movs.n r0, #0x5f")
81 TEST( "movs.n r7, #0xa0")
82 TEST_R( "cmp.n r",0,0x5e, ", #0x5f")
83 TEST_R( "cmp.n r",5,0x15f,", #0x5f")
84 TEST_R( "cmp.n r",7,0xa0, ", #0xa0")
85 TEST_R( "adds.n r",0,VAL1,", #0x5f")
86 TEST_R( "adds.n r",7,VAL2,", #0xa0")
87 TEST_R( "subs.n r",0,VAL1,", #0x5f")
88 TEST_R( "subs.n r",7,VAL2,", #0xa0")
89
90 TEST_GROUP("16-bit Thumb data-processing instructions")
91
92#define DATA_PROCESSING16(op,val) \
93 TEST_RR( op" r",0,VAL1,", r",7,val,"") \
94 TEST_RR( op" r",7,VAL2,", r",0,val,"")
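	/*
	 * Example: DATA_PROCESSING16("ands", 0xf00f00ff) generates tests of
	 * "ands r0, r7" and "ands r7, r0", with one operand holding VAL1/VAL2
	 * and the other the supplied bit pattern.
	 */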
95
96 DATA_PROCESSING16("ands",0xf00f00ff)
97 DATA_PROCESSING16("eors",0xf00f00ff)
98 DATA_PROCESSING16("lsls",11)
99 DATA_PROCESSING16("lsrs",11)
100 DATA_PROCESSING16("asrs",11)
101 DATA_PROCESSING16("adcs",VAL2)
102 DATA_PROCESSING16("sbcs",VAL2)
103 DATA_PROCESSING16("rors",11)
104 DATA_PROCESSING16("tst",0xf00f00ff)
105 TEST_R("rsbs r",0,VAL1,", #0")
106 TEST_R("rsbs r",7,VAL2,", #0")
107 DATA_PROCESSING16("cmp",0xf00f00ff)
108 DATA_PROCESSING16("cmn",0xf00f00ff)
109 DATA_PROCESSING16("orrs",0xf00f00ff)
110 DATA_PROCESSING16("muls",VAL2)
111 DATA_PROCESSING16("bics",0xf00f00ff)
112 DATA_PROCESSING16("mvns",VAL2)
113
114 TEST_GROUP("Special data instructions and branch and exchange")
115
116 TEST_RR( "add r",0, VAL1,", r",7,VAL2,"")
117 TEST_RR( "add r",3, VAL2,", r",8,VAL3,"")
118 TEST_RR( "add r",8, VAL3,", r",0,VAL1,"")
119 TEST_R( "add sp" ", r",8,-8, "")
120 TEST_R( "add r",14,VAL1,", pc")
121 TEST_BF_R("add pc" ", r",0,2f-1f-8,"")
122 TEST_UNSUPPORTED(".short 0x44ff @ add pc, pc")
123
124 TEST_RR( "cmp r",3,VAL1,", r",8,VAL2,"")
125 TEST_RR( "cmp r",8,VAL2,", r",0,VAL1,"")
126 TEST_R( "cmp sp" ", r",8,-8, "")
127
128 TEST_R( "mov r0, r",7,VAL2,"")
129 TEST_R( "mov r3, r",8,VAL3,"")
130 TEST_R( "mov r8, r",0,VAL1,"")
131 TEST_P( "mov sp, r",8,-8, "")
132 TEST( "mov lr, pc")
133 TEST_BF_R("mov pc, r",0,2f, "")
134
135 TEST_BF_R("bx r",0, 2f+1,"")
136 TEST_BF_R("bx r",14,2f+1,"")
137 TESTCASE_START("bx pc")
138 TEST_ARG_REG(14, 99f+1)
139 TEST_ARG_END("")
140 " nop \n\t" /* To align the bx pc*/
141 "50: nop \n\t"
142 "1: bx pc \n\t"
143 " bx lr \n\t"
144 ".arm \n\t"
145 " adr lr, 2f+1 \n\t"
146 " bx lr \n\t"
147 ".thumb \n\t"
148 "2: nop \n\t"
149 TESTCASE_END
150
151 TEST_BF_R("blx r",0, 2f+1,"")
152 TEST_BB_R("blx r",14,2f+1,"")
153 TEST_UNSUPPORTED(".short 0x47f8 @ blx pc")
154
155 TEST_GROUP("Load from Literal Pool")
156
157 TEST_X( "ldr r0, 3f",
158 ".align \n\t"
159 "3: .word "__stringify(VAL1))
160 TEST_X( "ldr r7, 3f",
161 ".space 128 \n\t"
162 ".align \n\t"
163 "3: .word "__stringify(VAL2))
164
165 TEST_GROUP("16-bit Thumb Load/store instructions")
166
167 TEST_RPR("str r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
168 TEST_RPR("str r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
169 TEST_RPR("strh r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
170 TEST_RPR("strh r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
171 TEST_RPR("strb r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
172 TEST_RPR("strb r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
173 TEST_PR( "ldrsb r0, [r",1, 24,", r",2, 48,"]")
174 TEST_PR( "ldrsb r7, [r",6, 24,", r",5, 50,"]")
175 TEST_PR( "ldr r0, [r",1, 24,", r",2, 48,"]")
176 TEST_PR( "ldr r7, [r",6, 24,", r",5, 48,"]")
177 TEST_PR( "ldrh r0, [r",1, 24,", r",2, 48,"]")
178 TEST_PR( "ldrh r7, [r",6, 24,", r",5, 50,"]")
179 TEST_PR( "ldrb r0, [r",1, 24,", r",2, 48,"]")
180 TEST_PR( "ldrb r7, [r",6, 24,", r",5, 50,"]")
181 TEST_PR( "ldrsh r0, [r",1, 24,", r",2, 48,"]")
182 TEST_PR( "ldrsh r7, [r",6, 24,", r",5, 50,"]")
183
184 TEST_RP("str r",0, VAL1,", [r",1, 24,", #120]")
185 TEST_RP("str r",7, VAL2,", [r",6, 24,", #120]")
186 TEST_P( "ldr r0, [r",1, 24,", #120]")
187 TEST_P( "ldr r7, [r",6, 24,", #120]")
188 TEST_RP("strb r",0, VAL1,", [r",1, 24,", #30]")
189 TEST_RP("strb r",7, VAL2,", [r",6, 24,", #30]")
190 TEST_P( "ldrb r0, [r",1, 24,", #30]")
191 TEST_P( "ldrb r7, [r",6, 24,", #30]")
192 TEST_RP("strh r",0, VAL1,", [r",1, 24,", #60]")
193 TEST_RP("strh r",7, VAL2,", [r",6, 24,", #60]")
194 TEST_P( "ldrh r0, [r",1, 24,", #60]")
195 TEST_P( "ldrh r7, [r",6, 24,", #60]")
196
197 TEST_R( "str r",0, VAL1,", [sp, #0]")
198 TEST_R( "str r",7, VAL2,", [sp, #160]")
199 TEST( "ldr r0, [sp, #0]")
200 TEST( "ldr r7, [sp, #160]")
201
202 TEST_RP("str r",0, VAL1,", [r",0, 24,"]")
203 TEST_P( "ldr r0, [r",0, 24,"]")
204
205 TEST_GROUP("Generate PC-/SP-relative address")
206
207 TEST("add r0, pc, #4")
208 TEST("add r7, pc, #1020")
209 TEST("add r0, sp, #4")
210 TEST("add r7, sp, #1020")
211
212 TEST_GROUP("Miscellaneous 16-bit instructions")
213
214 TEST_UNSUPPORTED( "cpsie i")
215 TEST_UNSUPPORTED( "cpsid i")
216 TEST_UNSUPPORTED( "setend le")
217 TEST_UNSUPPORTED( "setend be")
218
219 TEST("add sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */
220 TEST("sub sp, #0x7f*4")
221
222DONT_TEST_IN_ITBLOCK(
223 TEST_BF_R( "cbnz r",0,0, ", 2f")
224 TEST_BF_R( "cbz r",2,-1,", 2f")
225 TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20)
226 TEST_BF_RX( "cbz r",7,0, ", 2f",0x40)
227)
228 TEST_R("sxth r0, r",7, HH1,"")
229 TEST_R("sxth r7, r",0, HH2,"")
230 TEST_R("sxtb r0, r",7, HH1,"")
231 TEST_R("sxtb r7, r",0, HH2,"")
232 TEST_R("uxth r0, r",7, HH1,"")
233 TEST_R("uxth r7, r",0, HH2,"")
234 TEST_R("uxtb r0, r",7, HH1,"")
235 TEST_R("uxtb r7, r",0, HH2,"")
236 TEST_R("rev r0, r",7, VAL1,"")
237 TEST_R("rev r7, r",0, VAL2,"")
238 TEST_R("rev16 r0, r",7, VAL1,"")
239 TEST_R("rev16 r7, r",0, VAL2,"")
240 TEST_UNSUPPORTED(".short 0xba80")
241 TEST_UNSUPPORTED(".short 0xbabf")
242 TEST_R("revsh r0, r",7, VAL1,"")
243 TEST_R("revsh r7, r",0, VAL2,"")
244
245#define TEST_POPPC(code, offset) \
246 TESTCASE_START(code) \
247 TEST_ARG_PTR(13, offset) \
248 TEST_ARG_END("") \
249 TEST_BRANCH_F(code,0) \
250 TESTCASE_END
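	/*
	 * TEST_POPPC() points sp (TEST_ARG_PTR(13, offset)) at a known slot in
	 * the test stack area so that the value popped into pc is the forward
	 * branch target checked by TEST_BRANCH_F().
	 */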
251
252 TEST("push {r0}")
253 TEST("push {r7}")
254 TEST("push {r14}")
255 TEST("push {r0-r7,r14}")
256 TEST("push {r0,r2,r4,r6,r14}")
257 TEST("push {r1,r3,r5,r7}")
258 TEST("pop {r0}")
259 TEST("pop {r7}")
260 TEST("pop {r0,r2,r4,r6}")
261 TEST_POPPC("pop {pc}",15*4)
262 TEST_POPPC("pop {r0-r7,pc}",7*4)
263 TEST_POPPC("pop {r1,r3,r5,r7,pc}",11*4)
264 TEST_THUMB_TO_ARM_INTERWORK_P("pop {pc} @ ",13,15*4,"")
265 TEST_THUMB_TO_ARM_INTERWORK_P("pop {r0-r7,pc} @ ",13,7*4,"")
266
267 TEST_UNSUPPORTED("bkpt.n 0")
268 TEST_UNSUPPORTED("bkpt.n 255")
269
270 TEST_SUPPORTED("yield")
271 TEST("sev")
272 TEST("nop")
273 TEST("wfi")
274 TEST_SUPPORTED("wfe")
275 TEST_UNSUPPORTED(".short 0xbf50") /* Unassigned hints */
276 TEST_UNSUPPORTED(".short 0xbff0") /* Unassigned hints */
277
278#define TEST_IT(code, code2) \
279 TESTCASE_START(code) \
280 TEST_ARG_END("") \
281 "50: nop \n\t" \
282 "1: "code" \n\t" \
283 " "code2" \n\t" \
284 "2: nop \n\t" \
285 TESTCASE_END
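	/*
	 * In TEST_IT() the first argument is the "it" instruction placed at the
	 * probe point (label 1) and the second is its conditional body, so each
	 * case below probes a complete IT construct.
	 */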
286
287DONT_TEST_IN_ITBLOCK(
288 TEST_IT("it eq","moveq r0,#0")
289 TEST_IT("it vc","movvc r0,#0")
290 TEST_IT("it le","movle r0,#0")
291 TEST_IT("ite eq","moveq r0,#0\n\t movne r1,#1")
292 TEST_IT("itet vc","movvc r0,#0\n\t movvs r1,#1\n\t movvc r2,#2")
293 TEST_IT("itete le","movle r0,#0\n\t movgt r1,#1\n\t movle r2,#2\n\t movgt r3,#3")
294 TEST_IT("itttt le","movle r0,#0\n\t movle r1,#1\n\t movle r2,#2\n\t movle r3,#3")
295 TEST_IT("iteee le","movle r0,#0\n\t movgt r1,#1\n\t movgt r2,#2\n\t movgt r3,#3")
296)
297
298 TEST_GROUP("Load and store multiple")
299
300 TEST_P("ldmia r",4, 16*4,"!, {r0,r7}")
301 TEST_P("ldmia r",7, 16*4,"!, {r0-r6}")
302 TEST_P("stmia r",4, 16*4,"!, {r0,r7}")
303 TEST_P("stmia r",0, 16*4,"!, {r0-r7}")
304
305 TEST_GROUP("Conditional branch and Supervisor Call instructions")
306
307CONDITION_INSTRUCTIONS(8,
308 TEST_BF("beq 2f")
309 TEST_BB("bne 2b")
310 TEST_BF("bgt 2f")
311 TEST_BB("blt 2b")
312)
313 TEST_UNSUPPORTED(".short 0xde00")
314 TEST_UNSUPPORTED(".short 0xdeff")
315 TEST_UNSUPPORTED("svc #0x00")
316 TEST_UNSUPPORTED("svc #0xff")
317
318 TEST_GROUP("Unconditional branch")
319
320 TEST_BF( "b 2f")
321 TEST_BB( "b 2b")
322 TEST_BF_X("b 2f", 0x400)
323 TEST_BB_X("b 2b", 0x400)
324
325 TEST_GROUP("Testing instructions in IT blocks")
326
327 TEST_ITBLOCK("subs.n r0, r0")
328
329 verbose("\n");
330}
331
332
333void kprobe_thumb32_test_cases(void)
334{
335 kprobe_test_flags = 0;
336
337 TEST_GROUP("Load/store multiple")
338
339 TEST_UNSUPPORTED("rfedb sp")
340 TEST_UNSUPPORTED("rfeia sp")
341 TEST_UNSUPPORTED("rfedb sp!")
342 TEST_UNSUPPORTED("rfeia sp!")
343
344 TEST_P( "stmia r",0, 16*4,", {r0,r8}")
345 TEST_P( "stmia r",4, 16*4,", {r0-r12,r14}")
346 TEST_P( "stmia r",7, 16*4,"!, {r8-r12,r14}")
347 TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
348
349 TEST_P( "ldmia r",0, 16*4,", {r0,r8}")
350 TEST_P( "ldmia r",4, 0, ", {r0-r12,r14}")
351 TEST_BF_P("ldmia r",5, 8*4, "!, {r6-r12,r15}")
352 TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
353 TEST_BF_P("ldmia r",14,14*4,"!, {r4,pc}")
354
355 TEST_P( "stmdb r",0, 16*4,", {r0,r8}")
356 TEST_P( "stmdb r",4, 16*4,", {r0-r12,r14}")
357 TEST_P( "stmdb r",5, 16*4,"!, {r8-r12,r14}")
358 TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
359
360 TEST_P( "ldmdb r",0, 16*4,", {r0,r8}")
361 TEST_P( "ldmdb r",4, 16*4,", {r0-r12,r14}")
362 TEST_BF_P("ldmdb r",5, 16*4,"!, {r6-r12,r15}")
363 TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
364 TEST_BF_P("ldmdb r",14,16*4,"!, {r4,pc}")
365
366 TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}")
367 TEST_P( "stmdb r",13,16*4,"!, {r3-r12}")
368 TEST_P( "stmdb r",2, 16*4,", {r3-r12,lr}")
369 TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}")
370 TEST_P( "stmdb r",0, 16*4,", {r0-r12}")
371 TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}")
372
373 TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
374 TEST_P( "ldmia r",13,5*4, "!, {r3-r12}")
375 TEST_BF_P("ldmia r",2, 5*4, "!, {r3-r12,pc}")
376 TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
377 TEST_P( "ldmia r",0, 16*4,", {r0-r12}")
378 TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}")
379
380 TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",0,14*4,", {r12,pc}")
381 TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",13,2*4,", {r0-r12,pc}")
382
383 TEST_UNSUPPORTED(".short 0xe88f,0x0101 @ stmia pc, {r0,r8}")
384 TEST_UNSUPPORTED(".short 0xe92f,0x5f00 @ stmdb pc!, {r8-r12,r14}")
385 TEST_UNSUPPORTED(".short 0xe8bd,0xc000 @ ldmia r13!, {r14,pc}")
386 TEST_UNSUPPORTED(".short 0xe93e,0xc000 @ ldmdb r14!, {r14,pc}")
387 TEST_UNSUPPORTED(".short 0xe8a7,0x3f00 @ stmia r7!, {r8-r12,sp}")
388 TEST_UNSUPPORTED(".short 0xe8a7,0x9f00 @ stmia r7!, {r8-r12,pc}")
389 TEST_UNSUPPORTED(".short 0xe93e,0x2010 @ ldmdb r14!, {r4,sp}")
390
391 TEST_GROUP("Load/store double or exclusive, table branch")
392
393 TEST_P( "ldrd r0, r1, [r",1, 24,", #-16]")
394 TEST( "ldrd r12, r14, [sp, #16]")
395 TEST_P( "ldrd r1, r0, [r",7, 24,", #-16]!")
396 TEST( "ldrd r14, r12, [sp, #16]!")
397 TEST_P( "ldrd r1, r0, [r",7, 24,"], #16")
398 TEST( "ldrd r7, r8, [sp], #-16")
399
400 TEST_X( "ldrd r12, r14, 3f",
401 ".align 3 \n\t"
402 "3: .word "__stringify(VAL1)" \n\t"
403 " .word "__stringify(VAL2))
404
405 TEST_UNSUPPORTED(".short 0xe9ff,0xec04 @ ldrd r14, r12, [pc, #16]!")
406 TEST_UNSUPPORTED(".short 0xe8ff,0xec04 @ ldrd r14, r12, [pc], #16")
407 TEST_UNSUPPORTED(".short 0xe9d4,0xd800 @ ldrd sp, r8, [r4]")
408 TEST_UNSUPPORTED(".short 0xe9d4,0xf800 @ ldrd pc, r8, [r4]")
409 TEST_UNSUPPORTED(".short 0xe9d4,0x7d00 @ ldrd r7, sp, [r4]")
410 TEST_UNSUPPORTED(".short 0xe9d4,0x7f00 @ ldrd r7, pc, [r4]")
411
412 TEST_RRP("strd r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
413 TEST_RR( "strd r",12,VAL2,", r",14,VAL1,", [sp, #16]")
414 TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!")
415 TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
416 TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
417 TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16")
418 TEST_UNSUPPORTED(".short 0xe9ef,0xec04 @ strd r14, r12, [pc, #16]!")
419 TEST_UNSUPPORTED(".short 0xe8ef,0xec04 @ strd r14, r12, [pc], #16")
420
421 TEST_RX("tbb [pc, r",0, (9f-(1f+4)),"]",
422 "9: \n\t"
423 ".byte (2f-1b-4)>>1 \n\t"
424 ".byte (3f-1b-4)>>1 \n\t"
425 "3: mvn r0, r0 \n\t"
426 "2: nop \n\t")
427
428 TEST_RX("tbb [pc, r",4, (9f-(1f+4)+1),"]",
429 "9: \n\t"
430 ".byte (2f-1b-4)>>1 \n\t"
431 ".byte (3f-1b-4)>>1 \n\t"
432 "3: mvn r0, r0 \n\t"
433 "2: nop \n\t")
434
435 TEST_RRX("tbb [r",1,9f,", r",2,0,"]",
436 "9: \n\t"
437 ".byte (2f-1b-4)>>1 \n\t"
438 ".byte (3f-1b-4)>>1 \n\t"
439 "3: mvn r0, r0 \n\t"
440 "2: nop \n\t")
441
442 TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,"]",
443 "9: \n\t"
444 ".short (2f-1b-4)>>1 \n\t"
445 ".short (3f-1b-4)>>1 \n\t"
446 "3: mvn r0, r0 \n\t"
447 "2: nop \n\t")
448
449 TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,"]",
450 "9: \n\t"
451 ".short (2f-1b-4)>>1 \n\t"
452 ".short (3f-1b-4)>>1 \n\t"
453 "3: mvn r0, r0 \n\t"
454 "2: nop \n\t")
455
456 TEST_RRX("tbh [r",1,9f, ", r",14,1,"]",
457 "9: \n\t"
458 ".short (2f-1b-4)>>1 \n\t"
459 ".short (3f-1b-4)>>1 \n\t"
460 "3: mvn r0, r0 \n\t"
461 "2: nop \n\t")
462
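	/*
	 * The inline tables at label 9 above hold halfword-scaled offsets,
	 * (2f-1b-4)>>1 and friends, measured from the pc value at the tbb/tbh
	 * instruction (1f+4); hence the index arguments of the form 9f-(1f+4).
	 */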
463 TEST_UNSUPPORTED(".short 0xe8d1,0xf01f @ tbh [r1, pc]")
464 TEST_UNSUPPORTED(".short 0xe8d1,0xf01d @ tbh [r1, sp]")
465 TEST_UNSUPPORTED(".short 0xe8dd,0xf012 @ tbh [sp, r2]")
466
467 TEST_UNSUPPORTED("strexb r0, r1, [r2]")
468 TEST_UNSUPPORTED("strexh r0, r1, [r2]")
469 TEST_UNSUPPORTED("strexd r0, r1, [r2]")
470 TEST_UNSUPPORTED("ldrexb r0, [r1]")
471 TEST_UNSUPPORTED("ldrexh r0, [r1]")
472 TEST_UNSUPPORTED("ldrexd r0, [r1]")
473
474 TEST_GROUP("Data-processing (shifted register) and (modified immediate)")
475
476#define _DATA_PROCESSING32_DNM(op,s,val) \
477 TEST_RR(op s".w r0, r",1, VAL1,", r",2, val, "") \
478 TEST_RR(op s" r1, r",1, VAL1,", r",2, val, ", lsl #3") \
479 TEST_RR(op s" r2, r",3, VAL1,", r",2, val, ", lsr #4") \
480 TEST_RR(op s" r3, r",3, VAL1,", r",2, val, ", asr #5") \
481 TEST_RR(op s" r4, r",5, VAL1,", r",2, N(val),", asr #6") \
482 TEST_RR(op s" r5, r",5, VAL1,", r",2, val, ", ror #7") \
483 TEST_RR(op s" r8, r",9, VAL1,", r",10,val, ", rrx") \
484 TEST_R( op s" r0, r",11,VAL1,", #0x00010001") \
485 TEST_R( op s" r11, r",0, VAL1,", #0xf5000000") \
486 TEST_R( op s" r7, r",8, VAL2,", #0x000af000")
487
488#define DATA_PROCESSING32_DNM(op,val) \
489 _DATA_PROCESSING32_DNM(op,"",val) \
490 _DATA_PROCESSING32_DNM(op,"s",val)
491
492#define DATA_PROCESSING32_NM(op,val) \
493 TEST_RR(op".w r",1, VAL1,", r",2, val, "") \
494 TEST_RR(op" r",1, VAL1,", r",2, val, ", lsl #3") \
495 TEST_RR(op" r",3, VAL1,", r",2, val, ", lsr #4") \
496 TEST_RR(op" r",3, VAL1,", r",2, val, ", asr #5") \
497 TEST_RR(op" r",5, VAL1,", r",2, N(val),", asr #6") \
498 TEST_RR(op" r",5, VAL1,", r",2, val, ", ror #7") \
499 TEST_RR(op" r",9, VAL1,", r",10,val, ", rrx") \
500 TEST_R( op" r",11,VAL1,", #0x00010001") \
501 TEST_R( op" r",0, VAL1,", #0xf5000000") \
502 TEST_R( op" r",8, VAL2,", #0x000af000")
503
504#define _DATA_PROCESSING32_DM(op,s,val) \
505 TEST_R( op s".w r0, r",14, val, "") \
506 TEST_R( op s" r1, r",12, val, ", lsl #3") \
507 TEST_R( op s" r2, r",11, val, ", lsr #4") \
508 TEST_R( op s" r3, r",10, val, ", asr #5") \
509 TEST_R( op s" r4, r",9, N(val),", asr #6") \
510 TEST_R( op s" r5, r",8, val, ", ror #7") \
511 TEST_R( op s" r8, r",7,val, ", rrx") \
512 TEST( op s" r0, #0x00010001") \
513 TEST( op s" r11, #0xf5000000") \
514 TEST( op s" r7, #0x000af000") \
515 TEST( op s" r4, #0x00005a00")
516
517#define DATA_PROCESSING32_DM(op,val) \
518 _DATA_PROCESSING32_DM(op,"",val) \
519 _DATA_PROCESSING32_DM(op,"s",val)
520
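	/*
	 * The macro names encode the operands taken: DNM forms have a
	 * destination and two sources, NM forms (tst/teq/cmp/cmn) have no
	 * destination, and DM forms (mov/mvn) have no first source operand.
	 */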
521 DATA_PROCESSING32_DNM("and",0xf00f00ff)
522 DATA_PROCESSING32_NM("tst",0xf00f00ff)
523 DATA_PROCESSING32_DNM("bic",0xf00f00ff)
524 DATA_PROCESSING32_DNM("orr",0xf00f00ff)
525 DATA_PROCESSING32_DM("mov",VAL2)
526 DATA_PROCESSING32_DNM("orn",0xf00f00ff)
527 DATA_PROCESSING32_DM("mvn",VAL2)
528 DATA_PROCESSING32_DNM("eor",0xf00f00ff)
529 DATA_PROCESSING32_NM("teq",0xf00f00ff)
530 DATA_PROCESSING32_DNM("add",VAL2)
531 DATA_PROCESSING32_NM("cmn",VAL2)
532 DATA_PROCESSING32_DNM("adc",VAL2)
533 DATA_PROCESSING32_DNM("sbc",VAL2)
534 DATA_PROCESSING32_DNM("sub",VAL2)
535 DATA_PROCESSING32_NM("cmp",VAL2)
536 DATA_PROCESSING32_DNM("rsb",VAL2)
537
538 TEST_RR("pkhbt r0, r",0, HH1,", r",1, HH2,"")
539 TEST_RR("pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2")
540 TEST_RR("pkhtb r0, r",0, HH1,", r",1, HH2,"")
541 TEST_RR("pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2")
542
543 TEST_UNSUPPORTED(".short 0xea17,0x0f0d @ tst.w r7, sp")
544 TEST_UNSUPPORTED(".short 0xea17,0x0f0f @ tst.w r7, pc")
545 TEST_UNSUPPORTED(".short 0xea1d,0x0f07 @ tst.w sp, r7")
546 TEST_UNSUPPORTED(".short 0xea1f,0x0f07 @ tst.w pc, r7")
547 TEST_UNSUPPORTED(".short 0xf01d,0x1f08 @ tst sp, #0x00080008")
548 TEST_UNSUPPORTED(".short 0xf01f,0x1f08 @ tst pc, #0x00080008")
549
550 TEST_UNSUPPORTED(".short 0xea97,0x0f0d @ teq.w r7, sp")
551 TEST_UNSUPPORTED(".short 0xea97,0x0f0f @ teq.w r7, pc")
552 TEST_UNSUPPORTED(".short 0xea9d,0x0f07 @ teq.w sp, r7")
553 TEST_UNSUPPORTED(".short 0xea9f,0x0f07 @ teq.w pc, r7")
554 TEST_UNSUPPORTED(".short 0xf09d,0x1f08 @ tst sp, #0x00080008")
555 TEST_UNSUPPORTED(".short 0xf09f,0x1f08 @ tst pc, #0x00080008")
556
557 TEST_UNSUPPORTED(".short 0xeb17,0x0f0d @ cmn.w r7, sp")
558 TEST_UNSUPPORTED(".short 0xeb17,0x0f0f @ cmn.w r7, pc")
559 TEST_P("cmn.w sp, r",7,0,"")
560 TEST_UNSUPPORTED(".short 0xeb1f,0x0f07 @ cmn.w pc, r7")
561 TEST( "cmn sp, #0x00080008")
562 TEST_UNSUPPORTED(".short 0xf11f,0x1f08 @ cmn pc, #0x00080008")
563
564 TEST_UNSUPPORTED(".short 0xebb7,0x0f0d @ cmp.w r7, sp")
565 TEST_UNSUPPORTED(".short 0xebb7,0x0f0f @ cmp.w r7, pc")
566 TEST_P("cmp.w sp, r",7,0,"")
567 TEST_UNSUPPORTED(".short 0xebbf,0x0f07 @ cmp.w pc, r7")
568 TEST( "cmp sp, #0x00080008")
569 TEST_UNSUPPORTED(".short 0xf1bf,0x1f08 @ cmp pc, #0x00080008")
570
571 TEST_UNSUPPORTED(".short 0xea5f,0x070d @ movs.w r7, sp")
572 TEST_UNSUPPORTED(".short 0xea5f,0x070f @ movs.w r7, pc")
573 TEST_UNSUPPORTED(".short 0xea5f,0x0d07 @ movs.w sp, r7")
574 TEST_UNSUPPORTED(".short 0xea4f,0x0f07 @ mov.w pc, r7")
575 TEST_UNSUPPORTED(".short 0xf04f,0x1d08 @ mov sp, #0x00080008")
576 TEST_UNSUPPORTED(".short 0xf04f,0x1f08 @ mov pc, #0x00080008")
577
578 TEST_R("add.w r0, sp, r",1, 4,"")
579 TEST_R("adds r0, sp, r",1, 4,", asl #3")
580 TEST_R("add r0, sp, r",1, 4,", asl #4")
581 TEST_R("add r0, sp, r",1, 16,", ror #1")
582 TEST_R("add.w sp, sp, r",1, 4,"")
583 TEST_R("add sp, sp, r",1, 4,", asl #3")
584 TEST_UNSUPPORTED(".short 0xeb0d,0x1d01 @ add sp, sp, r1, asl #4")
585 TEST_UNSUPPORTED(".short 0xeb0d,0x0d71 @ add sp, sp, r1, ror #1")
586 TEST( "add.w r0, sp, #24")
587 TEST( "add.w sp, sp, #24")
588 TEST_UNSUPPORTED(".short 0xeb0d,0x0f01 @ add pc, sp, r1")
589 TEST_UNSUPPORTED(".short 0xeb0d,0x000f @ add r0, sp, pc")
590 TEST_UNSUPPORTED(".short 0xeb0d,0x000d @ add r0, sp, sp")
591 TEST_UNSUPPORTED(".short 0xeb0d,0x0d0f @ add sp, sp, pc")
592 TEST_UNSUPPORTED(".short 0xeb0d,0x0d0d @ add sp, sp, sp")
593
594 TEST_R("sub.w r0, sp, r",1, 4,"")
595 TEST_R("subs r0, sp, r",1, 4,", asl #3")
596 TEST_R("sub r0, sp, r",1, 4,", asl #4")
597 TEST_R("sub r0, sp, r",1, 16,", ror #1")
598 TEST_R("sub.w sp, sp, r",1, 4,"")
599 TEST_R("sub sp, sp, r",1, 4,", asl #3")
600 TEST_UNSUPPORTED(".short 0xebad,0x1d01 @ sub sp, sp, r1, asl #4")
601 TEST_UNSUPPORTED(".short 0xebad,0x0d71 @ sub sp, sp, r1, ror #1")
602 TEST_UNSUPPORTED(".short 0xebad,0x0f01 @ sub pc, sp, r1")
603 TEST( "sub.w r0, sp, #24")
604 TEST( "sub.w sp, sp, #24")
605
606 TEST_UNSUPPORTED(".short 0xea02,0x010f @ and r1, r2, pc")
607 TEST_UNSUPPORTED(".short 0xea0f,0x0103 @ and r1, pc, r3")
608 TEST_UNSUPPORTED(".short 0xea02,0x0f03 @ and pc, r2, r3")
609 TEST_UNSUPPORTED(".short 0xea02,0x010d @ and r1, r2, sp")
610 TEST_UNSUPPORTED(".short 0xea0d,0x0103 @ and r1, sp, r3")
611 TEST_UNSUPPORTED(".short 0xea02,0x0d03 @ and sp, r2, r3")
612 TEST_UNSUPPORTED(".short 0xf00d,0x1108 @ and r1, sp, #0x00080008")
613 TEST_UNSUPPORTED(".short 0xf00f,0x1108 @ and r1, pc, #0x00080008")
614 TEST_UNSUPPORTED(".short 0xf002,0x1d08 @ and sp, r8, #0x00080008")
615 TEST_UNSUPPORTED(".short 0xf002,0x1f08 @ and pc, r8, #0x00080008")
616
617 TEST_UNSUPPORTED(".short 0xeb02,0x010f @ add r1, r2, pc")
618 TEST_UNSUPPORTED(".short 0xeb0f,0x0103 @ add r1, pc, r3")
619 TEST_UNSUPPORTED(".short 0xeb02,0x0f03 @ add pc, r2, r3")
620 TEST_UNSUPPORTED(".short 0xeb02,0x010d @ add r1, r2, sp")
621 TEST_SUPPORTED( ".short 0xeb0d,0x0103 @ add r1, sp, r3")
622 TEST_UNSUPPORTED(".short 0xeb02,0x0d03 @ add sp, r2, r3")
623 TEST_SUPPORTED( ".short 0xf10d,0x1108 @ add r1, sp, #0x00080008")
624 TEST_UNSUPPORTED(".short 0xf10d,0x1f08 @ add pc, sp, #0x00080008")
625 TEST_UNSUPPORTED(".short 0xf10f,0x1108 @ add r1, pc, #0x00080008")
626 TEST_UNSUPPORTED(".short 0xf102,0x1d08 @ add sp, r8, #0x00080008")
627 TEST_UNSUPPORTED(".short 0xf102,0x1f08 @ add pc, r8, #0x00080008")
628
629 TEST_UNSUPPORTED(".short 0xeaa0,0x0000")
630 TEST_UNSUPPORTED(".short 0xeaf0,0x0000")
631 TEST_UNSUPPORTED(".short 0xeb20,0x0000")
632 TEST_UNSUPPORTED(".short 0xeb80,0x0000")
633 TEST_UNSUPPORTED(".short 0xebe0,0x0000")
634
635 TEST_UNSUPPORTED(".short 0xf0a0,0x0000")
636 TEST_UNSUPPORTED(".short 0xf0c0,0x0000")
637 TEST_UNSUPPORTED(".short 0xf0f0,0x0000")
638 TEST_UNSUPPORTED(".short 0xf120,0x0000")
639 TEST_UNSUPPORTED(".short 0xf180,0x0000")
640 TEST_UNSUPPORTED(".short 0xf1e0,0x0000")
641
642 TEST_GROUP("Coprocessor instructions")
643
644 TEST_UNSUPPORTED(".short 0xec00,0x0000")
645 TEST_UNSUPPORTED(".short 0xeff0,0x0000")
646 TEST_UNSUPPORTED(".short 0xfc00,0x0000")
647 TEST_UNSUPPORTED(".short 0xfff0,0x0000")
648
649 TEST_GROUP("Data-processing (plain binary immediate)")
650
651 TEST_R("addw r0, r",1, VAL1,", #0x123")
652 TEST( "addw r14, sp, #0xf5a")
653 TEST( "addw sp, sp, #0x20")
654 TEST( "addw r7, pc, #0x888")
655 TEST_UNSUPPORTED(".short 0xf20f,0x1f20 @ addw pc, pc, #0x120")
656 TEST_UNSUPPORTED(".short 0xf20d,0x1f20 @ addw pc, sp, #0x120")
657 TEST_UNSUPPORTED(".short 0xf20f,0x1d20 @ addw sp, pc, #0x120")
658 TEST_UNSUPPORTED(".short 0xf200,0x1d20 @ addw sp, r0, #0x120")
659
660 TEST_R("subw r0, r",1, VAL1,", #0x123")
661 TEST( "subw r14, sp, #0xf5a")
662 TEST( "subw sp, sp, #0x20")
663 TEST( "subw r7, pc, #0x888")
664 TEST_UNSUPPORTED(".short 0xf2af,0x1f20 @ subw pc, pc, #0x120")
665 TEST_UNSUPPORTED(".short 0xf2ad,0x1f20 @ subw pc, sp, #0x120")
666 TEST_UNSUPPORTED(".short 0xf2af,0x1d20 @ subw sp, pc, #0x120")
667 TEST_UNSUPPORTED(".short 0xf2a0,0x1d20 @ subw sp, r0, #0x120")
668
669 TEST("movw r0, #0")
670 TEST("movw r0, #0xffff")
671 TEST("movw lr, #0xffff")
672 TEST_UNSUPPORTED(".short 0xf240,0x0d00 @ movw sp, #0")
673 TEST_UNSUPPORTED(".short 0xf240,0x0f00 @ movw pc, #0")
674
675 TEST_R("movt r",0, VAL1,", #0")
676 TEST_R("movt r",0, VAL2,", #0xffff")
677 TEST_R("movt r",14,VAL1,", #0xffff")
678 TEST_UNSUPPORTED(".short 0xf2c0,0x0d00 @ movt sp, #0")
679 TEST_UNSUPPORTED(".short 0xf2c0,0x0f00 @ movt pc, #0")
680
681 TEST_R( "ssat r0, #24, r",0, VAL1,"")
682 TEST_R( "ssat r14, #24, r",12, VAL2,"")
683 TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8")
684 TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8")
685 TEST_UNSUPPORTED(".short 0xf30c,0x0d17 @ ssat sp, #24, r12")
686 TEST_UNSUPPORTED(".short 0xf30c,0x0f17 @ ssat pc, #24, r12")
687 TEST_UNSUPPORTED(".short 0xf30d,0x0c17 @ ssat r12, #24, sp")
688 TEST_UNSUPPORTED(".short 0xf30f,0x0c17 @ ssat r12, #24, pc")
689
690 TEST_R( "usat r0, #24, r",0, VAL1,"")
691 TEST_R( "usat r14, #24, r",12, VAL2,"")
692 TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8")
693 TEST_R( "usat r14, #24, r",12, VAL2,", asr #8")
694 TEST_UNSUPPORTED(".short 0xf38c,0x0d17 @ usat sp, #24, r12")
695 TEST_UNSUPPORTED(".short 0xf38c,0x0f17 @ usat pc, #24, r12")
696 TEST_UNSUPPORTED(".short 0xf38d,0x0c17 @ usat r12, #24, sp")
697 TEST_UNSUPPORTED(".short 0xf38f,0x0c17 @ usat r12, #24, pc")
698
699 TEST_R( "ssat16 r0, #12, r",0, HH1,"")
700 TEST_R( "ssat16 r14, #12, r",12, HH2,"")
701 TEST_UNSUPPORTED(".short 0xf32c,0x0d0b @ ssat16 sp, #12, r12")
702 TEST_UNSUPPORTED(".short 0xf32c,0x0f0b @ ssat16 pc, #12, r12")
703 TEST_UNSUPPORTED(".short 0xf32d,0x0c0b @ ssat16 r12, #12, sp")
704 TEST_UNSUPPORTED(".short 0xf32f,0x0c0b @ ssat16 r12, #12, pc")
705
706 TEST_R( "usat16 r0, #12, r",0, HH1,"")
707 TEST_R( "usat16 r14, #12, r",12, HH2,"")
708 TEST_UNSUPPORTED(".short 0xf3ac,0x0d0b @ usat16 sp, #12, r12")
709 TEST_UNSUPPORTED(".short 0xf3ac,0x0f0b @ usat16 pc, #12, r12")
710 TEST_UNSUPPORTED(".short 0xf3ad,0x0c0b @ usat16 r12, #12, sp")
711 TEST_UNSUPPORTED(".short 0xf3af,0x0c0b @ usat16 r12, #12, pc")
712
713 TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31")
714 TEST_R( "sbfx r14, r",12, VAL2,", #8, #16")
715 TEST_R( "sbfx r4, r",10, VAL1,", #16, #15")
716 TEST_UNSUPPORTED(".short 0xf34c,0x2d0f @ sbfx sp, r12, #8, #16")
717 TEST_UNSUPPORTED(".short 0xf34c,0x2f0f @ sbfx pc, r12, #8, #16")
718 TEST_UNSUPPORTED(".short 0xf34d,0x2c0f @ sbfx r12, sp, #8, #16")
719 TEST_UNSUPPORTED(".short 0xf34f,0x2c0f @ sbfx r12, pc, #8, #16")
720
721 TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31")
722 TEST_R( "ubfx r14, r",12, VAL2,", #8, #16")
723 TEST_R( "ubfx r4, r",10, VAL1,", #16, #15")
724 TEST_UNSUPPORTED(".short 0xf3cc,0x2d0f @ ubfx sp, r12, #8, #16")
725 TEST_UNSUPPORTED(".short 0xf3cc,0x2f0f @ ubfx pc, r12, #8, #16")
726 TEST_UNSUPPORTED(".short 0xf3cd,0x2c0f @ ubfx r12, sp, #8, #16")
727 TEST_UNSUPPORTED(".short 0xf3cf,0x2c0f @ ubfx r12, pc, #8, #16")
728
729 TEST_R( "bfc r",0, VAL1,", #4, #20")
730 TEST_R( "bfc r",14,VAL2,", #4, #20")
731 TEST_R( "bfc r",7, VAL1,", #0, #31")
732 TEST_R( "bfc r",8, VAL2,", #0, #31")
733 TEST_UNSUPPORTED(".short 0xf36f,0x0d1e @ bfc sp, #0, #31")
734 TEST_UNSUPPORTED(".short 0xf36f,0x0f1e @ bfc pc, #0, #31")
735
736 TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31")
737 TEST_RR( "bfi r",12,VAL1,", r",14 , VAL2,", #4, #20")
738 TEST_UNSUPPORTED(".short 0xf36e,0x1d17 @ bfi sp, r14, #4, #20")
739 TEST_UNSUPPORTED(".short 0xf36e,0x1f17 @ bfi pc, r14, #4, #20")
740 TEST_UNSUPPORTED(".short 0xf36d,0x1e17 @ bfi r14, sp, #4, #20")
741
742 TEST_GROUP("Branches and miscellaneous control")
743
744CONDITION_INSTRUCTIONS(22,
745 TEST_BF("beq.w 2f")
746 TEST_BB("bne.w 2b")
747 TEST_BF("bgt.w 2f")
748 TEST_BB("blt.w 2b")
749 TEST_BF_X("bpl.w 2f",0x1000)
750)
751
752 TEST_UNSUPPORTED("msr cpsr, r0")
753 TEST_UNSUPPORTED("msr cpsr_f, r1")
754 TEST_UNSUPPORTED("msr spsr, r2")
755
756 TEST_UNSUPPORTED("cpsie.w i")
757 TEST_UNSUPPORTED("cpsid.w i")
758 TEST_UNSUPPORTED("cps 0x13")
759
760 TEST_SUPPORTED("yield.w")
761 TEST("sev.w")
762 TEST("nop.w")
763 TEST("wfi.w")
764 TEST_SUPPORTED("wfe.w")
765 TEST_UNSUPPORTED("dbg.w #0")
766
767 TEST_UNSUPPORTED("clrex")
768 TEST_UNSUPPORTED("dsb")
769 TEST_UNSUPPORTED("dmb")
770 TEST_UNSUPPORTED("isb")
771
772 TEST_UNSUPPORTED("bxj r0")
773
774 TEST_UNSUPPORTED("subs pc, lr, #4")
775
776 TEST("mrs r0, cpsr")
777 TEST("mrs r14, cpsr")
778 TEST_UNSUPPORTED(".short 0xf3ef,0x8d00 @ mrs sp, spsr")
779 TEST_UNSUPPORTED(".short 0xf3ef,0x8f00 @ mrs pc, spsr")
780 TEST_UNSUPPORTED("mrs r0, spsr")
781 TEST_UNSUPPORTED("mrs lr, spsr")
782
783 TEST_UNSUPPORTED(".short 0xf7f0,0x8000 @ smc #0")
784
 785	TEST_UNSUPPORTED(".short 0xf7f0,0xa000 @ undefined")
786
787 TEST_BF( "b.w 2f")
788 TEST_BB( "b.w 2b")
789 TEST_BF_X("b.w 2f", 0x1000)
790
791 TEST_BF( "bl.w 2f")
792 TEST_BB( "bl.w 2b")
793 TEST_BB_X("bl.w 2b", 0x1000)
794
795 TEST_X( "blx __dummy_arm_subroutine",
796 ".arm \n\t"
797 ".align \n\t"
798 ".type __dummy_arm_subroutine, %%function \n\t"
799 "__dummy_arm_subroutine: \n\t"
800 "mov r0, pc \n\t"
801 "bx lr \n\t"
802 ".thumb \n\t"
803 )
804 TEST( "blx __dummy_arm_subroutine")
805
806 TEST_GROUP("Store single data item")
807
808#define SINGLE_STORE(size) \
809 TEST_RP( "str"size" r",0, VAL1,", [r",11,-1024,", #1024]") \
810 TEST_RP( "str"size" r",14,VAL2,", [r",1, -1024,", #1080]") \
811 TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]") \
812 TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]") \
813 TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #120") \
814 TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #128") \
815 TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #-120") \
816 TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #-128") \
817 TEST_RP( "str"size" r",0, VAL1,", [r",11,24, ", #120]!") \
818 TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, ", #128]!") \
819 TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]!") \
820 TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!") \
821 TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]") \
822 TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \
823 TEST_R( "str"size".w r",7, VAL1,", [sp, #24]") \
824 TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]") \
825 TEST_UNSUPPORTED("str"size"t r0, [r1, #4]")
826
827 SINGLE_STORE("b")
828 SINGLE_STORE("h")
829 SINGLE_STORE("")
830
831 TEST("str sp, [sp]")
832 TEST_UNSUPPORTED(".short 0xf8cf,0xe000 @ str r14, [pc]")
833 TEST_UNSUPPORTED(".short 0xf8ce,0xf000 @ str pc, [r14]")
834
835 TEST_GROUP("Advanced SIMD element or structure load/store instructions")
836
837 TEST_UNSUPPORTED(".short 0xf900,0x0000")
838 TEST_UNSUPPORTED(".short 0xf92f,0xffff")
839 TEST_UNSUPPORTED(".short 0xf980,0x0000")
840 TEST_UNSUPPORTED(".short 0xf9ef,0xffff")
841
842 TEST_GROUP("Load single data item and memory hints")
843
844#define SINGLE_LOAD(size) \
845 TEST_P( "ldr"size" r0, [r",11,-1024, ", #1024]") \
846 TEST_P( "ldr"size" r14, [r",1, -1024,", #1080]") \
847 TEST_P( "ldr"size" r0, [r",11,256, ", #-120]") \
848 TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]") \
849 TEST_P( "ldr"size" r0, [r",11,24, "], #120") \
850 TEST_P( "ldr"size" r14, [r",1, 24, "], #128") \
851 TEST_P( "ldr"size" r0, [r",11,24, "], #-120") \
852 TEST_P( "ldr"size" r14, [r",1,24, "], #-128") \
853 TEST_P( "ldr"size" r0, [r",11,24, ", #120]!") \
854 TEST_P( "ldr"size" r14, [r",1, 24, ", #128]!") \
855 TEST_P( "ldr"size" r0, [r",11,256, ", #-120]!") \
856 TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]!") \
857 TEST_PR("ldr"size".w r0, [r",1, 0,", r",2, 4,"]") \
858 TEST_PR("ldr"size" r14, [r",10,0,", r",11,4,", lsl #1]") \
859 TEST_X( "ldr"size".w r0, 3f", \
860 ".align 3 \n\t" \
861 "3: .word "__stringify(VAL1)) \
862 TEST_X( "ldr"size".w r14, 3f", \
863 ".align 3 \n\t" \
864 "3: .word "__stringify(VAL2)) \
865 TEST( "ldr"size".w r7, 3b") \
866 TEST( "ldr"size".w r7, [sp, #24]") \
867 TEST_P( "ldr"size".w r0, [r",0,0, "]") \
868 TEST_UNSUPPORTED("ldr"size"t r0, [r1, #4]")
869
870 SINGLE_LOAD("b")
871 SINGLE_LOAD("sb")
872 SINGLE_LOAD("h")
873 SINGLE_LOAD("sh")
874 SINGLE_LOAD("")
875
876 TEST_BF_P("ldr pc, [r",14, 15*4,"]")
877 TEST_P( "ldr sp, [r",14, 13*4,"]")
878 TEST_BF_R("ldr pc, [sp, r",14, 15*4,"]")
879 TEST_R( "ldr sp, [sp, r",14, 13*4,"]")
880 TEST_THUMB_TO_ARM_INTERWORK_P("ldr pc, [r",0,0,", #15*4]")
881 TEST_SUPPORTED("ldr sp, 99f")
882 TEST_SUPPORTED("ldr pc, 99f")
883
884 TEST_UNSUPPORTED(".short 0xf854,0x700d @ ldr r7, [r4, sp]")
885 TEST_UNSUPPORTED(".short 0xf854,0x700f @ ldr r7, [r4, pc]")
886 TEST_UNSUPPORTED(".short 0xf814,0x700d @ ldrb r7, [r4, sp]")
887 TEST_UNSUPPORTED(".short 0xf814,0x700f @ ldrb r7, [r4, pc]")
888 TEST_UNSUPPORTED(".short 0xf89f,0xd004 @ ldrb sp, 99f")
889 TEST_UNSUPPORTED(".short 0xf814,0xd008 @ ldrb sp, [r4, r8]")
890 TEST_UNSUPPORTED(".short 0xf894,0xd000 @ ldrb sp, [r4]")
891
892 TEST_UNSUPPORTED(".short 0xf860,0x0000") /* Unallocated space */
893 TEST_UNSUPPORTED(".short 0xf9ff,0xffff") /* Unallocated space */
894 TEST_UNSUPPORTED(".short 0xf950,0x0000") /* Unallocated space */
895 TEST_UNSUPPORTED(".short 0xf95f,0xffff") /* Unallocated space */
896 TEST_UNSUPPORTED(".short 0xf800,0x0800") /* Unallocated space */
897 TEST_UNSUPPORTED(".short 0xf97f,0xfaff") /* Unallocated space */
898
899 TEST( "pli [pc, #4]")
900 TEST( "pli [pc, #-4]")
901 TEST( "pld [pc, #4]")
902 TEST( "pld [pc, #-4]")
903
904 TEST_P( "pld [r",0,-1024,", #1024]")
905 TEST( ".short 0xf8b0,0xf400 @ pldw [r0, #1024]")
906 TEST_P( "pli [r",4, 0b,", #1024]")
907 TEST_P( "pld [r",7, 120,", #-120]")
908 TEST( ".short 0xf837,0xfc78 @ pldw [r7, #-120]")
909 TEST_P( "pli [r",11,120,", #-120]")
910 TEST( "pld [sp, #0]")
911
912 TEST_PR("pld [r",7, 24, ", r",0, 16,"]")
913 TEST_PR("pld [r",8, 24, ", r",12,16,", lsl #3]")
914 TEST_SUPPORTED(".short 0xf837,0xf000 @ pldw [r7, r0]")
915 TEST_SUPPORTED(".short 0xf838,0xf03c @ pldw [r8, r12, lsl #3]");
916 TEST_RR("pli [r",12,0b,", r",0, 16,"]")
917 TEST_RR("pli [r",0, 0b,", r",12,16,", lsl #3]")
918 TEST_R( "pld [sp, r",1, 16,"]")
 919	TEST_UNSUPPORTED(".short 0xf817,0xf00d @ pld	[r7, sp]")
 920	TEST_UNSUPPORTED(".short 0xf817,0xf00f @ pld	[r7, pc]")
921
922 TEST_GROUP("Data-processing (register)")
923
924#define SHIFTS32(op) \
925 TEST_RR(op" r0, r",1, VAL1,", r",2, 3, "") \
926 TEST_RR(op" r14, r",12,VAL2,", r",11,10,"")
927
928 SHIFTS32("lsl")
929 SHIFTS32("lsls")
930 SHIFTS32("lsr")
931 SHIFTS32("lsrs")
932 SHIFTS32("asr")
933 SHIFTS32("asrs")
934 SHIFTS32("ror")
935 SHIFTS32("rors")
936
937 TEST_UNSUPPORTED(".short 0xfa01,0xff02 @ lsl pc, r1, r2")
938 TEST_UNSUPPORTED(".short 0xfa01,0xfd02 @ lsl sp, r1, r2")
939 TEST_UNSUPPORTED(".short 0xfa0f,0xf002 @ lsl r0, pc, r2")
940 TEST_UNSUPPORTED(".short 0xfa0d,0xf002 @ lsl r0, sp, r2")
941 TEST_UNSUPPORTED(".short 0xfa01,0xf00f @ lsl r0, r1, pc")
942 TEST_UNSUPPORTED(".short 0xfa01,0xf00d @ lsl r0, r1, sp")
943
944 TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"")
945 TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
946 TEST_R( "sxth r8, r",7, HH1,"")
947
948 TEST_UNSUPPORTED(".short 0xfa0f,0xff87 @ sxth pc, r7");
949 TEST_UNSUPPORTED(".short 0xfa0f,0xfd87 @ sxth sp, r7");
950 TEST_UNSUPPORTED(".short 0xfa0f,0xf88f @ sxth r8, pc");
951 TEST_UNSUPPORTED(".short 0xfa0f,0xf88d @ sxth r8, sp");
952
953 TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"")
954 TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
955 TEST_R( "uxth r8, r",7, HH1,"")
956
957 TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"")
958 TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
959 TEST_R( "sxtb16 r8, r",7, HH1,"")
960
961 TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"")
962 TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
963 TEST_R( "uxtb16 r8, r",7, HH1,"")
964
965 TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"")
966 TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
967 TEST_R( "sxtb r8, r",7, HH1,"")
968
969 TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"")
970 TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
971 TEST_R( "uxtb r8, r",7, HH1,"")
972
973 TEST_UNSUPPORTED(".short 0xfa60,0x00f0")
974 TEST_UNSUPPORTED(".short 0xfa7f,0xffff")
975
976#define PARALLEL_ADD_SUB(op) \
977 TEST_RR( op"add16 r0, r",0, HH1,", r",1, HH2,"") \
978 TEST_RR( op"add16 r14, r",12,HH2,", r",10,HH1,"") \
979 TEST_RR( op"asx r0, r",0, HH1,", r",1, HH2,"") \
980 TEST_RR( op"asx r14, r",12,HH2,", r",10,HH1,"") \
981 TEST_RR( op"sax r0, r",0, HH1,", r",1, HH2,"") \
982 TEST_RR( op"sax r14, r",12,HH2,", r",10,HH1,"") \
983 TEST_RR( op"sub16 r0, r",0, HH1,", r",1, HH2,"") \
984 TEST_RR( op"sub16 r14, r",12,HH2,", r",10,HH1,"") \
985 TEST_RR( op"add8 r0, r",0, HH1,", r",1, HH2,"") \
986 TEST_RR( op"add8 r14, r",12,HH2,", r",10,HH1,"") \
987 TEST_RR( op"sub8 r0, r",0, HH1,", r",1, HH2,"") \
988 TEST_RR( op"sub8 r14, r",12,HH2,", r",10,HH1,"")
989
990 TEST_GROUP("Parallel addition and subtraction, signed")
991
992 PARALLEL_ADD_SUB("s")
993 PARALLEL_ADD_SUB("q")
994 PARALLEL_ADD_SUB("sh")
995
996 TEST_GROUP("Parallel addition and subtraction, unsigned")
997
998 PARALLEL_ADD_SUB("u")
999 PARALLEL_ADD_SUB("uq")
1000 PARALLEL_ADD_SUB("uh")
1001
1002 TEST_GROUP("Miscellaneous operations")
1003
1004 TEST_RR("qadd r0, r",1, VAL1,", r",2, VAL2,"")
1005 TEST_RR("qadd lr, r",9, VAL2,", r",8, VAL1,"")
1006 TEST_RR("qsub r0, r",1, VAL1,", r",2, VAL2,"")
1007 TEST_RR("qsub lr, r",9, VAL2,", r",8, VAL1,"")
1008 TEST_RR("qdadd r0, r",1, VAL1,", r",2, VAL2,"")
1009 TEST_RR("qdadd lr, r",9, VAL2,", r",8, VAL1,"")
1010 TEST_RR("qdsub r0, r",1, VAL1,", r",2, VAL2,"")
1011 TEST_RR("qdsub lr, r",9, VAL2,", r",8, VAL1,"")
1012
1013 TEST_R("rev.w r0, r",0, VAL1,"")
1014 TEST_R("rev r14, r",12, VAL2,"")
1015 TEST_R("rev16.w r0, r",0, VAL1,"")
1016 TEST_R("rev16 r14, r",12, VAL2,"")
1017 TEST_R("rbit r0, r",0, VAL1,"")
1018 TEST_R("rbit r14, r",12, VAL2,"")
1019 TEST_R("revsh.w r0, r",0, VAL1,"")
1020 TEST_R("revsh r14, r",12, VAL2,"")
1021
1022 TEST_UNSUPPORTED(".short 0xfa9c,0xff8c @ rev pc, r12");
1023 TEST_UNSUPPORTED(".short 0xfa9c,0xfd8c @ rev sp, r12");
1024 TEST_UNSUPPORTED(".short 0xfa9f,0xfe8f @ rev r14, pc");
1025 TEST_UNSUPPORTED(".short 0xfa9d,0xfe8d @ rev r14, sp");
1026
1027 TEST_RR("sel r0, r",0, VAL1,", r",1, VAL2,"")
1028 TEST_RR("sel r14, r",12,VAL1,", r",10, VAL2,"")
1029
1030 TEST_R("clz r0, r",0, 0x0,"")
1031 TEST_R("clz r7, r",14,0x1,"")
1032 TEST_R("clz lr, r",7, 0xffffffff,"")
1033
1034 TEST_UNSUPPORTED(".short 0xfa80,0xf030") /* Unallocated space */
1035 TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
1036 TEST_UNSUPPORTED(".short 0xfab0,0xf000") /* Unallocated space */
1037 TEST_UNSUPPORTED(".short 0xfaff,0xff7f") /* Unallocated space */
1038
1039 TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")
1040
1041 TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"")
1042 TEST_RR( "mul r7, r",8, VAL2,", r",9, VAL2,"")
1043 TEST_UNSUPPORTED(".short 0xfb08,0xff09 @ mul pc, r8, r9")
1044 TEST_UNSUPPORTED(".short 0xfb08,0xfd09 @ mul sp, r8, r9")
1045 TEST_UNSUPPORTED(".short 0xfb0f,0xf709 @ mul r7, pc, r9")
1046 TEST_UNSUPPORTED(".short 0xfb0d,0xf709 @ mul r7, sp, r9")
1047 TEST_UNSUPPORTED(".short 0xfb08,0xf70f @ mul r7, r8, pc")
1048 TEST_UNSUPPORTED(".short 0xfb08,0xf70d @ mul r7, r8, sp")
1049
1050 TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1051 TEST_RRR( "mla r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1052 TEST_UNSUPPORTED(".short 0xfb08,0xaf09 @ mla pc, r8, r9, r10");
1053 TEST_UNSUPPORTED(".short 0xfb08,0xad09 @ mla sp, r8, r9, r10");
1054 TEST_UNSUPPORTED(".short 0xfb0f,0xa709 @ mla r7, pc, r9, r10");
1055 TEST_UNSUPPORTED(".short 0xfb0d,0xa709 @ mla r7, sp, r9, r10");
1056 TEST_UNSUPPORTED(".short 0xfb08,0xa70f @ mla r7, r8, pc, r10");
1057 TEST_UNSUPPORTED(".short 0xfb08,0xa70d @ mla r7, r8, sp, r10");
1058 TEST_UNSUPPORTED(".short 0xfb08,0xd709 @ mla r7, r8, r9, sp");
1059
1060 TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1061 TEST_RRR( "mls r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1062
1063 TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1064 TEST_RRR( "smlabb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1065 TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1066 TEST_RRR( "smlatb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1067 TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1068 TEST_RRR( "smlabt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1069 TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1070 TEST_RRR( "smlatt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1071 TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"")
1072 TEST_RR( "smulbb r7, r",8, VAL3,", r",9, VAL1,"")
1073 TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"")
1074 TEST_RR( "smultb r7, r",8, VAL3,", r",9, VAL1,"")
1075 TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"")
1076 TEST_RR( "smulbt r7, r",8, VAL3,", r",9, VAL1,"")
1077 TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"")
1078 TEST_RR( "smultt r7, r",8, VAL3,", r",9, VAL1,"")
1079
1080 TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
1081 TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
1082 TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
1083 TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
1084 TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"")
1085 TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"")
1086 TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"")
1087 TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"")
1088
1089 TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1090 TEST_RRR( "smlawb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1091 TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
1092 TEST_RRR( "smlawt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
1093 TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"")
1094 TEST_RR( "smulwb r7, r",8, VAL3,", r",9, VAL1,"")
1095 TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"")
1096 TEST_RR( "smulwt r7, r",8, VAL3,", r",9, VAL1,"")
1097
1098 TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
1099 TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
1100 TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
1101 TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
1102 TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"")
1103 TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"")
1104 TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"")
1105 TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"")
1106
1107 TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
1108 TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
1109 TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
1110 TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
1111 TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"")
1112 TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"")
1113 TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"")
1114 TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"")
1115
1116 TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
1117 TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
1118 TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
1119 TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
1120
1121 TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"")
1122 TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
1123 TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"")
1124 TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"")
1125
1126 TEST_UNSUPPORTED(".short 0xfb00,0xf010") /* Unallocated space */
1127 TEST_UNSUPPORTED(".short 0xfb0f,0xff1f") /* Unallocated space */
1128 TEST_UNSUPPORTED(".short 0xfb70,0xf010") /* Unallocated space */
1129 TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
1130 TEST_UNSUPPORTED(".short 0xfb70,0x0010") /* Unallocated space */
1131 TEST_UNSUPPORTED(".short 0xfb7f,0xff1f") /* Unallocated space */
1132
1133 TEST_GROUP("Long multiply, long multiply accumulate, and divide")
1134
1135 TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"")
1136 TEST_RR( "smull r7, r8, r",9, VAL2,", r",10, VAL1,"")
1137 TEST_UNSUPPORTED(".short 0xfb89,0xf80a @ smull pc, r8, r9, r10");
1138 TEST_UNSUPPORTED(".short 0xfb89,0xd80a @ smull sp, r8, r9, r10");
1139 TEST_UNSUPPORTED(".short 0xfb89,0x7f0a @ smull r7, pc, r9, r10");
1140 TEST_UNSUPPORTED(".short 0xfb89,0x7d0a @ smull r7, sp, r9, r10");
1141 TEST_UNSUPPORTED(".short 0xfb8f,0x780a @ smull r7, r8, pc, r10");
1142 TEST_UNSUPPORTED(".short 0xfb8d,0x780a @ smull r7, r8, sp, r10");
1143 TEST_UNSUPPORTED(".short 0xfb89,0x780f @ smull r7, r8, r9, pc");
1144 TEST_UNSUPPORTED(".short 0xfb89,0x780d @ smull r7, r8, r9, sp");
1145
1146 TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"")
1147 TEST_RR( "umull r7, r8, r",9, VAL2,", r",10, VAL1,"")
1148
1149 TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1150 TEST_RRRR( "smlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1151
1152 TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1153 TEST_RRRR( "smlalbb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1154 TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1155 TEST_RRRR( "smlalbt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1156 TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1157 TEST_RRRR( "smlaltb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1158 TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1159 TEST_RRRR( "smlaltt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1160
1161 TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
1162 TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
1163 TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
1164 TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
1165
1166 TEST_RRRR( "smlsld r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
1167 TEST_RRRR( "smlsld r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
1168 TEST_RRRR( "smlsldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
1169 TEST_RRRR( "smlsldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
1170
1171 TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1172 TEST_RRRR( "umlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1173 TEST_RRRR( "umaal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
1174 TEST_RRRR( "umaal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
1175
1176 TEST_GROUP("Coprocessor instructions")
1177
1178 TEST_UNSUPPORTED(".short 0xfc00,0x0000")
1179 TEST_UNSUPPORTED(".short 0xffff,0xffff")
1180
1181 TEST_GROUP("Testing instructions in IT blocks")
1182
1183 TEST_ITBLOCK("sub.w r0, r0")
1184
1185 verbose("\n");
1186}
1187
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
new file mode 100644
index 000000000000..e17cdd6d90d8
--- /dev/null
+++ b/arch/arm/kernel/kprobes-test.c
@@ -0,0 +1,1748 @@
1/*
2 * arch/arm/kernel/kprobes-test.c
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * This file contains test code for ARM kprobes.
13 *
14 * The top level function run_all_tests() executes tests for all of the
15 * supported instruction sets: ARM, 16-bit Thumb, and 32-bit Thumb. These tests
 16 * fall into two categories: run_api_tests() checks basic functionality of the
17 * kprobes API, and run_test_cases() is a comprehensive test for kprobes
18 * instruction decoding and simulation.
19 *
 20 * run_test_cases() first checks the kprobes decoding table for self-consistency
 21 * (using table_test()) and then executes a series of test cases for each of the CPU
22 * instruction forms. coverage_start() and coverage_end() are used to verify
23 * that these test cases cover all of the possible combinations of instructions
24 * described by the kprobes decoding tables.
25 *
 26 * The individual test cases are in kprobes-test-arm.c and kprobes-test-thumb.c,
27 * which use the macros defined in kprobes-test.h. The rest of this
28 * documentation will describe the operation of the framework used by these
29 * test cases.
30 */
31
32/*
33 * TESTING METHODOLOGY
34 * -------------------
35 *
36 * The methodology used to test an ARM instruction 'test_insn' is to use
37 * inline assembler like:
38 *
39 * test_before: nop
40 * test_case: test_insn
41 * test_after: nop
42 *
 43 * When the test case is run, a kprobe is placed on each nop. The
44 * post-handler of the test_before probe is used to modify the saved CPU
45 * register context to that which we require for the test case. The
 46 * pre-handler of the test_after probe saves a copy of the CPU
47 * register context. In this way we can execute test_insn with a specific
48 * register context and see the results afterwards.
49 *
50 * To actually test the kprobes instruction emulation we perform the above
51 * step a second time but with an additional kprobe on the test_case
52 * instruction itself. If the emulation is accurate then the results seen
53 * by the test_after probe will be identical to the first run which didn't
54 * have a probe on test_case.
55 *
 56 * Each test case is run several times with a variety of variations in the
 57 * flags value stored in CPSR and, for Thumb code, different ITState values.
58 *
59 * For instructions which can modify PC, a second test_after probe is used
60 * like this:
61 *
62 * test_before: nop
63 * test_case: test_insn
64 * test_after: nop
65 * b test_done
66 * test_after2: nop
67 * test_done:
68 *
69 * The test case is constructed such that test_insn branches to
70 * test_after2, or, if testing a conditional instruction, it may just
71 * continue to test_after. The probes inserted at both locations let us
72 * determine which happened. A similar approach is used for testing
73 * backwards branches...
74 *
75 * b test_before
76 * b test_done @ helps to cope with off by 1 branches
77 * test_after2: nop
78 * b test_done
79 * test_before: nop
80 * test_case: test_insn
81 * test_after: nop
82 * test_done:
83 *
 84 * The macros used to generate the assembler instructions described above
 85 * are TEST_INSTRUCTION, TEST_BRANCH_F (branch forwards) and TEST_BRANCH_B
 86 * (branch backwards). In these, the local labels numbered 1, 50, 2 and
87 * 99 represent: test_before, test_case, test_after2 and test_done.
88 *
89 * FRAMEWORK
90 * ---------
91 *
92 * Each test case is wrapped between the pair of macros TESTCASE_START and
93 * TESTCASE_END. As well as performing the inline assembler boilerplate,
94 * these call out to the kprobes_test_case_start() and
95 * kprobes_test_case_end() functions which drive the execution of the test
96 * case. The specific arguments to use for each test case are stored as
97 * inline data constructed using the various TEST_ARG_* macros. Putting
98 * this all together, a simple test case may look like:
99 *
100 * TESTCASE_START("Testing mov r0, r7")
101 * TEST_ARG_REG(7, 0x12345678) // Set r7=0x12345678
102 * TEST_ARG_END("")
103 * TEST_INSTRUCTION("mov r0, r7")
104 * TESTCASE_END
105 *
106 * Note, in practice the single convenience macro TEST_R would be used for this
107 * instead.
108 *
109 * The above would expand to assembler looking something like:
110 *
111 * @ TESTCASE_START
112 * bl __kprobes_test_case_start
113 * @ start of inline data...
114 * .ascii "mov r0, r7" @ text title for test case
115 * .byte 0
116 * .align 2
117 *
118 * @ TEST_ARG_REG
119 * .byte ARG_TYPE_REG
120 * .byte 7
121 * .short 0
 122 *	.word	0x12345678
123 *
124 * @ TEST_ARG_END
125 * .byte ARG_TYPE_END
126 * .byte TEST_ISA @ flags, including ISA being tested
127 * .short 50f-0f @ offset of 'test_before'
 128 *	.short	2f-0f	@ offset of 'test_after2' (if relevant)
129 * .short 99f-0f @ offset of 'test_done'
130 * @ start of test case code...
131 * 0:
132 * .code TEST_ISA @ switch to ISA being tested
133 *
134 * @ TEST_INSTRUCTION
135 * 50: nop @ location for 'test_before' probe
136 * 1: mov r0, r7 @ the test case instruction 'test_insn'
137 * nop @ location for 'test_after' probe
138 *
 139 *	@ TESTCASE_END
 140 * 2:
 141 * 99:	bl	__kprobes_test_case_end_##TEST_ISA
 142 *	.code	NORMAL_ISA
143 *
 144 * When the above is executed, the following happens...
145 *
146 * __kprobes_test_case_start() is an assembler wrapper which sets up space
147 * for a stack buffer and calls the C function kprobes_test_case_start().
 148 * This C function does some initial processing of the inline data and
 149 * sets up some global state. It then inserts the test_before and test_after
 150 * kprobes and returns a value which causes the assembler wrapper to jump
 151 * to the start of the test case code (local label '0').
152 *
153 * When the test case code executes, the test_before probe will be hit and
154 * test_before_post_handler will call setup_test_context(). This fills the
155 * stack buffer and CPU registers with a test pattern and then processes
156 * the test case arguments. In our example there is one TEST_ARG_REG which
157 * indicates that R7 should be loaded with the value 0x12345678.
158 *
159 * When the test_before probe ends, the test case continues and executes
160 * the "mov r0, r7" instruction. It then hits the test_after probe and the
161 * pre-handler for this (test_after_pre_handler) will save a copy of the
162 * CPU register context. This should now have R0 holding the same value as
163 * R7.
164 *
165 * Finally we get to the call to __kprobes_test_case_end_{32,16}. This is
166 * an assembler wrapper which switches back to the ISA used by the test
167 * code and calls the C function kprobes_test_case_end().
168 *
169 * For each run through the test case, test_case_run_count is incremented
170 * by one. For even runs, kprobes_test_case_end() saves a copy of the
171 * register and stack buffer contents from the test case just run. It then
172 * inserts a kprobe on the test case instruction 'test_insn' and returns a
173 * value to cause the test case code to be re-run.
174 *
175 * For odd numbered runs, kprobes_test_case_end() compares the register and
176 * stack buffer contents to those that were saved on the previous even
177 * numbered run (the one without the kprobe on test_insn). These should be
178 * the same if the kprobe instruction simulation routine is correct.
179 *
180 * The pair of test case runs is repeated with different combinations of
181 * flag values in CPSR and, for Thumb, different ITState. This is
182 * controlled by test_context_cpsr().
183 *
184 * BUILDING TEST CASES
185 * -------------------
 186 *
188 * As an aid to building test cases, the stack buffer is initialised with
189 * some special values:
190 *
191 * [SP+13*4] Contains SP+120. This can be used to test instructions
192 * which load a value into SP.
193 *
194 * [SP+15*4] When testing branching instructions using TEST_BRANCH_{F,B},
195 * this holds the target address of the branch, 'test_after2'.
196 * This can be used to test instructions which load a PC value
197 * from memory.
198 */
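For orientation, the boilerplate expanded above is exactly what the convenience macros generate. A sketch of the same test written the normal way, using TEST_R from kprobes-test.h (with VAL1 standing in for the 0x12345678 argument value used above):

	TEST_R("mov r0, r",7, VAL1,"")	/* probe and check "mov r0, r7" with r7 preloaded */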
199
200#include <linux/kernel.h>
201#include <linux/module.h>
202#include <linux/slab.h>
203#include <linux/kprobes.h>
204
205#include "kprobes.h"
206#include "kprobes-test.h"
207
208
209#define BENCHMARKING 1
210
211
212/*
213 * Test basic API
214 */
215
216static bool test_regs_ok;
217static int test_func_instance;
218static int pre_handler_called;
219static int post_handler_called;
220static int jprobe_func_called;
221static int kretprobe_handler_called;
222
223#define FUNC_ARG1 0x12345678
224#define FUNC_ARG2 0xabcdef
225
226
227#ifndef CONFIG_THUMB2_KERNEL
228
229long arm_func(long r0, long r1);
230
231static void __used __naked __arm_kprobes_test_func(void)
232{
233 __asm__ __volatile__ (
234 ".arm \n\t"
235 ".type arm_func, %%function \n\t"
236 "arm_func: \n\t"
237 "adds r0, r0, r1 \n\t"
238 "bx lr \n\t"
239 ".code "NORMAL_ISA /* Back to Thumb if necessary */
240 : : : "r0", "r1", "cc"
241 );
242}
243
244#else /* CONFIG_THUMB2_KERNEL */
245
246long thumb16_func(long r0, long r1);
247long thumb32even_func(long r0, long r1);
248long thumb32odd_func(long r0, long r1);
249
250static void __used __naked __thumb_kprobes_test_funcs(void)
251{
252 __asm__ __volatile__ (
253 ".type thumb16_func, %%function \n\t"
254 "thumb16_func: \n\t"
255 "adds.n r0, r0, r1 \n\t"
256 "bx lr \n\t"
257
258 ".align \n\t"
259 ".type thumb32even_func, %%function \n\t"
260 "thumb32even_func: \n\t"
261 "adds.w r0, r0, r1 \n\t"
262 "bx lr \n\t"
263
264 ".align \n\t"
 265		"nop.n			\n\t" /* 2-byte nop: thumb32odd_func then starts at a word boundary + 2 */
266 ".type thumb32odd_func, %%function \n\t"
267 "thumb32odd_func: \n\t"
268 "adds.w r0, r0, r1 \n\t"
269 "bx lr \n\t"
270
271 : : : "r0", "r1", "cc"
272 );
273}
274
275#endif /* CONFIG_THUMB2_KERNEL */
276
277
278static int call_test_func(long (*func)(long, long), bool check_test_regs)
279{
280 long ret;
281
282 ++test_func_instance;
283 test_regs_ok = false;
284
285 ret = (*func)(FUNC_ARG1, FUNC_ARG2);
286 if (ret != FUNC_ARG1 + FUNC_ARG2) {
287 pr_err("FAIL: call_test_func: func returned %lx\n", ret);
288 return false;
289 }
290
291 if (check_test_regs && !test_regs_ok) {
292 pr_err("FAIL: test regs not OK\n");
293 return false;
294 }
295
296 return true;
297}
298
299static int __kprobes pre_handler(struct kprobe *p, struct pt_regs *regs)
300{
301 pre_handler_called = test_func_instance;
302 if (regs->ARM_r0 == FUNC_ARG1 && regs->ARM_r1 == FUNC_ARG2)
303 test_regs_ok = true;
304 return 0;
305}
306
307static void __kprobes post_handler(struct kprobe *p, struct pt_regs *regs,
308 unsigned long flags)
309{
310 post_handler_called = test_func_instance;
311 if (regs->ARM_r0 != FUNC_ARG1 + FUNC_ARG2 || regs->ARM_r1 != FUNC_ARG2)
312 test_regs_ok = false;
313}
314
315static struct kprobe the_kprobe = {
316 .addr = 0,
317 .pre_handler = pre_handler,
318 .post_handler = post_handler
319};
320
321static int test_kprobe(long (*func)(long, long))
322{
323 int ret;
324
325 the_kprobe.addr = (kprobe_opcode_t *)func;
326 ret = register_kprobe(&the_kprobe);
327 if (ret < 0) {
328 pr_err("FAIL: register_kprobe failed with %d\n", ret);
329 return ret;
330 }
331
332 ret = call_test_func(func, true);
333
334 unregister_kprobe(&the_kprobe);
335 the_kprobe.flags = 0; /* Clear disable flag to allow reuse */
336
337 if (!ret)
338 return -EINVAL;
339 if (pre_handler_called != test_func_instance) {
340 pr_err("FAIL: kprobe pre_handler not called\n");
341 return -EINVAL;
342 }
343 if (post_handler_called != test_func_instance) {
344 pr_err("FAIL: kprobe post_handler not called\n");
345 return -EINVAL;
346 }
347 if (!call_test_func(func, false))
348 return -EINVAL;
349 if (pre_handler_called == test_func_instance ||
350 post_handler_called == test_func_instance) {
351 pr_err("FAIL: probe called after unregistering\n");
352 return -EINVAL;
353 }
354
355 return 0;
356}
357
358static void __kprobes jprobe_func(long r0, long r1)
359{
360 jprobe_func_called = test_func_instance;
361 if (r0 == FUNC_ARG1 && r1 == FUNC_ARG2)
362 test_regs_ok = true;
363 jprobe_return();
364}
365
366static struct jprobe the_jprobe = {
367 .entry = jprobe_func,
368};
369
370static int test_jprobe(long (*func)(long, long))
371{
372 int ret;
373
374 the_jprobe.kp.addr = (kprobe_opcode_t *)func;
375 ret = register_jprobe(&the_jprobe);
376 if (ret < 0) {
377 pr_err("FAIL: register_jprobe failed with %d\n", ret);
378 return ret;
379 }
380
381 ret = call_test_func(func, true);
382
383 unregister_jprobe(&the_jprobe);
384 the_jprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
385
386 if (!ret)
387 return -EINVAL;
388 if (jprobe_func_called != test_func_instance) {
389 pr_err("FAIL: jprobe handler function not called\n");
390 return -EINVAL;
391 }
392 if (!call_test_func(func, false))
393 return -EINVAL;
394 if (jprobe_func_called == test_func_instance) {
395 pr_err("FAIL: probe called after unregistering\n");
396 return -EINVAL;
397 }
398
399 return 0;
400}
401
402static int __kprobes
403kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
404{
405 kretprobe_handler_called = test_func_instance;
406 if (regs_return_value(regs) == FUNC_ARG1 + FUNC_ARG2)
407 test_regs_ok = true;
408 return 0;
409}
410
411static struct kretprobe the_kretprobe = {
412 .handler = kretprobe_handler,
413};
414
415static int test_kretprobe(long (*func)(long, long))
416{
417 int ret;
418
419 the_kretprobe.kp.addr = (kprobe_opcode_t *)func;
420 ret = register_kretprobe(&the_kretprobe);
421 if (ret < 0) {
422 pr_err("FAIL: register_kretprobe failed with %d\n", ret);
423 return ret;
424 }
425
426 ret = call_test_func(func, true);
427
428 unregister_kretprobe(&the_kretprobe);
429 the_kretprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
430
431 if (!ret)
432 return -EINVAL;
433 if (kretprobe_handler_called != test_func_instance) {
434 pr_err("FAIL: kretprobe handler not called\n");
435 return -EINVAL;
436 }
437 if (!call_test_func(func, false))
438 return -EINVAL;
 439	if (kretprobe_handler_called == test_func_instance) {
440 pr_err("FAIL: kretprobe called after unregistering\n");
441 return -EINVAL;
442 }
443
444 return 0;
445}
446
447static int run_api_tests(long (*func)(long, long))
448{
449 int ret;
450
451 pr_info(" kprobe\n");
452 ret = test_kprobe(func);
453 if (ret < 0)
454 return ret;
455
456 pr_info(" jprobe\n");
457 ret = test_jprobe(func);
458 if (ret < 0)
459 return ret;
460
461 pr_info(" kretprobe\n");
462 ret = test_kretprobe(func);
463 if (ret < 0)
464 return ret;
465
466 return 0;
467}
468
469
470/*
471 * Benchmarking
472 */
473
474#if BENCHMARKING
475
476static void __naked benchmark_nop(void)
477{
478 __asm__ __volatile__ (
479 "nop \n\t"
480 "bx lr"
481 );
482}
483
484#ifdef CONFIG_THUMB2_KERNEL
485#define wide ".w"
486#else
487#define wide
488#endif
489
490static void __naked benchmark_pushpop1(void)
491{
492 __asm__ __volatile__ (
493 "stmdb"wide" sp!, {r3-r11,lr} \n\t"
494 "ldmia"wide" sp!, {r3-r11,pc}"
495 );
496}
497
498static void __naked benchmark_pushpop2(void)
499{
500 __asm__ __volatile__ (
501 "stmdb"wide" sp!, {r0-r8,lr} \n\t"
502 "ldmia"wide" sp!, {r0-r8,pc}"
503 );
504}
505
506static void __naked benchmark_pushpop3(void)
507{
508 __asm__ __volatile__ (
509 "stmdb"wide" sp!, {r4,lr} \n\t"
510 "ldmia"wide" sp!, {r4,pc}"
511 );
512}
513
514static void __naked benchmark_pushpop4(void)
515{
516 __asm__ __volatile__ (
517 "stmdb"wide" sp!, {r0,lr} \n\t"
518 "ldmia"wide" sp!, {r0,pc}"
519 );
520}
521
522
523#ifdef CONFIG_THUMB2_KERNEL
524
525static void __naked benchmark_pushpop_thumb(void)
526{
527 __asm__ __volatile__ (
528 "push.n {r0-r7,lr} \n\t"
529 "pop.n {r0-r7,pc}"
530 );
531}
532
533#endif
534
535static int __kprobes
536benchmark_pre_handler(struct kprobe *p, struct pt_regs *regs)
537{
538 return 0;
539}
540
541static int benchmark(void(*fn)(void))
542{
543 unsigned n, i, t, t0;
544
545 for (n = 1000; ; n *= 2) {
546 t0 = sched_clock();
547 for (i = n; i > 0; --i)
548 fn();
549 t = sched_clock() - t0;
550 if (t >= 250000000)
 551			break; /* Stop once a run takes more than 0.25 seconds */
 552	}
 553	return t / n; /* Time for one iteration in nanoseconds */
 554}
555
556static int kprobe_benchmark(void(*fn)(void), unsigned offset)
557{
558 struct kprobe k = {
559 .addr = (kprobe_opcode_t *)((uintptr_t)fn + offset),
560 .pre_handler = benchmark_pre_handler,
561 };
562
563 int ret = register_kprobe(&k);
564 if (ret < 0) {
565 pr_err("FAIL: register_kprobe failed with %d\n", ret);
566 return ret;
567 }
568
569 ret = benchmark(fn);
570
571 unregister_kprobe(&k);
572 return ret;
 573}
574
575struct benchmarks {
576 void (*fn)(void);
577 unsigned offset;
578 const char *title;
579};
580
581static int run_benchmarks(void)
582{
583 int ret;
584 struct benchmarks list[] = {
585 {&benchmark_nop, 0, "nop"},
586 /*
587 * benchmark_pushpop{1,3} will have the optimised
588 * instruction emulation, whilst benchmark_pushpop{2,4} will
589 * be the equivalent unoptimised instructions.
590 */
591 {&benchmark_pushpop1, 0, "stmdb sp!, {r3-r11,lr}"},
592 {&benchmark_pushpop1, 4, "ldmia sp!, {r3-r11,pc}"},
593 {&benchmark_pushpop2, 0, "stmdb sp!, {r0-r8,lr}"},
594 {&benchmark_pushpop2, 4, "ldmia sp!, {r0-r8,pc}"},
595 {&benchmark_pushpop3, 0, "stmdb sp!, {r4,lr}"},
596 {&benchmark_pushpop3, 4, "ldmia sp!, {r4,pc}"},
597 {&benchmark_pushpop4, 0, "stmdb sp!, {r0,lr}"},
598 {&benchmark_pushpop4, 4, "ldmia sp!, {r0,pc}"},
599#ifdef CONFIG_THUMB2_KERNEL
600 {&benchmark_pushpop_thumb, 0, "push.n {r0-r7,lr}"},
601 {&benchmark_pushpop_thumb, 2, "pop.n {r0-r7,pc}"},
602#endif
603 {0}
604 };
605
606 struct benchmarks *b;
607 for (b = list; b->fn; ++b) {
608 ret = kprobe_benchmark(b->fn, b->offset);
609 if (ret < 0)
610 return ret;
611 pr_info(" %dns for kprobe %s\n", ret, b->title);
612 }
613
614 pr_info("\n");
615 return 0;
616}
617
618#endif /* BENCHMARKING */
619
620
621/*
622 * Decoding table self-consistency tests
623 */
624
625static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
626 [DECODE_TYPE_TABLE] = sizeof(struct decode_table),
627 [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom),
628 [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate),
629 [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate),
630 [DECODE_TYPE_OR] = sizeof(struct decode_or),
631 [DECODE_TYPE_REJECT] = sizeof(struct decode_reject)
632};
633
634static int table_iter(const union decode_item *table,
635 int (*fn)(const struct decode_header *, void *),
636 void *args)
637{
638 const struct decode_header *h = (struct decode_header *)table;
639 int result;
640
641 for (;;) {
642 enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
643
644 if (type == DECODE_TYPE_END)
645 return 0;
646
647 result = fn(h, args);
648 if (result)
649 return result;
650
651 h = (struct decode_header *)
652 ((uintptr_t)h + decode_struct_sizes[type]);
653
654 }
655}
656
 657static int table_test_fail(const struct decode_header *h, const char *message)
658{
659
660 pr_err("FAIL: kprobes test failure \"%s\" (mask %08x, value %08x)\n",
661 message, h->mask.bits, h->value.bits);
662 return -EINVAL;
663}
664
665struct table_test_args {
666 const union decode_item *root_table;
667 u32 parent_mask;
668 u32 parent_value;
669};
670
671static int table_test_fn(const struct decode_header *h, void *args)
672{
673 struct table_test_args *a = (struct table_test_args *)args;
674 enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
675
676 if (h->value.bits & ~h->mask.bits)
677 return table_test_fail(h, "Match value has bits not in mask");
678
679 if ((h->mask.bits & a->parent_mask) != a->parent_mask)
680 return table_test_fail(h, "Mask has bits not in parent mask");
681
682 if ((h->value.bits ^ a->parent_value) & a->parent_mask)
683 return table_test_fail(h, "Value is inconsistent with parent");
684
685 if (type == DECODE_TYPE_TABLE) {
686 struct decode_table *d = (struct decode_table *)h;
687 struct table_test_args args2 = *a;
688 args2.parent_mask = h->mask.bits;
689 args2.parent_value = h->value.bits;
690 return table_iter(d->table.table, table_test_fn, &args2);
691 }
692
693 return 0;
694}
695
696static int table_test(const union decode_item *table)
697{
698 struct table_test_args args = {
699 .root_table = table,
700 .parent_mask = 0,
701 .parent_value = 0
702 };
703 return table_iter(args.root_table, table_test_fn, &args);
704}
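As a usage sketch (kprobe_decode_arm_table is assumed here as the top-level table symbol from kprobes.h; the real call sites are in the test runner later in this file):

	/* Sketch: verify one decoding table before running its test cases. */
	if (table_test(kprobe_decode_arm_table) < 0)
		pr_err("FAIL: ARM decoding table failed self-consistency check\n");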
705
706
707/*
708 * Decoding table test coverage analysis
709 *
710 * coverage_start() builds a coverage_table which contains a list of
711 * coverage_entry's to match each entry in the specified kprobes instruction
712 * decoding table.
713 *
714 * When test cases are run, coverage_add() is called to process each case.
715 * This looks up the corresponding entry in the coverage_table and sets it as
716 * being matched, as well as clearing the regs flag appropriate for the test.
717 *
718 * After all test cases have been run, coverage_end() is called to check that
719 * all entries in coverage_table have been matched and that all regs flags are
720 * cleared. I.e. that all possible combinations of instructions described by
721 * the kprobes decoding tables have had a test case executed for them.
722 */
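Sketched as a life cycle ('table' is a placeholder for one of the kprobes decoding tables, and coverage_start()'s return value should be checked in real use):

	coverage_start(table);	/* build coverage_table mirroring the decode table */
	/* ... execute the test cases; each one feeds coverage_add(insn) ... */
	coverage_end();		/* flag any entries never matched, or regs bits left set */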
723
724bool coverage_fail;
725
726#define MAX_COVERAGE_ENTRIES 256
727
728struct coverage_entry {
729 const struct decode_header *header;
730 unsigned regs;
731 unsigned nesting;
732 char matched;
733};
734
735struct coverage_table {
736 struct coverage_entry *base;
737 unsigned num_entries;
738 unsigned nesting;
739};
740
741struct coverage_table coverage;
742
743#define COVERAGE_ANY_REG (1<<0)
744#define COVERAGE_SP (1<<1)
745#define COVERAGE_PC (1<<2)
746#define COVERAGE_PCWB (1<<3)
747
748static const char coverage_register_lookup[16] = {
749 [REG_TYPE_ANY] = COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC,
750 [REG_TYPE_SAMEAS16] = COVERAGE_ANY_REG,
751 [REG_TYPE_SP] = COVERAGE_SP,
752 [REG_TYPE_PC] = COVERAGE_PC,
753 [REG_TYPE_NOSP] = COVERAGE_ANY_REG | COVERAGE_SP,
754 [REG_TYPE_NOSPPC] = COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC,
755 [REG_TYPE_NOPC] = COVERAGE_ANY_REG | COVERAGE_PC,
756 [REG_TYPE_NOPCWB] = COVERAGE_ANY_REG | COVERAGE_PC | COVERAGE_PCWB,
757 [REG_TYPE_NOPCX] = COVERAGE_ANY_REG,
758 [REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP,
759};
760
761unsigned coverage_start_registers(const struct decode_header *h)
762{
763 unsigned regs = 0;
764 int i;
765 for (i = 0; i < 20; i += 4) {
766 int r = (h->type_regs.bits >> (DECODE_TYPE_BITS + i)) & 0xf;
767 regs |= coverage_register_lookup[r] << i;
768 }
769 return regs;
770}
771
772static int coverage_start_fn(const struct decode_header *h, void *args)
773{
774 struct coverage_table *coverage = (struct coverage_table *)args;
775 enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
776 struct coverage_entry *entry = coverage->base + coverage->num_entries;
777
778 if (coverage->num_entries == MAX_COVERAGE_ENTRIES - 1) {
 779		pr_err("FAIL: Out of space for test coverage data\n");
780 return -ENOMEM;
781 }
782
783 ++coverage->num_entries;
784
785 entry->header = h;
786 entry->regs = coverage_start_registers(h);
787 entry->nesting = coverage->nesting;
788 entry->matched = false;
789
790 if (type == DECODE_TYPE_TABLE) {
791 struct decode_table *d = (struct decode_table *)h;
792 int ret;
793 ++coverage->nesting;
794 ret = table_iter(d->table.table, coverage_start_fn, coverage);
795 --coverage->nesting;
796 return ret;
797 }
798
799 return 0;
800}
801
802static int coverage_start(const union decode_item *table)
803{
804 coverage.base = kmalloc(MAX_COVERAGE_ENTRIES *
805 sizeof(struct coverage_entry), GFP_KERNEL);
806 coverage.num_entries = 0;
807 coverage.nesting = 0;
808 return table_iter(table, coverage_start_fn, &coverage);
809}
810
811static void
812coverage_add_registers(struct coverage_entry *entry, kprobe_opcode_t insn)
813{
814 int regs = entry->header->type_regs.bits >> DECODE_TYPE_BITS;
815 int i;
816 for (i = 0; i < 20; i += 4) {
817 enum decode_reg_type reg_type = (regs >> i) & 0xf;
818 int reg = (insn >> i) & 0xf;
819 int flag;
820
821 if (!reg_type)
822 continue;
823
824 if (reg == 13)
825 flag = COVERAGE_SP;
826 else if (reg == 15)
827 flag = COVERAGE_PC;
828 else
829 flag = COVERAGE_ANY_REG;
830 entry->regs &= ~(flag << i);
831
832 switch (reg_type) {
833
834 case REG_TYPE_NONE:
835 case REG_TYPE_ANY:
836 case REG_TYPE_SAMEAS16:
837 break;
838
839 case REG_TYPE_SP:
840 if (reg != 13)
841 return;
842 break;
843
844 case REG_TYPE_PC:
845 if (reg != 15)
846 return;
847 break;
848
849 case REG_TYPE_NOSP:
850 if (reg == 13)
851 return;
852 break;
853
854 case REG_TYPE_NOSPPC:
855 case REG_TYPE_NOSPPCX:
856 if (reg == 13 || reg == 15)
857 return;
858 break;
859
860 case REG_TYPE_NOPCWB:
861 if (!is_writeback(insn))
862 break;
863 if (reg == 15) {
864 entry->regs &= ~(COVERAGE_PCWB << i);
865 return;
866 }
867 break;
868
869 case REG_TYPE_NOPC:
870 case REG_TYPE_NOPCX:
871 if (reg == 15)
872 return;
873 break;
874 }
875
876 }
877}
878
879static void coverage_add(kprobe_opcode_t insn)
880{
881 struct coverage_entry *entry = coverage.base;
882 struct coverage_entry *end = coverage.base + coverage.num_entries;
883 bool matched = false;
884 unsigned nesting = 0;
885
886 for (; entry < end; ++entry) {
887 const struct decode_header *h = entry->header;
888 enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
889
890 if (entry->nesting > nesting)
891 continue; /* Skip sub-table we didn't match */
892
893 if (entry->nesting < nesting)
894 break; /* End of sub-table we were scanning */
895
896 if (!matched) {
897 if ((insn & h->mask.bits) != h->value.bits)
898 continue;
899 entry->matched = true;
900 }
901
902 switch (type) {
903
904 case DECODE_TYPE_TABLE:
905 ++nesting;
906 break;
907
908 case DECODE_TYPE_CUSTOM:
909 case DECODE_TYPE_SIMULATE:
910 case DECODE_TYPE_EMULATE:
911 coverage_add_registers(entry, insn);
912 return;
913
914 case DECODE_TYPE_OR:
915 matched = true;
916 break;
917
918 case DECODE_TYPE_REJECT:
919 default:
920 return;
921 }
922
923 }
924}
925
926static void coverage_end(void)
927{
928 struct coverage_entry *entry = coverage.base;
929 struct coverage_entry *end = coverage.base + coverage.num_entries;
930
931 for (; entry < end; ++entry) {
932 u32 mask = entry->header->mask.bits;
933 u32 value = entry->header->value.bits;
934
935 if (entry->regs) {
936 pr_err("FAIL: Register test coverage missing for %08x %08x (%05x)\n",
937 mask, value, entry->regs);
938 coverage_fail = true;
939 }
940 if (!entry->matched) {
941 pr_err("FAIL: Test coverage entry missing for %08x %08x\n",
942 mask, value);
943 coverage_fail = true;
944 }
945 }
946
947 kfree(coverage.base);
948}
949
950
951/*
952 * Framework for instruction set test cases
953 */
954
955void __naked __kprobes_test_case_start(void)
956{
957 __asm__ __volatile__ (
958 "stmdb sp!, {r4-r11} \n\t"
959 "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
960 "bic r0, lr, #1 @ r0 = inline title string \n\t"
961 "mov r1, sp \n\t"
962 "bl kprobes_test_case_start \n\t"
963 "bx r0 \n\t"
964 );
965}
966
967#ifndef CONFIG_THUMB2_KERNEL
968
969void __naked __kprobes_test_case_end_32(void)
970{
971 __asm__ __volatile__ (
972 "mov r4, lr \n\t"
973 "bl kprobes_test_case_end \n\t"
974 "cmp r0, #0 \n\t"
975 "movne pc, r0 \n\t"
976 "mov r0, r4 \n\t"
977 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
978 "ldmia sp!, {r4-r11} \n\t"
979 "mov pc, r0 \n\t"
980 );
981}
982
983#else /* CONFIG_THUMB2_KERNEL */
984
985void __naked __kprobes_test_case_end_16(void)
986{
987 __asm__ __volatile__ (
988 "mov r4, lr \n\t"
989 "bl kprobes_test_case_end \n\t"
990 "cmp r0, #0 \n\t"
991 "bxne r0 \n\t"
992 "mov r0, r4 \n\t"
993 "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
994 "ldmia sp!, {r4-r11} \n\t"
995 "bx r0 \n\t"
996 );
997}
998
999void __naked __kprobes_test_case_end_32(void)
1000{
1001 __asm__ __volatile__ (
1002 ".arm \n\t"
1003 "orr lr, lr, #1 @ will return to Thumb code \n\t"
1004 "ldr pc, 1f \n\t"
1005 "1: \n\t"
1006 ".word __kprobes_test_case_end_16 \n\t"
1007 );
1008}
1009
1010#endif
1011
1012
1013int kprobe_test_flags;
1014int kprobe_test_cc_position;
1015
1016static int test_try_count;
1017static int test_pass_count;
1018static int test_fail_count;
1019
1020static struct pt_regs initial_regs;
1021static struct pt_regs expected_regs;
1022static struct pt_regs result_regs;
1023
1024static u32 expected_memory[TEST_MEMORY_SIZE/sizeof(u32)];
1025
1026static const char *current_title;
1027static struct test_arg *current_args;
1028static u32 *current_stack;
1029static uintptr_t current_branch_target;
1030
1031static uintptr_t current_code_start;
1032static kprobe_opcode_t current_instruction;
1033
1034
1035#define TEST_CASE_PASSED -1
1036#define TEST_CASE_FAILED -2
1037
1038static int test_case_run_count;
1039static bool test_case_is_thumb;
1040static int test_instance;
1041
1042/*
1043 * We ignore the state of the imprecise abort disable flag (CPSR.A) because this
1044 * can change randomly as the kernel doesn't take care to preserve or initialise
1045 * this across context switches. Also, with the Security Extensions, the flag may
1046 * not be under the control of the kernel; for this reason we ignore the state of
1047 * the FIQ disable flag CPSR.F as well.
1048 */
1049#define PSR_IGNORE_BITS (PSR_A_BIT | PSR_F_BIT)
1050
1051static unsigned long test_check_cc(int cc, unsigned long cpsr)
1052{
1053 unsigned long temp;
1054
1055 switch (cc) {
1056 case 0x0: /* eq */
1057 return cpsr & PSR_Z_BIT;
1058
1059 case 0x1: /* ne */
1060 return (~cpsr) & PSR_Z_BIT;
1061
1062 case 0x2: /* cs */
1063 return cpsr & PSR_C_BIT;
1064
1065 case 0x3: /* cc */
1066 return (~cpsr) & PSR_C_BIT;
1067
1068 case 0x4: /* mi */
1069 return cpsr & PSR_N_BIT;
1070
1071 case 0x5: /* pl */
1072 return (~cpsr) & PSR_N_BIT;
1073
1074 case 0x6: /* vs */
1075 return cpsr & PSR_V_BIT;
1076
1077 case 0x7: /* vc */
1078 return (~cpsr) & PSR_V_BIT;
1079
1080 case 0x8: /* hi */
1081 cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
1082 return cpsr & PSR_C_BIT;
1083
1084 case 0x9: /* ls */
1085 cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
1086 return (~cpsr) & PSR_C_BIT;
1087
1088 case 0xa: /* ge */
1089 cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1090 return (~cpsr) & PSR_N_BIT;
1091
1092 case 0xb: /* lt */
1093 cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1094 return cpsr & PSR_N_BIT;
1095
1096 case 0xc: /* gt */
1097 temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1098 temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
1099 return (~temp) & PSR_N_BIT;
1100
1101 case 0xd: /* le */
1102 temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1103 temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
1104 return temp & PSR_N_BIT;
1105
1106 case 0xe: /* al */
1107 case 0xf: /* unconditional */
1108 return true;
1109 }
1110 BUG();
1111 return false;
1112}
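The shift tricks above rely only on the PSR flag layout (N, Z, C and V occupying bits 31 down to 28). A minimal standalone check of the 'hi' and 'ge' identities, assuming just those bit positions:

	#include <assert.h>

	#define N_BIT (1u << 31)
	#define Z_BIT (1u << 30)
	#define C_BIT (1u << 29)
	#define V_BIT (1u << 28)

	int main(void)
	{
		unsigned cpsr;

		/* 'hi' is C && !Z: cpsr >> 1 lines the Z flag up with the C flag */
		cpsr = C_BIT | Z_BIT;
		assert(((cpsr & ~(cpsr >> 1)) & C_BIT) == 0);	/* Z set -> 'hi' false */
		cpsr = C_BIT;
		assert(((cpsr & ~(cpsr >> 1)) & C_BIT) != 0);	/* Z clear -> 'hi' true */

		/* 'ge' is N == V: cpsr << 3 lines the V flag up with the N flag */
		cpsr = N_BIT | V_BIT;
		assert((~(cpsr ^ (cpsr << 3)) & N_BIT) != 0);	/* N == V -> 'ge' true */

		return 0;
	}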
1113
1114static int is_last_scenario;
1115static int probe_should_run; /* 0 = no, 1 = yes, -1 = unknown */
1116static int memory_needs_checking;
1117
1118static unsigned long test_context_cpsr(int scenario)
1119{
1120 unsigned long cpsr;
1121
1122 probe_should_run = 1;
1123
1124 /* Default case is that we cycle through 16 combinations of flags */
1125 cpsr = (scenario & 0xf) << 28; /* N,Z,C,V flags */
1126 cpsr |= (scenario & 0xf) << 16; /* GE flags */
1127 cpsr |= (scenario & 0x1) << 27; /* Toggle Q flag */
1128
1129 if (!test_case_is_thumb) {
1130 /* Testing ARM code */
1131 probe_should_run = test_check_cc(current_instruction >> 28, cpsr) != 0;
1132 if (scenario == 15)
1133 is_last_scenario = true;
1134
1135 } else if (kprobe_test_flags & TEST_FLAG_NO_ITBLOCK) {
1136 /* Testing Thumb code without setting ITSTATE */
1137 if (kprobe_test_cc_position) {
1138 int cc = (current_instruction >> kprobe_test_cc_position) & 0xf;
1139 probe_should_run = test_check_cc(cc, cpsr) != 0;
1140 }
1141
1142 if (scenario == 15)
1143 is_last_scenario = true;
1144
1145 } else if (kprobe_test_flags & TEST_FLAG_FULL_ITBLOCK) {
1146 /* Testing Thumb code with all combinations of ITSTATE */
1147 unsigned x = (scenario >> 4);
1148 unsigned cond_base = x % 7; /* ITSTATE<7:5> */
1149 unsigned mask = x / 7 + 2; /* ITSTATE<4:0>, bits reversed */
1150
1151 if (mask > 0x1f) {
1152 /* Finish by testing state from instruction 'itt al' */
1153 cond_base = 7;
1154 mask = 0x4;
1155 if ((scenario & 0xf) == 0xf)
1156 is_last_scenario = true;
1157 }
1158
1159 cpsr |= cond_base << 13; /* ITSTATE<7:5> */
1160 cpsr |= (mask & 0x1) << 12; /* ITSTATE<4> */
1161 cpsr |= (mask & 0x2) << 10; /* ITSTATE<3> */
1162 cpsr |= (mask & 0x4) << 8; /* ITSTATE<2> */
1163 cpsr |= (mask & 0x8) << 23; /* ITSTATE<1> */
1164 cpsr |= (mask & 0x10) << 21; /* ITSTATE<0> */
1165
1166 probe_should_run = test_check_cc((cpsr >> 12) & 0xf, cpsr) != 0;
1167
1168 } else {
1169 /* Testing Thumb code with several combinations of ITSTATE */
1170 switch (scenario) {
1171 case 16: /* Clear NZCV flags and 'it eq' state (false as Z=0) */
1172 cpsr = 0x00000800;
1173 probe_should_run = 0;
1174 break;
1175 case 17: /* Set NZCV flags and 'it vc' state (false as V=1) */
1176 cpsr = 0xf0007800;
1177 probe_should_run = 0;
1178 break;
1179 case 18: /* Clear NZCV flags and 'it ls' state (true as C=0) */
1180 cpsr = 0x00009800;
1181 break;
1182 case 19: /* Set NZCV flags and 'it cs' state (true as C=1) */
1183 cpsr = 0xf0002800;
1184 is_last_scenario = true;
1185 break;
1186 }
1187 }
1188
1189 return cpsr;
1190}
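The six ORs in the FULL_ITBLOCK branch implement the architectural split of ITSTATE across CPSR (IT<7:2> in CPSR bits 15:10, IT<1:0> in bits 26:25). A hypothetical helper packing a plain 8-bit ITSTATE value the same way:

	/* Hypothetical helper, equivalent to the bit-by-bit ORs above. */
	static unsigned long it_to_cpsr(unsigned it)
	{
		unsigned long cpsr = 0;
		cpsr |= (unsigned long)(it & 0xfc) << 8;	/* ITSTATE<7:2> -> CPSR<15:10> */
		cpsr |= (unsigned long)(it & 0x03) << 25;	/* ITSTATE<1:0> -> CPSR<26:25> */
		return cpsr;
	}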
1191
1192static void setup_test_context(struct pt_regs *regs)
1193{
1194 int scenario = test_case_run_count>>1;
1195 unsigned long val;
1196 struct test_arg *args;
1197 int i;
1198
1199 is_last_scenario = false;
1200 memory_needs_checking = false;
1201
1202 /* Initialise test memory on stack */
1203 val = (scenario & 1) ? VALM : ~VALM;
1204 for (i = 0; i < TEST_MEMORY_SIZE / sizeof(current_stack[0]); ++i)
1205 current_stack[i] = val + (i << 8);
1206 /* Put target of branch on stack for tests which load PC from memory */
1207 if (current_branch_target)
1208 current_stack[15] = current_branch_target;
1209 /* Put a value for SP on stack for tests which load SP from memory */
1210 current_stack[13] = (u32)current_stack + 120;
1211
1212 /* Initialise register values to their default state */
1213 val = (scenario & 2) ? VALR : ~VALR;
1214 for (i = 0; i < 13; ++i)
1215 regs->uregs[i] = val ^ (i << 8);
1216 regs->ARM_lr = val ^ (14 << 8);
1217 regs->ARM_cpsr &= ~(APSR_MASK | PSR_IT_MASK);
1218 regs->ARM_cpsr |= test_context_cpsr(scenario);
1219
1220 /* Perform testcase specific register setup */
1221 args = current_args;
1222 for (; args[0].type != ARG_TYPE_END; ++args)
1223 switch (args[0].type) {
1224 case ARG_TYPE_REG: {
1225 struct test_arg_regptr *arg =
1226 (struct test_arg_regptr *)args;
1227 regs->uregs[arg->reg] = arg->val;
1228 break;
1229 }
1230 case ARG_TYPE_PTR: {
1231 struct test_arg_regptr *arg =
1232 (struct test_arg_regptr *)args;
1233 regs->uregs[arg->reg] =
1234 (unsigned long)current_stack + arg->val;
1235 memory_needs_checking = true;
1236 break;
1237 }
1238 case ARG_TYPE_MEM: {
1239 struct test_arg_mem *arg = (struct test_arg_mem *)args;
1240 current_stack[arg->index] = arg->val;
1241 break;
1242 }
1243 default:
1244 break;
1245 }
1246}
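/*
 * The records walked above are emitted inline by the TEST_ARG_*
 * assembler macros in kprobes-test.h. As a rough, purely illustrative
 * C-level picture, the metadata for a TEST_RP(...) style case amounts
 * to (the END record is really a struct test_arg_end):
 */
static const struct test_arg_regptr example_args[] = {
	{ .type = ARG_TYPE_REG, .reg = 1, .val = 0x12345678 }, /* r1 = VAL1 */
	{ .type = ARG_TYPE_PTR, .reg = 2, .val = 24 },  /* r2 = stack + 24 */
	{ .type = ARG_TYPE_END },
};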
1247
1248struct test_probe {
1249 struct kprobe kprobe;
1250 bool registered;
1251 int hit;
1252};
1253
1254static void unregister_test_probe(struct test_probe *probe)
1255{
1256 if (probe->registered) {
1257 unregister_kprobe(&probe->kprobe);
1258 probe->kprobe.flags = 0; /* Clear disable flag to allow reuse */
1259 }
1260 probe->registered = false;
1261}
1262
1263static int register_test_probe(struct test_probe *probe)
1264{
1265 int ret;
1266
1267 if (probe->registered)
1268 BUG();
1269
1270 ret = register_kprobe(&probe->kprobe);
1271 if (ret >= 0) {
1272 probe->registered = true;
1273 probe->hit = -1;
1274 }
1275 return ret;
1276}
1277
1278static int __kprobes
1279test_before_pre_handler(struct kprobe *p, struct pt_regs *regs)
1280{
1281 container_of(p, struct test_probe, kprobe)->hit = test_instance;
1282 return 0;
1283}
1284
1285static void __kprobes
1286test_before_post_handler(struct kprobe *p, struct pt_regs *regs,
1287 unsigned long flags)
1288{
1289 setup_test_context(regs);
1290 initial_regs = *regs;
1291 initial_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
1292}
1293
1294static int __kprobes
1295test_case_pre_handler(struct kprobe *p, struct pt_regs *regs)
1296{
1297 container_of(p, struct test_probe, kprobe)->hit = test_instance;
1298 return 0;
1299}
1300
1301static int __kprobes
1302test_after_pre_handler(struct kprobe *p, struct pt_regs *regs)
1303{
1304 if (container_of(p, struct test_probe, kprobe)->hit == test_instance)
1305 return 0; /* Already run for this test instance */
1306
1307 result_regs = *regs;
1308 result_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
1309
1310 /* Undo any changes done to SP by the test case */
1311 regs->ARM_sp = (unsigned long)current_stack;
1312
1313 container_of(p, struct test_probe, kprobe)->hit = test_instance;
1314 return 0;
1315}
1316
1317static struct test_probe test_before_probe = {
1318 .kprobe.pre_handler = test_before_pre_handler,
1319 .kprobe.post_handler = test_before_post_handler,
1320};
1321
1322static struct test_probe test_case_probe = {
1323 .kprobe.pre_handler = test_case_pre_handler,
1324};
1325
1326static struct test_probe test_after_probe = {
1327 .kprobe.pre_handler = test_after_pre_handler,
1328};
1329
1330static struct test_probe test_after2_probe = {
1331 .kprobe.pre_handler = test_after_pre_handler,
1332};
1333
1334static void test_case_cleanup(void)
1335{
1336 unregister_test_probe(&test_before_probe);
1337 unregister_test_probe(&test_case_probe);
1338 unregister_test_probe(&test_after_probe);
1339 unregister_test_probe(&test_after2_probe);
1340}
1341
1342static void print_registers(struct pt_regs *regs)
1343{
1344 pr_err("r0 %08lx | r1 %08lx | r2 %08lx | r3 %08lx\n",
1345 regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
1346 pr_err("r4 %08lx | r5 %08lx | r6 %08lx | r7 %08lx\n",
1347 regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
1348 pr_err("r8 %08lx | r9 %08lx | r10 %08lx | r11 %08lx\n",
1349 regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp);
1350 pr_err("r12 %08lx | sp %08lx | lr %08lx | pc %08lx\n",
1351 regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc);
1352 pr_err("cpsr %08lx\n", regs->ARM_cpsr);
1353}
1354
1355static void print_memory(u32 *mem, size_t size)
1356{
1357 int i;
1358 for (i = 0; i < size / sizeof(u32); i += 4)
1359 pr_err("%08x %08x %08x %08x\n", mem[i], mem[i+1],
1360 mem[i+2], mem[i+3]);
1361}
1362
1363static size_t expected_memory_size(u32 *sp)
1364{
1365 size_t size = sizeof(expected_memory);
1366 int offset = (uintptr_t)sp - (uintptr_t)current_stack;
1367 if (offset > 0)
1368 size -= offset;
1369 return size;
1370}
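/*
 * Worked example with made-up numbers: if sizeof(expected_memory) is 256
 * and the test case left SP at current_stack + 8 (two u32 slots popped),
 * offset is 8 and only the remaining 248 bytes are compared.
 */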
1371
1372static void test_case_failed(const char *message)
1373{
1374 test_case_cleanup();
1375
1376 pr_err("FAIL: %s\n", message);
1377 pr_err("FAIL: Test %s\n", current_title);
1378 pr_err("FAIL: Scenario %d\n", test_case_run_count >> 1);
1379}
1380
1381static unsigned long next_instruction(unsigned long pc)
1382{
1383#ifdef CONFIG_THUMB2_KERNEL
1384 if ((pc & 1) && !is_wide_instruction(*(u16 *)(pc - 1)))
1385 return pc + 2;
1386 else
1387#endif
1388 return pc + 4;
1389}
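/*
 * A sketch of the rule next_instruction() relies on, assuming the
 * is_wide_instruction() definition used elsewhere in the kernel: a
 * Thumb instruction whose first halfword is >= 0xe800 uses the 32-bit
 * encoding, all others are 16-bit.
 */
static int example_thumb_insn_size(u16 first_halfword)
{
	return first_halfword >= 0xe800 ? 4 : 2;
}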
1390
1391static uintptr_t __used kprobes_test_case_start(const char *title, void *stack)
1392{
1393 struct test_arg *args;
1394 struct test_arg_end *end_arg;
1395 unsigned long test_code;
1396
1397 args = (struct test_arg *)PTR_ALIGN(title + strlen(title) + 1, 4);
1398
1399 current_title = title;
1400 current_args = args;
1401 current_stack = stack;
1402
1403 ++test_try_count;
1404
1405 while (args->type != ARG_TYPE_END)
1406 ++args;
1407 end_arg = (struct test_arg_end *)args;
1408
1409 test_code = (unsigned long)(args + 1); /* Code starts after args */
1410
1411 test_case_is_thumb = end_arg->flags & ARG_FLAG_THUMB;
1412 if (test_case_is_thumb)
1413 test_code |= 1;
1414
1415 current_code_start = test_code;
1416
1417 current_branch_target = 0;
1418 if (end_arg->branch_offset != end_arg->end_offset)
1419 current_branch_target = test_code + end_arg->branch_offset;
1420
1421 test_code += end_arg->code_offset;
1422 test_before_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
1423
1424 test_code = next_instruction(test_code);
1425 test_case_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
1426
1427 if (test_case_is_thumb) {
1428 u16 *p = (u16 *)(test_code & ~1);
1429 current_instruction = p[0];
1430 if (is_wide_instruction(current_instruction)) {
1431 current_instruction <<= 16;
1432 current_instruction |= p[1];
1433 }
1434 } else {
1435 current_instruction = *(u32 *)test_code;
1436 }
1437
1438 if (current_title[0] == '.')
1439 verbose("%s\n", current_title);
1440 else
1441 verbose("%s\t@ %0*x\n", current_title,
1442 test_case_is_thumb ? 4 : 8,
1443 current_instruction);
1444
1445 test_code = next_instruction(test_code);
1446 test_after_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
1447
1448 if (kprobe_test_flags & TEST_FLAG_NARROW_INSTR) {
1449 if (!test_case_is_thumb ||
1450 is_wide_instruction(current_instruction)) {
1451 test_case_failed("expected 16-bit instruction");
1452 goto fail;
1453 }
1454 } else {
1455 if (test_case_is_thumb &&
1456 !is_wide_instruction(current_instruction)) {
1457 test_case_failed("expected 32-bit instruction");
1458 goto fail;
1459 }
1460 }
1461
1462 coverage_add(current_instruction);
1463
1464 if (end_arg->flags & ARG_FLAG_UNSUPPORTED) {
1465 if (register_test_probe(&test_case_probe) < 0)
1466 goto pass;
1467 test_case_failed("registered probe for unsupported instruction");
1468 goto fail;
1469 }
1470
1471 if (end_arg->flags & ARG_FLAG_SUPPORTED) {
1472 if (register_test_probe(&test_case_probe) >= 0)
1473 goto pass;
1474 test_case_failed("couldn't register probe for supported instruction");
1475 goto fail;
1476 }
1477
1478 if (register_test_probe(&test_before_probe) < 0) {
1479 test_case_failed("register test_before_probe failed");
1480 goto fail;
1481 }
1482 if (register_test_probe(&test_after_probe) < 0) {
1483 test_case_failed("register test_after_probe failed");
1484 goto fail;
1485 }
1486 if (current_branch_target) {
1487 test_after2_probe.kprobe.addr =
1488 (kprobe_opcode_t *)current_branch_target;
1489 if (register_test_probe(&test_after2_probe) < 0) {
1490 test_case_failed("register test_after2_probe failed");
1491 goto fail;
1492 }
1493 }
1494
1495 /* Start first run of test case */
1496 test_case_run_count = 0;
1497 ++test_instance;
1498 return current_code_start;
1499pass:
1500 test_case_run_count = TEST_CASE_PASSED;
1501 return (uintptr_t)test_after_probe.kprobe.addr;
1502fail:
1503 test_case_run_count = TEST_CASE_FAILED;
1504 return (uintptr_t)test_after_probe.kprobe.addr;
1505}
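/*
 * For orientation, a sketch of the probe layout kprobes_test_case_start()
 * sets up (offsets symbolic, not literal):
 *
 *   code_offset      -> test_before_probe (the nop before the test insn)
 *   + 1 instruction  -> test_case_probe   (the instruction under test)
 *   + 1 instruction  -> test_after_probe
 *   branch_offset    -> test_after2_probe (only when the case branches)
 */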
1506
1507static bool check_test_results(void)
1508{
1509 size_t mem_size = 0;
1510 u32 *mem = NULL;
1511
1512 if (memcmp(&expected_regs, &result_regs, sizeof(expected_regs))) {
1513 test_case_failed("registers differ");
1514 goto fail;
1515 }
1516
1517 if (memory_needs_checking) {
1518 mem = (u32 *)result_regs.ARM_sp;
1519 mem_size = expected_memory_size(mem);
1520 if (memcmp(expected_memory, mem, mem_size)) {
1521 test_case_failed("test memory differs");
1522 goto fail;
1523 }
1524 }
1525
1526 return true;
1527
1528fail:
1529 pr_err("initial_regs:\n");
1530 print_registers(&initial_regs);
1531 pr_err("expected_regs:\n");
1532 print_registers(&expected_regs);
1533 pr_err("result_regs:\n");
1534 print_registers(&result_regs);
1535
1536 if (mem) {
1537 pr_err("current_stack=%p\n", current_stack);
1538 pr_err("expected_memory:\n");
1539 print_memory(expected_memory, mem_size);
1540 pr_err("result_memory:\n");
1541 print_memory(mem, mem_size);
1542 }
1543
1544 return false;
1545}
1546
1547static uintptr_t __used kprobes_test_case_end(void)
1548{
1549 if (test_case_run_count < 0) {
1550 if (test_case_run_count == TEST_CASE_PASSED)
1551 /* kprobes_test_case_start did all the needed testing */
1552 goto pass;
1553 else
1554 /* kprobes_test_case_start failed */
1555 goto fail;
1556 }
1557
1558 if (test_before_probe.hit != test_instance) {
1559 test_case_failed("test_before_handler not run");
1560 goto fail;
1561 }
1562
1563 if (test_after_probe.hit != test_instance &&
1564 test_after2_probe.hit != test_instance) {
1565 test_case_failed("test_after_handler not run");
1566 goto fail;
1567 }
1568
1569 /*
1570 * Even numbered test runs ran without a probe on the test case so
1571 * we can gather reference results. The subsequent odd numbered run
1572 * will have the probe inserted.
1573 */
1574 if ((test_case_run_count & 1) == 0) {
1575 /* Save results from run without probe */
1576 u32 *mem = (u32 *)result_regs.ARM_sp;
1577 expected_regs = result_regs;
1578 memcpy(expected_memory, mem, expected_memory_size(mem));
1579
1580 /* Insert probe onto test case instruction */
1581 if (register_test_probe(&test_case_probe) < 0) {
1582 test_case_failed("register test_case_probe failed");
1583 goto fail;
1584 }
1585 } else {
1586 /* Check probe ran as expected */
1587 if (probe_should_run == 1) {
1588 if (test_case_probe.hit != test_instance) {
1589 test_case_failed("test_case_handler not run");
1590 goto fail;
1591 }
1592 } else if (probe_should_run == 0) {
1593 if (test_case_probe.hit == test_instance) {
1594 test_case_failed("test_case_handler ran");
1595 goto fail;
1596 }
1597 }
1598
1599 /* Remove probe for any subsequent reference run */
1600 unregister_test_probe(&test_case_probe);
1601
1602 if (!check_test_results())
1603 goto fail;
1604
1605 if (is_last_scenario)
1606 goto pass;
1607 }
1608
1609 /* Do next test run */
1610 ++test_case_run_count;
1611 ++test_instance;
1612 return current_code_start;
1613fail:
1614 ++test_fail_count;
1615 goto end;
1616pass:
1617 ++test_pass_count;
1618end:
1619 test_case_cleanup();
1620 return 0;
1621}
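/*
 * Condensed sketch of the cycle driven above, for orientation only:
 *
 *   even run: execute unprobed, capture expected_regs / expected_memory
 *   odd run : execute with test_case_probe inserted, then
 *             check_test_results() compares against the reference
 *   repeat with the next scenario until is_last_scenario is set
 */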
1622
1623
1624/*
1625 * Top level test functions
1626 */
1627
1628static int run_test_cases(void (*tests)(void), const union decode_item *table)
1629{
1630 int ret;
1631
1632 pr_info(" Check decoding tables\n");
1633 ret = table_test(table);
1634 if (ret)
1635 return ret;
1636
1637 pr_info(" Run test cases\n");
1638 ret = coverage_start(table);
1639 if (ret)
1640 return ret;
1641
1642 tests();
1643
1644 coverage_end();
1645 return 0;
1646}
1647
1648
1649static int __init run_all_tests(void)
1650{
1651 int ret = 0;
1652
1653 pr_info("Beginning kprobe tests...\n");
1654
1655#ifndef CONFIG_THUMB2_KERNEL
1656
1657 pr_info("Probe ARM code\n");
1658 ret = run_api_tests(arm_func);
1659 if (ret)
1660 goto out;
1661
1662 pr_info("ARM instruction simulation\n");
1663 ret = run_test_cases(kprobe_arm_test_cases, kprobe_decode_arm_table);
1664 if (ret)
1665 goto out;
1666
1667#else /* CONFIG_THUMB2_KERNEL */
1668
1669 pr_info("Probe 16-bit Thumb code\n");
1670 ret = run_api_tests(thumb16_func);
1671 if (ret)
1672 goto out;
1673
1674 pr_info("Probe 32-bit Thumb code, even halfword\n");
1675 ret = run_api_tests(thumb32even_func);
1676 if (ret)
1677 goto out;
1678
1679 pr_info("Probe 32-bit Thumb code, odd halfword\n");
1680 ret = run_api_tests(thumb32odd_func);
1681 if (ret)
1682 goto out;
1683
1684 pr_info("16-bit Thumb instruction simulation\n");
1685 ret = run_test_cases(kprobe_thumb16_test_cases,
1686 kprobe_decode_thumb16_table);
1687 if (ret)
1688 goto out;
1689
1690 pr_info("32-bit Thumb instruction simulation\n");
1691 ret = run_test_cases(kprobe_thumb32_test_cases,
1692 kprobe_decode_thumb32_table);
1693 if (ret)
1694 goto out;
1695#endif
1696
1697 pr_info("Total instruction simulation tests=%d, pass=%d fail=%d\n",
1698 test_try_count, test_pass_count, test_fail_count);
1699 if (test_fail_count) {
1700 ret = -EINVAL;
1701 goto out;
1702 }
1703
1704#if BENCHMARKING
1705 pr_info("Benchmarks\n");
1706 ret = run_benchmarks();
1707 if (ret)
1708 goto out;
1709#endif
1710
1711#if __LINUX_ARM_ARCH__ >= 7
1712 /* We are able to run all test cases so coverage should be complete */
1713 if (coverage_fail) {
1714 pr_err("FAIL: Test coverage checks failed\n");
1715 ret = -EINVAL;
1716 goto out;
1717 }
1718#endif
1719
1720out:
1721 if (ret == 0)
1722 pr_info("Finished kprobe tests OK\n");
1723 else
1724 pr_err("kprobe tests failed\n");
1725
1726 return ret;
1727}
1728
1729
1730/*
1731 * Module setup
1732 */
1733
1734#ifdef MODULE
1735
1736static void __exit kprobe_test_exit(void)
1737{
1738}
1739
1740module_init(run_all_tests);
1741module_exit(kprobe_test_exit);
1742MODULE_LICENSE("GPL");
1743
1744#else /* !MODULE */
1745
1746late_initcall(run_all_tests);
1747
1748#endif
diff --git a/arch/arm/kernel/kprobes-test.h b/arch/arm/kernel/kprobes-test.h
new file mode 100644
index 000000000000..0dc5d77b9356
--- /dev/null
+++ b/arch/arm/kernel/kprobes-test.h
@@ -0,0 +1,392 @@
1/*
2 * arch/arm/kernel/kprobes-test.h
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#define VERBOSE 0 /* Set to '1' for more logging of test cases */
12
13#ifdef CONFIG_THUMB2_KERNEL
14#define NORMAL_ISA "16"
15#else
16#define NORMAL_ISA "32"
17#endif
18
19
20/* Flags used in kprobe_test_flags */
21#define TEST_FLAG_NO_ITBLOCK (1<<0)
22#define TEST_FLAG_FULL_ITBLOCK (1<<1)
23#define TEST_FLAG_NARROW_INSTR (1<<2)
24
25extern int kprobe_test_flags;
26extern int kprobe_test_cc_position;
27
28
29#define TEST_MEMORY_SIZE 256
30
31
32/*
33 * Test case structures.
34 *
35 * The arguments given to test cases can be one of three types.
36 *
37 * ARG_TYPE_REG
38 * Load a register with the given value.
39 *
40 * ARG_TYPE_PTR
41 * Load a register with a pointer into the stack buffer (SP + given value).
42 *
43 * ARG_TYPE_MEM
44 * Store the given value into the stack buffer at [SP+index].
45 *
46 */
47
48#define ARG_TYPE_END 0
49#define ARG_TYPE_REG 1
50#define ARG_TYPE_PTR 2
51#define ARG_TYPE_MEM 3
52
53#define ARG_FLAG_UNSUPPORTED 0x01
54#define ARG_FLAG_SUPPORTED 0x02
55#define ARG_FLAG_THUMB 0x10 /* Must be 16 so TEST_ISA can be used */
56#define ARG_FLAG_ARM 0x20 /* Must be 32 so TEST_ISA can be used */
57
58struct test_arg {
59 u8 type; /* ARG_TYPE_x */
60 u8 _padding[7];
61};
62
63struct test_arg_regptr {
64 u8 type; /* ARG_TYPE_REG or ARG_TYPE_PTR */
65 u8 reg;
66 u8 _padding[2];
67 u32 val;
68};
69
70struct test_arg_mem {
71 u8 type; /* ARG_TYPE_MEM */
72 u8 index;
73 u8 _padding[2];
74 u32 val;
75};
76
77struct test_arg_end {
78 u8 type; /* ARG_TYPE_END */
79 u8 flags; /* ARG_FLAG_x */
80 u16 code_offset;
81 u16 branch_offset;
82 u16 end_offset;
83};
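/*
 * All four record types are 8 bytes, which is what lets the parser in
 * kprobes-test.c step through them as struct test_arg entries. A hedged
 * illustration of checks one could place in an init function (not part
 * of this patch):
 *
 *	BUILD_BUG_ON(sizeof(struct test_arg) != 8);
 *	BUILD_BUG_ON(sizeof(struct test_arg_regptr) != 8);
 *	BUILD_BUG_ON(sizeof(struct test_arg_mem) != 8);
 *	BUILD_BUG_ON(sizeof(struct test_arg_end) != 8);
 */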
84
85
86/*
87 * Building blocks for test cases.
88 *
89 * Each test case is wrapped between TESTCASE_START and TESTCASE_END.
90 *
91 * To specify arguments for a test case the TEST_ARG_{REG,PTR,MEM} macros are
92 * used followed by a terminating TEST_ARG_END.
93 *
94 * After this, the instruction to be tested is defined with TEST_INSTRUCTION.
95 * Or for branches, TEST_BRANCH_B and TEST_BRANCH_F (branch forwards/backwards).
96 *
97 * Some specific test cases may make use of other custom constructs.
98 */
99
100#if VERBOSE
101#define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
102#else
103#define verbose(fmt, ...)
104#endif
105
106#define TEST_GROUP(title) \
107 verbose("\n"); \
108 verbose(title"\n"); \
109 verbose("---------------------------------------------------------\n");
110
111#define TESTCASE_START(title) \
112 __asm__ __volatile__ ( \
113 "bl __kprobes_test_case_start \n\t" \
114 /* don't use .asciz here as 'title' may be */ \
115 /* multiple strings to be concatenated. */ \
116 ".ascii "#title" \n\t" \
117 ".byte 0 \n\t" \
118 ".align 2 \n\t"
119
120#define TEST_ARG_REG(reg, val) \
121 ".byte "__stringify(ARG_TYPE_REG)" \n\t" \
122 ".byte "#reg" \n\t" \
123 ".short 0 \n\t" \
124 ".word "#val" \n\t"
125
126#define TEST_ARG_PTR(reg, val) \
127 ".byte "__stringify(ARG_TYPE_PTR)" \n\t" \
128 ".byte "#reg" \n\t" \
129 ".short 0 \n\t" \
130 ".word "#val" \n\t"
131
132#define TEST_ARG_MEM(index, val) \
133 ".byte "__stringify(ARG_TYPE_MEM)" \n\t" \
134 ".byte "#index" \n\t" \
135 ".short 0 \n\t" \
136 ".word "#val" \n\t"
137
138#define TEST_ARG_END(flags) \
139 ".byte "__stringify(ARG_TYPE_END)" \n\t" \
140 ".byte "TEST_ISA flags" \n\t" \
141 ".short 50f-0f \n\t" \
142 ".short 2f-0f \n\t" \
143 ".short 99f-0f \n\t" \
144 ".code "TEST_ISA" \n\t" \
145 "0: \n\t"
146
147#define TEST_INSTRUCTION(instruction) \
148 "50: nop \n\t" \
149 "1: "instruction" \n\t" \
150 " nop \n\t"
151
152#define TEST_BRANCH_F(instruction, xtra_dist) \
153 TEST_INSTRUCTION(instruction) \
154 ".if "#xtra_dist" \n\t" \
155 " b 99f \n\t" \
156 ".space "#xtra_dist" \n\t" \
157 ".endif \n\t" \
158 " b 99f \n\t" \
159 "2: nop \n\t"
160
161#define TEST_BRANCH_B(instruction, xtra_dist) \
162 " b 50f \n\t" \
163 " b 99f \n\t" \
164 "2: nop \n\t" \
165 " b 99f \n\t" \
166 ".if "#xtra_dist" \n\t" \
167 ".space "#xtra_dist" \n\t" \
168 ".endif \n\t" \
169 TEST_INSTRUCTION(instruction)
170
171#define TESTCASE_END \
172 "2: \n\t" \
173 "99: \n\t" \
174 " bl __kprobes_test_case_end_"TEST_ISA" \n\t" \
175 ".code "NORMAL_ISA" \n\t" \
176 : : \
177 : "r0", "r1", "r2", "r3", "ip", "lr", "memory", "cc" \
178 );
179
180
181/*
182 * Macros to define test cases.
183 *
184 * Those of the form TEST_{R,P,M}* can be used to define test cases
185 * which take combinations of the three basic types of arguments. E.g.
186 *
187 * TEST_R One register argument
188 * TEST_RR Two register arguments
189 * TEST_RPR A register, a pointer, then a register argument
190 *
191 * For testing instructions which may branch, there are macros TEST_BF_*
192 * and TEST_BB_* for branching forwards and backwards.
193 *
194 * TEST_SUPPORTED and TEST_UNSUPPORTED don't cause the code to be executed,
195 * they just verify that a kprobe is or is not allowed on the given instruction.
196 */
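/*
 * A hedged example of how these compose, patterned on the usage in
 * kprobes-test-arm.c elsewhere in this series (operands illustrative):
 *
 *	TEST_R(  "mov r0, r",2, VAL1,"")
 *	TEST_RR( "add r0, r",1, VAL1,", r",2, VAL2,"")
 *	TEST_P(  "ldr r0, [r",1, 24,"]")
 *
 * expanding to the titles "mov r0, r2", "add r0, r1, r2" and
 * "ldr r0, [r1]", with r1 pointing 24 bytes into the stack buffer in
 * the last case.
 */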
197
198#define TEST(code) \
199 TESTCASE_START(code) \
200 TEST_ARG_END("") \
201 TEST_INSTRUCTION(code) \
202 TESTCASE_END
203
204#define TEST_UNSUPPORTED(code) \
205 TESTCASE_START(code) \
206 TEST_ARG_END("|"__stringify(ARG_FLAG_UNSUPPORTED)) \
207 TEST_INSTRUCTION(code) \
208 TESTCASE_END
209
210#define TEST_SUPPORTED(code) \
211 TESTCASE_START(code) \
212 TEST_ARG_END("|"__stringify(ARG_FLAG_SUPPORTED)) \
213 TEST_INSTRUCTION(code) \
214 TESTCASE_END
215
216#define TEST_R(code1, reg, val, code2) \
217 TESTCASE_START(code1 #reg code2) \
218 TEST_ARG_REG(reg, val) \
219 TEST_ARG_END("") \
220 TEST_INSTRUCTION(code1 #reg code2) \
221 TESTCASE_END
222
223#define TEST_RR(code1, reg1, val1, code2, reg2, val2, code3) \
224 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
225 TEST_ARG_REG(reg1, val1) \
226 TEST_ARG_REG(reg2, val2) \
227 TEST_ARG_END("") \
228 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
229 TESTCASE_END
230
231#define TEST_RRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
232 TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
233 TEST_ARG_REG(reg1, val1) \
234 TEST_ARG_REG(reg2, val2) \
235 TEST_ARG_REG(reg3, val3) \
236 TEST_ARG_END("") \
237 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
238 TESTCASE_END
239
240#define TEST_RRRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4, reg4, val4) \
241 TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
242 TEST_ARG_REG(reg1, val1) \
243 TEST_ARG_REG(reg2, val2) \
244 TEST_ARG_REG(reg3, val3) \
245 TEST_ARG_REG(reg4, val4) \
246 TEST_ARG_END("") \
247 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
248 TESTCASE_END
249
250#define TEST_P(code1, reg1, val1, code2) \
251 TESTCASE_START(code1 #reg1 code2) \
252 TEST_ARG_PTR(reg1, val1) \
253 TEST_ARG_END("") \
254 TEST_INSTRUCTION(code1 #reg1 code2) \
255 TESTCASE_END
256
257#define TEST_PR(code1, reg1, val1, code2, reg2, val2, code3) \
258 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
259 TEST_ARG_PTR(reg1, val1) \
260 TEST_ARG_REG(reg2, val2) \
261 TEST_ARG_END("") \
262 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
263 TESTCASE_END
264
265#define TEST_RP(code1, reg1, val1, code2, reg2, val2, code3) \
266 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
267 TEST_ARG_REG(reg1, val1) \
268 TEST_ARG_PTR(reg2, val2) \
269 TEST_ARG_END("") \
270 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
271 TESTCASE_END
272
273#define TEST_PRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
274 TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
275 TEST_ARG_PTR(reg1, val1) \
276 TEST_ARG_REG(reg2, val2) \
277 TEST_ARG_REG(reg3, val3) \
278 TEST_ARG_END("") \
279 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
280 TESTCASE_END
281
282#define TEST_RPR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
283 TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
284 TEST_ARG_REG(reg1, val1) \
285 TEST_ARG_PTR(reg2, val2) \
286 TEST_ARG_REG(reg3, val3) \
287 TEST_ARG_END("") \
288 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
289 TESTCASE_END
290
291#define TEST_RRP(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
292 TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
293 TEST_ARG_REG(reg1, val1) \
294 TEST_ARG_REG(reg2, val2) \
295 TEST_ARG_PTR(reg3, val3) \
296 TEST_ARG_END("") \
297 TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
298 TESTCASE_END
299
300#define TEST_BF_P(code1, reg1, val1, code2) \
301 TESTCASE_START(code1 #reg1 code2) \
302 TEST_ARG_PTR(reg1, val1) \
303 TEST_ARG_END("") \
304 TEST_BRANCH_F(code1 #reg1 code2, 0) \
305 TESTCASE_END
306
307#define TEST_BF_X(code, xtra_dist) \
308 TESTCASE_START(code) \
309 TEST_ARG_END("") \
310 TEST_BRANCH_F(code, xtra_dist) \
311 TESTCASE_END
312
313#define TEST_BB_X(code, xtra_dist) \
314 TESTCASE_START(code) \
315 TEST_ARG_END("") \
316 TEST_BRANCH_B(code, xtra_dist) \
317 TESTCASE_END
318
319#define TEST_BF_RX(code1, reg, val, code2, xtra_dist) \
320 TESTCASE_START(code1 #reg code2) \
321 TEST_ARG_REG(reg, val) \
322 TEST_ARG_END("") \
323 TEST_BRANCH_F(code1 #reg code2, xtra_dist) \
324 TESTCASE_END
325
326#define TEST_BB_RX(code1, reg, val, code2, xtra_dist) \
327 TESTCASE_START(code1 #reg code2) \
328 TEST_ARG_REG(reg, val) \
329 TEST_ARG_END("") \
330 TEST_BRANCH_B(code1 #reg code2, xtra_dist) \
331 TESTCASE_END
332
333#define TEST_BF(code) TEST_BF_X(code, 0)
334#define TEST_BB(code) TEST_BB_X(code, 0)
335
336#define TEST_BF_R(code1, reg, val, code2) TEST_BF_RX(code1, reg, val, code2, 0)
337#define TEST_BB_R(code1, reg, val, code2) TEST_BB_RX(code1, reg, val, code2, 0)
338
339#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
340 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
341 TEST_ARG_REG(reg1, val1) \
342 TEST_ARG_REG(reg2, val2) \
343 TEST_ARG_END("") \
344 TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0) \
345 TESTCASE_END
346
347#define TEST_X(code, codex) \
348 TESTCASE_START(code) \
349 TEST_ARG_END("") \
350 TEST_INSTRUCTION(code) \
351 " b 99f \n\t" \
352 " "codex" \n\t" \
353 TESTCASE_END
354
355#define TEST_RX(code1, reg, val, code2, codex) \
356 TESTCASE_START(code1 #reg code2) \
357 TEST_ARG_REG(reg, val) \
358 TEST_ARG_END("") \
359 TEST_INSTRUCTION(code1 __stringify(reg) code2) \
360 " b 99f \n\t" \
361 " "codex" \n\t" \
362 TESTCASE_END
363
364#define TEST_RRX(code1, reg1, val1, code2, reg2, val2, code3, codex) \
365 TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
366 TEST_ARG_REG(reg1, val1) \
367 TEST_ARG_REG(reg2, val2) \
368 TEST_ARG_END("") \
369 TEST_INSTRUCTION(code1 __stringify(reg1) code2 __stringify(reg2) code3) \
370 " b 99f \n\t" \
371 " "codex" \n\t" \
372 TESTCASE_END
373
374
375/* Various values used in test cases... */
376#define N(val) (val ^ 0xffffffff)
377#define VAL1 0x12345678
378#define VAL2 N(VAL1)
379#define VAL3 0xa5f801
380#define VAL4 N(VAL3)
381#define VALM 0x456789ab
382#define VALR 0xdeaddead
383#define HH1 0x0123fecb
384#define HH2 0xa9874567
385
386
387#ifdef CONFIG_THUMB2_KERNEL
388void kprobe_thumb16_test_cases(void);
389void kprobe_thumb32_test_cases(void);
390#else
391void kprobe_arm_test_cases(void);
392#endif
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 902ca59e8b11..8f96ec778e8d 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/kprobes.h> 12#include <linux/kprobes.h>
13#include <linux/module.h>
13 14
14#include "kprobes.h" 15#include "kprobes.h"
15 16
@@ -943,6 +944,9 @@ const union decode_item kprobe_decode_thumb32_table[] = {
943 */ 944 */
944 DECODE_END 945 DECODE_END
945}; 946};
947#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
948EXPORT_SYMBOL_GPL(kprobe_decode_thumb32_table);
949#endif
946 950
947static void __kprobes 951static void __kprobes
948t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs) 952t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
@@ -1423,6 +1427,9 @@ const union decode_item kprobe_decode_thumb16_table[] = {
1423 1427
1424 DECODE_END 1428 DECODE_END
1425}; 1429};
1430#ifdef CONFIG_ARM_KPROBES_TEST_MODULE
1431EXPORT_SYMBOL_GPL(kprobe_decode_thumb16_table);
1432#endif
1426 1433
1427static unsigned long __kprobes thumb_check_cc(unsigned long cpsr) 1434static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
1428{ 1435{
diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h
index a6aeda0a6c7f..38945f78f9f1 100644
--- a/arch/arm/kernel/kprobes.h
+++ b/arch/arm/kernel/kprobes.h
@@ -413,6 +413,14 @@ struct decode_reject {
413 DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0) 413 DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
414 414
415 415
416#ifdef CONFIG_THUMB2_KERNEL
417extern const union decode_item kprobe_decode_thumb16_table[];
418extern const union decode_item kprobe_decode_thumb32_table[];
419#else
420extern const union decode_item kprobe_decode_arm_table[];
421#endif
422
423
416int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, 424int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
417 const union decode_item *table, bool thumb16); 425 const union decode_item *table, bool thumb16);
418 426
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 53c9c2610cbc..e6e5d7c84f1a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@
12 */ 12 */
13#define pr_fmt(fmt) "hw perfevents: " fmt 13#define pr_fmt(fmt) "hw perfevents: " fmt
14 14
15#include <linux/bitmap.h>
15#include <linux/interrupt.h> 16#include <linux/interrupt.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
17#include <linux/module.h> 18#include <linux/module.h>
@@ -26,16 +27,8 @@
26#include <asm/pmu.h> 27#include <asm/pmu.h>
27#include <asm/stacktrace.h> 28#include <asm/stacktrace.h>
28 29
29static struct platform_device *pmu_device;
30
31/*
32 * Hardware lock to serialize accesses to PMU registers. Needed for the
33 * read/modify/write sequences.
34 */
35static DEFINE_RAW_SPINLOCK(pmu_lock);
36
37/* 30/*
38 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add 31 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
39 * another platform that supports more, we need to increase this to be the 32 * another platform that supports more, we need to increase this to be the
40 * largest of all platforms. 33 * largest of all platforms.
41 * 34 *
@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock);
43 * cycle counter CCNT + 31 events counters CNT0..30. 36 * cycle counter CCNT + 31 events counters CNT0..30.
44 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. 37 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
45 */ 38 */
46#define ARMPMU_MAX_HWEVENTS 33 39#define ARMPMU_MAX_HWEVENTS 32
47 40
48/* The events for a given CPU. */ 41static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
49struct cpu_hw_events { 42static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
50 /* 43static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
51 * The events that are active on the CPU for the given index. Index 0
52 * is reserved.
53 */
54 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
55
56 /*
57 * A 1 bit for an index indicates that the counter is being used for
58 * an event. A 0 means that the counter can be used.
59 */
60 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
61 44
62 /* 45#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
63 * A 1 bit for an index indicates that the counter is actively being
64 * used.
65 */
66 unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
67};
68static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
69
70struct arm_pmu {
71 enum arm_perf_pmu_ids id;
72 const char *name;
73 irqreturn_t (*handle_irq)(int irq_num, void *dev);
74 void (*enable)(struct hw_perf_event *evt, int idx);
75 void (*disable)(struct hw_perf_event *evt, int idx);
76 int (*get_event_idx)(struct cpu_hw_events *cpuc,
77 struct hw_perf_event *hwc);
78 u32 (*read_counter)(int idx);
79 void (*write_counter)(int idx, u32 val);
80 void (*start)(void);
81 void (*stop)(void);
82 void (*reset)(void *);
83 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
84 [PERF_COUNT_HW_CACHE_OP_MAX]
85 [PERF_COUNT_HW_CACHE_RESULT_MAX];
86 const unsigned (*event_map)[PERF_COUNT_HW_MAX];
87 u32 raw_event_mask;
88 int num_events;
89 u64 max_period;
90};
91 46
92/* Set at runtime when we know what CPU type we are. */ 47/* Set at runtime when we know what CPU type we are. */
93static const struct arm_pmu *armpmu; 48static struct arm_pmu *cpu_pmu;
94 49
95enum arm_perf_pmu_ids 50enum arm_perf_pmu_ids
96armpmu_get_pmu_id(void) 51armpmu_get_pmu_id(void)
97{ 52{
98 int id = -ENODEV; 53 int id = -ENODEV;
99 54
100 if (armpmu != NULL) 55 if (cpu_pmu != NULL)
101 id = armpmu->id; 56 id = cpu_pmu->id;
102 57
103 return id; 58 return id;
104} 59}
@@ -109,8 +64,8 @@ armpmu_get_max_events(void)
109{ 64{
110 int max_events = 0; 65 int max_events = 0;
111 66
112 if (armpmu != NULL) 67 if (cpu_pmu != NULL)
113 max_events = armpmu->num_events; 68 max_events = cpu_pmu->num_events;
114 69
115 return max_events; 70 return max_events;
116} 71}
@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
130#define CACHE_OP_UNSUPPORTED 0xFFFF 85#define CACHE_OP_UNSUPPORTED 0xFFFF
131 86
132static int 87static int
133armpmu_map_cache_event(u64 config) 88armpmu_map_cache_event(const unsigned (*cache_map)
89 [PERF_COUNT_HW_CACHE_MAX]
90 [PERF_COUNT_HW_CACHE_OP_MAX]
91 [PERF_COUNT_HW_CACHE_RESULT_MAX],
92 u64 config)
134{ 93{
135 unsigned int cache_type, cache_op, cache_result, ret; 94 unsigned int cache_type, cache_op, cache_result, ret;
136 95
@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config)
146 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 105 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
147 return -EINVAL; 106 return -EINVAL;
148 107
149 ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result]; 108 ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
150 109
151 if (ret == CACHE_OP_UNSUPPORTED) 110 if (ret == CACHE_OP_UNSUPPORTED)
152 return -ENOENT; 111 return -ENOENT;
@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config)
155} 114}
156 115
157static int 116static int
158armpmu_map_event(u64 config) 117armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
159{ 118{
160 int mapping = (*armpmu->event_map)[config]; 119 int mapping = (*event_map)[config];
161 return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping; 120 return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
162} 121}
163 122
164static int 123static int
165armpmu_map_raw_event(u64 config) 124armpmu_map_raw_event(u32 raw_event_mask, u64 config)
166{ 125{
167 return (int)(config & armpmu->raw_event_mask); 126 return (int)(config & raw_event_mask);
168} 127}
169 128
170static int 129static int map_cpu_event(struct perf_event *event,
130 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
131 const unsigned (*cache_map)
132 [PERF_COUNT_HW_CACHE_MAX]
133 [PERF_COUNT_HW_CACHE_OP_MAX]
134 [PERF_COUNT_HW_CACHE_RESULT_MAX],
135 u32 raw_event_mask)
136{
137 u64 config = event->attr.config;
138
139 switch (event->attr.type) {
140 case PERF_TYPE_HARDWARE:
141 return armpmu_map_event(event_map, config);
142 case PERF_TYPE_HW_CACHE:
143 return armpmu_map_cache_event(cache_map, config);
144 case PERF_TYPE_RAW:
145 return armpmu_map_raw_event(raw_event_mask, config);
146 }
147
148 return -ENOENT;
149}
150
151int
171armpmu_event_set_period(struct perf_event *event, 152armpmu_event_set_period(struct perf_event *event,
172 struct hw_perf_event *hwc, 153 struct hw_perf_event *hwc,
173 int idx) 154 int idx)
174{ 155{
156 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
175 s64 left = local64_read(&hwc->period_left); 157 s64 left = local64_read(&hwc->period_left);
176 s64 period = hwc->sample_period; 158 s64 period = hwc->sample_period;
177 int ret = 0; 159 int ret = 0;
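/*
 * A hedged sketch of how a PMU backend might wire up the new
 * map_cpu_event() above; the armv6_* names are assumptions modelled on
 * the ARMv6 backend, not quoted from this hunk:
 *
 *	static int armv6_map_event(struct perf_event *event)
 *	{
 *		return map_cpu_event(event, &armv6_perf_map,
 *				     &armv6_perf_cache_map, 0xFF);
 *	}
 */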
@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event,
202 return ret; 184 return ret;
203} 185}
204 186
205static u64 187u64
206armpmu_event_update(struct perf_event *event, 188armpmu_event_update(struct perf_event *event,
207 struct hw_perf_event *hwc, 189 struct hw_perf_event *hwc,
208 int idx, int overflow) 190 int idx, int overflow)
209{ 191{
192 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
210 u64 delta, prev_raw_count, new_raw_count; 193 u64 delta, prev_raw_count, new_raw_count;
211 194
212again: 195again:
@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event)
246static void 229static void
247armpmu_stop(struct perf_event *event, int flags) 230armpmu_stop(struct perf_event *event, int flags)
248{ 231{
232 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
249 struct hw_perf_event *hwc = &event->hw; 233 struct hw_perf_event *hwc = &event->hw;
250 234
251 if (!armpmu)
252 return;
253
254 /* 235 /*
255 * ARM pmu always has to update the counter, so ignore 236 * ARM pmu always has to update the counter, so ignore
256 * PERF_EF_UPDATE, see comments in armpmu_start(). 237 * PERF_EF_UPDATE, see comments in armpmu_start().
@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags)
266static void 247static void
267armpmu_start(struct perf_event *event, int flags) 248armpmu_start(struct perf_event *event, int flags)
268{ 249{
250 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
269 struct hw_perf_event *hwc = &event->hw; 251 struct hw_perf_event *hwc = &event->hw;
270 252
271 if (!armpmu)
272 return;
273
274 /* 253 /*
275 * ARM pmu always has to reprogram the period, so ignore 254 * ARM pmu always has to reprogram the period, so ignore
276 * PERF_EF_RELOAD, see the comment below. 255 * PERF_EF_RELOAD, see the comment below.
@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags)
293static void 272static void
294armpmu_del(struct perf_event *event, int flags) 273armpmu_del(struct perf_event *event, int flags)
295{ 274{
296 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 275 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
276 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
297 struct hw_perf_event *hwc = &event->hw; 277 struct hw_perf_event *hwc = &event->hw;
298 int idx = hwc->idx; 278 int idx = hwc->idx;
299 279
300 WARN_ON(idx < 0); 280 WARN_ON(idx < 0);
301 281
302 clear_bit(idx, cpuc->active_mask);
303 armpmu_stop(event, PERF_EF_UPDATE); 282 armpmu_stop(event, PERF_EF_UPDATE);
304 cpuc->events[idx] = NULL; 283 hw_events->events[idx] = NULL;
305 clear_bit(idx, cpuc->used_mask); 284 clear_bit(idx, hw_events->used_mask);
306 285
307 perf_event_update_userpage(event); 286 perf_event_update_userpage(event);
308} 287}
@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags)
310static int 289static int
311armpmu_add(struct perf_event *event, int flags) 290armpmu_add(struct perf_event *event, int flags)
312{ 291{
313 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 292 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
293 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
314 struct hw_perf_event *hwc = &event->hw; 294 struct hw_perf_event *hwc = &event->hw;
315 int idx; 295 int idx;
316 int err = 0; 296 int err = 0;
@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags)
318 perf_pmu_disable(event->pmu); 298 perf_pmu_disable(event->pmu);
319 299
320 /* If we don't have a space for the counter then finish early. */ 300 /* If we don't have a space for the counter then finish early. */
321 idx = armpmu->get_event_idx(cpuc, hwc); 301 idx = armpmu->get_event_idx(hw_events, hwc);
322 if (idx < 0) { 302 if (idx < 0) {
323 err = idx; 303 err = idx;
324 goto out; 304 goto out;
@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags)
330 */ 310 */
331 event->hw.idx = idx; 311 event->hw.idx = idx;
332 armpmu->disable(hwc, idx); 312 armpmu->disable(hwc, idx);
333 cpuc->events[idx] = event; 313 hw_events->events[idx] = event;
334 set_bit(idx, cpuc->active_mask);
335 314
336 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 315 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
337 if (flags & PERF_EF_START) 316 if (flags & PERF_EF_START)
@@ -345,25 +324,25 @@ out:
345 return err; 324 return err;
346} 325}
347 326
348static struct pmu pmu;
349
350static int 327static int
351validate_event(struct cpu_hw_events *cpuc, 328validate_event(struct pmu_hw_events *hw_events,
352 struct perf_event *event) 329 struct perf_event *event)
353{ 330{
331 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
354 struct hw_perf_event fake_event = event->hw; 332 struct hw_perf_event fake_event = event->hw;
333 struct pmu *leader_pmu = event->group_leader->pmu;
355 334
356 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) 335 if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
357 return 1; 336 return 1;
358 337
359 return armpmu->get_event_idx(cpuc, &fake_event) >= 0; 338 return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
360} 339}
361 340
362static int 341static int
363validate_group(struct perf_event *event) 342validate_group(struct perf_event *event)
364{ 343{
365 struct perf_event *sibling, *leader = event->group_leader; 344 struct perf_event *sibling, *leader = event->group_leader;
366 struct cpu_hw_events fake_pmu; 345 struct pmu_hw_events fake_pmu;
367 346
368 memset(&fake_pmu, 0, sizeof(fake_pmu)); 347 memset(&fake_pmu, 0, sizeof(fake_pmu));
369 348
@@ -383,110 +362,119 @@ validate_group(struct perf_event *event)
383 362
384static irqreturn_t armpmu_platform_irq(int irq, void *dev) 363static irqreturn_t armpmu_platform_irq(int irq, void *dev)
385{ 364{
386 struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev); 365 struct arm_pmu *armpmu = (struct arm_pmu *) dev;
366 struct platform_device *plat_device = armpmu->plat_device;
367 struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);
387 368
388 return plat->handle_irq(irq, dev, armpmu->handle_irq); 369 return plat->handle_irq(irq, dev, armpmu->handle_irq);
389} 370}
390 371
372static void
373armpmu_release_hardware(struct arm_pmu *armpmu)
374{
375 int i, irq, irqs;
376 struct platform_device *pmu_device = armpmu->plat_device;
377
378 irqs = min(pmu_device->num_resources, num_possible_cpus());
379
380 for (i = 0; i < irqs; ++i) {
381 if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
382 continue;
383 irq = platform_get_irq(pmu_device, i);
384 if (irq >= 0)
385 free_irq(irq, armpmu);
386 }
387
388 release_pmu(armpmu->type);
389}
390
391static int 391static int
392armpmu_reserve_hardware(void) 392armpmu_reserve_hardware(struct arm_pmu *armpmu)
393{ 393{
394 struct arm_pmu_platdata *plat; 394 struct arm_pmu_platdata *plat;
395 irq_handler_t handle_irq; 395 irq_handler_t handle_irq;
396 int i, err = -ENODEV, irq; 396 int i, err, irq, irqs;
397 struct platform_device *pmu_device = armpmu->plat_device;
397 398
398 pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU); 399 err = reserve_pmu(armpmu->type);
399 if (IS_ERR(pmu_device)) { 400 if (err) {
400 pr_warning("unable to reserve pmu\n"); 401 pr_warning("unable to reserve pmu\n");
401 return PTR_ERR(pmu_device); 402 return err;
402 } 403 }
403 404
404 init_pmu(ARM_PMU_DEVICE_CPU);
405
406 plat = dev_get_platdata(&pmu_device->dev); 405 plat = dev_get_platdata(&pmu_device->dev);
407 if (plat && plat->handle_irq) 406 if (plat && plat->handle_irq)
408 handle_irq = armpmu_platform_irq; 407 handle_irq = armpmu_platform_irq;
409 else 408 else
410 handle_irq = armpmu->handle_irq; 409 handle_irq = armpmu->handle_irq;
411 410
412 if (pmu_device->num_resources < 1) { 411 irqs = min(pmu_device->num_resources, num_possible_cpus());
412 if (irqs < 1) {
413 pr_err("no irqs for PMUs defined\n"); 413 pr_err("no irqs for PMUs defined\n");
414 return -ENODEV; 414 return -ENODEV;
415 } 415 }
416 416
417 for (i = 0; i < pmu_device->num_resources; ++i) { 417 for (i = 0; i < irqs; ++i) {
418 err = 0;
418 irq = platform_get_irq(pmu_device, i); 419 irq = platform_get_irq(pmu_device, i);
419 if (irq < 0) 420 if (irq < 0)
420 continue; 421 continue;
421 422
423 /*
424 * If we have a single PMU interrupt that we can't shift,
425 * assume that we're running on a uniprocessor machine and
426 * continue. Otherwise, continue without this interrupt.
427 */
428 if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
429 pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
430 irq, i);
431 continue;
432 }
433
422 err = request_irq(irq, handle_irq, 434 err = request_irq(irq, handle_irq,
423 IRQF_DISABLED | IRQF_NOBALANCING, 435 IRQF_DISABLED | IRQF_NOBALANCING,
424 "armpmu", NULL); 436 "arm-pmu", armpmu);
425 if (err) { 437 if (err) {
426 pr_warning("unable to request IRQ%d for ARM perf " 438 pr_err("unable to request IRQ%d for ARM PMU counters\n",
427 "counters\n", irq); 439 irq);
428 break; 440 armpmu_release_hardware(armpmu);
441 return err;
429 } 442 }
430 }
431 443
432 if (err) { 444 cpumask_set_cpu(i, &armpmu->active_irqs);
433 for (i = i - 1; i >= 0; --i) {
434 irq = platform_get_irq(pmu_device, i);
435 if (irq >= 0)
436 free_irq(irq, NULL);
437 }
438 release_pmu(ARM_PMU_DEVICE_CPU);
439 pmu_device = NULL;
440 } 445 }
441 446
442 return err; 447 return 0;
443} 448}
444 449
445static void 450static void
446armpmu_release_hardware(void) 451hw_perf_event_destroy(struct perf_event *event)
447{ 452{
448 int i, irq; 453 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
454 atomic_t *active_events = &armpmu->active_events;
455 struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;
449 456
450 for (i = pmu_device->num_resources - 1; i >= 0; --i) { 457 if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
451 irq = platform_get_irq(pmu_device, i); 458 armpmu_release_hardware(armpmu);
452 if (irq >= 0) 459 mutex_unlock(pmu_reserve_mutex);
453 free_irq(irq, NULL);
454 } 460 }
455 armpmu->stop();
456
457 release_pmu(ARM_PMU_DEVICE_CPU);
458 pmu_device = NULL;
459} 461}
460 462
461static atomic_t active_events = ATOMIC_INIT(0); 463static int
462static DEFINE_MUTEX(pmu_reserve_mutex); 464event_requires_mode_exclusion(struct perf_event_attr *attr)
463
464static void
465hw_perf_event_destroy(struct perf_event *event)
466{ 465{
467 if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { 466 return attr->exclude_idle || attr->exclude_user ||
468 armpmu_release_hardware(); 467 attr->exclude_kernel || attr->exclude_hv;
469 mutex_unlock(&pmu_reserve_mutex);
470 }
471} 468}
472 469
473static int 470static int
474__hw_perf_event_init(struct perf_event *event) 471__hw_perf_event_init(struct perf_event *event)
475{ 472{
473 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
476 struct hw_perf_event *hwc = &event->hw; 474 struct hw_perf_event *hwc = &event->hw;
477 int mapping, err; 475 int mapping, err;
478 476
479 /* Decode the generic type into an ARM event identifier. */ 477 mapping = armpmu->map_event(event);
480 if (PERF_TYPE_HARDWARE == event->attr.type) {
481 mapping = armpmu_map_event(event->attr.config);
482 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
483 mapping = armpmu_map_cache_event(event->attr.config);
484 } else if (PERF_TYPE_RAW == event->attr.type) {
485 mapping = armpmu_map_raw_event(event->attr.config);
486 } else {
487 pr_debug("event type %x not supported\n", event->attr.type);
488 return -EOPNOTSUPP;
489 }
490 478
491 if (mapping < 0) { 479 if (mapping < 0) {
492 pr_debug("event %x:%llx not supported\n", event->attr.type, 480 pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -495,34 +483,31 @@ __hw_perf_event_init(struct perf_event *event)
495 } 483 }
496 484
497 /* 485 /*
486 * We don't assign an index until we actually place the event onto
487 * hardware. Use -1 to signify that we haven't decided where to put it
488 * yet. For SMP systems, each core has its own PMU so we can't do any
489 * clever allocation or constraints checking at this point.
490 */
491 hwc->idx = -1;
492 hwc->config_base = 0;
493 hwc->config = 0;
494 hwc->event_base = 0;
495
496 /*
498 * Check whether we need to exclude the counter from certain modes. 497 * Check whether we need to exclude the counter from certain modes.
499 * The ARM performance counters are on all of the time so if someone
500 * has asked us for some excludes then we have to fail.
501 */ 498 */
502 if (event->attr.exclude_kernel || event->attr.exclude_user || 499 if ((!armpmu->set_event_filter ||
503 event->attr.exclude_hv || event->attr.exclude_idle) { 500 armpmu->set_event_filter(hwc, &event->attr)) &&
501 event_requires_mode_exclusion(&event->attr)) {
504 pr_debug("ARM performance counters do not support " 502 pr_debug("ARM performance counters do not support "
505 "mode exclusion\n"); 503 "mode exclusion\n");
506 return -EPERM; 504 return -EPERM;
507 } 505 }
508 506
509 /* 507 /*
510 * We don't assign an index until we actually place the event onto 508 * Store the event encoding into the config_base field.
511 * hardware. Use -1 to signify that we haven't decided where to put it
512 * yet. For SMP systems, each core has it's own PMU so we can't do any
513 * clever allocation or constraints checking at this point.
514 */ 509 */
515 hwc->idx = -1; 510 hwc->config_base |= (unsigned long)mapping;
516
517 /*
518 * Store the event encoding into the config_base field. config and
519 * event_base are unused as the only 2 things we need to know are
520 * the event mapping and the counter to use. The counter to use is
521 * also the indx and the config_base is the event type.
522 */
523 hwc->config_base = (unsigned long)mapping;
524 hwc->config = 0;
525 hwc->event_base = 0;
526 511
527 if (!hwc->sample_period) { 512 if (!hwc->sample_period) {
528 hwc->sample_period = armpmu->max_period; 513 hwc->sample_period = armpmu->max_period;
@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event)
542 527
543static int armpmu_event_init(struct perf_event *event) 528static int armpmu_event_init(struct perf_event *event)
544{ 529{
530 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
545 int err = 0; 531 int err = 0;
532 atomic_t *active_events = &armpmu->active_events;
546 533
547 switch (event->attr.type) { 534 if (armpmu->map_event(event) == -ENOENT)
548 case PERF_TYPE_RAW:
549 case PERF_TYPE_HARDWARE:
550 case PERF_TYPE_HW_CACHE:
551 break;
552
553 default:
554 return -ENOENT; 535 return -ENOENT;
555 }
556
557 if (!armpmu)
558 return -ENODEV;
559 536
560 event->destroy = hw_perf_event_destroy; 537 event->destroy = hw_perf_event_destroy;
561 538
562 if (!atomic_inc_not_zero(&active_events)) { 539 if (!atomic_inc_not_zero(active_events)) {
563 mutex_lock(&pmu_reserve_mutex); 540 mutex_lock(&armpmu->reserve_mutex);
564 if (atomic_read(&active_events) == 0) { 541 if (atomic_read(active_events) == 0)
565 err = armpmu_reserve_hardware(); 542 err = armpmu_reserve_hardware(armpmu);
566 }
567 543
568 if (!err) 544 if (!err)
569 atomic_inc(&active_events); 545 atomic_inc(active_events);
570 mutex_unlock(&pmu_reserve_mutex); 546 mutex_unlock(&armpmu->reserve_mutex);
571 } 547 }
572 548
573 if (err) 549 if (err)
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event)
582 558
583static void armpmu_enable(struct pmu *pmu) 559static void armpmu_enable(struct pmu *pmu)
584{ 560{
585 /* Enable all of the perf events on hardware. */ 561 struct arm_pmu *armpmu = to_arm_pmu(pmu);
586 int idx, enabled = 0; 562 struct pmu_hw_events *hw_events = armpmu->get_hw_events();
587 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 563 int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
588
589 if (!armpmu)
590 return;
591
592 for (idx = 0; idx <= armpmu->num_events; ++idx) {
593 struct perf_event *event = cpuc->events[idx];
594
595 if (!event)
596 continue;
597
598 armpmu->enable(&event->hw, idx);
599 enabled = 1;
600 }
601 564
602 if (enabled) 565 if (enabled)
603 armpmu->start(); 566 armpmu->start();
@@ -605,20 +568,32 @@ static void armpmu_enable(struct pmu *pmu)
605 568
606static void armpmu_disable(struct pmu *pmu) 569static void armpmu_disable(struct pmu *pmu)
607{ 570{
608 if (armpmu) 571 struct arm_pmu *armpmu = to_arm_pmu(pmu);
609 armpmu->stop(); 572 armpmu->stop();
610} 573}
611 574
612static struct pmu pmu = { 575static void __init armpmu_init(struct arm_pmu *armpmu)
613 .pmu_enable = armpmu_enable, 576{
614 .pmu_disable = armpmu_disable, 577 atomic_set(&armpmu->active_events, 0);
615 .event_init = armpmu_event_init, 578 mutex_init(&armpmu->reserve_mutex);
616 .add = armpmu_add, 579
617 .del = armpmu_del, 580 armpmu->pmu = (struct pmu) {
618 .start = armpmu_start, 581 .pmu_enable = armpmu_enable,
619 .stop = armpmu_stop, 582 .pmu_disable = armpmu_disable,
620 .read = armpmu_read, 583 .event_init = armpmu_event_init,
621}; 584 .add = armpmu_add,
585 .del = armpmu_del,
586 .start = armpmu_start,
587 .stop = armpmu_stop,
588 .read = armpmu_read,
589 };
590}
591
592int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type)
593{
594 armpmu_init(armpmu);
595 return perf_pmu_register(&armpmu->pmu, name, type);
596}
622 597
623/* Include the PMU-specific implementations. */ 598/* Include the PMU-specific implementations. */
624#include "perf_event_xscale.c" 599#include "perf_event_xscale.c"
@@ -630,14 +605,72 @@ static struct pmu pmu = {
630 * This requires SMP to be available, so exists as a separate initcall. 605 * This requires SMP to be available, so exists as a separate initcall.
631 */ 606 */
632static int __init 607static int __init
633armpmu_reset(void) 608cpu_pmu_reset(void)
609{
610 if (cpu_pmu && cpu_pmu->reset)
611 return on_each_cpu(cpu_pmu->reset, NULL, 1);
612 return 0;
613}
614arch_initcall(cpu_pmu_reset);
615
616/*
617 * PMU platform driver and devicetree bindings.
618 */
619static struct of_device_id armpmu_of_device_ids[] = {
620 {.compatible = "arm,cortex-a9-pmu"},
621 {.compatible = "arm,cortex-a8-pmu"},
622 {.compatible = "arm,arm1136-pmu"},
623 {.compatible = "arm,arm1176-pmu"},
624 {},
625};
626
627static struct platform_device_id armpmu_plat_device_ids[] = {
628 {.name = "arm-pmu"},
629 {},
630};
631
632static int __devinit armpmu_device_probe(struct platform_device *pdev)
634{ 633{
635 if (armpmu && armpmu->reset) 634 cpu_pmu->plat_device = pdev;
636 return on_each_cpu(armpmu->reset, NULL, 1);
637 return 0; 635 return 0;
638} 636}
639arch_initcall(armpmu_reset);
640 637
638static struct platform_driver armpmu_driver = {
639 .driver = {
640 .name = "arm-pmu",
641 .of_match_table = armpmu_of_device_ids,
642 },
643 .probe = armpmu_device_probe,
644 .id_table = armpmu_plat_device_ids,
645};
646
647static int __init register_pmu_driver(void)
648{
649 return platform_driver_register(&armpmu_driver);
650}
651device_initcall(register_pmu_driver);
652
653static struct pmu_hw_events *armpmu_get_cpu_events(void)
654{
655 return &__get_cpu_var(cpu_hw_events);
656}
657
658static void __init cpu_pmu_init(struct arm_pmu *armpmu)
659{
660 int cpu;
661 for_each_possible_cpu(cpu) {
662 struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
663 events->events = per_cpu(hw_events, cpu);
664 events->used_mask = per_cpu(used_mask, cpu);
665 raw_spin_lock_init(&events->pmu_lock);
666 }
667 armpmu->get_hw_events = armpmu_get_cpu_events;
668 armpmu->type = ARM_PMU_DEVICE_CPU;
669}
670
671/*
672 * CPU PMU identification and registration.
673 */
641static int __init 674static int __init
642init_hw_perf_events(void) 675init_hw_perf_events(void)
643{ 676{
@@ -651,22 +684,22 @@ init_hw_perf_events(void)
651 case 0xB360: /* ARM1136 */ 684 case 0xB360: /* ARM1136 */
652 case 0xB560: /* ARM1156 */ 685 case 0xB560: /* ARM1156 */
653 case 0xB760: /* ARM1176 */ 686 case 0xB760: /* ARM1176 */
654 armpmu = armv6pmu_init(); 687 cpu_pmu = armv6pmu_init();
655 break; 688 break;
656 case 0xB020: /* ARM11mpcore */ 689 case 0xB020: /* ARM11mpcore */
657 armpmu = armv6mpcore_pmu_init(); 690 cpu_pmu = armv6mpcore_pmu_init();
658 break; 691 break;
659 case 0xC080: /* Cortex-A8 */ 692 case 0xC080: /* Cortex-A8 */
660 armpmu = armv7_a8_pmu_init(); 693 cpu_pmu = armv7_a8_pmu_init();
661 break; 694 break;
662 case 0xC090: /* Cortex-A9 */ 695 case 0xC090: /* Cortex-A9 */
663 armpmu = armv7_a9_pmu_init(); 696 cpu_pmu = armv7_a9_pmu_init();
664 break; 697 break;
665 case 0xC050: /* Cortex-A5 */ 698 case 0xC050: /* Cortex-A5 */
666 armpmu = armv7_a5_pmu_init(); 699 cpu_pmu = armv7_a5_pmu_init();
667 break; 700 break;
668 case 0xC0F0: /* Cortex-A15 */ 701 case 0xC0F0: /* Cortex-A15 */
669 armpmu = armv7_a15_pmu_init(); 702 cpu_pmu = armv7_a15_pmu_init();
670 break; 703 break;
671 } 704 }
672 /* Intel CPUs [xscale]. */ 705 /* Intel CPUs [xscale]. */
@@ -674,23 +707,23 @@ init_hw_perf_events(void)
674 part_number = (cpuid >> 13) & 0x7; 707 part_number = (cpuid >> 13) & 0x7;
675 switch (part_number) { 708 switch (part_number) {
676 case 1: 709 case 1:
677 armpmu = xscale1pmu_init(); 710 cpu_pmu = xscale1pmu_init();
678 break; 711 break;
679 case 2: 712 case 2:
680 armpmu = xscale2pmu_init(); 713 cpu_pmu = xscale2pmu_init();
681 break; 714 break;
682 } 715 }
683 } 716 }
684 717
685 if (armpmu) { 718 if (cpu_pmu) {
686 pr_info("enabled with %s PMU driver, %d counters available\n", 719 pr_info("enabled with %s PMU driver, %d counters available\n",
687 armpmu->name, armpmu->num_events); 720 cpu_pmu->name, cpu_pmu->num_events);
721 cpu_pmu_init(cpu_pmu);
722 armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
688 } else { 723 } else {
689 pr_info("no hardware support available\n"); 724 pr_info("no hardware support available\n");
690 } 725 }
691 726
692 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
693
694 return 0; 727 return 0;
695} 728}
696early_initcall(init_hw_perf_events); 729early_initcall(init_hw_perf_events);
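/*
 * The recurring pattern in the hunks above -- to_arm_pmu() plus
 * get_hw_events() -- replaces the old file-scope globals. A minimal
 * sketch of how any struct pmu callback recovers its state (the
 * function name is illustrative):
 *
 *	static void example_pmu_enable(struct pmu *pmu)
 *	{
 *		struct arm_pmu *armpmu = to_arm_pmu(pmu);
 *		struct pmu_hw_events *hw_events = armpmu->get_hw_events();
 *
 *		// per-CPU events[] and used_mask now live in hw_events
 *	}
 */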
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index dd7f3b9f4cb3..e63d8115c01b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -54,7 +54,7 @@ enum armv6_perf_types {
54}; 54};
55 55
56enum armv6_counters { 56enum armv6_counters {
57 ARMV6_CYCLE_COUNTER = 1, 57 ARMV6_CYCLE_COUNTER = 0,
58 ARMV6_COUNTER0, 58 ARMV6_COUNTER0,
59 ARMV6_COUNTER1, 59 ARMV6_COUNTER1,
60}; 60};
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
433 int idx) 433 int idx)
434{ 434{
435 unsigned long val, mask, evt, flags; 435 unsigned long val, mask, evt, flags;
436 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
436 437
437 if (ARMV6_CYCLE_COUNTER == idx) { 438 if (ARMV6_CYCLE_COUNTER == idx) {
438 mask = 0; 439 mask = 0;
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
454 * Mask out the current event and set the counter to count the event 455 * Mask out the current event and set the counter to count the event
455 * that we're interested in. 456 * that we're interested in.
456 */ 457 */
457 raw_spin_lock_irqsave(&pmu_lock, flags); 458 raw_spin_lock_irqsave(&events->pmu_lock, flags);
458 val = armv6_pmcr_read(); 459 val = armv6_pmcr_read();
459 val &= ~mask; 460 val &= ~mask;
460 val |= evt; 461 val |= evt;
461 armv6_pmcr_write(val); 462 armv6_pmcr_write(val);
462 raw_spin_unlock_irqrestore(&pmu_lock, flags); 463 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
464}
465
466static int counter_is_active(unsigned long pmcr, int idx)
467{
468 unsigned long mask = 0;
469 if (idx == ARMV6_CYCLE_COUNTER)
470 mask = ARMV6_PMCR_CCOUNT_IEN;
471 else if (idx == ARMV6_COUNTER0)
472 mask = ARMV6_PMCR_COUNT0_IEN;
473 else if (idx == ARMV6_COUNTER1)
474 mask = ARMV6_PMCR_COUNT1_IEN;
475
476 if (mask)
477 return pmcr & mask;
478
479 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
480 return 0;
463} 481}
464 482
465static irqreturn_t 483static irqreturn_t
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num,
468{ 486{
469 unsigned long pmcr = armv6_pmcr_read(); 487 unsigned long pmcr = armv6_pmcr_read();
470 struct perf_sample_data data; 488 struct perf_sample_data data;
471 struct cpu_hw_events *cpuc; 489 struct pmu_hw_events *cpuc;
472 struct pt_regs *regs; 490 struct pt_regs *regs;
473 int idx; 491 int idx;
474 492
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num,
487 perf_sample_data_init(&data, 0); 505 perf_sample_data_init(&data, 0);
488 506
489 cpuc = &__get_cpu_var(cpu_hw_events); 507 cpuc = &__get_cpu_var(cpu_hw_events);
490 for (idx = 0; idx <= armpmu->num_events; ++idx) { 508 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
491 struct perf_event *event = cpuc->events[idx]; 509 struct perf_event *event = cpuc->events[idx];
492 struct hw_perf_event *hwc; 510 struct hw_perf_event *hwc;
493 511
494 if (!test_bit(idx, cpuc->active_mask)) 512 if (!counter_is_active(pmcr, idx))
495 continue; 513 continue;
496 514
497 /* 515 /*
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num,
508 continue; 526 continue;
509 527
510 if (perf_event_overflow(event, &data, regs)) 528 if (perf_event_overflow(event, &data, regs))
511 armpmu->disable(hwc, idx); 529 cpu_pmu->disable(hwc, idx);
512 } 530 }
513 531
514 /* 532 /*
@@ -527,28 +545,30 @@ static void
527armv6pmu_start(void) 545armv6pmu_start(void)
528{ 546{
529 unsigned long flags, val; 547 unsigned long flags, val;
548 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
530 549
531 raw_spin_lock_irqsave(&pmu_lock, flags); 550 raw_spin_lock_irqsave(&events->pmu_lock, flags);
532 val = armv6_pmcr_read(); 551 val = armv6_pmcr_read();
533 val |= ARMV6_PMCR_ENABLE; 552 val |= ARMV6_PMCR_ENABLE;
534 armv6_pmcr_write(val); 553 armv6_pmcr_write(val);
535 raw_spin_unlock_irqrestore(&pmu_lock, flags); 554 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
536} 555}
537 556
538static void 557static void
539armv6pmu_stop(void) 558armv6pmu_stop(void)
540{ 559{
541 unsigned long flags, val; 560 unsigned long flags, val;
561 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
542 562
543 raw_spin_lock_irqsave(&pmu_lock, flags); 563 raw_spin_lock_irqsave(&events->pmu_lock, flags);
544 val = armv6_pmcr_read(); 564 val = armv6_pmcr_read();
545 val &= ~ARMV6_PMCR_ENABLE; 565 val &= ~ARMV6_PMCR_ENABLE;
546 armv6_pmcr_write(val); 566 armv6_pmcr_write(val);
547 raw_spin_unlock_irqrestore(&pmu_lock, flags); 567 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
548} 568}
549 569
550static int 570static int
551armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, 571armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
552 struct hw_perf_event *event) 572 struct hw_perf_event *event)
553{ 573{
554 /* Always place a cycle counter into the cycle counter. */ 574 /* Always place a cycle counter into the cycle counter. */
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
578 int idx) 598 int idx)
579{ 599{
580 unsigned long val, mask, evt, flags; 600 unsigned long val, mask, evt, flags;
601 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
581 602
582 if (ARMV6_CYCLE_COUNTER == idx) { 603 if (ARMV6_CYCLE_COUNTER == idx) {
583 mask = ARMV6_PMCR_CCOUNT_IEN; 604 mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
598 * of ETM bus signal assertion cycles. The external reporting should 619 * of ETM bus signal assertion cycles. The external reporting should
599 * be disabled and so this should never increment. 620 * be disabled and so this should never increment.
600 */ 621 */
601 raw_spin_lock_irqsave(&pmu_lock, flags); 622 raw_spin_lock_irqsave(&events->pmu_lock, flags);
602 val = armv6_pmcr_read(); 623 val = armv6_pmcr_read();
603 val &= ~mask; 624 val &= ~mask;
604 val |= evt; 625 val |= evt;
605 armv6_pmcr_write(val); 626 armv6_pmcr_write(val);
606 raw_spin_unlock_irqrestore(&pmu_lock, flags); 627 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
607} 628}
608 629
609static void 630static void
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
611 int idx) 632 int idx)
612{ 633{
613 unsigned long val, mask, flags, evt = 0; 634 unsigned long val, mask, flags, evt = 0;
635 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
614 636
615 if (ARMV6_CYCLE_COUNTER == idx) { 637 if (ARMV6_CYCLE_COUNTER == idx) {
616 mask = ARMV6_PMCR_CCOUNT_IEN; 638 mask = ARMV6_PMCR_CCOUNT_IEN;
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
627 * Unlike UP ARMv6, we don't have a way of stopping the counters. We 649 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
628 * simply disable the interrupt reporting. 650 * simply disable the interrupt reporting.
629 */ 651 */
630 raw_spin_lock_irqsave(&pmu_lock, flags); 652 raw_spin_lock_irqsave(&events->pmu_lock, flags);
631 val = armv6_pmcr_read(); 653 val = armv6_pmcr_read();
632 val &= ~mask; 654 val &= ~mask;
633 val |= evt; 655 val |= evt;
634 armv6_pmcr_write(val); 656 armv6_pmcr_write(val);
635 raw_spin_unlock_irqrestore(&pmu_lock, flags); 657 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
658}
659
660static int armv6_map_event(struct perf_event *event)
661{
662 return map_cpu_event(event, &armv6_perf_map,
663 &armv6_perf_cache_map, 0xFF);
636} 664}
637 665
638static const struct arm_pmu armv6pmu = { 666static struct arm_pmu armv6pmu = {
639 .id = ARM_PERF_PMU_ID_V6, 667 .id = ARM_PERF_PMU_ID_V6,
640 .name = "v6", 668 .name = "v6",
641 .handle_irq = armv6pmu_handle_irq, 669 .handle_irq = armv6pmu_handle_irq,
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = {
646 .get_event_idx = armv6pmu_get_event_idx, 674 .get_event_idx = armv6pmu_get_event_idx,
647 .start = armv6pmu_start, 675 .start = armv6pmu_start,
648 .stop = armv6pmu_stop, 676 .stop = armv6pmu_stop,
649 .cache_map = &armv6_perf_cache_map, 677 .map_event = armv6_map_event,
650 .event_map = &armv6_perf_map,
651 .raw_event_mask = 0xFF,
652 .num_events = 3, 678 .num_events = 3,
653 .max_period = (1LLU << 32) - 1, 679 .max_period = (1LLU << 32) - 1,
654}; 680};
655 681
656static const struct arm_pmu *__init armv6pmu_init(void) 682static struct arm_pmu *__init armv6pmu_init(void)
657{ 683{
658 return &armv6pmu; 684 return &armv6pmu;
659} 685}
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void)
665 * disable the interrupt reporting and update the event. When unthrottling we 691 * disable the interrupt reporting and update the event. When unthrottling we
666 * reset the period and enable the interrupt reporting. 692 * reset the period and enable the interrupt reporting.
667 */ 693 */
668static const struct arm_pmu armv6mpcore_pmu = { 694
695static int armv6mpcore_map_event(struct perf_event *event)
696{
697 return map_cpu_event(event, &armv6mpcore_perf_map,
698 &armv6mpcore_perf_cache_map, 0xFF);
699}
700
701static struct arm_pmu armv6mpcore_pmu = {
669 .id = ARM_PERF_PMU_ID_V6MP, 702 .id = ARM_PERF_PMU_ID_V6MP,
670 .name = "v6mpcore", 703 .name = "v6mpcore",
671 .handle_irq = armv6pmu_handle_irq, 704 .handle_irq = armv6pmu_handle_irq,
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = {
676 .get_event_idx = armv6pmu_get_event_idx, 709 .get_event_idx = armv6pmu_get_event_idx,
677 .start = armv6pmu_start, 710 .start = armv6pmu_start,
678 .stop = armv6pmu_stop, 711 .stop = armv6pmu_stop,
679 .cache_map = &armv6mpcore_perf_cache_map, 712 .map_event = armv6mpcore_map_event,
680 .event_map = &armv6mpcore_perf_map,
681 .raw_event_mask = 0xFF,
682 .num_events = 3, 713 .num_events = 3,
683 .max_period = (1LLU << 32) - 1, 714 .max_period = (1LLU << 32) - 1,
684}; 715};
685 716
686static const struct arm_pmu *__init armv6mpcore_pmu_init(void) 717static struct arm_pmu *__init armv6mpcore_pmu_init(void)
687{ 718{
688 return &armv6mpcore_pmu; 719 return &armv6mpcore_pmu;
689} 720}
690#else 721#else
691static const struct arm_pmu *__init armv6pmu_init(void) 722static struct arm_pmu *__init armv6pmu_init(void)
692{ 723{
693 return NULL; 724 return NULL;
694} 725}
695 726
696static const struct arm_pmu *__init armv6mpcore_pmu_init(void) 727static struct arm_pmu *__init armv6mpcore_pmu_init(void)
697{ 728{
698 return NULL; 729 return NULL;
699} 730}
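
Two threads in the perf_event_v6.c diff recur in the xscale file further down. First, ARMV6_CYCLE_COUNTER moves from 1 to 0, so the interrupt handler iterates idx < cpu_pmu->num_events instead of idx <= armpmu->num_events, and the new counter_is_active() derives a counter's liveness from the PMCR interrupt-enable bits rather than the removed cpuc->active_mask. Second, the per-PMU cache_map/event_map/raw_event_mask triple collapses into a single map_event callback. A sketch of the common map_cpu_event() helper those callbacks lean on, assuming dispatch helpers of roughly this shape exist in perf_event.c:

static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
						    [PERF_COUNT_HW_CACHE_OP_MAX]
						    [PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);	  /* assumed helper */
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config); /* assumed helper */
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config); /* assumed helper */
	}

	return -ENOENT;
}
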
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6be3e2e4d838..1ef6d0034b85 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
17 */ 17 */
18 18
19#ifdef CONFIG_CPU_V7 19#ifdef CONFIG_CPU_V7
20
21static struct arm_pmu armv7pmu;
22
20/* 23/*
21 * Common ARMv7 event types 24 * Common ARMv7 event types
22 * 25 *
@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
676}; 679};
677 680
678/* 681/*
679 * Perf Events counters 682 * Perf Events' indices
680 */ 683 */
681enum armv7_counters { 684#define ARMV7_IDX_CYCLE_COUNTER 0
682 ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ 685#define ARMV7_IDX_COUNTER0 1
683 ARMV7_COUNTER0 = 2, /* First event counter */ 686#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
684}; 687
688#define ARMV7_MAX_COUNTERS 32
689#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
685 690
686/* 691/*
687 * The cycle counter is ARMV7_CYCLE_COUNTER. 692 * ARMv7 low level PMNC access
688 * The first event counter is ARMV7_COUNTER0.
689 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
690 */ 693 */
691#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
692 694
693/* 695/*
694 * ARMv7 low level PMNC access 696 * Perf Event to low level counters mapping
695 */ 697 */
698#define ARMV7_IDX_TO_COUNTER(x) \
699 (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
696 700
697/* 701/*
698 * Per-CPU PMNC: config reg 702 * Per-CPU PMNC: config reg
@@ -708,103 +712,76 @@ enum armv7_counters {
708#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ 712#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
709 713
710/* 714/*
711 * Available counters 715 * FLAG: counters overflow flag status reg
712 */
713#define ARMV7_CNT0 0 /* First event counter */
714#define ARMV7_CCNT 31 /* Cycle counter */
715
716/* Perf Event to low level counters mapping */
717#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
718
719/*
720 * CNTENS: counters enable reg
721 */
722#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
723#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
724
725/*
726 * CNTENC: counters disable reg
727 */
728#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
729#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
730
731/*
732 * INTENS: counters overflow interrupt enable reg
733 */
734#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
735#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
736
737/*
738 * INTENC: counters overflow interrupt disable reg
739 */
740#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
741#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
742
743/*
744 * EVTSEL: Event selection reg
745 */ 716 */
746#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ 717#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
718#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
747 719
748/* 720/*
749 * SELECT: Counter selection reg 721 * PMXEVTYPER: Event selection reg
750 */ 722 */
751#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ 723#define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
724#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
752 725
753/* 726/*
754 * FLAG: counters overflow flag status reg 727 * Event filters for PMUv2
755 */ 728 */
756#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) 729#define ARMV7_EXCLUDE_PL1 (1 << 31)
757#define ARMV7_FLAG_C (1 << ARMV7_CCNT) 730#define ARMV7_EXCLUDE_USER (1 << 30)
758#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ 731#define ARMV7_INCLUDE_HYP (1 << 27)
759#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
760 732
761static inline unsigned long armv7_pmnc_read(void) 733static inline u32 armv7_pmnc_read(void)
762{ 734{
763 u32 val; 735 u32 val;
764 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); 736 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
765 return val; 737 return val;
766} 738}
767 739
768static inline void armv7_pmnc_write(unsigned long val) 740static inline void armv7_pmnc_write(u32 val)
769{ 741{
770 val &= ARMV7_PMNC_MASK; 742 val &= ARMV7_PMNC_MASK;
771 isb(); 743 isb();
772 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); 744 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
773} 745}
774 746
775static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) 747static inline int armv7_pmnc_has_overflowed(u32 pmnc)
776{ 748{
777 return pmnc & ARMV7_OVERFLOWED_MASK; 749 return pmnc & ARMV7_OVERFLOWED_MASK;
778} 750}
779 751
780static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, 752static inline int armv7_pmnc_counter_valid(int idx)
781 enum armv7_counters counter) 753{
754 return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
755}
756
757static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
782{ 758{
783 int ret = 0; 759 int ret = 0;
760 u32 counter;
784 761
785 if (counter == ARMV7_CYCLE_COUNTER) 762 if (!armv7_pmnc_counter_valid(idx)) {
786 ret = pmnc & ARMV7_FLAG_C;
787 else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
788 ret = pmnc & ARMV7_FLAG_P(counter);
789 else
790 pr_err("CPU%u checking wrong counter %d overflow status\n", 763 pr_err("CPU%u checking wrong counter %d overflow status\n",
791 smp_processor_id(), counter); 764 smp_processor_id(), idx);
765 } else {
766 counter = ARMV7_IDX_TO_COUNTER(idx);
767 ret = pmnc & BIT(counter);
768 }
792 769
793 return ret; 770 return ret;
794} 771}
795 772
796static inline int armv7_pmnc_select_counter(unsigned int idx) 773static inline int armv7_pmnc_select_counter(int idx)
797{ 774{
798 u32 val; 775 u32 counter;
799 776
800 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { 777 if (!armv7_pmnc_counter_valid(idx)) {
801 pr_err("CPU%u selecting wrong PMNC counter" 778 pr_err("CPU%u selecting wrong PMNC counter %d\n",
802 " %d\n", smp_processor_id(), idx); 779 smp_processor_id(), idx);
803 return -1; 780 return -EINVAL;
804 } 781 }
805 782
806 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; 783 counter = ARMV7_IDX_TO_COUNTER(idx);
807 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); 784 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
808 isb(); 785 isb();
809 786
810 return idx; 787 return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
812 789
813static inline u32 armv7pmu_read_counter(int idx) 790static inline u32 armv7pmu_read_counter(int idx)
814{ 791{
815 unsigned long value = 0; 792 u32 value = 0;
816 793
817 if (idx == ARMV7_CYCLE_COUNTER) 794 if (!armv7_pmnc_counter_valid(idx))
818 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
819 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
820 if (armv7_pmnc_select_counter(idx) == idx)
821 asm volatile("mrc p15, 0, %0, c9, c13, 2"
822 : "=r" (value));
823 } else
824 pr_err("CPU%u reading wrong counter %d\n", 795 pr_err("CPU%u reading wrong counter %d\n",
825 smp_processor_id(), idx); 796 smp_processor_id(), idx);
797 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
798 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
799 else if (armv7_pmnc_select_counter(idx) == idx)
800 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
826 801
827 return value; 802 return value;
828} 803}
829 804
830static inline void armv7pmu_write_counter(int idx, u32 value) 805static inline void armv7pmu_write_counter(int idx, u32 value)
831{ 806{
832 if (idx == ARMV7_CYCLE_COUNTER) 807 if (!armv7_pmnc_counter_valid(idx))
833 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
834 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
835 if (armv7_pmnc_select_counter(idx) == idx)
836 asm volatile("mcr p15, 0, %0, c9, c13, 2"
837 : : "r" (value));
838 } else
839 pr_err("CPU%u writing wrong counter %d\n", 808 pr_err("CPU%u writing wrong counter %d\n",
840 smp_processor_id(), idx); 809 smp_processor_id(), idx);
810 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
811 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
812 else if (armv7_pmnc_select_counter(idx) == idx)
813 asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
841} 814}
842 815
843static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) 816static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
844{ 817{
845 if (armv7_pmnc_select_counter(idx) == idx) { 818 if (armv7_pmnc_select_counter(idx) == idx) {
846 val &= ARMV7_EVTSEL_MASK; 819 val &= ARMV7_EVTYPE_MASK;
847 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); 820 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
848 } 821 }
849} 822}
850 823
851static inline u32 armv7_pmnc_enable_counter(unsigned int idx) 824static inline int armv7_pmnc_enable_counter(int idx)
852{ 825{
853 u32 val; 826 u32 counter;
854 827
855 if ((idx != ARMV7_CYCLE_COUNTER) && 828 if (!armv7_pmnc_counter_valid(idx)) {
856 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 829 pr_err("CPU%u enabling wrong PMNC counter %d\n",
857 pr_err("CPU%u enabling wrong PMNC counter" 830 smp_processor_id(), idx);
858 " %d\n", smp_processor_id(), idx); 831 return -EINVAL;
859 return -1;
860 } 832 }
861 833
862 if (idx == ARMV7_CYCLE_COUNTER) 834 counter = ARMV7_IDX_TO_COUNTER(idx);
863 val = ARMV7_CNTENS_C; 835 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
864 else
865 val = ARMV7_CNTENS_P(idx);
866
867 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
868
869 return idx; 836 return idx;
870} 837}
871 838
872static inline u32 armv7_pmnc_disable_counter(unsigned int idx) 839static inline int armv7_pmnc_disable_counter(int idx)
873{ 840{
874 u32 val; 841 u32 counter;
875
876 842
877 if ((idx != ARMV7_CYCLE_COUNTER) && 843 if (!armv7_pmnc_counter_valid(idx)) {
878 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 844 pr_err("CPU%u disabling wrong PMNC counter %d\n",
879 pr_err("CPU%u disabling wrong PMNC counter" 845 smp_processor_id(), idx);
880 " %d\n", smp_processor_id(), idx); 846 return -EINVAL;
881 return -1;
882 } 847 }
883 848
884 if (idx == ARMV7_CYCLE_COUNTER) 849 counter = ARMV7_IDX_TO_COUNTER(idx);
885 val = ARMV7_CNTENC_C; 850 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
886 else
887 val = ARMV7_CNTENC_P(idx);
888
889 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
890
891 return idx; 851 return idx;
892} 852}
893 853
894static inline u32 armv7_pmnc_enable_intens(unsigned int idx) 854static inline int armv7_pmnc_enable_intens(int idx)
895{ 855{
896 u32 val; 856 u32 counter;
897 857
898 if ((idx != ARMV7_CYCLE_COUNTER) && 858 if (!armv7_pmnc_counter_valid(idx)) {
899 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 859 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
900 pr_err("CPU%u enabling wrong PMNC counter" 860 smp_processor_id(), idx);
901 " interrupt enable %d\n", smp_processor_id(), idx); 861 return -EINVAL;
902 return -1;
903 } 862 }
904 863
905 if (idx == ARMV7_CYCLE_COUNTER) 864 counter = ARMV7_IDX_TO_COUNTER(idx);
906 val = ARMV7_INTENS_C; 865 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
907 else
908 val = ARMV7_INTENS_P(idx);
909
910 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
911
912 return idx; 866 return idx;
913} 867}
914 868
915static inline u32 armv7_pmnc_disable_intens(unsigned int idx) 869static inline int armv7_pmnc_disable_intens(int idx)
916{ 870{
917 u32 val; 871 u32 counter;
918 872
919 if ((idx != ARMV7_CYCLE_COUNTER) && 873 if (!armv7_pmnc_counter_valid(idx)) {
920 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { 874 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
921 pr_err("CPU%u disabling wrong PMNC counter" 875 smp_processor_id(), idx);
922 " interrupt enable %d\n", smp_processor_id(), idx); 876 return -EINVAL;
923 return -1;
924 } 877 }
925 878
926 if (idx == ARMV7_CYCLE_COUNTER) 879 counter = ARMV7_IDX_TO_COUNTER(idx);
927 val = ARMV7_INTENC_C; 880 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
928 else
929 val = ARMV7_INTENC_P(idx);
930
931 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
932
933 return idx; 881 return idx;
934} 882}
935 883
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
973 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); 921 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
974 printk(KERN_INFO "CCNT =0x%08x\n", val); 922 printk(KERN_INFO "CCNT =0x%08x\n", val);
975 923
976 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { 924 for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
977 armv7_pmnc_select_counter(cnt); 925 armv7_pmnc_select_counter(cnt);
978 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); 926 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
979 printk(KERN_INFO "CNT[%d] count =0x%08x\n", 927 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
980 cnt-ARMV7_EVENT_CNT_TO_CNTx, val); 928 ARMV7_IDX_TO_COUNTER(cnt), val);
981 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); 929 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
982 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", 930 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
983 cnt-ARMV7_EVENT_CNT_TO_CNTx, val); 931 ARMV7_IDX_TO_COUNTER(cnt), val);
984 } 932 }
985} 933}
986#endif 934#endif
@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
988static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) 936static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
989{ 937{
990 unsigned long flags; 938 unsigned long flags;
939 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
991 940
992 /* 941 /*
993 * Enable counter and interrupt, and set the counter to count 942 * Enable counter and interrupt, and set the counter to count
994 * the event that we're interested in. 943 * the event that we're interested in.
995 */ 944 */
996 raw_spin_lock_irqsave(&pmu_lock, flags); 945 raw_spin_lock_irqsave(&events->pmu_lock, flags);
997 946
998 /* 947 /*
999 * Disable counter 948 * Disable counter
@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1002 951
1003 /* 952 /*
1004 * Set event (if destined for PMNx counters) 953 * Set event (if destined for PMNx counters)
1005 * We don't need to set the event if it's a cycle count 954 * We only need to set the event for the cycle counter if we
955 * have the ability to perform event filtering.
1006 */ 956 */
1007 if (idx != ARMV7_CYCLE_COUNTER) 957 if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
1008 armv7_pmnc_write_evtsel(idx, hwc->config_base); 958 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1009 959
1010 /* 960 /*
@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1017 */ 967 */
1018 armv7_pmnc_enable_counter(idx); 968 armv7_pmnc_enable_counter(idx);
1019 969
1020 raw_spin_unlock_irqrestore(&pmu_lock, flags); 970 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1021} 971}
1022 972
1023static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) 973static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1024{ 974{
1025 unsigned long flags; 975 unsigned long flags;
976 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1026 977
1027 /* 978 /*
1028 * Disable counter and interrupt 979 * Disable counter and interrupt
1029 */ 980 */
1030 raw_spin_lock_irqsave(&pmu_lock, flags); 981 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1031 982
1032 /* 983 /*
1033 * Disable counter 984 * Disable counter
@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1039 */ 990 */
1040 armv7_pmnc_disable_intens(idx); 991 armv7_pmnc_disable_intens(idx);
1041 992
1042 raw_spin_unlock_irqrestore(&pmu_lock, flags); 993 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1043} 994}
1044 995
1045static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) 996static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1046{ 997{
1047 unsigned long pmnc; 998 u32 pmnc;
1048 struct perf_sample_data data; 999 struct perf_sample_data data;
1049 struct cpu_hw_events *cpuc; 1000 struct pmu_hw_events *cpuc;
1050 struct pt_regs *regs; 1001 struct pt_regs *regs;
1051 int idx; 1002 int idx;
1052 1003
@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1069 perf_sample_data_init(&data, 0); 1020 perf_sample_data_init(&data, 0);
1070 1021
1071 cpuc = &__get_cpu_var(cpu_hw_events); 1022 cpuc = &__get_cpu_var(cpu_hw_events);
1072 for (idx = 0; idx <= armpmu->num_events; ++idx) { 1023 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1073 struct perf_event *event = cpuc->events[idx]; 1024 struct perf_event *event = cpuc->events[idx];
1074 struct hw_perf_event *hwc; 1025 struct hw_perf_event *hwc;
1075 1026
1076 if (!test_bit(idx, cpuc->active_mask))
1077 continue;
1078
1079 /* 1027 /*
1080 * We have a single interrupt for all counters. Check that 1028 * We have a single interrupt for all counters. Check that
1081 * each counter has overflowed before we process it. 1029 * each counter has overflowed before we process it.
@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1090 continue; 1038 continue;
1091 1039
1092 if (perf_event_overflow(event, &data, regs)) 1040 if (perf_event_overflow(event, &data, regs))
1093 armpmu->disable(hwc, idx); 1041 cpu_pmu->disable(hwc, idx);
1094 } 1042 }
1095 1043
1096 /* 1044 /*
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1108static void armv7pmu_start(void) 1056static void armv7pmu_start(void)
1109{ 1057{
1110 unsigned long flags; 1058 unsigned long flags;
1059 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1111 1060
1112 raw_spin_lock_irqsave(&pmu_lock, flags); 1061 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1113 /* Enable all counters */ 1062 /* Enable all counters */
1114 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); 1063 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
1115 raw_spin_unlock_irqrestore(&pmu_lock, flags); 1064 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1116} 1065}
1117 1066
1118static void armv7pmu_stop(void) 1067static void armv7pmu_stop(void)
1119{ 1068{
1120 unsigned long flags; 1069 unsigned long flags;
1070 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1121 1071
1122 raw_spin_lock_irqsave(&pmu_lock, flags); 1072 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1123 /* Disable all counters */ 1073 /* Disable all counters */
1124 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); 1074 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
1125 raw_spin_unlock_irqrestore(&pmu_lock, flags); 1075 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1126} 1076}
1127 1077
1128static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, 1078static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
1129 struct hw_perf_event *event) 1079 struct hw_perf_event *event)
1130{ 1080{
1131 int idx; 1081 int idx;
1082 unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
1132 1083
1133 /* Always place a cycle counter into the cycle counter. */ 1084 /* Always place a cycle counter into the cycle counter. */
1134 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { 1085 if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
1135 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) 1086 if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
1136 return -EAGAIN; 1087 return -EAGAIN;
1137 1088
1138 return ARMV7_CYCLE_COUNTER; 1089 return ARMV7_IDX_CYCLE_COUNTER;
1139 } else { 1090 }
1140 /*
1141 * For anything other than a cycle counter, try and use
1142 * the events counters
1143 */
1144 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
1145 if (!test_and_set_bit(idx, cpuc->used_mask))
1146 return idx;
1147 }
1148 1091
1149 /* The counters are all in use. */ 1092 /*
1150 return -EAGAIN; 1093 * For anything other than a cycle counter, try and use
1094 * the events counters
1095 */
1096 for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
1097 if (!test_and_set_bit(idx, cpuc->used_mask))
1098 return idx;
1151 } 1099 }
1100
1101 /* The counters are all in use. */
1102 return -EAGAIN;
1103}
1104
1105/*
1106 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
1107 */
1108static int armv7pmu_set_event_filter(struct hw_perf_event *event,
1109 struct perf_event_attr *attr)
1110{
1111 unsigned long config_base = 0;
1112
1113 if (attr->exclude_idle)
1114 return -EPERM;
1115 if (attr->exclude_user)
1116 config_base |= ARMV7_EXCLUDE_USER;
1117 if (attr->exclude_kernel)
1118 config_base |= ARMV7_EXCLUDE_PL1;
1119 if (!attr->exclude_hv)
1120 config_base |= ARMV7_INCLUDE_HYP;
1121
1122 /*
1123 * Install the filter into config_base as this is used to
1124 * construct the event type.
1125 */
1126 event->config_base = config_base;
1127
1128 return 0;
1152} 1129}
1153 1130
1154static void armv7pmu_reset(void *info) 1131static void armv7pmu_reset(void *info)
1155{ 1132{
1156 u32 idx, nb_cnt = armpmu->num_events; 1133 u32 idx, nb_cnt = cpu_pmu->num_events;
1157 1134
1158 /* The counter and interrupt enable registers are unknown at reset. */ 1135 /* The counter and interrupt enable registers are unknown at reset. */
1159 for (idx = 1; idx < nb_cnt; ++idx) 1136 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
1160 armv7pmu_disable_event(NULL, idx); 1137 armv7pmu_disable_event(NULL, idx);
1161 1138
1162 /* Initialize & Reset PMNC: C and P bits */ 1139 /* Initialize & Reset PMNC: C and P bits */
1163 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); 1140 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
1164} 1141}
1165 1142
1143static int armv7_a8_map_event(struct perf_event *event)
1144{
1145 return map_cpu_event(event, &armv7_a8_perf_map,
1146 &armv7_a8_perf_cache_map, 0xFF);
1147}
1148
1149static int armv7_a9_map_event(struct perf_event *event)
1150{
1151 return map_cpu_event(event, &armv7_a9_perf_map,
1152 &armv7_a9_perf_cache_map, 0xFF);
1153}
1154
1155static int armv7_a5_map_event(struct perf_event *event)
1156{
1157 return map_cpu_event(event, &armv7_a5_perf_map,
1158 &armv7_a5_perf_cache_map, 0xFF);
1159}
1160
1161static int armv7_a15_map_event(struct perf_event *event)
1162{
1163 return map_cpu_event(event, &armv7_a15_perf_map,
1164 &armv7_a15_perf_cache_map, 0xFF);
1165}
1166
1166static struct arm_pmu armv7pmu = { 1167static struct arm_pmu armv7pmu = {
1167 .handle_irq = armv7pmu_handle_irq, 1168 .handle_irq = armv7pmu_handle_irq,
1168 .enable = armv7pmu_enable_event, 1169 .enable = armv7pmu_enable_event,
@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
1173 .start = armv7pmu_start, 1174 .start = armv7pmu_start,
1174 .stop = armv7pmu_stop, 1175 .stop = armv7pmu_stop,
1175 .reset = armv7pmu_reset, 1176 .reset = armv7pmu_reset,
1176 .raw_event_mask = 0xFF,
1177 .max_period = (1LLU << 32) - 1, 1177 .max_period = (1LLU << 32) - 1,
1178}; 1178};
1179 1179
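
The hunk below also touches armv7_read_num_pmnc_events(), whose return value now feeds the 0-based index space directly. A short note on the arithmetic (the PMCR field layout is an architectural fact, not shown in this diff):

/*
 * PMCR.N (bits [15:11]) reports the number of event counters; the
 * cycle counter is extra, hence num_events = N + 1 now that CCNT
 * occupies index 0. A Cortex-A9 with N = 6 therefore reports
 * num_events = 7: index 0 (CCNT) plus indices 1..6 (PMN0..PMN5).
 */
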
@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
1188 return nb_cnt + 1; 1188 return nb_cnt + 1;
1189} 1189}
1190 1190
1191static const struct arm_pmu *__init armv7_a8_pmu_init(void) 1191static struct arm_pmu *__init armv7_a8_pmu_init(void)
1192{ 1192{
1193 armv7pmu.id = ARM_PERF_PMU_ID_CA8; 1193 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
1194 armv7pmu.name = "ARMv7 Cortex-A8"; 1194 armv7pmu.name = "ARMv7 Cortex-A8";
1195 armv7pmu.cache_map = &armv7_a8_perf_cache_map; 1195 armv7pmu.map_event = armv7_a8_map_event;
1196 armv7pmu.event_map = &armv7_a8_perf_map;
1197 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1196 armv7pmu.num_events = armv7_read_num_pmnc_events();
1198 return &armv7pmu; 1197 return &armv7pmu;
1199} 1198}
1200 1199
1201static const struct arm_pmu *__init armv7_a9_pmu_init(void) 1200static struct arm_pmu *__init armv7_a9_pmu_init(void)
1202{ 1201{
1203 armv7pmu.id = ARM_PERF_PMU_ID_CA9; 1202 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
1204 armv7pmu.name = "ARMv7 Cortex-A9"; 1203 armv7pmu.name = "ARMv7 Cortex-A9";
1205 armv7pmu.cache_map = &armv7_a9_perf_cache_map; 1204 armv7pmu.map_event = armv7_a9_map_event;
1206 armv7pmu.event_map = &armv7_a9_perf_map;
1207 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1205 armv7pmu.num_events = armv7_read_num_pmnc_events();
1208 return &armv7pmu; 1206 return &armv7pmu;
1209} 1207}
1210 1208
1211static const struct arm_pmu *__init armv7_a5_pmu_init(void) 1209static struct arm_pmu *__init armv7_a5_pmu_init(void)
1212{ 1210{
1213 armv7pmu.id = ARM_PERF_PMU_ID_CA5; 1211 armv7pmu.id = ARM_PERF_PMU_ID_CA5;
1214 armv7pmu.name = "ARMv7 Cortex-A5"; 1212 armv7pmu.name = "ARMv7 Cortex-A5";
1215 armv7pmu.cache_map = &armv7_a5_perf_cache_map; 1213 armv7pmu.map_event = armv7_a5_map_event;
1216 armv7pmu.event_map = &armv7_a5_perf_map;
1217 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1214 armv7pmu.num_events = armv7_read_num_pmnc_events();
1218 return &armv7pmu; 1215 return &armv7pmu;
1219} 1216}
1220 1217
1221static const struct arm_pmu *__init armv7_a15_pmu_init(void) 1218static struct arm_pmu *__init armv7_a15_pmu_init(void)
1222{ 1219{
1223 armv7pmu.id = ARM_PERF_PMU_ID_CA15; 1220 armv7pmu.id = ARM_PERF_PMU_ID_CA15;
1224 armv7pmu.name = "ARMv7 Cortex-A15"; 1221 armv7pmu.name = "ARMv7 Cortex-A15";
1225 armv7pmu.cache_map = &armv7_a15_perf_cache_map; 1222 armv7pmu.map_event = armv7_a15_map_event;
1226 armv7pmu.event_map = &armv7_a15_perf_map;
1227 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1223 armv7pmu.num_events = armv7_read_num_pmnc_events();
1224 armv7pmu.set_event_filter = armv7pmu_set_event_filter;
1228 return &armv7pmu; 1225 return &armv7pmu;
1229} 1226}
1230#else 1227#else
1231static const struct arm_pmu *__init armv7_a8_pmu_init(void) 1228static struct arm_pmu *__init armv7_a8_pmu_init(void)
1232{ 1229{
1233 return NULL; 1230 return NULL;
1234} 1231}
1235 1232
1236static const struct arm_pmu *__init armv7_a9_pmu_init(void) 1233static struct arm_pmu *__init armv7_a9_pmu_init(void)
1237{ 1234{
1238 return NULL; 1235 return NULL;
1239} 1236}
1240 1237
1241static const struct arm_pmu *__init armv7_a5_pmu_init(void) 1238static struct arm_pmu *__init armv7_a5_pmu_init(void)
1242{ 1239{
1243 return NULL; 1240 return NULL;
1244} 1241}
1245 1242
1246static const struct arm_pmu *__init armv7_a15_pmu_init(void) 1243static struct arm_pmu *__init armv7_a15_pmu_init(void)
1247{ 1244{
1248 return NULL; 1245 return NULL;
1249} 1246}
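
Of the four v7 variants, only the Cortex-A15 init wires up set_event_filter: the ARMV7_EXCLUDE_*/ARMV7_INCLUDE_HYP bits are a PMUv2 feature, so on A5/A8/A9 the callback stays NULL and armv7pmu_enable_event() keeps skipping PMXEVTYPER for the cycle counter. A worked example of the config_base the filter builds; the attribute values are illustrative, not from the patch:

/* Count cycles at PL1 only, as in perf's "cycles:k". */
struct perf_event_attr attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.exclude_user	= 1,	/* sets ARMV7_EXCLUDE_USER (1 << 30) */
	.exclude_hv	= 1,	/* so ARMV7_INCLUDE_HYP is not set   */
};
/*
 * armv7pmu_set_event_filter() leaves config_base = (1 << 30); the
 * selected event number is OR'd into config_base later, outside this
 * diff, and since armv7pmu.set_event_filter is non-NULL the enable
 * path writes PMXEVTYPER for the cycle counter too, so the filter
 * also applies to CCNT.
 */
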
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 3c4397491d08..e0cca10a8411 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -40,7 +40,7 @@ enum xscale_perf_types {
40}; 40};
41 41
42enum xscale_counters { 42enum xscale_counters {
43 XSCALE_CYCLE_COUNTER = 1, 43 XSCALE_CYCLE_COUNTER = 0,
44 XSCALE_COUNTER0, 44 XSCALE_COUNTER0,
45 XSCALE_COUNTER1, 45 XSCALE_COUNTER1,
46 XSCALE_COUNTER2, 46 XSCALE_COUNTER2,
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
222{ 222{
223 unsigned long pmnc; 223 unsigned long pmnc;
224 struct perf_sample_data data; 224 struct perf_sample_data data;
225 struct cpu_hw_events *cpuc; 225 struct pmu_hw_events *cpuc;
226 struct pt_regs *regs; 226 struct pt_regs *regs;
227 int idx; 227 int idx;
228 228
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
249 perf_sample_data_init(&data, 0); 249 perf_sample_data_init(&data, 0);
250 250
251 cpuc = &__get_cpu_var(cpu_hw_events); 251 cpuc = &__get_cpu_var(cpu_hw_events);
252 for (idx = 0; idx <= armpmu->num_events; ++idx) { 252 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
253 struct perf_event *event = cpuc->events[idx]; 253 struct perf_event *event = cpuc->events[idx];
254 struct hw_perf_event *hwc; 254 struct hw_perf_event *hwc;
255 255
256 if (!test_bit(idx, cpuc->active_mask))
257 continue;
258
259 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) 256 if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
260 continue; 257 continue;
261 258
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
266 continue; 263 continue;
267 264
268 if (perf_event_overflow(event, &data, regs)) 265 if (perf_event_overflow(event, &data, regs))
269 armpmu->disable(hwc, idx); 266 cpu_pmu->disable(hwc, idx);
270 } 267 }
271 268
272 irq_work_run(); 269 irq_work_run();
@@ -284,6 +281,7 @@ static void
284xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) 281xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
285{ 282{
286 unsigned long val, mask, evt, flags; 283 unsigned long val, mask, evt, flags;
284 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
287 285
288 switch (idx) { 286 switch (idx) {
289 case XSCALE_CYCLE_COUNTER: 287 case XSCALE_CYCLE_COUNTER:
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
305 return; 303 return;
306 } 304 }
307 305
308 raw_spin_lock_irqsave(&pmu_lock, flags); 306 raw_spin_lock_irqsave(&events->pmu_lock, flags);
309 val = xscale1pmu_read_pmnc(); 307 val = xscale1pmu_read_pmnc();
310 val &= ~mask; 308 val &= ~mask;
311 val |= evt; 309 val |= evt;
312 xscale1pmu_write_pmnc(val); 310 xscale1pmu_write_pmnc(val);
313 raw_spin_unlock_irqrestore(&pmu_lock, flags); 311 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
314} 312}
315 313
316static void 314static void
317xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) 315xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
318{ 316{
319 unsigned long val, mask, evt, flags; 317 unsigned long val, mask, evt, flags;
318 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
320 319
321 switch (idx) { 320 switch (idx) {
322 case XSCALE_CYCLE_COUNTER: 321 case XSCALE_CYCLE_COUNTER:
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
336 return; 335 return;
337 } 336 }
338 337
339 raw_spin_lock_irqsave(&pmu_lock, flags); 338 raw_spin_lock_irqsave(&events->pmu_lock, flags);
340 val = xscale1pmu_read_pmnc(); 339 val = xscale1pmu_read_pmnc();
341 val &= ~mask; 340 val &= ~mask;
342 val |= evt; 341 val |= evt;
343 xscale1pmu_write_pmnc(val); 342 xscale1pmu_write_pmnc(val);
344 raw_spin_unlock_irqrestore(&pmu_lock, flags); 343 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
345} 344}
346 345
347static int 346static int
348xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, 347xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc,
349 struct hw_perf_event *event) 348 struct hw_perf_event *event)
350{ 349{
351 if (XSCALE_PERFCTR_CCNT == event->config_base) { 350 if (XSCALE_PERFCTR_CCNT == event->config_base) {
@@ -368,24 +367,26 @@ static void
368xscale1pmu_start(void) 367xscale1pmu_start(void)
369{ 368{
370 unsigned long flags, val; 369 unsigned long flags, val;
370 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
371 371
372 raw_spin_lock_irqsave(&pmu_lock, flags); 372 raw_spin_lock_irqsave(&events->pmu_lock, flags);
373 val = xscale1pmu_read_pmnc(); 373 val = xscale1pmu_read_pmnc();
374 val |= XSCALE_PMU_ENABLE; 374 val |= XSCALE_PMU_ENABLE;
375 xscale1pmu_write_pmnc(val); 375 xscale1pmu_write_pmnc(val);
376 raw_spin_unlock_irqrestore(&pmu_lock, flags); 376 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
377} 377}
378 378
379static void 379static void
380xscale1pmu_stop(void) 380xscale1pmu_stop(void)
381{ 381{
382 unsigned long flags, val; 382 unsigned long flags, val;
383 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
383 384
384 raw_spin_lock_irqsave(&pmu_lock, flags); 385 raw_spin_lock_irqsave(&events->pmu_lock, flags);
385 val = xscale1pmu_read_pmnc(); 386 val = xscale1pmu_read_pmnc();
386 val &= ~XSCALE_PMU_ENABLE; 387 val &= ~XSCALE_PMU_ENABLE;
387 xscale1pmu_write_pmnc(val); 388 xscale1pmu_write_pmnc(val);
388 raw_spin_unlock_irqrestore(&pmu_lock, flags); 389 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
389} 390}
390 391
391static inline u32 392static inline u32
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val)
424 } 425 }
425} 426}
426 427
427static const struct arm_pmu xscale1pmu = { 428static int xscale_map_event(struct perf_event *event)
429{
430 return map_cpu_event(event, &xscale_perf_map,
431 &xscale_perf_cache_map, 0xFF);
432}
433
434static struct arm_pmu xscale1pmu = {
428 .id = ARM_PERF_PMU_ID_XSCALE1, 435 .id = ARM_PERF_PMU_ID_XSCALE1,
429 .name = "xscale1", 436 .name = "xscale1",
430 .handle_irq = xscale1pmu_handle_irq, 437 .handle_irq = xscale1pmu_handle_irq,
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = {
435 .get_event_idx = xscale1pmu_get_event_idx, 442 .get_event_idx = xscale1pmu_get_event_idx,
436 .start = xscale1pmu_start, 443 .start = xscale1pmu_start,
437 .stop = xscale1pmu_stop, 444 .stop = xscale1pmu_stop,
438 .cache_map = &xscale_perf_cache_map, 445 .map_event = xscale_map_event,
439 .event_map = &xscale_perf_map,
440 .raw_event_mask = 0xFF,
441 .num_events = 3, 446 .num_events = 3,
442 .max_period = (1LLU << 32) - 1, 447 .max_period = (1LLU << 32) - 1,
443}; 448};
444 449
445static const struct arm_pmu *__init xscale1pmu_init(void) 450static struct arm_pmu *__init xscale1pmu_init(void)
446{ 451{
447 return &xscale1pmu; 452 return &xscale1pmu;
448} 453}
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
560{ 565{
561 unsigned long pmnc, of_flags; 566 unsigned long pmnc, of_flags;
562 struct perf_sample_data data; 567 struct perf_sample_data data;
563 struct cpu_hw_events *cpuc; 568 struct pmu_hw_events *cpuc;
564 struct pt_regs *regs; 569 struct pt_regs *regs;
565 int idx; 570 int idx;
566 571
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
581 perf_sample_data_init(&data, 0); 586 perf_sample_data_init(&data, 0);
582 587
583 cpuc = &__get_cpu_var(cpu_hw_events); 588 cpuc = &__get_cpu_var(cpu_hw_events);
584 for (idx = 0; idx <= armpmu->num_events; ++idx) { 589 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
585 struct perf_event *event = cpuc->events[idx]; 590 struct perf_event *event = cpuc->events[idx];
586 struct hw_perf_event *hwc; 591 struct hw_perf_event *hwc;
587 592
588 if (!test_bit(idx, cpuc->active_mask))
589 continue;
590
591 if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) 593 if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
592 continue; 594 continue;
593 595
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
598 continue; 600 continue;
599 601
600 if (perf_event_overflow(event, &data, regs)) 602 if (perf_event_overflow(event, &data, regs))
601 armpmu->disable(hwc, idx); 603 cpu_pmu->disable(hwc, idx);
602 } 604 }
603 605
604 irq_work_run(); 606 irq_work_run();
@@ -616,6 +618,7 @@ static void
616xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) 618xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
617{ 619{
618 unsigned long flags, ien, evtsel; 620 unsigned long flags, ien, evtsel;
621 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
619 622
620 ien = xscale2pmu_read_int_enable(); 623 ien = xscale2pmu_read_int_enable();
621 evtsel = xscale2pmu_read_event_select(); 624 evtsel = xscale2pmu_read_event_select();
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
649 return; 652 return;
650 } 653 }
651 654
652 raw_spin_lock_irqsave(&pmu_lock, flags); 655 raw_spin_lock_irqsave(&events->pmu_lock, flags);
653 xscale2pmu_write_event_select(evtsel); 656 xscale2pmu_write_event_select(evtsel);
654 xscale2pmu_write_int_enable(ien); 657 xscale2pmu_write_int_enable(ien);
655 raw_spin_unlock_irqrestore(&pmu_lock, flags); 658 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
656} 659}
657 660
658static void 661static void
659xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) 662xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
660{ 663{
661 unsigned long flags, ien, evtsel; 664 unsigned long flags, ien, evtsel;
665 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
662 666
663 ien = xscale2pmu_read_int_enable(); 667 ien = xscale2pmu_read_int_enable();
664 evtsel = xscale2pmu_read_event_select(); 668 evtsel = xscale2pmu_read_event_select();
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
692 return; 696 return;
693 } 697 }
694 698
695 raw_spin_lock_irqsave(&pmu_lock, flags); 699 raw_spin_lock_irqsave(&events->pmu_lock, flags);
696 xscale2pmu_write_event_select(evtsel); 700 xscale2pmu_write_event_select(evtsel);
697 xscale2pmu_write_int_enable(ien); 701 xscale2pmu_write_int_enable(ien);
698 raw_spin_unlock_irqrestore(&pmu_lock, flags); 702 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
699} 703}
700 704
701static int 705static int
702xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc, 706xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc,
703 struct hw_perf_event *event) 707 struct hw_perf_event *event)
704{ 708{
705 int idx = xscale1pmu_get_event_idx(cpuc, event); 709 int idx = xscale1pmu_get_event_idx(cpuc, event);
@@ -718,24 +722,26 @@ static void
718xscale2pmu_start(void) 722xscale2pmu_start(void)
719{ 723{
720 unsigned long flags, val; 724 unsigned long flags, val;
725 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
721 726
722 raw_spin_lock_irqsave(&pmu_lock, flags); 727 raw_spin_lock_irqsave(&events->pmu_lock, flags);
723 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; 728 val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
724 val |= XSCALE_PMU_ENABLE; 729 val |= XSCALE_PMU_ENABLE;
725 xscale2pmu_write_pmnc(val); 730 xscale2pmu_write_pmnc(val);
726 raw_spin_unlock_irqrestore(&pmu_lock, flags); 731 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
727} 732}
728 733
729static void 734static void
730xscale2pmu_stop(void) 735xscale2pmu_stop(void)
731{ 736{
732 unsigned long flags, val; 737 unsigned long flags, val;
738 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
733 739
734 raw_spin_lock_irqsave(&pmu_lock, flags); 740 raw_spin_lock_irqsave(&events->pmu_lock, flags);
735 val = xscale2pmu_read_pmnc(); 741 val = xscale2pmu_read_pmnc();
736 val &= ~XSCALE_PMU_ENABLE; 742 val &= ~XSCALE_PMU_ENABLE;
737 xscale2pmu_write_pmnc(val); 743 xscale2pmu_write_pmnc(val);
738 raw_spin_unlock_irqrestore(&pmu_lock, flags); 744 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
739} 745}
740 746
741static inline u32 747static inline u32
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val)
786 } 792 }
787} 793}
788 794
789static const struct arm_pmu xscale2pmu = { 795static struct arm_pmu xscale2pmu = {
790 .id = ARM_PERF_PMU_ID_XSCALE2, 796 .id = ARM_PERF_PMU_ID_XSCALE2,
791 .name = "xscale2", 797 .name = "xscale2",
792 .handle_irq = xscale2pmu_handle_irq, 798 .handle_irq = xscale2pmu_handle_irq,
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = {
797 .get_event_idx = xscale2pmu_get_event_idx, 803 .get_event_idx = xscale2pmu_get_event_idx,
798 .start = xscale2pmu_start, 804 .start = xscale2pmu_start,
799 .stop = xscale2pmu_stop, 805 .stop = xscale2pmu_stop,
800 .cache_map = &xscale_perf_cache_map, 806 .map_event = xscale_map_event,
801 .event_map = &xscale_perf_map,
802 .raw_event_mask = 0xFF,
803 .num_events = 5, 807 .num_events = 5,
804 .max_period = (1LLU << 32) - 1, 808 .max_period = (1LLU << 32) - 1,
805}; 809};
806 810
807static const struct arm_pmu *__init xscale2pmu_init(void) 811static struct arm_pmu *__init xscale2pmu_init(void)
808{ 812{
809 return &xscale2pmu; 813 return &xscale2pmu;
810} 814}
811#else 815#else
812static const struct arm_pmu *__init xscale1pmu_init(void) 816static struct arm_pmu *__init xscale1pmu_init(void)
813{ 817{
814 return NULL; 818 return NULL;
815} 819}
816 820
817static const struct arm_pmu *__init xscale2pmu_init(void) 821static struct arm_pmu *__init xscale2pmu_init(void)
818{ 822{
819 return NULL; 823 return NULL;
820} 824}
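
Across the v6, v7 and xscale drivers the locking change is mechanical: the file-scope pmu_lock spinlock becomes the pmu_lock embedded in this CPU's pmu_hw_events, fetched through cpu_pmu->get_hw_events(). The resulting read-modify-write pattern, written out once with stand-in names for the per-driver pieces (pmu_ctrl_read, pmu_ctrl_write and PMU_ENABLE are illustrative, not real symbols):

static void example_pmu_start(void)
{
	unsigned long flags, val;
	struct pmu_hw_events *events = cpu_pmu->get_hw_events();

	/* Serialise updates to this CPU's PMU control register. */
	raw_spin_lock_irqsave(&events->pmu_lock, flags);
	val = pmu_ctrl_read();		/* e.g. armv6_pmcr_read() */
	val |= PMU_ENABLE;		/* driver-specific enable bit */
	pmu_ctrl_write(val);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}
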
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index c53474fe84df..2c3407ee8576 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -10,192 +10,26 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) "PMU: " fmt
14
15#include <linux/cpumask.h>
16#include <linux/err.h> 13#include <linux/err.h>
17#include <linux/interrupt.h>
18#include <linux/kernel.h> 14#include <linux/kernel.h>
19#include <linux/module.h> 15#include <linux/module.h>
20#include <linux/of_device.h>
21#include <linux/platform_device.h>
22 16
23#include <asm/pmu.h> 17#include <asm/pmu.h>
24 18
25static volatile long pmu_lock; 19/*
26 20 * PMU locking to ensure mutual exclusion between different subsystems.
27static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; 21 */
28 22static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
29static int __devinit pmu_register(struct platform_device *pdev,
30 enum arm_pmu_type type)
31{
32 if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
33 pr_warning("received registration request for unknown "
34 "PMU device type %d\n", type);
35 return -EINVAL;
36 }
37
38 if (pmu_devices[type]) {
39 pr_warning("rejecting duplicate registration of PMU device "
40 "type %d.", type);
41 return -ENOSPC;
42 }
43
44 pr_info("registered new PMU device of type %d\n", type);
45 pmu_devices[type] = pdev;
46 return 0;
47}
48
49#define OF_MATCH_PMU(_name, _type) { \
50 .compatible = _name, \
51 .data = (void *)_type, \
52}
53
54#define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
55
56static struct of_device_id armpmu_of_device_ids[] = {
57 OF_MATCH_CPU("arm,cortex-a9-pmu"),
58 OF_MATCH_CPU("arm,cortex-a8-pmu"),
59 OF_MATCH_CPU("arm,arm1136-pmu"),
60 OF_MATCH_CPU("arm,arm1176-pmu"),
61 {},
62};
63
64#define PLAT_MATCH_PMU(_name, _type) { \
65 .name = _name, \
66 .driver_data = _type, \
67}
68
69#define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
70
71static struct platform_device_id armpmu_plat_device_ids[] = {
72 PLAT_MATCH_CPU("arm-pmu"),
73 {},
74};
75
76enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
77{
78 const struct of_device_id *of_id;
79 const struct platform_device_id *pdev_id;
80
81 /* provided by of_device_id table */
82 if (pdev->dev.of_node) {
83 of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
84 BUG_ON(!of_id);
85 return (enum arm_pmu_type)of_id->data;
86 }
87
88 /* Provided by platform_device_id table */
89 pdev_id = platform_get_device_id(pdev);
90 BUG_ON(!pdev_id);
91 return pdev_id->driver_data;
92}
93
94static int __devinit armpmu_device_probe(struct platform_device *pdev)
95{
96 return pmu_register(pdev, armpmu_device_type(pdev));
97}
98
99static struct platform_driver armpmu_driver = {
100 .driver = {
101 .name = "arm-pmu",
102 .of_match_table = armpmu_of_device_ids,
103 },
104 .probe = armpmu_device_probe,
105 .id_table = armpmu_plat_device_ids,
106};
107
108static int __init register_pmu_driver(void)
109{
110 return platform_driver_register(&armpmu_driver);
111}
112device_initcall(register_pmu_driver);
113 23
114struct platform_device * 24int
115reserve_pmu(enum arm_pmu_type type) 25reserve_pmu(enum arm_pmu_type type)
116{ 26{
117 struct platform_device *pdev; 27 return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
118
119 if (test_and_set_bit_lock(type, &pmu_lock)) {
120 pdev = ERR_PTR(-EBUSY);
121 } else if (pmu_devices[type] == NULL) {
122 clear_bit_unlock(type, &pmu_lock);
123 pdev = ERR_PTR(-ENODEV);
124 } else {
125 pdev = pmu_devices[type];
126 }
127
128 return pdev;
129} 28}
130EXPORT_SYMBOL_GPL(reserve_pmu); 29EXPORT_SYMBOL_GPL(reserve_pmu);
131 30
132int 31void
133release_pmu(enum arm_pmu_type type) 32release_pmu(enum arm_pmu_type type)
134{ 33{
135 if (WARN_ON(!pmu_devices[type])) 34 clear_bit_unlock(type, pmu_lock);
136 return -EINVAL;
137 clear_bit_unlock(type, &pmu_lock);
138 return 0;
139}
140EXPORT_SYMBOL_GPL(release_pmu);
141
142static int
143set_irq_affinity(int irq,
144 unsigned int cpu)
145{
146#ifdef CONFIG_SMP
147 int err = irq_set_affinity(irq, cpumask_of(cpu));
148 if (err)
149 pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
150 irq, cpu);
151 return err;
152#else
153 return -EINVAL;
154#endif
155}
156
157static int
158init_cpu_pmu(void)
159{
160 int i, irqs, err = 0;
161 struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
162
163 if (!pdev)
164 return -ENODEV;
165
166 irqs = pdev->num_resources;
167
168 /*
169 * If we have a single PMU interrupt that we can't shift, assume that
170 * we're running on a uniprocessor machine and continue.
171 */
172 if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
173 return 0;
174
175 for (i = 0; i < irqs; ++i) {
176 err = set_irq_affinity(platform_get_irq(pdev, i), i);
177 if (err)
178 break;
179 }
180
181 return err;
182}
183
184int
185init_pmu(enum arm_pmu_type type)
186{
187 int err = 0;
188
189 switch (type) {
190 case ARM_PMU_DEVICE_CPU:
191 err = init_cpu_pmu();
192 break;
193 default:
194 pr_warning("attempt to initialise PMU of unknown "
195 "type %d\n", type);
196 err = -EINVAL;
197 }
198
199 return err;
200} 35}
201EXPORT_SYMBOL_GPL(init_pmu);
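
With device probing moved out of pmu.c (the platform driver and IRQ handling migrate elsewhere in this series), reservation reduces to one atomic bit per PMU type, and release_pmu() can no longer fail. A sketch of a caller under the new API; the function name is hypothetical, oprofile and the perf code above being the real users:

static int claim_and_use_cpu_pmu(void)
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err)
		return err;	/* -EBUSY: another subsystem holds the PMU */

	/* ... request IRQs and program counters here ... */

	release_pmu(ARM_PMU_DEVICE_CPU);	/* returns void now */
	return 0;
}
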
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3fe93f75b55a..bda0a218f4a5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -849,25 +849,8 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
849 849
850 if (__atags_pointer) 850 if (__atags_pointer)
851 tags = phys_to_virt(__atags_pointer); 851 tags = phys_to_virt(__atags_pointer);
852 else if (mdesc->boot_params) { 852 else if (mdesc->atag_offset)
853#ifdef CONFIG_MMU 853 tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
854 /*
855 * We still are executing with a minimal MMU mapping created
856 * with the presumption that the machine default for this
857 * is located in the first MB of RAM. Anything else will
858 * fault and silently hang the kernel at this point.
859 */
860 if (mdesc->boot_params < PHYS_OFFSET ||
861 mdesc->boot_params >= PHYS_OFFSET + SZ_1M) {
862 printk(KERN_WARNING
863 "Default boot params at physical 0x%08lx out of reach\n",
864 mdesc->boot_params);
865 } else
866#endif
867 {
868 tags = phys_to_virt(mdesc->boot_params);
869 }
870 }
871 854
872#if defined(CONFIG_DEPRECATED_PARAM_STRUCT) 855#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
873 /* 856 /*
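
The setup.c change trades the physical boot_params address, and its first-megabyte reachability check, for an offset from the start of RAM, so the ATAGs' virtual address becomes a single addition with no MMU caveat:

/* e.g. atag_offset = 0x100 under the common 3G/1G split: */
tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
	/* = 0xc0000000 + 0x100 = 0xc0000100 */
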
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index dc902f2c6845..020e99c845e7 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -8,92 +8,61 @@
 	.text
 
 /*
- * Save CPU state for a suspend
- * r1 = v:p offset
- * r2 = suspend function arg0
- * r3 = suspend function
+ * Save CPU state for a suspend.  This saves the CPU general purpose
+ * registers, and allocates space on the kernel stack to save the CPU
+ * specific registers and some other data for resume.
+ * r0 = suspend function arg0
+ * r1 = suspend function
  */
 ENTRY(__cpu_suspend)
 	stmfd	sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
 	ldr	r10, =processor
-	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
-	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
+	ldr	r4, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
 #else
-	ldr	r5, =cpu_suspend_size
-	ldr	ip, =cpu_do_resume
+	ldr	r4, =cpu_suspend_size
 #endif
-	mov	r6, sp			@ current virtual SP
-	sub	sp, sp, r5		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer to CPU save block
-	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
-	ldr	r5, =sleep_save_sp
-	add	r6, sp, r1		@ convert SP to phys
-	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
+	mov	r5, sp			@ current virtual SP
+	add	r4, r4, #12		@ Space for pgd, virt sp, phys resume fn
+	sub	sp, sp, r4		@ allocate CPU state on stack
+	stmfd	sp!, {r0, r1}		@ save suspend func arg and pointer
+	add	r0, sp, #8		@ save pointer to save block
+	mov	r1, r4			@ size of save block
+	mov	r2, r5			@ virtual SP
+	ldr	r3, =sleep_save_sp
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
 	ALT_UP(mov lr, #0)
 	and	lr, lr, #15
-	str	r6, [r5, lr, lsl #2]	@ save phys SP
-#else
-	str	r6, [r5]		@ save phys SP
-#endif
-#ifdef MULTI_CPU
-	mov	lr, pc
-	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
-#else
-	bl	cpu_do_suspend
-#endif
-
-	@ flush data cache
-#ifdef MULTI_CACHE
-	ldr	r10, =cpu_cache
-	mov	lr, pc
-	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
-#else
-	bl	__cpuc_flush_kern_all
+	add	r3, r3, lr, lsl #2
 #endif
+	bl	__cpu_suspend_save
 	adr	lr, BSYM(cpu_suspend_abort)
 	ldmfd	sp!, {r0, pc}		@ call suspend fn
 ENDPROC(__cpu_suspend)
 	.ltorg
 
 cpu_suspend_abort:
-	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	ldmia	sp!, {r1 - r3}		@ pop phys pgd, virt SP, phys resume fn
+	teq	r0, #0
+	moveq	r0, #1			@ force non-zero value
 	mov	sp, r2
 	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_suspend_abort)
 
 /*
  * r0 = control register value
- * r1 = v:p offset (preserved by cpu_do_resume)
- * r2 = phys page table base
- * r3 = L1 section flags
  */
+	.align	5
 ENTRY(cpu_resume_mmu)
-	adr	r4, cpu_resume_turn_mmu_on
-	mov	r4, r4, lsr #20
-	orr	r3, r3, r4, lsl #20
-	ldr	r5, [r2, r4, lsl #2]	@ save old mapping
-	str	r3, [r2, r4, lsl #2]	@ setup 1:1 mapping for mmu code
-	sub	r2, r2, r1
 	ldr	r3, =cpu_resume_after_mmu
-	bic	r1, r0, #CR_C		@ ensure D-cache is disabled
-	b	cpu_resume_turn_mmu_on
-ENDPROC(cpu_resume_mmu)
-	.ltorg
-	.align	5
-cpu_resume_turn_mmu_on:
-	mcr	p15, 0, r1, c1, c0, 0	@ turn on MMU, I-cache, etc
-	mrc	p15, 0, r1, c0, c0, 0	@ read id reg
-	mov	r1, r1
-	mov	r1, r1
+	mcr	p15, 0, r0, c1, c0, 0	@ turn on MMU, I-cache, etc
+	mrc	p15, 0, r0, c0, c0, 0	@ read id reg
+	mov	r0, r0
+	mov	r0, r0
 	mov	pc, r3			@ jump to virtual address
-ENDPROC(cpu_resume_turn_mmu_on)
+ENDPROC(cpu_resume_mmu)
 cpu_resume_after_mmu:
-	str	r5, [r2, r4, lsl #2]	@ restore old mapping
-	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
 	bl	cpu_init		@ restore the und/abt/irq banked regs
 	mov	r0, #0			@ return zero on success
 	ldmfd	sp!, {r4 - r11, pc}
@@ -119,7 +88,7 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-	@ load v:p, stack, resume fn
+	@ load phys pgd, stack, resume fn
  ARM(	ldmia	r0!, {r1, sp, pc}	)
 THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2	)
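
The reworked __cpu_suspend now allocates the save area on the stack and hands it to C code (__cpu_suspend_save) instead of saving CPU state in assembly; the extra 12 bytes hold a three-word header that cpu_resume later consumes with a single ldmia into {r1, sp, pc}. A sketch of the resulting layout as a C struct; the struct and field names are illustrative, the kernel just uses raw u32 slots:

/* Illustrative layout of the save block built by __cpu_suspend above. */
struct suspend_save_block {		/* hypothetical name */
	u32 phys_pgd;	/* virt_to_phys(suspend_pgd),  resumed into r1 */
	u32 virt_sp;	/* kernel SP to restore,       resumed into sp */
	u32 phys_resume;/* virt_to_phys(cpu_do_resume), resumed into pc */
	u32 cpu_state[];/* CPU_SLEEP_SIZE words written by cpu_do_suspend() */
};
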
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 94f34a6c8610..ef5640b9e218 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -460,10 +460,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
 	for (i = 0; i < NR_IPI; i++)
 		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-#ifdef CONFIG_LOCAL_TIMERS
-	sum += __get_irq_stat(cpu, local_timer_irqs);
-#endif
-
 	return sum;
 }
 
@@ -480,38 +476,6 @@ static void ipi_timer(void)
 	irq_exit();
 }
 
-#ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
-{
-	handle_local_timer(regs);
-}
-
-void handle_local_timer(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	int cpu = smp_processor_id();
-
-	if (local_timer_ack()) {
-		__inc_irq_stat(cpu, local_timer_irqs);
-		ipi_timer();
-	}
-
-	set_irq_regs(old_regs);
-}
-
-void show_local_irqs(struct seq_file *p, int prec)
-{
-	unsigned int cpu;
-
-	seq_printf(p, "%*s: ", prec, "LOC");
-
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
-
-	seq_printf(p, " Local timer interrupts\n");
-}
-#endif
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
@@ -562,7 +526,7 @@ static void percpu_timer_stop(void)
 	unsigned int cpu = smp_processor_id();
 	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
-	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+	local_timer_stop(evt);
 }
 #endif
 
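
do_local_timer()/handle_local_timer() and the LOC statistics row disappear because the TWD is now wired up as an ordinary per-CPU interrupt and accounted like any other IRQ; percpu_timer_stop() defers to local_timer_stop() instead of poking the clockevent directly. An abridged sketch of the <asm/localtimer.h> indirection this relies on, as I read it after this series (not quoted from this patch):

/* Abridged sketch of the <asm/localtimer.h> glue, not the full header. */
#ifdef CONFIG_HAVE_ARM_TWD
#include "smp_twd.h"
#define local_timer_stop(c)	twd_timer_stop((c))
#else
static inline void local_timer_stop(struct clock_event_device *evt)
{
	/* no local timer: nothing to shut down */
}
#endif
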
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 01c186222f3b..a8a6682d6b52 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -19,6 +19,7 @@
 #include <linux/io.h>
 
 #include <asm/smp_twd.h>
+#include <asm/localtimer.h>
 #include <asm/hardware/gic.h>
 
 /* set up by the platform code */
@@ -26,6 +27,8 @@ void __iomem *twd_base;
 
 static unsigned long twd_timer_rate;
 
+static struct clock_event_device __percpu **twd_evt;
+
 static void twd_set_mode(enum clock_event_mode mode,
 			struct clock_event_device *clk)
 {
@@ -80,6 +83,12 @@ int twd_timer_ack(void)
 	return 0;
 }
 
+void twd_timer_stop(struct clock_event_device *clk)
+{
+	twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
+	disable_percpu_irq(clk->irq);
+}
+
 static void __cpuinit twd_calibrate_rate(void)
 {
 	unsigned long count;
@@ -119,11 +128,43 @@ static void __cpuinit twd_calibrate_rate(void)
 	}
 }
 
+static irqreturn_t twd_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;
+
+	if (twd_timer_ack()) {
+		evt->event_handler(evt);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
 /*
  * Setup the local clock events for a CPU.
  */
 void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 {
+	struct clock_event_device **this_cpu_clk;
+
+	if (!twd_evt) {
+		int err;
+
+		twd_evt = alloc_percpu(struct clock_event_device *);
+		if (!twd_evt) {
+			pr_err("twd: can't allocate memory\n");
+			return;
+		}
+
+		err = request_percpu_irq(clk->irq, twd_handler,
+					 "twd", twd_evt);
+		if (err) {
+			pr_err("twd: can't register interrupt %d (%d)\n",
+			       clk->irq, err);
+			return;
+		}
+	}
+
 	twd_calibrate_rate();
 
 	clk->name = "local_timer";
@@ -137,8 +178,10 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
 	clk->max_delta_ns = clockevent_delta2ns(0xffffffff, clk);
 	clk->min_delta_ns = clockevent_delta2ns(0xf, clk);
 
+	this_cpu_clk = __this_cpu_ptr(twd_evt);
+	*this_cpu_clk = clk;
+
 	clockevents_register_device(clk);
 
-	/* Make sure our local interrupt controller has this enabled */
-	gic_enable_ppi(clk->irq);
+	enable_percpu_irq(clk->irq, 0);
 }
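
The conversion above is an instance of the request_percpu_irq() pattern introduced elsewhere in this merge: the PPI handler is registered once with a __percpu dev_id, the handler receives this CPU's slot of that area, and each CPU enables its own copy of the interrupt with enable_percpu_irq(). The same shape, distilled with hypothetical names:

/* Distilled percpu-IRQ pattern; all "my_*" names are hypothetical. */
#include <linux/interrupt.h>
#include <linux/percpu.h>

struct my_dev {
	int counter;			/* stand-in per-CPU state */
};

static struct my_dev __percpu *my_devs;

static irqreturn_t my_ppi_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;	/* already this CPU's instance */

	dev->counter++;
	return IRQ_HANDLED;
}

static int my_ppi_setup(int irq)	/* call on each CPU that uses the PPI */
{
	int err;

	if (!my_devs) {
		my_devs = alloc_percpu(struct my_dev);
		if (!my_devs)
			return -ENOMEM;

		/* one registration covers every CPU's copy of the IRQ */
		err = request_percpu_irq(irq, my_ppi_handler, "my_ppi",
					 my_devs);
		if (err)
			return err;
	}

	enable_percpu_irq(irq, 0);	/* enables it on the calling CPU only */
	return 0;
}
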
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
new file mode 100644
index 000000000000..93a22d282c16
--- /dev/null
+++ b/arch/arm/kernel/suspend.c
@@ -0,0 +1,72 @@
+#include <linux/init.h>
+
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+static pgd_t *suspend_pgd;
+
+extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
+extern void cpu_resume_mmu(void);
+
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ */
+void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
+{
+	*save_ptr = virt_to_phys(ptr);
+
+	/* This must correspond to the LDM in cpu_resume() assembly */
+	*ptr++ = virt_to_phys(suspend_pgd);
+	*ptr++ = sp;
+	*ptr++ = virt_to_phys(cpu_do_resume);
+
+	cpu_do_suspend(ptr);
+
+	flush_cache_all();
+	outer_clean_range(*save_ptr, *save_ptr + ptrsz);
+	outer_clean_range(virt_to_phys(save_ptr),
+			  virt_to_phys(save_ptr) + sizeof(*save_ptr));
+}
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	struct mm_struct *mm = current->active_mm;
+	int ret;
+
+	if (!suspend_pgd)
+		return -EINVAL;
+
+	/*
+	 * Provide a temporary page table with an identity mapping for
+	 * the MMU-enable code, required for resuming. On successful
+	 * resume (indicated by a zero return code), we need to switch
+	 * back to the correct page tables.
+	 */
+	ret = __cpu_suspend(arg, fn);
+	if (ret == 0) {
+		cpu_switch_mm(mm->pgd, mm);
+		local_flush_tlb_all();
+	}
+
+	return ret;
+}
+
+static int __init cpu_suspend_init(void)
+{
+	suspend_pgd = pgd_alloc(&init_mm);
+	if (suspend_pgd) {
+		unsigned long addr = virt_to_phys(cpu_resume_mmu);
+		identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
+	}
+	return suspend_pgd ? 0 : -ENOMEM;
+}
+core_initcall(cpu_suspend_init);
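
For platform code, cpu_suspend() is the whole interface: it saves state, calls the "finisher" with the caches cleaned, and only returns non-zero if the finisher came back instead of powering down (cpu_suspend_abort forces the non-zero value), while a real power-down resumes through cpu_resume and yields 0. A minimal sketch of a platform suspend path, with hypothetical SoC helpers:

/* Illustrative caller; example_soc_enter_retention() is hypothetical. */
#include <linux/kernel.h>
#include <asm/suspend.h>

static void example_soc_enter_retention(void)
{
	/* hypothetical: trigger the SoC power-down sequence here */
}

static int example_finisher(unsigned long arg)
{
	example_soc_enter_retention();
	return 1;	/* reached only if the CPU never lost power */
}

static int example_pm_enter(void)
{
	int ret = cpu_suspend(0, example_finisher);	/* 0 on resume */
	if (ret)
		pr_warning("suspend aborted (%d)\n", ret);
	return ret;
}
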