path: root/arch
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/include/asm/assembler.h | 9
-rw-r--r--  arch/arm/include/asm/dma.h | 11
-rw-r--r--  arch/arm/include/asm/hwcap.h | 36
-rw-r--r--  arch/arm/include/asm/kprobes.h | 28
-rw-r--r--  arch/arm/include/asm/mach/arch.h | 4
-rw-r--r--  arch/arm/include/asm/perf_event.h | 2
-rw-r--r--  arch/arm/include/asm/ptrace.h | 11
-rw-r--r--  arch/arm/kernel/Makefile | 7
-rw-r--r--  arch/arm/kernel/entry-armv.S | 246
-rw-r--r--  arch/arm/kernel/entry-header.S | 12
-rw-r--r--  arch/arm/kernel/kprobes-arm.c | 999
-rw-r--r--  arch/arm/kernel/kprobes-common.c | 577
-rw-r--r--  arch/arm/kernel/kprobes-decode.c | 1670
-rw-r--r--  arch/arm/kernel/kprobes-thumb.c | 1462
-rw-r--r--  arch/arm/kernel/kprobes.c | 222
-rw-r--r--  arch/arm/kernel/kprobes.h | 420
-rw-r--r--  arch/arm/kernel/perf_event.c | 6
-rw-r--r--  arch/arm/kernel/perf_event_v7.c | 344
-rw-r--r--  arch/arm/kernel/ptrace.c | 28
-rw-r--r--  arch/arm/kernel/setup.c | 10
-rw-r--r--  arch/arm/kernel/traps.c | 17
-rw-r--r--  arch/arm/mach-davinci/board-da830-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-da850-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm355-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm355-leopard.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm644x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-dm646x-evm.c | 2
-rw-r--r--  arch/arm/mach-davinci/board-mityomapl138.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-neuros-osd2.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-omapl138-hawk.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-sffsdr.c | 1
-rw-r--r--  arch/arm/mach-davinci/board-tnetv107x-evm.c | 1
-rw-r--r--  arch/arm/mach-davinci/include/mach/memory.h | 7
-rw-r--r--  arch/arm/mach-h720x/h7201-eval.c | 1
-rw-r--r--  arch/arm/mach-h720x/h7202-eval.c | 1
-rw-r--r--  arch/arm/mach-h720x/include/mach/memory.h | 7
-rw-r--r--  arch/arm/mach-ixp4xx/avila-setup.c | 6
-rw-r--r--  arch/arm/mach-ixp4xx/coyote-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/dsmg600-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/fsg-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/gateway7001-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/goramo_mlr.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/gtwx5715-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-ixp4xx/ixdp425-setup.c | 12
-rw-r--r--  arch/arm/mach-ixp4xx/nas100d-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/nslu2-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/vulcan-setup.c | 3
-rw-r--r--  arch/arm/mach-ixp4xx/wg302v2-setup.c | 3
-rw-r--r--  arch/arm/mach-pxa/cm-x2xx.c | 3
-rw-r--r--  arch/arm/mach-pxa/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-realview/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-realview/realview_eb.c | 3
-rw-r--r--  arch/arm/mach-realview/realview_pb1176.c | 3
-rw-r--r--  arch/arm/mach-realview/realview_pb11mp.c | 3
-rw-r--r--  arch/arm/mach-realview/realview_pba8.c | 3
-rw-r--r--  arch/arm/mach-realview/realview_pbx.c | 3
-rw-r--r--  arch/arm/mach-sa1100/assabet.c | 3
-rw-r--r--  arch/arm/mach-sa1100/badge4.c | 3
-rw-r--r--  arch/arm/mach-sa1100/include/mach/memory.h | 4
-rw-r--r--  arch/arm/mach-sa1100/jornada720.c | 3
-rw-r--r--  arch/arm/mach-shark/core.c | 1
-rw-r--r--  arch/arm/mach-shark/include/mach/memory.h | 2
-rw-r--r--  arch/arm/mm/cache-fa.S | 15
-rw-r--r--  arch/arm/mm/cache-v3.S | 15
-rw-r--r--  arch/arm/mm/cache-v4.S | 15
-rw-r--r--  arch/arm/mm/cache-v4wb.S | 15
-rw-r--r--  arch/arm/mm/cache-v4wt.S | 15
-rw-r--r--  arch/arm/mm/cache-v6.S | 15
-rw-r--r--  arch/arm/mm/cache-v7.S | 15
-rw-r--r--  arch/arm/mm/init.c | 20
-rw-r--r--  arch/arm/mm/proc-arm1020.S | 45
-rw-r--r--  arch/arm/mm/proc-arm1020e.S | 52
-rw-r--r--  arch/arm/mm/proc-arm1022.S | 52
-rw-r--r--  arch/arm/mm/proc-arm1026.S | 53
-rw-r--r--  arch/arm/mm/proc-arm6_7.S | 166
-rw-r--r--  arch/arm/mm/proc-arm720.S | 85
-rw-r--r--  arch/arm/mm/proc-arm740.S | 42
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S | 216
-rw-r--r--  arch/arm/mm/proc-arm920.S | 56
-rw-r--r--  arch/arm/mm/proc-arm922.S | 53
-rw-r--r--  arch/arm/mm/proc-arm925.S | 88
-rw-r--r--  arch/arm/mm/proc-arm926.S | 54
-rw-r--r--  arch/arm/mm/proc-arm940.S | 51
-rw-r--r--  arch/arm/mm/proc-arm946.S | 53
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S | 78
-rw-r--r--  arch/arm/mm/proc-fa526.S | 38
-rw-r--r--  arch/arm/mm/proc-feroceon.S | 202
-rw-r--r--  arch/arm/mm/proc-macros.S | 68
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 61
-rw-r--r--  arch/arm/mm/proc-sa110.S | 39
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 87
-rw-r--r--  arch/arm/mm/proc-v6.S | 42
-rw-r--r--  arch/arm/mm/proc-v7.S | 139
-rw-r--r--  arch/arm/mm/proc-xsc3.S | 93
-rw-r--r--  arch/arm/mm/proc-xscale.S | 510
-rw-r--r--  arch/arm/mm/tlb-fa.S | 8
-rw-r--r--  arch/arm/mm/tlb-v3.S | 8
-rw-r--r--  arch/arm/mm/tlb-v4.S | 8
-rw-r--r--  arch/arm/mm/tlb-v4wb.S | 8
-rw-r--r--  arch/arm/mm/tlb-v4wbi.S | 8
-rw-r--r--  arch/arm/mm/tlb-v6.S | 8
-rw-r--r--  arch/arm/mm/tlb-v7.S | 9
-rw-r--r--  arch/arm/vfp/vfpmodule.c | 6
106 files changed, 4837 insertions, 4020 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 84fda2bebd7a..894487479f37 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -10,7 +10,7 @@ config ARM
 	select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
-	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
+	select HAVE_KPROBES if !XIP_KERNEL
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 65c3f2474f5e..29035e86a59d 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -293,4 +293,13 @@
 	.macro	ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f
 	usracc	ldr, \reg, \ptr, \inc, \cond, \rept, \abort
 	.endm
+
+/* Utility macro for declaring string literals */
+	.macro	string name:req, string
+	.type \name , #object
+\name:
+	.asciz "\string"
+	.size \name , . - \name
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h
index 42005542932b..628670e9d7c9 100644
--- a/arch/arm/include/asm/dma.h
+++ b/arch/arm/include/asm/dma.h
@@ -1,15 +1,16 @@
 #ifndef __ASM_ARM_DMA_H
 #define __ASM_ARM_DMA_H
 
-#include <asm/memory.h>
-
 /*
  * This is the maximum virtual address which can be DMA'd from.
  */
-#ifndef ARM_DMA_ZONE_SIZE
-#define MAX_DMA_ADDRESS	0xffffffff
+#ifndef CONFIG_ZONE_DMA
+#define MAX_DMA_ADDRESS	0xffffffffUL
 #else
-#define MAX_DMA_ADDRESS	(PAGE_OFFSET + ARM_DMA_ZONE_SIZE)
+#define MAX_DMA_ADDRESS	({ \
+	extern unsigned long arm_dma_zone_size; \
+	arm_dma_zone_size ? \
+		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
 #endif
 
 #ifdef CONFIG_ISA_DMA_API
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h
index c1062c317103..c93a22a8b924 100644
--- a/arch/arm/include/asm/hwcap.h
+++ b/arch/arm/include/asm/hwcap.h
@@ -4,22 +4,26 @@
 /*
  * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
  */
-#define HWCAP_SWP	1
-#define HWCAP_HALF	2
-#define HWCAP_THUMB	4
-#define HWCAP_26BIT	8	/* Play it safe */
-#define HWCAP_FAST_MULT	16
-#define HWCAP_FPA	32
-#define HWCAP_VFP	64
-#define HWCAP_EDSP	128
-#define HWCAP_JAVA	256
-#define HWCAP_IWMMXT	512
-#define HWCAP_CRUNCH	1024
-#define HWCAP_THUMBEE	2048
-#define HWCAP_NEON	4096
-#define HWCAP_VFPv3	8192
-#define HWCAP_VFPv3D16	16384
-#define HWCAP_TLS	32768
+#define HWCAP_SWP	(1 << 0)
+#define HWCAP_HALF	(1 << 1)
+#define HWCAP_THUMB	(1 << 2)
+#define HWCAP_26BIT	(1 << 3)	/* Play it safe */
+#define HWCAP_FAST_MULT	(1 << 4)
+#define HWCAP_FPA	(1 << 5)
+#define HWCAP_VFP	(1 << 6)
+#define HWCAP_EDSP	(1 << 7)
+#define HWCAP_JAVA	(1 << 8)
+#define HWCAP_IWMMXT	(1 << 9)
+#define HWCAP_CRUNCH	(1 << 10)
+#define HWCAP_THUMBEE	(1 << 11)
+#define HWCAP_NEON	(1 << 12)
+#define HWCAP_VFPv3	(1 << 13)
+#define HWCAP_VFPv3D16	(1 << 14)
+#define HWCAP_TLS	(1 << 15)
+#define HWCAP_VFPv4	(1 << 16)
+#define HWCAP_IDIVA	(1 << 17)
+#define HWCAP_IDIVT	(1 << 18)
+#define HWCAP_IDIV	(HWCAP_IDIVA | HWCAP_IDIVT)
 
 #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
 /*
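
Note: these HWCAP bits are reported to user space through the ELF auxiliary vector (AT_HWCAP). A minimal user-space sketch, assuming a libc that provides getauxval() and mirroring the bit values defined above, could test for the new divide-instruction capabilities like this:

/* Illustrative only, not part of this patch. */
#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_IDIVA	(1 << 17)
#define HWCAP_IDIVT	(1 << 18)

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_IDIVA)
		printf("SDIV/UDIV usable in ARM state\n");
	if (hwcap & HWCAP_IDIVT)
		printf("SDIV/UDIV usable in Thumb state\n");
	return 0;
}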
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h
index e46bdd0097eb..feec86768f9c 100644
--- a/arch/arm/include/asm/kprobes.h
+++ b/arch/arm/include/asm/kprobes.h
@@ -24,12 +24,6 @@
 #define MAX_INSN_SIZE			2
 #define MAX_STACK_SIZE			64	/* 32 would probably be OK */
 
-/*
- * This undefined instruction must be unique and
- * reserved solely for kprobes' use.
- */
-#define KPROBE_BREAKPOINT_INSTRUCTION	0xe7f001f8
-
 #define regs_return_value(regs)	((regs)->ARM_r0)
 #define flush_insn_slot(p)		do { } while (0)
 #define kretprobe_blacklist_size	0
@@ -38,14 +32,17 @@ typedef u32 kprobe_opcode_t;
 
 struct kprobe;
 typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *);
-
 typedef unsigned long (kprobe_check_cc)(unsigned long);
+typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *);
+typedef void (kprobe_insn_fn_t)(void);
 
 /* Architecture specific copy of original instruction. */
 struct arch_specific_insn {
 	kprobe_opcode_t		*insn;
 	kprobe_insn_handler_t	*insn_handler;
 	kprobe_check_cc		*insn_check_cc;
+	kprobe_insn_singlestep_t *insn_singlestep;
+	kprobe_insn_fn_t	*insn_fn;
 };
 
 struct prev_kprobe {
@@ -62,20 +59,9 @@ struct kprobe_ctlblk {
 };
 
 void arch_remove_kprobe(struct kprobe *);
-void kretprobe_trampoline(void);
-
 int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
 int kprobe_exceptions_notify(struct notifier_block *self,
 			     unsigned long val, void *data);
 
-enum kprobe_insn {
-	INSN_REJECTED,
-	INSN_GOOD,
-	INSN_GOOD_NO_SLOT
-};
-
-enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
-					struct arch_specific_insn *);
-void __init arm_kprobe_decode_init(void);
 
 #endif /* _ARM_KPROBES_H */
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 946f4d778f71..3281fb4b12e3 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -23,6 +23,10 @@ struct machine_desc {
 
 	unsigned int		nr_irqs;	/* number of IRQs */
 
+#ifdef CONFIG_ZONE_DMA
+	unsigned long		dma_zone_size;	/* size of DMA-able area */
+#endif
+
 	unsigned int		video_start;	/* start of video RAM */
 	unsigned int		video_end;	/* end of video RAM */
 
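
Note: boards that need a DMA zone now declare its size per machine descriptor instead of through a global ARM_DMA_ZONE_SIZE define; the mach-davinci, mach-ixp4xx, mach-realview and similar board files in the diffstat above gain exactly this kind of assignment. A hypothetical board-file sketch (all example_* names are placeholders, not from this patch):

MACHINE_START(EXAMPLE_BOARD, "Example board")
	.boot_params	= 0x80000100,
#ifdef CONFIG_ZONE_DMA
	.dma_zone_size	= SZ_64M,	/* replaces the old ARM_DMA_ZONE_SIZE */
#endif
	.init_irq	= example_init_irq,
	.init_machine	= example_init_machine,
MACHINE_END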
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
index c4aa4e8c6af9..0f8e3827a89b 100644
--- a/arch/arm/include/asm/perf_event.h
+++ b/arch/arm/include/asm/perf_event.h
@@ -24,6 +24,8 @@ enum arm_perf_pmu_ids {
 	ARM_PERF_PMU_ID_V6MP,
 	ARM_PERF_PMU_ID_CA8,
 	ARM_PERF_PMU_ID_CA9,
+	ARM_PERF_PMU_ID_CA5,
+	ARM_PERF_PMU_ID_CA15,
 	ARM_NUM_PMU_IDS,
 };
 
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 312d10877bd7..96187ff58c24 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -69,8 +69,9 @@
 #define PSR_c		0x000000ff	/* Control */
 
 /*
- * ARMv7 groups of APSR bits
+ * ARMv7 groups of PSR bits
  */
+#define APSR_MASK	0xf80f0000	/* N, Z, C, V, Q and GE flags */
 #define PSR_ISET_MASK	0x01000010	/* ISA state (J, T) mask */
 #define PSR_IT_MASK	0x0600fc00	/* If-Then execution state mask */
 #define PSR_ENDIAN_MASK	0x00000200	/* Endianness state mask */
@@ -200,6 +201,14 @@ extern unsigned long profile_pc(struct pt_regs *regs);
 #define PREDICATE_ALWAYS	0xe0000000
 
 /*
+ * True if instr is a 32-bit thumb instruction. This works if instr
+ * is the first or only half-word of a thumb instruction. It also works
+ * when instr holds all 32-bits of a wide thumb instruction if stored
+ * in the form (first_half<<16)|(second_half)
+ */
+#define is_wide_instruction(instr)	((unsigned)(instr) >= 0xe800)
+
+/*
  * kprobe-based event tracer support
  */
 #include <linux/stddef.h>
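
Note: is_wide_instruction() lets callers classify a Thumb half-word before deciding how many half-words to consume. A small illustrative sketch of that usage (the buffer names buf and n are hypothetical):

	u16 *p = buf;
	while (p < buf + n) {
		unsigned int instr = *p;

		if (is_wide_instruction(instr)) {
			/* 32-bit encoding: combine the two half-words */
			instr = (instr << 16) | p[1];
			p += 2;
		} else {
			p += 1;
		}
		/* ... decode instr ... */
	}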
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index a5b31af5c2b8..f7887dc53c1f 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -37,7 +37,12 @@ obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
 obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)	+= ftrace.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
-obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-decode.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o kprobes-common.o
+ifdef CONFIG_THUMB2_KERNEL
+obj-$(CONFIG_KPROBES)		+= kprobes-thumb.o
+else
+obj-$(CONFIG_KPROBES)		+= kprobes-arm.o
+endif
 obj-$(CONFIG_ATAGS_PROC)	+= atags.o
 obj-$(CONFIG_OABI_COMPAT)	+= sys_oabi-compat.o
 obj-$(CONFIG_ARM_THUMBEE)	+= thumbee.o
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fa02a22a4c4b..a87cbf889ff4 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -377,7 +377,7 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
@@ -386,7 +386,7 @@ ENDPROC(__pabt_svc)
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
 	cmp	r4, #TASK_SIZE
-	blhs	kuser_cmpxchg_fixup
+	blhs	kuser_cmpxchg64_fixup
 #endif
 #endif
 	.endm
@@ -701,31 +701,12 @@ ENDPROC(__switch_to)
 /*
  * User helpers.
  *
- * These are segment of kernel provided user code reachable from user space
- * at a fixed address in kernel memory.  This is used to provide user space
- * with some operations which require kernel help because of unimplemented
- * native feature and/or instructions in many ARM CPUs. The idea is for
- * this code to be executed directly in user mode for best efficiency but
- * which is too intimate with the kernel counter part to be left to user
- * libraries.  In fact this code might even differ from one CPU to another
- * depending on the available instruction set and restrictions like on
- * SMP systems.  In other words, the kernel reserves the right to change
- * this code as needed without warning. Only the entry points and their
- * results are guaranteed to be stable.
- *
  * Each segment is 32-byte aligned and will be moved to the top of the high
  * vector page.  New segments (if ever needed) must be added in front of
  * existing ones.  This mechanism should be used only for things that are
  * really small and justified, and not be abused freely.
  *
- * User space is expected to implement those things inline when optimizing
- * for a processor that has the necessary native support, but only if such
- * resulting binaries are already to be incompatible with earlier ARM
- * processors due to the use of unsupported instructions other than what
- * is provided here.  In other words don't make binaries unable to run on
- * earlier processors just for the sake of not using these kernel helpers
- * if your compiled code is not going to use the new instructions for other
- * purpose.
+ * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
  THUMB(	.arm	)
 
@@ -742,96 +723,103 @@ ENDPROC(__switch_to)
 __kuser_helper_start:
 
 /*
- * Reference prototype:
- *
- * void __kernel_memory_barrier(void)
- *
- * Input:
- *
- * lr = return address
- *
- * Output:
- *
- * none
- *
- * Clobbered:
- *
- * none
- *
- * Definition and user space usage example:
- *
- * typedef void (__kernel_dmb_t)(void);
- * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
- *
- * Apply any needed memory barrier to preserve consistency with data modified
- * manually and __kuser_cmpxchg usage.
- *
- * This could be used as follows:
- *
- * #define __kernel_dmb() \
- *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
- *	        : : : "r0", "lr","cc" )
+ * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+ * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
  */
 
-__kuser_memory_barrier:				@ 0xffff0fa0
+__kuser_cmpxchg64:				@ 0xffff0f60
+
+#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+
+	/*
+	 * Poor you.  No fast solution possible...
+	 * The kernel itself must perform the operation.
+	 * A special ghost syscall is used for that (see traps.c).
+	 */
+	stmfd	sp!, {r7, lr}
+	ldr	r7, 1f			@ it's 20 bits
+	swi	__ARM_NR_cmpxchg64
+	ldmfd	sp!, {r7, pc}
+1:	.word	__ARM_NR_cmpxchg64
+
+#elif defined(CONFIG_CPU_32v6K)
+
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, r5, [r0]			@ load old val
+	ldrd	r6, r7, [r1]			@ load new val
+	smp_dmb	arm
+1:	ldrexd	r0, r1, [r2]			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+	strexdeq r3, r6, r7, [r2]		@ store newval if eq
+	teqeq	r3, #1				@ success?
+	beq	1b				@ if no then retry
 	smp_dmb	arm
+	rsbs	r0, r3, #0			@ set returned val and C flag
+	ldmfd	sp!, {r4, r5, r6, r7}
+	bx	lr
+
+#elif !defined(CONFIG_SMP)
+
+#ifdef CONFIG_MMU
+
+	/*
+	 * The only thing that can break atomicity in this cmpxchg64
+	 * implementation is either an IRQ or a data abort exception
+	 * causing another process/thread to be scheduled in the middle of
+	 * the critical sequence.  The same strategy as for cmpxchg is used.
+	 */
+	stmfd	sp!, {r4, r5, r6, lr}
+	ldmia	r0, {r4, r5}			@ load old val
+	ldmia	r1, {r6, lr}			@ load new val
+1:	ldmia	r2, {r0, r1}			@ load current val
+	eors	r3, r0, r4			@ compare with oldval (1)
+	eoreqs	r3, r1, r5			@ compare with oldval (2)
+2:	stmeqia	r2, {r6, lr}			@ store newval if eq
+	rsbs	r0, r3, #0			@ set return val and C flag
+	ldmfd	sp!, {r4, r5, r6, pc}
+
+	.text
+kuser_cmpxchg64_fixup:
+	@ Called from kuser_cmpxchg_fixup.
+	@ r4 = address of interrupted insn (must be preserved).
+	@ sp = saved regs. r7 and r8 are clobbered.
+	@ 1b = first critical insn, 2b = last critical insn.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+	mov	r7, #0xffff0fff
+	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
+	subs	r8, r4, r7
+	rsbcss	r8, r8, #(2b - 1b)
+	strcs	r7, [sp, #S_PC]
+#if __LINUX_ARM_ARCH__ < 6
+	bcc	kuser_cmpxchg32_fixup
+#endif
+	mov	pc, lr
+	.previous
+
+#else
+#warning "NPTL on non MMU needs fixing"
+	mov	r0, #-1
+	adds	r0, r0, #0
 	usr_ret	lr
+#endif
+
+#else
+#error "incoherent kernel configuration"
+#endif
+
+	/* pad to next slot */
+	.rept	(16 - (. - __kuser_cmpxchg64)/4)
+	.word	0
+	.endr
 
 	.align	5
 
-/*
- * Reference prototype:
- *
- * int __kernel_cmpxchg(int oldval, int newval, int *ptr)
- *
- * Input:
- *
- * r0 = oldval
- * r1 = newval
- * r2 = ptr
- * lr = return address
- *
- * Output:
- *
- * r0 = returned value (zero or non-zero)
- * C flag = set if r0 == 0, clear if r0 != 0
- *
- * Clobbered:
- *
- * r3, ip, flags
- *
- * Definition and user space usage example:
- *
- * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
- * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
- *
- * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
- * Return zero if *ptr was changed or non-zero if no exchange happened.
- * The C flag is also set if *ptr was changed to allow for assembly
- * optimization in the calling code.
- *
- * Notes:
- *
- * - This routine already includes memory barriers as needed.
- *
- * For example, a user space atomic_add implementation could look like this:
- *
- * #define atomic_add(ptr, val) \
- *	({ register unsigned int *__ptr asm("r2") = (ptr); \
- *	   register unsigned int __result asm("r1"); \
- *	   asm volatile ( \
- *	       "1: @ atomic_add\n\t" \
- *	       "ldr	r0, [r2]\n\t" \
- *	       "mov	r3, #0xffff0fff\n\t" \
- *	       "add	lr, pc, #4\n\t" \
- *	       "add	r1, r0, %2\n\t" \
- *	       "add	pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
- *	       "bcc	1b" \
- *	       : "=&r" (__result) \
- *	       : "r" (__ptr), "rIL" (val) \
- *	       : "r0","r3","ip","lr","cc","memory" ); \
- *	   __result; })
- */
-
+__kuser_memory_barrier:				@ 0xffff0fa0
+	smp_dmb	arm
+	usr_ret	lr
+
+	.align	5
+
 __kuser_cmpxchg:				@ 0xffff0fc0
 
@@ -868,7 +856,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
 	usr_ret	lr
 
 	.text
-kuser_cmpxchg_fixup:
+kuser_cmpxchg32_fixup:
 	@ Called from kuser_cmpxchg_check macro.
 	@ r4 = address of interrupted insn (must be preserved).
 	@ sp = saved regs. r7 and r8 are clobbered.
@@ -906,39 +894,6 @@ kuser_cmpxchg_fixup:
 
 	.align	5
 
-/*
- * Reference prototype:
- *
- * int __kernel_get_tls(void)
- *
- * Input:
- *
- * lr = return address
- *
- * Output:
- *
- * r0 = TLS value
- *
- * Clobbered:
- *
- * none
- *
- * Definition and user space usage example:
- *
- * typedef int (__kernel_get_tls_t)(void);
- * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
- *
- * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
- *
- * This could be used as follows:
- *
- * #define __kernel_get_tls() \
- *	({ register unsigned int __val asm("r0"); \
- *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
- *	        : "=r" (__val) : : "lr","cc" ); \
- *	   __val; })
- */
-
 __kuser_get_tls:				@ 0xffff0fe0
 	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
 	usr_ret	lr
@@ -947,19 +902,6 @@ __kuser_get_tls: @ 0xffff0fe0
 	.word	0			@ 0xffff0ff0 software TLS value, then
 	.endr				@ pad up to __kuser_helper_version
 
-/*
- * Reference declaration:
- *
- * extern unsigned int __kernel_helper_version;
- *
- * Definition and user space usage example:
- *
- * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
- *
- * User space may read this to determine the curent number of helpers
- * available.
- */
-
 __kuser_helper_version:			@ 0xffff0ffc
 	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)
 
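
Note: the reference prototypes deleted above were moved to Documentation/arm/kernel_user_helpers.txt rather than dropped. For orientation only, a hedged user-space sketch of the new 64-bit helper, following the register usage visible in the assembly (r0 = pointer to oldval, r1 = pointer to newval, r2 = pointer to the target, zero returned and C set on a successful exchange); the exact, authoritative types are the ones in the documentation file:

/* Illustrative only, not part of this patch. */
typedef int (__kernel_cmpxchg64_t)(const long long *oldval,
				   const long long *newval,
				   volatile long long *ptr);
#define __kernel_cmpxchg64 (*(__kernel_cmpxchg64_t *)0xffff0f60)

static int store_if_equal64(volatile long long *ptr,
			    long long old, long long new)
{
	/* returns nonzero if *ptr was old and has been replaced by new */
	return __kernel_cmpxchg64(&old, &new, ptr) == 0;
}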
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4d6ad8348e89..9a8531eadd3d 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -121,15 +121,13 @@
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
 	.macro	svc_exit, rpsr
+	ldr	lr, [sp, #S_SP]			@ top of the stack
+	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc
 	clrex					@ clear the exclusive monitor
-	ldr	r0, [sp, #S_SP]			@ top of the stack
-	ldr	r1, [sp, #S_PC]			@ return address
-	tst	r0, #4				@ orig stack 8-byte aligned?
-	stmdb	r0, {r1, \rpsr}			@ rfe context
+	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
 	ldmia	sp, {r0 - r12}
-	ldr	lr, [sp, #S_LR]
-	addeq	sp, sp, #S_FRAME_SIZE - 8	@ aligned
-	addne	sp, sp, #S_FRAME_SIZE - 4	@ not aligned
+	mov	sp, lr
+	ldr	lr, [sp], #4
 	rfeia	sp!
 	.endm
 
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c
new file mode 100644
index 000000000000..79203ee1d039
--- /dev/null
+++ b/arch/arm/kernel/kprobes-arm.c
@@ -0,0 +1,999 @@
1/*
2 * arch/arm/kernel/kprobes-decode.c
3 *
4 * Copyright (C) 2006, 2007 Motorola Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16/*
17 * We do not have hardware single-stepping on ARM, This
18 * effort is further complicated by the ARM not having a
19 * "next PC" register. Instructions that change the PC
20 * can't be safely single-stepped in a MP environment, so
21 * we have a lot of work to do:
22 *
23 * In the prepare phase:
24 * *) If it is an instruction that does anything
25 * with the CPU mode, we reject it for a kprobe.
26 * (This is out of laziness rather than need. The
27 * instructions could be simulated.)
28 *
29 * *) Otherwise, decode the instruction rewriting its
30 * registers to take fixed, ordered registers and
31 * setting a handler for it to run the instruction.
32 *
33 * In the execution phase by an instruction's handler:
34 *
35 * *) If the PC is written to by the instruction, the
36 * instruction must be fully simulated in software.
37 *
38 * *) Otherwise, a modified form of the instruction is
39 * directly executed. Its handler calls the
40 * instruction in insn[0]. In insn[1] is a
41 * "mov pc, lr" to return.
42 *
43 * Before calling, load up the reordered registers
44 * from the original instruction's registers. If one
45 * of the original input registers is the PC, compute
46 * and adjust the appropriate input register.
47 *
48 * After call completes, copy the output registers to
49 * the original instruction's original registers.
50 *
51 * We don't use a real breakpoint instruction since that
52 * would have us in the kernel go from SVC mode to SVC
53 * mode losing the link register. Instead we use an
54 * undefined instruction. To simplify processing, the
55 * undefined instruction used for kprobes must be reserved
56 * exclusively for kprobes use.
57 *
58 * TODO: ifdef out some instruction decoding based on architecture.
59 */
60
61#include <linux/kernel.h>
62#include <linux/kprobes.h>
63
64#include "kprobes.h"
65
66#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
67
68#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
69
70#if __LINUX_ARM_ARCH__ >= 6
71#define BLX(reg) "blx "reg" \n\t"
72#else
73#define BLX(reg) "mov lr, pc \n\t" \
74 "mov pc, "reg" \n\t"
75#endif
76
77/*
78 * To avoid the complications of mimicing single-stepping on a
79 * processor without a Next-PC or a single-step mode, and to
80 * avoid having to deal with the side-effects of boosting, we
81 * simulate or emulate (almost) all ARM instructions.
82 *
83 * "Simulation" is where the instruction's behavior is duplicated in
84 * C code. "Emulation" is where the original instruction is rewritten
85 * and executed, often by altering its registers.
86 *
87 * By having all behavior of the kprobe'd instruction completed before
88 * returning from the kprobe_handler(), all locks (scheduler and
89 * interrupt) can safely be released. There is no need for secondary
90 * breakpoints, no race with MP or preemptable kernels, nor having to
91 * clean up resources counts at a later time impacting overall system
92 * performance. By rewriting the instruction, only the minimum registers
93 * need to be loaded and saved back optimizing performance.
94 *
95 * Calling the insnslot_*_rwflags version of a function doesn't hurt
96 * anything even when the CPSR flags aren't updated by the
97 * instruction. It's just a little slower in return for saving
98 * a little space by not having a duplicate function that doesn't
99 * update the flags. (The same optimization can be said for
100 * instructions that do or don't perform register writeback)
101 * Also, instructions can either read the flags, only write the
102 * flags, or read and write the flags. To save combinations
103 * rather than for sheer performance, flag functions just assume
104 * read and write of flags.
105 */
106
107static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
108{
109 kprobe_opcode_t insn = p->opcode;
110 long iaddr = (long)p->addr;
111 int disp = branch_displacement(insn);
112
113 if (insn & (1 << 24))
114 regs->ARM_lr = iaddr + 4;
115
116 regs->ARM_pc = iaddr + 8 + disp;
117}
118
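/*
 * Illustrative note, not part of the original file: for the ARM instruction
 * 0xeb000001 (BL with imm24 == 1), branch_displacement() gives
 * sign_extend(1 << 2, 25) == 4, so simulate_bbl() sets ARM_lr = addr + 4 and
 * ARM_pc = addr + 8 + 4, matching the "PC reads 8 bytes ahead" convention
 * used throughout this file.
 */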
119static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
120{
121 kprobe_opcode_t insn = p->opcode;
122 long iaddr = (long)p->addr;
123 int disp = branch_displacement(insn);
124
125 regs->ARM_lr = iaddr + 4;
126 regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
127 regs->ARM_cpsr |= PSR_T_BIT;
128}
129
130static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
131{
132 kprobe_opcode_t insn = p->opcode;
133 int rm = insn & 0xf;
134 long rmv = regs->uregs[rm];
135
136 if (insn & (1 << 5))
137 regs->ARM_lr = (long)p->addr + 4;
138
139 regs->ARM_pc = rmv & ~0x1;
140 regs->ARM_cpsr &= ~PSR_T_BIT;
141 if (rmv & 0x1)
142 regs->ARM_cpsr |= PSR_T_BIT;
143}
144
145static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
146{
147 kprobe_opcode_t insn = p->opcode;
148 int rd = (insn >> 12) & 0xf;
149 unsigned long mask = 0xf8ff03df; /* Mask out execution state */
150 regs->uregs[rd] = regs->ARM_cpsr & mask;
151}
152
153static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
154{
155 regs->uregs[12] = regs->uregs[13];
156}
157
158static void __kprobes
159emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
160{
161 kprobe_opcode_t insn = p->opcode;
162 unsigned long pc = (unsigned long)p->addr + 8;
163 int rt = (insn >> 12) & 0xf;
164 int rn = (insn >> 16) & 0xf;
165 int rm = insn & 0xf;
166
167 register unsigned long rtv asm("r0") = regs->uregs[rt];
168 register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
169 register unsigned long rnv asm("r2") = (rn == 15) ? pc
170 : regs->uregs[rn];
171 register unsigned long rmv asm("r3") = regs->uregs[rm];
172
173 __asm__ __volatile__ (
174 BLX("%[fn]")
175 : "=r" (rtv), "=r" (rt2v), "=r" (rnv)
176 : "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
177 [fn] "r" (p->ainsn.insn_fn)
178 : "lr", "memory", "cc"
179 );
180
181 regs->uregs[rt] = rtv;
182 regs->uregs[rt+1] = rt2v;
183 if (is_writeback(insn))
184 regs->uregs[rn] = rnv;
185}
186
187static void __kprobes
188emulate_ldr(struct kprobe *p, struct pt_regs *regs)
189{
190 kprobe_opcode_t insn = p->opcode;
191 unsigned long pc = (unsigned long)p->addr + 8;
192 int rt = (insn >> 12) & 0xf;
193 int rn = (insn >> 16) & 0xf;
194 int rm = insn & 0xf;
195
196 register unsigned long rtv asm("r0");
197 register unsigned long rnv asm("r2") = (rn == 15) ? pc
198 : regs->uregs[rn];
199 register unsigned long rmv asm("r3") = regs->uregs[rm];
200
201 __asm__ __volatile__ (
202 BLX("%[fn]")
203 : "=r" (rtv), "=r" (rnv)
204 : "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
205 : "lr", "memory", "cc"
206 );
207
208 if (rt == 15)
209 load_write_pc(rtv, regs);
210 else
211 regs->uregs[rt] = rtv;
212
213 if (is_writeback(insn))
214 regs->uregs[rn] = rnv;
215}
216
217static void __kprobes
218emulate_str(struct kprobe *p, struct pt_regs *regs)
219{
220 kprobe_opcode_t insn = p->opcode;
221 unsigned long rtpc = (unsigned long)p->addr + str_pc_offset;
222 unsigned long rnpc = (unsigned long)p->addr + 8;
223 int rt = (insn >> 12) & 0xf;
224 int rn = (insn >> 16) & 0xf;
225 int rm = insn & 0xf;
226
227 register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
228 : regs->uregs[rt];
229 register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
230 : regs->uregs[rn];
231 register unsigned long rmv asm("r3") = regs->uregs[rm];
232
233 __asm__ __volatile__ (
234 BLX("%[fn]")
235 : "=r" (rnv)
236 : "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
237 : "lr", "memory", "cc"
238 );
239
240 if (is_writeback(insn))
241 regs->uregs[rn] = rnv;
242}
243
244static void __kprobes
245emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs)
246{
247 kprobe_opcode_t insn = p->opcode;
248 unsigned long pc = (unsigned long)p->addr + 8;
249 int rd = (insn >> 12) & 0xf;
250 int rn = (insn >> 16) & 0xf;
251 int rm = insn & 0xf;
252 int rs = (insn >> 8) & 0xf;
253
254 register unsigned long rdv asm("r0") = regs->uregs[rd];
255 register unsigned long rnv asm("r2") = (rn == 15) ? pc
256 : regs->uregs[rn];
257 register unsigned long rmv asm("r3") = (rm == 15) ? pc
258 : regs->uregs[rm];
259 register unsigned long rsv asm("r1") = regs->uregs[rs];
260 unsigned long cpsr = regs->ARM_cpsr;
261
262 __asm__ __volatile__ (
263 "msr cpsr_fs, %[cpsr] \n\t"
264 BLX("%[fn]")
265 "mrs %[cpsr], cpsr \n\t"
266 : "=r" (rdv), [cpsr] "=r" (cpsr)
267 : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
268 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
269 : "lr", "memory", "cc"
270 );
271
272 if (rd == 15)
273 alu_write_pc(rdv, regs);
274 else
275 regs->uregs[rd] = rdv;
276 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
277}
278
279static void __kprobes
280emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
281{
282 kprobe_opcode_t insn = p->opcode;
283 int rd = (insn >> 12) & 0xf;
284 int rn = (insn >> 16) & 0xf;
285 int rm = insn & 0xf;
286
287 register unsigned long rdv asm("r0") = regs->uregs[rd];
288 register unsigned long rnv asm("r2") = regs->uregs[rn];
289 register unsigned long rmv asm("r3") = regs->uregs[rm];
290 unsigned long cpsr = regs->ARM_cpsr;
291
292 __asm__ __volatile__ (
293 "msr cpsr_fs, %[cpsr] \n\t"
294 BLX("%[fn]")
295 "mrs %[cpsr], cpsr \n\t"
296 : "=r" (rdv), [cpsr] "=r" (cpsr)
297 : "0" (rdv), "r" (rnv), "r" (rmv),
298 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
299 : "lr", "memory", "cc"
300 );
301
302 regs->uregs[rd] = rdv;
303 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
304}
305
306static void __kprobes
307emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
308{
309 kprobe_opcode_t insn = p->opcode;
310 int rd = (insn >> 16) & 0xf;
311 int rn = (insn >> 12) & 0xf;
312 int rm = insn & 0xf;
313 int rs = (insn >> 8) & 0xf;
314
315 register unsigned long rdv asm("r2") = regs->uregs[rd];
316 register unsigned long rnv asm("r0") = regs->uregs[rn];
317 register unsigned long rmv asm("r3") = regs->uregs[rm];
318 register unsigned long rsv asm("r1") = regs->uregs[rs];
319 unsigned long cpsr = regs->ARM_cpsr;
320
321 __asm__ __volatile__ (
322 "msr cpsr_fs, %[cpsr] \n\t"
323 BLX("%[fn]")
324 "mrs %[cpsr], cpsr \n\t"
325 : "=r" (rdv), [cpsr] "=r" (cpsr)
326 : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
327 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
328 : "lr", "memory", "cc"
329 );
330
331 regs->uregs[rd] = rdv;
332 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
333}
334
335static void __kprobes
336emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs)
337{
338 kprobe_opcode_t insn = p->opcode;
339 int rd = (insn >> 12) & 0xf;
340 int rm = insn & 0xf;
341
342 register unsigned long rdv asm("r0") = regs->uregs[rd];
343 register unsigned long rmv asm("r3") = regs->uregs[rm];
344
345 __asm__ __volatile__ (
346 BLX("%[fn]")
347 : "=r" (rdv)
348 : "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
349 : "lr", "memory", "cc"
350 );
351
352 regs->uregs[rd] = rdv;
353}
354
355static void __kprobes
356emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs)
357{
358 kprobe_opcode_t insn = p->opcode;
359 int rdlo = (insn >> 12) & 0xf;
360 int rdhi = (insn >> 16) & 0xf;
361 int rn = insn & 0xf;
362 int rm = (insn >> 8) & 0xf;
363
364 register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
365 register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
366 register unsigned long rnv asm("r3") = regs->uregs[rn];
367 register unsigned long rmv asm("r1") = regs->uregs[rm];
368 unsigned long cpsr = regs->ARM_cpsr;
369
370 __asm__ __volatile__ (
371 "msr cpsr_fs, %[cpsr] \n\t"
372 BLX("%[fn]")
373 "mrs %[cpsr], cpsr \n\t"
374 : "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
375 : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
376 "2" (cpsr), [fn] "r" (p->ainsn.insn_fn)
377 : "lr", "memory", "cc"
378 );
379
380 regs->uregs[rdlo] = rdlov;
381 regs->uregs[rdhi] = rdhiv;
382 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
383}
384
385/*
386 * For the instruction masking and comparisons in all the "space_*"
387 * functions below, Do _not_ rearrange the order of tests unless
388 * you're very, very sure of what you are doing. For the sake of
389 * efficiency, the masks for some tests sometimes assume other test
390 * have been done prior to them so the number of patterns to test
391 * for an instruction set can be as broad as possible to reduce the
392 * number of tests needed.
393 */
394
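/*
 * Illustrative note, not part of the original file: each DECODE_* entry below
 * carries a (mask, value) pair, and an instruction is handled by the first
 * entry for which the test sketched here succeeds:
 *
 *	static bool decode_matches(kprobe_opcode_t insn, u32 mask, u32 value)
 *	{
 *		return (insn & mask) == value;
 *	}
 *
 * which is why the ordering warning above must be taken seriously.
 */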
395static const union decode_item arm_1111_table[] = {
396 /* Unconditional instructions */
397
398 /* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */
399 /* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */
400 /* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */
401 /* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */
402 DECODE_SIMULATE (0xfe300000, 0xf4100000, kprobe_simulate_nop),
403
404 /* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */
405 /* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */
406 /* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */
407 /* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */
408 DECODE_SIMULATE (0xfe300010, 0xf6100000, kprobe_simulate_nop),
409
410 /* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */
411 DECODE_SIMULATE (0xfe000000, 0xfa000000, simulate_blx1),
412
413 /* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
414 /* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
415 /* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
416 /* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
417
418 /* Coprocessor instructions... */
419 /* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */
420 /* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */
421 /* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
422 /* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
423 /* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
424 /* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
425 /* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
426
427 /* Other unallocated instructions... */
428 DECODE_END
429};
430
431static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = {
432 /* Miscellaneous instructions */
433
434 /* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
435 DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs,
436 REGS(0, NOPC, 0, 0, 0)),
437
438 /* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
439 DECODE_SIMULATE (0x0ff000f0, 0x01200010, simulate_blx2bx),
440
441 /* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
442 DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx,
443 REGS(0, 0, 0, 0, NOPC)),
444
445 /* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
446 DECODE_EMULATEX (0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc,
447 REGS(0, NOPC, 0, 0, NOPC)),
448
449 /* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */
450 /* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */
451 /* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */
452 /* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */
453 DECODE_EMULATEX (0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc,
454 REGS(NOPC, NOPC, 0, 0, NOPC)),
455
456 /* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
457 /* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
458 /* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
459 /* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
460 /* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
461 /* And unallocated instructions... */
462 DECODE_END
463};
464
465static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = {
466 /* Halfword multiply and multiply-accumulate */
467
468 /* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
469 DECODE_EMULATEX (0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
470 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
471
472 /* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
473 DECODE_OR (0x0ff000b0, 0x012000a0),
474 /* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
475 DECODE_EMULATEX (0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc,
476 REGS(NOPC, 0, NOPC, 0, NOPC)),
477
478 /* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */
479 DECODE_OR (0x0ff00090, 0x01000080),
480 /* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */
481 DECODE_EMULATEX (0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc,
482 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
483
484 DECODE_END
485};
486
487static const union decode_item arm_cccc_0000_____1001_table[] = {
488 /* Multiply and multiply-accumulate */
489
490 /* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */
491 /* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */
492 DECODE_EMULATEX (0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc,
493 REGS(NOPC, 0, NOPC, 0, NOPC)),
494
495 /* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */
496 /* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */
497 DECODE_OR (0x0fe000f0, 0x00200090),
498 /* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */
499 DECODE_EMULATEX (0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc,
500 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
501
502 /* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */
503 DECODE_OR (0x0ff000f0, 0x00400090),
504 /* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */
505 /* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */
506 /* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */
507 /* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */
508 /* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */
509 /* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */
510 /* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */
511 /* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */
512 DECODE_EMULATEX (0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
513 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
514
515 DECODE_END
516};
517
518static const union decode_item arm_cccc_0001_____1001_table[] = {
519 /* Synchronization primitives */
520
521 /* SMP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
522 DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
523 REGS(NOPC, NOPC, 0, 0, NOPC)),
524
525 /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
526 /* And unallocated instructions... */
527 DECODE_END
528};
529
530static const union decode_item arm_cccc_000x_____1xx1_table[] = {
531 /* Extra load/store instructions */
532
533 /* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */
534 /* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */
535 /* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */
536 /* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */
537 /* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */
538 DECODE_REJECT (0x0f200090, 0x00200090),
539
540 /* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */
541 DECODE_REJECT (0x0e10e0d0, 0x0000e0d0),
542
543 /* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */
544 /* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */
545 DECODE_EMULATEX (0x0e5000d0, 0x000000d0, emulate_ldrdstrd,
546 REGS(NOPCWB, NOPCX, 0, 0, NOPC)),
547
548 /* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */
549 /* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */
550 DECODE_EMULATEX (0x0e5000d0, 0x004000d0, emulate_ldrdstrd,
551 REGS(NOPCWB, NOPCX, 0, 0, 0)),
552
553 /* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */
554 DECODE_EMULATEX (0x0e5000f0, 0x000000b0, emulate_str,
555 REGS(NOPCWB, NOPC, 0, 0, NOPC)),
556
557 /* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */
558 /* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */
559 /* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */
560 DECODE_EMULATEX (0x0e500090, 0x00100090, emulate_ldr,
561 REGS(NOPCWB, NOPC, 0, 0, NOPC)),
562
563 /* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */
564 DECODE_EMULATEX (0x0e5000f0, 0x004000b0, emulate_str,
565 REGS(NOPCWB, NOPC, 0, 0, 0)),
566
567 /* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */
568 /* LDRSB (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */
569 /* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */
570 DECODE_EMULATEX (0x0e500090, 0x00500090, emulate_ldr,
571 REGS(NOPCWB, NOPC, 0, 0, 0)),
572
573 DECODE_END
574};
575
576static const union decode_item arm_cccc_000x_table[] = {
577 /* Data-processing (register) */
578
579 /* <op>S PC, ... cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */
580 DECODE_REJECT (0x0e10f000, 0x0010f000),
581
582 /* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */
583 DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, simulate_mov_ipsp),
584
585 /* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */
586 /* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */
587 /* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */
588 /* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */
589 DECODE_EMULATEX (0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags,
590 REGS(ANY, 0, 0, 0, ANY)),
591
592 /* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */
593 /* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */
594 DECODE_EMULATEX (0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags,
595 REGS(0, ANY, 0, 0, ANY)),
596
597 /* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */
598 /* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */
599 /* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */
600 /* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */
601 /* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */
602 /* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */
603 /* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */
604 /* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */
605 /* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */
606 /* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */
607 DECODE_EMULATEX (0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags,
608 REGS(ANY, ANY, 0, 0, ANY)),
609
610 /* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */
611 /* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */
612 /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
613 /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
614 DECODE_EMULATEX (0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags,
615 REGS(ANY, 0, NOPC, 0, ANY)),
616
617 /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
618 /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
619 DECODE_EMULATEX (0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags,
620 REGS(0, ANY, NOPC, 0, ANY)),
621
622 /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
623 /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
624 /* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */
625 /* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */
626 /* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */
627 /* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */
628 /* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */
629 /* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */
630 /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
631 /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
632 DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
633 REGS(ANY, ANY, NOPC, 0, ANY)),
634
635 DECODE_END
636};
637
638static const union decode_item arm_cccc_001x_table[] = {
639 /* Data-processing (immediate) */
640
641 /* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
642 /* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
643 DECODE_EMULATEX (0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc,
644 REGS(0, NOPC, 0, 0, 0)),
645
646 /* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
647 DECODE_OR (0x0fff00ff, 0x03200001),
648 /* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
649 DECODE_EMULATE (0x0fff00ff, 0x03200004, kprobe_emulate_none),
650 /* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
651 /* WFE cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
652 /* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
653 DECODE_SIMULATE (0x0fff00fc, 0x03200000, kprobe_simulate_nop),
654 /* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */
655 /* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
656 /* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */
657 DECODE_REJECT (0x0fb00000, 0x03200000),
658
659 /* <op>S PC, ... cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */
660 DECODE_REJECT (0x0e10f000, 0x0210f000),
661
662 /* TST (immediate) cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */
663 /* TEQ (immediate) cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */
664 /* CMP (immediate) cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */
665 /* CMN (immediate) cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */
666 DECODE_EMULATEX (0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags,
667 REGS(ANY, 0, 0, 0, 0)),
668
669 /* MOV (immediate) cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */
670 /* MVN (immediate) cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */
671 DECODE_EMULATEX (0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags,
672 REGS(0, ANY, 0, 0, 0)),
673
674 /* AND (immediate) cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */
675 /* EOR (immediate) cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */
676 /* SUB (immediate) cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */
677 /* RSB (immediate) cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */
678 /* ADD (immediate) cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */
679 /* ADC (immediate) cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */
680 /* SBC (immediate) cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */
681 /* RSC (immediate) cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */
682 /* ORR (immediate) cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */
683 /* BIC (immediate) cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */
684 DECODE_EMULATEX (0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags,
685 REGS(ANY, ANY, 0, 0, 0)),
686
687 DECODE_END
688};
689
690static const union decode_item arm_cccc_0110_____xxx1_table[] = {
691 /* Media instructions */
692
693 /* SEL cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */
694 DECODE_EMULATEX (0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc,
695 REGS(NOPC, NOPC, 0, 0, NOPC)),
696
697 /* SSAT cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */
698 /* USAT cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */
699 DECODE_OR(0x0fa00030, 0x06a00010),
700 /* SSAT16 cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */
701 /* USAT16 cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */
702 DECODE_EMULATEX (0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc,
703 REGS(0, NOPC, 0, 0, NOPC)),
704
705 /* REV cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
706 /* REV16 cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
707 /* RBIT cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
708 /* REVSH cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
709 DECODE_EMULATEX (0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc,
710 REGS(0, NOPC, 0, 0, NOPC)),
711
712 /* ??? cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */
713 DECODE_REJECT (0x0fb00010, 0x06000010),
714 /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */
715 DECODE_REJECT (0x0f8000f0, 0x060000b0),
716 /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */
717 DECODE_REJECT (0x0f8000f0, 0x060000d0),
718 /* SADD16 cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */
719 /* SADDSUBX cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */
720 /* SSUBADDX cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */
721 /* SSUB16 cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */
722 /* SADD8 cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */
723 /* SSUB8 cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */
724 /* QADD16 cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */
725 /* QADDSUBX cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */
726 /* QSUBADDX cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */
727 /* QSUB16 cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */
728 /* QADD8 cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */
729 /* QSUB8 cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */
730 /* SHADD16 cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */
731 /* SHADDSUBX cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */
732 /* SHSUBADDX cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */
733 /* SHSUB16 cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */
734 /* SHADD8 cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */
735 /* SHSUB8 cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */
736 /* UADD16 cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */
737 /* UADDSUBX cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */
738 /* USUBADDX cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */
739 /* USUB16 cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */
740 /* UADD8 cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */
741 /* USUB8 cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */
742 /* UQADD16 cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */
743 /* UQADDSUBX cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */
744 /* UQSUBADDX cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */
745 /* UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */
746 /* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */
747 /* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */
748 /* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */
749 /* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */
750 /* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */
751 /* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */
752 /* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */
753 /* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */
754 DECODE_EMULATEX (0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc,
755 REGS(NOPC, NOPC, 0, 0, NOPC)),
756
757 /* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */
758 /* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */
759 DECODE_EMULATEX (0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc,
760 REGS(NOPC, NOPC, 0, 0, NOPC)),
761
762 /* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */
763 /* ??? cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */
764 DECODE_REJECT (0x0fb000f0, 0x06900070),
765
766 /* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */
767 /* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */
768 /* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */
769 /* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */
770 /* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */
771 /* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */
772 DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc,
773 REGS(0, NOPC, 0, 0, NOPC)),
774
775 /* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */
776 /* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */
777 /* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */
778 /* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */
779 /* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */
780 /* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */
781 DECODE_EMULATEX (0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc,
782 REGS(NOPCX, NOPC, 0, 0, NOPC)),
783
784 DECODE_END
785};
786
787static const union decode_item arm_cccc_0111_____xxx1_table[] = {
788 /* Media instructions */
789
790 /* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
791 DECODE_REJECT (0x0ff000f0, 0x07f000f0),
792
793 /* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
794 /* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
795 DECODE_EMULATEX (0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc,
796 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
797
798 /* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */
799 /* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */
800 DECODE_OR (0x0ff0f090, 0x0700f010),
801 /* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */
802 DECODE_OR (0x0ff0f0d0, 0x0750f010),
803 /* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */
804 DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc,
805 REGS(NOPC, 0, NOPC, 0, NOPC)),
806
807 /* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */
808 /* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */
809 DECODE_OR (0x0ff00090, 0x07000010),
810 /* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */
811 DECODE_OR (0x0ff000d0, 0x07500010),
812 /* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */
813 DECODE_EMULATEX (0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc,
814 REGS(NOPC, NOPCX, NOPC, 0, NOPC)),
815
816 /* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */
817 DECODE_EMULATEX (0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc,
818 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
819
820 /* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */
821 /* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */
822 DECODE_EMULATEX (0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc,
823 REGS(0, NOPC, 0, 0, NOPC)),
824
825 /* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */
826 DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc,
827 REGS(0, NOPC, 0, 0, 0)),
828
829 /* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */
830 DECODE_EMULATEX (0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc,
831 REGS(0, NOPC, 0, 0, NOPCX)),
832
833 DECODE_END
834};
835
836static const union decode_item arm_cccc_01xx_table[] = {
837 /* Load/store word and unsigned byte */
838
839 /* LDRB/STRB pc,[...] cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */
840 DECODE_REJECT (0x0c40f000, 0x0440f000),
841
842 /* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
843 /* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
844 /* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
845 /* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
846 DECODE_REJECT (0x0d200000, 0x04200000),
847
848 /* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */
849 /* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */
850 DECODE_EMULATEX (0x0e100000, 0x04000000, emulate_str,
851 REGS(NOPCWB, ANY, 0, 0, 0)),
852
853 /* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */
854 /* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */
855 DECODE_EMULATEX (0x0e100000, 0x04100000, emulate_ldr,
856 REGS(NOPCWB, ANY, 0, 0, 0)),
857
858 /* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */
859 /* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */
860 DECODE_EMULATEX (0x0e100000, 0x06000000, emulate_str,
861 REGS(NOPCWB, ANY, 0, 0, NOPC)),
862
863 /* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */
864 /* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */
865 DECODE_EMULATEX (0x0e100000, 0x06100000, emulate_ldr,
866 REGS(NOPCWB, ANY, 0, 0, NOPC)),
867
868 DECODE_END
869};
870
871static const union decode_item arm_cccc_100x_table[] = {
872 /* Block data transfer instructions */
873
874 /* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
875 /* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
876 DECODE_CUSTOM (0x0e400000, 0x08000000, kprobe_decode_ldmstm),
877
878 /* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
879 /* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */
880 /* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
881 DECODE_END
882};
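
/*
 * The DECODE_CUSTOM entry above hands the whole LDM/STM encoding space to
 * kprobe_decode_ldmstm() in kprobes-common.c, which chooses between
 * emulation with remapped registers and pure simulation depending on which
 * registers appear in the register list.
 */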
883
884const union decode_item kprobe_decode_arm_table[] = {
885 /*
886 * Unconditional instructions
887 * 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx
888 */
889 DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table),
890
891 /*
892 * Miscellaneous instructions
893 * cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx
894 */
895 DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table),
896
897 /*
898 * Halfword multiply and multiply-accumulate
899 * cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx
900 */
901 DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table),
902
903 /*
904 * Multiply and multiply-accumulate
905 * cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx
906 */
907 DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table),
908
909 /*
910 * Synchronization primitives
911 * cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx
912 */
913 DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table),
914
915 /*
916 * Extra load/store instructions
917 * cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx
918 */
919 DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table),
920
921 /*
922 * Data-processing (register)
923 * cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx
924 * Data-processing (register-shifted register)
925 * cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx
926 */
927 DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table),
928
929 /*
930 * Data-processing (immediate)
931 * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
932 */
933 DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table),
934
935 /*
936 * Media instructions
937 * cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx
938 */
939 DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table),
940 DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table),
941
942 /*
943 * Load/store word and unsigned byte
944 * cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx
945 */
946 DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table),
947
948 /*
949 * Block data transfer instructions
950 * cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx
951 */
952 DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table),
953
954 /* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
955 /* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
956 DECODE_SIMULATE (0x0e000000, 0x0a000000, simulate_bbl),
957
958 /*
959 * Supervisor Call, and coprocessor instructions
960 */
961
962 /* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */
963 /* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */
964 /* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
965 /* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
966 /* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
967 /* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
968 /* MRC cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
969 /* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
970 DECODE_REJECT (0x0c000000, 0x0c000000),
971
972 DECODE_END
973};
974
975static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs)
976{
977 regs->ARM_pc += 4;
978 p->ainsn.insn_handler(p, regs);
979}
980
981/* Return:
982 * INSN_REJECTED If instruction is one not allowed to kprobe,
983 * INSN_GOOD If instruction is supported and uses instruction slot,
984 * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
985 *
986 * For instructions we don't want to kprobe (INSN_REJECTED return result):
987 * These are generally ones that modify the processor state, making
988 * them "hard" to simulate, such as instructions that switch processor
989 * modes or make accesses in alternate modes. Any of these could be
990 * simulated if the work were put into it, but the return would be low
991 * considering they should also be very rare.
992 */
993enum kprobe_insn __kprobes
994arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
995{
996 asi->insn_singlestep = arm_singlestep;
997 asi->insn_check_cc = kprobe_condition_checks[insn>>28];
998 return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false);
999}
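
/*
 * Illustrative caller sketch (an assumption; the real caller lives in
 * kprobes.c and is not shown in this file):
 *
 *	enum kprobe_insn ret = arm_kprobe_decode_insn(insn, &p->ainsn);
 *	if (ret == INSN_REJECTED)
 *		return -EINVAL;
 *
 * For INSN_GOOD the caller must also make sure the instruction slot is
 * visible to the instruction cache before the probe is armed, because the
 * slot holds a (possibly modified) copy which will be executed.
 * INSN_GOOD_NO_SLOT handlers simulate the instruction entirely in C and
 * need no slot at all.
 */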
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c
new file mode 100644
index 000000000000..a5394fb4e4e0
--- /dev/null
+++ b/arch/arm/kernel/kprobes-common.c
@@ -0,0 +1,577 @@
1/*
2 * arch/arm/kernel/kprobes-common.c
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
7 * Copyright (C) 2006, 2007 Motorola Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/kernel.h>
15#include <linux/kprobes.h>
16
17#include "kprobes.h"
18
19
20#ifndef find_str_pc_offset
21
22/*
23 * For STR and STM instructions, an ARM core may choose to use either
24 * a +8 or a +12 displacement from the current instruction's address.
25 * Whichever value is chosen for a given core, it must be the same for
26 * both instructions and may not change. This function measures it.
27 */
28
29int str_pc_offset;
30
31void __init find_str_pc_offset(void)
32{
33 int addr, scratch, ret;
34
35 __asm__ (
36 "sub %[ret], pc, #4 \n\t"
37 "str pc, %[addr] \n\t"
38 "ldr %[scr], %[addr] \n\t"
39 "sub %[ret], %[scr], %[ret] \n\t"
40 : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
41
42 str_pc_offset = ret;
43}
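
/*
 * Worked example: if the "str pc" above sits at address A, the preceding
 * "sub %[ret], pc, #4" (PC reads as its own address + 8) leaves ret = A.
 * The store then writes either A + 8 or A + 12 to memory, so the final
 * subtraction leaves str_pc_offset holding this core's displacement.
 */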
44
45#endif /* !find_str_pc_offset */
46
47
48#ifndef test_load_write_pc_interworking
49
50bool load_write_pc_interworks;
51
52void __init test_load_write_pc_interworking(void)
53{
54 int arch = cpu_architecture();
55 BUG_ON(arch == CPU_ARCH_UNKNOWN);
56 load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T;
57}
58
59#endif /* !test_load_write_pc_interworking */
60
61
62#ifndef test_alu_write_pc_interworking
63
64bool alu_write_pc_interworks;
65
66void __init test_alu_write_pc_interworking(void)
67{
68 int arch = cpu_architecture();
69 BUG_ON(arch == CPU_ARCH_UNKNOWN);
70 alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7;
71}
72
73#endif /* !test_alu_write_pc_interworking */
74
75
76void __init arm_kprobe_decode_init(void)
77{
78 find_str_pc_offset();
79 test_load_write_pc_interworking();
80 test_alu_write_pc_interworking();
81}
82
83
84static unsigned long __kprobes __check_eq(unsigned long cpsr)
85{
86 return cpsr & PSR_Z_BIT;
87}
88
89static unsigned long __kprobes __check_ne(unsigned long cpsr)
90{
91 return (~cpsr) & PSR_Z_BIT;
92}
93
94static unsigned long __kprobes __check_cs(unsigned long cpsr)
95{
96 return cpsr & PSR_C_BIT;
97}
98
99static unsigned long __kprobes __check_cc(unsigned long cpsr)
100{
101 return (~cpsr) & PSR_C_BIT;
102}
103
104static unsigned long __kprobes __check_mi(unsigned long cpsr)
105{
106 return cpsr & PSR_N_BIT;
107}
108
109static unsigned long __kprobes __check_pl(unsigned long cpsr)
110{
111 return (~cpsr) & PSR_N_BIT;
112}
113
114static unsigned long __kprobes __check_vs(unsigned long cpsr)
115{
116 return cpsr & PSR_V_BIT;
117}
118
119static unsigned long __kprobes __check_vc(unsigned long cpsr)
120{
121 return (~cpsr) & PSR_V_BIT;
122}
123
124static unsigned long __kprobes __check_hi(unsigned long cpsr)
125{
126 cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
127 return cpsr & PSR_C_BIT;
128}
129
130static unsigned long __kprobes __check_ls(unsigned long cpsr)
131{
132 cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
133 return (~cpsr) & PSR_C_BIT;
134}
135
136static unsigned long __kprobes __check_ge(unsigned long cpsr)
137{
138 cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
139 return (~cpsr) & PSR_N_BIT;
140}
141
142static unsigned long __kprobes __check_lt(unsigned long cpsr)
143{
144 cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
145 return cpsr & PSR_N_BIT;
146}
147
148static unsigned long __kprobes __check_gt(unsigned long cpsr)
149{
150 unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
151 temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
152 return (~temp) & PSR_N_BIT;
153}
154
155static unsigned long __kprobes __check_le(unsigned long cpsr)
156{
157 unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
158 temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
159 return temp & PSR_N_BIT;
160}
161
162static unsigned long __kprobes __check_al(unsigned long cpsr)
163{
164 return true;
165}
166
167kprobe_check_cc * const kprobe_condition_checks[16] = {
168 &__check_eq, &__check_ne, &__check_cs, &__check_cc,
169 &__check_mi, &__check_pl, &__check_vs, &__check_vc,
170 &__check_hi, &__check_ls, &__check_ge, &__check_lt,
171 &__check_gt, &__check_le, &__check_al, &__check_al
172};
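
/*
 * Note on the bit tricks above: the CPSR flags live at N = bit 31,
 * Z = bit 30, C = bit 29, V = bit 28. In __check_hi(), "cpsr >> 1" moves
 * Z into the C bit position, so "cpsr &= ~(cpsr >> 1)" clears C whenever
 * Z is set, leaving the HI condition (C set and Z clear) testable with a
 * single AND against PSR_C_BIT. Similarly, "cpsr << 3" moves V into the
 * N position for the signed conditions, which compare N with V (and, for
 * GT/LE, also fold in Z).
 */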
173
174
175void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs)
176{
177}
178
179void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs)
180{
181 p->ainsn.insn_fn();
182}
183
184static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
185{
186 kprobe_opcode_t insn = p->opcode;
187 int rn = (insn >> 16) & 0xf;
188 int lbit = insn & (1 << 20);
189 int wbit = insn & (1 << 21);
190 int ubit = insn & (1 << 23);
191 int pbit = insn & (1 << 24);
192 long *addr = (long *)regs->uregs[rn];
193 int reg_bit_vector;
194 int reg_count;
195
196 reg_count = 0;
197 reg_bit_vector = insn & 0xffff;
198 while (reg_bit_vector) {
199 reg_bit_vector &= (reg_bit_vector - 1);
200 ++reg_count;
201 }
202
203 if (!ubit)
204 addr -= reg_count;
205 addr += (!pbit == !ubit);
206
207 reg_bit_vector = insn & 0xffff;
208 while (reg_bit_vector) {
209 int reg = __ffs(reg_bit_vector);
210 reg_bit_vector &= (reg_bit_vector - 1);
211 if (lbit)
212 regs->uregs[reg] = *addr++;
213 else
214 *addr++ = regs->uregs[reg];
215 }
216
217 if (wbit) {
218 if (!ubit)
219 addr -= reg_count;
220 addr -= (!pbit == !ubit);
221 regs->uregs[rn] = (long)addr;
222 }
223}
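
/*
 * Note on the arithmetic above: the first loop counts the registers in
 * the list by repeatedly clearing the lowest set bit. The
 * "addr += (!pbit == !ubit)" term folds the four addressing modes into
 * one path: LDMIA/STMIA (P=0, U=1) adds nothing so transfers start at the
 * base address, IB (P=1, U=1) adds one word, while the decrementing modes
 * subtract reg_count first, so DB starts at base - 4*n and DA at
 * base - 4*n + 4.
 */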
224
225static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
226{
227 regs->ARM_pc = (long)p->addr + str_pc_offset;
228 simulate_ldm1stm1(p, regs);
229 regs->ARM_pc = (long)p->addr + 4;
230}
231
232static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs)
233{
234 simulate_ldm1stm1(p, regs);
235 load_write_pc(regs->ARM_pc, regs);
236}
237
238static void __kprobes
239emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs)
240{
241 register void *rregs asm("r1") = regs;
242 register void *rfn asm("lr") = p->ainsn.insn_fn;
243
244 __asm__ __volatile__ (
245 "stmdb sp!, {%[regs], r11} \n\t"
246 "ldmia %[regs], {r0-r12} \n\t"
247#if __LINUX_ARM_ARCH__ >= 6
248 "blx %[fn] \n\t"
249#else
250 "str %[fn], [sp, #-4]! \n\t"
251 "adr lr, 1f \n\t"
252 "ldr pc, [sp], #4 \n\t"
253 "1: \n\t"
254#endif
255 "ldr lr, [sp], #4 \n\t" /* lr = regs */
256 "stmia lr, {r0-r12} \n\t"
257 "ldr r11, [sp], #4 \n\t"
258 : [regs] "=r" (rregs), [fn] "=r" (rfn)
259 : "0" (rregs), "1" (rfn)
260 : "r0", "r2", "r3", "r4", "r5", "r6", "r7",
261 "r8", "r9", "r10", "r12", "memory", "cc"
262 );
263}
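
/*
 * Note on the asm above (descriptive only): the pre-ARMv6 path open-codes
 * a BLX: it pushes the target, points lr at the local label "1:" with
 * "adr lr, 1f", then pops the target straight into the PC. Either way the
 * instruction copied into the slot returns through the "bx lr" /
 * "mov pc, lr" that prepare_emulated_insn() placed after it, and r0-r12
 * are then written back into the supplied pt_regs.
 */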
264
265static void __kprobes
266emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs)
267{
268 emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2));
269}
270
271static void __kprobes
272emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs)
273{
274 emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3));
275 load_write_pc(regs->ARM_pc, regs);
276}
277
278enum kprobe_insn __kprobes
279kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
280{
281 kprobe_insn_handler_t *handler = 0;
282 unsigned reglist = insn & 0xffff;
283 int is_ldm = insn & 0x100000;
284 int rn = (insn >> 16) & 0xf;
285
286 if (rn <= 12 && (reglist & 0xe000) == 0) {
287 /* Instruction only uses registers in the range R0..R12 */
288 handler = emulate_generic_r0_12_noflags;
289
290 } else if (rn >= 2 && (reglist & 0x8003) == 0) {
291 /* Instruction only uses registers in the range R2..R14 */
292 rn -= 2;
293 reglist >>= 2;
294 handler = emulate_generic_r2_14_noflags;
295
296 } else if (rn >= 3 && (reglist & 0x0007) == 0) {
297 /* Instruction only uses registers in the range R3..R15 */
298 if (is_ldm && (reglist & 0x8000)) {
299 rn -= 3;
300 reglist >>= 3;
301 handler = emulate_ldm_r3_15;
302 }
303 }
304
305 if (handler) {
306 /* We can emulate the instruction in (possibly) modified form */
307 asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
308 asi->insn_handler = handler;
309 return INSN_GOOD;
310 }
311
312 /* Fallback to slower simulation... */
313 if (reglist & 0x8000)
314 handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
315 else
316 handler = simulate_ldm1stm1;
317 asi->insn_handler = handler;
318 return INSN_GOOD_NO_SLOT;
319}
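
/*
 * Worked example: "ldmia sp, {r4, lr}" fails the first test (rn = 13) but
 * uses only registers in the range R2..R14, so it is rewritten to
 * "ldmia r11, {r2, r12}" and handled by emulate_generic_r2_14_noflags().
 * That handler offsets the pt_regs pointer by two words, so the rewritten
 * instruction's r11 is loaded from the probed task's sp and its r2/r12
 * results are stored back into the task's r4 and lr slots.
 */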
320
321
322/*
323 * Prepare an instruction slot to receive an instruction for emulating.
324 * This is done by placing a subroutine return after the location where the
325 * instruction will be placed. We also modify ARM instructions to be
326 * unconditional as the condition code will already be checked before any
327 * emulation handler is called.
328 */
329static kprobe_opcode_t __kprobes
330prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
331 bool thumb)
332{
333#ifdef CONFIG_THUMB2_KERNEL
334 if (thumb) {
335 u16 *thumb_insn = (u16 *)asi->insn;
336 thumb_insn[1] = 0x4770; /* Thumb bx lr */
337 thumb_insn[2] = 0x4770; /* Thumb bx lr */
338 return insn;
339 }
340 asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
341#else
342 asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
343#endif
344 /* Make an ARM instruction unconditional */
345 if (insn < 0xe0000000)
346 insn = (insn | 0xe0000000) & ~0x10000000;
347 return insn;
348}
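
/*
 * Worked example for the condition rewrite above: an NE-conditional
 * instruction has cond = 0b0001; OR-ing with 0xe0000000 gives 0b1111 and
 * clearing bit 28 turns that into 0b1110 (AL), so the copy in the slot
 * always executes. The original condition has already been evaluated via
 * insn_check_cc before any emulation handler runs.
 */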
349
350/*
351 * Write a (probably modified) instruction into the slot previously prepared by
352 * prepare_emulated_insn
353 */
354static void __kprobes
355set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
356 bool thumb)
357{
358#ifdef CONFIG_THUMB2_KERNEL
359 if (thumb) {
360 u16 *ip = (u16 *)asi->insn;
361 if (is_wide_instruction(insn))
362 *ip++ = insn >> 16;
363 *ip++ = insn;
364 return;
365 }
366#endif
367 asi->insn[0] = insn;
368}
369
370/*
371 * When we modify the register numbers encoded in an instruction to be emulated,
372 * the new values come from this define. For ARM and 32-bit Thumb instructions
373 * this gives...
374 *
375 * bit position    16  12   8   4   0
376 * ---------------+---+---+---+---+---+
377 * register         r2  r0  r1  --  r3
378 */
379#define INSN_NEW_BITS 0x00020103
380
381/* Each nibble has same value as that at INSN_NEW_BITS bit 16 */
382#define INSN_SAMEAS16_BITS 0x22222222
383
384/*
385 * Validate and modify each of the registers encoded in an instruction.
386 *
387 * Each nibble in regs contains a value from enum decode_reg_type. For each
388 * non-zero value, the corresponding nibble in pinsn is validated and modified
389 * according to the type.
390 */
391static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs)
392{
393 kprobe_opcode_t insn = *pinsn;
394 kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */
395
396 for (; regs != 0; regs >>= 4, mask <<= 4) {
397
398 kprobe_opcode_t new_bits = INSN_NEW_BITS;
399
400 switch (regs & 0xf) {
401
402 case REG_TYPE_NONE:
403 /* Nibble not a register, skip to next */
404 continue;
405
406 case REG_TYPE_ANY:
407 /* Any register is allowed */
408 break;
409
410 case REG_TYPE_SAMEAS16:
411 /* Replace register with same as at bit position 16 */
412 new_bits = INSN_SAMEAS16_BITS;
413 break;
414
415 case REG_TYPE_SP:
416 /* Only allow SP (R13) */
417 if ((insn ^ 0xdddddddd) & mask)
418 goto reject;
419 break;
420
421 case REG_TYPE_PC:
422 /* Only allow PC (R15) */
423 if ((insn ^ 0xffffffff) & mask)
424 goto reject;
425 break;
426
427 case REG_TYPE_NOSP:
428 /* Reject SP (R13) */
429 if (((insn ^ 0xdddddddd) & mask) == 0)
430 goto reject;
431 break;
432
433 case REG_TYPE_NOSPPC:
434 case REG_TYPE_NOSPPCX:
435 /* Reject SP and PC (R13 and R15) */
436 if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0)
437 goto reject;
438 break;
439
440 case REG_TYPE_NOPCWB:
441 if (!is_writeback(insn))
442 break; /* No writeback, so any register is OK */
443 /* fall through... */
444 case REG_TYPE_NOPC:
445 case REG_TYPE_NOPCX:
446 /* Reject PC (R15) */
447 if (((insn ^ 0xffffffff) & mask) == 0)
448 goto reject;
449 break;
450 }
451
452 /* Replace value of nibble with new register number... */
453 insn &= ~mask;
454 insn |= new_bits & mask;
455 }
456
457 *pinsn = insn;
458 return true;
459
460reject:
461 return false;
462}
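
/*
 * Worked example: a register spec of REGS(ANY, ANY, 0, 0, ANY) (a
 * hypothetical entry) asks decode_regs() to rewrite Rn (bits 16-19),
 * Rd (bits 12-15) and Rm (bits 0-3). Walking from the least significant
 * nibble upwards, each selected nibble is replaced with the matching
 * nibble of INSN_NEW_BITS, so "add r5, r7, r9" in the probed code becomes
 * "add r0, r2, r3" in the instruction slot, and the emulation handler
 * only has to shuffle three fixed registers in and out of pt_regs.
 */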
463
464static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
465 [DECODE_TYPE_TABLE] = sizeof(struct decode_table),
466 [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom),
467 [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate),
468 [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate),
469 [DECODE_TYPE_OR] = sizeof(struct decode_or),
470 [DECODE_TYPE_REJECT] = sizeof(struct decode_reject)
471};
472
473/*
474 * kprobe_decode_insn operates on data tables in order to decode an ARM
475 * architecture instruction onto which a kprobe has been placed.
476 *
477 * These instruction decoding tables are a concatenation of entries each
478 * of which consist of one of the following structs:
479 *
480 * decode_table
481 * decode_custom
482 * decode_simulate
483 * decode_emulate
484 * decode_or
485 * decode_reject
486 *
487 * Each of these starts with a struct decode_header which has the following
488 * fields:
489 *
490 * type_regs
491 * mask
492 * value
493 *
494 * The least significant DECODE_TYPE_BITS of type_regs contains a value
495 * from enum decode_type, this indicates which of the decode_* structs
496 * the entry contains. The value DECODE_TYPE_END indicates the end of the
497 * table.
498 *
499 * When the table is parsed, each entry is checked in turn to see if it
500 * matches the instruction to be decoded using the test:
501 *
502 * (insn & mask) == value
503 *
504 * If no match is found before the end of the table is reached then decoding
505 * fails with INSN_REJECTED.
506 *
507 * When a match is found, decode_regs() is called to validate and modify each
508 * of the registers encoded in the instruction; the data it uses to do this
509 * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding
510 * to fail with INSN_REJECTED.
511 *
512 * Once the instruction has passed the above tests, further processing
513 * depends on the type of the table entry's decode struct.
514 *
515 */
516int __kprobes
517kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
518 const union decode_item *table, bool thumb)
519{
520 const struct decode_header *h = (struct decode_header *)table;
521 const struct decode_header *next;
522 bool matched = false;
523
524 insn = prepare_emulated_insn(insn, asi, thumb);
525
526 for (;; h = next) {
527 enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
528 u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
529
530 if (type == DECODE_TYPE_END)
531 return INSN_REJECTED;
532
533 next = (struct decode_header *)
534 ((uintptr_t)h + decode_struct_sizes[type]);
535
536 if (!matched && (insn & h->mask.bits) != h->value.bits)
537 continue;
538
539 if (!decode_regs(&insn, regs))
540 return INSN_REJECTED;
541
542 switch (type) {
543
544 case DECODE_TYPE_TABLE: {
545 struct decode_table *d = (struct decode_table *)h;
546 next = (struct decode_header *)d->table.table;
547 break;
548 }
549
550 case DECODE_TYPE_CUSTOM: {
551 struct decode_custom *d = (struct decode_custom *)h;
552 return (*d->decoder.decoder)(insn, asi);
553 }
554
555 case DECODE_TYPE_SIMULATE: {
556 struct decode_simulate *d = (struct decode_simulate *)h;
557 asi->insn_handler = d->handler.handler;
558 return INSN_GOOD_NO_SLOT;
559 }
560
561 case DECODE_TYPE_EMULATE: {
562 struct decode_emulate *d = (struct decode_emulate *)h;
563 asi->insn_handler = d->handler.handler;
564 set_emulated_insn(insn, asi, thumb);
565 return INSN_GOOD;
566 }
567
568 case DECODE_TYPE_OR:
569 matched = true;
570 break;
571
572 case DECODE_TYPE_REJECT:
573 default:
574 return INSN_REJECTED;
575 }
576 }
577 }
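
/*
 * Illustrative table fragment (taken from kprobes-arm.c earlier in this
 * patch) showing how DECODE_OR chains entries:
 *
 *	DECODE_OR	(0x0fa00030, 0x06a00010),
 *	DECODE_EMULATEX	(0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc,
 *						 REGS(0, NOPC, 0, 0, NOPC)),
 *
 * An SSAT instruction matches the DECODE_OR mask/value, which only sets
 * "matched"; the loop then falls through to the next entry and uses its
 * handler and register spec without re-checking that entry's mask/value.
 */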
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
deleted file mode 100644
index 15eeff6aea0e..000000000000
--- a/arch/arm/kernel/kprobes-decode.c
+++ /dev/null
@@ -1,1670 +0,0 @@
1/*
2 * arch/arm/kernel/kprobes-decode.c
3 *
4 * Copyright (C) 2006, 2007 Motorola Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 */
15
16/*
17 * We do not have hardware single-stepping on ARM. This
18 * effort is further complicated by the ARM not having a
19 * "next PC" register. Instructions that change the PC
20 * can't be safely single-stepped in a MP environment, so
21 * we have a lot of work to do:
22 *
23 * In the prepare phase:
24 * *) If it is an instruction that does anything
25 * with the CPU mode, we reject it for a kprobe.
26 * (This is out of laziness rather than need. The
27 * instructions could be simulated.)
28 *
29 * *) Otherwise, decode the instruction rewriting its
30 * registers to take fixed, ordered registers and
31 * setting a handler for it to run the instruction.
32 *
33 * In the execution phase by an instruction's handler:
34 *
35 * *) If the PC is written to by the instruction, the
36 * instruction must be fully simulated in software.
37 *
38 * *) Otherwise, a modified form of the instruction is
39 * directly executed. Its handler calls the
40 * instruction in insn[0]. In insn[1] is a
41 * "mov pc, lr" to return.
42 *
43 * Before calling, load up the reordered registers
44 * from the original instruction's registers. If one
45 * of the original input registers is the PC, compute
46 * and adjust the appropriate input register.
47 *
48 * After call completes, copy the output registers to
49 * the original instruction's original registers.
50 *
51 * We don't use a real breakpoint instruction since that
52 * would have us in the kernel go from SVC mode to SVC
53 * mode losing the link register. Instead we use an
54 * undefined instruction. To simplify processing, the
55 * undefined instruction used for kprobes must be reserved
56 * exclusively for kprobes use.
57 *
58 * TODO: ifdef out some instruction decoding based on architecture.
59 */
60
61#include <linux/kernel.h>
62#include <linux/kprobes.h>
63
64#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
65
66#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
67
68#define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos))
69
70/*
71 * Test if load/store instructions writeback the address register.
72 * if P (bit 24) == 0 or W (bit 21) == 1
73 */
74#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
75
76#define PSR_fs (PSR_f|PSR_s)
77
78#define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */
79
80typedef long (insn_0arg_fn_t)(void);
81typedef long (insn_1arg_fn_t)(long);
82typedef long (insn_2arg_fn_t)(long, long);
83typedef long (insn_3arg_fn_t)(long, long, long);
84typedef long (insn_4arg_fn_t)(long, long, long, long);
85typedef long long (insn_llret_0arg_fn_t)(void);
86typedef long long (insn_llret_3arg_fn_t)(long, long, long);
87typedef long long (insn_llret_4arg_fn_t)(long, long, long, long);
88
89union reg_pair {
90 long long dr;
91#ifdef __LITTLE_ENDIAN
92 struct { long r0, r1; };
93#else
94 struct { long r1, r0; };
95#endif
96};
97
98/*
99 * For STR and STM instructions, an ARM core may choose to use either
100 * a +8 or a +12 displacement from the current instruction's address.
101 * Whichever value is chosen for a given core, it must be the same for
102 * both instructions and may not change. This function measures it.
103 */
104
105static int str_pc_offset;
106
107static void __init find_str_pc_offset(void)
108{
109 int addr, scratch, ret;
110
111 __asm__ (
112 "sub %[ret], pc, #4 \n\t"
113 "str pc, %[addr] \n\t"
114 "ldr %[scr], %[addr] \n\t"
115 "sub %[ret], %[scr], %[ret] \n\t"
116 : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr));
117
118 str_pc_offset = ret;
119}
120
121/*
122 * The insnslot_?arg_r[w]flags() functions below are to keep the
123 * msr -> *fn -> mrs instruction sequences indivisible so that
124 * the state of the CPSR flags aren't inadvertently modified
125 * just before or just after the call.
126 */
127
128static inline long __kprobes
129insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn)
130{
131 register long ret asm("r0");
132
133 __asm__ __volatile__ (
134 "msr cpsr_fs, %[cpsr] \n\t"
135 "mov lr, pc \n\t"
136 "mov pc, %[fn] \n\t"
137 : "=r" (ret)
138 : [cpsr] "r" (cpsr), [fn] "r" (fn)
139 : "lr", "cc"
140 );
141 return ret;
142}
143
144static inline long long __kprobes
145insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn)
146{
147 register long ret0 asm("r0");
148 register long ret1 asm("r1");
149 union reg_pair fnr;
150
151 __asm__ __volatile__ (
152 "msr cpsr_fs, %[cpsr] \n\t"
153 "mov lr, pc \n\t"
154 "mov pc, %[fn] \n\t"
155 : "=r" (ret0), "=r" (ret1)
156 : [cpsr] "r" (cpsr), [fn] "r" (fn)
157 : "lr", "cc"
158 );
159 fnr.r0 = ret0;
160 fnr.r1 = ret1;
161 return fnr.dr;
162}
163
164static inline long __kprobes
165insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn)
166{
167 register long rr0 asm("r0") = r0;
168 register long ret asm("r0");
169
170 __asm__ __volatile__ (
171 "msr cpsr_fs, %[cpsr] \n\t"
172 "mov lr, pc \n\t"
173 "mov pc, %[fn] \n\t"
174 : "=r" (ret)
175 : "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn)
176 : "lr", "cc"
177 );
178 return ret;
179}
180
181static inline long __kprobes
182insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn)
183{
184 register long rr0 asm("r0") = r0;
185 register long rr1 asm("r1") = r1;
186 register long ret asm("r0");
187
188 __asm__ __volatile__ (
189 "msr cpsr_fs, %[cpsr] \n\t"
190 "mov lr, pc \n\t"
191 "mov pc, %[fn] \n\t"
192 : "=r" (ret)
193 : "0" (rr0), "r" (rr1),
194 [cpsr] "r" (cpsr), [fn] "r" (fn)
195 : "lr", "cc"
196 );
197 return ret;
198}
199
200static inline long __kprobes
201insnslot_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_3arg_fn_t *fn)
202{
203 register long rr0 asm("r0") = r0;
204 register long rr1 asm("r1") = r1;
205 register long rr2 asm("r2") = r2;
206 register long ret asm("r0");
207
208 __asm__ __volatile__ (
209 "msr cpsr_fs, %[cpsr] \n\t"
210 "mov lr, pc \n\t"
211 "mov pc, %[fn] \n\t"
212 : "=r" (ret)
213 : "0" (rr0), "r" (rr1), "r" (rr2),
214 [cpsr] "r" (cpsr), [fn] "r" (fn)
215 : "lr", "cc"
216 );
217 return ret;
218}
219
220static inline long long __kprobes
221insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr,
222 insn_llret_3arg_fn_t *fn)
223{
224 register long rr0 asm("r0") = r0;
225 register long rr1 asm("r1") = r1;
226 register long rr2 asm("r2") = r2;
227 register long ret0 asm("r0");
228 register long ret1 asm("r1");
229 union reg_pair fnr;
230
231 __asm__ __volatile__ (
232 "msr cpsr_fs, %[cpsr] \n\t"
233 "mov lr, pc \n\t"
234 "mov pc, %[fn] \n\t"
235 : "=r" (ret0), "=r" (ret1)
236 : "0" (rr0), "r" (rr1), "r" (rr2),
237 [cpsr] "r" (cpsr), [fn] "r" (fn)
238 : "lr", "cc"
239 );
240 fnr.r0 = ret0;
241 fnr.r1 = ret1;
242 return fnr.dr;
243}
244
245static inline long __kprobes
246insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr,
247 insn_4arg_fn_t *fn)
248{
249 register long rr0 asm("r0") = r0;
250 register long rr1 asm("r1") = r1;
251 register long rr2 asm("r2") = r2;
252 register long rr3 asm("r3") = r3;
253 register long ret asm("r0");
254
255 __asm__ __volatile__ (
256 "msr cpsr_fs, %[cpsr] \n\t"
257 "mov lr, pc \n\t"
258 "mov pc, %[fn] \n\t"
259 : "=r" (ret)
260 : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
261 [cpsr] "r" (cpsr), [fn] "r" (fn)
262 : "lr", "cc"
263 );
264 return ret;
265}
266
267static inline long __kprobes
268insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn)
269{
270 register long rr0 asm("r0") = r0;
271 register long ret asm("r0");
272 long oldcpsr = *cpsr;
273 long newcpsr;
274
275 __asm__ __volatile__ (
276 "msr cpsr_fs, %[oldcpsr] \n\t"
277 "mov lr, pc \n\t"
278 "mov pc, %[fn] \n\t"
279 "mrs %[newcpsr], cpsr \n\t"
280 : "=r" (ret), [newcpsr] "=r" (newcpsr)
281 : "0" (rr0), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
282 : "lr", "cc"
283 );
284 *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
285 return ret;
286}
287
288static inline long __kprobes
289insnslot_2arg_rwflags(long r0, long r1, long *cpsr, insn_2arg_fn_t *fn)
290{
291 register long rr0 asm("r0") = r0;
292 register long rr1 asm("r1") = r1;
293 register long ret asm("r0");
294 long oldcpsr = *cpsr;
295 long newcpsr;
296
297 __asm__ __volatile__ (
298 "msr cpsr_fs, %[oldcpsr] \n\t"
299 "mov lr, pc \n\t"
300 "mov pc, %[fn] \n\t"
301 "mrs %[newcpsr], cpsr \n\t"
302 : "=r" (ret), [newcpsr] "=r" (newcpsr)
303 : "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
304 : "lr", "cc"
305 );
306 *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
307 return ret;
308}
309
310static inline long __kprobes
311insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr,
312 insn_3arg_fn_t *fn)
313{
314 register long rr0 asm("r0") = r0;
315 register long rr1 asm("r1") = r1;
316 register long rr2 asm("r2") = r2;
317 register long ret asm("r0");
318 long oldcpsr = *cpsr;
319 long newcpsr;
320
321 __asm__ __volatile__ (
322 "msr cpsr_fs, %[oldcpsr] \n\t"
323 "mov lr, pc \n\t"
324 "mov pc, %[fn] \n\t"
325 "mrs %[newcpsr], cpsr \n\t"
326 : "=r" (ret), [newcpsr] "=r" (newcpsr)
327 : "0" (rr0), "r" (rr1), "r" (rr2),
328 [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
329 : "lr", "cc"
330 );
331 *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
332 return ret;
333}
334
335static inline long __kprobes
336insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
337 insn_4arg_fn_t *fn)
338{
339 register long rr0 asm("r0") = r0;
340 register long rr1 asm("r1") = r1;
341 register long rr2 asm("r2") = r2;
342 register long rr3 asm("r3") = r3;
343 register long ret asm("r0");
344 long oldcpsr = *cpsr;
345 long newcpsr;
346
347 __asm__ __volatile__ (
348 "msr cpsr_fs, %[oldcpsr] \n\t"
349 "mov lr, pc \n\t"
350 "mov pc, %[fn] \n\t"
351 "mrs %[newcpsr], cpsr \n\t"
352 : "=r" (ret), [newcpsr] "=r" (newcpsr)
353 : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
354 [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
355 : "lr", "cc"
356 );
357 *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
358 return ret;
359}
360
361static inline long long __kprobes
362insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr,
363 insn_llret_4arg_fn_t *fn)
364{
365 register long rr0 asm("r0") = r0;
366 register long rr1 asm("r1") = r1;
367 register long rr2 asm("r2") = r2;
368 register long rr3 asm("r3") = r3;
369 register long ret0 asm("r0");
370 register long ret1 asm("r1");
371 long oldcpsr = *cpsr;
372 long newcpsr;
373 union reg_pair fnr;
374
375 __asm__ __volatile__ (
376 "msr cpsr_fs, %[oldcpsr] \n\t"
377 "mov lr, pc \n\t"
378 "mov pc, %[fn] \n\t"
379 "mrs %[newcpsr], cpsr \n\t"
380 : "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr)
381 : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3),
382 [oldcpsr] "r" (oldcpsr), [fn] "r" (fn)
383 : "lr", "cc"
384 );
385 *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs);
386 fnr.r0 = ret0;
387 fnr.r1 = ret1;
388 return fnr.dr;
389}
390
391/*
392 * To avoid the complications of mimicking single-stepping on a
393 * processor without a Next-PC or a single-step mode, and to
394 * avoid having to deal with the side-effects of boosting, we
395 * simulate or emulate (almost) all ARM instructions.
396 *
397 * "Simulation" is where the instruction's behavior is duplicated in
398 * C code. "Emulation" is where the original instruction is rewritten
399 * and executed, often by altering its registers.
400 *
401 * By having all behavior of the kprobe'd instruction completed before
402 * returning from the kprobe_handler(), all locks (scheduler and
403 * interrupt) can safely be released. There is no need for secondary
404 * breakpoints, no race with MP or preemptible kernels, nor having to
405 * clean up resource counts at a later time, impacting overall system
406 * performance. By rewriting the instruction, only the minimum registers
407 * need to be loaded and saved back optimizing performance.
408 *
409 * Calling the insnslot_*_rwflags version of a function doesn't hurt
410 * anything even when the CPSR flags aren't updated by the
411 * instruction. It's just a little slower in return for saving
412 * a little space by not having a duplicate function that doesn't
413 * update the flags. (The same trade-off applies to
414 * instructions that do or don't perform register writeback.)
415 * Also, instructions can either read the flags, only write the
416 * flags, or read and write the flags. To keep the number of
417 * combinations down rather than for sheer performance, flag functions
418 * just assume both reading and writing of the flags.
419 */
420
421static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs)
422{
423 kprobe_opcode_t insn = p->opcode;
424 long iaddr = (long)p->addr;
425 int disp = branch_displacement(insn);
426
427 if (insn & (1 << 24))
428 regs->ARM_lr = iaddr + 4;
429
430 regs->ARM_pc = iaddr + 8 + disp;
431}
432
433static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs)
434{
435 kprobe_opcode_t insn = p->opcode;
436 long iaddr = (long)p->addr;
437 int disp = branch_displacement(insn);
438
439 regs->ARM_lr = iaddr + 4;
440 regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2);
441 regs->ARM_cpsr |= PSR_T_BIT;
442}
443
444static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs)
445{
446 kprobe_opcode_t insn = p->opcode;
447 int rm = insn & 0xf;
448 long rmv = regs->uregs[rm];
449
450 if (insn & (1 << 5))
451 regs->ARM_lr = (long)p->addr + 4;
452
453 regs->ARM_pc = rmv & ~0x1;
454 regs->ARM_cpsr &= ~PSR_T_BIT;
455 if (rmv & 0x1)
456 regs->ARM_cpsr |= PSR_T_BIT;
457}
458
459static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs)
460{
461 kprobe_opcode_t insn = p->opcode;
462 int rd = (insn >> 12) & 0xf;
463 unsigned long mask = 0xf8ff03df; /* Mask out execution state */
464 regs->uregs[rd] = regs->ARM_cpsr & mask;
465}
466
467static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs)
468{
469 kprobe_opcode_t insn = p->opcode;
470 int rn = (insn >> 16) & 0xf;
471 int lbit = insn & (1 << 20);
472 int wbit = insn & (1 << 21);
473 int ubit = insn & (1 << 23);
474 int pbit = insn & (1 << 24);
475 long *addr = (long *)regs->uregs[rn];
476 int reg_bit_vector;
477 int reg_count;
478
479 reg_count = 0;
480 reg_bit_vector = insn & 0xffff;
481 while (reg_bit_vector) {
482 reg_bit_vector &= (reg_bit_vector - 1);
483 ++reg_count;
484 }
485
486 if (!ubit)
487 addr -= reg_count;
488 addr += (!pbit == !ubit);
489
490 reg_bit_vector = insn & 0xffff;
491 while (reg_bit_vector) {
492 int reg = __ffs(reg_bit_vector);
493 reg_bit_vector &= (reg_bit_vector - 1);
494 if (lbit)
495 regs->uregs[reg] = *addr++;
496 else
497 *addr++ = regs->uregs[reg];
498 }
499
500 if (wbit) {
501 if (!ubit)
502 addr -= reg_count;
503 addr -= (!pbit == !ubit);
504 regs->uregs[rn] = (long)addr;
505 }
506}
507
508static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs)
509{
510 regs->ARM_pc = (long)p->addr + str_pc_offset;
511 simulate_ldm1stm1(p, regs);
512 regs->ARM_pc = (long)p->addr + 4;
513}
514
515static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs)
516{
517 regs->uregs[12] = regs->uregs[13];
518}
519
520static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs)
521{
522 insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
523 kprobe_opcode_t insn = p->opcode;
524 long ppc = (long)p->addr + 8;
525 int rd = (insn >> 12) & 0xf;
526 int rn = (insn >> 16) & 0xf;
527 int rm = insn & 0xf; /* rm may be invalid, don't care. */
528 long rmv = (rm == 15) ? ppc : regs->uregs[rm];
529 long rnv = (rn == 15) ? ppc : regs->uregs[rn];
530
531 /* Not following the C calling convention here, so need asm(). */
532 __asm__ __volatile__ (
533 "ldr r0, %[rn] \n\t"
534 "ldr r1, %[rm] \n\t"
535 "msr cpsr_fs, %[cpsr]\n\t"
536 "mov lr, pc \n\t"
537 "mov pc, %[i_fn] \n\t"
538 "str r0, %[rn] \n\t" /* in case of writeback */
539 "str r2, %[rd0] \n\t"
540 "str r3, %[rd1] \n\t"
541 : [rn] "+m" (rnv),
542 [rd0] "=m" (regs->uregs[rd]),
543 [rd1] "=m" (regs->uregs[rd+1])
544 : [rm] "m" (rmv),
545 [cpsr] "r" (regs->ARM_cpsr),
546 [i_fn] "r" (i_fn)
547 : "r0", "r1", "r2", "r3", "lr", "cc"
548 );
549 if (is_writeback(insn))
550 regs->uregs[rn] = rnv;
551}
552
553static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs)
554{
555 insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0];
556 kprobe_opcode_t insn = p->opcode;
557 long ppc = (long)p->addr + 8;
558 int rd = (insn >> 12) & 0xf;
559 int rn = (insn >> 16) & 0xf;
560 int rm = insn & 0xf;
561 long rnv = (rn == 15) ? ppc : regs->uregs[rn];
562 /* rm/rmv may be invalid, don't care. */
563 long rmv = (rm == 15) ? ppc : regs->uregs[rm];
564 long rnv_wb;
565
566 rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd],
567 regs->uregs[rd+1],
568 regs->ARM_cpsr, i_fn);
569 if (is_writeback(insn))
570 regs->uregs[rn] = rnv_wb;
571}
572
573static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs)
574{
575 insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0];
576 kprobe_opcode_t insn = p->opcode;
577 long ppc = (long)p->addr + 8;
578 union reg_pair fnr;
579 int rd = (insn >> 12) & 0xf;
580 int rn = (insn >> 16) & 0xf;
581 int rm = insn & 0xf;
582 long rdv;
583 long rnv = (rn == 15) ? ppc : regs->uregs[rn];
584 long rmv = (rm == 15) ? ppc : regs->uregs[rm];
585 long cpsr = regs->ARM_cpsr;
586
587 fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn);
588 if (rn != 15)
589 regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */
590 rdv = fnr.r1;
591
592 if (rd == 15) {
593#if __LINUX_ARM_ARCH__ >= 5
594 cpsr &= ~PSR_T_BIT;
595 if (rdv & 0x1)
596 cpsr |= PSR_T_BIT;
597 regs->ARM_cpsr = cpsr;
598 rdv &= ~0x1;
599#else
600 rdv &= ~0x2;
601#endif
602 }
603 regs->uregs[rd] = rdv;
604}
605
606static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs)
607{
608 insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
609 kprobe_opcode_t insn = p->opcode;
610 long iaddr = (long)p->addr;
611 int rd = (insn >> 12) & 0xf;
612 int rn = (insn >> 16) & 0xf;
613 int rm = insn & 0xf;
614 long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd];
615 long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn];
616 long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */
617 long rnv_wb;
618
619 rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn);
620 if (rn != 15)
621 regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */
622}
623
624static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs)
625{
626 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
627 kprobe_opcode_t insn = p->opcode;
628 int rd = (insn >> 12) & 0xf;
629 int rm = insn & 0xf;
630 long rmv = regs->uregs[rm];
631
632 /* Writes Q flag */
633 regs->uregs[rd] = insnslot_1arg_rwflags(rmv, &regs->ARM_cpsr, i_fn);
634}
635
636static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs)
637{
638 insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
639 kprobe_opcode_t insn = p->opcode;
640 int rd = (insn >> 12) & 0xf;
641 int rn = (insn >> 16) & 0xf;
642 int rm = insn & 0xf;
643 long rnv = regs->uregs[rn];
644 long rmv = regs->uregs[rm];
645
646 /* Reads GE bits */
647 regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn);
648}
649
650static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs)
651{
652 insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0];
653
654 insnslot_0arg_rflags(regs->ARM_cpsr, i_fn);
655}
656
657static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs)
658{
659}
660
661static void __kprobes
662emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs)
663{
664 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
665 kprobe_opcode_t insn = p->opcode;
666 int rd = (insn >> 12) & 0xf;
667 long rdv = regs->uregs[rd];
668
669 regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn);
670}
671
672static void __kprobes
673emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs)
674{
675 insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
676 kprobe_opcode_t insn = p->opcode;
677 int rd = (insn >> 12) & 0xf;
678 int rn = insn & 0xf;
679 long rdv = regs->uregs[rd];
680 long rnv = regs->uregs[rn];
681
682 regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn);
683}
684
685static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs)
686{
687 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
688 kprobe_opcode_t insn = p->opcode;
689 int rd = (insn >> 12) & 0xf;
690 int rm = insn & 0xf;
691 long rmv = regs->uregs[rm];
692
693 regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn);
694}
695
696static void __kprobes
697emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
698{
699 insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
700 kprobe_opcode_t insn = p->opcode;
701 int rd = (insn >> 12) & 0xf;
702 int rn = (insn >> 16) & 0xf;
703 int rm = insn & 0xf;
704 long rnv = regs->uregs[rn];
705 long rmv = regs->uregs[rm];
706
707 regs->uregs[rd] =
708 insnslot_2arg_rwflags(rnv, rmv, &regs->ARM_cpsr, i_fn);
709}
710
711static void __kprobes
712emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
713{
714 insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
715 kprobe_opcode_t insn = p->opcode;
716 int rd = (insn >> 16) & 0xf;
717 int rn = (insn >> 12) & 0xf;
718 int rs = (insn >> 8) & 0xf;
719 int rm = insn & 0xf;
720 long rnv = regs->uregs[rn];
721 long rsv = regs->uregs[rs];
722 long rmv = regs->uregs[rm];
723
724 regs->uregs[rd] =
725 insnslot_3arg_rwflags(rnv, rsv, rmv, &regs->ARM_cpsr, i_fn);
726}
727
728static void __kprobes
729emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
730{
731 insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0];
732 kprobe_opcode_t insn = p->opcode;
733 int rd = (insn >> 16) & 0xf;
734 int rs = (insn >> 8) & 0xf;
735 int rm = insn & 0xf;
736 long rsv = regs->uregs[rs];
737 long rmv = regs->uregs[rm];
738
739 regs->uregs[rd] =
740 insnslot_2arg_rwflags(rsv, rmv, &regs->ARM_cpsr, i_fn);
741}
742
743static void __kprobes
744emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
745{
746 insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0];
747 kprobe_opcode_t insn = p->opcode;
748 union reg_pair fnr;
749 int rdhi = (insn >> 16) & 0xf;
750 int rdlo = (insn >> 12) & 0xf;
751 int rs = (insn >> 8) & 0xf;
752 int rm = insn & 0xf;
753 long rsv = regs->uregs[rs];
754 long rmv = regs->uregs[rm];
755
756 fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi],
757 regs->uregs[rdlo], rsv, rmv,
758 &regs->ARM_cpsr, i_fn);
759 regs->uregs[rdhi] = fnr.r0;
760 regs->uregs[rdlo] = fnr.r1;
761}
762
763static void __kprobes
764emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs)
765{
766 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
767 kprobe_opcode_t insn = p->opcode;
768 int rd = (insn >> 12) & 0xf;
769 int rn = (insn >> 16) & 0xf;
770 long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
771
772 regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn);
773}
774
775static void __kprobes
776emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs)
777{
778 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
779 kprobe_opcode_t insn = p->opcode;
780 int rd = (insn >> 12) & 0xf;
781 int rn = (insn >> 16) & 0xf;
782 long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
783
784 regs->uregs[rd] = insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
785}
786
787static void __kprobes
788emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs)
789{
790 insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0];
791 kprobe_opcode_t insn = p->opcode;
792 int rn = (insn >> 16) & 0xf;
793 long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn];
794
795 insnslot_1arg_rwflags(rnv, &regs->ARM_cpsr, i_fn);
796}
797
798static void __kprobes
799emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs)
800{
801 insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
802 kprobe_opcode_t insn = p->opcode;
803 long ppc = (long)p->addr + 8;
804 int rd = (insn >> 12) & 0xf;
805 int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */
806 int rs = (insn >> 8) & 0xf; /* invalid, don't care. */
807 int rm = insn & 0xf;
808 long rnv = (rn == 15) ? ppc : regs->uregs[rn];
809 long rmv = (rm == 15) ? ppc : regs->uregs[rm];
810 long rsv = regs->uregs[rs];
811
812 regs->uregs[rd] =
813 insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn);
814}
815
816static void __kprobes
817emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs)
818{
819 insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
820 kprobe_opcode_t insn = p->opcode;
821 long ppc = (long)p->addr + 8;
822 int rd = (insn >> 12) & 0xf;
823 int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */
824 int rs = (insn >> 8) & 0xf; /* invalid, don't care. */
825 int rm = insn & 0xf;
826 long rnv = (rn == 15) ? ppc : regs->uregs[rn];
827 long rmv = (rm == 15) ? ppc : regs->uregs[rm];
828 long rsv = regs->uregs[rs];
829
830 regs->uregs[rd] =
831 insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
832}
833
834static void __kprobes
835emulate_alu_tests(struct kprobe *p, struct pt_regs *regs)
836{
837 insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0];
838 kprobe_opcode_t insn = p->opcode;
839 long ppc = (long)p->addr + 8;
840 int rn = (insn >> 16) & 0xf;
841 int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */
842 int rm = insn & 0xf;
843 long rnv = (rn == 15) ? ppc : regs->uregs[rn];
844 long rmv = (rm == 15) ? ppc : regs->uregs[rm];
845 long rsv = regs->uregs[rs];
846
847 insnslot_3arg_rwflags(rnv, rmv, rsv, &regs->ARM_cpsr, i_fn);
848}
849
850static enum kprobe_insn __kprobes
851prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi)
852{
853 int not_imm = (insn & (1 << 26)) ? (insn & (1 << 25))
854 : (~insn & (1 << 22));
855
856 if (is_writeback(insn) && is_r15(insn, 16))
857 return INSN_REJECTED; /* Writeback to PC */
858
859 insn &= 0xfff00fff;
860 insn |= 0x00001000; /* Rn = r0, Rd = r1 */
861 if (not_imm) {
862 insn &= ~0xf;
863 insn |= 2; /* Rm = r2 */
864 }
865 asi->insn[0] = insn;
866 asi->insn_handler = (insn & (1 << 20)) ? emulate_ldr : emulate_str;
867 return INSN_GOOD;
868}
869
870static enum kprobe_insn __kprobes
871prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi)
872{
873 if (is_r15(insn, 12))
874 return INSN_REJECTED; /* Rd is PC */
875
876 insn &= 0xffff0fff; /* Rd = r0 */
877 asi->insn[0] = insn;
878 asi->insn_handler = emulate_rd12_modify;
879 return INSN_GOOD;
880}
881
882static enum kprobe_insn __kprobes
883prep_emulate_rd12rn0_modify(kprobe_opcode_t insn,
884 struct arch_specific_insn *asi)
885{
886 if (is_r15(insn, 12))
887 return INSN_REJECTED; /* Rd is PC */
888
889 insn &= 0xffff0ff0; /* Rd = r0 */
890 insn |= 0x00000001; /* Rn = r1 */
891 asi->insn[0] = insn;
892 asi->insn_handler = emulate_rd12rn0_modify;
893 return INSN_GOOD;
894}
895
896static enum kprobe_insn __kprobes
897prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi)
898{
899 if (is_r15(insn, 12))
900 return INSN_REJECTED; /* Rd is PC */
901
902 insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */
903 asi->insn[0] = insn;
904 asi->insn_handler = emulate_rd12rm0;
905 return INSN_GOOD;
906}
907
908static enum kprobe_insn __kprobes
909prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn,
910 struct arch_specific_insn *asi)
911{
912 if (is_r15(insn, 12))
913 return INSN_REJECTED; /* Rd is PC */
914
915 insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */
916 insn |= 0x00000001; /* Rm = r1 */
917 asi->insn[0] = insn;
918 asi->insn_handler = emulate_rd12rn16rm0_rwflags;
919 return INSN_GOOD;
920}
921
922static enum kprobe_insn __kprobes
923prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn,
924 struct arch_specific_insn *asi)
925{
926 if (is_r15(insn, 16))
927 return INSN_REJECTED; /* Rd is PC */
928
929 insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */
930 insn |= 0x00000001; /* Rm = r1 */
931 asi->insn[0] = insn;
932 asi->insn_handler = emulate_rd16rs8rm0_rwflags;
933 return INSN_GOOD;
934}
935
936static enum kprobe_insn __kprobes
937prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn,
938 struct arch_specific_insn *asi)
939{
940 if (is_r15(insn, 16))
941 return INSN_REJECTED; /* Rd is PC */
942
943 insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */
944 insn |= 0x00000102; /* Rs = r1, Rm = r2 */
945 asi->insn[0] = insn;
946 asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags;
947 return INSN_GOOD;
948}
949
950static enum kprobe_insn __kprobes
951prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn,
952 struct arch_specific_insn *asi)
953{
954 if (is_r15(insn, 16) || is_r15(insn, 12))
955 return INSN_REJECTED; /* RdHi or RdLo is PC */
956
957 insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */
958 insn |= 0x00001203; /* Rs = r2, Rm = r3 */
959 asi->insn[0] = insn;
960 asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags;
961 return INSN_GOOD;
962}
963
964/*
965 * For the instruction masking and comparisons in all the "space_*"
966 * functions below, do _not_ rearrange the order of tests unless
967 * you're very, very sure of what you are doing. For the sake of
968 * efficiency, the masks for some tests assume that other tests
969 * have been done before them, so each pattern tested for can be
970 * as broad as possible and the total number of tests is kept to
971 * a minimum.
972 */
973
974static enum kprobe_insn __kprobes
975space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi)
976{
977 /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */
978 /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */
979 /* PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */
980 /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */
981 if ((insn & 0xfe300000) == 0xf4100000) {
982 asi->insn_handler = emulate_nop;
983 return INSN_GOOD_NO_SLOT;
984 }
985
986 /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */
987 if ((insn & 0xfe000000) == 0xfa000000) {
988 asi->insn_handler = simulate_blx1;
989 return INSN_GOOD_NO_SLOT;
990 }
991
992 /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */
993 /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */
994
995 /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */
996 /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
997
998 /* Coprocessor instructions... */
999 /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
1000 /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */
1001 /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
1002 /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
1003 /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
1004 /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
1005 /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
1006
1007 return INSN_REJECTED;
1008}
1009
1010static enum kprobe_insn __kprobes
1011space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1012{
1013 /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */
1014 if ((insn & 0x0f900010) == 0x01000000) {
1015
1016 /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */
1017 if ((insn & 0x0ff000f0) == 0x01000000) {
1018 if (is_r15(insn, 12))
1019 return INSN_REJECTED; /* Rd is PC */
1020 asi->insn_handler = simulate_mrs;
1021 return INSN_GOOD_NO_SLOT;
1022 }
1023
1024 /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */
1025 if ((insn & 0x0ff00090) == 0x01400080)
1026 return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
1027 asi);
1028
1029 /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */
1030 /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */
1031 if ((insn & 0x0ff000b0) == 0x012000a0 ||
1032 (insn & 0x0ff00090) == 0x01600080)
1033 return prep_emulate_rd16rs8rm0_wflags(insn, asi);
1034
1035 /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */
1036 /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */
1037 if ((insn & 0x0ff00090) == 0x01000080 ||
1038 (insn & 0x0ff000b0) == 0x01200080)
1039 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
1040
1041 /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */
1042 /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */
1043 /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */
1044
1045 /* Other instruction encodings aren't yet defined */
1046 return INSN_REJECTED;
1047 }
1048
1049 /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */
1050 else if ((insn & 0x0f900090) == 0x01000010) {
1051
1052 /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */
1053 /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */
1054 if ((insn & 0x0ff000d0) == 0x01200010) {
1055 if ((insn & 0x0ff000ff) == 0x0120003f)
1056 return INSN_REJECTED; /* BLX pc */
1057 asi->insn_handler = simulate_blx2bx;
1058 return INSN_GOOD_NO_SLOT;
1059 }
1060
1061 /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */
1062 if ((insn & 0x0ff000f0) == 0x01600010)
1063 return prep_emulate_rd12rm0(insn, asi);
1064
1065 /* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */
1066 /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */
1067 /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */
1068 /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */
1069 if ((insn & 0x0f9000f0) == 0x01000050)
1070 return prep_emulate_rd12rn16rm0_wflags(insn, asi);
1071
1072 /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */
1073 /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */
1074
1075 /* Other instruction encodings aren't yet defined */
1076 return INSN_REJECTED;
1077 }
1078
1079 /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */
1080 else if ((insn & 0x0f0000f0) == 0x00000090) {
1081
1082 /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */
1083 /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */
1084 /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */
1085 /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */
1086 /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */
1087 /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */
1088 /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */
1089 /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */
1090 /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */
1091 /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */
1092 /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */
1093 /* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */
1094 /* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */
1095 /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */
1096 /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */
1097 /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */
1098 if ((insn & 0x00d00000) == 0x00500000)
1099 return INSN_REJECTED;
1100 else if ((insn & 0x00e00000) == 0x00000000)
1101 return prep_emulate_rd16rs8rm0_wflags(insn, asi);
1102 else if ((insn & 0x00a00000) == 0x00200000)
1103 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
1104 else
1105 return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn,
1106 asi);
1107 }
1108
1109 /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */
1110 else if ((insn & 0x0e000090) == 0x00000090) {
1111
1112 /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */
1113 /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */
1114 /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */
1115 /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */
1116 /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */
1117 /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */
1118 /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */
1119 /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */
1120 /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */
1121 /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */
1122 /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */
1123 /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */
1124 /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */
1125
1126 /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */
1127 /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */
1128 /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */
1129 /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */
1130 /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */
1131 /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */
1132 if ((insn & 0x0f0000f0) == 0x01000090) {
1133 if ((insn & 0x0fb000f0) == 0x01000090) {
1134 /* SWP/SWPB */
1135 return prep_emulate_rd12rn16rm0_wflags(insn,
1136 asi);
1137 } else {
1138 /* STREX/LDREX variants and unallocated space */
1139 return INSN_REJECTED;
1140 }
1141
1142 } else if ((insn & 0x0e1000d0) == 0x00000d0) {
1143 /* STRD/LDRD */
1144 if ((insn & 0x0000e000) == 0x0000e000)
1145 return INSN_REJECTED; /* Rd is LR or PC */
1146 if (is_writeback(insn) && is_r15(insn, 16))
1147 return INSN_REJECTED; /* Writeback to PC */
1148
1149 insn &= 0xfff00fff;
1150 insn |= 0x00002000; /* Rn = r0, Rd = r2 */
1151 if (!(insn & (1 << 22))) {
1152 /* Register index */
1153 insn &= ~0xf;
1154 insn |= 1; /* Rm = r1 */
1155 }
1156 asi->insn[0] = insn;
1157 asi->insn_handler =
1158 (insn & (1 << 5)) ? emulate_strd : emulate_ldrd;
1159 return INSN_GOOD;
1160 }
1161
1162 /* LDRH/STRH/LDRSB/LDRSH */
1163 if (is_r15(insn, 12))
1164 return INSN_REJECTED; /* Rd is PC */
1165 return prep_emulate_ldr_str(insn, asi);
1166 }
1167
1168 /* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx xxxx */
1169
1170 /*
1171 * ALU op with S bit and Rd == 15 :
1172 * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx
1173 */
1174 if ((insn & 0x0e10f000) == 0x0010f000)
1175 return INSN_REJECTED;
1176
1177 /*
1178 * "mov ip, sp" is the most common kprobe'd instruction by far.
1179 * Check and optimize for it explicitly.
1180 */
1181 if (insn == 0xe1a0c00d) {
1182 asi->insn_handler = simulate_mov_ipsp;
1183 return INSN_GOOD_NO_SLOT;
1184 }
1185
1186 /*
1187 * Data processing: Immediate-shift / Register-shift
1188 * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx
1189 * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx
1190 * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx
1191 * *S (bit 20) updates condition codes
1192 * ADC/SBC/RSC reads the C flag
1193 */
1194 insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */
1195 insn |= 0x00000001; /* Rm = r1 */
1196 if (insn & 0x010) {
1197 insn &= 0xfffff0ff; /* register shift */
1198 insn |= 0x00000200; /* Rs = r2 */
1199 }
1200 asi->insn[0] = insn;
1201
1202 if ((insn & 0x0f900000) == 0x01100000) {
1203 /*
1204 * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx
1205 * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx
1206 * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx
1207 * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx
1208 */
1209 asi->insn_handler = emulate_alu_tests;
1210 } else {
1211 /* ALU ops which write to Rd */
1212 asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
1213 emulate_alu_rwflags : emulate_alu_rflags;
1214 }
1215 return INSN_GOOD;
1216}
1217
1218static enum kprobe_insn __kprobes
1219space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1220{
1221 /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */
1222 /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */
1223 if ((insn & 0x0fb00000) == 0x03000000)
1224 return prep_emulate_rd12_modify(insn, asi);
1225
1226 /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */
1227 if ((insn & 0x0fff0000) == 0x03200000) {
1228 unsigned op2 = insn & 0x000000ff;
1229 if (op2 == 0x01 || op2 == 0x04) {
1230 /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */
1231 /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */
1232 asi->insn[0] = insn;
1233 asi->insn_handler = emulate_none;
1234 return INSN_GOOD;
1235 } else if (op2 <= 0x03) {
1236 /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */
1237 /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */
1238 /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */
1239 /*
1240 * We make WFE and WFI true NOPs to avoid stalls due
1241 * to missing events whilst processing the probe.
1242 */
1243 asi->insn_handler = emulate_nop;
1244 return INSN_GOOD_NO_SLOT;
1245 }
1246 /* For DBG and unallocated hints it's safest to reject them */
1247 return INSN_REJECTED;
1248 }
1249
1250 /*
1251 * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx
1252 * ALU op with S bit and Rd == 15 :
1253 * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx
1254 */
1255 if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */
1256 (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */
1257 return INSN_REJECTED;
1258
1259 /*
1260 * Data processing: 32-bit Immediate
1261 * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx
1262 * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx
1263 * *S (bit 20) updates condition codes
1264 * ADC/SBC/RSC reads the C flag
1265 */
1266 insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */
1267 asi->insn[0] = insn;
1268
1269 if ((insn & 0x0f900000) == 0x03100000) {
1270 /*
1271 * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx
1272 * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx
1273 * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx
1274 * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx
1275 */
1276 asi->insn_handler = emulate_alu_tests_imm;
1277 } else {
1278 /* ALU ops which write to Rd */
1279 asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */
1280 emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
1281 }
1282 return INSN_GOOD;
1283}
1284
1285static enum kprobe_insn __kprobes
1286space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1287{
1288 /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */
1289 if ((insn & 0x0ff000f0) == 0x068000b0) {
1290 if (is_r15(insn, 12))
1291 return INSN_REJECTED; /* Rd is PC */
1292 insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */
1293 insn |= 0x00000001; /* Rm = r1 */
1294 asi->insn[0] = insn;
1295 asi->insn_handler = emulate_sel;
1296 return INSN_GOOD;
1297 }
1298
1299 /* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */
1300 /* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */
1301 /* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */
1302 /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */
1303 if ((insn & 0x0fa00030) == 0x06a00010 ||
1304 (insn & 0x0fb000f0) == 0x06a00030) {
1305 if (is_r15(insn, 12))
1306 return INSN_REJECTED; /* Rd is PC */
1307 insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */
1308 asi->insn[0] = insn;
1309 asi->insn_handler = emulate_sat;
1310 return INSN_GOOD;
1311 }
1312
1313 /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */
1314 /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */
1315 /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */
1316 /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */
1317 if ((insn & 0x0ff00070) == 0x06b00030 ||
1318 (insn & 0x0ff00070) == 0x06f00030)
1319 return prep_emulate_rd12rm0(insn, asi);
1320
1321 /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */
1322 /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */
1323 /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */
1324 /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */
1325 /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */
1326 /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */
1327 /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */
1328 /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */
1329 /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */
1330 /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */
1331 /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */
1332 /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */
1333 /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */
1334 /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */
1335 /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */
1336 /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */
1337 /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */
1338 /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */
1339 /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */
1340 /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */
1341 /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */
1342 /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */
1343 /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */
1344 /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */
1345 /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */
1346 /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */
1347 /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */
1348 /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */
1349 /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */
1350 /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */
1351 /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */
1352 /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */
1353 /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */
1354 /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */
1355 /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */
1356 /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */
1357 /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */
1358 /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */
1359 /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */
1360 /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */
1361 /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */
1362 /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */
1363 /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */
1364 /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */
1365 /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */
1366 /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */
1367 /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */
1368 /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */
1369 /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */
1370 /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */
1371 if ((insn & 0x0f800010) == 0x06000010) {
1372 if ((insn & 0x00300000) == 0x00000000 ||
1373 (insn & 0x000000e0) == 0x000000a0 ||
1374 (insn & 0x000000e0) == 0x000000c0)
1375 return INSN_REJECTED; /* Unallocated space */
1376 return prep_emulate_rd12rn16rm0_wflags(insn, asi);
1377 }
1378
1379 /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */
1380 /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */
1381 if ((insn & 0x0ff00030) == 0x06800010)
1382 return prep_emulate_rd12rn16rm0_wflags(insn, asi);
1383
1384 /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */
1385 /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */
1386 /* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */
1387 /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */
1388 /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */
1389 /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */
1390 /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */
1391 /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */
1392 /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */
1393 /* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */
1394 /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */
1395 /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */
1396 /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */
1397 /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */
1398 if ((insn & 0x0f8000f0) == 0x06800070) {
1399 if ((insn & 0x00300000) == 0x00100000)
1400 return INSN_REJECTED; /* Unallocated space */
1401
1402 if ((insn & 0x000f0000) == 0x000f0000)
1403 return prep_emulate_rd12rm0(insn, asi);
1404 else
1405 return prep_emulate_rd12rn16rm0_wflags(insn, asi);
1406 }
1407
1408 /* Other instruction encodings aren't yet defined */
1409 return INSN_REJECTED;
1410}
1411
1412static enum kprobe_insn __kprobes
1413space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1414{
1415 /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */
1416 if ((insn & 0x0ff000f0) == 0x07f000f0)
1417 return INSN_REJECTED;
1418
1419 /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */
1420 /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */
1421 if ((insn & 0x0ff00090) == 0x07400010)
1422 return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi);
1423
1424 /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */
1425 /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */
1426 /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */
1427 /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */
1428 /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */
1429 /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */
1430 /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */
1431 /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */
1432 if ((insn & 0x0ff00090) == 0x07000010 ||
1433 (insn & 0x0ff000d0) == 0x07500010 ||
1434 (insn & 0x0ff000f0) == 0x07800010) {
1435
1436 if ((insn & 0x0000f000) == 0x0000f000)
1437 return prep_emulate_rd16rs8rm0_wflags(insn, asi);
1438 else
1439 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
1440 }
1441
1442 /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */
1443 if ((insn & 0x0ff000d0) == 0x075000d0)
1444 return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi);
1445
1446 /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */
1447 /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */
1448 if ((insn & 0x0fa00070) == 0x07a00050)
1449 return prep_emulate_rd12rm0(insn, asi);
1450
1451 /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */
1452 /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */
1453 if ((insn & 0x0fe00070) == 0x07c00010) {
1454
1455 if ((insn & 0x0000000f) == 0x0000000f)
1456 return prep_emulate_rd12_modify(insn, asi);
1457 else
1458 return prep_emulate_rd12rn0_modify(insn, asi);
1459 }
1460
1461 return INSN_REJECTED;
1462}
1463
1464static enum kprobe_insn __kprobes
1465space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1466{
1467 /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */
1468 /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */
1469 /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */
1470 /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */
1471 /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */
1472 /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */
1473 /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */
1474 /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */
1475
1476 if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12))
1477 return INSN_REJECTED; /* LDRB into PC */
1478
1479 return prep_emulate_ldr_str(insn, asi);
1480}
1481
1482static enum kprobe_insn __kprobes
1483space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1484{
1485 /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */
1486 /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */
1487 if ((insn & 0x0e708000) == 0x85000000 ||
1488 (insn & 0x0e508000) == 0x85010000)
1489 return INSN_REJECTED;
1490
1491 /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */
1492 /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */
1493 asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */
1494 simulate_stm1_pc : simulate_ldm1stm1;
1495 return INSN_GOOD_NO_SLOT;
1496}
1497
1498static enum kprobe_insn __kprobes
1499space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1500{
1501 /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */
1502 /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */
1503 asi->insn_handler = simulate_bbl;
1504 return INSN_GOOD_NO_SLOT;
1505}
1506
1507static enum kprobe_insn __kprobes
1508space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1509{
1510 /* Coprocessor instructions... */
1511 /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
1512 /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */
1513 /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */
1514 /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */
1515 /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */
1516 /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */
1517 /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */
1518
1519 /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */
1520
1521 return INSN_REJECTED;
1522}
1523
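/*
 * The condition check helpers below test the APSR flags, which sit in
 * the top bits of the CPSR: N is bit 31, Z bit 30, C bit 29 and V bit 28.
 * The shift tricks rely on those adjacent positions. For example, in
 * __check_hi() the expression cpsr &= ~(cpsr >> 1) moves Z down into the
 * C position and clears C whenever Z is set, so the result is non-zero
 * only when C is set and Z is clear (the HI condition). Likewise
 * __check_ge() shifts V up into the N position, so the XOR leaves N ^ V
 * at bit 31, and GE (N == V) holds when that bit ends up clear.
 */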
1524static unsigned long __kprobes __check_eq(unsigned long cpsr)
1525{
1526 return cpsr & PSR_Z_BIT;
1527}
1528
1529static unsigned long __kprobes __check_ne(unsigned long cpsr)
1530{
1531 return (~cpsr) & PSR_Z_BIT;
1532}
1533
1534static unsigned long __kprobes __check_cs(unsigned long cpsr)
1535{
1536 return cpsr & PSR_C_BIT;
1537}
1538
1539static unsigned long __kprobes __check_cc(unsigned long cpsr)
1540{
1541 return (~cpsr) & PSR_C_BIT;
1542}
1543
1544static unsigned long __kprobes __check_mi(unsigned long cpsr)
1545{
1546 return cpsr & PSR_N_BIT;
1547}
1548
1549static unsigned long __kprobes __check_pl(unsigned long cpsr)
1550{
1551 return (~cpsr) & PSR_N_BIT;
1552}
1553
1554static unsigned long __kprobes __check_vs(unsigned long cpsr)
1555{
1556 return cpsr & PSR_V_BIT;
1557}
1558
1559static unsigned long __kprobes __check_vc(unsigned long cpsr)
1560{
1561 return (~cpsr) & PSR_V_BIT;
1562}
1563
1564static unsigned long __kprobes __check_hi(unsigned long cpsr)
1565{
1566 cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
1567 return cpsr & PSR_C_BIT;
1568}
1569
1570static unsigned long __kprobes __check_ls(unsigned long cpsr)
1571{
1572 cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */
1573 return (~cpsr) & PSR_C_BIT;
1574}
1575
1576static unsigned long __kprobes __check_ge(unsigned long cpsr)
1577{
1578 cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1579 return (~cpsr) & PSR_N_BIT;
1580}
1581
1582static unsigned long __kprobes __check_lt(unsigned long cpsr)
1583{
1584 cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1585 return cpsr & PSR_N_BIT;
1586}
1587
1588static unsigned long __kprobes __check_gt(unsigned long cpsr)
1589{
1590 unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1591 temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
1592 return (~temp) & PSR_N_BIT;
1593}
1594
1595static unsigned long __kprobes __check_le(unsigned long cpsr)
1596{
1597 unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */
1598 temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */
1599 return temp & PSR_N_BIT;
1600}
1601
1602static unsigned long __kprobes __check_al(unsigned long cpsr)
1603{
1604 return true;
1605}
1606
1607static kprobe_check_cc * const condition_checks[16] = {
1608 &__check_eq, &__check_ne, &__check_cs, &__check_cc,
1609 &__check_mi, &__check_pl, &__check_vs, &__check_vc,
1610 &__check_hi, &__check_ls, &__check_ge, &__check_lt,
1611 &__check_gt, &__check_le, &__check_al, &__check_al
1612};
1613
1614/* Return:
1615 * INSN_REJECTED If instruction is one not allowed to kprobe,
1616 * INSN_GOOD If instruction is supported and uses instruction slot,
1617 * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
1618 *
1619 * For instructions we don't want to kprobe (INSN_REJECTED return result):
1620 * These are generally ones that modify the processor state, making
1621 * them "hard" to simulate, such as those that switch processor modes
1622 * or make accesses in alternate modes. Any of these could be
1623 * simulated if the work were put into it, but the payoff would be low
1624 * given that they should also be very rare.
1625 */
1626enum kprobe_insn __kprobes
1627arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1628{
1629 asi->insn_check_cc = condition_checks[insn>>28];
1630 asi->insn[1] = KPROBE_RETURN_INSTRUCTION;
1631
1632 if ((insn & 0xf0000000) == 0xf0000000)
1633
1634 return space_1111(insn, asi);
1635
1636 else if ((insn & 0x0e000000) == 0x00000000)
1637
1638 return space_cccc_000x(insn, asi);
1639
1640 else if ((insn & 0x0e000000) == 0x02000000)
1641
1642 return space_cccc_001x(insn, asi);
1643
1644 else if ((insn & 0x0f000010) == 0x06000010)
1645
1646 return space_cccc_0110__1(insn, asi);
1647
1648 else if ((insn & 0x0f000010) == 0x07000010)
1649
1650 return space_cccc_0111__1(insn, asi);
1651
1652 else if ((insn & 0x0c000000) == 0x04000000)
1653
1654 return space_cccc_01xx(insn, asi);
1655
1656 else if ((insn & 0x0e000000) == 0x08000000)
1657
1658 return space_cccc_100x(insn, asi);
1659
1660 else if ((insn & 0x0e000000) == 0x0a000000)
1661
1662 return space_cccc_101x(insn, asi);
1663
1664 return space_cccc_11xx(insn, asi);
1665}
1666
1667void __init arm_kprobe_decode_init(void)
1668{
1669 find_str_pc_offset();
1670}
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
new file mode 100644
index 000000000000..902ca59e8b11
--- /dev/null
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -0,0 +1,1462 @@
1/*
2 * arch/arm/kernel/kprobes-thumb.c
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/kprobes.h>
13
14#include "kprobes.h"
15
16
17/*
18 * True if current instruction is in an IT block.
19 */
20#define in_it_block(cpsr) ((cpsr & 0x06000c00) != 0x00000000)
21
22/*
23 * Return the condition code to check for the currently executing instruction.
24 * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if
25 * in_it_block returns true.
26 */
27#define current_cond(cpsr) ((cpsr >> 12) & 0xf)
28
29/*
30 * Return the PC value for a probe in thumb code.
31 * This is the address of the probed instruction plus 4.
32 * We subtract one because the address will have bit zero set to indicate
33 * a pointer to thumb code.
34 */
35static inline unsigned long __kprobes thumb_probe_pc(struct kprobe *p)
36{
37 return (unsigned long)p->addr - 1 + 4;
38}
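/*
 * For example, a probe placed on a Thumb instruction at 0x00008000 has
 * p->addr == 0x00008001 (bit zero set), and thumb_probe_pc() returns
 * 0x00008004, the value the probed instruction would see if it read PC.
 */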
39
40static void __kprobes
41t32_simulate_table_branch(struct kprobe *p, struct pt_regs *regs)
42{
43 kprobe_opcode_t insn = p->opcode;
44 unsigned long pc = thumb_probe_pc(p);
45 int rn = (insn >> 16) & 0xf;
46 int rm = insn & 0xf;
47
48 unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn];
49 unsigned long rmv = regs->uregs[rm];
50 unsigned int halfwords;
51
52 if (insn & 0x10) /* TBH */
53 halfwords = ((u16 *)rnv)[rmv];
54 else /* TBB */
55 halfwords = ((u8 *)rnv)[rmv];
56
57 regs->ARM_pc = pc + 2 * halfwords;
58}
59
60static void __kprobes
61t32_simulate_mrs(struct kprobe *p, struct pt_regs *regs)
62{
63 kprobe_opcode_t insn = p->opcode;
64 int rd = (insn >> 8) & 0xf;
65 unsigned long mask = 0xf8ff03df; /* Mask out execution state */
66 regs->uregs[rd] = regs->ARM_cpsr & mask;
67}
68
69static void __kprobes
70t32_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
71{
72 kprobe_opcode_t insn = p->opcode;
73 unsigned long pc = thumb_probe_pc(p);
74
75 long offset = insn & 0x7ff; /* imm11 */
76 offset += (insn & 0x003f0000) >> 5; /* imm6 */
77 offset += (insn & 0x00002000) << 4; /* J1 */
78 offset += (insn & 0x00000800) << 7; /* J2 */
79 offset -= (insn & 0x04000000) >> 7; /* Apply sign bit */
80
81 regs->ARM_pc = pc + (offset * 2);
82}
83
84static enum kprobe_insn __kprobes
85t32_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
86{
87 int cc = (insn >> 22) & 0xf;
88 asi->insn_check_cc = kprobe_condition_checks[cc];
89 asi->insn_handler = t32_simulate_cond_branch;
90 return INSN_GOOD_NO_SLOT;
91}
92
93static void __kprobes
94t32_simulate_branch(struct kprobe *p, struct pt_regs *regs)
95{
96 kprobe_opcode_t insn = p->opcode;
97 unsigned long pc = thumb_probe_pc(p);
98
99 long offset = insn & 0x7ff; /* imm11 */
100 offset += (insn & 0x03ff0000) >> 5; /* imm10 */
101 offset += (insn & 0x00002000) << 9; /* J1 */
102 offset += (insn & 0x00000800) << 10; /* J2 */
103 if (insn & 0x04000000)
104 offset -= 0x00800000; /* Apply sign bit */
105 else
106 offset ^= 0x00600000; /* Invert J1 and J2 */
107
108 if (insn & (1 << 14)) {
109 /* BL or BLX */
110 regs->ARM_lr = (unsigned long)p->addr + 4;
111 if (!(insn & (1 << 12))) {
112 /* BLX so switch to ARM mode */
113 regs->ARM_cpsr &= ~PSR_T_BIT;
114 pc &= ~3;
115 }
116 }
117
118 regs->ARM_pc = pc + (offset * 2);
119}
120
121static void __kprobes
122t32_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
123{
124 kprobe_opcode_t insn = p->opcode;
125 unsigned long addr = thumb_probe_pc(p) & ~3;
126 int rt = (insn >> 12) & 0xf;
127 unsigned long rtv;
128
129 long offset = insn & 0xfff;
130 if (insn & 0x00800000)
131 addr += offset;
132 else
133 addr -= offset;
134
135 if (insn & 0x00400000) {
136 /* LDR */
137 rtv = *(unsigned long *)addr;
138 if (rt == 15) {
139 bx_write_pc(rtv, regs);
140 return;
141 }
142 } else if (insn & 0x00200000) {
143 /* LDRH */
144 if (insn & 0x01000000)
145 rtv = *(s16 *)addr;
146 else
147 rtv = *(u16 *)addr;
148 } else {
149 /* LDRB */
150 if (insn & 0x01000000)
151 rtv = *(s8 *)addr;
152 else
153 rtv = *(u8 *)addr;
154 }
155
156 regs->uregs[rt] = rtv;
157}
158
159static enum kprobe_insn __kprobes
160t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
161{
162 enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
163
164 /* Fixup modified instruction to have halfwords in correct order...*/
165 insn = asi->insn[0];
166 ((u16 *)asi->insn)[0] = insn >> 16;
167 ((u16 *)asi->insn)[1] = insn & 0xffff;
168
169 return ret;
170}
171
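/*
 * The t32_emulate_* functions below share a common pattern: operand
 * values from the saved pt_regs are bound to the fixed registers r0-r3
 * expected by the copy of the instruction in the probe's instruction
 * slot, "blx %[fn]" then runs that copy via p->ainsn.insn_fn, and the
 * results (and, where relevant, the APSR flags) are copied back into
 * pt_regs afterwards.
 */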
172static void __kprobes
173t32_emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs)
174{
175 kprobe_opcode_t insn = p->opcode;
176 unsigned long pc = thumb_probe_pc(p) & ~3;
177 int rt1 = (insn >> 12) & 0xf;
178 int rt2 = (insn >> 8) & 0xf;
179 int rn = (insn >> 16) & 0xf;
180
181 register unsigned long rt1v asm("r0") = regs->uregs[rt1];
182 register unsigned long rt2v asm("r1") = regs->uregs[rt2];
183 register unsigned long rnv asm("r2") = (rn == 15) ? pc
184 : regs->uregs[rn];
185
186 __asm__ __volatile__ (
187 "blx %[fn]"
188 : "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
189 : "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (p->ainsn.insn_fn)
190 : "lr", "memory", "cc"
191 );
192
193 if (rn != 15)
194 regs->uregs[rn] = rnv; /* Writeback base register */
195 regs->uregs[rt1] = rt1v;
196 regs->uregs[rt2] = rt2v;
197}
198
199static void __kprobes
200t32_emulate_ldrstr(struct kprobe *p, struct pt_regs *regs)
201{
202 kprobe_opcode_t insn = p->opcode;
203 int rt = (insn >> 12) & 0xf;
204 int rn = (insn >> 16) & 0xf;
205 int rm = insn & 0xf;
206
207 register unsigned long rtv asm("r0") = regs->uregs[rt];
208 register unsigned long rnv asm("r2") = regs->uregs[rn];
209 register unsigned long rmv asm("r3") = regs->uregs[rm];
210
211 __asm__ __volatile__ (
212 "blx %[fn]"
213 : "=r" (rtv), "=r" (rnv)
214 : "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn)
215 : "lr", "memory", "cc"
216 );
217
218 regs->uregs[rn] = rnv; /* Writeback base register */
219 if (rt == 15) /* Can't be true for a STR as they aren't allowed */
220 bx_write_pc(rtv, regs);
221 else
222 regs->uregs[rt] = rtv;
223}
224
225static void __kprobes
226t32_emulate_rd8rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs)
227{
228 kprobe_opcode_t insn = p->opcode;
229 int rd = (insn >> 8) & 0xf;
230 int rn = (insn >> 16) & 0xf;
231 int rm = insn & 0xf;
232
233 register unsigned long rdv asm("r1") = regs->uregs[rd];
234 register unsigned long rnv asm("r2") = regs->uregs[rn];
235 register unsigned long rmv asm("r3") = regs->uregs[rm];
236 unsigned long cpsr = regs->ARM_cpsr;
237
238 __asm__ __volatile__ (
239 "msr cpsr_fs, %[cpsr] \n\t"
240 "blx %[fn] \n\t"
241 "mrs %[cpsr], cpsr \n\t"
242 : "=r" (rdv), [cpsr] "=r" (cpsr)
243 : "0" (rdv), "r" (rnv), "r" (rmv),
244 "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
245 : "lr", "memory", "cc"
246 );
247
248 regs->uregs[rd] = rdv;
249 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
250}
251
252static void __kprobes
253t32_emulate_rd8pc16_noflags(struct kprobe *p, struct pt_regs *regs)
254{
255 kprobe_opcode_t insn = p->opcode;
256 unsigned long pc = thumb_probe_pc(p);
257 int rd = (insn >> 8) & 0xf;
258
259 register unsigned long rdv asm("r1") = regs->uregs[rd];
260 register unsigned long rnv asm("r2") = pc & ~3;
261
262 __asm__ __volatile__ (
263 "blx %[fn]"
264 : "=r" (rdv)
265 : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
266 : "lr", "memory", "cc"
267 );
268
269 regs->uregs[rd] = rdv;
270}
271
272static void __kprobes
273t32_emulate_rd8rn16_noflags(struct kprobe *p, struct pt_regs *regs)
274{
275 kprobe_opcode_t insn = p->opcode;
276 int rd = (insn >> 8) & 0xf;
277 int rn = (insn >> 16) & 0xf;
278
279 register unsigned long rdv asm("r1") = regs->uregs[rd];
280 register unsigned long rnv asm("r2") = regs->uregs[rn];
281
282 __asm__ __volatile__ (
283 "blx %[fn]"
284 : "=r" (rdv)
285 : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn)
286 : "lr", "memory", "cc"
287 );
288
289 regs->uregs[rd] = rdv;
290}
291
292static void __kprobes
293t32_emulate_rdlo12rdhi8rn16rm0_noflags(struct kprobe *p, struct pt_regs *regs)
294{
295 kprobe_opcode_t insn = p->opcode;
296 int rdlo = (insn >> 12) & 0xf;
297 int rdhi = (insn >> 8) & 0xf;
298 int rn = (insn >> 16) & 0xf;
299 int rm = insn & 0xf;
300
301 register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
302 register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
303 register unsigned long rnv asm("r2") = regs->uregs[rn];
304 register unsigned long rmv asm("r3") = regs->uregs[rm];
305
306 __asm__ __volatile__ (
307 "blx %[fn]"
308 : "=r" (rdlov), "=r" (rdhiv)
309 : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
310 [fn] "r" (p->ainsn.insn_fn)
311 : "lr", "memory", "cc"
312 );
313
314 regs->uregs[rdlo] = rdlov;
315 regs->uregs[rdhi] = rdhiv;
316}
317
318/* These emulation encodings are functionally equivalent... */
319#define t32_emulate_rd8rn16rm0ra12_noflags \
320 t32_emulate_rdlo12rdhi8rn16rm0_noflags
321
322static const union decode_item t32_table_1110_100x_x0xx[] = {
323 /* Load/store multiple instructions */
324
325 /* Rn is PC 1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */
326 DECODE_REJECT (0xfe4f0000, 0xe80f0000),
327
328 /* SRS 1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */
329 /* RFE 1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */
330 DECODE_REJECT (0xffc00000, 0xe8000000),
331 /* SRS 1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */
332 /* RFE 1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */
333 DECODE_REJECT (0xffc00000, 0xe9800000),
334
335 /* STM Rn, {...pc} 1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */
336 DECODE_REJECT (0xfe508000, 0xe8008000),
337 /* LDM Rn, {...lr,pc} 1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */
338 DECODE_REJECT (0xfe50c000, 0xe810c000),
339 /* LDM/STM Rn, {...sp} 1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */
340 DECODE_REJECT (0xfe402000, 0xe8002000),
341
342 /* STMIA 1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */
343 /* LDMIA 1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */
344 /* STMDB 1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */
345 /* LDMDB 1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */
346 DECODE_CUSTOM (0xfe400000, 0xe8000000, t32_decode_ldmstm),
347
348 DECODE_END
349};
350
351static const union decode_item t32_table_1110_100x_x1xx[] = {
352 /* Load/store dual, load/store exclusive, table branch */
353
354 /* STRD (immediate) 1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */
355 /* LDRD (immediate) 1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */
356 DECODE_OR (0xff600000, 0xe8600000),
357 /* STRD (immediate) 1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */
358 /* LDRD (immediate) 1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */
359 DECODE_EMULATEX (0xff400000, 0xe9400000, t32_emulate_ldrdstrd,
360 REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)),
361
362 /* TBB 1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */
363 /* TBH 1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */
364 DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, t32_simulate_table_branch,
365 REGS(NOSP, 0, 0, 0, NOSPPC)),
366
367 /* STREX 1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */
368 /* LDREX 1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */
369 /* STREXB 1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */
370 /* STREXH 1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */
371 /* STREXD 1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */
372 /* LDREXB 1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */
373 /* LDREXH 1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */
374 /* LDREXD 1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */
375 /* And unallocated instructions... */
376 DECODE_END
377};
378
379static const union decode_item t32_table_1110_101x[] = {
380 /* Data-processing (shifted register) */
381
382 /* TST 1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */
383 /* TEQ 1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */
384 DECODE_EMULATEX (0xff700f00, 0xea100f00, t32_emulate_rd8rn16rm0_rwflags,
385 REGS(NOSPPC, 0, 0, 0, NOSPPC)),
386
387 /* CMN 1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */
388 DECODE_OR (0xfff00f00, 0xeb100f00),
389 /* CMP 1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */
390 DECODE_EMULATEX (0xfff00f00, 0xebb00f00, t32_emulate_rd8rn16rm0_rwflags,
391 REGS(NOPC, 0, 0, 0, NOSPPC)),
392
393 /* MOV 1110 1010 010x 1111 xxxx xxxx xxxx xxxx */
394 /* MVN 1110 1010 011x 1111 xxxx xxxx xxxx xxxx */
395 DECODE_EMULATEX (0xffcf0000, 0xea4f0000, t32_emulate_rd8rn16rm0_rwflags,
396 REGS(0, 0, NOSPPC, 0, NOSPPC)),
397
398 /* ??? 1110 1010 101x xxxx xxxx xxxx xxxx xxxx */
399 /* ??? 1110 1010 111x xxxx xxxx xxxx xxxx xxxx */
400 DECODE_REJECT (0xffa00000, 0xeaa00000),
401 /* ??? 1110 1011 001x xxxx xxxx xxxx xxxx xxxx */
402 DECODE_REJECT (0xffe00000, 0xeb200000),
403 /* ??? 1110 1011 100x xxxx xxxx xxxx xxxx xxxx */
404 DECODE_REJECT (0xffe00000, 0xeb800000),
405 /* ??? 1110 1011 111x xxxx xxxx xxxx xxxx xxxx */
406 DECODE_REJECT (0xffe00000, 0xebe00000),
407
408 /* ADD/SUB SP, SP, Rm, LSL #0..3 */
409 /* 1110 1011 x0xx 1101 x000 1101 xx00 xxxx */
410 DECODE_EMULATEX (0xff4f7f30, 0xeb0d0d00, t32_emulate_rd8rn16rm0_rwflags,
411 REGS(SP, 0, SP, 0, NOSPPC)),
412
413 /* ADD/SUB SP, SP, Rm, shift */
414 /* 1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */
415 DECODE_REJECT (0xff4f0f00, 0xeb0d0d00),
416
417 /* ADD/SUB Rd, SP, Rm, shift */
418 /* 1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */
419 DECODE_EMULATEX (0xff4f0000, 0xeb0d0000, t32_emulate_rd8rn16rm0_rwflags,
420 REGS(SP, 0, NOPC, 0, NOSPPC)),
421
422 /* AND 1110 1010 000x xxxx xxxx xxxx xxxx xxxx */
423 /* BIC 1110 1010 001x xxxx xxxx xxxx xxxx xxxx */
424 /* ORR 1110 1010 010x xxxx xxxx xxxx xxxx xxxx */
425 /* ORN 1110 1010 011x xxxx xxxx xxxx xxxx xxxx */
426 /* EOR 1110 1010 100x xxxx xxxx xxxx xxxx xxxx */
427 /* PKH 1110 1010 110x xxxx xxxx xxxx xxxx xxxx */
428 /* ADD 1110 1011 000x xxxx xxxx xxxx xxxx xxxx */
429 /* ADC 1110 1011 010x xxxx xxxx xxxx xxxx xxxx */
430 /* SBC 1110 1011 011x xxxx xxxx xxxx xxxx xxxx */
431 /* SUB 1110 1011 101x xxxx xxxx xxxx xxxx xxxx */
432 /* RSB 1110 1011 110x xxxx xxxx xxxx xxxx xxxx */
433 DECODE_EMULATEX (0xfe000000, 0xea000000, t32_emulate_rd8rn16rm0_rwflags,
434 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
435
436 DECODE_END
437};
438
439static const union decode_item t32_table_1111_0x0x___0[] = {
440 /* Data-processing (modified immediate) */
441
442 /* TST 1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */
443 /* TEQ 1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */
444 DECODE_EMULATEX (0xfb708f00, 0xf0100f00, t32_emulate_rd8rn16rm0_rwflags,
445 REGS(NOSPPC, 0, 0, 0, 0)),
446
447 /* CMN 1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */
448 DECODE_OR (0xfbf08f00, 0xf1100f00),
449 /* CMP 1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */
450 DECODE_EMULATEX (0xfbf08f00, 0xf1b00f00, t32_emulate_rd8rn16rm0_rwflags,
451 REGS(NOPC, 0, 0, 0, 0)),
452
453 /* MOV 1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */
454 /* MVN 1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */
455 DECODE_EMULATEX (0xfbcf8000, 0xf04f0000, t32_emulate_rd8rn16rm0_rwflags,
456 REGS(0, 0, NOSPPC, 0, 0)),
457
458 /* ??? 1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */
459 DECODE_REJECT (0xfbe08000, 0xf0a00000),
460 /* ??? 1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */
461 /* ??? 1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */
462 DECODE_REJECT (0xfbc08000, 0xf0c00000),
463 /* ??? 1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */
464 DECODE_REJECT (0xfbe08000, 0xf1200000),
465 /* ??? 1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */
466 DECODE_REJECT (0xfbe08000, 0xf1800000),
467 /* ??? 1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */
468 DECODE_REJECT (0xfbe08000, 0xf1e00000),
469
470 /* ADD Rd, SP, #imm 1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */
471 /* SUB Rd, SP, #imm 1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */
472 DECODE_EMULATEX (0xfb4f8000, 0xf10d0000, t32_emulate_rd8rn16rm0_rwflags,
473 REGS(SP, 0, NOPC, 0, 0)),
474
475 /* AND 1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */
476 /* BIC 1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */
477 /* ORR 1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */
478 /* ORN 1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */
479 /* EOR 1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */
480 /* ADD 1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */
481 /* ADC 1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */
482 /* SBC 1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */
483 /* SUB 1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */
484 /* RSB 1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */
485 DECODE_EMULATEX (0xfa008000, 0xf0000000, t32_emulate_rd8rn16rm0_rwflags,
486 REGS(NOSPPC, 0, NOSPPC, 0, 0)),
487
488 DECODE_END
489};
490
491static const union decode_item t32_table_1111_0x1x___0[] = {
492 /* Data-processing (plain binary immediate) */
493
494 /* ADDW Rd, PC, #imm 1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */
495 DECODE_OR (0xfbff8000, 0xf20f0000),
496 /* SUBW Rd, PC, #imm 1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */
497 DECODE_EMULATEX (0xfbff8000, 0xf2af0000, t32_emulate_rd8pc16_noflags,
498 REGS(PC, 0, NOSPPC, 0, 0)),
499
500 /* ADDW SP, SP, #imm 1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */
501 DECODE_OR (0xfbff8f00, 0xf20d0d00),
502 /* SUBW SP, SP, #imm 1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */
503 DECODE_EMULATEX (0xfbff8f00, 0xf2ad0d00, t32_emulate_rd8rn16_noflags,
504 REGS(SP, 0, SP, 0, 0)),
505
506 /* ADDW 1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */
507 DECODE_OR (0xfbf08000, 0xf2000000),
508 /* SUBW 1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */
509 DECODE_EMULATEX (0xfbf08000, 0xf2a00000, t32_emulate_rd8rn16_noflags,
510 REGS(NOPCX, 0, NOSPPC, 0, 0)),
511
512 /* MOVW 1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */
513 /* MOVT 1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */
514 DECODE_EMULATEX (0xfb708000, 0xf2400000, t32_emulate_rd8rn16_noflags,
515 REGS(0, 0, NOSPPC, 0, 0)),
516
517 /* SSAT16 1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */
518 /* SSAT 1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */
519 /* USAT16 1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */
520 /* USAT 1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */
521 DECODE_EMULATEX (0xfb508000, 0xf3000000, t32_emulate_rd8rn16rm0_rwflags,
522 REGS(NOSPPC, 0, NOSPPC, 0, 0)),
523
524 /* SBFX 1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */
525 /* UBFX 1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */
526 DECODE_EMULATEX (0xfb708000, 0xf3400000, t32_emulate_rd8rn16_noflags,
527 REGS(NOSPPC, 0, NOSPPC, 0, 0)),
528
529 /* BFC 1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */
530 DECODE_EMULATEX (0xfbff8000, 0xf36f0000, t32_emulate_rd8rn16_noflags,
531 REGS(0, 0, NOSPPC, 0, 0)),
532
533 /* BFI 1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */
534 DECODE_EMULATEX (0xfbf08000, 0xf3600000, t32_emulate_rd8rn16_noflags,
535 REGS(NOSPPCX, 0, NOSPPC, 0, 0)),
536
537 DECODE_END
538};
539
540static const union decode_item t32_table_1111_0xxx___1[] = {
541 /* Branches and miscellaneous control */
542
543 /* YIELD 1111 0011 1010 xxxx 10x0 x000 0000 0001 */
544 DECODE_OR (0xfff0d7ff, 0xf3a08001),
545 /* SEV 1111 0011 1010 xxxx 10x0 x000 0000 0100 */
546 DECODE_EMULATE (0xfff0d7ff, 0xf3a08004, kprobe_emulate_none),
547 /* NOP 1111 0011 1010 xxxx 10x0 x000 0000 0000 */
548 /* WFE 1111 0011 1010 xxxx 10x0 x000 0000 0010 */
549 /* WFI 1111 0011 1010 xxxx 10x0 x000 0000 0011 */
550 DECODE_SIMULATE (0xfff0d7fc, 0xf3a08000, kprobe_simulate_nop),
551
552 /* MRS Rd, CPSR 1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */
553 DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, t32_simulate_mrs,
554 REGS(0, 0, NOSPPC, 0, 0)),
555
556 /*
557 * Unsupported instructions
558 * 1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx
559 *
560 * MSR 1111 0011 100x xxxx 10x0 xxxx xxxx xxxx
561 * DBG hint 1111 0011 1010 xxxx 10x0 x000 1111 xxxx
562 * Unallocated hints 1111 0011 1010 xxxx 10x0 x000 xxxx xxxx
563 * CPS 1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx
564 * CLREX/DSB/DMB/ISB 1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx
565 * BXJ 1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx
566 * SUBS PC,LR,#<imm8> 1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx
567 * MRS Rd, SPSR 1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx
568 * SMC 1111 0111 1111 xxxx 1000 xxxx xxxx xxxx
569 * UNDEFINED 1111 0111 1111 xxxx 1010 xxxx xxxx xxxx
570 * ??? 1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx
571 */
572 DECODE_REJECT (0xfb80d000, 0xf3808000),
573
574 /* Bcc 1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */
575 DECODE_CUSTOM (0xf800d000, 0xf0008000, t32_decode_cond_branch),
576
577 /* BLX 1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */
578 DECODE_OR (0xf800d001, 0xf000c000),
579 /* B 1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */
580 /* BL 1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */
581 DECODE_SIMULATE (0xf8009000, 0xf0009000, t32_simulate_branch),
582
583 DECODE_END
584};
585
586static const union decode_item t32_table_1111_100x_x0x1__1111[] = {
587 /* Memory hints */
588
589 /* PLD (literal) 1111 1000 x001 1111 1111 xxxx xxxx xxxx */
590 /* PLI (literal) 1111 1001 x001 1111 1111 xxxx xxxx xxxx */
591 DECODE_SIMULATE (0xfe7ff000, 0xf81ff000, kprobe_simulate_nop),
592
593 /* PLD{W} (immediate) 1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */
594 DECODE_OR (0xffd0f000, 0xf890f000),
595 /* PLD{W} (immediate) 1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */
596 DECODE_OR (0xffd0ff00, 0xf810fc00),
597 /* PLI (immediate) 1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */
598 DECODE_OR (0xfff0f000, 0xf990f000),
599 /* PLI (immediate) 1111 1001 0001 xxxx 1111 1100 xxxx xxxx */
600 DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, kprobe_simulate_nop,
601 REGS(NOPCX, 0, 0, 0, 0)),
602
603 /* PLD{W} (register) 1111 1000 00x1 xxxx 1111 0000 00xx xxxx */
604 DECODE_OR (0xffd0ffc0, 0xf810f000),
605 /* PLI (register) 1111 1001 0001 xxxx 1111 0000 00xx xxxx */
606 DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, kprobe_simulate_nop,
607 REGS(NOPCX, 0, 0, 0, NOSPPC)),
608
609 /* Other unallocated instructions... */
610 DECODE_END
611};
612
613static const union decode_item t32_table_1111_100x[] = {
614 /* Store/Load single data item */
615
616 /* ??? 1111 100x x11x xxxx xxxx xxxx xxxx xxxx */
617 DECODE_REJECT (0xfe600000, 0xf8600000),
618
619 /* ??? 1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */
620 DECODE_REJECT (0xfff00000, 0xf9500000),
621
622 /* ??? 1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */
623 DECODE_REJECT (0xfe800d00, 0xf8000800),
624
625 /* STRBT 1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */
626 /* STRHT 1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */
627 /* STRT 1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */
628 /* LDRBT 1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */
629 /* LDRSBT 1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */
630 /* LDRHT 1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */
631 /* LDRSHT 1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */
632 /* LDRT 1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */
633 DECODE_REJECT (0xfe800f00, 0xf8000e00),
634
635 /* STR{,B,H} Rn,[PC...] 1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */
636 DECODE_REJECT (0xff1f0000, 0xf80f0000),
637
638 /* STR{,B,H} PC,[Rn...] 1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */
639 DECODE_REJECT (0xff10f000, 0xf800f000),
640
641 /* LDR (literal) 1111 1000 x101 1111 xxxx xxxx xxxx xxxx */
642 DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, t32_simulate_ldr_literal,
643 REGS(PC, ANY, 0, 0, 0)),
644
645 /* STR (immediate) 1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */
646 /* LDR (immediate) 1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */
647 DECODE_OR (0xffe00800, 0xf8400800),
648 /* STR (immediate) 1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */
649 /* LDR (immediate) 1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */
650 DECODE_EMULATEX (0xffe00000, 0xf8c00000, t32_emulate_ldrstr,
651 REGS(NOPCX, ANY, 0, 0, 0)),
652
653 /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
654 /* LDR (register) 1111 1000 0101 xxxx xxxx 0000 00xx xxxx */
655 DECODE_EMULATEX (0xffe00fc0, 0xf8400000, t32_emulate_ldrstr,
656 REGS(NOPCX, ANY, 0, 0, NOSPPC)),
657
658 /* LDRB (literal) 1111 1000 x001 1111 xxxx xxxx xxxx xxxx */
659 /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
660 /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
661 /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
662 DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
663 REGS(PC, NOSPPCX, 0, 0, 0)),
664
665 /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
666 /* STRH (immediate) 1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */
667 /* LDRB (immediate) 1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */
668 /* LDRSB (immediate) 1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */
669 /* LDRH (immediate) 1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */
670 /* LDRSH (immediate) 1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */
671 DECODE_OR (0xfec00800, 0xf8000800),
672 /* STRB (immediate) 1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */
673 /* STRH (immediate) 1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */
674 /* LDRB (immediate) 1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */
675 /* LDRSB (immediate) 1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */
676 /* LDRH (immediate) 1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */
677 /* LDRSH (immediate) 1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */
678 DECODE_EMULATEX (0xfec00000, 0xf8800000, t32_emulate_ldrstr,
679 REGS(NOPCX, NOSPPCX, 0, 0, 0)),
680
681 /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
682 /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
683 /* LDRB (register) 1111 1000 0001 xxxx xxxx 0000 00xx xxxx */
684 /* LDRSB (register) 1111 1001 0001 xxxx xxxx 0000 00xx xxxx */
685 /* LDRH (register) 1111 1000 0011 xxxx xxxx 0000 00xx xxxx */
686 /* LDRSH (register) 1111 1001 0011 xxxx xxxx 0000 00xx xxxx */
687 DECODE_EMULATEX (0xfe800fc0, 0xf8000000, t32_emulate_ldrstr,
688 REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)),
689
690 /* Other unallocated instructions... */
691 DECODE_END
692};
693
694static const union decode_item t32_table_1111_1010___1111[] = {
695 /* Data-processing (register) */
696
697 /* ??? 1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */
698 DECODE_REJECT (0xffe0f080, 0xfa60f080),
699
700 /* SXTH 1111 1010 0000 1111 1111 xxxx 1xxx xxxx */
701 /* UXTH 1111 1010 0001 1111 1111 xxxx 1xxx xxxx */
702 /* SXTB16 1111 1010 0010 1111 1111 xxxx 1xxx xxxx */
703 /* UXTB16 1111 1010 0011 1111 1111 xxxx 1xxx xxxx */
704 /* SXTB 1111 1010 0100 1111 1111 xxxx 1xxx xxxx */
705 /* UXTB 1111 1010 0101 1111 1111 xxxx 1xxx xxxx */
706 DECODE_EMULATEX (0xff8ff080, 0xfa0ff080, t32_emulate_rd8rn16rm0_rwflags,
707 REGS(0, 0, NOSPPC, 0, NOSPPC)),
708
709
710 /* ??? 1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */
711 DECODE_REJECT (0xff80f0b0, 0xfa80f030),
712 /* ??? 1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */
713 DECODE_REJECT (0xffb0f080, 0xfab0f000),
714
715 /* SADD16 1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */
716 /* SASX 1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */
717 /* SSAX 1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */
718 /* SSUB16 1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */
719 /* SADD8 1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */
720 /* SSUB8 1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */
721
722 /* QADD16 1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */
723 /* QASX 1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */
724 /* QSAX 1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */
725 /* QSUB16 1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */
726 /* QADD8 1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */
727 /* QSUB8 1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */
728
729 /* SHADD16 1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */
730 /* SHASX 1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */
731 /* SHSAX 1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */
732 /* SHSUB16 1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */
733 /* SHADD8 1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */
734 /* SHSUB8 1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */
735
736 /* UADD16 1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */
737 /* UASX 1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */
738 /* USAX 1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */
739 /* USUB16 1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */
740 /* UADD8 1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */
741 /* USUB8 1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */
742
743 /* UQADD16 1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */
744 /* UQASX 1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */
745 /* UQSAX 1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */
746 /* UQSUB16 1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */
747 /* UQADD8 1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */
748 /* UQSUB8 1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */
749
750 /* UHADD16 1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */
751 /* UHASX 1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */
752 /* UHSAX 1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */
753 /* UHSUB16 1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */
754 /* UHADD8 1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */
755 /* UHSUB8 1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */
756 DECODE_OR (0xff80f080, 0xfa80f000),
757
758 /* SXTAH 1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */
759 /* UXTAH 1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */
760 /* SXTAB16 1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */
761 /* UXTAB16 1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */
762 /* SXTAB 1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */
763 /* UXTAB 1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */
764 DECODE_OR (0xff80f080, 0xfa00f080),
765
766 /* QADD 1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */
767 /* QDADD 1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */
768 /* QSUB 1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */
769 /* QDSUB 1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */
770 DECODE_OR (0xfff0f0c0, 0xfa80f080),
771
772 /* SEL 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
773 DECODE_OR (0xfff0f0f0, 0xfaa0f080),
774
775 /* LSL 1111 1010 000x xxxx 1111 xxxx 0000 xxxx */
776 /* LSR 1111 1010 001x xxxx 1111 xxxx 0000 xxxx */
777 /* ASR 1111 1010 010x xxxx 1111 xxxx 0000 xxxx */
778 /* ROR 1111 1010 011x xxxx 1111 xxxx 0000 xxxx */
779 DECODE_EMULATEX (0xff80f0f0, 0xfa00f000, t32_emulate_rd8rn16rm0_rwflags,
780 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
781
782 /* CLZ 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */
783 DECODE_OR (0xfff0f0f0, 0xfab0f080),
784
785 /* REV 1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */
786 /* REV16 1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */
787 /* RBIT 1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */
788 /* REVSH 1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */
789 DECODE_EMULATEX (0xfff0f0c0, 0xfa90f080, t32_emulate_rd8rn16_noflags,
790 REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)),
791
792 /* Other unallocated instructions... */
793 DECODE_END
794};
795
796static const union decode_item t32_table_1111_1011_0[] = {
797 /* Multiply, multiply accumulate, and absolute difference */
798
799 /* ??? 1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */
800 DECODE_REJECT (0xfff0f0f0, 0xfb00f010),
801 /* ??? 1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */
802 DECODE_REJECT (0xfff0f0f0, 0xfb70f010),
803
804 /* SMULxy 1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */
805 DECODE_OR (0xfff0f0c0, 0xfb10f000),
806 /* MUL 1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */
807 /* SMUAD{X} 1111 1011 0010 xxxx 1111 xxxx 000x xxxx */
808 /* SMULWy 1111 1011 0011 xxxx 1111 xxxx 000x xxxx */
809 /* SMUSD{X} 1111 1011 0100 xxxx 1111 xxxx 000x xxxx */
810 /* SMMUL{R} 1111 1011 0101 xxxx 1111 xxxx 000x xxxx */
811 /* USAD8 1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */
812 DECODE_EMULATEX (0xff80f0e0, 0xfb00f000, t32_emulate_rd8rn16rm0_rwflags,
813 REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)),
814
815 /* ??? 1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */
816 DECODE_REJECT (0xfff000f0, 0xfb700010),
817
818 /* SMLAxy 1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */
819 DECODE_OR (0xfff000c0, 0xfb100000),
820 /* MLA 1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */
821 /* MLS 1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */
822 /* SMLAD{X} 1111 1011 0010 xxxx xxxx xxxx 000x xxxx */
823 /* SMLAWy 1111 1011 0011 xxxx xxxx xxxx 000x xxxx */
824 /* SMLSD{X} 1111 1011 0100 xxxx xxxx xxxx 000x xxxx */
825 /* SMMLA{R} 1111 1011 0101 xxxx xxxx xxxx 000x xxxx */
826 /* SMMLS{R} 1111 1011 0110 xxxx xxxx xxxx 000x xxxx */
827 /* USADA8 1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */
828 DECODE_EMULATEX (0xff8000c0, 0xfb000000, t32_emulate_rd8rn16rm0ra12_noflags,
829 REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)),
830
831 /* Other unallocated instructions... */
832 DECODE_END
833};
834
835static const union decode_item t32_table_1111_1011_1[] = {
836 /* Long multiply, long multiply accumulate, and divide */
837
838 /* UMAAL 1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */
839 DECODE_OR (0xfff000f0, 0xfbe00060),
840 /* SMLALxy 1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */
841 DECODE_OR (0xfff000c0, 0xfbc00080),
842 /* SMLALD{X} 1111 1011 1100 xxxx xxxx xxxx 110x xxxx */
843 /* SMLSLD{X} 1111 1011 1101 xxxx xxxx xxxx 110x xxxx */
844 DECODE_OR (0xffe000e0, 0xfbc000c0),
845 /* SMULL 1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */
846 /* UMULL 1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */
847 /* SMLAL 1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */
848 /* UMLAL 1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */
849 DECODE_EMULATEX (0xff9000f0, 0xfb800000, t32_emulate_rdlo12rdhi8rn16rm0_noflags,
850 REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)),
851
852 /* SDIV 1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */
853 /* UDIV 1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */
854 /* Other unallocated instructions... */
855 DECODE_END
856};
857
858const union decode_item kprobe_decode_thumb32_table[] = {
859
860 /*
861 * Load/store multiple instructions
862 * 1110 100x x0xx xxxx xxxx xxxx xxxx xxxx
863 */
864 DECODE_TABLE (0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx),
865
866 /*
867 * Load/store dual, load/store exclusive, table branch
868 * 1110 100x x1xx xxxx xxxx xxxx xxxx xxxx
869 */
870 DECODE_TABLE (0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx),
871
872 /*
873 * Data-processing (shifted register)
874 * 1110 101x xxxx xxxx xxxx xxxx xxxx xxxx
875 */
876 DECODE_TABLE (0xfe000000, 0xea000000, t32_table_1110_101x),
877
878 /*
879 * Coprocessor instructions
880 * 1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx
881 */
882 DECODE_REJECT (0xfc000000, 0xec000000),
883
884 /*
885 * Data-processing (modified immediate)
886 * 1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx
887 */
888 DECODE_TABLE (0xfa008000, 0xf0000000, t32_table_1111_0x0x___0),
889
890 /*
891 * Data-processing (plain binary immediate)
892 * 1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx
893 */
894 DECODE_TABLE (0xfa008000, 0xf2000000, t32_table_1111_0x1x___0),
895
896 /*
897 * Branches and miscellaneous control
898 * 1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx
899 */
900 DECODE_TABLE (0xf8008000, 0xf0008000, t32_table_1111_0xxx___1),
901
902 /*
903 * Advanced SIMD element or structure load/store instructions
904 * 1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx
905 */
906 DECODE_REJECT (0xff100000, 0xf9000000),
907
908 /*
909 * Memory hints
910 * 1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx
911 */
912 DECODE_TABLE (0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111),
913
914 /*
915 * Store single data item
916 * 1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx
917 * Load single data items
918 * 1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx
919 */
920 DECODE_TABLE (0xfe000000, 0xf8000000, t32_table_1111_100x),
921
922 /*
923 * Data-processing (register)
924 * 1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx
925 */
926 DECODE_TABLE (0xff00f000, 0xfa00f000, t32_table_1111_1010___1111),
927
928 /*
929 * Multiply, multiply accumulate, and absolute difference
930 * 1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx
931 */
932 DECODE_TABLE (0xff800000, 0xfb000000, t32_table_1111_1011_0),
933
934 /*
935 * Long multiply, long multiply accumulate, and divide
936 * 1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx
937 */
938 DECODE_TABLE (0xff800000, 0xfb800000, t32_table_1111_1011_1),
939
940 /*
941 * Coprocessor instructions
942 * 1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx
943 */
944 DECODE_END
945};
946
947static void __kprobes
948t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs)
949{
950 kprobe_opcode_t insn = p->opcode;
951 unsigned long pc = thumb_probe_pc(p);
952 int rm = (insn >> 3) & 0xf;
953 unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];
954
955 if (insn & (1 << 7)) /* BLX ? */
956 regs->ARM_lr = (unsigned long)p->addr + 2;
957
958 bx_write_pc(rmv, regs);
959}
960
961static void __kprobes
962t16_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs)
963{
964 kprobe_opcode_t insn = p->opcode;
965 unsigned long* base = (unsigned long *)(thumb_probe_pc(p) & ~3);
966 long index = insn & 0xff;
967 int rt = (insn >> 8) & 0x7;
968 regs->uregs[rt] = base[index];
969}
970
971static void __kprobes
972t16_simulate_ldrstr_sp_relative(struct kprobe *p, struct pt_regs *regs)
973{
974 kprobe_opcode_t insn = p->opcode;
975 unsigned long* base = (unsigned long *)regs->ARM_sp;
976 long index = insn & 0xff;
977 int rt = (insn >> 8) & 0x7;
978 if (insn & 0x800) /* LDR */
979 regs->uregs[rt] = base[index];
980 else /* STR */
981 base[index] = regs->uregs[rt];
982}
983
984static void __kprobes
985t16_simulate_reladr(struct kprobe *p, struct pt_regs *regs)
986{
987 kprobe_opcode_t insn = p->opcode;
988 unsigned long base = (insn & 0x800) ? regs->ARM_sp
989 : (thumb_probe_pc(p) & ~3);
990 long offset = insn & 0xff;
991 int rt = (insn >> 8) & 0x7;
992 regs->uregs[rt] = base + offset * 4;
993}
994
995static void __kprobes
996t16_simulate_add_sp_imm(struct kprobe *p, struct pt_regs *regs)
997{
998 kprobe_opcode_t insn = p->opcode;
999 long imm = insn & 0x7f;
1000 if (insn & 0x80) /* SUB */
1001 regs->ARM_sp -= imm * 4;
1002 else /* ADD */
1003 regs->ARM_sp += imm * 4;
1004}
1005
1006static void __kprobes
1007t16_simulate_cbz(struct kprobe *p, struct pt_regs *regs)
1008{
1009 kprobe_opcode_t insn = p->opcode;
1010 int rn = insn & 0x7;
1011 kprobe_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
1012 if (nonzero & 0x800) {
1013 long i = insn & 0x200;
1014 long imm5 = insn & 0xf8;
1015 unsigned long pc = thumb_probe_pc(p);
1016 regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
1017 }
1018}
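
The branch-offset reconstruction above is compact, so a stand-alone sketch may help. The opcode 0xb952 is a hypothetical "CBNZ r2, <label>" chosen for illustration; the arithmetic simply mirrors the i/imm5 extraction done by t16_simulate_cbz(), and the target is the Thumb PC (probe address + 4) plus the offset.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t insn = 0xb952;		/* CBNZ r2, <label>  (example opcode) */
	long i = insn & 0x200;		/* i bit -> offset bit 6 */
	long imm5 = insn & 0xf8;	/* imm5 -> offset bits 5:1 */
	long offset = (i >> 3) + (imm5 >> 2);

	/* Target = Thumb PC (probe address + 4) + offset; here offset = 20. */
	printf("offset = %ld bytes\n", offset);
	return 0;
}
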
1019
1020static void __kprobes
1021t16_simulate_it(struct kprobe *p, struct pt_regs *regs)
1022{
1023 /*
1024 * The 8 IT state bits are split into two parts in CPSR:
1025 * ITSTATE<1:0> are in CPSR<26:25>
1026 * ITSTATE<7:2> are in CPSR<15:10>
1027 * The new IT state is in the lower byte of insn.
1028 */
1029 kprobe_opcode_t insn = p->opcode;
1030 unsigned long cpsr = regs->ARM_cpsr;
1031 cpsr &= ~PSR_IT_MASK;
1032 cpsr |= (insn & 0xfc) << 8;
1033 cpsr |= (insn & 0x03) << 25;
1034 regs->ARM_cpsr = cpsr;
1035}
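
As a quick stand-alone illustration of the packing above (ordinary user-space C, not kernel code): the opcode 0xbf18, a hypothetical "IT NE", produces an IT field of 0x00001800 in CPSR. PSR_IT_MASK is written out explicitly here as an assumption mirroring the kernel definition.

#include <stdint.h>
#include <stdio.h>

#define PSR_IT_MASK	0x0600fc00	/* CPSR<26:25> and CPSR<15:10> */

static uint32_t pack_it_state(uint32_t cpsr, uint16_t insn)
{
	cpsr &= ~PSR_IT_MASK;
	cpsr |= (insn & 0xfc) << 8;	/* ITSTATE<7:2> -> CPSR<15:10> */
	cpsr |= (insn & 0x03) << 25;	/* ITSTATE<1:0> -> CPSR<26:25> */
	return cpsr;
}

int main(void)
{
	/* "IT NE" encodes as 0xbf18: firstcond = 0001 (NE), mask = 1000 */
	printf("cpsr IT field = 0x%08x\n", pack_it_state(0, 0xbf18));
	return 0;	/* prints 0x00001800 */
}
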
1036
1037static void __kprobes
1038t16_singlestep_it(struct kprobe *p, struct pt_regs *regs)
1039{
1040 regs->ARM_pc += 2;
1041 t16_simulate_it(p, regs);
1042}
1043
1044static enum kprobe_insn __kprobes
1045t16_decode_it(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1046{
1047 asi->insn_singlestep = t16_singlestep_it;
1048 return INSN_GOOD_NO_SLOT;
1049}
1050
1051static void __kprobes
1052t16_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs)
1053{
1054 kprobe_opcode_t insn = p->opcode;
1055 unsigned long pc = thumb_probe_pc(p);
1056 long offset = insn & 0x7f;
1057 offset -= insn & 0x80; /* Apply sign bit */
1058 regs->ARM_pc = pc + (offset * 2);
1059}
1060
1061static enum kprobe_insn __kprobes
1062t16_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1063{
1064 int cc = (insn >> 8) & 0xf;
1065 asi->insn_check_cc = kprobe_condition_checks[cc];
1066 asi->insn_handler = t16_simulate_cond_branch;
1067 return INSN_GOOD_NO_SLOT;
1068}
1069
1070static void __kprobes
1071t16_simulate_branch(struct kprobe *p, struct pt_regs *regs)
1072{
1073 kprobe_opcode_t insn = p->opcode;
1074 unsigned long pc = thumb_probe_pc(p);
1075 long offset = insn & 0x3ff;
1076 offset -= insn & 0x400; /* Apply sign bit */
1077 regs->ARM_pc = pc + (offset * 2);
1078}
1079
1080static unsigned long __kprobes
1081t16_emulate_loregs(struct kprobe *p, struct pt_regs *regs)
1082{
1083 unsigned long oldcpsr = regs->ARM_cpsr;
1084 unsigned long newcpsr;
1085
1086 __asm__ __volatile__ (
1087 "msr cpsr_fs, %[oldcpsr] \n\t"
1088 "ldmia %[regs], {r0-r7} \n\t"
1089 "blx %[fn] \n\t"
1090 "stmia %[regs], {r0-r7} \n\t"
1091 "mrs %[newcpsr], cpsr \n\t"
1092 : [newcpsr] "=r" (newcpsr)
1093 : [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
1094 [fn] "r" (p->ainsn.insn_fn)
1095 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
1096 "lr", "memory", "cc"
1097 );
1098
1099 return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK);
1100}
1101
1102static void __kprobes
1103t16_emulate_loregs_rwflags(struct kprobe *p, struct pt_regs *regs)
1104{
1105 regs->ARM_cpsr = t16_emulate_loregs(p, regs);
1106}
1107
1108static void __kprobes
1109t16_emulate_loregs_noitrwflags(struct kprobe *p, struct pt_regs *regs)
1110{
1111 unsigned long cpsr = t16_emulate_loregs(p, regs);
1112 if (!in_it_block(cpsr))
1113 regs->ARM_cpsr = cpsr;
1114}
1115
1116static void __kprobes
1117t16_emulate_hiregs(struct kprobe *p, struct pt_regs *regs)
1118{
1119 kprobe_opcode_t insn = p->opcode;
1120 unsigned long pc = thumb_probe_pc(p);
1121 int rdn = (insn & 0x7) | ((insn & 0x80) >> 4);
1122 int rm = (insn >> 3) & 0xf;
1123
1124 register unsigned long rdnv asm("r1");
1125 register unsigned long rmv asm("r0");
1126 unsigned long cpsr = regs->ARM_cpsr;
1127
1128 rdnv = (rdn == 15) ? pc : regs->uregs[rdn];
1129 rmv = (rm == 15) ? pc : regs->uregs[rm];
1130
1131 __asm__ __volatile__ (
1132 "msr cpsr_fs, %[cpsr] \n\t"
1133 "blx %[fn] \n\t"
1134 "mrs %[cpsr], cpsr \n\t"
1135 : "=r" (rdnv), [cpsr] "=r" (cpsr)
1136 : "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (p->ainsn.insn_fn)
1137 : "lr", "memory", "cc"
1138 );
1139
1140 if (rdn == 15)
1141 rdnv &= ~1;
1142
1143 regs->uregs[rdn] = rdnv;
1144 regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
1145}
1146
1147static enum kprobe_insn __kprobes
1148t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1149{
1150 insn &= ~0x00ff;
1151 insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
1152 ((u16 *)asi->insn)[0] = insn;
1153 asi->insn_handler = t16_emulate_hiregs;
1154 return INSN_GOOD;
1155}
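
A worked example of the renumbering done by t16_decode_hiregs(), using a hypothetical "ADD r10, r3" (opcode 0x449a): the slot receives 0x4401, i.e. "ADD r1, r0", while t16_emulate_hiregs() still derives Rdn=10 and Rm=3 from the unmodified opcode when shuttling register values in and out.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t opcode = 0x449a;			/* ADD r10, r3 (example) */
	uint16_t slot = (opcode & ~0x00ff) | 0x001;	/* becomes ADD r1, r0 */

	int rdn = (opcode & 0x7) | ((opcode & 0x80) >> 4);
	int rm = (opcode >> 3) & 0xf;

	printf("slot insn %#06x, Rdn=r%d, Rm=r%d\n", slot, rdn, rm);
	return 0;	/* slot insn 0x4401, Rdn=r10, Rm=r3 */
}
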
1156
1157static void __kprobes
1158t16_emulate_push(struct kprobe *p, struct pt_regs *regs)
1159{
1160 __asm__ __volatile__ (
1161 "ldr r9, [%[regs], #13*4] \n\t"
1162 "ldr r8, [%[regs], #14*4] \n\t"
1163 "ldmia %[regs], {r0-r7} \n\t"
1164 "blx %[fn] \n\t"
1165 "str r9, [%[regs], #13*4] \n\t"
1166 :
1167 : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
1168 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
1169 "lr", "memory", "cc"
1170 );
1171}
1172
1173static enum kprobe_insn __kprobes
1174t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1175{
1176 /*
1177 * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}"
1178 * and call it with R9=SP and LR in the register list represented
1179 * by R8.
1180 */
1181 ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */
1182 ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
1183 asi->insn_handler = t16_emulate_push;
1184 return INSN_GOOD;
1185}
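
To make the slot layout concrete, here is a sketch for a hypothetical "PUSH {r0, r4, lr}" (opcode 0xb511): bit 8 of the 16-bit opcode (the M bit, i.e. LR) lands on bit 8 of the STMDB register list, which is why t16_emulate_push() carries LR in r8.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t push = 0xb511;		/* PUSH {r0, r4, lr} (example) */
	uint16_t slot[2];

	slot[0] = 0xe929;		/* 1st half of STMDB r9!, {...} */
	slot[1] = push & 0x1ff;		/* register list: r0, r4, r8 */

	printf("slot: %#06x %#06x\n", slot[0], slot[1]);
	return 0;			/* slot: 0xe929 0x0111 */
}
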
1186
1187static void __kprobes
1188t16_emulate_pop_nopc(struct kprobe *p, struct pt_regs *regs)
1189{
1190 __asm__ __volatile__ (
1191 "ldr r9, [%[regs], #13*4] \n\t"
1192 "ldmia %[regs], {r0-r7} \n\t"
1193 "blx %[fn] \n\t"
1194 "stmia %[regs], {r0-r7} \n\t"
1195 "str r9, [%[regs], #13*4] \n\t"
1196 :
1197 : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
1198 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
1199 "lr", "memory", "cc"
1200 );
1201}
1202
1203static void __kprobes
1204t16_emulate_pop_pc(struct kprobe *p, struct pt_regs *regs)
1205{
1206 register unsigned long pc asm("r8");
1207
1208 __asm__ __volatile__ (
1209 "ldr r9, [%[regs], #13*4] \n\t"
1210 "ldmia %[regs], {r0-r7} \n\t"
1211 "blx %[fn] \n\t"
1212 "stmia %[regs], {r0-r7} \n\t"
1213 "str r9, [%[regs], #13*4] \n\t"
1214 : "=r" (pc)
1215 : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn)
1216 : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
1217 "lr", "memory", "cc"
1218 );
1219
1220 bx_write_pc(pc, regs);
1221}
1222
1223static enum kprobe_insn __kprobes
1224t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1225{
1226 /*
1227 * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}"
1228 * and call it with R9=SP and PC in the register list represented
1229 * by R8.
1230 */
1231 ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */
1232 ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */
1233 asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
1234 : t16_emulate_pop_nopc;
1235 return INSN_GOOD;
1236}
1237
1238static const union decode_item t16_table_1011[] = {
1239 /* Miscellaneous 16-bit instructions */
1240
1241 /* ADD (SP plus immediate) 1011 0000 0xxx xxxx */
1242 /* SUB (SP minus immediate) 1011 0000 1xxx xxxx */
1243 DECODE_SIMULATE (0xff00, 0xb000, t16_simulate_add_sp_imm),
1244
1245 /* CBZ 1011 00x1 xxxx xxxx */
1246 /* CBNZ 1011 10x1 xxxx xxxx */
1247 DECODE_SIMULATE (0xf500, 0xb100, t16_simulate_cbz),
1248
1249 /* SXTH 1011 0010 00xx xxxx */
1250 /* SXTB 1011 0010 01xx xxxx */
1251 /* UXTH 1011 0010 10xx xxxx */
1252 /* UXTB 1011 0010 11xx xxxx */
1253 /* REV 1011 1010 00xx xxxx */
1254 /* REV16 1011 1010 01xx xxxx */
1255 /* ??? 1011 1010 10xx xxxx */
1256 /* REVSH 1011 1010 11xx xxxx */
1257 DECODE_REJECT (0xffc0, 0xba80),
1258 DECODE_EMULATE (0xf500, 0xb000, t16_emulate_loregs_rwflags),
1259
1260 /* PUSH 1011 010x xxxx xxxx */
1261 DECODE_CUSTOM (0xfe00, 0xb400, t16_decode_push),
1262 /* POP 1011 110x xxxx xxxx */
1263 DECODE_CUSTOM (0xfe00, 0xbc00, t16_decode_pop),
1264
1265 /*
1266 * If-Then, and hints
1267 * 1011 1111 xxxx xxxx
1268 */
1269
1270 /* YIELD 1011 1111 0001 0000 */
1271 DECODE_OR (0xffff, 0xbf10),
1272 /* SEV 1011 1111 0100 0000 */
1273 DECODE_EMULATE (0xffff, 0xbf40, kprobe_emulate_none),
1274 /* NOP 1011 1111 0000 0000 */
1275 /* WFE 1011 1111 0010 0000 */
1276 /* WFI 1011 1111 0011 0000 */
1277 DECODE_SIMULATE (0xffcf, 0xbf00, kprobe_simulate_nop),
1278 /* Unassigned hints 1011 1111 xxxx 0000 */
1279 DECODE_REJECT (0xff0f, 0xbf00),
1280 /* IT 1011 1111 xxxx xxxx */
1281 DECODE_CUSTOM (0xff00, 0xbf00, t16_decode_it),
1282
1283 /* SETEND 1011 0110 010x xxxx */
1284 /* CPS 1011 0110 011x xxxx */
1285 /* BKPT 1011 1110 xxxx xxxx */
1286 /* And unallocated instructions... */
1287 DECODE_END
1288};
1289
1290const union decode_item kprobe_decode_thumb16_table[] = {
1291
1292 /*
1293 * Shift (immediate), add, subtract, move, and compare
1294 * 00xx xxxx xxxx xxxx
1295 */
1296
1297 /* CMP (immediate) 0010 1xxx xxxx xxxx */
1298 DECODE_EMULATE (0xf800, 0x2800, t16_emulate_loregs_rwflags),
1299
1300 /* ADD (register) 0001 100x xxxx xxxx */
1301 /* SUB (register) 0001 101x xxxx xxxx */
1302 /* LSL (immediate) 0000 0xxx xxxx xxxx */
1303 /* LSR (immediate) 0000 1xxx xxxx xxxx */
1304 /* ASR (immediate) 0001 0xxx xxxx xxxx */
1305 /* ADD (immediate, Thumb) 0001 110x xxxx xxxx */
1306 /* SUB (immediate, Thumb) 0001 111x xxxx xxxx */
1307 /* MOV (immediate) 0010 0xxx xxxx xxxx */
1308 /* ADD (immediate, Thumb) 0011 0xxx xxxx xxxx */
1309 /* SUB (immediate, Thumb) 0011 1xxx xxxx xxxx */
1310 DECODE_EMULATE (0xc000, 0x0000, t16_emulate_loregs_noitrwflags),
1311
1312 /*
1313 * 16-bit Thumb data-processing instructions
1314 * 0100 00xx xxxx xxxx
1315 */
1316
1317 /* TST (register) 0100 0010 00xx xxxx */
1318 DECODE_EMULATE (0xffc0, 0x4200, t16_emulate_loregs_rwflags),
1319 /* CMP (register) 0100 0010 10xx xxxx */
1320 /* CMN (register) 0100 0010 11xx xxxx */
1321 DECODE_EMULATE (0xff80, 0x4280, t16_emulate_loregs_rwflags),
1322 /* AND (register) 0100 0000 00xx xxxx */
1323 /* EOR (register) 0100 0000 01xx xxxx */
1324 /* LSL (register) 0100 0000 10xx xxxx */
1325 /* LSR (register) 0100 0000 11xx xxxx */
1326 /* ASR (register) 0100 0001 00xx xxxx */
1327 /* ADC (register) 0100 0001 01xx xxxx */
1328 /* SBC (register) 0100 0001 10xx xxxx */
1329 /* ROR (register) 0100 0001 11xx xxxx */
1330 /* RSB (immediate) 0100 0010 01xx xxxx */
1331 /* ORR (register) 0100 0011 00xx xxxx */
1332 /* MUL 0100 0011 00xx xxxx */
1333 /* BIC (register) 0100 0011 10xx xxxx */
1334 /* MVN (register) 0100 0011 10xx xxxx */
1335 DECODE_EMULATE (0xfc00, 0x4000, t16_emulate_loregs_noitrwflags),
1336
1337 /*
1338 * Special data instructions and branch and exchange
1339 * 0100 01xx xxxx xxxx
1340 */
1341
1342 /* BLX pc 0100 0111 1111 1xxx */
1343 DECODE_REJECT (0xfff8, 0x47f8),
1344
1345 /* BX (register) 0100 0111 0xxx xxxx */
1346 /* BLX (register) 0100 0111 1xxx xxxx */
1347 DECODE_SIMULATE (0xff00, 0x4700, t16_simulate_bxblx),
1348
1349 /* ADD pc, pc 0100 0100 1111 1111 */
1350 DECODE_REJECT (0xffff, 0x44ff),
1351
1352 /* ADD (register) 0100 0100 xxxx xxxx */
1353 /* CMP (register) 0100 0101 xxxx xxxx */
1354 /* MOV (register) 0100 0110 xxxx xxxx */
1355 DECODE_CUSTOM (0xfc00, 0x4400, t16_decode_hiregs),
1356
1357 /*
1358 * Load from Literal Pool
1359 * LDR (literal) 0100 1xxx xxxx xxxx
1360 */
1361 DECODE_SIMULATE (0xf800, 0x4800, t16_simulate_ldr_literal),
1362
1363 /*
1364 * 16-bit Thumb Load/store instructions
1365 * 0101 xxxx xxxx xxxx
1366 * 011x xxxx xxxx xxxx
1367 * 100x xxxx xxxx xxxx
1368 */
1369
1370 /* STR (register) 0101 000x xxxx xxxx */
1371 /* STRH (register) 0101 001x xxxx xxxx */
1372 /* STRB (register) 0101 010x xxxx xxxx */
1373 /* LDRSB (register) 0101 011x xxxx xxxx */
1374 /* LDR (register) 0101 100x xxxx xxxx */
1375 /* LDRH (register) 0101 101x xxxx xxxx */
1376 /* LDRB (register) 0101 110x xxxx xxxx */
1377 /* LDRSH (register) 0101 111x xxxx xxxx */
1378 /* STR (immediate, Thumb) 0110 0xxx xxxx xxxx */
1379 /* LDR (immediate, Thumb) 0110 1xxx xxxx xxxx */
1380 /* STRB (immediate, Thumb) 0111 0xxx xxxx xxxx */
1381 /* LDRB (immediate, Thumb) 0111 1xxx xxxx xxxx */
1382 DECODE_EMULATE (0xc000, 0x4000, t16_emulate_loregs_rwflags),
1383 /* STRH (immediate, Thumb) 1000 0xxx xxxx xxxx */
1384 /* LDRH (immediate, Thumb) 1000 1xxx xxxx xxxx */
1385 DECODE_EMULATE (0xf000, 0x8000, t16_emulate_loregs_rwflags),
1386 /* STR (immediate, Thumb) 1001 0xxx xxxx xxxx */
1387 /* LDR (immediate, Thumb) 1001 1xxx xxxx xxxx */
1388 DECODE_SIMULATE (0xf000, 0x9000, t16_simulate_ldrstr_sp_relative),
1389
1390 /*
1391 * Generate PC-/SP-relative address
1392 * ADR (literal) 1010 0xxx xxxx xxxx
1393 * ADD (SP plus immediate) 1010 1xxx xxxx xxxx
1394 */
1395 DECODE_SIMULATE (0xf000, 0xa000, t16_simulate_reladr),
1396
1397 /*
1398 * Miscellaneous 16-bit instructions
1399 * 1011 xxxx xxxx xxxx
1400 */
1401 DECODE_TABLE (0xf000, 0xb000, t16_table_1011),
1402
1403 /* STM 1100 0xxx xxxx xxxx */
1404 /* LDM 1100 1xxx xxxx xxxx */
1405 DECODE_EMULATE (0xf000, 0xc000, t16_emulate_loregs_rwflags),
1406
1407 /*
1408 * Conditional branch, and Supervisor Call
1409 */
1410
1411 /* Permanently UNDEFINED 1101 1110 xxxx xxxx */
1412 /* SVC 1101 1111 xxxx xxxx */
1413 DECODE_REJECT (0xfe00, 0xde00),
1414
1415 /* Conditional branch 1101 xxxx xxxx xxxx */
1416 DECODE_CUSTOM (0xf000, 0xd000, t16_decode_cond_branch),
1417
1418 /*
1419 * Unconditional branch
1420 * B 1110 0xxx xxxx xxxx
1421 */
1422 DECODE_SIMULATE (0xf800, 0xe000, t16_simulate_branch),
1423
1424 DECODE_END
1425};
1426
1427static unsigned long __kprobes thumb_check_cc(unsigned long cpsr)
1428{
1429 if (unlikely(in_it_block(cpsr)))
1430 return kprobe_condition_checks[current_cond(cpsr)](cpsr);
1431 return true;
1432}
1433
1434static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs)
1435{
1436 regs->ARM_pc += 2;
1437 p->ainsn.insn_handler(p, regs);
1438 regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
1439}
1440
1441static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs)
1442{
1443 regs->ARM_pc += 4;
1444 p->ainsn.insn_handler(p, regs);
1445 regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
1446}
1447
1448enum kprobe_insn __kprobes
1449thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1450{
1451 asi->insn_singlestep = thumb16_singlestep;
1452 asi->insn_check_cc = thumb_check_cc;
1453 return kprobe_decode_insn(insn, asi, kprobe_decode_thumb16_table, true);
1454}
1455
1456enum kprobe_insn __kprobes
1457thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
1458{
1459 asi->insn_singlestep = thumb32_singlestep;
1460 asi->insn_check_cc = thumb_check_cc;
1461 return kprobe_decode_insn(insn, asi, kprobe_decode_thumb32_table, true);
1462}
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 1656c87501c0..129c1163248b 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -28,14 +28,16 @@
28#include <asm/traps.h> 28#include <asm/traps.h>
29#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
30 30
31#include "kprobes.h"
32
31#define MIN_STACK_SIZE(addr) \ 33#define MIN_STACK_SIZE(addr) \
32 min((unsigned long)MAX_STACK_SIZE, \ 34 min((unsigned long)MAX_STACK_SIZE, \
33 (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) 35 (unsigned long)current_thread_info() + THREAD_START_SP - (addr))
34 36
35#define flush_insns(addr, cnt) \ 37#define flush_insns(addr, size) \
36 flush_icache_range((unsigned long)(addr), \ 38 flush_icache_range((unsigned long)(addr), \
37 (unsigned long)(addr) + \ 39 (unsigned long)(addr) + \
38 sizeof(kprobe_opcode_t) * (cnt)) 40 (size))
39 41
40/* Used as a marker in ARM_pc to note when we're in a jprobe. */ 42/* Used as a marker in ARM_pc to note when we're in a jprobe. */
41#define JPROBE_MAGIC_ADDR 0xffffffff 43#define JPROBE_MAGIC_ADDR 0xffffffff
@@ -49,16 +51,35 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
49 kprobe_opcode_t insn; 51 kprobe_opcode_t insn;
50 kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; 52 kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
51 unsigned long addr = (unsigned long)p->addr; 53 unsigned long addr = (unsigned long)p->addr;
54 bool thumb;
55 kprobe_decode_insn_t *decode_insn;
52 int is; 56 int is;
53 57
54 if (addr & 0x3 || in_exception_text(addr)) 58 if (in_exception_text(addr))
55 return -EINVAL; 59 return -EINVAL;
56 60
61#ifdef CONFIG_THUMB2_KERNEL
62 thumb = true;
63 addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
64 insn = ((u16 *)addr)[0];
65 if (is_wide_instruction(insn)) {
66 insn <<= 16;
67 insn |= ((u16 *)addr)[1];
68 decode_insn = thumb32_kprobe_decode_insn;
69 } else
70 decode_insn = thumb16_kprobe_decode_insn;
71#else /* !CONFIG_THUMB2_KERNEL */
72 thumb = false;
73 if (addr & 0x3)
74 return -EINVAL;
57 insn = *p->addr; 75 insn = *p->addr;
76 decode_insn = arm_kprobe_decode_insn;
77#endif
78
58 p->opcode = insn; 79 p->opcode = insn;
59 p->ainsn.insn = tmp_insn; 80 p->ainsn.insn = tmp_insn;
60 81
61 switch (arm_kprobe_decode_insn(insn, &p->ainsn)) { 82 switch ((*decode_insn)(insn, &p->ainsn)) {
62 case INSN_REJECTED: /* not supported */ 83 case INSN_REJECTED: /* not supported */
63 return -EINVAL; 84 return -EINVAL;
64 85
@@ -68,7 +89,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
68 return -ENOMEM; 89 return -ENOMEM;
69 for (is = 0; is < MAX_INSN_SIZE; ++is) 90 for (is = 0; is < MAX_INSN_SIZE; ++is)
70 p->ainsn.insn[is] = tmp_insn[is]; 91 p->ainsn.insn[is] = tmp_insn[is];
71 flush_insns(p->ainsn.insn, MAX_INSN_SIZE); 92 flush_insns(p->ainsn.insn,
93 sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
94 p->ainsn.insn_fn = (kprobe_insn_fn_t *)
95 ((uintptr_t)p->ainsn.insn | thumb);
72 break; 96 break;
73 97
74 case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ 98 case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
@@ -79,24 +103,88 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
79 return 0; 103 return 0;
80} 104}
81 105
106#ifdef CONFIG_THUMB2_KERNEL
107
108/*
109 * For a 32-bit Thumb breakpoint spanning two memory words we need to take
110 * special precautions to insert the breakpoint atomically, especially on SMP
111 * systems. This is achieved by calling this arming function using stop_machine.
112 */
113static int __kprobes set_t32_breakpoint(void *addr)
114{
115 ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16;
116 ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff;
117 flush_insns(addr, 2*sizeof(u16));
118 return 0;
119}
120
121void __kprobes arch_arm_kprobe(struct kprobe *p)
122{
123 uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */
124
125 if (!is_wide_instruction(p->opcode)) {
126 *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
127 flush_insns(addr, sizeof(u16));
128 } else if (addr & 2) {
129 /* A 32-bit instruction spanning two words needs special care */
130 stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map);
131 } else {
132 /* Word aligned 32-bit instruction can be written atomically */
133 u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
134#ifndef __ARMEB__ /* Swap halfwords for little-endian */
135 bkp = (bkp >> 16) | (bkp << 16);
136#endif
137 *(u32 *)addr = bkp;
138 flush_insns(addr, sizeof(u32));
139 }
140}
141
142#else /* !CONFIG_THUMB2_KERNEL */
143
82void __kprobes arch_arm_kprobe(struct kprobe *p) 144void __kprobes arch_arm_kprobe(struct kprobe *p)
83{ 145{
84 *p->addr = KPROBE_BREAKPOINT_INSTRUCTION; 146 kprobe_opcode_t insn = p->opcode;
85 flush_insns(p->addr, 1); 147 kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
148 if (insn >= 0xe0000000)
149 brkp |= 0xe0000000; /* Unconditional instruction */
150 else
151 brkp |= insn & 0xf0000000; /* Copy condition from insn */
152 *p->addr = brkp;
153 flush_insns(p->addr, sizeof(p->addr[0]));
86} 154}
87 155
156#endif /* !CONFIG_THUMB2_KERNEL */
157
88/* 158/*
89 * The actual disarming is done here on each CPU and synchronized using 159 * The actual disarming is done here on each CPU and synchronized using
90 * stop_machine. This synchronization is necessary on SMP to avoid removing 160 * stop_machine. This synchronization is necessary on SMP to avoid removing
91 * a probe between the moment the 'Undefined Instruction' exception is raised 161 * a probe between the moment the 'Undefined Instruction' exception is raised
92 * and the moment the exception handler reads the faulting instruction from 162 * and the moment the exception handler reads the faulting instruction from
93 * memory. 163 * memory. It is also needed to atomically set the two half-words of a 32-bit
164 * Thumb breakpoint.
94 */ 165 */
95int __kprobes __arch_disarm_kprobe(void *p) 166int __kprobes __arch_disarm_kprobe(void *p)
96{ 167{
97 struct kprobe *kp = p; 168 struct kprobe *kp = p;
169#ifdef CONFIG_THUMB2_KERNEL
170 u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1);
171 kprobe_opcode_t insn = kp->opcode;
172 unsigned int len;
173
174 if (is_wide_instruction(insn)) {
175 ((u16 *)addr)[0] = insn>>16;
176 ((u16 *)addr)[1] = insn;
177 len = 2*sizeof(u16);
178 } else {
179 ((u16 *)addr)[0] = insn;
180 len = sizeof(u16);
181 }
182 flush_insns(addr, len);
183
184#else /* !CONFIG_THUMB2_KERNEL */
98 *kp->addr = kp->opcode; 185 *kp->addr = kp->opcode;
99 flush_insns(kp->addr, 1); 186 flush_insns(kp->addr, sizeof(kp->addr[0]));
187#endif
100 return 0; 188 return 0;
101} 189}
102 190
@@ -130,12 +218,24 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
130 __get_cpu_var(current_kprobe) = p; 218 __get_cpu_var(current_kprobe) = p;
131} 219}
132 220
133static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, 221static void __kprobes
134 struct kprobe_ctlblk *kcb) 222singlestep_skip(struct kprobe *p, struct pt_regs *regs)
135{ 223{
224#ifdef CONFIG_THUMB2_KERNEL
225 regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
226 if (is_wide_instruction(p->opcode))
227 regs->ARM_pc += 4;
228 else
229 regs->ARM_pc += 2;
230#else
136 regs->ARM_pc += 4; 231 regs->ARM_pc += 4;
137 if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) 232#endif
138 p->ainsn.insn_handler(p, regs); 233}
234
235static inline void __kprobes
236singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
237{
238 p->ainsn.insn_singlestep(p, regs);
139} 239}
140 240
141/* 241/*
@@ -149,11 +249,23 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
149{ 249{
150 struct kprobe *p, *cur; 250 struct kprobe *p, *cur;
151 struct kprobe_ctlblk *kcb; 251 struct kprobe_ctlblk *kcb;
152 kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc;
153 252
154 kcb = get_kprobe_ctlblk(); 253 kcb = get_kprobe_ctlblk();
155 cur = kprobe_running(); 254 cur = kprobe_running();
156 p = get_kprobe(addr); 255
256#ifdef CONFIG_THUMB2_KERNEL
257 /*
258 * First look for a probe which was registered using an address with
259 * bit 0 set, this is the usual situation for pointers to Thumb code.
260 * If not found, fallback to looking for one with bit 0 clear.
261 */
262 p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
263 if (!p)
264 p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
265
266#else /* ! CONFIG_THUMB2_KERNEL */
267 p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
268#endif
157 269
158 if (p) { 270 if (p) {
159 if (cur) { 271 if (cur) {
@@ -173,7 +285,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
173 /* impossible cases */ 285 /* impossible cases */
174 BUG(); 286 BUG();
175 } 287 }
176 } else { 288 } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
289 /* Probe hit and conditional execution check ok. */
177 set_current_kprobe(p); 290 set_current_kprobe(p);
178 kcb->kprobe_status = KPROBE_HIT_ACTIVE; 291 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
179 292
@@ -193,6 +306,13 @@ void __kprobes kprobe_handler(struct pt_regs *regs)
193 } 306 }
194 reset_current_kprobe(); 307 reset_current_kprobe();
195 } 308 }
309 } else {
310 /*
311 * Probe hit but conditional execution check failed,
312 * so just skip the instruction and continue as if
313 * nothing had happened.
314 */
315 singlestep_skip(p, regs);
196 } 316 }
197 } else if (cur) { 317 } else if (cur) {
198 /* We probably hit a jprobe. Call its break handler. */ 318 /* We probably hit a jprobe. Call its break handler. */
@@ -300,7 +420,11 @@ void __naked __kprobes kretprobe_trampoline(void)
300 "bl trampoline_handler \n\t" 420 "bl trampoline_handler \n\t"
301 "mov lr, r0 \n\t" 421 "mov lr, r0 \n\t"
302 "ldmia sp!, {r0 - r11} \n\t" 422 "ldmia sp!, {r0 - r11} \n\t"
423#ifdef CONFIG_THUMB2_KERNEL
424 "bx lr \n\t"
425#else
303 "mov pc, lr \n\t" 426 "mov pc, lr \n\t"
427#endif
304 : : : "memory"); 428 : : : "memory");
305} 429}
306 430
@@ -378,11 +502,22 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
378 struct jprobe *jp = container_of(p, struct jprobe, kp); 502 struct jprobe *jp = container_of(p, struct jprobe, kp);
379 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 503 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
380 long sp_addr = regs->ARM_sp; 504 long sp_addr = regs->ARM_sp;
505 long cpsr;
381 506
382 kcb->jprobe_saved_regs = *regs; 507 kcb->jprobe_saved_regs = *regs;
383 memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); 508 memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
384 regs->ARM_pc = (long)jp->entry; 509 regs->ARM_pc = (long)jp->entry;
385 regs->ARM_cpsr |= PSR_I_BIT; 510
511 cpsr = regs->ARM_cpsr | PSR_I_BIT;
512#ifdef CONFIG_THUMB2_KERNEL
513 /* Set correct Thumb state in cpsr */
514 if (regs->ARM_pc & 1)
515 cpsr |= PSR_T_BIT;
516 else
517 cpsr &= ~PSR_T_BIT;
518#endif
519 regs->ARM_cpsr = cpsr;
520
386 preempt_disable(); 521 preempt_disable();
387 return 1; 522 return 1;
388} 523}
@@ -404,7 +539,12 @@ void __kprobes jprobe_return(void)
404 * This is to prevent any simulated instruction from writing 539 * This is to prevent any simulated instruction from writing
405 * over the regs when they are accessing the stack. 540 * over the regs when they are accessing the stack.
406 */ 541 */
542#ifdef CONFIG_THUMB2_KERNEL
543 "sub r0, %0, %1 \n\t"
544 "mov sp, r0 \n\t"
545#else
407 "sub sp, %0, %1 \n\t" 546 "sub sp, %0, %1 \n\t"
547#endif
408 "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" 548 "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
409 "str %0, [sp, %2] \n\t" 549 "str %0, [sp, %2] \n\t"
410 "str r0, [sp, %3] \n\t" 550 "str r0, [sp, %3] \n\t"
@@ -415,15 +555,28 @@ void __kprobes jprobe_return(void)
415 * Return to the context saved by setjmp_pre_handler 555 * Return to the context saved by setjmp_pre_handler
416 * and restored by longjmp_break_handler. 556 * and restored by longjmp_break_handler.
417 */ 557 */
558#ifdef CONFIG_THUMB2_KERNEL
559 "ldr lr, [sp, %2] \n\t" /* lr = saved sp */
560 "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */
561 "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */
562 "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */
563 /* rfe context */
564 "ldmia sp, {r0 - r12} \n\t"
565 "mov sp, lr \n\t"
566 "ldr lr, [sp], #4 \n\t"
567 "rfeia sp! \n\t"
568#else
418 "ldr r0, [sp, %4] \n\t" 569 "ldr r0, [sp, %4] \n\t"
419 "msr cpsr_cxsf, r0 \n\t" 570 "msr cpsr_cxsf, r0 \n\t"
420 "ldmia sp, {r0 - pc} \n\t" 571 "ldmia sp, {r0 - pc} \n\t"
572#endif
421 : 573 :
422 : "r" (kcb->jprobe_saved_regs.ARM_sp), 574 : "r" (kcb->jprobe_saved_regs.ARM_sp),
423 "I" (sizeof(struct pt_regs) * 2), 575 "I" (sizeof(struct pt_regs) * 2),
424 "J" (offsetof(struct pt_regs, ARM_sp)), 576 "J" (offsetof(struct pt_regs, ARM_sp)),
425 "J" (offsetof(struct pt_regs, ARM_pc)), 577 "J" (offsetof(struct pt_regs, ARM_pc)),
426 "J" (offsetof(struct pt_regs, ARM_cpsr)) 578 "J" (offsetof(struct pt_regs, ARM_cpsr)),
579 "J" (offsetof(struct pt_regs, ARM_lr))
427 : "memory", "cc"); 580 : "memory", "cc");
428} 581}
429 582
@@ -460,17 +613,44 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
460 return 0; 613 return 0;
461} 614}
462 615
463static struct undef_hook kprobes_break_hook = { 616#ifdef CONFIG_THUMB2_KERNEL
617
618static struct undef_hook kprobes_thumb16_break_hook = {
619 .instr_mask = 0xffff,
620 .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
621 .cpsr_mask = MODE_MASK,
622 .cpsr_val = SVC_MODE,
623 .fn = kprobe_trap_handler,
624};
625
626static struct undef_hook kprobes_thumb32_break_hook = {
464 .instr_mask = 0xffffffff, 627 .instr_mask = 0xffffffff,
465 .instr_val = KPROBE_BREAKPOINT_INSTRUCTION, 628 .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
466 .cpsr_mask = MODE_MASK, 629 .cpsr_mask = MODE_MASK,
467 .cpsr_val = SVC_MODE, 630 .cpsr_val = SVC_MODE,
468 .fn = kprobe_trap_handler, 631 .fn = kprobe_trap_handler,
469}; 632};
470 633
634#else /* !CONFIG_THUMB2_KERNEL */
635
636static struct undef_hook kprobes_arm_break_hook = {
637 .instr_mask = 0x0fffffff,
638 .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
639 .cpsr_mask = MODE_MASK,
640 .cpsr_val = SVC_MODE,
641 .fn = kprobe_trap_handler,
642};
643
644#endif /* !CONFIG_THUMB2_KERNEL */
645
471int __init arch_init_kprobes() 646int __init arch_init_kprobes()
472{ 647{
473 arm_kprobe_decode_init(); 648 arm_kprobe_decode_init();
474 register_undef_hook(&kprobes_break_hook); 649#ifdef CONFIG_THUMB2_KERNEL
650 register_undef_hook(&kprobes_thumb16_break_hook);
651 register_undef_hook(&kprobes_thumb32_break_hook);
652#else
653 register_undef_hook(&kprobes_arm_break_hook);
654#endif
475 return 0; 655 return 0;
476} 656}
diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h
new file mode 100644
index 000000000000..a6aeda0a6c7f
--- /dev/null
+++ b/arch/arm/kernel/kprobes.h
@@ -0,0 +1,420 @@
1/*
2 * arch/arm/kernel/kprobes.h
3 *
4 * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
5 *
6 * Some contents moved here from arch/arm/include/asm/kprobes.h which is
7 * Copyright (C) 2006, 2007 Motorola Inc.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 */
18
19#ifndef _ARM_KERNEL_KPROBES_H
20#define _ARM_KERNEL_KPROBES_H
21
22/*
23 * These undefined instructions must be unique and
24 * reserved solely for kprobes' use.
25 */
26#define KPROBE_ARM_BREAKPOINT_INSTRUCTION 0x07f001f8
27#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18
28#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018
29
30
31enum kprobe_insn {
32 INSN_REJECTED,
33 INSN_GOOD,
34 INSN_GOOD_NO_SLOT
35};
36
37typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t,
38 struct arch_specific_insn *);
39
40#ifdef CONFIG_THUMB2_KERNEL
41
42enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t,
43 struct arch_specific_insn *);
44enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t,
45 struct arch_specific_insn *);
46
47#else /* !CONFIG_THUMB2_KERNEL */
48
49enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t,
50 struct arch_specific_insn *);
51#endif
52
53void __init arm_kprobe_decode_init(void);
54
55extern kprobe_check_cc * const kprobe_condition_checks[16];
56
57
58#if __LINUX_ARM_ARCH__ >= 7
59
60/* str_pc_offset is architecturally defined from ARMv7 onwards */
61#define str_pc_offset 8
62#define find_str_pc_offset()
63
64#else /* __LINUX_ARM_ARCH__ < 7 */
65
66/* We need a run-time check to determine str_pc_offset */
67extern int str_pc_offset;
68void __init find_str_pc_offset(void);
69
70#endif
71
72
73/*
74 * Update ITSTATE after normal execution of an IT block instruction.
75 *
76 * The 8 IT state bits are split into two parts in CPSR:
77 * ITSTATE<1:0> are in CPSR<26:25>
78 * ITSTATE<7:2> are in CPSR<15:10>
79 */
80static inline unsigned long it_advance(unsigned long cpsr)
81 {
82 if ((cpsr & 0x06000400) == 0) {
83 /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
84 cpsr &= ~PSR_IT_MASK;
85 } else {
86 /* We need to shift left ITSTATE<4:0> */
87 const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
88 unsigned long it = cpsr & mask;
89 it <<= 1;
90 it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
91 it &= mask;
92 cpsr &= ~mask;
93 cpsr |= it;
94 }
95 return cpsr;
96}
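
A quick stand-alone trace of it_advance() for a hypothetical two-instruction "ITT EQ" block (CPSR IT field 0x00000400). The constants are copied from above and the helper just repeats the same arithmetic in user-space C for experimentation.

#include <stdint.h>
#include <stdio.h>

#define PSR_IT_MASK	0x0600fc00	/* assumed; mirrors the kernel value */

static uint32_t advance(uint32_t cpsr)	/* same arithmetic as it_advance() */
{
	if ((cpsr & 0x06000400) == 0)
		return cpsr & ~PSR_IT_MASK;		/* end of IT block */
	else {
		const uint32_t mask = 0x06001c00;	/* ITSTATE<4:0> */
		uint32_t it = (cpsr & mask) << 1;
		it |= it >> (27 - 10);
		return (cpsr & ~mask) | (it & mask);
	}
}

int main(void)
{
	uint32_t cpsr = 0x00000400;	/* hypothetical "ITT EQ", 2 insns pending */
	cpsr = advance(cpsr);
	printf("after 1st insn: 0x%08x\n", cpsr);	/* 0x00000800 */
	cpsr = advance(cpsr);
	printf("after 2nd insn: 0x%08x\n", cpsr);	/* 0x00000000 */
	return 0;
}
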
97
98static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs)
99{
100 long cpsr = regs->ARM_cpsr;
101 if (pcv & 0x1) {
102 cpsr |= PSR_T_BIT;
103 pcv &= ~0x1;
104 } else {
105 cpsr &= ~PSR_T_BIT;
106 pcv &= ~0x2; /* Avoid UNPREDICTABLE address alignment */
107 }
108 regs->ARM_cpsr = cpsr;
109 regs->ARM_pc = pcv;
110}
111
112
113#if __LINUX_ARM_ARCH__ >= 6
114
115/* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */
116#define load_write_pc_interworks true
117#define test_load_write_pc_interworking()
118
119#else /* __LINUX_ARM_ARCH__ < 6 */
120
121/* We need run-time testing to determine if load_write_pc() should interwork. */
122extern bool load_write_pc_interworks;
123void __init test_load_write_pc_interworking(void);
124
125#endif
126
127static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs)
128{
129 if (load_write_pc_interworks)
130 bx_write_pc(pcv, regs);
131 else
132 regs->ARM_pc = pcv;
133}
134
135
136#if __LINUX_ARM_ARCH__ >= 7
137
138#define alu_write_pc_interworks true
139#define test_alu_write_pc_interworking()
140
141#elif __LINUX_ARM_ARCH__ <= 5
142
143/* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */
144#define alu_write_pc_interworks false
145#define test_alu_write_pc_interworking()
146
147#else /* __LINUX_ARM_ARCH__ == 6 */
148
149/* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */
150extern bool alu_write_pc_interworks;
151void __init test_alu_write_pc_interworking(void);
152
153#endif /* __LINUX_ARM_ARCH__ == 6 */
154
155static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs)
156{
157 if (alu_write_pc_interworks)
158 bx_write_pc(pcv, regs);
159 else
160 regs->ARM_pc = pcv;
161}
162
163
164void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs);
165void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs);
166
167enum kprobe_insn __kprobes
168kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi);
169
170/*
171 * Test if load/store instructions writeback the address register.
172 * if P (bit 24) == 0 or W (bit 21) == 1
173 */
174#define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000)
175
176/*
177 * The following definitions and macros are used to build instruction
178 * decoding tables for use by kprobe_decode_insn.
179 *
180 * These tables are a concatenation of entries each of which consists of one of
181 * the decode_* structs. All of the fields in every type of decode structure
182 * are of the union type decode_item, therefore the entire decode table can be
183 * viewed as an array of these and declared like:
184 *
185 * static const union decode_item table_name[] = {};
186 *
187 * In order to construct each entry in the table, macros are used to
188 * initialise a number of sequential decode_item values in a layout which
189 * matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct
190 * decode_simulate by initialising four decode_item objects like this...
191 *
192 * {.bits = _type},
193 * {.bits = _mask},
194 * {.bits = _value},
195 * {.handler = _handler},
196 *
197 * Initialising a specified member of the union means that the compiler
198 * will produce a warning if the argument is of an incorrect type.
199 *
200 * Below is a list of each of the macros used to initialise entries and a
201 * description of the action performed when that entry is matched to an
202 * instruction. A match is found when (instruction & mask) == value.
203 *
204 * DECODE_TABLE(mask, value, table)
205 * Instruction decoding jumps to parsing the new sub-table 'table'.
206 *
207 * DECODE_CUSTOM(mask, value, decoder)
208 * The custom function 'decoder' is called to complete the decoding
209 * of an instruction.
210 *
211 * DECODE_SIMULATE(mask, value, handler)
212 * Set the probe's instruction handler to 'handler'; this will be used
213 * to simulate the instruction when the probe is hit. Decoding returns
214 * with INSN_GOOD_NO_SLOT.
215 *
216 * DECODE_EMULATE(mask, value, handler)
217 * Set the probe's instruction handler to 'handler'; this will be used
218 * to emulate the instruction when the probe is hit. The modified
219 * instruction (see below) is placed in the probes instruction slot so it
220 * may be called by the emulation code. Decoding returns with INSN_GOOD.
221 *
222 * DECODE_REJECT(mask, value)
223 * Instruction decoding fails with INSN_REJECTED
224 *
225 * DECODE_OR(mask, value)
226 * This allows the mask/value test of multiple table entries to be
227 * logically ORed. Once an 'or' entry is matched the decoding action to
228 * be performed is that of the next entry which isn't an 'or'. E.g.
229 *
230 * DECODE_OR (mask1, value1)
231 * DECODE_OR (mask2, value2)
232 * DECODE_SIMULATE (mask3, value3, simulation_handler)
233 *
234 * This means that if any of the three mask/value pairs match the
235 * instruction being decoded, then 'simulation_handler' will be used
236 * for it.
237 *
238 * Both the SIMULATE and EMULATE macros have a second form which takes an
239 * additional 'regs' argument.
240 *
241 * DECODE_SIMULATEX(mask, value, handler, regs)
242 * DECODE_EMULATEX (mask, value, handler, regs)
243 *
244 * These are used to specify what kind of CPU register is encoded in each of the
245 * least significant 5 nibbles of the instruction being decoded. The regs value
246 * is specified using the REGS macro, this takes any of the REG_TYPE_* values
247 * from enum decode_reg_type as arguments; only the '*' part of the name is
248 * given. E.g.
249 *
250 * REGS(0, ANY, NOPC, 0, ANY)
251 *
252 * This indicates an instruction is encoded like:
253 *
254 * bits 19..16 ignore
255 * bits 15..12 any register allowed here
256 * bits 11.. 8 any register except PC allowed here
257 * bits 7.. 4 ignore
258 * bits 3.. 0 any register allowed here
259 *
260 * This register specification is checked after a decode table entry is found to
261 * match an instruction (through the mask/value test). Any invalid register then
262 * found in the instruction will cause decoding to fail with INSN_REJECTED. In
263 * the above example this would happen if bits 11..8 of the instruction were
264 * 1111, indicating R15 or PC.
265 *
266 * As well as checking for legal combinations of registers, this data is also
267 * used to modify the registers encoded in the instruction so that the
268 * emulation routines can use it. (See decode_regs() and INSN_NEW_BITS.)
269 *
270 * Here is a real example which matches ARM instructions of the form
271 * "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>"
272 *
273 * DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags,
274 * REGS(ANY, ANY, NOPC, 0, ANY)),
275 * ^ ^ ^ ^
276 * Rn Rd Rs Rm
277 *
278 * Decoding the instruction "AND R4, R5, R6, ASL R15" will be rejected because
279 * Rs == R15
280 *
281 * Decoding the instruction "AND R4, R5, R6, ASL R7" will be accepted and the
282 * instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into
283 * the kprobes instruction slot. This can then be called later by the handler
284 * function emulate_rd12rn16rm0rs8_rwflags in order to emulate the instruction.
285 */
286
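
For intuition, here is a simplified stand-alone model of how a table built from these macros is consumed: entries are scanned in order and the first whose (insn & mask) == value test passes decides the action. The real walker is kprobe_decode_insn() and additionally handles OR entries, sub-tables and the register checks described above; the example masks are taken from the Thumb-2 data-processing table earlier in this patch, and the action strings are just labels.

#include <stdint.h>
#include <stdio.h>

struct toy_entry {
	uint32_t mask;
	uint32_t value;
	const char *action;
};

static const struct toy_entry toy_table[] = {
	{ 0xfff0f0f0, 0xfab0f080, "simulate CLZ" },
	{ 0xff80f0f0, 0xfa00f000, "emulate LSL/LSR/ASR/ROR (register)" },
	{ 0x00000000, 0x00000000, "reject" },		/* catch-all */
};

static const char *toy_decode(uint32_t insn)
{
	const struct toy_entry *e;

	for (e = toy_table; ; e++)			/* first match wins */
		if ((insn & e->mask) == e->value)
			return e->action;
}

int main(void)
{
	printf("%s\n", toy_decode(0xfab2f082));		/* CLZ r0, r2 */
	return 0;
}
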
287enum decode_type {
288 DECODE_TYPE_END,
289 DECODE_TYPE_TABLE,
290 DECODE_TYPE_CUSTOM,
291 DECODE_TYPE_SIMULATE,
292 DECODE_TYPE_EMULATE,
293 DECODE_TYPE_OR,
294 DECODE_TYPE_REJECT,
295 NUM_DECODE_TYPES /* Must be last enum */
296};
297
298#define DECODE_TYPE_BITS 4
299#define DECODE_TYPE_MASK ((1 << DECODE_TYPE_BITS) - 1)
300
301enum decode_reg_type {
302 REG_TYPE_NONE = 0, /* Not a register, ignore */
303 REG_TYPE_ANY, /* Any register allowed */
304 REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */
305 REG_TYPE_SP, /* Register must be SP */
306 REG_TYPE_PC, /* Register must be PC */
307 REG_TYPE_NOSP, /* Register must not be SP */
308 REG_TYPE_NOSPPC, /* Register must not be SP or PC */
309 REG_TYPE_NOPC, /* Register must not be PC */
310 REG_TYPE_NOPCWB, /* No PC if load/store write-back flag also set */
311
312 /* The following types are used when the encoding for PC indicates
313 * another instruction form. This distinction only matters for test
314 * case coverage checks.
315 */
316 REG_TYPE_NOPCX, /* Register must not be PC */
317 REG_TYPE_NOSPPCX, /* Register must not be SP or PC */
318
319 /* Alias to allow '0' arg to be used in REGS macro. */
320 REG_TYPE_0 = REG_TYPE_NONE
321};
322
323#define REGS(r16, r12, r8, r4, r0) \
324 ((REG_TYPE_##r16) << 16) + \
325 ((REG_TYPE_##r12) << 12) + \
326 ((REG_TYPE_##r8) << 8) + \
327 ((REG_TYPE_##r4) << 4) + \
328 (REG_TYPE_##r0)
329
330union decode_item {
331 u32 bits;
332 const union decode_item *table;
333 kprobe_insn_handler_t *handler;
334 kprobe_decode_insn_t *decoder;
335};
336
337
338#define DECODE_END \
339 {.bits = DECODE_TYPE_END}
340
341
342struct decode_header {
343 union decode_item type_regs;
344 union decode_item mask;
345 union decode_item value;
346};
347
348#define DECODE_HEADER(_type, _mask, _value, _regs) \
349 {.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)}, \
350 {.bits = (_mask)}, \
351 {.bits = (_value)}
352
353
354struct decode_table {
355 struct decode_header header;
356 union decode_item table;
357};
358
359#define DECODE_TABLE(_mask, _value, _table) \
360 DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0), \
361 {.table = (_table)}
362
363
364struct decode_custom {
365 struct decode_header header;
366 union decode_item decoder;
367};
368
369#define DECODE_CUSTOM(_mask, _value, _decoder) \
370 DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0), \
371 {.decoder = (_decoder)}
372
373
374struct decode_simulate {
375 struct decode_header header;
376 union decode_item handler;
377};
378
379#define DECODE_SIMULATEX(_mask, _value, _handler, _regs) \
380 DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs), \
381 {.handler = (_handler)}
382
383#define DECODE_SIMULATE(_mask, _value, _handler) \
384 DECODE_SIMULATEX(_mask, _value, _handler, 0)
385
386
387struct decode_emulate {
388 struct decode_header header;
389 union decode_item handler;
390};
391
392#define DECODE_EMULATEX(_mask, _value, _handler, _regs) \
393 DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs), \
394 {.handler = (_handler)}
395
396#define DECODE_EMULATE(_mask, _value, _handler) \
397 DECODE_EMULATEX(_mask, _value, _handler, 0)
398
399
400struct decode_or {
401 struct decode_header header;
402};
403
404#define DECODE_OR(_mask, _value) \
405 DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0)
406
407
408struct decode_reject {
409 struct decode_header header;
410};
411
412#define DECODE_REJECT(_mask, _value) \
413 DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0)
414
415
416int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
417 const union decode_item *table, bool thumb16);
418
419
420#endif /* _ARM_KERNEL_KPROBES_H */
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8d8507858e5c..53c9c2610cbc 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -662,6 +662,12 @@ init_hw_perf_events(void)
662 case 0xC090: /* Cortex-A9 */ 662 case 0xC090: /* Cortex-A9 */
663 armpmu = armv7_a9_pmu_init(); 663 armpmu = armv7_a9_pmu_init();
664 break; 664 break;
665 case 0xC050: /* Cortex-A5 */
666 armpmu = armv7_a5_pmu_init();
667 break;
668 case 0xC0F0: /* Cortex-A15 */
669 armpmu = armv7_a15_pmu_init();
670 break;
665 } 671 }
666 /* Intel CPUs [xscale]. */ 672 /* Intel CPUs [xscale]. */
667 } else if (0x69 == implementor) { 673 } else if (0x69 == implementor) {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4960686afb58..963317896c80 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,17 +17,23 @@
17 */ 17 */
18 18
19#ifdef CONFIG_CPU_V7 19#ifdef CONFIG_CPU_V7
20/* Common ARMv7 event types */ 20/*
21 * Common ARMv7 event types
22 *
23 * Note: An implementation may not be able to count all of these events
24 * but the encodings are considered to be `reserved' in the case that
25 * they are not available.
26 */
21enum armv7_perf_types { 27enum armv7_perf_types {
22 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, 28 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
23 ARMV7_PERFCTR_IFETCH_MISS = 0x01, 29 ARMV7_PERFCTR_IFETCH_MISS = 0x01,
24 ARMV7_PERFCTR_ITLB_MISS = 0x02, 30 ARMV7_PERFCTR_ITLB_MISS = 0x02,
25 ARMV7_PERFCTR_DCACHE_REFILL = 0x03, 31 ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
26 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, 32 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
27 ARMV7_PERFCTR_DTLB_REFILL = 0x05, 33 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
28 ARMV7_PERFCTR_DREAD = 0x06, 34 ARMV7_PERFCTR_DREAD = 0x06,
29 ARMV7_PERFCTR_DWRITE = 0x07, 35 ARMV7_PERFCTR_DWRITE = 0x07,
30 36 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
31 ARMV7_PERFCTR_EXC_TAKEN = 0x09, 37 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
32 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, 38 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
33 ARMV7_PERFCTR_CID_WRITE = 0x0B, 39 ARMV7_PERFCTR_CID_WRITE = 0x0B,
@@ -39,21 +45,30 @@ enum armv7_perf_types {
39 */ 45 */
40 ARMV7_PERFCTR_PC_WRITE = 0x0C, 46 ARMV7_PERFCTR_PC_WRITE = 0x0C,
41 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, 47 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
48 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
42 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, 49 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
50
51 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
43 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, 52 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
44 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, 53 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
45 54 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
46 ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, 55 ARMV7_PERFCTR_MEM_ACCESS = 0x13,
56 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
57 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
58 ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
59 ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
60 ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
61 ARMV7_PERFCTR_BUS_ACCESS = 0x19,
62 ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
63 ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
64 ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
65 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
47 66
48 ARMV7_PERFCTR_CPU_CYCLES = 0xFF 67 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
49}; 68};
50 69
51/* ARMv7 Cortex-A8 specific event types */ 70/* ARMv7 Cortex-A8 specific event types */
52enum armv7_a8_perf_types { 71enum armv7_a8_perf_types {
53 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
54
55 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
56
57 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, 72 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
58 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, 73 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
59 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, 74 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
@@ -138,6 +153,39 @@ enum armv7_a9_perf_types {
138 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 153 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
139}; 154};
140 155
156/* ARMv7 Cortex-A5 specific event types */
157enum armv7_a5_perf_types {
158 ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
159 ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
160
161 ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
162 ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
163 ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
164 ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
165 ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
166 ARMV7_PERFCTR_READ_ALLOC = 0xc5,
167
168 ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
169};
170
171/* ARMv7 Cortex-A15 specific event types */
172enum armv7_a15_perf_types {
173 ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
174 ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
175 ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
176 ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
177
178 ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
179 ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
180
181 ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
182 ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
183 ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
184 ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
185
186 ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
187};
188
141/* 189/*
142 * Cortex-A8 HW events mapping 190 * Cortex-A8 HW events mapping
143 * 191 *
@@ -207,11 +255,6 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
207 }, 255 },
208 }, 256 },
209 [C(DTLB)] = { 257 [C(DTLB)] = {
210 /*
211 * Only ITLB misses and DTLB refills are supported.
212 * If users want the DTLB refills misses a raw counter
213 * must be used.
214 */
215 [C(OP_READ)] = { 258 [C(OP_READ)] = {
216 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 259 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
217 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, 260 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -323,11 +366,6 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
323 }, 366 },
324 }, 367 },
325 [C(DTLB)] = { 368 [C(DTLB)] = {
326 /*
327 * Only ITLB misses and DTLB refills are supported.
328 * If users want the DTLB refills misses a raw counter
329 * must be used.
330 */
331 [C(OP_READ)] = { 369 [C(OP_READ)] = {
332 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, 370 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
333 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, 371 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
@@ -374,6 +412,242 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
374}; 412};
375 413
376/* 414/*
415 * Cortex-A5 HW events mapping
416 */
417static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
418 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
419 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
420 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
421 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
422 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
423 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
424 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
425};
426
427static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
428 [PERF_COUNT_HW_CACHE_OP_MAX]
429 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
430 [C(L1D)] = {
431 [C(OP_READ)] = {
432 [C(RESULT_ACCESS)]
433 = ARMV7_PERFCTR_DCACHE_ACCESS,
434 [C(RESULT_MISS)]
435 = ARMV7_PERFCTR_DCACHE_REFILL,
436 },
437 [C(OP_WRITE)] = {
438 [C(RESULT_ACCESS)]
439 = ARMV7_PERFCTR_DCACHE_ACCESS,
440 [C(RESULT_MISS)]
441 = ARMV7_PERFCTR_DCACHE_REFILL,
442 },
443 [C(OP_PREFETCH)] = {
444 [C(RESULT_ACCESS)]
445 = ARMV7_PERFCTR_PREFETCH_LINEFILL,
446 [C(RESULT_MISS)]
447 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
448 },
449 },
450 [C(L1I)] = {
451 [C(OP_READ)] = {
452 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
453 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
454 },
455 [C(OP_WRITE)] = {
456 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
457 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
458 },
459 /*
460 * The prefetch counters don't differentiate between the I
461 * side and the D side.
462 */
463 [C(OP_PREFETCH)] = {
464 [C(RESULT_ACCESS)]
465 = ARMV7_PERFCTR_PREFETCH_LINEFILL,
466 [C(RESULT_MISS)]
467 = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
468 },
469 },
470 [C(LL)] = {
471 [C(OP_READ)] = {
472 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
473 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
474 },
475 [C(OP_WRITE)] = {
476 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
477 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
478 },
479 [C(OP_PREFETCH)] = {
480 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
481 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
482 },
483 },
484 [C(DTLB)] = {
485 [C(OP_READ)] = {
486 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
487 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
488 },
489 [C(OP_WRITE)] = {
490 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
491 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
492 },
493 [C(OP_PREFETCH)] = {
494 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
495 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
496 },
497 },
498 [C(ITLB)] = {
499 [C(OP_READ)] = {
500 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
501 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
502 },
503 [C(OP_WRITE)] = {
504 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
505 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
506 },
507 [C(OP_PREFETCH)] = {
508 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
509 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
510 },
511 },
512 [C(BPU)] = {
513 [C(OP_READ)] = {
514 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
515 [C(RESULT_MISS)]
516 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
517 },
518 [C(OP_WRITE)] = {
519 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
520 [C(RESULT_MISS)]
521 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
522 },
523 [C(OP_PREFETCH)] = {
524 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
525 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
526 },
527 },
528};
529
530/*
531 * Cortex-A15 HW events mapping
532 */
533static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
534 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
535 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
536 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
537 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
538 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
539 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
540 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
541};
542
543static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
544 [PERF_COUNT_HW_CACHE_OP_MAX]
545 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
546 [C(L1D)] = {
547 [C(OP_READ)] = {
548 [C(RESULT_ACCESS)]
549 = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
550 [C(RESULT_MISS)]
551 = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
552 },
553 [C(OP_WRITE)] = {
554 [C(RESULT_ACCESS)]
555 = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
556 [C(RESULT_MISS)]
557 = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
558 },
559 [C(OP_PREFETCH)] = {
560 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
561 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
562 },
563 },
564 [C(L1I)] = {
565 /*
566 * Not all performance counters differentiate between read
567 * and write accesses/misses so we're not always strictly
568 * correct, but it's the best we can do. Writes and reads get
569 * combined in these cases.
570 */
571 [C(OP_READ)] = {
572 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
573 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
574 },
575 [C(OP_WRITE)] = {
576 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
577 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
578 },
579 [C(OP_PREFETCH)] = {
580 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
581 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
582 },
583 },
584 [C(LL)] = {
585 [C(OP_READ)] = {
586 [C(RESULT_ACCESS)]
587 = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
588 [C(RESULT_MISS)]
589 = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
590 },
591 [C(OP_WRITE)] = {
592 [C(RESULT_ACCESS)]
593 = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
594 [C(RESULT_MISS)]
595 = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
596 },
597 [C(OP_PREFETCH)] = {
598 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
599 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
600 },
601 },
602 [C(DTLB)] = {
603 [C(OP_READ)] = {
604 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
605 [C(RESULT_MISS)]
606 = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
607 },
608 [C(OP_WRITE)] = {
609 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
610 [C(RESULT_MISS)]
611 = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
612 },
613 [C(OP_PREFETCH)] = {
614 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
615 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
616 },
617 },
618 [C(ITLB)] = {
619 [C(OP_READ)] = {
620 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
621 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
622 },
623 [C(OP_WRITE)] = {
624 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
625 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
626 },
627 [C(OP_PREFETCH)] = {
628 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
629 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
630 },
631 },
632 [C(BPU)] = {
633 [C(OP_READ)] = {
634 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
635 [C(RESULT_MISS)]
636 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
637 },
638 [C(OP_WRITE)] = {
639 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
640 [C(RESULT_MISS)]
641 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
642 },
643 [C(OP_PREFETCH)] = {
644 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
645 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
646 },
647 },
648};
649
650/*
377 * Perf Events counters 651 * Perf Events counters
378 */ 652 */
379enum armv7_counters { 653enum armv7_counters {
@@ -905,6 +1179,26 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
905 armv7pmu.num_events = armv7_read_num_pmnc_events(); 1179 armv7pmu.num_events = armv7_read_num_pmnc_events();
906 return &armv7pmu; 1180 return &armv7pmu;
907} 1181}
1182
1183static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1184{
1185 armv7pmu.id = ARM_PERF_PMU_ID_CA5;
1186 armv7pmu.name = "ARMv7 Cortex-A5";
1187 armv7pmu.cache_map = &armv7_a5_perf_cache_map;
1188 armv7pmu.event_map = &armv7_a5_perf_map;
1189 armv7pmu.num_events = armv7_read_num_pmnc_events();
1190 return &armv7pmu;
1191}
1192
1193static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1194{
1195 armv7pmu.id = ARM_PERF_PMU_ID_CA15;
1196 armv7pmu.name = "ARMv7 Cortex-A15";
1197 armv7pmu.cache_map = &armv7_a15_perf_cache_map;
1198 armv7pmu.event_map = &armv7_a15_perf_map;
1199 armv7pmu.num_events = armv7_read_num_pmnc_events();
1200 return &armv7pmu;
1201}
908#else 1202#else
909static const struct arm_pmu *__init armv7_a8_pmu_init(void) 1203static const struct arm_pmu *__init armv7_a8_pmu_init(void)
910{ 1204{
@@ -915,4 +1209,14 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void)
915{ 1209{
916 return NULL; 1210 return NULL;
917} 1211}
1212
1213static const struct arm_pmu *__init armv7_a5_pmu_init(void)
1214{
1215 return NULL;
1216}
1217
1218static const struct arm_pmu *__init armv7_a15_pmu_init(void)
1219{
1220 return NULL;
1221}
918#endif /* CONFIG_CPU_V7 */ 1222#endif /* CONFIG_CPU_V7 */
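
The Cortex-A5/A15 tables added above are pure data; the shared armpmu layer turns a generic cache event config into one of the ARMV7_PERFCTR_* values by splitting the config into (cache, op, result) and indexing the 3-D cache_map installed by the new *_pmu_init() functions. A minimal sketch of that lookup follows (function name and exact error handling are illustrative, not the in-tree routine):

/*
 * Sketch only: how a cache_map such as armv7_a15_perf_cache_map is
 * consumed.  The real lookup lives in the common ARM perf code; this
 * mirrors its shape, not its letter.
 */
static int map_cache_event_sketch(const unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
				  u64 config)
{
	unsigned int cache_type   = (config >>  0) & 0xff;
	unsigned int cache_op     = (config >>  8) & 0xff;
	unsigned int cache_result = (config >> 16) & 0xff;
	int code;

	if (cache_type >= PERF_COUNT_HW_CACHE_MAX ||
	    cache_op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	/* e.g. [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] on an A15 yields
	 * ARMV7_PERFCTR_L1_DTLB_READ_REFILL (0x4C). */
	code = (*cache_map)[cache_type][cache_op][cache_result];
	if (code == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return code;
}
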
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 97260060bf26..897ade059f58 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -228,34 +228,12 @@ static struct undef_hook thumb_break_hook = {
228 .fn = break_trap, 228 .fn = break_trap,
229}; 229};
230 230
231static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
232{
233 unsigned int instr2;
234 void __user *pc;
235
236 /* Check the second half of the instruction. */
237 pc = (void __user *)(instruction_pointer(regs) + 2);
238
239 if (processor_mode(regs) == SVC_MODE) {
240 instr2 = *(u16 *) pc;
241 } else {
242 get_user(instr2, (u16 __user *)pc);
243 }
244
245 if (instr2 == 0xa000) {
246 ptrace_break(current, regs);
247 return 0;
248 } else {
249 return 1;
250 }
251}
252
253static struct undef_hook thumb2_break_hook = { 231static struct undef_hook thumb2_break_hook = {
254 .instr_mask = 0xffff, 232 .instr_mask = 0xffffffff,
255 .instr_val = 0xf7f0, 233 .instr_val = 0xf7f0a000,
256 .cpsr_mask = PSR_T_BIT, 234 .cpsr_mask = PSR_T_BIT,
257 .cpsr_val = PSR_T_BIT, 235 .cpsr_val = PSR_T_BIT,
258 .fn = thumb2_break_trap, 236 .fn = break_trap,
259}; 237};
260 238
261static int __init ptrace_break_init(void) 239static int __init ptrace_break_init(void)
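
With do_undefinstr() now handing whole 32-bit Thumb-2 instructions to the hook layer (see the traps.c hunk further down), the breakpoint can be matched directly against its full encoding 0xf7f0a000, so the hand-rolled second-halfword check in thumb2_break_trap() goes away. The matching itself reduces to a mask/compare on the CPSR and the instruction; a sketch of that test (helper name illustrative):

/*
 * Illustrative only: the essence of undef_hook matching once 32-bit
 * Thumb-2 instructions are passed in whole.
 */
static int hook_matches(const struct undef_hook *hook,
			unsigned long cpsr, unsigned int instr)
{
	return (cpsr & hook->cpsr_mask) == hook->cpsr_val &&
	       (instr & hook->instr_mask) == hook->instr_val;
	/* 0xf7f0a000 & 0xffffffff == 0xf7f0a000  ->  break_trap() */
}
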
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9c3278f37796..70bca649e925 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -919,6 +919,12 @@ void __init setup_arch(char **cmdline_p)
919 919
920 tcm_init(); 920 tcm_init();
921 921
922#ifdef CONFIG_ZONE_DMA
923 if (mdesc->dma_zone_size) {
924 extern unsigned long arm_dma_zone_size;
925 arm_dma_zone_size = mdesc->dma_zone_size;
926 }
927#endif
922#ifdef CONFIG_MULTI_IRQ_HANDLER 928#ifdef CONFIG_MULTI_IRQ_HANDLER
923 handle_arch_irq = mdesc->handle_irq; 929 handle_arch_irq = mdesc->handle_irq;
924#endif 930#endif
@@ -980,6 +986,10 @@ static const char *hwcap_str[] = {
980 "neon", 986 "neon",
981 "vfpv3", 987 "vfpv3",
982 "vfpv3d16", 988 "vfpv3d16",
989 "tls",
990 "vfpv4",
991 "idiva",
992 "idivt",
983 NULL 993 NULL
984}; 994};
985 995
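
The strings appended to hwcap_str[] ("tls", "vfpv4", "idiva", "idivt") are indexed by bit position of the corresponding HWCAP_* flags, so they show up in the /proc/cpuinfo "Features" line for every bit set in elf_hwcap. A paraphrased sketch of that loop (not the verbatim c_show() code; needs <linux/seq_file.h>):

static void show_features_sketch(struct seq_file *m, unsigned long hwcaps)
{
	int i;

	seq_puts(m, "Features\t:");
	for (i = 0; hwcap_str[i]; i++)
		if (hwcaps & (1 << i))
			seq_printf(m, " %s", hwcap_str[i]);
	seq_putc(m, '\n');
}
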
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 6807cb1e76dd..2d3436e9f71f 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -355,9 +355,24 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
355 pc = (void __user *)instruction_pointer(regs); 355 pc = (void __user *)instruction_pointer(regs);
356 356
357 if (processor_mode(regs) == SVC_MODE) { 357 if (processor_mode(regs) == SVC_MODE) {
358 instr = *(u32 *) pc; 358#ifdef CONFIG_THUMB2_KERNEL
359 if (thumb_mode(regs)) {
360 instr = ((u16 *)pc)[0];
361 if (is_wide_instruction(instr)) {
362 instr <<= 16;
363 instr |= ((u16 *)pc)[1];
364 }
365 } else
366#endif
367 instr = *(u32 *) pc;
359 } else if (thumb_mode(regs)) { 368 } else if (thumb_mode(regs)) {
360 get_user(instr, (u16 __user *)pc); 369 get_user(instr, (u16 __user *)pc);
370 if (is_wide_instruction(instr)) {
371 unsigned int instr2;
372 get_user(instr2, (u16 __user *)pc+1);
373 instr <<= 16;
374 instr |= instr2;
375 }
361 } else { 376 } else {
362 get_user(instr, (u32 __user *)pc); 377 get_user(instr, (u32 __user *)pc);
363 } 378 }
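
The traps.c hunk above keys off is_wide_instruction() to decide whether a second Thumb halfword must be fetched and merged. A hedged sketch of that predicate (the real macro lives in <asm/ptrace.h>; the constant here is an approximation of it): a halfword whose top bits are 0b11101, 0b11110 or 0b11111 is the first half of a 32-bit Thumb-2 encoding, i.e. any value >= 0xe800.

static inline int thumb_halfword_is_wide(unsigned int hw1)
{
	return (hw1 & 0xffff) >= 0xe800;
}

/* e.g. 0xf7f0, the first half of the 0xf7f0a000 breakpoint, is wide,
 * so the trap handler ORs in the second halfword before matching. */
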
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index 8bc3701aa05c..84fd78684868 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -681,4 +681,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
681 .init_irq = cp_intc_init, 681 .init_irq = cp_intc_init,
682 .timer = &davinci_timer, 682 .timer = &davinci_timer,
683 .init_machine = da830_evm_init, 683 .init_machine = da830_evm_init,
684 .dma_zone_size = SZ_128M,
684MACHINE_END 685MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index a7b41bf505f1..29671ef07152 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1261,4 +1261,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
1261 .init_irq = cp_intc_init, 1261 .init_irq = cp_intc_init,
1262 .timer = &davinci_timer, 1262 .timer = &davinci_timer,
1263 .init_machine = da850_evm_init, 1263 .init_machine = da850_evm_init,
1264 .dma_zone_size = SZ_128M,
1264MACHINE_END 1265MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 6e7cad13352c..241a6bd67408 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -356,4 +356,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
356 .init_irq = davinci_irq_init, 356 .init_irq = davinci_irq_init,
357 .timer = &davinci_timer, 357 .timer = &davinci_timer,
358 .init_machine = dm355_evm_init, 358 .init_machine = dm355_evm_init,
359 .dma_zone_size = SZ_128M,
359MACHINE_END 360MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index 543f9911b281..bee284ca7fd6 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -275,4 +275,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
275 .init_irq = davinci_irq_init, 275 .init_irq = davinci_irq_init,
276 .timer = &davinci_timer, 276 .timer = &davinci_timer,
277 .init_machine = dm355_leopard_init, 277 .init_machine = dm355_leopard_init,
278 .dma_zone_size = SZ_128M,
278MACHINE_END 279MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 09a87e61ffcf..9818f214d4f0 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -617,5 +617,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
617 .init_irq = davinci_irq_init, 617 .init_irq = davinci_irq_init,
618 .timer = &davinci_timer, 618 .timer = &davinci_timer,
619 .init_machine = dm365_evm_init, 619 .init_machine = dm365_evm_init,
620 .dma_zone_size = SZ_128M,
620MACHINE_END 621MACHINE_END
621 622
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 556bbd468db3..95607a191e03 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -717,4 +717,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
717 .init_irq = davinci_irq_init, 717 .init_irq = davinci_irq_init,
718 .timer = &davinci_timer, 718 .timer = &davinci_timer,
719 .init_machine = davinci_evm_init, 719 .init_machine = davinci_evm_init,
720 .dma_zone_size = SZ_128M,
720MACHINE_END 721MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index f6ac9ba74878..6d03643b9bd1 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -802,6 +802,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
802 .init_irq = davinci_irq_init, 802 .init_irq = davinci_irq_init,
803 .timer = &davinci_timer, 803 .timer = &davinci_timer,
804 .init_machine = evm_init, 804 .init_machine = evm_init,
805 .dma_zone_size = SZ_128M,
805MACHINE_END 806MACHINE_END
806 807
807MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") 808MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
@@ -810,5 +811,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
810 .init_irq = davinci_irq_init, 811 .init_irq = davinci_irq_init,
811 .timer = &davinci_timer, 812 .timer = &davinci_timer,
812 .init_machine = evm_init, 813 .init_machine = evm_init,
814 .dma_zone_size = SZ_128M,
813MACHINE_END 815MACHINE_END
814 816
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 606a6f27ed6c..b8d59ca49027 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -570,4 +570,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
570 .init_irq = cp_intc_init, 570 .init_irq = cp_intc_init,
571 .timer = &davinci_timer, 571 .timer = &davinci_timer,
572 .init_machine = mityomapl138_init, 572 .init_machine = mityomapl138_init,
573 .dma_zone_size = SZ_128M,
573MACHINE_END 574MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 3e7be2de96de..d60a80028ba3 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -277,4 +277,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
277 .init_irq = davinci_irq_init, 277 .init_irq = davinci_irq_init,
278 .timer = &davinci_timer, 278 .timer = &davinci_timer,
279 .init_machine = davinci_ntosd2_init, 279 .init_machine = davinci_ntosd2_init,
280 .dma_zone_size = SZ_128M,
280MACHINE_END 281MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 67c38d0ecd10..237332a11421 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -343,4 +343,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
343 .init_irq = cp_intc_init, 343 .init_irq = cp_intc_init,
344 .timer = &davinci_timer, 344 .timer = &davinci_timer,
345 .init_machine = omapl138_hawk_init, 345 .init_machine = omapl138_hawk_init,
346 .dma_zone_size = SZ_128M,
346MACHINE_END 347MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 61ac96d8f00d..5f4385c0a089 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -156,4 +156,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
156 .init_irq = davinci_irq_init, 156 .init_irq = davinci_irq_init,
157 .timer = &davinci_timer, 157 .timer = &davinci_timer,
158 .init_machine = davinci_sffsdr_init, 158 .init_machine = davinci_sffsdr_init,
159 .dma_zone_size = SZ_128M,
159MACHINE_END 160MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 1a656e882262..782892065682 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -282,4 +282,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
282 .init_irq = cp_intc_init, 282 .init_irq = cp_intc_init,
283 .timer = &davinci_timer, 283 .timer = &davinci_timer,
284 .init_machine = tnetv107x_evm_board_init, 284 .init_machine = tnetv107x_evm_board_init,
285 .dma_zone_size = SZ_128M,
285MACHINE_END 286MACHINE_END
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h
index 491249ef209c..78731944a70c 100644
--- a/arch/arm/mach-davinci/include/mach/memory.h
+++ b/arch/arm/mach-davinci/include/mach/memory.h
@@ -41,11 +41,4 @@
41 */ 41 */
42#define CONSISTENT_DMA_SIZE (14<<20) 42#define CONSISTENT_DMA_SIZE (14<<20)
43 43
44/*
45 * Restrict DMA-able region to workaround silicon bug. The bug
46 * restricts buffers available for DMA to video hardware to be
47 * below 128M
48 */
49#define ARM_DMA_ZONE_SIZE SZ_128M
50
51#endif /* __ASM_ARCH_MEMORY_H */ 44#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c
index 629454d71c8d..65f1bea958e5 100644
--- a/arch/arm/mach-h720x/h7201-eval.c
+++ b/arch/arm/mach-h720x/h7201-eval.c
@@ -33,4 +33,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201")
33 .map_io = h720x_map_io, 33 .map_io = h720x_map_io,
34 .init_irq = h720x_init_irq, 34 .init_irq = h720x_init_irq,
35 .timer = &h7201_timer, 35 .timer = &h7201_timer,
36 .dma_zone_size = SZ_256M,
36MACHINE_END 37MACHINE_END
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c
index e9f46b696354..884584a09752 100644
--- a/arch/arm/mach-h720x/h7202-eval.c
+++ b/arch/arm/mach-h720x/h7202-eval.c
@@ -76,4 +76,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202")
76 .init_irq = h7202_init_irq, 76 .init_irq = h7202_init_irq,
77 .timer = &h7202_timer, 77 .timer = &h7202_timer,
78 .init_machine = init_eval_h7202, 78 .init_machine = init_eval_h7202,
79 .dma_zone_size = SZ_256M,
79MACHINE_END 80MACHINE_END
diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h
index b0b3baec9acf..96dcf50c51d3 100644
--- a/arch/arm/mach-h720x/include/mach/memory.h
+++ b/arch/arm/mach-h720x/include/mach/memory.h
@@ -8,11 +8,4 @@
8#define __ASM_ARCH_MEMORY_H 8#define __ASM_ARCH_MEMORY_H
9 9
10#define PLAT_PHYS_OFFSET UL(0x40000000) 10#define PLAT_PHYS_OFFSET UL(0x40000000)
11/*
12 * This is the maximum DMA address that can be DMAd to.
13 * There should not be more than (0xd0000000 - 0xc0000000)
14 * bytes of RAM.
15 */
16#define ARM_DMA_ZONE_SIZE SZ_256M
17
18#endif 11#endif
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c
index 73745ff102d5..ee19c1d383aa 100644
--- a/arch/arm/mach-ixp4xx/avila-setup.c
+++ b/arch/arm/mach-ixp4xx/avila-setup.c
@@ -169,6 +169,9 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform")
169 .timer = &ixp4xx_timer, 169 .timer = &ixp4xx_timer,
170 .boot_params = 0x0100, 170 .boot_params = 0x0100,
171 .init_machine = avila_init, 171 .init_machine = avila_init,
172#if defined(CONFIG_PCI)
173 .dma_zone_size = SZ_64M,
174#endif
172MACHINE_END 175MACHINE_END
173 176
174 /* 177 /*
@@ -184,6 +187,9 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board")
184 .timer = &ixp4xx_timer, 187 .timer = &ixp4xx_timer,
185 .boot_params = 0x0100, 188 .boot_params = 0x0100,
186 .init_machine = avila_init, 189 .init_machine = avila_init,
190#if defined(CONFIG_PCI)
191 .dma_zone_size = SZ_64M,
192#endif
187MACHINE_END 193MACHINE_END
188#endif 194#endif
189 195
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c
index 355e3de38733..e24564b5d935 100644
--- a/arch/arm/mach-ixp4xx/coyote-setup.c
+++ b/arch/arm/mach-ixp4xx/coyote-setup.c
@@ -114,6 +114,9 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote")
114 .timer = &ixp4xx_timer, 114 .timer = &ixp4xx_timer,
115 .boot_params = 0x0100, 115 .boot_params = 0x0100,
116 .init_machine = coyote_init, 116 .init_machine = coyote_init,
117#if defined(CONFIG_PCI)
118 .dma_zone_size = SZ_64M,
119#endif
117MACHINE_END 120MACHINE_END
118#endif 121#endif
119 122
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c
index d398229cfaa5..03e54515e8b3 100644
--- a/arch/arm/mach-ixp4xx/dsmg600-setup.c
+++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c
@@ -284,4 +284,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA")
284 .init_irq = ixp4xx_init_irq, 284 .init_irq = ixp4xx_init_irq,
285 .timer = &dsmg600_timer, 285 .timer = &dsmg600_timer,
286 .init_machine = dsmg600_init, 286 .init_machine = dsmg600_init,
287#if defined(CONFIG_PCI)
288 .dma_zone_size = SZ_64M,
289#endif
287MACHINE_END 290MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c
index 727ee39ce11c..23a8b3614568 100644
--- a/arch/arm/mach-ixp4xx/fsg-setup.c
+++ b/arch/arm/mach-ixp4xx/fsg-setup.c
@@ -275,5 +275,8 @@ MACHINE_START(FSG, "Freecom FSG-3")
275 .timer = &ixp4xx_timer, 275 .timer = &ixp4xx_timer,
276 .boot_params = 0x0100, 276 .boot_params = 0x0100,
277 .init_machine = fsg_init, 277 .init_machine = fsg_init,
278#if defined(CONFIG_PCI)
279 .dma_zone_size = SZ_64M,
280#endif
278MACHINE_END 281MACHINE_END
279 282
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c
index 9dc0b4eaa65a..d4f851bdd9a4 100644
--- a/arch/arm/mach-ixp4xx/gateway7001-setup.c
+++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c
@@ -101,5 +101,8 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP")
101 .timer = &ixp4xx_timer, 101 .timer = &ixp4xx_timer,
102 .boot_params = 0x0100, 102 .boot_params = 0x0100,
103 .init_machine = gateway7001_init, 103 .init_machine = gateway7001_init,
104#if defined(CONFIG_PCI)
105 .dma_zone_size = SZ_64M,
106#endif
104MACHINE_END 107MACHINE_END
105#endif 108#endif
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c
index 3e8c0e33b59c..5f00ad224fe0 100644
--- a/arch/arm/mach-ixp4xx/goramo_mlr.c
+++ b/arch/arm/mach-ixp4xx/goramo_mlr.c
@@ -501,4 +501,7 @@ MACHINE_START(GORAMO_MLR, "MultiLink")
501 .timer = &ixp4xx_timer, 501 .timer = &ixp4xx_timer,
502 .boot_params = 0x0100, 502 .boot_params = 0x0100,
503 .init_machine = gmlr_init, 503 .init_machine = gmlr_init,
504#if defined(CONFIG_PCI)
505 .dma_zone_size = SZ_64M,
506#endif
504MACHINE_END 507MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
index 77abead36227..3790dffd3c30 100644
--- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c
+++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c
@@ -169,6 +169,9 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
169 .timer = &ixp4xx_timer, 169 .timer = &ixp4xx_timer,
170 .boot_params = 0x0100, 170 .boot_params = 0x0100,
171 .init_machine = gtwx5715_init, 171 .init_machine = gtwx5715_init,
172#if defined(CONFIG_PCI)
173 .dma_zone_size = SZ_64M,
174#endif
172MACHINE_END 175MACHINE_END
173 176
174 177
diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h
index 34e79404671a..4caf1761f1e2 100644
--- a/arch/arm/mach-ixp4xx/include/mach/memory.h
+++ b/arch/arm/mach-ixp4xx/include/mach/memory.h
@@ -14,8 +14,4 @@
14 */ 14 */
15#define PLAT_PHYS_OFFSET UL(0x00000000) 15#define PLAT_PHYS_OFFSET UL(0x00000000)
16 16
17#ifdef CONFIG_PCI
18#define ARM_DMA_ZONE_SIZE SZ_64M
19#endif
20
21#endif 17#endif
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index dca4f7f9f4f7..6a2927956bf6 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -258,6 +258,9 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform")
258 .timer = &ixp4xx_timer, 258 .timer = &ixp4xx_timer,
259 .boot_params = 0x0100, 259 .boot_params = 0x0100,
260 .init_machine = ixdp425_init, 260 .init_machine = ixdp425_init,
261#if defined(CONFIG_PCI)
262 .dma_zone_size = SZ_64M,
263#endif
261MACHINE_END 264MACHINE_END
262#endif 265#endif
263 266
@@ -269,6 +272,9 @@ MACHINE_START(IXDP465, "Intel IXDP465 Development Platform")
269 .timer = &ixp4xx_timer, 272 .timer = &ixp4xx_timer,
270 .boot_params = 0x0100, 273 .boot_params = 0x0100,
271 .init_machine = ixdp425_init, 274 .init_machine = ixdp425_init,
275#if defined(CONFIG_PCI)
276 .dma_zone_size = SZ_64M,
277#endif
272MACHINE_END 278MACHINE_END
273#endif 279#endif
274 280
@@ -280,6 +286,9 @@ MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform")
280 .timer = &ixp4xx_timer, 286 .timer = &ixp4xx_timer,
281 .boot_params = 0x0100, 287 .boot_params = 0x0100,
282 .init_machine = ixdp425_init, 288 .init_machine = ixdp425_init,
289#if defined(CONFIG_PCI)
290 .dma_zone_size = SZ_64M,
291#endif
283MACHINE_END 292MACHINE_END
284#endif 293#endif
285 294
@@ -291,5 +300,8 @@ MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform")
291 .timer = &ixp4xx_timer, 300 .timer = &ixp4xx_timer,
292 .boot_params = 0x0100, 301 .boot_params = 0x0100,
293 .init_machine = ixdp425_init, 302 .init_machine = ixdp425_init,
303#if defined(CONFIG_PCI)
304 .dma_zone_size = SZ_64M,
305#endif
294MACHINE_END 306MACHINE_END
295#endif 307#endif
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c
index f18fee748878..afb51879d9a4 100644
--- a/arch/arm/mach-ixp4xx/nas100d-setup.c
+++ b/arch/arm/mach-ixp4xx/nas100d-setup.c
@@ -319,4 +319,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d")
319 .init_irq = ixp4xx_init_irq, 319 .init_irq = ixp4xx_init_irq,
320 .timer = &ixp4xx_timer, 320 .timer = &ixp4xx_timer,
321 .init_machine = nas100d_init, 321 .init_machine = nas100d_init,
322#if defined(CONFIG_PCI)
323 .dma_zone_size = SZ_64M,
324#endif
322MACHINE_END 325MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
index f79b62eb7614..69e40f2cf092 100644
--- a/arch/arm/mach-ixp4xx/nslu2-setup.c
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -305,4 +305,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2")
305 .init_irq = ixp4xx_init_irq, 305 .init_irq = ixp4xx_init_irq,
306 .timer = &nslu2_timer, 306 .timer = &nslu2_timer,
307 .init_machine = nslu2_init, 307 .init_machine = nslu2_init,
308#if defined(CONFIG_PCI)
309 .dma_zone_size = SZ_64M,
310#endif
308MACHINE_END 311MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c
index 4e72cfdd3c46..045336c833af 100644
--- a/arch/arm/mach-ixp4xx/vulcan-setup.c
+++ b/arch/arm/mach-ixp4xx/vulcan-setup.c
@@ -241,4 +241,7 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan")
241 .timer = &ixp4xx_timer, 241 .timer = &ixp4xx_timer,
242 .boot_params = 0x0100, 242 .boot_params = 0x0100,
243 .init_machine = vulcan_init, 243 .init_machine = vulcan_init,
244#if defined(CONFIG_PCI)
245 .dma_zone_size = SZ_64M,
246#endif
244MACHINE_END 247MACHINE_END
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c
index 5d148c7bc4fb..40b9fad800b8 100644
--- a/arch/arm/mach-ixp4xx/wg302v2-setup.c
+++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c
@@ -102,5 +102,8 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2")
102 .timer = &ixp4xx_timer, 102 .timer = &ixp4xx_timer,
103 .boot_params = 0x0100, 103 .boot_params = 0x0100,
104 .init_machine = wg302v2_init, 104 .init_machine = wg302v2_init,
105#if defined(CONFIG_PCI)
106 .dma_zone_size = SZ_64M,
107#endif
105MACHINE_END 108MACHINE_END
106#endif 109#endif
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index a10996782476..bc55d07566ca 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -518,4 +518,7 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX")
518 .init_irq = cmx2xx_init_irq, 518 .init_irq = cmx2xx_init_irq,
519 .timer = &pxa_timer, 519 .timer = &pxa_timer,
520 .init_machine = cmx2xx_init, 520 .init_machine = cmx2xx_init,
521#ifdef CONFIG_PCI
522 .dma_zone_size = SZ_64M,
523#endif
521MACHINE_END 524MACHINE_END
diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h
index 07734f37f8fd..d05a59727d66 100644
--- a/arch/arm/mach-pxa/include/mach/memory.h
+++ b/arch/arm/mach-pxa/include/mach/memory.h
@@ -17,8 +17,4 @@
17 */ 17 */
18#define PLAT_PHYS_OFFSET UL(0xa0000000) 18#define PLAT_PHYS_OFFSET UL(0xa0000000)
19 19
20#if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI)
21#define ARM_DMA_ZONE_SIZE SZ_64M
22#endif
23
24#endif 20#endif
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h
index 1759fa673eea..2022e092f0ca 100644
--- a/arch/arm/mach-realview/include/mach/memory.h
+++ b/arch/arm/mach-realview/include/mach/memory.h
@@ -29,10 +29,6 @@
29#define PLAT_PHYS_OFFSET UL(0x00000000) 29#define PLAT_PHYS_OFFSET UL(0x00000000)
30#endif 30#endif
31 31
32#ifdef CONFIG_ZONE_DMA
33#define ARM_DMA_ZONE_SIZE SZ_256M
34#endif
35
36#ifdef CONFIG_SPARSEMEM 32#ifdef CONFIG_SPARSEMEM
37 33
38/* 34/*
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c
index 10e75faba4c9..7a4e3b18cb3e 100644
--- a/arch/arm/mach-realview/realview_eb.c
+++ b/arch/arm/mach-realview/realview_eb.c
@@ -470,4 +470,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB")
470 .init_irq = gic_init_irq, 470 .init_irq = gic_init_irq,
471 .timer = &realview_eb_timer, 471 .timer = &realview_eb_timer,
472 .init_machine = realview_eb_init, 472 .init_machine = realview_eb_init,
473#ifdef CONFIG_ZONE_DMA
474 .dma_zone_size = SZ_256M,
475#endif
473MACHINE_END 476MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c
index eab6070f66d0..ad5671acb66a 100644
--- a/arch/arm/mach-realview/realview_pb1176.c
+++ b/arch/arm/mach-realview/realview_pb1176.c
@@ -365,4 +365,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176")
365 .init_irq = gic_init_irq, 365 .init_irq = gic_init_irq,
366 .timer = &realview_pb1176_timer, 366 .timer = &realview_pb1176_timer,
367 .init_machine = realview_pb1176_init, 367 .init_machine = realview_pb1176_init,
368#ifdef CONFIG_ZONE_DMA
369 .dma_zone_size = SZ_256M,
370#endif
368MACHINE_END 371MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c
index b2985fc7cd4e..b43644b3685e 100644
--- a/arch/arm/mach-realview/realview_pb11mp.c
+++ b/arch/arm/mach-realview/realview_pb11mp.c
@@ -367,4 +367,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore")
367 .init_irq = gic_init_irq, 367 .init_irq = gic_init_irq,
368 .timer = &realview_pb11mp_timer, 368 .timer = &realview_pb11mp_timer,
369 .init_machine = realview_pb11mp_init, 369 .init_machine = realview_pb11mp_init,
370#ifdef CONFIG_ZONE_DMA
371 .dma_zone_size = SZ_256M,
372#endif
370MACHINE_END 373MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c
index fb6866558760..763e8f38c15d 100644
--- a/arch/arm/mach-realview/realview_pba8.c
+++ b/arch/arm/mach-realview/realview_pba8.c
@@ -317,4 +317,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8")
317 .init_irq = gic_init_irq, 317 .init_irq = gic_init_irq,
318 .timer = &realview_pba8_timer, 318 .timer = &realview_pba8_timer,
319 .init_machine = realview_pba8_init, 319 .init_machine = realview_pba8_init,
320#ifdef CONFIG_ZONE_DMA
321 .dma_zone_size = SZ_256M,
322#endif
320MACHINE_END 323MACHINE_END
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c
index 92ace2cf2b2c..363b0ab56150 100644
--- a/arch/arm/mach-realview/realview_pbx.c
+++ b/arch/arm/mach-realview/realview_pbx.c
@@ -400,4 +400,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX")
400 .init_irq = gic_init_irq, 400 .init_irq = gic_init_irq,
401 .timer = &realview_pbx_timer, 401 .timer = &realview_pbx_timer,
402 .init_machine = realview_pbx_init, 402 .init_machine = realview_pbx_init,
403#ifdef CONFIG_ZONE_DMA
404 .dma_zone_size = SZ_256M,
405#endif
403MACHINE_END 406MACHINE_END
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 5778274a8260..26257df19b63 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -453,4 +453,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
453 .init_irq = sa1100_init_irq, 453 .init_irq = sa1100_init_irq,
454 .timer = &sa1100_timer, 454 .timer = &sa1100_timer,
455 .init_machine = assabet_init, 455 .init_machine = assabet_init,
456#ifdef CONFIG_SA1111
457 .dma_zone_size = SZ_1M,
458#endif
456MACHINE_END 459MACHINE_END
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index 4f19ff868b00..b4311b0a4395 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -306,4 +306,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
306 .map_io = badge4_map_io, 306 .map_io = badge4_map_io,
307 .init_irq = sa1100_init_irq, 307 .init_irq = sa1100_init_irq,
308 .timer = &sa1100_timer, 308 .timer = &sa1100_timer,
309#ifdef CONFIG_SA1111
310 .dma_zone_size = SZ_1M,
311#endif
309MACHINE_END 312MACHINE_END
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h
index cff31ee246b7..12d376795abc 100644
--- a/arch/arm/mach-sa1100/include/mach/memory.h
+++ b/arch/arm/mach-sa1100/include/mach/memory.h
@@ -14,10 +14,6 @@
14 */ 14 */
15#define PLAT_PHYS_OFFSET UL(0xc0000000) 15#define PLAT_PHYS_OFFSET UL(0xc0000000)
16 16
17#ifdef CONFIG_SA1111
18#define ARM_DMA_ZONE_SIZE SZ_1M
19#endif
20
21/* 17/*
22 * Because of the wide memory address space between physical RAM banks on the 18 * Because of the wide memory address space between physical RAM banks on the
23 * SA1100, it's much convenient to use Linux's SparseMEM support to implement 19 * SA1100, it's much convenient to use Linux's SparseMEM support to implement
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index 491ac9f20fb4..176c066aec7e 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -369,4 +369,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
369 .init_irq = sa1100_init_irq, 369 .init_irq = sa1100_init_irq,
370 .timer = &sa1100_timer, 370 .timer = &sa1100_timer,
371 .init_machine = jornada720_mach_init, 371 .init_machine = jornada720_mach_init,
372#ifdef CONFIG_SA1111
373 .dma_zone_size = SZ_1M,
374#endif
372MACHINE_END 375MACHINE_END
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index 5cf7f94c1f31..ac2873c8014b 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -156,4 +156,5 @@ MACHINE_START(SHARK, "Shark")
156 .map_io = shark_map_io, 156 .map_io = shark_map_io,
157 .init_irq = shark_init_irq, 157 .init_irq = shark_init_irq,
158 .timer = &shark_timer, 158 .timer = &shark_timer,
159 .dma_zone_size = SZ_4M,
159MACHINE_END 160MACHINE_END
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h
index 4c0831f83b0c..1cf8d6962617 100644
--- a/arch/arm/mach-shark/include/mach/memory.h
+++ b/arch/arm/mach-shark/include/mach/memory.h
@@ -17,8 +17,6 @@
17 */ 17 */
18#define PLAT_PHYS_OFFSET UL(0x08000000) 18#define PLAT_PHYS_OFFSET UL(0x08000000)
19 19
20#define ARM_DMA_ZONE_SIZE SZ_4M
21
22/* 20/*
23 * Cache flushing area 21 * Cache flushing area
24 */ 22 */
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 1fa6f71470de..072016371093 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -242,16 +242,5 @@ ENDPROC(fa_dma_unmap_area)
242 242
243 __INITDATA 243 __INITDATA
244 244
245 .type fa_cache_fns, #object 245 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
246ENTRY(fa_cache_fns) 246 define_cache_functions fa
247 .long fa_flush_icache_all
248 .long fa_flush_kern_cache_all
249 .long fa_flush_user_cache_all
250 .long fa_flush_user_cache_range
251 .long fa_coherent_kern_range
252 .long fa_coherent_user_range
253 .long fa_flush_kern_dcache_area
254 .long fa_dma_map_area
255 .long fa_dma_unmap_area
256 .long fa_dma_flush_range
257 .size fa_cache_fns, . - fa_cache_fns
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index 2e2bc406a18d..c2301f226100 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -129,16 +129,5 @@ ENDPROC(v3_dma_map_area)
129 129
130 __INITDATA 130 __INITDATA
131 131
132 .type v3_cache_fns, #object 132 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
133ENTRY(v3_cache_fns) 133 define_cache_functions v3
134 .long v3_flush_icache_all
135 .long v3_flush_kern_cache_all
136 .long v3_flush_user_cache_all
137 .long v3_flush_user_cache_range
138 .long v3_coherent_kern_range
139 .long v3_coherent_user_range
140 .long v3_flush_kern_dcache_area
141 .long v3_dma_map_area
142 .long v3_dma_unmap_area
143 .long v3_dma_flush_range
144 .size v3_cache_fns, . - v3_cache_fns
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index a8fefb523f19..fd9bb7addc8d 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -141,16 +141,5 @@ ENDPROC(v4_dma_map_area)
141 141
142 __INITDATA 142 __INITDATA
143 143
144 .type v4_cache_fns, #object 144 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
145ENTRY(v4_cache_fns) 145 define_cache_functions v4
146 .long v4_flush_icache_all
147 .long v4_flush_kern_cache_all
148 .long v4_flush_user_cache_all
149 .long v4_flush_user_cache_range
150 .long v4_coherent_kern_range
151 .long v4_coherent_user_range
152 .long v4_flush_kern_dcache_area
153 .long v4_dma_map_area
154 .long v4_dma_unmap_area
155 .long v4_dma_flush_range
156 .size v4_cache_fns, . - v4_cache_fns
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index f40c69656d8d..4f2c14151ccb 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -253,16 +253,5 @@ ENDPROC(v4wb_dma_unmap_area)
253 253
254 __INITDATA 254 __INITDATA
255 255
256 .type v4wb_cache_fns, #object 256 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
257ENTRY(v4wb_cache_fns) 257 define_cache_functions v4wb
258 .long v4wb_flush_icache_all
259 .long v4wb_flush_kern_cache_all
260 .long v4wb_flush_user_cache_all
261 .long v4wb_flush_user_cache_range
262 .long v4wb_coherent_kern_range
263 .long v4wb_coherent_user_range
264 .long v4wb_flush_kern_dcache_area
265 .long v4wb_dma_map_area
266 .long v4wb_dma_unmap_area
267 .long v4wb_dma_flush_range
268 .size v4wb_cache_fns, . - v4wb_cache_fns
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index a7b276dbda11..4d7b467631ce 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -197,16 +197,5 @@ ENDPROC(v4wt_dma_map_area)
197 197
198 __INITDATA 198 __INITDATA
199 199
200 .type v4wt_cache_fns, #object 200 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
201ENTRY(v4wt_cache_fns) 201 define_cache_functions v4wt
202 .long v4wt_flush_icache_all
203 .long v4wt_flush_kern_cache_all
204 .long v4wt_flush_user_cache_all
205 .long v4wt_flush_user_cache_range
206 .long v4wt_coherent_kern_range
207 .long v4wt_coherent_user_range
208 .long v4wt_flush_kern_dcache_area
209 .long v4wt_dma_map_area
210 .long v4wt_dma_unmap_area
211 .long v4wt_dma_flush_range
212 .size v4wt_cache_fns, . - v4wt_cache_fns
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 73b4a8b66a57..74c2e5a33a4d 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -330,16 +330,5 @@ ENDPROC(v6_dma_unmap_area)
330 330
331 __INITDATA 331 __INITDATA
332 332
333 .type v6_cache_fns, #object 333 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
334ENTRY(v6_cache_fns) 334 define_cache_functions v6
335 .long v6_flush_icache_all
336 .long v6_flush_kern_cache_all
337 .long v6_flush_user_cache_all
338 .long v6_flush_user_cache_range
339 .long v6_coherent_kern_range
340 .long v6_coherent_user_range
341 .long v6_flush_kern_dcache_area
342 .long v6_dma_map_area
343 .long v6_dma_unmap_area
344 .long v6_dma_flush_range
345 .size v6_cache_fns, . - v6_cache_fns
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index d32f02b61866..3b24bfa3b828 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -325,16 +325,5 @@ ENDPROC(v7_dma_unmap_area)
325 325
326 __INITDATA 326 __INITDATA
327 327
328 .type v7_cache_fns, #object 328 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
329ENTRY(v7_cache_fns) 329 define_cache_functions v7
330 .long v7_flush_icache_all
331 .long v7_flush_kern_cache_all
332 .long v7_flush_user_cache_all
333 .long v7_flush_user_cache_range
334 .long v7_coherent_kern_range
335 .long v7_coherent_user_range
336 .long v7_flush_kern_dcache_area
337 .long v7_dma_map_area
338 .long v7_dma_unmap_area
339 .long v7_dma_flush_range
340 .size v7_cache_fns, . - v7_cache_fns
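
Each hand-written cache function table replaced above, and the define_cache_functions macro that now emits it, must match the slot order of struct cpu_cache_fns. An approximate C-side view of that structure (member names and prototypes paraphrased from memory of <asm/cacheflush.h>; treat the header as authoritative):

/* Approximation only: one .long per member, in this order. */
struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	void (*coherent_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_area)(void *addr, size_t size);
	void (*dma_map_area)(const void *addr, size_t size, int dir);
	void (*dma_unmap_area)(const void *addr, size_t size, int dir);
	void (*dma_flush_range)(const void *start, const void *end);
};
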
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e5ab4362322f..2fee782077c1 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,6 +212,10 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
212} 212}
213 213
214#ifdef CONFIG_ZONE_DMA 214#ifdef CONFIG_ZONE_DMA
215
216unsigned long arm_dma_zone_size __read_mostly;
217EXPORT_SYMBOL(arm_dma_zone_size);
218
215/* 219/*
216 * The DMA mask corresponding to the maximum bus address allocatable 220 * The DMA mask corresponding to the maximum bus address allocatable
217 * using GFP_DMA. The default here places no restriction on DMA 221 * using GFP_DMA. The default here places no restriction on DMA
@@ -275,19 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
275#endif 279#endif
276 } 280 }
277 281
278#ifdef ARM_DMA_ZONE_SIZE 282#ifdef CONFIG_ZONE_DMA
279#ifndef CONFIG_ZONE_DMA
280#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations
281#endif
282
283 /* 283 /*
284 * Adjust the sizes according to any special requirements for 284 * Adjust the sizes according to any special requirements for
285 * this machine type. 285 * this machine type.
286 */ 286 */
287 arm_adjust_dma_zone(zone_size, zhole_size, 287 if (arm_dma_zone_size) {
288 ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); 288 arm_adjust_dma_zone(zone_size, zhole_size,
289 289 arm_dma_zone_size >> PAGE_SHIFT);
290 arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1; 290 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
291 } else
292 arm_dma_limit = 0xffffffff;
291#endif 293#endif
292 294
293 free_area_init_node(0, zone_size, min, zhole_size); 295 free_area_init_node(0, zone_size, min, zhole_size);
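
With ARM_DMA_ZONE_SIZE gone, the split between ZONE_DMA and ZONE_NORMAL is now driven at runtime by arm_dma_zone_size (filled in from mdesc->dma_zone_size in setup_arch()). A simplified sketch of the effect of the arm_adjust_dma_zone() call above, ignoring hole accounting (helper name and details illustrative):

static void adjust_dma_zone_sketch(unsigned long *zone_size,
				   unsigned long dma_pages)
{
	/* zone_size[0] initially holds all lowmem pages. */
	if (zone_size[0] > dma_pages) {
		zone_size[1] = zone_size[0] - dma_pages;	/* ZONE_NORMAL */
		zone_size[0] = dma_pages;			/* ZONE_DMA    */
	}
}

/* e.g. dma_pages = SZ_64M >> PAGE_SHIFT on an IXP4xx board with PCI. */
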
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index 6c4e7fd6c8af..67469665d47a 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -364,17 +364,8 @@ ENTRY(arm1020_dma_unmap_area)
364 mov pc, lr 364 mov pc, lr
365ENDPROC(arm1020_dma_unmap_area) 365ENDPROC(arm1020_dma_unmap_area)
366 366
367ENTRY(arm1020_cache_fns) 367 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
368 .long arm1020_flush_icache_all 368 define_cache_functions arm1020
369 .long arm1020_flush_kern_cache_all
370 .long arm1020_flush_user_cache_all
371 .long arm1020_flush_user_cache_range
372 .long arm1020_coherent_kern_range
373 .long arm1020_coherent_user_range
374 .long arm1020_flush_kern_dcache_area
375 .long arm1020_dma_map_area
376 .long arm1020_dma_unmap_area
377 .long arm1020_dma_flush_range
378 369
379 .align 5 370 .align 5
380ENTRY(cpu_arm1020_dcache_clean_area) 371ENTRY(cpu_arm1020_dcache_clean_area)
@@ -477,38 +468,14 @@ arm1020_crval:
477 crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 468 crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930
478 469
479 __INITDATA 470 __INITDATA
471 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
472 define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort
480 473
481/*
482 * Purpose : Function pointers used to access above functions - all calls
483 * come through these
484 */
485 .type arm1020_processor_functions, #object
486arm1020_processor_functions:
487 .word v4t_early_abort
488 .word legacy_pabort
489 .word cpu_arm1020_proc_init
490 .word cpu_arm1020_proc_fin
491 .word cpu_arm1020_reset
492 .word cpu_arm1020_do_idle
493 .word cpu_arm1020_dcache_clean_area
494 .word cpu_arm1020_switch_mm
495 .word cpu_arm1020_set_pte_ext
496 .word 0
497 .word 0
498 .word 0
499 .size arm1020_processor_functions, . - arm1020_processor_functions
500 474
501 .section ".rodata" 475 .section ".rodata"
502 476
503 .type cpu_arch_name, #object 477 string cpu_arch_name, "armv5t"
504cpu_arch_name: 478 string cpu_elf_name, "v5"
505 .asciz "armv5t"
506 .size cpu_arch_name, . - cpu_arch_name
507
508 .type cpu_elf_name, #object
509cpu_elf_name:
510 .asciz "v5"
511 .size cpu_elf_name, . - cpu_elf_name
512 479
513 .type cpu_arm1020_name, #object 480 .type cpu_arm1020_name, #object
514cpu_arm1020_name: 481cpu_arm1020_name:
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index 4ce947c19623..4251421c0ed5 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -350,17 +350,8 @@ ENTRY(arm1020e_dma_unmap_area)
350 mov pc, lr 350 mov pc, lr
351ENDPROC(arm1020e_dma_unmap_area) 351ENDPROC(arm1020e_dma_unmap_area)
352 352
353ENTRY(arm1020e_cache_fns) 353 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
354 .long arm1020e_flush_icache_all 354 define_cache_functions arm1020e
355 .long arm1020e_flush_kern_cache_all
356 .long arm1020e_flush_user_cache_all
357 .long arm1020e_flush_user_cache_range
358 .long arm1020e_coherent_kern_range
359 .long arm1020e_coherent_user_range
360 .long arm1020e_flush_kern_dcache_area
361 .long arm1020e_dma_map_area
362 .long arm1020e_dma_unmap_area
363 .long arm1020e_dma_flush_range
364 355
365 .align 5 356 .align 5
366ENTRY(cpu_arm1020e_dcache_clean_area) 357ENTRY(cpu_arm1020e_dcache_clean_area)
@@ -458,43 +449,14 @@ arm1020e_crval:
458 crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 449 crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930
459 450
460 __INITDATA 451 __INITDATA
461 452 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
462/* 453 define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort
463 * Purpose : Function pointers used to access above functions - all calls
464 * come through these
465 */
466 .type arm1020e_processor_functions, #object
467arm1020e_processor_functions:
468 .word v4t_early_abort
469 .word legacy_pabort
470 .word cpu_arm1020e_proc_init
471 .word cpu_arm1020e_proc_fin
472 .word cpu_arm1020e_reset
473 .word cpu_arm1020e_do_idle
474 .word cpu_arm1020e_dcache_clean_area
475 .word cpu_arm1020e_switch_mm
476 .word cpu_arm1020e_set_pte_ext
477 .word 0
478 .word 0
479 .word 0
480 .size arm1020e_processor_functions, . - arm1020e_processor_functions
481 454
482 .section ".rodata" 455 .section ".rodata"
483 456
484 .type cpu_arch_name, #object 457 string cpu_arch_name, "armv5te"
485cpu_arch_name: 458 string cpu_elf_name, "v5"
486 .asciz "armv5te" 459 string cpu_arm1020e_name, "ARM1020E"
487 .size cpu_arch_name, . - cpu_arch_name
488
489 .type cpu_elf_name, #object
490cpu_elf_name:
491 .asciz "v5"
492 .size cpu_elf_name, . - cpu_elf_name
493
494 .type cpu_arm1020e_name, #object
495cpu_arm1020e_name:
496 .asciz "ARM1020E"
497 .size cpu_arm1020e_name, . - cpu_arm1020e_name
498 460
499 .align 461 .align
500 462
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index c8884c5413a2..d283cf3d06e3 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -339,17 +339,8 @@ ENTRY(arm1022_dma_unmap_area)
339 mov pc, lr 339 mov pc, lr
340ENDPROC(arm1022_dma_unmap_area) 340ENDPROC(arm1022_dma_unmap_area)
341 341
342ENTRY(arm1022_cache_fns) 342 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
343 .long arm1022_flush_icache_all 343 define_cache_functions arm1022
344 .long arm1022_flush_kern_cache_all
345 .long arm1022_flush_user_cache_all
346 .long arm1022_flush_user_cache_range
347 .long arm1022_coherent_kern_range
348 .long arm1022_coherent_user_range
349 .long arm1022_flush_kern_dcache_area
350 .long arm1022_dma_map_area
351 .long arm1022_dma_unmap_area
352 .long arm1022_dma_flush_range
353 344
354 .align 5 345 .align 5
355ENTRY(cpu_arm1022_dcache_clean_area) 346ENTRY(cpu_arm1022_dcache_clean_area)
@@ -441,43 +432,14 @@ arm1022_crval:
441 crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 432 crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930
442 433
443 __INITDATA 434 __INITDATA
444 435 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
445/* 436 define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort
446 * Purpose : Function pointers used to access above functions - all calls
447 * come through these
448 */
449 .type arm1022_processor_functions, #object
450arm1022_processor_functions:
451 .word v4t_early_abort
452 .word legacy_pabort
453 .word cpu_arm1022_proc_init
454 .word cpu_arm1022_proc_fin
455 .word cpu_arm1022_reset
456 .word cpu_arm1022_do_idle
457 .word cpu_arm1022_dcache_clean_area
458 .word cpu_arm1022_switch_mm
459 .word cpu_arm1022_set_pte_ext
460 .word 0
461 .word 0
462 .word 0
463 .size arm1022_processor_functions, . - arm1022_processor_functions
464 437
465 .section ".rodata" 438 .section ".rodata"
466 439
467 .type cpu_arch_name, #object 440 string cpu_arch_name, "armv5te"
468cpu_arch_name: 441 string cpu_elf_name, "v5"
469 .asciz "armv5te" 442 string cpu_arm1022_name, "ARM1022"
470 .size cpu_arch_name, . - cpu_arch_name
471
472 .type cpu_elf_name, #object
473cpu_elf_name:
474 .asciz "v5"
475 .size cpu_elf_name, . - cpu_elf_name
476
477 .type cpu_arm1022_name, #object
478cpu_arm1022_name:
479 .asciz "ARM1022"
480 .size cpu_arm1022_name, . - cpu_arm1022_name
481 443
482 .align 444 .align
483 445
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index 413684660aad..678a1ceafed2 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -333,17 +333,8 @@ ENTRY(arm1026_dma_unmap_area)
333 mov pc, lr 333 mov pc, lr
334ENDPROC(arm1026_dma_unmap_area) 334ENDPROC(arm1026_dma_unmap_area)
335 335
336ENTRY(arm1026_cache_fns) 336 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
337 .long arm1026_flush_icache_all 337 define_cache_functions arm1026
338 .long arm1026_flush_kern_cache_all
339 .long arm1026_flush_user_cache_all
340 .long arm1026_flush_user_cache_range
341 .long arm1026_coherent_kern_range
342 .long arm1026_coherent_user_range
343 .long arm1026_flush_kern_dcache_area
344 .long arm1026_dma_map_area
345 .long arm1026_dma_unmap_area
346 .long arm1026_dma_flush_range
347 338
348 .align 5 339 .align 5
349ENTRY(cpu_arm1026_dcache_clean_area) 340ENTRY(cpu_arm1026_dcache_clean_area)
@@ -436,45 +427,15 @@ arm1026_crval:
436 crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 427 crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934
437 428
438 __INITDATA 429 __INITDATA
439 430 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
440/* 431 define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort
441 * Purpose : Function pointers used to access above functions - all calls
442 * come through these
443 */
444 .type arm1026_processor_functions, #object
445arm1026_processor_functions:
446 .word v5t_early_abort
447 .word legacy_pabort
448 .word cpu_arm1026_proc_init
449 .word cpu_arm1026_proc_fin
450 .word cpu_arm1026_reset
451 .word cpu_arm1026_do_idle
452 .word cpu_arm1026_dcache_clean_area
453 .word cpu_arm1026_switch_mm
454 .word cpu_arm1026_set_pte_ext
455 .word 0
456 .word 0
457 .word 0
458 .size arm1026_processor_functions, . - arm1026_processor_functions
459 432
460 .section .rodata 433 .section .rodata
461 434
462 .type cpu_arch_name, #object 435 string cpu_arch_name, "armv5tej"
463cpu_arch_name: 436 string cpu_elf_name, "v5"
464 .asciz "armv5tej"
465 .size cpu_arch_name, . - cpu_arch_name
466
467 .type cpu_elf_name, #object
468cpu_elf_name:
469 .asciz "v5"
470 .size cpu_elf_name, . - cpu_elf_name
471 .align 437 .align
472 438 string cpu_arm1026_name, "ARM1026EJ-S"
473 .type cpu_arm1026_name, #object
474cpu_arm1026_name:
475 .asciz "ARM1026EJ-S"
476 .size cpu_arm1026_name, . - cpu_arm1026_name
477
478 .align 439 .align
479 440
480 .section ".proc.info.init", #alloc, #execinstr 441 .section ".proc.info.init", #alloc, #execinstr
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 50e3543d03bf..e5b974cddac3 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -267,159 +267,57 @@ __arm7_setup: mov r0, #0
267 267
268 __INITDATA 268 __INITDATA
269 269
270/* 270 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
271 * Purpose : Function pointers used to access above functions - all calls 271 define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort
272 * come through these 272 define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort
273 */
274 .type arm6_processor_functions, #object
275ENTRY(arm6_processor_functions)
276 .word cpu_arm6_data_abort
277 .word legacy_pabort
278 .word cpu_arm6_proc_init
279 .word cpu_arm6_proc_fin
280 .word cpu_arm6_reset
281 .word cpu_arm6_do_idle
282 .word cpu_arm6_dcache_clean_area
283 .word cpu_arm6_switch_mm
284 .word cpu_arm6_set_pte_ext
285 .word 0
286 .word 0
287 .word 0
288 .size arm6_processor_functions, . - arm6_processor_functions
289
290/*
291 * Purpose : Function pointers used to access above functions - all calls
292 * come through these
293 */
294 .type arm7_processor_functions, #object
295ENTRY(arm7_processor_functions)
296 .word cpu_arm7_data_abort
297 .word legacy_pabort
298 .word cpu_arm7_proc_init
299 .word cpu_arm7_proc_fin
300 .word cpu_arm7_reset
301 .word cpu_arm7_do_idle
302 .word cpu_arm7_dcache_clean_area
303 .word cpu_arm7_switch_mm
304 .word cpu_arm7_set_pte_ext
305 .word 0
306 .word 0
307 .word 0
308 .size arm7_processor_functions, . - arm7_processor_functions
309 273
310 .section ".rodata" 274 .section ".rodata"
311 275
312 .type cpu_arch_name, #object 276 string cpu_arch_name, "armv3"
313cpu_arch_name: .asciz "armv3" 277 string cpu_elf_name, "v3"
314 .size cpu_arch_name, . - cpu_arch_name 278 string cpu_arm6_name, "ARM6"
315 279 string cpu_arm610_name, "ARM610"
316 .type cpu_elf_name, #object 280 string cpu_arm7_name, "ARM7"
317cpu_elf_name: .asciz "v3" 281 string cpu_arm710_name, "ARM710"
318 .size cpu_elf_name, . - cpu_elf_name
319
320 .type cpu_arm6_name, #object
321cpu_arm6_name: .asciz "ARM6"
322 .size cpu_arm6_name, . - cpu_arm6_name
323
324 .type cpu_arm610_name, #object
325cpu_arm610_name:
326 .asciz "ARM610"
327 .size cpu_arm610_name, . - cpu_arm610_name
328
329 .type cpu_arm7_name, #object
330cpu_arm7_name: .asciz "ARM7"
331 .size cpu_arm7_name, . - cpu_arm7_name
332
333 .type cpu_arm710_name, #object
334cpu_arm710_name:
335 .asciz "ARM710"
336 .size cpu_arm710_name, . - cpu_arm710_name
337 282
338 .align 283 .align
339 284
340 .section ".proc.info.init", #alloc, #execinstr 285 .section ".proc.info.init", #alloc, #execinstr
341 286
342 .type __arm6_proc_info, #object 287.macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
343__arm6_proc_info: 288 cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req
344 .long 0x41560600 289 .type __\name\()_proc_info, #object
345 .long 0xfffffff0 290__\name\()_proc_info:
346 .long 0x00000c1e 291 .long \cpu_val
292 .long \cpu_mask
293 .long \cpu_mm_mmu_flags
347 .long PMD_TYPE_SECT | \ 294 .long PMD_TYPE_SECT | \
348 PMD_BIT4 | \ 295 PMD_BIT4 | \
349 PMD_SECT_AP_WRITE | \ 296 PMD_SECT_AP_WRITE | \
350 PMD_SECT_AP_READ 297 PMD_SECT_AP_READ
351 b __arm6_setup 298 b \cpu_flush
352 .long cpu_arch_name 299 .long cpu_arch_name
353 .long cpu_elf_name 300 .long cpu_elf_name
354 .long HWCAP_SWP | HWCAP_26BIT 301 .long HWCAP_SWP | HWCAP_26BIT
355 .long cpu_arm6_name 302 .long \cpu_name
356 .long arm6_processor_functions 303 .long \cpu_proc_funcs
357 .long v3_tlb_fns 304 .long v3_tlb_fns
358 .long v3_user_fns 305 .long v3_user_fns
359 .long v3_cache_fns 306 .long v3_cache_fns
360 .size __arm6_proc_info, . - __arm6_proc_info 307 .size __\name\()_proc_info, . - __\name\()_proc_info
361 308.endm
362 .type __arm610_proc_info, #object 309
363__arm610_proc_info: 310 arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \
364 .long 0x41560610 311 0x00000c1e, __arm6_setup, arm6_processor_functions
365 .long 0xfffffff0 312 arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \
366 .long 0x00000c1e 313 0x00000c1e, __arm6_setup, arm6_processor_functions
367 .long PMD_TYPE_SECT | \ 314 arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \
368 PMD_BIT4 | \ 315 0x00000c1e, __arm7_setup, arm7_processor_functions
369 PMD_SECT_AP_WRITE | \ 316 arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \
370 PMD_SECT_AP_READ 317 PMD_TYPE_SECT | \
371 b __arm6_setup
372 .long cpu_arch_name
373 .long cpu_elf_name
374 .long HWCAP_SWP | HWCAP_26BIT
375 .long cpu_arm610_name
376 .long arm6_processor_functions
377 .long v3_tlb_fns
378 .long v3_user_fns
379 .long v3_cache_fns
380 .size __arm610_proc_info, . - __arm610_proc_info
381
382 .type __arm7_proc_info, #object
383__arm7_proc_info:
384 .long 0x41007000
385 .long 0xffffff00
386 .long 0x00000c1e
387 .long PMD_TYPE_SECT | \
388 PMD_BIT4 | \
389 PMD_SECT_AP_WRITE | \
390 PMD_SECT_AP_READ
391 b __arm7_setup
392 .long cpu_arch_name
393 .long cpu_elf_name
394 .long HWCAP_SWP | HWCAP_26BIT
395 .long cpu_arm7_name
396 .long arm7_processor_functions
397 .long v3_tlb_fns
398 .long v3_user_fns
399 .long v3_cache_fns
400 .size __arm7_proc_info, . - __arm7_proc_info
401
402 .type __arm710_proc_info, #object
403__arm710_proc_info:
404 .long 0x41007100
405 .long 0xfff8ff00
406 .long PMD_TYPE_SECT | \
407 PMD_SECT_BUFFERABLE | \ 318 PMD_SECT_BUFFERABLE | \
408 PMD_SECT_CACHEABLE | \ 319 PMD_SECT_CACHEABLE | \
409 PMD_BIT4 | \ 320 PMD_BIT4 | \
410 PMD_SECT_AP_WRITE | \ 321 PMD_SECT_AP_WRITE | \
411 PMD_SECT_AP_READ 322 PMD_SECT_AP_READ, \
412 .long PMD_TYPE_SECT | \ 323 __arm7_setup, arm7_processor_functions
413 PMD_BIT4 | \
414 PMD_SECT_AP_WRITE | \
415 PMD_SECT_AP_READ
416 b __arm7_setup
417 .long cpu_arch_name
418 .long cpu_elf_name
419 .long HWCAP_SWP | HWCAP_26BIT
420 .long cpu_arm710_name
421 .long arm7_processor_functions
422 .long v3_tlb_fns
423 .long v3_user_fns
424 .long v3_cache_fns
425 .size __arm710_proc_info, . - __arm710_proc_info
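The six "string ..." lines added above replace the open-coded .type/.asciz/.size triplets on the removed side. The macro itself lives outside this file and is not shown in this diff, so the following is only a minimal sketch of the helper this usage assumes:

	.macro	string name:req, string
	.type	\name , #object
\name:
	.asciz	"\string"
	.size	\name , . - \name
	.endm

Under that assumption, "string cpu_arm6_name, "ARM6"" expands to exactly the four directives that the removed cpu_arm6_name block spelled out by hand.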
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index 7a06e5964f59..55f4e290665a 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -169,46 +169,15 @@ arm720_crval:
169 crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 169 crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130
170 170
171 __INITDATA 171 __INITDATA
172 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
173 define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort
172
173/*
174 * Purpose : Function pointers used to access above functions - all calls
175 * come through these
176 */
177 .type arm720_processor_functions, #object
178ENTRY(arm720_processor_functions)
179 .word v4t_late_abort
180 .word legacy_pabort
181 .word cpu_arm720_proc_init
182 .word cpu_arm720_proc_fin
183 .word cpu_arm720_reset
184 .word cpu_arm720_do_idle
185 .word cpu_arm720_dcache_clean_area
186 .word cpu_arm720_switch_mm
187 .word cpu_arm720_set_pte_ext
188 .word 0
189 .word 0
190 .word 0
191 .size arm720_processor_functions, . - arm720_processor_functions
174
175 .section ".rodata"
176
177 string cpu_arch_name, "armv4t"
178 string cpu_elf_name, "v4"
179 string cpu_arm710_name, "ARM710T"
180 string cpu_arm720_name, "ARM720T"
192
193 .section ".rodata"
194
195 .type cpu_arch_name, #object
196cpu_arch_name: .asciz "armv4t"
197 .size cpu_arch_name, . - cpu_arch_name
198
199 .type cpu_elf_name, #object
200cpu_elf_name: .asciz "v4"
201 .size cpu_elf_name, . - cpu_elf_name
202
203 .type cpu_arm710_name, #object
204cpu_arm710_name:
205 .asciz "ARM710T"
206 .size cpu_arm710_name, . - cpu_arm710_name
207
208 .type cpu_arm720_name, #object
209cpu_arm720_name:
210 .asciz "ARM720T"
211 .size cpu_arm720_name, . - cpu_arm720_name
212 181
213 .align 182 .align
214 183
@@ -218,10 +187,11 @@ cpu_arm720_name:
218 187
219 .section ".proc.info.init", #alloc, #execinstr 188 .section ".proc.info.init", #alloc, #execinstr
220 189
221 .type __arm710_proc_info, #object 190.macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req
222__arm710_proc_info: 191 .type __\name\()_proc_info,#object
223 .long 0x41807100 @ cpu_val 192__\name\()_proc_info:
224 .long 0xffffff00 @ cpu_mask 193 .long \cpu_val
194 .long \cpu_mask
225 .long PMD_TYPE_SECT | \ 195 .long PMD_TYPE_SECT | \
226 PMD_SECT_BUFFERABLE | \ 196 PMD_SECT_BUFFERABLE | \
227 PMD_SECT_CACHEABLE | \ 197 PMD_SECT_CACHEABLE | \
@@ -232,38 +202,17 @@ __arm710_proc_info:
232 PMD_BIT4 | \ 202 PMD_BIT4 | \
233 PMD_SECT_AP_WRITE | \ 203 PMD_SECT_AP_WRITE | \
234 PMD_SECT_AP_READ 204 PMD_SECT_AP_READ
235 b __arm710_setup @ cpu_flush 205 b \cpu_flush @ cpu_flush
236 .long cpu_arch_name @ arch_name 206 .long cpu_arch_name @ arch_name
237 .long cpu_elf_name @ elf_name 207 .long cpu_elf_name @ elf_name
238 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap 208 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap
239 .long cpu_arm710_name @ name 209 .long \cpu_name
240 .long arm720_processor_functions 210 .long arm720_processor_functions
241 .long v4_tlb_fns 211 .long v4_tlb_fns
242 .long v4wt_user_fns 212 .long v4wt_user_fns
243 .long v4_cache_fns 213 .long v4_cache_fns
244 .size __arm710_proc_info, . - __arm710_proc_info 214 .size __\name\()_proc_info, . - __\name\()_proc_info
215.endm
245 216
246 .type __arm720_proc_info, #object 217 arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup
247__arm720_proc_info: 218 arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup
248 .long 0x41807200 @ cpu_val
249 .long 0xffffff00 @ cpu_mask
250 .long PMD_TYPE_SECT | \
251 PMD_SECT_BUFFERABLE | \
252 PMD_SECT_CACHEABLE | \
253 PMD_BIT4 | \
254 PMD_SECT_AP_WRITE | \
255 PMD_SECT_AP_READ
256 .long PMD_TYPE_SECT | \
257 PMD_BIT4 | \
258 PMD_SECT_AP_WRITE | \
259 PMD_SECT_AP_READ
260 b __arm720_setup @ cpu_flush
261 .long cpu_arch_name @ arch_name
262 .long cpu_elf_name @ elf_name
263 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap
264 .long cpu_arm720_name @ name
265 .long arm720_processor_functions
266 .long v4_tlb_fns
267 .long v4wt_user_fns
268 .long v4_cache_fns
269 .size __arm720_proc_info, . - __arm720_proc_info
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 6f9d12effee1..4506be3adda6 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -17,6 +17,8 @@
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19 19
20#include "proc-macros.S"
21
20 .text 22 .text
21/* 23/*
22 * cpu_arm740_proc_init() 24 * cpu_arm740_proc_init()
@@ -115,42 +117,14 @@ __arm740_setup:
115 117
116 __INITDATA 118 __INITDATA
117 119
120 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
121 define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1
118/*
119 * Purpose : Function pointers used to access above functions - all calls
120 * come through these
121 */
122 .type arm740_processor_functions, #object
123ENTRY(arm740_processor_functions)
124 .word v4t_late_abort
125 .word legacy_pabort
126 .word cpu_arm740_proc_init
127 .word cpu_arm740_proc_fin
128 .word cpu_arm740_reset
129 .word cpu_arm740_do_idle
130 .word cpu_arm740_dcache_clean_area
131 .word cpu_arm740_switch_mm
132 .word 0 @ cpu_*_set_pte
133 .word 0
134 .word 0
135 .word 0
136 .size arm740_processor_functions, . - arm740_processor_functions
137 122
138 .section ".rodata" 123 .section ".rodata"
139 124
125 string cpu_arch_name, "armv4"
126 string cpu_elf_name, "v4"
127 string cpu_arm740_name, "ARM740T"
140 .type cpu_arch_name, #object
141cpu_arch_name:
142 .asciz "armv4"
143 .size cpu_arch_name, . - cpu_arch_name
144
145 .type cpu_elf_name, #object
146cpu_elf_name:
147 .asciz "v4"
148 .size cpu_elf_name, . - cpu_elf_name
149
150 .type cpu_arm740_name, #object
151cpu_arm740_name:
152 .ascii "ARM740T"
153 .size cpu_arm740_name, . - cpu_arm740_name
154 128
155 .align 129 .align
156 130
@@ -170,5 +144,3 @@ __arm740_proc_info:
170 .long 0 144 .long 0
171 .long v3_cache_fns @ cache model 145 .long v3_cache_fns @ cache model
172 .size __arm740_proc_info, . - __arm740_proc_info 146 .size __arm740_proc_info, . - __arm740_proc_info
173
174
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 537ffcb0646d..7e0e1fe4ed4d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -17,6 +17,8 @@
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19 19
20#include "proc-macros.S"
21
20 .text 22 .text
21/* 23/*
22 * cpu_arm7tdmi_proc_init() 24 * cpu_arm7tdmi_proc_init()
@@ -55,197 +57,57 @@ __arm7tdmi_setup:
55 57
56 __INITDATA 58 __INITDATA
57 59
60 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
61 define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1
58/*
59 * Purpose : Function pointers used to access above functions - all calls
60 * come through these
61 */
62 .type arm7tdmi_processor_functions, #object
63ENTRY(arm7tdmi_processor_functions)
64 .word v4t_late_abort
65 .word legacy_pabort
66 .word cpu_arm7tdmi_proc_init
67 .word cpu_arm7tdmi_proc_fin
68 .word cpu_arm7tdmi_reset
69 .word cpu_arm7tdmi_do_idle
70 .word cpu_arm7tdmi_dcache_clean_area
71 .word cpu_arm7tdmi_switch_mm
72 .word 0 @ cpu_*_set_pte
73 .word 0
74 .word 0
75 .word 0
76 .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
77 62
78 .section ".rodata" 63 .section ".rodata"
79 64
65 string cpu_arch_name, "armv4t"
66 string cpu_elf_name, "v4"
67 string cpu_arm7tdmi_name, "ARM7TDMI"
68 string cpu_triscenda7_name, "Triscend-A7x"
69 string cpu_at91_name, "Atmel-AT91M40xxx"
70 string cpu_s3c3410_name, "Samsung-S3C3410"
71 string cpu_s3c44b0x_name, "Samsung-S3C44B0x"
72 string cpu_s3c4510b_name, "Samsung-S3C4510B"
73 string cpu_s3c4530_name, "Samsung-S3C4530"
74 string cpu_netarm_name, "NETARM"
80 .type cpu_arch_name, #object
81cpu_arch_name:
82 .asciz "armv4t"
83 .size cpu_arch_name, . - cpu_arch_name
84
85 .type cpu_elf_name, #object
86cpu_elf_name:
87 .asciz "v4"
88 .size cpu_elf_name, . - cpu_elf_name
89
90 .type cpu_arm7tdmi_name, #object
91cpu_arm7tdmi_name:
92 .asciz "ARM7TDMI"
93 .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name
94
95 .type cpu_triscenda7_name, #object
96cpu_triscenda7_name:
97 .asciz "Triscend-A7x"
98 .size cpu_triscenda7_name, . - cpu_triscenda7_name
99
100 .type cpu_at91_name, #object
101cpu_at91_name:
102 .asciz "Atmel-AT91M40xxx"
103 .size cpu_at91_name, . - cpu_at91_name
104
105 .type cpu_s3c3410_name, #object
106cpu_s3c3410_name:
107 .asciz "Samsung-S3C3410"
108 .size cpu_s3c3410_name, . - cpu_s3c3410_name
109
110 .type cpu_s3c44b0x_name, #object
111cpu_s3c44b0x_name:
112 .asciz "Samsung-S3C44B0x"
113 .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name
114
115 .type cpu_s3c4510b, #object
116cpu_s3c4510b_name:
117 .asciz "Samsung-S3C4510B"
118 .size cpu_s3c4510b_name, . - cpu_s3c4510b_name
119
120 .type cpu_s3c4530_name, #object
121cpu_s3c4530_name:
122 .asciz "Samsung-S3C4530"
123 .size cpu_s3c4530_name, . - cpu_s3c4530_name
124
125 .type cpu_netarm_name, #object
126cpu_netarm_name:
127 .asciz "NETARM"
128 .size cpu_netarm_name, . - cpu_netarm_name
129 75
130 .align 76 .align
131 77
132 .section ".proc.info.init", #alloc, #execinstr 78 .section ".proc.info.init", #alloc, #execinstr
133 79
134 .type __arm7tdmi_proc_info, #object 80.macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \
135__arm7tdmi_proc_info: 81 extra_hwcaps=0
136 .long 0x41007700 82 .type __\name\()_proc_info, #object
137 .long 0xfff8ff00 83__\name\()_proc_info:
138 .long 0 84 .long \cpu_val
139 .long 0 85 .long \cpu_mask
140 b __arm7tdmi_setup
141 .long cpu_arch_name
142 .long cpu_elf_name
143 .long HWCAP_SWP | HWCAP_26BIT
144 .long cpu_arm7tdmi_name
145 .long arm7tdmi_processor_functions
146 .long 0
147 .long 0
148 .long v4_cache_fns
149 .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
150
151 .type __triscenda7_proc_info, #object
152__triscenda7_proc_info:
153 .long 0x0001d2ff
154 .long 0x0001ffff
155 .long 0
156 .long 0
157 b __arm7tdmi_setup
158 .long cpu_arch_name
159 .long cpu_elf_name
160 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
161 .long cpu_triscenda7_name
162 .long arm7tdmi_processor_functions
163 .long 0
164 .long 0
165 .long v4_cache_fns
166 .size __triscenda7_proc_info, . - __triscenda7_proc_info
167
168 .type __at91_proc_info, #object
169__at91_proc_info:
170 .long 0x14000040
171 .long 0xfff000e0
172 .long 0
173 .long 0
174 b __arm7tdmi_setup
175 .long cpu_arch_name
176 .long cpu_elf_name
177 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
178 .long cpu_at91_name
179 .long arm7tdmi_processor_functions
180 .long 0
181 .long 0
182 .long v4_cache_fns
183 .size __at91_proc_info, . - __at91_proc_info
184
185 .type __s3c4510b_proc_info, #object
186__s3c4510b_proc_info:
187 .long 0x36365000
188 .long 0xfffff000
189 .long 0
190 .long 0
191 b __arm7tdmi_setup
192 .long cpu_arch_name
193 .long cpu_elf_name
194 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
195 .long cpu_s3c4510b_name
196 .long arm7tdmi_processor_functions
197 .long 0
198 .long 0
199 .long v4_cache_fns
200 .size __s3c4510b_proc_info, . - __s3c4510b_proc_info
201
202 .type __s3c4530_proc_info, #object
203__s3c4530_proc_info:
204 .long 0x4c000000
205 .long 0xfff000e0
206 .long 0
207 .long 0
208 b __arm7tdmi_setup
209 .long cpu_arch_name
210 .long cpu_elf_name
211 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
212 .long cpu_s3c4530_name
213 .long arm7tdmi_processor_functions
214 .long 0
215 .long 0
216 .long v4_cache_fns
217 .size __s3c4530_proc_info, . - __s3c4530_proc_info
218
219 .type __s3c3410_proc_info, #object
220__s3c3410_proc_info:
221 .long 0x34100000
222 .long 0xffff0000
223 .long 0
224 .long 0
225 b __arm7tdmi_setup
226 .long cpu_arch_name
227 .long cpu_elf_name
228 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
229 .long cpu_s3c3410_name
230 .long arm7tdmi_processor_functions
231 .long 0
232 .long 0
233 .long v4_cache_fns
234 .size __s3c3410_proc_info, . - __s3c3410_proc_info
235
236 .type __s3c44b0x_proc_info, #object
237__s3c44b0x_proc_info:
238 .long 0x44b00000
239 .long 0xffff0000
240 .long 0 86 .long 0
241 .long 0 87 .long 0
242 b __arm7tdmi_setup 88 b __arm7tdmi_setup
243 .long cpu_arch_name 89 .long cpu_arch_name
244 .long cpu_elf_name 90 .long cpu_elf_name
245 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT 91 .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps )
246 .long cpu_s3c44b0x_name 92 .long \cpu_name
247 .long arm7tdmi_processor_functions 93 .long arm7tdmi_processor_functions
248 .long 0 94 .long 0
249 .long 0 95 .long 0
250 .long v4_cache_fns 96 .long v4_cache_fns
251 .size __s3c44b0x_proc_info, . - __s3c44b0x_proc_info 97 .size __\name\()_proc_info, . - __\name\()_proc_info
98.endm
99
100 arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \
101 cpu_arm7tdmi_name
102 arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \
103 cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB
104 arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \
105 cpu_at91_name, extra_hwcaps=HWCAP_THUMB
106 arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \
107 cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB
108 arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \
109 cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB
110 arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \
111 cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB
112 arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \
113 cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB
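The extra_hwcaps parameter (default 0) is what lets one macro cover both hwcap variants the removed proc_info records used; only the elf_hwcap word differs between the expansions. A sketch of the two cases, taken from the macro body above:

	@ arm7tdmi_proc_info arm7tdmi, ...                     (no extra_hwcaps given)
	.long	HWCAP_SWP | HWCAP_26BIT | ( 0 )

	@ arm7tdmi_proc_info triscenda7, ..., extra_hwcaps=HWCAP_THUMB
	.long	HWCAP_SWP | HWCAP_26BIT | ( HWCAP_THUMB )

which matches the HWCAP_SWP | HWCAP_26BIT and HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT words of the deleted __arm7tdmi_proc_info and __triscenda7_proc_info entries.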
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index bf8a1d1cccb6..92bd102e3982 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -315,18 +315,8 @@ ENTRY(arm920_dma_unmap_area)
315 mov pc, lr 315 mov pc, lr
316ENDPROC(arm920_dma_unmap_area) 316ENDPROC(arm920_dma_unmap_area)
317 317
318 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
319 define_cache_functions arm920
318ENTRY(arm920_cache_fns)
319 .long arm920_flush_icache_all
320 .long arm920_flush_kern_cache_all
321 .long arm920_flush_user_cache_all
322 .long arm920_flush_user_cache_range
323 .long arm920_coherent_kern_range
324 .long arm920_coherent_user_range
325 .long arm920_flush_kern_dcache_area
326 .long arm920_dma_map_area
327 .long arm920_dma_unmap_area
328 .long arm920_dma_flush_range
329
330#endif 320#endif
331 321
332 322
@@ -416,9 +406,6 @@ ENTRY(cpu_arm920_do_resume)
416 PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE 406 PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
417 b cpu_resume_mmu 407 b cpu_resume_mmu
418ENDPROC(cpu_arm920_do_resume) 408ENDPROC(cpu_arm920_do_resume)
419#else
420#define cpu_arm920_do_suspend 0
421#define cpu_arm920_do_resume 0
422#endif 409#endif
423 410
424 __CPUINIT 411 __CPUINIT
@@ -450,43 +437,14 @@ arm920_crval:
450 crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 437 crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
451 438
452 __INITDATA 439 __INITDATA
440 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
441 define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1
453
454/*
455 * Purpose : Function pointers used to access above functions - all calls
456 * come through these
457 */
458 .type arm920_processor_functions, #object
459arm920_processor_functions:
460 .word v4t_early_abort
461 .word legacy_pabort
462 .word cpu_arm920_proc_init
463 .word cpu_arm920_proc_fin
464 .word cpu_arm920_reset
465 .word cpu_arm920_do_idle
466 .word cpu_arm920_dcache_clean_area
467 .word cpu_arm920_switch_mm
468 .word cpu_arm920_set_pte_ext
469 .word cpu_arm920_suspend_size
470 .word cpu_arm920_do_suspend
471 .word cpu_arm920_do_resume
472 .size arm920_processor_functions, . - arm920_processor_functions
473 442
474 .section ".rodata" 443 .section ".rodata"
475 444
445 string cpu_arch_name, "armv4t"
446 string cpu_elf_name, "v4"
447 string cpu_arm920_name, "ARM920T"
476 .type cpu_arch_name, #object
477cpu_arch_name:
478 .asciz "armv4t"
479 .size cpu_arch_name, . - cpu_arch_name
480
481 .type cpu_elf_name, #object
482cpu_elf_name:
483 .asciz "v4"
484 .size cpu_elf_name, . - cpu_elf_name
485
486 .type cpu_arm920_name, #object
487cpu_arm920_name:
488 .asciz "ARM920T"
489 .size cpu_arm920_name, . - cpu_arm920_name
490 448
491 .align 449 .align
492 450
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index 95ba1fc56e4d..490e18833857 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -317,18 +317,8 @@ ENTRY(arm922_dma_unmap_area)
317 mov pc, lr 317 mov pc, lr
318ENDPROC(arm922_dma_unmap_area) 318ENDPROC(arm922_dma_unmap_area)
319 319
320 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
321 define_cache_functions arm922
320ENTRY(arm922_cache_fns)
321 .long arm922_flush_icache_all
322 .long arm922_flush_kern_cache_all
323 .long arm922_flush_user_cache_all
324 .long arm922_flush_user_cache_range
325 .long arm922_coherent_kern_range
326 .long arm922_coherent_user_range
327 .long arm922_flush_kern_dcache_area
328 .long arm922_dma_map_area
329 .long arm922_dma_unmap_area
330 .long arm922_dma_flush_range
331
332#endif 322#endif
333 323
334 324
@@ -420,43 +410,14 @@ arm922_crval:
420 crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 410 crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130
421 411
422 __INITDATA 412 __INITDATA
413 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
414 define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort
423
424/*
425 * Purpose : Function pointers used to access above functions - all calls
426 * come through these
427 */
428 .type arm922_processor_functions, #object
429arm922_processor_functions:
430 .word v4t_early_abort
431 .word legacy_pabort
432 .word cpu_arm922_proc_init
433 .word cpu_arm922_proc_fin
434 .word cpu_arm922_reset
435 .word cpu_arm922_do_idle
436 .word cpu_arm922_dcache_clean_area
437 .word cpu_arm922_switch_mm
438 .word cpu_arm922_set_pte_ext
439 .word 0
440 .word 0
441 .word 0
442 .size arm922_processor_functions, . - arm922_processor_functions
443 415
444 .section ".rodata" 416 .section ".rodata"
445 417
418 string cpu_arch_name, "armv4t"
419 string cpu_elf_name, "v4"
420 string cpu_arm922_name, "ARM922T"
446 .type cpu_arch_name, #object
447cpu_arch_name:
448 .asciz "armv4t"
449 .size cpu_arch_name, . - cpu_arch_name
450
451 .type cpu_elf_name, #object
452cpu_elf_name:
453 .asciz "v4"
454 .size cpu_elf_name, . - cpu_elf_name
455
456 .type cpu_arm922_name, #object
457cpu_arm922_name:
458 .asciz "ARM922T"
459 .size cpu_arm922_name, . - cpu_arm922_name
460 421
461 .align 422 .align
462 423
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 541e4774eea1..51d494be057e 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -372,17 +372,8 @@ ENTRY(arm925_dma_unmap_area)
372 mov pc, lr 372 mov pc, lr
373ENDPROC(arm925_dma_unmap_area) 373ENDPROC(arm925_dma_unmap_area)
374 374
375 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
376 define_cache_functions arm925
375ENTRY(arm925_cache_fns)
376 .long arm925_flush_icache_all
377 .long arm925_flush_kern_cache_all
378 .long arm925_flush_user_cache_all
379 .long arm925_flush_user_cache_range
380 .long arm925_coherent_kern_range
381 .long arm925_coherent_user_range
382 .long arm925_flush_kern_dcache_area
383 .long arm925_dma_map_area
384 .long arm925_dma_unmap_area
385 .long arm925_dma_flush_range
386 377
387ENTRY(cpu_arm925_dcache_clean_area) 378ENTRY(cpu_arm925_dcache_clean_area)
388#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 379#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -487,52 +478,24 @@ arm925_crval:
487 crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 478 crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130
488 479
489 __INITDATA 480 __INITDATA
481 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
482 define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort
490
491/*
492 * Purpose : Function pointers used to access above functions - all calls
493 * come through these
494 */
495 .type arm925_processor_functions, #object
496arm925_processor_functions:
497 .word v4t_early_abort
498 .word legacy_pabort
499 .word cpu_arm925_proc_init
500 .word cpu_arm925_proc_fin
501 .word cpu_arm925_reset
502 .word cpu_arm925_do_idle
503 .word cpu_arm925_dcache_clean_area
504 .word cpu_arm925_switch_mm
505 .word cpu_arm925_set_pte_ext
506 .word 0
507 .word 0
508 .word 0
509 .size arm925_processor_functions, . - arm925_processor_functions
510 483
511 .section ".rodata" 484 .section ".rodata"
512 485
486 string cpu_arch_name, "armv4t"
487 string cpu_elf_name, "v4"
488 string cpu_arm925_name, "ARM925T"
513 .type cpu_arch_name, #object
514cpu_arch_name:
515 .asciz "armv4t"
516 .size cpu_arch_name, . - cpu_arch_name
517
518 .type cpu_elf_name, #object
519cpu_elf_name:
520 .asciz "v4"
521 .size cpu_elf_name, . - cpu_elf_name
522
523 .type cpu_arm925_name, #object
524cpu_arm925_name:
525 .asciz "ARM925T"
526 .size cpu_arm925_name, . - cpu_arm925_name
527 489
528 .align 490 .align
529 491
530 .section ".proc.info.init", #alloc, #execinstr 492 .section ".proc.info.init", #alloc, #execinstr
531 493
532 .type __arm925_proc_info,#object 494.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
533__arm925_proc_info: 495 .type __\name\()_proc_info,#object
534 .long 0x54029250 496__\name\()_proc_info:
535 .long 0xfffffff0 497 .long \cpu_val
498 .long \cpu_mask
536 .long PMD_TYPE_SECT | \ 499 .long PMD_TYPE_SECT | \
537 PMD_BIT4 | \ 500 PMD_BIT4 | \
538 PMD_SECT_AP_WRITE | \ 501 PMD_SECT_AP_WRITE | \
@@ -550,27 +513,8 @@ __arm925_proc_info:
550 .long v4wbi_tlb_fns 513 .long v4wbi_tlb_fns
551 .long v4wb_user_fns 514 .long v4wb_user_fns
552 .long arm925_cache_fns 515 .long arm925_cache_fns
553 .size __arm925_proc_info, . - __arm925_proc_info 516 .size __\name\()_proc_info, . - __\name\()_proc_info
517.endm
554 518
555 .type __arm915_proc_info,#object 519 arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
556__arm915_proc_info: 520 arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name
557 .long 0x54029150
558 .long 0xfffffff0
559 .long PMD_TYPE_SECT | \
560 PMD_BIT4 | \
561 PMD_SECT_AP_WRITE | \
562 PMD_SECT_AP_READ
563 .long PMD_TYPE_SECT | \
564 PMD_BIT4 | \
565 PMD_SECT_AP_WRITE | \
566 PMD_SECT_AP_READ
567 b __arm925_setup
568 .long cpu_arch_name
569 .long cpu_elf_name
570 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
571 .long cpu_arm925_name
572 .long arm925_processor_functions
573 .long v4wbi_tlb_fns
574 .long v4wb_user_fns
575 .long arm925_cache_fns
576 .size __arm925_proc_info, . - __arm925_proc_info
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 0ed85d930c09..2bbcf053dffd 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -335,17 +335,8 @@ ENTRY(arm926_dma_unmap_area)
335 mov pc, lr 335 mov pc, lr
336ENDPROC(arm926_dma_unmap_area) 336ENDPROC(arm926_dma_unmap_area)
337 337
338 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
339 define_cache_functions arm926
338ENTRY(arm926_cache_fns)
339 .long arm926_flush_icache_all
340 .long arm926_flush_kern_cache_all
341 .long arm926_flush_user_cache_all
342 .long arm926_flush_user_cache_range
343 .long arm926_coherent_kern_range
344 .long arm926_coherent_user_range
345 .long arm926_flush_kern_dcache_area
346 .long arm926_dma_map_area
347 .long arm926_dma_unmap_area
348 .long arm926_dma_flush_range
349 340
350ENTRY(cpu_arm926_dcache_clean_area) 341ENTRY(cpu_arm926_dcache_clean_area)
351#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 342#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -430,9 +421,6 @@ ENTRY(cpu_arm926_do_resume)
430 PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE 421 PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
431 b cpu_resume_mmu 422 b cpu_resume_mmu
432ENDPROC(cpu_arm926_do_resume) 423ENDPROC(cpu_arm926_do_resume)
433#else
434#define cpu_arm926_do_suspend 0
435#define cpu_arm926_do_resume 0
436#endif 424#endif
437 425
438 __CPUINIT 426 __CPUINIT
@@ -475,42 +463,14 @@ arm926_crval:
475 463
476 __INITDATA 464 __INITDATA
477 465
466 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
467 define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1
478/*
479 * Purpose : Function pointers used to access above functions - all calls
480 * come through these
481 */
482 .type arm926_processor_functions, #object
483arm926_processor_functions:
484 .word v5tj_early_abort
485 .word legacy_pabort
486 .word cpu_arm926_proc_init
487 .word cpu_arm926_proc_fin
488 .word cpu_arm926_reset
489 .word cpu_arm926_do_idle
490 .word cpu_arm926_dcache_clean_area
491 .word cpu_arm926_switch_mm
492 .word cpu_arm926_set_pte_ext
493 .word cpu_arm926_suspend_size
494 .word cpu_arm926_do_suspend
495 .word cpu_arm926_do_resume
496 .size arm926_processor_functions, . - arm926_processor_functions
497 468
498 .section ".rodata" 469 .section ".rodata"
499 470
471 string cpu_arch_name, "armv5tej"
472 string cpu_elf_name, "v5"
473 string cpu_arm926_name, "ARM926EJ-S"
500 .type cpu_arch_name, #object
501cpu_arch_name:
502 .asciz "armv5tej"
503 .size cpu_arch_name, . - cpu_arch_name
504
505 .type cpu_elf_name, #object
506cpu_elf_name:
507 .asciz "v5"
508 .size cpu_elf_name, . - cpu_elf_name
509
510 .type cpu_arm926_name, #object
511cpu_arm926_name:
512 .asciz "ARM926EJ-S"
513 .size cpu_arm926_name, . - cpu_arm926_name
514 474
515 .align 475 .align
516 476
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 26aea3f71c26..ac750d506153 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -264,17 +264,8 @@ ENTRY(arm940_dma_unmap_area)
264 mov pc, lr 264 mov pc, lr
265ENDPROC(arm940_dma_unmap_area) 265ENDPROC(arm940_dma_unmap_area)
266 266
267 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
268 define_cache_functions arm940
267ENTRY(arm940_cache_fns)
268 .long arm940_flush_icache_all
269 .long arm940_flush_kern_cache_all
270 .long arm940_flush_user_cache_all
271 .long arm940_flush_user_cache_range
272 .long arm940_coherent_kern_range
273 .long arm940_coherent_user_range
274 .long arm940_flush_kern_dcache_area
275 .long arm940_dma_map_area
276 .long arm940_dma_unmap_area
277 .long arm940_dma_flush_range
278 269
279 __CPUINIT 270 __CPUINIT
280 271
@@ -348,42 +339,14 @@ __arm940_setup:
348 339
349 __INITDATA 340 __INITDATA
350 341
342 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
343 define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
351/*
352 * Purpose : Function pointers used to access above functions - all calls
353 * come through these
354 */
355 .type arm940_processor_functions, #object
356ENTRY(arm940_processor_functions)
357 .word nommu_early_abort
358 .word legacy_pabort
359 .word cpu_arm940_proc_init
360 .word cpu_arm940_proc_fin
361 .word cpu_arm940_reset
362 .word cpu_arm940_do_idle
363 .word cpu_arm940_dcache_clean_area
364 .word cpu_arm940_switch_mm
365 .word 0 @ cpu_*_set_pte
366 .word 0
367 .word 0
368 .word 0
369 .size arm940_processor_functions, . - arm940_processor_functions
370 344
371 .section ".rodata" 345 .section ".rodata"
372 346
347 string cpu_arch_name, "armv4t"
348 string cpu_elf_name, "v4"
349 string cpu_arm940_name, "ARM940T"
373 .type cpu_arch_name, #object
374cpu_arch_name:
375 .asciz "armv4t"
376 .size cpu_arch_name, . - cpu_arch_name
377
378 .type cpu_elf_name, #object
379cpu_elf_name:
380 .asciz "v4"
381 .size cpu_elf_name, . - cpu_elf_name
382
383 .type cpu_arm940_name, #object
384cpu_arm940_name:
385 .ascii "ARM940T"
386 .size cpu_arm940_name, . - cpu_arm940_name
387 350
388 .align 351 .align
389 352
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 8063345406fe..f8f7ea34bfc5 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -306,18 +306,8 @@ ENTRY(arm946_dma_unmap_area)
306 mov pc, lr 306 mov pc, lr
307ENDPROC(arm946_dma_unmap_area) 307ENDPROC(arm946_dma_unmap_area)
308 308
309 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
310 define_cache_functions arm946
309ENTRY(arm946_cache_fns)
310 .long arm946_flush_icache_all
311 .long arm946_flush_kern_cache_all
312 .long arm946_flush_user_cache_all
313 .long arm946_flush_user_cache_range
314 .long arm946_coherent_kern_range
315 .long arm946_coherent_user_range
316 .long arm946_flush_kern_dcache_area
317 .long arm946_dma_map_area
318 .long arm946_dma_unmap_area
319 .long arm946_dma_flush_range
320
321 311
322ENTRY(cpu_arm946_dcache_clean_area) 312ENTRY(cpu_arm946_dcache_clean_area)
323#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH 313#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
@@ -403,43 +393,14 @@ __arm946_setup:
403 393
404 __INITDATA 394 __INITDATA
405 395
396 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
397 define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
406/*
407 * Purpose : Function pointers used to access above functions - all calls
408 * come through these
409 */
410 .type arm946_processor_functions, #object
411ENTRY(arm946_processor_functions)
412 .word nommu_early_abort
413 .word legacy_pabort
414 .word cpu_arm946_proc_init
415 .word cpu_arm946_proc_fin
416 .word cpu_arm946_reset
417 .word cpu_arm946_do_idle
418
419 .word cpu_arm946_dcache_clean_area
420 .word cpu_arm946_switch_mm
421 .word 0 @ cpu_*_set_pte
422 .word 0
423 .word 0
424 .word 0
425 .size arm946_processor_functions, . - arm946_processor_functions
426 398
427 .section ".rodata" 399 .section ".rodata"
428 400
401 string cpu_arch_name, "armv5te"
402 string cpu_elf_name, "v5t"
403 string cpu_arm946_name, "ARM946E-S"
429 .type cpu_arch_name, #object
430cpu_arch_name:
431 .asciz "armv5te"
432 .size cpu_arch_name, . - cpu_arch_name
433
434 .type cpu_elf_name, #object
435cpu_elf_name:
436 .asciz "v5t"
437 .size cpu_elf_name, . - cpu_elf_name
438
439 .type cpu_arm946_name, #object
440cpu_arm946_name:
441 .ascii "ARM946E-S"
442 .size cpu_arm946_name, . - cpu_arm946_name
443 404
444 .align 405 .align
445 406
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 546b54da1005..2120f9e2af7f 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -17,6 +17,8 @@
17#include <asm/pgtable.h> 17#include <asm/pgtable.h>
18#include <asm/ptrace.h> 18#include <asm/ptrace.h>
19 19
20#include "proc-macros.S"
21
20 .text 22 .text
21/* 23/*
22 * cpu_arm9tdmi_proc_init() 24 * cpu_arm9tdmi_proc_init()
@@ -55,82 +57,38 @@ __arm9tdmi_setup:
55 57
56 __INITDATA 58 __INITDATA
57 59
60 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
61 define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1
58/*
59 * Purpose : Function pointers used to access above functions - all calls
60 * come through these
61 */
62 .type arm9tdmi_processor_functions, #object
63ENTRY(arm9tdmi_processor_functions)
64 .word nommu_early_abort
65 .word legacy_pabort
66 .word cpu_arm9tdmi_proc_init
67 .word cpu_arm9tdmi_proc_fin
68 .word cpu_arm9tdmi_reset
69 .word cpu_arm9tdmi_do_idle
70 .word cpu_arm9tdmi_dcache_clean_area
71 .word cpu_arm9tdmi_switch_mm
72 .word 0 @ cpu_*_set_pte
73 .word 0
74 .word 0
75 .word 0
76 .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
77 62
78 .section ".rodata" 63 .section ".rodata"
79 64
65 string cpu_arch_name, "armv4t"
66 string cpu_elf_name, "v4"
67 string cpu_arm9tdmi_name, "ARM9TDMI"
68 string cpu_p2001_name, "P2001"
80 .type cpu_arch_name, #object
81cpu_arch_name:
82 .asciz "armv4t"
83 .size cpu_arch_name, . - cpu_arch_name
84
85 .type cpu_elf_name, #object
86cpu_elf_name:
87 .asciz "v4"
88 .size cpu_elf_name, . - cpu_elf_name
89
90 .type cpu_arm9tdmi_name, #object
91cpu_arm9tdmi_name:
92 .asciz "ARM9TDMI"
93 .size cpu_arm9tdmi_name, . - cpu_arm9tdmi_name
94
95 .type cpu_p2001_name, #object
96cpu_p2001_name:
97 .asciz "P2001"
98 .size cpu_p2001_name, . - cpu_p2001_name
99 69
100 .align 70 .align
101 71
102 .section ".proc.info.init", #alloc, #execinstr 72 .section ".proc.info.init", #alloc, #execinstr
103 73
104 .type __arm9tdmi_proc_info, #object 74.macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
105__arm9tdmi_proc_info: 75 .type __\name\()_proc_info, #object
106 .long 0x41009900 76__\name\()_proc_info:
107 .long 0xfff8ff00 77 .long \cpu_val
78 .long \cpu_mask
108 .long 0 79 .long 0
109 .long 0 80 .long 0
110 b __arm9tdmi_setup 81 b __arm9tdmi_setup
111 .long cpu_arch_name 82 .long cpu_arch_name
112 .long cpu_elf_name 83 .long cpu_elf_name
113 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT 84 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
114 .long cpu_arm9tdmi_name 85 .long \cpu_name
115 .long arm9tdmi_processor_functions 86 .long arm9tdmi_processor_functions
116 .long 0 87 .long 0
117 .long 0 88 .long 0
118 .long v4_cache_fns 89 .long v4_cache_fns
119 .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info 90 .size __\name\()_proc_info, . - __\name\()_proc_info
91.endm
120 92
121 .type __p2001_proc_info, #object 93 arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name
122__p2001_proc_info: 94 arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name
123 .long 0x41029000
124 .long 0xffffffff
125 .long 0
126 .long 0
127 b __arm9tdmi_setup
128 .long cpu_arch_name
129 .long cpu_elf_name
130 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
131 .long cpu_p2001_name
132 .long arm9tdmi_processor_functions
133 .long 0
134 .long 0
135 .long v4_cache_fns
136 .size __p2001_proc_info, . - __p2001_proc_info
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index fc2a4ae15cf4..4c7a5710472b 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -180,42 +180,14 @@ fa526_cr1_set:
180 180
181 __INITDATA 181 __INITDATA
182 182
183 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
184 define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort
183/*
184 * Purpose : Function pointers used to access above functions - all calls
185 * come through these
186 */
187 .type fa526_processor_functions, #object
188fa526_processor_functions:
189 .word v4_early_abort
190 .word legacy_pabort
191 .word cpu_fa526_proc_init
192 .word cpu_fa526_proc_fin
193 .word cpu_fa526_reset
194 .word cpu_fa526_do_idle
195 .word cpu_fa526_dcache_clean_area
196 .word cpu_fa526_switch_mm
197 .word cpu_fa526_set_pte_ext
198 .word 0
199 .word 0
200 .word 0
201 .size fa526_processor_functions, . - fa526_processor_functions
202 185
203 .section ".rodata" 186 .section ".rodata"
204 187
188 string cpu_arch_name, "armv4"
189 string cpu_elf_name, "v4"
190 string cpu_fa526_name, "FA526"
205 .type cpu_arch_name, #object
206cpu_arch_name:
207 .asciz "armv4"
208 .size cpu_arch_name, . - cpu_arch_name
209
210 .type cpu_elf_name, #object
211cpu_elf_name:
212 .asciz "v4"
213 .size cpu_elf_name, . - cpu_elf_name
214
215 .type cpu_fa526_name, #object
216cpu_fa526_name:
217 .asciz "FA526"
218 .size cpu_fa526_name, . - cpu_fa526_name
219 191
220 .align 192 .align
221 193
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d3883eed7a4a..8a6c2f78c1c3 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -411,29 +411,28 @@ ENTRY(feroceon_dma_unmap_area)
411 mov pc, lr 411 mov pc, lr
412ENDPROC(feroceon_dma_unmap_area) 412ENDPROC(feroceon_dma_unmap_area)
413 413
414 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
415 define_cache_functions feroceon
416
417.macro range_alias basename
418 .globl feroceon_range_\basename
419 .type feroceon_range_\basename , %function
420 .equ feroceon_range_\basename , feroceon_\basename
421.endm
422
423/*
424 * Most of the cache functions are unchanged for this case.
425 * Export suitable alias symbols for the unchanged functions:
426 */
427 range_alias flush_icache_all
428 range_alias flush_user_cache_all
429 range_alias flush_kern_cache_all
430 range_alias flush_user_cache_range
431 range_alias coherent_kern_range
432 range_alias coherent_user_range
433 range_alias dma_unmap_area
434
435 define_cache_functions feroceon_range
414ENTRY(feroceon_cache_fns)
415 .long feroceon_flush_icache_all
416 .long feroceon_flush_kern_cache_all
417 .long feroceon_flush_user_cache_all
418 .long feroceon_flush_user_cache_range
419 .long feroceon_coherent_kern_range
420 .long feroceon_coherent_user_range
421 .long feroceon_flush_kern_dcache_area
422 .long feroceon_dma_map_area
423 .long feroceon_dma_unmap_area
424 .long feroceon_dma_flush_range
425
426ENTRY(feroceon_range_cache_fns)
427 .long feroceon_flush_icache_all
428 .long feroceon_flush_kern_cache_all
429 .long feroceon_flush_user_cache_all
430 .long feroceon_flush_user_cache_range
431 .long feroceon_coherent_kern_range
432 .long feroceon_coherent_user_range
433 .long feroceon_range_flush_kern_dcache_area
434 .long feroceon_range_dma_map_area
435 .long feroceon_dma_unmap_area
436 .long feroceon_range_dma_flush_range
437 436
438 .align 5 437 .align 5
439ENTRY(cpu_feroceon_dcache_clean_area) 438ENTRY(cpu_feroceon_dcache_clean_area)
@@ -539,93 +538,27 @@ feroceon_crval:
539 538
540 __INITDATA 539 __INITDATA
541 540
541 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
542 define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort
542/*
543 * Purpose : Function pointers used to access above functions - all calls
544 * come through these
545 */
546 .type feroceon_processor_functions, #object
547feroceon_processor_functions:
548 .word v5t_early_abort
549 .word legacy_pabort
550 .word cpu_feroceon_proc_init
551 .word cpu_feroceon_proc_fin
552 .word cpu_feroceon_reset
553 .word cpu_feroceon_do_idle
554 .word cpu_feroceon_dcache_clean_area
555 .word cpu_feroceon_switch_mm
556 .word cpu_feroceon_set_pte_ext
557 .word 0
558 .word 0
559 .word 0
560 .size feroceon_processor_functions, . - feroceon_processor_functions
561 543
562 .section ".rodata" 544 .section ".rodata"
563 545
546 string cpu_arch_name, "armv5te"
547 string cpu_elf_name, "v5"
548 string cpu_feroceon_name, "Feroceon"
549 string cpu_88fr531_name, "Feroceon 88FR531-vd"
550 string cpu_88fr571_name, "Feroceon 88FR571-vd"
551 string cpu_88fr131_name, "Feroceon 88FR131"
564 .type cpu_arch_name, #object
565cpu_arch_name:
566 .asciz "armv5te"
567 .size cpu_arch_name, . - cpu_arch_name
568
569 .type cpu_elf_name, #object
570cpu_elf_name:
571 .asciz "v5"
572 .size cpu_elf_name, . - cpu_elf_name
573
574 .type cpu_feroceon_name, #object
575cpu_feroceon_name:
576 .asciz "Feroceon"
577 .size cpu_feroceon_name, . - cpu_feroceon_name
578
579 .type cpu_88fr531_name, #object
580cpu_88fr531_name:
581 .asciz "Feroceon 88FR531-vd"
582 .size cpu_88fr531_name, . - cpu_88fr531_name
583
584 .type cpu_88fr571_name, #object
585cpu_88fr571_name:
586 .asciz "Feroceon 88FR571-vd"
587 .size cpu_88fr571_name, . - cpu_88fr571_name
588
589 .type cpu_88fr131_name, #object
590cpu_88fr131_name:
591 .asciz "Feroceon 88FR131"
592 .size cpu_88fr131_name, . - cpu_88fr131_name
593 552
594 .align 553 .align
595 554
596 .section ".proc.info.init", #alloc, #execinstr 555 .section ".proc.info.init", #alloc, #execinstr
597 556
598#ifdef CONFIG_CPU_FEROCEON_OLD_ID 557.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
599 .type __feroceon_old_id_proc_info,#object 558 .type __\name\()_proc_info,#object
600__feroceon_old_id_proc_info: 559__\name\()_proc_info:
601 .long 0x41009260 560 .long \cpu_val
602 .long 0xff00fff0 561 .long \cpu_mask
603 .long PMD_TYPE_SECT | \
604 PMD_SECT_BUFFERABLE | \
605 PMD_SECT_CACHEABLE | \
606 PMD_BIT4 | \
607 PMD_SECT_AP_WRITE | \
608 PMD_SECT_AP_READ
609 .long PMD_TYPE_SECT | \
610 PMD_BIT4 | \
611 PMD_SECT_AP_WRITE | \
612 PMD_SECT_AP_READ
613 b __feroceon_setup
614 .long cpu_arch_name
615 .long cpu_elf_name
616 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
617 .long cpu_feroceon_name
618 .long feroceon_processor_functions
619 .long v4wbi_tlb_fns
620 .long feroceon_user_fns
621 .long feroceon_cache_fns
622 .size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info
623#endif
624
625 .type __88fr531_proc_info,#object
626__88fr531_proc_info:
627 .long 0x56055310
628 .long 0xfffffff0
629 .long PMD_TYPE_SECT | \ 562 .long PMD_TYPE_SECT | \
630 PMD_SECT_BUFFERABLE | \ 563 PMD_SECT_BUFFERABLE | \
631 PMD_SECT_CACHEABLE | \ 564 PMD_SECT_CACHEABLE | \
@@ -640,59 +573,22 @@ __88fr531_proc_info:
640 .long cpu_arch_name 573 .long cpu_arch_name
641 .long cpu_elf_name 574 .long cpu_elf_name
642 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 575 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
643 .long cpu_88fr531_name 576 .long \cpu_name
644 .long feroceon_processor_functions 577 .long feroceon_processor_functions
645 .long v4wbi_tlb_fns 578 .long v4wbi_tlb_fns
646 .long feroceon_user_fns 579 .long feroceon_user_fns
647 .long feroceon_cache_fns 580 .long \cache
648 .size __88fr531_proc_info, . - __88fr531_proc_info 581 .size __\name\()_proc_info, . - __\name\()_proc_info
582.endm
649 583
650 .type __88fr571_proc_info,#object 584#ifdef CONFIG_CPU_FEROCEON_OLD_ID
651__88fr571_proc_info: 585 feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
652 .long 0x56155710 586 cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
653 .long 0xfffffff0 587#endif
654 .long PMD_TYPE_SECT | \
655 PMD_SECT_BUFFERABLE | \
656 PMD_SECT_CACHEABLE | \
657 PMD_BIT4 | \
658 PMD_SECT_AP_WRITE | \
659 PMD_SECT_AP_READ
660 .long PMD_TYPE_SECT | \
661 PMD_BIT4 | \
662 PMD_SECT_AP_WRITE | \
663 PMD_SECT_AP_READ
664 b __feroceon_setup
665 .long cpu_arch_name
666 .long cpu_elf_name
667 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
668 .long cpu_88fr571_name
669 .long feroceon_processor_functions
670 .long v4wbi_tlb_fns
671 .long feroceon_user_fns
672 .long feroceon_range_cache_fns
673 .size __88fr571_proc_info, . - __88fr571_proc_info
674 588
675 .type __88fr131_proc_info,#object 589 feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
676__88fr131_proc_info: 590 cache=feroceon_cache_fns
677 .long 0x56251310 591 feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
678 .long 0xfffffff0 592 cache=feroceon_range_cache_fns
679 .long PMD_TYPE_SECT | \ 593 feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
680 PMD_SECT_BUFFERABLE | \ 594 cache=feroceon_range_cache_fns
681 PMD_SECT_CACHEABLE | \
682 PMD_BIT4 | \
683 PMD_SECT_AP_WRITE | \
684 PMD_SECT_AP_READ
685 .long PMD_TYPE_SECT | \
686 PMD_BIT4 | \
687 PMD_SECT_AP_WRITE | \
688 PMD_SECT_AP_READ
689 b __feroceon_setup
690 .long cpu_arch_name
691 .long cpu_elf_name
692 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
693 .long cpu_88fr131_name
694 .long feroceon_processor_functions
695 .long v4wbi_tlb_fns
696 .long feroceon_user_fns
697 .long feroceon_range_cache_fns
698 .size __88fr131_proc_info, . - __88fr131_proc_info
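The range_alias macro introduced in this file covers the entries of feroceon_range_cache_fns that have no dedicated *_range implementation: it simply publishes the plain feroceon_ symbol under the feroceon_range_ name so that define_cache_functions feroceon_range can reference it. For example, per the macro body shown in this hunk,

	range_alias flush_icache_all

expands to

	.globl	feroceon_range_flush_icache_all
	.type	feroceon_range_flush_icache_all , %function
	.equ	feroceon_range_flush_icache_all , feroceon_flush_icache_all

Only flush_kern_dcache_area, dma_map_area and dma_flush_range keep real feroceon_range_* implementations, exactly as in the removed hand-written table.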
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 34261f9486b9..307a4def8d3a 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -254,3 +254,71 @@
254 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 254 mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
255 mcr p15, 0, ip, c7, c10, 4 @ data write barrier 255 mcr p15, 0, ip, c7, c10, 4 @ data write barrier
256 .endm 256 .endm
257
258.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
259 .type \name\()_processor_functions, #object
260 .align 2
261ENTRY(\name\()_processor_functions)
262 .word \dabort
263 .word \pabort
264 .word cpu_\name\()_proc_init
265 .word cpu_\name\()_proc_fin
266 .word cpu_\name\()_reset
267 .word cpu_\name\()_do_idle
268 .word cpu_\name\()_dcache_clean_area
269 .word cpu_\name\()_switch_mm
270
271 .if \nommu
272 .word 0
273 .else
274 .word cpu_\name\()_set_pte_ext
275 .endif
276
277 .if \suspend
278 .word cpu_\name\()_suspend_size
279#ifdef CONFIG_PM_SLEEP
280 .word cpu_\name\()_do_suspend
281 .word cpu_\name\()_do_resume
282#else
283 .word 0
284 .word 0
285#endif
286 .else
287 .word 0
288 .word 0
289 .word 0
290 .endif
291
292 .size \name\()_processor_functions, . - \name\()_processor_functions
293.endm
294
295.macro define_cache_functions name:req
296 .align 2
297 .type \name\()_cache_fns, #object
298ENTRY(\name\()_cache_fns)
299 .long \name\()_flush_icache_all
300 .long \name\()_flush_kern_cache_all
301 .long \name\()_flush_user_cache_all
302 .long \name\()_flush_user_cache_range
303 .long \name\()_coherent_kern_range
304 .long \name\()_coherent_user_range
305 .long \name\()_flush_kern_dcache_area
306 .long \name\()_dma_map_area
307 .long \name\()_dma_unmap_area
308 .long \name\()_dma_flush_range
309 .size \name\()_cache_fns, . - \name\()_cache_fns
310.endm
311
312.macro define_tlb_functions name:req, flags_up:req, flags_smp
313 .type \name\()_tlb_fns, #object
314ENTRY(\name\()_tlb_fns)
315 .long \name\()_flush_user_tlb_range
316 .long \name\()_flush_kern_tlb_range
317 .ifnb \flags_smp
318 ALT_SMP(.long \flags_smp )
319 ALT_UP(.long \flags_up )
320 .else
321 .long \flags_up
322 .endif
323 .size \name\()_tlb_fns, . - \name\()_tlb_fns
324.endm
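With these three helpers in proc-macros.S, a per-CPU proc-*.S file only needs the invocations. A sketch for a hypothetical "foo" core (the cpu_foo_*, foo_* and foo_tlb_flags_* symbols are placeholders, not names from this patch):

	@ emits foo_processor_functions: the dabort/pabort handlers, then the cpu_foo_* callbacks,
	@ with the suspend/resume slots filled only when suspend=1 and CONFIG_PM_SLEEP is set
	define_processor_functions foo, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1

	@ emits foo_cache_fns from the ten foo_* cache entry points
	define_cache_functions foo

	@ emits foo_tlb_fns; the ALT_SMP/ALT_UP pair is only used when flags_smp is passed
	define_tlb_functions foo, foo_tlb_flags_up, flags_smp=foo_tlb_flags_smp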
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 9d4f2ae63370..db52b0fb14a0 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -93,6 +93,17 @@ ENTRY(cpu_mohawk_do_idle)
93 mov pc, lr 93 mov pc, lr
94 94
95/* 95/*
96 * flush_icache_all()
97 *
98 * Unconditionally clean and invalidate the entire icache.
99 */
100ENTRY(mohawk_flush_icache_all)
101 mov r0, #0
102 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
103 mov pc, lr
104ENDPROC(mohawk_flush_icache_all)
105
106/*
96 * flush_user_cache_all() 107 * flush_user_cache_all()
97 * 108 *
98 * Clean and invalidate all cache entries in a particular 109 * Clean and invalidate all cache entries in a particular
@@ -288,16 +299,8 @@ ENTRY(mohawk_dma_unmap_area)
288 mov pc, lr 299 mov pc, lr
289ENDPROC(mohawk_dma_unmap_area) 300ENDPROC(mohawk_dma_unmap_area)
290 301
302 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
303 define_cache_functions mohawk
291ENTRY(mohawk_cache_fns)
292 .long mohawk_flush_kern_cache_all
293 .long mohawk_flush_user_cache_all
294 .long mohawk_flush_user_cache_range
295 .long mohawk_coherent_kern_range
296 .long mohawk_coherent_user_range
297 .long mohawk_flush_kern_dcache_area
298 .long mohawk_dma_map_area
299 .long mohawk_dma_unmap_area
300 .long mohawk_dma_flush_range
301 304
302ENTRY(cpu_mohawk_dcache_clean_area) 305ENTRY(cpu_mohawk_dcache_clean_area)
3031: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 3061: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -373,42 +376,14 @@ mohawk_crval:
373 376
374 __INITDATA 377 __INITDATA
375 378
379 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
380 define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
376/*
377 * Purpose : Function pointers used to access above functions - all calls
378 * come through these
379 */
380 .type mohawk_processor_functions, #object
381mohawk_processor_functions:
382 .word v5t_early_abort
383 .word legacy_pabort
384 .word cpu_mohawk_proc_init
385 .word cpu_mohawk_proc_fin
386 .word cpu_mohawk_reset
387 .word cpu_mohawk_do_idle
388 .word cpu_mohawk_dcache_clean_area
389 .word cpu_mohawk_switch_mm
390 .word cpu_mohawk_set_pte_ext
391 .word 0
392 .word 0
393 .word 0
394 .size mohawk_processor_functions, . - mohawk_processor_functions
395 381
396 .section ".rodata" 382 .section ".rodata"
397 383
384 string cpu_arch_name, "armv5te"
385 string cpu_elf_name, "v5"
386 string cpu_mohawk_name, "Marvell 88SV331x"
398 .type cpu_arch_name, #object
399cpu_arch_name:
400 .asciz "armv5te"
401 .size cpu_arch_name, . - cpu_arch_name
402
403 .type cpu_elf_name, #object
404cpu_elf_name:
405 .asciz "v5"
406 .size cpu_elf_name, . - cpu_elf_name
407
408 .type cpu_mohawk_name, #object
409cpu_mohawk_name:
410 .asciz "Marvell 88SV331x"
411 .size cpu_mohawk_name, . - cpu_mohawk_name
412 387
413 .align 388 .align
414 389
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 46f09ed16b98..d50ada26edd6 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -187,43 +187,14 @@ sa110_crval:
187 187
188 __INITDATA 188 __INITDATA
189 189
190 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
191 define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort
190/*
191 * Purpose : Function pointers used to access above functions - all calls
192 * come through these
193 */
194
195 .type sa110_processor_functions, #object
196ENTRY(sa110_processor_functions)
197 .word v4_early_abort
198 .word legacy_pabort
199 .word cpu_sa110_proc_init
200 .word cpu_sa110_proc_fin
201 .word cpu_sa110_reset
202 .word cpu_sa110_do_idle
203 .word cpu_sa110_dcache_clean_area
204 .word cpu_sa110_switch_mm
205 .word cpu_sa110_set_pte_ext
206 .word 0
207 .word 0
208 .word 0
209 .size sa110_processor_functions, . - sa110_processor_functions
210 192
211 .section ".rodata" 193 .section ".rodata"
212 194
195 string cpu_arch_name, "armv4"
196 string cpu_elf_name, "v4"
197 string cpu_sa110_name, "StrongARM-110"
213 .type cpu_arch_name, #object
214cpu_arch_name:
215 .asciz "armv4"
216 .size cpu_arch_name, . - cpu_arch_name
217
218 .type cpu_elf_name, #object
219cpu_elf_name:
220 .asciz "v4"
221 .size cpu_elf_name, . - cpu_elf_name
222
223 .type cpu_sa110_name, #object
224cpu_sa110_name:
225 .asciz "StrongARM-110"
226 .size cpu_sa110_name, . - cpu_sa110_name
227 198
228 .align 199 .align
229 200
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index e9c47271732d..07219c2ae114 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -198,9 +198,6 @@ ENTRY(cpu_sa1100_do_resume)
198 PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE 198 PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
199 b cpu_resume_mmu 199 b cpu_resume_mmu
200ENDPROC(cpu_sa1100_do_resume) 200ENDPROC(cpu_sa1100_do_resume)
201#else
202#define cpu_sa1100_do_suspend 0
203#define cpu_sa1100_do_resume 0
204#endif 201#endif
205 202
206 __CPUINIT 203 __CPUINIT
@@ -234,59 +231,28 @@ sa1100_crval:
234 __INITDATA 231 __INITDATA
235 232
236/* 233/*
237 * Purpose : Function pointers used to access above functions - all calls
238 * come through these
239 */
240
241/*
242 * SA1100 and SA1110 share the same function calls 234 * SA1100 and SA1110 share the same function calls
243 */ 235 */
244 .type sa1100_processor_functions, #object
245ENTRY(sa1100_processor_functions)
246 .word v4_early_abort
247 .word legacy_pabort
248 .word cpu_sa1100_proc_init
249 .word cpu_sa1100_proc_fin
250 .word cpu_sa1100_reset
251 .word cpu_sa1100_do_idle
252 .word cpu_sa1100_dcache_clean_area
253 .word cpu_sa1100_switch_mm
254 .word cpu_sa1100_set_pte_ext
255 .word cpu_sa1100_suspend_size
256 .word cpu_sa1100_do_suspend
257 .word cpu_sa1100_do_resume
258 .size sa1100_processor_functions, . - sa1100_processor_functions
259
260 .section ".rodata"
261 236
237 @ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
238 define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1
262 .type cpu_arch_name, #object
263cpu_arch_name:
264 .asciz "armv4"
265 .size cpu_arch_name, . - cpu_arch_name
266 239
240 .section ".rodata"
267 .type cpu_elf_name, #object
268cpu_elf_name:
269 .asciz "v4"
270 .size cpu_elf_name, . - cpu_elf_name
271
272 .type cpu_sa1100_name, #object
273cpu_sa1100_name:
274 .asciz "StrongARM-1100"
275 .size cpu_sa1100_name, . - cpu_sa1100_name
276 241
242 string cpu_arch_name, "armv4"
243 string cpu_elf_name, "v4"
244 string cpu_sa1100_name, "StrongARM-1100"
245 string cpu_sa1110_name, "StrongARM-1110"
277 .type cpu_sa1110_name, #object
278cpu_sa1110_name:
279 .asciz "StrongARM-1110"
280 .size cpu_sa1110_name, . - cpu_sa1110_name
281 246
282 .align 247 .align
283 248
284 .section ".proc.info.init", #alloc, #execinstr 249 .section ".proc.info.init", #alloc, #execinstr
285 250
286 .type __sa1100_proc_info,#object 251.macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req
287__sa1100_proc_info: 252 .type __\name\()_proc_info,#object
288 .long 0x4401a110 253__\name\()_proc_info:
289 .long 0xfffffff0 254 .long \cpu_val
255 .long \cpu_mask
290 .long PMD_TYPE_SECT | \ 256 .long PMD_TYPE_SECT | \
291 PMD_SECT_BUFFERABLE | \ 257 PMD_SECT_BUFFERABLE | \
292 PMD_SECT_CACHEABLE | \ 258 PMD_SECT_CACHEABLE | \
@@ -299,32 +265,13 @@ __sa1100_proc_info:
299 .long cpu_arch_name 265 .long cpu_arch_name
300 .long cpu_elf_name 266 .long cpu_elf_name
301 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT 267 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
302 .long cpu_sa1100_name 268 .long \cpu_name
303 .long sa1100_processor_functions 269 .long sa1100_processor_functions
304 .long v4wb_tlb_fns 270 .long v4wb_tlb_fns
305 .long v4_mc_user_fns 271 .long v4_mc_user_fns
306 .long v4wb_cache_fns 272 .long v4wb_cache_fns
307 .size __sa1100_proc_info, . - __sa1100_proc_info 273 .size __\name\()_proc_info, . - __\name\()_proc_info
274.endm
308 275
309 .type __sa1110_proc_info,#object 276 sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name
310__sa1110_proc_info: 277 sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name
311 .long 0x6901b110
312 .long 0xfffffff0
313 .long PMD_TYPE_SECT | \
314 PMD_SECT_BUFFERABLE | \
315 PMD_SECT_CACHEABLE | \
316 PMD_SECT_AP_WRITE | \
317 PMD_SECT_AP_READ
318 .long PMD_TYPE_SECT | \
319 PMD_SECT_AP_WRITE | \
320 PMD_SECT_AP_READ
321 b __sa1100_setup
322 .long cpu_arch_name
323 .long cpu_elf_name
324 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT
325 .long cpu_sa1110_name
326 .long sa1100_processor_functions
327 .long v4wb_tlb_fns
328 .long v4_mc_user_fns
329 .long v4wb_cache_fns
330 .size __sa1110_proc_info, . - __sa1110_proc_info
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 1d2b8451bf25..219138d2f158 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -56,6 +56,11 @@ ENTRY(cpu_v6_proc_fin)
56 */ 56 */
57 .align 5 57 .align 5
58ENTRY(cpu_v6_reset) 58ENTRY(cpu_v6_reset)
59 mrc p15, 0, r1, c1, c0, 0 @ ctrl register
60 bic r1, r1, #0x1 @ ...............m
61 mcr p15, 0, r1, c1, c0, 0 @ disable MMU
62 mov r1, #0
63 mcr p15, 0, r1, c7, c5, 4 @ ISB
59 mov pc, r0 64 mov pc, r0
60 65
61/* 66/*
@@ -164,16 +169,9 @@ ENDPROC(cpu_v6_do_resume)
164cpu_resume_l1_flags: 169cpu_resume_l1_flags:
165 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) 170 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
166 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) 171 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
167#else
168#define cpu_v6_do_suspend 0
169#define cpu_v6_do_resume 0
170#endif 172#endif
171 173
174	string	cpu_v6_name, "ARMv6-compatible processor"
172
173 .type cpu_v6_name, #object
174cpu_v6_name:
175 .asciz "ARMv6-compatible processor"
176 .size cpu_v6_name, . - cpu_v6_name
177 175
178 .align 176 .align
179 177
@@ -239,33 +237,13 @@ v6_crval:
239 237
240 __INITDATA 238 __INITDATA
241 239
240	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
241	define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1
242	.type	v6_processor_functions, #object
243ENTRY(v6_processor_functions)
244 .word v6_early_abort
245 .word v6_pabort
246 .word cpu_v6_proc_init
247 .word cpu_v6_proc_fin
248 .word cpu_v6_reset
249 .word cpu_v6_do_idle
250 .word cpu_v6_dcache_clean_area
251 .word cpu_v6_switch_mm
252 .word cpu_v6_set_pte_ext
253 .word cpu_v6_suspend_size
254 .word cpu_v6_do_suspend
255 .word cpu_v6_do_resume
256 .size v6_processor_functions, . - v6_processor_functions
257 242
258 .section ".rodata" 243 .section ".rodata"
259 244
245	string	cpu_arch_name, "armv6"
246	string	cpu_elf_name, "v6"
260	.type	cpu_arch_name, #object
261cpu_arch_name:
262 .asciz "armv6"
263 .size cpu_arch_name, . - cpu_arch_name
264
265 .type cpu_elf_name, #object
266cpu_elf_name:
267 .asciz "v6"
268 .size cpu_elf_name, . - cpu_elf_name
269 .align 247 .align
270 248
271 .section ".proc.info.init", #alloc, #execinstr 249 .section ".proc.info.init", #alloc, #execinstr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 089c0b5e454f..a30e78542ccf 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -58,9 +58,16 @@ ENDPROC(cpu_v7_proc_fin)
58 * to what would be the reset vector. 58 * to what would be the reset vector.
59 * 59 *
60 * - loc - location to jump to for soft reset 60 * - loc - location to jump to for soft reset
61 *
62 * This code must be executed using a flat identity mapping with
63 * caches disabled.
61 */ 64 */
62 .align 5 65 .align 5
63ENTRY(cpu_v7_reset) 66ENTRY(cpu_v7_reset)
67 mrc p15, 0, r1, c1, c0, 0 @ ctrl register
68 bic r1, r1, #0x1 @ ...............m
69 mcr p15, 0, r1, c1, c0, 0 @ disable MMU
70 isb
64 mov pc, r0 71 mov pc, r0
65ENDPROC(cpu_v7_reset) 72ENDPROC(cpu_v7_reset)
66 73
@@ -173,8 +180,7 @@ ENTRY(cpu_v7_set_pte_ext)
173 mov pc, lr 180 mov pc, lr
174ENDPROC(cpu_v7_set_pte_ext) 181ENDPROC(cpu_v7_set_pte_ext)
175 182
183	string	cpu_v7_name, "ARMv7 Processor"
176cpu_v7_name:
177	.ascii	"ARMv7 Processor"
178 .align 184 .align
179 185
180 /* 186 /*
@@ -257,9 +263,6 @@ ENDPROC(cpu_v7_do_resume)
257cpu_resume_l1_flags: 263cpu_resume_l1_flags:
258 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) 264 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
259 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) 265 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
260#else
261#define cpu_v7_do_suspend 0
262#define cpu_v7_do_resume 0
263#endif 266#endif
264 267
265 __CPUINIT 268 __CPUINIT
@@ -279,13 +282,20 @@ cpu_resume_l1_flags:
279 * It is assumed that: 282 * It is assumed that:
280 * - cache type register is implemented 283 * - cache type register is implemented
281 */ 284 */
285__v7_ca5mp_setup:
282__v7_ca9mp_setup: 286__v7_ca9mp_setup:
287 mov r10, #(1 << 0) @ TLB ops broadcasting
288 b 1f
289__v7_ca15mp_setup:
290 mov r10, #0
2911:
283#ifdef CONFIG_SMP 292#ifdef CONFIG_SMP
284 ALT_SMP(mrc p15, 0, r0, c1, c0, 1) 293 ALT_SMP(mrc p15, 0, r0, c1, c0, 1)
285 ALT_UP(mov r0, #(1 << 6)) @ fake it for UP 294 ALT_UP(mov r0, #(1 << 6)) @ fake it for UP
286 tst r0, #(1 << 6) @ SMP/nAMP mode enabled? 295 tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
287	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
288	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
296	orreq	r0, r0, #(1 << 6)		@ Enable SMP/nAMP mode
297	orreq	r0, r0, r10			@ Enable CPU-specific SMP bits
298 mcreq p15, 0, r0, c1, c0, 1
289#endif 299#endif
290__v7_setup: 300__v7_setup:
291 adr r12, __v7_setup_stack @ the local stack 301 adr r12, __v7_setup_stack @ the local stack
@@ -411,94 +421,75 @@ __v7_setup_stack:
411 421
412 __INITDATA 422 __INITDATA
413 423
424	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
425	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
414	.type	v7_processor_functions, #object
415ENTRY(v7_processor_functions)
416 .word v7_early_abort
417 .word v7_pabort
418 .word cpu_v7_proc_init
419 .word cpu_v7_proc_fin
420 .word cpu_v7_reset
421 .word cpu_v7_do_idle
422 .word cpu_v7_dcache_clean_area
423 .word cpu_v7_switch_mm
424 .word cpu_v7_set_pte_ext
425 .word cpu_v7_suspend_size
426 .word cpu_v7_do_suspend
427 .word cpu_v7_do_resume
428 .size v7_processor_functions, . - v7_processor_functions
429 426
430 .section ".rodata" 427 .section ".rodata"
431 428
429	string	cpu_arch_name, "armv7"
430	string	cpu_elf_name, "v7"
432	.type	cpu_arch_name, #object
433cpu_arch_name:
434 .asciz "armv7"
435 .size cpu_arch_name, . - cpu_arch_name
436
437 .type cpu_elf_name, #object
438cpu_elf_name:
439 .asciz "v7"
440 .size cpu_elf_name, . - cpu_elf_name
441 .align 431 .align
442 432
443 .section ".proc.info.init", #alloc, #execinstr 433 .section ".proc.info.init", #alloc, #execinstr
444 434
435	/*
436	 * Standard v7 proc info content
437	 */
438.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
439	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
440			PMD_FLAGS_SMP | \mm_mmuflags)
441	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
442			PMD_FLAGS_UP | \mm_mmuflags)
443	.long	PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \
444		PMD_SECT_AP_READ | \io_mmuflags
445	W(b)	\initfunc
445	.type	__v7_ca9mp_proc_info, #object
446__v7_ca9mp_proc_info:
447	.long	0x410fc090	@ Required ID value
448	.long	0xff0ffff0	@ Mask for ID
449	ALT_SMP(.long \
450		PMD_TYPE_SECT | \
451		PMD_SECT_AP_WRITE | \
452		PMD_SECT_AP_READ | \
453		PMD_FLAGS_SMP)
454	ALT_UP(.long \
455		PMD_TYPE_SECT | \
456 PMD_SECT_AP_WRITE | \
457 PMD_SECT_AP_READ | \
458 PMD_FLAGS_UP)
459 .long PMD_TYPE_SECT | \
460 PMD_SECT_XN | \
461 PMD_SECT_AP_WRITE | \
462 PMD_SECT_AP_READ
463 W(b) __v7_ca9mp_setup
464 .long cpu_arch_name 446 .long cpu_arch_name
465 .long cpu_elf_name 447 .long cpu_elf_name
466	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
448	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
449 HWCAP_EDSP | HWCAP_TLS | \hwcaps
467 .long cpu_v7_name 450 .long cpu_v7_name
468 .long v7_processor_functions 451 .long v7_processor_functions
469 .long v7wbi_tlb_fns 452 .long v7wbi_tlb_fns
470 .long v6_user_fns 453 .long v6_user_fns
471 .long v7_cache_fns 454 .long v7_cache_fns
455.endm
456
457 /*
458 * ARM Ltd. Cortex A5 processor.
459 */
460 .type __v7_ca5mp_proc_info, #object
461__v7_ca5mp_proc_info:
462 .long 0x410fc050
463 .long 0xff0ffff0
464 __v7_proc __v7_ca5mp_setup
465 .size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
466
467 /*
468 * ARM Ltd. Cortex A9 processor.
469 */
470 .type __v7_ca9mp_proc_info, #object
471__v7_ca9mp_proc_info:
472 .long 0x410fc090
473 .long 0xff0ffff0
474 __v7_proc __v7_ca9mp_setup
472 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info 475 .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
473 476
474 /* 477 /*
478 * ARM Ltd. Cortex A15 processor.
479 */
480 .type __v7_ca15mp_proc_info, #object
481__v7_ca15mp_proc_info:
482 .long 0x410fc0f0
483 .long 0xff0ffff0
484 __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
485 .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
486
487 /*
475 * Match any ARMv7 processor core. 488 * Match any ARMv7 processor core.
476 */ 489 */
477 .type __v7_proc_info, #object 490 .type __v7_proc_info, #object
478__v7_proc_info: 491__v7_proc_info:
479 .long 0x000f0000 @ Required ID value 492 .long 0x000f0000 @ Required ID value
480 .long 0x000f0000 @ Mask for ID 493 .long 0x000f0000 @ Mask for ID
494	__v7_proc __v7_setup
481	ALT_SMP(.long \
482 PMD_TYPE_SECT | \
483 PMD_SECT_AP_WRITE | \
484 PMD_SECT_AP_READ | \
485 PMD_FLAGS_SMP)
486 ALT_UP(.long \
487 PMD_TYPE_SECT | \
488 PMD_SECT_AP_WRITE | \
489 PMD_SECT_AP_READ | \
490 PMD_FLAGS_UP)
491 .long PMD_TYPE_SECT | \
492 PMD_SECT_XN | \
493 PMD_SECT_AP_WRITE | \
494 PMD_SECT_AP_READ
495 W(b) __v7_setup
496 .long cpu_arch_name
497 .long cpu_elf_name
498 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
499 .long cpu_v7_name
500 .long v7_processor_functions
501 .long v7wbi_tlb_fns
502 .long v6_user_fns
503 .long v7_cache_fns
504 .size __v7_proc_info, . - __v7_proc_info 495 .size __v7_proc_info, . - __v7_proc_info
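define_processor_functions itself lives in proc-macros.S and is not shown in this diff; judging from the .word tables it replaces here, a plausible sketch of the helper is:

	@ sketch only: reconstructed from the removed tables, not the real proc-macros.S text
	.macro	define_processor_functions name:req, dabort:req, pabort:req, suspend=0
	.type	\name\()_processor_functions, #object
ENTRY(\name\()_processor_functions)
	.word	\dabort
	.word	\pabort
	.word	cpu_\name\()_proc_init
	.word	cpu_\name\()_proc_fin
	.word	cpu_\name\()_reset
	.word	cpu_\name\()_do_idle
	.word	cpu_\name\()_dcache_clean_area
	.word	cpu_\name\()_switch_mm
	.word	cpu_\name\()_set_pte_ext
	.if \suspend
	.word	cpu_\name\()_suspend_size
	.word	cpu_\name\()_do_suspend		@ in the real helper these two are
	.word	cpu_\name\()_do_resume		@ further guarded by the PM sleep config
	.else
	.word	0
	.word	0
	.word	0
	.endif
	.size	\name\()_processor_functions, . - \name\()_processor_functions
	.endm

If the helper emits the zero entries itself for the non-suspend case, that would also explain why the "#define cpu_xxx_do_suspend 0" fallbacks are dropped throughout this series.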
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 596213699f37..64f1fc7edf0a 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -335,17 +335,8 @@ ENTRY(xsc3_dma_unmap_area)
335 mov pc, lr 335 mov pc, lr
336ENDPROC(xsc3_dma_unmap_area) 336ENDPROC(xsc3_dma_unmap_area)
337 337
338	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
339	define_cache_functions xsc3
338ENTRY(xsc3_cache_fns)
339	.long	xsc3_flush_icache_all
340 .long xsc3_flush_kern_cache_all
341 .long xsc3_flush_user_cache_all
342 .long xsc3_flush_user_cache_range
343 .long xsc3_coherent_kern_range
344 .long xsc3_coherent_user_range
345 .long xsc3_flush_kern_dcache_area
346 .long xsc3_dma_map_area
347 .long xsc3_dma_unmap_area
348 .long xsc3_dma_flush_range
349 340
350ENTRY(cpu_xsc3_dcache_clean_area) 341ENTRY(cpu_xsc3_dcache_clean_area)
3511: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line 3421: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line
@@ -454,9 +445,6 @@ ENTRY(cpu_xsc3_do_resume)
454 ldr r3, =0x542e @ section flags 445 ldr r3, =0x542e @ section flags
455 b cpu_resume_mmu 446 b cpu_resume_mmu
456ENDPROC(cpu_xsc3_do_resume) 447ENDPROC(cpu_xsc3_do_resume)
457#else
458#define cpu_xsc3_do_suspend 0
459#define cpu_xsc3_do_resume 0
460#endif 448#endif
461 449
462 __CPUINIT 450 __CPUINIT
@@ -503,52 +491,24 @@ xsc3_crval:
503 491
504 __INITDATA 492 __INITDATA
505 493
494	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
495	define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
506/*
507 * Purpose : Function pointers used to access above functions - all calls
508 * come through these
509 */
510
511 .type xsc3_processor_functions, #object
512ENTRY(xsc3_processor_functions)
513 .word v5t_early_abort
514 .word legacy_pabort
515 .word cpu_xsc3_proc_init
516 .word cpu_xsc3_proc_fin
517 .word cpu_xsc3_reset
518 .word cpu_xsc3_do_idle
519 .word cpu_xsc3_dcache_clean_area
520 .word cpu_xsc3_switch_mm
521 .word cpu_xsc3_set_pte_ext
522 .word cpu_xsc3_suspend_size
523 .word cpu_xsc3_do_suspend
524 .word cpu_xsc3_do_resume
525 .size xsc3_processor_functions, . - xsc3_processor_functions
526 496
527 .section ".rodata" 497 .section ".rodata"
528 498
499	string	cpu_arch_name, "armv5te"
500	string	cpu_elf_name, "v5"
501	string	cpu_xsc3_name, "XScale-V3 based processor"
529	.type	cpu_arch_name, #object
530cpu_arch_name:
531	.asciz	"armv5te"
532 .size cpu_arch_name, . - cpu_arch_name
533
534 .type cpu_elf_name, #object
535cpu_elf_name:
536 .asciz "v5"
537 .size cpu_elf_name, . - cpu_elf_name
538
539 .type cpu_xsc3_name, #object
540cpu_xsc3_name:
541 .asciz "XScale-V3 based processor"
542 .size cpu_xsc3_name, . - cpu_xsc3_name
543 502
544 .align 503 .align
545 504
546 .section ".proc.info.init", #alloc, #execinstr 505 .section ".proc.info.init", #alloc, #execinstr
547 506
548	.type	__xsc3_proc_info,#object
549__xsc3_proc_info:
550	.long	0x69056000
551	.long	0xffffe000
507.macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req
508	.type	__\name\()_proc_info,#object
509__\name\()_proc_info:
510	.long	\cpu_val
511 .long \cpu_mask
552 .long PMD_TYPE_SECT | \ 512 .long PMD_TYPE_SECT | \
553 PMD_SECT_BUFFERABLE | \ 513 PMD_SECT_BUFFERABLE | \
554 PMD_SECT_CACHEABLE | \ 514 PMD_SECT_CACHEABLE | \
@@ -566,29 +526,10 @@ __xsc3_proc_info:
566 .long v4wbi_tlb_fns 526 .long v4wbi_tlb_fns
567 .long xsc3_mc_user_fns 527 .long xsc3_mc_user_fns
568 .long xsc3_cache_fns 528 .long xsc3_cache_fns
569	.size	__xsc3_proc_info, . - __xsc3_proc_info
529	.size	__\name\()_proc_info, . - __\name\()_proc_info
530.endm
570 531
532	xsc3_proc_info xsc3, 0x69056000, 0xffffe000
533
534/* Note: PXA935 changed its implementor ID from Intel to Marvell */
535	xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000
571/* Note: PXA935 changed its implementor ID from Intel to Marvell */
572
573	.type	__xsc3_pxa935_proc_info,#object
574__xsc3_pxa935_proc_info:
575 .long 0x56056000
576 .long 0xffffe000
577 .long PMD_TYPE_SECT | \
578 PMD_SECT_BUFFERABLE | \
579 PMD_SECT_CACHEABLE | \
580 PMD_SECT_AP_WRITE | \
581 PMD_SECT_AP_READ
582 .long PMD_TYPE_SECT | \
583 PMD_SECT_AP_WRITE | \
584 PMD_SECT_AP_READ
585 b __xsc3_setup
586 .long cpu_arch_name
587 .long cpu_elf_name
588 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
589 .long cpu_xsc3_name
590 .long xsc3_processor_functions
591 .long v4wbi_tlb_fns
592 .long xsc3_mc_user_fns
593 .long xsc3_cache_fns
594 .size __xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info
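define_cache_functions is likewise provided by proc-macros.S rather than this file; reconstructing it from the xsc3_cache_fns table removed above, the helper presumably emits one .long per cpu_cache_fns slot by name:

	@ sketch only: assumed shape of the proc-macros.S helper
	.macro	define_cache_functions name:req
	.type	\name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
	.long	\name\()_flush_icache_all
	.long	\name\()_flush_kern_cache_all
	.long	\name\()_flush_user_cache_all
	.long	\name\()_flush_user_cache_range
	.long	\name\()_coherent_kern_range
	.long	\name\()_coherent_user_range
	.long	\name\()_flush_kern_dcache_area
	.long	\name\()_dma_map_area
	.long	\name\()_dma_unmap_area
	.long	\name\()_dma_flush_range
	.size	\name\()_cache_fns, . - \name\()_cache_fns
	.endm

Under that assumption, "define_cache_functions xsc3" produces the same ordering as the hand-written table, but the xsc3_* naming convention becomes mandatory for every routine the macro references.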
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 42af97664c9d..fbc06e55b87a 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -390,12 +390,12 @@ ENDPROC(xscale_dma_map_area)
390 * - size - size of region 390 * - size - size of region
391 * - dir - DMA direction 391 * - dir - DMA direction
392 */ 392 */
393ENTRY(xscale_dma_a0_map_area)
393ENTRY(xscale_80200_A0_A1_dma_map_area)
394 add r1, r1, r0 394 add r1, r1, r0
395 teq r2, #DMA_TO_DEVICE 395 teq r2, #DMA_TO_DEVICE
396 beq xscale_dma_clean_range 396 beq xscale_dma_clean_range
397 b xscale_dma_flush_range 397 b xscale_dma_flush_range
398ENDPROC(xscale_dma_a0_map_area)
398ENDPROC(xscale_80200_A0_A1_dma_map_area)
399 399
400/* 400/*
401 * dma_unmap_area(start, size, dir) 401 * dma_unmap_area(start, size, dir)
@@ -407,17 +407,8 @@ ENTRY(xscale_dma_unmap_area)
407 mov pc, lr 407 mov pc, lr
408ENDPROC(xscale_dma_unmap_area) 408ENDPROC(xscale_dma_unmap_area)
409 409
410	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
411	define_cache_functions xscale
410ENTRY(xscale_cache_fns)
411	.long	xscale_flush_icache_all
412 .long xscale_flush_kern_cache_all
413 .long xscale_flush_user_cache_all
414 .long xscale_flush_user_cache_range
415 .long xscale_coherent_kern_range
416 .long xscale_coherent_user_range
417 .long xscale_flush_kern_dcache_area
418 .long xscale_dma_map_area
419 .long xscale_dma_unmap_area
420 .long xscale_dma_flush_range
421 412
422/* 413/*
423 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't 414 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
@@ -432,16 +423,28 @@ ENTRY(xscale_cache_fns)
432 * revision January 22, 2003, available at: 423 * revision January 22, 2003, available at:
433 * http://www.intel.com/design/iio/specupdt/273415.htm 424 * http://www.intel.com/design/iio/specupdt/273415.htm
434 */ 425 */
435ENTRY(xscale_80200_A0_A1_cache_fns)
436	.long	xscale_flush_kern_cache_all
437	.long	xscale_flush_user_cache_all
438	.long	xscale_flush_user_cache_range
439	.long	xscale_coherent_kern_range
440	.long	xscale_coherent_user_range
441	.long	xscale_flush_kern_dcache_area
442	.long	xscale_dma_a0_map_area
443	.long	xscale_dma_unmap_area
444	.long	xscale_dma_flush_range
426.macro a0_alias basename
427	.globl xscale_80200_A0_A1_\basename
428	.type xscale_80200_A0_A1_\basename , %function
429	.equ xscale_80200_A0_A1_\basename , xscale_\basename
430.endm
431
432/*
433 * Most of the cache functions are unchanged for these processor revisions.
434 * Export suitable alias symbols for the unchanged functions:
435 */
436 a0_alias flush_icache_all
437 a0_alias flush_user_cache_all
438 a0_alias flush_kern_cache_all
439 a0_alias flush_user_cache_range
440 a0_alias coherent_kern_range
441 a0_alias coherent_user_range
442 a0_alias flush_kern_dcache_area
443 a0_alias dma_flush_range
444 a0_alias dma_unmap_area
445
446 @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
447 define_cache_functions xscale_80200_A0_A1
445 448
446ENTRY(cpu_xscale_dcache_clean_area) 449ENTRY(cpu_xscale_dcache_clean_area)
4471: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 4501: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -551,9 +554,6 @@ ENTRY(cpu_xscale_do_resume)
551 PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE 554 PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
552 b cpu_resume_mmu 555 b cpu_resume_mmu
553ENDPROC(cpu_xscale_do_resume) 556ENDPROC(cpu_xscale_do_resume)
554#else
555#define cpu_xscale_do_suspend 0
556#define cpu_xscale_do_resume 0
557#endif 557#endif
558 558
559 __CPUINIT 559 __CPUINIT
@@ -587,432 +587,74 @@ xscale_crval:
587 587
588 __INITDATA 588 __INITDATA
589 589
590	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
591	define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1
590/*
591 * Purpose : Function pointers used to access above functions - all calls
592 * come through these
593 */
594
595 .type xscale_processor_functions, #object
596ENTRY(xscale_processor_functions)
597 .word v5t_early_abort
598 .word legacy_pabort
599 .word cpu_xscale_proc_init
600 .word cpu_xscale_proc_fin
601 .word cpu_xscale_reset
602 .word cpu_xscale_do_idle
603 .word cpu_xscale_dcache_clean_area
604 .word cpu_xscale_switch_mm
605 .word cpu_xscale_set_pte_ext
606 .word cpu_xscale_suspend_size
607 .word cpu_xscale_do_suspend
608 .word cpu_xscale_do_resume
609 .size xscale_processor_functions, . - xscale_processor_functions
610 592
611 .section ".rodata" 593 .section ".rodata"
612 594
595	string	cpu_arch_name, "armv5te"
596	string	cpu_elf_name, "v5"
597
598	string	cpu_80200_A0_A1_name, "XScale-80200 A0/A1"
599	string	cpu_80200_name, "XScale-80200"
600	string	cpu_80219_name, "XScale-80219"
601	string	cpu_8032x_name, "XScale-IOP8032x Family"
602	string	cpu_8033x_name, "XScale-IOP8033x Family"
603	string	cpu_pxa250_name, "XScale-PXA250"
604	string	cpu_pxa210_name, "XScale-PXA210"
605	string	cpu_ixp42x_name, "XScale-IXP42x Family"
606	string	cpu_ixp43x_name, "XScale-IXP43x Family"
607	string	cpu_ixp46x_name, "XScale-IXP46x Family"
608	string	cpu_ixp2400_name, "XScale-IXP2400"
609	string	cpu_ixp2800_name, "XScale-IXP2800"
610	string	cpu_pxa255_name, "XScale-PXA255"
611	string	cpu_pxa270_name, "XScale-PXA270"
613	.type	cpu_arch_name, #object
614cpu_arch_name:
615	.asciz	"armv5te"
616	.size	cpu_arch_name, . - cpu_arch_name
617
618	.type	cpu_elf_name, #object
619cpu_elf_name:
620	.asciz	"v5"
621	.size	cpu_elf_name, . - cpu_elf_name
622
623	.type	cpu_80200_A0_A1_name, #object
624cpu_80200_A0_A1_name:
625	.asciz	"XScale-80200 A0/A1"
626	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
627
628	.type	cpu_80200_name, #object
629cpu_80200_name:
630 .asciz "XScale-80200"
631 .size cpu_80200_name, . - cpu_80200_name
632
633 .type cpu_80219_name, #object
634cpu_80219_name:
635 .asciz "XScale-80219"
636 .size cpu_80219_name, . - cpu_80219_name
637
638 .type cpu_8032x_name, #object
639cpu_8032x_name:
640 .asciz "XScale-IOP8032x Family"
641 .size cpu_8032x_name, . - cpu_8032x_name
642
643 .type cpu_8033x_name, #object
644cpu_8033x_name:
645 .asciz "XScale-IOP8033x Family"
646 .size cpu_8033x_name, . - cpu_8033x_name
647
648 .type cpu_pxa250_name, #object
649cpu_pxa250_name:
650 .asciz "XScale-PXA250"
651 .size cpu_pxa250_name, . - cpu_pxa250_name
652
653 .type cpu_pxa210_name, #object
654cpu_pxa210_name:
655 .asciz "XScale-PXA210"
656 .size cpu_pxa210_name, . - cpu_pxa210_name
657
658 .type cpu_ixp42x_name, #object
659cpu_ixp42x_name:
660 .asciz "XScale-IXP42x Family"
661 .size cpu_ixp42x_name, . - cpu_ixp42x_name
662
663 .type cpu_ixp43x_name, #object
664cpu_ixp43x_name:
665 .asciz "XScale-IXP43x Family"
666 .size cpu_ixp43x_name, . - cpu_ixp43x_name
667
668 .type cpu_ixp46x_name, #object
669cpu_ixp46x_name:
670 .asciz "XScale-IXP46x Family"
671 .size cpu_ixp46x_name, . - cpu_ixp46x_name
672
673 .type cpu_ixp2400_name, #object
674cpu_ixp2400_name:
675 .asciz "XScale-IXP2400"
676 .size cpu_ixp2400_name, . - cpu_ixp2400_name
677
678 .type cpu_ixp2800_name, #object
679cpu_ixp2800_name:
680 .asciz "XScale-IXP2800"
681 .size cpu_ixp2800_name, . - cpu_ixp2800_name
682
683 .type cpu_pxa255_name, #object
684cpu_pxa255_name:
685 .asciz "XScale-PXA255"
686 .size cpu_pxa255_name, . - cpu_pxa255_name
687
688 .type cpu_pxa270_name, #object
689cpu_pxa270_name:
690 .asciz "XScale-PXA270"
691 .size cpu_pxa270_name, . - cpu_pxa270_name
692 612
693 .align 613 .align
694 614
695 .section ".proc.info.init", #alloc, #execinstr 615 .section ".proc.info.init", #alloc, #execinstr
696 616
617.macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
618	.type	__\name\()_proc_info,#object
619__\name\()_proc_info:
620	.long	\cpu_val
621	.long	\cpu_mask
622	.long	PMD_TYPE_SECT | \
697	.type	__80200_A0_A1_proc_info,#object
698__80200_A0_A1_proc_info:
699	.long	0x69052000
700	.long	0xfffffffe
701	.long	PMD_TYPE_SECT | \
702		PMD_SECT_BUFFERABLE | \
703 PMD_SECT_CACHEABLE | \
704 PMD_SECT_AP_WRITE | \
705 PMD_SECT_AP_READ
706 .long PMD_TYPE_SECT | \
707 PMD_SECT_AP_WRITE | \
708 PMD_SECT_AP_READ
709 b __xscale_setup
710 .long cpu_arch_name
711 .long cpu_elf_name
712 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
713 .long cpu_80200_name
714 .long xscale_processor_functions
715 .long v4wbi_tlb_fns
716 .long xscale_mc_user_fns
717 .long xscale_80200_A0_A1_cache_fns
718 .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
719
720 .type __80200_proc_info,#object
721__80200_proc_info:
722 .long 0x69052000
723 .long 0xfffffff0
724 .long PMD_TYPE_SECT | \
725 PMD_SECT_BUFFERABLE | \ 623 PMD_SECT_BUFFERABLE | \
726 PMD_SECT_CACHEABLE | \ 624 PMD_SECT_CACHEABLE | \
727 PMD_SECT_AP_WRITE | \ 625 PMD_SECT_AP_WRITE | \
728 PMD_SECT_AP_READ 626 PMD_SECT_AP_READ
729 .long PMD_TYPE_SECT | \ 627 .long PMD_TYPE_SECT | \
730 PMD_SECT_AP_WRITE | \ 628 PMD_SECT_AP_WRITE | \
731 PMD_SECT_AP_READ 629 PMD_SECT_AP_READ
732 b __xscale_setup 630 b __xscale_setup
733 .long cpu_arch_name 631 .long cpu_arch_name
734 .long cpu_elf_name 632 .long cpu_elf_name
735 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP 633 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
736	.long	cpu_80200_name
634	.long	\cpu_name
737 .long xscale_processor_functions 635 .long xscale_processor_functions
738 .long v4wbi_tlb_fns 636 .long v4wbi_tlb_fns
739 .long xscale_mc_user_fns 637 .long xscale_mc_user_fns
638	.ifb \cache
639	.long	xscale_cache_fns
640	.else
641	.long	\cache
642	.endif
643	.size	__\name\()_proc_info, . - __\name\()_proc_info
644.endm
645
646	xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \
647		cache=xscale_80200_A0_A1_cache_fns
648	xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name
649	xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name
650	xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name
651	xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name
652	xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name
653	xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name
654	xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name
655	xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name
656	xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name
657	xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name
658	xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name
659	xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name
660	xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name
740	.long	xscale_cache_fns
741	.size	__80200_proc_info, . - __80200_proc_info
742
743	.type	__80219_proc_info,#object
744__80219_proc_info:
745	.long	0x69052e20
746	.long	0xffffffe0
747	.long	PMD_TYPE_SECT | \
748		PMD_SECT_BUFFERABLE | \
749		PMD_SECT_CACHEABLE | \
750		PMD_SECT_AP_WRITE | \
751		PMD_SECT_AP_READ
752	.long	PMD_TYPE_SECT | \
753		PMD_SECT_AP_WRITE | \
754		PMD_SECT_AP_READ
755	b	__xscale_setup
756	.long	cpu_arch_name
757	.long	cpu_elf_name
758	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
759	.long	cpu_80219_name
760	.long	xscale_processor_functions
761	.long	v4wbi_tlb_fns
762	.long	xscale_mc_user_fns
763 .long xscale_cache_fns
764 .size __80219_proc_info, . - __80219_proc_info
765
766 .type __8032x_proc_info,#object
767__8032x_proc_info:
768 .long 0x69052420
769 .long 0xfffff7e0
770 .long PMD_TYPE_SECT | \
771 PMD_SECT_BUFFERABLE | \
772 PMD_SECT_CACHEABLE | \
773 PMD_SECT_AP_WRITE | \
774 PMD_SECT_AP_READ
775 .long PMD_TYPE_SECT | \
776 PMD_SECT_AP_WRITE | \
777 PMD_SECT_AP_READ
778 b __xscale_setup
779 .long cpu_arch_name
780 .long cpu_elf_name
781 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
782 .long cpu_8032x_name
783 .long xscale_processor_functions
784 .long v4wbi_tlb_fns
785 .long xscale_mc_user_fns
786 .long xscale_cache_fns
787 .size __8032x_proc_info, . - __8032x_proc_info
788
789 .type __8033x_proc_info,#object
790__8033x_proc_info:
791 .long 0x69054010
792 .long 0xfffffd30
793 .long PMD_TYPE_SECT | \
794 PMD_SECT_BUFFERABLE | \
795 PMD_SECT_CACHEABLE | \
796 PMD_SECT_AP_WRITE | \
797 PMD_SECT_AP_READ
798 .long PMD_TYPE_SECT | \
799 PMD_SECT_AP_WRITE | \
800 PMD_SECT_AP_READ
801 b __xscale_setup
802 .long cpu_arch_name
803 .long cpu_elf_name
804 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
805 .long cpu_8033x_name
806 .long xscale_processor_functions
807 .long v4wbi_tlb_fns
808 .long xscale_mc_user_fns
809 .long xscale_cache_fns
810 .size __8033x_proc_info, . - __8033x_proc_info
811
812 .type __pxa250_proc_info,#object
813__pxa250_proc_info:
814 .long 0x69052100
815 .long 0xfffff7f0
816 .long PMD_TYPE_SECT | \
817 PMD_SECT_BUFFERABLE | \
818 PMD_SECT_CACHEABLE | \
819 PMD_SECT_AP_WRITE | \
820 PMD_SECT_AP_READ
821 .long PMD_TYPE_SECT | \
822 PMD_SECT_AP_WRITE | \
823 PMD_SECT_AP_READ
824 b __xscale_setup
825 .long cpu_arch_name
826 .long cpu_elf_name
827 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
828 .long cpu_pxa250_name
829 .long xscale_processor_functions
830 .long v4wbi_tlb_fns
831 .long xscale_mc_user_fns
832 .long xscale_cache_fns
833 .size __pxa250_proc_info, . - __pxa250_proc_info
834
835 .type __pxa210_proc_info,#object
836__pxa210_proc_info:
837 .long 0x69052120
838 .long 0xfffff3f0
839 .long PMD_TYPE_SECT | \
840 PMD_SECT_BUFFERABLE | \
841 PMD_SECT_CACHEABLE | \
842 PMD_SECT_AP_WRITE | \
843 PMD_SECT_AP_READ
844 .long PMD_TYPE_SECT | \
845 PMD_SECT_AP_WRITE | \
846 PMD_SECT_AP_READ
847 b __xscale_setup
848 .long cpu_arch_name
849 .long cpu_elf_name
850 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
851 .long cpu_pxa210_name
852 .long xscale_processor_functions
853 .long v4wbi_tlb_fns
854 .long xscale_mc_user_fns
855 .long xscale_cache_fns
856 .size __pxa210_proc_info, . - __pxa210_proc_info
857
858 .type __ixp2400_proc_info, #object
859__ixp2400_proc_info:
860 .long 0x69054190
861 .long 0xfffffff0
862 .long PMD_TYPE_SECT | \
863 PMD_SECT_BUFFERABLE | \
864 PMD_SECT_CACHEABLE | \
865 PMD_SECT_AP_WRITE | \
866 PMD_SECT_AP_READ
867 .long PMD_TYPE_SECT | \
868 PMD_SECT_AP_WRITE | \
869 PMD_SECT_AP_READ
870 b __xscale_setup
871 .long cpu_arch_name
872 .long cpu_elf_name
873 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
874 .long cpu_ixp2400_name
875 .long xscale_processor_functions
876 .long v4wbi_tlb_fns
877 .long xscale_mc_user_fns
878 .long xscale_cache_fns
879 .size __ixp2400_proc_info, . - __ixp2400_proc_info
880
881 .type __ixp2800_proc_info, #object
882__ixp2800_proc_info:
883 .long 0x690541a0
884 .long 0xfffffff0
885 .long PMD_TYPE_SECT | \
886 PMD_SECT_BUFFERABLE | \
887 PMD_SECT_CACHEABLE | \
888 PMD_SECT_AP_WRITE | \
889 PMD_SECT_AP_READ
890 .long PMD_TYPE_SECT | \
891 PMD_SECT_AP_WRITE | \
892 PMD_SECT_AP_READ
893 b __xscale_setup
894 .long cpu_arch_name
895 .long cpu_elf_name
896 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
897 .long cpu_ixp2800_name
898 .long xscale_processor_functions
899 .long v4wbi_tlb_fns
900 .long xscale_mc_user_fns
901 .long xscale_cache_fns
902 .size __ixp2800_proc_info, . - __ixp2800_proc_info
903
904 .type __ixp42x_proc_info, #object
905__ixp42x_proc_info:
906 .long 0x690541c0
907 .long 0xffffffc0
908 .long PMD_TYPE_SECT | \
909 PMD_SECT_BUFFERABLE | \
910 PMD_SECT_CACHEABLE | \
911 PMD_SECT_AP_WRITE | \
912 PMD_SECT_AP_READ
913 .long PMD_TYPE_SECT | \
914 PMD_SECT_AP_WRITE | \
915 PMD_SECT_AP_READ
916 b __xscale_setup
917 .long cpu_arch_name
918 .long cpu_elf_name
919 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
920 .long cpu_ixp42x_name
921 .long xscale_processor_functions
922 .long v4wbi_tlb_fns
923 .long xscale_mc_user_fns
924 .long xscale_cache_fns
925 .size __ixp42x_proc_info, . - __ixp42x_proc_info
926
927 .type __ixp43x_proc_info, #object
928__ixp43x_proc_info:
929 .long 0x69054040
930 .long 0xfffffff0
931 .long PMD_TYPE_SECT | \
932 PMD_SECT_BUFFERABLE | \
933 PMD_SECT_CACHEABLE | \
934 PMD_SECT_AP_WRITE | \
935 PMD_SECT_AP_READ
936 .long PMD_TYPE_SECT | \
937 PMD_SECT_AP_WRITE | \
938 PMD_SECT_AP_READ
939 b __xscale_setup
940 .long cpu_arch_name
941 .long cpu_elf_name
942 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
943 .long cpu_ixp43x_name
944 .long xscale_processor_functions
945 .long v4wbi_tlb_fns
946 .long xscale_mc_user_fns
947 .long xscale_cache_fns
948 .size __ixp43x_proc_info, . - __ixp43x_proc_info
949
950 .type __ixp46x_proc_info, #object
951__ixp46x_proc_info:
952 .long 0x69054200
953 .long 0xffffff00
954 .long PMD_TYPE_SECT | \
955 PMD_SECT_BUFFERABLE | \
956 PMD_SECT_CACHEABLE | \
957 PMD_SECT_AP_WRITE | \
958 PMD_SECT_AP_READ
959 .long PMD_TYPE_SECT | \
960 PMD_SECT_AP_WRITE | \
961 PMD_SECT_AP_READ
962 b __xscale_setup
963 .long cpu_arch_name
964 .long cpu_elf_name
965 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
966 .long cpu_ixp46x_name
967 .long xscale_processor_functions
968 .long v4wbi_tlb_fns
969 .long xscale_mc_user_fns
970 .long xscale_cache_fns
971 .size __ixp46x_proc_info, . - __ixp46x_proc_info
972
973 .type __pxa255_proc_info,#object
974__pxa255_proc_info:
975 .long 0x69052d00
976 .long 0xfffffff0
977 .long PMD_TYPE_SECT | \
978 PMD_SECT_BUFFERABLE | \
979 PMD_SECT_CACHEABLE | \
980 PMD_SECT_AP_WRITE | \
981 PMD_SECT_AP_READ
982 .long PMD_TYPE_SECT | \
983 PMD_SECT_AP_WRITE | \
984 PMD_SECT_AP_READ
985 b __xscale_setup
986 .long cpu_arch_name
987 .long cpu_elf_name
988 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
989 .long cpu_pxa255_name
990 .long xscale_processor_functions
991 .long v4wbi_tlb_fns
992 .long xscale_mc_user_fns
993 .long xscale_cache_fns
994 .size __pxa255_proc_info, . - __pxa255_proc_info
995
996 .type __pxa270_proc_info,#object
997__pxa270_proc_info:
998 .long 0x69054110
999 .long 0xfffffff0
1000 .long PMD_TYPE_SECT | \
1001 PMD_SECT_BUFFERABLE | \
1002 PMD_SECT_CACHEABLE | \
1003 PMD_SECT_AP_WRITE | \
1004 PMD_SECT_AP_READ
1005 .long PMD_TYPE_SECT | \
1006 PMD_SECT_AP_WRITE | \
1007 PMD_SECT_AP_READ
1008 b __xscale_setup
1009 .long cpu_arch_name
1010 .long cpu_elf_name
1011 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
1012 .long cpu_pxa270_name
1013 .long xscale_processor_functions
1014 .long v4wbi_tlb_fns
1015 .long xscale_mc_user_fns
1016 .long xscale_cache_fns
1017 .size __pxa270_proc_info, . - __pxa270_proc_info
1018
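To make the a0_alias indirection above concrete: a single invocation such as "a0_alias flush_icache_all" expands to roughly

	@ expansion of the a0_alias macro shown in this hunk, for basename = flush_icache_all
	.globl	xscale_80200_A0_A1_flush_icache_all
	.type	xscale_80200_A0_A1_flush_icache_all , %function
	.equ	xscale_80200_A0_A1_flush_icache_all , xscale_flush_icache_all

so the 80200 A0/A1 table built by define_cache_functions ends up reusing the generic xscale routines for every slot except dma_map_area, which keeps the dedicated xscale_80200_A0_A1_dma_map_area wrapper renamed earlier in this file.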
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index d887a31faaae..d3ddcf9a76ca 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -65,9 +65,5 @@ ENTRY(fa_flush_kern_tlb_range)
65 65
66 __INITDATA 66 __INITDATA
67 67
68	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
69	define_tlb_functions fa, fa_tlb_flags
68	.type	fa_tlb_fns, #object
69ENTRY(fa_tlb_fns)
70 .long fa_flush_user_tlb_range
71 .long fa_flush_kern_tlb_range
72 .long fa_tlb_flags
73 .size fa_tlb_fns, . - fa_tlb_fns
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S
index c10786ec8e0a..d253995ec4ca 100644
--- a/arch/arm/mm/tlb-v3.S
+++ b/arch/arm/mm/tlb-v3.S
@@ -44,9 +44,5 @@ ENTRY(v3_flush_kern_tlb_range)
44 44
45 __INITDATA 45 __INITDATA
46 46
47	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
48	define_tlb_functions v3, v3_tlb_flags
47	.type	v3_tlb_fns, #object
48ENTRY(v3_tlb_fns)
49 .long v3_flush_user_tlb_range
50 .long v3_flush_kern_tlb_range
51 .long v3_tlb_flags
52 .size v3_tlb_fns, . - v3_tlb_fns
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S
index d6c94457c2b9..17a025ade573 100644
--- a/arch/arm/mm/tlb-v4.S
+++ b/arch/arm/mm/tlb-v4.S
@@ -57,9 +57,5 @@ ENTRY(v4_flush_user_tlb_range)
57 57
58 __INITDATA 58 __INITDATA
59 59
60	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
61	define_tlb_functions v4, v4_tlb_flags
60	.type	v4_tlb_fns, #object
61ENTRY(v4_tlb_fns)
62 .long v4_flush_user_tlb_range
63 .long v4_flush_kern_tlb_range
64 .long v4_tlb_flags
65 .size v4_tlb_fns, . - v4_tlb_fns
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S
index cb829ca7845d..c04598fa4d4a 100644
--- a/arch/arm/mm/tlb-v4wb.S
+++ b/arch/arm/mm/tlb-v4wb.S
@@ -69,9 +69,5 @@ ENTRY(v4wb_flush_kern_tlb_range)
69 69
70 __INITDATA 70 __INITDATA
71 71
72	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
73	define_tlb_functions v4wb, v4wb_tlb_flags
72	.type	v4wb_tlb_fns, #object
73ENTRY(v4wb_tlb_fns)
74 .long v4wb_flush_user_tlb_range
75 .long v4wb_flush_kern_tlb_range
76 .long v4wb_tlb_flags
77 .size v4wb_tlb_fns, . - v4wb_tlb_fns
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S
index 60cfc4a25dd5..1f6062b6c1c1 100644
--- a/arch/arm/mm/tlb-v4wbi.S
+++ b/arch/arm/mm/tlb-v4wbi.S
@@ -60,9 +60,5 @@ ENTRY(v4wbi_flush_kern_tlb_range)
60 60
61 __INITDATA 61 __INITDATA
62 62
63	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
64	define_tlb_functions v4wbi, v4wbi_tlb_flags
63	.type	v4wbi_tlb_fns, #object
64ENTRY(v4wbi_tlb_fns)
65 .long v4wbi_flush_user_tlb_range
66 .long v4wbi_flush_kern_tlb_range
67 .long v4wbi_tlb_flags
68 .size v4wbi_tlb_fns, . - v4wbi_tlb_fns
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index ffe06a69a6e5..eca07f550a0b 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -88,9 +88,5 @@ ENTRY(v6wbi_flush_kern_tlb_range)
88 88
89 __INIT 89 __INIT
90 90
91	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
92	define_tlb_functions v6wbi, v6wbi_tlb_flags
91	.type	v6wbi_tlb_fns, #object
92ENTRY(v6wbi_tlb_fns)
93 .long v6wbi_flush_user_tlb_range
94 .long v6wbi_flush_kern_tlb_range
95 .long v6wbi_tlb_flags
96 .size v6wbi_tlb_fns, . - v6wbi_tlb_fns
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 86bb71664508..845f461f8ec1 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -79,10 +79,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
79 79
80 __INIT 80 __INIT
81 81
82	/* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */
83	define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp
82	.type	v7wbi_tlb_fns, #object
83ENTRY(v7wbi_tlb_fns)
84 .long v7wbi_flush_user_tlb_range
85 .long v7wbi_flush_kern_tlb_range
86 ALT_SMP(.long v7wbi_tlb_flags_smp)
87 ALT_UP(.long v7wbi_tlb_flags_up)
88 .size v7wbi_tlb_fns, . - v7wbi_tlb_fns
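define_tlb_functions is also supplied by proc-macros.S; based on the tables removed in these tlb-*.S files, a sketch that covers both the plain and the ALT_SMP/ALT_UP cases might look like:

	@ sketch only: reconstructed from the removed tlb_fns tables
	.macro	define_tlb_functions name:req, flags_up:req, flags_smp
	.type	\name\()_tlb_fns, #object
ENTRY(\name\()_tlb_fns)
	.long	\name\()_flush_user_tlb_range
	.long	\name\()_flush_kern_tlb_range
	.ifnb	\flags_smp
	ALT_SMP(.long	\flags_smp)
	ALT_UP(.long	\flags_up)
	.else
	.long	\flags_up
	.endif
	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
	.endm

which reproduces both the simple v4-style tables and the v7wbi case, where ALT_SMP/ALT_UP selects the flags word according to whether the SMP kernel is running on an SMP or a UP system.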
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 0a96f71f0abd..79bcb4316930 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -597,7 +597,6 @@ static int __init vfp_init(void)
597 elf_hwcap |= HWCAP_VFPv3D16; 597 elf_hwcap |= HWCAP_VFPv3D16;
598 } 598 }
599#endif 599#endif
600#ifdef CONFIG_NEON
601 /* 600 /*
602 * Check for the presence of the Advanced SIMD 601 * Check for the presence of the Advanced SIMD
603 * load/store instructions, integer and single 602 * load/store instructions, integer and single
@@ -605,10 +604,13 @@ static int __init vfp_init(void)
605 * for NEON if the hardware has the MVFR registers. 604 * for NEON if the hardware has the MVFR registers.
606 */ 605 */
607 if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { 606 if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
607#ifdef CONFIG_NEON
608 if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) 608 if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
609 elf_hwcap |= HWCAP_NEON; 609 elf_hwcap |= HWCAP_NEON;
610 }
611#endif 610#endif
611 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
612 elf_hwcap |= HWCAP_VFPv4;
613 }
612 } 614 }
613 return 0; 615 return 0;
614} 616}