Diffstat (limited to 'arch')
206 files changed, 6855 insertions, 5753 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e04fa9d7637c..1478c6171b00 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -10,7 +10,7 @@ config ARM | |||
10 | select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) | 10 | select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) |
11 | select HAVE_OPROFILE if (HAVE_PERF_EVENTS) | 11 | select HAVE_OPROFILE if (HAVE_PERF_EVENTS) |
12 | select HAVE_ARCH_KGDB | 12 | select HAVE_ARCH_KGDB |
13 | select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL) | 13 | select HAVE_KPROBES if !XIP_KERNEL |
14 | select HAVE_KRETPROBES if (HAVE_KPROBES) | 14 | select HAVE_KRETPROBES if (HAVE_KPROBES) |
15 | select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) | 15 | select HAVE_FUNCTION_TRACER if (!XIP_KERNEL) |
16 | select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) | 16 | select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL) |
@@ -37,6 +37,9 @@ config ARM | |||
37 | Europe. There is an ARM Linux project with a web page at | 37 | Europe. There is an ARM Linux project with a web page at |
38 | <http://www.arm.linux.org.uk/>. | 38 | <http://www.arm.linux.org.uk/>. |
39 | 39 | ||
40 | config ARM_HAS_SG_CHAIN | ||
41 | bool | ||
42 | |||
40 | config HAVE_PWM | 43 | config HAVE_PWM |
41 | bool | 44 | bool |
42 | 45 | ||
@@ -1347,7 +1350,6 @@ config SMP_ON_UP | |||
1347 | 1350 | ||
1348 | config HAVE_ARM_SCU | 1351 | config HAVE_ARM_SCU |
1349 | bool | 1352 | bool |
1350 | depends on SMP | ||
1351 | help | 1353 | help |
1352 | This option enables support for the ARM system coherency unit | 1354 | This option enables support for the ARM system coherency unit |
1353 | 1355 | ||
@@ -1716,17 +1718,34 @@ config ZBOOT_ROM | |||
1716 | Say Y here if you intend to execute your compressed kernel image | 1718 | Say Y here if you intend to execute your compressed kernel image |
1717 | (zImage) directly from ROM or flash. If unsure, say N. | 1719 | (zImage) directly from ROM or flash. If unsure, say N. |
1718 | 1720 | ||
1721 | choice | ||
1722 | prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)" | ||
1723 | depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL | ||
1724 | default ZBOOT_ROM_NONE | ||
1725 | help | ||
1726 | Include experimental SD/MMC loading code in the ROM-able zImage. | ||
1727 | With this enabled it is possible to write the ROM-able zImage | ||
1728 | kernel image to an MMC or SD card and boot the kernel straight | ||
1729 | from the reset vector. At reset the processor Mask ROM will load | ||
1730 | the first part of the ROM-able zImage which in turn loads the | ||
1731 | rest of the kernel image to RAM. | ||
1732 | |||
1733 | config ZBOOT_ROM_NONE | ||
1734 | bool "No SD/MMC loader in zImage (EXPERIMENTAL)" | ||
1735 | help | ||
1736 | Do not load image from SD or MMC | ||
1737 | |||
1719 | config ZBOOT_ROM_MMCIF | 1738 | config ZBOOT_ROM_MMCIF |
1720 | bool "Include MMCIF loader in zImage (EXPERIMENTAL)" | 1739 | bool "Include MMCIF loader in zImage (EXPERIMENTAL)" |
1721 | depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL | ||
1722 | help | 1740 | help |
1723 | Say Y here to include experimental MMCIF loading code in the | 1741 | Load image from MMCIF hardware block. |
1724 | ROM-able zImage. With this enabled it is possible to write the | 1742 | |
1725 | the ROM-able zImage kernel image to an MMC card and boot the | 1743 | config ZBOOT_ROM_SH_MOBILE_SDHI |
1726 | kernel straight from the reset vector. At reset the processor | 1744 | bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)" |
1727 | Mask ROM will load the first part of the the ROM-able zImage | 1745 | help |
1728 | which in turn loads the rest the kernel image to RAM using the | 1746 | Load image from SDHI hardware block |
1729 | MMCIF hardware block. | 1747 | |
1748 | endchoice | ||
1730 | 1749 | ||
1731 | config CMDLINE | 1750 | config CMDLINE |
1732 | string "Default kernel command string" | 1751 | string "Default kernel command string" |
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 23aad0722303..0c74a6fab952 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -6,13 +6,19 @@ | |||
6 | 6 | ||
7 | OBJS = | 7 | OBJS = |
8 | 8 | ||
9 | # Ensure that mmcif loader code appears early in the image | 9 | # Ensure that MMCIF loader code appears early in the image |
10 | # to minimise the number of blocks that have to be read in | 10 | # to minimise the number of blocks that have to be read in |
11 | # order to load it. | 11 | # order to load it. |
12 | ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y) | 12 | ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y) |
13 | ifeq ($(CONFIG_ARCH_SH7372),y) | ||
14 | OBJS += mmcif-sh7372.o | 13 | OBJS += mmcif-sh7372.o |
15 | endif | 14 | endif |
15 | |||
16 | # Ensure that SDHI loader code appears early in the image | ||
17 | # to minimise the number of blocks that have to be read in | ||
18 | # order to load it. | ||
19 | ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y) | ||
20 | OBJS += sdhi-shmobile.o | ||
21 | OBJS += sdhi-sh7372.o | ||
16 | endif | 22 | endif |
17 | 23 | ||
18 | AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) | 24 | AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) |
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S index c943d2e7da9d..fe3719b516fd 100644 --- a/arch/arm/boot/compressed/head-shmobile.S +++ b/arch/arm/boot/compressed/head-shmobile.S | |||
@@ -25,14 +25,14 @@ | |||
25 | /* load board-specific initialization code */ | 25 | /* load board-specific initialization code */ |
26 | #include <mach/zboot.h> | 26 | #include <mach/zboot.h> |
27 | 27 | ||
28 | #ifdef CONFIG_ZBOOT_ROM_MMCIF | 28 | #if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI) |
29 | /* Load image from MMC */ | 29 | /* Load image from MMC/SD */ |
30 | adr sp, __tmp_stack + 128 | 30 | adr sp, __tmp_stack + 256 |
31 | ldr r0, __image_start | 31 | ldr r0, __image_start |
32 | ldr r1, __image_end | 32 | ldr r1, __image_end |
33 | subs r1, r1, r0 | 33 | subs r1, r1, r0 |
34 | ldr r0, __load_base | 34 | ldr r0, __load_base |
35 | bl mmcif_loader | 35 | bl mmc_loader |
36 | 36 | ||
37 | /* Jump to loaded code */ | 37 | /* Jump to loaded code */ |
38 | ldr r0, __loaded | 38 | ldr r0, __loaded |
@@ -51,9 +51,9 @@ __loaded: | |||
51 | .long __continue | 51 | .long __continue |
52 | .align | 52 | .align |
53 | __tmp_stack: | 53 | __tmp_stack: |
54 | .space 128 | 54 | .space 256 |
55 | __continue: | 55 | __continue: |
56 | #endif /* CONFIG_ZBOOT_ROM_MMCIF */ | 56 | #endif /* CONFIG_ZBOOT_ROM_MMCIF || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */ |
57 | 57 | ||
58 | b 1f | 58 | b 1f |
59 | __atags:@ tag #1 | 59 | __atags:@ tag #1 |
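The head-shmobile.S hunk above replaces the MMCIF-specific mmcif_loader call with a generic mmc_loader entry point, so the same assembly stub can hand off to either the MMCIF or the SDHI back end, whichever the ZBOOT_ROM choice selects. A minimal C sketch of that contract follows; it is an illustration only, not part of the patch, and note that the two back ends in this series declare the buffer pointer with different types (unsigned char * for MMCIF, unsigned short * for SDHI).

    /* Contract assumed by head-shmobile.S (illustrative sketch only):
     * exactly one back end is linked in and provides mmc_loader().
     * The stub passes the load base in r0 and the image size in r1. */
    #include <linux/linkage.h>

    /* buf = destination load base, len = bytes of ROM-able zImage to copy */
    asmlinkage void mmc_loader(unsigned char *buf, unsigned long len);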
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 940b20178107..e95a5989602a 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -353,7 +353,8 @@ not_relocated: mov r0, #0 | |||
353 | mov r0, #0 @ must be zero | 353 | mov r0, #0 @ must be zero |
354 | mov r1, r7 @ restore architecture number | 354 | mov r1, r7 @ restore architecture number |
355 | mov r2, r8 @ restore atags pointer | 355 | mov r2, r8 @ restore atags pointer |
356 | mov pc, r4 @ call kernel | 356 | ARM( mov pc, r4 ) @ call kernel |
357 | THUMB( bx r4 ) @ entry point is always ARM | ||
357 | 358 | ||
358 | .align 2 | 359 | .align 2 |
359 | .type LC0, #object | 360 | .type LC0, #object |
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c index 7453c8337b83..b6f61d9a5a1b 100644 --- a/arch/arm/boot/compressed/mmcif-sh7372.c +++ b/arch/arm/boot/compressed/mmcif-sh7372.c | |||
@@ -40,7 +40,7 @@ | |||
40 | * to an MMC card | 40 | * to an MMC card |
41 | * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1 | 41 | * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1 |
42 | */ | 42 | */ |
43 | asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len) | 43 | asmlinkage void mmc_loader(unsigned char *buf, unsigned long len) |
44 | { | 44 | { |
45 | mmc_init_progress(); | 45 | mmc_init_progress(); |
46 | mmc_update_progress(MMC_PROGRESS_ENTER); | 46 | mmc_update_progress(MMC_PROGRESS_ENTER); |
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c new file mode 100644 index 000000000000..d403a8b24d7f --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-sh7372.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * SuperH Mobile SDHI | ||
3 | * | ||
4 | * Copyright (C) 2010 Magnus Damm | ||
5 | * Copyright (C) 2010 Kuninori Morimoto | ||
6 | * Copyright (C) 2010 Simon Horman | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | * Parts inspired by u-boot | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <mach/mmc.h> | ||
17 | #include <linux/mmc/boot.h> | ||
18 | #include <linux/mmc/tmio.h> | ||
19 | |||
20 | #include "sdhi-shmobile.h" | ||
21 | |||
22 | #define PORT179CR 0xe60520b3 | ||
23 | #define PORT180CR 0xe60520b4 | ||
24 | #define PORT181CR 0xe60520b5 | ||
25 | #define PORT182CR 0xe60520b6 | ||
26 | #define PORT183CR 0xe60520b7 | ||
27 | #define PORT184CR 0xe60520b8 | ||
28 | |||
29 | #define SMSTPCR3 0xe615013c | ||
30 | |||
31 | #define CR_INPUT_ENABLE 0x10 | ||
32 | #define CR_FUNCTION1 0x01 | ||
33 | |||
34 | #define SDHI1_BASE (void __iomem *)0xe6860000 | ||
35 | #define SDHI_BASE SDHI1_BASE | ||
36 | |||
37 | /* SuperH Mobile SDHI loader | ||
38 | * | ||
39 | * loads the zImage from an SD card starting from block 0 | ||
40 | * on physical partition 1 | ||
41 | * | ||
42 | * The image must start with a vrl4 header and | ||
43 | * the zImage must start at offset 512 of the image. That is, | ||
44 | * at block 1 (=byte 512) of physical partition 1 | ||
45 | * | ||
46 | * Use the following line to write the vrl4 formatted zImage | ||
47 | * to an SD card | ||
48 | * # dd if=vrl4.out of=/dev/sdx bs=512 | ||
49 | */ | ||
50 | asmlinkage void mmc_loader(unsigned short *buf, unsigned long len) | ||
51 | { | ||
52 | int high_capacity; | ||
53 | |||
54 | mmc_init_progress(); | ||
55 | |||
56 | mmc_update_progress(MMC_PROGRESS_ENTER); | ||
57 | /* Initialise SDHI1 */ | ||
58 | /* PORT184CR: GPIO_FN_SDHICMD1 Control */ | ||
59 | __raw_writeb(CR_FUNCTION1, PORT184CR); | ||
60 | /* PORT179CR: GPIO_FN_SDHICLK1 Control */ | ||
61 | __raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR); | ||
62 | /* PORT183CR: GPIO_FN_SDHID1_3 Control */ | ||
63 | __raw_writeb(CR_FUNCTION1, PORT183CR); | ||
64 | /* PORT182CR: GPIO_FN_SDHID1_2 Control */ | ||
65 | __raw_writeb(CR_FUNCTION1, PORT182CR); | ||
66 | /* PORT181CR: GPIO_FN_SDHID1_1 Control */ | ||
67 | __raw_writeb(CR_FUNCTION1, PORT181CR); | ||
68 | /* PORT180CR: GPIO_FN_SDHID1_0 Control */ | ||
69 | __raw_writeb(CR_FUNCTION1, PORT180CR); | ||
70 | |||
71 | /* Enable clock to SDHI1 hardware block */ | ||
72 | __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3); | ||
73 | |||
74 | /* setup SDHI hardware */ | ||
75 | mmc_update_progress(MMC_PROGRESS_INIT); | ||
76 | high_capacity = sdhi_boot_init(SDHI_BASE); | ||
77 | if (high_capacity < 0) | ||
78 | goto err; | ||
79 | |||
80 | mmc_update_progress(MMC_PROGRESS_LOAD); | ||
81 | /* load kernel */ | ||
82 | if (sdhi_boot_do_read(SDHI_BASE, high_capacity, | ||
83 | 0, /* Kernel is at block 1 */ | ||
84 | (len + TMIO_BBS - 1) / TMIO_BBS, buf)) | ||
85 | goto err; | ||
86 | |||
87 | /* Disable clock to SDHI1 hardware block */ | ||
88 | __raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3); | ||
89 | |||
90 | mmc_update_progress(MMC_PROGRESS_DONE); | ||
91 | |||
92 | return; | ||
93 | err: | ||
94 | for(;;); | ||
95 | } | ||
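The loader above gates the SDHI1 interface clock through bit 13 of SMSTPCR3, the module-stop control register: clearing the bit supplies the clock and setting it stops the clock again, which is why the tear-down path ORs the bit back in rather than ANDing with it. A small stand-alone sketch of that read-modify-write pattern is shown below; the MSTP_SDHI1 name is chosen here for clarity and is not taken from the patch.

    /* Illustration of the SMSTPCR module-stop pattern used above
     * (bit set = module clock stopped). */
    #include <linux/io.h>

    #define SMSTPCR3_ADDR   ((void __iomem *)0xe615013c)
    #define MSTP_SDHI1      (1 << 13)

    static void sdhi1_clk_enable(void)
    {
            /* clear the module-stop bit: clock runs */
            __raw_writel(__raw_readl(SMSTPCR3_ADDR) & ~MSTP_SDHI1, SMSTPCR3_ADDR);
    }

    static void sdhi1_clk_disable(void)
    {
            /* set the module-stop bit: clock stops */
            __raw_writel(__raw_readl(SMSTPCR3_ADDR) | MSTP_SDHI1, SMSTPCR3_ADDR);
    }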
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c new file mode 100644 index 000000000000..bd3d46980955 --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * SuperH Mobile SDHI | ||
3 | * | ||
4 | * Copyright (C) 2010 Magnus Damm | ||
5 | * Copyright (C) 2010 Kuninori Morimoto | ||
6 | * Copyright (C) 2010 Simon Horman | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | * Parts inspired by u-boot | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <linux/mmc/host.h> | ||
17 | #include <linux/mmc/core.h> | ||
18 | #include <linux/mmc/mmc.h> | ||
19 | #include <linux/mmc/sd.h> | ||
20 | #include <linux/mmc/tmio.h> | ||
21 | #include <mach/sdhi.h> | ||
22 | |||
23 | #define OCR_FASTBOOT (1<<29) | ||
24 | #define OCR_HCS (1<<30) | ||
25 | #define OCR_BUSY (1<<31) | ||
26 | |||
27 | #define RESP_CMD12 0x00000030 | ||
28 | |||
29 | static inline u16 sd_ctrl_read16(void __iomem *base, int addr) | ||
30 | { | ||
31 | return __raw_readw(base + addr); | ||
32 | } | ||
33 | |||
34 | static inline u32 sd_ctrl_read32(void __iomem *base, int addr) | ||
35 | { | ||
36 | return __raw_readw(base + addr) | | ||
37 | __raw_readw(base + addr + 2) << 16; | ||
38 | } | ||
39 | |||
40 | static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val) | ||
41 | { | ||
42 | __raw_writew(val, base + addr); | ||
43 | } | ||
44 | |||
45 | static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val) | ||
46 | { | ||
47 | __raw_writew(val, base + addr); | ||
48 | __raw_writew(val >> 16, base + addr + 2); | ||
49 | } | ||
50 | |||
51 | #define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL | \ | ||
52 | TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT | \ | ||
53 | TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN | \ | ||
54 | TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS | \ | ||
55 | TMIO_STAT_ILL_FUNC) | ||
56 | |||
57 | static int sdhi_intr(void __iomem *base) | ||
58 | { | ||
59 | unsigned long state = sd_ctrl_read32(base, CTL_STATUS); | ||
60 | |||
61 | if (state & ALL_ERROR) { | ||
62 | sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR); | ||
63 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
64 | ALL_ERROR | | ||
65 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
66 | return -EINVAL; | ||
67 | } | ||
68 | if (state & TMIO_STAT_CMDRESPEND) { | ||
69 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); | ||
70 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
71 | TMIO_STAT_CMDRESPEND | | ||
72 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
73 | return 0; | ||
74 | } | ||
75 | if (state & TMIO_STAT_RXRDY) { | ||
76 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY); | ||
77 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
78 | TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN | | ||
79 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
80 | return 0; | ||
81 | } | ||
82 | if (state & TMIO_STAT_DATAEND) { | ||
83 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND); | ||
84 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
85 | TMIO_STAT_DATAEND | | ||
86 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | return -EAGAIN; | ||
91 | } | ||
92 | |||
93 | static int sdhi_boot_wait_resp_end(void __iomem *base) | ||
94 | { | ||
95 | int err = -EAGAIN, timeout = 10000000; | ||
96 | |||
97 | while (timeout--) { | ||
98 | err = sdhi_intr(base); | ||
99 | if (err != -EAGAIN) | ||
100 | break; | ||
101 | udelay(1); | ||
102 | } | ||
103 | |||
104 | return err; | ||
105 | } | ||
106 | |||
107 | /* SDHI_CLK_CTRL */ | ||
108 | #define CLK_MMC_ENABLE (1 << 8) | ||
109 | #define CLK_MMC_INIT (1 << 6) /* clk / 256 */ | ||
110 | |||
111 | static void sdhi_boot_mmc_clk_stop(void __iomem *base) | ||
112 | { | ||
113 | sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
114 | msleep(10); | ||
115 | sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE & | ||
116 | sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); | ||
117 | msleep(10); | ||
118 | } | ||
119 | |||
120 | static void sdhi_boot_mmc_clk_start(void __iomem *base) | ||
121 | { | ||
122 | sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE | | ||
123 | sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); | ||
124 | msleep(10); | ||
125 | sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE); | ||
126 | msleep(10); | ||
127 | } | ||
128 | |||
129 | static void sdhi_boot_reset(void __iomem *base) | ||
130 | { | ||
131 | sd_ctrl_write16(base, CTL_RESET_SD, 0x0000); | ||
132 | msleep(10); | ||
133 | sd_ctrl_write16(base, CTL_RESET_SD, 0x0001); | ||
134 | msleep(10); | ||
135 | } | ||
136 | |||
137 | /* Set MMC clock / power. | ||
138 | * Note: This controller uses a simple divider scheme therefore it cannot | ||
139 | * run an MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | ||
140 | * MMC won't run that fast, it has to be clocked at 12MHz which is the next | ||
141 | * slowest setting. | ||
142 | */ | ||
143 | static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios) | ||
144 | { | ||
145 | if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY) | ||
146 | return -EBUSY; | ||
147 | |||
148 | if (ios->clock) | ||
149 | sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, | ||
150 | ios->clock | CLK_MMC_ENABLE); | ||
151 | |||
152 | /* Power sequence - OFF -> ON -> UP */ | ||
153 | switch (ios->power_mode) { | ||
154 | case MMC_POWER_OFF: /* power down SD bus */ | ||
155 | sdhi_boot_mmc_clk_stop(base); | ||
156 | break; | ||
157 | case MMC_POWER_ON: /* power up SD bus */ | ||
158 | break; | ||
159 | case MMC_POWER_UP: /* start bus clock */ | ||
160 | sdhi_boot_mmc_clk_start(base); | ||
161 | break; | ||
162 | } | ||
163 | |||
164 | switch (ios->bus_width) { | ||
165 | case MMC_BUS_WIDTH_1: | ||
166 | sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
167 | break; | ||
168 | case MMC_BUS_WIDTH_4: | ||
169 | sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
170 | break; | ||
171 | } | ||
172 | |||
173 | /* Let things settle. Delay taken from the WinCE driver */ | ||
174 | udelay(140); | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
180 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
181 | #define RESP_NONE 0x0300 | ||
182 | #define RESP_R1 0x0400 | ||
183 | #define RESP_R1B 0x0500 | ||
184 | #define RESP_R2 0x0600 | ||
185 | #define RESP_R3 0x0700 | ||
186 | #define DATA_PRESENT 0x0800 | ||
187 | #define TRANSFER_READ 0x1000 | ||
188 | |||
189 | static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd) | ||
190 | { | ||
191 | int err, c = cmd->opcode; | ||
192 | |||
193 | switch (mmc_resp_type(cmd)) { | ||
194 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
195 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
196 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
197 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
198 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
199 | default: | ||
200 | return -EINVAL; | ||
201 | } | ||
202 | |||
203 | /* No interrupts so this may not be cleared */ | ||
204 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); | ||
205 | |||
206 | sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND | | ||
207 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
208 | sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg); | ||
209 | sd_ctrl_write16(base, CTL_SD_CMD, c); | ||
210 | |||
211 | |||
212 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
213 | ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) & | ||
214 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
215 | |||
216 | err = sdhi_boot_wait_resp_end(base); | ||
217 | if (err) | ||
218 | return err; | ||
219 | |||
220 | cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity, | ||
226 | unsigned long block, unsigned short *buf) | ||
227 | { | ||
228 | int err, i; | ||
229 | |||
230 | /* CMD17 - Read */ | ||
231 | { | ||
232 | struct mmc_command cmd; | ||
233 | |||
234 | cmd.opcode = MMC_READ_SINGLE_BLOCK | \ | ||
235 | TRANSFER_READ | DATA_PRESENT; | ||
236 | if (high_capacity) | ||
237 | cmd.arg = block; | ||
238 | else | ||
239 | cmd.arg = block * TMIO_BBS; | ||
240 | cmd.flags = MMC_RSP_R1; | ||
241 | err = sdhi_boot_request(base, &cmd); | ||
242 | if (err) | ||
243 | return err; | ||
244 | } | ||
245 | |||
246 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
247 | ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY | | ||
248 | TMIO_STAT_TXUNDERRUN) & | ||
249 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
250 | err = sdhi_boot_wait_resp_end(base); | ||
251 | if (err) | ||
252 | return err; | ||
253 | |||
254 | sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS); | ||
255 | for (i = 0; i < TMIO_BBS / sizeof(*buf); i++) | ||
256 | *buf++ = sd_ctrl_read16(base, RESP_CMD12); | ||
257 | |||
258 | err = sdhi_boot_wait_resp_end(base); | ||
259 | if (err) | ||
260 | return err; | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | int sdhi_boot_do_read(void __iomem *base, int high_capacity, | ||
266 | unsigned long offset, unsigned short count, | ||
267 | unsigned short *buf) | ||
268 | { | ||
269 | unsigned long i; | ||
270 | int err = 0; | ||
271 | |||
272 | for (i = 0; i < count; i++) { | ||
273 | err = sdhi_boot_do_read_single(base, high_capacity, offset + i, | ||
274 | buf + (i * TMIO_BBS / | ||
275 | sizeof(*buf))); | ||
276 | if (err) | ||
277 | return err; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | #define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34) | ||
284 | |||
285 | int sdhi_boot_init(void __iomem *base) | ||
286 | { | ||
287 | bool sd_v2 = false, sd_v1_0 = false; | ||
288 | unsigned short cid; | ||
289 | int err, high_capacity = 0; | ||
290 | |||
291 | sdhi_boot_mmc_clk_stop(base); | ||
292 | sdhi_boot_reset(base); | ||
293 | |||
294 | /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */ | ||
295 | { | ||
296 | struct mmc_ios ios; | ||
297 | ios.power_mode = MMC_POWER_ON; | ||
298 | ios.bus_width = MMC_BUS_WIDTH_1; | ||
299 | ios.clock = CLK_MMC_INIT; | ||
300 | err = sdhi_boot_mmc_set_ios(base, &ios); | ||
301 | if (err) | ||
302 | return err; | ||
303 | } | ||
304 | |||
305 | /* CMD0 */ | ||
306 | { | ||
307 | struct mmc_command cmd; | ||
308 | msleep(1); | ||
309 | cmd.opcode = MMC_GO_IDLE_STATE; | ||
310 | cmd.arg = 0; | ||
311 | cmd.flags = MMC_RSP_NONE; | ||
312 | err = sdhi_boot_request(base, &cmd); | ||
313 | if (err) | ||
314 | return err; | ||
315 | msleep(2); | ||
316 | } | ||
317 | |||
318 | /* CMD8 - Test for SD version 2 */ | ||
319 | { | ||
320 | struct mmc_command cmd; | ||
321 | cmd.opcode = SD_SEND_IF_COND; | ||
322 | cmd.arg = (VOLTAGES != 0) << 8 | 0xaa; | ||
323 | cmd.flags = MMC_RSP_R1; | ||
324 | err = sdhi_boot_request(base, &cmd); /* Ignore error */ | ||
325 | if ((cmd.resp[0] & 0xff) == 0xaa) | ||
326 | sd_v2 = true; | ||
327 | } | ||
328 | |||
329 | /* CMD55 - Get OCR (SD) */ | ||
330 | { | ||
331 | int timeout = 1000; | ||
332 | struct mmc_command cmd; | ||
333 | |||
334 | cmd.arg = 0; | ||
335 | |||
336 | do { | ||
337 | cmd.opcode = MMC_APP_CMD; | ||
338 | cmd.flags = MMC_RSP_R1; | ||
339 | cmd.arg = 0; | ||
340 | err = sdhi_boot_request(base, &cmd); | ||
341 | if (err) | ||
342 | break; | ||
343 | |||
344 | cmd.opcode = SD_APP_OP_COND; | ||
345 | cmd.flags = MMC_RSP_R3; | ||
346 | cmd.arg = (VOLTAGES & 0xff8000); | ||
347 | if (sd_v2) | ||
348 | cmd.arg |= OCR_HCS; | ||
349 | cmd.arg |= OCR_FASTBOOT; | ||
350 | err = sdhi_boot_request(base, &cmd); | ||
351 | if (err) | ||
352 | break; | ||
353 | |||
354 | msleep(1); | ||
355 | } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); | ||
356 | |||
357 | if (!err && timeout) { | ||
358 | if (!sd_v2) | ||
359 | sd_v1_0 = true; | ||
360 | high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | /* CMD1 - Get OCR (MMC) */ | ||
365 | if (!sd_v2 && !sd_v1_0) { | ||
366 | int timeout = 1000; | ||
367 | struct mmc_command cmd; | ||
368 | |||
369 | do { | ||
370 | cmd.opcode = MMC_SEND_OP_COND; | ||
371 | cmd.arg = VOLTAGES | OCR_HCS; | ||
372 | cmd.flags = MMC_RSP_R3; | ||
373 | err = sdhi_boot_request(base, &cmd); | ||
374 | if (err) | ||
375 | return err; | ||
376 | |||
377 | msleep(1); | ||
378 | } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); | ||
379 | |||
380 | if (!timeout) | ||
381 | return -EAGAIN; | ||
382 | |||
383 | high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; | ||
384 | } | ||
385 | |||
386 | /* CMD2 - Get CID */ | ||
387 | { | ||
388 | struct mmc_command cmd; | ||
389 | cmd.opcode = MMC_ALL_SEND_CID; | ||
390 | cmd.arg = 0; | ||
391 | cmd.flags = MMC_RSP_R2; | ||
392 | err = sdhi_boot_request(base, &cmd); | ||
393 | if (err) | ||
394 | return err; | ||
395 | } | ||
396 | |||
397 | /* CMD3 | ||
398 | * MMC: Set the relative address | ||
399 | * SD: Get the relative address | ||
400 | * Also puts the card into the standby state | ||
401 | */ | ||
402 | { | ||
403 | struct mmc_command cmd; | ||
404 | cmd.opcode = MMC_SET_RELATIVE_ADDR; | ||
405 | cmd.arg = 0; | ||
406 | cmd.flags = MMC_RSP_R1; | ||
407 | err = sdhi_boot_request(base, &cmd); | ||
408 | if (err) | ||
409 | return err; | ||
410 | cid = cmd.resp[0] >> 16; | ||
411 | } | ||
412 | |||
413 | /* CMD9 - Get CSD */ | ||
414 | { | ||
415 | struct mmc_command cmd; | ||
416 | cmd.opcode = MMC_SEND_CSD; | ||
417 | cmd.arg = cid << 16; | ||
418 | cmd.flags = MMC_RSP_R2; | ||
419 | err = sdhi_boot_request(base, &cmd); | ||
420 | if (err) | ||
421 | return err; | ||
422 | } | ||
423 | |||
424 | /* CMD7 - Select the card */ | ||
425 | { | ||
426 | struct mmc_command cmd; | ||
427 | cmd.opcode = MMC_SELECT_CARD; | ||
428 | //cmd.arg = rca << 16; | ||
429 | cmd.arg = cid << 16; | ||
430 | //cmd.flags = MMC_RSP_R1B; | ||
431 | cmd.flags = MMC_RSP_R1; | ||
432 | err = sdhi_boot_request(base, &cmd); | ||
433 | if (err) | ||
434 | return err; | ||
435 | } | ||
436 | |||
437 | /* CMD16 - Set the block size */ | ||
438 | { | ||
439 | struct mmc_command cmd; | ||
440 | cmd.opcode = MMC_SET_BLOCKLEN; | ||
441 | cmd.arg = TMIO_BBS; | ||
442 | cmd.flags = MMC_RSP_R1; | ||
443 | err = sdhi_boot_request(base, &cmd); | ||
444 | if (err) | ||
445 | return err; | ||
446 | } | ||
447 | |||
448 | return high_capacity; | ||
449 | } | ||
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h new file mode 100644 index 000000000000..92eaa09f985e --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef SDHI_MOBILE_H | ||
2 | #define SDHI_MOBILE_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | |||
6 | int sdhi_boot_do_read(void __iomem *base, int high_capacity, | ||
7 | unsigned long offset, unsigned short count, | ||
8 | unsigned short *buf); | ||
9 | int sdhi_boot_init(void __iomem *base); | ||
10 | |||
11 | #endif | ||
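sdhi-shmobile.h above is the whole interface between the generic TMIO/SDHI boot code and the SoC-specific loader: sdhi_boot_init() brings the card up and reports whether it uses high-capacity (block) addressing, and sdhi_boot_do_read() then pulls in whole TMIO_BBS-sized blocks. A hedged sketch of a minimal caller, modelled on the sdhi-sh7372.c loader earlier in this diff, is given below; the function name and parameters are illustrative.

    /* Minimal caller of the sdhi-shmobile boot API (illustrative only). */
    #include <linux/mmc/tmio.h>
    #include "sdhi-shmobile.h"

    static int load_zimage(void __iomem *sdhi_base, unsigned short *dest,
                           unsigned long bytes)
    {
            int high_capacity = sdhi_boot_init(sdhi_base);

            if (high_capacity < 0)
                    return high_capacity;

            /* read enough TMIO_BBS-sized blocks to cover the image */
            return sdhi_boot_do_read(sdhi_base, high_capacity, 0,
                                     (bytes + TMIO_BBS - 1) / TMIO_BBS, dest);
    }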
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index ea80abe78844..4e728834a1b9 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in | |||
@@ -33,20 +33,24 @@ SECTIONS | |||
33 | *(.text.*) | 33 | *(.text.*) |
34 | *(.fixup) | 34 | *(.fixup) |
35 | *(.gnu.warning) | 35 | *(.gnu.warning) |
36 | *(.glue_7t) | ||
37 | *(.glue_7) | ||
38 | } | ||
39 | .rodata : { | ||
36 | *(.rodata) | 40 | *(.rodata) |
37 | *(.rodata.*) | 41 | *(.rodata.*) |
38 | *(.glue_7) | 42 | } |
39 | *(.glue_7t) | 43 | .piggydata : { |
40 | *(.piggydata) | 44 | *(.piggydata) |
41 | . = ALIGN(4); | ||
42 | } | 45 | } |
43 | 46 | ||
47 | . = ALIGN(4); | ||
44 | _etext = .; | 48 | _etext = .; |
45 | 49 | ||
50 | .got.plt : { *(.got.plt) } | ||
46 | _got_start = .; | 51 | _got_start = .; |
47 | .got : { *(.got) } | 52 | .got : { *(.got) } |
48 | _got_end = .; | 53 | _got_end = .; |
49 | .got.plt : { *(.got.plt) } | ||
50 | _edata = .; | 54 | _edata = .; |
51 | 55 | ||
52 | . = BSS_START; | 56 | . = BSS_START; |
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index 841df7d21c2f..595ecd290ebf 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -79,6 +79,8 @@ struct dmabounce_device_info { | |||
79 | struct dmabounce_pool large; | 79 | struct dmabounce_pool large; |
80 | 80 | ||
81 | rwlock_t lock; | 81 | rwlock_t lock; |
82 | |||
83 | int (*needs_bounce)(struct device *, dma_addr_t, size_t); | ||
82 | }; | 84 | }; |
83 | 85 | ||
84 | #ifdef STATS | 86 | #ifdef STATS |
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev, | |||
210 | if (!dev || !dev->archdata.dmabounce) | 212 | if (!dev || !dev->archdata.dmabounce) |
211 | return NULL; | 213 | return NULL; |
212 | if (dma_mapping_error(dev, dma_addr)) { | 214 | if (dma_mapping_error(dev, dma_addr)) { |
213 | if (dev) | 215 | dev_err(dev, "Trying to %s invalid mapping\n", where); |
214 | dev_err(dev, "Trying to %s invalid mapping\n", where); | ||
215 | else | ||
216 | pr_err("unknown device: Trying to %s invalid mapping\n", where); | ||
217 | return NULL; | 216 | return NULL; |
218 | } | 217 | } |
219 | return find_safe_buffer(dev->archdata.dmabounce, dma_addr); | 218 | return find_safe_buffer(dev->archdata.dmabounce, dma_addr); |
220 | } | 219 | } |
221 | 220 | ||
222 | static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, | 221 | static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) |
223 | enum dma_data_direction dir) | ||
224 | { | 222 | { |
225 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; | 223 | if (!dev || !dev->archdata.dmabounce) |
226 | dma_addr_t dma_addr; | 224 | return 0; |
227 | int needs_bounce = 0; | ||
228 | |||
229 | if (device_info) | ||
230 | DO_STATS ( device_info->map_op_count++ ); | ||
231 | |||
232 | dma_addr = virt_to_dma(dev, ptr); | ||
233 | 225 | ||
234 | if (dev->dma_mask) { | 226 | if (dev->dma_mask) { |
235 | unsigned long mask = *dev->dma_mask; | 227 | unsigned long limit, mask = *dev->dma_mask; |
236 | unsigned long limit; | ||
237 | 228 | ||
238 | limit = (mask + 1) & ~mask; | 229 | limit = (mask + 1) & ~mask; |
239 | if (limit && size > limit) { | 230 | if (limit && size > limit) { |
240 | dev_err(dev, "DMA mapping too big (requested %#x " | 231 | dev_err(dev, "DMA mapping too big (requested %#x " |
241 | "mask %#Lx)\n", size, *dev->dma_mask); | 232 | "mask %#Lx)\n", size, *dev->dma_mask); |
242 | return ~0; | 233 | return -E2BIG; |
243 | } | 234 | } |
244 | 235 | ||
245 | /* | 236 | /* Figure out if we need to bounce from the DMA mask. */ |
246 | * Figure out if we need to bounce from the DMA mask. | 237 | if ((dma_addr | (dma_addr + size - 1)) & ~mask) |
247 | */ | 238 | return 1; |
248 | needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; | ||
249 | } | 239 | } |
250 | 240 | ||
251 | if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { | 241 | return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); |
252 | struct safe_buffer *buf; | 242 | } |
253 | 243 | ||
254 | buf = alloc_safe_buffer(device_info, ptr, size, dir); | 244 | static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, |
255 | if (buf == 0) { | 245 | enum dma_data_direction dir) |
256 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", | 246 | { |
257 | __func__, ptr); | 247 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; |
258 | return ~0; | 248 | struct safe_buffer *buf; |
259 | } | ||
260 | 249 | ||
261 | dev_dbg(dev, | 250 | if (device_info) |
262 | "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 251 | DO_STATS ( device_info->map_op_count++ ); |
263 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | ||
264 | buf->safe, buf->safe_dma_addr); | ||
265 | 252 | ||
266 | if ((dir == DMA_TO_DEVICE) || | 253 | buf = alloc_safe_buffer(device_info, ptr, size, dir); |
267 | (dir == DMA_BIDIRECTIONAL)) { | 254 | if (buf == NULL) { |
268 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", | 255 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", |
269 | __func__, ptr, buf->safe, size); | 256 | __func__, ptr); |
270 | memcpy(buf->safe, ptr, size); | 257 | return ~0; |
271 | } | 258 | } |
272 | ptr = buf->safe; | ||
273 | 259 | ||
274 | dma_addr = buf->safe_dma_addr; | 260 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", |
275 | } else { | 261 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), |
276 | /* | 262 | buf->safe, buf->safe_dma_addr); |
277 | * We don't need to sync the DMA buffer since | 263 | |
278 | * it was allocated via the coherent allocators. | 264 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { |
279 | */ | 265 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", |
280 | __dma_single_cpu_to_dev(ptr, size, dir); | 266 | __func__, ptr, buf->safe, size); |
267 | memcpy(buf->safe, ptr, size); | ||
281 | } | 268 | } |
282 | 269 | ||
283 | return dma_addr; | 270 | return buf->safe_dma_addr; |
284 | } | 271 | } |
285 | 272 | ||
286 | static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, | 273 | static inline void unmap_single(struct device *dev, struct safe_buffer *buf, |
287 | size_t size, enum dma_data_direction dir) | 274 | size_t size, enum dma_data_direction dir) |
288 | { | 275 | { |
289 | struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap"); | 276 | BUG_ON(buf->size != size); |
290 | 277 | BUG_ON(buf->direction != dir); | |
291 | if (buf) { | ||
292 | BUG_ON(buf->size != size); | ||
293 | BUG_ON(buf->direction != dir); | ||
294 | 278 | ||
295 | dev_dbg(dev, | 279 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", |
296 | "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 280 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), |
297 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 281 | buf->safe, buf->safe_dma_addr); |
298 | buf->safe, buf->safe_dma_addr); | ||
299 | 282 | ||
300 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 283 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
301 | 284 | ||
302 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { | 285 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { |
303 | void *ptr = buf->ptr; | 286 | void *ptr = buf->ptr; |
304 | 287 | ||
305 | dev_dbg(dev, | 288 | dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", |
306 | "%s: copy back safe %p to unsafe %p size %d\n", | 289 | __func__, buf->safe, ptr, size); |
307 | __func__, buf->safe, ptr, size); | 290 | memcpy(ptr, buf->safe, size); |
308 | memcpy(ptr, buf->safe, size); | ||
309 | 291 | ||
310 | /* | 292 | /* |
311 | * Since we may have written to a page cache page, | 293 | * Since we may have written to a page cache page, |
312 | * we need to ensure that the data will be coherent | 294 | * we need to ensure that the data will be coherent |
313 | * with user mappings. | 295 | * with user mappings. |
314 | */ | 296 | */ |
315 | __cpuc_flush_dcache_area(ptr, size); | 297 | __cpuc_flush_dcache_area(ptr, size); |
316 | } | ||
317 | free_safe_buffer(dev->archdata.dmabounce, buf); | ||
318 | } else { | ||
319 | __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); | ||
320 | } | 298 | } |
299 | free_safe_buffer(dev->archdata.dmabounce, buf); | ||
321 | } | 300 | } |
322 | 301 | ||
323 | /* ************************************************** */ | 302 | /* ************************************************** */ |
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
328 | * substitute the safe buffer for the unsafe one. | 307 | * substitute the safe buffer for the unsafe one. |
329 | * (basically move the buffer from an unsafe area to a safe one) | 308 | * (basically move the buffer from an unsafe area to a safe one) |
330 | */ | 309 | */ |
331 | dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size, | ||
332 | enum dma_data_direction dir) | ||
333 | { | ||
334 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | ||
335 | __func__, ptr, size, dir); | ||
336 | |||
337 | BUG_ON(!valid_dma_direction(dir)); | ||
338 | |||
339 | return map_single(dev, ptr, size, dir); | ||
340 | } | ||
341 | EXPORT_SYMBOL(__dma_map_single); | ||
342 | |||
343 | /* | ||
344 | * see if a mapped address was really a "safe" buffer and if so, copy | ||
345 | * the data from the safe buffer back to the unsafe buffer and free up | ||
346 | * the safe buffer. (basically return things back to the way they | ||
347 | * should be) | ||
348 | */ | ||
349 | void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
350 | enum dma_data_direction dir) | ||
351 | { | ||
352 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | ||
353 | __func__, (void *) dma_addr, size, dir); | ||
354 | |||
355 | unmap_single(dev, dma_addr, size, dir); | ||
356 | } | ||
357 | EXPORT_SYMBOL(__dma_unmap_single); | ||
358 | |||
359 | dma_addr_t __dma_map_page(struct device *dev, struct page *page, | 310 | dma_addr_t __dma_map_page(struct device *dev, struct page *page, |
360 | unsigned long offset, size_t size, enum dma_data_direction dir) | 311 | unsigned long offset, size_t size, enum dma_data_direction dir) |
361 | { | 312 | { |
313 | dma_addr_t dma_addr; | ||
314 | int ret; | ||
315 | |||
362 | dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", | 316 | dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", |
363 | __func__, page, offset, size, dir); | 317 | __func__, page, offset, size, dir); |
364 | 318 | ||
365 | BUG_ON(!valid_dma_direction(dir)); | 319 | dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; |
320 | |||
321 | ret = needs_bounce(dev, dma_addr, size); | ||
322 | if (ret < 0) | ||
323 | return ~0; | ||
324 | |||
325 | if (ret == 0) { | ||
326 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
327 | return dma_addr; | ||
328 | } | ||
366 | 329 | ||
367 | if (PageHighMem(page)) { | 330 | if (PageHighMem(page)) { |
368 | dev_err(dev, "DMA buffer bouncing of HIGHMEM pages " | 331 | dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); |
369 | "is not supported\n"); | ||
370 | return ~0; | 332 | return ~0; |
371 | } | 333 | } |
372 | 334 | ||
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page); | |||
383 | void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | 345 | void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
384 | enum dma_data_direction dir) | 346 | enum dma_data_direction dir) |
385 | { | 347 | { |
386 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | 348 | struct safe_buffer *buf; |
387 | __func__, (void *) dma_addr, size, dir); | 349 | |
350 | dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", | ||
351 | __func__, dma_addr, size, dir); | ||
352 | |||
353 | buf = find_safe_buffer_dev(dev, dma_addr, __func__); | ||
354 | if (!buf) { | ||
355 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), | ||
356 | dma_addr & ~PAGE_MASK, size, dir); | ||
357 | return; | ||
358 | } | ||
388 | 359 | ||
389 | unmap_single(dev, dma_addr, size, dir); | 360 | unmap_single(dev, buf, size, dir); |
390 | } | 361 | } |
391 | EXPORT_SYMBOL(__dma_unmap_page); | 362 | EXPORT_SYMBOL(__dma_unmap_page); |
392 | 363 | ||
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, | |||
461 | } | 432 | } |
462 | 433 | ||
463 | int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | 434 | int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, |
464 | unsigned long large_buffer_size) | 435 | unsigned long large_buffer_size, |
436 | int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) | ||
465 | { | 437 | { |
466 | struct dmabounce_device_info *device_info; | 438 | struct dmabounce_device_info *device_info; |
467 | int ret; | 439 | int ret; |
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | |||
497 | device_info->dev = dev; | 469 | device_info->dev = dev; |
498 | INIT_LIST_HEAD(&device_info->safe_buffers); | 470 | INIT_LIST_HEAD(&device_info->safe_buffers); |
499 | rwlock_init(&device_info->lock); | 471 | rwlock_init(&device_info->lock); |
472 | device_info->needs_bounce = needs_bounce_fn; | ||
500 | 473 | ||
501 | #ifdef STATS | 474 | #ifdef STATS |
502 | device_info->total_allocs = 0; | 475 | device_info->total_allocs = 0; |
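With this change dmabounce no longer relies on a single global dma_needs_bounce() hook; each caller of dmabounce_register_dev() now passes its own predicate, which the core stores in device_info->needs_bounce and consults on every mapping. A hedged sketch of how a platform registers under the new signature, modelled on the it8152 and sa1111 conversions further down in this diff, follows; the names are hypothetical.

    /* Illustrative registration against the new dmabounce interface. */
    static int my_bus_needs_bounce(struct device *dev, dma_addr_t addr,
                                   size_t size)
    {
            /* bounce anything outside the first 64MB of RAM */
            return (addr + size - PHYS_OFFSET) >= SZ_64M;
    }

    static int my_bus_platform_notify(struct device *dev)
    {
            /* 2048/4096 byte pools, as in the it8152 conversion below */
            return dmabounce_register_dev(dev, 2048, 4096,
                                          my_bus_needs_bounce);
    }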
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 4ddd0a6ac7ff..7bdd91766d65 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
179 | { | 179 | { |
180 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 180 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
181 | unsigned int shift = (d->irq % 4) * 8; | 181 | unsigned int shift = (d->irq % 4) * 8; |
182 | unsigned int cpu = cpumask_first(mask_val); | 182 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); |
183 | u32 val, mask, bit; | 183 | u32 val, mask, bit; |
184 | 184 | ||
185 | if (cpu >= 8) | 185 | if (cpu >= 8 || cpu >= nr_cpu_ids) |
186 | return -EINVAL; | 186 | return -EINVAL; |
187 | 187 | ||
188 | mask = 0xff << shift; | 188 | mask = 0xff << shift; |
189 | bit = 1 << (cpu + shift); | 189 | bit = 1 << (cpu + shift); |
190 | 190 | ||
191 | spin_lock(&irq_controller_lock); | 191 | spin_lock(&irq_controller_lock); |
192 | d->node = cpu; | ||
193 | val = readl_relaxed(reg) & ~mask; | 192 | val = readl_relaxed(reg) & ~mask; |
194 | writel_relaxed(val | bit, reg); | 193 | writel_relaxed(val | bit, reg); |
195 | spin_unlock(&irq_controller_lock); | 194 | spin_unlock(&irq_controller_lock); |
196 | 195 | ||
197 | return 0; | 196 | return IRQ_SET_MASK_OK; |
198 | } | 197 | } |
199 | #endif | 198 | #endif |
200 | 199 | ||
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 7a21927c52e1..14ad62e16dd1 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -243,6 +243,12 @@ static struct resource it8152_mem = { | |||
243 | * ITE8152 chip can address up to 64MByte, so all the devices | 243 | * ITE8152 chip can address up to 64MByte, so all the devices |
244 | * connected to ITE8152 (PCI and USB) should have limited DMA window | 244 | * connected to ITE8152 (PCI and USB) should have limited DMA window |
245 | */ | 245 | */ |
246 | static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
247 | { | ||
248 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
249 | __func__, dma_addr, size); | ||
250 | return (dma_addr + size - PHYS_OFFSET) >= SZ_64M; | ||
251 | } | ||
246 | 252 | ||
247 | /* | 253 | /* |
248 | * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all | 254 | * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all |
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev) | |||
254 | if (dev->dma_mask) | 260 | if (dev->dma_mask) |
255 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 261 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
256 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 262 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
257 | dmabounce_register_dev(dev, 2048, 4096); | 263 | dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce); |
258 | } | 264 | } |
259 | return 0; | 265 | return 0; |
260 | } | 266 | } |
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev) | |||
267 | return 0; | 273 | return 0; |
268 | } | 274 | } |
269 | 275 | ||
270 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
271 | { | ||
272 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
273 | __func__, dma_addr, size); | ||
274 | return (dev->bus == &pci_bus_type) && | ||
275 | ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); | ||
276 | } | ||
277 | |||
278 | int dma_set_coherent_mask(struct device *dev, u64 mask) | 276 | int dma_set_coherent_mask(struct device *dev, u64 mask) |
279 | { | 277 | { |
280 | if (mask >= PHYS_OFFSET + SZ_64M - 1) | 278 | if (mask >= PHYS_OFFSET + SZ_64M - 1) |
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 9c49a46a2b7a..0569de6acfba 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c | |||
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac, | |||
579 | 579 | ||
580 | sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; | 580 | sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; |
581 | } | 581 | } |
582 | #endif | ||
582 | 583 | ||
584 | #ifdef CONFIG_DMABOUNCE | ||
585 | /* | ||
586 | * According to the "Intel StrongARM SA-1111 Microprocessor Companion | ||
587 | * Chip Specification Update" (June 2000), erratum #7, there is a | ||
588 | * significant bug in the SA1111 SDRAM shared memory controller. If | ||
590 | * an access is made to a region of memory above 1MB relative to the bank base, | ||
590 | * it is important that address bit 10 _NOT_ be asserted. Depending | ||
591 | * on the configuration of the RAM, bit 10 may correspond to one | ||
592 | * of several different (processor-relative) address bits. | ||
593 | * | ||
594 | * This routine only identifies whether or not a given DMA address | ||
595 | * is susceptible to the bug. | ||
596 | * | ||
597 | * This should only get called for sa1111_device types due to the | ||
598 | * way we configure our device dma_masks. | ||
599 | */ | ||
600 | static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) | ||
601 | { | ||
602 | /* | ||
603 | * Section 4.6 of the "Intel StrongARM SA-1111 Development Module | ||
604 | * User's Guide" mentions that jumpers R51 and R52 control the | ||
605 | * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or | ||
606 | * SDRAM bank 1 on Neponset). The default configuration selects | ||
607 | * Assabet, so any address in bank 1 is necessarily invalid. | ||
608 | */ | ||
609 | return (machine_is_assabet() || machine_is_pfs168()) && | ||
610 | (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); | ||
611 | } | ||
583 | #endif | 612 | #endif |
584 | 613 | ||
585 | static void sa1111_dev_release(struct device *_dev) | 614 | static void sa1111_dev_release(struct device *_dev) |
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, | |||
644 | dev->dev.dma_mask = &dev->dma_mask; | 673 | dev->dev.dma_mask = &dev->dma_mask; |
645 | 674 | ||
646 | if (dev->dma_mask != 0xffffffffUL) { | 675 | if (dev->dma_mask != 0xffffffffUL) { |
647 | ret = dmabounce_register_dev(&dev->dev, 1024, 4096); | 676 | ret = dmabounce_register_dev(&dev->dev, 1024, 4096, |
677 | sa1111_needs_bounce); | ||
648 | if (ret) { | 678 | if (ret) { |
649 | dev_err(&dev->dev, "SA1111: Failed to register" | 679 | dev_err(&dev->dev, "SA1111: Failed to register" |
650 | " with dmabounce\n"); | 680 | " with dmabounce\n"); |
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip) | |||
818 | kfree(sachip); | 848 | kfree(sachip); |
819 | } | 849 | } |
820 | 850 | ||
821 | /* | ||
822 | * According to the "Intel StrongARM SA-1111 Microprocessor Companion | ||
823 | * Chip Specification Update" (June 2000), erratum #7, there is a | ||
824 | * significant bug in the SA1111 SDRAM shared memory controller. If | ||
825 | * an access to a region of memory above 1MB relative to the bank base, | ||
826 | * it is important that address bit 10 _NOT_ be asserted. Depending | ||
827 | * on the configuration of the RAM, bit 10 may correspond to one | ||
828 | * of several different (processor-relative) address bits. | ||
829 | * | ||
830 | * This routine only identifies whether or not a given DMA address | ||
831 | * is susceptible to the bug. | ||
832 | * | ||
833 | * This should only get called for sa1111_device types due to the | ||
834 | * way we configure our device dma_masks. | ||
835 | */ | ||
836 | int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) | ||
837 | { | ||
838 | /* | ||
839 | * Section 4.6 of the "Intel StrongARM SA-1111 Development Module | ||
840 | * User's Guide" mentions that jumpers R51 and R52 control the | ||
841 | * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or | ||
842 | * SDRAM bank 1 on Neponset). The default configuration selects | ||
843 | * Assabet, so any address in bank 1 is necessarily invalid. | ||
844 | */ | ||
845 | return ((machine_is_assabet() || machine_is_pfs168()) && | ||
846 | (addr >= 0xc8000000 || (addr + size) >= 0xc8000000)); | ||
847 | } | ||
848 | |||
849 | struct sa1111_save_data { | 851 | struct sa1111_save_data { |
850 | unsigned int skcr; | 852 | unsigned int skcr; |
851 | unsigned int skpcr; | 853 | unsigned int skpcr; |
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 65c3f2474f5e..29035e86a59d 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
@@ -293,4 +293,13 @@ | |||
293 | .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f | 293 | .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f |
294 | usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort | 294 | usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort |
295 | .endm | 295 | .endm |
296 | |||
297 | /* Utility macro for declaring string literals */ | ||
298 | .macro string name:req, string | ||
299 | .type \name , #object | ||
300 | \name: | ||
301 | .asciz "\string" | ||
302 | .size \name , . - \name | ||
303 | .endm | ||
304 | |||
296 | #endif /* __ASM_ASSEMBLER_H__ */ | 305 | #endif /* __ASM_ASSEMBLER_H__ */ |
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index b4892a06442c..f4280593dfa3 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h | |||
@@ -26,8 +26,8 @@ | |||
26 | #include <linux/compiler.h> | 26 | #include <linux/compiler.h> |
27 | #include <asm/system.h> | 27 | #include <asm/system.h> |
28 | 28 | ||
29 | #define smp_mb__before_clear_bit() mb() | 29 | #define smp_mb__before_clear_bit() smp_mb() |
30 | #define smp_mb__after_clear_bit() mb() | 30 | #define smp_mb__after_clear_bit() smp_mb() |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * These functions are the basis of our bit ops. | 33 | * These functions are the basis of our bit ops. |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4fff837363ed..7a21d0bf7134 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
115 | ___dma_page_dev_to_cpu(page, off, size, dir); | 115 | ___dma_page_dev_to_cpu(page, off, size, dir); |
116 | } | 116 | } |
117 | 117 | ||
118 | /* | 118 | extern int dma_supported(struct device *, u64); |
119 | * Return whether the given device DMA address mask can be supported | 119 | extern int dma_set_mask(struct device *, u64); |
120 | * properly. For example, if your device can only drive the low 24-bits | ||
121 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
122 | * to this function. | ||
123 | * | ||
124 | * FIXME: This should really be a platform specific issue - we should | ||
125 | * return false if GFP_DMA allocations may not satisfy the supplied 'mask'. | ||
126 | */ | ||
127 | static inline int dma_supported(struct device *dev, u64 mask) | ||
128 | { | ||
129 | if (mask < ISA_DMA_THRESHOLD) | ||
130 | return 0; | ||
131 | return 1; | ||
132 | } | ||
133 | |||
134 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||
135 | { | ||
136 | #ifdef CONFIG_DMABOUNCE | ||
137 | if (dev->archdata.dmabounce) { | ||
138 | if (dma_mask >= ISA_DMA_THRESHOLD) | ||
139 | return 0; | ||
140 | else | ||
141 | return -EIO; | ||
142 | } | ||
143 | #endif | ||
144 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
145 | return -EIO; | ||
146 | |||
147 | *dev->dma_mask = dma_mask; | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | 120 | ||
152 | /* | 121 | /* |
153 | * DMA errors are defined by all-bits-set in the DMA address. | 122 | * DMA errors are defined by all-bits-set in the DMA address. |
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *, | |||
256 | * @dev: valid struct device pointer | 225 | * @dev: valid struct device pointer |
257 | * @small_buf_size: size of buffers to use with small buffer pool | 226 | * @small_buf_size: size of buffers to use with small buffer pool |
258 | * @large_buf_size: size of buffers to use with large buffer pool (can be 0) | 227 | * @large_buf_size: size of buffers to use with large buffer pool (can be 0) |
228 | * @needs_bounce_fn: called to determine whether buffer needs bouncing | ||
259 | * | 229 | * |
260 | * This function should be called by low-level platform code to register | 230 | * This function should be called by low-level platform code to register |
261 | * a device as requiring DMA buffer bouncing. The function will allocate | 231 | * a device as requiring DMA buffer bouncing. The function will allocate |
262 | * appropriate DMA pools for the device. | 232 | * appropriate DMA pools for the device. |
263 | * | ||
264 | */ | 233 | */ |
265 | extern int dmabounce_register_dev(struct device *, unsigned long, | 234 | extern int dmabounce_register_dev(struct device *, unsigned long, |
266 | unsigned long); | 235 | unsigned long, int (*)(struct device *, dma_addr_t, size_t)); |
267 | 236 | ||
268 | /** | 237 | /** |
269 | * dmabounce_unregister_dev | 238 | * dmabounce_unregister_dev |
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long, | |||
277 | */ | 246 | */ |
278 | extern void dmabounce_unregister_dev(struct device *); | 247 | extern void dmabounce_unregister_dev(struct device *); |
279 | 248 | ||
280 | /** | ||
281 | * dma_needs_bounce | ||
282 | * | ||
283 | * @dev: valid struct device pointer | ||
284 | * @dma_handle: dma_handle of unbounced buffer | ||
285 | * @size: size of region being mapped | ||
286 | * | ||
287 | * Platforms that utilize the dmabounce mechanism must implement | ||
288 | * this function. | ||
289 | * | ||
290 | * The dmabounce routines call this function whenever a dma-mapping | ||
291 | * is requested to determine whether a given buffer needs to be bounced | ||
292 | * or not. The function must return 0 if the buffer is OK for | ||
293 | * DMA access and 1 if the buffer needs to be bounced. | ||
294 | * | ||
295 | */ | ||
296 | extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); | ||
297 | |||
298 | /* | 249 | /* |
299 | * The DMA API, implemented by dmabounce.c. See below for descriptions. | 250 | * The DMA API, implemented by dmabounce.c. See below for descriptions. |
300 | */ | 251 | */ |
301 | extern dma_addr_t __dma_map_single(struct device *, void *, size_t, | ||
302 | enum dma_data_direction); | ||
303 | extern void __dma_unmap_single(struct device *, dma_addr_t, size_t, | ||
304 | enum dma_data_direction); | ||
305 | extern dma_addr_t __dma_map_page(struct device *, struct page *, | 252 | extern dma_addr_t __dma_map_page(struct device *, struct page *, |
306 | unsigned long, size_t, enum dma_data_direction); | 253 | unsigned long, size_t, enum dma_data_direction); |
307 | extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, | 254 | extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, |
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, | |||
328 | } | 275 | } |
329 | 276 | ||
330 | 277 | ||
331 | static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr, | ||
332 | size_t size, enum dma_data_direction dir) | ||
333 | { | ||
334 | __dma_single_cpu_to_dev(cpu_addr, size, dir); | ||
335 | return virt_to_dma(dev, cpu_addr); | ||
336 | } | ||
337 | |||
338 | static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, | 278 | static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, |
339 | unsigned long offset, size_t size, enum dma_data_direction dir) | 279 | unsigned long offset, size_t size, enum dma_data_direction dir) |
340 | { | 280 | { |
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, | |||
342 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 282 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
343 | } | 283 | } |
344 | 284 | ||
345 | static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle, | ||
346 | size_t size, enum dma_data_direction dir) | ||
347 | { | ||
348 | __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); | ||
349 | } | ||
350 | |||
351 | static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, | 285 | static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, |
352 | size_t size, enum dma_data_direction dir) | 286 | size_t size, enum dma_data_direction dir) |
353 | { | 287 | { |
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, | |||
373 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | 307 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, |
374 | size_t size, enum dma_data_direction dir) | 308 | size_t size, enum dma_data_direction dir) |
375 | { | 309 | { |
310 | unsigned long offset; | ||
311 | struct page *page; | ||
376 | dma_addr_t addr; | 312 | dma_addr_t addr; |
377 | 313 | ||
314 | BUG_ON(!virt_addr_valid(cpu_addr)); | ||
315 | BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); | ||
378 | BUG_ON(!valid_dma_direction(dir)); | 316 | BUG_ON(!valid_dma_direction(dir)); |
379 | 317 | ||
380 | addr = __dma_map_single(dev, cpu_addr, size, dir); | 318 | page = virt_to_page(cpu_addr); |
381 | debug_dma_map_page(dev, virt_to_page(cpu_addr), | 319 | offset = (unsigned long)cpu_addr & ~PAGE_MASK; |
382 | (unsigned long)cpu_addr & ~PAGE_MASK, size, | 320 | addr = __dma_map_page(dev, page, offset, size, dir); |
383 | dir, addr, true); | 321 | debug_dma_map_page(dev, page, offset, size, dir, addr, true); |
384 | 322 | ||
385 | return addr; | 323 | return addr; |
386 | } | 324 | } |
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, | |||
430 | size_t size, enum dma_data_direction dir) | 368 | size_t size, enum dma_data_direction dir) |
431 | { | 369 | { |
432 | debug_dma_unmap_page(dev, handle, size, dir, true); | 370 | debug_dma_unmap_page(dev, handle, size, dir, true); |
433 | __dma_unmap_single(dev, handle, size, dir); | 371 | __dma_unmap_page(dev, handle, size, dir); |
434 | } | 372 | } |
435 | 373 | ||
436 | /** | 374 | /** |
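The hunks above drop the global dma_needs_bounce() hook and instead take the decision as a per-device callback, passed as the fourth argument to dmabounce_register_dev(). A minimal sketch of the new registration call follows, assuming hypothetical platform code and that these declarations remain in <asm/dma-mapping.h>; the device, the pool sizes and the 64MB limit are invented for illustration only.

#include <linux/device.h>
#include <asm/dma-mapping.h>

/* hypothetical check: bounce any mapping that ends above an assumed 64MB limit */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size - 1) >= 0x04000000;	/* 1 = bounce, 0 = usable as-is */
}

static int example_probe(struct device *dev)
{
	/* 512-byte "small" and 4096-byte "large" pool sizes are illustrative */
	return dmabounce_register_dev(dev, 512, 4096, example_needs_bounce);
}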
diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 42005542932b..628670e9d7c9 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h | |||
@@ -1,15 +1,16 @@ | |||
1 | #ifndef __ASM_ARM_DMA_H | 1 | #ifndef __ASM_ARM_DMA_H |
2 | #define __ASM_ARM_DMA_H | 2 | #define __ASM_ARM_DMA_H |
3 | 3 | ||
4 | #include <asm/memory.h> | ||
5 | |||
6 | /* | 4 | /* |
7 | * This is the maximum virtual address which can be DMA'd from. | 5 | * This is the maximum virtual address which can be DMA'd from. |
8 | */ | 6 | */ |
9 | #ifndef ARM_DMA_ZONE_SIZE | 7 | #ifndef CONFIG_ZONE_DMA |
10 | #define MAX_DMA_ADDRESS 0xffffffff | 8 | #define MAX_DMA_ADDRESS 0xffffffffUL |
11 | #else | 9 | #else |
12 | #define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE) | 10 | #define MAX_DMA_ADDRESS ({ \ |
11 | extern unsigned long arm_dma_zone_size; \ | ||
12 | arm_dma_zone_size ? \ | ||
13 | (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) | ||
13 | #endif | 14 | #endif |
14 | 15 | ||
15 | #ifdef CONFIG_ISA_DMA_API | 16 | #ifdef CONFIG_ISA_DMA_API |
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index 2da8547de6d6..2f1e2098dfe7 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S | |||
@@ -4,8 +4,8 @@ | |||
4 | * Interrupt handling. Preserves r7, r8, r9 | 4 | * Interrupt handling. Preserves r7, r8, r9 |
5 | */ | 5 | */ |
6 | .macro arch_irq_handler_default | 6 | .macro arch_irq_handler_default |
7 | get_irqnr_preamble r5, lr | 7 | get_irqnr_preamble r6, lr |
8 | 1: get_irqnr_and_base r0, r6, r5, lr | 8 | 1: get_irqnr_and_base r0, r2, r6, lr |
9 | movne r1, sp | 9 | movne r1, sp |
10 | @ | 10 | @ |
11 | @ routine called with r0 = irq number, r1 = struct pt_regs * | 11 | @ routine called with r0 = irq number, r1 = struct pt_regs * |
@@ -17,17 +17,17 @@ | |||
17 | /* | 17 | /* |
18 | * XXX | 18 | * XXX |
19 | * | 19 | * |
20 | * this macro assumes that irqstat (r6) and base (r5) are | 20 | * this macro assumes that irqstat (r2) and base (r6) are |
21 | * preserved from get_irqnr_and_base above | 21 | * preserved from get_irqnr_and_base above |
22 | */ | 22 | */ |
23 | ALT_SMP(test_for_ipi r0, r6, r5, lr) | 23 | ALT_SMP(test_for_ipi r0, r2, r6, lr) |
24 | ALT_UP_B(9997f) | 24 | ALT_UP_B(9997f) |
25 | movne r1, sp | 25 | movne r1, sp |
26 | adrne lr, BSYM(1b) | 26 | adrne lr, BSYM(1b) |
27 | bne do_IPI | 27 | bne do_IPI |
28 | 28 | ||
29 | #ifdef CONFIG_LOCAL_TIMERS | 29 | #ifdef CONFIG_LOCAL_TIMERS |
30 | test_for_ltirq r0, r6, r5, lr | 30 | test_for_ltirq r0, r2, r6, lr |
31 | movne r0, sp | 31 | movne r0, sp |
32 | adrne lr, BSYM(1b) | 32 | adrne lr, BSYM(1b) |
33 | bne do_local_timer | 33 | bne do_local_timer |
@@ -40,7 +40,7 @@ | |||
40 | .align 5 | 40 | .align 5 |
41 | .global \symbol_name | 41 | .global \symbol_name |
42 | \symbol_name: | 42 | \symbol_name: |
43 | mov r4, lr | 43 | mov r8, lr |
44 | arch_irq_handler_default | 44 | arch_irq_handler_default |
45 | mov pc, r4 | 45 | mov pc, r8 |
46 | .endm | 46 | .endm |
diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c1062c317103..c93a22a8b924 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h | |||
@@ -4,22 +4,26 @@ | |||
4 | /* | 4 | /* |
5 | * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP | 5 | * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP |
6 | */ | 6 | */ |
7 | #define HWCAP_SWP 1 | 7 | #define HWCAP_SWP (1 << 0) |
8 | #define HWCAP_HALF 2 | 8 | #define HWCAP_HALF (1 << 1) |
9 | #define HWCAP_THUMB 4 | 9 | #define HWCAP_THUMB (1 << 2) |
10 | #define HWCAP_26BIT 8 /* Play it safe */ | 10 | #define HWCAP_26BIT (1 << 3) /* Play it safe */ |
11 | #define HWCAP_FAST_MULT 16 | 11 | #define HWCAP_FAST_MULT (1 << 4) |
12 | #define HWCAP_FPA 32 | 12 | #define HWCAP_FPA (1 << 5) |
13 | #define HWCAP_VFP 64 | 13 | #define HWCAP_VFP (1 << 6) |
14 | #define HWCAP_EDSP 128 | 14 | #define HWCAP_EDSP (1 << 7) |
15 | #define HWCAP_JAVA 256 | 15 | #define HWCAP_JAVA (1 << 8) |
16 | #define HWCAP_IWMMXT 512 | 16 | #define HWCAP_IWMMXT (1 << 9) |
17 | #define HWCAP_CRUNCH 1024 | 17 | #define HWCAP_CRUNCH (1 << 10) |
18 | #define HWCAP_THUMBEE 2048 | 18 | #define HWCAP_THUMBEE (1 << 11) |
19 | #define HWCAP_NEON 4096 | 19 | #define HWCAP_NEON (1 << 12) |
20 | #define HWCAP_VFPv3 8192 | 20 | #define HWCAP_VFPv3 (1 << 13) |
21 | #define HWCAP_VFPv3D16 16384 | 21 | #define HWCAP_VFPv3D16 (1 << 14) |
22 | #define HWCAP_TLS 32768 | 22 | #define HWCAP_TLS (1 << 15) |
23 | #define HWCAP_VFPv4 (1 << 16) | ||
24 | #define HWCAP_IDIVA (1 << 17) | ||
25 | #define HWCAP_IDIVT (1 << 18) | ||
26 | #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) | ||
23 | 27 | ||
24 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 28 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
25 | /* | 29 | /* |
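Since these HWCAP bits are also exported to user space through AT_HWCAP, the shifted form makes the bit positions explicit. A hedged user-space sketch probing the new divide capabilities; it assumes a glibc recent enough to provide getauxval(), and the bit positions are taken from the header above.

#include <stdio.h>
#include <sys/auxv.h>		/* getauxval(), assumed available */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	/* bit positions match HWCAP_IDIVA (1 << 17) and HWCAP_IDIVT (1 << 18) above */
	if (hwcap & (1UL << 17))
		printf("SDIV/UDIV available in ARM state\n");
	if (hwcap & (1UL << 18))
		printf("SDIV/UDIV available in Thumb state\n");
	return 0;
}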
diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index e46bdd0097eb..feec86768f9c 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h | |||
@@ -24,12 +24,6 @@ | |||
24 | #define MAX_INSN_SIZE 2 | 24 | #define MAX_INSN_SIZE 2 |
25 | #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ | 25 | #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ |
26 | 26 | ||
27 | /* | ||
28 | * This undefined instruction must be unique and | ||
29 | * reserved solely for kprobes' use. | ||
30 | */ | ||
31 | #define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8 | ||
32 | |||
33 | #define regs_return_value(regs) ((regs)->ARM_r0) | 27 | #define regs_return_value(regs) ((regs)->ARM_r0) |
34 | #define flush_insn_slot(p) do { } while (0) | 28 | #define flush_insn_slot(p) do { } while (0) |
35 | #define kretprobe_blacklist_size 0 | 29 | #define kretprobe_blacklist_size 0 |
@@ -38,14 +32,17 @@ typedef u32 kprobe_opcode_t; | |||
38 | 32 | ||
39 | struct kprobe; | 33 | struct kprobe; |
40 | typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); | 34 | typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); |
41 | |||
42 | typedef unsigned long (kprobe_check_cc)(unsigned long); | 35 | typedef unsigned long (kprobe_check_cc)(unsigned long); |
36 | typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *); | ||
37 | typedef void (kprobe_insn_fn_t)(void); | ||
43 | 38 | ||
44 | /* Architecture specific copy of original instruction. */ | 39 | /* Architecture specific copy of original instruction. */ |
45 | struct arch_specific_insn { | 40 | struct arch_specific_insn { |
46 | kprobe_opcode_t *insn; | 41 | kprobe_opcode_t *insn; |
47 | kprobe_insn_handler_t *insn_handler; | 42 | kprobe_insn_handler_t *insn_handler; |
48 | kprobe_check_cc *insn_check_cc; | 43 | kprobe_check_cc *insn_check_cc; |
44 | kprobe_insn_singlestep_t *insn_singlestep; | ||
45 | kprobe_insn_fn_t *insn_fn; | ||
49 | }; | 46 | }; |
50 | 47 | ||
51 | struct prev_kprobe { | 48 | struct prev_kprobe { |
@@ -62,20 +59,9 @@ struct kprobe_ctlblk { | |||
62 | }; | 59 | }; |
63 | 60 | ||
64 | void arch_remove_kprobe(struct kprobe *); | 61 | void arch_remove_kprobe(struct kprobe *); |
65 | void kretprobe_trampoline(void); | ||
66 | |||
67 | int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); | 62 | int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); |
68 | int kprobe_exceptions_notify(struct notifier_block *self, | 63 | int kprobe_exceptions_notify(struct notifier_block *self, |
69 | unsigned long val, void *data); | 64 | unsigned long val, void *data); |
70 | 65 | ||
71 | enum kprobe_insn { | ||
72 | INSN_REJECTED, | ||
73 | INSN_GOOD, | ||
74 | INSN_GOOD_NO_SLOT | ||
75 | }; | ||
76 | |||
77 | enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, | ||
78 | struct arch_specific_insn *); | ||
79 | void __init arm_kprobe_decode_init(void); | ||
80 | 66 | ||
81 | #endif /* _ARM_KPROBES_H */ | 67 | #endif /* _ARM_KPROBES_H */ |
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 946f4d778f71..3281fb4b12e3 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h | |||
@@ -23,6 +23,10 @@ struct machine_desc { | |||
23 | 23 | ||
24 | unsigned int nr_irqs; /* number of IRQs */ | 24 | unsigned int nr_irqs; /* number of IRQs */ |
25 | 25 | ||
26 | #ifdef CONFIG_ZONE_DMA | ||
27 | unsigned long dma_zone_size; /* size of DMA-able area */ | ||
28 | #endif | ||
29 | |||
26 | unsigned int video_start; /* start of video RAM */ | 30 | unsigned int video_start; /* start of video RAM */ |
27 | unsigned int video_end; /* end of video RAM */ | 31 | unsigned int video_end; /* end of video RAM */ |
28 | 32 | ||
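With the compile-time ARM_DMA_ZONE_SIZE gone (see the memory.h and dma.h hunks elsewhere in this patch), the DMA zone size becomes per-machine data, meaningful only when CONFIG_ZONE_DMA is set. A hedged sketch of how a board file might fill in the new field; the board name and the 64MB figure are invented.

MACHINE_START(EXAMPLE_BOARD, "Example board")
	/* GFP_DMA allocations on this board must fall in the first 64MB */
	.dma_zone_size	= SZ_64M,
	/* ... remaining machine_desc fields unchanged ... */
MACHINE_END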
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index af44a8fb3480..b8de516e600e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -204,18 +204,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) | |||
204 | #endif | 204 | #endif |
205 | 205 | ||
206 | /* | 206 | /* |
207 | * The DMA mask corresponding to the maximum bus address allocatable | ||
208 | * using GFP_DMA. The default here places no restriction on DMA | ||
209 | * allocations. This must be the smallest DMA mask in the system, | ||
210 | * so a successful GFP_DMA allocation will always satisfy this. | ||
211 | */ | ||
212 | #ifndef ARM_DMA_ZONE_SIZE | ||
213 | #define ISA_DMA_THRESHOLD (0xffffffffULL) | ||
214 | #else | ||
215 | #define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1) | ||
216 | #endif | ||
217 | |||
218 | /* | ||
219 | * PFNs are used to describe any physical page; this means | 207 | * PFNs are used to describe any physical page; this means |
220 | * PFN 0 == physical address 0. | 208 | * PFN 0 == physical address 0. |
221 | * | 209 | * |
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index c4aa4e8c6af9..0f8e3827a89b 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h | |||
@@ -24,6 +24,8 @@ enum arm_perf_pmu_ids { | |||
24 | ARM_PERF_PMU_ID_V6MP, | 24 | ARM_PERF_PMU_ID_V6MP, |
25 | ARM_PERF_PMU_ID_CA8, | 25 | ARM_PERF_PMU_ID_CA8, |
26 | ARM_PERF_PMU_ID_CA9, | 26 | ARM_PERF_PMU_ID_CA9, |
27 | ARM_PERF_PMU_ID_CA5, | ||
28 | ARM_PERF_PMU_ID_CA15, | ||
27 | ARM_NUM_PMU_IDS, | 29 | ARM_NUM_PMU_IDS, |
28 | }; | 30 | }; |
29 | 31 | ||
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 7544ce6b481a..67c70a31a1be 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h | |||
@@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device); | |||
52 | * a cookie. | 52 | * a cookie. |
53 | */ | 53 | */ |
54 | extern int | 54 | extern int |
55 | release_pmu(struct platform_device *pdev); | 55 | release_pmu(enum arm_pmu_type type); |
56 | 56 | ||
57 | /** | 57 | /** |
58 | * init_pmu() - Initialise the PMU. | 58 | * init_pmu() - Initialise the PMU. |
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8ec535e11fd7..633d1cb84d87 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h | |||
@@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | |||
82 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); | 82 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); |
83 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); | 83 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); |
84 | #else | 84 | #else |
85 | #define cpu_proc_init() processor._proc_init() | 85 | #define cpu_proc_init processor._proc_init |
86 | #define cpu_proc_fin() processor._proc_fin() | 86 | #define cpu_proc_fin processor._proc_fin |
87 | #define cpu_reset(addr) processor.reset(addr) | 87 | #define cpu_reset processor.reset |
88 | #define cpu_do_idle() processor._do_idle() | 88 | #define cpu_do_idle processor._do_idle |
89 | #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) | 89 | #define cpu_dcache_clean_area processor.dcache_clean_area |
90 | #define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) | 90 | #define cpu_set_pte_ext processor.set_pte_ext |
91 | #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) | 91 | #define cpu_do_switch_mm processor.switch_mm |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | extern void cpu_resume(void); | 94 | extern void cpu_resume(void); |
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 312d10877bd7..96187ff58c24 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h | |||
@@ -69,8 +69,9 @@ | |||
69 | #define PSR_c 0x000000ff /* Control */ | 69 | #define PSR_c 0x000000ff /* Control */ |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * ARMv7 groups of APSR bits | 72 | * ARMv7 groups of PSR bits |
73 | */ | 73 | */ |
74 | #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ | ||
74 | #define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ | 75 | #define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ |
75 | #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ | 76 | #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ |
76 | #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ | 77 | #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ |
@@ -200,6 +201,14 @@ extern unsigned long profile_pc(struct pt_regs *regs); | |||
200 | #define PREDICATE_ALWAYS 0xe0000000 | 201 | #define PREDICATE_ALWAYS 0xe0000000 |
201 | 202 | ||
202 | /* | 203 | /* |
204 | * True if instr is a 32-bit thumb instruction. This works if instr | ||
205 | * is the first or only half-word of a thumb instruction. It also works | ||
206 | * when instr holds all 32-bits of a wide thumb instruction if stored | ||
207 | * in the form (first_half<<16)|(second_half) | ||
208 | */ | ||
209 | #define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800) | ||
210 | |||
211 | /* | ||
203 | * kprobe-based event tracer support | 212 | * kprobe-based event tracer support |
204 | */ | 213 | */ |
205 | #include <linux/stddef.h> | 214 | #include <linux/stddef.h> |
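A short hedged sketch of how the new is_wide_instruction() helper might be used to assemble a full Thumb instruction in the (first_half<<16)|(second_half) form described in the comment above; the helper function itself is illustrative and not part of this patch.

#include <linux/types.h>
#include <asm/ptrace.h>

/* illustrative: read one Thumb instruction and report its size in bytes */
static unsigned int example_read_thumb_insn(const u16 *pc, u32 *insn)
{
	u32 first = pc[0];

	if (!is_wide_instruction(first)) {
		*insn = first;			/* 16-bit encoding */
		return 2;
	}
	*insn = (first << 16) | pc[1];		/* 32-bit: (first_half<<16)|(second_half) */
	return 4;
}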
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index 2f87870d9347..cefdb8f898a1 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASMARM_SCATTERLIST_H | 1 | #ifndef _ASMARM_SCATTERLIST_H |
2 | #define _ASMARM_SCATTERLIST_H | 2 | #define _ASMARM_SCATTERLIST_H |
3 | 3 | ||
4 | #ifdef CONFIG_ARM_HAS_SG_CHAIN | ||
5 | #define ARCH_HAS_SG_CHAIN | ||
6 | #endif | ||
7 | |||
4 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
5 | #include <asm/types.h> | 9 | #include <asm/types.h> |
6 | #include <asm-generic/scatterlist.h> | 10 | #include <asm-generic/scatterlist.h> |
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee2ad8ae07af..915696dd9c7c 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h | |||
@@ -187,12 +187,16 @@ struct tagtable { | |||
187 | 187 | ||
188 | #define __tag __used __attribute__((__section__(".taglist.init"))) | 188 | #define __tag __used __attribute__((__section__(".taglist.init"))) |
189 | #define __tagtable(tag, fn) \ | 189 | #define __tagtable(tag, fn) \ |
190 | static struct tagtable __tagtable_##fn __tag = { tag, fn } | 190 | static const struct tagtable __tagtable_##fn __tag = { tag, fn } |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * Memory map description | 193 | * Memory map description |
194 | */ | 194 | */ |
195 | #define NR_BANKS 8 | 195 | #ifdef CONFIG_ARCH_EP93XX |
196 | # define NR_BANKS 16 | ||
197 | #else | ||
198 | # define NR_BANKS 8 | ||
199 | #endif | ||
196 | 200 | ||
197 | struct membank { | 201 | struct membank { |
198 | phys_addr_t start; | 202 | phys_addr_t start; |
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h new file mode 100644 index 000000000000..b0e4e1a02318 --- /dev/null +++ b/arch/arm/include/asm/suspend.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __ASM_ARM_SUSPEND_H | ||
2 | #define __ASM_ARM_SUSPEND_H | ||
3 | |||
4 | #include <asm/memory.h> | ||
5 | #include <asm/tlbflush.h> | ||
6 | |||
7 | extern void cpu_resume(void); | ||
8 | |||
9 | /* | ||
10 | * Hide the first two arguments to __cpu_suspend - these are an implementation | ||
11 | * detail which platform code shouldn't have to know about. | ||
12 | */ | ||
13 | static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
14 | { | ||
15 | extern int __cpu_suspend(int, long, unsigned long, | ||
16 | int (*)(unsigned long)); | ||
17 | int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); | ||
18 | flush_tlb_all(); | ||
19 | return ret; | ||
20 | } | ||
21 | |||
22 | #endif | ||
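The new wrapper hides the first two __cpu_suspend() arguments from platform code; a minimal sketch of the intended call pattern, assuming a hypothetical SoC-specific finisher.

#include <asm/suspend.h>

/* hypothetical finisher: enter the SoC's low-power state and do not return */
static int example_do_sleep(unsigned long arg)
{
	example_enter_deep_sleep(arg);	/* assumed platform helper */
	return 1;			/* reached only if entering sleep was aborted */
}

static void example_pm_enter(void)
{
	/* on a real wake-up this returns via cpu_resume(), TLB already flushed */
	cpu_suspend(0, example_do_sleep);
}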
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h index 5929ef5d927a..8578d726ad78 100644 --- a/arch/arm/include/asm/tcm.h +++ b/arch/arm/include/asm/tcm.h | |||
@@ -27,5 +27,7 @@ | |||
27 | 27 | ||
28 | void *tcm_alloc(size_t len); | 28 | void *tcm_alloc(size_t len); |
29 | void tcm_free(void *addr, size_t len); | 29 | void tcm_free(void *addr, size_t len); |
30 | bool tcm_dtcm_present(void); | ||
31 | bool tcm_itcm_present(void); | ||
30 | 32 | ||
31 | #endif | 33 | #endif |
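A hedged sketch of how a driver might use the new presence queries before touching TCM; the helper and its fallback policy are purely illustrative.

#include <linux/slab.h>
#include <asm/tcm.h>

/* illustrative: prefer data TCM when the CPU has one, else ordinary RAM */
static void *example_fast_buffer(size_t len)
{
	void *buf = NULL;

	if (tcm_dtcm_present())
		buf = tcm_alloc(len);
	if (!buf)
		buf = kmalloc(len, GFP_KERNEL);
	return buf;
}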
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index d2005de383b8..8077145698ff 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -34,16 +34,12 @@ | |||
34 | #define TLB_V6_D_ASID (1 << 17) | 34 | #define TLB_V6_D_ASID (1 << 17) |
35 | #define TLB_V6_I_ASID (1 << 18) | 35 | #define TLB_V6_I_ASID (1 << 18) |
36 | 36 | ||
37 | #define TLB_BTB (1 << 28) | ||
38 | |||
39 | /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ | 37 | /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */ |
40 | #define TLB_V7_UIS_PAGE (1 << 19) | 38 | #define TLB_V7_UIS_PAGE (1 << 19) |
41 | #define TLB_V7_UIS_FULL (1 << 20) | 39 | #define TLB_V7_UIS_FULL (1 << 20) |
42 | #define TLB_V7_UIS_ASID (1 << 21) | 40 | #define TLB_V7_UIS_ASID (1 << 21) |
43 | 41 | ||
44 | /* Inner Shareable BTB operation (ARMv7 MP extensions) */ | 42 | #define TLB_BARRIER (1 << 28) |
45 | #define TLB_V7_IS_BTB (1 << 22) | ||
46 | |||
47 | #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ | 43 | #define TLB_L2CLEAN_FR (1 << 29) /* Feroceon */ |
48 | #define TLB_DCLEAN (1 << 30) | 44 | #define TLB_DCLEAN (1 << 30) |
49 | #define TLB_WB (1 << 31) | 45 | #define TLB_WB (1 << 31) |
@@ -58,7 +54,7 @@ | |||
58 | * v4wb - ARMv4 with write buffer without I TLB flush entry instruction | 54 | * v4wb - ARMv4 with write buffer without I TLB flush entry instruction |
59 | * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction | 55 | * v4wbi - ARMv4 with write buffer with I TLB flush entry instruction |
60 | * fr - Feroceon (v4wbi with non-outer-cacheable page table walks) | 56 | * fr - Feroceon (v4wbi with non-outer-cacheable page table walks) |
61 | * fa - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB)) | 57 | * fa - Faraday (v4 with write buffer with UTLB) |
62 | * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction | 58 | * v6wbi - ARMv6 with write buffer with I TLB flush entry instruction |
63 | * v7wbi - identical to v6wbi | 59 | * v7wbi - identical to v6wbi |
64 | */ | 60 | */ |
@@ -99,7 +95,7 @@ | |||
99 | # define v4_always_flags (-1UL) | 95 | # define v4_always_flags (-1UL) |
100 | #endif | 96 | #endif |
101 | 97 | ||
102 | #define fa_tlb_flags (TLB_WB | TLB_BTB | TLB_DCLEAN | \ | 98 | #define fa_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ |
103 | TLB_V4_U_FULL | TLB_V4_U_PAGE) | 99 | TLB_V4_U_FULL | TLB_V4_U_PAGE) |
104 | 100 | ||
105 | #ifdef CONFIG_CPU_TLB_FA | 101 | #ifdef CONFIG_CPU_TLB_FA |
@@ -166,7 +162,7 @@ | |||
166 | # define v4wb_always_flags (-1UL) | 162 | # define v4wb_always_flags (-1UL) |
167 | #endif | 163 | #endif |
168 | 164 | ||
169 | #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \ | 165 | #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ |
170 | TLB_V6_I_FULL | TLB_V6_D_FULL | \ | 166 | TLB_V6_I_FULL | TLB_V6_D_FULL | \ |
171 | TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ | 167 | TLB_V6_I_PAGE | TLB_V6_D_PAGE | \ |
172 | TLB_V6_I_ASID | TLB_V6_D_ASID) | 168 | TLB_V6_I_ASID | TLB_V6_D_ASID) |
@@ -184,9 +180,9 @@ | |||
184 | # define v6wbi_always_flags (-1UL) | 180 | # define v6wbi_always_flags (-1UL) |
185 | #endif | 181 | #endif |
186 | 182 | ||
187 | #define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \ | 183 | #define v7wbi_tlb_flags_smp (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ |
188 | TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) | 184 | TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID) |
189 | #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BTB | \ | 185 | #define v7wbi_tlb_flags_up (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \ |
190 | TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) | 186 | TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID) |
191 | 187 | ||
192 | #ifdef CONFIG_CPU_TLB_V7 | 188 | #ifdef CONFIG_CPU_TLB_V7 |
@@ -341,15 +337,7 @@ static inline void local_flush_tlb_all(void) | |||
341 | if (tlb_flag(TLB_V7_UIS_FULL)) | 337 | if (tlb_flag(TLB_V7_UIS_FULL)) |
342 | asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); | 338 | asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); |
343 | 339 | ||
344 | if (tlb_flag(TLB_BTB)) { | 340 | if (tlb_flag(TLB_BARRIER)) { |
345 | /* flush the branch target cache */ | ||
346 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); | ||
347 | dsb(); | ||
348 | isb(); | ||
349 | } | ||
350 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
351 | /* flush the branch target cache */ | ||
352 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
353 | dsb(); | 341 | dsb(); |
354 | isb(); | 342 | isb(); |
355 | } | 343 | } |
@@ -389,17 +377,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) | |||
389 | asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); | 377 | asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); |
390 | #endif | 378 | #endif |
391 | 379 | ||
392 | if (tlb_flag(TLB_BTB)) { | 380 | if (tlb_flag(TLB_BARRIER)) |
393 | /* flush the branch target cache */ | ||
394 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); | ||
395 | dsb(); | ||
396 | } | ||
397 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
398 | /* flush the branch target cache */ | ||
399 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
400 | dsb(); | 381 | dsb(); |
401 | isb(); | ||
402 | } | ||
403 | } | 382 | } |
404 | 383 | ||
405 | static inline void | 384 | static inline void |
@@ -439,17 +418,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | |||
439 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); | 418 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); |
440 | #endif | 419 | #endif |
441 | 420 | ||
442 | if (tlb_flag(TLB_BTB)) { | 421 | if (tlb_flag(TLB_BARRIER)) |
443 | /* flush the branch target cache */ | ||
444 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); | ||
445 | dsb(); | ||
446 | } | ||
447 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
448 | /* flush the branch target cache */ | ||
449 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
450 | dsb(); | 422 | dsb(); |
451 | isb(); | ||
452 | } | ||
453 | } | 423 | } |
454 | 424 | ||
455 | static inline void local_flush_tlb_kernel_page(unsigned long kaddr) | 425 | static inline void local_flush_tlb_kernel_page(unsigned long kaddr) |
@@ -482,15 +452,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) | |||
482 | if (tlb_flag(TLB_V7_UIS_PAGE)) | 452 | if (tlb_flag(TLB_V7_UIS_PAGE)) |
483 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); | 453 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); |
484 | 454 | ||
485 | if (tlb_flag(TLB_BTB)) { | 455 | if (tlb_flag(TLB_BARRIER)) { |
486 | /* flush the branch target cache */ | ||
487 | asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc"); | ||
488 | dsb(); | ||
489 | isb(); | ||
490 | } | ||
491 | if (tlb_flag(TLB_V7_IS_BTB)) { | ||
492 | /* flush the branch target cache */ | ||
493 | asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc"); | ||
494 | dsb(); | 456 | dsb(); |
495 | isb(); | 457 | isb(); |
496 | } | 458 | } |
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index f90756dc16dc..5b29a6673625 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h | |||
@@ -3,6 +3,9 @@ | |||
3 | 3 | ||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | 5 | ||
6 | struct pt_regs; | ||
7 | struct task_struct; | ||
8 | |||
6 | struct undef_hook { | 9 | struct undef_hook { |
7 | struct list_head node; | 10 | struct list_head node; |
8 | u32 instr_mask; | 11 | u32 instr_mask; |
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index a5b31af5c2b8..f7887dc53c1f 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -37,7 +37,12 @@ obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o | |||
37 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 37 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
38 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 38 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
39 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | 39 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o |
40 | obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o | 40 | obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o |
41 | ifdef CONFIG_THUMB2_KERNEL | ||
42 | obj-$(CONFIG_KPROBES) += kprobes-thumb.o | ||
43 | else | ||
44 | obj-$(CONFIG_KPROBES) += kprobes-arm.o | ||
45 | endif | ||
41 | obj-$(CONFIG_ATAGS_PROC) += atags.o | 46 | obj-$(CONFIG_ATAGS_PROC) += atags.o |
42 | obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o | 47 | obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o |
43 | obj-$(CONFIG_ARM_THUMBEE) += thumbee.o | 48 | obj-$(CONFIG_ARM_THUMBEE) += thumbee.o |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 927522cfc12e..16baba2e4369 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -59,6 +59,9 @@ int main(void) | |||
59 | DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); | 59 | DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); |
60 | DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); | 60 | DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); |
61 | DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); | 61 | DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); |
62 | #ifdef CONFIG_SMP | ||
63 | DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu)); | ||
64 | #endif | ||
62 | #ifdef CONFIG_ARM_THUMBEE | 65 | #ifdef CONFIG_ARM_THUMBEE |
63 | DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); | 66 | DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); |
64 | #endif | 67 | #endif |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 90c62cd51ca9..a87cbf889ff4 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -29,21 +29,53 @@ | |||
29 | #include <asm/entry-macro-multi.S> | 29 | #include <asm/entry-macro-multi.S> |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Interrupt handling. Preserves r7, r8, r9 | 32 | * Interrupt handling. |
33 | */ | 33 | */ |
34 | .macro irq_handler | 34 | .macro irq_handler |
35 | #ifdef CONFIG_MULTI_IRQ_HANDLER | 35 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
36 | ldr r5, =handle_arch_irq | 36 | ldr r1, =handle_arch_irq |
37 | mov r0, sp | 37 | mov r0, sp |
38 | ldr r5, [r5] | 38 | ldr r1, [r1] |
39 | adr lr, BSYM(9997f) | 39 | adr lr, BSYM(9997f) |
40 | teq r5, #0 | 40 | teq r1, #0 |
41 | movne pc, r5 | 41 | movne pc, r1 |
42 | #endif | 42 | #endif |
43 | arch_irq_handler_default | 43 | arch_irq_handler_default |
44 | 9997: | 44 | 9997: |
45 | .endm | 45 | .endm |
46 | 46 | ||
47 | .macro pabt_helper | ||
48 | @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 | ||
49 | #ifdef MULTI_PABORT | ||
50 | ldr ip, .LCprocfns | ||
51 | mov lr, pc | ||
52 | ldr pc, [ip, #PROCESSOR_PABT_FUNC] | ||
53 | #else | ||
54 | bl CPU_PABORT_HANDLER | ||
55 | #endif | ||
56 | .endm | ||
57 | |||
58 | .macro dabt_helper | ||
59 | |||
60 | @ | ||
61 | @ Call the processor-specific abort handler: | ||
62 | @ | ||
63 | @ r2 - pt_regs | ||
64 | @ r4 - aborted context pc | ||
65 | @ r5 - aborted context psr | ||
66 | @ | ||
67 | @ The abort handler must return the aborted address in r0, and | ||
68 | @ the fault status register in r1. r9 must be preserved. | ||
69 | @ | ||
70 | #ifdef MULTI_DABORT | ||
71 | ldr ip, .LCprocfns | ||
72 | mov lr, pc | ||
73 | ldr pc, [ip, #PROCESSOR_DABT_FUNC] | ||
74 | #else | ||
75 | bl CPU_DABORT_HANDLER | ||
76 | #endif | ||
77 | .endm | ||
78 | |||
47 | #ifdef CONFIG_KPROBES | 79 | #ifdef CONFIG_KPROBES |
48 | .section .kprobes.text,"ax",%progbits | 80 | .section .kprobes.text,"ax",%progbits |
49 | #else | 81 | #else |
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid) | |||
126 | SPFIX( subeq sp, sp, #4 ) | 158 | SPFIX( subeq sp, sp, #4 ) |
127 | stmia sp, {r1 - r12} | 159 | stmia sp, {r1 - r12} |
128 | 160 | ||
129 | ldmia r0, {r1 - r3} | 161 | ldmia r0, {r3 - r5} |
130 | add r5, sp, #S_SP - 4 @ here for interlock avoidance | 162 | add r7, sp, #S_SP - 4 @ here for interlock avoidance |
131 | mov r4, #-1 @ "" "" "" "" | 163 | mov r6, #-1 @ "" "" "" "" |
132 | add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) | 164 | add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) |
133 | SPFIX( addeq r0, r0, #4 ) | 165 | SPFIX( addeq r2, r2, #4 ) |
134 | str r1, [sp, #-4]! @ save the "real" r0 copied | 166 | str r3, [sp, #-4]! @ save the "real" r0 copied |
135 | @ from the exception stack | 167 | @ from the exception stack |
136 | 168 | ||
137 | mov r1, lr | 169 | mov r3, lr |
138 | 170 | ||
139 | @ | 171 | @ |
140 | @ We are now ready to fill in the remaining blanks on the stack: | 172 | @ We are now ready to fill in the remaining blanks on the stack: |
141 | @ | 173 | @ |
142 | @ r0 - sp_svc | 174 | @ r2 - sp_svc |
143 | @ r1 - lr_svc | 175 | @ r3 - lr_svc |
144 | @ r2 - lr_<exception>, already fixed up for correct return/restart | 176 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
145 | @ r3 - spsr_<exception> | 177 | @ r5 - spsr_<exception> |
146 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | 178 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) |
147 | @ | 179 | @ |
148 | stmia r5, {r0 - r4} | 180 | stmia r7, {r2 - r6} |
181 | |||
182 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
183 | bl trace_hardirqs_off | ||
184 | #endif | ||
149 | .endm | 185 | .endm |
150 | 186 | ||
151 | .align 5 | 187 | .align 5 |
152 | __dabt_svc: | 188 | __dabt_svc: |
153 | svc_entry | 189 | svc_entry |
154 | |||
155 | @ | ||
156 | @ get ready to re-enable interrupts if appropriate | ||
157 | @ | ||
158 | mrs r9, cpsr | ||
159 | tst r3, #PSR_I_BIT | ||
160 | biceq r9, r9, #PSR_I_BIT | ||
161 | |||
162 | @ | ||
163 | @ Call the processor-specific abort handler: | ||
164 | @ | ||
165 | @ r2 - aborted context pc | ||
166 | @ r3 - aborted context cpsr | ||
167 | @ | ||
168 | @ The abort handler must return the aborted address in r0, and | ||
169 | @ the fault status register in r1. r9 must be preserved. | ||
170 | @ | ||
171 | #ifdef MULTI_DABORT | ||
172 | ldr r4, .LCprocfns | ||
173 | mov lr, pc | ||
174 | ldr pc, [r4, #PROCESSOR_DABT_FUNC] | ||
175 | #else | ||
176 | bl CPU_DABORT_HANDLER | ||
177 | #endif | ||
178 | |||
179 | @ | ||
180 | @ set desired IRQ state, then call main handler | ||
181 | @ | ||
182 | debug_entry r1 | ||
183 | msr cpsr_c, r9 | ||
184 | mov r2, sp | 190 | mov r2, sp |
185 | bl do_DataAbort | 191 | dabt_helper |
186 | 192 | ||
187 | @ | 193 | @ |
188 | @ IRQs off again before pulling preserved data off the stack | 194 | @ IRQs off again before pulling preserved data off the stack |
189 | @ | 195 | @ |
190 | disable_irq_notrace | 196 | disable_irq_notrace |
191 | 197 | ||
192 | @ | 198 | #ifdef CONFIG_TRACE_IRQFLAGS |
193 | @ restore SPSR and restart the instruction | 199 | tst r5, #PSR_I_BIT |
194 | @ | 200 | bleq trace_hardirqs_on |
195 | ldr r2, [sp, #S_PSR] | 201 | tst r5, #PSR_I_BIT |
196 | svc_exit r2 @ return from exception | 202 | blne trace_hardirqs_off |
203 | #endif | ||
204 | svc_exit r5 @ return from exception | ||
197 | UNWIND(.fnend ) | 205 | UNWIND(.fnend ) |
198 | ENDPROC(__dabt_svc) | 206 | ENDPROC(__dabt_svc) |
199 | 207 | ||
200 | .align 5 | 208 | .align 5 |
201 | __irq_svc: | 209 | __irq_svc: |
202 | svc_entry | 210 | svc_entry |
211 | irq_handler | ||
203 | 212 | ||
204 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
205 | bl trace_hardirqs_off | ||
206 | #endif | ||
207 | #ifdef CONFIG_PREEMPT | 213 | #ifdef CONFIG_PREEMPT |
208 | get_thread_info tsk | 214 | get_thread_info tsk |
209 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count | 215 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count |
210 | add r7, r8, #1 @ increment it | ||
211 | str r7, [tsk, #TI_PREEMPT] | ||
212 | #endif | ||
213 | |||
214 | irq_handler | ||
215 | #ifdef CONFIG_PREEMPT | ||
216 | str r8, [tsk, #TI_PREEMPT] @ restore preempt count | ||
217 | ldr r0, [tsk, #TI_FLAGS] @ get flags | 216 | ldr r0, [tsk, #TI_FLAGS] @ get flags |
218 | teq r8, #0 @ if preempt count != 0 | 217 | teq r8, #0 @ if preempt count != 0 |
219 | movne r0, #0 @ force flags to 0 | 218 | movne r0, #0 @ force flags to 0 |
220 | tst r0, #_TIF_NEED_RESCHED | 219 | tst r0, #_TIF_NEED_RESCHED |
221 | blne svc_preempt | 220 | blne svc_preempt |
222 | #endif | 221 | #endif |
223 | ldr r4, [sp, #S_PSR] @ irqs are already disabled | 222 | |
224 | #ifdef CONFIG_TRACE_IRQFLAGS | 223 | #ifdef CONFIG_TRACE_IRQFLAGS |
225 | tst r4, #PSR_I_BIT | 224 | @ The parent context IRQs must have been enabled to get here in |
226 | bleq trace_hardirqs_on | 225 | @ the first place, so there's no point checking the PSR I bit. |
226 | bl trace_hardirqs_on | ||
227 | #endif | 227 | #endif |
228 | svc_exit r4 @ return from exception | 228 | svc_exit r5 @ return from exception |
229 | UNWIND(.fnend ) | 229 | UNWIND(.fnend ) |
230 | ENDPROC(__irq_svc) | 230 | ENDPROC(__irq_svc) |
231 | 231 | ||
@@ -251,7 +251,6 @@ __und_svc: | |||
251 | #else | 251 | #else |
252 | svc_entry | 252 | svc_entry |
253 | #endif | 253 | #endif |
254 | |||
255 | @ | 254 | @ |
256 | @ call emulation code, which returns using r9 if it has emulated | 255 | @ call emulation code, which returns using r9 if it has emulated |
257 | @ the instruction, or the more conventional lr if we are to treat | 256 | @ the instruction, or the more conventional lr if we are to treat |
@@ -260,15 +259,16 @@ __und_svc: | |||
260 | @ r0 - instruction | 259 | @ r0 - instruction |
261 | @ | 260 | @ |
262 | #ifndef CONFIG_THUMB2_KERNEL | 261 | #ifndef CONFIG_THUMB2_KERNEL |
263 | ldr r0, [r2, #-4] | 262 | ldr r0, [r4, #-4] |
264 | #else | 263 | #else |
265 | ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 | 264 | ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 |
266 | and r9, r0, #0xf800 | 265 | and r9, r0, #0xf800 |
267 | cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 | 266 | cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 |
268 | ldrhhs r9, [r2] @ bottom 16 bits | 267 | ldrhhs r9, [r4] @ bottom 16 bits |
269 | orrhs r0, r9, r0, lsl #16 | 268 | orrhs r0, r9, r0, lsl #16 |
270 | #endif | 269 | #endif |
271 | adr r9, BSYM(1f) | 270 | adr r9, BSYM(1f) |
271 | mov r2, r4 | ||
272 | bl call_fpe | 272 | bl call_fpe |
273 | 273 | ||
274 | mov r0, sp @ struct pt_regs *regs | 274 | mov r0, sp @ struct pt_regs *regs |
@@ -282,45 +282,35 @@ __und_svc: | |||
282 | @ | 282 | @ |
283 | @ restore SPSR and restart the instruction | 283 | @ restore SPSR and restart the instruction |
284 | @ | 284 | @ |
285 | ldr r2, [sp, #S_PSR] @ Get SVC cpsr | 285 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr |
286 | svc_exit r2 @ return from exception | 286 | #ifdef CONFIG_TRACE_IRQFLAGS |
287 | tst r5, #PSR_I_BIT | ||
288 | bleq trace_hardirqs_on | ||
289 | tst r5, #PSR_I_BIT | ||
290 | blne trace_hardirqs_off | ||
291 | #endif | ||
292 | svc_exit r5 @ return from exception | ||
287 | UNWIND(.fnend ) | 293 | UNWIND(.fnend ) |
288 | ENDPROC(__und_svc) | 294 | ENDPROC(__und_svc) |
289 | 295 | ||
290 | .align 5 | 296 | .align 5 |
291 | __pabt_svc: | 297 | __pabt_svc: |
292 | svc_entry | 298 | svc_entry |
293 | |||
294 | @ | ||
295 | @ re-enable interrupts if appropriate | ||
296 | @ | ||
297 | mrs r9, cpsr | ||
298 | tst r3, #PSR_I_BIT | ||
299 | biceq r9, r9, #PSR_I_BIT | ||
300 | |||
301 | mov r0, r2 @ pass address of aborted instruction. | ||
302 | #ifdef MULTI_PABORT | ||
303 | ldr r4, .LCprocfns | ||
304 | mov lr, pc | ||
305 | ldr pc, [r4, #PROCESSOR_PABT_FUNC] | ||
306 | #else | ||
307 | bl CPU_PABORT_HANDLER | ||
308 | #endif | ||
309 | debug_entry r1 | ||
310 | msr cpsr_c, r9 @ Maybe enable interrupts | ||
311 | mov r2, sp @ regs | 299 | mov r2, sp @ regs |
312 | bl do_PrefetchAbort @ call abort handler | 300 | pabt_helper |
313 | 301 | ||
314 | @ | 302 | @ |
315 | @ IRQs off again before pulling preserved data off the stack | 303 | @ IRQs off again before pulling preserved data off the stack |
316 | @ | 304 | @ |
317 | disable_irq_notrace | 305 | disable_irq_notrace |
318 | 306 | ||
319 | @ | 307 | #ifdef CONFIG_TRACE_IRQFLAGS |
320 | @ restore SPSR and restart the instruction | 308 | tst r5, #PSR_I_BIT |
321 | @ | 309 | bleq trace_hardirqs_on |
322 | ldr r2, [sp, #S_PSR] | 310 | tst r5, #PSR_I_BIT |
323 | svc_exit r2 @ return from exception | 311 | blne trace_hardirqs_off |
312 | #endif | ||
313 | svc_exit r5 @ return from exception | ||
324 | UNWIND(.fnend ) | 314 | UNWIND(.fnend ) |
325 | ENDPROC(__pabt_svc) | 315 | ENDPROC(__pabt_svc) |
326 | 316 | ||
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc) | |||
351 | ARM( stmib sp, {r1 - r12} ) | 341 | ARM( stmib sp, {r1 - r12} ) |
352 | THUMB( stmia sp, {r0 - r12} ) | 342 | THUMB( stmia sp, {r0 - r12} ) |
353 | 343 | ||
354 | ldmia r0, {r1 - r3} | 344 | ldmia r0, {r3 - r5} |
355 | add r0, sp, #S_PC @ here for interlock avoidance | 345 | add r0, sp, #S_PC @ here for interlock avoidance |
356 | mov r4, #-1 @ "" "" "" "" | 346 | mov r6, #-1 @ "" "" "" "" |
357 | 347 | ||
358 | str r1, [sp] @ save the "real" r0 copied | 348 | str r3, [sp] @ save the "real" r0 copied |
359 | @ from the exception stack | 349 | @ from the exception stack |
360 | 350 | ||
361 | @ | 351 | @ |
362 | @ We are now ready to fill in the remaining blanks on the stack: | 352 | @ We are now ready to fill in the remaining blanks on the stack: |
363 | @ | 353 | @ |
364 | @ r2 - lr_<exception>, already fixed up for correct return/restart | 354 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
365 | @ r3 - spsr_<exception> | 355 | @ r5 - spsr_<exception> |
366 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | 356 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) |
367 | @ | 357 | @ |
368 | @ Also, separately save sp_usr and lr_usr | 358 | @ Also, separately save sp_usr and lr_usr |
369 | @ | 359 | @ |
370 | stmia r0, {r2 - r4} | 360 | stmia r0, {r4 - r6} |
371 | ARM( stmdb r0, {sp, lr}^ ) | 361 | ARM( stmdb r0, {sp, lr}^ ) |
372 | THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) | 362 | THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) |
373 | 363 | ||
@@ -380,10 +370,14 @@ ENDPROC(__pabt_svc) | |||
380 | @ Clear FP to mark the first stack frame | 370 | @ Clear FP to mark the first stack frame |
381 | @ | 371 | @ |
382 | zero_fp | 372 | zero_fp |
373 | |||
374 | #ifdef CONFIG_IRQSOFF_TRACER | ||
375 | bl trace_hardirqs_off | ||
376 | #endif | ||
383 | .endm | 377 | .endm |
384 | 378 | ||
385 | .macro kuser_cmpxchg_check | 379 | .macro kuser_cmpxchg_check |
386 | #if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | 380 | #if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) |
387 | #ifndef CONFIG_MMU | 381 | #ifndef CONFIG_MMU |
388 | #warning "NPTL on non MMU needs fixing" | 382 | #warning "NPTL on non MMU needs fixing" |
389 | #else | 383 | #else |
@@ -391,8 +385,8 @@ ENDPROC(__pabt_svc) | |||
391 | @ if it was interrupted in a critical region. Here we | 385 | @ if it was interrupted in a critical region. Here we |
392 | @ perform a quick test inline since it should be false | 386 | @ perform a quick test inline since it should be false |
393 | @ 99.9999% of the time. The rest is done out of line. | 387 | @ 99.9999% of the time. The rest is done out of line. |
394 | cmp r2, #TASK_SIZE | 388 | cmp r4, #TASK_SIZE |
395 | blhs kuser_cmpxchg_fixup | 389 | blhs kuser_cmpxchg64_fixup |
396 | #endif | 390 | #endif |
397 | #endif | 391 | #endif |
398 | .endm | 392 | .endm |
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc) | |||
401 | __dabt_usr: | 395 | __dabt_usr: |
402 | usr_entry | 396 | usr_entry |
403 | kuser_cmpxchg_check | 397 | kuser_cmpxchg_check |
404 | |||
405 | @ | ||
406 | @ Call the processor-specific abort handler: | ||
407 | @ | ||
408 | @ r2 - aborted context pc | ||
409 | @ r3 - aborted context cpsr | ||
410 | @ | ||
411 | @ The abort handler must return the aborted address in r0, and | ||
412 | @ the fault status register in r1. | ||
413 | @ | ||
414 | #ifdef MULTI_DABORT | ||
415 | ldr r4, .LCprocfns | ||
416 | mov lr, pc | ||
417 | ldr pc, [r4, #PROCESSOR_DABT_FUNC] | ||
418 | #else | ||
419 | bl CPU_DABORT_HANDLER | ||
420 | #endif | ||
421 | |||
422 | @ | ||
423 | @ IRQs on, then call the main handler | ||
424 | @ | ||
425 | debug_entry r1 | ||
426 | enable_irq | ||
427 | mov r2, sp | 398 | mov r2, sp |
428 | adr lr, BSYM(ret_from_exception) | 399 | dabt_helper |
429 | b do_DataAbort | 400 | b ret_from_exception |
430 | UNWIND(.fnend ) | 401 | UNWIND(.fnend ) |
431 | ENDPROC(__dabt_usr) | 402 | ENDPROC(__dabt_usr) |
432 | 403 | ||
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr) | |||
434 | __irq_usr: | 405 | __irq_usr: |
435 | usr_entry | 406 | usr_entry |
436 | kuser_cmpxchg_check | 407 | kuser_cmpxchg_check |
437 | |||
438 | #ifdef CONFIG_IRQSOFF_TRACER | ||
439 | bl trace_hardirqs_off | ||
440 | #endif | ||
441 | |||
442 | get_thread_info tsk | ||
443 | #ifdef CONFIG_PREEMPT | ||
444 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count | ||
445 | add r7, r8, #1 @ increment it | ||
446 | str r7, [tsk, #TI_PREEMPT] | ||
447 | #endif | ||
448 | |||
449 | irq_handler | 408 | irq_handler |
450 | #ifdef CONFIG_PREEMPT | 409 | get_thread_info tsk |
451 | ldr r0, [tsk, #TI_PREEMPT] | ||
452 | str r8, [tsk, #TI_PREEMPT] | ||
453 | teq r0, r7 | ||
454 | ARM( strne r0, [r0, -r0] ) | ||
455 | THUMB( movne r0, #0 ) | ||
456 | THUMB( strne r0, [r0] ) | ||
457 | #endif | ||
458 | |||
459 | mov why, #0 | 410 | mov why, #0 |
460 | b ret_to_user_from_irq | 411 | b ret_to_user_from_irq |
461 | UNWIND(.fnend ) | 412 | UNWIND(.fnend ) |
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr) | |||
467 | __und_usr: | 418 | __und_usr: |
468 | usr_entry | 419 | usr_entry |
469 | 420 | ||
421 | mov r2, r4 | ||
422 | mov r3, r5 | ||
423 | |||
470 | @ | 424 | @ |
471 | @ fall through to the emulation code, which returns using r9 if | 425 | @ fall through to the emulation code, which returns using r9 if |
472 | @ it has emulated the instruction, or the more conventional lr | 426 | @ it has emulated the instruction, or the more conventional lr |
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown) | |||
682 | .align 5 | 636 | .align 5 |
683 | __pabt_usr: | 637 | __pabt_usr: |
684 | usr_entry | 638 | usr_entry |
685 | |||
686 | mov r0, r2 @ pass address of aborted instruction. | ||
687 | #ifdef MULTI_PABORT | ||
688 | ldr r4, .LCprocfns | ||
689 | mov lr, pc | ||
690 | ldr pc, [r4, #PROCESSOR_PABT_FUNC] | ||
691 | #else | ||
692 | bl CPU_PABORT_HANDLER | ||
693 | #endif | ||
694 | debug_entry r1 | ||
695 | enable_irq @ Enable interrupts | ||
696 | mov r2, sp @ regs | 639 | mov r2, sp @ regs |
697 | bl do_PrefetchAbort @ call abort handler | 640 | pabt_helper |
698 | UNWIND(.fnend ) | 641 | UNWIND(.fnend ) |
699 | /* fall through */ | 642 | /* fall through */ |
700 | /* | 643 | /* |
@@ -758,31 +701,12 @@ ENDPROC(__switch_to) | |||
758 | /* | 701 | /* |
759 | * User helpers. | 702 | * User helpers. |
760 | * | 703 | * |
761 | * These are segment of kernel provided user code reachable from user space | ||
762 | * at a fixed address in kernel memory. This is used to provide user space | ||
763 | * with some operations which require kernel help because of unimplemented | ||
764 | * native feature and/or instructions in many ARM CPUs. The idea is for | ||
765 | * this code to be executed directly in user mode for best efficiency but | ||
766 | * which is too intimate with the kernel counter part to be left to user | ||
767 | * libraries. In fact this code might even differ from one CPU to another | ||
768 | * depending on the available instruction set and restrictions like on | ||
769 | * SMP systems. In other words, the kernel reserves the right to change | ||
770 | * this code as needed without warning. Only the entry points and their | ||
771 | * results are guaranteed to be stable. | ||
772 | * | ||
773 | * Each segment is 32-byte aligned and will be moved to the top of the high | 704 | * Each segment is 32-byte aligned and will be moved to the top of the high |
774 | * vector page. New segments (if ever needed) must be added in front of | 705 | * vector page. New segments (if ever needed) must be added in front of |
775 | * existing ones. This mechanism should be used only for things that are | 706 | * existing ones. This mechanism should be used only for things that are |
776 | * really small and justified, and not be abused freely. | 707 | * really small and justified, and not be abused freely. |
777 | * | 708 | * |
778 | * User space is expected to implement those things inline when optimizing | 709 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. |
779 | * for a processor that has the necessary native support, but only if such | ||
780 | * resulting binaries are already to be incompatible with earlier ARM | ||
781 | * processors due to the use of unsupported instructions other than what | ||
782 | * is provided here. In other words don't make binaries unable to run on | ||
783 | * earlier processors just for the sake of not using these kernel helpers | ||
784 | * if your compiled code is not going to use the new instructions for other | ||
785 | * purpose. | ||
786 | */ | 710 | */ |
787 | THUMB( .arm ) | 711 | THUMB( .arm ) |
788 | 712 | ||
@@ -799,96 +723,103 @@ ENDPROC(__switch_to) | |||
799 | __kuser_helper_start: | 723 | __kuser_helper_start: |
800 | 724 | ||
801 | /* | 725 | /* |
802 | * Reference prototype: | 726 | * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular |
803 | * | 727 | * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point. |
804 | * void __kernel_memory_barrier(void) | ||
805 | * | ||
806 | * Input: | ||
807 | * | ||
808 | * lr = return address | ||
809 | * | ||
810 | * Output: | ||
811 | * | ||
812 | * none | ||
813 | * | ||
814 | * Clobbered: | ||
815 | * | ||
816 | * none | ||
817 | * | ||
818 | * Definition and user space usage example: | ||
819 | * | ||
820 | * typedef void (__kernel_dmb_t)(void); | ||
821 | * #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0) | ||
822 | * | ||
823 | * Apply any needed memory barrier to preserve consistency with data modified | ||
824 | * manually and __kuser_cmpxchg usage. | ||
825 | * | ||
826 | * This could be used as follows: | ||
827 | * | ||
828 | * #define __kernel_dmb() \ | ||
829 | * asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \ | ||
830 | * : : : "r0", "lr","cc" ) | ||
831 | */ | 728 | */ |
832 | 729 | ||
833 | __kuser_memory_barrier: @ 0xffff0fa0 | 730 | __kuser_cmpxchg64: @ 0xffff0f60 |
731 | |||
732 | #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG) | ||
733 | |||
734 | /* | ||
735 | * Poor you. No fast solution possible... | ||
736 | * The kernel itself must perform the operation. | ||
737 | * A special ghost syscall is used for that (see traps.c). | ||
738 | */ | ||
739 | stmfd sp!, {r7, lr} | ||
740 | ldr r7, 1f @ it's 20 bits | ||
741 | swi __ARM_NR_cmpxchg64 | ||
742 | ldmfd sp!, {r7, pc} | ||
743 | 1: .word __ARM_NR_cmpxchg64 | ||
744 | |||
745 | #elif defined(CONFIG_CPU_32v6K) | ||
746 | |||
747 | stmfd sp!, {r4, r5, r6, r7} | ||
748 | ldrd r4, r5, [r0] @ load old val | ||
749 | ldrd r6, r7, [r1] @ load new val | ||
750 | smp_dmb arm | ||
751 | 1: ldrexd r0, r1, [r2] @ load current val | ||
752 | eors r3, r0, r4 @ compare with oldval (1) | ||
753 | eoreqs r3, r1, r5 @ compare with oldval (2) | ||
754 | strexdeq r3, r6, r7, [r2] @ store newval if eq | ||
755 | teqeq r3, #1 @ success? | ||
756 | beq 1b @ if no then retry | ||
834 | smp_dmb arm | 757 | smp_dmb arm |
758 | rsbs r0, r3, #0 @ set returned val and C flag | ||
759 | ldmfd sp!, {r4, r5, r6, r7} | ||
760 | bx lr | ||
761 | |||
762 | #elif !defined(CONFIG_SMP) | ||
763 | |||
764 | #ifdef CONFIG_MMU | ||
765 | |||
766 | /* | ||
767 | * The only thing that can break atomicity in this cmpxchg64 | ||
768 | * implementation is either an IRQ or a data abort exception | ||
769 | * causing another process/thread to be scheduled in the middle of | ||
770 | * the critical sequence. The same strategy as for cmpxchg is used. | ||
771 | */ | ||
772 | stmfd sp!, {r4, r5, r6, lr} | ||
773 | ldmia r0, {r4, r5} @ load old val | ||
774 | ldmia r1, {r6, lr} @ load new val | ||
775 | 1: ldmia r2, {r0, r1} @ load current val | ||
776 | eors r3, r0, r4 @ compare with oldval (1) | ||
777 | eoreqs r3, r1, r5 @ compare with oldval (2) | ||
778 | 2: stmeqia r2, {r6, lr} @ store newval if eq | ||
779 | rsbs r0, r3, #0 @ set return val and C flag | ||
780 | ldmfd sp!, {r4, r5, r6, pc} | ||
781 | |||
782 | .text | ||
783 | kuser_cmpxchg64_fixup: | ||
784 | @ Called from kuser_cmpxchg_fixup. | ||
785 | @ r4 = address of interrupted insn (must be preserved). | ||
786 | @ sp = saved regs. r7 and r8 are clobbered. | ||
787 | @ 1b = first critical insn, 2b = last critical insn. | ||
788 | @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. | ||
789 | mov r7, #0xffff0fff | ||
790 | sub r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64))) | ||
791 | subs r8, r4, r7 | ||
792 | rsbcss r8, r8, #(2b - 1b) | ||
793 | strcs r7, [sp, #S_PC] | ||
794 | #if __LINUX_ARM_ARCH__ < 6 | ||
795 | bcc kuser_cmpxchg32_fixup | ||
796 | #endif | ||
797 | mov pc, lr | ||
798 | .previous | ||
799 | |||
800 | #else | ||
801 | #warning "NPTL on non MMU needs fixing" | ||
802 | mov r0, #-1 | ||
803 | adds r0, r0, #0 | ||
835 | usr_ret lr | 804 | usr_ret lr |
805 | #endif | ||
806 | |||
807 | #else | ||
808 | #error "incoherent kernel configuration" | ||
809 | #endif | ||
810 | |||
811 | /* pad to next slot */ | ||
812 | .rept (16 - (. - __kuser_cmpxchg64)/4) | ||
813 | .word 0 | ||
814 | .endr | ||
836 | 815 | ||
837 | .align 5 | 816 | .align 5 |
838 | 817 | ||
839 | /* | 818 | __kuser_memory_barrier: @ 0xffff0fa0 |
840 | * Reference prototype: | 819 | smp_dmb arm |
841 | * | 820 | usr_ret lr |
842 | * int __kernel_cmpxchg(int oldval, int newval, int *ptr) | 821 | |
843 | * | 822 | .align 5 |
844 | * Input: | ||
845 | * | ||
846 | * r0 = oldval | ||
847 | * r1 = newval | ||
848 | * r2 = ptr | ||
849 | * lr = return address | ||
850 | * | ||
851 | * Output: | ||
852 | * | ||
853 | * r0 = returned value (zero or non-zero) | ||
854 | * C flag = set if r0 == 0, clear if r0 != 0 | ||
855 | * | ||
856 | * Clobbered: | ||
857 | * | ||
858 | * r3, ip, flags | ||
859 | * | ||
860 | * Definition and user space usage example: | ||
861 | * | ||
862 | * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr); | ||
863 | * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) | ||
864 | * | ||
865 | * Atomically store newval in *ptr if *ptr is equal to oldval for user space. | ||
866 | * Return zero if *ptr was changed or non-zero if no exchange happened. | ||
867 | * The C flag is also set if *ptr was changed to allow for assembly | ||
868 | * optimization in the calling code. | ||
869 | * | ||
870 | * Notes: | ||
871 | * | ||
872 | * - This routine already includes memory barriers as needed. | ||
873 | * | ||
874 | * For example, a user space atomic_add implementation could look like this: | ||
875 | * | ||
876 | * #define atomic_add(ptr, val) \ | ||
877 | * ({ register unsigned int *__ptr asm("r2") = (ptr); \ | ||
878 | * register unsigned int __result asm("r1"); \ | ||
879 | * asm volatile ( \ | ||
880 | * "1: @ atomic_add\n\t" \ | ||
881 | * "ldr r0, [r2]\n\t" \ | ||
882 | * "mov r3, #0xffff0fff\n\t" \ | ||
883 | * "add lr, pc, #4\n\t" \ | ||
884 | * "add r1, r0, %2\n\t" \ | ||
885 | * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \ | ||
886 | * "bcc 1b" \ | ||
887 | * : "=&r" (__result) \ | ||
888 | * : "r" (__ptr), "rIL" (val) \ | ||
889 | * : "r0","r3","ip","lr","cc","memory" ); \ | ||
890 | * __result; }) | ||
891 | */ | ||
892 | 823 | ||
893 | __kuser_cmpxchg: @ 0xffff0fc0 | 824 | __kuser_cmpxchg: @ 0xffff0fc0 |
894 | 825 | ||
@@ -925,15 +856,15 @@ __kuser_cmpxchg: @ 0xffff0fc0 | |||
925 | usr_ret lr | 856 | usr_ret lr |
926 | 857 | ||
927 | .text | 858 | .text |
928 | kuser_cmpxchg_fixup: | 859 | kuser_cmpxchg32_fixup: |
929 | @ Called from kuser_cmpxchg_check macro. | 860 | @ Called from kuser_cmpxchg_check macro. |
930 | @ r2 = address of interrupted insn (must be preserved). | 861 | @ r4 = address of interrupted insn (must be preserved). |
931 | @ sp = saved regs. r7 and r8 are clobbered. | 862 | @ sp = saved regs. r7 and r8 are clobbered. |
932 | @ 1b = first critical insn, 2b = last critical insn. | 863 | @ 1b = first critical insn, 2b = last critical insn. |
933 | @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. | 864 | @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. |
934 | mov r7, #0xffff0fff | 865 | mov r7, #0xffff0fff |
935 | sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) | 866 | sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) |
936 | subs r8, r2, r7 | 867 | subs r8, r4, r7 |
937 | rsbcss r8, r8, #(2b - 1b) | 868 | rsbcss r8, r8, #(2b - 1b) |
938 | strcs r7, [sp, #S_PC] | 869 | strcs r7, [sp, #S_PC] |
939 | mov pc, lr | 870 | mov pc, lr |
@@ -963,39 +894,6 @@ kuser_cmpxchg_fixup: | |||
963 | 894 | ||
964 | .align 5 | 895 | .align 5 |
965 | 896 | ||
966 | /* | ||
967 | * Reference prototype: | ||
968 | * | ||
969 | * int __kernel_get_tls(void) | ||
970 | * | ||
971 | * Input: | ||
972 | * | ||
973 | * lr = return address | ||
974 | * | ||
975 | * Output: | ||
976 | * | ||
977 | * r0 = TLS value | ||
978 | * | ||
979 | * Clobbered: | ||
980 | * | ||
981 | * none | ||
982 | * | ||
983 | * Definition and user space usage example: | ||
984 | * | ||
985 | * typedef int (__kernel_get_tls_t)(void); | ||
986 | * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0) | ||
987 | * | ||
988 | * Get the TLS value as previously set via the __ARM_NR_set_tls syscall. | ||
989 | * | ||
990 | * This could be used as follows: | ||
991 | * | ||
992 | * #define __kernel_get_tls() \ | ||
993 | * ({ register unsigned int __val asm("r0"); \ | ||
994 | * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \ | ||
995 | * : "=r" (__val) : : "lr","cc" ); \ | ||
996 | * __val; }) | ||
997 | */ | ||
998 | |||
999 | __kuser_get_tls: @ 0xffff0fe0 | 897 | __kuser_get_tls: @ 0xffff0fe0 |
1000 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init | 898 | ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init |
1001 | usr_ret lr | 899 | usr_ret lr |
@@ -1004,19 +902,6 @@ __kuser_get_tls: @ 0xffff0fe0 | |||
1004 | .word 0 @ 0xffff0ff0 software TLS value, then | 902 | .word 0 @ 0xffff0ff0 software TLS value, then |
1005 | .endr @ pad up to __kuser_helper_version | 903 | .endr @ pad up to __kuser_helper_version |
1006 | 904 | ||
1007 | /* | ||
1008 | * Reference declaration: | ||
1009 | * | ||
1010 | * extern unsigned int __kernel_helper_version; | ||
1011 | * | ||
1012 | * Definition and user space usage example: | ||
1013 | * | ||
1014 | * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc) | ||
1015 | * | ||
1016 | * User space may read this to determine the curent number of helpers | ||
1017 | * available. | ||
1018 | */ | ||
1019 | |||
1020 | __kuser_helper_version: @ 0xffff0ffc | 905 | __kuser_helper_version: @ 0xffff0ffc |
1021 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) | 906 | .word ((__kuser_helper_end - __kuser_helper_start) >> 5) |
1022 | 907 | ||
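Aside: the kuser_cmpxchg32_fixup above implements a single unsigned range check: if the user task was interrupted anywhere inside the helper's critical section (between labels 1b and 2b), the saved PC is rewound to 1b so the cmpxchg restarts cleanly. A minimal C sketch of that check, assuming only the semantics visible in the subs/rsbcss/strcs sequence (an editorial illustration, not kernel code):

#include <stdint.h>
#include <stdbool.h>

/* Rewind the saved user PC to the start of the cmpxchg critical
 * section if it was interrupted inside [critical_start, critical_end]. */
static bool fixup_cmpxchg_pc(uint32_t *saved_pc,
                             uint32_t critical_start,  /* label 1b */
                             uint32_t critical_end)    /* label 2b */
{
        /* One unsigned compare covers both bounds, as subs + rsbcss do. */
        if (*saved_pc - critical_start <= critical_end - critical_start) {
                *saved_pc = critical_start;   /* "strcs r7, [sp, #S_PC]" */
                return true;
        }
        return false;
}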
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 051166c2a932..9a8531eadd3d 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S | |||
@@ -121,15 +121,13 @@ | |||
121 | .endm | 121 | .endm |
122 | #else /* CONFIG_THUMB2_KERNEL */ | 122 | #else /* CONFIG_THUMB2_KERNEL */ |
123 | .macro svc_exit, rpsr | 123 | .macro svc_exit, rpsr |
124 | ldr lr, [sp, #S_SP] @ top of the stack | ||
125 | ldrd r0, r1, [sp, #S_LR] @ calling lr and pc | ||
124 | clrex @ clear the exclusive monitor | 126 | clrex @ clear the exclusive monitor |
125 | ldr r0, [sp, #S_SP] @ top of the stack | 127 | stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context |
126 | ldr r1, [sp, #S_PC] @ return address | ||
127 | tst r0, #4 @ orig stack 8-byte aligned? | ||
128 | stmdb r0, {r1, \rpsr} @ rfe context | ||
129 | ldmia sp, {r0 - r12} | 128 | ldmia sp, {r0 - r12} |
130 | ldr lr, [sp, #S_LR] | 129 | mov sp, lr |
131 | addeq sp, sp, #S_FRAME_SIZE - 8 @ aligned | 130 | ldr lr, [sp], #4 |
132 | addne sp, sp, #S_FRAME_SIZE - 4 @ not aligned | ||
133 | rfeia sp! | 131 | rfeia sp! |
134 | .endm | 132 | .endm |
135 | 133 | ||
@@ -165,25 +163,6 @@ | |||
165 | .endm | 163 | .endm |
166 | #endif /* !CONFIG_THUMB2_KERNEL */ | 164 | #endif /* !CONFIG_THUMB2_KERNEL */ |
167 | 165 | ||
168 | @ | ||
169 | @ Debug exceptions are taken as prefetch or data aborts. | ||
170 | @ We must disable preemption during the handler so that | ||
171 | @ we can access the debug registers safely. | ||
172 | @ | ||
173 | .macro debug_entry, fsr | ||
174 | #if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT) | ||
175 | ldr r4, =0x40f @ mask out fsr.fs | ||
176 | and r5, r4, \fsr | ||
177 | cmp r5, #2 @ debug exception | ||
178 | bne 1f | ||
179 | get_thread_info r10 | ||
180 | ldr r6, [r10, #TI_PREEMPT] @ get preempt count | ||
181 | add r11, r6, #1 @ increment it | ||
182 | str r11, [r10, #TI_PREEMPT] | ||
183 | 1: | ||
184 | #endif | ||
185 | .endm | ||
186 | |||
187 | /* | 166 | /* |
188 | * These are the registers used in the syscall handler, and allow us to | 167 | * These are the registers used in the syscall handler, and allow us to |
189 | * have in theory up to 7 arguments to a function - r0 to r6. | 168 | * have in theory up to 7 arguments to a function - r0 to r6. |
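Aside: the reworked ARM-mode svc_exit builds its exception-return frame directly below the SVC stack pointer saved at entry, then points sp at it and returns with "rfeia sp!". The layout sketched below is an assumption read off the "stmdb lr!, {r0, r1, \rpsr}" sequence (with \rpsr held in a higher-numbered register such as r5); it is shown only to make the frame visible and is not taken from any header:

/* Hypothetical view of the frame left just below the saved SP: */
struct svc_exit_frame {
        unsigned long lr;     /* reloaded by "ldr lr, [sp], #4" */
        unsigned long pc;     /* consumed by "rfeia sp!"        */
        unsigned long psr;    /* consumed by "rfeia sp!"        */
};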
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 6b1e0ad9ec3b..d46f25968bec 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -32,8 +32,16 @@ | |||
32 | * numbers for r1. | 32 | * numbers for r1. |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | .arm | ||
36 | |||
35 | __HEAD | 37 | __HEAD |
36 | ENTRY(stext) | 38 | ENTRY(stext) |
39 | |||
40 | THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. | ||
41 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, | ||
42 | THUMB( .thumb ) @ switch to Thumb now. | ||
43 | THUMB(1: ) | ||
44 | |||
37 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode | 45 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode |
38 | @ and irqs disabled | 46 | @ and irqs disabled |
39 | #ifndef CONFIG_CPU_CP15 | 47 | #ifndef CONFIG_CPU_CP15 |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 278c1b0ebb2e..742b6108a001 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -71,8 +71,16 @@ | |||
71 | * crap here - that's what the boot loader (or in extreme, well justified | 71 | * crap here - that's what the boot loader (or in extreme, well justified |
72 | * circumstances, zImage) is for. | 72 | * circumstances, zImage) is for. |
73 | */ | 73 | */ |
74 | .arm | ||
75 | |||
74 | __HEAD | 76 | __HEAD |
75 | ENTRY(stext) | 77 | ENTRY(stext) |
78 | |||
79 | THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. | ||
80 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, | ||
81 | THUMB( .thumb ) @ switch to Thumb now. | ||
82 | THUMB(1: ) | ||
83 | |||
76 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode | 84 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode |
77 | @ and irqs disabled | 85 | @ and irqs disabled |
78 | mrc p15, 0, r9, c0, c0 @ get processor id | 86 | mrc p15, 0, r9, c0, c0 @ get processor id |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 87acc25d7a3e..a927ca1f5566 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -796,7 +796,7 @@ unlock: | |||
796 | 796 | ||
797 | /* | 797 | /* |
798 | * Called from either the Data Abort Handler [watchpoint] or the | 798 | * Called from either the Data Abort Handler [watchpoint] or the |
799 | * Prefetch Abort Handler [breakpoint] with preemption disabled. | 799 | * Prefetch Abort Handler [breakpoint] with interrupts disabled. |
800 | */ | 800 | */ |
801 | static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | 801 | static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, |
802 | struct pt_regs *regs) | 802 | struct pt_regs *regs) |
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
804 | int ret = 0; | 804 | int ret = 0; |
805 | u32 dscr; | 805 | u32 dscr; |
806 | 806 | ||
807 | /* We must be called with preemption disabled. */ | 807 | preempt_disable(); |
808 | WARN_ON(preemptible()); | 808 | |
809 | if (interrupts_enabled(regs)) | ||
810 | local_irq_enable(); | ||
809 | 811 | ||
810 | /* We only handle watchpoints and hardware breakpoints. */ | 812 | /* We only handle watchpoints and hardware breakpoints. */ |
811 | ARM_DBG_READ(c1, 0, dscr); | 813 | ARM_DBG_READ(c1, 0, dscr); |
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
824 | ret = 1; /* Unhandled fault. */ | 826 | ret = 1; /* Unhandled fault. */ |
825 | } | 827 | } |
826 | 828 | ||
827 | /* | ||
828 | * Re-enable preemption after it was disabled in the | ||
829 | * low-level exception handling code. | ||
830 | */ | ||
831 | preempt_enable(); | 829 | preempt_enable(); |
832 | 830 | ||
833 | return ret; | 831 | return ret; |
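Aside: with this change the debug-fault handler disables preemption itself and, when the interrupted context had IRQs on, re-enables them before touching the debug registers. The interrupts_enabled(regs) test is essentially an I-bit check on the saved CPSR; the sketch below is written from memory of the usual ARM ptrace definitions rather than copied from them, so treat it as an assumption:

/* Kernel-context sketch (assumed equivalent of interrupts_enabled(regs)). */
static inline int saved_context_had_irqs_on(const struct pt_regs *regs)
{
        return !(regs->ARM_cpsr & PSR_I_BIT);   /* I bit clear => IRQs were on */
}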
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 83bbad03fcc6..0f928a131af8 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void) | |||
131 | 131 | ||
132 | #ifdef CONFIG_HOTPLUG_CPU | 132 | #ifdef CONFIG_HOTPLUG_CPU |
133 | 133 | ||
134 | static bool migrate_one_irq(struct irq_data *d) | 134 | static bool migrate_one_irq(struct irq_desc *desc) |
135 | { | 135 | { |
136 | unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); | 136 | struct irq_data *d = irq_desc_get_irq_data(desc); |
137 | const struct cpumask *affinity = d->affinity; | ||
138 | struct irq_chip *c; | ||
137 | bool ret = false; | 139 | bool ret = false; |
138 | 140 | ||
139 | if (cpu >= nr_cpu_ids) { | 141 | /* |
140 | cpu = cpumask_any(cpu_online_mask); | 142 | * If this is a per-CPU interrupt, or the affinity does not |
143 | * include this CPU, then we have nothing to do. | ||
144 | */ | ||
145 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
146 | return false; | ||
147 | |||
148 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
149 | affinity = cpu_online_mask; | ||
141 | ret = true; | 150 | ret = true; |
142 | } | 151 | } |
143 | 152 | ||
144 | pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); | 153 | c = irq_data_get_irq_chip(d); |
145 | 154 | if (c->irq_set_affinity) | |
146 | d->chip->irq_set_affinity(d, cpumask_of(cpu), true); | 155 | c->irq_set_affinity(d, affinity, true); |
156 | else | ||
157 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
147 | 158 | ||
148 | return ret; | 159 | return ret; |
149 | } | 160 | } |
150 | 161 | ||
151 | /* | 162 | /* |
152 | * The CPU has been marked offline. Migrate IRQs off this CPU. If | 163 | * The current CPU has been marked offline. Migrate IRQs off this CPU. |
153 | * the affinity settings do not allow other CPUs, force them onto any | 164 | * If the affinity settings do not allow other CPUs, force them onto any |
154 | * available CPU. | 165 | * available CPU. |
166 | * | ||
167 | * Note: we must iterate over all IRQs, whether they have an attached | ||
168 | * action structure or not, as we need to get chained interrupts too. | ||
155 | */ | 169 | */ |
156 | void migrate_irqs(void) | 170 | void migrate_irqs(void) |
157 | { | 171 | { |
158 | unsigned int i, cpu = smp_processor_id(); | 172 | unsigned int i; |
159 | struct irq_desc *desc; | 173 | struct irq_desc *desc; |
160 | unsigned long flags; | 174 | unsigned long flags; |
161 | 175 | ||
162 | local_irq_save(flags); | 176 | local_irq_save(flags); |
163 | 177 | ||
164 | for_each_irq_desc(i, desc) { | 178 | for_each_irq_desc(i, desc) { |
165 | struct irq_data *d = &desc->irq_data; | ||
166 | bool affinity_broken = false; | 179 | bool affinity_broken = false; |
167 | 180 | ||
168 | raw_spin_lock(&desc->lock); | 181 | if (!desc) |
169 | do { | 182 | continue; |
170 | if (desc->action == NULL) | ||
171 | break; | ||
172 | |||
173 | if (d->node != cpu) | ||
174 | break; | ||
175 | 183 | ||
176 | affinity_broken = migrate_one_irq(d); | 184 | raw_spin_lock(&desc->lock); |
177 | } while (0); | 185 | affinity_broken = migrate_one_irq(desc); |
178 | raw_spin_unlock(&desc->lock); | 186 | raw_spin_unlock(&desc->lock); |
179 | 187 | ||
180 | if (affinity_broken && printk_ratelimit()) | 188 | if (affinity_broken && printk_ratelimit()) |
181 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); | 189 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, |
190 | smp_processor_id()); | ||
182 | } | 191 | } |
183 | 192 | ||
184 | local_irq_restore(flags); | 193 | local_irq_restore(flags); |
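Aside: the subtle part of the new migrate_one_irq() is how it detects that an IRQ's affinity mask no longer intersects the online CPUs: cpumask_any_and() returns a value >= nr_cpu_ids when the intersection is empty, which is the cue to fall back to cpu_online_mask and report the affinity as broken. A tiny kernel-context sketch of just that test (illustration only; it mirrors the patch rather than extending it):

/* Does this affinity mask still name at least one online CPU? */
static bool affinity_needs_fallback(const struct cpumask *affinity)
{
        return cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids;
}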
diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c new file mode 100644 index 000000000000..79203ee1d039 --- /dev/null +++ b/arch/arm/kernel/kprobes-arm.c | |||
@@ -0,0 +1,999 @@ | |||
1 | /* | ||
2 | * arch/arm/kernel/kprobes-decode.c | ||
3 | * | ||
4 | * Copyright (C) 2006, 2007 Motorola Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * We do not have hardware single-stepping on ARM. This | ||
18 | * effort is further complicated by the ARM not having a | ||
19 | * "next PC" register. Instructions that change the PC | ||
20 | * can't be safely single-stepped in an MP environment, so | ||
21 | * we have a lot of work to do: | ||
22 | * | ||
23 | * In the prepare phase: | ||
24 | * *) If it is an instruction that does anything | ||
25 | * with the CPU mode, we reject it for a kprobe. | ||
26 | * (This is out of laziness rather than need. The | ||
27 | * instructions could be simulated.) | ||
28 | * | ||
29 | * *) Otherwise, decode the instruction rewriting its | ||
30 | * registers to take fixed, ordered registers and | ||
31 | * setting a handler for it to run the instruction. | ||
32 | * | ||
33 | * In the execution phase by an instruction's handler: | ||
34 | * | ||
35 | * *) If the PC is written to by the instruction, the | ||
36 | * instruction must be fully simulated in software. | ||
37 | * | ||
38 | * *) Otherwise, a modified form of the instruction is | ||
39 | * directly executed. Its handler calls the | ||
40 | * instruction in insn[0]. In insn[1] is a | ||
41 | * "mov pc, lr" to return. | ||
42 | * | ||
43 | * Before calling, load up the reordered registers | ||
44 | * from the original instruction's registers. If one | ||
45 | * of the original input registers is the PC, compute | ||
46 | * and adjust the appropriate input register. | ||
47 | * | ||
48 | * After call completes, copy the output registers to | ||
49 | * the original instruction's original registers. | ||
50 | * | ||
51 | * We don't use a real breakpoint instruction since that | ||
52 | * would have us in the kernel go from SVC mode to SVC | ||
53 | * mode, losing the link register. Instead we use an | ||
54 | * undefined instruction. To simplify processing, the | ||
55 | * undefined instruction used for kprobes must be reserved | ||
56 | * exclusively for kprobes use. | ||
57 | * | ||
58 | * TODO: ifdef out some instruction decoding based on architecture. | ||
59 | */ | ||
60 | |||
61 | #include <linux/kernel.h> | ||
62 | #include <linux/kprobes.h> | ||
63 | |||
64 | #include "kprobes.h" | ||
65 | |||
66 | #define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) | ||
67 | |||
68 | #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) | ||
69 | |||
70 | #if __LINUX_ARM_ARCH__ >= 6 | ||
71 | #define BLX(reg) "blx "reg" \n\t" | ||
72 | #else | ||
73 | #define BLX(reg) "mov lr, pc \n\t" \ | ||
74 | "mov pc, "reg" \n\t" | ||
75 | #endif | ||
76 | |||
77 | /* | ||
78 | * To avoid the complications of mimicking single-stepping on a | ||
79 | * processor without a Next-PC or a single-step mode, and to | ||
80 | * avoid having to deal with the side-effects of boosting, we | ||
81 | * simulate or emulate (almost) all ARM instructions. | ||
82 | * | ||
83 | * "Simulation" is where the instruction's behavior is duplicated in | ||
84 | * C code. "Emulation" is where the original instruction is rewritten | ||
85 | * and executed, often by altering its registers. | ||
86 | * | ||
87 | * By having all behavior of the kprobe'd instruction completed before | ||
88 | * returning from the kprobe_handler(), all locks (scheduler and | ||
89 | * interrupt) can safely be released. There is no need for secondary | ||
90 | * breakpoints, no race with MP or preemptable kernels, nor having to | ||
91 | * clean up resource counts at a later time, impacting overall system | ||
92 | * performance. By rewriting the instruction, only the minimum registers | ||
93 | * need to be loaded and saved back, optimizing performance. | ||
94 | * | ||
95 | * Calling the insnslot_*_rwflags version of a function doesn't hurt | ||
96 | * anything even when the CPSR flags aren't updated by the | ||
97 | * instruction. It's just a little slower in return for saving | ||
98 | * a little space by not having a duplicate function that doesn't | ||
99 | * update the flags. (The same can be said of the optimization for | ||
100 | * instructions that do or don't perform register writeback.) | ||
101 | * Also, instructions can either read the flags, only write the | ||
102 | * flags, or read and write the flags. To save combinations | ||
103 | * rather than for sheer performance, flag functions just assume | ||
104 | * read and write of flags. | ||
105 | */ | ||
106 | |||
107 | static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) | ||
108 | { | ||
109 | kprobe_opcode_t insn = p->opcode; | ||
110 | long iaddr = (long)p->addr; | ||
111 | int disp = branch_displacement(insn); | ||
112 | |||
113 | if (insn & (1 << 24)) | ||
114 | regs->ARM_lr = iaddr + 4; | ||
115 | |||
116 | regs->ARM_pc = iaddr + 8 + disp; | ||
117 | } | ||
118 | |||
119 | static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) | ||
120 | { | ||
121 | kprobe_opcode_t insn = p->opcode; | ||
122 | long iaddr = (long)p->addr; | ||
123 | int disp = branch_displacement(insn); | ||
124 | |||
125 | regs->ARM_lr = iaddr + 4; | ||
126 | regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2); | ||
127 | regs->ARM_cpsr |= PSR_T_BIT; | ||
128 | } | ||
129 | |||
130 | static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) | ||
131 | { | ||
132 | kprobe_opcode_t insn = p->opcode; | ||
133 | int rm = insn & 0xf; | ||
134 | long rmv = regs->uregs[rm]; | ||
135 | |||
136 | if (insn & (1 << 5)) | ||
137 | regs->ARM_lr = (long)p->addr + 4; | ||
138 | |||
139 | regs->ARM_pc = rmv & ~0x1; | ||
140 | regs->ARM_cpsr &= ~PSR_T_BIT; | ||
141 | if (rmv & 0x1) | ||
142 | regs->ARM_cpsr |= PSR_T_BIT; | ||
143 | } | ||
144 | |||
145 | static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) | ||
146 | { | ||
147 | kprobe_opcode_t insn = p->opcode; | ||
148 | int rd = (insn >> 12) & 0xf; | ||
149 | unsigned long mask = 0xf8ff03df; /* Mask out execution state */ | ||
150 | regs->uregs[rd] = regs->ARM_cpsr & mask; | ||
151 | } | ||
152 | |||
153 | static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) | ||
154 | { | ||
155 | regs->uregs[12] = regs->uregs[13]; | ||
156 | } | ||
157 | |||
158 | static void __kprobes | ||
159 | emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs) | ||
160 | { | ||
161 | kprobe_opcode_t insn = p->opcode; | ||
162 | unsigned long pc = (unsigned long)p->addr + 8; | ||
163 | int rt = (insn >> 12) & 0xf; | ||
164 | int rn = (insn >> 16) & 0xf; | ||
165 | int rm = insn & 0xf; | ||
166 | |||
167 | register unsigned long rtv asm("r0") = regs->uregs[rt]; | ||
168 | register unsigned long rt2v asm("r1") = regs->uregs[rt+1]; | ||
169 | register unsigned long rnv asm("r2") = (rn == 15) ? pc | ||
170 | : regs->uregs[rn]; | ||
171 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
172 | |||
173 | __asm__ __volatile__ ( | ||
174 | BLX("%[fn]") | ||
175 | : "=r" (rtv), "=r" (rt2v), "=r" (rnv) | ||
176 | : "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv), | ||
177 | [fn] "r" (p->ainsn.insn_fn) | ||
178 | : "lr", "memory", "cc" | ||
179 | ); | ||
180 | |||
181 | regs->uregs[rt] = rtv; | ||
182 | regs->uregs[rt+1] = rt2v; | ||
183 | if (is_writeback(insn)) | ||
184 | regs->uregs[rn] = rnv; | ||
185 | } | ||
186 | |||
187 | static void __kprobes | ||
188 | emulate_ldr(struct kprobe *p, struct pt_regs *regs) | ||
189 | { | ||
190 | kprobe_opcode_t insn = p->opcode; | ||
191 | unsigned long pc = (unsigned long)p->addr + 8; | ||
192 | int rt = (insn >> 12) & 0xf; | ||
193 | int rn = (insn >> 16) & 0xf; | ||
194 | int rm = insn & 0xf; | ||
195 | |||
196 | register unsigned long rtv asm("r0"); | ||
197 | register unsigned long rnv asm("r2") = (rn == 15) ? pc | ||
198 | : regs->uregs[rn]; | ||
199 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
200 | |||
201 | __asm__ __volatile__ ( | ||
202 | BLX("%[fn]") | ||
203 | : "=r" (rtv), "=r" (rnv) | ||
204 | : "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) | ||
205 | : "lr", "memory", "cc" | ||
206 | ); | ||
207 | |||
208 | if (rt == 15) | ||
209 | load_write_pc(rtv, regs); | ||
210 | else | ||
211 | regs->uregs[rt] = rtv; | ||
212 | |||
213 | if (is_writeback(insn)) | ||
214 | regs->uregs[rn] = rnv; | ||
215 | } | ||
216 | |||
217 | static void __kprobes | ||
218 | emulate_str(struct kprobe *p, struct pt_regs *regs) | ||
219 | { | ||
220 | kprobe_opcode_t insn = p->opcode; | ||
221 | unsigned long rtpc = (unsigned long)p->addr + str_pc_offset; | ||
222 | unsigned long rnpc = (unsigned long)p->addr + 8; | ||
223 | int rt = (insn >> 12) & 0xf; | ||
224 | int rn = (insn >> 16) & 0xf; | ||
225 | int rm = insn & 0xf; | ||
226 | |||
227 | register unsigned long rtv asm("r0") = (rt == 15) ? rtpc | ||
228 | : regs->uregs[rt]; | ||
229 | register unsigned long rnv asm("r2") = (rn == 15) ? rnpc | ||
230 | : regs->uregs[rn]; | ||
231 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
232 | |||
233 | __asm__ __volatile__ ( | ||
234 | BLX("%[fn]") | ||
235 | : "=r" (rnv) | ||
236 | : "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) | ||
237 | : "lr", "memory", "cc" | ||
238 | ); | ||
239 | |||
240 | if (is_writeback(insn)) | ||
241 | regs->uregs[rn] = rnv; | ||
242 | } | ||
243 | |||
244 | static void __kprobes | ||
245 | emulate_rd12rn16rm0rs8_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
246 | { | ||
247 | kprobe_opcode_t insn = p->opcode; | ||
248 | unsigned long pc = (unsigned long)p->addr + 8; | ||
249 | int rd = (insn >> 12) & 0xf; | ||
250 | int rn = (insn >> 16) & 0xf; | ||
251 | int rm = insn & 0xf; | ||
252 | int rs = (insn >> 8) & 0xf; | ||
253 | |||
254 | register unsigned long rdv asm("r0") = regs->uregs[rd]; | ||
255 | register unsigned long rnv asm("r2") = (rn == 15) ? pc | ||
256 | : regs->uregs[rn]; | ||
257 | register unsigned long rmv asm("r3") = (rm == 15) ? pc | ||
258 | : regs->uregs[rm]; | ||
259 | register unsigned long rsv asm("r1") = regs->uregs[rs]; | ||
260 | unsigned long cpsr = regs->ARM_cpsr; | ||
261 | |||
262 | __asm__ __volatile__ ( | ||
263 | "msr cpsr_fs, %[cpsr] \n\t" | ||
264 | BLX("%[fn]") | ||
265 | "mrs %[cpsr], cpsr \n\t" | ||
266 | : "=r" (rdv), [cpsr] "=r" (cpsr) | ||
267 | : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv), | ||
268 | "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) | ||
269 | : "lr", "memory", "cc" | ||
270 | ); | ||
271 | |||
272 | if (rd == 15) | ||
273 | alu_write_pc(rdv, regs); | ||
274 | else | ||
275 | regs->uregs[rd] = rdv; | ||
276 | regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); | ||
277 | } | ||
278 | |||
279 | static void __kprobes | ||
280 | emulate_rd12rn16rm0_rwflags_nopc(struct kprobe *p, struct pt_regs *regs) | ||
281 | { | ||
282 | kprobe_opcode_t insn = p->opcode; | ||
283 | int rd = (insn >> 12) & 0xf; | ||
284 | int rn = (insn >> 16) & 0xf; | ||
285 | int rm = insn & 0xf; | ||
286 | |||
287 | register unsigned long rdv asm("r0") = regs->uregs[rd]; | ||
288 | register unsigned long rnv asm("r2") = regs->uregs[rn]; | ||
289 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
290 | unsigned long cpsr = regs->ARM_cpsr; | ||
291 | |||
292 | __asm__ __volatile__ ( | ||
293 | "msr cpsr_fs, %[cpsr] \n\t" | ||
294 | BLX("%[fn]") | ||
295 | "mrs %[cpsr], cpsr \n\t" | ||
296 | : "=r" (rdv), [cpsr] "=r" (cpsr) | ||
297 | : "0" (rdv), "r" (rnv), "r" (rmv), | ||
298 | "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) | ||
299 | : "lr", "memory", "cc" | ||
300 | ); | ||
301 | |||
302 | regs->uregs[rd] = rdv; | ||
303 | regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); | ||
304 | } | ||
305 | |||
306 | static void __kprobes | ||
307 | emulate_rd16rn12rm0rs8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs) | ||
308 | { | ||
309 | kprobe_opcode_t insn = p->opcode; | ||
310 | int rd = (insn >> 16) & 0xf; | ||
311 | int rn = (insn >> 12) & 0xf; | ||
312 | int rm = insn & 0xf; | ||
313 | int rs = (insn >> 8) & 0xf; | ||
314 | |||
315 | register unsigned long rdv asm("r2") = regs->uregs[rd]; | ||
316 | register unsigned long rnv asm("r0") = regs->uregs[rn]; | ||
317 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
318 | register unsigned long rsv asm("r1") = regs->uregs[rs]; | ||
319 | unsigned long cpsr = regs->ARM_cpsr; | ||
320 | |||
321 | __asm__ __volatile__ ( | ||
322 | "msr cpsr_fs, %[cpsr] \n\t" | ||
323 | BLX("%[fn]") | ||
324 | "mrs %[cpsr], cpsr \n\t" | ||
325 | : "=r" (rdv), [cpsr] "=r" (cpsr) | ||
326 | : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv), | ||
327 | "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) | ||
328 | : "lr", "memory", "cc" | ||
329 | ); | ||
330 | |||
331 | regs->uregs[rd] = rdv; | ||
332 | regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); | ||
333 | } | ||
334 | |||
335 | static void __kprobes | ||
336 | emulate_rd12rm0_noflags_nopc(struct kprobe *p, struct pt_regs *regs) | ||
337 | { | ||
338 | kprobe_opcode_t insn = p->opcode; | ||
339 | int rd = (insn >> 12) & 0xf; | ||
340 | int rm = insn & 0xf; | ||
341 | |||
342 | register unsigned long rdv asm("r0") = regs->uregs[rd]; | ||
343 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
344 | |||
345 | __asm__ __volatile__ ( | ||
346 | BLX("%[fn]") | ||
347 | : "=r" (rdv) | ||
348 | : "0" (rdv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) | ||
349 | : "lr", "memory", "cc" | ||
350 | ); | ||
351 | |||
352 | regs->uregs[rd] = rdv; | ||
353 | } | ||
354 | |||
355 | static void __kprobes | ||
356 | emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(struct kprobe *p, struct pt_regs *regs) | ||
357 | { | ||
358 | kprobe_opcode_t insn = p->opcode; | ||
359 | int rdlo = (insn >> 12) & 0xf; | ||
360 | int rdhi = (insn >> 16) & 0xf; | ||
361 | int rn = insn & 0xf; | ||
362 | int rm = (insn >> 8) & 0xf; | ||
363 | |||
364 | register unsigned long rdlov asm("r0") = regs->uregs[rdlo]; | ||
365 | register unsigned long rdhiv asm("r2") = regs->uregs[rdhi]; | ||
366 | register unsigned long rnv asm("r3") = regs->uregs[rn]; | ||
367 | register unsigned long rmv asm("r1") = regs->uregs[rm]; | ||
368 | unsigned long cpsr = regs->ARM_cpsr; | ||
369 | |||
370 | __asm__ __volatile__ ( | ||
371 | "msr cpsr_fs, %[cpsr] \n\t" | ||
372 | BLX("%[fn]") | ||
373 | "mrs %[cpsr], cpsr \n\t" | ||
374 | : "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr) | ||
375 | : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv), | ||
376 | "2" (cpsr), [fn] "r" (p->ainsn.insn_fn) | ||
377 | : "lr", "memory", "cc" | ||
378 | ); | ||
379 | |||
380 | regs->uregs[rdlo] = rdlov; | ||
381 | regs->uregs[rdhi] = rdhiv; | ||
382 | regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); | ||
383 | } | ||
384 | |||
385 | /* | ||
386 | * For the instruction masking and comparisons in all the "space_*" | ||
387 | * functions below, do _not_ rearrange the order of tests unless | ||
388 | * you're very, very sure of what you are doing. For the sake of | ||
389 | * efficiency, the masks for some tests sometimes assume other tests | ||
390 | * have been done prior to them, so the number of patterns to test | ||
391 | * for an instruction set can be as broad as possible to reduce the | ||
392 | * number of tests needed. | ||
393 | */ | ||
394 | |||
395 | static const union decode_item arm_1111_table[] = { | ||
396 | /* Unconditional instructions */ | ||
397 | |||
398 | /* memory hint 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx */ | ||
399 | /* PLDI (immediate) 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx */ | ||
400 | /* PLDW (immediate) 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx */ | ||
401 | /* PLD (immediate) 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx */ | ||
402 | DECODE_SIMULATE (0xfe300000, 0xf4100000, kprobe_simulate_nop), | ||
403 | |||
404 | /* memory hint 1111 0110 x001 xxxx xxxx xxxx xxx0 xxxx */ | ||
405 | /* PLDI (register) 1111 0110 x101 xxxx xxxx xxxx xxx0 xxxx */ | ||
406 | /* PLDW (register) 1111 0111 x001 xxxx xxxx xxxx xxx0 xxxx */ | ||
407 | /* PLD (register) 1111 0111 x101 xxxx xxxx xxxx xxx0 xxxx */ | ||
408 | DECODE_SIMULATE (0xfe300010, 0xf6100000, kprobe_simulate_nop), | ||
409 | |||
410 | /* BLX (immediate) 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
411 | DECODE_SIMULATE (0xfe000000, 0xfa000000, simulate_blx1), | ||
412 | |||
413 | /* CPS 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ | ||
414 | /* SETEND 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ | ||
415 | /* SRS 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
416 | /* RFE 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
417 | |||
418 | /* Coprocessor instructions... */ | ||
419 | /* MCRR2 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx */ | ||
420 | /* MRRC2 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx */ | ||
421 | /* LDC2 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ | ||
422 | /* STC2 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ | ||
423 | /* CDP2 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ | ||
424 | /* MCR2 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ | ||
425 | /* MRC2 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ | ||
426 | |||
427 | /* Other unallocated instructions... */ | ||
428 | DECODE_END | ||
429 | }; | ||
430 | |||
431 | static const union decode_item arm_cccc_0001_0xx0____0xxx_table[] = { | ||
432 | /* Miscellaneous instructions */ | ||
433 | |||
434 | /* MRS cpsr cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ | ||
435 | DECODE_SIMULATEX(0x0ff000f0, 0x01000000, simulate_mrs, | ||
436 | REGS(0, NOPC, 0, 0, 0)), | ||
437 | |||
438 | /* BX cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ | ||
439 | DECODE_SIMULATE (0x0ff000f0, 0x01200010, simulate_blx2bx), | ||
440 | |||
441 | /* BLX (register) cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ | ||
442 | DECODE_SIMULATEX(0x0ff000f0, 0x01200030, simulate_blx2bx, | ||
443 | REGS(0, 0, 0, 0, NOPC)), | ||
444 | |||
445 | /* CLZ cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ | ||
446 | DECODE_EMULATEX (0x0ff000f0, 0x01600010, emulate_rd12rm0_noflags_nopc, | ||
447 | REGS(0, NOPC, 0, 0, NOPC)), | ||
448 | |||
449 | /* QADD cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx */ | ||
450 | /* QSUB cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx */ | ||
451 | /* QDADD cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx */ | ||
452 | /* QDSUB cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx */ | ||
453 | DECODE_EMULATEX (0x0f9000f0, 0x01000050, emulate_rd12rn16rm0_rwflags_nopc, | ||
454 | REGS(NOPC, NOPC, 0, 0, NOPC)), | ||
455 | |||
456 | /* BXJ cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ | ||
457 | /* MSR cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ | ||
458 | /* MRS spsr cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ | ||
459 | /* BKPT 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ | ||
460 | /* SMC cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ | ||
461 | /* And unallocated instructions... */ | ||
462 | DECODE_END | ||
463 | }; | ||
464 | |||
465 | static const union decode_item arm_cccc_0001_0xx0____1xx0_table[] = { | ||
466 | /* Halfword multiply and multiply-accumulate */ | ||
467 | |||
468 | /* SMLALxy cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ | ||
469 | DECODE_EMULATEX (0x0ff00090, 0x01400080, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc, | ||
470 | REGS(NOPC, NOPC, NOPC, 0, NOPC)), | ||
471 | |||
472 | /* SMULWy cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ | ||
473 | DECODE_OR (0x0ff000b0, 0x012000a0), | ||
474 | /* SMULxy cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ | ||
475 | DECODE_EMULATEX (0x0ff00090, 0x01600080, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
476 | REGS(NOPC, 0, NOPC, 0, NOPC)), | ||
477 | |||
478 | /* SMLAxy cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx */ | ||
479 | DECODE_OR (0x0ff00090, 0x01000080), | ||
480 | /* SMLAWy cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx */ | ||
481 | DECODE_EMULATEX (0x0ff000b0, 0x01200080, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
482 | REGS(NOPC, NOPC, NOPC, 0, NOPC)), | ||
483 | |||
484 | DECODE_END | ||
485 | }; | ||
486 | |||
487 | static const union decode_item arm_cccc_0000_____1001_table[] = { | ||
488 | /* Multiply and multiply-accumulate */ | ||
489 | |||
490 | /* MUL cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx */ | ||
491 | /* MULS cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx */ | ||
492 | DECODE_EMULATEX (0x0fe000f0, 0x00000090, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
493 | REGS(NOPC, 0, NOPC, 0, NOPC)), | ||
494 | |||
495 | /* MLA cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx */ | ||
496 | /* MLAS cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx */ | ||
497 | DECODE_OR (0x0fe000f0, 0x00200090), | ||
498 | /* MLS cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx */ | ||
499 | DECODE_EMULATEX (0x0ff000f0, 0x00600090, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
500 | REGS(NOPC, NOPC, NOPC, 0, NOPC)), | ||
501 | |||
502 | /* UMAAL cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx */ | ||
503 | DECODE_OR (0x0ff000f0, 0x00400090), | ||
504 | /* UMULL cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx */ | ||
505 | /* UMULLS cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx */ | ||
506 | /* UMLAL cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx */ | ||
507 | /* UMLALS cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx */ | ||
508 | /* SMULL cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx */ | ||
509 | /* SMULLS cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx */ | ||
510 | /* SMLAL cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx */ | ||
511 | /* SMLALS cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx */ | ||
512 | DECODE_EMULATEX (0x0f8000f0, 0x00800090, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc, | ||
513 | REGS(NOPC, NOPC, NOPC, 0, NOPC)), | ||
514 | |||
515 | DECODE_END | ||
516 | }; | ||
517 | |||
518 | static const union decode_item arm_cccc_0001_____1001_table[] = { | ||
519 | /* Synchronization primitives */ | ||
520 | |||
521 | /* SWP/SWPB cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */ | ||
522 | DECODE_EMULATEX (0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc, | ||
523 | REGS(NOPC, NOPC, 0, 0, NOPC)), | ||
524 | |||
525 | /* LDREX/STREX{,D,B,H} cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */ | ||
526 | /* And unallocated instructions... */ | ||
527 | DECODE_END | ||
528 | }; | ||
529 | |||
530 | static const union decode_item arm_cccc_000x_____1xx1_table[] = { | ||
531 | /* Extra load/store instructions */ | ||
532 | |||
533 | /* STRHT cccc 0000 xx10 xxxx xxxx xxxx 1011 xxxx */ | ||
534 | /* ??? cccc 0000 xx10 xxxx xxxx xxxx 11x1 xxxx */ | ||
535 | /* LDRHT cccc 0000 xx11 xxxx xxxx xxxx 1011 xxxx */ | ||
536 | /* LDRSBT cccc 0000 xx11 xxxx xxxx xxxx 1101 xxxx */ | ||
537 | /* LDRSHT cccc 0000 xx11 xxxx xxxx xxxx 1111 xxxx */ | ||
538 | DECODE_REJECT (0x0f200090, 0x00200090), | ||
539 | |||
540 | /* LDRD/STRD lr,pc,{... cccc 000x x0x0 xxxx 111x xxxx 1101 xxxx */ | ||
541 | DECODE_REJECT (0x0e10e0d0, 0x0000e0d0), | ||
542 | |||
543 | /* LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx */ | ||
544 | /* STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx */ | ||
545 | DECODE_EMULATEX (0x0e5000d0, 0x000000d0, emulate_ldrdstrd, | ||
546 | REGS(NOPCWB, NOPCX, 0, 0, NOPC)), | ||
547 | |||
548 | /* LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx */ | ||
549 | /* STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx */ | ||
550 | DECODE_EMULATEX (0x0e5000d0, 0x004000d0, emulate_ldrdstrd, | ||
551 | REGS(NOPCWB, NOPCX, 0, 0, 0)), | ||
552 | |||
553 | /* STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx */ | ||
554 | DECODE_EMULATEX (0x0e5000f0, 0x000000b0, emulate_str, | ||
555 | REGS(NOPCWB, NOPC, 0, 0, NOPC)), | ||
556 | |||
557 | /* LDRH (register) cccc 000x x0x1 xxxx xxxx xxxx 1011 xxxx */ | ||
558 | /* LDRSB (register) cccc 000x x0x1 xxxx xxxx xxxx 1101 xxxx */ | ||
559 | /* LDRSH (register) cccc 000x x0x1 xxxx xxxx xxxx 1111 xxxx */ | ||
560 | DECODE_EMULATEX (0x0e500090, 0x00100090, emulate_ldr, | ||
561 | REGS(NOPCWB, NOPC, 0, 0, NOPC)), | ||
562 | |||
563 | /* STRH (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1011 xxxx */ | ||
564 | DECODE_EMULATEX (0x0e5000f0, 0x004000b0, emulate_str, | ||
565 | REGS(NOPCWB, NOPC, 0, 0, 0)), | ||
566 | |||
567 | /* LDRH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1011 xxxx */ | ||
568 | /* LDRSB (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1101 xxxx */ | ||
569 | /* LDRSH (immediate) cccc 000x x1x1 xxxx xxxx xxxx 1111 xxxx */ | ||
570 | DECODE_EMULATEX (0x0e500090, 0x00500090, emulate_ldr, | ||
571 | REGS(NOPCWB, NOPC, 0, 0, 0)), | ||
572 | |||
573 | DECODE_END | ||
574 | }; | ||
575 | |||
576 | static const union decode_item arm_cccc_000x_table[] = { | ||
577 | /* Data-processing (register) */ | ||
578 | |||
579 | /* <op>S PC, ... cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx */ | ||
580 | DECODE_REJECT (0x0e10f000, 0x0010f000), | ||
581 | |||
582 | /* MOV IP, SP 1110 0001 1010 0000 1100 0000 0000 1101 */ | ||
583 | DECODE_SIMULATE (0xffffffff, 0xe1a0c00d, simulate_mov_ipsp), | ||
584 | |||
585 | /* TST (register) cccc 0001 0001 xxxx xxxx xxxx xxx0 xxxx */ | ||
586 | /* TEQ (register) cccc 0001 0011 xxxx xxxx xxxx xxx0 xxxx */ | ||
587 | /* CMP (register) cccc 0001 0101 xxxx xxxx xxxx xxx0 xxxx */ | ||
588 | /* CMN (register) cccc 0001 0111 xxxx xxxx xxxx xxx0 xxxx */ | ||
589 | DECODE_EMULATEX (0x0f900010, 0x01100000, emulate_rd12rn16rm0rs8_rwflags, | ||
590 | REGS(ANY, 0, 0, 0, ANY)), | ||
591 | |||
592 | /* MOV (register) cccc 0001 101x xxxx xxxx xxxx xxx0 xxxx */ | ||
593 | /* MVN (register) cccc 0001 111x xxxx xxxx xxxx xxx0 xxxx */ | ||
594 | DECODE_EMULATEX (0x0fa00010, 0x01a00000, emulate_rd12rn16rm0rs8_rwflags, | ||
595 | REGS(0, ANY, 0, 0, ANY)), | ||
596 | |||
597 | /* AND (register) cccc 0000 000x xxxx xxxx xxxx xxx0 xxxx */ | ||
598 | /* EOR (register) cccc 0000 001x xxxx xxxx xxxx xxx0 xxxx */ | ||
599 | /* SUB (register) cccc 0000 010x xxxx xxxx xxxx xxx0 xxxx */ | ||
600 | /* RSB (register) cccc 0000 011x xxxx xxxx xxxx xxx0 xxxx */ | ||
601 | /* ADD (register) cccc 0000 100x xxxx xxxx xxxx xxx0 xxxx */ | ||
602 | /* ADC (register) cccc 0000 101x xxxx xxxx xxxx xxx0 xxxx */ | ||
603 | /* SBC (register) cccc 0000 110x xxxx xxxx xxxx xxx0 xxxx */ | ||
604 | /* RSC (register) cccc 0000 111x xxxx xxxx xxxx xxx0 xxxx */ | ||
605 | /* ORR (register) cccc 0001 100x xxxx xxxx xxxx xxx0 xxxx */ | ||
606 | /* BIC (register) cccc 0001 110x xxxx xxxx xxxx xxx0 xxxx */ | ||
607 | DECODE_EMULATEX (0x0e000010, 0x00000000, emulate_rd12rn16rm0rs8_rwflags, | ||
608 | REGS(ANY, ANY, 0, 0, ANY)), | ||
609 | |||
610 | /* TST (reg-shift reg) cccc 0001 0001 xxxx xxxx xxxx 0xx1 xxxx */ | ||
611 | /* TEQ (reg-shift reg) cccc 0001 0011 xxxx xxxx xxxx 0xx1 xxxx */ | ||
612 | /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */ | ||
613 | /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */ | ||
614 | DECODE_EMULATEX (0x0f900090, 0x01100010, emulate_rd12rn16rm0rs8_rwflags, | ||
615 | REGS(ANY, 0, NOPC, 0, ANY)), | ||
616 | |||
617 | /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */ | ||
618 | /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */ | ||
619 | DECODE_EMULATEX (0x0fa00090, 0x01a00010, emulate_rd12rn16rm0rs8_rwflags, | ||
620 | REGS(0, ANY, NOPC, 0, ANY)), | ||
621 | |||
622 | /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */ | ||
623 | /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */ | ||
624 | /* SUB (reg-shift reg) cccc 0000 010x xxxx xxxx xxxx 0xx1 xxxx */ | ||
625 | /* RSB (reg-shift reg) cccc 0000 011x xxxx xxxx xxxx 0xx1 xxxx */ | ||
626 | /* ADD (reg-shift reg) cccc 0000 100x xxxx xxxx xxxx 0xx1 xxxx */ | ||
627 | /* ADC (reg-shift reg) cccc 0000 101x xxxx xxxx xxxx 0xx1 xxxx */ | ||
628 | /* SBC (reg-shift reg) cccc 0000 110x xxxx xxxx xxxx 0xx1 xxxx */ | ||
629 | /* RSC (reg-shift reg) cccc 0000 111x xxxx xxxx xxxx 0xx1 xxxx */ | ||
630 | /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */ | ||
631 | /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */ | ||
632 | DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags, | ||
633 | REGS(ANY, ANY, NOPC, 0, ANY)), | ||
634 | |||
635 | DECODE_END | ||
636 | }; | ||
637 | |||
638 | static const union decode_item arm_cccc_001x_table[] = { | ||
639 | /* Data-processing (immediate) */ | ||
640 | |||
641 | /* MOVW cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ | ||
642 | /* MOVT cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ | ||
643 | DECODE_EMULATEX (0x0fb00000, 0x03000000, emulate_rd12rm0_noflags_nopc, | ||
644 | REGS(0, NOPC, 0, 0, 0)), | ||
645 | |||
646 | /* YIELD cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ | ||
647 | DECODE_OR (0x0fff00ff, 0x03200001), | ||
648 | /* SEV cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ | ||
649 | DECODE_EMULATE (0x0fff00ff, 0x03200004, kprobe_emulate_none), | ||
650 | /* NOP cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ | ||
651 | /* WFE cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ | ||
652 | /* WFI cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ | ||
653 | DECODE_SIMULATE (0x0fff00fc, 0x03200000, kprobe_simulate_nop), | ||
654 | /* DBG cccc 0011 0010 0000 xxxx xxxx ffff xxxx */ | ||
655 | /* unallocated hints cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ | ||
656 | /* MSR (immediate) cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx */ | ||
657 | DECODE_REJECT (0x0fb00000, 0x03200000), | ||
658 | |||
659 | /* <op>S PC, ... cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx */ | ||
660 | DECODE_REJECT (0x0e10f000, 0x0210f000), | ||
661 | |||
662 | /* TST (immediate) cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx */ | ||
663 | /* TEQ (immediate) cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx */ | ||
664 | /* CMP (immediate) cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx */ | ||
665 | /* CMN (immediate) cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx */ | ||
666 | DECODE_EMULATEX (0x0f900000, 0x03100000, emulate_rd12rn16rm0rs8_rwflags, | ||
667 | REGS(ANY, 0, 0, 0, 0)), | ||
668 | |||
669 | /* MOV (immediate) cccc 0011 101x xxxx xxxx xxxx xxxx xxxx */ | ||
670 | /* MVN (immediate) cccc 0011 111x xxxx xxxx xxxx xxxx xxxx */ | ||
671 | DECODE_EMULATEX (0x0fa00000, 0x03a00000, emulate_rd12rn16rm0rs8_rwflags, | ||
672 | REGS(0, ANY, 0, 0, 0)), | ||
673 | |||
674 | /* AND (immediate) cccc 0010 000x xxxx xxxx xxxx xxxx xxxx */ | ||
675 | /* EOR (immediate) cccc 0010 001x xxxx xxxx xxxx xxxx xxxx */ | ||
676 | /* SUB (immediate) cccc 0010 010x xxxx xxxx xxxx xxxx xxxx */ | ||
677 | /* RSB (immediate) cccc 0010 011x xxxx xxxx xxxx xxxx xxxx */ | ||
678 | /* ADD (immediate) cccc 0010 100x xxxx xxxx xxxx xxxx xxxx */ | ||
679 | /* ADC (immediate) cccc 0010 101x xxxx xxxx xxxx xxxx xxxx */ | ||
680 | /* SBC (immediate) cccc 0010 110x xxxx xxxx xxxx xxxx xxxx */ | ||
681 | /* RSC (immediate) cccc 0010 111x xxxx xxxx xxxx xxxx xxxx */ | ||
682 | /* ORR (immediate) cccc 0011 100x xxxx xxxx xxxx xxxx xxxx */ | ||
683 | /* BIC (immediate) cccc 0011 110x xxxx xxxx xxxx xxxx xxxx */ | ||
684 | DECODE_EMULATEX (0x0e000000, 0x02000000, emulate_rd12rn16rm0rs8_rwflags, | ||
685 | REGS(ANY, ANY, 0, 0, 0)), | ||
686 | |||
687 | DECODE_END | ||
688 | }; | ||
689 | |||
690 | static const union decode_item arm_cccc_0110_____xxx1_table[] = { | ||
691 | /* Media instructions */ | ||
692 | |||
693 | /* SEL cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx */ | ||
694 | DECODE_EMULATEX (0x0ff000f0, 0x068000b0, emulate_rd12rn16rm0_rwflags_nopc, | ||
695 | REGS(NOPC, NOPC, 0, 0, NOPC)), | ||
696 | |||
697 | /* SSAT cccc 0110 101x xxxx xxxx xxxx xx01 xxxx */ | ||
698 | /* USAT cccc 0110 111x xxxx xxxx xxxx xx01 xxxx */ | ||
699 | DECODE_OR(0x0fa00030, 0x06a00010), | ||
700 | /* SSAT16 cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx */ | ||
701 | /* USAT16 cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx */ | ||
702 | DECODE_EMULATEX (0x0fb000f0, 0x06a00030, emulate_rd12rn16rm0_rwflags_nopc, | ||
703 | REGS(0, NOPC, 0, 0, NOPC)), | ||
704 | |||
705 | /* REV cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ | ||
706 | /* REV16 cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ | ||
707 | /* RBIT cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ | ||
708 | /* REVSH cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ | ||
709 | DECODE_EMULATEX (0x0fb00070, 0x06b00030, emulate_rd12rm0_noflags_nopc, | ||
710 | REGS(0, NOPC, 0, 0, NOPC)), | ||
711 | |||
712 | /* ??? cccc 0110 0x00 xxxx xxxx xxxx xxx1 xxxx */ | ||
713 | DECODE_REJECT (0x0fb00010, 0x06000010), | ||
714 | /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1011 xxxx */ | ||
715 | DECODE_REJECT (0x0f8000f0, 0x060000b0), | ||
716 | /* ??? cccc 0110 0xxx xxxx xxxx xxxx 1101 xxxx */ | ||
717 | DECODE_REJECT (0x0f8000f0, 0x060000d0), | ||
718 | /* SADD16 cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx */ | ||
719 | /* SADDSUBX cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx */ | ||
720 | /* SSUBADDX cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx */ | ||
721 | /* SSUB16 cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx */ | ||
722 | /* SADD8 cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx */ | ||
723 | /* SSUB8 cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx */ | ||
724 | /* QADD16 cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx */ | ||
725 | /* QADDSUBX cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx */ | ||
726 | /* QSUBADDX cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx */ | ||
727 | /* QSUB16 cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx */ | ||
728 | /* QADD8 cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx */ | ||
729 | /* QSUB8 cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx */ | ||
730 | /* SHADD16 cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx */ | ||
731 | /* SHADDSUBX cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx */ | ||
732 | /* SHSUBADDX cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx */ | ||
733 | /* SHSUB16 cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx */ | ||
734 | /* SHADD8 cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx */ | ||
735 | /* SHSUB8 cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx */ | ||
736 | /* UADD16 cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx */ | ||
737 | /* UADDSUBX cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx */ | ||
738 | /* USUBADDX cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx */ | ||
739 | /* USUB16 cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx */ | ||
740 | /* UADD8 cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx */ | ||
741 | /* USUB8 cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx */ | ||
742 | /* UQADD16 cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx */ | ||
743 | /* UQADDSUBX cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx */ | ||
744 | /* UQSUBADDX cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx */ | ||
745 | /* UQSUB16 cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx */ | ||
746 | /* UQADD8 cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx */ | ||
747 | /* UQSUB8 cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx */ | ||
748 | /* UHADD16 cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx */ | ||
749 | /* UHADDSUBX cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx */ | ||
750 | /* UHSUBADDX cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx */ | ||
751 | /* UHSUB16 cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx */ | ||
752 | /* UHADD8 cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx */ | ||
753 | /* UHSUB8 cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx */ | ||
754 | DECODE_EMULATEX (0x0f800010, 0x06000010, emulate_rd12rn16rm0_rwflags_nopc, | ||
755 | REGS(NOPC, NOPC, 0, 0, NOPC)), | ||
756 | |||
757 | /* PKHBT cccc 0110 1000 xxxx xxxx xxxx x001 xxxx */ | ||
758 | /* PKHTB cccc 0110 1000 xxxx xxxx xxxx x101 xxxx */ | ||
759 | DECODE_EMULATEX (0x0ff00030, 0x06800010, emulate_rd12rn16rm0_rwflags_nopc, | ||
760 | REGS(NOPC, NOPC, 0, 0, NOPC)), | ||
761 | |||
762 | /* ??? cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx */ | ||
763 | /* ??? cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx */ | ||
764 | DECODE_REJECT (0x0fb000f0, 0x06900070), | ||
765 | |||
766 | /* SXTB16 cccc 0110 1000 1111 xxxx xxxx 0111 xxxx */ | ||
767 | /* SXTB cccc 0110 1010 1111 xxxx xxxx 0111 xxxx */ | ||
768 | /* SXTH cccc 0110 1011 1111 xxxx xxxx 0111 xxxx */ | ||
769 | /* UXTB16 cccc 0110 1100 1111 xxxx xxxx 0111 xxxx */ | ||
770 | /* UXTB cccc 0110 1110 1111 xxxx xxxx 0111 xxxx */ | ||
771 | /* UXTH cccc 0110 1111 1111 xxxx xxxx 0111 xxxx */ | ||
772 | DECODE_EMULATEX (0x0f8f00f0, 0x068f0070, emulate_rd12rm0_noflags_nopc, | ||
773 | REGS(0, NOPC, 0, 0, NOPC)), | ||
774 | |||
775 | /* SXTAB16 cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx */ | ||
776 | /* SXTAB cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx */ | ||
777 | /* SXTAH cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx */ | ||
778 | /* UXTAB16 cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx */ | ||
779 | /* UXTAB cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx */ | ||
780 | /* UXTAH cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx */ | ||
781 | DECODE_EMULATEX (0x0f8000f0, 0x06800070, emulate_rd12rn16rm0_rwflags_nopc, | ||
782 | REGS(NOPCX, NOPC, 0, 0, NOPC)), | ||
783 | |||
784 | DECODE_END | ||
785 | }; | ||
786 | |||
787 | static const union decode_item arm_cccc_0111_____xxx1_table[] = { | ||
788 | /* Media instructions */ | ||
789 | |||
790 | /* UNDEFINED cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ | ||
791 | DECODE_REJECT (0x0ff000f0, 0x07f000f0), | ||
792 | |||
793 | /* SMLALD cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ | ||
794 | /* SMLSLD cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ | ||
795 | DECODE_EMULATEX (0x0ff00090, 0x07400010, emulate_rdlo12rdhi16rn0rm8_rwflags_nopc, | ||
796 | REGS(NOPC, NOPC, NOPC, 0, NOPC)), | ||
797 | |||
798 | /* SMUAD cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx */ | ||
799 | /* SMUSD cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx */ | ||
800 | DECODE_OR (0x0ff0f090, 0x0700f010), | ||
801 | /* SMMUL cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx */ | ||
802 | DECODE_OR (0x0ff0f0d0, 0x0750f010), | ||
803 | /* USAD8 cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx */ | ||
804 | DECODE_EMULATEX (0x0ff0f0f0, 0x0780f010, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
805 | REGS(NOPC, 0, NOPC, 0, NOPC)), | ||
806 | |||
807 | /* SMLAD cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx */ | ||
808 | /* SMLSD cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx */ | ||
809 | DECODE_OR (0x0ff00090, 0x07000010), | ||
810 | /* SMMLA cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx */ | ||
811 | DECODE_OR (0x0ff000d0, 0x07500010), | ||
812 | /* USADA8 cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx */ | ||
813 | DECODE_EMULATEX (0x0ff000f0, 0x07800010, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
814 | REGS(NOPC, NOPCX, NOPC, 0, NOPC)), | ||
815 | |||
816 | /* SMMLS cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx */ | ||
817 | DECODE_EMULATEX (0x0ff000d0, 0x075000d0, emulate_rd16rn12rm0rs8_rwflags_nopc, | ||
818 | REGS(NOPC, NOPC, NOPC, 0, NOPC)), | ||
819 | |||
820 | /* SBFX cccc 0111 101x xxxx xxxx xxxx x101 xxxx */ | ||
821 | /* UBFX cccc 0111 111x xxxx xxxx xxxx x101 xxxx */ | ||
822 | DECODE_EMULATEX (0x0fa00070, 0x07a00050, emulate_rd12rm0_noflags_nopc, | ||
823 | REGS(0, NOPC, 0, 0, NOPC)), | ||
824 | |||
825 | /* BFC cccc 0111 110x xxxx xxxx xxxx x001 1111 */ | ||
826 | DECODE_EMULATEX (0x0fe0007f, 0x07c0001f, emulate_rd12rm0_noflags_nopc, | ||
827 | REGS(0, NOPC, 0, 0, 0)), | ||
828 | |||
829 | /* BFI cccc 0111 110x xxxx xxxx xxxx x001 xxxx */ | ||
830 | DECODE_EMULATEX (0x0fe00070, 0x07c00010, emulate_rd12rm0_noflags_nopc, | ||
831 | REGS(0, NOPC, 0, 0, NOPCX)), | ||
832 | |||
833 | DECODE_END | ||
834 | }; | ||
835 | |||
836 | static const union decode_item arm_cccc_01xx_table[] = { | ||
837 | /* Load/store word and unsigned byte */ | ||
838 | |||
839 | /* LDRB/STRB pc,[...] cccc 01xx x0xx xxxx xxxx xxxx xxxx xxxx */ | ||
840 | DECODE_REJECT (0x0c40f000, 0x0440f000), | ||
841 | |||
842 | /* STRT cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ | ||
843 | /* LDRT cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ | ||
844 | /* STRBT cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ | ||
845 | /* LDRBT cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ | ||
846 | DECODE_REJECT (0x0d200000, 0x04200000), | ||
847 | |||
848 | /* STR (immediate) cccc 010x x0x0 xxxx xxxx xxxx xxxx xxxx */ | ||
849 | /* STRB (immediate) cccc 010x x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
850 | DECODE_EMULATEX (0x0e100000, 0x04000000, emulate_str, | ||
851 | REGS(NOPCWB, ANY, 0, 0, 0)), | ||
852 | |||
853 | /* LDR (immediate) cccc 010x x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
854 | /* LDRB (immediate) cccc 010x x1x1 xxxx xxxx xxxx xxxx xxxx */ | ||
855 | DECODE_EMULATEX (0x0e100000, 0x04100000, emulate_ldr, | ||
856 | REGS(NOPCWB, ANY, 0, 0, 0)), | ||
857 | |||
858 | /* STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx */ | ||
859 | /* STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
860 | DECODE_EMULATEX (0x0e100000, 0x06000000, emulate_str, | ||
861 | REGS(NOPCWB, ANY, 0, 0, NOPC)), | ||
862 | |||
863 | /* LDR (register) cccc 011x x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
864 | /* LDRB (register) cccc 011x x1x1 xxxx xxxx xxxx xxxx xxxx */ | ||
865 | DECODE_EMULATEX (0x0e100000, 0x06100000, emulate_ldr, | ||
866 | REGS(NOPCWB, ANY, 0, 0, NOPC)), | ||
867 | |||
868 | DECODE_END | ||
869 | }; | ||
870 | |||
871 | static const union decode_item arm_cccc_100x_table[] = { | ||
872 | /* Block data transfer instructions */ | ||
873 | |||
874 | /* LDM cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
875 | /* STM cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ | ||
876 | DECODE_CUSTOM (0x0e400000, 0x08000000, kprobe_decode_ldmstm), | ||
877 | |||
878 | /* STM (user registers) cccc 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
879 | /* LDM (user registers) cccc 100x x1x1 xxxx 0xxx xxxx xxxx xxxx */ | ||
880 | /* LDM (exception ret) cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ | ||
881 | DECODE_END | ||
882 | }; | ||
883 | |||
884 | const union decode_item kprobe_decode_arm_table[] = { | ||
885 | /* | ||
886 | * Unconditional instructions | ||
887 | * 1111 xxxx xxxx xxxx xxxx xxxx xxxx xxxx | ||
888 | */ | ||
889 | DECODE_TABLE (0xf0000000, 0xf0000000, arm_1111_table), | ||
890 | |||
891 | /* | ||
892 | * Miscellaneous instructions | ||
893 | * cccc 0001 0xx0 xxxx xxxx xxxx 0xxx xxxx | ||
894 | */ | ||
895 | DECODE_TABLE (0x0f900080, 0x01000000, arm_cccc_0001_0xx0____0xxx_table), | ||
896 | |||
897 | /* | ||
898 | * Halfword multiply and multiply-accumulate | ||
899 | * cccc 0001 0xx0 xxxx xxxx xxxx 1xx0 xxxx | ||
900 | */ | ||
901 | DECODE_TABLE (0x0f900090, 0x01000080, arm_cccc_0001_0xx0____1xx0_table), | ||
902 | |||
903 | /* | ||
904 | * Multiply and multiply-accumulate | ||
905 | * cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx | ||
906 | */ | ||
907 | DECODE_TABLE (0x0f0000f0, 0x00000090, arm_cccc_0000_____1001_table), | ||
908 | |||
909 | /* | ||
910 | * Synchronization primitives | ||
911 | * cccc 0001 xxxx xxxx xxxx xxxx 1001 xxxx | ||
912 | */ | ||
913 | DECODE_TABLE (0x0f0000f0, 0x01000090, arm_cccc_0001_____1001_table), | ||
914 | |||
915 | /* | ||
916 | * Extra load/store instructions | ||
917 | * cccc 000x xxxx xxxx xxxx xxxx 1xx1 xxxx | ||
918 | */ | ||
919 | DECODE_TABLE (0x0e000090, 0x00000090, arm_cccc_000x_____1xx1_table), | ||
920 | |||
921 | /* | ||
922 | * Data-processing (register) | ||
923 | * cccc 000x xxxx xxxx xxxx xxxx xxx0 xxxx | ||
924 | * Data-processing (register-shifted register) | ||
925 | * cccc 000x xxxx xxxx xxxx xxxx 0xx1 xxxx | ||
926 | */ | ||
927 | DECODE_TABLE (0x0e000000, 0x00000000, arm_cccc_000x_table), | ||
928 | |||
929 | /* | ||
930 | * Data-processing (immediate) | ||
931 | * cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx | ||
932 | */ | ||
933 | DECODE_TABLE (0x0e000000, 0x02000000, arm_cccc_001x_table), | ||
934 | |||
935 | /* | ||
936 | * Media instructions | ||
937 | * cccc 011x xxxx xxxx xxxx xxxx xxx1 xxxx | ||
938 | */ | ||
939 | DECODE_TABLE (0x0f000010, 0x06000010, arm_cccc_0110_____xxx1_table), | ||
940 | DECODE_TABLE (0x0f000010, 0x07000010, arm_cccc_0111_____xxx1_table), | ||
941 | |||
942 | /* | ||
943 | * Load/store word and unsigned byte | ||
944 | * cccc 01xx xxxx xxxx xxxx xxxx xxxx xxxx | ||
945 | */ | ||
946 | DECODE_TABLE (0x0c000000, 0x04000000, arm_cccc_01xx_table), | ||
947 | |||
948 | /* | ||
949 | * Block data transfer instructions | ||
950 | * cccc 100x xxxx xxxx xxxx xxxx xxxx xxxx | ||
951 | */ | ||
952 | DECODE_TABLE (0x0e000000, 0x08000000, arm_cccc_100x_table), | ||
953 | |||
954 | /* B cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
955 | /* BL cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
956 | DECODE_SIMULATE (0x0e000000, 0x0a000000, simulate_bbl), | ||
957 | |||
958 | /* | ||
959 | * Supervisor Call, and coprocessor instructions | ||
960 | */ | ||
961 | |||
962 | /* MCRR cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx */ | ||
963 | /* MRRC cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx */ | ||
964 | /* LDC cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ | ||
965 | /* STC cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ | ||
966 | /* CDP cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ | ||
967 | /* MCR cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ | ||
968 | /* MRC cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ | ||
969 | /* SVC cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
970 | DECODE_REJECT (0x0c000000, 0x0c000000), | ||
971 | |||
972 | DECODE_END | ||
973 | }; | ||
974 | |||
975 | static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs) | ||
976 | { | ||
977 | regs->ARM_pc += 4; | ||
978 | p->ainsn.insn_handler(p, regs); | ||
979 | } | ||
980 | |||
981 | /* Return: | ||
982 | * INSN_REJECTED If instruction is one not allowed to kprobe, | ||
983 | * INSN_GOOD If instruction is supported and uses instruction slot, | ||
984 | * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. | ||
985 | * | ||
986 | * For instructions we don't want to kprobe (INSN_REJECTED return result): | ||
987 | * These are generally ones that modify the processor state making | ||
988 | * them "hard" to simulate, such as ones that switch processor modes or | ||
989 | * make accesses in alternate modes. Any of these could be simulated | ||
990 | * if the work was put into it, but the return would be low considering | ||
991 | * they should also be very rare. | ||
992 | */ | ||
993 | enum kprobe_insn __kprobes | ||
994 | arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
995 | { | ||
996 | asi->insn_singlestep = arm_singlestep; | ||
997 | asi->insn_check_cc = kprobe_condition_checks[insn>>28]; | ||
998 | return kprobe_decode_insn(insn, asi, kprobe_decode_arm_table, false); | ||
999 | } | ||
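
As an illustration (not part of the patch): the top nibble of an ARM instruction is its condition code, so indexing kprobe_condition_checks[] with insn >> 28 selects the matching __check_*() helper defined in kprobes-common.c below. A minimal standalone sketch of that dispatch, using the PSR_Z_BIT value from asm/ptrace.h and a local stand-in for __check_ne():

#include <stdio.h>

#define PSR_Z_BIT 0x40000000UL		/* value from asm/ptrace.h */

/* condition index 1 is NE: passes when the Z flag is clear */
static unsigned long check_ne(unsigned long cpsr)
{
	return (~cpsr) & PSR_Z_BIT;
}

int main(void)
{
	unsigned long insn = 0x1a000003;	/* bne <pc+20> */
	unsigned int cond = insn >> 28;		/* 1: index into kprobe_condition_checks[] */

	printf("condition index %u\n", cond);
	printf("passes with Z clear: %d\n", check_ne(0) != 0);
	printf("passes with Z set:   %d\n", check_ne(PSR_Z_BIT) != 0);
	return 0;
}
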
diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c new file mode 100644 index 000000000000..a5394fb4e4e0 --- /dev/null +++ b/arch/arm/kernel/kprobes-common.c | |||
@@ -0,0 +1,577 @@ | |||
1 | /* | ||
2 | * arch/arm/kernel/kprobes-common.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. | ||
5 | * | ||
6 | * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is | ||
7 | * Copyright (C) 2006, 2007 Motorola Inc. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/kprobes.h> | ||
16 | |||
17 | #include "kprobes.h" | ||
18 | |||
19 | |||
20 | #ifndef find_str_pc_offset | ||
21 | |||
22 | /* | ||
23 | * For STR and STM instructions, an ARM core may choose to use either | ||
24 | * a +8 or a +12 displacement from the current instruction's address. | ||
25 | * Whichever value is chosen for a given core, it must be the same for | ||
26 | * both instructions and may not change. This function measures it. | ||
27 | */ | ||
28 | |||
29 | int str_pc_offset; | ||
30 | |||
31 | void __init find_str_pc_offset(void) | ||
32 | { | ||
33 | int addr, scratch, ret; | ||
34 | |||
35 | __asm__ ( | ||
36 | "sub %[ret], pc, #4 \n\t" | ||
37 | "str pc, %[addr] \n\t" | ||
38 | "ldr %[scr], %[addr] \n\t" | ||
39 | "sub %[ret], %[scr], %[ret] \n\t" | ||
40 | : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr)); | ||
41 | |||
42 | str_pc_offset = ret; | ||
43 | } | ||
44 | |||
45 | #endif /* !find_str_pc_offset */ | ||
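
As an illustration of what the measured value means (hypothetical numbers, not part of the patch): on most cores str_pc_offset ends up as 8, so an STR/STM that stores the PC at address A actually stores A + 8; simulate_stm1_pc() below adds exactly this offset to the probed address before replaying such an instruction.

#include <stdio.h>

int main(void)
{
	unsigned long probed_addr = 0xc0080000;	/* hypothetical probed "stmia ..., {..., pc}" */
	int str_pc_offset = 8;			/* 8 on most cores, 12 on some others */

	/* the PC value the real instruction would have stored */
	printf("stored pc = %#lx\n", probed_addr + str_pc_offset);
	return 0;
}
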
46 | |||
47 | |||
48 | #ifndef test_load_write_pc_interworking | ||
49 | |||
50 | bool load_write_pc_interworks; | ||
51 | |||
52 | void __init test_load_write_pc_interworking(void) | ||
53 | { | ||
54 | int arch = cpu_architecture(); | ||
55 | BUG_ON(arch == CPU_ARCH_UNKNOWN); | ||
56 | load_write_pc_interworks = arch >= CPU_ARCH_ARMv5T; | ||
57 | } | ||
58 | |||
59 | #endif /* !test_load_write_pc_interworking */ | ||
60 | |||
61 | |||
62 | #ifndef test_alu_write_pc_interworking | ||
63 | |||
64 | bool alu_write_pc_interworks; | ||
65 | |||
66 | void __init test_alu_write_pc_interworking(void) | ||
67 | { | ||
68 | int arch = cpu_architecture(); | ||
69 | BUG_ON(arch == CPU_ARCH_UNKNOWN); | ||
70 | alu_write_pc_interworks = arch >= CPU_ARCH_ARMv7; | ||
71 | } | ||
72 | |||
73 | #endif /* !test_alu_write_pc_interworking */ | ||
74 | |||
75 | |||
76 | void __init arm_kprobe_decode_init(void) | ||
77 | { | ||
78 | find_str_pc_offset(); | ||
79 | test_load_write_pc_interworking(); | ||
80 | test_alu_write_pc_interworking(); | ||
81 | } | ||
82 | |||
83 | |||
84 | static unsigned long __kprobes __check_eq(unsigned long cpsr) | ||
85 | { | ||
86 | return cpsr & PSR_Z_BIT; | ||
87 | } | ||
88 | |||
89 | static unsigned long __kprobes __check_ne(unsigned long cpsr) | ||
90 | { | ||
91 | return (~cpsr) & PSR_Z_BIT; | ||
92 | } | ||
93 | |||
94 | static unsigned long __kprobes __check_cs(unsigned long cpsr) | ||
95 | { | ||
96 | return cpsr & PSR_C_BIT; | ||
97 | } | ||
98 | |||
99 | static unsigned long __kprobes __check_cc(unsigned long cpsr) | ||
100 | { | ||
101 | return (~cpsr) & PSR_C_BIT; | ||
102 | } | ||
103 | |||
104 | static unsigned long __kprobes __check_mi(unsigned long cpsr) | ||
105 | { | ||
106 | return cpsr & PSR_N_BIT; | ||
107 | } | ||
108 | |||
109 | static unsigned long __kprobes __check_pl(unsigned long cpsr) | ||
110 | { | ||
111 | return (~cpsr) & PSR_N_BIT; | ||
112 | } | ||
113 | |||
114 | static unsigned long __kprobes __check_vs(unsigned long cpsr) | ||
115 | { | ||
116 | return cpsr & PSR_V_BIT; | ||
117 | } | ||
118 | |||
119 | static unsigned long __kprobes __check_vc(unsigned long cpsr) | ||
120 | { | ||
121 | return (~cpsr) & PSR_V_BIT; | ||
122 | } | ||
123 | |||
124 | static unsigned long __kprobes __check_hi(unsigned long cpsr) | ||
125 | { | ||
126 | cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ | ||
127 | return cpsr & PSR_C_BIT; | ||
128 | } | ||
129 | |||
130 | static unsigned long __kprobes __check_ls(unsigned long cpsr) | ||
131 | { | ||
132 | cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ | ||
133 | return (~cpsr) & PSR_C_BIT; | ||
134 | } | ||
135 | |||
136 | static unsigned long __kprobes __check_ge(unsigned long cpsr) | ||
137 | { | ||
138 | cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
139 | return (~cpsr) & PSR_N_BIT; | ||
140 | } | ||
141 | |||
142 | static unsigned long __kprobes __check_lt(unsigned long cpsr) | ||
143 | { | ||
144 | cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
145 | return cpsr & PSR_N_BIT; | ||
146 | } | ||
147 | |||
148 | static unsigned long __kprobes __check_gt(unsigned long cpsr) | ||
149 | { | ||
150 | unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
151 | temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ | ||
152 | return (~temp) & PSR_N_BIT; | ||
153 | } | ||
154 | |||
155 | static unsigned long __kprobes __check_le(unsigned long cpsr) | ||
156 | { | ||
157 | unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
158 | temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ | ||
159 | return temp & PSR_N_BIT; | ||
160 | } | ||
161 | |||
162 | static unsigned long __kprobes __check_al(unsigned long cpsr) | ||
163 | { | ||
164 | return true; | ||
165 | } | ||
166 | |||
167 | kprobe_check_cc * const kprobe_condition_checks[16] = { | ||
168 | &__check_eq, &__check_ne, &__check_cs, &__check_cc, | ||
169 | &__check_mi, &__check_pl, &__check_vs, &__check_vc, | ||
170 | &__check_hi, &__check_ls, &__check_ge, &__check_lt, | ||
171 | &__check_gt, &__check_le, &__check_al, &__check_al | ||
172 | }; | ||
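
The shift tricks above rely on the CPSR flag layout from asm/ptrace.h (N = bit 31, Z = bit 30, C = bit 29, V = bit 28): cpsr << 3 lines V up under N, and cpsr << 1 lines Z up under N. A standalone sketch (illustration only, not part of the patch) that checks the __check_gt() logic against the architectural definition GT = !Z && (N == V):

#include <assert.h>

#define PSR_N_BIT 0x80000000UL	/* values from asm/ptrace.h */
#define PSR_Z_BIT 0x40000000UL
#define PSR_V_BIT 0x10000000UL

static unsigned long check_gt(unsigned long cpsr)
{
	unsigned long temp = cpsr ^ (cpsr << 3);	/* bit 31 = N ^ V */
	temp |= (cpsr << 1);				/* bit 31 |= Z */
	return (~temp) & PSR_N_BIT;			/* non-zero iff !Z && N == V */
}

int main(void)
{
	unsigned long n, z, v;

	for (n = 0; n < 2; n++)
		for (z = 0; z < 2; z++)
			for (v = 0; v < 2; v++) {
				unsigned long cpsr = (n ? PSR_N_BIT : 0) |
						     (z ? PSR_Z_BIT : 0) |
						     (v ? PSR_V_BIT : 0);
				int gt = !z && (n == v);	/* architectural GT */
				assert(!!check_gt(cpsr) == gt);
			}
	return 0;
}
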
173 | |||
174 | |||
175 | void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs) | ||
176 | { | ||
177 | } | ||
178 | |||
179 | void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs) | ||
180 | { | ||
181 | p->ainsn.insn_fn(); | ||
182 | } | ||
183 | |||
184 | static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) | ||
185 | { | ||
186 | kprobe_opcode_t insn = p->opcode; | ||
187 | int rn = (insn >> 16) & 0xf; | ||
188 | int lbit = insn & (1 << 20); | ||
189 | int wbit = insn & (1 << 21); | ||
190 | int ubit = insn & (1 << 23); | ||
191 | int pbit = insn & (1 << 24); | ||
192 | long *addr = (long *)regs->uregs[rn]; | ||
193 | int reg_bit_vector; | ||
194 | int reg_count; | ||
195 | |||
196 | reg_count = 0; | ||
197 | reg_bit_vector = insn & 0xffff; | ||
198 | while (reg_bit_vector) { | ||
199 | reg_bit_vector &= (reg_bit_vector - 1); | ||
200 | ++reg_count; | ||
201 | } | ||
202 | |||
203 | if (!ubit) | ||
204 | addr -= reg_count; | ||
205 | addr += (!pbit == !ubit); | ||
206 | |||
207 | reg_bit_vector = insn & 0xffff; | ||
208 | while (reg_bit_vector) { | ||
209 | int reg = __ffs(reg_bit_vector); | ||
210 | reg_bit_vector &= (reg_bit_vector - 1); | ||
211 | if (lbit) | ||
212 | regs->uregs[reg] = *addr++; | ||
213 | else | ||
214 | *addr++ = regs->uregs[reg]; | ||
215 | } | ||
216 | |||
217 | if (wbit) { | ||
218 | if (!ubit) | ||
219 | addr -= reg_count; | ||
220 | addr -= (!pbit == !ubit); | ||
221 | regs->uregs[rn] = (long)addr; | ||
222 | } | ||
223 | } | ||
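
The two lines that adjust addr above fold the four block-transfer addressing modes (IA/IB/DA/DB, selected by the P and U bits) into a single expression. A standalone sketch (illustration only, not part of the patch; the P and U flags are passed as plain booleans here) comparing that computation with the usual per-mode start addresses:

#include <assert.h>

/* start address of an LDM/STM, as computed by simulate_ldm1stm1() */
static long *start_addr(long *rn, int pbit, int ubit, int reg_count)
{
	long *addr = rn;

	if (!ubit)
		addr -= reg_count;
	addr += (!pbit == !ubit);
	return addr;
}

int main(void)
{
	long base[16];
	long *rn = &base[8];
	int n = 4;					/* registers in the list */

	assert(start_addr(rn, 0, 1, n) == rn);		/* IA: start at Rn */
	assert(start_addr(rn, 1, 1, n) == rn + 1);	/* IB: one word above Rn */
	assert(start_addr(rn, 0, 0, n) == rn - n + 1);	/* DA: Rn - n words + 1 word */
	assert(start_addr(rn, 1, 0, n) == rn - n);	/* DB: Rn - n words */
	return 0;
}
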
224 | |||
225 | static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) | ||
226 | { | ||
227 | regs->ARM_pc = (long)p->addr + str_pc_offset; | ||
228 | simulate_ldm1stm1(p, regs); | ||
229 | regs->ARM_pc = (long)p->addr + 4; | ||
230 | } | ||
231 | |||
232 | static void __kprobes simulate_ldm1_pc(struct kprobe *p, struct pt_regs *regs) | ||
233 | { | ||
234 | simulate_ldm1stm1(p, regs); | ||
235 | load_write_pc(regs->ARM_pc, regs); | ||
236 | } | ||
237 | |||
238 | static void __kprobes | ||
239 | emulate_generic_r0_12_noflags(struct kprobe *p, struct pt_regs *regs) | ||
240 | { | ||
241 | register void *rregs asm("r1") = regs; | ||
242 | register void *rfn asm("lr") = p->ainsn.insn_fn; | ||
243 | |||
244 | __asm__ __volatile__ ( | ||
245 | "stmdb sp!, {%[regs], r11} \n\t" | ||
246 | "ldmia %[regs], {r0-r12} \n\t" | ||
247 | #if __LINUX_ARM_ARCH__ >= 6 | ||
248 | "blx %[fn] \n\t" | ||
249 | #else | ||
250 | "str %[fn], [sp, #-4]! \n\t" | ||
251 | "adr lr, 1f \n\t" | ||
252 | "ldr pc, [sp], #4 \n\t" | ||
253 | "1: \n\t" | ||
254 | #endif | ||
255 | "ldr lr, [sp], #4 \n\t" /* lr = regs */ | ||
256 | "stmia lr, {r0-r12} \n\t" | ||
257 | "ldr r11, [sp], #4 \n\t" | ||
258 | : [regs] "=r" (rregs), [fn] "=r" (rfn) | ||
259 | : "0" (rregs), "1" (rfn) | ||
260 | : "r0", "r2", "r3", "r4", "r5", "r6", "r7", | ||
261 | "r8", "r9", "r10", "r12", "memory", "cc" | ||
262 | ); | ||
263 | } | ||
264 | |||
265 | static void __kprobes | ||
266 | emulate_generic_r2_14_noflags(struct kprobe *p, struct pt_regs *regs) | ||
267 | { | ||
268 | emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+2)); | ||
269 | } | ||
270 | |||
271 | static void __kprobes | ||
272 | emulate_ldm_r3_15(struct kprobe *p, struct pt_regs *regs) | ||
273 | { | ||
274 | emulate_generic_r0_12_noflags(p, (struct pt_regs *)(regs->uregs+3)); | ||
275 | load_write_pc(regs->ARM_pc, regs); | ||
276 | } | ||
277 | |||
278 | enum kprobe_insn __kprobes | ||
279 | kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
280 | { | ||
281 | kprobe_insn_handler_t *handler = 0; | ||
282 | unsigned reglist = insn & 0xffff; | ||
283 | int is_ldm = insn & 0x100000; | ||
284 | int rn = (insn >> 16) & 0xf; | ||
285 | |||
286 | if (rn <= 12 && (reglist & 0xe000) == 0) { | ||
287 | /* Instruction only uses registers in the range R0..R12 */ | ||
288 | handler = emulate_generic_r0_12_noflags; | ||
289 | |||
290 | } else if (rn >= 2 && (reglist & 0x8003) == 0) { | ||
291 | /* Instruction only uses registers in the range R2..R14 */ | ||
292 | rn -= 2; | ||
293 | reglist >>= 2; | ||
294 | handler = emulate_generic_r2_14_noflags; | ||
295 | |||
296 | } else if (rn >= 3 && (reglist & 0x0007) == 0) { | ||
297 | /* Instruction only uses registers in the range R3..R15 */ | ||
298 | if (is_ldm && (reglist & 0x8000)) { | ||
299 | rn -= 3; | ||
300 | reglist >>= 3; | ||
301 | handler = emulate_ldm_r3_15; | ||
302 | } | ||
303 | } | ||
304 | |||
305 | if (handler) { | ||
306 | /* We can emulate the instruction in (possibly) modified form */ | ||
307 | asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist; | ||
308 | asi->insn_handler = handler; | ||
309 | return INSN_GOOD; | ||
310 | } | ||
311 | |||
312 | /* Fallback to slower simulation... */ | ||
313 | if (reglist & 0x8000) | ||
314 | handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc; | ||
315 | else | ||
316 | handler = simulate_ldm1stm1; | ||
317 | asi->insn_handler = handler; | ||
318 | return INSN_GOOD_NO_SLOT; | ||
319 | } | ||
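
A worked example of the "R2..R14" case above (values computed by hand, for illustration only): for a standard "ldmia sp!, {r4-r11, lr}", rn and the register list are shifted down by two so the rewritten instruction only touches r0-r12; emulate_generic_r2_14_noflags() then runs it with r0-r12 loaded from regs->uregs[2..14], so each renamed register stands in for the original one two places up.

#include <stdio.h>

int main(void)
{
	unsigned long insn = 0xe8bd4ff0;	/* ldmia sp!, {r4-r11, lr} */
	unsigned int rn = (insn >> 16) & 0xf;	/* 13 (sp) */
	unsigned int reglist = insn & 0xffff;	/* 0x4ff0 */

	/* the "R2..R14" case: shift everything down by two registers */
	rn -= 2;
	reglist >>= 2;
	insn = (insn & 0xfff00000) | (rn << 16) | reglist;

	/* 0xe8bb13fc == ldmia r11!, {r2-r9, r12} */
	printf("rewritten instruction: %#lx\n", insn);
	return 0;
}
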
320 | |||
321 | |||
322 | /* | ||
323 | * Prepare an instruction slot to receive an instruction for emulating. | ||
324 | * This is done by placing a subroutine return after the location where the | ||
325 | * instruction will be placed. We also modify ARM instructions to be | ||
326 | * unconditional as the condition code will already be checked before any | ||
327 | * emulation handler is called. | ||
328 | */ | ||
329 | static kprobe_opcode_t __kprobes | ||
330 | prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, | ||
331 | bool thumb) | ||
332 | { | ||
333 | #ifdef CONFIG_THUMB2_KERNEL | ||
334 | if (thumb) { | ||
335 | u16 *thumb_insn = (u16 *)asi->insn; | ||
336 | thumb_insn[1] = 0x4770; /* Thumb bx lr */ | ||
337 | thumb_insn[2] = 0x4770; /* Thumb bx lr */ | ||
338 | return insn; | ||
339 | } | ||
340 | asi->insn[1] = 0xe12fff1e; /* ARM bx lr */ | ||
341 | #else | ||
342 | asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */ | ||
343 | #endif | ||
344 | /* Make an ARM instruction unconditional */ | ||
345 | if (insn < 0xe0000000) | ||
346 | insn = (insn | 0xe0000000) & ~0x10000000; | ||
347 | return insn; | ||
348 | } | ||
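
The final test above rewrites the condition field: any condition code below 0xe is forced to 0xe (AL), while instructions already in the 0xe or 0xf space pass through untouched. A quick standalone check (illustration only, not part of the patch):

#include <assert.h>

static unsigned long make_unconditional(unsigned long insn)
{
	if (insn < 0xe0000000)
		insn = (insn | 0xe0000000) & ~0x10000000;
	return insn;
}

int main(void)
{
	/* addne r0, r1, r2  ->  add r0, r1, r2 */
	assert(make_unconditional(0x10810002) == 0xe0810002);
	/* AL and the 0xf (unconditional) space are left alone */
	assert(make_unconditional(0xe0810002) == 0xe0810002);
	assert(make_unconditional(0xf57ff04f) == 0xf57ff04f);
	return 0;
}
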
349 | |||
350 | /* | ||
351 | * Write a (probably modified) instruction into the slot previously prepared by | ||
352 | * prepare_emulated_insn | ||
353 | */ | ||
354 | static void __kprobes | ||
355 | set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, | ||
356 | bool thumb) | ||
357 | { | ||
358 | #ifdef CONFIG_THUMB2_KERNEL | ||
359 | if (thumb) { | ||
360 | u16 *ip = (u16 *)asi->insn; | ||
361 | if (is_wide_instruction(insn)) | ||
362 | *ip++ = insn >> 16; | ||
363 | *ip++ = insn; | ||
364 | return; | ||
365 | } | ||
366 | #endif | ||
367 | asi->insn[0] = insn; | ||
368 | } | ||
369 | |||
370 | /* | ||
371 | * When we modify the register numbers encoded in an instruction to be emulated, | ||
372 | * the new values come from this define. For ARM and 32-bit Thumb instructions | ||
373 | * this gives... | ||
374 | * | ||
375 | * bit position 16 12 8 4 0 | ||
376 | * ---------------+---+---+---+---+---+ | ||
377 | * register r2 r0 r1 -- r3 | ||
378 | */ | ||
379 | #define INSN_NEW_BITS 0x00020103 | ||
380 | |||
381 | /* Each nibble has the same value as the INSN_NEW_BITS nibble at bit 16 */ | ||
382 | #define INSN_SAMEAS16_BITS 0x22222222 | ||
383 | |||
384 | /* | ||
385 | * Validate and modify each of the registers encoded in an instruction. | ||
386 | * | ||
387 | * Each nibble in regs contains a value from enum decode_reg_type. For each | ||
388 | * non-zero value, the corresponding nibble in pinsn is validated and modified | ||
389 | * according to the type. | ||
390 | */ | ||
391 | static bool __kprobes decode_regs(kprobe_opcode_t* pinsn, u32 regs) | ||
392 | { | ||
393 | kprobe_opcode_t insn = *pinsn; | ||
394 | kprobe_opcode_t mask = 0xf; /* Start at least significant nibble */ | ||
395 | |||
396 | for (; regs != 0; regs >>= 4, mask <<= 4) { | ||
397 | |||
398 | kprobe_opcode_t new_bits = INSN_NEW_BITS; | ||
399 | |||
400 | switch (regs & 0xf) { | ||
401 | |||
402 | case REG_TYPE_NONE: | ||
403 | /* Nibble not a register, skip to next */ | ||
404 | continue; | ||
405 | |||
406 | case REG_TYPE_ANY: | ||
407 | /* Any register is allowed */ | ||
408 | break; | ||
409 | |||
410 | case REG_TYPE_SAMEAS16: | ||
411 | /* Replace register with same as at bit position 16 */ | ||
412 | new_bits = INSN_SAMEAS16_BITS; | ||
413 | break; | ||
414 | |||
415 | case REG_TYPE_SP: | ||
416 | /* Only allow SP (R13) */ | ||
417 | if ((insn ^ 0xdddddddd) & mask) | ||
418 | goto reject; | ||
419 | break; | ||
420 | |||
421 | case REG_TYPE_PC: | ||
422 | /* Only allow PC (R15) */ | ||
423 | if ((insn ^ 0xffffffff) & mask) | ||
424 | goto reject; | ||
425 | break; | ||
426 | |||
427 | case REG_TYPE_NOSP: | ||
428 | /* Reject SP (R13) */ | ||
429 | if (((insn ^ 0xdddddddd) & mask) == 0) | ||
430 | goto reject; | ||
431 | break; | ||
432 | |||
433 | case REG_TYPE_NOSPPC: | ||
434 | case REG_TYPE_NOSPPCX: | ||
435 | /* Reject SP and PC (R13 and R15) */ | ||
436 | if (((insn ^ 0xdddddddd) & 0xdddddddd & mask) == 0) | ||
437 | goto reject; | ||
438 | break; | ||
439 | |||
440 | case REG_TYPE_NOPCWB: | ||
441 | if (!is_writeback(insn)) | ||
442 | break; /* No writeback, so any register is OK */ | ||
443 | /* fall through... */ | ||
444 | case REG_TYPE_NOPC: | ||
445 | case REG_TYPE_NOPCX: | ||
446 | /* Reject PC (R15) */ | ||
447 | if (((insn ^ 0xffffffff) & mask) == 0) | ||
448 | goto reject; | ||
449 | break; | ||
450 | } | ||
451 | |||
452 | /* Replace value of nibble with new register number... */ | ||
453 | insn &= ~mask; | ||
454 | insn |= new_bits & mask; | ||
455 | } | ||
456 | |||
457 | *pinsn = insn; | ||
458 | return true; | ||
459 | |||
460 | reject: | ||
461 | return false; | ||
462 | } | ||
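
To make the register rewriting concrete (an illustration, not part of the patch): with REG_TYPE_ANY at the Rn, Rd and Rm nibbles, the loop above overwrites those nibbles with the corresponding nibbles of INSN_NEW_BITS, so Rn becomes r2, Rd becomes r0 and Rm becomes r3, matching the table in the comment before the define. A minimal sketch of that substitution:

#include <stdio.h>

#define INSN_NEW_BITS 0x00020103	/* r2 @ bit 16, r0 @ bit 12, r1 @ bit 8, r3 @ bit 0 */

static unsigned long replace_nibble(unsigned long insn, int bitpos)
{
	unsigned long mask = 0xfUL << bitpos;

	return (insn & ~mask) | (INSN_NEW_BITS & mask);
}

int main(void)
{
	unsigned long insn = 0xe0854006;	/* add r4, r5, r6 */

	insn = replace_nibble(insn, 16);	/* Rn -> r2 */
	insn = replace_nibble(insn, 12);	/* Rd -> r0 */
	insn = replace_nibble(insn, 0);		/* Rm -> r3 */

	printf("%#lx\n", insn);			/* 0xe0820003 == add r0, r2, r3 */
	return 0;
}
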
463 | |||
464 | static const int decode_struct_sizes[NUM_DECODE_TYPES] = { | ||
465 | [DECODE_TYPE_TABLE] = sizeof(struct decode_table), | ||
466 | [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom), | ||
467 | [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate), | ||
468 | [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate), | ||
469 | [DECODE_TYPE_OR] = sizeof(struct decode_or), | ||
470 | [DECODE_TYPE_REJECT] = sizeof(struct decode_reject) | ||
471 | }; | ||
472 | |||
473 | /* | ||
474 | * kprobe_decode_insn operates on data tables in order to decode an ARM | ||
475 | * architecture instruction onto which a kprobe has been placed. | ||
476 | * | ||
477 | * These instruction decoding tables are a concatenation of entries each | ||
478 | * of which consists of one of the following structs: | ||
479 | * | ||
480 | * decode_table | ||
481 | * decode_custom | ||
482 | * decode_simulate | ||
483 | * decode_emulate | ||
484 | * decode_or | ||
485 | * decode_reject | ||
486 | * | ||
487 | * Each of these starts with a struct decode_header which has the following | ||
488 | * fields: | ||
489 | * | ||
490 | * type_regs | ||
491 | * mask | ||
492 | * value | ||
493 | * | ||
494 | * The least significant DECODE_TYPE_BITS bits of type_regs contain a value | ||
495 | * from enum decode_type; this indicates which of the decode_* structs | ||
496 | * the entry contains. The value DECODE_TYPE_END indicates the end of the | ||
497 | * table. | ||
498 | * | ||
499 | * When the table is parsed, each entry is checked in turn to see if it | ||
500 | * matches the instruction to be decoded using the test: | ||
501 | * | ||
502 | * (insn & mask) == value | ||
503 | * | ||
504 | * If no match is found before the end of the table is reached then decoding | ||
505 | * fails with INSN_REJECTED. | ||
506 | * | ||
507 | * When a match is found, decode_regs() is called to validate and modify each | ||
508 | * of the registers encoded in the instruction; the data it uses to do this | ||
509 | * is (type_regs >> DECODE_TYPE_BITS). A validation failure will cause decoding | ||
510 | * to fail with INSN_REJECTED. | ||
511 | * | ||
512 | * Once the instruction has passed the above tests, further processing | ||
513 | * depends on the type of the table entry's decode struct. | ||
514 | * | ||
515 | */ | ||
516 | int __kprobes | ||
517 | kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, | ||
518 | const union decode_item *table, bool thumb) | ||
519 | { | ||
520 | const struct decode_header *h = (struct decode_header *)table; | ||
521 | const struct decode_header *next; | ||
522 | bool matched = false; | ||
523 | |||
524 | insn = prepare_emulated_insn(insn, asi, thumb); | ||
525 | |||
526 | for (;; h = next) { | ||
527 | enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK; | ||
528 | u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS; | ||
529 | |||
530 | if (type == DECODE_TYPE_END) | ||
531 | return INSN_REJECTED; | ||
532 | |||
533 | next = (struct decode_header *) | ||
534 | ((uintptr_t)h + decode_struct_sizes[type]); | ||
535 | |||
536 | if (!matched && (insn & h->mask.bits) != h->value.bits) | ||
537 | continue; | ||
538 | |||
539 | if (!decode_regs(&insn, regs)) | ||
540 | return INSN_REJECTED; | ||
541 | |||
542 | switch (type) { | ||
543 | |||
544 | case DECODE_TYPE_TABLE: { | ||
545 | struct decode_table *d = (struct decode_table *)h; | ||
546 | next = (struct decode_header *)d->table.table; | ||
547 | break; | ||
548 | } | ||
549 | |||
550 | case DECODE_TYPE_CUSTOM: { | ||
551 | struct decode_custom *d = (struct decode_custom *)h; | ||
552 | return (*d->decoder.decoder)(insn, asi); | ||
553 | } | ||
554 | |||
555 | case DECODE_TYPE_SIMULATE: { | ||
556 | struct decode_simulate *d = (struct decode_simulate *)h; | ||
557 | asi->insn_handler = d->handler.handler; | ||
558 | return INSN_GOOD_NO_SLOT; | ||
559 | } | ||
560 | |||
561 | case DECODE_TYPE_EMULATE: { | ||
562 | struct decode_emulate *d = (struct decode_emulate *)h; | ||
563 | asi->insn_handler = d->handler.handler; | ||
564 | set_emulated_insn(insn, asi, thumb); | ||
565 | return INSN_GOOD; | ||
566 | } | ||
567 | |||
568 | case DECODE_TYPE_OR: | ||
569 | matched = true; | ||
570 | break; | ||
571 | |||
572 | case DECODE_TYPE_REJECT: | ||
573 | default: | ||
574 | return INSN_REJECTED; | ||
575 | } | ||
576 | } | ||
577 | } | ||
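
To see how the (insn & mask) == value walk over the table works in practice, here is a simplified standalone sketch (illustration only; the struct below is a stand-in, not the kernel's decode_header, and only a handful of mask/value pairs are copied by hand from kprobe_decode_arm_table):

#include <stdio.h>

struct entry {
	unsigned long mask, value;
	const char *what;
};

static const struct entry table[] = {
	{ 0xf0000000, 0xf0000000, "unconditional space (arm_1111_table)" },
	{ 0x0f900080, 0x01000000, "miscellaneous instructions" },
	{ 0x0f900090, 0x01000080, "halfword multiply (-accumulate)" },
	{ 0x0e000000, 0x0a000000, "B/BL, simulated by simulate_bbl" },
	{ 0x0c000000, 0x0c000000, "SVC/coprocessor space, rejected" },
	{ 0, 0, NULL }
};

static void decode(unsigned long insn)
{
	const struct entry *e;

	for (e = table; e->what; e++)
		if ((insn & e->mask) == e->value) {
			printf("%#lx -> %s\n", insn, e->what);
			return;
		}
	printf("%#lx -> no match (INSN_REJECTED)\n", insn);
}

int main(void)
{
	decode(0xeb000001);	/* bl <pc+12> */
	decode(0xef000000);	/* svc 0 */
	return 0;
}
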
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c deleted file mode 100644 index 15eeff6aea0e..000000000000 --- a/arch/arm/kernel/kprobes-decode.c +++ /dev/null | |||
@@ -1,1670 +0,0 @@ | |||
1 | /* | ||
2 | * arch/arm/kernel/kprobes-decode.c | ||
3 | * | ||
4 | * Copyright (C) 2006, 2007 Motorola Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
13 | * General Public License for more details. | ||
14 | */ | ||
15 | |||
16 | /* | ||
17 | * We do not have hardware single-stepping on ARM. This | ||
18 | * effort is further complicated by the ARM not having a | ||
19 | * "next PC" register. Instructions that change the PC | ||
20 | * can't be safely single-stepped in a MP environment, so | ||
21 | * we have a lot of work to do: | ||
22 | * | ||
23 | * In the prepare phase: | ||
24 | * *) If it is an instruction that does anything | ||
25 | * with the CPU mode, we reject it for a kprobe. | ||
26 | * (This is out of laziness rather than need. The | ||
27 | * instructions could be simulated.) | ||
28 | * | ||
29 | * *) Otherwise, decode the instruction rewriting its | ||
30 | * registers to take fixed, ordered registers and | ||
31 | * setting a handler for it to run the instruction. | ||
32 | * | ||
33 | * In the execution phase by an instruction's handler: | ||
34 | * | ||
35 | * *) If the PC is written to by the instruction, the | ||
36 | * instruction must be fully simulated in software. | ||
37 | * | ||
38 | * *) Otherwise, a modified form of the instruction is | ||
39 | * directly executed. Its handler calls the | ||
40 | * instruction in insn[0]. In insn[1] is a | ||
41 | * "mov pc, lr" to return. | ||
42 | * | ||
43 | * Before calling, load up the reordered registers | ||
44 | * from the original instruction's registers. If one | ||
45 | * of the original input registers is the PC, compute | ||
46 | * and adjust the appropriate input register. | ||
47 | * | ||
48 | * After the call completes, copy the output registers to | ||
49 | * the original instruction's original registers. | ||
50 | * | ||
51 | * We don't use a real breakpoint instruction since that | ||
52 | * would have us, in the kernel, go from SVC mode to SVC | ||
53 | * mode, losing the link register. Instead we use an | ||
54 | * undefined instruction. To simplify processing, the | ||
55 | * undefined instruction used for kprobes must be reserved | ||
56 | * exclusively for kprobes use. | ||
57 | * | ||
58 | * TODO: ifdef out some instruction decoding based on architecture. | ||
59 | */ | ||
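
The slot layout described above amounts to the following (hypothetical values, for illustration only; 0xe0820003 stands in for some rewritten instruction, here "add r0, r2, r3"):

/* Shape of the two-word instruction slot described above. */
unsigned long example_slot[2] = {
	0xe0820003,	/* insn[0]: the probed instruction, rewritten to use r0-r3 */
	0xe1a0f00e,	/* insn[1]: "mov pc, lr", so the call into the slot returns */
};
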
60 | |||
61 | #include <linux/kernel.h> | ||
62 | #include <linux/kprobes.h> | ||
63 | |||
64 | #define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) | ||
65 | |||
66 | #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) | ||
67 | |||
68 | #define is_r15(insn, bitpos) (((insn) & (0xf << bitpos)) == (0xf << bitpos)) | ||
69 | |||
70 | /* | ||
71 | * Test if load/store instructions writeback the address register. | ||
72 | * if P (bit 24) == 0 or W (bit 21) == 1 | ||
73 | */ | ||
74 | #define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) | ||
75 | |||
76 | #define PSR_fs (PSR_f|PSR_s) | ||
77 | |||
78 | #define KPROBE_RETURN_INSTRUCTION 0xe1a0f00e /* mov pc, lr */ | ||
79 | |||
80 | typedef long (insn_0arg_fn_t)(void); | ||
81 | typedef long (insn_1arg_fn_t)(long); | ||
82 | typedef long (insn_2arg_fn_t)(long, long); | ||
83 | typedef long (insn_3arg_fn_t)(long, long, long); | ||
84 | typedef long (insn_4arg_fn_t)(long, long, long, long); | ||
85 | typedef long long (insn_llret_0arg_fn_t)(void); | ||
86 | typedef long long (insn_llret_3arg_fn_t)(long, long, long); | ||
87 | typedef long long (insn_llret_4arg_fn_t)(long, long, long, long); | ||
88 | |||
89 | union reg_pair { | ||
90 | long long dr; | ||
91 | #ifdef __LITTLE_ENDIAN | ||
92 | struct { long r0, r1; }; | ||
93 | #else | ||
94 | struct { long r1, r0; }; | ||
95 | #endif | ||
96 | }; | ||
97 | |||
98 | /* | ||
99 | * For STR and STM instructions, an ARM core may choose to use either | ||
100 | * a +8 or a +12 displacement from the current instruction's address. | ||
101 | * Whichever value is chosen for a given core, it must be the same for | ||
102 | * both instructions and may not change. This function measures it. | ||
103 | */ | ||
104 | |||
105 | static int str_pc_offset; | ||
106 | |||
107 | static void __init find_str_pc_offset(void) | ||
108 | { | ||
109 | int addr, scratch, ret; | ||
110 | |||
111 | __asm__ ( | ||
112 | "sub %[ret], pc, #4 \n\t" | ||
113 | "str pc, %[addr] \n\t" | ||
114 | "ldr %[scr], %[addr] \n\t" | ||
115 | "sub %[ret], %[scr], %[ret] \n\t" | ||
116 | : [ret] "=r" (ret), [scr] "=r" (scratch), [addr] "+m" (addr)); | ||
117 | |||
118 | str_pc_offset = ret; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * The insnslot_?arg_r[w]flags() functions below are to keep the | ||
123 | * msr -> *fn -> mrs instruction sequences indivisible so that | ||
124 | * the state of the CPSR flags isn't inadvertently modified | ||
125 | * just before or just after the call. | ||
126 | */ | ||
127 | |||
128 | static inline long __kprobes | ||
129 | insnslot_0arg_rflags(long cpsr, insn_0arg_fn_t *fn) | ||
130 | { | ||
131 | register long ret asm("r0"); | ||
132 | |||
133 | __asm__ __volatile__ ( | ||
134 | "msr cpsr_fs, %[cpsr] \n\t" | ||
135 | "mov lr, pc \n\t" | ||
136 | "mov pc, %[fn] \n\t" | ||
137 | : "=r" (ret) | ||
138 | : [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
139 | : "lr", "cc" | ||
140 | ); | ||
141 | return ret; | ||
142 | } | ||
143 | |||
144 | static inline long long __kprobes | ||
145 | insnslot_llret_0arg_rflags(long cpsr, insn_llret_0arg_fn_t *fn) | ||
146 | { | ||
147 | register long ret0 asm("r0"); | ||
148 | register long ret1 asm("r1"); | ||
149 | union reg_pair fnr; | ||
150 | |||
151 | __asm__ __volatile__ ( | ||
152 | "msr cpsr_fs, %[cpsr] \n\t" | ||
153 | "mov lr, pc \n\t" | ||
154 | "mov pc, %[fn] \n\t" | ||
155 | : "=r" (ret0), "=r" (ret1) | ||
156 | : [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
157 | : "lr", "cc" | ||
158 | ); | ||
159 | fnr.r0 = ret0; | ||
160 | fnr.r1 = ret1; | ||
161 | return fnr.dr; | ||
162 | } | ||
163 | |||
164 | static inline long __kprobes | ||
165 | insnslot_1arg_rflags(long r0, long cpsr, insn_1arg_fn_t *fn) | ||
166 | { | ||
167 | register long rr0 asm("r0") = r0; | ||
168 | register long ret asm("r0"); | ||
169 | |||
170 | __asm__ __volatile__ ( | ||
171 | "msr cpsr_fs, %[cpsr] \n\t" | ||
172 | "mov lr, pc \n\t" | ||
173 | "mov pc, %[fn] \n\t" | ||
174 | : "=r" (ret) | ||
175 | : "0" (rr0), [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
176 | : "lr", "cc" | ||
177 | ); | ||
178 | return ret; | ||
179 | } | ||
180 | |||
181 | static inline long __kprobes | ||
182 | insnslot_2arg_rflags(long r0, long r1, long cpsr, insn_2arg_fn_t *fn) | ||
183 | { | ||
184 | register long rr0 asm("r0") = r0; | ||
185 | register long rr1 asm("r1") = r1; | ||
186 | register long ret asm("r0"); | ||
187 | |||
188 | __asm__ __volatile__ ( | ||
189 | "msr cpsr_fs, %[cpsr] \n\t" | ||
190 | "mov lr, pc \n\t" | ||
191 | "mov pc, %[fn] \n\t" | ||
192 | : "=r" (ret) | ||
193 | : "0" (rr0), "r" (rr1), | ||
194 | [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
195 | : "lr", "cc" | ||
196 | ); | ||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | static inline long __kprobes | ||
201 | insnslot_3arg_rflags(long r0, long r1, long r2, long cpsr, insn_3arg_fn_t *fn) | ||
202 | { | ||
203 | register long rr0 asm("r0") = r0; | ||
204 | register long rr1 asm("r1") = r1; | ||
205 | register long rr2 asm("r2") = r2; | ||
206 | register long ret asm("r0"); | ||
207 | |||
208 | __asm__ __volatile__ ( | ||
209 | "msr cpsr_fs, %[cpsr] \n\t" | ||
210 | "mov lr, pc \n\t" | ||
211 | "mov pc, %[fn] \n\t" | ||
212 | : "=r" (ret) | ||
213 | : "0" (rr0), "r" (rr1), "r" (rr2), | ||
214 | [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
215 | : "lr", "cc" | ||
216 | ); | ||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | static inline long long __kprobes | ||
221 | insnslot_llret_3arg_rflags(long r0, long r1, long r2, long cpsr, | ||
222 | insn_llret_3arg_fn_t *fn) | ||
223 | { | ||
224 | register long rr0 asm("r0") = r0; | ||
225 | register long rr1 asm("r1") = r1; | ||
226 | register long rr2 asm("r2") = r2; | ||
227 | register long ret0 asm("r0"); | ||
228 | register long ret1 asm("r1"); | ||
229 | union reg_pair fnr; | ||
230 | |||
231 | __asm__ __volatile__ ( | ||
232 | "msr cpsr_fs, %[cpsr] \n\t" | ||
233 | "mov lr, pc \n\t" | ||
234 | "mov pc, %[fn] \n\t" | ||
235 | : "=r" (ret0), "=r" (ret1) | ||
236 | : "0" (rr0), "r" (rr1), "r" (rr2), | ||
237 | [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
238 | : "lr", "cc" | ||
239 | ); | ||
240 | fnr.r0 = ret0; | ||
241 | fnr.r1 = ret1; | ||
242 | return fnr.dr; | ||
243 | } | ||
244 | |||
245 | static inline long __kprobes | ||
246 | insnslot_4arg_rflags(long r0, long r1, long r2, long r3, long cpsr, | ||
247 | insn_4arg_fn_t *fn) | ||
248 | { | ||
249 | register long rr0 asm("r0") = r0; | ||
250 | register long rr1 asm("r1") = r1; | ||
251 | register long rr2 asm("r2") = r2; | ||
252 | register long rr3 asm("r3") = r3; | ||
253 | register long ret asm("r0"); | ||
254 | |||
255 | __asm__ __volatile__ ( | ||
256 | "msr cpsr_fs, %[cpsr] \n\t" | ||
257 | "mov lr, pc \n\t" | ||
258 | "mov pc, %[fn] \n\t" | ||
259 | : "=r" (ret) | ||
260 | : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), | ||
261 | [cpsr] "r" (cpsr), [fn] "r" (fn) | ||
262 | : "lr", "cc" | ||
263 | ); | ||
264 | return ret; | ||
265 | } | ||
266 | |||
267 | static inline long __kprobes | ||
268 | insnslot_1arg_rwflags(long r0, long *cpsr, insn_1arg_fn_t *fn) | ||
269 | { | ||
270 | register long rr0 asm("r0") = r0; | ||
271 | register long ret asm("r0"); | ||
272 | long oldcpsr = *cpsr; | ||
273 | long newcpsr; | ||
274 | |||
275 | __asm__ __volatile__ ( | ||
276 | "msr cpsr_fs, %[oldcpsr] \n\t" | ||
277 | "mov lr, pc \n\t" | ||
278 | "mov pc, %[fn] \n\t" | ||
279 | "mrs %[newcpsr], cpsr \n\t" | ||
280 | : "=r" (ret), [newcpsr] "=r" (newcpsr) | ||
281 | : "0" (rr0), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) | ||
282 | : "lr", "cc" | ||
283 | ); | ||
284 | *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); | ||
285 | return ret; | ||
286 | } | ||
287 | |||
288 | static inline long __kprobes | ||
289 | insnslot_2arg_rwflags(long r0, long r1, long *cpsr, insn_2arg_fn_t *fn) | ||
290 | { | ||
291 | register long rr0 asm("r0") = r0; | ||
292 | register long rr1 asm("r1") = r1; | ||
293 | register long ret asm("r0"); | ||
294 | long oldcpsr = *cpsr; | ||
295 | long newcpsr; | ||
296 | |||
297 | __asm__ __volatile__ ( | ||
298 | "msr cpsr_fs, %[oldcpsr] \n\t" | ||
299 | "mov lr, pc \n\t" | ||
300 | "mov pc, %[fn] \n\t" | ||
301 | "mrs %[newcpsr], cpsr \n\t" | ||
302 | : "=r" (ret), [newcpsr] "=r" (newcpsr) | ||
303 | : "0" (rr0), "r" (rr1), [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) | ||
304 | : "lr", "cc" | ||
305 | ); | ||
306 | *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); | ||
307 | return ret; | ||
308 | } | ||
309 | |||
310 | static inline long __kprobes | ||
311 | insnslot_3arg_rwflags(long r0, long r1, long r2, long *cpsr, | ||
312 | insn_3arg_fn_t *fn) | ||
313 | { | ||
314 | register long rr0 asm("r0") = r0; | ||
315 | register long rr1 asm("r1") = r1; | ||
316 | register long rr2 asm("r2") = r2; | ||
317 | register long ret asm("r0"); | ||
318 | long oldcpsr = *cpsr; | ||
319 | long newcpsr; | ||
320 | |||
321 | __asm__ __volatile__ ( | ||
322 | "msr cpsr_fs, %[oldcpsr] \n\t" | ||
323 | "mov lr, pc \n\t" | ||
324 | "mov pc, %[fn] \n\t" | ||
325 | "mrs %[newcpsr], cpsr \n\t" | ||
326 | : "=r" (ret), [newcpsr] "=r" (newcpsr) | ||
327 | : "0" (rr0), "r" (rr1), "r" (rr2), | ||
328 | [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) | ||
329 | : "lr", "cc" | ||
330 | ); | ||
331 | *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); | ||
332 | return ret; | ||
333 | } | ||
334 | |||
335 | static inline long __kprobes | ||
336 | insnslot_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, | ||
337 | insn_4arg_fn_t *fn) | ||
338 | { | ||
339 | register long rr0 asm("r0") = r0; | ||
340 | register long rr1 asm("r1") = r1; | ||
341 | register long rr2 asm("r2") = r2; | ||
342 | register long rr3 asm("r3") = r3; | ||
343 | register long ret asm("r0"); | ||
344 | long oldcpsr = *cpsr; | ||
345 | long newcpsr; | ||
346 | |||
347 | __asm__ __volatile__ ( | ||
348 | "msr cpsr_fs, %[oldcpsr] \n\t" | ||
349 | "mov lr, pc \n\t" | ||
350 | "mov pc, %[fn] \n\t" | ||
351 | "mrs %[newcpsr], cpsr \n\t" | ||
352 | : "=r" (ret), [newcpsr] "=r" (newcpsr) | ||
353 | : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), | ||
354 | [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) | ||
355 | : "lr", "cc" | ||
356 | ); | ||
357 | *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); | ||
358 | return ret; | ||
359 | } | ||
360 | |||
361 | static inline long long __kprobes | ||
362 | insnslot_llret_4arg_rwflags(long r0, long r1, long r2, long r3, long *cpsr, | ||
363 | insn_llret_4arg_fn_t *fn) | ||
364 | { | ||
365 | register long rr0 asm("r0") = r0; | ||
366 | register long rr1 asm("r1") = r1; | ||
367 | register long rr2 asm("r2") = r2; | ||
368 | register long rr3 asm("r3") = r3; | ||
369 | register long ret0 asm("r0"); | ||
370 | register long ret1 asm("r1"); | ||
371 | long oldcpsr = *cpsr; | ||
372 | long newcpsr; | ||
373 | union reg_pair fnr; | ||
374 | |||
375 | __asm__ __volatile__ ( | ||
376 | "msr cpsr_fs, %[oldcpsr] \n\t" | ||
377 | "mov lr, pc \n\t" | ||
378 | "mov pc, %[fn] \n\t" | ||
379 | "mrs %[newcpsr], cpsr \n\t" | ||
380 | : "=r" (ret0), "=r" (ret1), [newcpsr] "=r" (newcpsr) | ||
381 | : "0" (rr0), "r" (rr1), "r" (rr2), "r" (rr3), | ||
382 | [oldcpsr] "r" (oldcpsr), [fn] "r" (fn) | ||
383 | : "lr", "cc" | ||
384 | ); | ||
385 | *cpsr = (oldcpsr & ~PSR_fs) | (newcpsr & PSR_fs); | ||
386 | fnr.r0 = ret0; | ||
387 | fnr.r1 = ret1; | ||
388 | return fnr.dr; | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * To avoid the complications of mimicking single-stepping on a | ||
393 | * processor without a Next-PC or a single-step mode, and to | ||
394 | * avoid having to deal with the side-effects of boosting, we | ||
395 | * simulate or emulate (almost) all ARM instructions. | ||
396 | * | ||
397 | * "Simulation" is where the instruction's behavior is duplicated in | ||
398 | * C code. "Emulation" is where the original instruction is rewritten | ||
399 | * and executed, often by altering its registers. | ||
400 | * | ||
401 | * By having all behavior of the kprobe'd instruction completed before | ||
402 | * returning from the kprobe_handler(), all locks (scheduler and | ||
403 | * interrupt) can safely be released. There is no need for secondary | ||
404 | * breakpoints, no race with MP or preemptable kernels, nor having to | ||
405 | * clean up resource counts at a later time, impacting overall system | ||
406 | * performance. By rewriting the instruction, only the minimum registers | ||
407 | * need to be loaded and saved back, optimizing performance. | ||
408 | * | ||
409 | * Calling the insnslot_*_rwflags version of a function doesn't hurt | ||
410 | * anything even when the CPSR flags aren't updated by the | ||
411 | * instruction. It's just a little slower in return for saving | ||
412 | * a little space by not having a duplicate function that doesn't | ||
413 | * update the flags. (The same optimization can be said for | ||
414 | * instructions that do or don't perform register writeback) | ||
415 | * Also, instructions can either read the flags, only write the | ||
416 | * flags, or read and write the flags. To save combinations | ||
417 | * rather than for sheer performance, flag functions just assume | ||
418 | * read and write of flags. | ||
419 | */ | ||
420 | |||
421 | static void __kprobes simulate_bbl(struct kprobe *p, struct pt_regs *regs) | ||
422 | { | ||
423 | kprobe_opcode_t insn = p->opcode; | ||
424 | long iaddr = (long)p->addr; | ||
425 | int disp = branch_displacement(insn); | ||
426 | |||
427 | if (insn & (1 << 24)) | ||
428 | regs->ARM_lr = iaddr + 4; | ||
429 | |||
430 | regs->ARM_pc = iaddr + 8 + disp; | ||
431 | } | ||
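
A worked example of the branch arithmetic above (values computed by hand, for illustration only), reusing the two macros from the top of this file: for 0xeb000001 (BL with a 24-bit immediate of 1) the displacement is 1 << 2 = 4, so the target is the probed address + 8 + 4 and, because bit 24 is set, lr becomes the probed address + 4.

#include <assert.h>

#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)

int main(void)
{
	long iaddr = 0xc0080000;		/* hypothetical probed address */
	unsigned long insn = 0xeb000001;	/* bl <iaddr + 12> */

	assert(branch_displacement(insn) == 4);
	assert(iaddr + 8 + branch_displacement(insn) == iaddr + 12);

	/* a backwards branch: immediate 0xfffffe == -2 words */
	assert(branch_displacement(0xeafffffe) == -8);
	return 0;
}
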
432 | |||
433 | static void __kprobes simulate_blx1(struct kprobe *p, struct pt_regs *regs) | ||
434 | { | ||
435 | kprobe_opcode_t insn = p->opcode; | ||
436 | long iaddr = (long)p->addr; | ||
437 | int disp = branch_displacement(insn); | ||
438 | |||
439 | regs->ARM_lr = iaddr + 4; | ||
440 | regs->ARM_pc = iaddr + 8 + disp + ((insn >> 23) & 0x2); | ||
441 | regs->ARM_cpsr |= PSR_T_BIT; | ||
442 | } | ||
443 | |||
444 | static void __kprobes simulate_blx2bx(struct kprobe *p, struct pt_regs *regs) | ||
445 | { | ||
446 | kprobe_opcode_t insn = p->opcode; | ||
447 | int rm = insn & 0xf; | ||
448 | long rmv = regs->uregs[rm]; | ||
449 | |||
450 | if (insn & (1 << 5)) | ||
451 | regs->ARM_lr = (long)p->addr + 4; | ||
452 | |||
453 | regs->ARM_pc = rmv & ~0x1; | ||
454 | regs->ARM_cpsr &= ~PSR_T_BIT; | ||
455 | if (rmv & 0x1) | ||
456 | regs->ARM_cpsr |= PSR_T_BIT; | ||
457 | } | ||
458 | |||
459 | static void __kprobes simulate_mrs(struct kprobe *p, struct pt_regs *regs) | ||
460 | { | ||
461 | kprobe_opcode_t insn = p->opcode; | ||
462 | int rd = (insn >> 12) & 0xf; | ||
463 | unsigned long mask = 0xf8ff03df; /* Mask out execution state */ | ||
464 | regs->uregs[rd] = regs->ARM_cpsr & mask; | ||
465 | } | ||
466 | |||
467 | static void __kprobes simulate_ldm1stm1(struct kprobe *p, struct pt_regs *regs) | ||
468 | { | ||
469 | kprobe_opcode_t insn = p->opcode; | ||
470 | int rn = (insn >> 16) & 0xf; | ||
471 | int lbit = insn & (1 << 20); | ||
472 | int wbit = insn & (1 << 21); | ||
473 | int ubit = insn & (1 << 23); | ||
474 | int pbit = insn & (1 << 24); | ||
475 | long *addr = (long *)regs->uregs[rn]; | ||
476 | int reg_bit_vector; | ||
477 | int reg_count; | ||
478 | |||
479 | reg_count = 0; | ||
480 | reg_bit_vector = insn & 0xffff; | ||
481 | while (reg_bit_vector) { | ||
482 | reg_bit_vector &= (reg_bit_vector - 1); | ||
483 | ++reg_count; | ||
484 | } | ||
485 | |||
486 | if (!ubit) | ||
487 | addr -= reg_count; | ||
488 | addr += (!pbit == !ubit); | ||
489 | |||
490 | reg_bit_vector = insn & 0xffff; | ||
491 | while (reg_bit_vector) { | ||
492 | int reg = __ffs(reg_bit_vector); | ||
493 | reg_bit_vector &= (reg_bit_vector - 1); | ||
494 | if (lbit) | ||
495 | regs->uregs[reg] = *addr++; | ||
496 | else | ||
497 | *addr++ = regs->uregs[reg]; | ||
498 | } | ||
499 | |||
500 | if (wbit) { | ||
501 | if (!ubit) | ||
502 | addr -= reg_count; | ||
503 | addr -= (!pbit == !ubit); | ||
504 | regs->uregs[rn] = (long)addr; | ||
505 | } | ||
506 | } | ||
507 | |||
508 | static void __kprobes simulate_stm1_pc(struct kprobe *p, struct pt_regs *regs) | ||
509 | { | ||
510 | regs->ARM_pc = (long)p->addr + str_pc_offset; | ||
511 | simulate_ldm1stm1(p, regs); | ||
512 | regs->ARM_pc = (long)p->addr + 4; | ||
513 | } | ||
514 | |||
515 | static void __kprobes simulate_mov_ipsp(struct kprobe *p, struct pt_regs *regs) | ||
516 | { | ||
517 | regs->uregs[12] = regs->uregs[13]; | ||
518 | } | ||
519 | |||
520 | static void __kprobes emulate_ldrd(struct kprobe *p, struct pt_regs *regs) | ||
521 | { | ||
522 | insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; | ||
523 | kprobe_opcode_t insn = p->opcode; | ||
524 | long ppc = (long)p->addr + 8; | ||
525 | int rd = (insn >> 12) & 0xf; | ||
526 | int rn = (insn >> 16) & 0xf; | ||
527 | int rm = insn & 0xf; /* rm may be invalid, don't care. */ | ||
528 | long rmv = (rm == 15) ? ppc : regs->uregs[rm]; | ||
529 | long rnv = (rn == 15) ? ppc : regs->uregs[rn]; | ||
530 | |||
531 | /* Not following the C calling convention here, so need asm(). */ | ||
532 | __asm__ __volatile__ ( | ||
533 | "ldr r0, %[rn] \n\t" | ||
534 | "ldr r1, %[rm] \n\t" | ||
535 | "msr cpsr_fs, %[cpsr]\n\t" | ||
536 | "mov lr, pc \n\t" | ||
537 | "mov pc, %[i_fn] \n\t" | ||
538 | "str r0, %[rn] \n\t" /* in case of writeback */ | ||
539 | "str r2, %[rd0] \n\t" | ||
540 | "str r3, %[rd1] \n\t" | ||
541 | : [rn] "+m" (rnv), | ||
542 | [rd0] "=m" (regs->uregs[rd]), | ||
543 | [rd1] "=m" (regs->uregs[rd+1]) | ||
544 | : [rm] "m" (rmv), | ||
545 | [cpsr] "r" (regs->ARM_cpsr), | ||
546 | [i_fn] "r" (i_fn) | ||
547 | : "r0", "r1", "r2", "r3", "lr", "cc" | ||
548 | ); | ||
549 | if (is_writeback(insn)) | ||
550 | regs->uregs[rn] = rnv; | ||
551 | } | ||
552 | |||
553 | static void __kprobes emulate_strd(struct kprobe *p, struct pt_regs *regs) | ||
554 | { | ||
555 | insn_4arg_fn_t *i_fn = (insn_4arg_fn_t *)&p->ainsn.insn[0]; | ||
556 | kprobe_opcode_t insn = p->opcode; | ||
557 | long ppc = (long)p->addr + 8; | ||
558 | int rd = (insn >> 12) & 0xf; | ||
559 | int rn = (insn >> 16) & 0xf; | ||
560 | int rm = insn & 0xf; | ||
561 | long rnv = (rn == 15) ? ppc : regs->uregs[rn]; | ||
562 | /* rm/rmv may be invalid, don't care. */ | ||
563 | long rmv = (rm == 15) ? ppc : regs->uregs[rm]; | ||
564 | long rnv_wb; | ||
565 | |||
566 | rnv_wb = insnslot_4arg_rflags(rnv, rmv, regs->uregs[rd], | ||
567 | regs->uregs[rd+1], | ||
568 | regs->ARM_cpsr, i_fn); | ||
569 | if (is_writeback(insn)) | ||
570 | regs->uregs[rn] = rnv_wb; | ||
571 | } | ||
572 | |||
573 | static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) | ||
574 | { | ||
575 | insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0]; | ||
576 | kprobe_opcode_t insn = p->opcode; | ||
577 | long ppc = (long)p->addr + 8; | ||
578 | union reg_pair fnr; | ||
579 | int rd = (insn >> 12) & 0xf; | ||
580 | int rn = (insn >> 16) & 0xf; | ||
581 | int rm = insn & 0xf; | ||
582 | long rdv; | ||
583 | long rnv = (rn == 15) ? ppc : regs->uregs[rn]; | ||
584 | long rmv = (rm == 15) ? ppc : regs->uregs[rm]; | ||
585 | long cpsr = regs->ARM_cpsr; | ||
586 | |||
587 | fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn); | ||
588 | if (rn != 15) | ||
589 | regs->uregs[rn] = fnr.r0; /* Save Rn in case of writeback. */ | ||
590 | rdv = fnr.r1; | ||
591 | |||
592 | if (rd == 15) { | ||
593 | #if __LINUX_ARM_ARCH__ >= 5 | ||
594 | cpsr &= ~PSR_T_BIT; | ||
595 | if (rdv & 0x1) | ||
596 | cpsr |= PSR_T_BIT; | ||
597 | regs->ARM_cpsr = cpsr; | ||
598 | rdv &= ~0x1; | ||
599 | #else | ||
600 | rdv &= ~0x2; | ||
601 | #endif | ||
602 | } | ||
603 | regs->uregs[rd] = rdv; | ||
604 | } | ||
605 | |||
606 | static void __kprobes emulate_str(struct kprobe *p, struct pt_regs *regs) | ||
607 | { | ||
608 | insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; | ||
609 | kprobe_opcode_t insn = p->opcode; | ||
610 | long iaddr = (long)p->addr; | ||
611 | int rd = (insn >> 12) & 0xf; | ||
612 | int rn = (insn >> 16) & 0xf; | ||
613 | int rm = insn & 0xf; | ||
614 | long rdv = (rd == 15) ? iaddr + str_pc_offset : regs->uregs[rd]; | ||
615 | long rnv = (rn == 15) ? iaddr + 8 : regs->uregs[rn]; | ||
616 | long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ | ||
617 | long rnv_wb; | ||
618 | |||
619 | rnv_wb = insnslot_3arg_rflags(rnv, rdv, rmv, regs->ARM_cpsr, i_fn); | ||
620 | if (rn != 15) | ||
621 | regs->uregs[rn] = rnv_wb; /* Save Rn in case of writeback. */ | ||
622 | } | ||
623 | |||
624 | static void __kprobes emulate_sat(struct kprobe *p, struct pt_regs *regs) | ||
625 | { | ||
626 | insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; | ||
627 | kprobe_opcode_t insn = p->opcode; | ||
628 | int rd = (insn >> 12) & 0xf; | ||
629 | int rm = insn & 0xf; | ||
630 | long rmv = regs->uregs[rm]; | ||
631 | |||
632 | /* Writes Q flag */ | ||
633 | regs->uregs[rd] = insnslot_1arg_rwflags(rmv, ®s->ARM_cpsr, i_fn); | ||
634 | } | ||
635 | |||
636 | static void __kprobes emulate_sel(struct kprobe *p, struct pt_regs *regs) | ||
637 | { | ||
638 | insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; | ||
639 | kprobe_opcode_t insn = p->opcode; | ||
640 | int rd = (insn >> 12) & 0xf; | ||
641 | int rn = (insn >> 16) & 0xf; | ||
642 | int rm = insn & 0xf; | ||
643 | long rnv = regs->uregs[rn]; | ||
644 | long rmv = regs->uregs[rm]; | ||
645 | |||
646 | /* Reads GE bits */ | ||
647 | regs->uregs[rd] = insnslot_2arg_rflags(rnv, rmv, regs->ARM_cpsr, i_fn); | ||
648 | } | ||
649 | |||
650 | static void __kprobes emulate_none(struct kprobe *p, struct pt_regs *regs) | ||
651 | { | ||
652 | insn_0arg_fn_t *i_fn = (insn_0arg_fn_t *)&p->ainsn.insn[0]; | ||
653 | |||
654 | insnslot_0arg_rflags(regs->ARM_cpsr, i_fn); | ||
655 | } | ||
656 | |||
657 | static void __kprobes emulate_nop(struct kprobe *p, struct pt_regs *regs) | ||
658 | { | ||
659 | } | ||
660 | |||
661 | static void __kprobes | ||
662 | emulate_rd12_modify(struct kprobe *p, struct pt_regs *regs) | ||
663 | { | ||
664 | insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; | ||
665 | kprobe_opcode_t insn = p->opcode; | ||
666 | int rd = (insn >> 12) & 0xf; | ||
667 | long rdv = regs->uregs[rd]; | ||
668 | |||
669 | regs->uregs[rd] = insnslot_1arg_rflags(rdv, regs->ARM_cpsr, i_fn); | ||
670 | } | ||
671 | |||
672 | static void __kprobes | ||
673 | emulate_rd12rn0_modify(struct kprobe *p, struct pt_regs *regs) | ||
674 | { | ||
675 | insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; | ||
676 | kprobe_opcode_t insn = p->opcode; | ||
677 | int rd = (insn >> 12) & 0xf; | ||
678 | int rn = insn & 0xf; | ||
679 | long rdv = regs->uregs[rd]; | ||
680 | long rnv = regs->uregs[rn]; | ||
681 | |||
682 | regs->uregs[rd] = insnslot_2arg_rflags(rdv, rnv, regs->ARM_cpsr, i_fn); | ||
683 | } | ||
684 | |||
685 | static void __kprobes emulate_rd12rm0(struct kprobe *p, struct pt_regs *regs) | ||
686 | { | ||
687 | insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; | ||
688 | kprobe_opcode_t insn = p->opcode; | ||
689 | int rd = (insn >> 12) & 0xf; | ||
690 | int rm = insn & 0xf; | ||
691 | long rmv = regs->uregs[rm]; | ||
692 | |||
693 | regs->uregs[rd] = insnslot_1arg_rflags(rmv, regs->ARM_cpsr, i_fn); | ||
694 | } | ||
695 | |||
696 | static void __kprobes | ||
697 | emulate_rd12rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
698 | { | ||
699 | insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; | ||
700 | kprobe_opcode_t insn = p->opcode; | ||
701 | int rd = (insn >> 12) & 0xf; | ||
702 | int rn = (insn >> 16) & 0xf; | ||
703 | int rm = insn & 0xf; | ||
704 | long rnv = regs->uregs[rn]; | ||
705 | long rmv = regs->uregs[rm]; | ||
706 | |||
707 | regs->uregs[rd] = | ||
708 | insnslot_2arg_rwflags(rnv, rmv, ®s->ARM_cpsr, i_fn); | ||
709 | } | ||
710 | |||
711 | static void __kprobes | ||
712 | emulate_rd16rn12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
713 | { | ||
714 | insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; | ||
715 | kprobe_opcode_t insn = p->opcode; | ||
716 | int rd = (insn >> 16) & 0xf; | ||
717 | int rn = (insn >> 12) & 0xf; | ||
718 | int rs = (insn >> 8) & 0xf; | ||
719 | int rm = insn & 0xf; | ||
720 | long rnv = regs->uregs[rn]; | ||
721 | long rsv = regs->uregs[rs]; | ||
722 | long rmv = regs->uregs[rm]; | ||
723 | |||
724 | regs->uregs[rd] = | ||
725 | insnslot_3arg_rwflags(rnv, rsv, rmv, ®s->ARM_cpsr, i_fn); | ||
726 | } | ||
727 | |||
728 | static void __kprobes | ||
729 | emulate_rd16rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
730 | { | ||
731 | insn_2arg_fn_t *i_fn = (insn_2arg_fn_t *)&p->ainsn.insn[0]; | ||
732 | kprobe_opcode_t insn = p->opcode; | ||
733 | int rd = (insn >> 16) & 0xf; | ||
734 | int rs = (insn >> 8) & 0xf; | ||
735 | int rm = insn & 0xf; | ||
736 | long rsv = regs->uregs[rs]; | ||
737 | long rmv = regs->uregs[rm]; | ||
738 | |||
739 | regs->uregs[rd] = | ||
740 | insnslot_2arg_rwflags(rsv, rmv, ®s->ARM_cpsr, i_fn); | ||
741 | } | ||
742 | |||
743 | static void __kprobes | ||
744 | emulate_rdhi16rdlo12rs8rm0_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
745 | { | ||
746 | insn_llret_4arg_fn_t *i_fn = (insn_llret_4arg_fn_t *)&p->ainsn.insn[0]; | ||
747 | kprobe_opcode_t insn = p->opcode; | ||
748 | union reg_pair fnr; | ||
749 | int rdhi = (insn >> 16) & 0xf; | ||
750 | int rdlo = (insn >> 12) & 0xf; | ||
751 | int rs = (insn >> 8) & 0xf; | ||
752 | int rm = insn & 0xf; | ||
753 | long rsv = regs->uregs[rs]; | ||
754 | long rmv = regs->uregs[rm]; | ||
755 | |||
756 | fnr.dr = insnslot_llret_4arg_rwflags(regs->uregs[rdhi], | ||
757 | regs->uregs[rdlo], rsv, rmv, | ||
758 | ®s->ARM_cpsr, i_fn); | ||
759 | regs->uregs[rdhi] = fnr.r0; | ||
760 | regs->uregs[rdlo] = fnr.r1; | ||
761 | } | ||
762 | |||
763 | static void __kprobes | ||
764 | emulate_alu_imm_rflags(struct kprobe *p, struct pt_regs *regs) | ||
765 | { | ||
766 | insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; | ||
767 | kprobe_opcode_t insn = p->opcode; | ||
768 | int rd = (insn >> 12) & 0xf; | ||
769 | int rn = (insn >> 16) & 0xf; | ||
770 | long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; | ||
771 | |||
772 | regs->uregs[rd] = insnslot_1arg_rflags(rnv, regs->ARM_cpsr, i_fn); | ||
773 | } | ||
774 | |||
775 | static void __kprobes | ||
776 | emulate_alu_imm_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
777 | { | ||
778 | insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; | ||
779 | kprobe_opcode_t insn = p->opcode; | ||
780 | int rd = (insn >> 12) & 0xf; | ||
781 | int rn = (insn >> 16) & 0xf; | ||
782 | long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; | ||
783 | |||
784 | regs->uregs[rd] = insnslot_1arg_rwflags(rnv, ®s->ARM_cpsr, i_fn); | ||
785 | } | ||
786 | |||
787 | static void __kprobes | ||
788 | emulate_alu_tests_imm(struct kprobe *p, struct pt_regs *regs) | ||
789 | { | ||
790 | insn_1arg_fn_t *i_fn = (insn_1arg_fn_t *)&p->ainsn.insn[0]; | ||
791 | kprobe_opcode_t insn = p->opcode; | ||
792 | int rn = (insn >> 16) & 0xf; | ||
793 | long rnv = (rn == 15) ? (long)p->addr + 8 : regs->uregs[rn]; | ||
794 | |||
795 | insnslot_1arg_rwflags(rnv, ®s->ARM_cpsr, i_fn); | ||
796 | } | ||
797 | |||
798 | static void __kprobes | ||
799 | emulate_alu_rflags(struct kprobe *p, struct pt_regs *regs) | ||
800 | { | ||
801 | insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; | ||
802 | kprobe_opcode_t insn = p->opcode; | ||
803 | long ppc = (long)p->addr + 8; | ||
804 | int rd = (insn >> 12) & 0xf; | ||
805 | int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */ | ||
806 | int rs = (insn >> 8) & 0xf; /* invalid, don't care. */ | ||
807 | int rm = insn & 0xf; | ||
808 | long rnv = (rn == 15) ? ppc : regs->uregs[rn]; | ||
809 | long rmv = (rm == 15) ? ppc : regs->uregs[rm]; | ||
810 | long rsv = regs->uregs[rs]; | ||
811 | |||
812 | regs->uregs[rd] = | ||
813 | insnslot_3arg_rflags(rnv, rmv, rsv, regs->ARM_cpsr, i_fn); | ||
814 | } | ||
815 | |||
816 | static void __kprobes | ||
817 | emulate_alu_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
818 | { | ||
819 | insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; | ||
820 | kprobe_opcode_t insn = p->opcode; | ||
821 | long ppc = (long)p->addr + 8; | ||
822 | int rd = (insn >> 12) & 0xf; | ||
823 | int rn = (insn >> 16) & 0xf; /* rn/rnv/rs/rsv may be */ | ||
824 | int rs = (insn >> 8) & 0xf; /* invalid, don't care. */ | ||
825 | int rm = insn & 0xf; | ||
826 | long rnv = (rn == 15) ? ppc : regs->uregs[rn]; | ||
827 | long rmv = (rm == 15) ? ppc : regs->uregs[rm]; | ||
828 | long rsv = regs->uregs[rs]; | ||
829 | |||
830 | regs->uregs[rd] = | ||
831 | insnslot_3arg_rwflags(rnv, rmv, rsv, ®s->ARM_cpsr, i_fn); | ||
832 | } | ||
833 | |||
834 | static void __kprobes | ||
835 | emulate_alu_tests(struct kprobe *p, struct pt_regs *regs) | ||
836 | { | ||
837 | insn_3arg_fn_t *i_fn = (insn_3arg_fn_t *)&p->ainsn.insn[0]; | ||
838 | kprobe_opcode_t insn = p->opcode; | ||
839 | long ppc = (long)p->addr + 8; | ||
840 | int rn = (insn >> 16) & 0xf; | ||
841 | int rs = (insn >> 8) & 0xf; /* rs/rsv may be invalid, don't care. */ | ||
842 | int rm = insn & 0xf; | ||
843 | long rnv = (rn == 15) ? ppc : regs->uregs[rn]; | ||
844 | long rmv = (rm == 15) ? ppc : regs->uregs[rm]; | ||
845 | long rsv = regs->uregs[rs]; | ||
846 | |||
847 | insnslot_3arg_rwflags(rnv, rmv, rsv, ®s->ARM_cpsr, i_fn); | ||
848 | } | ||
849 | |||
850 | static enum kprobe_insn __kprobes | ||
851 | prep_emulate_ldr_str(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
852 | { | ||
853 | int not_imm = (insn & (1 << 26)) ? (insn & (1 << 25)) | ||
854 | : (~insn & (1 << 22)); | ||
855 | |||
856 | if (is_writeback(insn) && is_r15(insn, 16)) | ||
857 | return INSN_REJECTED; /* Writeback to PC */ | ||
858 | |||
859 | insn &= 0xfff00fff; | ||
860 | insn |= 0x00001000; /* Rn = r0, Rd = r1 */ | ||
861 | if (not_imm) { | ||
862 | insn &= ~0xf; | ||
863 | insn |= 2; /* Rm = r2 */ | ||
864 | } | ||
865 | asi->insn[0] = insn; | ||
866 | asi->insn_handler = (insn & (1 << 20)) ? emulate_ldr : emulate_str; | ||
867 | return INSN_GOOD; | ||
868 | } | ||
869 | |||
870 | static enum kprobe_insn __kprobes | ||
871 | prep_emulate_rd12_modify(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
872 | { | ||
873 | if (is_r15(insn, 12)) | ||
874 | return INSN_REJECTED; /* Rd is PC */ | ||
875 | |||
876 | insn &= 0xffff0fff; /* Rd = r0 */ | ||
877 | asi->insn[0] = insn; | ||
878 | asi->insn_handler = emulate_rd12_modify; | ||
879 | return INSN_GOOD; | ||
880 | } | ||
881 | |||
882 | static enum kprobe_insn __kprobes | ||
883 | prep_emulate_rd12rn0_modify(kprobe_opcode_t insn, | ||
884 | struct arch_specific_insn *asi) | ||
885 | { | ||
886 | if (is_r15(insn, 12)) | ||
887 | return INSN_REJECTED; /* Rd is PC */ | ||
888 | |||
889 | insn &= 0xffff0ff0; /* Rd = r0 */ | ||
890 | insn |= 0x00000001; /* Rn = r1 */ | ||
891 | asi->insn[0] = insn; | ||
892 | asi->insn_handler = emulate_rd12rn0_modify; | ||
893 | return INSN_GOOD; | ||
894 | } | ||
895 | |||
896 | static enum kprobe_insn __kprobes | ||
897 | prep_emulate_rd12rm0(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
898 | { | ||
899 | if (is_r15(insn, 12)) | ||
900 | return INSN_REJECTED; /* Rd is PC */ | ||
901 | |||
902 | insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ | ||
903 | asi->insn[0] = insn; | ||
904 | asi->insn_handler = emulate_rd12rm0; | ||
905 | return INSN_GOOD; | ||
906 | } | ||
907 | |||
908 | static enum kprobe_insn __kprobes | ||
909 | prep_emulate_rd12rn16rm0_wflags(kprobe_opcode_t insn, | ||
910 | struct arch_specific_insn *asi) | ||
911 | { | ||
912 | if (is_r15(insn, 12)) | ||
913 | return INSN_REJECTED; /* Rd is PC */ | ||
914 | |||
915 | insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ | ||
916 | insn |= 0x00000001; /* Rm = r1 */ | ||
917 | asi->insn[0] = insn; | ||
918 | asi->insn_handler = emulate_rd12rn16rm0_rwflags; | ||
919 | return INSN_GOOD; | ||
920 | } | ||
921 | |||
922 | static enum kprobe_insn __kprobes | ||
923 | prep_emulate_rd16rs8rm0_wflags(kprobe_opcode_t insn, | ||
924 | struct arch_specific_insn *asi) | ||
925 | { | ||
926 | if (is_r15(insn, 16)) | ||
927 | return INSN_REJECTED; /* Rd is PC */ | ||
928 | |||
929 | insn &= 0xfff0f0f0; /* Rd = r0, Rs = r0 */ | ||
930 | insn |= 0x00000001; /* Rm = r1 */ | ||
931 | asi->insn[0] = insn; | ||
932 | asi->insn_handler = emulate_rd16rs8rm0_rwflags; | ||
933 | return INSN_GOOD; | ||
934 | } | ||
935 | |||
936 | static enum kprobe_insn __kprobes | ||
937 | prep_emulate_rd16rn12rs8rm0_wflags(kprobe_opcode_t insn, | ||
938 | struct arch_specific_insn *asi) | ||
939 | { | ||
940 | if (is_r15(insn, 16)) | ||
941 | return INSN_REJECTED; /* Rd is PC */ | ||
942 | |||
943 | insn &= 0xfff000f0; /* Rd = r0, Rn = r0 */ | ||
944 | insn |= 0x00000102; /* Rs = r1, Rm = r2 */ | ||
945 | asi->insn[0] = insn; | ||
946 | asi->insn_handler = emulate_rd16rn12rs8rm0_rwflags; | ||
947 | return INSN_GOOD; | ||
948 | } | ||
949 | |||
950 | static enum kprobe_insn __kprobes | ||
951 | prep_emulate_rdhi16rdlo12rs8rm0_wflags(kprobe_opcode_t insn, | ||
952 | struct arch_specific_insn *asi) | ||
953 | { | ||
954 | if (is_r15(insn, 16) || is_r15(insn, 12)) | ||
955 | return INSN_REJECTED; /* RdHi or RdLo is PC */ | ||
956 | |||
957 | insn &= 0xfff000f0; /* RdHi = r0, RdLo = r1 */ | ||
958 | insn |= 0x00001203; /* Rs = r2, Rm = r3 */ | ||
959 | asi->insn[0] = insn; | ||
960 | asi->insn_handler = emulate_rdhi16rdlo12rs8rm0_rwflags; | ||
961 | return INSN_GOOD; | ||
962 | } | ||
963 | |||
964 | /* | ||
965 | * For the instruction masking and comparisons in all the "space_*" | ||
966 | * functions below, do _not_ rearrange the order of tests unless | ||
967 | * you're very, very sure of what you are doing. For the sake of | ||
968 | * efficiency, the masks for some tests assume that other tests | ||
969 | * have been done prior to them, so the pattern each test matches | ||
970 | * can be as broad as possible, reducing the total number of | ||
971 | * tests needed. | ||
972 | */ | ||
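As a concrete illustration of that constraint, the stand-alone sketch below (not kernel code) reuses the same two masks that appear further down in space_cccc_000x(): the multiply test (mask 0x0f0000f0) must run before the looser halfword load/store test (mask 0x0e000090), because the looser pattern also matches the multiplies. The example opcodes are hand-assembled for illustration.

#include <stdint.h>
#include <stdio.h>

static const char *decode(uint32_t insn)
{
        /* 1. Multiplies: cccc 0000 xxxx xxxx xxxx xxxx 1001 xxxx */
        if ((insn & 0x0f0000f0) == 0x00000090)
                return "multiply";
        /* 2. Halfword/dual loads and stores: cccc 000x ... 1xx1 xxxx.
         *    Safe only because test 1 already claimed the multiplies. */
        if ((insn & 0x0e000090) == 0x00000090)
                return "halfword load/store";
        return "something else";
}

int main(void)
{
        printf("%s\n", decode(0xe0010392));     /* mul r1, r2, r3 */
        printf("%s\n", decode(0xe1c320b0));     /* strh r2, [r3] */
        return 0;
}

Swapping the two tests would send the MUL down the load/store path, which is exactly the kind of breakage the comment warns about.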
973 | |||
974 | static enum kprobe_insn __kprobes | ||
975 | space_1111(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
976 | { | ||
977 | /* memory hint : 1111 0100 x001 xxxx xxxx xxxx xxxx xxxx : */ | ||
978 | /* PLDI : 1111 0100 x101 xxxx xxxx xxxx xxxx xxxx : */ | ||
979 | /* PLDW : 1111 0101 x001 xxxx xxxx xxxx xxxx xxxx : */ | ||
980 | /* PLD : 1111 0101 x101 xxxx xxxx xxxx xxxx xxxx : */ | ||
981 | if ((insn & 0xfe300000) == 0xf4100000) { | ||
982 | asi->insn_handler = emulate_nop; | ||
983 | return INSN_GOOD_NO_SLOT; | ||
984 | } | ||
985 | |||
986 | /* BLX(1) : 1111 101x xxxx xxxx xxxx xxxx xxxx xxxx : */ | ||
987 | if ((insn & 0xfe000000) == 0xfa000000) { | ||
988 | asi->insn_handler = simulate_blx1; | ||
989 | return INSN_GOOD_NO_SLOT; | ||
990 | } | ||
991 | |||
992 | /* CPS : 1111 0001 0000 xxx0 xxxx xxxx xx0x xxxx */ | ||
993 | /* SETEND: 1111 0001 0000 0001 xxxx xxxx 0000 xxxx */ | ||
994 | |||
995 | /* SRS : 1111 100x x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
996 | /* RFE : 1111 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
997 | |||
998 | /* Coprocessor instructions... */ | ||
999 | /* MCRR2 : 1111 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ | ||
1000 | /* MRRC2 : 1111 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd != Rn) */ | ||
1001 | /* LDC2 : 1111 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ | ||
1002 | /* STC2 : 1111 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ | ||
1003 | /* CDP2 : 1111 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ | ||
1004 | /* MCR2 : 1111 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ | ||
1005 | /* MRC2 : 1111 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ | ||
1006 | |||
1007 | return INSN_REJECTED; | ||
1008 | } | ||
1009 | |||
1010 | static enum kprobe_insn __kprobes | ||
1011 | space_cccc_000x(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1012 | { | ||
1013 | /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx xxx0 xxxx */ | ||
1014 | if ((insn & 0x0f900010) == 0x01000000) { | ||
1015 | |||
1016 | /* MRS cpsr : cccc 0001 0000 xxxx xxxx xxxx 0000 xxxx */ | ||
1017 | if ((insn & 0x0ff000f0) == 0x01000000) { | ||
1018 | if (is_r15(insn, 12)) | ||
1019 | return INSN_REJECTED; /* Rd is PC */ | ||
1020 | asi->insn_handler = simulate_mrs; | ||
1021 | return INSN_GOOD_NO_SLOT; | ||
1022 | } | ||
1023 | |||
1024 | /* SMLALxy : cccc 0001 0100 xxxx xxxx xxxx 1xx0 xxxx */ | ||
1025 | if ((insn & 0x0ff00090) == 0x01400080) | ||
1026 | return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, | ||
1027 | asi); | ||
1028 | |||
1029 | /* SMULWy : cccc 0001 0010 xxxx xxxx xxxx 1x10 xxxx */ | ||
1030 | /* SMULxy : cccc 0001 0110 xxxx xxxx xxxx 1xx0 xxxx */ | ||
1031 | if ((insn & 0x0ff000b0) == 0x012000a0 || | ||
1032 | (insn & 0x0ff00090) == 0x01600080) | ||
1033 | return prep_emulate_rd16rs8rm0_wflags(insn, asi); | ||
1034 | |||
1035 | /* SMLAxy : cccc 0001 0000 xxxx xxxx xxxx 1xx0 xxxx : Q */ | ||
1036 | /* SMLAWy : cccc 0001 0010 xxxx xxxx xxxx 1x00 xxxx : Q */ | ||
1037 | if ((insn & 0x0ff00090) == 0x01000080 || | ||
1038 | (insn & 0x0ff000b0) == 0x01200080) | ||
1039 | return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); | ||
1040 | |||
1041 | /* BXJ : cccc 0001 0010 xxxx xxxx xxxx 0010 xxxx */ | ||
1042 | /* MSR : cccc 0001 0x10 xxxx xxxx xxxx 0000 xxxx */ | ||
1043 | /* MRS spsr : cccc 0001 0100 xxxx xxxx xxxx 0000 xxxx */ | ||
1044 | |||
1045 | /* Other instruction encodings aren't yet defined */ | ||
1046 | return INSN_REJECTED; | ||
1047 | } | ||
1048 | |||
1049 | /* cccc 0001 0xx0 xxxx xxxx xxxx xxxx 0xx1 xxxx */ | ||
1050 | else if ((insn & 0x0f900090) == 0x01000010) { | ||
1051 | |||
1052 | /* BLX(2) : cccc 0001 0010 xxxx xxxx xxxx 0011 xxxx */ | ||
1053 | /* BX : cccc 0001 0010 xxxx xxxx xxxx 0001 xxxx */ | ||
1054 | if ((insn & 0x0ff000d0) == 0x01200010) { | ||
1055 | if ((insn & 0x0ff000ff) == 0x0120003f) | ||
1056 | return INSN_REJECTED; /* BLX pc */ | ||
1057 | asi->insn_handler = simulate_blx2bx; | ||
1058 | return INSN_GOOD_NO_SLOT; | ||
1059 | } | ||
1060 | |||
1061 | /* CLZ : cccc 0001 0110 xxxx xxxx xxxx 0001 xxxx */ | ||
1062 | if ((insn & 0x0ff000f0) == 0x01600010) | ||
1063 | return prep_emulate_rd12rm0(insn, asi); | ||
1064 | |||
1065 | /* QADD : cccc 0001 0000 xxxx xxxx xxxx 0101 xxxx :Q */ | ||
1066 | /* QSUB : cccc 0001 0010 xxxx xxxx xxxx 0101 xxxx :Q */ | ||
1067 | /* QDADD : cccc 0001 0100 xxxx xxxx xxxx 0101 xxxx :Q */ | ||
1068 | /* QDSUB : cccc 0001 0110 xxxx xxxx xxxx 0101 xxxx :Q */ | ||
1069 | if ((insn & 0x0f9000f0) == 0x01000050) | ||
1070 | return prep_emulate_rd12rn16rm0_wflags(insn, asi); | ||
1071 | |||
1072 | /* BKPT : 1110 0001 0010 xxxx xxxx xxxx 0111 xxxx */ | ||
1073 | /* SMC : cccc 0001 0110 xxxx xxxx xxxx 0111 xxxx */ | ||
1074 | |||
1075 | /* Other instruction encodings aren't yet defined */ | ||
1076 | return INSN_REJECTED; | ||
1077 | } | ||
1078 | |||
1079 | /* cccc 0000 xxxx xxxx xxxx xxxx xxxx 1001 xxxx */ | ||
1080 | else if ((insn & 0x0f0000f0) == 0x00000090) { | ||
1081 | |||
1082 | /* MUL : cccc 0000 0000 xxxx xxxx xxxx 1001 xxxx : */ | ||
1083 | /* MULS : cccc 0000 0001 xxxx xxxx xxxx 1001 xxxx :cc */ | ||
1084 | /* MLA : cccc 0000 0010 xxxx xxxx xxxx 1001 xxxx : */ | ||
1085 | /* MLAS : cccc 0000 0011 xxxx xxxx xxxx 1001 xxxx :cc */ | ||
1086 | /* UMAAL : cccc 0000 0100 xxxx xxxx xxxx 1001 xxxx : */ | ||
1087 | /* undef : cccc 0000 0101 xxxx xxxx xxxx 1001 xxxx : */ | ||
1088 | /* MLS : cccc 0000 0110 xxxx xxxx xxxx 1001 xxxx : */ | ||
1089 | /* undef : cccc 0000 0111 xxxx xxxx xxxx 1001 xxxx : */ | ||
1090 | /* UMULL : cccc 0000 1000 xxxx xxxx xxxx 1001 xxxx : */ | ||
1091 | /* UMULLS : cccc 0000 1001 xxxx xxxx xxxx 1001 xxxx :cc */ | ||
1092 | /* UMLAL : cccc 0000 1010 xxxx xxxx xxxx 1001 xxxx : */ | ||
1093 | /* UMLALS : cccc 0000 1011 xxxx xxxx xxxx 1001 xxxx :cc */ | ||
1094 | /* SMULL : cccc 0000 1100 xxxx xxxx xxxx 1001 xxxx : */ | ||
1095 | /* SMULLS : cccc 0000 1101 xxxx xxxx xxxx 1001 xxxx :cc */ | ||
1096 | /* SMLAL : cccc 0000 1110 xxxx xxxx xxxx 1001 xxxx : */ | ||
1097 | /* SMLALS : cccc 0000 1111 xxxx xxxx xxxx 1001 xxxx :cc */ | ||
1098 | if ((insn & 0x00d00000) == 0x00500000) | ||
1099 | return INSN_REJECTED; | ||
1100 | else if ((insn & 0x00e00000) == 0x00000000) | ||
1101 | return prep_emulate_rd16rs8rm0_wflags(insn, asi); | ||
1102 | else if ((insn & 0x00a00000) == 0x00200000) | ||
1103 | return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); | ||
1104 | else | ||
1105 | return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, | ||
1106 | asi); | ||
1107 | } | ||
1108 | |||
1109 | /* cccc 000x xxxx xxxx xxxx xxxx xxxx 1xx1 xxxx */ | ||
1110 | else if ((insn & 0x0e000090) == 0x00000090) { | ||
1111 | |||
1112 | /* SWP : cccc 0001 0000 xxxx xxxx xxxx 1001 xxxx */ | ||
1113 | /* SWPB : cccc 0001 0100 xxxx xxxx xxxx 1001 xxxx */ | ||
1114 | /* ??? : cccc 0001 0x01 xxxx xxxx xxxx 1001 xxxx */ | ||
1115 | /* ??? : cccc 0001 0x10 xxxx xxxx xxxx 1001 xxxx */ | ||
1116 | /* ??? : cccc 0001 0x11 xxxx xxxx xxxx 1001 xxxx */ | ||
1117 | /* STREX : cccc 0001 1000 xxxx xxxx xxxx 1001 xxxx */ | ||
1118 | /* LDREX : cccc 0001 1001 xxxx xxxx xxxx 1001 xxxx */ | ||
1119 | /* STREXD: cccc 0001 1010 xxxx xxxx xxxx 1001 xxxx */ | ||
1120 | /* LDREXD: cccc 0001 1011 xxxx xxxx xxxx 1001 xxxx */ | ||
1121 | /* STREXB: cccc 0001 1100 xxxx xxxx xxxx 1001 xxxx */ | ||
1122 | /* LDREXB: cccc 0001 1101 xxxx xxxx xxxx 1001 xxxx */ | ||
1123 | /* STREXH: cccc 0001 1110 xxxx xxxx xxxx 1001 xxxx */ | ||
1124 | /* LDREXH: cccc 0001 1111 xxxx xxxx xxxx 1001 xxxx */ | ||
1125 | |||
1126 | /* LDRD : cccc 000x xxx0 xxxx xxxx xxxx 1101 xxxx */ | ||
1127 | /* STRD : cccc 000x xxx0 xxxx xxxx xxxx 1111 xxxx */ | ||
1128 | /* LDRH : cccc 000x xxx1 xxxx xxxx xxxx 1011 xxxx */ | ||
1129 | /* STRH : cccc 000x xxx0 xxxx xxxx xxxx 1011 xxxx */ | ||
1130 | /* LDRSB : cccc 000x xxx1 xxxx xxxx xxxx 1101 xxxx */ | ||
1131 | /* LDRSH : cccc 000x xxx1 xxxx xxxx xxxx 1111 xxxx */ | ||
1132 | if ((insn & 0x0f0000f0) == 0x01000090) { | ||
1133 | if ((insn & 0x0fb000f0) == 0x01000090) { | ||
1134 | /* SWP/SWPB */ | ||
1135 | return prep_emulate_rd12rn16rm0_wflags(insn, | ||
1136 | asi); | ||
1137 | } else { | ||
1138 | /* STREX/LDREX variants and unallocated space */ | ||
1139 | return INSN_REJECTED; | ||
1140 | } | ||
1141 | |||
1142 | } else if ((insn & 0x0e1000d0) == 0x000000d0) { | ||
1143 | /* STRD/LDRD */ | ||
1144 | if ((insn & 0x0000e000) == 0x0000e000) | ||
1145 | return INSN_REJECTED; /* Rd is LR or PC */ | ||
1146 | if (is_writeback(insn) && is_r15(insn, 16)) | ||
1147 | return INSN_REJECTED; /* Writeback to PC */ | ||
1148 | |||
1149 | insn &= 0xfff00fff; | ||
1150 | insn |= 0x00002000; /* Rn = r0, Rd = r2 */ | ||
1151 | if (!(insn & (1 << 22))) { | ||
1152 | /* Register index */ | ||
1153 | insn &= ~0xf; | ||
1154 | insn |= 1; /* Rm = r1 */ | ||
1155 | } | ||
1156 | asi->insn[0] = insn; | ||
1157 | asi->insn_handler = | ||
1158 | (insn & (1 << 5)) ? emulate_strd : emulate_ldrd; | ||
1159 | return INSN_GOOD; | ||
1160 | } | ||
1161 | |||
1162 | /* LDRH/STRH/LDRSB/LDRSH */ | ||
1163 | if (is_r15(insn, 12)) | ||
1164 | return INSN_REJECTED; /* Rd is PC */ | ||
1165 | return prep_emulate_ldr_str(insn, asi); | ||
1166 | } | ||
1167 | |||
1168 | /* cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
1169 | |||
1170 | /* | ||
1171 | * ALU op with S bit and Rd == 15 : | ||
1172 | * cccc 000x xxx1 xxxx 1111 xxxx xxxx xxxx | ||
1173 | */ | ||
1174 | if ((insn & 0x0e10f000) == 0x0010f000) | ||
1175 | return INSN_REJECTED; | ||
1176 | |||
1177 | /* | ||
1178 | * "mov ip, sp" is the most common kprobe'd instruction by far. | ||
1179 | * Check and optimize for it explicitly. | ||
1180 | */ | ||
1181 | if (insn == 0xe1a0c00d) { | ||
1182 | asi->insn_handler = simulate_mov_ipsp; | ||
1183 | return INSN_GOOD_NO_SLOT; | ||
1184 | } | ||
1185 | |||
1186 | /* | ||
1187 | * Data processing: Immediate-shift / Register-shift | ||
1188 | * ALU op : cccc 000x xxxx xxxx xxxx xxxx xxxx xxxx | ||
1189 | * CPY : cccc 0001 1010 xxxx xxxx 0000 0000 xxxx | ||
1190 | * MOV : cccc 0001 101x xxxx xxxx xxxx xxxx xxxx | ||
1191 | * *S (bit 20) updates condition codes | ||
1192 | * ADC/SBC/RSC reads the C flag | ||
1193 | */ | ||
1194 | insn &= 0xfff00ff0; /* Rn = r0, Rd = r0 */ | ||
1195 | insn |= 0x00000001; /* Rm = r1 */ | ||
1196 | if (insn & 0x010) { | ||
1197 | insn &= 0xfffff0ff; /* register shift */ | ||
1198 | insn |= 0x00000200; /* Rs = r2 */ | ||
1199 | } | ||
1200 | asi->insn[0] = insn; | ||
1201 | |||
1202 | if ((insn & 0x0f900000) == 0x01100000) { | ||
1203 | /* | ||
1204 | * TST : cccc 0001 0001 xxxx xxxx xxxx xxxx xxxx | ||
1205 | * TEQ : cccc 0001 0011 xxxx xxxx xxxx xxxx xxxx | ||
1206 | * CMP : cccc 0001 0101 xxxx xxxx xxxx xxxx xxxx | ||
1207 | * CMN : cccc 0001 0111 xxxx xxxx xxxx xxxx xxxx | ||
1208 | */ | ||
1209 | asi->insn_handler = emulate_alu_tests; | ||
1210 | } else { | ||
1211 | /* ALU ops which write to Rd */ | ||
1212 | asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ | ||
1213 | emulate_alu_rwflags : emulate_alu_rflags; | ||
1214 | } | ||
1215 | return INSN_GOOD; | ||
1216 | } | ||
1217 | |||
1218 | static enum kprobe_insn __kprobes | ||
1219 | space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1220 | { | ||
1221 | /* MOVW : cccc 0011 0000 xxxx xxxx xxxx xxxx xxxx */ | ||
1222 | /* MOVT : cccc 0011 0100 xxxx xxxx xxxx xxxx xxxx */ | ||
1223 | if ((insn & 0x0fb00000) == 0x03000000) | ||
1224 | return prep_emulate_rd12_modify(insn, asi); | ||
1225 | |||
1226 | /* hints : cccc 0011 0010 0000 xxxx xxxx xxxx xxxx */ | ||
1227 | if ((insn & 0x0fff0000) == 0x03200000) { | ||
1228 | unsigned op2 = insn & 0x000000ff; | ||
1229 | if (op2 == 0x01 || op2 == 0x04) { | ||
1230 | /* YIELD : cccc 0011 0010 0000 xxxx xxxx 0000 0001 */ | ||
1231 | /* SEV : cccc 0011 0010 0000 xxxx xxxx 0000 0100 */ | ||
1232 | asi->insn[0] = insn; | ||
1233 | asi->insn_handler = emulate_none; | ||
1234 | return INSN_GOOD; | ||
1235 | } else if (op2 <= 0x03) { | ||
1236 | /* NOP : cccc 0011 0010 0000 xxxx xxxx 0000 0000 */ | ||
1237 | /* WFE : cccc 0011 0010 0000 xxxx xxxx 0000 0010 */ | ||
1238 | /* WFI : cccc 0011 0010 0000 xxxx xxxx 0000 0011 */ | ||
1239 | /* | ||
1240 | * We make WFE and WFI true NOPs to avoid stalls due | ||
1241 | * to missing events whilst processing the probe. | ||
1242 | */ | ||
1243 | asi->insn_handler = emulate_nop; | ||
1244 | return INSN_GOOD_NO_SLOT; | ||
1245 | } | ||
1246 | /* For DBG and unallocated hints it's safest to reject them */ | ||
1247 | return INSN_REJECTED; | ||
1248 | } | ||
1249 | |||
1250 | /* | ||
1251 | * MSR : cccc 0011 0x10 xxxx xxxx xxxx xxxx xxxx | ||
1252 | * ALU op with S bit and Rd == 15 : | ||
1253 | * cccc 001x xxx1 xxxx 1111 xxxx xxxx xxxx | ||
1254 | */ | ||
1255 | if ((insn & 0x0fb00000) == 0x03200000 || /* MSR */ | ||
1256 | (insn & 0x0e10f000) == 0x0210f000) /* ALU s-bit, R15 */ | ||
1257 | return INSN_REJECTED; | ||
1258 | |||
1259 | /* | ||
1260 | * Data processing: 32-bit Immediate | ||
1261 | * ALU op : cccc 001x xxxx xxxx xxxx xxxx xxxx xxxx | ||
1262 | * MOV : cccc 0011 101x xxxx xxxx xxxx xxxx xxxx | ||
1263 | * *S (bit 20) updates condition codes | ||
1264 | * ADC/SBC/RSC reads the C flag | ||
1265 | */ | ||
1266 | insn &= 0xfff00fff; /* Rn = r0 and Rd = r0 */ | ||
1267 | asi->insn[0] = insn; | ||
1268 | |||
1269 | if ((insn & 0x0f900000) == 0x03100000) { | ||
1270 | /* | ||
1271 | * TST : cccc 0011 0001 xxxx xxxx xxxx xxxx xxxx | ||
1272 | * TEQ : cccc 0011 0011 xxxx xxxx xxxx xxxx xxxx | ||
1273 | * CMP : cccc 0011 0101 xxxx xxxx xxxx xxxx xxxx | ||
1274 | * CMN : cccc 0011 0111 xxxx xxxx xxxx xxxx xxxx | ||
1275 | */ | ||
1276 | asi->insn_handler = emulate_alu_tests_imm; | ||
1277 | } else { | ||
1278 | /* ALU ops which write to Rd */ | ||
1279 | asi->insn_handler = (insn & (1 << 20)) ? /* S-bit */ | ||
1280 | emulate_alu_imm_rwflags : emulate_alu_imm_rflags; | ||
1281 | } | ||
1282 | return INSN_GOOD; | ||
1283 | } | ||
1284 | |||
1285 | static enum kprobe_insn __kprobes | ||
1286 | space_cccc_0110__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1287 | { | ||
1288 | /* SEL : cccc 0110 1000 xxxx xxxx xxxx 1011 xxxx GE: !!! */ | ||
1289 | if ((insn & 0x0ff000f0) == 0x068000b0) { | ||
1290 | if (is_r15(insn, 12)) | ||
1291 | return INSN_REJECTED; /* Rd is PC */ | ||
1292 | insn &= 0xfff00ff0; /* Rd = r0, Rn = r0 */ | ||
1293 | insn |= 0x00000001; /* Rm = r1 */ | ||
1294 | asi->insn[0] = insn; | ||
1295 | asi->insn_handler = emulate_sel; | ||
1296 | return INSN_GOOD; | ||
1297 | } | ||
1298 | |||
1299 | /* SSAT : cccc 0110 101x xxxx xxxx xxxx xx01 xxxx :Q */ | ||
1300 | /* USAT : cccc 0110 111x xxxx xxxx xxxx xx01 xxxx :Q */ | ||
1301 | /* SSAT16 : cccc 0110 1010 xxxx xxxx xxxx 0011 xxxx :Q */ | ||
1302 | /* USAT16 : cccc 0110 1110 xxxx xxxx xxxx 0011 xxxx :Q */ | ||
1303 | if ((insn & 0x0fa00030) == 0x06a00010 || | ||
1304 | (insn & 0x0fb000f0) == 0x06a00030) { | ||
1305 | if (is_r15(insn, 12)) | ||
1306 | return INSN_REJECTED; /* Rd is PC */ | ||
1307 | insn &= 0xffff0ff0; /* Rd = r0, Rm = r0 */ | ||
1308 | asi->insn[0] = insn; | ||
1309 | asi->insn_handler = emulate_sat; | ||
1310 | return INSN_GOOD; | ||
1311 | } | ||
1312 | |||
1313 | /* REV : cccc 0110 1011 xxxx xxxx xxxx 0011 xxxx */ | ||
1314 | /* REV16 : cccc 0110 1011 xxxx xxxx xxxx 1011 xxxx */ | ||
1315 | /* RBIT : cccc 0110 1111 xxxx xxxx xxxx 0011 xxxx */ | ||
1316 | /* REVSH : cccc 0110 1111 xxxx xxxx xxxx 1011 xxxx */ | ||
1317 | if ((insn & 0x0ff00070) == 0x06b00030 || | ||
1318 | (insn & 0x0ff00070) == 0x06f00030) | ||
1319 | return prep_emulate_rd12rm0(insn, asi); | ||
1320 | |||
1321 | /* ??? : cccc 0110 0000 xxxx xxxx xxxx xxx1 xxxx : */ | ||
1322 | /* SADD16 : cccc 0110 0001 xxxx xxxx xxxx 0001 xxxx :GE */ | ||
1323 | /* SADDSUBX : cccc 0110 0001 xxxx xxxx xxxx 0011 xxxx :GE */ | ||
1324 | /* SSUBADDX : cccc 0110 0001 xxxx xxxx xxxx 0101 xxxx :GE */ | ||
1325 | /* SSUB16 : cccc 0110 0001 xxxx xxxx xxxx 0111 xxxx :GE */ | ||
1326 | /* SADD8 : cccc 0110 0001 xxxx xxxx xxxx 1001 xxxx :GE */ | ||
1327 | /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1011 xxxx : */ | ||
1328 | /* ??? : cccc 0110 0001 xxxx xxxx xxxx 1101 xxxx : */ | ||
1329 | /* SSUB8 : cccc 0110 0001 xxxx xxxx xxxx 1111 xxxx :GE */ | ||
1330 | /* QADD16 : cccc 0110 0010 xxxx xxxx xxxx 0001 xxxx : */ | ||
1331 | /* QADDSUBX : cccc 0110 0010 xxxx xxxx xxxx 0011 xxxx : */ | ||
1332 | /* QSUBADDX : cccc 0110 0010 xxxx xxxx xxxx 0101 xxxx : */ | ||
1333 | /* QSUB16 : cccc 0110 0010 xxxx xxxx xxxx 0111 xxxx : */ | ||
1334 | /* QADD8 : cccc 0110 0010 xxxx xxxx xxxx 1001 xxxx : */ | ||
1335 | /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1011 xxxx : */ | ||
1336 | /* ??? : cccc 0110 0010 xxxx xxxx xxxx 1101 xxxx : */ | ||
1337 | /* QSUB8 : cccc 0110 0010 xxxx xxxx xxxx 1111 xxxx : */ | ||
1338 | /* SHADD16 : cccc 0110 0011 xxxx xxxx xxxx 0001 xxxx : */ | ||
1339 | /* SHADDSUBX : cccc 0110 0011 xxxx xxxx xxxx 0011 xxxx : */ | ||
1340 | /* SHSUBADDX : cccc 0110 0011 xxxx xxxx xxxx 0101 xxxx : */ | ||
1341 | /* SHSUB16 : cccc 0110 0011 xxxx xxxx xxxx 0111 xxxx : */ | ||
1342 | /* SHADD8 : cccc 0110 0011 xxxx xxxx xxxx 1001 xxxx : */ | ||
1343 | /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1011 xxxx : */ | ||
1344 | /* ??? : cccc 0110 0011 xxxx xxxx xxxx 1101 xxxx : */ | ||
1345 | /* SHSUB8 : cccc 0110 0011 xxxx xxxx xxxx 1111 xxxx : */ | ||
1346 | /* ??? : cccc 0110 0100 xxxx xxxx xxxx xxx1 xxxx : */ | ||
1347 | /* UADD16 : cccc 0110 0101 xxxx xxxx xxxx 0001 xxxx :GE */ | ||
1348 | /* UADDSUBX : cccc 0110 0101 xxxx xxxx xxxx 0011 xxxx :GE */ | ||
1349 | /* USUBADDX : cccc 0110 0101 xxxx xxxx xxxx 0101 xxxx :GE */ | ||
1350 | /* USUB16 : cccc 0110 0101 xxxx xxxx xxxx 0111 xxxx :GE */ | ||
1351 | /* UADD8 : cccc 0110 0101 xxxx xxxx xxxx 1001 xxxx :GE */ | ||
1352 | /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1011 xxxx : */ | ||
1353 | /* ??? : cccc 0110 0101 xxxx xxxx xxxx 1101 xxxx : */ | ||
1354 | /* USUB8 : cccc 0110 0101 xxxx xxxx xxxx 1111 xxxx :GE */ | ||
1355 | /* UQADD16 : cccc 0110 0110 xxxx xxxx xxxx 0001 xxxx : */ | ||
1356 | /* UQADDSUBX : cccc 0110 0110 xxxx xxxx xxxx 0011 xxxx : */ | ||
1357 | /* UQSUBADDX : cccc 0110 0110 xxxx xxxx xxxx 0101 xxxx : */ | ||
1358 | /* UQSUB16 : cccc 0110 0110 xxxx xxxx xxxx 0111 xxxx : */ | ||
1359 | /* UQADD8 : cccc 0110 0110 xxxx xxxx xxxx 1001 xxxx : */ | ||
1360 | /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1011 xxxx : */ | ||
1361 | /* ??? : cccc 0110 0110 xxxx xxxx xxxx 1101 xxxx : */ | ||
1362 | /* UQSUB8 : cccc 0110 0110 xxxx xxxx xxxx 1111 xxxx : */ | ||
1363 | /* UHADD16 : cccc 0110 0111 xxxx xxxx xxxx 0001 xxxx : */ | ||
1364 | /* UHADDSUBX : cccc 0110 0111 xxxx xxxx xxxx 0011 xxxx : */ | ||
1365 | /* UHSUBADDX : cccc 0110 0111 xxxx xxxx xxxx 0101 xxxx : */ | ||
1366 | /* UHSUB16 : cccc 0110 0111 xxxx xxxx xxxx 0111 xxxx : */ | ||
1367 | /* UHADD8 : cccc 0110 0111 xxxx xxxx xxxx 1001 xxxx : */ | ||
1368 | /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1011 xxxx : */ | ||
1369 | /* ??? : cccc 0110 0111 xxxx xxxx xxxx 1101 xxxx : */ | ||
1370 | /* UHSUB8 : cccc 0110 0111 xxxx xxxx xxxx 1111 xxxx : */ | ||
1371 | if ((insn & 0x0f800010) == 0x06000010) { | ||
1372 | if ((insn & 0x00300000) == 0x00000000 || | ||
1373 | (insn & 0x000000e0) == 0x000000a0 || | ||
1374 | (insn & 0x000000e0) == 0x000000c0) | ||
1375 | return INSN_REJECTED; /* Unallocated space */ | ||
1376 | return prep_emulate_rd12rn16rm0_wflags(insn, asi); | ||
1377 | } | ||
1378 | |||
1379 | /* PKHBT : cccc 0110 1000 xxxx xxxx xxxx x001 xxxx : */ | ||
1380 | /* PKHTB : cccc 0110 1000 xxxx xxxx xxxx x101 xxxx : */ | ||
1381 | if ((insn & 0x0ff00030) == 0x06800010) | ||
1382 | return prep_emulate_rd12rn16rm0_wflags(insn, asi); | ||
1383 | |||
1384 | /* SXTAB16 : cccc 0110 1000 xxxx xxxx xxxx 0111 xxxx : */ | ||
1385 | /* SXTB16 : cccc 0110 1000 1111 xxxx xxxx 0111 xxxx : */ | ||
1386 | /* ??? : cccc 0110 1001 xxxx xxxx xxxx 0111 xxxx : */ | ||
1387 | /* SXTAB : cccc 0110 1010 xxxx xxxx xxxx 0111 xxxx : */ | ||
1388 | /* SXTB : cccc 0110 1010 1111 xxxx xxxx 0111 xxxx : */ | ||
1389 | /* SXTAH : cccc 0110 1011 xxxx xxxx xxxx 0111 xxxx : */ | ||
1390 | /* SXTH : cccc 0110 1011 1111 xxxx xxxx 0111 xxxx : */ | ||
1391 | /* UXTAB16 : cccc 0110 1100 xxxx xxxx xxxx 0111 xxxx : */ | ||
1392 | /* UXTB16 : cccc 0110 1100 1111 xxxx xxxx 0111 xxxx : */ | ||
1393 | /* ??? : cccc 0110 1101 xxxx xxxx xxxx 0111 xxxx : */ | ||
1394 | /* UXTAB : cccc 0110 1110 xxxx xxxx xxxx 0111 xxxx : */ | ||
1395 | /* UXTB : cccc 0110 1110 1111 xxxx xxxx 0111 xxxx : */ | ||
1396 | /* UXTAH : cccc 0110 1111 xxxx xxxx xxxx 0111 xxxx : */ | ||
1397 | /* UXTH : cccc 0110 1111 1111 xxxx xxxx 0111 xxxx : */ | ||
1398 | if ((insn & 0x0f8000f0) == 0x06800070) { | ||
1399 | if ((insn & 0x00300000) == 0x00100000) | ||
1400 | return INSN_REJECTED; /* Unallocated space */ | ||
1401 | |||
1402 | if ((insn & 0x000f0000) == 0x000f0000) | ||
1403 | return prep_emulate_rd12rm0(insn, asi); | ||
1404 | else | ||
1405 | return prep_emulate_rd12rn16rm0_wflags(insn, asi); | ||
1406 | } | ||
1407 | |||
1408 | /* Other instruction encodings aren't yet defined */ | ||
1409 | return INSN_REJECTED; | ||
1410 | } | ||
1411 | |||
1412 | static enum kprobe_insn __kprobes | ||
1413 | space_cccc_0111__1(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1414 | { | ||
1415 | /* Undef : cccc 0111 1111 xxxx xxxx xxxx 1111 xxxx */ | ||
1416 | if ((insn & 0x0ff000f0) == 0x07f000f0) | ||
1417 | return INSN_REJECTED; | ||
1418 | |||
1419 | /* SMLALD : cccc 0111 0100 xxxx xxxx xxxx 00x1 xxxx */ | ||
1420 | /* SMLSLD : cccc 0111 0100 xxxx xxxx xxxx 01x1 xxxx */ | ||
1421 | if ((insn & 0x0ff00090) == 0x07400010) | ||
1422 | return prep_emulate_rdhi16rdlo12rs8rm0_wflags(insn, asi); | ||
1423 | |||
1424 | /* SMLAD : cccc 0111 0000 xxxx xxxx xxxx 00x1 xxxx :Q */ | ||
1425 | /* SMUAD : cccc 0111 0000 xxxx 1111 xxxx 00x1 xxxx :Q */ | ||
1426 | /* SMLSD : cccc 0111 0000 xxxx xxxx xxxx 01x1 xxxx :Q */ | ||
1427 | /* SMUSD : cccc 0111 0000 xxxx 1111 xxxx 01x1 xxxx : */ | ||
1428 | /* SMMLA : cccc 0111 0101 xxxx xxxx xxxx 00x1 xxxx : */ | ||
1429 | /* SMMUL : cccc 0111 0101 xxxx 1111 xxxx 00x1 xxxx : */ | ||
1430 | /* USADA8 : cccc 0111 1000 xxxx xxxx xxxx 0001 xxxx : */ | ||
1431 | /* USAD8 : cccc 0111 1000 xxxx 1111 xxxx 0001 xxxx : */ | ||
1432 | if ((insn & 0x0ff00090) == 0x07000010 || | ||
1433 | (insn & 0x0ff000d0) == 0x07500010 || | ||
1434 | (insn & 0x0ff000f0) == 0x07800010) { | ||
1435 | |||
1436 | if ((insn & 0x0000f000) == 0x0000f000) | ||
1437 | return prep_emulate_rd16rs8rm0_wflags(insn, asi); | ||
1438 | else | ||
1439 | return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); | ||
1440 | } | ||
1441 | |||
1442 | /* SMMLS : cccc 0111 0101 xxxx xxxx xxxx 11x1 xxxx : */ | ||
1443 | if ((insn & 0x0ff000d0) == 0x075000d0) | ||
1444 | return prep_emulate_rd16rn12rs8rm0_wflags(insn, asi); | ||
1445 | |||
1446 | /* SBFX : cccc 0111 101x xxxx xxxx xxxx x101 xxxx : */ | ||
1447 | /* UBFX : cccc 0111 111x xxxx xxxx xxxx x101 xxxx : */ | ||
1448 | if ((insn & 0x0fa00070) == 0x07a00050) | ||
1449 | return prep_emulate_rd12rm0(insn, asi); | ||
1450 | |||
1451 | /* BFI : cccc 0111 110x xxxx xxxx xxxx x001 xxxx : */ | ||
1452 | /* BFC : cccc 0111 110x xxxx xxxx xxxx x001 1111 : */ | ||
1453 | if ((insn & 0x0fe00070) == 0x07c00010) { | ||
1454 | |||
1455 | if ((insn & 0x0000000f) == 0x0000000f) | ||
1456 | return prep_emulate_rd12_modify(insn, asi); | ||
1457 | else | ||
1458 | return prep_emulate_rd12rn0_modify(insn, asi); | ||
1459 | } | ||
1460 | |||
1461 | return INSN_REJECTED; | ||
1462 | } | ||
1463 | |||
1464 | static enum kprobe_insn __kprobes | ||
1465 | space_cccc_01xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1466 | { | ||
1467 | /* LDR : cccc 01xx x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
1468 | /* LDRB : cccc 01xx x1x1 xxxx xxxx xxxx xxxx xxxx */ | ||
1469 | /* LDRBT : cccc 01x0 x111 xxxx xxxx xxxx xxxx xxxx */ | ||
1470 | /* LDRT : cccc 01x0 x011 xxxx xxxx xxxx xxxx xxxx */ | ||
1471 | /* STR : cccc 01xx x0x0 xxxx xxxx xxxx xxxx xxxx */ | ||
1472 | /* STRB : cccc 01xx x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
1473 | /* STRBT : cccc 01x0 x110 xxxx xxxx xxxx xxxx xxxx */ | ||
1474 | /* STRT : cccc 01x0 x010 xxxx xxxx xxxx xxxx xxxx */ | ||
1475 | |||
1476 | if ((insn & 0x00500000) == 0x00500000 && is_r15(insn, 12)) | ||
1477 | return INSN_REJECTED; /* LDRB into PC */ | ||
1478 | |||
1479 | return prep_emulate_ldr_str(insn, asi); | ||
1480 | } | ||
1481 | |||
1482 | static enum kprobe_insn __kprobes | ||
1483 | space_cccc_100x(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1484 | { | ||
1485 | /* LDM(2) : cccc 100x x101 xxxx 0xxx xxxx xxxx xxxx */ | ||
1486 | /* LDM(3) : cccc 100x x1x1 xxxx 1xxx xxxx xxxx xxxx */ | ||
1487 | if ((insn & 0x0e708000) == 0x85000000 || | ||
1488 | (insn & 0x0e508000) == 0x85010000) | ||
1489 | return INSN_REJECTED; | ||
1490 | |||
1491 | /* LDM(1) : cccc 100x x0x1 xxxx xxxx xxxx xxxx xxxx */ | ||
1492 | /* STM(1) : cccc 100x x0x0 xxxx xxxx xxxx xxxx xxxx */ | ||
1493 | asi->insn_handler = ((insn & 0x108000) == 0x008000) ? /* STM & R15 */ | ||
1494 | simulate_stm1_pc : simulate_ldm1stm1; | ||
1495 | return INSN_GOOD_NO_SLOT; | ||
1496 | } | ||
1497 | |||
1498 | static enum kprobe_insn __kprobes | ||
1499 | space_cccc_101x(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1500 | { | ||
1501 | /* B : cccc 1010 xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
1502 | /* BL : cccc 1011 xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
1503 | asi->insn_handler = simulate_bbl; | ||
1504 | return INSN_GOOD_NO_SLOT; | ||
1505 | } | ||
1506 | |||
1507 | static enum kprobe_insn __kprobes | ||
1508 | space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1509 | { | ||
1510 | /* Coprocessor instructions... */ | ||
1511 | /* MCRR : cccc 1100 0100 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ | ||
1512 | /* MRRC : cccc 1100 0101 xxxx xxxx xxxx xxxx xxxx : (Rd!=Rn) */ | ||
1513 | /* LDC : cccc 110x xxx1 xxxx xxxx xxxx xxxx xxxx */ | ||
1514 | /* STC : cccc 110x xxx0 xxxx xxxx xxxx xxxx xxxx */ | ||
1515 | /* CDP : cccc 1110 xxxx xxxx xxxx xxxx xxx0 xxxx */ | ||
1516 | /* MCR : cccc 1110 xxx0 xxxx xxxx xxxx xxx1 xxxx */ | ||
1517 | /* MRC : cccc 1110 xxx1 xxxx xxxx xxxx xxx1 xxxx */ | ||
1518 | |||
1519 | /* SVC : cccc 1111 xxxx xxxx xxxx xxxx xxxx xxxx */ | ||
1520 | |||
1521 | return INSN_REJECTED; | ||
1522 | } | ||
1523 | |||
1524 | static unsigned long __kprobes __check_eq(unsigned long cpsr) | ||
1525 | { | ||
1526 | return cpsr & PSR_Z_BIT; | ||
1527 | } | ||
1528 | |||
1529 | static unsigned long __kprobes __check_ne(unsigned long cpsr) | ||
1530 | { | ||
1531 | return (~cpsr) & PSR_Z_BIT; | ||
1532 | } | ||
1533 | |||
1534 | static unsigned long __kprobes __check_cs(unsigned long cpsr) | ||
1535 | { | ||
1536 | return cpsr & PSR_C_BIT; | ||
1537 | } | ||
1538 | |||
1539 | static unsigned long __kprobes __check_cc(unsigned long cpsr) | ||
1540 | { | ||
1541 | return (~cpsr) & PSR_C_BIT; | ||
1542 | } | ||
1543 | |||
1544 | static unsigned long __kprobes __check_mi(unsigned long cpsr) | ||
1545 | { | ||
1546 | return cpsr & PSR_N_BIT; | ||
1547 | } | ||
1548 | |||
1549 | static unsigned long __kprobes __check_pl(unsigned long cpsr) | ||
1550 | { | ||
1551 | return (~cpsr) & PSR_N_BIT; | ||
1552 | } | ||
1553 | |||
1554 | static unsigned long __kprobes __check_vs(unsigned long cpsr) | ||
1555 | { | ||
1556 | return cpsr & PSR_V_BIT; | ||
1557 | } | ||
1558 | |||
1559 | static unsigned long __kprobes __check_vc(unsigned long cpsr) | ||
1560 | { | ||
1561 | return (~cpsr) & PSR_V_BIT; | ||
1562 | } | ||
1563 | |||
1564 | static unsigned long __kprobes __check_hi(unsigned long cpsr) | ||
1565 | { | ||
1566 | cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ | ||
1567 | return cpsr & PSR_C_BIT; | ||
1568 | } | ||
1569 | |||
1570 | static unsigned long __kprobes __check_ls(unsigned long cpsr) | ||
1571 | { | ||
1572 | cpsr &= ~(cpsr >> 1); /* PSR_C_BIT &= ~PSR_Z_BIT */ | ||
1573 | return (~cpsr) & PSR_C_BIT; | ||
1574 | } | ||
1575 | |||
1576 | static unsigned long __kprobes __check_ge(unsigned long cpsr) | ||
1577 | { | ||
1578 | cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
1579 | return (~cpsr) & PSR_N_BIT; | ||
1580 | } | ||
1581 | |||
1582 | static unsigned long __kprobes __check_lt(unsigned long cpsr) | ||
1583 | { | ||
1584 | cpsr ^= (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
1585 | return cpsr & PSR_N_BIT; | ||
1586 | } | ||
1587 | |||
1588 | static unsigned long __kprobes __check_gt(unsigned long cpsr) | ||
1589 | { | ||
1590 | unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
1591 | temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ | ||
1592 | return (~temp) & PSR_N_BIT; | ||
1593 | } | ||
1594 | |||
1595 | static unsigned long __kprobes __check_le(unsigned long cpsr) | ||
1596 | { | ||
1597 | unsigned long temp = cpsr ^ (cpsr << 3); /* PSR_N_BIT ^= PSR_V_BIT */ | ||
1598 | temp |= (cpsr << 1); /* PSR_N_BIT |= PSR_Z_BIT */ | ||
1599 | return temp & PSR_N_BIT; | ||
1600 | } | ||
1601 | |||
1602 | static unsigned long __kprobes __check_al(unsigned long cpsr) | ||
1603 | { | ||
1604 | return true; | ||
1605 | } | ||
1606 | |||
1607 | static kprobe_check_cc * const condition_checks[16] = { | ||
1608 | &__check_eq, &__check_ne, &__check_cs, &__check_cc, | ||
1609 | &__check_mi, &__check_pl, &__check_vs, &__check_vc, | ||
1610 | &__check_hi, &__check_ls, &__check_ge, &__check_lt, | ||
1611 | &__check_gt, &__check_le, &__check_al, &__check_al | ||
1612 | }; | ||
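The __check_hi/ls/ge/lt/gt/le helpers above avoid explicit flag comparisons by shifting one flag bit onto another before masking. A small user-space sketch, assuming the usual ARM PSR bit positions (N=31, Z=30, C=29, V=28), checks the GE trick against a naive N == V comparison:

#include <stdio.h>

#define PSR_N_BIT 0x80000000UL
#define PSR_V_BIT 0x10000000UL

static unsigned long check_ge_trick(unsigned long cpsr)
{
        cpsr ^= (cpsr << 3);            /* move V up to the N position and XOR */
        return (~cpsr) & PSR_N_BIT;     /* non-zero iff N == V */
}

static int check_ge_naive(unsigned long cpsr)
{
        return !!(cpsr & PSR_N_BIT) == !!(cpsr & PSR_V_BIT);
}

int main(void)
{
        unsigned long flags[] = { 0, PSR_N_BIT, PSR_V_BIT, PSR_N_BIT | PSR_V_BIT };
        int i;

        for (i = 0; i < 4; i++)
                printf("N=%d V=%d  trick=%d naive=%d\n",
                       !!(flags[i] & PSR_N_BIT), !!(flags[i] & PSR_V_BIT),
                       !!check_ge_trick(flags[i]), check_ge_naive(flags[i]));
        return 0;
}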
1613 | |||
1614 | /* Return: | ||
1615 | * INSN_REJECTED If instruction is one not allowed to kprobe, | ||
1616 | * INSN_GOOD If instruction is supported and uses instruction slot, | ||
1617 | * INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot. | ||
1618 | * | ||
1619 | * For instructions we don't want to kprobe (INSN_REJECTED return result): | ||
1620 | * These are generally ones that modify the processor state, making | ||
1621 | * them "hard" to simulate, such as instructions that switch | ||
1622 | * processor modes or make accesses in alternate modes. Any of these | ||
1623 | * could be simulated if the work were put into it, but the return | ||
1624 | * would be low considering they should also be very rare. | ||
1625 | */ | ||
1626 | enum kprobe_insn __kprobes | ||
1627 | arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1628 | { | ||
1629 | asi->insn_check_cc = condition_checks[insn>>28]; | ||
1630 | asi->insn[1] = KPROBE_RETURN_INSTRUCTION; | ||
1631 | |||
1632 | if ((insn & 0xf0000000) == 0xf0000000) | ||
1633 | |||
1634 | return space_1111(insn, asi); | ||
1635 | |||
1636 | else if ((insn & 0x0e000000) == 0x00000000) | ||
1637 | |||
1638 | return space_cccc_000x(insn, asi); | ||
1639 | |||
1640 | else if ((insn & 0x0e000000) == 0x02000000) | ||
1641 | |||
1642 | return space_cccc_001x(insn, asi); | ||
1643 | |||
1644 | else if ((insn & 0x0f000010) == 0x06000010) | ||
1645 | |||
1646 | return space_cccc_0110__1(insn, asi); | ||
1647 | |||
1648 | else if ((insn & 0x0f000010) == 0x07000010) | ||
1649 | |||
1650 | return space_cccc_0111__1(insn, asi); | ||
1651 | |||
1652 | else if ((insn & 0x0c000000) == 0x04000000) | ||
1653 | |||
1654 | return space_cccc_01xx(insn, asi); | ||
1655 | |||
1656 | else if ((insn & 0x0e000000) == 0x08000000) | ||
1657 | |||
1658 | return space_cccc_100x(insn, asi); | ||
1659 | |||
1660 | else if ((insn & 0x0e000000) == 0x0a000000) | ||
1661 | |||
1662 | return space_cccc_101x(insn, asi); | ||
1663 | |||
1664 | return space_cccc_11xx(insn, asi); | ||
1665 | } | ||
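The cascade above is a pure mask/value dispatch on the top opcode bits. A shortened user-space sketch of the same classification (only the first few arms are reproduced), fed the "mov ip, sp" opcode that space_cccc_000x() special-cases:

#include <stdint.h>
#include <stdio.h>

static const char *classify(uint32_t insn)
{
        if ((insn & 0xf0000000) == 0xf0000000)
                return "space_1111 (unconditional)";
        if ((insn & 0x0e000000) == 0x00000000)
                return "space_cccc_000x";
        if ((insn & 0x0e000000) == 0x02000000)
                return "space_cccc_001x";
        return "one of the remaining spaces";
}

int main(void)
{
        printf("%s\n", classify(0xe1a0c00d));   /* mov ip, sp -> space_cccc_000x */
        return 0;
}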
1666 | |||
1667 | void __init arm_kprobe_decode_init(void) | ||
1668 | { | ||
1669 | find_str_pc_offset(); | ||
1670 | } | ||
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c new file mode 100644 index 000000000000..902ca59e8b11 --- /dev/null +++ b/arch/arm/kernel/kprobes-thumb.c | |||
@@ -0,0 +1,1462 @@ | |||
1 | /* | ||
2 | * arch/arm/kernel/kprobes-thumb.c | ||
3 | * | ||
4 | * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/kprobes.h> | ||
13 | |||
14 | #include "kprobes.h" | ||
15 | |||
16 | |||
17 | /* | ||
18 | * True if current instruction is in an IT block. | ||
19 | */ | ||
20 | #define in_it_block(cpsr) ((cpsr & 0x06000c00) != 0x00000000) | ||
21 | |||
22 | /* | ||
23 | * Return the condition code to check for the currently executing instruction. | ||
24 | * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if | ||
25 | * in_it_block returns true. | ||
26 | */ | ||
27 | #define current_cond(cpsr) ((cpsr >> 12) & 0xf) | ||
28 | |||
29 | /* | ||
30 | * Return the PC value for a probe in thumb code. | ||
31 | * This is the address of the probed instruction plus 4. | ||
32 | * We subtract one because the address will have bit zero set to indicate | ||
33 | * a pointer to thumb code. | ||
34 | */ | ||
35 | static inline unsigned long __kprobes thumb_probe_pc(struct kprobe *p) | ||
36 | { | ||
37 | return (unsigned long)p->addr - 1 + 4; | ||
38 | } | ||
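A quick worked example of that calculation, for a hypothetical probe placed on a Thumb instruction at 0x8000 (so p->addr holds 0x8001, with bit 0 flagging Thumb):

#include <stdio.h>

int main(void)
{
        unsigned long addr = 0x8001;            /* hypothetical p->addr value */
        unsigned long pc = addr - 1 + 4;        /* same arithmetic as thumb_probe_pc() */

        printf("probed instruction at 0x%lx, PC seen by it = 0x%lx\n",
               addr - 1, pc);                   /* 0x8000 and 0x8004 */
        return 0;
}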
39 | |||
40 | static void __kprobes | ||
41 | t32_simulate_table_branch(struct kprobe *p, struct pt_regs *regs) | ||
42 | { | ||
43 | kprobe_opcode_t insn = p->opcode; | ||
44 | unsigned long pc = thumb_probe_pc(p); | ||
45 | int rn = (insn >> 16) & 0xf; | ||
46 | int rm = insn & 0xf; | ||
47 | |||
48 | unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn]; | ||
49 | unsigned long rmv = regs->uregs[rm]; | ||
50 | unsigned int halfwords; | ||
51 | |||
52 | if (insn & 0x10) /* TBH */ | ||
53 | halfwords = ((u16 *)rnv)[rmv]; | ||
54 | else /* TBB */ | ||
55 | halfwords = ((u8 *)rnv)[rmv]; | ||
56 | |||
57 | regs->ARM_pc = pc + 2 * halfwords; | ||
58 | } | ||
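To make the table-branch arithmetic concrete, here is a stand-alone sketch of the TBB case, where each byte in the table is a halfword count added to the PC; the table contents and addresses are invented for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint8_t branch_table[] = { 4, 9, 23 };  /* hypothetical halfword offsets */
        unsigned long pc = 0x8000;              /* address of the TBB plus 4 */
        unsigned long rm = 1;                   /* index register value */

        unsigned long target = pc + 2 * branch_table[rm];       /* as in the TBB arm above */
        printf("TBB target: 0x%lx\n", target);  /* 0x8012 */
        return 0;
}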
59 | |||
60 | static void __kprobes | ||
61 | t32_simulate_mrs(struct kprobe *p, struct pt_regs *regs) | ||
62 | { | ||
63 | kprobe_opcode_t insn = p->opcode; | ||
64 | int rd = (insn >> 8) & 0xf; | ||
65 | unsigned long mask = 0xf8ff03df; /* Mask out execution state */ | ||
66 | regs->uregs[rd] = regs->ARM_cpsr & mask; | ||
67 | } | ||
68 | |||
69 | static void __kprobes | ||
70 | t32_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs) | ||
71 | { | ||
72 | kprobe_opcode_t insn = p->opcode; | ||
73 | unsigned long pc = thumb_probe_pc(p); | ||
74 | |||
75 | long offset = insn & 0x7ff; /* imm11 */ | ||
76 | offset += (insn & 0x003f0000) >> 5; /* imm6 */ | ||
77 | offset += (insn & 0x00002000) << 4; /* J1 */ | ||
78 | offset += (insn & 0x00000800) << 7; /* J2 */ | ||
79 | offset -= (insn & 0x04000000) >> 7; /* Apply sign bit */ | ||
80 | |||
81 | regs->ARM_pc = pc + (offset * 2); | ||
82 | } | ||
83 | |||
84 | static enum kprobe_insn __kprobes | ||
85 | t32_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
86 | { | ||
87 | int cc = (insn >> 22) & 0xf; | ||
88 | asi->insn_check_cc = kprobe_condition_checks[cc]; | ||
89 | asi->insn_handler = t32_simulate_cond_branch; | ||
90 | return INSN_GOOD_NO_SLOT; | ||
91 | } | ||
92 | |||
93 | static void __kprobes | ||
94 | t32_simulate_branch(struct kprobe *p, struct pt_regs *regs) | ||
95 | { | ||
96 | kprobe_opcode_t insn = p->opcode; | ||
97 | unsigned long pc = thumb_probe_pc(p); | ||
98 | |||
99 | long offset = insn & 0x7ff; /* imm11 */ | ||
100 | offset += (insn & 0x03ff0000) >> 5; /* imm10 */ | ||
101 | offset += (insn & 0x00002000) << 9; /* J1 */ | ||
102 | offset += (insn & 0x00000800) << 10; /* J2 */ | ||
103 | if (insn & 0x04000000) | ||
104 | offset -= 0x00800000; /* Apply sign bit */ | ||
105 | else | ||
106 | offset ^= 0x00600000; /* Invert J1 and J2 */ | ||
107 | |||
108 | if (insn & (1 << 14)) { | ||
109 | /* BL or BLX */ | ||
110 | regs->ARM_lr = (unsigned long)p->addr + 4; | ||
111 | if (!(insn & (1 << 12))) { | ||
112 | /* BLX so switch to ARM mode */ | ||
113 | regs->ARM_cpsr &= ~PSR_T_BIT; | ||
114 | pc &= ~3; | ||
115 | } | ||
116 | } | ||
117 | |||
118 | regs->ARM_pc = pc + (offset * 2); | ||
119 | } | ||
120 | |||
121 | static void __kprobes | ||
122 | t32_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs) | ||
123 | { | ||
124 | kprobe_opcode_t insn = p->opcode; | ||
125 | unsigned long addr = thumb_probe_pc(p) & ~3; | ||
126 | int rt = (insn >> 12) & 0xf; | ||
127 | unsigned long rtv; | ||
128 | |||
129 | long offset = insn & 0xfff; | ||
130 | if (insn & 0x00800000) | ||
131 | addr += offset; | ||
132 | else | ||
133 | addr -= offset; | ||
134 | |||
135 | if (insn & 0x00400000) { | ||
136 | /* LDR */ | ||
137 | rtv = *(unsigned long *)addr; | ||
138 | if (rt == 15) { | ||
139 | bx_write_pc(rtv, regs); | ||
140 | return; | ||
141 | } | ||
142 | } else if (insn & 0x00200000) { | ||
143 | /* LDRH */ | ||
144 | if (insn & 0x01000000) | ||
145 | rtv = *(s16 *)addr; | ||
146 | else | ||
147 | rtv = *(u16 *)addr; | ||
148 | } else { | ||
149 | /* LDRB */ | ||
150 | if (insn & 0x01000000) | ||
151 | rtv = *(s8 *)addr; | ||
152 | else | ||
153 | rtv = *(u8 *)addr; | ||
154 | } | ||
155 | |||
156 | regs->uregs[rt] = rtv; | ||
157 | } | ||
158 | |||
159 | static enum kprobe_insn __kprobes | ||
160 | t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
161 | { | ||
162 | enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi); | ||
163 | |||
164 | /* Fix up the modified instruction so its halfwords are in the correct order... */ | ||
165 | insn = asi->insn[0]; | ||
166 | ((u16 *)asi->insn)[0] = insn >> 16; | ||
167 | ((u16 *)asi->insn)[1] = insn & 0xffff; | ||
168 | |||
169 | return ret; | ||
170 | } | ||
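The swap is needed because a 32-bit Thumb-2 opcode sits in memory as two halfwords, most significant first, while the decode routines work on the combined 32-bit value. A minimal sketch of the reordering, using a hand-picked example opcode ("ldmia.w sp!, {r4-r8, lr}"):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t insn = 0xe8bd41f0;     /* example encoding of "ldmia.w sp!, {r4-r8, lr}" */
        uint16_t slot[2];

        slot[0] = insn >> 16;           /* first halfword in memory order */
        slot[1] = insn & 0xffff;

        printf("%04x %04x\n", slot[0], slot[1]);        /* e8bd 41f0 */
        return 0;
}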
171 | |||
172 | static void __kprobes | ||
173 | t32_emulate_ldrdstrd(struct kprobe *p, struct pt_regs *regs) | ||
174 | { | ||
175 | kprobe_opcode_t insn = p->opcode; | ||
176 | unsigned long pc = thumb_probe_pc(p) & ~3; | ||
177 | int rt1 = (insn >> 12) & 0xf; | ||
178 | int rt2 = (insn >> 8) & 0xf; | ||
179 | int rn = (insn >> 16) & 0xf; | ||
180 | |||
181 | register unsigned long rt1v asm("r0") = regs->uregs[rt1]; | ||
182 | register unsigned long rt2v asm("r1") = regs->uregs[rt2]; | ||
183 | register unsigned long rnv asm("r2") = (rn == 15) ? pc | ||
184 | : regs->uregs[rn]; | ||
185 | |||
186 | __asm__ __volatile__ ( | ||
187 | "blx %[fn]" | ||
188 | : "=r" (rt1v), "=r" (rt2v), "=r" (rnv) | ||
189 | : "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (p->ainsn.insn_fn) | ||
190 | : "lr", "memory", "cc" | ||
191 | ); | ||
192 | |||
193 | if (rn != 15) | ||
194 | regs->uregs[rn] = rnv; /* Writeback base register */ | ||
195 | regs->uregs[rt1] = rt1v; | ||
196 | regs->uregs[rt2] = rt2v; | ||
197 | } | ||
198 | |||
199 | static void __kprobes | ||
200 | t32_emulate_ldrstr(struct kprobe *p, struct pt_regs *regs) | ||
201 | { | ||
202 | kprobe_opcode_t insn = p->opcode; | ||
203 | int rt = (insn >> 12) & 0xf; | ||
204 | int rn = (insn >> 16) & 0xf; | ||
205 | int rm = insn & 0xf; | ||
206 | |||
207 | register unsigned long rtv asm("r0") = regs->uregs[rt]; | ||
208 | register unsigned long rnv asm("r2") = regs->uregs[rn]; | ||
209 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
210 | |||
211 | __asm__ __volatile__ ( | ||
212 | "blx %[fn]" | ||
213 | : "=r" (rtv), "=r" (rnv) | ||
214 | : "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (p->ainsn.insn_fn) | ||
215 | : "lr", "memory", "cc" | ||
216 | ); | ||
217 | |||
218 | regs->uregs[rn] = rnv; /* Writeback base register */ | ||
219 | if (rt == 15) /* Can't be true for a STR as they aren't allowed */ | ||
220 | bx_write_pc(rtv, regs); | ||
221 | else | ||
222 | regs->uregs[rt] = rtv; | ||
223 | } | ||
224 | |||
225 | static void __kprobes | ||
226 | t32_emulate_rd8rn16rm0_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
227 | { | ||
228 | kprobe_opcode_t insn = p->opcode; | ||
229 | int rd = (insn >> 8) & 0xf; | ||
230 | int rn = (insn >> 16) & 0xf; | ||
231 | int rm = insn & 0xf; | ||
232 | |||
233 | register unsigned long rdv asm("r1") = regs->uregs[rd]; | ||
234 | register unsigned long rnv asm("r2") = regs->uregs[rn]; | ||
235 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
236 | unsigned long cpsr = regs->ARM_cpsr; | ||
237 | |||
238 | __asm__ __volatile__ ( | ||
239 | "msr cpsr_fs, %[cpsr] \n\t" | ||
240 | "blx %[fn] \n\t" | ||
241 | "mrs %[cpsr], cpsr \n\t" | ||
242 | : "=r" (rdv), [cpsr] "=r" (cpsr) | ||
243 | : "0" (rdv), "r" (rnv), "r" (rmv), | ||
244 | "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) | ||
245 | : "lr", "memory", "cc" | ||
246 | ); | ||
247 | |||
248 | regs->uregs[rd] = rdv; | ||
249 | regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); | ||
250 | } | ||
251 | |||
252 | static void __kprobes | ||
253 | t32_emulate_rd8pc16_noflags(struct kprobe *p, struct pt_regs *regs) | ||
254 | { | ||
255 | kprobe_opcode_t insn = p->opcode; | ||
256 | unsigned long pc = thumb_probe_pc(p); | ||
257 | int rd = (insn >> 8) & 0xf; | ||
258 | |||
259 | register unsigned long rdv asm("r1") = regs->uregs[rd]; | ||
260 | register unsigned long rnv asm("r2") = pc & ~3; | ||
261 | |||
262 | __asm__ __volatile__ ( | ||
263 | "blx %[fn]" | ||
264 | : "=r" (rdv) | ||
265 | : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn) | ||
266 | : "lr", "memory", "cc" | ||
267 | ); | ||
268 | |||
269 | regs->uregs[rd] = rdv; | ||
270 | } | ||
271 | |||
272 | static void __kprobes | ||
273 | t32_emulate_rd8rn16_noflags(struct kprobe *p, struct pt_regs *regs) | ||
274 | { | ||
275 | kprobe_opcode_t insn = p->opcode; | ||
276 | int rd = (insn >> 8) & 0xf; | ||
277 | int rn = (insn >> 16) & 0xf; | ||
278 | |||
279 | register unsigned long rdv asm("r1") = regs->uregs[rd]; | ||
280 | register unsigned long rnv asm("r2") = regs->uregs[rn]; | ||
281 | |||
282 | __asm__ __volatile__ ( | ||
283 | "blx %[fn]" | ||
284 | : "=r" (rdv) | ||
285 | : "0" (rdv), "r" (rnv), [fn] "r" (p->ainsn.insn_fn) | ||
286 | : "lr", "memory", "cc" | ||
287 | ); | ||
288 | |||
289 | regs->uregs[rd] = rdv; | ||
290 | } | ||
291 | |||
292 | static void __kprobes | ||
293 | t32_emulate_rdlo12rdhi8rn16rm0_noflags(struct kprobe *p, struct pt_regs *regs) | ||
294 | { | ||
295 | kprobe_opcode_t insn = p->opcode; | ||
296 | int rdlo = (insn >> 12) & 0xf; | ||
297 | int rdhi = (insn >> 8) & 0xf; | ||
298 | int rn = (insn >> 16) & 0xf; | ||
299 | int rm = insn & 0xf; | ||
300 | |||
301 | register unsigned long rdlov asm("r0") = regs->uregs[rdlo]; | ||
302 | register unsigned long rdhiv asm("r1") = regs->uregs[rdhi]; | ||
303 | register unsigned long rnv asm("r2") = regs->uregs[rn]; | ||
304 | register unsigned long rmv asm("r3") = regs->uregs[rm]; | ||
305 | |||
306 | __asm__ __volatile__ ( | ||
307 | "blx %[fn]" | ||
308 | : "=r" (rdlov), "=r" (rdhiv) | ||
309 | : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv), | ||
310 | [fn] "r" (p->ainsn.insn_fn) | ||
311 | : "lr", "memory", "cc" | ||
312 | ); | ||
313 | |||
314 | regs->uregs[rdlo] = rdlov; | ||
315 | regs->uregs[rdhi] = rdhiv; | ||
316 | } | ||
317 | |||
318 | /* These emulation encodings are functionally equivalent... */ | ||
319 | #define t32_emulate_rd8rn16rm0ra12_noflags \ | ||
320 | t32_emulate_rdlo12rdhi8rn16rm0_noflags | ||
321 | |||
322 | static const union decode_item t32_table_1110_100x_x0xx[] = { | ||
323 | /* Load/store multiple instructions */ | ||
324 | |||
325 | /* Rn is PC 1110 100x x0xx 1111 xxxx xxxx xxxx xxxx */ | ||
326 | DECODE_REJECT (0xfe4f0000, 0xe80f0000), | ||
327 | |||
328 | /* SRS 1110 1000 00x0 xxxx xxxx xxxx xxxx xxxx */ | ||
329 | /* RFE 1110 1000 00x1 xxxx xxxx xxxx xxxx xxxx */ | ||
330 | DECODE_REJECT (0xffc00000, 0xe8000000), | ||
331 | /* SRS 1110 1001 10x0 xxxx xxxx xxxx xxxx xxxx */ | ||
332 | /* RFE 1110 1001 10x1 xxxx xxxx xxxx xxxx xxxx */ | ||
333 | DECODE_REJECT (0xffc00000, 0xe9800000), | ||
334 | |||
335 | /* STM Rn, {...pc} 1110 100x x0x0 xxxx 1xxx xxxx xxxx xxxx */ | ||
336 | DECODE_REJECT (0xfe508000, 0xe8008000), | ||
337 | /* LDM Rn, {...lr,pc} 1110 100x x0x1 xxxx 11xx xxxx xxxx xxxx */ | ||
338 | DECODE_REJECT (0xfe50c000, 0xe810c000), | ||
339 | /* LDM/STM Rn, {...sp} 1110 100x x0xx xxxx xx1x xxxx xxxx xxxx */ | ||
340 | DECODE_REJECT (0xfe402000, 0xe8002000), | ||
341 | |||
342 | /* STMIA 1110 1000 10x0 xxxx xxxx xxxx xxxx xxxx */ | ||
343 | /* LDMIA 1110 1000 10x1 xxxx xxxx xxxx xxxx xxxx */ | ||
344 | /* STMDB 1110 1001 00x0 xxxx xxxx xxxx xxxx xxxx */ | ||
345 | /* LDMDB 1110 1001 00x1 xxxx xxxx xxxx xxxx xxxx */ | ||
346 | DECODE_CUSTOM (0xfe400000, 0xe8000000, t32_decode_ldmstm), | ||
347 | |||
348 | DECODE_END | ||
349 | }; | ||
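The DECODE_* tables here and below appear to be scanned in order, each entry carrying a mask/value pair and an action (reject, simulate, emulate, or a custom decoder); the real decode_item union lives in arch/arm/kernel/kprobes.h. The sketch below only models the mask/value matching step with an invented struct and an abridged copy of three entries from the table above, so treat it as an assumption about the mechanism rather than the actual implementation:

#include <stdint.h>
#include <stdio.h>

struct match_rule {                     /* simplified stand-in for decode_item */
        uint32_t mask;
        uint32_t value;
        const char *action;
};

static const struct match_rule rules[] = {
        { 0xfe4f0000, 0xe80f0000, "reject: Rn is PC" },
        { 0xfe508000, 0xe8008000, "reject: STM with PC in list" },
        { 0xfe400000, 0xe8000000, "custom decode: LDM/STM" },
        { 0, 0, NULL }                  /* plays the role of DECODE_END */
};

static const char *lookup(uint32_t insn)
{
        const struct match_rule *r;

        for (r = rules; r->action; r++)
                if ((insn & r->mask) == r->value)
                        return r->action;
        return "no match";
}

int main(void)
{
        printf("%s\n", lookup(0xe8bd41f0));     /* ldmia.w sp!, {r4-r8, lr} */
        return 0;
}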
350 | |||
351 | static const union decode_item t32_table_1110_100x_x1xx[] = { | ||
352 | /* Load/store dual, load/store exclusive, table branch */ | ||
353 | |||
354 | /* STRD (immediate) 1110 1000 x110 xxxx xxxx xxxx xxxx xxxx */ | ||
355 | /* LDRD (immediate) 1110 1000 x111 xxxx xxxx xxxx xxxx xxxx */ | ||
356 | DECODE_OR (0xff600000, 0xe8600000), | ||
357 | /* STRD (immediate) 1110 1001 x1x0 xxxx xxxx xxxx xxxx xxxx */ | ||
358 | /* LDRD (immediate) 1110 1001 x1x1 xxxx xxxx xxxx xxxx xxxx */ | ||
359 | DECODE_EMULATEX (0xff400000, 0xe9400000, t32_emulate_ldrdstrd, | ||
360 | REGS(NOPCWB, NOSPPC, NOSPPC, 0, 0)), | ||
361 | |||
362 | /* TBB 1110 1000 1101 xxxx xxxx xxxx 0000 xxxx */ | ||
363 | /* TBH 1110 1000 1101 xxxx xxxx xxxx 0001 xxxx */ | ||
364 | DECODE_SIMULATEX(0xfff000e0, 0xe8d00000, t32_simulate_table_branch, | ||
365 | REGS(NOSP, 0, 0, 0, NOSPPC)), | ||
366 | |||
367 | /* STREX 1110 1000 0100 xxxx xxxx xxxx xxxx xxxx */ | ||
368 | /* LDREX 1110 1000 0101 xxxx xxxx xxxx xxxx xxxx */ | ||
369 | /* STREXB 1110 1000 1100 xxxx xxxx xxxx 0100 xxxx */ | ||
370 | /* STREXH 1110 1000 1100 xxxx xxxx xxxx 0101 xxxx */ | ||
371 | /* STREXD 1110 1000 1100 xxxx xxxx xxxx 0111 xxxx */ | ||
372 | /* LDREXB 1110 1000 1101 xxxx xxxx xxxx 0100 xxxx */ | ||
373 | /* LDREXH 1110 1000 1101 xxxx xxxx xxxx 0101 xxxx */ | ||
374 | /* LDREXD 1110 1000 1101 xxxx xxxx xxxx 0111 xxxx */ | ||
375 | /* And unallocated instructions... */ | ||
376 | DECODE_END | ||
377 | }; | ||
378 | |||
379 | static const union decode_item t32_table_1110_101x[] = { | ||
380 | /* Data-processing (shifted register) */ | ||
381 | |||
382 | /* TST 1110 1010 0001 xxxx xxxx 1111 xxxx xxxx */ | ||
383 | /* TEQ 1110 1010 1001 xxxx xxxx 1111 xxxx xxxx */ | ||
384 | DECODE_EMULATEX (0xff700f00, 0xea100f00, t32_emulate_rd8rn16rm0_rwflags, | ||
385 | REGS(NOSPPC, 0, 0, 0, NOSPPC)), | ||
386 | |||
387 | /* CMN 1110 1011 0001 xxxx xxxx 1111 xxxx xxxx */ | ||
388 | DECODE_OR (0xfff00f00, 0xeb100f00), | ||
389 | /* CMP 1110 1011 1011 xxxx xxxx 1111 xxxx xxxx */ | ||
390 | DECODE_EMULATEX (0xfff00f00, 0xebb00f00, t32_emulate_rd8rn16rm0_rwflags, | ||
391 | REGS(NOPC, 0, 0, 0, NOSPPC)), | ||
392 | |||
393 | /* MOV 1110 1010 010x 1111 xxxx xxxx xxxx xxxx */ | ||
394 | /* MVN 1110 1010 011x 1111 xxxx xxxx xxxx xxxx */ | ||
395 | DECODE_EMULATEX (0xffcf0000, 0xea4f0000, t32_emulate_rd8rn16rm0_rwflags, | ||
396 | REGS(0, 0, NOSPPC, 0, NOSPPC)), | ||
397 | |||
398 | /* ??? 1110 1010 101x xxxx xxxx xxxx xxxx xxxx */ | ||
399 | /* ??? 1110 1010 111x xxxx xxxx xxxx xxxx xxxx */ | ||
400 | DECODE_REJECT (0xffa00000, 0xeaa00000), | ||
401 | /* ??? 1110 1011 001x xxxx xxxx xxxx xxxx xxxx */ | ||
402 | DECODE_REJECT (0xffe00000, 0xeb200000), | ||
403 | /* ??? 1110 1011 100x xxxx xxxx xxxx xxxx xxxx */ | ||
404 | DECODE_REJECT (0xffe00000, 0xeb800000), | ||
405 | /* ??? 1110 1011 111x xxxx xxxx xxxx xxxx xxxx */ | ||
406 | DECODE_REJECT (0xffe00000, 0xebe00000), | ||
407 | |||
408 | /* ADD/SUB SP, SP, Rm, LSL #0..3 */ | ||
409 | /* 1110 1011 x0xx 1101 x000 1101 xx00 xxxx */ | ||
410 | DECODE_EMULATEX (0xff4f7f30, 0xeb0d0d00, t32_emulate_rd8rn16rm0_rwflags, | ||
411 | REGS(SP, 0, SP, 0, NOSPPC)), | ||
412 | |||
413 | /* ADD/SUB SP, SP, Rm, shift */ | ||
414 | /* 1110 1011 x0xx 1101 xxxx 1101 xxxx xxxx */ | ||
415 | DECODE_REJECT (0xff4f0f00, 0xeb0d0d00), | ||
416 | |||
417 | /* ADD/SUB Rd, SP, Rm, shift */ | ||
418 | /* 1110 1011 x0xx 1101 xxxx xxxx xxxx xxxx */ | ||
419 | DECODE_EMULATEX (0xff4f0000, 0xeb0d0000, t32_emulate_rd8rn16rm0_rwflags, | ||
420 | REGS(SP, 0, NOPC, 0, NOSPPC)), | ||
421 | |||
422 | /* AND 1110 1010 000x xxxx xxxx xxxx xxxx xxxx */ | ||
423 | /* BIC 1110 1010 001x xxxx xxxx xxxx xxxx xxxx */ | ||
424 | /* ORR 1110 1010 010x xxxx xxxx xxxx xxxx xxxx */ | ||
425 | /* ORN 1110 1010 011x xxxx xxxx xxxx xxxx xxxx */ | ||
426 | /* EOR 1110 1010 100x xxxx xxxx xxxx xxxx xxxx */ | ||
427 | /* PKH 1110 1010 110x xxxx xxxx xxxx xxxx xxxx */ | ||
428 | /* ADD 1110 1011 000x xxxx xxxx xxxx xxxx xxxx */ | ||
429 | /* ADC 1110 1011 010x xxxx xxxx xxxx xxxx xxxx */ | ||
430 | /* SBC 1110 1011 011x xxxx xxxx xxxx xxxx xxxx */ | ||
431 | /* SUB 1110 1011 101x xxxx xxxx xxxx xxxx xxxx */ | ||
432 | /* RSB 1110 1011 110x xxxx xxxx xxxx xxxx xxxx */ | ||
433 | DECODE_EMULATEX (0xfe000000, 0xea000000, t32_emulate_rd8rn16rm0_rwflags, | ||
434 | REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), | ||
435 | |||
436 | DECODE_END | ||
437 | }; | ||
438 | |||
439 | static const union decode_item t32_table_1111_0x0x___0[] = { | ||
440 | /* Data-processing (modified immediate) */ | ||
441 | |||
442 | /* TST 1111 0x00 0001 xxxx 0xxx 1111 xxxx xxxx */ | ||
443 | /* TEQ 1111 0x00 1001 xxxx 0xxx 1111 xxxx xxxx */ | ||
444 | DECODE_EMULATEX (0xfb708f00, 0xf0100f00, t32_emulate_rd8rn16rm0_rwflags, | ||
445 | REGS(NOSPPC, 0, 0, 0, 0)), | ||
446 | |||
447 | /* CMN 1111 0x01 0001 xxxx 0xxx 1111 xxxx xxxx */ | ||
448 | DECODE_OR (0xfbf08f00, 0xf1100f00), | ||
449 | /* CMP 1111 0x01 1011 xxxx 0xxx 1111 xxxx xxxx */ | ||
450 | DECODE_EMULATEX (0xfbf08f00, 0xf1b00f00, t32_emulate_rd8rn16rm0_rwflags, | ||
451 | REGS(NOPC, 0, 0, 0, 0)), | ||
452 | |||
453 | /* MOV 1111 0x00 010x 1111 0xxx xxxx xxxx xxxx */ | ||
454 | /* MVN 1111 0x00 011x 1111 0xxx xxxx xxxx xxxx */ | ||
455 | DECODE_EMULATEX (0xfbcf8000, 0xf04f0000, t32_emulate_rd8rn16rm0_rwflags, | ||
456 | REGS(0, 0, NOSPPC, 0, 0)), | ||
457 | |||
458 | /* ??? 1111 0x00 101x xxxx 0xxx xxxx xxxx xxxx */ | ||
459 | DECODE_REJECT (0xfbe08000, 0xf0a00000), | ||
460 | /* ??? 1111 0x00 110x xxxx 0xxx xxxx xxxx xxxx */ | ||
461 | /* ??? 1111 0x00 111x xxxx 0xxx xxxx xxxx xxxx */ | ||
462 | DECODE_REJECT (0xfbc08000, 0xf0c00000), | ||
463 | /* ??? 1111 0x01 001x xxxx 0xxx xxxx xxxx xxxx */ | ||
464 | DECODE_REJECT (0xfbe08000, 0xf1200000), | ||
465 | /* ??? 1111 0x01 100x xxxx 0xxx xxxx xxxx xxxx */ | ||
466 | DECODE_REJECT (0xfbe08000, 0xf1800000), | ||
467 | /* ??? 1111 0x01 111x xxxx 0xxx xxxx xxxx xxxx */ | ||
468 | DECODE_REJECT (0xfbe08000, 0xf1e00000), | ||
469 | |||
470 | /* ADD Rd, SP, #imm 1111 0x01 000x 1101 0xxx xxxx xxxx xxxx */ | ||
471 | /* SUB Rd, SP, #imm 1111 0x01 101x 1101 0xxx xxxx xxxx xxxx */ | ||
472 | DECODE_EMULATEX (0xfb4f8000, 0xf10d0000, t32_emulate_rd8rn16rm0_rwflags, | ||
473 | REGS(SP, 0, NOPC, 0, 0)), | ||
474 | |||
475 | /* AND 1111 0x00 000x xxxx 0xxx xxxx xxxx xxxx */ | ||
476 | /* BIC 1111 0x00 001x xxxx 0xxx xxxx xxxx xxxx */ | ||
477 | /* ORR 1111 0x00 010x xxxx 0xxx xxxx xxxx xxxx */ | ||
478 | /* ORN 1111 0x00 011x xxxx 0xxx xxxx xxxx xxxx */ | ||
479 | /* EOR 1111 0x00 100x xxxx 0xxx xxxx xxxx xxxx */ | ||
480 | /* ADD 1111 0x01 000x xxxx 0xxx xxxx xxxx xxxx */ | ||
481 | /* ADC 1111 0x01 010x xxxx 0xxx xxxx xxxx xxxx */ | ||
482 | /* SBC 1111 0x01 011x xxxx 0xxx xxxx xxxx xxxx */ | ||
483 | /* SUB 1111 0x01 101x xxxx 0xxx xxxx xxxx xxxx */ | ||
484 | /* RSB 1111 0x01 110x xxxx 0xxx xxxx xxxx xxxx */ | ||
485 | DECODE_EMULATEX (0xfa008000, 0xf0000000, t32_emulate_rd8rn16rm0_rwflags, | ||
486 | REGS(NOSPPC, 0, NOSPPC, 0, 0)), | ||
487 | |||
488 | DECODE_END | ||
489 | }; | ||
490 | |||
491 | static const union decode_item t32_table_1111_0x1x___0[] = { | ||
492 | /* Data-processing (plain binary immediate) */ | ||
493 | |||
494 | /* ADDW Rd, PC, #imm 1111 0x10 0000 1111 0xxx xxxx xxxx xxxx */ | ||
495 | DECODE_OR (0xfbff8000, 0xf20f0000), | ||
496 | /* SUBW Rd, PC, #imm 1111 0x10 1010 1111 0xxx xxxx xxxx xxxx */ | ||
497 | DECODE_EMULATEX (0xfbff8000, 0xf2af0000, t32_emulate_rd8pc16_noflags, | ||
498 | REGS(PC, 0, NOSPPC, 0, 0)), | ||
499 | |||
500 | /* ADDW SP, SP, #imm 1111 0x10 0000 1101 0xxx 1101 xxxx xxxx */ | ||
501 | DECODE_OR (0xfbff8f00, 0xf20d0d00), | ||
502 | /* SUBW SP, SP, #imm 1111 0x10 1010 1101 0xxx 1101 xxxx xxxx */ | ||
503 | DECODE_EMULATEX (0xfbff8f00, 0xf2ad0d00, t32_emulate_rd8rn16_noflags, | ||
504 | REGS(SP, 0, SP, 0, 0)), | ||
505 | |||
506 | /* ADDW 1111 0x10 0000 xxxx 0xxx xxxx xxxx xxxx */ | ||
507 | DECODE_OR (0xfbf08000, 0xf2000000), | ||
508 | /* SUBW 1111 0x10 1010 xxxx 0xxx xxxx xxxx xxxx */ | ||
509 | DECODE_EMULATEX (0xfbf08000, 0xf2a00000, t32_emulate_rd8rn16_noflags, | ||
510 | REGS(NOPCX, 0, NOSPPC, 0, 0)), | ||
511 | |||
512 | /* MOVW 1111 0x10 0100 xxxx 0xxx xxxx xxxx xxxx */ | ||
513 | /* MOVT 1111 0x10 1100 xxxx 0xxx xxxx xxxx xxxx */ | ||
514 | DECODE_EMULATEX (0xfb708000, 0xf2400000, t32_emulate_rd8rn16_noflags, | ||
515 | REGS(0, 0, NOSPPC, 0, 0)), | ||
516 | |||
517 | /* SSAT16 1111 0x11 0010 xxxx 0000 xxxx 00xx xxxx */ | ||
518 | /* SSAT 1111 0x11 00x0 xxxx 0xxx xxxx xxxx xxxx */ | ||
519 | /* USAT16 1111 0x11 1010 xxxx 0000 xxxx 00xx xxxx */ | ||
520 | /* USAT 1111 0x11 10x0 xxxx 0xxx xxxx xxxx xxxx */ | ||
521 | DECODE_EMULATEX (0xfb508000, 0xf3000000, t32_emulate_rd8rn16rm0_rwflags, | ||
522 | REGS(NOSPPC, 0, NOSPPC, 0, 0)), | ||
523 | |||
524 | /* SBFX 1111 0x11 0100 xxxx 0xxx xxxx xxxx xxxx */ | ||
525 | /* UBFX 1111 0x11 1100 xxxx 0xxx xxxx xxxx xxxx */ | ||
526 | DECODE_EMULATEX (0xfb708000, 0xf3400000, t32_emulate_rd8rn16_noflags, | ||
527 | REGS(NOSPPC, 0, NOSPPC, 0, 0)), | ||
528 | |||
529 | /* BFC 1111 0x11 0110 1111 0xxx xxxx xxxx xxxx */ | ||
530 | DECODE_EMULATEX (0xfbff8000, 0xf36f0000, t32_emulate_rd8rn16_noflags, | ||
531 | REGS(0, 0, NOSPPC, 0, 0)), | ||
532 | |||
533 | /* BFI 1111 0x11 0110 xxxx 0xxx xxxx xxxx xxxx */ | ||
534 | DECODE_EMULATEX (0xfbf08000, 0xf3600000, t32_emulate_rd8rn16_noflags, | ||
535 | REGS(NOSPPCX, 0, NOSPPC, 0, 0)), | ||
536 | |||
537 | DECODE_END | ||
538 | }; | ||
539 | |||
540 | static const union decode_item t32_table_1111_0xxx___1[] = { | ||
541 | /* Branches and miscellaneous control */ | ||
542 | |||
543 | /* YIELD 1111 0011 1010 xxxx 10x0 x000 0000 0001 */ | ||
544 | DECODE_OR (0xfff0d7ff, 0xf3a08001), | ||
545 | /* SEV 1111 0011 1010 xxxx 10x0 x000 0000 0100 */ | ||
546 | DECODE_EMULATE (0xfff0d7ff, 0xf3a08004, kprobe_emulate_none), | ||
547 | /* NOP 1111 0011 1010 xxxx 10x0 x000 0000 0000 */ | ||
548 | /* WFE 1111 0011 1010 xxxx 10x0 x000 0000 0010 */ | ||
549 | /* WFI 1111 0011 1010 xxxx 10x0 x000 0000 0011 */ | ||
550 | DECODE_SIMULATE (0xfff0d7fc, 0xf3a08000, kprobe_simulate_nop), | ||
551 | |||
552 | /* MRS Rd, CPSR 1111 0011 1110 xxxx 10x0 xxxx xxxx xxxx */ | ||
553 | DECODE_SIMULATEX(0xfff0d000, 0xf3e08000, t32_simulate_mrs, | ||
554 | REGS(0, 0, NOSPPC, 0, 0)), | ||
555 | |||
556 | /* | ||
557 | * Unsupported instructions | ||
558 | * 1111 0x11 1xxx xxxx 10x0 xxxx xxxx xxxx | ||
559 | * | ||
560 | * MSR 1111 0011 100x xxxx 10x0 xxxx xxxx xxxx | ||
561 | * DBG hint 1111 0011 1010 xxxx 10x0 x000 1111 xxxx | ||
562 | * Unallocated hints 1111 0011 1010 xxxx 10x0 x000 xxxx xxxx | ||
563 | * CPS 1111 0011 1010 xxxx 10x0 xxxx xxxx xxxx | ||
564 | * CLREX/DSB/DMB/ISB 1111 0011 1011 xxxx 10x0 xxxx xxxx xxxx | ||
565 | * BXJ 1111 0011 1100 xxxx 10x0 xxxx xxxx xxxx | ||
566 | * SUBS PC,LR,#<imm8> 1111 0011 1101 xxxx 10x0 xxxx xxxx xxxx | ||
567 | * MRS Rd, SPSR 1111 0011 1111 xxxx 10x0 xxxx xxxx xxxx | ||
568 | * SMC 1111 0111 1111 xxxx 1000 xxxx xxxx xxxx | ||
569 | * UNDEFINED 1111 0111 1111 xxxx 1010 xxxx xxxx xxxx | ||
570 | * ??? 1111 0111 1xxx xxxx 1010 xxxx xxxx xxxx | ||
571 | */ | ||
572 | DECODE_REJECT (0xfb80d000, 0xf3808000), | ||
573 | |||
574 | /* Bcc 1111 0xxx xxxx xxxx 10x0 xxxx xxxx xxxx */ | ||
575 | DECODE_CUSTOM (0xf800d000, 0xf0008000, t32_decode_cond_branch), | ||
576 | |||
577 | /* BLX 1111 0xxx xxxx xxxx 11x0 xxxx xxxx xxx0 */ | ||
578 | DECODE_OR (0xf800d001, 0xf000c000), | ||
579 | /* B 1111 0xxx xxxx xxxx 10x1 xxxx xxxx xxxx */ | ||
580 | /* BL 1111 0xxx xxxx xxxx 11x1 xxxx xxxx xxxx */ | ||
581 | DECODE_SIMULATE (0xf8009000, 0xf0009000, t32_simulate_branch), | ||
582 | |||
583 | DECODE_END | ||
584 | }; | ||
585 | |||
586 | static const union decode_item t32_table_1111_100x_x0x1__1111[] = { | ||
587 | /* Memory hints */ | ||
588 | |||
589 | /* PLD (literal) 1111 1000 x001 1111 1111 xxxx xxxx xxxx */ | ||
590 | /* PLI (literal) 1111 1001 x001 1111 1111 xxxx xxxx xxxx */ | ||
591 | DECODE_SIMULATE (0xfe7ff000, 0xf81ff000, kprobe_simulate_nop), | ||
592 | |||
593 | /* PLD{W} (immediate) 1111 1000 10x1 xxxx 1111 xxxx xxxx xxxx */ | ||
594 | DECODE_OR (0xffd0f000, 0xf890f000), | ||
595 | /* PLD{W} (immediate) 1111 1000 00x1 xxxx 1111 1100 xxxx xxxx */ | ||
596 | DECODE_OR (0xffd0ff00, 0xf810fc00), | ||
597 | /* PLI (immediate) 1111 1001 1001 xxxx 1111 xxxx xxxx xxxx */ | ||
598 | DECODE_OR (0xfff0f000, 0xf990f000), | ||
599 | /* PLI (immediate) 1111 1001 0001 xxxx 1111 1100 xxxx xxxx */ | ||
600 | DECODE_SIMULATEX(0xfff0ff00, 0xf910fc00, kprobe_simulate_nop, | ||
601 | REGS(NOPCX, 0, 0, 0, 0)), | ||
602 | |||
603 | /* PLD{W} (register) 1111 1000 00x1 xxxx 1111 0000 00xx xxxx */ | ||
604 | DECODE_OR (0xffd0ffc0, 0xf810f000), | ||
605 | /* PLI (register) 1111 1001 0001 xxxx 1111 0000 00xx xxxx */ | ||
606 | DECODE_SIMULATEX(0xfff0ffc0, 0xf910f000, kprobe_simulate_nop, | ||
607 | REGS(NOPCX, 0, 0, 0, NOSPPC)), | ||
608 | |||
609 | /* Other unallocated instructions... */ | ||
610 | DECODE_END | ||
611 | }; | ||
612 | |||
613 | static const union decode_item t32_table_1111_100x[] = { | ||
614 | /* Store/Load single data item */ | ||
615 | |||
616 | /* ??? 1111 100x x11x xxxx xxxx xxxx xxxx xxxx */ | ||
617 | DECODE_REJECT (0xfe600000, 0xf8600000), | ||
618 | |||
619 | /* ??? 1111 1001 0101 xxxx xxxx xxxx xxxx xxxx */ | ||
620 | DECODE_REJECT (0xfff00000, 0xf9500000), | ||
621 | |||
622 | /* ??? 1111 100x 0xxx xxxx xxxx 10x0 xxxx xxxx */ | ||
623 | DECODE_REJECT (0xfe800d00, 0xf8000800), | ||
624 | |||
625 | /* STRBT 1111 1000 0000 xxxx xxxx 1110 xxxx xxxx */ | ||
626 | /* STRHT 1111 1000 0010 xxxx xxxx 1110 xxxx xxxx */ | ||
627 | /* STRT 1111 1000 0100 xxxx xxxx 1110 xxxx xxxx */ | ||
628 | /* LDRBT 1111 1000 0001 xxxx xxxx 1110 xxxx xxxx */ | ||
629 | /* LDRSBT 1111 1001 0001 xxxx xxxx 1110 xxxx xxxx */ | ||
630 | /* LDRHT 1111 1000 0011 xxxx xxxx 1110 xxxx xxxx */ | ||
631 | /* LDRSHT 1111 1001 0011 xxxx xxxx 1110 xxxx xxxx */ | ||
632 | /* LDRT 1111 1000 0101 xxxx xxxx 1110 xxxx xxxx */ | ||
633 | DECODE_REJECT (0xfe800f00, 0xf8000e00), | ||
634 | |||
635 | /* STR{,B,H} Rn,[PC...] 1111 1000 xxx0 1111 xxxx xxxx xxxx xxxx */ | ||
636 | DECODE_REJECT (0xff1f0000, 0xf80f0000), | ||
637 | |||
638 | /* STR{,B,H} PC,[Rn...] 1111 1000 xxx0 xxxx 1111 xxxx xxxx xxxx */ | ||
639 | DECODE_REJECT (0xff10f000, 0xf800f000), | ||
640 | |||
641 | /* LDR (literal) 1111 1000 x101 1111 xxxx xxxx xxxx xxxx */ | ||
642 | DECODE_SIMULATEX(0xff7f0000, 0xf85f0000, t32_simulate_ldr_literal, | ||
643 | REGS(PC, ANY, 0, 0, 0)), | ||
644 | |||
645 | /* STR (immediate) 1111 1000 0100 xxxx xxxx 1xxx xxxx xxxx */ | ||
646 | /* LDR (immediate) 1111 1000 0101 xxxx xxxx 1xxx xxxx xxxx */ | ||
647 | DECODE_OR (0xffe00800, 0xf8400800), | ||
648 | /* STR (immediate) 1111 1000 1100 xxxx xxxx xxxx xxxx xxxx */ | ||
649 | /* LDR (immediate) 1111 1000 1101 xxxx xxxx xxxx xxxx xxxx */ | ||
650 | DECODE_EMULATEX (0xffe00000, 0xf8c00000, t32_emulate_ldrstr, | ||
651 | REGS(NOPCX, ANY, 0, 0, 0)), | ||
652 | |||
653 | /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */ | ||
654 | /* LDR (register) 1111 1000 0101 xxxx xxxx 0000 00xx xxxx */ | ||
655 | DECODE_EMULATEX (0xffe00fc0, 0xf8400000, t32_emulate_ldrstr, | ||
656 | REGS(NOPCX, ANY, 0, 0, NOSPPC)), | ||
657 | |||
658 | /* LDRB (literal) 1111 1000 x001 1111 xxxx xxxx xxxx xxxx */ | ||
659 | /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */ | ||
660 | /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */ | ||
661 | /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */ | ||
662 | DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal, | ||
663 | REGS(PC, NOSPPCX, 0, 0, 0)), | ||
664 | |||
665 | /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */ | ||
666 | /* STRH (immediate) 1111 1000 0010 xxxx xxxx 1xxx xxxx xxxx */ | ||
667 | /* LDRB (immediate) 1111 1000 0001 xxxx xxxx 1xxx xxxx xxxx */ | ||
668 | /* LDRSB (immediate) 1111 1001 0001 xxxx xxxx 1xxx xxxx xxxx */ | ||
669 | /* LDRH (immediate) 1111 1000 0011 xxxx xxxx 1xxx xxxx xxxx */ | ||
670 | /* LDRSH (immediate) 1111 1001 0011 xxxx xxxx 1xxx xxxx xxxx */ | ||
671 | DECODE_OR (0xfec00800, 0xf8000800), | ||
672 | /* STRB (immediate) 1111 1000 1000 xxxx xxxx xxxx xxxx xxxx */ | ||
673 | /* STRH (immediate) 1111 1000 1010 xxxx xxxx xxxx xxxx xxxx */ | ||
674 | /* LDRB (immediate) 1111 1000 1001 xxxx xxxx xxxx xxxx xxxx */ | ||
675 | /* LDRSB (immediate) 1111 1001 1001 xxxx xxxx xxxx xxxx xxxx */ | ||
676 | /* LDRH (immediate) 1111 1000 1011 xxxx xxxx xxxx xxxx xxxx */ | ||
677 | /* LDRSH (immediate) 1111 1001 1011 xxxx xxxx xxxx xxxx xxxx */ | ||
678 | DECODE_EMULATEX (0xfec00000, 0xf8800000, t32_emulate_ldrstr, | ||
679 | REGS(NOPCX, NOSPPCX, 0, 0, 0)), | ||
680 | |||
681 | /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */ | ||
682 | /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */ | ||
683 | /* LDRB (register) 1111 1000 0001 xxxx xxxx 0000 00xx xxxx */ | ||
684 | /* LDRSB (register) 1111 1001 0001 xxxx xxxx 0000 00xx xxxx */ | ||
685 | /* LDRH (register) 1111 1000 0011 xxxx xxxx 0000 00xx xxxx */ | ||
686 | /* LDRSH (register) 1111 1001 0011 xxxx xxxx 0000 00xx xxxx */ | ||
687 | DECODE_EMULATEX (0xfe800fc0, 0xf8000000, t32_emulate_ldrstr, | ||
688 | REGS(NOPCX, NOSPPCX, 0, 0, NOSPPC)), | ||
689 | |||
690 | /* Other unallocated instructions... */ | ||
691 | DECODE_END | ||
692 | }; | ||
693 | |||
694 | static const union decode_item t32_table_1111_1010___1111[] = { | ||
695 | /* Data-processing (register) */ | ||
696 | |||
697 | /* ??? 1111 1010 011x xxxx 1111 xxxx 1xxx xxxx */ | ||
698 | DECODE_REJECT (0xffe0f080, 0xfa60f080), | ||
699 | |||
700 | /* SXTH 1111 1010 0000 1111 1111 xxxx 1xxx xxxx */ | ||
701 | /* UXTH 1111 1010 0001 1111 1111 xxxx 1xxx xxxx */ | ||
702 | /* SXTB16 1111 1010 0010 1111 1111 xxxx 1xxx xxxx */ | ||
703 | /* UXTB16 1111 1010 0011 1111 1111 xxxx 1xxx xxxx */ | ||
704 | /* SXTB 1111 1010 0100 1111 1111 xxxx 1xxx xxxx */ | ||
705 | /* UXTB 1111 1010 0101 1111 1111 xxxx 1xxx xxxx */ | ||
706 | DECODE_EMULATEX (0xff8ff080, 0xfa0ff080, t32_emulate_rd8rn16rm0_rwflags, | ||
707 | REGS(0, 0, NOSPPC, 0, NOSPPC)), | ||
708 | |||
709 | |||
710 | /* ??? 1111 1010 1xxx xxxx 1111 xxxx 0x11 xxxx */ | ||
711 | DECODE_REJECT (0xff80f0b0, 0xfa80f030), | ||
712 | /* ??? 1111 1010 1x11 xxxx 1111 xxxx 0xxx xxxx */ | ||
713 | DECODE_REJECT (0xffb0f080, 0xfab0f000), | ||
714 | |||
715 | /* SADD16 1111 1010 1001 xxxx 1111 xxxx 0000 xxxx */ | ||
716 | /* SASX 1111 1010 1010 xxxx 1111 xxxx 0000 xxxx */ | ||
717 | /* SSAX 1111 1010 1110 xxxx 1111 xxxx 0000 xxxx */ | ||
718 | /* SSUB16 1111 1010 1101 xxxx 1111 xxxx 0000 xxxx */ | ||
719 | /* SADD8 1111 1010 1000 xxxx 1111 xxxx 0000 xxxx */ | ||
720 | /* SSUB8 1111 1010 1100 xxxx 1111 xxxx 0000 xxxx */ | ||
721 | |||
722 | /* QADD16 1111 1010 1001 xxxx 1111 xxxx 0001 xxxx */ | ||
723 | /* QASX 1111 1010 1010 xxxx 1111 xxxx 0001 xxxx */ | ||
724 | /* QSAX 1111 1010 1110 xxxx 1111 xxxx 0001 xxxx */ | ||
725 | /* QSUB16 1111 1010 1101 xxxx 1111 xxxx 0001 xxxx */ | ||
726 | /* QADD8 1111 1010 1000 xxxx 1111 xxxx 0001 xxxx */ | ||
727 | /* QSUB8 1111 1010 1100 xxxx 1111 xxxx 0001 xxxx */ | ||
728 | |||
729 | /* SHADD16 1111 1010 1001 xxxx 1111 xxxx 0010 xxxx */ | ||
730 | /* SHASX 1111 1010 1010 xxxx 1111 xxxx 0010 xxxx */ | ||
731 | /* SHSAX 1111 1010 1110 xxxx 1111 xxxx 0010 xxxx */ | ||
732 | /* SHSUB16 1111 1010 1101 xxxx 1111 xxxx 0010 xxxx */ | ||
733 | /* SHADD8 1111 1010 1000 xxxx 1111 xxxx 0010 xxxx */ | ||
734 | /* SHSUB8 1111 1010 1100 xxxx 1111 xxxx 0010 xxxx */ | ||
735 | |||
736 | /* UADD16 1111 1010 1001 xxxx 1111 xxxx 0100 xxxx */ | ||
737 | /* UASX 1111 1010 1010 xxxx 1111 xxxx 0100 xxxx */ | ||
738 | /* USAX 1111 1010 1110 xxxx 1111 xxxx 0100 xxxx */ | ||
739 | /* USUB16 1111 1010 1101 xxxx 1111 xxxx 0100 xxxx */ | ||
740 | /* UADD8 1111 1010 1000 xxxx 1111 xxxx 0100 xxxx */ | ||
741 | /* USUB8 1111 1010 1100 xxxx 1111 xxxx 0100 xxxx */ | ||
742 | |||
743 | /* UQADD16 1111 1010 1001 xxxx 1111 xxxx 0101 xxxx */ | ||
744 | /* UQASX 1111 1010 1010 xxxx 1111 xxxx 0101 xxxx */ | ||
745 | /* UQSAX 1111 1010 1110 xxxx 1111 xxxx 0101 xxxx */ | ||
746 | /* UQSUB16 1111 1010 1101 xxxx 1111 xxxx 0101 xxxx */ | ||
747 | /* UQADD8 1111 1010 1000 xxxx 1111 xxxx 0101 xxxx */ | ||
748 | /* UQSUB8 1111 1010 1100 xxxx 1111 xxxx 0101 xxxx */ | ||
749 | |||
750 | /* UHADD16 1111 1010 1001 xxxx 1111 xxxx 0110 xxxx */ | ||
751 | /* UHASX 1111 1010 1010 xxxx 1111 xxxx 0110 xxxx */ | ||
752 | /* UHSAX 1111 1010 1110 xxxx 1111 xxxx 0110 xxxx */ | ||
753 | /* UHSUB16 1111 1010 1101 xxxx 1111 xxxx 0110 xxxx */ | ||
754 | /* UHADD8 1111 1010 1000 xxxx 1111 xxxx 0110 xxxx */ | ||
755 | /* UHSUB8 1111 1010 1100 xxxx 1111 xxxx 0110 xxxx */ | ||
756 | DECODE_OR (0xff80f080, 0xfa80f000), | ||
757 | |||
758 | /* SXTAH 1111 1010 0000 xxxx 1111 xxxx 1xxx xxxx */ | ||
759 | /* UXTAH 1111 1010 0001 xxxx 1111 xxxx 1xxx xxxx */ | ||
760 | /* SXTAB16 1111 1010 0010 xxxx 1111 xxxx 1xxx xxxx */ | ||
761 | /* UXTAB16 1111 1010 0011 xxxx 1111 xxxx 1xxx xxxx */ | ||
762 | /* SXTAB 1111 1010 0100 xxxx 1111 xxxx 1xxx xxxx */ | ||
763 | /* UXTAB 1111 1010 0101 xxxx 1111 xxxx 1xxx xxxx */ | ||
764 | DECODE_OR (0xff80f080, 0xfa00f080), | ||
765 | |||
766 | /* QADD 1111 1010 1000 xxxx 1111 xxxx 1000 xxxx */ | ||
767 | /* QDADD 1111 1010 1000 xxxx 1111 xxxx 1001 xxxx */ | ||
768 | /* QSUB 1111 1010 1000 xxxx 1111 xxxx 1010 xxxx */ | ||
769 | /* QDSUB 1111 1010 1000 xxxx 1111 xxxx 1011 xxxx */ | ||
770 | DECODE_OR (0xfff0f0c0, 0xfa80f080), | ||
771 | |||
772 | /* SEL 1111 1010 1010 xxxx 1111 xxxx 1000 xxxx */ | ||
773 | DECODE_OR (0xfff0f0f0, 0xfaa0f080), | ||
774 | |||
775 | /* LSL 1111 1010 000x xxxx 1111 xxxx 0000 xxxx */ | ||
776 | /* LSR 1111 1010 001x xxxx 1111 xxxx 0000 xxxx */ | ||
777 | /* ASR 1111 1010 010x xxxx 1111 xxxx 0000 xxxx */ | ||
778 | /* ROR 1111 1010 011x xxxx 1111 xxxx 0000 xxxx */ | ||
779 | DECODE_EMULATEX (0xff80f0f0, 0xfa00f000, t32_emulate_rd8rn16rm0_rwflags, | ||
780 | REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), | ||
781 | |||
782 | /* CLZ                          1111 1010 1011 xxxx 1111 xxxx 1000 xxxx */ | ||
783 | DECODE_OR (0xfff0f0f0, 0xfab0f080), | ||
784 | |||
785 | /* REV 1111 1010 1001 xxxx 1111 xxxx 1000 xxxx */ | ||
786 | /* REV16 1111 1010 1001 xxxx 1111 xxxx 1001 xxxx */ | ||
787 | /* RBIT 1111 1010 1001 xxxx 1111 xxxx 1010 xxxx */ | ||
788 | /* REVSH 1111 1010 1001 xxxx 1111 xxxx 1011 xxxx */ | ||
789 | DECODE_EMULATEX (0xfff0f0c0, 0xfa90f080, t32_emulate_rd8rn16_noflags, | ||
790 | REGS(NOSPPC, 0, NOSPPC, 0, SAMEAS16)), | ||
791 | |||
792 | /* Other unallocated instructions... */ | ||
793 | DECODE_END | ||
794 | }; | ||
795 | |||
796 | static const union decode_item t32_table_1111_1011_0[] = { | ||
797 | /* Multiply, multiply accumulate, and absolute difference */ | ||
798 | |||
799 | /* ??? 1111 1011 0000 xxxx 1111 xxxx 0001 xxxx */ | ||
800 | DECODE_REJECT (0xfff0f0f0, 0xfb00f010), | ||
801 | /* ??? 1111 1011 0111 xxxx 1111 xxxx 0001 xxxx */ | ||
802 | DECODE_REJECT (0xfff0f0f0, 0xfb70f010), | ||
803 | |||
804 | /* SMULxy 1111 1011 0001 xxxx 1111 xxxx 00xx xxxx */ | ||
805 | DECODE_OR (0xfff0f0c0, 0xfb10f000), | ||
806 | /* MUL 1111 1011 0000 xxxx 1111 xxxx 0000 xxxx */ | ||
807 | /* SMUAD{X} 1111 1011 0010 xxxx 1111 xxxx 000x xxxx */ | ||
808 | /* SMULWy 1111 1011 0011 xxxx 1111 xxxx 000x xxxx */ | ||
809 | /* SMUSD{X} 1111 1011 0100 xxxx 1111 xxxx 000x xxxx */ | ||
810 | /* SMMUL{R} 1111 1011 0101 xxxx 1111 xxxx 000x xxxx */ | ||
811 | /* USAD8 1111 1011 0111 xxxx 1111 xxxx 0000 xxxx */ | ||
812 | DECODE_EMULATEX (0xff80f0e0, 0xfb00f000, t32_emulate_rd8rn16rm0_rwflags, | ||
813 | REGS(NOSPPC, 0, NOSPPC, 0, NOSPPC)), | ||
814 | |||
815 | /* ??? 1111 1011 0111 xxxx xxxx xxxx 0001 xxxx */ | ||
816 | DECODE_REJECT (0xfff000f0, 0xfb700010), | ||
817 | |||
818 | /* SMLAxy 1111 1011 0001 xxxx xxxx xxxx 00xx xxxx */ | ||
819 | DECODE_OR (0xfff000c0, 0xfb100000), | ||
820 | /* MLA 1111 1011 0000 xxxx xxxx xxxx 0000 xxxx */ | ||
821 | /* MLS 1111 1011 0000 xxxx xxxx xxxx 0001 xxxx */ | ||
822 | /* SMLAD{X} 1111 1011 0010 xxxx xxxx xxxx 000x xxxx */ | ||
823 | /* SMLAWy 1111 1011 0011 xxxx xxxx xxxx 000x xxxx */ | ||
824 | /* SMLSD{X} 1111 1011 0100 xxxx xxxx xxxx 000x xxxx */ | ||
825 | /* SMMLA{R} 1111 1011 0101 xxxx xxxx xxxx 000x xxxx */ | ||
826 | /* SMMLS{R} 1111 1011 0110 xxxx xxxx xxxx 000x xxxx */ | ||
827 | /* USADA8 1111 1011 0111 xxxx xxxx xxxx 0000 xxxx */ | ||
828 | DECODE_EMULATEX (0xff8000c0, 0xfb000000, t32_emulate_rd8rn16rm0ra12_noflags, | ||
829 | REGS(NOSPPC, NOSPPCX, NOSPPC, 0, NOSPPC)), | ||
830 | |||
831 | /* Other unallocated instructions... */ | ||
832 | DECODE_END | ||
833 | }; | ||
834 | |||
835 | static const union decode_item t32_table_1111_1011_1[] = { | ||
836 | /* Long multiply, long multiply accumulate, and divide */ | ||
837 | |||
838 | /* UMAAL 1111 1011 1110 xxxx xxxx xxxx 0110 xxxx */ | ||
839 | DECODE_OR (0xfff000f0, 0xfbe00060), | ||
840 | /* SMLALxy 1111 1011 1100 xxxx xxxx xxxx 10xx xxxx */ | ||
841 | DECODE_OR (0xfff000c0, 0xfbc00080), | ||
842 | /* SMLALD{X} 1111 1011 1100 xxxx xxxx xxxx 110x xxxx */ | ||
843 | /* SMLSLD{X} 1111 1011 1101 xxxx xxxx xxxx 110x xxxx */ | ||
844 | DECODE_OR (0xffe000e0, 0xfbc000c0), | ||
845 | /* SMULL 1111 1011 1000 xxxx xxxx xxxx 0000 xxxx */ | ||
846 | /* UMULL 1111 1011 1010 xxxx xxxx xxxx 0000 xxxx */ | ||
847 | /* SMLAL 1111 1011 1100 xxxx xxxx xxxx 0000 xxxx */ | ||
848 | /* UMLAL 1111 1011 1110 xxxx xxxx xxxx 0000 xxxx */ | ||
849 | DECODE_EMULATEX (0xff9000f0, 0xfb800000, t32_emulate_rdlo12rdhi8rn16rm0_noflags, | ||
850 | REGS(NOSPPC, NOSPPC, NOSPPC, 0, NOSPPC)), | ||
851 | |||
852 | /* SDIV 1111 1011 1001 xxxx xxxx xxxx 1111 xxxx */ | ||
853 | /* UDIV 1111 1011 1011 xxxx xxxx xxxx 1111 xxxx */ | ||
854 | /* Other unallocated instructions... */ | ||
855 | DECODE_END | ||
856 | }; | ||
857 | |||
858 | const union decode_item kprobe_decode_thumb32_table[] = { | ||
859 | |||
860 | /* | ||
861 | * Load/store multiple instructions | ||
862 | * 1110 100x x0xx xxxx xxxx xxxx xxxx xxxx | ||
863 | */ | ||
864 | DECODE_TABLE (0xfe400000, 0xe8000000, t32_table_1110_100x_x0xx), | ||
865 | |||
866 | /* | ||
867 | * Load/store dual, load/store exclusive, table branch | ||
868 | * 1110 100x x1xx xxxx xxxx xxxx xxxx xxxx | ||
869 | */ | ||
870 | DECODE_TABLE (0xfe400000, 0xe8400000, t32_table_1110_100x_x1xx), | ||
871 | |||
872 | /* | ||
873 | * Data-processing (shifted register) | ||
874 | * 1110 101x xxxx xxxx xxxx xxxx xxxx xxxx | ||
875 | */ | ||
876 | DECODE_TABLE (0xfe000000, 0xea000000, t32_table_1110_101x), | ||
877 | |||
878 | /* | ||
879 | * Coprocessor instructions | ||
880 | * 1110 11xx xxxx xxxx xxxx xxxx xxxx xxxx | ||
881 | */ | ||
882 | DECODE_REJECT (0xfc000000, 0xec000000), | ||
883 | |||
884 | /* | ||
885 | * Data-processing (modified immediate) | ||
886 | * 1111 0x0x xxxx xxxx 0xxx xxxx xxxx xxxx | ||
887 | */ | ||
888 | DECODE_TABLE (0xfa008000, 0xf0000000, t32_table_1111_0x0x___0), | ||
889 | |||
890 | /* | ||
891 | * Data-processing (plain binary immediate) | ||
892 | * 1111 0x1x xxxx xxxx 0xxx xxxx xxxx xxxx | ||
893 | */ | ||
894 | DECODE_TABLE (0xfa008000, 0xf2000000, t32_table_1111_0x1x___0), | ||
895 | |||
896 | /* | ||
897 | * Branches and miscellaneous control | ||
898 | * 1111 0xxx xxxx xxxx 1xxx xxxx xxxx xxxx | ||
899 | */ | ||
900 | DECODE_TABLE (0xf8008000, 0xf0008000, t32_table_1111_0xxx___1), | ||
901 | |||
902 | /* | ||
903 | * Advanced SIMD element or structure load/store instructions | ||
904 | * 1111 1001 xxx0 xxxx xxxx xxxx xxxx xxxx | ||
905 | */ | ||
906 | DECODE_REJECT (0xff100000, 0xf9000000), | ||
907 | |||
908 | /* | ||
909 | * Memory hints | ||
910 | * 1111 100x x0x1 xxxx 1111 xxxx xxxx xxxx | ||
911 | */ | ||
912 | DECODE_TABLE (0xfe50f000, 0xf810f000, t32_table_1111_100x_x0x1__1111), | ||
913 | |||
914 | /* | ||
915 | * Store single data item | ||
916 | * 1111 1000 xxx0 xxxx xxxx xxxx xxxx xxxx | ||
917 | * Load single data items | ||
918 | * 1111 100x xxx1 xxxx xxxx xxxx xxxx xxxx | ||
919 | */ | ||
920 | DECODE_TABLE (0xfe000000, 0xf8000000, t32_table_1111_100x), | ||
921 | |||
922 | /* | ||
923 | * Data-processing (register) | ||
924 | * 1111 1010 xxxx xxxx 1111 xxxx xxxx xxxx | ||
925 | */ | ||
926 | DECODE_TABLE (0xff00f000, 0xfa00f000, t32_table_1111_1010___1111), | ||
927 | |||
928 | /* | ||
929 | * Multiply, multiply accumulate, and absolute difference | ||
930 | * 1111 1011 0xxx xxxx xxxx xxxx xxxx xxxx | ||
931 | */ | ||
932 | DECODE_TABLE (0xff800000, 0xfb000000, t32_table_1111_1011_0), | ||
933 | |||
934 | /* | ||
935 | * Long multiply, long multiply accumulate, and divide | ||
936 | * 1111 1011 1xxx xxxx xxxx xxxx xxxx xxxx | ||
937 | */ | ||
938 | DECODE_TABLE (0xff800000, 0xfb800000, t32_table_1111_1011_1), | ||
939 | |||
940 | /* | ||
941 | * Coprocessor instructions | ||
942 | * 1111 11xx xxxx xxxx xxxx xxxx xxxx xxxx | ||
943 | */ | ||
944 | DECODE_END | ||
945 | }; | ||
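
A note on how these tables are consumed: each entry pairs a bit mask with an expected value, and the first entry whose masked bits match the probed instruction selects either a sub-table (DECODE_TABLE) or a handler (DECODE_SIMULATE/EMULATE/REJECT). Below is a minimal user-space sketch of that matching step, reusing three (mask, value) pairs from the table above; the struct and loop are illustrative only and are not the kernel's union decode_item, which also recurses into sub-tables and applies the REGS() register checks.

#include <stdint.h>
#include <stdio.h>

struct demo_entry {
	uint32_t mask;
	uint32_t value;
	const char *name;
};

static const struct demo_entry demo_table[] = {
	{ 0xfe400000, 0xe8000000, "load/store multiple" },
	{ 0xfe000000, 0xea000000, "data-processing (shifted register)" },
	{ 0xfe000000, 0xf8000000, "store/load single data item" },
	{ 0, 0, NULL }
};

int main(void)
{
	uint32_t insn = 0xf8d12004;	/* LDR.W r2, [r1, #4] */
	const struct demo_entry *e;

	for (e = demo_table; e->mask; e++) {
		if ((insn & e->mask) == e->value) {
			printf("matched: %s\n", e->name);	/* store/load single data item */
			break;
		}
	}
	return 0;
}
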
946 | |||
947 | static void __kprobes | ||
948 | t16_simulate_bxblx(struct kprobe *p, struct pt_regs *regs) | ||
949 | { | ||
950 | kprobe_opcode_t insn = p->opcode; | ||
951 | unsigned long pc = thumb_probe_pc(p); | ||
952 | int rm = (insn >> 3) & 0xf; | ||
953 | unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm]; | ||
954 | |||
955 | if (insn & (1 << 7)) /* BLX ? */ | ||
956 | regs->ARM_lr = (unsigned long)p->addr + 2; | ||
957 | |||
958 | bx_write_pc(rmv, regs); | ||
959 | } | ||
960 | |||
961 | static void __kprobes | ||
962 | t16_simulate_ldr_literal(struct kprobe *p, struct pt_regs *regs) | ||
963 | { | ||
964 | kprobe_opcode_t insn = p->opcode; | ||
965 | unsigned long* base = (unsigned long *)(thumb_probe_pc(p) & ~3); | ||
966 | long index = insn & 0xff; | ||
967 | int rt = (insn >> 8) & 0x7; | ||
968 | regs->uregs[rt] = base[index]; | ||
969 | } | ||
970 | |||
971 | static void __kprobes | ||
972 | t16_simulate_ldrstr_sp_relative(struct kprobe *p, struct pt_regs *regs) | ||
973 | { | ||
974 | kprobe_opcode_t insn = p->opcode; | ||
975 | unsigned long* base = (unsigned long *)regs->ARM_sp; | ||
976 | long index = insn & 0xff; | ||
977 | int rt = (insn >> 8) & 0x7; | ||
978 | if (insn & 0x800) /* LDR */ | ||
979 | regs->uregs[rt] = base[index]; | ||
980 | else /* STR */ | ||
981 | base[index] = regs->uregs[rt]; | ||
982 | } | ||
983 | |||
984 | static void __kprobes | ||
985 | t16_simulate_reladr(struct kprobe *p, struct pt_regs *regs) | ||
986 | { | ||
987 | kprobe_opcode_t insn = p->opcode; | ||
988 | unsigned long base = (insn & 0x800) ? regs->ARM_sp | ||
989 | : (thumb_probe_pc(p) & ~3); | ||
990 | long offset = insn & 0xff; | ||
991 | int rt = (insn >> 8) & 0x7; | ||
992 | regs->uregs[rt] = base + offset * 4; | ||
993 | } | ||
994 | |||
995 | static void __kprobes | ||
996 | t16_simulate_add_sp_imm(struct kprobe *p, struct pt_regs *regs) | ||
997 | { | ||
998 | kprobe_opcode_t insn = p->opcode; | ||
999 | long imm = insn & 0x7f; | ||
1000 | if (insn & 0x80) /* SUB */ | ||
1001 | regs->ARM_sp -= imm * 4; | ||
1002 | else /* ADD */ | ||
1003 | regs->ARM_sp += imm * 4; | ||
1004 | } | ||
1005 | |||
1006 | static void __kprobes | ||
1007 | t16_simulate_cbz(struct kprobe *p, struct pt_regs *regs) | ||
1008 | { | ||
1009 | kprobe_opcode_t insn = p->opcode; | ||
1010 | int rn = insn & 0x7; | ||
1011 | kprobe_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn; | ||
1012 | if (nonzero & 0x800) { | ||
1013 | long i = insn & 0x200; | ||
1014 | long imm5 = insn & 0xf8; | ||
1015 | unsigned long pc = thumb_probe_pc(p); | ||
1016 | regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2); | ||
1017 | } | ||
1018 | } | ||
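
The branch offset of CBZ/CBNZ is i:imm5:'0', so the handler above only needs to rescale the two fields in place: bit 9 (i) down to a weight of 64 and bits 7:3 (imm5) down to a weight of 2. A standalone check of that arithmetic for one assumed encoding, 0xb92a (CBNZ r2 with i=0, imm5=5):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t insn = 0xb92a;		/* CBNZ r2: i=0, imm5=5, offset = 10 */
	long i = insn & 0x200;		/* bit 9, left in place */
	long imm5 = insn & 0xf8;	/* bits 7:3, left in place */

	/* (i >> 3) weights bit 9 as 64, (imm5 >> 2) weights imm5 as 2 */
	assert((i >> 3) + (imm5 >> 2) == 0 * 64 + 5 * 2);
	return 0;
}
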
1019 | |||
1020 | static void __kprobes | ||
1021 | t16_simulate_it(struct kprobe *p, struct pt_regs *regs) | ||
1022 | { | ||
1023 | /* | ||
1024 | * The 8 IT state bits are split into two parts in CPSR: | ||
1025 | * ITSTATE<1:0> are in CPSR<26:25> | ||
1026 | * ITSTATE<7:2> are in CPSR<15:10> | ||
1027 | * The new IT state is in the lower byte of insn. | ||
1028 | */ | ||
1029 | kprobe_opcode_t insn = p->opcode; | ||
1030 | unsigned long cpsr = regs->ARM_cpsr; | ||
1031 | cpsr &= ~PSR_IT_MASK; | ||
1032 | cpsr |= (insn & 0xfc) << 8; | ||
1033 | cpsr |= (insn & 0x03) << 25; | ||
1034 | regs->ARM_cpsr = cpsr; | ||
1035 | } | ||
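
A standalone check of the packing above for one assumed IT encoding, 0xbfa6 (ITTE GE, ITSTATE byte 0xa6):

#include <assert.h>

int main(void)
{
	unsigned long insn = 0xbfa6;
	unsigned long cpsr = 0;

	cpsr |= (insn & 0xfc) << 8;	/* ITSTATE<7:2> -> CPSR<15:10> */
	cpsr |= (insn & 0x03) << 25;	/* ITSTATE<1:0> -> CPSR<26:25> */

	assert(cpsr == 0x0400a400);	/* 0xa6>>2 at bits 15:10, 0xa6&3 at bits 26:25 */
	return 0;
}
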
1036 | |||
1037 | static void __kprobes | ||
1038 | t16_singlestep_it(struct kprobe *p, struct pt_regs *regs) | ||
1039 | { | ||
1040 | regs->ARM_pc += 2; | ||
1041 | t16_simulate_it(p, regs); | ||
1042 | } | ||
1043 | |||
1044 | static enum kprobe_insn __kprobes | ||
1045 | t16_decode_it(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1046 | { | ||
1047 | asi->insn_singlestep = t16_singlestep_it; | ||
1048 | return INSN_GOOD_NO_SLOT; | ||
1049 | } | ||
1050 | |||
1051 | static void __kprobes | ||
1052 | t16_simulate_cond_branch(struct kprobe *p, struct pt_regs *regs) | ||
1053 | { | ||
1054 | kprobe_opcode_t insn = p->opcode; | ||
1055 | unsigned long pc = thumb_probe_pc(p); | ||
1056 | long offset = insn & 0x7f; | ||
1057 | offset -= insn & 0x80; /* Apply sign bit */ | ||
1058 | regs->ARM_pc = pc + (offset * 2); | ||
1059 | } | ||
1060 | |||
1061 | static enum kprobe_insn __kprobes | ||
1062 | t16_decode_cond_branch(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1063 | { | ||
1064 | int cc = (insn >> 8) & 0xf; | ||
1065 | asi->insn_check_cc = kprobe_condition_checks[cc]; | ||
1066 | asi->insn_handler = t16_simulate_cond_branch; | ||
1067 | return INSN_GOOD_NO_SLOT; | ||
1068 | } | ||
1069 | |||
1070 | static void __kprobes | ||
1071 | t16_simulate_branch(struct kprobe *p, struct pt_regs *regs) | ||
1072 | { | ||
1073 | kprobe_opcode_t insn = p->opcode; | ||
1074 | unsigned long pc = thumb_probe_pc(p); | ||
1075 | long offset = insn & 0x3ff; | ||
1076 | offset -= insn & 0x400; /* Apply sign bit */ | ||
1077 | regs->ARM_pc = pc + (offset * 2); | ||
1078 | } | ||
1079 | |||
1080 | static unsigned long __kprobes | ||
1081 | t16_emulate_loregs(struct kprobe *p, struct pt_regs *regs) | ||
1082 | { | ||
1083 | unsigned long oldcpsr = regs->ARM_cpsr; | ||
1084 | unsigned long newcpsr; | ||
1085 | |||
1086 | __asm__ __volatile__ ( | ||
1087 | "msr cpsr_fs, %[oldcpsr] \n\t" | ||
1088 | "ldmia %[regs], {r0-r7} \n\t" | ||
1089 | "blx %[fn] \n\t" | ||
1090 | "stmia %[regs], {r0-r7} \n\t" | ||
1091 | "mrs %[newcpsr], cpsr \n\t" | ||
1092 | : [newcpsr] "=r" (newcpsr) | ||
1093 | : [oldcpsr] "r" (oldcpsr), [regs] "r" (regs), | ||
1094 | [fn] "r" (p->ainsn.insn_fn) | ||
1095 | : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", | ||
1096 | "lr", "memory", "cc" | ||
1097 | ); | ||
1098 | |||
1099 | return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK); | ||
1100 | } | ||
1101 | |||
1102 | static void __kprobes | ||
1103 | t16_emulate_loregs_rwflags(struct kprobe *p, struct pt_regs *regs) | ||
1104 | { | ||
1105 | regs->ARM_cpsr = t16_emulate_loregs(p, regs); | ||
1106 | } | ||
1107 | |||
1108 | static void __kprobes | ||
1109 | t16_emulate_loregs_noitrwflags(struct kprobe *p, struct pt_regs *regs) | ||
1110 | { | ||
1111 | unsigned long cpsr = t16_emulate_loregs(p, regs); | ||
1112 | if (!in_it_block(cpsr)) | ||
1113 | regs->ARM_cpsr = cpsr; | ||
1114 | } | ||
1115 | |||
1116 | static void __kprobes | ||
1117 | t16_emulate_hiregs(struct kprobe *p, struct pt_regs *regs) | ||
1118 | { | ||
1119 | kprobe_opcode_t insn = p->opcode; | ||
1120 | unsigned long pc = thumb_probe_pc(p); | ||
1121 | int rdn = (insn & 0x7) | ((insn & 0x80) >> 4); | ||
1122 | int rm = (insn >> 3) & 0xf; | ||
1123 | |||
1124 | register unsigned long rdnv asm("r1"); | ||
1125 | register unsigned long rmv asm("r0"); | ||
1126 | unsigned long cpsr = regs->ARM_cpsr; | ||
1127 | |||
1128 | rdnv = (rdn == 15) ? pc : regs->uregs[rdn]; | ||
1129 | rmv = (rm == 15) ? pc : regs->uregs[rm]; | ||
1130 | |||
1131 | __asm__ __volatile__ ( | ||
1132 | "msr cpsr_fs, %[cpsr] \n\t" | ||
1133 | "blx %[fn] \n\t" | ||
1134 | "mrs %[cpsr], cpsr \n\t" | ||
1135 | : "=r" (rdnv), [cpsr] "=r" (cpsr) | ||
1136 | : "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (p->ainsn.insn_fn) | ||
1137 | : "lr", "memory", "cc" | ||
1138 | ); | ||
1139 | |||
1140 | if (rdn == 15) | ||
1141 | rdnv &= ~1; | ||
1142 | |||
1143 | regs->uregs[rdn] = rdnv; | ||
1144 | regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK); | ||
1145 | } | ||
1146 | |||
1147 | static enum kprobe_insn __kprobes | ||
1148 | t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1149 | { | ||
1150 | insn &= ~0x00ff; | ||
1151 | insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */ | ||
1152 | ((u16 *)asi->insn)[0] = insn; | ||
1153 | asi->insn_handler = t16_emulate_hiregs; | ||
1154 | return INSN_GOOD; | ||
1155 | } | ||
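
In these "hi register" encodings Rdn is split across bit 7 (DN) and bits 2:0 while Rm occupies bits 6:3; t16_emulate_hiregs() reassembles them, and t16_decode_hiregs() rewrites the operand byte so the copied instruction works on R1 and R0. A small user-space check of both steps, using 0x4498 ("ADD r8, r3") as an assumed example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t insn = 0x4498;				/* ADD r8, r3 */
	int rdn = (insn & 0x7) | ((insn & 0x80) >> 4);	/* DN:Rdn[2:0] */
	int rm = (insn >> 3) & 0xf;

	assert(rdn == 8 && rm == 3);

	/* The decode-time rewrite: clear the operand byte, set Rdn=R1, Rm=R0 */
	insn = (insn & ~0x00ff) | 0x001;
	assert(insn == 0x4401);				/* now "ADD r1, r0" */
	return 0;
}
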
1156 | |||
1157 | static void __kprobes | ||
1158 | t16_emulate_push(struct kprobe *p, struct pt_regs *regs) | ||
1159 | { | ||
1160 | __asm__ __volatile__ ( | ||
1161 | "ldr r9, [%[regs], #13*4] \n\t" | ||
1162 | "ldr r8, [%[regs], #14*4] \n\t" | ||
1163 | "ldmia %[regs], {r0-r7} \n\t" | ||
1164 | "blx %[fn] \n\t" | ||
1165 | "str r9, [%[regs], #13*4] \n\t" | ||
1166 | : | ||
1167 | : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn) | ||
1168 | : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", | ||
1169 | "lr", "memory", "cc" | ||
1170 | ); | ||
1171 | } | ||
1172 | |||
1173 | static enum kprobe_insn __kprobes | ||
1174 | t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1175 | { | ||
1176 | /* | ||
1177 | * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}" | ||
1178 | * and call it with R9=SP and LR in the register list represented | ||
1179 | * by R8. | ||
1180 | */ | ||
1181 | ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */ | ||
1182 | ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */ | ||
1183 | asi->insn_handler = t16_emulate_push; | ||
1184 | return INSN_GOOD; | ||
1185 | } | ||
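
Worked example of the rewrite above, assuming the probed instruction is PUSH {r4-r7, lr} (opcode 0xb5f0): the low nine bits become the STMDB R9! register list, with the M bit (LR) landing in the R8 position, which is why t16_emulate_push() preloads R8 with the saved LR and R9 with the saved SP.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t insn = 0xb5f0;		/* PUSH {r4-r7, lr} */
	uint16_t slot[2];

	slot[0] = 0xe929;		/* 1st half of STMDB R9!,{...} */
	slot[1] = insn & 0x1ff;		/* register list */

	assert(slot[1] == 0x01f0);	/* {r4-r7, r8}: r8 stands in for lr */
	return 0;
}
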
1186 | |||
1187 | static void __kprobes | ||
1188 | t16_emulate_pop_nopc(struct kprobe *p, struct pt_regs *regs) | ||
1189 | { | ||
1190 | __asm__ __volatile__ ( | ||
1191 | "ldr r9, [%[regs], #13*4] \n\t" | ||
1192 | "ldmia %[regs], {r0-r7} \n\t" | ||
1193 | "blx %[fn] \n\t" | ||
1194 | "stmia %[regs], {r0-r7} \n\t" | ||
1195 | "str r9, [%[regs], #13*4] \n\t" | ||
1196 | : | ||
1197 | : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn) | ||
1198 | : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9", | ||
1199 | "lr", "memory", "cc" | ||
1200 | ); | ||
1201 | } | ||
1202 | |||
1203 | static void __kprobes | ||
1204 | t16_emulate_pop_pc(struct kprobe *p, struct pt_regs *regs) | ||
1205 | { | ||
1206 | register unsigned long pc asm("r8"); | ||
1207 | |||
1208 | __asm__ __volatile__ ( | ||
1209 | "ldr r9, [%[regs], #13*4] \n\t" | ||
1210 | "ldmia %[regs], {r0-r7} \n\t" | ||
1211 | "blx %[fn] \n\t" | ||
1212 | "stmia %[regs], {r0-r7} \n\t" | ||
1213 | "str r9, [%[regs], #13*4] \n\t" | ||
1214 | : "=r" (pc) | ||
1215 | : [regs] "r" (regs), [fn] "r" (p->ainsn.insn_fn) | ||
1216 | : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9", | ||
1217 | "lr", "memory", "cc" | ||
1218 | ); | ||
1219 | |||
1220 | bx_write_pc(pc, regs); | ||
1221 | } | ||
1222 | |||
1223 | static enum kprobe_insn __kprobes | ||
1224 | t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1225 | { | ||
1226 | /* | ||
1227 | * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}" | ||
1228 | * and call it with R9=SP and PC in the register list represented | ||
1229 | * by R8. | ||
1230 | */ | ||
1231 | ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */ | ||
1232 | ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */ | ||
1233 | asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc | ||
1234 | : t16_emulate_pop_nopc; | ||
1235 | return INSN_GOOD; | ||
1236 | } | ||
1237 | |||
1238 | static const union decode_item t16_table_1011[] = { | ||
1239 | /* Miscellaneous 16-bit instructions */ | ||
1240 | |||
1241 | /* ADD (SP plus immediate) 1011 0000 0xxx xxxx */ | ||
1242 | /* SUB (SP minus immediate) 1011 0000 1xxx xxxx */ | ||
1243 | DECODE_SIMULATE (0xff00, 0xb000, t16_simulate_add_sp_imm), | ||
1244 | |||
1245 | /* CBZ 1011 00x1 xxxx xxxx */ | ||
1246 | /* CBNZ 1011 10x1 xxxx xxxx */ | ||
1247 | DECODE_SIMULATE (0xf500, 0xb100, t16_simulate_cbz), | ||
1248 | |||
1249 | /* SXTH 1011 0010 00xx xxxx */ | ||
1250 | /* SXTB 1011 0010 01xx xxxx */ | ||
1251 | /* UXTH 1011 0010 10xx xxxx */ | ||
1252 | /* UXTB 1011 0010 11xx xxxx */ | ||
1253 | /* REV 1011 1010 00xx xxxx */ | ||
1254 | /* REV16 1011 1010 01xx xxxx */ | ||
1255 | /* ??? 1011 1010 10xx xxxx */ | ||
1256 | /* REVSH 1011 1010 11xx xxxx */ | ||
1257 | DECODE_REJECT (0xffc0, 0xba80), | ||
1258 | DECODE_EMULATE (0xf500, 0xb000, t16_emulate_loregs_rwflags), | ||
1259 | |||
1260 | /* PUSH 1011 010x xxxx xxxx */ | ||
1261 | DECODE_CUSTOM (0xfe00, 0xb400, t16_decode_push), | ||
1262 | /* POP 1011 110x xxxx xxxx */ | ||
1263 | DECODE_CUSTOM (0xfe00, 0xbc00, t16_decode_pop), | ||
1264 | |||
1265 | /* | ||
1266 | * If-Then, and hints | ||
1267 | * 1011 1111 xxxx xxxx | ||
1268 | */ | ||
1269 | |||
1270 | /* YIELD 1011 1111 0001 0000 */ | ||
1271 | DECODE_OR (0xffff, 0xbf10), | ||
1272 | /* SEV 1011 1111 0100 0000 */ | ||
1273 | DECODE_EMULATE (0xffff, 0xbf40, kprobe_emulate_none), | ||
1274 | /* NOP 1011 1111 0000 0000 */ | ||
1275 | /* WFE 1011 1111 0010 0000 */ | ||
1276 | /* WFI 1011 1111 0011 0000 */ | ||
1277 | DECODE_SIMULATE (0xffcf, 0xbf00, kprobe_simulate_nop), | ||
1278 | /* Unassigned hints 1011 1111 xxxx 0000 */ | ||
1279 | DECODE_REJECT (0xff0f, 0xbf00), | ||
1280 | /* IT 1011 1111 xxxx xxxx */ | ||
1281 | DECODE_CUSTOM (0xff00, 0xbf00, t16_decode_it), | ||
1282 | |||
1283 | /* SETEND 1011 0110 010x xxxx */ | ||
1284 | /* CPS 1011 0110 011x xxxx */ | ||
1285 | /* BKPT 1011 1110 xxxx xxxx */ | ||
1286 | /* And unallocated instructions... */ | ||
1287 | DECODE_END | ||
1288 | }; | ||
1289 | |||
1290 | const union decode_item kprobe_decode_thumb16_table[] = { | ||
1291 | |||
1292 | /* | ||
1293 | * Shift (immediate), add, subtract, move, and compare | ||
1294 | * 00xx xxxx xxxx xxxx | ||
1295 | */ | ||
1296 | |||
1297 | /* CMP (immediate) 0010 1xxx xxxx xxxx */ | ||
1298 | DECODE_EMULATE (0xf800, 0x2800, t16_emulate_loregs_rwflags), | ||
1299 | |||
1300 | /* ADD (register) 0001 100x xxxx xxxx */ | ||
1301 | /* SUB (register) 0001 101x xxxx xxxx */ | ||
1302 | /* LSL (immediate) 0000 0xxx xxxx xxxx */ | ||
1303 | /* LSR (immediate) 0000 1xxx xxxx xxxx */ | ||
1304 | /* ASR (immediate) 0001 0xxx xxxx xxxx */ | ||
1305 | /* ADD (immediate, Thumb) 0001 110x xxxx xxxx */ | ||
1306 | /* SUB (immediate, Thumb) 0001 111x xxxx xxxx */ | ||
1307 | /* MOV (immediate) 0010 0xxx xxxx xxxx */ | ||
1308 | /* ADD (immediate, Thumb) 0011 0xxx xxxx xxxx */ | ||
1309 | /* SUB (immediate, Thumb) 0011 1xxx xxxx xxxx */ | ||
1310 | DECODE_EMULATE (0xc000, 0x0000, t16_emulate_loregs_noitrwflags), | ||
1311 | |||
1312 | /* | ||
1313 | * 16-bit Thumb data-processing instructions | ||
1314 | * 0100 00xx xxxx xxxx | ||
1315 | */ | ||
1316 | |||
1317 | /* TST (register) 0100 0010 00xx xxxx */ | ||
1318 | DECODE_EMULATE (0xffc0, 0x4200, t16_emulate_loregs_rwflags), | ||
1319 | /* CMP (register) 0100 0010 10xx xxxx */ | ||
1320 | /* CMN (register) 0100 0010 11xx xxxx */ | ||
1321 | DECODE_EMULATE (0xff80, 0x4280, t16_emulate_loregs_rwflags), | ||
1322 | /* AND (register) 0100 0000 00xx xxxx */ | ||
1323 | /* EOR (register) 0100 0000 01xx xxxx */ | ||
1324 | /* LSL (register) 0100 0000 10xx xxxx */ | ||
1325 | /* LSR (register) 0100 0000 11xx xxxx */ | ||
1326 | /* ASR (register) 0100 0001 00xx xxxx */ | ||
1327 | /* ADC (register) 0100 0001 01xx xxxx */ | ||
1328 | /* SBC (register) 0100 0001 10xx xxxx */ | ||
1329 | /* ROR (register) 0100 0001 11xx xxxx */ | ||
1330 | /* RSB (immediate) 0100 0010 01xx xxxx */ | ||
1331 | /* ORR (register) 0100 0011 00xx xxxx */ | ||
1332 | /* MUL                          0100 0011 01xx xxxx */ | ||
1333 | /* BIC (register) 0100 0011 10xx xxxx */ | ||
1334 | /* MVN (register)               0100 0011 11xx xxxx */ | ||
1335 | DECODE_EMULATE (0xfc00, 0x4000, t16_emulate_loregs_noitrwflags), | ||
1336 | |||
1337 | /* | ||
1338 | * Special data instructions and branch and exchange | ||
1339 | * 0100 01xx xxxx xxxx | ||
1340 | */ | ||
1341 | |||
1342 | /* BLX pc 0100 0111 1111 1xxx */ | ||
1343 | DECODE_REJECT (0xfff8, 0x47f8), | ||
1344 | |||
1345 | /* BX (register) 0100 0111 0xxx xxxx */ | ||
1346 | /* BLX (register) 0100 0111 1xxx xxxx */ | ||
1347 | DECODE_SIMULATE (0xff00, 0x4700, t16_simulate_bxblx), | ||
1348 | |||
1349 | /* ADD pc, pc 0100 0100 1111 1111 */ | ||
1350 | DECODE_REJECT (0xffff, 0x44ff), | ||
1351 | |||
1352 | /* ADD (register) 0100 0100 xxxx xxxx */ | ||
1353 | /* CMP (register) 0100 0101 xxxx xxxx */ | ||
1354 | /* MOV (register) 0100 0110 xxxx xxxx */ | ||
1355 | DECODE_CUSTOM (0xfc00, 0x4400, t16_decode_hiregs), | ||
1356 | |||
1357 | /* | ||
1358 | * Load from Literal Pool | ||
1359 | * LDR (literal) 0100 1xxx xxxx xxxx | ||
1360 | */ | ||
1361 | DECODE_SIMULATE (0xf800, 0x4800, t16_simulate_ldr_literal), | ||
1362 | |||
1363 | /* | ||
1364 | * 16-bit Thumb Load/store instructions | ||
1365 | * 0101 xxxx xxxx xxxx | ||
1366 | * 011x xxxx xxxx xxxx | ||
1367 | * 100x xxxx xxxx xxxx | ||
1368 | */ | ||
1369 | |||
1370 | /* STR (register) 0101 000x xxxx xxxx */ | ||
1371 | /* STRH (register) 0101 001x xxxx xxxx */ | ||
1372 | /* STRB (register) 0101 010x xxxx xxxx */ | ||
1373 | /* LDRSB (register) 0101 011x xxxx xxxx */ | ||
1374 | /* LDR (register) 0101 100x xxxx xxxx */ | ||
1375 | /* LDRH (register) 0101 101x xxxx xxxx */ | ||
1376 | /* LDRB (register) 0101 110x xxxx xxxx */ | ||
1377 | /* LDRSH (register) 0101 111x xxxx xxxx */ | ||
1378 | /* STR (immediate, Thumb) 0110 0xxx xxxx xxxx */ | ||
1379 | /* LDR (immediate, Thumb) 0110 1xxx xxxx xxxx */ | ||
1380 | /* STRB (immediate, Thumb) 0111 0xxx xxxx xxxx */ | ||
1381 | /* LDRB (immediate, Thumb) 0111 1xxx xxxx xxxx */ | ||
1382 | DECODE_EMULATE (0xc000, 0x4000, t16_emulate_loregs_rwflags), | ||
1383 | /* STRH (immediate, Thumb) 1000 0xxx xxxx xxxx */ | ||
1384 | /* LDRH (immediate, Thumb) 1000 1xxx xxxx xxxx */ | ||
1385 | DECODE_EMULATE (0xf000, 0x8000, t16_emulate_loregs_rwflags), | ||
1386 | /* STR (immediate, Thumb) 1001 0xxx xxxx xxxx */ | ||
1387 | /* LDR (immediate, Thumb) 1001 1xxx xxxx xxxx */ | ||
1388 | DECODE_SIMULATE (0xf000, 0x9000, t16_simulate_ldrstr_sp_relative), | ||
1389 | |||
1390 | /* | ||
1391 | * Generate PC-/SP-relative address | ||
1392 | * ADR (literal) 1010 0xxx xxxx xxxx | ||
1393 | * ADD (SP plus immediate) 1010 1xxx xxxx xxxx | ||
1394 | */ | ||
1395 | DECODE_SIMULATE (0xf000, 0xa000, t16_simulate_reladr), | ||
1396 | |||
1397 | /* | ||
1398 | * Miscellaneous 16-bit instructions | ||
1399 | * 1011 xxxx xxxx xxxx | ||
1400 | */ | ||
1401 | DECODE_TABLE (0xf000, 0xb000, t16_table_1011), | ||
1402 | |||
1403 | /* STM 1100 0xxx xxxx xxxx */ | ||
1404 | /* LDM 1100 1xxx xxxx xxxx */ | ||
1405 | DECODE_EMULATE (0xf000, 0xc000, t16_emulate_loregs_rwflags), | ||
1406 | |||
1407 | /* | ||
1408 | * Conditional branch, and Supervisor Call | ||
1409 | */ | ||
1410 | |||
1411 | /* Permanently UNDEFINED 1101 1110 xxxx xxxx */ | ||
1412 | /* SVC 1101 1111 xxxx xxxx */ | ||
1413 | DECODE_REJECT (0xfe00, 0xde00), | ||
1414 | |||
1415 | /* Conditional branch 1101 xxxx xxxx xxxx */ | ||
1416 | DECODE_CUSTOM (0xf000, 0xd000, t16_decode_cond_branch), | ||
1417 | |||
1418 | /* | ||
1419 | * Unconditional branch | ||
1420 | * B 1110 0xxx xxxx xxxx | ||
1421 | */ | ||
1422 | DECODE_SIMULATE (0xf800, 0xe000, t16_simulate_branch), | ||
1423 | |||
1424 | DECODE_END | ||
1425 | }; | ||
1426 | |||
1427 | static unsigned long __kprobes thumb_check_cc(unsigned long cpsr) | ||
1428 | { | ||
1429 | if (unlikely(in_it_block(cpsr))) | ||
1430 | return kprobe_condition_checks[current_cond(cpsr)](cpsr); | ||
1431 | return true; | ||
1432 | } | ||
1433 | |||
1434 | static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs) | ||
1435 | { | ||
1436 | regs->ARM_pc += 2; | ||
1437 | p->ainsn.insn_handler(p, regs); | ||
1438 | regs->ARM_cpsr = it_advance(regs->ARM_cpsr); | ||
1439 | } | ||
1440 | |||
1441 | static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs) | ||
1442 | { | ||
1443 | regs->ARM_pc += 4; | ||
1444 | p->ainsn.insn_handler(p, regs); | ||
1445 | regs->ARM_cpsr = it_advance(regs->ARM_cpsr); | ||
1446 | } | ||
1447 | |||
1448 | enum kprobe_insn __kprobes | ||
1449 | thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1450 | { | ||
1451 | asi->insn_singlestep = thumb16_singlestep; | ||
1452 | asi->insn_check_cc = thumb_check_cc; | ||
1453 | return kprobe_decode_insn(insn, asi, kprobe_decode_thumb16_table, true); | ||
1454 | } | ||
1455 | |||
1456 | enum kprobe_insn __kprobes | ||
1457 | thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | ||
1458 | { | ||
1459 | asi->insn_singlestep = thumb32_singlestep; | ||
1460 | asi->insn_check_cc = thumb_check_cc; | ||
1461 | return kprobe_decode_insn(insn, asi, kprobe_decode_thumb32_table, true); | ||
1462 | } | ||
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 1656c87501c0..129c1163248b 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c | |||
@@ -28,14 +28,16 @@ | |||
28 | #include <asm/traps.h> | 28 | #include <asm/traps.h> |
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | 30 | ||
31 | #include "kprobes.h" | ||
32 | |||
31 | #define MIN_STACK_SIZE(addr) \ | 33 | #define MIN_STACK_SIZE(addr) \ |
32 | min((unsigned long)MAX_STACK_SIZE, \ | 34 | min((unsigned long)MAX_STACK_SIZE, \ |
33 | (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) | 35 | (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) |
34 | 36 | ||
35 | #define flush_insns(addr, cnt) \ | 37 | #define flush_insns(addr, size) \ |
36 | flush_icache_range((unsigned long)(addr), \ | 38 | flush_icache_range((unsigned long)(addr), \ |
37 | (unsigned long)(addr) + \ | 39 | (unsigned long)(addr) + \ |
38 | sizeof(kprobe_opcode_t) * (cnt)) | 40 | (size)) |
39 | 41 | ||
40 | /* Used as a marker in ARM_pc to note when we're in a jprobe. */ | 42 | /* Used as a marker in ARM_pc to note when we're in a jprobe. */ |
41 | #define JPROBE_MAGIC_ADDR 0xffffffff | 43 | #define JPROBE_MAGIC_ADDR 0xffffffff |
@@ -49,16 +51,35 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
49 | kprobe_opcode_t insn; | 51 | kprobe_opcode_t insn; |
50 | kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; | 52 | kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; |
51 | unsigned long addr = (unsigned long)p->addr; | 53 | unsigned long addr = (unsigned long)p->addr; |
54 | bool thumb; | ||
55 | kprobe_decode_insn_t *decode_insn; | ||
52 | int is; | 56 | int is; |
53 | 57 | ||
54 | if (addr & 0x3 || in_exception_text(addr)) | 58 | if (in_exception_text(addr)) |
55 | return -EINVAL; | 59 | return -EINVAL; |
56 | 60 | ||
61 | #ifdef CONFIG_THUMB2_KERNEL | ||
62 | thumb = true; | ||
63 | addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */ | ||
64 | insn = ((u16 *)addr)[0]; | ||
65 | if (is_wide_instruction(insn)) { | ||
66 | insn <<= 16; | ||
67 | insn |= ((u16 *)addr)[1]; | ||
68 | decode_insn = thumb32_kprobe_decode_insn; | ||
69 | } else | ||
70 | decode_insn = thumb16_kprobe_decode_insn; | ||
71 | #else /* !CONFIG_THUMB2_KERNEL */ | ||
72 | thumb = false; | ||
73 | if (addr & 0x3) | ||
74 | return -EINVAL; | ||
57 | insn = *p->addr; | 75 | insn = *p->addr; |
76 | decode_insn = arm_kprobe_decode_insn; | ||
77 | #endif | ||
78 | |||
58 | p->opcode = insn; | 79 | p->opcode = insn; |
59 | p->ainsn.insn = tmp_insn; | 80 | p->ainsn.insn = tmp_insn; |
60 | 81 | ||
61 | switch (arm_kprobe_decode_insn(insn, &p->ainsn)) { | 82 | switch ((*decode_insn)(insn, &p->ainsn)) { |
62 | case INSN_REJECTED: /* not supported */ | 83 | case INSN_REJECTED: /* not supported */ |
63 | return -EINVAL; | 84 | return -EINVAL; |
64 | 85 | ||
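
The hunk above tells 16-bit and 32-bit probes apart by looking only at the first halfword: values whose top five bits are 11101, 11110 or 11111 (i.e. >= 0xe800) begin a 32-bit Thumb encoding. A standalone sketch of that test, with a local helper assumed to mirror what is_wide_instruction() checks:

#include <assert.h>
#include <stdint.h>

static int wide(uint16_t hw)
{
	return hw >= 0xe800;	/* first halfword of a 32-bit Thumb encoding */
}

int main(void)
{
	assert(!wide(0xb5f0));	/* PUSH {r4-r7,lr}: a 16-bit instruction */
	assert(wide(0xf8d1));	/* first half of LDR.W r2, [r1, #4]: 32-bit */
	return 0;
}
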
@@ -68,7 +89,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
68 | return -ENOMEM; | 89 | return -ENOMEM; |
69 | for (is = 0; is < MAX_INSN_SIZE; ++is) | 90 | for (is = 0; is < MAX_INSN_SIZE; ++is) |
70 | p->ainsn.insn[is] = tmp_insn[is]; | 91 | p->ainsn.insn[is] = tmp_insn[is]; |
71 | flush_insns(p->ainsn.insn, MAX_INSN_SIZE); | 92 | flush_insns(p->ainsn.insn, |
93 | sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE); | ||
94 | p->ainsn.insn_fn = (kprobe_insn_fn_t *) | ||
95 | ((uintptr_t)p->ainsn.insn | thumb); | ||
72 | break; | 96 | break; |
73 | 97 | ||
74 | case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ | 98 | case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ |
@@ -79,24 +103,88 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
79 | return 0; | 103 | return 0; |
80 | } | 104 | } |
81 | 105 | ||
106 | #ifdef CONFIG_THUMB2_KERNEL | ||
107 | |||
108 | /* | ||
109 | * For a 32-bit Thumb breakpoint spanning two memory words we need to take | ||
110 | * special precautions to insert the breakpoint atomically, especially on SMP | ||
111 | * systems. This is achieved by calling this arming function using stop_machine. | ||
112 | */ | ||
113 | static int __kprobes set_t32_breakpoint(void *addr) | ||
114 | { | ||
115 | ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16; | ||
116 | ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff; | ||
117 | flush_insns(addr, 2*sizeof(u16)); | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | void __kprobes arch_arm_kprobe(struct kprobe *p) | ||
122 | { | ||
123 | uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */ | ||
124 | |||
125 | if (!is_wide_instruction(p->opcode)) { | ||
126 | *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; | ||
127 | flush_insns(addr, sizeof(u16)); | ||
128 | } else if (addr & 2) { | ||
129 | /* A 32-bit instruction spanning two words needs special care */ | ||
130 | stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map); | ||
131 | } else { | ||
132 | /* Word aligned 32-bit instruction can be written atomically */ | ||
133 | u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; | ||
134 | #ifndef __ARMEB__ /* Swap halfwords for little-endian */ | ||
135 | bkp = (bkp >> 16) | (bkp << 16); | ||
136 | #endif | ||
137 | *(u32 *)addr = bkp; | ||
138 | flush_insns(addr, sizeof(u32)); | ||
139 | } | ||
140 | } | ||
141 | |||
142 | #else /* !CONFIG_THUMB2_KERNEL */ | ||
143 | |||
82 | void __kprobes arch_arm_kprobe(struct kprobe *p) | 144 | void __kprobes arch_arm_kprobe(struct kprobe *p) |
83 | { | 145 | { |
84 | *p->addr = KPROBE_BREAKPOINT_INSTRUCTION; | 146 | kprobe_opcode_t insn = p->opcode; |
85 | flush_insns(p->addr, 1); | 147 | kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; |
148 | if (insn >= 0xe0000000) | ||
149 | brkp |= 0xe0000000; /* Unconditional instruction */ | ||
150 | else | ||
151 | brkp |= insn & 0xf0000000; /* Copy condition from insn */ | ||
152 | *p->addr = brkp; | ||
153 | flush_insns(p->addr, sizeof(p->addr[0])); | ||
86 | } | 154 | } |
87 | 155 | ||
156 | #endif /* !CONFIG_THUMB2_KERNEL */ | ||
157 | |||
88 | /* | 158 | /* |
89 | * The actual disarming is done here on each CPU and synchronized using | 159 | * The actual disarming is done here on each CPU and synchronized using |
90 | * stop_machine. This synchronization is necessary on SMP to avoid removing | 160 | * stop_machine. This synchronization is necessary on SMP to avoid removing |
91 | * a probe between the moment the 'Undefined Instruction' exception is raised | 161 | * a probe between the moment the 'Undefined Instruction' exception is raised |
92 | * and the moment the exception handler reads the faulting instruction from | 162 | * and the moment the exception handler reads the faulting instruction from |
93 | * memory. | 163 | * memory. It is also needed to atomically set the two half-words of a 32-bit |
164 | * Thumb breakpoint. | ||
94 | */ | 165 | */ |
95 | int __kprobes __arch_disarm_kprobe(void *p) | 166 | int __kprobes __arch_disarm_kprobe(void *p) |
96 | { | 167 | { |
97 | struct kprobe *kp = p; | 168 | struct kprobe *kp = p; |
169 | #ifdef CONFIG_THUMB2_KERNEL | ||
170 | u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1); | ||
171 | kprobe_opcode_t insn = kp->opcode; | ||
172 | unsigned int len; | ||
173 | |||
174 | if (is_wide_instruction(insn)) { | ||
175 | ((u16 *)addr)[0] = insn>>16; | ||
176 | ((u16 *)addr)[1] = insn; | ||
177 | len = 2*sizeof(u16); | ||
178 | } else { | ||
179 | ((u16 *)addr)[0] = insn; | ||
180 | len = sizeof(u16); | ||
181 | } | ||
182 | flush_insns(addr, len); | ||
183 | |||
184 | #else /* !CONFIG_THUMB2_KERNEL */ | ||
98 | *kp->addr = kp->opcode; | 185 | *kp->addr = kp->opcode; |
99 | flush_insns(kp->addr, 1); | 186 | flush_insns(kp->addr, sizeof(kp->addr[0])); |
187 | #endif | ||
100 | return 0; | 188 | return 0; |
101 | } | 189 | } |
102 | 190 | ||
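
In the word-aligned case above, the 32-bit Thumb breakpoint occupies two consecutive halfwords with the first half at the lower address, so writing it as one little-endian u32 requires swapping the halves first. A quick user-space check using the KPROBE_THUMB32_BREAKPOINT_INSTRUCTION value from kprobes.h below (the assertion assumes a little-endian host, matching the #ifndef __ARMEB__ case):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t bkp = 0xf7f0a018;		/* KPROBE_THUMB32_BREAKPOINT_INSTRUCTION */
	uint16_t mem[2];

	bkp = (bkp >> 16) | (bkp << 16);	/* swap halves for little-endian */
	memcpy(mem, &bkp, sizeof(bkp));		/* the single u32 store */

	/* On a little-endian host the halfwords now sit in instruction order */
	assert(mem[0] == 0xf7f0 && mem[1] == 0xa018);
	return 0;
}
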
@@ -130,12 +218,24 @@ static void __kprobes set_current_kprobe(struct kprobe *p) | |||
130 | __get_cpu_var(current_kprobe) = p; | 218 | __get_cpu_var(current_kprobe) = p; |
131 | } | 219 | } |
132 | 220 | ||
133 | static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, | 221 | static void __kprobes |
134 | struct kprobe_ctlblk *kcb) | 222 | singlestep_skip(struct kprobe *p, struct pt_regs *regs) |
135 | { | 223 | { |
224 | #ifdef CONFIG_THUMB2_KERNEL | ||
225 | regs->ARM_cpsr = it_advance(regs->ARM_cpsr); | ||
226 | if (is_wide_instruction(p->opcode)) | ||
227 | regs->ARM_pc += 4; | ||
228 | else | ||
229 | regs->ARM_pc += 2; | ||
230 | #else | ||
136 | regs->ARM_pc += 4; | 231 | regs->ARM_pc += 4; |
137 | if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) | 232 | #endif |
138 | p->ainsn.insn_handler(p, regs); | 233 | } |
234 | |||
235 | static inline void __kprobes | ||
236 | singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) | ||
237 | { | ||
238 | p->ainsn.insn_singlestep(p, regs); | ||
139 | } | 239 | } |
140 | 240 | ||
141 | /* | 241 | /* |
@@ -149,11 +249,23 @@ void __kprobes kprobe_handler(struct pt_regs *regs) | |||
149 | { | 249 | { |
150 | struct kprobe *p, *cur; | 250 | struct kprobe *p, *cur; |
151 | struct kprobe_ctlblk *kcb; | 251 | struct kprobe_ctlblk *kcb; |
152 | kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->ARM_pc; | ||
153 | 252 | ||
154 | kcb = get_kprobe_ctlblk(); | 253 | kcb = get_kprobe_ctlblk(); |
155 | cur = kprobe_running(); | 254 | cur = kprobe_running(); |
156 | p = get_kprobe(addr); | 255 | |
256 | #ifdef CONFIG_THUMB2_KERNEL | ||
257 | /* | ||
258 | * First look for a probe which was registered using an address with | ||
259 | * bit 0 set; this is the usual situation for pointers to Thumb code. | ||
260 | * If not found, fall back to looking for one with bit 0 clear. | ||
261 | */ | ||
262 | p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1)); | ||
263 | if (!p) | ||
264 | p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc); | ||
265 | |||
266 | #else /* ! CONFIG_THUMB2_KERNEL */ | ||
267 | p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc); | ||
268 | #endif | ||
157 | 269 | ||
158 | if (p) { | 270 | if (p) { |
159 | if (cur) { | 271 | if (cur) { |
@@ -173,7 +285,8 @@ void __kprobes kprobe_handler(struct pt_regs *regs) | |||
173 | /* impossible cases */ | 285 | /* impossible cases */ |
174 | BUG(); | 286 | BUG(); |
175 | } | 287 | } |
176 | } else { | 288 | } else if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) { |
289 | /* Probe hit and conditional execution check ok. */ | ||
177 | set_current_kprobe(p); | 290 | set_current_kprobe(p); |
178 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | 291 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; |
179 | 292 | ||
@@ -193,6 +306,13 @@ void __kprobes kprobe_handler(struct pt_regs *regs) | |||
193 | } | 306 | } |
194 | reset_current_kprobe(); | 307 | reset_current_kprobe(); |
195 | } | 308 | } |
309 | } else { | ||
310 | /* | ||
311 | * Probe hit but conditional execution check failed, | ||
312 | * so just skip the instruction and continue as if | ||
313 | * nothing had happened. | ||
314 | */ | ||
315 | singlestep_skip(p, regs); | ||
196 | } | 316 | } |
197 | } else if (cur) { | 317 | } else if (cur) { |
198 | /* We probably hit a jprobe. Call its break handler. */ | 318 | /* We probably hit a jprobe. Call its break handler. */ |
@@ -300,7 +420,11 @@ void __naked __kprobes kretprobe_trampoline(void) | |||
300 | "bl trampoline_handler \n\t" | 420 | "bl trampoline_handler \n\t" |
301 | "mov lr, r0 \n\t" | 421 | "mov lr, r0 \n\t" |
302 | "ldmia sp!, {r0 - r11} \n\t" | 422 | "ldmia sp!, {r0 - r11} \n\t" |
423 | #ifdef CONFIG_THUMB2_KERNEL | ||
424 | "bx lr \n\t" | ||
425 | #else | ||
303 | "mov pc, lr \n\t" | 426 | "mov pc, lr \n\t" |
427 | #endif | ||
304 | : : : "memory"); | 428 | : : : "memory"); |
305 | } | 429 | } |
306 | 430 | ||
@@ -378,11 +502,22 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
378 | struct jprobe *jp = container_of(p, struct jprobe, kp); | 502 | struct jprobe *jp = container_of(p, struct jprobe, kp); |
379 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 503 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
380 | long sp_addr = regs->ARM_sp; | 504 | long sp_addr = regs->ARM_sp; |
505 | long cpsr; | ||
381 | 506 | ||
382 | kcb->jprobe_saved_regs = *regs; | 507 | kcb->jprobe_saved_regs = *regs; |
383 | memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); | 508 | memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr)); |
384 | regs->ARM_pc = (long)jp->entry; | 509 | regs->ARM_pc = (long)jp->entry; |
385 | regs->ARM_cpsr |= PSR_I_BIT; | 510 | |
511 | cpsr = regs->ARM_cpsr | PSR_I_BIT; | ||
512 | #ifdef CONFIG_THUMB2_KERNEL | ||
513 | /* Set correct Thumb state in cpsr */ | ||
514 | if (regs->ARM_pc & 1) | ||
515 | cpsr |= PSR_T_BIT; | ||
516 | else | ||
517 | cpsr &= ~PSR_T_BIT; | ||
518 | #endif | ||
519 | regs->ARM_cpsr = cpsr; | ||
520 | |||
386 | preempt_disable(); | 521 | preempt_disable(); |
387 | return 1; | 522 | return 1; |
388 | } | 523 | } |
@@ -404,7 +539,12 @@ void __kprobes jprobe_return(void) | |||
404 | * This is to prevent any simulated instruction from writing | 539 | * This is to prevent any simulated instruction from writing |
405 | * over the regs when they are accessing the stack. | 540 | * over the regs when they are accessing the stack. |
406 | */ | 541 | */ |
542 | #ifdef CONFIG_THUMB2_KERNEL | ||
543 | "sub r0, %0, %1 \n\t" | ||
544 | "mov sp, r0 \n\t" | ||
545 | #else | ||
407 | "sub sp, %0, %1 \n\t" | 546 | "sub sp, %0, %1 \n\t" |
547 | #endif | ||
408 | "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" | 548 | "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" |
409 | "str %0, [sp, %2] \n\t" | 549 | "str %0, [sp, %2] \n\t" |
410 | "str r0, [sp, %3] \n\t" | 550 | "str r0, [sp, %3] \n\t" |
@@ -415,15 +555,28 @@ void __kprobes jprobe_return(void) | |||
415 | * Return to the context saved by setjmp_pre_handler | 555 | * Return to the context saved by setjmp_pre_handler |
416 | * and restored by longjmp_break_handler. | 556 | * and restored by longjmp_break_handler. |
417 | */ | 557 | */ |
558 | #ifdef CONFIG_THUMB2_KERNEL | ||
559 | "ldr lr, [sp, %2] \n\t" /* lr = saved sp */ | ||
560 | "ldrd r0, r1, [sp, %5] \n\t" /* r0,r1 = saved lr,pc */ | ||
561 | "ldr r2, [sp, %4] \n\t" /* r2 = saved psr */ | ||
562 | "stmdb lr!, {r0, r1, r2} \n\t" /* push saved lr and */ | ||
563 | /* rfe context */ | ||
564 | "ldmia sp, {r0 - r12} \n\t" | ||
565 | "mov sp, lr \n\t" | ||
566 | "ldr lr, [sp], #4 \n\t" | ||
567 | "rfeia sp! \n\t" | ||
568 | #else | ||
418 | "ldr r0, [sp, %4] \n\t" | 569 | "ldr r0, [sp, %4] \n\t" |
419 | "msr cpsr_cxsf, r0 \n\t" | 570 | "msr cpsr_cxsf, r0 \n\t" |
420 | "ldmia sp, {r0 - pc} \n\t" | 571 | "ldmia sp, {r0 - pc} \n\t" |
572 | #endif | ||
421 | : | 573 | : |
422 | : "r" (kcb->jprobe_saved_regs.ARM_sp), | 574 | : "r" (kcb->jprobe_saved_regs.ARM_sp), |
423 | "I" (sizeof(struct pt_regs) * 2), | 575 | "I" (sizeof(struct pt_regs) * 2), |
424 | "J" (offsetof(struct pt_regs, ARM_sp)), | 576 | "J" (offsetof(struct pt_regs, ARM_sp)), |
425 | "J" (offsetof(struct pt_regs, ARM_pc)), | 577 | "J" (offsetof(struct pt_regs, ARM_pc)), |
426 | "J" (offsetof(struct pt_regs, ARM_cpsr)) | 578 | "J" (offsetof(struct pt_regs, ARM_cpsr)), |
579 | "J" (offsetof(struct pt_regs, ARM_lr)) | ||
427 | : "memory", "cc"); | 580 | : "memory", "cc"); |
428 | } | 581 | } |
429 | 582 | ||
@@ -460,17 +613,44 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p) | |||
460 | return 0; | 613 | return 0; |
461 | } | 614 | } |
462 | 615 | ||
463 | static struct undef_hook kprobes_break_hook = { | 616 | #ifdef CONFIG_THUMB2_KERNEL |
617 | |||
618 | static struct undef_hook kprobes_thumb16_break_hook = { | ||
619 | .instr_mask = 0xffff, | ||
620 | .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION, | ||
621 | .cpsr_mask = MODE_MASK, | ||
622 | .cpsr_val = SVC_MODE, | ||
623 | .fn = kprobe_trap_handler, | ||
624 | }; | ||
625 | |||
626 | static struct undef_hook kprobes_thumb32_break_hook = { | ||
464 | .instr_mask = 0xffffffff, | 627 | .instr_mask = 0xffffffff, |
465 | .instr_val = KPROBE_BREAKPOINT_INSTRUCTION, | 628 | .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION, |
466 | .cpsr_mask = MODE_MASK, | 629 | .cpsr_mask = MODE_MASK, |
467 | .cpsr_val = SVC_MODE, | 630 | .cpsr_val = SVC_MODE, |
468 | .fn = kprobe_trap_handler, | 631 | .fn = kprobe_trap_handler, |
469 | }; | 632 | }; |
470 | 633 | ||
634 | #else /* !CONFIG_THUMB2_KERNEL */ | ||
635 | |||
636 | static struct undef_hook kprobes_arm_break_hook = { | ||
637 | .instr_mask = 0x0fffffff, | ||
638 | .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION, | ||
639 | .cpsr_mask = MODE_MASK, | ||
640 | .cpsr_val = SVC_MODE, | ||
641 | .fn = kprobe_trap_handler, | ||
642 | }; | ||
643 | |||
644 | #endif /* !CONFIG_THUMB2_KERNEL */ | ||
645 | |||
471 | int __init arch_init_kprobes() | 646 | int __init arch_init_kprobes() |
472 | { | 647 | { |
473 | arm_kprobe_decode_init(); | 648 | arm_kprobe_decode_init(); |
474 | register_undef_hook(&kprobes_break_hook); | 649 | #ifdef CONFIG_THUMB2_KERNEL |
650 | register_undef_hook(&kprobes_thumb16_break_hook); | ||
651 | register_undef_hook(&kprobes_thumb32_break_hook); | ||
652 | #else | ||
653 | register_undef_hook(&kprobes_arm_break_hook); | ||
654 | #endif | ||
475 | return 0; | 655 | return 0; |
476 | } | 656 | } |
diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h new file mode 100644 index 000000000000..a6aeda0a6c7f --- /dev/null +++ b/arch/arm/kernel/kprobes.h | |||
@@ -0,0 +1,420 @@ | |||
1 | /* | ||
2 | * arch/arm/kernel/kprobes.h | ||
3 | * | ||
4 | * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>. | ||
5 | * | ||
6 | * Some contents moved here from arch/arm/include/asm/kprobes.h which is | ||
7 | * Copyright (C) 2006, 2007 Motorola Inc. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
16 | * General Public License for more details. | ||
17 | */ | ||
18 | |||
19 | #ifndef _ARM_KERNEL_KPROBES_H | ||
20 | #define _ARM_KERNEL_KPROBES_H | ||
21 | |||
22 | /* | ||
23 | * These undefined instructions must be unique and | ||
24 | * reserved solely for kprobes' use. | ||
25 | */ | ||
26 | #define KPROBE_ARM_BREAKPOINT_INSTRUCTION 0x07f001f8 | ||
27 | #define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18 | ||
28 | #define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018 | ||
29 | |||
30 | |||
31 | enum kprobe_insn { | ||
32 | INSN_REJECTED, | ||
33 | INSN_GOOD, | ||
34 | INSN_GOOD_NO_SLOT | ||
35 | }; | ||
36 | |||
37 | typedef enum kprobe_insn (kprobe_decode_insn_t)(kprobe_opcode_t, | ||
38 | struct arch_specific_insn *); | ||
39 | |||
40 | #ifdef CONFIG_THUMB2_KERNEL | ||
41 | |||
42 | enum kprobe_insn thumb16_kprobe_decode_insn(kprobe_opcode_t, | ||
43 | struct arch_specific_insn *); | ||
44 | enum kprobe_insn thumb32_kprobe_decode_insn(kprobe_opcode_t, | ||
45 | struct arch_specific_insn *); | ||
46 | |||
47 | #else /* !CONFIG_THUMB2_KERNEL */ | ||
48 | |||
49 | enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, | ||
50 | struct arch_specific_insn *); | ||
51 | #endif | ||
52 | |||
53 | void __init arm_kprobe_decode_init(void); | ||
54 | |||
55 | extern kprobe_check_cc * const kprobe_condition_checks[16]; | ||
56 | |||
57 | |||
58 | #if __LINUX_ARM_ARCH__ >= 7 | ||
59 | |||
60 | /* str_pc_offset is architecturally defined from ARMv7 onwards */ | ||
61 | #define str_pc_offset 8 | ||
62 | #define find_str_pc_offset() | ||
63 | |||
64 | #else /* __LINUX_ARM_ARCH__ < 7 */ | ||
65 | |||
66 | /* We need a run-time check to determine str_pc_offset */ | ||
67 | extern int str_pc_offset; | ||
68 | void __init find_str_pc_offset(void); | ||
69 | |||
70 | #endif | ||
71 | |||
72 | |||
73 | /* | ||
74 | * Update ITSTATE after normal execution of an IT block instruction. | ||
75 | * | ||
76 | * The 8 IT state bits are split into two parts in CPSR: | ||
77 | * ITSTATE<1:0> are in CPSR<26:25> | ||
78 | * ITSTATE<7:2> are in CPSR<15:10> | ||
79 | */ | ||
80 | static inline unsigned long it_advance(unsigned long cpsr) | ||
81 | { | ||
82 | if ((cpsr & 0x06000400) == 0) { | ||
83 | /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ | ||
84 | cpsr &= ~PSR_IT_MASK; | ||
85 | } else { | ||
86 | /* We need to shift left ITSTATE<4:0> */ | ||
87 | const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ | ||
88 | unsigned long it = cpsr & mask; | ||
89 | it <<= 1; | ||
90 | it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ | ||
91 | it &= mask; | ||
92 | cpsr &= ~mask; | ||
93 | cpsr |= it; | ||
94 | } | ||
95 | return cpsr; | ||
96 | } | ||
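
The ITSTATE advance above is easier to follow with a concrete bit pattern. Below is a minimal standalone sketch repeating the same shuffle outside the kernel; PSR_IT_MASK is assumed to be 0x0600fc00 (the usual ARM ptrace definition), and the starting CPSR value is an arbitrary ITSTATE pattern chosen only to show the bits moving, not a meaningful IT block.

#include <stdio.h>

#define PSR_IT_MASK 0x0600fc00UL	/* assumed value of the kernel constant */

static unsigned long it_advance(unsigned long cpsr)
{
	if ((cpsr & 0x06000400) == 0) {
		/* ITSTATE<2:0> == 0: end of IT block, clear the IT state */
		cpsr &= ~PSR_IT_MASK;
	} else {
		/* Shift ITSTATE<4:0> left by one position */
		const unsigned long mask = 0x06001c00;
		unsigned long it = cpsr & mask;
		it <<= 1;
		it |= it >> (27 - 10);	/* carry the bit shifted out of CPSR<26> into CPSR<10> */
		it &= mask;
		cpsr = (cpsr & ~mask) | it;
	}
	return cpsr;
}

int main(void)
{
	unsigned long cpsr = 0x06000c00UL;	/* arbitrary ITSTATE bits set */
	int step = 0;

	while (cpsr & PSR_IT_MASK) {
		printf("step %d: cpsr = %#010lx\n", step++, cpsr);
		cpsr = it_advance(cpsr);
	}
	printf("done:   cpsr = %#010lx\n", cpsr);
	return 0;
}
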
97 | |||
98 | static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs) | ||
99 | { | ||
100 | long cpsr = regs->ARM_cpsr; | ||
101 | if (pcv & 0x1) { | ||
102 | cpsr |= PSR_T_BIT; | ||
103 | pcv &= ~0x1; | ||
104 | } else { | ||
105 | cpsr &= ~PSR_T_BIT; | ||
106 | pcv &= ~0x2; /* Avoid UNPREDICTABLE address alignment */ | ||
107 | } | ||
108 | regs->ARM_cpsr = cpsr; | ||
109 | regs->ARM_pc = pcv; | ||
110 | } | ||
111 | |||
112 | |||
113 | #if __LINUX_ARM_ARCH__ >= 6 | ||
114 | |||
115 | /* Kernels built for >= ARMv6 should never run on <= ARMv5 hardware, so... */ | ||
116 | #define load_write_pc_interworks true | ||
117 | #define test_load_write_pc_interworking() | ||
118 | |||
119 | #else /* __LINUX_ARM_ARCH__ < 6 */ | ||
120 | |||
121 | /* We need run-time testing to determine if load_write_pc() should interwork. */ | ||
122 | extern bool load_write_pc_interworks; | ||
123 | void __init test_load_write_pc_interworking(void); | ||
124 | |||
125 | #endif | ||
126 | |||
127 | static inline void __kprobes load_write_pc(long pcv, struct pt_regs *regs) | ||
128 | { | ||
129 | if (load_write_pc_interworks) | ||
130 | bx_write_pc(pcv, regs); | ||
131 | else | ||
132 | regs->ARM_pc = pcv; | ||
133 | } | ||
134 | |||
135 | |||
136 | #if __LINUX_ARM_ARCH__ >= 7 | ||
137 | |||
138 | #define alu_write_pc_interworks true | ||
139 | #define test_alu_write_pc_interworking() | ||
140 | |||
141 | #elif __LINUX_ARM_ARCH__ <= 5 | ||
142 | |||
143 | /* Kernels built for <= ARMv5 should never run on >= ARMv6 hardware, so... */ | ||
144 | #define alu_write_pc_interworks false | ||
145 | #define test_alu_write_pc_interworking() | ||
146 | |||
147 | #else /* __LINUX_ARM_ARCH__ == 6 */ | ||
148 | |||
149 | /* We could be an ARMv6 binary on ARMv7 hardware so we need a run-time check. */ | ||
150 | extern bool alu_write_pc_interworks; | ||
151 | void __init test_alu_write_pc_interworking(void); | ||
152 | |||
153 | #endif /* __LINUX_ARM_ARCH__ == 6 */ | ||
154 | |||
155 | static inline void __kprobes alu_write_pc(long pcv, struct pt_regs *regs) | ||
156 | { | ||
157 | if (alu_write_pc_interworks) | ||
158 | bx_write_pc(pcv, regs); | ||
159 | else | ||
160 | regs->ARM_pc = pcv; | ||
161 | } | ||
162 | |||
163 | |||
164 | void __kprobes kprobe_simulate_nop(struct kprobe *p, struct pt_regs *regs); | ||
165 | void __kprobes kprobe_emulate_none(struct kprobe *p, struct pt_regs *regs); | ||
166 | |||
167 | enum kprobe_insn __kprobes | ||
168 | kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi); | ||
169 | |||
170 | /* | ||
171 | * Test if load/store instructions writeback the address register. | ||
172 | * if P (bit 24) == 0 or W (bit 21) == 1 | ||
173 | */ | ||
174 | #define is_writeback(insn) ((insn ^ 0x01000000) & 0x01200000) | ||
175 | |||
176 | /* | ||
177 | * The following definitions and macros are used to build instruction | ||
178 | * decoding tables for use by kprobe_decode_insn. | ||
179 | * | ||
180 | * These tables are a concatenation of entries each of which consist of one of | ||
181 | * the decode_* structs. All of the fields in every type of decode structure | ||
182 | * are of the union type decode_item, therefore the entire decode table can be | ||
183 | * viewed as an array of these and declared like: | ||
184 | * | ||
185 | * static const union decode_item table_name[] = {}; | ||
186 | * | ||
187 | * In order to construct each entry in the table, macros are used to | ||
188 | * initialise a number of sequential decode_item values in a layout which | ||
189 | * matches the relevant struct. E.g. DECODE_SIMULATE initialises a struct | ||
190 | * decode_simulate by initialising four decode_item objects like this... | ||
191 | * | ||
192 | * {.bits = _type}, | ||
193 | * {.bits = _mask}, | ||
194 | * {.bits = _value}, | ||
195 | * {.handler = _handler}, | ||
196 | * | ||
197 | * Initialising a specified member of the union means that the compiler | ||
198 | * will produce a warning if the argument is of an incorrect type. | ||
199 | * | ||
200 | * Below is a list of each of the macros used to initialise entries and a | ||
201 | * description of the action performed when that entry is matched to an | ||
202 | * instruction. A match is found when (instruction & mask) == value. | ||
203 | * | ||
204 | * DECODE_TABLE(mask, value, table) | ||
205 | * Instruction decoding jumps to parsing the new sub-table 'table'. | ||
206 | * | ||
207 | * DECODE_CUSTOM(mask, value, decoder) | ||
208 | * The custom function 'decoder' is called to perform the complete decoding | ||
209 | * of an instruction. | ||
210 | * | ||
211 | * DECODE_SIMULATE(mask, value, handler) | ||
212 | * Set the probe's instruction handler to 'handler'; this will be used | ||
213 | * to simulate the instruction when the probe is hit. Decoding returns | ||
214 | * with INSN_GOOD_NO_SLOT. | ||
215 | * | ||
216 | * DECODE_EMULATE(mask, value, handler) | ||
217 | * Set the probe's instruction handler to 'handler'; this will be used | ||
218 | * to emulate the instruction when the probe is hit. The modified | ||
219 | * instruction (see below) is placed in the probe's instruction slot so it | ||
220 | * may be called by the emulation code. Decoding returns with INSN_GOOD. | ||
221 | * | ||
222 | * DECODE_REJECT(mask, value) | ||
223 | * Instruction decoding fails with INSN_REJECTED | ||
224 | * | ||
225 | * DECODE_OR(mask, value) | ||
226 | * This allows the mask/value test of multiple table entries to be | ||
227 | * logically ORed. Once an 'or' entry is matched the decoding action to | ||
228 | * be performed is that of the next entry which isn't an 'or'. E.g. | ||
229 | * | ||
230 | * DECODE_OR (mask1, value1) | ||
231 | * DECODE_OR (mask2, value2) | ||
232 | * DECODE_SIMULATE (mask3, value3, simulation_handler) | ||
233 | * | ||
234 | * This means that if any of the three mask/value pairs match the | ||
235 | * instruction being decoded, then 'simulation_handler' will be used | ||
236 | * for it. | ||
237 | * | ||
238 | * Both the SIMULATE and EMULATE macros have a second form which take an | ||
239 | * additional 'regs' argument. | ||
240 | * | ||
241 | * DECODE_SIMULATEX(mask, value, handler, regs) | ||
242 | * DECODE_EMULATEX (mask, value, handler, regs) | ||
243 | * | ||
244 | * These are used to specify what kind of CPU register is encoded in each of the | ||
245 | * least significant 5 nibbles of the instruction being decoded. The regs value | ||
246 | * is specified using the REGS macro, this takes any of the REG_TYPE_* values | ||
247 | * from enum decode_reg_type as arguments; only the '*' part of the name is | ||
248 | * given. E.g. | ||
249 | * | ||
250 | * REGS(0, ANY, NOPC, 0, ANY) | ||
251 | * | ||
252 | * This indicates an instruction is encoded like: | ||
253 | * | ||
254 | * bits 19..16 ignore | ||
255 | * bits 15..12 any register allowed here | ||
256 | * bits 11.. 8 any register except PC allowed here | ||
257 | * bits 7.. 4 ignore | ||
258 | * bits 3.. 0 any register allowed here | ||
259 | * | ||
260 | * This register specification is checked after a decode table entry is found to | ||
261 | * match an instruction (through the mask/value test). Any invalid register then | ||
262 | * found in the instruction will cause decoding to fail with INSN_REJECTED. In | ||
263 | * the above example this would happen if bits 11..8 of the instruction were | ||
264 | * 1111, indicating R15 or PC. | ||
265 | * | ||
266 | * As well as checking for legal combinations of registers, this data is also | ||
267 | * used to modify the registers encoded in the instructions so that the | ||
268 | * emulation routines can use it. (See decode_regs() and INSN_NEW_BITS.) | ||
269 | * | ||
270 | * Here is a real example which matches ARM instructions of the form | ||
271 | * "AND <Rd>,<Rn>,<Rm>,<shift> <Rs>" | ||
272 | * | ||
273 | * DECODE_EMULATEX (0x0e000090, 0x00000010, emulate_rd12rn16rm0rs8_rwflags, | ||
274 | * REGS(ANY, ANY, NOPC, 0, ANY)), | ||
275 | * ^ ^ ^ ^ | ||
276 | * Rn Rd Rs Rm | ||
277 | * | ||
278 | * Decoding the instruction "AND R4, R5, R6, ASL R15" will be rejected because | ||
279 | * Rs == R15 | ||
280 | * | ||
281 | * Decoding the instruction "AND R4, R5, R6, ASL R7" will be accepted and the | ||
282 | * instruction will be modified to "AND R0, R2, R3, ASL R1" and then placed into | ||
283 | * the kprobes instruction slot. This can then be called later by the handler | ||
284 | * function emulate_rd12rn16rm0rs8_rwflags in order to emulate the instruction. | ||
285 | */ | ||
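
To make the table layout described above concrete, here is a minimal standalone sketch of the mask/value decode idea. The struct, handler names and table entries are simplified stand-ins for illustration only, not the kernel's decode_* types or real kprobes handlers; the two encodings are ordinary ARM NOP and MOV-register patterns used purely as examples.

#include <stdio.h>
#include <stdint.h>

typedef void (insn_handler_t)(uint32_t insn);

struct decode_entry {
	uint32_t mask;
	uint32_t value;
	insn_handler_t *handler;	/* NULL terminates the table */
};

static void simulate_nop(uint32_t insn)
{
	printf("simulating NOP-like insn %#010lx\n", (unsigned long)insn);
}

static void simulate_mov(uint32_t insn)
{
	printf("simulating MOV-like insn %#010lx\n", (unsigned long)insn);
}

/* A match is found when (instruction & mask) == value */
static const struct decode_entry table[] = {
	{ 0x0fffffff, 0x0320f000, simulate_nop },	/* ARM NOP encoding */
	{ 0x0fef0000, 0x01a00000, simulate_mov },	/* MOV (register), no flags */
	{ 0, 0, NULL },
};

static int decode(uint32_t insn)
{
	const struct decode_entry *e;

	for (e = table; e->handler; e++) {
		if ((insn & e->mask) == e->value) {
			e->handler(insn);
			return 0;
		}
	}
	return -1;	/* corresponds to INSN_REJECTED in the real decoder */
}

int main(void)
{
	decode(0xe320f000);	/* NOP */
	decode(0xe1a01002);	/* MOV r1, r2 */
	return 0;
}
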
286 | |||
287 | enum decode_type { | ||
288 | DECODE_TYPE_END, | ||
289 | DECODE_TYPE_TABLE, | ||
290 | DECODE_TYPE_CUSTOM, | ||
291 | DECODE_TYPE_SIMULATE, | ||
292 | DECODE_TYPE_EMULATE, | ||
293 | DECODE_TYPE_OR, | ||
294 | DECODE_TYPE_REJECT, | ||
295 | NUM_DECODE_TYPES /* Must be last enum */ | ||
296 | }; | ||
297 | |||
298 | #define DECODE_TYPE_BITS 4 | ||
299 | #define DECODE_TYPE_MASK ((1 << DECODE_TYPE_BITS) - 1) | ||
300 | |||
301 | enum decode_reg_type { | ||
302 | REG_TYPE_NONE = 0, /* Not a register, ignore */ | ||
303 | REG_TYPE_ANY, /* Any register allowed */ | ||
304 | REG_TYPE_SAMEAS16, /* Register should be same as that at bits 19..16 */ | ||
305 | REG_TYPE_SP, /* Register must be SP */ | ||
306 | REG_TYPE_PC, /* Register must be PC */ | ||
307 | REG_TYPE_NOSP, /* Register must not be SP */ | ||
308 | REG_TYPE_NOSPPC, /* Register must not be SP or PC */ | ||
309 | REG_TYPE_NOPC, /* Register must not be PC */ | ||
310 | REG_TYPE_NOPCWB, /* No PC if load/store write-back flag also set */ | ||
311 | |||
312 | /* The following types are used when the encoding for PC indicates | ||
313 | * another instruction form. This distinction only matters for test | ||
314 | * case coverage checks. | ||
315 | */ | ||
316 | REG_TYPE_NOPCX, /* Register must not be PC */ | ||
317 | REG_TYPE_NOSPPCX, /* Register must not be SP or PC */ | ||
318 | |||
319 | /* Alias to allow '0' arg to be used in REGS macro. */ | ||
320 | REG_TYPE_0 = REG_TYPE_NONE | ||
321 | }; | ||
322 | |||
323 | #define REGS(r16, r12, r8, r4, r0) \ | ||
324 | ((REG_TYPE_##r16) << 16) + \ | ||
325 | ((REG_TYPE_##r12) << 12) + \ | ||
326 | ((REG_TYPE_##r8) << 8) + \ | ||
327 | ((REG_TYPE_##r4) << 4) + \ | ||
328 | (REG_TYPE_##r0) | ||
329 | |||
330 | union decode_item { | ||
331 | u32 bits; | ||
332 | const union decode_item *table; | ||
333 | kprobe_insn_handler_t *handler; | ||
334 | kprobe_decode_insn_t *decoder; | ||
335 | }; | ||
336 | |||
337 | |||
338 | #define DECODE_END \ | ||
339 | {.bits = DECODE_TYPE_END} | ||
340 | |||
341 | |||
342 | struct decode_header { | ||
343 | union decode_item type_regs; | ||
344 | union decode_item mask; | ||
345 | union decode_item value; | ||
346 | }; | ||
347 | |||
348 | #define DECODE_HEADER(_type, _mask, _value, _regs) \ | ||
349 | {.bits = (_type) | ((_regs) << DECODE_TYPE_BITS)}, \ | ||
350 | {.bits = (_mask)}, \ | ||
351 | {.bits = (_value)} | ||
352 | |||
353 | |||
354 | struct decode_table { | ||
355 | struct decode_header header; | ||
356 | union decode_item table; | ||
357 | }; | ||
358 | |||
359 | #define DECODE_TABLE(_mask, _value, _table) \ | ||
360 | DECODE_HEADER(DECODE_TYPE_TABLE, _mask, _value, 0), \ | ||
361 | {.table = (_table)} | ||
362 | |||
363 | |||
364 | struct decode_custom { | ||
365 | struct decode_header header; | ||
366 | union decode_item decoder; | ||
367 | }; | ||
368 | |||
369 | #define DECODE_CUSTOM(_mask, _value, _decoder) \ | ||
370 | DECODE_HEADER(DECODE_TYPE_CUSTOM, _mask, _value, 0), \ | ||
371 | {.decoder = (_decoder)} | ||
372 | |||
373 | |||
374 | struct decode_simulate { | ||
375 | struct decode_header header; | ||
376 | union decode_item handler; | ||
377 | }; | ||
378 | |||
379 | #define DECODE_SIMULATEX(_mask, _value, _handler, _regs) \ | ||
380 | DECODE_HEADER(DECODE_TYPE_SIMULATE, _mask, _value, _regs), \ | ||
381 | {.handler = (_handler)} | ||
382 | |||
383 | #define DECODE_SIMULATE(_mask, _value, _handler) \ | ||
384 | DECODE_SIMULATEX(_mask, _value, _handler, 0) | ||
385 | |||
386 | |||
387 | struct decode_emulate { | ||
388 | struct decode_header header; | ||
389 | union decode_item handler; | ||
390 | }; | ||
391 | |||
392 | #define DECODE_EMULATEX(_mask, _value, _handler, _regs) \ | ||
393 | DECODE_HEADER(DECODE_TYPE_EMULATE, _mask, _value, _regs), \ | ||
394 | {.handler = (_handler)} | ||
395 | |||
396 | #define DECODE_EMULATE(_mask, _value, _handler) \ | ||
397 | DECODE_EMULATEX(_mask, _value, _handler, 0) | ||
398 | |||
399 | |||
400 | struct decode_or { | ||
401 | struct decode_header header; | ||
402 | }; | ||
403 | |||
404 | #define DECODE_OR(_mask, _value) \ | ||
405 | DECODE_HEADER(DECODE_TYPE_OR, _mask, _value, 0) | ||
406 | |||
407 | |||
408 | struct decode_reject { | ||
409 | struct decode_header header; | ||
410 | }; | ||
411 | |||
412 | #define DECODE_REJECT(_mask, _value) \ | ||
413 | DECODE_HEADER(DECODE_TYPE_REJECT, _mask, _value, 0) | ||
414 | |||
415 | |||
416 | int kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, | ||
417 | const union decode_item *table, bool thumb16); | ||
418 | |||
419 | |||
420 | #endif /* _ARM_KERNEL_KPROBES_H */ | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 2b5b1421596c..53c9c2610cbc 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void) | |||
435 | if (irq >= 0) | 435 | if (irq >= 0) |
436 | free_irq(irq, NULL); | 436 | free_irq(irq, NULL); |
437 | } | 437 | } |
438 | release_pmu(pmu_device); | 438 | release_pmu(ARM_PMU_DEVICE_CPU); |
439 | pmu_device = NULL; | 439 | pmu_device = NULL; |
440 | } | 440 | } |
441 | 441 | ||
@@ -454,7 +454,7 @@ armpmu_release_hardware(void) | |||
454 | } | 454 | } |
455 | armpmu->stop(); | 455 | armpmu->stop(); |
456 | 456 | ||
457 | release_pmu(pmu_device); | 457 | release_pmu(ARM_PMU_DEVICE_CPU); |
458 | pmu_device = NULL; | 458 | pmu_device = NULL; |
459 | } | 459 | } |
460 | 460 | ||
@@ -662,6 +662,12 @@ init_hw_perf_events(void) | |||
662 | case 0xC090: /* Cortex-A9 */ | 662 | case 0xC090: /* Cortex-A9 */ |
663 | armpmu = armv7_a9_pmu_init(); | 663 | armpmu = armv7_a9_pmu_init(); |
664 | break; | 664 | break; |
665 | case 0xC050: /* Cortex-A5 */ | ||
666 | armpmu = armv7_a5_pmu_init(); | ||
667 | break; | ||
668 | case 0xC0F0: /* Cortex-A15 */ | ||
669 | armpmu = armv7_a15_pmu_init(); | ||
670 | break; | ||
665 | } | 671 | } |
666 | /* Intel CPUs [xscale]. */ | 672 | /* Intel CPUs [xscale]. */ |
667 | } else if (0x69 == implementor) { | 673 | } else if (0x69 == implementor) { |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index e20ca9cafef5..4c851834f68e 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -17,17 +17,23 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #ifdef CONFIG_CPU_V7 | 19 | #ifdef CONFIG_CPU_V7 |
20 | /* Common ARMv7 event types */ | 20 | /* |
21 | * Common ARMv7 event types | ||
22 | * | ||
23 | * Note: An implementation may not be able to count all of these events | ||
24 | * but the encodings are considered to be `reserved' in the case that | ||
25 | * they are not available. | ||
26 | */ | ||
21 | enum armv7_perf_types { | 27 | enum armv7_perf_types { |
22 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, | 28 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, |
23 | ARMV7_PERFCTR_IFETCH_MISS = 0x01, | 29 | ARMV7_PERFCTR_IFETCH_MISS = 0x01, |
24 | ARMV7_PERFCTR_ITLB_MISS = 0x02, | 30 | ARMV7_PERFCTR_ITLB_MISS = 0x02, |
25 | ARMV7_PERFCTR_DCACHE_REFILL = 0x03, | 31 | ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */ |
26 | ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, | 32 | ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */ |
27 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, | 33 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, |
28 | ARMV7_PERFCTR_DREAD = 0x06, | 34 | ARMV7_PERFCTR_DREAD = 0x06, |
29 | ARMV7_PERFCTR_DWRITE = 0x07, | 35 | ARMV7_PERFCTR_DWRITE = 0x07, |
30 | 36 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | |
31 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, | 37 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, |
32 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, | 38 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, |
33 | ARMV7_PERFCTR_CID_WRITE = 0x0B, | 39 | ARMV7_PERFCTR_CID_WRITE = 0x0B, |
@@ -39,21 +45,30 @@ enum armv7_perf_types { | |||
39 | */ | 45 | */ |
40 | ARMV7_PERFCTR_PC_WRITE = 0x0C, | 46 | ARMV7_PERFCTR_PC_WRITE = 0x0C, |
41 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, | 47 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, |
48 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | ||
42 | ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, | 49 | ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, |
50 | |||
51 | /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */ | ||
43 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | 52 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, |
44 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, | 53 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, |
45 | 54 | ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12, | |
46 | ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, | 55 | ARMV7_PERFCTR_MEM_ACCESS = 0x13, |
56 | ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14, | ||
57 | ARMV7_PERFCTR_L1_DCACHE_WB = 0x15, | ||
58 | ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16, | ||
59 | ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17, | ||
60 | ARMV7_PERFCTR_L2_DCACHE_WB = 0x18, | ||
61 | ARMV7_PERFCTR_BUS_ACCESS = 0x19, | ||
62 | ARMV7_PERFCTR_MEMORY_ERROR = 0x1A, | ||
63 | ARMV7_PERFCTR_INSTR_SPEC = 0x1B, | ||
64 | ARMV7_PERFCTR_TTBR_WRITE = 0x1C, | ||
65 | ARMV7_PERFCTR_BUS_CYCLES = 0x1D, | ||
47 | 66 | ||
48 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF | 67 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF |
49 | }; | 68 | }; |
50 | 69 | ||
51 | /* ARMv7 Cortex-A8 specific event types */ | 70 | /* ARMv7 Cortex-A8 specific event types */ |
52 | enum armv7_a8_perf_types { | 71 | enum armv7_a8_perf_types { |
53 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | ||
54 | |||
55 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | ||
56 | |||
57 | ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, | 72 | ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, |
58 | ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, | 73 | ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, |
59 | ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, | 74 | ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, |
@@ -138,6 +153,39 @@ enum armv7_a9_perf_types { | |||
138 | ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 | 153 | ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 |
139 | }; | 154 | }; |
140 | 155 | ||
156 | /* ARMv7 Cortex-A5 specific event types */ | ||
157 | enum armv7_a5_perf_types { | ||
158 | ARMV7_PERFCTR_IRQ_TAKEN = 0x86, | ||
159 | ARMV7_PERFCTR_FIQ_TAKEN = 0x87, | ||
160 | |||
161 | ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0, | ||
162 | ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1, | ||
163 | ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2, | ||
164 | ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3, | ||
165 | ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4, | ||
166 | ARMV7_PERFCTR_READ_ALLOC = 0xc5, | ||
167 | |||
168 | ARMV7_PERFCTR_STALL_SB_FULL = 0xc9, | ||
169 | }; | ||
170 | |||
171 | /* ARMv7 Cortex-A15 specific event types */ | ||
172 | enum armv7_a15_perf_types { | ||
173 | ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40, | ||
174 | ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41, | ||
175 | ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42, | ||
176 | ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43, | ||
177 | |||
178 | ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C, | ||
179 | ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D, | ||
180 | |||
181 | ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50, | ||
182 | ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51, | ||
183 | ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52, | ||
184 | ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53, | ||
185 | |||
186 | ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76, | ||
187 | }; | ||
188 | |||
141 | /* | 189 | /* |
142 | * Cortex-A8 HW events mapping | 190 | * Cortex-A8 HW events mapping |
143 | * | 191 | * |
@@ -207,11 +255,6 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
207 | }, | 255 | }, |
208 | }, | 256 | }, |
209 | [C(DTLB)] = { | 257 | [C(DTLB)] = { |
210 | /* | ||
211 | * Only ITLB misses and DTLB refills are supported. | ||
212 | * If users want the DTLB refills misses a raw counter | ||
213 | * must be used. | ||
214 | */ | ||
215 | [C(OP_READ)] = { | 258 | [C(OP_READ)] = { |
216 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 259 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
217 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | 260 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, |
@@ -337,11 +380,6 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
337 | }, | 380 | }, |
338 | }, | 381 | }, |
339 | [C(DTLB)] = { | 382 | [C(DTLB)] = { |
340 | /* | ||
341 | * Only ITLB misses and DTLB refills are supported. | ||
342 | * If users want the DTLB refills misses a raw counter | ||
343 | * must be used. | ||
344 | */ | ||
345 | [C(OP_READ)] = { | 383 | [C(OP_READ)] = { |
346 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | 384 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, |
347 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | 385 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, |
@@ -402,6 +440,242 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
402 | }; | 440 | }; |
403 | 441 | ||
404 | /* | 442 | /* |
443 | * Cortex-A5 HW events mapping | ||
444 | */ | ||
445 | static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { | ||
446 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
447 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
448 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
449 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
450 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
451 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
452 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
453 | }; | ||
454 | |||
455 | static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
456 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
457 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
458 | [C(L1D)] = { | ||
459 | [C(OP_READ)] = { | ||
460 | [C(RESULT_ACCESS)] | ||
461 | = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
462 | [C(RESULT_MISS)] | ||
463 | = ARMV7_PERFCTR_DCACHE_REFILL, | ||
464 | }, | ||
465 | [C(OP_WRITE)] = { | ||
466 | [C(RESULT_ACCESS)] | ||
467 | = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
468 | [C(RESULT_MISS)] | ||
469 | = ARMV7_PERFCTR_DCACHE_REFILL, | ||
470 | }, | ||
471 | [C(OP_PREFETCH)] = { | ||
472 | [C(RESULT_ACCESS)] | ||
473 | = ARMV7_PERFCTR_PREFETCH_LINEFILL, | ||
474 | [C(RESULT_MISS)] | ||
475 | = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, | ||
476 | }, | ||
477 | }, | ||
478 | [C(L1I)] = { | ||
479 | [C(OP_READ)] = { | ||
480 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | ||
481 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
482 | }, | ||
483 | [C(OP_WRITE)] = { | ||
484 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | ||
485 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
486 | }, | ||
487 | /* | ||
488 | * The prefetch counters don't differentiate between the I | ||
489 | * side and the D side. | ||
490 | */ | ||
491 | [C(OP_PREFETCH)] = { | ||
492 | [C(RESULT_ACCESS)] | ||
493 | = ARMV7_PERFCTR_PREFETCH_LINEFILL, | ||
494 | [C(RESULT_MISS)] | ||
495 | = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, | ||
496 | }, | ||
497 | }, | ||
498 | [C(LL)] = { | ||
499 | [C(OP_READ)] = { | ||
500 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
501 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
502 | }, | ||
503 | [C(OP_WRITE)] = { | ||
504 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
505 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
506 | }, | ||
507 | [C(OP_PREFETCH)] = { | ||
508 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
509 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
510 | }, | ||
511 | }, | ||
512 | [C(DTLB)] = { | ||
513 | [C(OP_READ)] = { | ||
514 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
515 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
516 | }, | ||
517 | [C(OP_WRITE)] = { | ||
518 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
519 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
520 | }, | ||
521 | [C(OP_PREFETCH)] = { | ||
522 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
523 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
524 | }, | ||
525 | }, | ||
526 | [C(ITLB)] = { | ||
527 | [C(OP_READ)] = { | ||
528 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
529 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
530 | }, | ||
531 | [C(OP_WRITE)] = { | ||
532 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
533 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
534 | }, | ||
535 | [C(OP_PREFETCH)] = { | ||
536 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
537 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
538 | }, | ||
539 | }, | ||
540 | [C(BPU)] = { | ||
541 | [C(OP_READ)] = { | ||
542 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | ||
543 | [C(RESULT_MISS)] | ||
544 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
545 | }, | ||
546 | [C(OP_WRITE)] = { | ||
547 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | ||
548 | [C(RESULT_MISS)] | ||
549 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
550 | }, | ||
551 | [C(OP_PREFETCH)] = { | ||
552 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
553 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
554 | }, | ||
555 | }, | ||
556 | }; | ||
557 | |||
558 | /* | ||
559 | * Cortex-A15 HW events mapping | ||
560 | */ | ||
561 | static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { | ||
562 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
563 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
564 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
565 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
566 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE, | ||
567 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
568 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | ||
569 | }; | ||
570 | |||
571 | static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
572 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
573 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
574 | [C(L1D)] = { | ||
575 | [C(OP_READ)] = { | ||
576 | [C(RESULT_ACCESS)] | ||
577 | = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS, | ||
578 | [C(RESULT_MISS)] | ||
579 | = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL, | ||
580 | }, | ||
581 | [C(OP_WRITE)] = { | ||
582 | [C(RESULT_ACCESS)] | ||
583 | = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS, | ||
584 | [C(RESULT_MISS)] | ||
585 | = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL, | ||
586 | }, | ||
587 | [C(OP_PREFETCH)] = { | ||
588 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
589 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
590 | }, | ||
591 | }, | ||
592 | [C(L1I)] = { | ||
593 | /* | ||
594 | * Not all performance counters differentiate between read | ||
595 | * and write accesses/misses so we're not always strictly | ||
596 | * correct, but it's the best we can do. Writes and reads get | ||
597 | * combined in these cases. | ||
598 | */ | ||
599 | [C(OP_READ)] = { | ||
600 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | ||
601 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
602 | }, | ||
603 | [C(OP_WRITE)] = { | ||
604 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | ||
605 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
606 | }, | ||
607 | [C(OP_PREFETCH)] = { | ||
608 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
609 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
610 | }, | ||
611 | }, | ||
612 | [C(LL)] = { | ||
613 | [C(OP_READ)] = { | ||
614 | [C(RESULT_ACCESS)] | ||
615 | = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS, | ||
616 | [C(RESULT_MISS)] | ||
617 | = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL, | ||
618 | }, | ||
619 | [C(OP_WRITE)] = { | ||
620 | [C(RESULT_ACCESS)] | ||
621 | = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS, | ||
622 | [C(RESULT_MISS)] | ||
623 | = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL, | ||
624 | }, | ||
625 | [C(OP_PREFETCH)] = { | ||
626 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
627 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
628 | }, | ||
629 | }, | ||
630 | [C(DTLB)] = { | ||
631 | [C(OP_READ)] = { | ||
632 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
633 | [C(RESULT_MISS)] | ||
634 | = ARMV7_PERFCTR_L1_DTLB_READ_REFILL, | ||
635 | }, | ||
636 | [C(OP_WRITE)] = { | ||
637 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
638 | [C(RESULT_MISS)] | ||
639 | = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL, | ||
640 | }, | ||
641 | [C(OP_PREFETCH)] = { | ||
642 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
643 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
644 | }, | ||
645 | }, | ||
646 | [C(ITLB)] = { | ||
647 | [C(OP_READ)] = { | ||
648 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
649 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
650 | }, | ||
651 | [C(OP_WRITE)] = { | ||
652 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
653 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
654 | }, | ||
655 | [C(OP_PREFETCH)] = { | ||
656 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
657 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
658 | }, | ||
659 | }, | ||
660 | [C(BPU)] = { | ||
661 | [C(OP_READ)] = { | ||
662 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | ||
663 | [C(RESULT_MISS)] | ||
664 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
665 | }, | ||
666 | [C(OP_WRITE)] = { | ||
667 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | ||
668 | [C(RESULT_MISS)] | ||
669 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
670 | }, | ||
671 | [C(OP_PREFETCH)] = { | ||
672 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
673 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
674 | }, | ||
675 | }, | ||
676 | }; | ||
677 | |||
678 | /* | ||
405 | * Perf Events counters | 679 | * Perf Events counters |
406 | */ | 680 | */ |
407 | enum armv7_counters { | 681 | enum armv7_counters { |
@@ -933,6 +1207,26 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) | |||
933 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1207 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
934 | return &armv7pmu; | 1208 | return &armv7pmu; |
935 | } | 1209 | } |
1210 | |||
1211 | static const struct arm_pmu *__init armv7_a5_pmu_init(void) | ||
1212 | { | ||
1213 | armv7pmu.id = ARM_PERF_PMU_ID_CA5; | ||
1214 | armv7pmu.name = "ARMv7 Cortex-A5"; | ||
1215 | armv7pmu.cache_map = &armv7_a5_perf_cache_map; | ||
1216 | armv7pmu.event_map = &armv7_a5_perf_map; | ||
1217 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | ||
1218 | return &armv7pmu; | ||
1219 | } | ||
1220 | |||
1221 | static const struct arm_pmu *__init armv7_a15_pmu_init(void) | ||
1222 | { | ||
1223 | armv7pmu.id = ARM_PERF_PMU_ID_CA15; | ||
1224 | armv7pmu.name = "ARMv7 Cortex-A15"; | ||
1225 | armv7pmu.cache_map = &armv7_a15_perf_cache_map; | ||
1226 | armv7pmu.event_map = &armv7_a15_perf_map; | ||
1227 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | ||
1228 | return &armv7pmu; | ||
1229 | } | ||
936 | #else | 1230 | #else |
937 | static const struct arm_pmu *__init armv7_a8_pmu_init(void) | 1231 | static const struct arm_pmu *__init armv7_a8_pmu_init(void) |
938 | { | 1232 | { |
@@ -943,4 +1237,14 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) | |||
943 | { | 1237 | { |
944 | return NULL; | 1238 | return NULL; |
945 | } | 1239 | } |
1240 | |||
1241 | static const struct arm_pmu *__init armv7_a5_pmu_init(void) | ||
1242 | { | ||
1243 | return NULL; | ||
1244 | } | ||
1245 | |||
1246 | static const struct arm_pmu *__init armv7_a15_pmu_init(void) | ||
1247 | { | ||
1248 | return NULL; | ||
1249 | } | ||
946 | #endif /* CONFIG_CPU_V7 */ | 1250 | #endif /* CONFIG_CPU_V7 */ |
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c79eec19262..2b70709376c3 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/of_device.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | 22 | ||
22 | #include <asm/pmu.h> | 23 | #include <asm/pmu.h> |
@@ -25,36 +26,88 @@ static volatile long pmu_lock; | |||
25 | 26 | ||
26 | static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; | 27 | static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; |
27 | 28 | ||
28 | static int __devinit pmu_device_probe(struct platform_device *pdev) | 29 | static int __devinit pmu_register(struct platform_device *pdev, |
30 | enum arm_pmu_type type) | ||
29 | { | 31 | { |
30 | 32 | if (type < 0 || type >= ARM_NUM_PMU_DEVICES) { | |
31 | if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) { | ||
32 | pr_warning("received registration request for unknown " | 33 | pr_warning("received registration request for unknown " |
33 | "device %d\n", pdev->id); | 34 | "device %d\n", type); |
34 | return -EINVAL; | 35 | return -EINVAL; |
35 | } | 36 | } |
36 | 37 | ||
37 | if (pmu_devices[pdev->id]) | 38 | if (pmu_devices[type]) { |
38 | pr_warning("registering new PMU device type %d overwrites " | 39 | pr_warning("rejecting duplicate registration of PMU device " |
39 | "previous registration!\n", pdev->id); | 40 | "type %d.", type); |
40 | else | 41 | return -ENOSPC; |
41 | pr_info("registered new PMU device of type %d\n", | 42 | } |
42 | pdev->id); | ||
43 | 43 | ||
44 | pmu_devices[pdev->id] = pdev; | 44 | pr_info("registered new PMU device of type %d\n", type); |
45 | pmu_devices[type] = pdev; | ||
45 | return 0; | 46 | return 0; |
46 | } | 47 | } |
47 | 48 | ||
48 | static struct platform_driver pmu_driver = { | 49 | #define OF_MATCH_PMU(_name, _type) { \ |
50 | .compatible = _name, \ | ||
51 | .data = (void *)_type, \ | ||
52 | } | ||
53 | |||
54 | #define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU) | ||
55 | |||
56 | static struct of_device_id armpmu_of_device_ids[] = { | ||
57 | OF_MATCH_CPU("arm,cortex-a9-pmu"), | ||
58 | OF_MATCH_CPU("arm,cortex-a8-pmu"), | ||
59 | OF_MATCH_CPU("arm,arm1136-pmu"), | ||
60 | OF_MATCH_CPU("arm,arm1176-pmu"), | ||
61 | {}, | ||
62 | }; | ||
63 | |||
64 | #define PLAT_MATCH_PMU(_name, _type) { \ | ||
65 | .name = _name, \ | ||
66 | .driver_data = _type, \ | ||
67 | } | ||
68 | |||
69 | #define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU) | ||
70 | |||
71 | static struct platform_device_id armpmu_plat_device_ids[] = { | ||
72 | PLAT_MATCH_CPU("arm-pmu"), | ||
73 | {}, | ||
74 | }; | ||
75 | |||
76 | enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) | ||
77 | { | ||
78 | const struct of_device_id *of_id; | ||
79 | const struct platform_device_id *pdev_id; | ||
80 | |||
81 | /* provided by of_device_id table */ | ||
82 | if (pdev->dev.of_node) { | ||
83 | of_id = of_match_device(armpmu_of_device_ids, &pdev->dev); | ||
84 | BUG_ON(!of_id); | ||
85 | return (enum arm_pmu_type)of_id->data; | ||
86 | } | ||
87 | |||
88 | /* Provided by platform_device_id table */ | ||
89 | pdev_id = platform_get_device_id(pdev); | ||
90 | BUG_ON(!pdev_id); | ||
91 | return pdev_id->driver_data; | ||
92 | } | ||
93 | |||
94 | static int __devinit armpmu_device_probe(struct platform_device *pdev) | ||
95 | { | ||
96 | return pmu_register(pdev, armpmu_device_type(pdev)); | ||
97 | } | ||
98 | |||
99 | static struct platform_driver armpmu_driver = { | ||
49 | .driver = { | 100 | .driver = { |
50 | .name = "arm-pmu", | 101 | .name = "arm-pmu", |
102 | .of_match_table = armpmu_of_device_ids, | ||
51 | }, | 103 | }, |
52 | .probe = pmu_device_probe, | 104 | .probe = armpmu_device_probe, |
105 | .id_table = armpmu_plat_device_ids, | ||
53 | }; | 106 | }; |
54 | 107 | ||
55 | static int __init register_pmu_driver(void) | 108 | static int __init register_pmu_driver(void) |
56 | { | 109 | { |
57 | return platform_driver_register(&pmu_driver); | 110 | return platform_driver_register(&armpmu_driver); |
58 | } | 111 | } |
59 | device_initcall(register_pmu_driver); | 112 | device_initcall(register_pmu_driver); |
60 | 113 | ||
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device) | |||
77 | EXPORT_SYMBOL_GPL(reserve_pmu); | 130 | EXPORT_SYMBOL_GPL(reserve_pmu); |
78 | 131 | ||
79 | int | 132 | int |
80 | release_pmu(struct platform_device *pdev) | 133 | release_pmu(enum arm_pmu_type device) |
81 | { | 134 | { |
82 | if (WARN_ON(pdev != pmu_devices[pdev->id])) | 135 | if (WARN_ON(!pmu_devices[device])) |
83 | return -EINVAL; | 136 | return -EINVAL; |
84 | clear_bit_unlock(pdev->id, &pmu_lock); | 137 | clear_bit_unlock(device, &pmu_lock); |
85 | return 0; | 138 | return 0; |
86 | } | 139 | } |
87 | EXPORT_SYMBOL_GPL(release_pmu); | 140 | EXPORT_SYMBOL_GPL(release_pmu); |
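
For context on how the OF and platform id tables above get used: a board without device tree support would register the CPU PMU as a plain "arm-pmu" platform device, roughly as sketched below. This is a hypothetical board-file fragment; the IRQ number is made up, and real platforms typically do this from their machine init code rather than a separate initcall.

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource board_pmu_resource = {
	.start	= 100,			/* hypothetical PMU IRQ for this board */
	.end	= 100,
	.flags	= IORESOURCE_IRQ,
};

static struct platform_device board_pmu_device = {
	.name		= "arm-pmu",	/* matched via armpmu_plat_device_ids */
	.id		= -1,		/* type now comes from the id table, not pdev->id */
	.resource	= &board_pmu_resource,
	.num_resources	= 1,
};

static int __init board_pmu_init(void)
{
	return platform_device_register(&board_pmu_device);
}
device_initcall(board_pmu_init);
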
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 5c199610719f..2491f3b406bc 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -228,34 +228,12 @@ static struct undef_hook thumb_break_hook = { | |||
228 | .fn = break_trap, | 228 | .fn = break_trap, |
229 | }; | 229 | }; |
230 | 230 | ||
231 | static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) | ||
232 | { | ||
233 | unsigned int instr2; | ||
234 | void __user *pc; | ||
235 | |||
236 | /* Check the second half of the instruction. */ | ||
237 | pc = (void __user *)(instruction_pointer(regs) + 2); | ||
238 | |||
239 | if (processor_mode(regs) == SVC_MODE) { | ||
240 | instr2 = *(u16 *) pc; | ||
241 | } else { | ||
242 | get_user(instr2, (u16 __user *)pc); | ||
243 | } | ||
244 | |||
245 | if (instr2 == 0xa000) { | ||
246 | ptrace_break(current, regs); | ||
247 | return 0; | ||
248 | } else { | ||
249 | return 1; | ||
250 | } | ||
251 | } | ||
252 | |||
253 | static struct undef_hook thumb2_break_hook = { | 231 | static struct undef_hook thumb2_break_hook = { |
254 | .instr_mask = 0xffff, | 232 | .instr_mask = 0xffffffff, |
255 | .instr_val = 0xf7f0, | 233 | .instr_val = 0xf7f0a000, |
256 | .cpsr_mask = PSR_T_BIT, | 234 | .cpsr_mask = PSR_T_BIT, |
257 | .cpsr_val = PSR_T_BIT, | 235 | .cpsr_val = PSR_T_BIT, |
258 | .fn = thumb2_break_trap, | 236 | .fn = break_trap, |
259 | }; | 237 | }; |
260 | 238 | ||
261 | static int __init ptrace_break_init(void) | 239 | static int __init ptrace_break_init(void) |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index acbb447ac6b5..70bca649e925 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -343,54 +343,6 @@ static void __init feat_v6_fixup(void) | |||
343 | elf_hwcap &= ~HWCAP_TLS; | 343 | elf_hwcap &= ~HWCAP_TLS; |
344 | } | 344 | } |
345 | 345 | ||
346 | static void __init setup_processor(void) | ||
347 | { | ||
348 | struct proc_info_list *list; | ||
349 | |||
350 | /* | ||
351 | * locate processor in the list of supported processor | ||
352 | * types. The linker builds this table for us from the | ||
353 | * entries in arch/arm/mm/proc-*.S | ||
354 | */ | ||
355 | list = lookup_processor_type(read_cpuid_id()); | ||
356 | if (!list) { | ||
357 | printk("CPU configuration botched (ID %08x), unable " | ||
358 | "to continue.\n", read_cpuid_id()); | ||
359 | while (1); | ||
360 | } | ||
361 | |||
362 | cpu_name = list->cpu_name; | ||
363 | |||
364 | #ifdef MULTI_CPU | ||
365 | processor = *list->proc; | ||
366 | #endif | ||
367 | #ifdef MULTI_TLB | ||
368 | cpu_tlb = *list->tlb; | ||
369 | #endif | ||
370 | #ifdef MULTI_USER | ||
371 | cpu_user = *list->user; | ||
372 | #endif | ||
373 | #ifdef MULTI_CACHE | ||
374 | cpu_cache = *list->cache; | ||
375 | #endif | ||
376 | |||
377 | printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", | ||
378 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, | ||
379 | proc_arch[cpu_architecture()], cr_alignment); | ||
380 | |||
381 | sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); | ||
382 | sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); | ||
383 | elf_hwcap = list->elf_hwcap; | ||
384 | #ifndef CONFIG_ARM_THUMB | ||
385 | elf_hwcap &= ~HWCAP_THUMB; | ||
386 | #endif | ||
387 | |||
388 | feat_v6_fixup(); | ||
389 | |||
390 | cacheid_init(); | ||
391 | cpu_proc_init(); | ||
392 | } | ||
393 | |||
394 | /* | 346 | /* |
395 | * cpu_init - initialise one CPU. | 347 | * cpu_init - initialise one CPU. |
396 | * | 348 | * |
@@ -406,6 +358,8 @@ void cpu_init(void) | |||
406 | BUG(); | 358 | BUG(); |
407 | } | 359 | } |
408 | 360 | ||
361 | cpu_proc_init(); | ||
362 | |||
409 | /* | 363 | /* |
410 | * Define the placement constraint for the inline asm directive below. | 364 | * Define the placement constraint for the inline asm directive below. |
411 | * In Thumb-2, msr with an immediate value is not allowed. | 365 | * In Thumb-2, msr with an immediate value is not allowed. |
@@ -442,6 +396,54 @@ void cpu_init(void) | |||
442 | : "r14"); | 396 | : "r14"); |
443 | } | 397 | } |
444 | 398 | ||
399 | static void __init setup_processor(void) | ||
400 | { | ||
401 | struct proc_info_list *list; | ||
402 | |||
403 | /* | ||
404 | * locate processor in the list of supported processor | ||
405 | * types. The linker builds this table for us from the | ||
406 | * entries in arch/arm/mm/proc-*.S | ||
407 | */ | ||
408 | list = lookup_processor_type(read_cpuid_id()); | ||
409 | if (!list) { | ||
410 | printk("CPU configuration botched (ID %08x), unable " | ||
411 | "to continue.\n", read_cpuid_id()); | ||
412 | while (1); | ||
413 | } | ||
414 | |||
415 | cpu_name = list->cpu_name; | ||
416 | |||
417 | #ifdef MULTI_CPU | ||
418 | processor = *list->proc; | ||
419 | #endif | ||
420 | #ifdef MULTI_TLB | ||
421 | cpu_tlb = *list->tlb; | ||
422 | #endif | ||
423 | #ifdef MULTI_USER | ||
424 | cpu_user = *list->user; | ||
425 | #endif | ||
426 | #ifdef MULTI_CACHE | ||
427 | cpu_cache = *list->cache; | ||
428 | #endif | ||
429 | |||
430 | printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", | ||
431 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, | ||
432 | proc_arch[cpu_architecture()], cr_alignment); | ||
433 | |||
434 | sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); | ||
435 | sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); | ||
436 | elf_hwcap = list->elf_hwcap; | ||
437 | #ifndef CONFIG_ARM_THUMB | ||
438 | elf_hwcap &= ~HWCAP_THUMB; | ||
439 | #endif | ||
440 | |||
441 | feat_v6_fixup(); | ||
442 | |||
443 | cacheid_init(); | ||
444 | cpu_init(); | ||
445 | } | ||
446 | |||
445 | void __init dump_machine_table(void) | 447 | void __init dump_machine_table(void) |
446 | { | 448 | { |
447 | struct machine_desc *p; | 449 | struct machine_desc *p; |
@@ -915,9 +917,14 @@ void __init setup_arch(char **cmdline_p) | |||
915 | #endif | 917 | #endif |
916 | reserve_crashkernel(); | 918 | reserve_crashkernel(); |
917 | 919 | ||
918 | cpu_init(); | ||
919 | tcm_init(); | 920 | tcm_init(); |
920 | 921 | ||
922 | #ifdef CONFIG_ZONE_DMA | ||
923 | if (mdesc->dma_zone_size) { | ||
924 | extern unsigned long arm_dma_zone_size; | ||
925 | arm_dma_zone_size = mdesc->dma_zone_size; | ||
926 | } | ||
927 | #endif | ||
921 | #ifdef CONFIG_MULTI_IRQ_HANDLER | 928 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
922 | handle_arch_irq = mdesc->handle_irq; | 929 | handle_arch_irq = mdesc->handle_irq; |
923 | #endif | 930 | #endif |
@@ -979,6 +986,10 @@ static const char *hwcap_str[] = { | |||
979 | "neon", | 986 | "neon", |
980 | "vfpv3", | 987 | "vfpv3", |
981 | "vfpv3d16", | 988 | "vfpv3d16", |
989 | "tls", | ||
990 | "vfpv4", | ||
991 | "idiva", | ||
992 | "idivt", | ||
982 | NULL | 993 | NULL |
983 | }; | 994 | }; |
984 | 995 | ||
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 6398ead9d1c0..dc902f2c6845 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S | |||
@@ -10,64 +10,61 @@ | |||
10 | /* | 10 | /* |
11 | * Save CPU state for a suspend | 11 | * Save CPU state for a suspend |
12 | * r1 = v:p offset | 12 | * r1 = v:p offset |
13 | * r3 = virtual return function | 13 | * r2 = suspend function arg0 |
14 | * Note: sp is decremented to allocate space for CPU state on stack | 14 | * r3 = suspend function |
15 | * r0-r3,r9,r10,lr corrupted | ||
16 | */ | 15 | */ |
17 | ENTRY(cpu_suspend) | 16 | ENTRY(__cpu_suspend) |
18 | mov r9, lr | 17 | stmfd sp!, {r4 - r11, lr} |
19 | #ifdef MULTI_CPU | 18 | #ifdef MULTI_CPU |
20 | ldr r10, =processor | 19 | ldr r10, =processor |
21 | mov r2, sp @ current virtual SP | 20 | ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state |
22 | ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state | ||
23 | ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function | 21 | ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function |
24 | sub sp, sp, r0 @ allocate CPU state on stack | 22 | #else |
25 | mov r0, sp @ save pointer | 23 | ldr r5, =cpu_suspend_size |
24 | ldr ip, =cpu_do_resume | ||
25 | #endif | ||
26 | mov r6, sp @ current virtual SP | ||
27 | sub sp, sp, r5 @ allocate CPU state on stack | ||
28 | mov r0, sp @ save pointer to CPU save block | ||
26 | add ip, ip, r1 @ convert resume fn to phys | 29 | add ip, ip, r1 @ convert resume fn to phys |
27 | stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn | 30 | stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn |
28 | ldr r3, =sleep_save_sp | 31 | ldr r5, =sleep_save_sp |
29 | add r2, sp, r1 @ convert SP to phys | 32 | add r6, sp, r1 @ convert SP to phys |
33 | stmfd sp!, {r2, r3} @ save suspend func arg and pointer | ||
30 | #ifdef CONFIG_SMP | 34 | #ifdef CONFIG_SMP |
31 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) | 35 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) |
32 | ALT_UP(mov lr, #0) | 36 | ALT_UP(mov lr, #0) |
33 | and lr, lr, #15 | 37 | and lr, lr, #15 |
34 | str r2, [r3, lr, lsl #2] @ save phys SP | 38 | str r6, [r5, lr, lsl #2] @ save phys SP |
35 | #else | 39 | #else |
36 | str r2, [r3] @ save phys SP | 40 | str r6, [r5] @ save phys SP |
37 | #endif | 41 | #endif |
42 | #ifdef MULTI_CPU | ||
38 | mov lr, pc | 43 | mov lr, pc |
39 | ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state | 44 | ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state |
40 | #else | 45 | #else |
41 | mov r2, sp @ current virtual SP | ||
42 | ldr r0, =cpu_suspend_size | ||
43 | sub sp, sp, r0 @ allocate CPU state on stack | ||
44 | mov r0, sp @ save pointer | ||
45 | stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn | ||
46 | ldr r3, =sleep_save_sp | ||
47 | add r2, sp, r1 @ convert SP to phys | ||
48 | #ifdef CONFIG_SMP | ||
49 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) | ||
50 | ALT_UP(mov lr, #0) | ||
51 | and lr, lr, #15 | ||
52 | str r2, [r3, lr, lsl #2] @ save phys SP | ||
53 | #else | ||
54 | str r2, [r3] @ save phys SP | ||
55 | #endif | ||
56 | bl cpu_do_suspend | 46 | bl cpu_do_suspend |
57 | #endif | 47 | #endif |
58 | 48 | ||
59 | @ flush data cache | 49 | @ flush data cache |
60 | #ifdef MULTI_CACHE | 50 | #ifdef MULTI_CACHE |
61 | ldr r10, =cpu_cache | 51 | ldr r10, =cpu_cache |
62 | mov lr, r9 | 52 | mov lr, pc |
63 | ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] | 53 | ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] |
64 | #else | 54 | #else |
65 | mov lr, r9 | 55 | bl __cpuc_flush_kern_all |
66 | b __cpuc_flush_kern_all | ||
67 | #endif | 56 | #endif |
68 | ENDPROC(cpu_suspend) | 57 | adr lr, BSYM(cpu_suspend_abort) |
58 | ldmfd sp!, {r0, pc} @ call suspend fn | ||
59 | ENDPROC(__cpu_suspend) | ||
69 | .ltorg | 60 | .ltorg |
70 | 61 | ||
62 | cpu_suspend_abort: | ||
63 | ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn | ||
64 | mov sp, r2 | ||
65 | ldmfd sp!, {r4 - r11, pc} | ||
66 | ENDPROC(cpu_suspend_abort) | ||
67 | |||
71 | /* | 68 | /* |
72 | * r0 = control register value | 69 | * r0 = control register value |
73 | * r1 = v:p offset (preserved by cpu_do_resume) | 70 | * r1 = v:p offset (preserved by cpu_do_resume) |
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on) | |||
97 | cpu_resume_after_mmu: | 94 | cpu_resume_after_mmu: |
98 | str r5, [r2, r4, lsl #2] @ restore old mapping | 95 | str r5, [r2, r4, lsl #2] @ restore old mapping |
99 | mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache | 96 | mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache |
100 | mov pc, lr | 97 | bl cpu_init @ restore the und/abt/irq banked regs |
98 | mov r0, #0 @ return zero on success | ||
99 | ldmfd sp!, {r4 - r11, pc} | ||
101 | ENDPROC(cpu_resume_after_mmu) | 100 | ENDPROC(cpu_resume_after_mmu) |
102 | 101 | ||
103 | /* | 102 | /* |
@@ -120,20 +119,11 @@ ENTRY(cpu_resume) | |||
120 | ldr r0, sleep_save_sp @ stack phys addr | 119 | ldr r0, sleep_save_sp @ stack phys addr |
121 | #endif | 120 | #endif |
122 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off | 121 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off |
123 | #ifdef MULTI_CPU | 122 | @ load v:p, stack, resume fn |
124 | @ load v:p, stack, return fn, resume fn | 123 | ARM( ldmia r0!, {r1, sp, pc} ) |
125 | ARM( ldmia r0!, {r1, sp, lr, pc} ) | 124 | THUMB( ldmia r0!, {r1, r2, r3} ) |
126 | THUMB( ldmia r0!, {r1, r2, r3, r4} ) | ||
127 | THUMB( mov sp, r2 ) | 125 | THUMB( mov sp, r2 ) |
128 | THUMB( mov lr, r3 ) | 126 | THUMB( bx r3 ) |
129 | THUMB( bx r4 ) | ||
130 | #else | ||
131 | @ load v:p, stack, return fn | ||
132 | ARM( ldmia r0!, {r1, sp, lr} ) | ||
133 | THUMB( ldmia r0!, {r1, r2, lr} ) | ||
134 | THUMB( mov sp, r2 ) | ||
135 | b cpu_do_resume | ||
136 | #endif | ||
137 | ENDPROC(cpu_resume) | 127 | ENDPROC(cpu_resume) |
138 | 128 | ||
139 | sleep_save_sp: | 129 | sleep_save_sp: |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index e7f92a4321f3..167e3cbe1f2f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -365,8 +365,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
365 | */ | 365 | */ |
366 | if (max_cpus > ncores) | 366 | if (max_cpus > ncores) |
367 | max_cpus = ncores; | 367 | max_cpus = ncores; |
368 | 368 | if (ncores > 1 && max_cpus) { | |
369 | if (max_cpus > 1) { | ||
370 | /* | 369 | /* |
371 | * Enable the local timer or broadcast device for the | 370 | * Enable the local timer or broadcast device for the |
372 | * boot CPU, but only if we have more than one CPU. | 371 | * boot CPU, but only if we have more than one CPU. |
@@ -374,6 +373,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
374 | percpu_timer_setup(); | 373 | percpu_timer_setup(); |
375 | 374 | ||
376 | /* | 375 | /* |
376 | * Initialise the present map, which describes the set of CPUs | ||
377 | * actually populated at the present time. A platform should | ||
378 | * re-initialize the map in platform_smp_prepare_cpus() if | ||
379 | * present != possible (e.g. physical hotplug). | ||
380 | */ | ||
381 | init_cpu_present(&cpu_possible_map); | ||
382 | |||
383 | /* | ||
377 | * Initialise the SCU if there are more than one CPU | 384 | * Initialise the SCU if there are more than one CPU |
378 | * and let them know where to start. | 385 | * and let them know where to start. |
379 | */ | 386 | */ |
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index a1e757c3439b..79ed5e7f204a 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #define SCU_INVALIDATE 0x0c | 20 | #define SCU_INVALIDATE 0x0c |
21 | #define SCU_FPGA_REVISION 0x10 | 21 | #define SCU_FPGA_REVISION 0x10 |
22 | 22 | ||
23 | #ifdef CONFIG_SMP | ||
23 | /* | 24 | /* |
24 | * Get the number of CPU cores from the SCU configuration | 25 | * Get the number of CPU cores from the SCU configuration |
25 | */ | 26 | */ |
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base) | |||
50 | */ | 51 | */ |
51 | flush_cache_all(); | 52 | flush_cache_all(); |
52 | } | 53 | } |
54 | #endif | ||
53 | 55 | ||
54 | /* | 56 | /* |
55 | * Set the executing CPUs power mode as defined. This will be in | 57 | * Set the executing CPUs power mode as defined. This will be in |
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index f5cf660eefcc..30e302d33e0a 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include "tcm.h" | 19 | #include "tcm.h" |
20 | 20 | ||
21 | static struct gen_pool *tcm_pool; | 21 | static struct gen_pool *tcm_pool; |
22 | static bool dtcm_present; | ||
23 | static bool itcm_present; | ||
22 | 24 | ||
23 | /* TCM section definitions from the linker */ | 25 | /* TCM section definitions from the linker */ |
24 | extern char __itcm_start, __sitcm_text, __eitcm_text; | 26 | extern char __itcm_start, __sitcm_text, __eitcm_text; |
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len) | |||
90 | } | 92 | } |
91 | EXPORT_SYMBOL(tcm_free); | 93 | EXPORT_SYMBOL(tcm_free); |
92 | 94 | ||
95 | bool tcm_dtcm_present(void) | ||
96 | { | ||
97 | return dtcm_present; | ||
98 | } | ||
99 | EXPORT_SYMBOL(tcm_dtcm_present); | ||
100 | |||
101 | bool tcm_itcm_present(void) | ||
102 | { | ||
103 | return itcm_present; | ||
104 | } | ||
105 | EXPORT_SYMBOL(tcm_itcm_present); | ||
106 | |||
93 | static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, | 107 | static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, |
94 | u32 *offset) | 108 | u32 *offset) |
95 | { | 109 | { |
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, | |||
134 | (tcm_region & 1) ? "" : "not "); | 148 | (tcm_region & 1) ? "" : "not "); |
135 | } | 149 | } |
136 | 150 | ||
151 | /* Not much fun you can do with a size 0 bank */ | ||
152 | if (tcm_size == 0) | ||
153 | return 0; | ||
154 | |||
137 | /* Force move the TCM bank to where we want it, enable */ | 155 | /* Force move the TCM bank to where we want it, enable */ |
138 | tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; | 156 | tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; |
139 | 157 | ||
@@ -165,12 +183,20 @@ void __init tcm_init(void) | |||
165 | u32 tcm_status = read_cpuid_tcmstatus(); | 183 | u32 tcm_status = read_cpuid_tcmstatus(); |
166 | u8 dtcm_banks = (tcm_status >> 16) & 0x03; | 184 | u8 dtcm_banks = (tcm_status >> 16) & 0x03; |
167 | u8 itcm_banks = (tcm_status & 0x03); | 185 | u8 itcm_banks = (tcm_status & 0x03); |
186 | size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; | ||
187 | size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; | ||
168 | char *start; | 188 | char *start; |
169 | char *end; | 189 | char *end; |
170 | char *ram; | 190 | char *ram; |
171 | int ret; | 191 | int ret; |
172 | int i; | 192 | int i; |
173 | 193 | ||
194 | /* Values greater than 2 for D/ITCM banks are "reserved" */ | ||
195 | if (dtcm_banks > 2) | ||
196 | dtcm_banks = 0; | ||
197 | if (itcm_banks > 2) | ||
198 | itcm_banks = 0; | ||
199 | |||
174 | /* Setup DTCM if present */ | 200 | /* Setup DTCM if present */ |
175 | if (dtcm_banks > 0) { | 201 | if (dtcm_banks > 0) { |
176 | for (i = 0; i < dtcm_banks; i++) { | 202 | for (i = 0; i < dtcm_banks; i++) { |
@@ -178,6 +204,13 @@ void __init tcm_init(void) | |||
178 | if (ret) | 204 | if (ret) |
179 | return; | 205 | return; |
180 | } | 206 | } |
207 | /* This means you compiled more code than fits into DTCM */ | ||
208 | if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) { | ||
209 | pr_info("CPU DTCM: %u bytes of code compiled to " | ||
210 | "DTCM but only %lu bytes of DTCM present\n", | ||
211 | dtcm_code_sz, (dtcm_end - DTCM_OFFSET)); | ||
212 | goto no_dtcm; | ||
213 | } | ||
181 | dtcm_res.end = dtcm_end - 1; | 214 | dtcm_res.end = dtcm_end - 1; |
182 | request_resource(&iomem_resource, &dtcm_res); | 215 | request_resource(&iomem_resource, &dtcm_res); |
183 | dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; | 216 | dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; |
@@ -186,12 +219,16 @@ void __init tcm_init(void) | |||
186 | start = &__sdtcm_data; | 219 | start = &__sdtcm_data; |
187 | end = &__edtcm_data; | 220 | end = &__edtcm_data; |
188 | ram = &__dtcm_start; | 221 | ram = &__dtcm_start; |
189 | /* This means you compiled more code than fits into DTCM */ | 222 | memcpy(start, ram, dtcm_code_sz); |
190 | BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); | 223 | pr_debug("CPU DTCM: copied data from %p - %p\n", |
191 | memcpy(start, ram, (end-start)); | 224 | start, end); |
192 | pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); | 225 | dtcm_present = true; |
226 | } else if (dtcm_code_sz) { | ||
227 | pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " | ||
228 | "DTCM banks present in CPU\n", dtcm_code_sz); | ||
193 | } | 229 | } |
194 | 230 | ||
231 | no_dtcm: | ||
195 | /* Setup ITCM if present */ | 232 | /* Setup ITCM if present */ |
196 | if (itcm_banks > 0) { | 233 | if (itcm_banks > 0) { |
197 | for (i = 0; i < itcm_banks; i++) { | 234 | for (i = 0; i < itcm_banks; i++) { |
@@ -199,6 +236,13 @@ void __init tcm_init(void) | |||
199 | if (ret) | 236 | if (ret) |
200 | return; | 237 | return; |
201 | } | 238 | } |
239 | /* This means you compiled more code than fits into ITCM */ | ||
240 | if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) { | ||
241 | pr_info("CPU ITCM: %u bytes of code compiled to " | ||
242 | "ITCM but only %lu bytes of ITCM present\n", | ||
243 | itcm_code_sz, (itcm_end - ITCM_OFFSET)); | ||
244 | return; | ||
245 | } | ||
202 | itcm_res.end = itcm_end - 1; | 246 | itcm_res.end = itcm_end - 1; |
203 | request_resource(&iomem_resource, &itcm_res); | 247 | request_resource(&iomem_resource, &itcm_res); |
204 | itcm_iomap[0].length = itcm_end - ITCM_OFFSET; | 248 | itcm_iomap[0].length = itcm_end - ITCM_OFFSET; |
@@ -207,10 +251,13 @@ void __init tcm_init(void) | |||
207 | start = &__sitcm_text; | 251 | start = &__sitcm_text; |
208 | end = &__eitcm_text; | 252 | end = &__eitcm_text; |
209 | ram = &__itcm_start; | 253 | ram = &__itcm_start; |
210 | /* This means you compiled more code than fits into ITCM */ | 254 | memcpy(start, ram, itcm_code_sz); |
211 | BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); | 255 | pr_debug("CPU ITCM: copied code from %p - %p\n", |
212 | memcpy(start, ram, (end-start)); | 256 | start, end); |
213 | pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); | 257 | itcm_present = true; |
258 | } else if (itcm_code_sz) { | ||
259 | pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " | ||
260 | "ITCM banks present in CPU\n", itcm_code_sz); | ||
214 | } | 261 | } |
215 | } | 262 | } |
216 | 263 | ||
@@ -221,7 +268,6 @@ void __init tcm_init(void) | |||
221 | */ | 268 | */ |
222 | static int __init setup_tcm_pool(void) | 269 | static int __init setup_tcm_pool(void) |
223 | { | 270 | { |
224 | u32 tcm_status = read_cpuid_tcmstatus(); | ||
225 | u32 dtcm_pool_start = (u32) &__edtcm_data; | 271 | u32 dtcm_pool_start = (u32) &__edtcm_data; |
226 | u32 itcm_pool_start = (u32) &__eitcm_text; | 272 | u32 itcm_pool_start = (u32) &__eitcm_text; |
227 | int ret; | 273 | int ret; |
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void) | |||
236 | pr_debug("Setting up TCM memory pool\n"); | 282 | pr_debug("Setting up TCM memory pool\n"); |
237 | 283 | ||
238 | /* Add the rest of DTCM to the TCM pool */ | 284 | /* Add the rest of DTCM to the TCM pool */ |
239 | if (tcm_status & (0x03 << 16)) { | 285 | if (dtcm_present) { |
240 | if (dtcm_pool_start < dtcm_end) { | 286 | if (dtcm_pool_start < dtcm_end) { |
241 | ret = gen_pool_add(tcm_pool, dtcm_pool_start, | 287 | ret = gen_pool_add(tcm_pool, dtcm_pool_start, |
242 | dtcm_end - dtcm_pool_start, -1); | 288 | dtcm_end - dtcm_pool_start, -1); |
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void) | |||
253 | } | 299 | } |
254 | 300 | ||
255 | /* Add the rest of ITCM to the TCM pool */ | 301 | /* Add the rest of ITCM to the TCM pool */ |
256 | if (tcm_status & 0x03) { | 302 | if (itcm_present) { |
257 | if (itcm_pool_start < itcm_end) { | 303 | if (itcm_pool_start < itcm_end) { |
258 | ret = gen_pool_add(tcm_pool, itcm_pool_start, | 304 | ret = gen_pool_add(tcm_pool, itcm_pool_start, |
259 | itcm_end - itcm_pool_start, -1); | 305 | itcm_end - itcm_pool_start, -1); |
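The tcm.c hunk above replaces the old BUG_ON() with graceful checks: bank counts above 2 are treated as reserved (so no banks), zero-size banks are skipped, and code compiled for a TCM that is missing or too small now only logs a message and skips setup. A minimal stand-alone C sketch of those checks follows; the register layout matches the hunk, while the linker-symbol sizes and status value are made-up placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up values standing in for the linker symbols and bank size
 * probed by the real code. */
#define DTCM_OFFSET   0xfffe8000u
#define DTCM_END      0xfffea000u	/* 8 KiB of DTCM present */
#define DTCM_CODE_SZ  0x3000u		/* 12 KiB of code compiled to DTCM */

/* Bank counts sit in bits [17:16] (DTCM) and [1:0] (ITCM) of the TCM
 * status register; values above 2 are reserved, so treat them as none. */
static unsigned int decode_banks(uint32_t tcm_status, int shift)
{
	unsigned int banks = (tcm_status >> shift) & 0x03;

	return (banks > 2) ? 0 : banks;
}

/* The graceful replacement for the old BUG_ON(): report and skip DTCM
 * setup when more code was compiled to DTCM than the CPU provides. */
static bool dtcm_code_fits(void)
{
	if (DTCM_CODE_SZ > (DTCM_END - DTCM_OFFSET)) {
		printf("CPU DTCM: %u bytes of code compiled but only %u bytes present\n",
		       DTCM_CODE_SZ, DTCM_END - DTCM_OFFSET);
		return false;
	}
	return true;
}

int main(void)
{
	uint32_t tcm_status = 0x00010001;	/* one DTCM bank, one ITCM bank */

	printf("DTCM banks: %u\n", decode_banks(tcm_status, 16));
	printf("ITCM banks: %u\n", decode_banks(tcm_status, 0));
	printf("DTCM code fits: %s\n", dtcm_code_fits() ? "yes" : "no");
	return 0;
}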
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 6807cb1e76dd..2d3436e9f71f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -355,9 +355,24 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) | |||
355 | pc = (void __user *)instruction_pointer(regs); | 355 | pc = (void __user *)instruction_pointer(regs); |
356 | 356 | ||
357 | if (processor_mode(regs) == SVC_MODE) { | 357 | if (processor_mode(regs) == SVC_MODE) { |
358 | instr = *(u32 *) pc; | 358 | #ifdef CONFIG_THUMB2_KERNEL |
359 | if (thumb_mode(regs)) { | ||
360 | instr = ((u16 *)pc)[0]; | ||
361 | if (is_wide_instruction(instr)) { | ||
362 | instr <<= 16; | ||
363 | instr |= ((u16 *)pc)[1]; | ||
364 | } | ||
365 | } else | ||
366 | #endif | ||
367 | instr = *(u32 *) pc; | ||
359 | } else if (thumb_mode(regs)) { | 368 | } else if (thumb_mode(regs)) { |
360 | get_user(instr, (u16 __user *)pc); | 369 | get_user(instr, (u16 __user *)pc); |
370 | if (is_wide_instruction(instr)) { | ||
371 | unsigned int instr2; | ||
372 | get_user(instr2, (u16 __user *)pc+1); | ||
373 | instr <<= 16; | ||
374 | instr |= instr2; | ||
375 | } | ||
361 | } else { | 376 | } else { |
362 | get_user(instr, (u32 __user *)pc); | 377 | get_user(instr, (u32 __user *)pc); |
363 | } | 378 | } |
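The traps.c change above teaches do_undefinstr() to fetch the full 32-bit instruction when the faulting Thumb halfword is a wide encoding, both for kernel (Thumb-2) and user mode. A small stand-alone sketch of that assembly step, assuming (as the architecture defines and the kernel's is_wide_instruction() test reflects) that first halfwords at or above 0xe800 begin a 32-bit encoding; the example halfwords are illustrative.

#include <stdint.h>
#include <stdio.h>

/* First halfwords 0xe800-0xffff begin a 32-bit Thumb-2 encoding. */
static int is_wide_instruction(uint16_t hw)
{
	return hw >= 0xe800;
}

/* Assemble the faulting instruction the way the patched handler does:
 * first halfword in the upper 16 bits, second halfword in the lower. */
static uint32_t fetch_thumb_instr(const uint16_t *pc)
{
	uint32_t instr = pc[0];

	if (is_wide_instruction(pc[0])) {
		instr <<= 16;
		instr |= pc[1];
	}
	return instr;
}

int main(void)
{
	const uint16_t narrow[] = { 0xbf00 };		/* 16-bit NOP */
	const uint16_t wide[]   = { 0xf3af, 0x8000 };	/* 32-bit NOP.W */

	printf("narrow: %#x\n", (unsigned)fetch_thumb_instr(narrow));
	printf("wide:   %#x\n", (unsigned)fetch_thumb_instr(wide));
	return 0;
}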
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index e5287f21badc..bf977f8514f6 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4; | |||
38 | 38 | ||
39 | SECTIONS | 39 | SECTIONS |
40 | { | 40 | { |
41 | #ifdef CONFIG_XIP_KERNEL | ||
42 | . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); | ||
43 | #else | ||
44 | . = PAGE_OFFSET + TEXT_OFFSET; | ||
45 | #endif | ||
46 | |||
47 | .init : { /* Init code and data */ | ||
48 | _stext = .; | ||
49 | _sinittext = .; | ||
50 | HEAD_TEXT | ||
51 | INIT_TEXT | ||
52 | ARM_EXIT_KEEP(EXIT_TEXT) | ||
53 | _einittext = .; | ||
54 | ARM_CPU_DISCARD(PROC_INFO) | ||
55 | __arch_info_begin = .; | ||
56 | *(.arch.info.init) | ||
57 | __arch_info_end = .; | ||
58 | __tagtable_begin = .; | ||
59 | *(.taglist.init) | ||
60 | __tagtable_end = .; | ||
61 | #ifdef CONFIG_SMP_ON_UP | ||
62 | __smpalt_begin = .; | ||
63 | *(.alt.smp.init) | ||
64 | __smpalt_end = .; | ||
65 | #endif | ||
66 | |||
67 | __pv_table_begin = .; | ||
68 | *(.pv_table) | ||
69 | __pv_table_end = .; | ||
70 | |||
71 | INIT_SETUP(16) | ||
72 | |||
73 | INIT_CALLS | ||
74 | CON_INITCALL | ||
75 | SECURITY_INITCALL | ||
76 | INIT_RAM_FS | ||
77 | |||
78 | #ifndef CONFIG_XIP_KERNEL | ||
79 | __init_begin = _stext; | ||
80 | INIT_DATA | ||
81 | ARM_EXIT_KEEP(EXIT_DATA) | ||
82 | #endif | ||
83 | } | ||
84 | |||
85 | PERCPU_SECTION(32) | ||
86 | |||
87 | #ifndef CONFIG_XIP_KERNEL | ||
88 | . = ALIGN(PAGE_SIZE); | ||
89 | __init_end = .; | ||
90 | #endif | ||
91 | |||
92 | /* | 41 | /* |
93 | * unwind exit sections must be discarded before the rest of the | 42 | * unwind exit sections must be discarded before the rest of the |
94 | * unwind sections get included. | 43 | * unwind sections get included. |
@@ -106,10 +55,22 @@ SECTIONS | |||
106 | *(.fixup) | 55 | *(.fixup) |
107 | *(__ex_table) | 56 | *(__ex_table) |
108 | #endif | 57 | #endif |
58 | #ifndef CONFIG_SMP_ON_UP | ||
59 | *(.alt.smp.init) | ||
60 | #endif | ||
109 | } | 61 | } |
110 | 62 | ||
63 | #ifdef CONFIG_XIP_KERNEL | ||
64 | . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); | ||
65 | #else | ||
66 | . = PAGE_OFFSET + TEXT_OFFSET; | ||
67 | #endif | ||
68 | .head.text : { | ||
69 | _text = .; | ||
70 | HEAD_TEXT | ||
71 | } | ||
111 | .text : { /* Real text segment */ | 72 | .text : { /* Real text segment */ |
112 | _text = .; /* Text and read-only data */ | 73 | _stext = .; /* Text and read-only data */ |
113 | __exception_text_start = .; | 74 | __exception_text_start = .; |
114 | *(.exception.text) | 75 | *(.exception.text) |
115 | __exception_text_end = .; | 76 | __exception_text_end = .; |
@@ -122,8 +83,6 @@ SECTIONS | |||
122 | *(.fixup) | 83 | *(.fixup) |
123 | #endif | 84 | #endif |
124 | *(.gnu.warning) | 85 | *(.gnu.warning) |
125 | *(.rodata) | ||
126 | *(.rodata.*) | ||
127 | *(.glue_7) | 86 | *(.glue_7) |
128 | *(.glue_7t) | 87 | *(.glue_7t) |
129 | . = ALIGN(4); | 88 | . = ALIGN(4); |
@@ -152,10 +111,63 @@ SECTIONS | |||
152 | 111 | ||
153 | _etext = .; /* End of text and rodata section */ | 112 | _etext = .; /* End of text and rodata section */ |
154 | 113 | ||
114 | #ifndef CONFIG_XIP_KERNEL | ||
115 | . = ALIGN(PAGE_SIZE); | ||
116 | __init_begin = .; | ||
117 | #endif | ||
118 | |||
119 | INIT_TEXT_SECTION(8) | ||
120 | .exit.text : { | ||
121 | ARM_EXIT_KEEP(EXIT_TEXT) | ||
122 | } | ||
123 | .init.proc.info : { | ||
124 | ARM_CPU_DISCARD(PROC_INFO) | ||
125 | } | ||
126 | .init.arch.info : { | ||
127 | __arch_info_begin = .; | ||
128 | *(.arch.info.init) | ||
129 | __arch_info_end = .; | ||
130 | } | ||
131 | .init.tagtable : { | ||
132 | __tagtable_begin = .; | ||
133 | *(.taglist.init) | ||
134 | __tagtable_end = .; | ||
135 | } | ||
136 | #ifdef CONFIG_SMP_ON_UP | ||
137 | .init.smpalt : { | ||
138 | __smpalt_begin = .; | ||
139 | *(.alt.smp.init) | ||
140 | __smpalt_end = .; | ||
141 | } | ||
142 | #endif | ||
143 | .init.pv_table : { | ||
144 | __pv_table_begin = .; | ||
145 | *(.pv_table) | ||
146 | __pv_table_end = .; | ||
147 | } | ||
148 | .init.data : { | ||
149 | #ifndef CONFIG_XIP_KERNEL | ||
150 | INIT_DATA | ||
151 | #endif | ||
152 | INIT_SETUP(16) | ||
153 | INIT_CALLS | ||
154 | CON_INITCALL | ||
155 | SECURITY_INITCALL | ||
156 | INIT_RAM_FS | ||
157 | } | ||
158 | #ifndef CONFIG_XIP_KERNEL | ||
159 | .exit.data : { | ||
160 | ARM_EXIT_KEEP(EXIT_DATA) | ||
161 | } | ||
162 | #endif | ||
163 | |||
164 | PERCPU_SECTION(32) | ||
165 | |||
155 | #ifdef CONFIG_XIP_KERNEL | 166 | #ifdef CONFIG_XIP_KERNEL |
156 | __data_loc = ALIGN(4); /* location in binary */ | 167 | __data_loc = ALIGN(4); /* location in binary */ |
157 | . = PAGE_OFFSET + TEXT_OFFSET; | 168 | . = PAGE_OFFSET + TEXT_OFFSET; |
158 | #else | 169 | #else |
170 | __init_end = .; | ||
159 | . = ALIGN(THREAD_SIZE); | 171 | . = ALIGN(THREAD_SIZE); |
160 | __data_loc = .; | 172 | __data_loc = .; |
161 | #endif | 173 | #endif |
@@ -270,12 +282,6 @@ SECTIONS | |||
270 | 282 | ||
271 | /* Default discards */ | 283 | /* Default discards */ |
272 | DISCARDS | 284 | DISCARDS |
273 | |||
274 | #ifndef CONFIG_SMP_ON_UP | ||
275 | /DISCARD/ : { | ||
276 | *(.alt.smp.init) | ||
277 | } | ||
278 | #endif | ||
279 | } | 285 | } |
280 | 286 | ||
281 | /* | 287 | /* |
diff --git a/arch/arm/mach-bcmring/include/mach/entry-macro.S b/arch/arm/mach-bcmring/include/mach/entry-macro.S index 7d393ca010ac..94c950d783ba 100644 --- a/arch/arm/mach-bcmring/include/mach/entry-macro.S +++ b/arch/arm/mach-bcmring/include/mach/entry-macro.S | |||
@@ -80,7 +80,3 @@ | |||
80 | 80 | ||
81 | .macro arch_ret_to_user, tmp1, tmp2 | 81 | .macro arch_ret_to_user, tmp1, tmp2 |
82 | .endm | 82 | .endm |
83 | |||
84 | .macro irq_prio_table | ||
85 | .endm | ||
86 | |||
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c index 8bc3701aa05c..84fd78684868 100644 --- a/arch/arm/mach-davinci/board-da830-evm.c +++ b/arch/arm/mach-davinci/board-da830-evm.c | |||
@@ -681,4 +681,5 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM") | |||
681 | .init_irq = cp_intc_init, | 681 | .init_irq = cp_intc_init, |
682 | .timer = &davinci_timer, | 682 | .timer = &davinci_timer, |
683 | .init_machine = da830_evm_init, | 683 | .init_machine = da830_evm_init, |
684 | .dma_zone_size = SZ_128M, | ||
684 | MACHINE_END | 685 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c index a7b41bf505f1..29671ef07152 100644 --- a/arch/arm/mach-davinci/board-da850-evm.c +++ b/arch/arm/mach-davinci/board-da850-evm.c | |||
@@ -1261,4 +1261,5 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM") | |||
1261 | .init_irq = cp_intc_init, | 1261 | .init_irq = cp_intc_init, |
1262 | .timer = &davinci_timer, | 1262 | .timer = &davinci_timer, |
1263 | .init_machine = da850_evm_init, | 1263 | .init_machine = da850_evm_init, |
1264 | .dma_zone_size = SZ_128M, | ||
1264 | MACHINE_END | 1265 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c index 6e7cad13352c..241a6bd67408 100644 --- a/arch/arm/mach-davinci/board-dm355-evm.c +++ b/arch/arm/mach-davinci/board-dm355-evm.c | |||
@@ -356,4 +356,5 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM") | |||
356 | .init_irq = davinci_irq_init, | 356 | .init_irq = davinci_irq_init, |
357 | .timer = &davinci_timer, | 357 | .timer = &davinci_timer, |
358 | .init_machine = dm355_evm_init, | 358 | .init_machine = dm355_evm_init, |
359 | .dma_zone_size = SZ_128M, | ||
359 | MACHINE_END | 360 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index 543f9911b281..bee284ca7fd6 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c | |||
@@ -275,4 +275,5 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard") | |||
275 | .init_irq = davinci_irq_init, | 275 | .init_irq = davinci_irq_init, |
276 | .timer = &davinci_timer, | 276 | .timer = &davinci_timer, |
277 | .init_machine = dm355_leopard_init, | 277 | .init_machine = dm355_leopard_init, |
278 | .dma_zone_size = SZ_128M, | ||
278 | MACHINE_END | 279 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index 09a87e61ffcf..9818f214d4f0 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c | |||
@@ -617,5 +617,6 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM") | |||
617 | .init_irq = davinci_irq_init, | 617 | .init_irq = davinci_irq_init, |
618 | .timer = &davinci_timer, | 618 | .timer = &davinci_timer, |
619 | .init_machine = dm365_evm_init, | 619 | .init_machine = dm365_evm_init, |
620 | .dma_zone_size = SZ_128M, | ||
620 | MACHINE_END | 621 | MACHINE_END |
621 | 622 | ||
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index 556bbd468db3..95607a191e03 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c | |||
@@ -717,4 +717,5 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM") | |||
717 | .init_irq = davinci_irq_init, | 717 | .init_irq = davinci_irq_init, |
718 | .timer = &davinci_timer, | 718 | .timer = &davinci_timer, |
719 | .init_machine = davinci_evm_init, | 719 | .init_machine = davinci_evm_init, |
720 | .dma_zone_size = SZ_128M, | ||
720 | MACHINE_END | 721 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index f6ac9ba74878..6d03643b9bd1 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c | |||
@@ -802,6 +802,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM") | |||
802 | .init_irq = davinci_irq_init, | 802 | .init_irq = davinci_irq_init, |
803 | .timer = &davinci_timer, | 803 | .timer = &davinci_timer, |
804 | .init_machine = evm_init, | 804 | .init_machine = evm_init, |
805 | .dma_zone_size = SZ_128M, | ||
805 | MACHINE_END | 806 | MACHINE_END |
806 | 807 | ||
807 | MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") | 808 | MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") |
@@ -810,5 +811,6 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") | |||
810 | .init_irq = davinci_irq_init, | 811 | .init_irq = davinci_irq_init, |
811 | .timer = &davinci_timer, | 812 | .timer = &davinci_timer, |
812 | .init_machine = evm_init, | 813 | .init_machine = evm_init, |
814 | .dma_zone_size = SZ_128M, | ||
813 | MACHINE_END | 815 | MACHINE_END |
814 | 816 | ||
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c index 5f5d78308873..c278226627ad 100644 --- a/arch/arm/mach-davinci/board-mityomapl138.c +++ b/arch/arm/mach-davinci/board-mityomapl138.c | |||
@@ -571,4 +571,5 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808") | |||
571 | .init_irq = cp_intc_init, | 571 | .init_irq = cp_intc_init, |
572 | .timer = &davinci_timer, | 572 | .timer = &davinci_timer, |
573 | .init_machine = mityomapl138_init, | 573 | .init_machine = mityomapl138_init, |
574 | .dma_zone_size = SZ_128M, | ||
574 | MACHINE_END | 575 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 3e7be2de96de..d60a80028ba3 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c | |||
@@ -277,4 +277,5 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2") | |||
277 | .init_irq = davinci_irq_init, | 277 | .init_irq = davinci_irq_init, |
278 | .timer = &davinci_timer, | 278 | .timer = &davinci_timer, |
279 | .init_machine = davinci_ntosd2_init, | 279 | .init_machine = davinci_ntosd2_init, |
280 | .dma_zone_size = SZ_128M, | ||
280 | MACHINE_END | 281 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c index 67c38d0ecd10..237332a11421 100644 --- a/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/arch/arm/mach-davinci/board-omapl138-hawk.c | |||
@@ -343,4 +343,5 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard") | |||
343 | .init_irq = cp_intc_init, | 343 | .init_irq = cp_intc_init, |
344 | .timer = &davinci_timer, | 344 | .timer = &davinci_timer, |
345 | .init_machine = omapl138_hawk_init, | 345 | .init_machine = omapl138_hawk_init, |
346 | .dma_zone_size = SZ_128M, | ||
346 | MACHINE_END | 347 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c index 61ac96d8f00d..5f4385c0a089 100644 --- a/arch/arm/mach-davinci/board-sffsdr.c +++ b/arch/arm/mach-davinci/board-sffsdr.c | |||
@@ -156,4 +156,5 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR") | |||
156 | .init_irq = davinci_irq_init, | 156 | .init_irq = davinci_irq_init, |
157 | .timer = &davinci_timer, | 157 | .timer = &davinci_timer, |
158 | .init_machine = davinci_sffsdr_init, | 158 | .init_machine = davinci_sffsdr_init, |
159 | .dma_zone_size = SZ_128M, | ||
159 | MACHINE_END | 160 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c index 1a656e882262..782892065682 100644 --- a/arch/arm/mach-davinci/board-tnetv107x-evm.c +++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c | |||
@@ -282,4 +282,5 @@ MACHINE_START(TNETV107X, "TNETV107X EVM") | |||
282 | .init_irq = cp_intc_init, | 282 | .init_irq = cp_intc_init, |
283 | .timer = &davinci_timer, | 283 | .timer = &davinci_timer, |
284 | .init_machine = tnetv107x_evm_board_init, | 284 | .init_machine = tnetv107x_evm_board_init, |
285 | .dma_zone_size = SZ_128M, | ||
285 | MACHINE_END | 286 | MACHINE_END |
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S index fbdebc7cb409..e14c0dc0e12c 100644 --- a/arch/arm/mach-davinci/include/mach/entry-macro.S +++ b/arch/arm/mach-davinci/include/mach/entry-macro.S | |||
@@ -46,6 +46,3 @@ | |||
46 | #endif | 46 | #endif |
47 | 1002: | 47 | 1002: |
48 | .endm | 48 | .endm |
49 | |||
50 | .macro irq_prio_table | ||
51 | .endm | ||
diff --git a/arch/arm/mach-davinci/include/mach/memory.h b/arch/arm/mach-davinci/include/mach/memory.h index 491249ef209c..78731944a70c 100644 --- a/arch/arm/mach-davinci/include/mach/memory.h +++ b/arch/arm/mach-davinci/include/mach/memory.h | |||
@@ -41,11 +41,4 @@ | |||
41 | */ | 41 | */ |
42 | #define CONSISTENT_DMA_SIZE (14<<20) | 42 | #define CONSISTENT_DMA_SIZE (14<<20) |
43 | 43 | ||
44 | /* | ||
45 | * Restrict DMA-able region to workaround silicon bug. The bug | ||
46 | * restricts buffers available for DMA to video hardware to be | ||
47 | * below 128M | ||
48 | */ | ||
49 | #define ARM_DMA_ZONE_SIZE SZ_128M | ||
50 | |||
51 | #endif /* __ASM_ARCH_MEMORY_H */ | 44 | #endif /* __ASM_ARCH_MEMORY_H */ |
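The davinci hunks above drop the per-mach ARM_DMA_ZONE_SIZE macro and instead set a per-machine .dma_zone_size field in each MACHINE_START descriptor. A stand-alone sketch of that pattern with a hypothetical, trimmed-down descriptor; only the SZ_128M value and the EVM name are taken from the hunks, everything else is a stand-in.

#include <stdio.h>

#define SZ_128M (128ul << 20)

/* Hypothetical, trimmed-down stand-in for the ARM machine descriptor:
 * the DMA zone size moves from a directory-wide macro into a field
 * that each board sets (or leaves at 0 for no restriction). */
struct machine_desc {
	const char   *name;
	unsigned long dma_zone_size;	/* 0 means no DMA zone restriction */
};

static const struct machine_desc davinci_evm = {
	.name          = "DaVinci DM644x EVM",
	.dma_zone_size = SZ_128M,
};

static const struct machine_desc other_board = {
	.name = "Some other board",	/* field left at 0 */
};

static void setup_dma_zone(const struct machine_desc *mdesc)
{
	if (mdesc->dma_zone_size)
		printf("%s: DMA zone limited to %lu MiB\n",
		       mdesc->name, mdesc->dma_zone_size >> 20);
	else
		printf("%s: no DMA zone restriction\n", mdesc->name);
}

int main(void)
{
	setup_dma_zone(&davinci_evm);
	setup_dma_zone(&other_board);
	return 0;
}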
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c index c5e65a02be8d..b68d5bdf04cf 100644 --- a/arch/arm/mach-exynos4/platsmp.c +++ b/arch/arm/mach-exynos4/platsmp.c | |||
@@ -154,14 +154,6 @@ void __init smp_init_cpus(void) | |||
154 | 154 | ||
155 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 155 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
156 | { | 156 | { |
157 | int i; | ||
158 | |||
159 | /* | ||
160 | * Initialise the present map, which describes the set of CPUs | ||
161 | * actually populated at the present time. | ||
162 | */ | ||
163 | for (i = 0; i < max_cpus; i++) | ||
164 | set_cpu_present(i, true); | ||
165 | 157 | ||
166 | scu_enable(scu_base_addr()); | 158 | scu_enable(scu_base_addr()); |
167 | 159 | ||
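The platsmp.c hunk above (and the matching msm and omap2 hunks later in this diff) drops the per-platform loop that marked the first max_cpus CPUs as present, presumably because that bookkeeping is now done once outside these files. For reference, a stand-alone sketch of what the removed loop did; NR_CPUS and the bitmap are simplified stand-ins.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

static bool cpu_present[NR_CPUS];

/* What each removed platform hook used to do: mark the first max_cpus
 * CPUs as present. */
static void init_present_map(unsigned int max_cpus)
{
	for (unsigned int i = 0; i < max_cpus && i < NR_CPUS; i++)
		cpu_present[i] = true;
}

int main(void)
{
	init_present_map(2);
	for (int i = 0; i < NR_CPUS; i++)
		printf("cpu%d present: %s\n", i, cpu_present[i] ? "yes" : "no");
	return 0;
}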
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 8755ca8dd48d..533c28f758ca 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c | |||
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = { | |||
280 | SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), | 280 | SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), |
281 | }; | 281 | }; |
282 | 282 | ||
283 | void exynos4_cpu_suspend(void) | 283 | static int exynos4_cpu_suspend(unsigned long arg) |
284 | { | 284 | { |
285 | unsigned long tmp; | 285 | unsigned long tmp; |
286 | unsigned long mask = 0xFFFFFFFF; | 286 | unsigned long mask = 0xFFFFFFFF; |
diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index 6b62425417a6..0984078f1eba 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S | |||
@@ -33,28 +33,6 @@ | |||
33 | .text | 33 | .text |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * s3c_cpu_save | ||
37 | * | ||
38 | * entry: | ||
39 | * r1 = v:p offset | ||
40 | */ | ||
41 | |||
42 | ENTRY(s3c_cpu_save) | ||
43 | |||
44 | stmfd sp!, { r3 - r12, lr } | ||
45 | ldr r3, =resume_with_mmu | ||
46 | bl cpu_suspend | ||
47 | |||
48 | ldr r0, =pm_cpu_sleep | ||
49 | ldr r0, [ r0 ] | ||
50 | mov pc, r0 | ||
51 | |||
52 | resume_with_mmu: | ||
53 | ldmfd sp!, { r3 - r12, pc } | ||
54 | |||
55 | .ltorg | ||
56 | |||
57 | /* | ||
58 | * sleep magic, to allow the bootloader to check for an valid | 36 | * sleep magic, to allow the bootloader to check for an valid |
59 | * image to resume to. Must be the first word before the | 37 | * image to resume to. Must be the first word before the |
60 | * s3c_cpu_resume entry. | 38 | * s3c_cpu_resume entry. |
diff --git a/arch/arm/mach-h720x/h7201-eval.c b/arch/arm/mach-h720x/h7201-eval.c index 629454d71c8d..65f1bea958e5 100644 --- a/arch/arm/mach-h720x/h7201-eval.c +++ b/arch/arm/mach-h720x/h7201-eval.c | |||
@@ -33,4 +33,5 @@ MACHINE_START(H7201, "Hynix GMS30C7201") | |||
33 | .map_io = h720x_map_io, | 33 | .map_io = h720x_map_io, |
34 | .init_irq = h720x_init_irq, | 34 | .init_irq = h720x_init_irq, |
35 | .timer = &h7201_timer, | 35 | .timer = &h7201_timer, |
36 | .dma_zone_size = SZ_256M, | ||
36 | MACHINE_END | 37 | MACHINE_END |
diff --git a/arch/arm/mach-h720x/h7202-eval.c b/arch/arm/mach-h720x/h7202-eval.c index e9f46b696354..884584a09752 100644 --- a/arch/arm/mach-h720x/h7202-eval.c +++ b/arch/arm/mach-h720x/h7202-eval.c | |||
@@ -76,4 +76,5 @@ MACHINE_START(H7202, "Hynix HMS30C7202") | |||
76 | .init_irq = h7202_init_irq, | 76 | .init_irq = h7202_init_irq, |
77 | .timer = &h7202_timer, | 77 | .timer = &h7202_timer, |
78 | .init_machine = init_eval_h7202, | 78 | .init_machine = init_eval_h7202, |
79 | .dma_zone_size = SZ_256M, | ||
79 | MACHINE_END | 80 | MACHINE_END |
diff --git a/arch/arm/mach-h720x/include/mach/entry-macro.S b/arch/arm/mach-h720x/include/mach/entry-macro.S index 6d3b917c4a18..c3948e5ba4a0 100644 --- a/arch/arm/mach-h720x/include/mach/entry-macro.S +++ b/arch/arm/mach-h720x/include/mach/entry-macro.S | |||
@@ -57,9 +57,6 @@ | |||
57 | tst \irqstat, #1 @ bit 0 should be set | 57 | tst \irqstat, #1 @ bit 0 should be set |
58 | .endm | 58 | .endm |
59 | 59 | ||
60 | .macro irq_prio_table | ||
61 | .endm | ||
62 | |||
63 | #else | 60 | #else |
64 | #error hynix processor selection missmatch | 61 | #error hynix processor selection missmatch |
65 | #endif | 62 | #endif |
diff --git a/arch/arm/mach-h720x/include/mach/memory.h b/arch/arm/mach-h720x/include/mach/memory.h index b0b3baec9acf..96dcf50c51d3 100644 --- a/arch/arm/mach-h720x/include/mach/memory.h +++ b/arch/arm/mach-h720x/include/mach/memory.h | |||
@@ -8,11 +8,4 @@ | |||
8 | #define __ASM_ARCH_MEMORY_H | 8 | #define __ASM_ARCH_MEMORY_H |
9 | 9 | ||
10 | #define PLAT_PHYS_OFFSET UL(0x40000000) | 10 | #define PLAT_PHYS_OFFSET UL(0x40000000) |
11 | /* | ||
12 | * This is the maximum DMA address that can be DMAd to. | ||
13 | * There should not be more than (0xd0000000 - 0xc0000000) | ||
14 | * bytes of RAM. | ||
15 | */ | ||
16 | #define ARM_DMA_ZONE_SIZE SZ_256M | ||
17 | |||
18 | #endif | 11 | #endif |
diff --git a/arch/arm/mach-ixp4xx/avila-setup.c b/arch/arm/mach-ixp4xx/avila-setup.c index 73745ff102d5..ee19c1d383aa 100644 --- a/arch/arm/mach-ixp4xx/avila-setup.c +++ b/arch/arm/mach-ixp4xx/avila-setup.c | |||
@@ -169,6 +169,9 @@ MACHINE_START(AVILA, "Gateworks Avila Network Platform") | |||
169 | .timer = &ixp4xx_timer, | 169 | .timer = &ixp4xx_timer, |
170 | .boot_params = 0x0100, | 170 | .boot_params = 0x0100, |
171 | .init_machine = avila_init, | 171 | .init_machine = avila_init, |
172 | #if defined(CONFIG_PCI) | ||
173 | .dma_zone_size = SZ_64M, | ||
174 | #endif | ||
172 | MACHINE_END | 175 | MACHINE_END |
173 | 176 | ||
174 | /* | 177 | /* |
@@ -184,6 +187,9 @@ MACHINE_START(LOFT, "Giant Shoulder Inc Loft board") | |||
184 | .timer = &ixp4xx_timer, | 187 | .timer = &ixp4xx_timer, |
185 | .boot_params = 0x0100, | 188 | .boot_params = 0x0100, |
186 | .init_machine = avila_init, | 189 | .init_machine = avila_init, |
190 | #if defined(CONFIG_PCI) | ||
191 | .dma_zone_size = SZ_64M, | ||
192 | #endif | ||
187 | MACHINE_END | 193 | MACHINE_END |
188 | #endif | 194 | #endif |
189 | 195 | ||
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index e9a589395723..e2e98bbb6413 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c | |||
@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r | |||
316 | } | 316 | } |
317 | 317 | ||
318 | 318 | ||
319 | static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
320 | { | ||
321 | return (dma_addr + size) >= SZ_64M; | ||
322 | } | ||
323 | |||
319 | /* | 324 | /* |
320 | * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. | 325 | * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. |
321 | */ | 326 | */ |
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev) | |||
324 | if(dev->bus == &pci_bus_type) { | 329 | if(dev->bus == &pci_bus_type) { |
325 | *dev->dma_mask = SZ_64M - 1; | 330 | *dev->dma_mask = SZ_64M - 1; |
326 | dev->coherent_dma_mask = SZ_64M - 1; | 331 | dev->coherent_dma_mask = SZ_64M - 1; |
327 | dmabounce_register_dev(dev, 2048, 4096); | 332 | dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); |
328 | } | 333 | } |
329 | return 0; | 334 | return 0; |
330 | } | 335 | } |
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev) | |||
337 | return 0; | 342 | return 0; |
338 | } | 343 | } |
339 | 344 | ||
340 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
341 | { | ||
342 | return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M); | ||
343 | } | ||
344 | |||
345 | void __init ixp4xx_pci_preinit(void) | 345 | void __init ixp4xx_pci_preinit(void) |
346 | { | 346 | { |
347 | unsigned long cpuid = read_cpuid_id(); | 347 | unsigned long cpuid = read_cpuid_id(); |
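The common-pci.c hunk above converts the global dma_needs_bounce() hook into a per-device callback passed to dmabounce_register_dev(). A stand-alone sketch of that callback style; struct device and the mapping routine are simplified stand-ins, and only the 64MB policy in ixp4xx_needs_bounce() is taken from the patch.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_64M (64u << 20)

typedef uint32_t dma_addr_t;

/* Simplified stand-in for struct device: just a name plus the
 * per-device bounce policy that the patch introduces. */
struct device {
	const char *name;
	int (*needs_bounce)(struct device *dev, dma_addr_t addr, size_t size);
};

/* The policy from the patch: bounce any buffer that ends at or above 64MB. */
static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	(void)dev;
	return (addr + size) >= SZ_64M;
}

/* Toy mapping routine: consult the device's own policy instead of a
 * single global dma_needs_bounce() hook. */
static void map_buffer(struct device *dev, dma_addr_t addr, size_t size)
{
	if (dev->needs_bounce && dev->needs_bounce(dev, addr, size))
		printf("%s: bouncing buffer at %#lx (+%zu)\n",
		       dev->name, (unsigned long)addr, size);
	else
		printf("%s: mapping buffer at %#lx directly\n",
		       dev->name, (unsigned long)addr, size);
}

int main(void)
{
	struct device pci_dev = { "pci-dev", ixp4xx_needs_bounce };

	map_buffer(&pci_dev, 0x00100000, 4096);	/* below 64MB: direct  */
	map_buffer(&pci_dev, 0x04000000, 4096);	/* above 64MB: bounced */
	return 0;
}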
diff --git a/arch/arm/mach-ixp4xx/coyote-setup.c b/arch/arm/mach-ixp4xx/coyote-setup.c index 355e3de38733..e24564b5d935 100644 --- a/arch/arm/mach-ixp4xx/coyote-setup.c +++ b/arch/arm/mach-ixp4xx/coyote-setup.c | |||
@@ -114,6 +114,9 @@ MACHINE_START(ADI_COYOTE, "ADI Engineering Coyote") | |||
114 | .timer = &ixp4xx_timer, | 114 | .timer = &ixp4xx_timer, |
115 | .boot_params = 0x0100, | 115 | .boot_params = 0x0100, |
116 | .init_machine = coyote_init, | 116 | .init_machine = coyote_init, |
117 | #if defined(CONFIG_PCI) | ||
118 | .dma_zone_size = SZ_64M, | ||
119 | #endif | ||
117 | MACHINE_END | 120 | MACHINE_END |
118 | #endif | 121 | #endif |
119 | 122 | ||
diff --git a/arch/arm/mach-ixp4xx/dsmg600-setup.c b/arch/arm/mach-ixp4xx/dsmg600-setup.c index d398229cfaa5..03e54515e8b3 100644 --- a/arch/arm/mach-ixp4xx/dsmg600-setup.c +++ b/arch/arm/mach-ixp4xx/dsmg600-setup.c | |||
@@ -284,4 +284,7 @@ MACHINE_START(DSMG600, "D-Link DSM-G600 RevA") | |||
284 | .init_irq = ixp4xx_init_irq, | 284 | .init_irq = ixp4xx_init_irq, |
285 | .timer = &dsmg600_timer, | 285 | .timer = &dsmg600_timer, |
286 | .init_machine = dsmg600_init, | 286 | .init_machine = dsmg600_init, |
287 | #if defined(CONFIG_PCI) | ||
288 | .dma_zone_size = SZ_64M, | ||
289 | #endif | ||
287 | MACHINE_END | 290 | MACHINE_END |
diff --git a/arch/arm/mach-ixp4xx/fsg-setup.c b/arch/arm/mach-ixp4xx/fsg-setup.c index 727ee39ce11c..23a8b3614568 100644 --- a/arch/arm/mach-ixp4xx/fsg-setup.c +++ b/arch/arm/mach-ixp4xx/fsg-setup.c | |||
@@ -275,5 +275,8 @@ MACHINE_START(FSG, "Freecom FSG-3") | |||
275 | .timer = &ixp4xx_timer, | 275 | .timer = &ixp4xx_timer, |
276 | .boot_params = 0x0100, | 276 | .boot_params = 0x0100, |
277 | .init_machine = fsg_init, | 277 | .init_machine = fsg_init, |
278 | #if defined(CONFIG_PCI) | ||
279 | .dma_zone_size = SZ_64M, | ||
280 | #endif | ||
278 | MACHINE_END | 281 | MACHINE_END |
279 | 282 | ||
diff --git a/arch/arm/mach-ixp4xx/gateway7001-setup.c b/arch/arm/mach-ixp4xx/gateway7001-setup.c index 9dc0b4eaa65a..d4f851bdd9a4 100644 --- a/arch/arm/mach-ixp4xx/gateway7001-setup.c +++ b/arch/arm/mach-ixp4xx/gateway7001-setup.c | |||
@@ -101,5 +101,8 @@ MACHINE_START(GATEWAY7001, "Gateway 7001 AP") | |||
101 | .timer = &ixp4xx_timer, | 101 | .timer = &ixp4xx_timer, |
102 | .boot_params = 0x0100, | 102 | .boot_params = 0x0100, |
103 | .init_machine = gateway7001_init, | 103 | .init_machine = gateway7001_init, |
104 | #if defined(CONFIG_PCI) | ||
105 | .dma_zone_size = SZ_64M, | ||
106 | #endif | ||
104 | MACHINE_END | 107 | MACHINE_END |
105 | #endif | 108 | #endif |
diff --git a/arch/arm/mach-ixp4xx/goramo_mlr.c b/arch/arm/mach-ixp4xx/goramo_mlr.c index 3e8c0e33b59c..5f00ad224fe0 100644 --- a/arch/arm/mach-ixp4xx/goramo_mlr.c +++ b/arch/arm/mach-ixp4xx/goramo_mlr.c | |||
@@ -501,4 +501,7 @@ MACHINE_START(GORAMO_MLR, "MultiLink") | |||
501 | .timer = &ixp4xx_timer, | 501 | .timer = &ixp4xx_timer, |
502 | .boot_params = 0x0100, | 502 | .boot_params = 0x0100, |
503 | .init_machine = gmlr_init, | 503 | .init_machine = gmlr_init, |
504 | #if defined(CONFIG_PCI) | ||
505 | .dma_zone_size = SZ_64M, | ||
506 | #endif | ||
504 | MACHINE_END | 507 | MACHINE_END |
diff --git a/arch/arm/mach-ixp4xx/gtwx5715-setup.c b/arch/arm/mach-ixp4xx/gtwx5715-setup.c index 77abead36227..3790dffd3c30 100644 --- a/arch/arm/mach-ixp4xx/gtwx5715-setup.c +++ b/arch/arm/mach-ixp4xx/gtwx5715-setup.c | |||
@@ -169,6 +169,9 @@ MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)") | |||
169 | .timer = &ixp4xx_timer, | 169 | .timer = &ixp4xx_timer, |
170 | .boot_params = 0x0100, | 170 | .boot_params = 0x0100, |
171 | .init_machine = gtwx5715_init, | 171 | .init_machine = gtwx5715_init, |
172 | #if defined(CONFIG_PCI) | ||
173 | .dma_zone_size = SZ_64M, | ||
174 | #endif | ||
172 | MACHINE_END | 175 | MACHINE_END |
173 | 176 | ||
174 | 177 | ||
diff --git a/arch/arm/mach-ixp4xx/include/mach/memory.h b/arch/arm/mach-ixp4xx/include/mach/memory.h index 34e79404671a..4caf1761f1e2 100644 --- a/arch/arm/mach-ixp4xx/include/mach/memory.h +++ b/arch/arm/mach-ixp4xx/include/mach/memory.h | |||
@@ -14,8 +14,4 @@ | |||
14 | */ | 14 | */ |
15 | #define PLAT_PHYS_OFFSET UL(0x00000000) | 15 | #define PLAT_PHYS_OFFSET UL(0x00000000) |
16 | 16 | ||
17 | #ifdef CONFIG_PCI | ||
18 | #define ARM_DMA_ZONE_SIZE SZ_64M | ||
19 | #endif | ||
20 | |||
21 | #endif | 17 | #endif |
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c index dca4f7f9f4f7..6a2927956bf6 100644 --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c | |||
@@ -258,6 +258,9 @@ MACHINE_START(IXDP425, "Intel IXDP425 Development Platform") | |||
258 | .timer = &ixp4xx_timer, | 258 | .timer = &ixp4xx_timer, |
259 | .boot_params = 0x0100, | 259 | .boot_params = 0x0100, |
260 | .init_machine = ixdp425_init, | 260 | .init_machine = ixdp425_init, |
261 | #if defined(CONFIG_PCI) | ||
262 | .dma_zone_size = SZ_64M, | ||
263 | #endif | ||
261 | MACHINE_END | 264 | MACHINE_END |
262 | #endif | 265 | #endif |
263 | 266 | ||
@@ -269,6 +272,9 @@ MACHINE_START(IXDP465, "Intel IXDP465 Development Platform") | |||
269 | .timer = &ixp4xx_timer, | 272 | .timer = &ixp4xx_timer, |
270 | .boot_params = 0x0100, | 273 | .boot_params = 0x0100, |
271 | .init_machine = ixdp425_init, | 274 | .init_machine = ixdp425_init, |
275 | #if defined(CONFIG_PCI) | ||
276 | .dma_zone_size = SZ_64M, | ||
277 | #endif | ||
272 | MACHINE_END | 278 | MACHINE_END |
273 | #endif | 279 | #endif |
274 | 280 | ||
@@ -280,6 +286,9 @@ MACHINE_START(IXCDP1100, "Intel IXCDP1100 Development Platform") | |||
280 | .timer = &ixp4xx_timer, | 286 | .timer = &ixp4xx_timer, |
281 | .boot_params = 0x0100, | 287 | .boot_params = 0x0100, |
282 | .init_machine = ixdp425_init, | 288 | .init_machine = ixdp425_init, |
289 | #if defined(CONFIG_PCI) | ||
290 | .dma_zone_size = SZ_64M, | ||
291 | #endif | ||
283 | MACHINE_END | 292 | MACHINE_END |
284 | #endif | 293 | #endif |
285 | 294 | ||
@@ -291,5 +300,8 @@ MACHINE_START(KIXRP435, "Intel KIXRP435 Reference Platform") | |||
291 | .timer = &ixp4xx_timer, | 300 | .timer = &ixp4xx_timer, |
292 | .boot_params = 0x0100, | 301 | .boot_params = 0x0100, |
293 | .init_machine = ixdp425_init, | 302 | .init_machine = ixdp425_init, |
303 | #if defined(CONFIG_PCI) | ||
304 | .dma_zone_size = SZ_64M, | ||
305 | #endif | ||
294 | MACHINE_END | 306 | MACHINE_END |
295 | #endif | 307 | #endif |
diff --git a/arch/arm/mach-ixp4xx/nas100d-setup.c b/arch/arm/mach-ixp4xx/nas100d-setup.c index f18fee748878..afb51879d9a4 100644 --- a/arch/arm/mach-ixp4xx/nas100d-setup.c +++ b/arch/arm/mach-ixp4xx/nas100d-setup.c | |||
@@ -319,4 +319,7 @@ MACHINE_START(NAS100D, "Iomega NAS 100d") | |||
319 | .init_irq = ixp4xx_init_irq, | 319 | .init_irq = ixp4xx_init_irq, |
320 | .timer = &ixp4xx_timer, | 320 | .timer = &ixp4xx_timer, |
321 | .init_machine = nas100d_init, | 321 | .init_machine = nas100d_init, |
322 | #if defined(CONFIG_PCI) | ||
323 | .dma_zone_size = SZ_64M, | ||
324 | #endif | ||
322 | MACHINE_END | 325 | MACHINE_END |
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c index f79b62eb7614..69e40f2cf092 100644 --- a/arch/arm/mach-ixp4xx/nslu2-setup.c +++ b/arch/arm/mach-ixp4xx/nslu2-setup.c | |||
@@ -305,4 +305,7 @@ MACHINE_START(NSLU2, "Linksys NSLU2") | |||
305 | .init_irq = ixp4xx_init_irq, | 305 | .init_irq = ixp4xx_init_irq, |
306 | .timer = &nslu2_timer, | 306 | .timer = &nslu2_timer, |
307 | .init_machine = nslu2_init, | 307 | .init_machine = nslu2_init, |
308 | #if defined(CONFIG_PCI) | ||
309 | .dma_zone_size = SZ_64M, | ||
310 | #endif | ||
308 | MACHINE_END | 311 | MACHINE_END |
diff --git a/arch/arm/mach-ixp4xx/vulcan-setup.c b/arch/arm/mach-ixp4xx/vulcan-setup.c index 4e72cfdd3c46..045336c833af 100644 --- a/arch/arm/mach-ixp4xx/vulcan-setup.c +++ b/arch/arm/mach-ixp4xx/vulcan-setup.c | |||
@@ -241,4 +241,7 @@ MACHINE_START(ARCOM_VULCAN, "Arcom/Eurotech Vulcan") | |||
241 | .timer = &ixp4xx_timer, | 241 | .timer = &ixp4xx_timer, |
242 | .boot_params = 0x0100, | 242 | .boot_params = 0x0100, |
243 | .init_machine = vulcan_init, | 243 | .init_machine = vulcan_init, |
244 | #if defined(CONFIG_PCI) | ||
245 | .dma_zone_size = SZ_64M, | ||
246 | #endif | ||
244 | MACHINE_END | 247 | MACHINE_END |
diff --git a/arch/arm/mach-ixp4xx/wg302v2-setup.c b/arch/arm/mach-ixp4xx/wg302v2-setup.c index 5d148c7bc4fb..40b9fad800b8 100644 --- a/arch/arm/mach-ixp4xx/wg302v2-setup.c +++ b/arch/arm/mach-ixp4xx/wg302v2-setup.c | |||
@@ -102,5 +102,8 @@ MACHINE_START(WG302V2, "Netgear WG302 v2 / WAG302 v2") | |||
102 | .timer = &ixp4xx_timer, | 102 | .timer = &ixp4xx_timer, |
103 | .boot_params = 0x0100, | 103 | .boot_params = 0x0100, |
104 | .init_machine = wg302v2_init, | 104 | .init_machine = wg302v2_init, |
105 | #if defined(CONFIG_PCI) | ||
106 | .dma_zone_size = SZ_64M, | ||
107 | #endif | ||
105 | MACHINE_END | 108 | MACHINE_END |
106 | #endif | 109 | #endif |
diff --git a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S index 870227c96602..b725f6c93975 100644 --- a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S +++ b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S | |||
@@ -41,7 +41,3 @@ | |||
41 | rsb \irqnr, \irqnr, #31 | 41 | rsb \irqnr, \irqnr, #31 |
42 | teq \irqstat, #0 | 42 | teq \irqstat, #0 |
43 | .endm | 43 | .endm |
44 | |||
45 | .macro irq_prio_table | ||
46 | .endm | ||
47 | |||
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index 2034098cf015..315b9f365329 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c | |||
@@ -157,12 +157,4 @@ void __init smp_init_cpus(void) | |||
157 | 157 | ||
158 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 158 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
159 | { | 159 | { |
160 | int i; | ||
161 | |||
162 | /* | ||
163 | * Initialise the present map, which describes the set of CPUs | ||
164 | * actually populated at the present time. | ||
165 | */ | ||
166 | for (i = 0; i < max_cpus; i++) | ||
167 | set_cpu_present(i, true); | ||
168 | } | 160 | } |
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c index da53ba3917ca..aab884fecc55 100644 --- a/arch/arm/mach-omap2/control.c +++ b/arch/arm/mach-omap2/control.c | |||
@@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void) | |||
286 | scratchpad_contents.boot_config_ptr = 0x0; | 286 | scratchpad_contents.boot_config_ptr = 0x0; |
287 | if (cpu_is_omap3630()) | 287 | if (cpu_is_omap3630()) |
288 | scratchpad_contents.public_restore_ptr = | 288 | scratchpad_contents.public_restore_ptr = |
289 | virt_to_phys(get_omap3630_restore_pointer()); | 289 | virt_to_phys(omap3_restore_3630); |
290 | else if (omap_rev() != OMAP3430_REV_ES3_0 && | 290 | else if (omap_rev() != OMAP3430_REV_ES3_0 && |
291 | omap_rev() != OMAP3430_REV_ES3_1) | 291 | omap_rev() != OMAP3430_REV_ES3_1) |
292 | scratchpad_contents.public_restore_ptr = | 292 | scratchpad_contents.public_restore_ptr = |
293 | virt_to_phys(get_restore_pointer()); | 293 | virt_to_phys(omap3_restore); |
294 | else | 294 | else |
295 | scratchpad_contents.public_restore_ptr = | 295 | scratchpad_contents.public_restore_ptr = |
296 | virt_to_phys(get_es3_restore_pointer()); | 296 | virt_to_phys(omap3_restore_es3); |
297 | |||
297 | if (omap_type() == OMAP2_DEVICE_TYPE_GP) | 298 | if (omap_type() == OMAP2_DEVICE_TYPE_GP) |
298 | scratchpad_contents.secure_ram_restore_ptr = 0x0; | 299 | scratchpad_contents.secure_ram_restore_ptr = 0x0; |
299 | else | 300 | else |
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h index a016c8b59e00..d4ef75d5a382 100644 --- a/arch/arm/mach-omap2/control.h +++ b/arch/arm/mach-omap2/control.h | |||
@@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset); | |||
386 | 386 | ||
387 | extern void omap3_save_scratchpad_contents(void); | 387 | extern void omap3_save_scratchpad_contents(void); |
388 | extern void omap3_clear_scratchpad_contents(void); | 388 | extern void omap3_clear_scratchpad_contents(void); |
389 | extern u32 *get_restore_pointer(void); | 389 | extern void omap3_restore(void); |
390 | extern u32 *get_es3_restore_pointer(void); | 390 | extern void omap3_restore_es3(void); |
391 | extern u32 *get_omap3630_restore_pointer(void); | 391 | extern void omap3_restore_3630(void); |
392 | extern u32 omap3_arm_context[128]; | 392 | extern u32 omap3_arm_context[128]; |
393 | extern void omap3_control_save_context(void); | 393 | extern void omap3_control_save_context(void); |
394 | extern void omap3_control_restore_context(void); | 394 | extern void omap3_control_restore_context(void); |
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S index a48690b90990..ceb8b7e593d7 100644 --- a/arch/arm/mach-omap2/include/mach/entry-macro.S +++ b/arch/arm/mach-omap2/include/mach/entry-macro.S | |||
@@ -165,6 +165,3 @@ | |||
165 | #endif | 165 | #endif |
166 | 166 | ||
167 | #endif /* MULTI_OMAP2 */ | 167 | #endif /* MULTI_OMAP2 */ |
168 | |||
169 | .macro irq_prio_table | ||
170 | .endm | ||
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index ecfe93c4b585..ce65e9329c7b 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
@@ -125,14 +125,6 @@ void __init smp_init_cpus(void) | |||
125 | 125 | ||
126 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 126 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
127 | { | 127 | { |
128 | int i; | ||
129 | |||
130 | /* | ||
131 | * Initialise the present map, which describes the set of CPUs | ||
132 | * actually populated at the present time. | ||
133 | */ | ||
134 | for (i = 0; i < max_cpus; i++) | ||
135 | set_cpu_present(i, true); | ||
136 | 128 | ||
137 | /* | 129 | /* |
138 | * Initialise the SCU and wake up the secondary core using | 130 | * Initialise the SCU and wake up the secondary core using |
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 45bcfce77352..04ee56646126 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h | |||
@@ -88,18 +88,28 @@ extern int pm_dbg_regset_init(int reg_set); | |||
88 | #define pm_dbg_regset_init(reg_set) do {} while (0); | 88 | #define pm_dbg_regset_init(reg_set) do {} while (0); |
89 | #endif /* CONFIG_PM_DEBUG */ | 89 | #endif /* CONFIG_PM_DEBUG */ |
90 | 90 | ||
91 | /* 24xx */ | ||
91 | extern void omap24xx_idle_loop_suspend(void); | 92 | extern void omap24xx_idle_loop_suspend(void); |
93 | extern unsigned int omap24xx_idle_loop_suspend_sz; | ||
92 | 94 | ||
93 | extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, | 95 | extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, |
94 | void __iomem *sdrc_power); | 96 | void __iomem *sdrc_power); |
95 | extern void omap34xx_cpu_suspend(u32 *addr, int save_state); | 97 | extern unsigned int omap24xx_cpu_suspend_sz; |
96 | extern int save_secure_ram_context(u32 *addr); | ||
97 | extern void omap3_save_scratchpad_contents(void); | ||
98 | 98 | ||
99 | extern unsigned int omap24xx_idle_loop_suspend_sz; | 99 | /* 3xxx */ |
100 | extern void omap34xx_cpu_suspend(int save_state); | ||
101 | |||
102 | /* omap3_do_wfi function pointer and size, for copy to SRAM */ | ||
103 | extern void omap3_do_wfi(void); | ||
104 | extern unsigned int omap3_do_wfi_sz; | ||
105 | /* ... and its pointer from SRAM after copy */ | ||
106 | extern void (*omap3_do_wfi_sram)(void); | ||
107 | |||
108 | /* save_secure_ram_context function pointer and size, for copy to SRAM */ | ||
109 | extern int save_secure_ram_context(u32 *addr); | ||
100 | extern unsigned int save_secure_ram_context_sz; | 110 | extern unsigned int save_secure_ram_context_sz; |
101 | extern unsigned int omap24xx_cpu_suspend_sz; | 111 | |
102 | extern unsigned int omap34xx_cpu_suspend_sz; | 112 | extern void omap3_save_scratchpad_contents(void); |
103 | 113 | ||
104 | #define PM_RTA_ERRATUM_i608 (1 << 0) | 114 | #define PM_RTA_ERRATUM_i608 (1 << 0) |
105 | #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) | 115 | #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index c155c9d1c82c..b77d82665abb 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include <linux/console.h> | 31 | #include <linux/console.h> |
32 | #include <trace/events/power.h> | 32 | #include <trace/events/power.h> |
33 | 33 | ||
34 | #include <asm/suspend.h> | ||
35 | |||
34 | #include <plat/sram.h> | 36 | #include <plat/sram.h> |
35 | #include "clockdomain.h" | 37 | #include "clockdomain.h" |
36 | #include "powerdomain.h" | 38 | #include "powerdomain.h" |
@@ -40,8 +42,6 @@ | |||
40 | #include <plat/gpmc.h> | 42 | #include <plat/gpmc.h> |
41 | #include <plat/dma.h> | 43 | #include <plat/dma.h> |
42 | 44 | ||
43 | #include <asm/tlbflush.h> | ||
44 | |||
45 | #include "cm2xxx_3xxx.h" | 45 | #include "cm2xxx_3xxx.h" |
46 | #include "cm-regbits-34xx.h" | 46 | #include "cm-regbits-34xx.h" |
47 | #include "prm-regbits-34xx.h" | 47 | #include "prm-regbits-34xx.h" |
@@ -64,11 +64,6 @@ static inline bool is_suspending(void) | |||
64 | } | 64 | } |
65 | #endif | 65 | #endif |
66 | 66 | ||
67 | /* Scratchpad offsets */ | ||
68 | #define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4 | ||
69 | #define OMAP343X_TABLE_VALUE_OFFSET 0xc0 | ||
70 | #define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8 | ||
71 | |||
72 | /* pm34xx errata defined in pm.h */ | 67 | /* pm34xx errata defined in pm.h */ |
73 | u16 pm34xx_errata; | 68 | u16 pm34xx_errata; |
74 | 69 | ||
@@ -83,9 +78,8 @@ struct power_state { | |||
83 | 78 | ||
84 | static LIST_HEAD(pwrst_list); | 79 | static LIST_HEAD(pwrst_list); |
85 | 80 | ||
86 | static void (*_omap_sram_idle)(u32 *addr, int save_state); | ||
87 | |||
88 | static int (*_omap_save_secure_sram)(u32 *addr); | 81 | static int (*_omap_save_secure_sram)(u32 *addr); |
82 | void (*omap3_do_wfi_sram)(void); | ||
89 | 83 | ||
90 | static struct powerdomain *mpu_pwrdm, *neon_pwrdm; | 84 | static struct powerdomain *mpu_pwrdm, *neon_pwrdm; |
91 | static struct powerdomain *core_pwrdm, *per_pwrdm; | 85 | static struct powerdomain *core_pwrdm, *per_pwrdm; |
@@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) | |||
312 | return IRQ_HANDLED; | 306 | return IRQ_HANDLED; |
313 | } | 307 | } |
314 | 308 | ||
315 | /* Function to restore the table entry that was modified for enabling MMU */ | 309 | static void omap34xx_save_context(u32 *save) |
316 | static void restore_table_entry(void) | ||
317 | { | 310 | { |
318 | void __iomem *scratchpad_address; | 311 | u32 val; |
319 | u32 previous_value, control_reg_value; | ||
320 | u32 *address; | ||
321 | 312 | ||
322 | scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); | 313 | /* Read Auxiliary Control Register */ |
314 | asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val)); | ||
315 | *save++ = 1; | ||
316 | *save++ = val; | ||
323 | 317 | ||
324 | /* Get address of entry that was modified */ | 318 | /* Read L2 AUX ctrl register */ |
325 | address = (u32 *)__raw_readl(scratchpad_address + | 319 | asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val)); |
326 | OMAP343X_TABLE_ADDRESS_OFFSET); | 320 | *save++ = 1; |
327 | /* Get the previous value which needs to be restored */ | 321 | *save++ = val; |
328 | previous_value = __raw_readl(scratchpad_address + | 322 | } |
329 | OMAP343X_TABLE_VALUE_OFFSET); | 323 | |
330 | address = __va(address); | 324 | static int omap34xx_do_sram_idle(unsigned long save_state) |
331 | *address = previous_value; | 325 | { |
332 | flush_tlb_all(); | 326 | omap34xx_cpu_suspend(save_state); |
333 | control_reg_value = __raw_readl(scratchpad_address | 327 | return 0; |
334 | + OMAP343X_CONTROL_REG_VALUE_OFFSET); | ||
335 | /* This will enable caches and prediction */ | ||
336 | set_cr(control_reg_value); | ||
337 | } | 328 | } |
338 | 329 | ||
339 | void omap_sram_idle(void) | 330 | void omap_sram_idle(void) |
@@ -352,9 +343,6 @@ void omap_sram_idle(void) | |||
352 | int core_prev_state, per_prev_state; | 343 | int core_prev_state, per_prev_state; |
353 | u32 sdrc_pwr = 0; | 344 | u32 sdrc_pwr = 0; |
354 | 345 | ||
355 | if (!_omap_sram_idle) | ||
356 | return; | ||
357 | |||
358 | pwrdm_clear_all_prev_pwrst(mpu_pwrdm); | 346 | pwrdm_clear_all_prev_pwrst(mpu_pwrdm); |
359 | pwrdm_clear_all_prev_pwrst(neon_pwrdm); | 347 | pwrdm_clear_all_prev_pwrst(neon_pwrdm); |
360 | pwrdm_clear_all_prev_pwrst(core_pwrdm); | 348 | pwrdm_clear_all_prev_pwrst(core_pwrdm); |
@@ -432,12 +420,16 @@ void omap_sram_idle(void) | |||
432 | sdrc_pwr = sdrc_read_reg(SDRC_POWER); | 420 | sdrc_pwr = sdrc_read_reg(SDRC_POWER); |
433 | 421 | ||
434 | /* | 422 | /* |
435 | * omap3_arm_context is the location where ARM registers | 423 | * omap3_arm_context is the location where some ARM context |
436 | * get saved. The restore path then reads from this | 424 | * get saved. The rest is placed on the stack, and restored |
437 | * location and restores them back. | 425 | * from there before resuming. |
438 | */ | 426 | */ |
439 | _omap_sram_idle(omap3_arm_context, save_state); | 427 | if (save_state) |
440 | cpu_init(); | 428 | omap34xx_save_context(omap3_arm_context); |
429 | if (save_state == 1 || save_state == 3) | ||
430 | cpu_suspend(save_state, omap34xx_do_sram_idle); | ||
431 | else | ||
432 | omap34xx_do_sram_idle(save_state); | ||
441 | 433 | ||
442 | /* Restore normal SDRC POWER settings */ | 434 | /* Restore normal SDRC POWER settings */ |
443 | if (omap_rev() >= OMAP3430_REV_ES3_0 && | 435 | if (omap_rev() >= OMAP3430_REV_ES3_0 && |
@@ -445,10 +437,6 @@ void omap_sram_idle(void) | |||
445 | core_next_state == PWRDM_POWER_OFF) | 437 | core_next_state == PWRDM_POWER_OFF) |
446 | sdrc_write_reg(sdrc_pwr, SDRC_POWER); | 438 | sdrc_write_reg(sdrc_pwr, SDRC_POWER); |
447 | 439 | ||
448 | /* Restore table entry modified during MMU restoration */ | ||
449 | if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF) | ||
450 | restore_table_entry(); | ||
451 | |||
452 | /* CORE */ | 440 | /* CORE */ |
453 | if (core_next_state < PWRDM_POWER_ON) { | 441 | if (core_next_state < PWRDM_POWER_ON) { |
454 | core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); | 442 | core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); |
@@ -852,10 +840,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) | |||
852 | return 0; | 840 | return 0; |
853 | } | 841 | } |
854 | 842 | ||
843 | /* | ||
844 | * Push functions to SRAM | ||
845 | * | ||
846 | * The minimum set of functions is pushed to SRAM for execution: | ||
847 | * - omap3_do_wfi for erratum i581 WA, | ||
848 | * - save_secure_ram_context for security extensions. | ||
849 | */ | ||
855 | void omap_push_sram_idle(void) | 850 | void omap_push_sram_idle(void) |
856 | { | 851 | { |
857 | _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, | 852 | omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); |
858 | omap34xx_cpu_suspend_sz); | 853 | |
859 | if (omap_type() != OMAP2_DEVICE_TYPE_GP) | 854 | if (omap_type() != OMAP2_DEVICE_TYPE_GP) |
860 | _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, | 855 | _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, |
861 | save_secure_ram_context_sz); | 856 | save_secure_ram_context_sz); |
@@ -920,7 +915,6 @@ static int __init omap3_pm_init(void) | |||
920 | per_clkdm = clkdm_lookup("per_clkdm"); | 915 | per_clkdm = clkdm_lookup("per_clkdm"); |
921 | core_clkdm = clkdm_lookup("core_clkdm"); | 916 | core_clkdm = clkdm_lookup("core_clkdm"); |
922 | 917 | ||
923 | omap_push_sram_idle(); | ||
924 | #ifdef CONFIG_SUSPEND | 918 | #ifdef CONFIG_SUSPEND |
925 | suspend_set_ops(&omap_pm_ops); | 919 | suspend_set_ops(&omap_pm_ops); |
926 | #endif /* CONFIG_SUSPEND */ | 920 | #endif /* CONFIG_SUSPEND */ |
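The pm34xx.c rework above stops copying the whole suspend routine to SRAM; it saves the ARM auxiliary/L2 control registers in C and goes through the generic cpu_suspend() helper only when CPU state will actually be lost (save_state 1 or 3, per the table kept in sleep34xx.S). A stand-alone model of that dispatch; the helpers here are stubs that only print what the real code would do.

#include <stdio.h>

/* Stub for the SRAM/SDRAM WFI path; the real code picks the SRAM copy
 * or the SDRAM copy depending on the target state. */
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	printf("  WFI path, save_state=%lu\n", save_state);
	return 0;
}

/* Simplified model of the generic helper: save restorable CPU state,
 * then call the finisher that actually enters the low-power state. */
static void cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	printf("  cpu_suspend: context saved, calling finisher\n");
	fn(arg);
}

/* Stub for the C context save added by the patch. */
static void omap34xx_save_context(void)
{
	printf("  saving ARM context to omap3_arm_context\n");
}

/* Dispatch copied from the patched omap_sram_idle(): save context
 * whenever anything is lost, use cpu_suspend() only for states where
 * the CPU itself loses context (1 or 3). */
static void sram_idle(unsigned long save_state)
{
	printf("save_state=%lu:\n", save_state);
	if (save_state)
		omap34xx_save_context();
	if (save_state == 1 || save_state == 3)
		cpu_suspend(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);
}

int main(void)
{
	for (unsigned long s = 0; s <= 3; s++)
		sram_idle(s);
	return 0;
}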
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 63f10669571a..f2ea1bd1c691 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S | |||
@@ -74,46 +74,6 @@ | |||
74 | * API functions | 74 | * API functions |
75 | */ | 75 | */ |
76 | 76 | ||
77 | /* | ||
78 | * The "get_*restore_pointer" functions are used to provide a | ||
79 | * physical restore address where the ROM code jumps while waking | ||
80 | * up from MPU OFF/OSWR state. | ||
81 | * The restore pointer is stored into the scratchpad. | ||
82 | */ | ||
83 | |||
84 | .text | ||
85 | /* Function call to get the restore pointer for resume from OFF */ | ||
86 | ENTRY(get_restore_pointer) | ||
87 | stmfd sp!, {lr} @ save registers on stack | ||
88 | adr r0, restore | ||
89 | ldmfd sp!, {pc} @ restore regs and return | ||
90 | ENDPROC(get_restore_pointer) | ||
91 | .align | ||
92 | ENTRY(get_restore_pointer_sz) | ||
93 | .word . - get_restore_pointer | ||
94 | |||
95 | .text | ||
96 | /* Function call to get the restore pointer for 3630 resume from OFF */ | ||
97 | ENTRY(get_omap3630_restore_pointer) | ||
98 | stmfd sp!, {lr} @ save registers on stack | ||
99 | adr r0, restore_3630 | ||
100 | ldmfd sp!, {pc} @ restore regs and return | ||
101 | ENDPROC(get_omap3630_restore_pointer) | ||
102 | .align | ||
103 | ENTRY(get_omap3630_restore_pointer_sz) | ||
104 | .word . - get_omap3630_restore_pointer | ||
105 | |||
106 | .text | ||
107 | /* Function call to get the restore pointer for ES3 to resume from OFF */ | ||
108 | ENTRY(get_es3_restore_pointer) | ||
109 | stmfd sp!, {lr} @ save registers on stack | ||
110 | adr r0, restore_es3 | ||
111 | ldmfd sp!, {pc} @ restore regs and return | ||
112 | ENDPROC(get_es3_restore_pointer) | ||
113 | .align | ||
114 | ENTRY(get_es3_restore_pointer_sz) | ||
115 | .word . - get_es3_restore_pointer | ||
116 | |||
117 | .text | 77 | .text |
118 | /* | 78 | /* |
119 | * L2 cache needs to be toggled for stable OFF mode functionality on 3630. | 79 | * L2 cache needs to be toggled for stable OFF mode functionality on 3630. |
@@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore) | |||
133 | /* Function to call rom code to save secure ram context */ | 93 | /* Function to call rom code to save secure ram context */ |
134 | .align 3 | 94 | .align 3 |
135 | ENTRY(save_secure_ram_context) | 95 | ENTRY(save_secure_ram_context) |
136 | stmfd sp!, {r1-r12, lr} @ save registers on stack | 96 | stmfd sp!, {r4 - r11, lr} @ save registers on stack |
137 | adr r3, api_params @ r3 points to parameters | 97 | adr r3, api_params @ r3 points to parameters |
138 | str r0, [r3,#0x4] @ r0 has sdram address | 98 | str r0, [r3,#0x4] @ r0 has sdram address |
139 | ldr r12, high_mask | 99 | ldr r12, high_mask |
@@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context) | |||
152 | nop | 112 | nop |
153 | nop | 113 | nop |
154 | nop | 114 | nop |
155 | ldmfd sp!, {r1-r12, pc} | 115 | ldmfd sp!, {r4 - r11, pc} |
156 | .align | 116 | .align |
157 | sram_phy_addr_mask: | 117 | sram_phy_addr_mask: |
158 | .word SRAM_BASE_P | 118 | .word SRAM_BASE_P |
@@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz) | |||
179 | * | 139 | * |
180 | * | 140 | * |
181 | * Notes: | 141 | * Notes: |
182 | * - this code gets copied to internal SRAM at boot and after wake-up | 142 | * - only the minimum set of functions gets copied to internal SRAM at boot |
183 | * from OFF mode. The execution pointer in SRAM is _omap_sram_idle. | 143 | * and after wake-up from OFF mode, cf. omap_push_sram_idle. The function |
144 | * pointers in SDRAM or SRAM are called depending on the desired low power | ||
145 | * target state. | ||
184 | * - when the OMAP wakes up it continues at different execution points | 146 | * - when the OMAP wakes up it continues at different execution points |
185 | * depending on the low power mode (non-OFF vs OFF modes), | 147 | * depending on the low power mode (non-OFF vs OFF modes), |
186 | * cf. 'Resume path for xxx mode' comments. | 148 | * cf. 'Resume path for xxx mode' comments. |
187 | */ | 149 | */ |
188 | .align 3 | 150 | .align 3 |
189 | ENTRY(omap34xx_cpu_suspend) | 151 | ENTRY(omap34xx_cpu_suspend) |
190 | stmfd sp!, {r0-r12, lr} @ save registers on stack | 152 | stmfd sp!, {r4 - r11, lr} @ save registers on stack |
191 | 153 | ||
192 | /* | 154 | /* |
193 | * r0 contains CPU context save/restore pointer in sdram | 155 | * r0 contains information about saving context: |
194 | * r1 contains information about saving context: | ||
195 | * 0 - No context lost | 156 | * 0 - No context lost |
196 | * 1 - Only L1 and logic lost | 157 | * 1 - Only L1 and logic lost |
197 | * 2 - Only L2 lost (Even if L1 is retained we clean it along with L2) | 158 | * 2 - Only L2 lost (Even if L1 is retained we clean it along with L2) |
198 | * 3 - Both L1 and L2 lost and logic lost | 159 | * 3 - Both L1 and L2 lost and logic lost |
199 | */ | 160 | */ |
200 | 161 | ||
201 | /* Directly jump to WFI is the context save is not required */ | 162 | /* |
202 | cmp r1, #0x0 | 163 | * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi) |
203 | beq omap3_do_wfi | 164 | * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram) |
165 | */ | ||
166 | ldr r4, omap3_do_wfi_sram_addr | ||
167 | ldr r5, [r4] | ||
168 | cmp r0, #0x0 @ If no context save required, | ||
169 | bxeq r5 @ jump to the WFI code in SRAM | ||
170 | |||
204 | 171 | ||
205 | /* Otherwise fall through to the save context code */ | 172 | /* Otherwise fall through to the save context code */ |
206 | save_context_wfi: | 173 | save_context_wfi: |
207 | mov r8, r0 @ Store SDRAM address in r8 | ||
208 | mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register | ||
209 | mov r4, #0x1 @ Number of parameters for restore call | ||
210 | stmia r8!, {r4-r5} @ Push parameters for restore call | ||
211 | mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register | ||
212 | stmia r8!, {r4-r5} @ Push parameters for restore call | ||
213 | |||
214 | /* Check what that target sleep state is from r1 */ | ||
215 | cmp r1, #0x2 @ Only L2 lost, no need to save context | ||
216 | beq clean_caches | ||
217 | |||
218 | l1_logic_lost: | ||
219 | mov r4, sp @ Store sp | ||
220 | mrs r5, spsr @ Store spsr | ||
221 | mov r6, lr @ Store lr | ||
222 | stmia r8!, {r4-r6} | ||
223 | |||
224 | mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register | ||
225 | mrc p15, 0, r5, c2, c0, 0 @ TTBR0 | ||
226 | mrc p15, 0, r6, c2, c0, 1 @ TTBR1 | ||
227 | mrc p15, 0, r7, c2, c0, 2 @ TTBCR | ||
228 | stmia r8!, {r4-r7} | ||
229 | |||
230 | mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register | ||
231 | mrc p15, 0, r5, c10, c2, 0 @ PRRR | ||
232 | mrc p15, 0, r6, c10, c2, 1 @ NMRR | ||
233 | stmia r8!,{r4-r6} | ||
234 | |||
235 | mrc p15, 0, r4, c13, c0, 1 @ Context ID | ||
236 | mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID | ||
237 | mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address | ||
238 | mrs r7, cpsr @ Store current cpsr | ||
239 | stmia r8!, {r4-r7} | ||
240 | |||
241 | mrc p15, 0, r4, c1, c0, 0 @ save control register | ||
242 | stmia r8!, {r4} | ||
243 | |||
244 | clean_caches: | ||
245 | /* | 174 | /* |
246 | * jump out to kernel flush routine | 175 | * jump out to kernel flush routine |
247 | * - reuse that code is better | 176 | * - reuse that code is better |
@@ -284,7 +213,32 @@ clean_caches: | |||
284 | THUMB( nop ) | 213 | THUMB( nop ) |
285 | .arm | 214 | .arm |
286 | 215 | ||
287 | omap3_do_wfi: | 216 | b omap3_do_wfi |
217 | |||
218 | /* | ||
219 | * Local variables | ||
220 | */ | ||
221 | omap3_do_wfi_sram_addr: | ||
222 | .word omap3_do_wfi_sram | ||
223 | kernel_flush: | ||
224 | .word v7_flush_dcache_all | ||
225 | |||
226 | /* =================================== | ||
227 | * == WFI instruction => Enter idle == | ||
228 | * =================================== | ||
229 | */ | ||
230 | |||
231 | /* | ||
232 | * Do WFI instruction | ||
233 | * Includes the resume path for non-OFF modes | ||
234 | * | ||
235 | * This code gets copied to internal SRAM and is accessible | ||
236 | * from both SDRAM and SRAM: | ||
237 | * - executed from SRAM for non-off modes (omap3_do_wfi_sram), | ||
238 | * - executed from SDRAM for OFF mode (omap3_do_wfi). | ||
239 | */ | ||
240 | .align 3 | ||
241 | ENTRY(omap3_do_wfi) | ||
288 | ldr r4, sdrc_power @ read the SDRC_POWER register | 242 | ldr r4, sdrc_power @ read the SDRC_POWER register |
289 | ldr r5, [r4] @ read the contents of SDRC_POWER | 243 | ldr r5, [r4] @ read the contents of SDRC_POWER |
290 | orr r5, r5, #0x40 @ enable self refresh on idle req | 244 | orr r5, r5, #0x40 @ enable self refresh on idle req |
@@ -316,8 +270,86 @@ omap3_do_wfi: | |||
316 | nop | 270 | nop |
317 | nop | 271 | nop |
318 | nop | 272 | nop |
319 | bl wait_sdrc_ok | ||
320 | 273 | ||
274 | /* | ||
275 | * This function implements the erratum ID i581 WA: | ||
276 | * SDRC state restore before accessing the SDRAM | ||
277 | * | ||
278 | * Only used at return from non-OFF mode. For OFF | ||
279 | * mode the ROM code configures the SDRC and | ||
280 | * the DPLL before calling the restore code directly | ||
281 | * from DDR. | ||
282 | */ | ||
283 | |||
284 | /* Make sure SDRC accesses are ok */ | ||
285 | wait_sdrc_ok: | ||
286 | |||
287 | /* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */ | ||
288 | ldr r4, cm_idlest_ckgen | ||
289 | wait_dpll3_lock: | ||
290 | ldr r5, [r4] | ||
291 | tst r5, #1 | ||
292 | beq wait_dpll3_lock | ||
293 | |||
294 | ldr r4, cm_idlest1_core | ||
295 | wait_sdrc_ready: | ||
296 | ldr r5, [r4] | ||
297 | tst r5, #0x2 | ||
298 | bne wait_sdrc_ready | ||
299 | /* allow DLL powerdown upon hw idle req */ | ||
300 | ldr r4, sdrc_power | ||
301 | ldr r5, [r4] | ||
302 | bic r5, r5, #0x40 | ||
303 | str r5, [r4] | ||
304 | |||
305 | /* | ||
306 | * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a | ||
307 | * base instead. | ||
308 | * Be careful not to clobber r7 when maintaining this code. | ||
309 | */ | ||
310 | |||
311 | is_dll_in_lock_mode: | ||
312 | /* Is dll in lock mode? */ | ||
313 | ldr r4, sdrc_dlla_ctrl | ||
314 | ldr r5, [r4] | ||
315 | tst r5, #0x4 | ||
316 | bne exit_nonoff_modes @ Return if locked | ||
317 | /* wait till dll locks */ | ||
318 | adr r7, kick_counter | ||
319 | wait_dll_lock_timed: | ||
320 | ldr r4, wait_dll_lock_counter | ||
321 | add r4, r4, #1 | ||
322 | str r4, [r7, #wait_dll_lock_counter - kick_counter] | ||
323 | ldr r4, sdrc_dlla_status | ||
324 | /* Wait 20uS for lock */ | ||
325 | mov r6, #8 | ||
326 | wait_dll_lock: | ||
327 | subs r6, r6, #0x1 | ||
328 | beq kick_dll | ||
329 | ldr r5, [r4] | ||
330 | and r5, r5, #0x4 | ||
331 | cmp r5, #0x4 | ||
332 | bne wait_dll_lock | ||
333 | b exit_nonoff_modes @ Return when locked | ||
334 | |||
335 | /* disable/reenable DLL if not locked */ | ||
336 | kick_dll: | ||
337 | ldr r4, sdrc_dlla_ctrl | ||
338 | ldr r5, [r4] | ||
339 | mov r6, r5 | ||
340 | bic r6, #(1<<3) @ disable dll | ||
341 | str r6, [r4] | ||
342 | dsb | ||
343 | orr r6, r6, #(1<<3) @ enable dll | ||
344 | str r6, [r4] | ||
345 | dsb | ||
346 | ldr r4, kick_counter | ||
347 | add r4, r4, #1 | ||
348 | str r4, [r7] @ kick_counter | ||
349 | b wait_dll_lock_timed | ||
350 | |||
351 | exit_nonoff_modes: | ||
352 | /* Re-enable C-bit if needed */ | ||
321 | mrc p15, 0, r0, c1, c0, 0 | 353 | mrc p15, 0, r0, c1, c0, 0 |
322 | tst r0, #(1 << 2) @ Check C bit enabled? | 354 | tst r0, #(1 << 2) @ Check C bit enabled? |
323 | orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared | 355 | orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared |
@@ -329,7 +361,32 @@ omap3_do_wfi: | |||
329 | * == Exit point from non-OFF modes == | 361 | * == Exit point from non-OFF modes == |
330 | * =================================== | 362 | * =================================== |
331 | */ | 363 | */ |
332 | ldmfd sp!, {r0-r12, pc} @ restore regs and return | 364 | ldmfd sp!, {r4 - r11, pc} @ restore regs and return |
365 | |||
366 | /* | ||
367 | * Local variables | ||
368 | */ | ||
369 | sdrc_power: | ||
370 | .word SDRC_POWER_V | ||
371 | cm_idlest1_core: | ||
372 | .word CM_IDLEST1_CORE_V | ||
373 | cm_idlest_ckgen: | ||
374 | .word CM_IDLEST_CKGEN_V | ||
375 | sdrc_dlla_status: | ||
376 | .word SDRC_DLLA_STATUS_V | ||
377 | sdrc_dlla_ctrl: | ||
378 | .word SDRC_DLLA_CTRL_V | ||
379 | /* | ||
380 | * When exporting to userspace while the counters are in SRAM, | ||
381 | * these 2 words need to be at the end to facilitate retrieval! | ||
382 | */ | ||
383 | kick_counter: | ||
384 | .word 0 | ||
385 | wait_dll_lock_counter: | ||
386 | .word 0 | ||
387 | |||
388 | ENTRY(omap3_do_wfi_sz) | ||
389 | .word . - omap3_do_wfi | ||
333 | 390 | ||
334 | 391 | ||
335 | /* | 392 | /* |
@@ -346,13 +403,17 @@ omap3_do_wfi: | |||
346 | * restore_es3: applies to 34xx >= ES3.0 | 403 | * restore_es3: applies to 34xx >= ES3.0 |
347 | * restore_3630: applies to 36xx | 404 | * restore_3630: applies to 36xx |
348 | * restore: common code for 3xxx | 405 | * restore: common code for 3xxx |
406 | * | ||
407 | * Note: when back from CORE and MPU OFF mode we are running | ||
408 | * from SDRAM, without MMU, without the caches and prediction. | ||
409 | * Also the SRAM content has been cleared. | ||
349 | */ | 410 | */ |
350 | restore_es3: | 411 | ENTRY(omap3_restore_es3) |
351 | ldr r5, pm_prepwstst_core_p | 412 | ldr r5, pm_prepwstst_core_p |
352 | ldr r4, [r5] | 413 | ldr r4, [r5] |
353 | and r4, r4, #0x3 | 414 | and r4, r4, #0x3 |
354 | cmp r4, #0x0 @ Check if previous power state of CORE is OFF | 415 | cmp r4, #0x0 @ Check if previous power state of CORE is OFF |
355 | bne restore | 416 | bne omap3_restore @ Fall through to OMAP3 common code |
356 | adr r0, es3_sdrc_fix | 417 | adr r0, es3_sdrc_fix |
357 | ldr r1, sram_base | 418 | ldr r1, sram_base |
358 | ldr r2, es3_sdrc_fix_sz | 419 | ldr r2, es3_sdrc_fix_sz |
@@ -364,35 +425,32 @@ copy_to_sram: | |||
364 | bne copy_to_sram | 425 | bne copy_to_sram |
365 | ldr r1, sram_base | 426 | ldr r1, sram_base |
366 | blx r1 | 427 | blx r1 |
367 | b restore | 428 | b omap3_restore @ Fall through to OMAP3 common code |
429 | ENDPROC(omap3_restore_es3) | ||
368 | 430 | ||
369 | restore_3630: | 431 | ENTRY(omap3_restore_3630) |
370 | ldr r1, pm_prepwstst_core_p | 432 | ldr r1, pm_prepwstst_core_p |
371 | ldr r2, [r1] | 433 | ldr r2, [r1] |
372 | and r2, r2, #0x3 | 434 | and r2, r2, #0x3 |
373 | cmp r2, #0x0 @ Check if previous power state of CORE is OFF | 435 | cmp r2, #0x0 @ Check if previous power state of CORE is OFF |
374 | bne restore | 436 | bne omap3_restore @ Fall through to OMAP3 common code |
375 | /* Disable RTA before giving control */ | 437 | /* Disable RTA before giving control */ |
376 | ldr r1, control_mem_rta | 438 | ldr r1, control_mem_rta |
377 | mov r2, #OMAP36XX_RTA_DISABLE | 439 | mov r2, #OMAP36XX_RTA_DISABLE |
378 | str r2, [r1] | 440 | str r2, [r1] |
441 | ENDPROC(omap3_restore_3630) | ||
379 | 442 | ||
380 | /* Fall through to common code for the remaining logic */ | 443 | /* Fall through to common code for the remaining logic */ |
381 | 444 | ||
382 | restore: | 445 | ENTRY(omap3_restore) |
383 | /* | 446 | /* |
384 | * Check what was the reason for mpu reset and store the reason in r9: | 447 | * Read the pwstctrl register to check the reason for mpu reset. |
385 | * 0 - No context lost | 448 | * This tells us what was lost. |
386 | * 1 - Only L1 and logic lost | ||
387 | * 2 - Only L2 lost - In this case, we wont be here | ||
388 | * 3 - Both L1 and L2 lost | ||
389 | */ | 449 | */ |
390 | ldr r1, pm_pwstctrl_mpu | 450 | ldr r1, pm_pwstctrl_mpu |
391 | ldr r2, [r1] | 451 | ldr r2, [r1] |
392 | and r2, r2, #0x3 | 452 | and r2, r2, #0x3 |
393 | cmp r2, #0x0 @ Check if target power state was OFF or RET | 453 | cmp r2, #0x0 @ Check if target power state was OFF or RET |
394 | moveq r9, #0x3 @ MPU OFF => L1 and L2 lost | ||
395 | movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation | ||
396 | bne logic_l1_restore | 454 | bne logic_l1_restore |
397 | 455 | ||
398 | ldr r0, l2dis_3630 | 456 | ldr r0, l2dis_3630 |
@@ -471,115 +529,39 @@ logic_l1_restore: | |||
471 | orr r1, r1, #2 @ re-enable L2 cache | 529 | orr r1, r1, #2 @ re-enable L2 cache |
472 | mcr p15, 0, r1, c1, c0, 1 | 530 | mcr p15, 0, r1, c1, c0, 1 |
473 | skipl2reen: | 531 | skipl2reen: |
474 | mov r1, #0 | ||
475 | /* | ||
476 | * Invalidate all instruction caches to PoU | ||
477 | * and flush branch target cache | ||
478 | */ | ||
479 | mcr p15, 0, r1, c7, c5, 0 | ||
480 | 532 | ||
481 | ldr r4, scratchpad_base | 533 | /* Now branch to the common CPU resume function */ |
482 | ldr r3, [r4,#0xBC] | 534 | b cpu_resume |
483 | adds r3, r3, #16 | 535 | ENDPROC(omap3_restore) |
484 | 536 | ||
485 | ldmia r3!, {r4-r6} | 537 | .ltorg |
486 | mov sp, r4 @ Restore sp | ||
487 | msr spsr_cxsf, r5 @ Restore spsr | ||
488 | mov lr, r6 @ Restore lr | ||
489 | |||
490 | ldmia r3!, {r4-r7} | ||
491 | mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register | ||
492 | mcr p15, 0, r5, c2, c0, 0 @ TTBR0 | ||
493 | mcr p15, 0, r6, c2, c0, 1 @ TTBR1 | ||
494 | mcr p15, 0, r7, c2, c0, 2 @ TTBCR | ||
495 | |||
496 | ldmia r3!,{r4-r6} | ||
497 | mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register | ||
498 | mcr p15, 0, r5, c10, c2, 0 @ PRRR | ||
499 | mcr p15, 0, r6, c10, c2, 1 @ NMRR | ||
500 | |||
501 | |||
502 | ldmia r3!,{r4-r7} | ||
503 | mcr p15, 0, r4, c13, c0, 1 @ Context ID | ||
504 | mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID | ||
505 | mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address | ||
506 | msr cpsr, r7 @ store cpsr | ||
507 | |||
508 | /* Enabling MMU here */ | ||
509 | mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl | ||
510 | /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */ | ||
511 | and r7, #0x7 | ||
512 | cmp r7, #0x0 | ||
513 | beq usettbr0 | ||
514 | ttbr_error: | ||
515 | /* | ||
516 | * More work needs to be done to support N[0:2] value other than 0 | ||
517 | * So looping here so that the error can be detected | ||
518 | */ | ||
519 | b ttbr_error | ||
520 | usettbr0: | ||
521 | mrc p15, 0, r2, c2, c0, 0 | ||
522 | ldr r5, ttbrbit_mask | ||
523 | and r2, r5 | ||
524 | mov r4, pc | ||
525 | ldr r5, table_index_mask | ||
526 | and r4, r5 @ r4 = 31 to 20 bits of pc | ||
527 | /* Extract the value to be written to table entry */ | ||
528 | ldr r1, table_entry | ||
529 | /* r1 has the value to be written to table entry*/ | ||
530 | add r1, r1, r4 | ||
531 | /* Getting the address of table entry to modify */ | ||
532 | lsr r4, #18 | ||
533 | /* r2 has the location which needs to be modified */ | ||
534 | add r2, r4 | ||
535 | /* Storing previous entry of location being modified */ | ||
536 | ldr r5, scratchpad_base | ||
537 | ldr r4, [r2] | ||
538 | str r4, [r5, #0xC0] | ||
539 | /* Modify the table entry */ | ||
540 | str r1, [r2] | ||
541 | /* | ||
542 | * Storing address of entry being modified | ||
543 | * - will be restored after enabling MMU | ||
544 | */ | ||
545 | ldr r5, scratchpad_base | ||
546 | str r2, [r5, #0xC4] | ||
547 | |||
548 | mov r0, #0 | ||
549 | mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer | ||
550 | mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array | ||
551 | mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB | ||
552 | mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB | ||
553 | /* | ||
554 | * Restore control register. This enables the MMU. | ||
555 | * The caches and prediction are not enabled here, they | ||
556 | * will be enabled after restoring the MMU table entry. | ||
557 | */ | ||
558 | ldmia r3!, {r4} | ||
559 | /* Store previous value of control register in scratchpad */ | ||
560 | str r4, [r5, #0xC8] | ||
561 | ldr r2, cache_pred_disable_mask | ||
562 | and r4, r2 | ||
563 | mcr p15, 0, r4, c1, c0, 0 | ||
564 | dsb | ||
565 | isb | ||
566 | ldr r0, =restoremmu_on | ||
567 | bx r0 | ||
568 | 538 | ||
569 | /* | 539 | /* |
570 | * ============================== | 540 | * Local variables |
571 | * == Exit point from OFF mode == | ||
572 | * ============================== | ||
573 | */ | 541 | */ |
574 | restoremmu_on: | 542 | pm_prepwstst_core_p: |
575 | ldmfd sp!, {r0-r12, pc} @ restore regs and return | 543 | .word PM_PREPWSTST_CORE_P |
576 | 544 | pm_pwstctrl_mpu: | |
545 | .word PM_PWSTCTRL_MPU_P | ||
546 | scratchpad_base: | ||
547 | .word SCRATCHPAD_BASE_P | ||
548 | sram_base: | ||
549 | .word SRAM_BASE_P + 0x8000 | ||
550 | control_stat: | ||
551 | .word CONTROL_STAT | ||
552 | control_mem_rta: | ||
553 | .word CONTROL_MEM_RTA_CTRL | ||
554 | l2dis_3630: | ||
555 | .word 0 | ||
577 | 556 | ||
578 | /* | 557 | /* |
579 | * Internal functions | 558 | * Internal functions |
580 | */ | 559 | */ |
581 | 560 | ||
582 | /* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */ | 561 | /* |
562 | * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 | ||
563 | * Copied to and run from SRAM in order to reconfigure the SDRC parameters. | ||
564 | */ | ||
583 | .text | 565 | .text |
584 | .align 3 | 566 | .align 3 |
585 | ENTRY(es3_sdrc_fix) | 567 | ENTRY(es3_sdrc_fix) |
@@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix) | |||
609 | str r5, [r4] @ kick off refreshes | 591 | str r5, [r4] @ kick off refreshes |
610 | bx lr | 592 | bx lr |
611 | 593 | ||
594 | /* | ||
595 | * Local variables | ||
596 | */ | ||
612 | .align | 597 | .align |
613 | sdrc_syscfg: | 598 | sdrc_syscfg: |
614 | .word SDRC_SYSCONFIG_P | 599 | .word SDRC_SYSCONFIG_P |
@@ -627,128 +612,3 @@ sdrc_manual_1: | |||
627 | ENDPROC(es3_sdrc_fix) | 612 | ENDPROC(es3_sdrc_fix) |
628 | ENTRY(es3_sdrc_fix_sz) | 613 | ENTRY(es3_sdrc_fix_sz) |
629 | .word . - es3_sdrc_fix | 614 | .word . - es3_sdrc_fix |
630 | |||
631 | /* | ||
632 | * This function implements the erratum ID i581 WA: | ||
633 | * SDRC state restore before accessing the SDRAM | ||
634 | * | ||
635 | * Only used at return from non-OFF mode. For OFF | ||
636 | * mode the ROM code configures the SDRC and | ||
637 | * the DPLL before calling the restore code directly | ||
638 | * from DDR. | ||
639 | */ | ||
640 | |||
641 | /* Make sure SDRC accesses are ok */ | ||
642 | wait_sdrc_ok: | ||
643 | |||
644 | /* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */ | ||
645 | ldr r4, cm_idlest_ckgen | ||
646 | wait_dpll3_lock: | ||
647 | ldr r5, [r4] | ||
648 | tst r5, #1 | ||
649 | beq wait_dpll3_lock | ||
650 | |||
651 | ldr r4, cm_idlest1_core | ||
652 | wait_sdrc_ready: | ||
653 | ldr r5, [r4] | ||
654 | tst r5, #0x2 | ||
655 | bne wait_sdrc_ready | ||
656 | /* allow DLL powerdown upon hw idle req */ | ||
657 | ldr r4, sdrc_power | ||
658 | ldr r5, [r4] | ||
659 | bic r5, r5, #0x40 | ||
660 | str r5, [r4] | ||
661 | |||
662 | /* | ||
663 | * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a | ||
664 | * base instead. | ||
665 | * Be careful not to clobber r7 when maintaing this code. | ||
666 | */ | ||
667 | |||
668 | is_dll_in_lock_mode: | ||
669 | /* Is dll in lock mode? */ | ||
670 | ldr r4, sdrc_dlla_ctrl | ||
671 | ldr r5, [r4] | ||
672 | tst r5, #0x4 | ||
673 | bxne lr @ Return if locked | ||
674 | /* wait till dll locks */ | ||
675 | adr r7, kick_counter | ||
676 | wait_dll_lock_timed: | ||
677 | ldr r4, wait_dll_lock_counter | ||
678 | add r4, r4, #1 | ||
679 | str r4, [r7, #wait_dll_lock_counter - kick_counter] | ||
680 | ldr r4, sdrc_dlla_status | ||
681 | /* Wait 20uS for lock */ | ||
682 | mov r6, #8 | ||
683 | wait_dll_lock: | ||
684 | subs r6, r6, #0x1 | ||
685 | beq kick_dll | ||
686 | ldr r5, [r4] | ||
687 | and r5, r5, #0x4 | ||
688 | cmp r5, #0x4 | ||
689 | bne wait_dll_lock | ||
690 | bx lr @ Return when locked | ||
691 | |||
692 | /* disable/reenable DLL if not locked */ | ||
693 | kick_dll: | ||
694 | ldr r4, sdrc_dlla_ctrl | ||
695 | ldr r5, [r4] | ||
696 | mov r6, r5 | ||
697 | bic r6, #(1<<3) @ disable dll | ||
698 | str r6, [r4] | ||
699 | dsb | ||
700 | orr r6, r6, #(1<<3) @ enable dll | ||
701 | str r6, [r4] | ||
702 | dsb | ||
703 | ldr r4, kick_counter | ||
704 | add r4, r4, #1 | ||
705 | str r4, [r7] @ kick_counter | ||
706 | b wait_dll_lock_timed | ||
707 | |||
708 | .align | ||
709 | cm_idlest1_core: | ||
710 | .word CM_IDLEST1_CORE_V | ||
711 | cm_idlest_ckgen: | ||
712 | .word CM_IDLEST_CKGEN_V | ||
713 | sdrc_dlla_status: | ||
714 | .word SDRC_DLLA_STATUS_V | ||
715 | sdrc_dlla_ctrl: | ||
716 | .word SDRC_DLLA_CTRL_V | ||
717 | pm_prepwstst_core_p: | ||
718 | .word PM_PREPWSTST_CORE_P | ||
719 | pm_pwstctrl_mpu: | ||
720 | .word PM_PWSTCTRL_MPU_P | ||
721 | scratchpad_base: | ||
722 | .word SCRATCHPAD_BASE_P | ||
723 | sram_base: | ||
724 | .word SRAM_BASE_P + 0x8000 | ||
725 | sdrc_power: | ||
726 | .word SDRC_POWER_V | ||
727 | ttbrbit_mask: | ||
728 | .word 0xFFFFC000 | ||
729 | table_index_mask: | ||
730 | .word 0xFFF00000 | ||
731 | table_entry: | ||
732 | .word 0x00000C02 | ||
733 | cache_pred_disable_mask: | ||
734 | .word 0xFFFFE7FB | ||
735 | control_stat: | ||
736 | .word CONTROL_STAT | ||
737 | control_mem_rta: | ||
738 | .word CONTROL_MEM_RTA_CTRL | ||
739 | kernel_flush: | ||
740 | .word v7_flush_dcache_all | ||
741 | l2dis_3630: | ||
742 | .word 0 | ||
743 | /* | ||
744 | * When exporting to userspace while the counters are in SRAM, | ||
745 | * these 2 words need to be at the end to facilitate retrival! | ||
746 | */ | ||
747 | kick_counter: | ||
748 | .word 0 | ||
749 | wait_dll_lock_counter: | ||
750 | .word 0 | ||
751 | ENDPROC(omap34xx_cpu_suspend) | ||
752 | |||
753 | ENTRY(omap34xx_cpu_suspend_sz) | ||
754 | .word . - omap34xx_cpu_suspend | ||
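The relocated wait_sdrc_ok path above (the erratum i581 workaround) is easier to audit once its polling structure is spelled out in C. The following is a minimal sketch of the same control flow, not kernel code: the register and counter pointers are parameters precisely because the real addresses (SDRC_DLLA_CTRL_V and friends) are assembled-in literals, and the dsb barriers issued after each DLL write are elided here.

#include <stdint.h>

/*
 * Sketch of the i581 re-lock loop (wait_sdrc_ok / kick_dll) from
 * sleep34xx.S, written against caller-supplied register pointers.
 */
static void wait_sdrc_ok_sketch(volatile uint32_t *cm_idlest_ckgen,
				volatile uint32_t *cm_idlest1_core,
				volatile uint32_t *sdrc_power,
				volatile uint32_t *dlla_ctrl,
				volatile uint32_t *dlla_status,
				uint32_t *kick_counter,
				uint32_t *wait_counter)
{
	/* DPLL3 must be locked and the SDRC out of idle first */
	while (!(*cm_idlest_ckgen & 0x1))
		;
	while (*cm_idlest1_core & 0x2)
		;

	/* allow DLL power-down again upon hardware idle requests */
	*sdrc_power &= ~0x40;

	if (*dlla_ctrl & 0x4)
		return;				/* DLL already in lock mode */

	for (;;) {
		int i;

		(*wait_counter)++;
		for (i = 0; i < 8; i++)		/* roughly 20us of polling */
			if (*dlla_status & 0x4)
				return;		/* locked */

		/* not locked in time: kick the DLL by toggling its enable */
		(*kick_counter)++;
		*dlla_ctrl &= ~(1 << 3);	/* disable DLL (barrier elided) */
		*dlla_ctrl |= (1 << 3);		/* re-enable DLL (barrier elided) */
	}
}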
diff --git a/arch/arm/mach-pnx4008/include/mach/entry-macro.S b/arch/arm/mach-pnx4008/include/mach/entry-macro.S index 8003037578ed..db7eeebf30d7 100644 --- a/arch/arm/mach-pnx4008/include/mach/entry-macro.S +++ b/arch/arm/mach-pnx4008/include/mach/entry-macro.S | |||
@@ -120,8 +120,3 @@ | |||
120 | 1003: | 120 | 1003: |
121 | .endm | 121 | .endm |
122 | 122 | ||
123 | |||
124 | .macro irq_prio_table | ||
125 | .endm | ||
126 | |||
127 | |||
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c index a10996782476..bc55d07566ca 100644 --- a/arch/arm/mach-pxa/cm-x2xx.c +++ b/arch/arm/mach-pxa/cm-x2xx.c | |||
@@ -518,4 +518,7 @@ MACHINE_START(ARMCORE, "Compulab CM-X2XX") | |||
518 | .init_irq = cmx2xx_init_irq, | 518 | .init_irq = cmx2xx_init_irq, |
519 | .timer = &pxa_timer, | 519 | .timer = &pxa_timer, |
520 | .init_machine = cmx2xx_init, | 520 | .init_machine = cmx2xx_init, |
521 | #ifdef CONFIG_PCI | ||
522 | .dma_zone_size = SZ_64M, | ||
523 | #endif | ||
521 | MACHINE_END | 524 | MACHINE_END |
diff --git a/arch/arm/mach-pxa/include/mach/memory.h b/arch/arm/mach-pxa/include/mach/memory.h index 07734f37f8fd..d05a59727d66 100644 --- a/arch/arm/mach-pxa/include/mach/memory.h +++ b/arch/arm/mach-pxa/include/mach/memory.h | |||
@@ -17,8 +17,4 @@ | |||
17 | */ | 17 | */ |
18 | #define PLAT_PHYS_OFFSET UL(0xa0000000) | 18 | #define PLAT_PHYS_OFFSET UL(0xa0000000) |
19 | 19 | ||
20 | #if defined(CONFIG_MACH_ARMCORE) && defined(CONFIG_PCI) | ||
21 | #define ARM_DMA_ZONE_SIZE SZ_64M | ||
22 | #endif | ||
23 | |||
24 | #endif | 20 | #endif |
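The memory.h change above, together with the cm-x2xx.c hunk before it, shows the pattern this series applies board by board: the compile-time ARM_DMA_ZONE_SIZE define disappears from <mach/memory.h> and the board states its DMA zone size in the machine descriptor instead. A sketch of how the generic code can then pick the value up at boot; the minimal descriptor type and the function name are illustrative only, the real struct machine_desc carries many more fields.

/* Minimal stand-in for the machine descriptor -- illustration only. */
struct machine_desc_sketch {
	unsigned long dma_zone_size;	/* 0 means "no DMA zone needed" */
	/* ... the real struct machine_desc has many more fields ... */
};

static unsigned long arm_dma_zone_size_sketch;

/* Presumed shape of the generic consumer: one assignment at setup time
 * replaces every per-<mach/memory.h> ARM_DMA_ZONE_SIZE definition. */
static void setup_dma_zone_sketch(const struct machine_desc_sketch *mdesc)
{
	if (mdesc->dma_zone_size)
		arm_dma_zone_size_sketch = mdesc->dma_zone_size;
}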
diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h index f15afe012995..51558bcee999 100644 --- a/arch/arm/mach-pxa/include/mach/pm.h +++ b/arch/arm/mach-pxa/include/mach/pm.h | |||
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns { | |||
22 | extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; | 22 | extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; |
23 | 23 | ||
24 | /* sleep.S */ | 24 | /* sleep.S */ |
25 | extern void pxa25x_cpu_suspend(unsigned int, long); | 25 | extern int pxa25x_finish_suspend(unsigned long); |
26 | extern void pxa27x_cpu_suspend(unsigned int, long); | 26 | extern int pxa27x_finish_suspend(unsigned long); |
27 | 27 | ||
28 | extern int pxa_pm_enter(suspend_state_t state); | 28 | extern int pxa_pm_enter(suspend_state_t state); |
29 | extern int pxa_pm_prepare(void); | 29 | extern int pxa_pm_prepare(void); |
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 65f24f0b77e8..5a5329bc33f1 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/i2c-gpio.h> | 33 | #include <linux/i2c-gpio.h> |
34 | 34 | ||
35 | #include <asm/mach-types.h> | 35 | #include <asm/mach-types.h> |
36 | #include <asm/suspend.h> | ||
36 | #include <asm/mach/arch.h> | 37 | #include <asm/mach/arch.h> |
37 | #include <asm/mach/map.h> | 38 | #include <asm/mach/map.h> |
38 | 39 | ||
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 51e1583265b2..37178a8559b1 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c | |||
@@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state) | |||
42 | 42 | ||
43 | /* *** go zzz *** */ | 43 | /* *** go zzz *** */ |
44 | pxa_cpu_pm_fns->enter(state); | 44 | pxa_cpu_pm_fns->enter(state); |
45 | cpu_init(); | ||
46 | 45 | ||
47 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { | 46 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { |
48 | /* after sleeping, validate the checksum */ | 47 | /* after sleeping, validate the checksum */ |
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fed363cec9c6..9c434d21a271 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | 26 | ||
27 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
28 | #include <asm/suspend.h> | ||
28 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
29 | #include <mach/irqs.h> | 30 | #include <mach/irqs.h> |
30 | #include <mach/gpio.h> | 31 | #include <mach/gpio.h> |
@@ -244,7 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) | |||
244 | 245 | ||
245 | switch (state) { | 246 | switch (state) { |
246 | case PM_SUSPEND_MEM: | 247 | case PM_SUSPEND_MEM: |
247 | pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 248 | cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend); |
248 | break; | 249 | break; |
249 | } | 250 | } |
250 | } | 251 | } |
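The call above illustrates the suspend entry point that this series converts every platform to: cpu_suspend(arg, fn) saves the CPU state itself and then invokes the finisher fn(arg), which is expected to put the SoC to sleep and never return normally; wake-up re-enters through cpu_resume. A hedged sketch of a finisher and its call site under that contract -- the two soc_* helpers are invented placeholders, not PXA functions.

#include <linux/kernel.h>
#include <asm/suspend.h>

/* Placeholder hardware helpers -- assumptions for this sketch only. */
extern void soc_program_wakeup_sources(void);
extern void soc_enter_sleep_mode(unsigned long mode);	/* no return on success */

static int example_finish_suspend(unsigned long mode)
{
	/* cpu_suspend() has already saved state; just go to sleep */
	soc_program_wakeup_sources();
	soc_enter_sleep_mode(mode);

	/* reaching this point means the sleep request fell through */
	panic("sleep resumed to originator?");
}

static void example_enter_mem(void)
{
	cpu_suspend(0, example_finish_suspend);
	/* execution continues here after wake-up via cpu_resume */
}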
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 2fecbec58d88..9d2400b5f503 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/mach/map.h> | 24 | #include <asm/mach/map.h> |
25 | #include <mach/hardware.h> | 25 | #include <mach/hardware.h> |
26 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
27 | #include <asm/suspend.h> | ||
27 | #include <mach/irqs.h> | 28 | #include <mach/irqs.h> |
28 | #include <mach/gpio.h> | 29 | #include <mach/gpio.h> |
29 | #include <mach/pxa27x.h> | 30 | #include <mach/pxa27x.h> |
@@ -284,6 +285,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save) | |||
284 | void pxa27x_cpu_pm_enter(suspend_state_t state) | 285 | void pxa27x_cpu_pm_enter(suspend_state_t state) |
285 | { | 286 | { |
286 | extern void pxa_cpu_standby(void); | 287 | extern void pxa_cpu_standby(void); |
288 | #ifndef CONFIG_IWMMXT | ||
289 | u64 acc0; | ||
290 | |||
291 | asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); | ||
292 | #endif | ||
287 | 293 | ||
288 | /* ensure voltage-change sequencer not initiated, which hangs */ | 294 | /* ensure voltage-change sequencer not initiated, which hangs */ |
289 | PCFR &= ~PCFR_FVC; | 295 | PCFR &= ~PCFR_FVC; |
@@ -299,7 +305,10 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) | |||
299 | pxa_cpu_standby(); | 305 | pxa_cpu_standby(); |
300 | break; | 306 | break; |
301 | case PM_SUSPEND_MEM: | 307 | case PM_SUSPEND_MEM: |
302 | pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 308 | cpu_suspend(pwrmode, pxa27x_finish_suspend); |
309 | #ifndef CONFIG_IWMMXT | ||
310 | asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); | ||
311 | #endif | ||
303 | break; | 312 | break; |
304 | } | 313 | } |
305 | } | 314 | } |
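The pxa27x (and pxa3xx) conversions above also move the iWMMXt accumulator handling from the assembly stubs into C. The %Q0/%R0 operand modifiers select the low and high 32-bit halves of a 64-bit operand, so acc0 simply round-trips through a u64 local across cpu_suspend(). A small sketch of that pattern written as helpers -- a refactoring for illustration, not code from the patch.

#include <linux/types.h>

#ifndef CONFIG_IWMMXT
/* "mra"/"mar" move acc0 to and from a general-purpose register pair;
 * %Q0/%R0 name the low and high words of the 64-bit C variable. */
static inline u64 acc0_save(void)
{
	u64 acc0;

	asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
	return acc0;
}

static inline void acc0_restore(u64 acc0)
{
	asm volatile("mar acc0, %Q0, %R0" : : "r" (acc0));
}
#endif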
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 8521d7d6f1da..ef1c56a67afc 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | 25 | ||
26 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
27 | #include <asm/suspend.h> | ||
27 | #include <mach/hardware.h> | 28 | #include <mach/hardware.h> |
28 | #include <mach/gpio.h> | 29 | #include <mach/gpio.h> |
29 | #include <mach/pxa3xx-regs.h> | 30 | #include <mach/pxa3xx-regs.h> |
@@ -141,8 +142,13 @@ static void pxa3xx_cpu_pm_suspend(void) | |||
141 | { | 142 | { |
142 | volatile unsigned long *p = (volatile void *)0xc0000000; | 143 | volatile unsigned long *p = (volatile void *)0xc0000000; |
143 | unsigned long saved_data = *p; | 144 | unsigned long saved_data = *p; |
145 | #ifndef CONFIG_IWMMXT | ||
146 | u64 acc0; | ||
144 | 147 | ||
145 | extern void pxa3xx_cpu_suspend(long); | 148 | asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); |
149 | #endif | ||
150 | |||
151 | extern int pxa3xx_finish_suspend(unsigned long); | ||
146 | 152 | ||
147 | /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ | 153 | /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ |
148 | CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); | 154 | CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); |
@@ -162,11 +168,15 @@ static void pxa3xx_cpu_pm_suspend(void) | |||
162 | /* overwrite with the resume address */ | 168 | /* overwrite with the resume address */ |
163 | *p = virt_to_phys(cpu_resume); | 169 | *p = virt_to_phys(cpu_resume); |
164 | 170 | ||
165 | pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); | 171 | cpu_suspend(0, pxa3xx_finish_suspend); |
166 | 172 | ||
167 | *p = saved_data; | 173 | *p = saved_data; |
168 | 174 | ||
169 | AD3ER = 0; | 175 | AD3ER = 0; |
176 | |||
177 | #ifndef CONFIG_IWMMXT | ||
178 | asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); | ||
179 | #endif | ||
170 | } | 180 | } |
171 | 181 | ||
172 | static void pxa3xx_cpu_pm_enter(suspend_state_t state) | 182 | static void pxa3xx_cpu_pm_enter(suspend_state_t state) |
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 6f5368899d84..1e544be9905d 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S | |||
@@ -24,20 +24,9 @@ | |||
24 | 24 | ||
25 | #ifdef CONFIG_PXA3xx | 25 | #ifdef CONFIG_PXA3xx |
26 | /* | 26 | /* |
27 | * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4) | 27 | * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4) |
28 | * | ||
29 | * r0 = v:p offset | ||
30 | */ | 28 | */ |
31 | ENTRY(pxa3xx_cpu_suspend) | 29 | ENTRY(pxa3xx_finish_suspend) |
32 | |||
33 | #ifndef CONFIG_IWMMXT | ||
34 | mra r2, r3, acc0 | ||
35 | #endif | ||
36 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | ||
37 | mov r1, r0 | ||
38 | ldr r3, =pxa_cpu_resume @ resume function | ||
39 | bl cpu_suspend | ||
40 | |||
41 | mov r0, #0x06 @ S2D3C4 mode | 30 | mov r0, #0x06 @ S2D3C4 mode |
42 | mcr p14, 0, r0, c7, c0, 0 @ enter sleep | 31 | mcr p14, 0, r0, c7, c0, 0 @ enter sleep |
43 | 32 | ||
@@ -46,28 +35,18 @@ ENTRY(pxa3xx_cpu_suspend) | |||
46 | 35 | ||
47 | #ifdef CONFIG_PXA27x | 36 | #ifdef CONFIG_PXA27x |
48 | /* | 37 | /* |
49 | * pxa27x_cpu_suspend() | 38 | * pxa27x_finish_suspend() |
50 | * | 39 | * |
51 | * Forces CPU into sleep state. | 40 | * Forces CPU into sleep state. |
52 | * | 41 | * |
53 | * r0 = value for PWRMODE M field for desired sleep state | 42 | * r0 = value for PWRMODE M field for desired sleep state |
54 | * r1 = v:p offset | ||
55 | */ | 43 | */ |
56 | ENTRY(pxa27x_cpu_suspend) | 44 | ENTRY(pxa27x_finish_suspend) |
57 | |||
58 | #ifndef CONFIG_IWMMXT | ||
59 | mra r2, r3, acc0 | ||
60 | #endif | ||
61 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | ||
62 | mov r4, r0 @ save sleep mode | ||
63 | ldr r3, =pxa_cpu_resume @ resume function | ||
64 | bl cpu_suspend | ||
65 | |||
66 | @ Put the processor to sleep | 45 | @ Put the processor to sleep |
67 | @ (also workaround for sighting 28071) | 46 | @ (also workaround for sighting 28071) |
68 | 47 | ||
69 | @ prepare value for sleep mode | 48 | @ prepare value for sleep mode |
70 | mov r1, r4 @ sleep mode | 49 | mov r1, r0 @ sleep mode |
71 | 50 | ||
72 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) | 51 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) |
73 | mov r2, #UNCACHED_PHYS_0 | 52 | mov r2, #UNCACHED_PHYS_0 |
@@ -99,21 +78,16 @@ ENTRY(pxa27x_cpu_suspend) | |||
99 | 78 | ||
100 | #ifdef CONFIG_PXA25x | 79 | #ifdef CONFIG_PXA25x |
101 | /* | 80 | /* |
102 | * pxa25x_cpu_suspend() | 81 | * pxa25x_finish_suspend() |
103 | * | 82 | * |
104 | * Forces CPU into sleep state. | 83 | * Forces CPU into sleep state. |
105 | * | 84 | * |
106 | * r0 = value for PWRMODE M field for desired sleep state | 85 | * r0 = value for PWRMODE M field for desired sleep state |
107 | * r1 = v:p offset | ||
108 | */ | 86 | */ |
109 | 87 | ||
110 | ENTRY(pxa25x_cpu_suspend) | 88 | ENTRY(pxa25x_finish_suspend) |
111 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | ||
112 | mov r4, r0 @ save sleep mode | ||
113 | ldr r3, =pxa_cpu_resume @ resume function | ||
114 | bl cpu_suspend | ||
115 | @ prepare value for sleep mode | 89 | @ prepare value for sleep mode |
116 | mov r1, r4 @ sleep mode | 90 | mov r1, r0 @ sleep mode |
117 | 91 | ||
118 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) | 92 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) |
119 | mov r2, #UNCACHED_PHYS_0 | 93 | mov r2, #UNCACHED_PHYS_0 |
@@ -195,16 +169,3 @@ pxa_cpu_do_suspend: | |||
195 | mcr p14, 0, r1, c7, c0, 0 @ PWRMODE | 169 | mcr p14, 0, r1, c7, c0, 0 @ PWRMODE |
196 | 170 | ||
197 | 20: b 20b @ loop waiting for sleep | 171 | 20: b 20b @ loop waiting for sleep |
198 | |||
199 | /* | ||
200 | * pxa_cpu_resume() | ||
201 | * | ||
202 | * entry point from bootloader into kernel during resume | ||
203 | */ | ||
204 | .align 5 | ||
205 | pxa_cpu_resume: | ||
206 | ldmfd sp!, {r2, r3} | ||
207 | #ifndef CONFIG_IWMMXT | ||
208 | mar acc0, r2, r3 | ||
209 | #endif | ||
210 | ldmfd sp!, {r4 - r12, pc} @ return to caller | ||
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 00363c7ac182..9b99cc164de5 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/can/platform/mcp251x.h> | 31 | #include <linux/can/platform/mcp251x.h> |
32 | 32 | ||
33 | #include <asm/mach-types.h> | 33 | #include <asm/mach-types.h> |
34 | #include <asm/suspend.h> | ||
34 | #include <asm/mach/arch.h> | 35 | #include <asm/mach/arch.h> |
35 | #include <asm/mach/map.h> | 36 | #include <asm/mach/map.h> |
36 | 37 | ||
@@ -676,7 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = { | |||
676 | static void zeus_power_off(void) | 677 | static void zeus_power_off(void) |
677 | { | 678 | { |
678 | local_irq_disable(); | 679 | local_irq_disable(); |
679 | pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 680 | cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend); |
680 | } | 681 | } |
681 | #else | 682 | #else |
682 | #define zeus_power_off NULL | 683 | #define zeus_power_off NULL |
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index b9a9805e4828..dba6d0c1fc17 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig | |||
@@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176 | |||
50 | bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" | 50 | bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" |
51 | select CPU_V6 | 51 | select CPU_V6 |
52 | select ARM_GIC | 52 | select ARM_GIC |
53 | select HAVE_TCM | ||
53 | help | 54 | help |
54 | Include support for the ARM(R) RealView(R) Platform Baseboard for | 55 | Include support for the ARM(R) RealView(R) Platform Baseboard for |
55 | ARM1176JZF-S. | 56 | ARM1176JZF-S. |
diff --git a/arch/arm/mach-realview/include/mach/memory.h b/arch/arm/mach-realview/include/mach/memory.h index 1759fa673eea..2022e092f0ca 100644 --- a/arch/arm/mach-realview/include/mach/memory.h +++ b/arch/arm/mach-realview/include/mach/memory.h | |||
@@ -29,10 +29,6 @@ | |||
29 | #define PLAT_PHYS_OFFSET UL(0x00000000) | 29 | #define PLAT_PHYS_OFFSET UL(0x00000000) |
30 | #endif | 30 | #endif |
31 | 31 | ||
32 | #ifdef CONFIG_ZONE_DMA | ||
33 | #define ARM_DMA_ZONE_SIZE SZ_256M | ||
34 | #endif | ||
35 | |||
36 | #ifdef CONFIG_SPARSEMEM | 32 | #ifdef CONFIG_SPARSEMEM |
37 | 33 | ||
38 | /* | 34 | /* |
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c index 963bf0d8119a..4ae943bafa92 100644 --- a/arch/arm/mach-realview/platsmp.c +++ b/arch/arm/mach-realview/platsmp.c | |||
@@ -68,14 +68,6 @@ void __init smp_init_cpus(void) | |||
68 | 68 | ||
69 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 69 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
70 | { | 70 | { |
71 | int i; | ||
72 | |||
73 | /* | ||
74 | * Initialise the present map, which describes the set of CPUs | ||
75 | * actually populated at the present time. | ||
76 | */ | ||
77 | for (i = 0; i < max_cpus; i++) | ||
78 | set_cpu_present(i, true); | ||
79 | 71 | ||
80 | scu_enable(scu_base_addr()); | 72 | scu_enable(scu_base_addr()); |
81 | 73 | ||
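The same present-map loop is deleted from the shmobile, Tegra, ux500 and Versatile Express SMP code further down, so the series evidently centralises that initialisation and leaves each platform_smp_prepare_cpus() to do only platform-specific work. A sketch of where the loop plausibly ends up, reconstructed from the removed copies rather than quoted from arch/arm/kernel/smp.c.

#include <linux/cpumask.h>
#include <linux/init.h>

extern void platform_smp_prepare_cpus(unsigned int max_cpus);

/* Sketch of a single, generic initialisation of the present map. */
void __init smp_prepare_cpus_sketch(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * The present map describes the CPUs actually populated at the
	 * present time; every platform used to fill it in identically.
	 */
	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	platform_smp_prepare_cpus(max_cpus);
}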
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 10e75faba4c9..7a4e3b18cb3e 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -470,4 +470,7 @@ MACHINE_START(REALVIEW_EB, "ARM-RealView EB") | |||
470 | .init_irq = gic_init_irq, | 470 | .init_irq = gic_init_irq, |
471 | .timer = &realview_eb_timer, | 471 | .timer = &realview_eb_timer, |
472 | .init_machine = realview_eb_init, | 472 | .init_machine = realview_eb_init, |
473 | #ifdef CONFIG_ZONE_DMA | ||
474 | .dma_zone_size = SZ_256M, | ||
475 | #endif | ||
473 | MACHINE_END | 476 | MACHINE_END |
diff --git a/arch/arm/mach-realview/realview_pb1176.c b/arch/arm/mach-realview/realview_pb1176.c index eab6070f66d0..ad5671acb66a 100644 --- a/arch/arm/mach-realview/realview_pb1176.c +++ b/arch/arm/mach-realview/realview_pb1176.c | |||
@@ -365,4 +365,7 @@ MACHINE_START(REALVIEW_PB1176, "ARM-RealView PB1176") | |||
365 | .init_irq = gic_init_irq, | 365 | .init_irq = gic_init_irq, |
366 | .timer = &realview_pb1176_timer, | 366 | .timer = &realview_pb1176_timer, |
367 | .init_machine = realview_pb1176_init, | 367 | .init_machine = realview_pb1176_init, |
368 | #ifdef CONFIG_ZONE_DMA | ||
369 | .dma_zone_size = SZ_256M, | ||
370 | #endif | ||
368 | MACHINE_END | 371 | MACHINE_END |
diff --git a/arch/arm/mach-realview/realview_pb11mp.c b/arch/arm/mach-realview/realview_pb11mp.c index b2985fc7cd4e..b43644b3685e 100644 --- a/arch/arm/mach-realview/realview_pb11mp.c +++ b/arch/arm/mach-realview/realview_pb11mp.c | |||
@@ -367,4 +367,7 @@ MACHINE_START(REALVIEW_PB11MP, "ARM-RealView PB11MPCore") | |||
367 | .init_irq = gic_init_irq, | 367 | .init_irq = gic_init_irq, |
368 | .timer = &realview_pb11mp_timer, | 368 | .timer = &realview_pb11mp_timer, |
369 | .init_machine = realview_pb11mp_init, | 369 | .init_machine = realview_pb11mp_init, |
370 | #ifdef CONFIG_ZONE_DMA | ||
371 | .dma_zone_size = SZ_256M, | ||
372 | #endif | ||
370 | MACHINE_END | 373 | MACHINE_END |
diff --git a/arch/arm/mach-realview/realview_pba8.c b/arch/arm/mach-realview/realview_pba8.c index fb6866558760..763e8f38c15d 100644 --- a/arch/arm/mach-realview/realview_pba8.c +++ b/arch/arm/mach-realview/realview_pba8.c | |||
@@ -317,4 +317,7 @@ MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8") | |||
317 | .init_irq = gic_init_irq, | 317 | .init_irq = gic_init_irq, |
318 | .timer = &realview_pba8_timer, | 318 | .timer = &realview_pba8_timer, |
319 | .init_machine = realview_pba8_init, | 319 | .init_machine = realview_pba8_init, |
320 | #ifdef CONFIG_ZONE_DMA | ||
321 | .dma_zone_size = SZ_256M, | ||
322 | #endif | ||
320 | MACHINE_END | 323 | MACHINE_END |
diff --git a/arch/arm/mach-realview/realview_pbx.c b/arch/arm/mach-realview/realview_pbx.c index 92ace2cf2b2c..363b0ab56150 100644 --- a/arch/arm/mach-realview/realview_pbx.c +++ b/arch/arm/mach-realview/realview_pbx.c | |||
@@ -400,4 +400,7 @@ MACHINE_START(REALVIEW_PBX, "ARM-RealView PBX") | |||
400 | .init_irq = gic_init_irq, | 400 | .init_irq = gic_init_irq, |
401 | .timer = &realview_pbx_timer, | 401 | .timer = &realview_pbx_timer, |
402 | .init_machine = realview_pbx_init, | 402 | .init_machine = realview_pbx_init, |
403 | #ifdef CONFIG_ZONE_DMA | ||
404 | .dma_zone_size = SZ_256M, | ||
405 | #endif | ||
403 | MACHINE_END | 406 | MACHINE_END |
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index 752b13a7b3db..f4077efa51fa 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c | |||
@@ -37,12 +37,10 @@ | |||
37 | 37 | ||
38 | extern void s3c2412_sleep_enter(void); | 38 | extern void s3c2412_sleep_enter(void); |
39 | 39 | ||
40 | static void s3c2412_cpu_suspend(void) | 40 | static int s3c2412_cpu_suspend(unsigned long arg) |
41 | { | 41 | { |
42 | unsigned long tmp; | 42 | unsigned long tmp; |
43 | 43 | ||
44 | flush_cache_all(); | ||
45 | |||
46 | /* set our standby method to sleep */ | 44 | /* set our standby method to sleep */ |
47 | 45 | ||
48 | tmp = __raw_readl(S3C2412_PWRCFG); | 46 | tmp = __raw_readl(S3C2412_PWRCFG); |
@@ -50,6 +48,8 @@ static void s3c2412_cpu_suspend(void) | |||
50 | __raw_writel(tmp, S3C2412_PWRCFG); | 48 | __raw_writel(tmp, S3C2412_PWRCFG); |
51 | 49 | ||
52 | s3c2412_sleep_enter(); | 50 | s3c2412_sleep_enter(); |
51 | |||
52 | panic("sleep resumed to originator?"); | ||
53 | } | 53 | } |
54 | 54 | ||
55 | static void s3c2412_pm_prepare(void) | 55 | static void s3c2412_pm_prepare(void) |
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 41db2b21e213..9ec54f1d8e75 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c | |||
@@ -24,10 +24,8 @@ | |||
24 | 24 | ||
25 | extern void s3c2412_sleep_enter(void); | 25 | extern void s3c2412_sleep_enter(void); |
26 | 26 | ||
27 | static void s3c2416_cpu_suspend(void) | 27 | static int s3c2416_cpu_suspend(unsigned long arg) |
28 | { | 28 | { |
29 | flush_cache_all(); | ||
30 | |||
31 | /* enable wakeup sources regardless of battery state */ | 29 | /* enable wakeup sources regardless of battery state */ |
32 | __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); | 30 | __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); |
33 | 31 | ||
@@ -35,6 +33,8 @@ static void s3c2416_cpu_suspend(void) | |||
35 | __raw_writel(0x2BED, S3C2443_PWRMODE); | 33 | __raw_writel(0x2BED, S3C2443_PWRMODE); |
36 | 34 | ||
37 | s3c2412_sleep_enter(); | 35 | s3c2412_sleep_enter(); |
36 | |||
37 | panic("sleep resumed to originator?"); | ||
38 | } | 38 | } |
39 | 39 | ||
40 | static void s3c2416_pm_prepare(void) | 40 | static void s3c2416_pm_prepare(void) |
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index bc1c470b7de6..8bad64370689 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c | |||
@@ -112,7 +112,7 @@ void s3c_pm_save_core(void) | |||
112 | * this. | 112 | * this. |
113 | */ | 113 | */ |
114 | 114 | ||
115 | static void s3c64xx_cpu_suspend(void) | 115 | static int s3c64xx_cpu_suspend(unsigned long arg) |
116 | { | 116 | { |
117 | unsigned long tmp; | 117 | unsigned long tmp; |
118 | 118 | ||
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S index 1f87732b2320..34313f9c8792 100644 --- a/arch/arm/mach-s3c64xx/sleep.S +++ b/arch/arm/mach-s3c64xx/sleep.S | |||
@@ -25,29 +25,6 @@ | |||
25 | 25 | ||
26 | .text | 26 | .text |
27 | 27 | ||
28 | /* s3c_cpu_save | ||
29 | * | ||
30 | * Save enough processor state to allow the restart of the pm.c | ||
31 | * code after resume. | ||
32 | * | ||
33 | * entry: | ||
34 | * r1 = v:p offset | ||
35 | */ | ||
36 | |||
37 | ENTRY(s3c_cpu_save) | ||
38 | stmfd sp!, { r4 - r12, lr } | ||
39 | ldr r3, =resume_with_mmu | ||
40 | bl cpu_suspend | ||
41 | |||
42 | @@ call final suspend code | ||
43 | ldr r0, =pm_cpu_sleep | ||
44 | ldr pc, [r0] | ||
45 | |||
46 | @@ return to the caller, after the MMU is turned on. | ||
47 | @@ restore the last bits of the stack and return. | ||
48 | resume_with_mmu: | ||
49 | ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save | ||
50 | |||
51 | /* Sleep magic, the word before the resume entry point so that the | 28 | /* Sleep magic, the word before the resume entry point so that the |
52 | * bootloader can check for a resumeable image. */ | 29 | * bootloader can check for a resumeable image. */ |
53 | 30 | ||
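The "Sleep magic" comment kept above describes a small handshake with the bootloader: the word stored immediately before the resume entry point marks the image as resumeable. A sketch of the check the bootloader side could perform; the constant and the names are placeholders for illustration, the actual magic value is defined in the S3C PM code, not here.

/* Placeholder magic value -- an assumption for this sketch only. */
#define S3C_SLEEP_MAGIC_SKETCH	0x2bedf00d

/* A resume-aware bootloader reads the word just below the advertised
 * resume entry point and only jumps there if the marker matches. */
static int image_is_resumeable(const unsigned long *resume_entry)
{
	return resume_entry[-1] == S3C_SLEEP_MAGIC_SKETCH;
}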
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 24febae3d4c0..309e388a8a83 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c | |||
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = { | |||
88 | SAVE_ITEM(S3C2410_TCNTO(0)), | 88 | SAVE_ITEM(S3C2410_TCNTO(0)), |
89 | }; | 89 | }; |
90 | 90 | ||
91 | void s5pv210_cpu_suspend(void) | 91 | void s5pv210_cpu_suspend(unsigned long arg) |
92 | { | 92 | { |
93 | unsigned long tmp; | 93 | unsigned long tmp; |
94 | 94 | ||
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S index a3d649466fb1..e3452ccd4b08 100644 --- a/arch/arm/mach-s5pv210/sleep.S +++ b/arch/arm/mach-s5pv210/sleep.S | |||
@@ -32,27 +32,6 @@ | |||
32 | 32 | ||
33 | .text | 33 | .text |
34 | 34 | ||
35 | /* s3c_cpu_save | ||
36 | * | ||
37 | * entry: | ||
38 | * r1 = v:p offset | ||
39 | */ | ||
40 | |||
41 | ENTRY(s3c_cpu_save) | ||
42 | |||
43 | stmfd sp!, { r3 - r12, lr } | ||
44 | ldr r3, =resume_with_mmu | ||
45 | bl cpu_suspend | ||
46 | |||
47 | ldr r0, =pm_cpu_sleep | ||
48 | ldr r0, [ r0 ] | ||
49 | mov pc, r0 | ||
50 | |||
51 | resume_with_mmu: | ||
52 | ldmfd sp!, { r3 - r12, pc } | ||
53 | |||
54 | .ltorg | ||
55 | |||
56 | /* sleep magic, to allow the bootloader to check for an valid | 35 | /* sleep magic, to allow the bootloader to check for an valid |
57 | * image to resume to. Must be the first word before the | 36 | * image to resume to. Must be the first word before the |
58 | * s3c_cpu_resume entry. | 37 | * s3c_cpu_resume entry. |
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 5778274a8260..26257df19b63 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c | |||
@@ -453,4 +453,7 @@ MACHINE_START(ASSABET, "Intel-Assabet") | |||
453 | .init_irq = sa1100_init_irq, | 453 | .init_irq = sa1100_init_irq, |
454 | .timer = &sa1100_timer, | 454 | .timer = &sa1100_timer, |
455 | .init_machine = assabet_init, | 455 | .init_machine = assabet_init, |
456 | #ifdef CONFIG_SA1111 | ||
457 | .dma_zone_size = SZ_1M, | ||
458 | #endif | ||
456 | MACHINE_END | 459 | MACHINE_END |
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c index 4f19ff868b00..b4311b0a4395 100644 --- a/arch/arm/mach-sa1100/badge4.c +++ b/arch/arm/mach-sa1100/badge4.c | |||
@@ -306,4 +306,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4") | |||
306 | .map_io = badge4_map_io, | 306 | .map_io = badge4_map_io, |
307 | .init_irq = sa1100_init_irq, | 307 | .init_irq = sa1100_init_irq, |
308 | .timer = &sa1100_timer, | 308 | .timer = &sa1100_timer, |
309 | #ifdef CONFIG_SA1111 | ||
310 | .dma_zone_size = SZ_1M, | ||
311 | #endif | ||
309 | MACHINE_END | 312 | MACHINE_END |
diff --git a/arch/arm/mach-sa1100/include/mach/memory.h b/arch/arm/mach-sa1100/include/mach/memory.h index cff31ee246b7..12d376795abc 100644 --- a/arch/arm/mach-sa1100/include/mach/memory.h +++ b/arch/arm/mach-sa1100/include/mach/memory.h | |||
@@ -14,10 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | #define PLAT_PHYS_OFFSET UL(0xc0000000) | 15 | #define PLAT_PHYS_OFFSET UL(0xc0000000) |
16 | 16 | ||
17 | #ifdef CONFIG_SA1111 | ||
18 | #define ARM_DMA_ZONE_SIZE SZ_1M | ||
19 | #endif | ||
20 | |||
21 | /* | 17 | /* |
22 | * Because of the wide memory address space between physical RAM banks on the | 18 | * Because of the wide memory address space between physical RAM banks on the |
23 | * SA1100, it's much convenient to use Linux's SparseMEM support to implement | 19 | * SA1100, it's much convenient to use Linux's SparseMEM support to implement |
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c index 491ac9f20fb4..176c066aec7e 100644 --- a/arch/arm/mach-sa1100/jornada720.c +++ b/arch/arm/mach-sa1100/jornada720.c | |||
@@ -369,4 +369,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720") | |||
369 | .init_irq = sa1100_init_irq, | 369 | .init_irq = sa1100_init_irq, |
370 | .timer = &sa1100_timer, | 370 | .timer = &sa1100_timer, |
371 | .init_machine = jornada720_mach_init, | 371 | .init_machine = jornada720_mach_init, |
372 | #ifdef CONFIG_SA1111 | ||
373 | .dma_zone_size = SZ_1M, | ||
374 | #endif | ||
372 | MACHINE_END | 375 | MACHINE_END |
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index c4661aab22fb..bf85b8b259d5 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c | |||
@@ -29,10 +29,11 @@ | |||
29 | 29 | ||
30 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
31 | #include <asm/memory.h> | 31 | #include <asm/memory.h> |
32 | #include <asm/suspend.h> | ||
32 | #include <asm/system.h> | 33 | #include <asm/system.h> |
33 | #include <asm/mach/time.h> | 34 | #include <asm/mach/time.h> |
34 | 35 | ||
35 | extern void sa1100_cpu_suspend(long); | 36 | extern int sa1100_finish_suspend(unsigned long); |
36 | 37 | ||
37 | #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x | 38 | #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x |
38 | #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] | 39 | #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] |
@@ -75,9 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state) | |||
75 | PSPR = virt_to_phys(cpu_resume); | 76 | PSPR = virt_to_phys(cpu_resume); |
76 | 77 | ||
77 | /* go zzz */ | 78 | /* go zzz */ |
78 | sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); | 79 | cpu_suspend(0, sa1100_finish_suspend); |
79 | |||
80 | cpu_init(); | ||
81 | 80 | ||
82 | /* | 81 | /* |
83 | * Ensure not to come back here if it wasn't intended | 82 | * Ensure not to come back here if it wasn't intended |
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index 04f2a618d4ef..e8223315b442 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S | |||
@@ -22,18 +22,13 @@ | |||
22 | 22 | ||
23 | .text | 23 | .text |
24 | /* | 24 | /* |
25 | * sa1100_cpu_suspend() | 25 | * sa1100_finish_suspend() |
26 | * | 26 | * |
27 | * Causes sa11x0 to enter sleep state | 27 | * Causes sa11x0 to enter sleep state |
28 | * | 28 | * |
29 | */ | 29 | */ |
30 | 30 | ||
31 | ENTRY(sa1100_cpu_suspend) | 31 | ENTRY(sa1100_finish_suspend) |
32 | stmfd sp!, {r4 - r12, lr} @ save registers on stack | ||
33 | mov r1, r0 | ||
34 | ldr r3, =sa1100_cpu_resume @ return function | ||
35 | bl cpu_suspend | ||
36 | |||
37 | @ disable clock switching | 32 | @ disable clock switching |
38 | mcr p15, 0, r1, c15, c2, 2 | 33 | mcr p15, 0, r1, c15, c2, 2 |
39 | 34 | ||
@@ -139,13 +134,3 @@ sa1110_sdram_controller_fix: | |||
139 | str r13, [r12] | 134 | str r13, [r12] |
140 | 135 | ||
141 | 20: b 20b @ loop waiting for sleep | 136 | 20: b 20b @ loop waiting for sleep |
142 | |||
143 | /* | ||
144 | * cpu_sa1100_resume() | ||
145 | * | ||
146 | * entry point from bootloader into kernel during resume | ||
147 | */ | ||
148 | .align 5 | ||
149 | sa1100_cpu_resume: | ||
150 | mcr p15, 0, r1, c15, c1, 2 @ enable clock switching | ||
151 | ldmfd sp!, {r4 - r12, pc} @ return to caller | ||
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c index 5cf7f94c1f31..ac2873c8014b 100644 --- a/arch/arm/mach-shark/core.c +++ b/arch/arm/mach-shark/core.c | |||
@@ -156,4 +156,5 @@ MACHINE_START(SHARK, "Shark") | |||
156 | .map_io = shark_map_io, | 156 | .map_io = shark_map_io, |
157 | .init_irq = shark_init_irq, | 157 | .init_irq = shark_init_irq, |
158 | .timer = &shark_timer, | 158 | .timer = &shark_timer, |
159 | .dma_zone_size = SZ_4M, | ||
159 | MACHINE_END | 160 | MACHINE_END |
diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S index e2853c0a3333..0bb6cc626eb7 100644 --- a/arch/arm/mach-shark/include/mach/entry-macro.S +++ b/arch/arm/mach-shark/include/mach/entry-macro.S | |||
@@ -11,17 +11,17 @@ | |||
11 | .endm | 11 | .endm |
12 | 12 | ||
13 | .macro get_irqnr_preamble, base, tmp | 13 | .macro get_irqnr_preamble, base, tmp |
14 | mov \base, #0xe0000000 | ||
14 | .endm | 15 | .endm |
15 | 16 | ||
16 | .macro arch_ret_to_user, tmp1, tmp2 | 17 | .macro arch_ret_to_user, tmp1, tmp2 |
17 | .endm | 18 | .endm |
18 | 19 | ||
19 | .macro get_irqnr_and_base, irqnr, irqstat, base, tmp | 20 | .macro get_irqnr_and_base, irqnr, irqstat, base, tmp |
20 | mov r4, #0xe0000000 | ||
21 | 21 | ||
22 | mov \irqstat, #0x0C | 22 | mov \irqstat, #0x0C |
23 | strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */ | 23 | strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */ |
24 | ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7 | 24 | ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7 |
25 | and \irqstat, \irqnr, #0x80 | 25 | and \irqstat, \irqnr, #0x80 |
26 | teq \irqstat, #0 | 26 | teq \irqstat, #0 |
27 | beq 43f | 27 | beq 43f |
@@ -29,8 +29,8 @@ | |||
29 | teq \irqnr, #2 | 29 | teq \irqnr, #2 |
30 | bne 44f | 30 | bne 44f |
31 | 43: mov \irqstat, #0x0C | 31 | 43: mov \irqstat, #0x0C |
32 | strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ | 32 | strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ |
33 | ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8 | 33 | ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8 |
34 | and \irqstat, \irqnr, #0x80 | 34 | and \irqstat, \irqnr, #0x80 |
35 | teq \irqstat, #0 | 35 | teq \irqstat, #0 |
36 | beq 44f | 36 | beq 44f |
diff --git a/arch/arm/mach-shark/include/mach/memory.h b/arch/arm/mach-shark/include/mach/memory.h index 4c0831f83b0c..1cf8d6962617 100644 --- a/arch/arm/mach-shark/include/mach/memory.h +++ b/arch/arm/mach-shark/include/mach/memory.h | |||
@@ -17,8 +17,6 @@ | |||
17 | */ | 17 | */ |
18 | #define PLAT_PHYS_OFFSET UL(0x08000000) | 18 | #define PLAT_PHYS_OFFSET UL(0x08000000) |
19 | 19 | ||
20 | #define ARM_DMA_ZONE_SIZE SZ_4M | ||
21 | |||
22 | /* | 20 | /* |
23 | * Cache flushing area | 21 | * Cache flushing area |
24 | */ | 22 | */ |
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h new file mode 100644 index 000000000000..4a81b01f1e8f --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef SDHI_SH7372_H | ||
2 | #define SDHI_SH7372_H | ||
3 | |||
4 | #define SDGENCNTA 0xfe40009c | ||
5 | |||
6 | /* The countdown of SDGENCNTA is controlled by | ||
7 | * ZB3D2CLK which runs at 149.5 MHz. | ||
8 | * That is 149.5 ticks/us. Approximate this as 150 ticks/us. | ||
9 | */ | ||
10 | static void udelay(int us) | ||
11 | { | ||
12 | __raw_writel(us * 150, SDGENCNTA); | ||
13 | while(__raw_readl(SDGENCNTA)) ; | ||
14 | } | ||
15 | |||
16 | static void msleep(int ms) | ||
17 | { | ||
18 | udelay(ms * 1000); | ||
19 | } | ||
20 | |||
21 | #endif | ||
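The 150 ticks/us approximation in the comment above errs on the long side, which is the safe direction for a delay: at the true 149.5 MHz a udelay(1000) call loads 150000 ticks and busy-waits about 1003 us (150000 / 149.5), a roughly 0.3% overshoot that is irrelevant for the boot-time SD/MMC loader. Usage is the same as the normal kernel helpers, only cruder; a short illustration, assuming the header path stays as added above.

#include "mach/sdhi-sh7372.h"	/* the loader-local helpers added above */

static void wait_examples(void)
{
	udelay(1000);	/* load 150000 into SDGENCNTA, spin until it hits 0 */
	msleep(10);	/* implemented as udelay(10 * 1000) on the same counter */
}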
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h new file mode 100644 index 000000000000..0ec9e69f2c3b --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef SDHI_H | ||
2 | #define SDHI_H | ||
3 | |||
4 | /************************************************** | ||
5 | * | ||
6 | * CPU specific settings | ||
7 | * | ||
8 | **************************************************/ | ||
9 | |||
10 | #ifdef CONFIG_ARCH_SH7372 | ||
11 | #include "mach/sdhi-sh7372.h" | ||
12 | #else | ||
13 | #error "unsupported CPU." | ||
14 | #endif | ||
15 | |||
16 | #endif /* SDHI_H */ | ||
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index f3888feb1c68..66f980625a33 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c | |||
@@ -64,10 +64,5 @@ void __init smp_init_cpus(void) | |||
64 | 64 | ||
65 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 65 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
66 | { | 66 | { |
67 | int i; | ||
68 | |||
69 | for (i = 0; i < max_cpus; i++) | ||
70 | set_cpu_present(i, true); | ||
71 | |||
72 | shmobile_smp_prepare_cpus(); | 67 | shmobile_smp_prepare_cpus(); |
73 | } | 68 | } |
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index b8ae3c978dee..1a594dce8fbc 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c | |||
@@ -129,14 +129,6 @@ void __init smp_init_cpus(void) | |||
129 | 129 | ||
130 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 130 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
131 | { | 131 | { |
132 | int i; | ||
133 | |||
134 | /* | ||
135 | * Initialise the present map, which describes the set of CPUs | ||
136 | * actually populated at the present time. | ||
137 | */ | ||
138 | for (i = 0; i < max_cpus; i++) | ||
139 | set_cpu_present(i, true); | ||
140 | 132 | ||
141 | scu_enable(scu_base); | 133 | scu_enable(scu_base); |
142 | } | 134 | } |
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 0c527fe2cebb..a33df5f4c27a 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c | |||
@@ -172,14 +172,6 @@ void __init smp_init_cpus(void) | |||
172 | 172 | ||
173 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 173 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
174 | { | 174 | { |
175 | int i; | ||
176 | |||
177 | /* | ||
178 | * Initialise the present map, which describes the set of CPUs | ||
179 | * actually populated at the present time. | ||
180 | */ | ||
181 | for (i = 0; i < max_cpus; i++) | ||
182 | set_cpu_present(i, true); | ||
183 | 175 | ||
184 | scu_enable(scu_base_addr()); | 176 | scu_enable(scu_base_addr()); |
185 | wakeup_secondary(); | 177 | wakeup_secondary(); |
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 765a71ff7f3b..bfd32f52c2db 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
@@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void) | |||
229 | 229 | ||
230 | static void ct_ca9x4_smp_enable(unsigned int max_cpus) | 230 | static void ct_ca9x4_smp_enable(unsigned int max_cpus) |
231 | { | 231 | { |
232 | int i; | ||
233 | for (i = 0; i < max_cpus; i++) | ||
234 | set_cpu_present(i, true); | ||
235 | |||
236 | scu_enable(MMIO_P2V(A9_MPCORE_SCU)); | 232 | scu_enable(MMIO_P2V(A9_MPCORE_SCU)); |
237 | } | 233 | } |
238 | #endif | 234 | #endif |
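The hunks above (shmobile, tegra, ux500, vexpress) all delete the same open-coded loop, presumably because the series consolidates it in common ARM SMP code. For reference, the removed pattern was simply the following; the loop body is verbatim from the deleted lines, while the helper name and its new home are assumptions:

    static void mark_cpus_present(unsigned int max_cpus)
    {
        unsigned int i;

        /* Initialise the present map: the first max_cpus CPUs are populated. */
        for (i = 0; i < max_cpus; i++)
            set_cpu_present(i, true);
    }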
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 4f18f9e87bae..54473cd4aba9 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v4_early_abort | 4 | * Function: v4_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | * Note: we read user space. This means we might cause a data | 13 | * Note: we read user space. This means we might cause a data |
@@ -21,10 +18,8 @@ | |||
21 | ENTRY(v4_early_abort) | 18 | ENTRY(v4_early_abort) |
22 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 19 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
23 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 20 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
24 | ldr r3, [r2] @ read aborted ARM instruction | 21 | ldr r3, [r4] @ read aborted ARM instruction |
25 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 22 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
26 | tst r3, #1 << 20 @ L = 1 -> write? | 23 | tst r3, #1 << 20 @ L = 1 -> write? |
27 | orreq r1, r1, #1 << 11 @ yes. | 24 | orreq r1, r1, #1 << 11 @ yes. |
28 | mov pc, lr | 25 | b do_DataAbort |
29 | |||
30 | |||
diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index b6282548f922..9da704e7b86e 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v4t_early_abort | 5 | * Function: v4t_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -22,9 +19,9 @@ | |||
22 | ENTRY(v4t_early_abort) | 19 | ENTRY(v4t_early_abort) |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | do_thumb_abort | 22 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
26 | ldreq r3, [r2] @ read aborted ARM instruction | 23 | ldreq r3, [r4] @ read aborted ARM instruction |
27 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 24 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
28 | tst r3, #1 << 20 @ check write | 25 | tst r3, #1 << 20 @ check write |
29 | orreq r1, r1, #1 << 11 | 26 | orreq r1, r1, #1 << 11 |
30 | mov pc, lr | 27 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 02251b526c0d..a0908d4653a3 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v5t_early_abort | 5 | * Function: v5t_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -22,10 +19,10 @@ | |||
22 | ENTRY(v5t_early_abort) | 19 | ENTRY(v5t_early_abort) |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | do_thumb_abort | 22 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
26 | ldreq r3, [r2] @ read aborted ARM instruction | 23 | ldreq r3, [r4] @ read aborted ARM instruction |
27 | bic r1, r1, #1 << 11 @ clear bits 11 of FSR | 24 | bic r1, r1, #1 << 11 @ clear bits 11 of FSR |
28 | do_ldrd_abort | 25 | do_ldrd_abort tmp=ip, insn=r3 |
29 | tst r3, #1 << 20 @ check write | 26 | tst r3, #1 << 20 @ check write |
30 | orreq r1, r1, #1 << 11 | 27 | orreq r1, r1, #1 << 11 |
31 | mov pc, lr | 28 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bce68d601c8b..4006b7a61264 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v5tj_early_abort | 5 | * Function: v5tj_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort) | |||
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 22 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
26 | tst r3, #PSR_J_BIT @ Java? | 23 | tst r5, #PSR_J_BIT @ Java? |
27 | movne pc, lr | 24 | bne do_DataAbort |
28 | do_thumb_abort | 25 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
29 | ldreq r3, [r2] @ read aborted ARM instruction | 26 | ldreq r3, [r4] @ read aborted ARM instruction |
30 | do_ldrd_abort | 27 | do_ldrd_abort tmp=ip, insn=r3 |
31 | tst r3, #1 << 20 @ L = 0 -> write | 28 | tst r3, #1 << 20 @ L = 0 -> write |
32 | orreq r1, r1, #1 << 11 @ yes. | 29 | orreq r1, r1, #1 << 11 @ yes. |
33 | mov pc, lr | 30 | b do_DataAbort |
34 | |||
35 | |||
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 1478aa522144..ff1f7cc11f87 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v6_early_abort | 5 | * Function: v6_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -33,16 +30,14 @@ ENTRY(v6_early_abort) | |||
33 | * The test below covers all the write situations, including Java bytecodes | 30 | * The test below covers all the write situations, including Java bytecodes |
34 | */ | 31 | */ |
35 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR | 32 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR |
36 | tst r3, #PSR_J_BIT @ Java? | 33 | tst r5, #PSR_J_BIT @ Java? |
37 | movne pc, lr | 34 | bne do_DataAbort |
38 | do_thumb_abort | 35 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
39 | ldreq r3, [r2] @ read aborted ARM instruction | 36 | ldreq r3, [r4] @ read aborted ARM instruction |
40 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 37 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
41 | reveq r3, r3 | 38 | reveq r3, r3 |
42 | #endif | 39 | #endif |
43 | do_ldrd_abort | 40 | do_ldrd_abort tmp=ip, insn=r3 |
44 | tst r3, #1 << 20 @ L = 0 -> write | 41 | tst r3, #1 << 20 @ L = 0 -> write |
45 | orreq r1, r1, #1 << 11 @ yes. | 42 | orreq r1, r1, #1 << 11 @ yes. |
46 | mov pc, lr | 43 | b do_DataAbort |
47 | |||
48 | |||
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index ec88b157d3bb..703375277ba6 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v7_early_abort | 4 | * Function: v7_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | */ | 13 | */ |
@@ -37,18 +34,18 @@ ENTRY(v7_early_abort) | |||
37 | ldr r3, =0x40d @ On permission fault | 34 | ldr r3, =0x40d @ On permission fault |
38 | and r3, r1, r3 | 35 | and r3, r1, r3 |
39 | cmp r3, #0x0d | 36 | cmp r3, #0x0d |
40 | movne pc, lr | 37 | bne do_DataAbort |
41 | 38 | ||
42 | mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR | 39 | mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR |
43 | isb | 40 | isb |
44 | mrc p15, 0, r2, c7, c4, 0 @ Read the PAR | 41 | mrc p15, 0, ip, c7, c4, 0 @ Read the PAR |
45 | and r3, r2, #0x7b @ On translation fault | 42 | and r3, ip, #0x7b @ On translation fault |
46 | cmp r3, #0x0b | 43 | cmp r3, #0x0b |
47 | movne pc, lr | 44 | bne do_DataAbort |
48 | bic r1, r1, #0xf @ Fix up FSR FS[5:0] | 45 | bic r1, r1, #0xf @ Fix up FSR FS[5:0] |
49 | and r2, r2, #0x7e | 46 | and ip, ip, #0x7e |
50 | orr r1, r1, r2, LSR #1 | 47 | orr r1, r1, ip, LSR #1 |
51 | #endif | 48 | #endif |
52 | 49 | ||
53 | mov pc, lr | 50 | b do_DataAbort |
54 | ENDPROC(v7_early_abort) | 51 | ENDPROC(v7_early_abort) |
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 9fb7b0e25ea1..f3982580c273 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v4t_late_abort | 4 | * Function: v4t_late_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4-r5, r10-r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | * Note: we read user space. This means we might cause a data | 13 | * Note: we read user space. This means we might cause a data |
@@ -18,7 +15,7 @@ | |||
18 | * picture. Unfortunately, this does happen. We live with it. | 15 | * picture. Unfortunately, this does happen. We live with it. |
19 | */ | 16 | */ |
20 | ENTRY(v4t_late_abort) | 17 | ENTRY(v4t_late_abort) |
21 | tst r3, #PSR_T_BIT @ check for thumb mode | 18 | tst r5, #PSR_T_BIT @ check for thumb mode |
22 | #ifdef CONFIG_CPU_CP15_MMU | 19 | #ifdef CONFIG_CPU_CP15_MMU |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
@@ -28,7 +25,7 @@ ENTRY(v4t_late_abort) | |||
28 | mov r1, #0 | 25 | mov r1, #0 |
29 | #endif | 26 | #endif |
30 | bne .data_thumb_abort | 27 | bne .data_thumb_abort |
31 | ldr r8, [r2] @ read arm instruction | 28 | ldr r8, [r4] @ read arm instruction |
32 | tst r8, #1 << 20 @ L = 1 -> write? | 29 | tst r8, #1 << 20 @ L = 1 -> write? |
33 | orreq r1, r1, #1 << 11 @ yes. | 30 | orreq r1, r1, #1 << 11 @ yes. |
34 | and r7, r8, #15 << 24 | 31 | and r7, r8, #15 << 24 |
@@ -47,86 +44,84 @@ ENTRY(v4t_late_abort) | |||
47 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> | 44 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> |
48 | /* a */ b .data_unknown | 45 | /* a */ b .data_unknown |
49 | /* b */ b .data_unknown | 46 | /* b */ b .data_unknown |
50 | /* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m | 47 | /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m |
51 | /* d */ mov pc, lr @ ldc rd, [rn, #m] | 48 | /* d */ b do_DataAbort @ ldc rd, [rn, #m] |
52 | /* e */ b .data_unknown | 49 | /* e */ b .data_unknown |
53 | /* f */ | 50 | /* f */ |
54 | .data_unknown: @ Part of jumptable | 51 | .data_unknown: @ Part of jumptable |
55 | mov r0, r2 | 52 | mov r0, r4 |
56 | mov r1, r8 | 53 | mov r1, r8 |
57 | mov r2, sp | 54 | b baddataabort |
58 | bl baddataabort | ||
59 | b ret_from_exception | ||
60 | 55 | ||
61 | .data_arm_ldmstm: | 56 | .data_arm_ldmstm: |
62 | tst r8, #1 << 21 @ check writeback bit | 57 | tst r8, #1 << 21 @ check writeback bit |
63 | moveq pc, lr @ no writeback -> no fixup | 58 | beq do_DataAbort @ no writeback -> no fixup |
64 | mov r7, #0x11 | 59 | mov r7, #0x11 |
65 | orr r7, r7, #0x1100 | 60 | orr r7, r7, #0x1100 |
66 | and r6, r8, r7 | 61 | and r6, r8, r7 |
67 | and r2, r8, r7, lsl #1 | 62 | and r9, r8, r7, lsl #1 |
68 | add r6, r6, r2, lsr #1 | 63 | add r6, r6, r9, lsr #1 |
69 | and r2, r8, r7, lsl #2 | 64 | and r9, r8, r7, lsl #2 |
70 | add r6, r6, r2, lsr #2 | 65 | add r6, r6, r9, lsr #2 |
71 | and r2, r8, r7, lsl #3 | 66 | and r9, r8, r7, lsl #3 |
72 | add r6, r6, r2, lsr #3 | 67 | add r6, r6, r9, lsr #3 |
73 | add r6, r6, r6, lsr #8 | 68 | add r6, r6, r6, lsr #8 |
74 | add r6, r6, r6, lsr #4 | 69 | add r6, r6, r6, lsr #4 |
75 | and r6, r6, #15 @ r6 = no. of registers to transfer. | 70 | and r6, r6, #15 @ r6 = no. of registers to transfer. |
76 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 71 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
77 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 72 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
78 | tst r8, #1 << 23 @ Check U bit | 73 | tst r8, #1 << 23 @ Check U bit |
79 | subne r7, r7, r6, lsl #2 @ Undo increment | 74 | subne r7, r7, r6, lsl #2 @ Undo increment |
80 | addeq r7, r7, r6, lsl #2 @ Undo decrement | 75 | addeq r7, r7, r6, lsl #2 @ Undo decrement |
81 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 76 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
82 | mov pc, lr | 77 | b do_DataAbort |
83 | 78 | ||
84 | .data_arm_lateldrhpre: | 79 | .data_arm_lateldrhpre: |
85 | tst r8, #1 << 21 @ Check writeback bit | 80 | tst r8, #1 << 21 @ Check writeback bit |
86 | moveq pc, lr @ No writeback -> no fixup | 81 | beq do_DataAbort @ No writeback -> no fixup |
87 | .data_arm_lateldrhpost: | 82 | .data_arm_lateldrhpost: |
88 | and r5, r8, #0x00f @ get Rm / low nibble of immediate value | 83 | and r9, r8, #0x00f @ get Rm / low nibble of immediate value |
89 | tst r8, #1 << 22 @ if (immediate offset) | 84 | tst r8, #1 << 22 @ if (immediate offset) |
90 | andne r6, r8, #0xf00 @ { immediate high nibble | 85 | andne r6, r8, #0xf00 @ { immediate high nibble |
91 | orrne r6, r5, r6, lsr #4 @ combine nibbles } else | 86 | orrne r6, r9, r6, lsr #4 @ combine nibbles } else |
92 | ldreq r6, [sp, r5, lsl #2] @ { load Rm value } | 87 | ldreq r6, [r2, r9, lsl #2] @ { load Rm value } |
93 | .data_arm_apply_r6_and_rn: | 88 | .data_arm_apply_r6_and_rn: |
94 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 89 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
95 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 90 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
96 | tst r8, #1 << 23 @ Check U bit | 91 | tst r8, #1 << 23 @ Check U bit |
97 | subne r7, r7, r6 @ Undo increment | 92 | subne r7, r7, r6 @ Undo increment |
98 | addeq r7, r7, r6 @ Undo decrement | 93 | addeq r7, r7, r6 @ Undo decrement |
99 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 94 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
100 | mov pc, lr | 95 | b do_DataAbort |
101 | 96 | ||
102 | .data_arm_lateldrpreconst: | 97 | .data_arm_lateldrpreconst: |
103 | tst r8, #1 << 21 @ check writeback bit | 98 | tst r8, #1 << 21 @ check writeback bit |
104 | moveq pc, lr @ no writeback -> no fixup | 99 | beq do_DataAbort @ no writeback -> no fixup |
105 | .data_arm_lateldrpostconst: | 100 | .data_arm_lateldrpostconst: |
106 | movs r2, r8, lsl #20 @ Get offset | 101 | movs r6, r8, lsl #20 @ Get offset |
107 | moveq pc, lr @ zero -> no fixup | 102 | beq do_DataAbort @ zero -> no fixup |
108 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 103 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
109 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 104 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
110 | tst r8, #1 << 23 @ Check U bit | 105 | tst r8, #1 << 23 @ Check U bit |
111 | subne r7, r7, r2, lsr #20 @ Undo increment | 106 | subne r7, r7, r6, lsr #20 @ Undo increment |
112 | addeq r7, r7, r2, lsr #20 @ Undo decrement | 107 | addeq r7, r7, r6, lsr #20 @ Undo decrement |
113 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 108 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
114 | mov pc, lr | 109 | b do_DataAbort |
115 | 110 | ||
116 | .data_arm_lateldrprereg: | 111 | .data_arm_lateldrprereg: |
117 | tst r8, #1 << 21 @ check writeback bit | 112 | tst r8, #1 << 21 @ check writeback bit |
118 | moveq pc, lr @ no writeback -> no fixup | 113 | beq do_DataAbort @ no writeback -> no fixup |
119 | .data_arm_lateldrpostreg: | 114 | .data_arm_lateldrpostreg: |
120 | and r7, r8, #15 @ Extract 'm' from instruction | 115 | and r7, r8, #15 @ Extract 'm' from instruction |
121 | ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' | 116 | ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' |
122 | mov r5, r8, lsr #7 @ get shift count | 117 | mov r9, r8, lsr #7 @ get shift count |
123 | ands r5, r5, #31 | 118 | ands r9, r9, #31 |
124 | and r7, r8, #0x70 @ get shift type | 119 | and r7, r8, #0x70 @ get shift type |
125 | orreq r7, r7, #8 @ shift count = 0 | 120 | orreq r7, r7, #8 @ shift count = 0 |
126 | add pc, pc, r7 | 121 | add pc, pc, r7 |
127 | nop | 122 | nop |
128 | 123 | ||
129 | mov r6, r6, lsl r5 @ 0: LSL #!0 | 124 | mov r6, r6, lsl r9 @ 0: LSL #!0 |
130 | b .data_arm_apply_r6_and_rn | 125 | b .data_arm_apply_r6_and_rn |
131 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 | 126 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 |
132 | nop | 127 | nop |
@@ -134,7 +129,7 @@ ENTRY(v4t_late_abort) | |||
134 | nop | 129 | nop |
135 | b .data_unknown @ 3: MUL? | 130 | b .data_unknown @ 3: MUL? |
136 | nop | 131 | nop |
137 | mov r6, r6, lsr r5 @ 4: LSR #!0 | 132 | mov r6, r6, lsr r9 @ 4: LSR #!0 |
138 | b .data_arm_apply_r6_and_rn | 133 | b .data_arm_apply_r6_and_rn |
139 | mov r6, r6, lsr #32 @ 5: LSR #32 | 134 | mov r6, r6, lsr #32 @ 5: LSR #32 |
140 | b .data_arm_apply_r6_and_rn | 135 | b .data_arm_apply_r6_and_rn |
@@ -142,7 +137,7 @@ ENTRY(v4t_late_abort) | |||
142 | nop | 137 | nop |
143 | b .data_unknown @ 7: MUL? | 138 | b .data_unknown @ 7: MUL? |
144 | nop | 139 | nop |
145 | mov r6, r6, asr r5 @ 8: ASR #!0 | 140 | mov r6, r6, asr r9 @ 8: ASR #!0 |
146 | b .data_arm_apply_r6_and_rn | 141 | b .data_arm_apply_r6_and_rn |
147 | mov r6, r6, asr #32 @ 9: ASR #32 | 142 | mov r6, r6, asr #32 @ 9: ASR #32 |
148 | b .data_arm_apply_r6_and_rn | 143 | b .data_arm_apply_r6_and_rn |
@@ -150,7 +145,7 @@ ENTRY(v4t_late_abort) | |||
150 | nop | 145 | nop |
151 | b .data_unknown @ B: MUL? | 146 | b .data_unknown @ B: MUL? |
152 | nop | 147 | nop |
153 | mov r6, r6, ror r5 @ C: ROR #!0 | 148 | mov r6, r6, ror r9 @ C: ROR #!0 |
154 | b .data_arm_apply_r6_and_rn | 149 | b .data_arm_apply_r6_and_rn |
155 | mov r6, r6, rrx @ D: RRX | 150 | mov r6, r6, rrx @ D: RRX |
156 | b .data_arm_apply_r6_and_rn | 151 | b .data_arm_apply_r6_and_rn |
@@ -159,7 +154,7 @@ ENTRY(v4t_late_abort) | |||
159 | b .data_unknown @ F: MUL? | 154 | b .data_unknown @ F: MUL? |
160 | 155 | ||
161 | .data_thumb_abort: | 156 | .data_thumb_abort: |
162 | ldrh r8, [r2] @ read instruction | 157 | ldrh r8, [r4] @ read instruction |
163 | tst r8, #1 << 11 @ L = 1 -> write? | 158 | tst r8, #1 << 11 @ L = 1 -> write? |
164 | orreq r1, r1, #1 << 8 @ yes | 159 | orreq r1, r1, #1 << 8 @ yes |
165 | and r7, r8, #15 << 12 | 160 | and r7, r8, #15 << 12 |
@@ -172,10 +167,10 @@ ENTRY(v4t_late_abort) | |||
172 | /* 3 */ b .data_unknown | 167 | /* 3 */ b .data_unknown |
173 | /* 4 */ b .data_unknown | 168 | /* 4 */ b .data_unknown |
174 | /* 5 */ b .data_thumb_reg | 169 | /* 5 */ b .data_thumb_reg |
175 | /* 6 */ mov pc, lr | 170 | /* 6 */ b do_DataAbort |
176 | /* 7 */ mov pc, lr | 171 | /* 7 */ b do_DataAbort |
177 | /* 8 */ mov pc, lr | 172 | /* 8 */ b do_DataAbort |
178 | /* 9 */ mov pc, lr | 173 | /* 9 */ b do_DataAbort |
179 | /* A */ b .data_unknown | 174 | /* A */ b .data_unknown |
180 | /* B */ b .data_thumb_pushpop | 175 | /* B */ b .data_thumb_pushpop |
181 | /* C */ b .data_thumb_ldmstm | 176 | /* C */ b .data_thumb_ldmstm |
@@ -185,41 +180,41 @@ ENTRY(v4t_late_abort) | |||
185 | 180 | ||
186 | .data_thumb_reg: | 181 | .data_thumb_reg: |
187 | tst r8, #1 << 9 | 182 | tst r8, #1 << 9 |
188 | moveq pc, lr | 183 | beq do_DataAbort |
189 | tst r8, #1 << 10 @ If 'S' (signed) bit is set | 184 | tst r8, #1 << 10 @ If 'S' (signed) bit is set |
190 | movne r1, #0 @ it must be a load instr | 185 | movne r1, #0 @ it must be a load instr |
191 | mov pc, lr | 186 | b do_DataAbort |
192 | 187 | ||
193 | .data_thumb_pushpop: | 188 | .data_thumb_pushpop: |
194 | tst r8, #1 << 10 | 189 | tst r8, #1 << 10 |
195 | beq .data_unknown | 190 | beq .data_unknown |
196 | and r6, r8, #0x55 @ hweight8(r8) + R bit | 191 | and r6, r8, #0x55 @ hweight8(r8) + R bit |
197 | and r2, r8, #0xaa | 192 | and r9, r8, #0xaa |
198 | add r6, r6, r2, lsr #1 | 193 | add r6, r6, r9, lsr #1 |
199 | and r2, r6, #0xcc | 194 | and r9, r6, #0xcc |
200 | and r6, r6, #0x33 | 195 | and r6, r6, #0x33 |
201 | add r6, r6, r2, lsr #2 | 196 | add r6, r6, r9, lsr #2 |
202 | movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) | 197 | movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) |
203 | adc r6, r6, r6, lsr #4 @ high + low nibble + R bit | 198 | adc r6, r6, r6, lsr #4 @ high + low nibble + R bit |
204 | and r6, r6, #15 @ number of regs to transfer | 199 | and r6, r6, #15 @ number of regs to transfer |
205 | ldr r7, [sp, #13 << 2] | 200 | ldr r7, [r2, #13 << 2] |
206 | tst r8, #1 << 11 | 201 | tst r8, #1 << 11 |
207 | addeq r7, r7, r6, lsl #2 @ increment SP if PUSH | 202 | addeq r7, r7, r6, lsl #2 @ increment SP if PUSH |
208 | subne r7, r7, r6, lsl #2 @ decrement SP if POP | 203 | subne r7, r7, r6, lsl #2 @ decrement SP if POP |
209 | str r7, [sp, #13 << 2] | 204 | str r7, [r2, #13 << 2] |
210 | mov pc, lr | 205 | b do_DataAbort |
211 | 206 | ||
212 | .data_thumb_ldmstm: | 207 | .data_thumb_ldmstm: |
213 | and r6, r8, #0x55 @ hweight8(r8) | 208 | and r6, r8, #0x55 @ hweight8(r8) |
214 | and r2, r8, #0xaa | 209 | and r9, r8, #0xaa |
215 | add r6, r6, r2, lsr #1 | 210 | add r6, r6, r9, lsr #1 |
216 | and r2, r6, #0xcc | 211 | and r9, r6, #0xcc |
217 | and r6, r6, #0x33 | 212 | and r6, r6, #0x33 |
218 | add r6, r6, r2, lsr #2 | 213 | add r6, r6, r9, lsr #2 |
219 | add r6, r6, r6, lsr #4 | 214 | add r6, r6, r6, lsr #4 |
220 | and r5, r8, #7 << 8 | 215 | and r9, r8, #7 << 8 |
221 | ldr r7, [sp, r5, lsr #6] | 216 | ldr r7, [r2, r9, lsr #6] |
222 | and r6, r6, #15 @ number of regs to transfer | 217 | and r6, r6, #15 @ number of regs to transfer |
223 | sub r7, r7, r6, lsl #2 @ always decrement | 218 | sub r7, r7, r6, lsl #2 @ always decrement |
224 | str r7, [sp, r5, lsr #6] | 219 | str r7, [r2, r9, lsr #6] |
225 | mov pc, lr | 220 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index d7cb1bfa51a4..52162d59407a 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S | |||
@@ -9,34 +9,32 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | .macro do_thumb_abort | 12 | .macro do_thumb_abort, fsr, pc, psr, tmp |
13 | tst r3, #PSR_T_BIT | 13 | tst \psr, #PSR_T_BIT |
14 | beq not_thumb | 14 | beq not_thumb |
15 | ldrh r3, [r2] @ Read aborted Thumb instruction | 15 | ldrh \tmp, [\pc] @ Read aborted Thumb instruction |
16 | and r3, r3, # 0xfe00 @ Mask opcode field | 16 | and \tmp, \tmp, # 0xfe00 @ Mask opcode field |
17 | cmp r3, # 0x5600 @ Is it ldrsb? | 17 | cmp \tmp, # 0x5600 @ Is it ldrsb? |
18 | orreq r3, r3, #1 << 11 @ Set L-bit if yes | 18 | orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes |
19 | tst r3, #1 << 11 @ L = 0 -> write | 19 | tst \tmp, #1 << 11 @ L = 0 -> write |
20 | orreq r1, r1, #1 << 11 @ yes. | 20 | orreq \psr, \psr, #1 << 11 @ yes. |
21 | mov pc, lr | 21 | b do_DataAbort |
22 | not_thumb: | 22 | not_thumb: |
23 | .endm | 23 | .endm |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * We check for the following insturction encoding for LDRD. | 26 | * We check for the following instruction encoding for LDRD. |
27 | * | 27 | * |
28 | * [27:25] == 0 | 28 | * [27:25] == 000 |
29 | * [7:4] == 1101 | 29 | * [7:4] == 1101 |
30 | * [20] == 0 | 30 | * [20] == 0 |
31 | */ | 31 | */ |
32 | .macro do_ldrd_abort | 32 | .macro do_ldrd_abort, tmp, insn |
33 | tst r3, #0x0e000000 @ [27:25] == 0 | 33 | tst \insn, #0x0e100000 @ [27:25,20] == 0 |
34 | bne not_ldrd | 34 | bne not_ldrd |
35 | and r2, r3, #0x000000f0 @ [7:4] == 1101 | 35 | and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 |
36 | cmp r2, #0x000000d0 | 36 | cmp \tmp, #0x000000d0 |
37 | bne not_ldrd | 37 | beq do_DataAbort |
38 | tst r3, #1 << 20 @ [20] == 0 | ||
39 | moveq pc, lr | ||
40 | not_ldrd: | 38 | not_ldrd: |
41 | .endm | 39 | .endm |
42 | 40 | ||
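The reworked do_ldrd_abort test above can be restated in C to make the bit pattern explicit; is_ldrd() is an illustrative name only, not a kernel function:

    static int is_ldrd(unsigned int insn)
    {
        /* bits [27:25] and bit [20] must all be 0 (mask 0x0e100000) */
        if (insn & 0x0e100000)
            return 0;
        /* bits [7:4] must be 1101 */
        return (insn & 0x000000f0) == 0x000000d0;
    }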
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 625e580945b5..119cb479c2ab 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S | |||
@@ -3,11 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: nommu_early_abort | 4 | * Function: nommu_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = 0 (abort address) | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = 0 (FSR) | ||
11 | * | 11 | * |
12 | * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. | 12 | * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. |
13 | * Just fill zero into the registers. | 13 | * Just fill zero into the registers. |
@@ -16,5 +16,5 @@ | |||
16 | ENTRY(nommu_early_abort) | 16 | ENTRY(nommu_early_abort) |
17 | mov r0, #0 @ clear r0, r1 (no FSR/FAR) | 17 | mov r0, #0 @ clear r0, r1 (no FSR/FAR) |
18 | mov r1, #0 | 18 | mov r1, #0 |
19 | mov pc, lr | 19 | b do_DataAbort |
20 | ENDPROC(nommu_early_abort) | 20 | ENDPROC(nommu_early_abort) |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 724ba3bce72c..be7c638b648b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
727 | int isize = 4; | 727 | int isize = 4; |
728 | int thumb2_32b = 0; | 728 | int thumb2_32b = 0; |
729 | 729 | ||
730 | if (interrupts_enabled(regs)) | ||
731 | local_irq_enable(); | ||
732 | |||
730 | instrptr = instruction_pointer(regs); | 733 | instrptr = instruction_pointer(regs); |
731 | 734 | ||
732 | fs = get_fs(); | 735 | fs = get_fs(); |
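This hunk and the matching one in fault.c below both rely on interrupts_enabled(). A minimal sketch of that helper, following the usual ARM definition (the exact header it lives in is an assumption here):

    static inline int interrupts_enabled(struct pt_regs *regs)
    {
        /* IRQs were enabled in the interrupted context iff the I bit is
         * clear in its saved CPSR. */
        return !(regs->ARM_cpsr & PSR_I_BIT);
    }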
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 1fa6f71470de..072016371093 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S | |||
@@ -242,16 +242,5 @@ ENDPROC(fa_dma_unmap_area) | |||
242 | 242 | ||
243 | __INITDATA | 243 | __INITDATA |
244 | 244 | ||
245 | .type fa_cache_fns, #object | 245 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
246 | ENTRY(fa_cache_fns) | 246 | define_cache_functions fa |
247 | .long fa_flush_icache_all | ||
248 | .long fa_flush_kern_cache_all | ||
249 | .long fa_flush_user_cache_all | ||
250 | .long fa_flush_user_cache_range | ||
251 | .long fa_coherent_kern_range | ||
252 | .long fa_coherent_user_range | ||
253 | .long fa_flush_kern_dcache_area | ||
254 | .long fa_dma_map_area | ||
255 | .long fa_dma_unmap_area | ||
256 | .long fa_dma_flush_range | ||
257 | .size fa_cache_fns, . - fa_cache_fns | ||
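define_cache_functions generates the same table that the hand-written .long list used to spell out, and the same macro is used for the v3/v4/v6/v7 and proc-arm102x conversions below. The C structure these tables populate looks roughly like this (member names quoted from memory; check <asm/cacheflush.h> for the authoritative definition):

    struct cpu_cache_fns {
        void (*flush_icache_all)(void);
        void (*flush_kern_all)(void);
        void (*flush_user_all)(void);
        void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

        void (*coherent_kern_range)(unsigned long, unsigned long);
        void (*coherent_user_range)(unsigned long, unsigned long);
        void (*flush_kern_dcache_area)(void *, size_t);

        void (*dma_map_area)(const void *, size_t, int);
        void (*dma_unmap_area)(const void *, size_t, int);
        void (*dma_flush_range)(const void *, const void *);
    };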
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2e2bc406a18d..c2301f226100 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S | |||
@@ -129,16 +129,5 @@ ENDPROC(v3_dma_map_area) | |||
129 | 129 | ||
130 | __INITDATA | 130 | __INITDATA |
131 | 131 | ||
132 | .type v3_cache_fns, #object | 132 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
133 | ENTRY(v3_cache_fns) | 133 | define_cache_functions v3 |
134 | .long v3_flush_icache_all | ||
135 | .long v3_flush_kern_cache_all | ||
136 | .long v3_flush_user_cache_all | ||
137 | .long v3_flush_user_cache_range | ||
138 | .long v3_coherent_kern_range | ||
139 | .long v3_coherent_user_range | ||
140 | .long v3_flush_kern_dcache_area | ||
141 | .long v3_dma_map_area | ||
142 | .long v3_dma_unmap_area | ||
143 | .long v3_dma_flush_range | ||
144 | .size v3_cache_fns, . - v3_cache_fns | ||
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index a8fefb523f19..fd9bb7addc8d 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S | |||
@@ -141,16 +141,5 @@ ENDPROC(v4_dma_map_area) | |||
141 | 141 | ||
142 | __INITDATA | 142 | __INITDATA |
143 | 143 | ||
144 | .type v4_cache_fns, #object | 144 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
145 | ENTRY(v4_cache_fns) | 145 | define_cache_functions v4 |
146 | .long v4_flush_icache_all | ||
147 | .long v4_flush_kern_cache_all | ||
148 | .long v4_flush_user_cache_all | ||
149 | .long v4_flush_user_cache_range | ||
150 | .long v4_coherent_kern_range | ||
151 | .long v4_coherent_user_range | ||
152 | .long v4_flush_kern_dcache_area | ||
153 | .long v4_dma_map_area | ||
154 | .long v4_dma_unmap_area | ||
155 | .long v4_dma_flush_range | ||
156 | .size v4_cache_fns, . - v4_cache_fns | ||
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index f40c69656d8d..4f2c14151ccb 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S | |||
@@ -253,16 +253,5 @@ ENDPROC(v4wb_dma_unmap_area) | |||
253 | 253 | ||
254 | __INITDATA | 254 | __INITDATA |
255 | 255 | ||
256 | .type v4wb_cache_fns, #object | 256 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
257 | ENTRY(v4wb_cache_fns) | 257 | define_cache_functions v4wb |
258 | .long v4wb_flush_icache_all | ||
259 | .long v4wb_flush_kern_cache_all | ||
260 | .long v4wb_flush_user_cache_all | ||
261 | .long v4wb_flush_user_cache_range | ||
262 | .long v4wb_coherent_kern_range | ||
263 | .long v4wb_coherent_user_range | ||
264 | .long v4wb_flush_kern_dcache_area | ||
265 | .long v4wb_dma_map_area | ||
266 | .long v4wb_dma_unmap_area | ||
267 | .long v4wb_dma_flush_range | ||
268 | .size v4wb_cache_fns, . - v4wb_cache_fns | ||
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index a7b276dbda11..4d7b467631ce 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S | |||
@@ -197,16 +197,5 @@ ENDPROC(v4wt_dma_map_area) | |||
197 | 197 | ||
198 | __INITDATA | 198 | __INITDATA |
199 | 199 | ||
200 | .type v4wt_cache_fns, #object | 200 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
201 | ENTRY(v4wt_cache_fns) | 201 | define_cache_functions v4wt |
202 | .long v4wt_flush_icache_all | ||
203 | .long v4wt_flush_kern_cache_all | ||
204 | .long v4wt_flush_user_cache_all | ||
205 | .long v4wt_flush_user_cache_range | ||
206 | .long v4wt_coherent_kern_range | ||
207 | .long v4wt_coherent_user_range | ||
208 | .long v4wt_flush_kern_dcache_area | ||
209 | .long v4wt_dma_map_area | ||
210 | .long v4wt_dma_unmap_area | ||
211 | .long v4wt_dma_flush_range | ||
212 | .size v4wt_cache_fns, . - v4wt_cache_fns | ||
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 73b4a8b66a57..74c2e5a33a4d 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S | |||
@@ -330,16 +330,5 @@ ENDPROC(v6_dma_unmap_area) | |||
330 | 330 | ||
331 | __INITDATA | 331 | __INITDATA |
332 | 332 | ||
333 | .type v6_cache_fns, #object | 333 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
334 | ENTRY(v6_cache_fns) | 334 | define_cache_functions v6 |
335 | .long v6_flush_icache_all | ||
336 | .long v6_flush_kern_cache_all | ||
337 | .long v6_flush_user_cache_all | ||
338 | .long v6_flush_user_cache_range | ||
339 | .long v6_coherent_kern_range | ||
340 | .long v6_coherent_user_range | ||
341 | .long v6_flush_kern_dcache_area | ||
342 | .long v6_dma_map_area | ||
343 | .long v6_dma_unmap_area | ||
344 | .long v6_dma_flush_range | ||
345 | .size v6_cache_fns, . - v6_cache_fns | ||
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index d32f02b61866..3b24bfa3b828 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -325,16 +325,5 @@ ENDPROC(v7_dma_unmap_area) | |||
325 | 325 | ||
326 | __INITDATA | 326 | __INITDATA |
327 | 327 | ||
328 | .type v7_cache_fns, #object | 328 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
329 | ENTRY(v7_cache_fns) | 329 | define_cache_functions v7 |
330 | .long v7_flush_icache_all | ||
331 | .long v7_flush_kern_cache_all | ||
332 | .long v7_flush_user_cache_all | ||
333 | .long v7_flush_user_cache_range | ||
334 | .long v7_coherent_kern_range | ||
335 | .long v7_coherent_user_range | ||
336 | .long v7_flush_kern_dcache_area | ||
337 | .long v7_dma_map_area | ||
338 | .long v7_dma_unmap_area | ||
339 | .long v7_dma_flush_range | ||
340 | .size v7_cache_fns, . - v7_cache_fns | ||
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index bdba6c65c901..63cca0097130 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to, KM_USER1); |
43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
44 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); | ||
45 | kunmap_atomic(kto, KM_USER1); | 44 | kunmap_atomic(kto, KM_USER1); |
46 | kunmap_atomic(kfrom, KM_USER0); | 45 | kunmap_atomic(kfrom, KM_USER0); |
47 | } | 46 | } |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093cee09a..0a0a1e7c20d2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -25,9 +25,11 @@ | |||
25 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/sizes.h> | 26 | #include <asm/sizes.h> |
27 | 27 | ||
28 | #include "mm.h" | ||
29 | |||
28 | static u64 get_coherent_dma_mask(struct device *dev) | 30 | static u64 get_coherent_dma_mask(struct device *dev) |
29 | { | 31 | { |
30 | u64 mask = ISA_DMA_THRESHOLD; | 32 | u64 mask = (u64)arm_dma_limit; |
31 | 33 | ||
32 | if (dev) { | 34 | if (dev) { |
33 | mask = dev->coherent_dma_mask; | 35 | mask = dev->coherent_dma_mask; |
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
41 | return 0; | 43 | return 0; |
42 | } | 44 | } |
43 | 45 | ||
44 | if ((~mask) & ISA_DMA_THRESHOLD) { | 46 | if ((~mask) & (u64)arm_dma_limit) { |
45 | dev_warn(dev, "coherent DMA mask %#llx is smaller " | 47 | dev_warn(dev, "coherent DMA mask %#llx is smaller " |
46 | "than system GFP_DMA mask %#llx\n", | 48 | "than system GFP_DMA mask %#llx\n", |
47 | mask, (unsigned long long)ISA_DMA_THRESHOLD); | 49 | mask, (u64)arm_dma_limit); |
48 | return 0; | 50 | return 0; |
49 | } | 51 | } |
50 | } | 52 | } |
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
657 | } | 659 | } |
658 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 660 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
659 | 661 | ||
662 | /* | ||
663 | * Return whether the given device DMA address mask can be supported | ||
664 | * properly. For example, if your device can only drive the low 24-bits | ||
665 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
666 | * to this function. | ||
667 | */ | ||
668 | int dma_supported(struct device *dev, u64 mask) | ||
669 | { | ||
670 | if (mask < (u64)arm_dma_limit) | ||
671 | return 0; | ||
672 | return 1; | ||
673 | } | ||
674 | EXPORT_SYMBOL(dma_supported); | ||
675 | |||
676 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
677 | { | ||
678 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
679 | return -EIO; | ||
680 | |||
681 | #ifndef CONFIG_DMABOUNCE | ||
682 | *dev->dma_mask = dma_mask; | ||
683 | #endif | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | EXPORT_SYMBOL(dma_set_mask); | ||
688 | |||
660 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 689 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
661 | 690 | ||
662 | static int __init dma_debug_do_init(void) | 691 | static int __init dma_debug_do_init(void) |
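With dma_supported()/dma_set_mask() now defined here in terms of arm_dma_limit, a driver request looks like this; a hypothetical probe fragment for illustration only:

    static int example_probe(struct platform_device *pdev)
    {
        /* Ask for a 24-bit streaming mask; this now fails with -EIO unless
         * arm_dma_limit (the top of the GFP_DMA range) fits under the mask. */
        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
            return -EIO;

        /* ... normal resource setup continues here ... */
        return 0;
    }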
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 9ea4f7ddd665..3b5ea68acbb8 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
94 | 94 | ||
95 | pud = pud_offset(pgd, addr); | 95 | pud = pud_offset(pgd, addr); |
96 | if (PTRS_PER_PUD != 1) | 96 | if (PTRS_PER_PUD != 1) |
97 | printk(", *pud=%08lx", pud_val(*pud)); | 97 | printk(", *pud=%08llx", (long long)pud_val(*pud)); |
98 | 98 | ||
99 | if (pud_none(*pud)) | 99 | if (pud_none(*pud)) |
100 | break; | 100 | break; |
@@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
285 | tsk = current; | 285 | tsk = current; |
286 | mm = tsk->mm; | 286 | mm = tsk->mm; |
287 | 287 | ||
288 | /* Enable interrupts if they were enabled in the parent context. */ | ||
289 | if (interrupts_enabled(regs)) | ||
290 | local_irq_enable(); | ||
291 | |||
288 | /* | 292 | /* |
289 | * If we're in an interrupt or have no user | 293 | * If we're in an interrupt or have no user |
290 | * context, we must not take the fault.. | 294 | * context, we must not take the fault.. |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c40a21..2fee782077c1 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -212,6 +212,18 @@ static void __init arm_bootmem_init(unsigned long start_pfn, | |||
212 | } | 212 | } |
213 | 213 | ||
214 | #ifdef CONFIG_ZONE_DMA | 214 | #ifdef CONFIG_ZONE_DMA |
215 | |||
216 | unsigned long arm_dma_zone_size __read_mostly; | ||
217 | EXPORT_SYMBOL(arm_dma_zone_size); | ||
218 | |||
219 | /* | ||
220 | * The DMA mask corresponding to the maximum bus address allocatable | ||
221 | * using GFP_DMA. The default here places no restriction on DMA | ||
222 | * allocations. This must be the smallest DMA mask in the system, | ||
223 | * so a successful GFP_DMA allocation will always satisfy this. | ||
224 | */ | ||
225 | u32 arm_dma_limit; | ||
226 | |||
215 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 227 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
216 | unsigned long dma_size) | 228 | unsigned long dma_size) |
217 | { | 229 | { |
@@ -267,17 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | |||
267 | #endif | 279 | #endif |
268 | } | 280 | } |
269 | 281 | ||
270 | #ifdef ARM_DMA_ZONE_SIZE | 282 | #ifdef CONFIG_ZONE_DMA |
271 | #ifndef CONFIG_ZONE_DMA | ||
272 | #error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations | ||
273 | #endif | ||
274 | |||
275 | /* | 283 | /* |
276 | * Adjust the sizes according to any special requirements for | 284 | * Adjust the sizes according to any special requirements for |
277 | * this machine type. | 285 | * this machine type. |
278 | */ | 286 | */ |
279 | arm_adjust_dma_zone(zone_size, zhole_size, | 287 | if (arm_dma_zone_size) { |
280 | ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); | 288 | arm_adjust_dma_zone(zone_size, zhole_size, |
289 | arm_dma_zone_size >> PAGE_SHIFT); | ||
290 | arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; | ||
291 | } else | ||
292 | arm_dma_limit = 0xffffffff; | ||
281 | #endif | 293 | #endif |
282 | 294 | ||
283 | free_area_init_node(0, zone_size, min, zhole_size); | 295 | free_area_init_node(0, zone_size, min, zhole_size); |
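Worked numbers for the new runtime path (illustrative, mirroring the 4MB DMA zone that mach-shark previously requested via ARM_DMA_ZONE_SIZE in the hunk near the top of this series):

    /* With PHYS_OFFSET = 0x08000000 and arm_dma_zone_size = SZ_4M:
     *   arm_dma_limit = 0x08000000 + 0x00400000 - 1 = 0x083fffff
     * With arm_dma_zone_size left at 0, arm_dma_limit stays 0xffffffff,
     * i.e. GFP_DMA allocations are not restricted at all. */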
@@ -422,6 +434,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) | |||
422 | return pages; | 434 | return pages; |
423 | } | 435 | } |
424 | 436 | ||
437 | /* | ||
438 | * Poison init memory with an undefined instruction (ARM) or a branch to an | ||
439 | * undefined instruction (Thumb). | ||
440 | */ | ||
441 | static inline void poison_init_mem(void *s, size_t count) | ||
442 | { | ||
443 | u32 *p = (u32 *)s; | ||
444 | while ((count = count - 4)) | ||
445 | *p++ = 0xe7fddef0; | ||
446 | } | ||
447 | |||
425 | static inline void | 448 | static inline void |
426 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) | 449 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
427 | { | 450 | { |
@@ -639,8 +662,8 @@ void __init mem_init(void) | |||
639 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | 662 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" |
640 | #endif | 663 | #endif |
641 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | 664 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" |
642 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
643 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | 665 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" |
666 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
644 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" | 667 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" |
645 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", | 668 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", |
646 | 669 | ||
@@ -662,8 +685,8 @@ void __init mem_init(void) | |||
662 | #endif | 685 | #endif |
663 | MLM(MODULES_VADDR, MODULES_END), | 686 | MLM(MODULES_VADDR, MODULES_END), |
664 | 687 | ||
665 | MLK_ROUNDUP(__init_begin, __init_end), | ||
666 | MLK_ROUNDUP(_text, _etext), | 688 | MLK_ROUNDUP(_text, _etext), |
689 | MLK_ROUNDUP(__init_begin, __init_end), | ||
667 | MLK_ROUNDUP(_sdata, _edata), | 690 | MLK_ROUNDUP(_sdata, _edata), |
668 | MLK_ROUNDUP(__bss_start, __bss_stop)); | 691 | MLK_ROUNDUP(__bss_start, __bss_stop)); |
669 | 692 | ||
@@ -704,11 +727,13 @@ void free_initmem(void) | |||
704 | #ifdef CONFIG_HAVE_TCM | 727 | #ifdef CONFIG_HAVE_TCM |
705 | extern char __tcm_start, __tcm_end; | 728 | extern char __tcm_start, __tcm_end; |
706 | 729 | ||
730 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); | ||
707 | totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), | 731 | totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), |
708 | __phys_to_pfn(__pa(&__tcm_end)), | 732 | __phys_to_pfn(__pa(&__tcm_end)), |
709 | "TCM link"); | 733 | "TCM link"); |
710 | #endif | 734 | #endif |
711 | 735 | ||
736 | poison_init_mem(__init_begin, __init_end - __init_begin); | ||
712 | if (!machine_is_integrator() && !machine_is_cintegrator()) | 737 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
713 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), | 738 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), |
714 | __phys_to_pfn(__pa(__init_end)), | 739 | __phys_to_pfn(__pa(__init_end)), |
@@ -721,10 +746,12 @@ static int keep_initrd; | |||
721 | 746 | ||
722 | void free_initrd_mem(unsigned long start, unsigned long end) | 747 | void free_initrd_mem(unsigned long start, unsigned long end) |
723 | { | 748 | { |
724 | if (!keep_initrd) | 749 | if (!keep_initrd) { |
750 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | ||
725 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), | 751 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), |
726 | __phys_to_pfn(__pa(end)), | 752 | __phys_to_pfn(__pa(end)), |
727 | "initrd"); | 753 | "initrd"); |
754 | } | ||
728 | } | 755 | } |
729 | 756 | ||
730 | static int __init keepinitrd_setup(char *__unused) | 757 | static int __init keepinitrd_setup(char *__unused) |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5b3d7d543659..010566799c80 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
23 | 23 | ||
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifdef CONFIG_ZONE_DMA | ||
27 | extern u32 arm_dma_limit; | ||
28 | #else | ||
29 | #define arm_dma_limit ((u32)~0) | ||
30 | #endif | ||
31 | |||
26 | void __init bootmem_init(void); | 32 | void __init bootmem_init(void); |
27 | void arm_mm_memblock_reserve(void); | 33 | void arm_mm_memblock_reserve(void); |
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 87970eba88ea..8bbff025269a 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S | |||
@@ -4,16 +4,18 @@ | |||
4 | /* | 4 | /* |
5 | * Function: legacy_pabort | 5 | * Function: legacy_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = Simulated IFSR with section translation fault status | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | .align 5 | 16 | .align 5 |
16 | ENTRY(legacy_pabort) | 17 | ENTRY(legacy_pabort) |
18 | mov r0, r4 | ||
17 | mov r1, #5 | 19 | mov r1, #5 |
18 | mov pc, lr | 20 | b do_PrefetchAbort |
19 | ENDPROC(legacy_pabort) | 21 | ENDPROC(legacy_pabort) |
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index 06e3d1ef2115..9627646ce783 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S | |||
@@ -4,16 +4,18 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v6_pabort | 5 | * Function: v6_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = IFSR | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | .align 5 | 16 | .align 5 |
16 | ENTRY(v6_pabort) | 17 | ENTRY(v6_pabort) |
18 | mov r0, r4 | ||
17 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | 19 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR |
18 | mov pc, lr | 20 | b do_PrefetchAbort |
19 | ENDPROC(v6_pabort) | 21 | ENDPROC(v6_pabort) |
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index a8b3b300a18d..875761f44f3b 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S | |||
@@ -2,12 +2,13 @@ | |||
2 | #include <asm/assembler.h> | 2 | #include <asm/assembler.h> |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Function: v6_pabort | 5 | * Function: v7_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = IFSR | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
@@ -16,5 +17,5 @@ | |||
16 | ENTRY(v7_pabort) | 17 | ENTRY(v7_pabort) |
17 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR | 18 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR |
18 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | 19 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR |
19 | mov pc, lr | 20 | b do_PrefetchAbort |
20 | ENDPROC(v7_pabort) | 21 | ENDPROC(v7_pabort) |
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 6c4e7fd6c8af..67469665d47a 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -364,17 +364,8 @@ ENTRY(arm1020_dma_unmap_area) | |||
364 | mov pc, lr | 364 | mov pc, lr |
365 | ENDPROC(arm1020_dma_unmap_area) | 365 | ENDPROC(arm1020_dma_unmap_area) |
366 | 366 | ||
367 | ENTRY(arm1020_cache_fns) | 367 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
368 | .long arm1020_flush_icache_all | 368 | define_cache_functions arm1020 |
369 | .long arm1020_flush_kern_cache_all | ||
370 | .long arm1020_flush_user_cache_all | ||
371 | .long arm1020_flush_user_cache_range | ||
372 | .long arm1020_coherent_kern_range | ||
373 | .long arm1020_coherent_user_range | ||
374 | .long arm1020_flush_kern_dcache_area | ||
375 | .long arm1020_dma_map_area | ||
376 | .long arm1020_dma_unmap_area | ||
377 | .long arm1020_dma_flush_range | ||
378 | 369 | ||
379 | .align 5 | 370 | .align 5 |
380 | ENTRY(cpu_arm1020_dcache_clean_area) | 371 | ENTRY(cpu_arm1020_dcache_clean_area) |
@@ -477,38 +468,14 @@ arm1020_crval: | |||
477 | crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 | 468 | crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 |
478 | 469 | ||
479 | __INITDATA | 470 | __INITDATA |
471 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | ||
472 | define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort | ||
480 | 473 | ||
481 | /* | ||
482 | * Purpose : Function pointers used to access above functions - all calls | ||
483 | * come through these | ||
484 | */ | ||
485 | .type arm1020_processor_functions, #object | ||
486 | arm1020_processor_functions: | ||
487 | .word v4t_early_abort | ||
488 | .word legacy_pabort | ||
489 | .word cpu_arm1020_proc_init | ||
490 | .word cpu_arm1020_proc_fin | ||
491 | .word cpu_arm1020_reset | ||
492 | .word cpu_arm1020_do_idle | ||
493 | .word cpu_arm1020_dcache_clean_area | ||
494 | .word cpu_arm1020_switch_mm | ||
495 | .word cpu_arm1020_set_pte_ext | ||
496 | .word 0 | ||
497 | .word 0 | ||
498 | .word 0 | ||
499 | .size arm1020_processor_functions, . - arm1020_processor_functions | ||
500 | 474 | ||
501 | .section ".rodata" | 475 | .section ".rodata" |
502 | 476 | ||
503 | .type cpu_arch_name, #object | 477 | string cpu_arch_name, "armv5t" |
504 | cpu_arch_name: | 478 | string cpu_elf_name, "v5" |
505 | .asciz "armv5t" | ||
506 | .size cpu_arch_name, . - cpu_arch_name | ||
507 | |||
508 | .type cpu_elf_name, #object | ||
509 | cpu_elf_name: | ||
510 | .asciz "v5" | ||
511 | .size cpu_elf_name, . - cpu_elf_name | ||
512 | 479 | ||
513 | .type cpu_arm1020_name, #object | 480 | .type cpu_arm1020_name, #object |
514 | cpu_arm1020_name: | 481 | cpu_arm1020_name: |
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 4ce947c19623..4251421c0ed5 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -350,17 +350,8 @@ ENTRY(arm1020e_dma_unmap_area) | |||
350 | mov pc, lr | 350 | mov pc, lr |
351 | ENDPROC(arm1020e_dma_unmap_area) | 351 | ENDPROC(arm1020e_dma_unmap_area) |
352 | 352 | ||
353 | ENTRY(arm1020e_cache_fns) | 353 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
354 | .long arm1020e_flush_icache_all | 354 | define_cache_functions arm1020e |
355 | .long arm1020e_flush_kern_cache_all | ||
356 | .long arm1020e_flush_user_cache_all | ||
357 | .long arm1020e_flush_user_cache_range | ||
358 | .long arm1020e_coherent_kern_range | ||
359 | .long arm1020e_coherent_user_range | ||
360 | .long arm1020e_flush_kern_dcache_area | ||
361 | .long arm1020e_dma_map_area | ||
362 | .long arm1020e_dma_unmap_area | ||
363 | .long arm1020e_dma_flush_range | ||
364 | 355 | ||
365 | .align 5 | 356 | .align 5 |
366 | ENTRY(cpu_arm1020e_dcache_clean_area) | 357 | ENTRY(cpu_arm1020e_dcache_clean_area) |
@@ -458,43 +449,14 @@ arm1020e_crval: | |||
458 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 | 449 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 |
459 | 450 | ||
460 | __INITDATA | 451 | __INITDATA |
461 | 452 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
462 | /* | 453 | define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort |
463 | * Purpose : Function pointers used to access above functions - all calls | ||
464 | * come through these | ||
465 | */ | ||
466 | .type arm1020e_processor_functions, #object | ||
467 | arm1020e_processor_functions: | ||
468 | .word v4t_early_abort | ||
469 | .word legacy_pabort | ||
470 | .word cpu_arm1020e_proc_init | ||
471 | .word cpu_arm1020e_proc_fin | ||
472 | .word cpu_arm1020e_reset | ||
473 | .word cpu_arm1020e_do_idle | ||
474 | .word cpu_arm1020e_dcache_clean_area | ||
475 | .word cpu_arm1020e_switch_mm | ||
476 | .word cpu_arm1020e_set_pte_ext | ||
477 | .word 0 | ||
478 | .word 0 | ||
479 | .word 0 | ||
480 | .size arm1020e_processor_functions, . - arm1020e_processor_functions | ||
481 | 454 | ||
482 | .section ".rodata" | 455 | .section ".rodata" |
483 | 456 | ||
484 | .type cpu_arch_name, #object | 457 | string cpu_arch_name, "armv5te" |
485 | cpu_arch_name: | 458 | string cpu_elf_name, "v5" |
486 | .asciz "armv5te" | 459 | string cpu_arm1020e_name, "ARM1020E" |
487 | .size cpu_arch_name, . - cpu_arch_name | ||
488 | |||
489 | .type cpu_elf_name, #object | ||
490 | cpu_elf_name: | ||
491 | .asciz "v5" | ||
492 | .size cpu_elf_name, . - cpu_elf_name | ||
493 | |||
494 | .type cpu_arm1020e_name, #object | ||
495 | cpu_arm1020e_name: | ||
496 | .asciz "ARM1020E" | ||
497 | .size cpu_arm1020e_name, . - cpu_arm1020e_name | ||
498 | 460 | ||
499 | .align | 461 | .align |
500 | 462 | ||
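Each define_cache_functions <name> call above stands in for the ten-entry <name>_cache_fns jump table that used to spell out struct cpu_cache_fns by hand (see <asm/cacheflush.h>). Reconstructed from the deleted arm1020e table, a plausible sketch of the macro is:

	@ Sketch reconstructed from the removed ENTRY(arm1020e_cache_fns) block;
	@ the real definition lives in arch/arm/mm/proc-macros.S.
	.macro	define_cache_functions name:req
	.align	2
	.type	\name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
	.long	\name\()_flush_icache_all
	.long	\name\()_flush_kern_cache_all
	.long	\name\()_flush_user_cache_all
	.long	\name\()_flush_user_cache_range
	.long	\name\()_coherent_kern_range
	.long	\name\()_coherent_user_range
	.long	\name\()_flush_kern_dcache_area
	.long	\name\()_dma_map_area
	.long	\name\()_dma_unmap_area
	.long	\name\()_dma_flush_range
	.size	\name\()_cache_fns, . - \name\()_cache_fns
	.endm

So define_cache_functions arm1020e reproduces the deleted table field for field; the field order of struct cpu_cache_fns is now encoded once rather than once per CPU.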
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index c8884c5413a2..d283cf3d06e3 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -339,17 +339,8 @@ ENTRY(arm1022_dma_unmap_area) | |||
339 | mov pc, lr | 339 | mov pc, lr |
340 | ENDPROC(arm1022_dma_unmap_area) | 340 | ENDPROC(arm1022_dma_unmap_area) |
341 | 341 | ||
342 | ENTRY(arm1022_cache_fns) | 342 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
343 | .long arm1022_flush_icache_all | 343 | define_cache_functions arm1022 |
344 | .long arm1022_flush_kern_cache_all | ||
345 | .long arm1022_flush_user_cache_all | ||
346 | .long arm1022_flush_user_cache_range | ||
347 | .long arm1022_coherent_kern_range | ||
348 | .long arm1022_coherent_user_range | ||
349 | .long arm1022_flush_kern_dcache_area | ||
350 | .long arm1022_dma_map_area | ||
351 | .long arm1022_dma_unmap_area | ||
352 | .long arm1022_dma_flush_range | ||
353 | 344 | ||
354 | .align 5 | 345 | .align 5 |
355 | ENTRY(cpu_arm1022_dcache_clean_area) | 346 | ENTRY(cpu_arm1022_dcache_clean_area) |
@@ -441,43 +432,14 @@ arm1022_crval: | |||
441 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 | 432 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 |
442 | 433 | ||
443 | __INITDATA | 434 | __INITDATA |
444 | 435 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
445 | /* | 436 | define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort |
446 | * Purpose : Function pointers used to access above functions - all calls | ||
447 | * come through these | ||
448 | */ | ||
449 | .type arm1022_processor_functions, #object | ||
450 | arm1022_processor_functions: | ||
451 | .word v4t_early_abort | ||
452 | .word legacy_pabort | ||
453 | .word cpu_arm1022_proc_init | ||
454 | .word cpu_arm1022_proc_fin | ||
455 | .word cpu_arm1022_reset | ||
456 | .word cpu_arm1022_do_idle | ||
457 | .word cpu_arm1022_dcache_clean_area | ||
458 | .word cpu_arm1022_switch_mm | ||
459 | .word cpu_arm1022_set_pte_ext | ||
460 | .word 0 | ||
461 | .word 0 | ||
462 | .word 0 | ||
463 | .size arm1022_processor_functions, . - arm1022_processor_functions | ||
464 | 437 | ||
465 | .section ".rodata" | 438 | .section ".rodata" |
466 | 439 | ||
467 | .type cpu_arch_name, #object | 440 | string cpu_arch_name, "armv5te" |
468 | cpu_arch_name: | 441 | string cpu_elf_name, "v5" |
469 | .asciz "armv5te" | 442 | string cpu_arm1022_name, "ARM1022" |
470 | .size cpu_arch_name, . - cpu_arch_name | ||
471 | |||
472 | .type cpu_elf_name, #object | ||
473 | cpu_elf_name: | ||
474 | .asciz "v5" | ||
475 | .size cpu_elf_name, . - cpu_elf_name | ||
476 | |||
477 | .type cpu_arm1022_name, #object | ||
478 | cpu_arm1022_name: | ||
479 | .asciz "ARM1022" | ||
480 | .size cpu_arm1022_name, . - cpu_arm1022_name | ||
481 | 443 | ||
482 | .align | 444 | .align |
483 | 445 | ||
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 413684660aad..678a1ceafed2 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -333,17 +333,8 @@ ENTRY(arm1026_dma_unmap_area) | |||
333 | mov pc, lr | 333 | mov pc, lr |
334 | ENDPROC(arm1026_dma_unmap_area) | 334 | ENDPROC(arm1026_dma_unmap_area) |
335 | 335 | ||
336 | ENTRY(arm1026_cache_fns) | 336 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
337 | .long arm1026_flush_icache_all | 337 | define_cache_functions arm1026 |
338 | .long arm1026_flush_kern_cache_all | ||
339 | .long arm1026_flush_user_cache_all | ||
340 | .long arm1026_flush_user_cache_range | ||
341 | .long arm1026_coherent_kern_range | ||
342 | .long arm1026_coherent_user_range | ||
343 | .long arm1026_flush_kern_dcache_area | ||
344 | .long arm1026_dma_map_area | ||
345 | .long arm1026_dma_unmap_area | ||
346 | .long arm1026_dma_flush_range | ||
347 | 338 | ||
348 | .align 5 | 339 | .align 5 |
349 | ENTRY(cpu_arm1026_dcache_clean_area) | 340 | ENTRY(cpu_arm1026_dcache_clean_area) |
@@ -436,45 +427,15 @@ arm1026_crval: | |||
436 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 | 427 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 |
437 | 428 | ||
438 | __INITDATA | 429 | __INITDATA |
439 | 430 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
440 | /* | 431 | define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort |
441 | * Purpose : Function pointers used to access above functions - all calls | ||
442 | * come through these | ||
443 | */ | ||
444 | .type arm1026_processor_functions, #object | ||
445 | arm1026_processor_functions: | ||
446 | .word v5t_early_abort | ||
447 | .word legacy_pabort | ||
448 | .word cpu_arm1026_proc_init | ||
449 | .word cpu_arm1026_proc_fin | ||
450 | .word cpu_arm1026_reset | ||
451 | .word cpu_arm1026_do_idle | ||
452 | .word cpu_arm1026_dcache_clean_area | ||
453 | .word cpu_arm1026_switch_mm | ||
454 | .word cpu_arm1026_set_pte_ext | ||
455 | .word 0 | ||
456 | .word 0 | ||
457 | .word 0 | ||
458 | .size arm1026_processor_functions, . - arm1026_processor_functions | ||
459 | 432 | ||
460 | .section .rodata | 433 | .section .rodata |
461 | 434 | ||
462 | .type cpu_arch_name, #object | 435 | string cpu_arch_name, "armv5tej" |
463 | cpu_arch_name: | 436 | string cpu_elf_name, "v5" |
464 | .asciz "armv5tej" | ||
465 | .size cpu_arch_name, . - cpu_arch_name | ||
466 | |||
467 | .type cpu_elf_name, #object | ||
468 | cpu_elf_name: | ||
469 | .asciz "v5" | ||
470 | .size cpu_elf_name, . - cpu_elf_name | ||
471 | .align | 437 | .align |
472 | 438 | string cpu_arm1026_name, "ARM1026EJ-S" | |
473 | .type cpu_arm1026_name, #object | ||
474 | cpu_arm1026_name: | ||
475 | .asciz "ARM1026EJ-S" | ||
476 | .size cpu_arm1026_name, . - cpu_arm1026_name | ||
477 | |||
478 | .align | 439 | .align |
479 | 440 | ||
480 | .section ".proc.info.init", #alloc, #execinstr | 441 | .section ".proc.info.init", #alloc, #execinstr |
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 5f79dc4ce3fb..e5b974cddac3 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
@@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area) | |||
29 | /* | 29 | /* |
30 | * Function: arm6_7_data_abort () | 30 | * Function: arm6_7_data_abort () |
31 | * | 31 | * |
32 | * Params : r2 = address of aborted instruction | 32 | * Params : r2 = pt_regs |
33 | * : sp = pointer to registers | 33 | * : r4 = aborted context pc |
34 | * : r5 = aborted context psr | ||
34 | * | 35 | * |
35 | * Purpose : obtain information about current aborted instruction | 36 | * Purpose : obtain information about current aborted instruction |
36 | * | 37 | * |
37 | * Returns : r0 = address of abort | 38 | * Returns : r4-r5, r10-r11, r13 preserved |
38 | * : r1 = FSR | ||
39 | */ | 39 | */ |
40 | 40 | ||
41 | ENTRY(cpu_arm7_data_abort) | 41 | ENTRY(cpu_arm7_data_abort) |
42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
44 | ldr r8, [r2] @ read arm instruction | 44 | ldr r8, [r4] @ read arm instruction |
45 | tst r8, #1 << 20 @ L = 0 -> write? | 45 | tst r8, #1 << 20 @ L = 0 -> write? |
46 | orreq r1, r1, #1 << 11 @ yes. | 46 | orreq r1, r1, #1 << 11 @ yes. |
47 | and r7, r8, #15 << 24 | 47 | and r7, r8, #15 << 24 |
@@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort) | |||
49 | nop | 49 | nop |
50 | 50 | ||
51 | /* 0 */ b .data_unknown | 51 | /* 0 */ b .data_unknown |
52 | /* 1 */ mov pc, lr @ swp | 52 | /* 1 */ b do_DataAbort @ swp |
53 | /* 2 */ b .data_unknown | 53 | /* 2 */ b .data_unknown |
54 | /* 3 */ b .data_unknown | 54 | /* 3 */ b .data_unknown |
55 | /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m | 55 | /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m |
@@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort) | |||
60 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> | 60 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> |
61 | /* a */ b .data_unknown | 61 | /* a */ b .data_unknown |
62 | /* b */ b .data_unknown | 62 | /* b */ b .data_unknown |
63 | /* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m | 63 | /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m |
64 | /* d */ mov pc, lr @ ldc rd, [rn, #m] | 64 | /* d */ b do_DataAbort @ ldc rd, [rn, #m] |
65 | /* e */ b .data_unknown | 65 | /* e */ b .data_unknown |
66 | /* f */ | 66 | /* f */ |
67 | .data_unknown: @ Part of jumptable | 67 | .data_unknown: @ Part of jumptable |
68 | mov r0, r2 | 68 | mov r0, r4 |
69 | mov r1, r8 | 69 | mov r1, r8 |
70 | mov r2, sp | 70 | b baddataabort |
71 | bl baddataabort | ||
72 | b ret_from_exception | ||
73 | 71 | ||
74 | ENTRY(cpu_arm6_data_abort) | 72 | ENTRY(cpu_arm6_data_abort) |
75 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 73 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
76 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 74 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
77 | ldr r8, [r2] @ read arm instruction | 75 | ldr r8, [r4] @ read arm instruction |
78 | tst r8, #1 << 20 @ L = 0 -> write? | 76 | tst r8, #1 << 20 @ L = 0 -> write? |
79 | orreq r1, r1, #1 << 11 @ yes. | 77 | orreq r1, r1, #1 << 11 @ yes. |
80 | and r7, r8, #14 << 24 | 78 | and r7, r8, #14 << 24 |
81 | teq r7, #8 << 24 @ was it ldm/stm | 79 | teq r7, #8 << 24 @ was it ldm/stm |
82 | movne pc, lr | 80 | bne do_DataAbort |
83 | 81 | ||
84 | .data_arm_ldmstm: | 82 | .data_arm_ldmstm: |
85 | tst r8, #1 << 21 @ check writeback bit | 83 | tst r8, #1 << 21 @ check writeback bit |
86 | moveq pc, lr @ no writeback -> no fixup | 84 | beq do_DataAbort @ no writeback -> no fixup |
87 | mov r7, #0x11 | 85 | mov r7, #0x11 |
88 | orr r7, r7, #0x1100 | 86 | orr r7, r7, #0x1100 |
89 | and r6, r8, r7 | 87 | and r6, r8, r7 |
90 | and r2, r8, r7, lsl #1 | 88 | and r9, r8, r7, lsl #1 |
91 | add r6, r6, r2, lsr #1 | 89 | add r6, r6, r9, lsr #1 |
92 | and r2, r8, r7, lsl #2 | 90 | and r9, r8, r7, lsl #2 |
93 | add r6, r6, r2, lsr #2 | 91 | add r6, r6, r9, lsr #2 |
94 | and r2, r8, r7, lsl #3 | 92 | and r9, r8, r7, lsl #3 |
95 | add r6, r6, r2, lsr #3 | 93 | add r6, r6, r9, lsr #3 |
96 | add r6, r6, r6, lsr #8 | 94 | add r6, r6, r6, lsr #8 |
97 | add r6, r6, r6, lsr #4 | 95 | add r6, r6, r6, lsr #4 |
98 | and r6, r6, #15 @ r6 = no. of registers to transfer. | 96 | and r6, r6, #15 @ r6 = no. of registers to transfer. |
99 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 97 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
100 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 98 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
101 | tst r8, #1 << 23 @ Check U bit | 99 | tst r8, #1 << 23 @ Check U bit |
102 | subne r7, r7, r6, lsl #2 @ Undo increment | 100 | subne r7, r7, r6, lsl #2 @ Undo increment |
103 | addeq r7, r7, r6, lsl #2 @ Undo decrement | 101 | addeq r7, r7, r6, lsl #2 @ Undo decrement |
104 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 102 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
105 | mov pc, lr | 103 | b do_DataAbort |
106 | 104 | ||
107 | .data_arm_apply_r6_and_rn: | 105 | .data_arm_apply_r6_and_rn: |
108 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 106 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
109 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 107 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
110 | tst r8, #1 << 23 @ Check U bit | 108 | tst r8, #1 << 23 @ Check U bit |
111 | subne r7, r7, r6 @ Undo increment | 109 | subne r7, r7, r6 @ Undo increment |
112 | addeq r7, r7, r6 @ Undo decrement | 110 | addeq r7, r7, r6 @ Undo decrement |
113 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 111 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
114 | mov pc, lr | 112 | b do_DataAbort |
115 | 113 | ||
116 | .data_arm_lateldrpreconst: | 114 | .data_arm_lateldrpreconst: |
117 | tst r8, #1 << 21 @ check writeback bit | 115 | tst r8, #1 << 21 @ check writeback bit |
118 | moveq pc, lr @ no writeback -> no fixup | 116 | beq do_DataAbort @ no writeback -> no fixup |
119 | .data_arm_lateldrpostconst: | 117 | .data_arm_lateldrpostconst: |
120 | movs r2, r8, lsl #20 @ Get offset | 118 | movs r6, r8, lsl #20 @ Get offset |
121 | moveq pc, lr @ zero -> no fixup | 119 | beq do_DataAbort @ zero -> no fixup |
122 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 120 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
123 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 121 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
124 | tst r8, #1 << 23 @ Check U bit | 122 | tst r8, #1 << 23 @ Check U bit |
125 | subne r7, r7, r2, lsr #20 @ Undo increment | 123 | subne r7, r7, r6, lsr #20 @ Undo increment |
126 | addeq r7, r7, r2, lsr #20 @ Undo decrement | 124 | addeq r7, r7, r6, lsr #20 @ Undo decrement |
127 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 125 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
128 | mov pc, lr | 126 | b do_DataAbort |
129 | 127 | ||
130 | .data_arm_lateldrprereg: | 128 | .data_arm_lateldrprereg: |
131 | tst r8, #1 << 21 @ check writeback bit | 129 | tst r8, #1 << 21 @ check writeback bit |
132 | moveq pc, lr @ no writeback -> no fixup | 130 | beq do_DataAbort @ no writeback -> no fixup |
133 | .data_arm_lateldrpostreg: | 131 | .data_arm_lateldrpostreg: |
134 | and r7, r8, #15 @ Extract 'm' from instruction | 132 | and r7, r8, #15 @ Extract 'm' from instruction |
135 | ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' | 133 | ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' |
136 | mov r5, r8, lsr #7 @ get shift count | 134 | mov r9, r8, lsr #7 @ get shift count |
137 | ands r5, r5, #31 | 135 | ands r9, r9, #31 |
138 | and r7, r8, #0x70 @ get shift type | 136 | and r7, r8, #0x70 @ get shift type |
139 | orreq r7, r7, #8 @ shift count = 0 | 137 | orreq r7, r7, #8 @ shift count = 0 |
140 | add pc, pc, r7 | 138 | add pc, pc, r7 |
141 | nop | 139 | nop |
142 | 140 | ||
143 | mov r6, r6, lsl r5 @ 0: LSL #!0 | 141 | mov r6, r6, lsl r9 @ 0: LSL #!0 |
144 | b .data_arm_apply_r6_and_rn | 142 | b .data_arm_apply_r6_and_rn |
145 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 | 143 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 |
146 | nop | 144 | nop |
@@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort) | |||
148 | nop | 146 | nop |
149 | b .data_unknown @ 3: MUL? | 147 | b .data_unknown @ 3: MUL? |
150 | nop | 148 | nop |
151 | mov r6, r6, lsr r5 @ 4: LSR #!0 | 149 | mov r6, r6, lsr r9 @ 4: LSR #!0 |
152 | b .data_arm_apply_r6_and_rn | 150 | b .data_arm_apply_r6_and_rn |
153 | mov r6, r6, lsr #32 @ 5: LSR #32 | 151 | mov r6, r6, lsr #32 @ 5: LSR #32 |
154 | b .data_arm_apply_r6_and_rn | 152 | b .data_arm_apply_r6_and_rn |
@@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort) | |||
156 | nop | 154 | nop |
157 | b .data_unknown @ 7: MUL? | 155 | b .data_unknown @ 7: MUL? |
158 | nop | 156 | nop |
159 | mov r6, r6, asr r5 @ 8: ASR #!0 | 157 | mov r6, r6, asr r9 @ 8: ASR #!0 |
160 | b .data_arm_apply_r6_and_rn | 158 | b .data_arm_apply_r6_and_rn |
161 | mov r6, r6, asr #32 @ 9: ASR #32 | 159 | mov r6, r6, asr #32 @ 9: ASR #32 |
162 | b .data_arm_apply_r6_and_rn | 160 | b .data_arm_apply_r6_and_rn |
@@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort) | |||
164 | nop | 162 | nop |
165 | b .data_unknown @ B: MUL? | 163 | b .data_unknown @ B: MUL? |
166 | nop | 164 | nop |
167 | mov r6, r6, ror r5 @ C: ROR #!0 | 165 | mov r6, r6, ror r9 @ C: ROR #!0 |
168 | b .data_arm_apply_r6_and_rn | 166 | b .data_arm_apply_r6_and_rn |
169 | mov r6, r6, rrx @ D: RRX | 167 | mov r6, r6, rrx @ D: RRX |
170 | b .data_arm_apply_r6_and_rn | 168 | b .data_arm_apply_r6_and_rn |
@@ -269,159 +267,57 @@ __arm7_setup: mov r0, #0 | |||
269 | 267 | ||
270 | __INITDATA | 268 | __INITDATA |
271 | 269 | ||
272 | /* | 270 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
273 | * Purpose : Function pointers used to access above functions - all calls | 271 | define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort |
274 | * come through these | 272 | define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort |
275 | */ | ||
276 | .type arm6_processor_functions, #object | ||
277 | ENTRY(arm6_processor_functions) | ||
278 | .word cpu_arm6_data_abort | ||
279 | .word legacy_pabort | ||
280 | .word cpu_arm6_proc_init | ||
281 | .word cpu_arm6_proc_fin | ||
282 | .word cpu_arm6_reset | ||
283 | .word cpu_arm6_do_idle | ||
284 | .word cpu_arm6_dcache_clean_area | ||
285 | .word cpu_arm6_switch_mm | ||
286 | .word cpu_arm6_set_pte_ext | ||
287 | .word 0 | ||
288 | .word 0 | ||
289 | .word 0 | ||
290 | .size arm6_processor_functions, . - arm6_processor_functions | ||
291 | |||
292 | /* | ||
293 | * Purpose : Function pointers used to access above functions - all calls | ||
294 | * come through these | ||
295 | */ | ||
296 | .type arm7_processor_functions, #object | ||
297 | ENTRY(arm7_processor_functions) | ||
298 | .word cpu_arm7_data_abort | ||
299 | .word legacy_pabort | ||
300 | .word cpu_arm7_proc_init | ||
301 | .word cpu_arm7_proc_fin | ||
302 | .word cpu_arm7_reset | ||
303 | .word cpu_arm7_do_idle | ||
304 | .word cpu_arm7_dcache_clean_area | ||
305 | .word cpu_arm7_switch_mm | ||
306 | .word cpu_arm7_set_pte_ext | ||
307 | .word 0 | ||
308 | .word 0 | ||
309 | .word 0 | ||
310 | .size arm7_processor_functions, . - arm7_processor_functions | ||
311 | 273 | ||
312 | .section ".rodata" | 274 | .section ".rodata" |
313 | 275 | ||
314 | .type cpu_arch_name, #object | 276 | string cpu_arch_name, "armv3" |
315 | cpu_arch_name: .asciz "armv3" | 277 | string cpu_elf_name, "v3" |
316 | .size cpu_arch_name, . - cpu_arch_name | 278 | string cpu_arm6_name, "ARM6" |
317 | 279 | string cpu_arm610_name, "ARM610" | |
318 | .type cpu_elf_name, #object | 280 | string cpu_arm7_name, "ARM7" |
319 | cpu_elf_name: .asciz "v3" | 281 | string cpu_arm710_name, "ARM710" |
320 | .size cpu_elf_name, . - cpu_elf_name | ||
321 | |||
322 | .type cpu_arm6_name, #object | ||
323 | cpu_arm6_name: .asciz "ARM6" | ||
324 | .size cpu_arm6_name, . - cpu_arm6_name | ||
325 | |||
326 | .type cpu_arm610_name, #object | ||
327 | cpu_arm610_name: | ||
328 | .asciz "ARM610" | ||
329 | .size cpu_arm610_name, . - cpu_arm610_name | ||
330 | |||
331 | .type cpu_arm7_name, #object | ||
332 | cpu_arm7_name: .asciz "ARM7" | ||
333 | .size cpu_arm7_name, . - cpu_arm7_name | ||
334 | |||
335 | .type cpu_arm710_name, #object | ||
336 | cpu_arm710_name: | ||
337 | .asciz "ARM710" | ||
338 | .size cpu_arm710_name, . - cpu_arm710_name | ||
339 | 282 | ||
340 | .align | 283 | .align |
341 | 284 | ||
342 | .section ".proc.info.init", #alloc, #execinstr | 285 | .section ".proc.info.init", #alloc, #execinstr |
343 | 286 | ||
344 | .type __arm6_proc_info, #object | 287 | .macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ |
345 | __arm6_proc_info: | 288 | cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req |
346 | .long 0x41560600 | 289 | .type __\name\()_proc_info, #object |
347 | .long 0xfffffff0 | 290 | __\name\()_proc_info: |
348 | .long 0x00000c1e | 291 | .long \cpu_val |
349 | .long PMD_TYPE_SECT | \ | 292 | .long \cpu_mask |
350 | PMD_BIT4 | \ | 293 | .long \cpu_mm_mmu_flags |
351 | PMD_SECT_AP_WRITE | \ | ||
352 | PMD_SECT_AP_READ | ||
353 | b __arm6_setup | ||
354 | .long cpu_arch_name | ||
355 | .long cpu_elf_name | ||
356 | .long HWCAP_SWP | HWCAP_26BIT | ||
357 | .long cpu_arm6_name | ||
358 | .long arm6_processor_functions | ||
359 | .long v3_tlb_fns | ||
360 | .long v3_user_fns | ||
361 | .long v3_cache_fns | ||
362 | .size __arm6_proc_info, . - __arm6_proc_info | ||
363 | |||
364 | .type __arm610_proc_info, #object | ||
365 | __arm610_proc_info: | ||
366 | .long 0x41560610 | ||
367 | .long 0xfffffff0 | ||
368 | .long 0x00000c1e | ||
369 | .long PMD_TYPE_SECT | \ | 294 | .long PMD_TYPE_SECT | \ |
370 | PMD_BIT4 | \ | 295 | PMD_BIT4 | \ |
371 | PMD_SECT_AP_WRITE | \ | 296 | PMD_SECT_AP_WRITE | \ |
372 | PMD_SECT_AP_READ | 297 | PMD_SECT_AP_READ |
373 | b __arm6_setup | 298 | b \cpu_flush |
374 | .long cpu_arch_name | 299 | .long cpu_arch_name |
375 | .long cpu_elf_name | 300 | .long cpu_elf_name |
376 | .long HWCAP_SWP | HWCAP_26BIT | 301 | .long HWCAP_SWP | HWCAP_26BIT |
377 | .long cpu_arm610_name | 302 | .long \cpu_name |
378 | .long arm6_processor_functions | 303 | .long \cpu_proc_funcs |
379 | .long v3_tlb_fns | 304 | .long v3_tlb_fns |
380 | .long v3_user_fns | 305 | .long v3_user_fns |
381 | .long v3_cache_fns | 306 | .long v3_cache_fns |
382 | .size __arm610_proc_info, . - __arm610_proc_info | 307 | .size __\name\()_proc_info, . - __\name\()_proc_info |
383 | 308 | .endm | |
384 | .type __arm7_proc_info, #object | 309 | |
385 | __arm7_proc_info: | 310 | arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \ |
386 | .long 0x41007000 | 311 | 0x00000c1e, __arm6_setup, arm6_processor_functions |
387 | .long 0xffffff00 | 312 | arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \ |
388 | .long 0x00000c1e | 313 | 0x00000c1e, __arm6_setup, arm6_processor_functions |
389 | .long PMD_TYPE_SECT | \ | 314 | arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \ |
390 | PMD_BIT4 | \ | 315 | 0x00000c1e, __arm7_setup, arm7_processor_functions |
391 | PMD_SECT_AP_WRITE | \ | 316 | arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \ |
392 | PMD_SECT_AP_READ | 317 | PMD_TYPE_SECT | \ |
393 | b __arm7_setup | ||
394 | .long cpu_arch_name | ||
395 | .long cpu_elf_name | ||
396 | .long HWCAP_SWP | HWCAP_26BIT | ||
397 | .long cpu_arm7_name | ||
398 | .long arm7_processor_functions | ||
399 | .long v3_tlb_fns | ||
400 | .long v3_user_fns | ||
401 | .long v3_cache_fns | ||
402 | .size __arm7_proc_info, . - __arm7_proc_info | ||
403 | |||
404 | .type __arm710_proc_info, #object | ||
405 | __arm710_proc_info: | ||
406 | .long 0x41007100 | ||
407 | .long 0xfff8ff00 | ||
408 | .long PMD_TYPE_SECT | \ | ||
409 | PMD_SECT_BUFFERABLE | \ | 318 | PMD_SECT_BUFFERABLE | \ |
410 | PMD_SECT_CACHEABLE | \ | 319 | PMD_SECT_CACHEABLE | \ |
411 | PMD_BIT4 | \ | 320 | PMD_BIT4 | \ |
412 | PMD_SECT_AP_WRITE | \ | 321 | PMD_SECT_AP_WRITE | \ |
413 | PMD_SECT_AP_READ | 322 | PMD_SECT_AP_READ, \ |
414 | .long PMD_TYPE_SECT | \ | 323 | __arm7_setup, arm7_processor_functions |
415 | PMD_BIT4 | \ | ||
416 | PMD_SECT_AP_WRITE | \ | ||
417 | PMD_SECT_AP_READ | ||
418 | b __arm7_setup | ||
419 | .long cpu_arch_name | ||
420 | .long cpu_elf_name | ||
421 | .long HWCAP_SWP | HWCAP_26BIT | ||
422 | .long cpu_arm710_name | ||
423 | .long arm7_processor_functions | ||
424 | .long v3_tlb_fns | ||
425 | .long v3_user_fns | ||
426 | .long v3_cache_fns | ||
427 | .size __arm710_proc_info, . - __arm710_proc_info | ||
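Beyond the macro conversion, the cpu_arm6/arm7_data_abort hunks above show the new per-CPU data abort calling convention used by this series: the entry code now provides r2 = pt_regs and r4/r5 = the aborted context pc/psr, and the handler finishes by branching to do_DataAbort with r0 = fault address, r1 = fault status and r2 = pt_regs, instead of returning with mov pc, lr. A minimal handler following that shape (hypothetical name, no base-register fixup) would be:

#include <linux/linkage.h>

	.text
@ Hypothetical example of the new convention: r2 = pt_regs, r4 = aborted pc,
@ r5 = aborted psr on entry; tail-call do_DataAbort rather than returning.
ENTRY(cpu_example_data_abort)
	mrc	p15, 0, r1, c5, c0, 0	@ r1 = fault status (FSR)
	mrc	p15, 0, r0, c6, c0, 0	@ r0 = fault address (FAR)
	b	do_DataAbort		@ r2 still holds pt_regs
ENDPROC(cpu_example_data_abort)

The renaming of the scratch register from r2 to r9 throughout the ldm/stm fixup code exists precisely so that r2 can keep pointing at pt_regs for the whole handler.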
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 7a06e5964f59..55f4e290665a 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
@@ -169,46 +169,15 @@ arm720_crval: | |||
169 | crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 | 169 | crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 |
170 | 170 | ||
171 | __INITDATA | 171 | __INITDATA |
172 | 172 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
173 | /* | 173 | define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort |
174 | * Purpose : Function pointers used to access above functions - all calls | ||
175 | * come through these | ||
176 | */ | ||
177 | .type arm720_processor_functions, #object | ||
178 | ENTRY(arm720_processor_functions) | ||
179 | .word v4t_late_abort | ||
180 | .word legacy_pabort | ||
181 | .word cpu_arm720_proc_init | ||
182 | .word cpu_arm720_proc_fin | ||
183 | .word cpu_arm720_reset | ||
184 | .word cpu_arm720_do_idle | ||
185 | .word cpu_arm720_dcache_clean_area | ||
186 | .word cpu_arm720_switch_mm | ||
187 | .word cpu_arm720_set_pte_ext | ||
188 | .word 0 | ||
189 | .word 0 | ||
190 | .word 0 | ||
191 | .size arm720_processor_functions, . - arm720_processor_functions | ||
192 | 174 | ||
193 | .section ".rodata" | 175 | .section ".rodata" |
194 | 176 | ||
195 | .type cpu_arch_name, #object | 177 | string cpu_arch_name, "armv4t" |
196 | cpu_arch_name: .asciz "armv4t" | 178 | string cpu_elf_name, "v4" |
197 | .size cpu_arch_name, . - cpu_arch_name | 179 | string cpu_arm710_name, "ARM710T" |
198 | 180 | string cpu_arm720_name, "ARM720T" | |
199 | .type cpu_elf_name, #object | ||
200 | cpu_elf_name: .asciz "v4" | ||
201 | .size cpu_elf_name, . - cpu_elf_name | ||
202 | |||
203 | .type cpu_arm710_name, #object | ||
204 | cpu_arm710_name: | ||
205 | .asciz "ARM710T" | ||
206 | .size cpu_arm710_name, . - cpu_arm710_name | ||
207 | |||
208 | .type cpu_arm720_name, #object | ||
209 | cpu_arm720_name: | ||
210 | .asciz "ARM720T" | ||
211 | .size cpu_arm720_name, . - cpu_arm720_name | ||
212 | 181 | ||
213 | .align | 182 | .align |
214 | 183 | ||
@@ -218,10 +187,11 @@ cpu_arm720_name: | |||
218 | 187 | ||
219 | .section ".proc.info.init", #alloc, #execinstr | 188 | .section ".proc.info.init", #alloc, #execinstr |
220 | 189 | ||
221 | .type __arm710_proc_info, #object | 190 | .macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req |
222 | __arm710_proc_info: | 191 | .type __\name\()_proc_info,#object |
223 | .long 0x41807100 @ cpu_val | 192 | __\name\()_proc_info: |
224 | .long 0xffffff00 @ cpu_mask | 193 | .long \cpu_val |
194 | .long \cpu_mask | ||
225 | .long PMD_TYPE_SECT | \ | 195 | .long PMD_TYPE_SECT | \ |
226 | PMD_SECT_BUFFERABLE | \ | 196 | PMD_SECT_BUFFERABLE | \ |
227 | PMD_SECT_CACHEABLE | \ | 197 | PMD_SECT_CACHEABLE | \ |
@@ -232,38 +202,17 @@ __arm710_proc_info: | |||
232 | PMD_BIT4 | \ | 202 | PMD_BIT4 | \ |
233 | PMD_SECT_AP_WRITE | \ | 203 | PMD_SECT_AP_WRITE | \ |
234 | PMD_SECT_AP_READ | 204 | PMD_SECT_AP_READ |
235 | b __arm710_setup @ cpu_flush | 205 | b \cpu_flush @ cpu_flush |
236 | .long cpu_arch_name @ arch_name | 206 | .long cpu_arch_name @ arch_name |
237 | .long cpu_elf_name @ elf_name | 207 | .long cpu_elf_name @ elf_name |
238 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap | 208 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap |
239 | .long cpu_arm710_name @ name | 209 | .long \cpu_name |
240 | .long arm720_processor_functions | 210 | .long arm720_processor_functions |
241 | .long v4_tlb_fns | 211 | .long v4_tlb_fns |
242 | .long v4wt_user_fns | 212 | .long v4wt_user_fns |
243 | .long v4_cache_fns | 213 | .long v4_cache_fns |
244 | .size __arm710_proc_info, . - __arm710_proc_info | 214 | .size __\name\()_proc_info, . - __\name\()_proc_info |
215 | .endm | ||
245 | 216 | ||
246 | .type __arm720_proc_info, #object | 217 | arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup |
247 | __arm720_proc_info: | 218 | arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup |
248 | .long 0x41807200 @ cpu_val | ||
249 | .long 0xffffff00 @ cpu_mask | ||
250 | .long PMD_TYPE_SECT | \ | ||
251 | PMD_SECT_BUFFERABLE | \ | ||
252 | PMD_SECT_CACHEABLE | \ | ||
253 | PMD_BIT4 | \ | ||
254 | PMD_SECT_AP_WRITE | \ | ||
255 | PMD_SECT_AP_READ | ||
256 | .long PMD_TYPE_SECT | \ | ||
257 | PMD_BIT4 | \ | ||
258 | PMD_SECT_AP_WRITE | \ | ||
259 | PMD_SECT_AP_READ | ||
260 | b __arm720_setup @ cpu_flush | ||
261 | .long cpu_arch_name @ arch_name | ||
262 | .long cpu_elf_name @ elf_name | ||
263 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap | ||
264 | .long cpu_arm720_name @ name | ||
265 | .long arm720_processor_functions | ||
266 | .long v4_tlb_fns | ||
267 | .long v4wt_user_fns | ||
268 | .long v4_cache_fns | ||
269 | .size __arm720_proc_info, . - __arm720_proc_info | ||
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 6f9d12effee1..4506be3adda6 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | 19 | ||
20 | #include "proc-macros.S" | ||
21 | |||
20 | .text | 22 | .text |
21 | /* | 23 | /* |
22 | * cpu_arm740_proc_init() | 24 | * cpu_arm740_proc_init() |
@@ -115,42 +117,14 @@ __arm740_setup: | |||
115 | 117 | ||
116 | __INITDATA | 118 | __INITDATA |
117 | 119 | ||
118 | /* | 120 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
119 | * Purpose : Function pointers used to access above functions - all calls | 121 | define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 |
120 | * come through these | ||
121 | */ | ||
122 | .type arm740_processor_functions, #object | ||
123 | ENTRY(arm740_processor_functions) | ||
124 | .word v4t_late_abort | ||
125 | .word legacy_pabort | ||
126 | .word cpu_arm740_proc_init | ||
127 | .word cpu_arm740_proc_fin | ||
128 | .word cpu_arm740_reset | ||
129 | .word cpu_arm740_do_idle | ||
130 | .word cpu_arm740_dcache_clean_area | ||
131 | .word cpu_arm740_switch_mm | ||
132 | .word 0 @ cpu_*_set_pte | ||
133 | .word 0 | ||
134 | .word 0 | ||
135 | .word 0 | ||
136 | .size arm740_processor_functions, . - arm740_processor_functions | ||
137 | 122 | ||
138 | .section ".rodata" | 123 | .section ".rodata" |
139 | 124 | ||
140 | .type cpu_arch_name, #object | 125 | string cpu_arch_name, "armv4" |
141 | cpu_arch_name: | 126 | string cpu_elf_name, "v4" |
142 | .asciz "armv4" | 127 | string cpu_arm740_name, "ARM740T" |
143 | .size cpu_arch_name, . - cpu_arch_name | ||
144 | |||
145 | .type cpu_elf_name, #object | ||
146 | cpu_elf_name: | ||
147 | .asciz "v4" | ||
148 | .size cpu_elf_name, . - cpu_elf_name | ||
149 | |||
150 | .type cpu_arm740_name, #object | ||
151 | cpu_arm740_name: | ||
152 | .ascii "ARM740T" | ||
153 | .size cpu_arm740_name, . - cpu_arm740_name | ||
154 | 128 | ||
155 | .align | 129 | .align |
156 | 130 | ||
@@ -170,5 +144,3 @@ __arm740_proc_info: | |||
170 | .long 0 | 144 | .long 0 |
171 | .long v3_cache_fns @ cache model | 145 | .long v3_cache_fns @ cache model |
172 | .size __arm740_proc_info, . - __arm740_proc_info | 146 | .size __arm740_proc_info, . - __arm740_proc_info |
173 | |||
174 | |||
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 537ffcb0646d..7e0e1fe4ed4d 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | 19 | ||
20 | #include "proc-macros.S" | ||
21 | |||
20 | .text | 22 | .text |
21 | /* | 23 | /* |
22 | * cpu_arm7tdmi_proc_init() | 24 | * cpu_arm7tdmi_proc_init() |
@@ -55,197 +57,57 @@ __arm7tdmi_setup: | |||
55 | 57 | ||
56 | __INITDATA | 58 | __INITDATA |
57 | 59 | ||
58 | /* | 60 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
59 | * Purpose : Function pointers used to access above functions - all calls | 61 | define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 |
60 | * come through these | ||
61 | */ | ||
62 | .type arm7tdmi_processor_functions, #object | ||
63 | ENTRY(arm7tdmi_processor_functions) | ||
64 | .word v4t_late_abort | ||
65 | .word legacy_pabort | ||
66 | .word cpu_arm7tdmi_proc_init | ||
67 | .word cpu_arm7tdmi_proc_fin | ||
68 | .word cpu_arm7tdmi_reset | ||
69 | .word cpu_arm7tdmi_do_idle | ||
70 | .word cpu_arm7tdmi_dcache_clean_area | ||
71 | .word cpu_arm7tdmi_switch_mm | ||
72 | .word 0 @ cpu_*_set_pte | ||
73 | .word 0 | ||
74 | .word 0 | ||
75 | .word 0 | ||
76 | .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions | ||
77 | 62 | ||
78 | .section ".rodata" | 63 | .section ".rodata" |
79 | 64 | ||
80 | .type cpu_arch_name, #object | 65 | string cpu_arch_name, "armv4t" |
81 | cpu_arch_name: | 66 | string cpu_elf_name, "v4" |
82 | .asciz "armv4t" | 67 | string cpu_arm7tdmi_name, "ARM7TDMI" |
83 | .size cpu_arch_name, . - cpu_arch_name | 68 | string cpu_triscenda7_name, "Triscend-A7x" |
84 | 69 | string cpu_at91_name, "Atmel-AT91M40xxx" | |
85 | .type cpu_elf_name, #object | 70 | string cpu_s3c3410_name, "Samsung-S3C3410" |
86 | cpu_elf_name: | 71 | string cpu_s3c44b0x_name, "Samsung-S3C44B0x" |
87 | .asciz "v4" | 72 | string cpu_s3c4510b_name, "Samsung-S3C4510B" |
88 | .size cpu_elf_name, . - cpu_elf_name | 73 | string cpu_s3c4530_name, "Samsung-S3C4530" |
89 | 74 | string cpu_netarm_name, "NETARM" | |
90 | .type cpu_arm7tdmi_name, #object | ||
91 | cpu_arm7tdmi_name: | ||
92 | .asciz "ARM7TDMI" | ||
93 | .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name | ||
94 | |||
95 | .type cpu_triscenda7_name, #object | ||
96 | cpu_triscenda7_name: | ||
97 | .asciz "Triscend-A7x" | ||
98 | .size cpu_triscenda7_name, . - cpu_triscenda7_name | ||
99 | |||
100 | .type cpu_at91_name, #object | ||
101 | cpu_at91_name: | ||
102 | .asciz "Atmel-AT91M40xxx" | ||
103 | .size cpu_at91_name, . - cpu_at91_name | ||
104 | |||
105 | .type cpu_s3c3410_name, #object | ||
106 | cpu_s3c3410_name: | ||
107 | .asciz "Samsung-S3C3410" | ||
108 | .size cpu_s3c3410_name, . - cpu_s3c3410_name | ||
109 | |||
110 | .type cpu_s3c44b0x_name, #object | ||
111 | cpu_s3c44b0x_name: | ||
112 | .asciz "Samsung-S3C44B0x" | ||
113 | .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name | ||
114 | |||
115 | .type cpu_s3c4510b, #object | ||
116 | cpu_s3c4510b_name: | ||
117 | .asciz "Samsung-S3C4510B" | ||
118 | .size cpu_s3c4510b_name, . - cpu_s3c4510b_name | ||
119 | |||
120 | .type cpu_s3c4530_name, #object | ||
121 | cpu_s3c4530_name: | ||
122 | .asciz "Samsung-S3C4530" | ||
123 | .size cpu_s3c4530_name, . - cpu_s3c4530_name | ||
124 | |||
125 | .type cpu_netarm_name, #object | ||
126 | cpu_netarm_name: | ||
127 | .asciz "NETARM" | ||
128 | .size cpu_netarm_name, . - cpu_netarm_name | ||
129 | 75 | ||
130 | .align | 76 | .align |
131 | 77 | ||
132 | .section ".proc.info.init", #alloc, #execinstr | 78 | .section ".proc.info.init", #alloc, #execinstr |
133 | 79 | ||
134 | .type __arm7tdmi_proc_info, #object | 80 | .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ |
135 | __arm7tdmi_proc_info: | 81 | extra_hwcaps=0 |
136 | .long 0x41007700 | 82 | .type __\name\()_proc_info, #object |
137 | .long 0xfff8ff00 | 83 | __\name\()_proc_info: |
138 | .long 0 | 84 | .long \cpu_val |
139 | .long 0 | 85 | .long \cpu_mask |
140 | b __arm7tdmi_setup | ||
141 | .long cpu_arch_name | ||
142 | .long cpu_elf_name | ||
143 | .long HWCAP_SWP | HWCAP_26BIT | ||
144 | .long cpu_arm7tdmi_name | ||
145 | .long arm7tdmi_processor_functions | ||
146 | .long 0 | ||
147 | .long 0 | ||
148 | .long v4_cache_fns | ||
149 | .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info | ||
150 | |||
151 | .type __triscenda7_proc_info, #object | ||
152 | __triscenda7_proc_info: | ||
153 | .long 0x0001d2ff | ||
154 | .long 0x0001ffff | ||
155 | .long 0 | ||
156 | .long 0 | ||
157 | b __arm7tdmi_setup | ||
158 | .long cpu_arch_name | ||
159 | .long cpu_elf_name | ||
160 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
161 | .long cpu_triscenda7_name | ||
162 | .long arm7tdmi_processor_functions | ||
163 | .long 0 | ||
164 | .long 0 | ||
165 | .long v4_cache_fns | ||
166 | .size __triscenda7_proc_info, . - __triscenda7_proc_info | ||
167 | |||
168 | .type __at91_proc_info, #object | ||
169 | __at91_proc_info: | ||
170 | .long 0x14000040 | ||
171 | .long 0xfff000e0 | ||
172 | .long 0 | ||
173 | .long 0 | ||
174 | b __arm7tdmi_setup | ||
175 | .long cpu_arch_name | ||
176 | .long cpu_elf_name | ||
177 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
178 | .long cpu_at91_name | ||
179 | .long arm7tdmi_processor_functions | ||
180 | .long 0 | ||
181 | .long 0 | ||
182 | .long v4_cache_fns | ||
183 | .size __at91_proc_info, . - __at91_proc_info | ||
184 | |||
185 | .type __s3c4510b_proc_info, #object | ||
186 | __s3c4510b_proc_info: | ||
187 | .long 0x36365000 | ||
188 | .long 0xfffff000 | ||
189 | .long 0 | ||
190 | .long 0 | ||
191 | b __arm7tdmi_setup | ||
192 | .long cpu_arch_name | ||
193 | .long cpu_elf_name | ||
194 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
195 | .long cpu_s3c4510b_name | ||
196 | .long arm7tdmi_processor_functions | ||
197 | .long 0 | ||
198 | .long 0 | ||
199 | .long v4_cache_fns | ||
200 | .size __s3c4510b_proc_info, . - __s3c4510b_proc_info | ||
201 | |||
202 | .type __s3c4530_proc_info, #object | ||
203 | __s3c4530_proc_info: | ||
204 | .long 0x4c000000 | ||
205 | .long 0xfff000e0 | ||
206 | .long 0 | ||
207 | .long 0 | ||
208 | b __arm7tdmi_setup | ||
209 | .long cpu_arch_name | ||
210 | .long cpu_elf_name | ||
211 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
212 | .long cpu_s3c4530_name | ||
213 | .long arm7tdmi_processor_functions | ||
214 | .long 0 | ||
215 | .long 0 | ||
216 | .long v4_cache_fns | ||
217 | .size __s3c4530_proc_info, . - __s3c4530_proc_info | ||
218 | |||
219 | .type __s3c3410_proc_info, #object | ||
220 | __s3c3410_proc_info: | ||
221 | .long 0x34100000 | ||
222 | .long 0xffff0000 | ||
223 | .long 0 | ||
224 | .long 0 | ||
225 | b __arm7tdmi_setup | ||
226 | .long cpu_arch_name | ||
227 | .long cpu_elf_name | ||
228 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
229 | .long cpu_s3c3410_name | ||
230 | .long arm7tdmi_processor_functions | ||
231 | .long 0 | ||
232 | .long 0 | ||
233 | .long v4_cache_fns | ||
234 | .size __s3c3410_proc_info, . - __s3c3410_proc_info | ||
235 | |||
236 | .type __s3c44b0x_proc_info, #object | ||
237 | __s3c44b0x_proc_info: | ||
238 | .long 0x44b00000 | ||
239 | .long 0xffff0000 | ||
240 | .long 0 | 86 | .long 0 |
241 | .long 0 | 87 | .long 0 |
242 | b __arm7tdmi_setup | 88 | b __arm7tdmi_setup |
243 | .long cpu_arch_name | 89 | .long cpu_arch_name |
244 | .long cpu_elf_name | 90 | .long cpu_elf_name |
245 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | 91 | .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) |
246 | .long cpu_s3c44b0x_name | 92 | .long \cpu_name |
247 | .long arm7tdmi_processor_functions | 93 | .long arm7tdmi_processor_functions |
248 | .long 0 | 94 | .long 0 |
249 | .long 0 | 95 | .long 0 |
250 | .long v4_cache_fns | 96 | .long v4_cache_fns |
251 | .size __s3c44b0x_proc_info, . - __s3c44b0x_proc_info | 97 | .size __\name\()_proc_info, . - __\name\()_proc_info |
98 | .endm | ||
99 | |||
100 | arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \ | ||
101 | cpu_arm7tdmi_name | ||
102 | arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \ | ||
103 | cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB | ||
104 | arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \ | ||
105 | cpu_at91_name, extra_hwcaps=HWCAP_THUMB | ||
106 | arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \ | ||
107 | cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB | ||
108 | arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \ | ||
109 | cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB | ||
110 | arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \ | ||
111 | cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB | ||
112 | arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \ | ||
113 | cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB | ||
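The extra_hwcaps=0 default in arm7tdmi_proc_info is what lets the base ARM7TDMI entry keep HWCAP_SWP | HWCAP_26BIT while the SoC variants OR in HWCAP_THUMB, matching the per-entry elf_hwcap words deleted above. A small stand-alone illustration of that default-argument idiom (hwcap bit values as in asm/hwcap.h, everything else hypothetical):

	@ Illustration only: a defaulted macro argument folded into the
	@ elf_hwcap word, mirroring the "( \extra_hwcaps )" trick above.
	.equ	HWCAP_SWP,	(1 << 0)
	.equ	HWCAP_THUMB,	(1 << 2)
	.equ	HWCAP_26BIT,	(1 << 3)

	.macro	hwcaps_word extra_hwcaps=0
	.long	HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps )
	.endm

	.section ".rodata"
	hwcaps_word				@ plain ARM7TDMI: no Thumb flag
	hwcaps_word extra_hwcaps=HWCAP_THUMB	@ Triscend/AT91/Samsung variants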
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index bf8a1d1cccb6..92bd102e3982 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -315,18 +315,8 @@ ENTRY(arm920_dma_unmap_area) | |||
315 | mov pc, lr | 315 | mov pc, lr |
316 | ENDPROC(arm920_dma_unmap_area) | 316 | ENDPROC(arm920_dma_unmap_area) |
317 | 317 | ||
318 | ENTRY(arm920_cache_fns) | 318 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
319 | .long arm920_flush_icache_all | 319 | define_cache_functions arm920 |
320 | .long arm920_flush_kern_cache_all | ||
321 | .long arm920_flush_user_cache_all | ||
322 | .long arm920_flush_user_cache_range | ||
323 | .long arm920_coherent_kern_range | ||
324 | .long arm920_coherent_user_range | ||
325 | .long arm920_flush_kern_dcache_area | ||
326 | .long arm920_dma_map_area | ||
327 | .long arm920_dma_unmap_area | ||
328 | .long arm920_dma_flush_range | ||
329 | |||
330 | #endif | 320 | #endif |
331 | 321 | ||
332 | 322 | ||
@@ -416,9 +406,6 @@ ENTRY(cpu_arm920_do_resume) | |||
416 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | 406 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE |
417 | b cpu_resume_mmu | 407 | b cpu_resume_mmu |
418 | ENDPROC(cpu_arm920_do_resume) | 408 | ENDPROC(cpu_arm920_do_resume) |
419 | #else | ||
420 | #define cpu_arm920_do_suspend 0 | ||
421 | #define cpu_arm920_do_resume 0 | ||
422 | #endif | 409 | #endif |
423 | 410 | ||
424 | __CPUINIT | 411 | __CPUINIT |
@@ -450,43 +437,14 @@ arm920_crval: | |||
450 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 | 437 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 |
451 | 438 | ||
452 | __INITDATA | 439 | __INITDATA |
453 | 440 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
454 | /* | 441 | define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1 |
455 | * Purpose : Function pointers used to access above functions - all calls | ||
456 | * come through these | ||
457 | */ | ||
458 | .type arm920_processor_functions, #object | ||
459 | arm920_processor_functions: | ||
460 | .word v4t_early_abort | ||
461 | .word legacy_pabort | ||
462 | .word cpu_arm920_proc_init | ||
463 | .word cpu_arm920_proc_fin | ||
464 | .word cpu_arm920_reset | ||
465 | .word cpu_arm920_do_idle | ||
466 | .word cpu_arm920_dcache_clean_area | ||
467 | .word cpu_arm920_switch_mm | ||
468 | .word cpu_arm920_set_pte_ext | ||
469 | .word cpu_arm920_suspend_size | ||
470 | .word cpu_arm920_do_suspend | ||
471 | .word cpu_arm920_do_resume | ||
472 | .size arm920_processor_functions, . - arm920_processor_functions | ||
473 | 442 | ||
474 | .section ".rodata" | 443 | .section ".rodata" |
475 | 444 | ||
476 | .type cpu_arch_name, #object | 445 | string cpu_arch_name, "armv4t" |
477 | cpu_arch_name: | 446 | string cpu_elf_name, "v4" |
478 | .asciz "armv4t" | 447 | string cpu_arm920_name, "ARM920T" |
479 | .size cpu_arch_name, . - cpu_arch_name | ||
480 | |||
481 | .type cpu_elf_name, #object | ||
482 | cpu_elf_name: | ||
483 | .asciz "v4" | ||
484 | .size cpu_elf_name, . - cpu_elf_name | ||
485 | |||
486 | .type cpu_arm920_name, #object | ||
487 | cpu_arm920_name: | ||
488 | .asciz "ARM920T" | ||
489 | .size cpu_arm920_name, . - cpu_arm920_name | ||
490 | 448 | ||
491 | .align | 449 | .align |
492 | 450 | ||
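proc-arm920.S passes suspend=1 to define_processor_functions, and the no-MMU parts elsewhere in this series (arm740, arm7tdmi, arm940, arm946) pass nommu=1; that is why the deleted #else / #define cpu_arm920_do_suspend 0 fallback is no longer needed: the macro fills in the empty slots itself. A sketch of define_processor_functions, reconstructed from the open-coded struct processor tables removed across this patch (the real definition is in arch/arm/mm/proc-macros.S, presumably with the suspend slots guarded by CONFIG_PM_SLEEP):

	@ Sketch only; field order taken from the struct processor tables
	@ deleted above (see <asm/proc-fns.h>).
	.macro	define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0
	.type	\name\()_processor_functions, #object
	.align	2
ENTRY(\name\()_processor_functions)
	.word	\dabort
	.word	\pabort
	.word	cpu_\name\()_proc_init
	.word	cpu_\name\()_proc_fin
	.word	cpu_\name\()_reset
	.word	cpu_\name\()_do_idle
	.word	cpu_\name\()_dcache_clean_area
	.word	cpu_\name\()_switch_mm

	.if \nommu
	.word	0			@ no cpu_*_set_pte_ext without an MMU
	.else
	.word	cpu_\name\()_set_pte_ext
	.endif

	.if \suspend
	.word	cpu_\name\()_suspend_size
#ifdef CONFIG_PM_SLEEP
	.word	cpu_\name\()_do_suspend
	.word	cpu_\name\()_do_resume
#else
	.word	0
	.word	0
#endif
	.else
	.word	0
	.word	0
	.word	0
	.endif
	.size	\name\()_processor_functions, . - \name\()_processor_functions
	.endm

Invocations like define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1 then generate the same twelve-word table as before, with the nommu and suspend variants handled in one place.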
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 95ba1fc56e4d..490e18833857 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -317,18 +317,8 @@ ENTRY(arm922_dma_unmap_area) | |||
317 | mov pc, lr | 317 | mov pc, lr |
318 | ENDPROC(arm922_dma_unmap_area) | 318 | ENDPROC(arm922_dma_unmap_area) |
319 | 319 | ||
320 | ENTRY(arm922_cache_fns) | 320 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
321 | .long arm922_flush_icache_all | 321 | define_cache_functions arm922 |
322 | .long arm922_flush_kern_cache_all | ||
323 | .long arm922_flush_user_cache_all | ||
324 | .long arm922_flush_user_cache_range | ||
325 | .long arm922_coherent_kern_range | ||
326 | .long arm922_coherent_user_range | ||
327 | .long arm922_flush_kern_dcache_area | ||
328 | .long arm922_dma_map_area | ||
329 | .long arm922_dma_unmap_area | ||
330 | .long arm922_dma_flush_range | ||
331 | |||
332 | #endif | 322 | #endif |
333 | 323 | ||
334 | 324 | ||
@@ -420,43 +410,14 @@ arm922_crval: | |||
420 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 | 410 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 |
421 | 411 | ||
422 | __INITDATA | 412 | __INITDATA |
423 | 413 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
424 | /* | 414 | define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort |
425 | * Purpose : Function pointers used to access above functions - all calls | ||
426 | * come through these | ||
427 | */ | ||
428 | .type arm922_processor_functions, #object | ||
429 | arm922_processor_functions: | ||
430 | .word v4t_early_abort | ||
431 | .word legacy_pabort | ||
432 | .word cpu_arm922_proc_init | ||
433 | .word cpu_arm922_proc_fin | ||
434 | .word cpu_arm922_reset | ||
435 | .word cpu_arm922_do_idle | ||
436 | .word cpu_arm922_dcache_clean_area | ||
437 | .word cpu_arm922_switch_mm | ||
438 | .word cpu_arm922_set_pte_ext | ||
439 | .word 0 | ||
440 | .word 0 | ||
441 | .word 0 | ||
442 | .size arm922_processor_functions, . - arm922_processor_functions | ||
443 | 415 | ||
444 | .section ".rodata" | 416 | .section ".rodata" |
445 | 417 | ||
446 | .type cpu_arch_name, #object | 418 | string cpu_arch_name, "armv4t" |
447 | cpu_arch_name: | 419 | string cpu_elf_name, "v4" |
448 | .asciz "armv4t" | 420 | string cpu_arm922_name, "ARM922T" |
449 | .size cpu_arch_name, . - cpu_arch_name | ||
450 | |||
451 | .type cpu_elf_name, #object | ||
452 | cpu_elf_name: | ||
453 | .asciz "v4" | ||
454 | .size cpu_elf_name, . - cpu_elf_name | ||
455 | |||
456 | .type cpu_arm922_name, #object | ||
457 | cpu_arm922_name: | ||
458 | .asciz "ARM922T" | ||
459 | .size cpu_arm922_name, . - cpu_arm922_name | ||
460 | 421 | ||
461 | .align | 422 | .align |
462 | 423 | ||
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 541e4774eea1..51d494be057e 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -372,17 +372,8 @@ ENTRY(arm925_dma_unmap_area) | |||
372 | mov pc, lr | 372 | mov pc, lr |
373 | ENDPROC(arm925_dma_unmap_area) | 373 | ENDPROC(arm925_dma_unmap_area) |
374 | 374 | ||
375 | ENTRY(arm925_cache_fns) | 375 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
376 | .long arm925_flush_icache_all | 376 | define_cache_functions arm925 |
377 | .long arm925_flush_kern_cache_all | ||
378 | .long arm925_flush_user_cache_all | ||
379 | .long arm925_flush_user_cache_range | ||
380 | .long arm925_coherent_kern_range | ||
381 | .long arm925_coherent_user_range | ||
382 | .long arm925_flush_kern_dcache_area | ||
383 | .long arm925_dma_map_area | ||
384 | .long arm925_dma_unmap_area | ||
385 | .long arm925_dma_flush_range | ||
386 | 377 | ||
387 | ENTRY(cpu_arm925_dcache_clean_area) | 378 | ENTRY(cpu_arm925_dcache_clean_area) |
388 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 379 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -487,52 +478,24 @@ arm925_crval: | |||
487 | crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 | 478 | crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 |
488 | 479 | ||
489 | __INITDATA | 480 | __INITDATA |
490 | 481 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
491 | /* | 482 | define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort |
492 | * Purpose : Function pointers used to access above functions - all calls | ||
493 | * come through these | ||
494 | */ | ||
495 | .type arm925_processor_functions, #object | ||
496 | arm925_processor_functions: | ||
497 | .word v4t_early_abort | ||
498 | .word legacy_pabort | ||
499 | .word cpu_arm925_proc_init | ||
500 | .word cpu_arm925_proc_fin | ||
501 | .word cpu_arm925_reset | ||
502 | .word cpu_arm925_do_idle | ||
503 | .word cpu_arm925_dcache_clean_area | ||
504 | .word cpu_arm925_switch_mm | ||
505 | .word cpu_arm925_set_pte_ext | ||
506 | .word 0 | ||
507 | .word 0 | ||
508 | .word 0 | ||
509 | .size arm925_processor_functions, . - arm925_processor_functions | ||
510 | 483 | ||
511 | .section ".rodata" | 484 | .section ".rodata" |
512 | 485 | ||
513 | .type cpu_arch_name, #object | 486 | string cpu_arch_name, "armv4t" |
514 | cpu_arch_name: | 487 | string cpu_elf_name, "v4" |
515 | .asciz "armv4t" | 488 | string cpu_arm925_name, "ARM925T" |
516 | .size cpu_arch_name, . - cpu_arch_name | ||
517 | |||
518 | .type cpu_elf_name, #object | ||
519 | cpu_elf_name: | ||
520 | .asciz "v4" | ||
521 | .size cpu_elf_name, . - cpu_elf_name | ||
522 | |||
523 | .type cpu_arm925_name, #object | ||
524 | cpu_arm925_name: | ||
525 | .asciz "ARM925T" | ||
526 | .size cpu_arm925_name, . - cpu_arm925_name | ||
527 | 489 | ||
528 | .align | 490 | .align |
529 | 491 | ||
530 | .section ".proc.info.init", #alloc, #execinstr | 492 | .section ".proc.info.init", #alloc, #execinstr |
531 | 493 | ||
532 | .type __arm925_proc_info,#object | 494 | .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache |
533 | __arm925_proc_info: | 495 | .type __\name\()_proc_info,#object |
534 | .long 0x54029250 | 496 | __\name\()_proc_info: |
535 | .long 0xfffffff0 | 497 | .long \cpu_val |
498 | .long \cpu_mask | ||
536 | .long PMD_TYPE_SECT | \ | 499 | .long PMD_TYPE_SECT | \ |
537 | PMD_BIT4 | \ | 500 | PMD_BIT4 | \ |
538 | PMD_SECT_AP_WRITE | \ | 501 | PMD_SECT_AP_WRITE | \ |
@@ -550,27 +513,8 @@ __arm925_proc_info: | |||
550 | .long v4wbi_tlb_fns | 513 | .long v4wbi_tlb_fns |
551 | .long v4wb_user_fns | 514 | .long v4wb_user_fns |
552 | .long arm925_cache_fns | 515 | .long arm925_cache_fns |
553 | .size __arm925_proc_info, . - __arm925_proc_info | 516 | .size __\name\()_proc_info, . - __\name\()_proc_info |
517 | .endm | ||
554 | 518 | ||
555 | .type __arm915_proc_info,#object | 519 | arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name |
556 | __arm915_proc_info: | 520 | arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name |
557 | .long 0x54029150 | ||
558 | .long 0xfffffff0 | ||
559 | .long PMD_TYPE_SECT | \ | ||
560 | PMD_BIT4 | \ | ||
561 | PMD_SECT_AP_WRITE | \ | ||
562 | PMD_SECT_AP_READ | ||
563 | .long PMD_TYPE_SECT | \ | ||
564 | PMD_BIT4 | \ | ||
565 | PMD_SECT_AP_WRITE | \ | ||
566 | PMD_SECT_AP_READ | ||
567 | b __arm925_setup | ||
568 | .long cpu_arch_name | ||
569 | .long cpu_elf_name | ||
570 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | ||
571 | .long cpu_arm925_name | ||
572 | .long arm925_processor_functions | ||
573 | .long v4wbi_tlb_fns | ||
574 | .long v4wb_user_fns | ||
575 | .long arm925_cache_fns | ||
576 | .size __arm925_proc_info, . - __arm925_proc_info | ||
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 0ed85d930c09..2bbcf053dffd 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -335,17 +335,8 @@ ENTRY(arm926_dma_unmap_area) | |||
335 | mov pc, lr | 335 | mov pc, lr |
336 | ENDPROC(arm926_dma_unmap_area) | 336 | ENDPROC(arm926_dma_unmap_area) |
337 | 337 | ||
338 | ENTRY(arm926_cache_fns) | 338 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
339 | .long arm926_flush_icache_all | 339 | define_cache_functions arm926 |
340 | .long arm926_flush_kern_cache_all | ||
341 | .long arm926_flush_user_cache_all | ||
342 | .long arm926_flush_user_cache_range | ||
343 | .long arm926_coherent_kern_range | ||
344 | .long arm926_coherent_user_range | ||
345 | .long arm926_flush_kern_dcache_area | ||
346 | .long arm926_dma_map_area | ||
347 | .long arm926_dma_unmap_area | ||
348 | .long arm926_dma_flush_range | ||
349 | 340 | ||
350 | ENTRY(cpu_arm926_dcache_clean_area) | 341 | ENTRY(cpu_arm926_dcache_clean_area) |
351 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 342 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -430,9 +421,6 @@ ENTRY(cpu_arm926_do_resume) | |||
430 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | 421 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE |
431 | b cpu_resume_mmu | 422 | b cpu_resume_mmu |
432 | ENDPROC(cpu_arm926_do_resume) | 423 | ENDPROC(cpu_arm926_do_resume) |
433 | #else | ||
434 | #define cpu_arm926_do_suspend 0 | ||
435 | #define cpu_arm926_do_resume 0 | ||
436 | #endif | 424 | #endif |
437 | 425 | ||
438 | __CPUINIT | 426 | __CPUINIT |
@@ -475,42 +463,14 @@ arm926_crval: | |||
475 | 463 | ||
476 | __INITDATA | 464 | __INITDATA |
477 | 465 | ||
478 | /* | 466 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
479 | * Purpose : Function pointers used to access above functions - all calls | 467 | define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1 |
480 | * come through these | ||
481 | */ | ||
482 | .type arm926_processor_functions, #object | ||
483 | arm926_processor_functions: | ||
484 | .word v5tj_early_abort | ||
485 | .word legacy_pabort | ||
486 | .word cpu_arm926_proc_init | ||
487 | .word cpu_arm926_proc_fin | ||
488 | .word cpu_arm926_reset | ||
489 | .word cpu_arm926_do_idle | ||
490 | .word cpu_arm926_dcache_clean_area | ||
491 | .word cpu_arm926_switch_mm | ||
492 | .word cpu_arm926_set_pte_ext | ||
493 | .word cpu_arm926_suspend_size | ||
494 | .word cpu_arm926_do_suspend | ||
495 | .word cpu_arm926_do_resume | ||
496 | .size arm926_processor_functions, . - arm926_processor_functions | ||
497 | 468 | ||
498 | .section ".rodata" | 469 | .section ".rodata" |
499 | 470 | ||
500 | .type cpu_arch_name, #object | 471 | string cpu_arch_name, "armv5tej" |
501 | cpu_arch_name: | 472 | string cpu_elf_name, "v5" |
502 | .asciz "armv5tej" | 473 | string cpu_arm926_name, "ARM926EJ-S" |
503 | .size cpu_arch_name, . - cpu_arch_name | ||
504 | |||
505 | .type cpu_elf_name, #object | ||
506 | cpu_elf_name: | ||
507 | .asciz "v5" | ||
508 | .size cpu_elf_name, . - cpu_elf_name | ||
509 | |||
510 | .type cpu_arm926_name, #object | ||
511 | cpu_arm926_name: | ||
512 | .asciz "ARM926EJ-S" | ||
513 | .size cpu_arm926_name, . - cpu_arm926_name | ||
514 | 474 | ||
515 | .align | 475 | .align |
516 | 476 | ||
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 26aea3f71c26..ac750d506153 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -264,17 +264,8 @@ ENTRY(arm940_dma_unmap_area) | |||
264 | mov pc, lr | 264 | mov pc, lr |
265 | ENDPROC(arm940_dma_unmap_area) | 265 | ENDPROC(arm940_dma_unmap_area) |
266 | 266 | ||
267 | ENTRY(arm940_cache_fns) | 267 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
268 | .long arm940_flush_icache_all | 268 | define_cache_functions arm940 |
269 | .long arm940_flush_kern_cache_all | ||
270 | .long arm940_flush_user_cache_all | ||
271 | .long arm940_flush_user_cache_range | ||
272 | .long arm940_coherent_kern_range | ||
273 | .long arm940_coherent_user_range | ||
274 | .long arm940_flush_kern_dcache_area | ||
275 | .long arm940_dma_map_area | ||
276 | .long arm940_dma_unmap_area | ||
277 | .long arm940_dma_flush_range | ||
278 | 269 | ||
279 | __CPUINIT | 270 | __CPUINIT |
280 | 271 | ||
@@ -348,42 +339,14 @@ __arm940_setup: | |||
348 | 339 | ||
349 | __INITDATA | 340 | __INITDATA |
350 | 341 | ||
351 | /* | 342 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
352 | * Purpose : Function pointers used to access above functions - all calls | 343 | define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
353 | * come through these | ||
354 | */ | ||
355 | .type arm940_processor_functions, #object | ||
356 | ENTRY(arm940_processor_functions) | ||
357 | .word nommu_early_abort | ||
358 | .word legacy_pabort | ||
359 | .word cpu_arm940_proc_init | ||
360 | .word cpu_arm940_proc_fin | ||
361 | .word cpu_arm940_reset | ||
362 | .word cpu_arm940_do_idle | ||
363 | .word cpu_arm940_dcache_clean_area | ||
364 | .word cpu_arm940_switch_mm | ||
365 | .word 0 @ cpu_*_set_pte | ||
366 | .word 0 | ||
367 | .word 0 | ||
368 | .word 0 | ||
369 | .size arm940_processor_functions, . - arm940_processor_functions | ||
370 | 344 | ||
371 | .section ".rodata" | 345 | .section ".rodata" |
372 | 346 | ||
373 | .type cpu_arch_name, #object | 347 | string cpu_arch_name, "armv4t" |
374 | cpu_arch_name: | 348 | string cpu_elf_name, "v4" |
375 | .asciz "armv4t" | 349 | string cpu_arm940_name, "ARM940T" |
376 | .size cpu_arch_name, . - cpu_arch_name | ||
377 | |||
378 | .type cpu_elf_name, #object | ||
379 | cpu_elf_name: | ||
380 | .asciz "v4" | ||
381 | .size cpu_elf_name, . - cpu_elf_name | ||
382 | |||
383 | .type cpu_arm940_name, #object | ||
384 | cpu_arm940_name: | ||
385 | .ascii "ARM940T" | ||
386 | .size cpu_arm940_name, . - cpu_arm940_name | ||
387 | 350 | ||
388 | .align | 351 | .align |
389 | 352 | ||
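For reference, `define_cache_functions arm940` (the macro is added to proc-macros.S later in this diff) expands to the same struct cpu_cache_fns table that is removed above, plus explicit alignment, type and size directives; a sketch of the expansion:

	.align	2
	.type	arm940_cache_fns, #object
ENTRY(arm940_cache_fns)
	.long	arm940_flush_icache_all
	.long	arm940_flush_kern_cache_all
	.long	arm940_flush_user_cache_all
	.long	arm940_flush_user_cache_range
	.long	arm940_coherent_kern_range
	.long	arm940_coherent_user_range
	.long	arm940_flush_kern_dcache_area
	.long	arm940_dma_map_area
	.long	arm940_dma_unmap_area
	.long	arm940_dma_flush_range
	.size	arm940_cache_fns, . - arm940_cache_fns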
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 8063345406fe..f8f7ea34bfc5 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -306,18 +306,8 @@ ENTRY(arm946_dma_unmap_area) | |||
306 | mov pc, lr | 306 | mov pc, lr |
307 | ENDPROC(arm946_dma_unmap_area) | 307 | ENDPROC(arm946_dma_unmap_area) |
308 | 308 | ||
309 | ENTRY(arm946_cache_fns) | 309 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
310 | .long arm946_flush_icache_all | 310 | define_cache_functions arm946 |
311 | .long arm946_flush_kern_cache_all | ||
312 | .long arm946_flush_user_cache_all | ||
313 | .long arm946_flush_user_cache_range | ||
314 | .long arm946_coherent_kern_range | ||
315 | .long arm946_coherent_user_range | ||
316 | .long arm946_flush_kern_dcache_area | ||
317 | .long arm946_dma_map_area | ||
318 | .long arm946_dma_unmap_area | ||
319 | .long arm946_dma_flush_range | ||
320 | |||
321 | 311 | ||
322 | ENTRY(cpu_arm946_dcache_clean_area) | 312 | ENTRY(cpu_arm946_dcache_clean_area) |
323 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 313 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -403,43 +393,14 @@ __arm946_setup: | |||
403 | 393 | ||
404 | __INITDATA | 394 | __INITDATA |
405 | 395 | ||
406 | /* | 396 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
407 | * Purpose : Function pointers used to access above functions - all calls | 397 | define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
408 | * come through these | ||
409 | */ | ||
410 | .type arm946_processor_functions, #object | ||
411 | ENTRY(arm946_processor_functions) | ||
412 | .word nommu_early_abort | ||
413 | .word legacy_pabort | ||
414 | .word cpu_arm946_proc_init | ||
415 | .word cpu_arm946_proc_fin | ||
416 | .word cpu_arm946_reset | ||
417 | .word cpu_arm946_do_idle | ||
418 | |||
419 | .word cpu_arm946_dcache_clean_area | ||
420 | .word cpu_arm946_switch_mm | ||
421 | .word 0 @ cpu_*_set_pte | ||
422 | .word 0 | ||
423 | .word 0 | ||
424 | .word 0 | ||
425 | .size arm946_processor_functions, . - arm946_processor_functions | ||
426 | 398 | ||
427 | .section ".rodata" | 399 | .section ".rodata" |
428 | 400 | ||
429 | .type cpu_arch_name, #object | 401 | string cpu_arch_name, "armv5te" |
430 | cpu_arch_name: | 402 | string cpu_elf_name, "v5t" |
431 | .asciz "armv5te" | 403 | string cpu_arm946_name, "ARM946E-S" |
432 | .size cpu_arch_name, . - cpu_arch_name | ||
433 | |||
434 | .type cpu_elf_name, #object | ||
435 | cpu_elf_name: | ||
436 | .asciz "v5t" | ||
437 | .size cpu_elf_name, . - cpu_elf_name | ||
438 | |||
439 | .type cpu_arm946_name, #object | ||
440 | cpu_arm946_name: | ||
441 | .ascii "ARM946E-S" | ||
442 | .size cpu_arm946_name, . - cpu_arm946_name | ||
443 | 404 | ||
444 | .align | 405 | .align |
445 | 406 | ||
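Likewise, `define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1` reproduces the struct processor table removed above: with nommu=1 the set_pte_ext slot is emitted as 0, and with the default suspend=0 the three suspend/resume slots are 0 as well. A sketch of the expansion:

	.type	arm946_processor_functions, #object
	.align	2
ENTRY(arm946_processor_functions)
	.word	nommu_early_abort		@ dabort
	.word	legacy_pabort			@ pabort
	.word	cpu_arm946_proc_init
	.word	cpu_arm946_proc_fin
	.word	cpu_arm946_reset
	.word	cpu_arm946_do_idle
	.word	cpu_arm946_dcache_clean_area
	.word	cpu_arm946_switch_mm
	.word	0				@ nommu=1: no cpu_arm946_set_pte_ext
	.word	0				@ suspend=0: no suspend_size
	.word	0				@ no do_suspend
	.word	0				@ no do_resume
	.size	arm946_processor_functions, . - arm946_processor_functions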
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 546b54da1005..2120f9e2af7f 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | 19 | ||
20 | #include "proc-macros.S" | ||
21 | |||
20 | .text | 22 | .text |
21 | /* | 23 | /* |
22 | * cpu_arm9tdmi_proc_init() | 24 | * cpu_arm9tdmi_proc_init() |
@@ -55,82 +57,38 @@ __arm9tdmi_setup: | |||
55 | 57 | ||
56 | __INITDATA | 58 | __INITDATA |
57 | 59 | ||
58 | /* | 60 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
59 | * Purpose : Function pointers used to access above functions - all calls | 61 | define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
60 | * come through these | ||
61 | */ | ||
62 | .type arm9tdmi_processor_functions, #object | ||
63 | ENTRY(arm9tdmi_processor_functions) | ||
64 | .word nommu_early_abort | ||
65 | .word legacy_pabort | ||
66 | .word cpu_arm9tdmi_proc_init | ||
67 | .word cpu_arm9tdmi_proc_fin | ||
68 | .word cpu_arm9tdmi_reset | ||
69 | .word cpu_arm9tdmi_do_idle | ||
70 | .word cpu_arm9tdmi_dcache_clean_area | ||
71 | .word cpu_arm9tdmi_switch_mm | ||
72 | .word 0 @ cpu_*_set_pte | ||
73 | .word 0 | ||
74 | .word 0 | ||
75 | .word 0 | ||
76 | .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions | ||
77 | 62 | ||
78 | .section ".rodata" | 63 | .section ".rodata" |
79 | 64 | ||
80 | .type cpu_arch_name, #object | 65 | string cpu_arch_name, "armv4t" |
81 | cpu_arch_name: | 66 | string cpu_elf_name, "v4" |
82 | .asciz "armv4t" | 67 | string cpu_arm9tdmi_name, "ARM9TDMI" |
83 | .size cpu_arch_name, . - cpu_arch_name | 68 | string cpu_p2001_name, "P2001" |
84 | |||
85 | .type cpu_elf_name, #object | ||
86 | cpu_elf_name: | ||
87 | .asciz "v4" | ||
88 | .size cpu_elf_name, . - cpu_elf_name | ||
89 | |||
90 | .type cpu_arm9tdmi_name, #object | ||
91 | cpu_arm9tdmi_name: | ||
92 | .asciz "ARM9TDMI" | ||
93 | .size cpu_arm9tdmi_name, . - cpu_arm9tdmi_name | ||
94 | |||
95 | .type cpu_p2001_name, #object | ||
96 | cpu_p2001_name: | ||
97 | .asciz "P2001" | ||
98 | .size cpu_p2001_name, . - cpu_p2001_name | ||
99 | 69 | ||
100 | .align | 70 | .align |
101 | 71 | ||
102 | .section ".proc.info.init", #alloc, #execinstr | 72 | .section ".proc.info.init", #alloc, #execinstr |
103 | 73 | ||
104 | .type __arm9tdmi_proc_info, #object | 74 | .macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req |
105 | __arm9tdmi_proc_info: | 75 | .type __\name\()_proc_info, #object |
106 | .long 0x41009900 | 76 | __\name\()_proc_info: |
107 | .long 0xfff8ff00 | 77 | .long \cpu_val |
78 | .long \cpu_mask | ||
108 | .long 0 | 79 | .long 0 |
109 | .long 0 | 80 | .long 0 |
110 | b __arm9tdmi_setup | 81 | b __arm9tdmi_setup |
111 | .long cpu_arch_name | 82 | .long cpu_arch_name |
112 | .long cpu_elf_name | 83 | .long cpu_elf_name |
113 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | 84 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT |
114 | .long cpu_arm9tdmi_name | 85 | .long \cpu_name |
115 | .long arm9tdmi_processor_functions | 86 | .long arm9tdmi_processor_functions |
116 | .long 0 | 87 | .long 0 |
117 | .long 0 | 88 | .long 0 |
118 | .long v4_cache_fns | 89 | .long v4_cache_fns |
119 | .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info | 90 | .size __\name\()_proc_info, . - __\name\()_proc_info |
91 | .endm | ||
120 | 92 | ||
121 | .type __p2001_proc_info, #object | 93 | arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name |
122 | __p2001_proc_info: | 94 | arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name |
123 | .long 0x41029000 | ||
124 | .long 0xffffffff | ||
125 | .long 0 | ||
126 | .long 0 | ||
127 | b __arm9tdmi_setup | ||
128 | .long cpu_arch_name | ||
129 | .long cpu_elf_name | ||
130 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
131 | .long cpu_p2001_name | ||
132 | .long arm9tdmi_processor_functions | ||
133 | .long 0 | ||
134 | .long 0 | ||
135 | .long v4_cache_fns | ||
136 | .size __p2001_proc_info, . - __p2001_proc_info | ||
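The new arm9tdmi_proc_info macro reproduces the two removed proc_info records; for example, `arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name` expands to:

	.type	__p2001_proc_info, #object
__p2001_proc_info:
	.long	0x41029000			@ cpu_val
	.long	0xffffffff			@ cpu_mask
	.long	0
	.long	0
	b	__arm9tdmi_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
	.long	cpu_p2001_name
	.long	arm9tdmi_processor_functions
	.long	0
	.long	0
	.long	v4_cache_fns
	.size	__p2001_proc_info, . - __p2001_proc_info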
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index fc2a4ae15cf4..4c7a5710472b 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
@@ -180,42 +180,14 @@ fa526_cr1_set: | |||
180 | 180 | ||
181 | __INITDATA | 181 | __INITDATA |
182 | 182 | ||
183 | /* | 183 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
184 | * Purpose : Function pointers used to access above functions - all calls | 184 | define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort |
185 | * come through these | ||
186 | */ | ||
187 | .type fa526_processor_functions, #object | ||
188 | fa526_processor_functions: | ||
189 | .word v4_early_abort | ||
190 | .word legacy_pabort | ||
191 | .word cpu_fa526_proc_init | ||
192 | .word cpu_fa526_proc_fin | ||
193 | .word cpu_fa526_reset | ||
194 | .word cpu_fa526_do_idle | ||
195 | .word cpu_fa526_dcache_clean_area | ||
196 | .word cpu_fa526_switch_mm | ||
197 | .word cpu_fa526_set_pte_ext | ||
198 | .word 0 | ||
199 | .word 0 | ||
200 | .word 0 | ||
201 | .size fa526_processor_functions, . - fa526_processor_functions | ||
202 | 185 | ||
203 | .section ".rodata" | 186 | .section ".rodata" |
204 | 187 | ||
205 | .type cpu_arch_name, #object | 188 | string cpu_arch_name, "armv4" |
206 | cpu_arch_name: | 189 | string cpu_elf_name, "v4" |
207 | .asciz "armv4" | 190 | string cpu_fa526_name, "FA526" |
208 | .size cpu_arch_name, . - cpu_arch_name | ||
209 | |||
210 | .type cpu_elf_name, #object | ||
211 | cpu_elf_name: | ||
212 | .asciz "v4" | ||
213 | .size cpu_elf_name, . - cpu_elf_name | ||
214 | |||
215 | .type cpu_fa526_name, #object | ||
216 | cpu_fa526_name: | ||
217 | .asciz "FA526" | ||
218 | .size cpu_fa526_name, . - cpu_fa526_name | ||
219 | 191 | ||
220 | .align | 192 | .align |
221 | 193 | ||
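For an MMU part such as the FA526 the macro defaults (nommu=0, suspend=0) apply, so `define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort` keeps the real cpu_fa526_set_pte_ext slot and zeroes only the suspend entries, matching the table removed above; the tail of the expansion looks like:

	@ ... first eight slots as in the arm946 sketch, with the fa526 names
	@ and v4_early_abort/legacy_pabort ...
	.word	cpu_fa526_set_pte_ext		@ nommu=0 (default)
	.word	0				@ suspend=0 (default): no suspend_size
	.word	0				@ no do_suspend
	.word	0				@ no do_resume
	.size	fa526_processor_functions, . - fa526_processor_functions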
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index d3883eed7a4a..8a6c2f78c1c3 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -411,29 +411,28 @@ ENTRY(feroceon_dma_unmap_area) | |||
411 | mov pc, lr | 411 | mov pc, lr |
412 | ENDPROC(feroceon_dma_unmap_area) | 412 | ENDPROC(feroceon_dma_unmap_area) |
413 | 413 | ||
414 | ENTRY(feroceon_cache_fns) | 414 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
415 | .long feroceon_flush_icache_all | 415 | define_cache_functions feroceon |
416 | .long feroceon_flush_kern_cache_all | 416 | |
417 | .long feroceon_flush_user_cache_all | 417 | .macro range_alias basename |
418 | .long feroceon_flush_user_cache_range | 418 | .globl feroceon_range_\basename |
419 | .long feroceon_coherent_kern_range | 419 | .type feroceon_range_\basename , %function |
420 | .long feroceon_coherent_user_range | 420 | .equ feroceon_range_\basename , feroceon_\basename |
421 | .long feroceon_flush_kern_dcache_area | 421 | .endm |
422 | .long feroceon_dma_map_area | 422 | |
423 | .long feroceon_dma_unmap_area | 423 | /* |
424 | .long feroceon_dma_flush_range | 424 | * Most of the cache functions are unchanged for this case. |
425 | 425 | * Export suitable alias symbols for the unchanged functions: | |
426 | ENTRY(feroceon_range_cache_fns) | 426 | */ |
427 | .long feroceon_flush_icache_all | 427 | range_alias flush_icache_all |
428 | .long feroceon_flush_kern_cache_all | 428 | range_alias flush_user_cache_all |
429 | .long feroceon_flush_user_cache_all | 429 | range_alias flush_kern_cache_all |
430 | .long feroceon_flush_user_cache_range | 430 | range_alias flush_user_cache_range |
431 | .long feroceon_coherent_kern_range | 431 | range_alias coherent_kern_range |
432 | .long feroceon_coherent_user_range | 432 | range_alias coherent_user_range |
433 | .long feroceon_range_flush_kern_dcache_area | 433 | range_alias dma_unmap_area |
434 | .long feroceon_range_dma_map_area | 434 | |
435 | .long feroceon_dma_unmap_area | 435 | define_cache_functions feroceon_range |
436 | .long feroceon_range_dma_flush_range | ||
437 | 436 | ||
438 | .align 5 | 437 | .align 5 |
439 | ENTRY(cpu_feroceon_dcache_clean_area) | 438 | ENTRY(cpu_feroceon_dcache_clean_area) |
@@ -539,93 +538,27 @@ feroceon_crval: | |||
539 | 538 | ||
540 | __INITDATA | 539 | __INITDATA |
541 | 540 | ||
542 | /* | 541 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
543 | * Purpose : Function pointers used to access above functions - all calls | 542 | define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort |
544 | * come through these | ||
545 | */ | ||
546 | .type feroceon_processor_functions, #object | ||
547 | feroceon_processor_functions: | ||
548 | .word v5t_early_abort | ||
549 | .word legacy_pabort | ||
550 | .word cpu_feroceon_proc_init | ||
551 | .word cpu_feroceon_proc_fin | ||
552 | .word cpu_feroceon_reset | ||
553 | .word cpu_feroceon_do_idle | ||
554 | .word cpu_feroceon_dcache_clean_area | ||
555 | .word cpu_feroceon_switch_mm | ||
556 | .word cpu_feroceon_set_pte_ext | ||
557 | .word 0 | ||
558 | .word 0 | ||
559 | .word 0 | ||
560 | .size feroceon_processor_functions, . - feroceon_processor_functions | ||
561 | 543 | ||
562 | .section ".rodata" | 544 | .section ".rodata" |
563 | 545 | ||
564 | .type cpu_arch_name, #object | 546 | string cpu_arch_name, "armv5te" |
565 | cpu_arch_name: | 547 | string cpu_elf_name, "v5" |
566 | .asciz "armv5te" | 548 | string cpu_feroceon_name, "Feroceon" |
567 | .size cpu_arch_name, . - cpu_arch_name | 549 | string cpu_88fr531_name, "Feroceon 88FR531-vd" |
568 | 550 | string cpu_88fr571_name, "Feroceon 88FR571-vd" | |
569 | .type cpu_elf_name, #object | 551 | string cpu_88fr131_name, "Feroceon 88FR131" |
570 | cpu_elf_name: | ||
571 | .asciz "v5" | ||
572 | .size cpu_elf_name, . - cpu_elf_name | ||
573 | |||
574 | .type cpu_feroceon_name, #object | ||
575 | cpu_feroceon_name: | ||
576 | .asciz "Feroceon" | ||
577 | .size cpu_feroceon_name, . - cpu_feroceon_name | ||
578 | |||
579 | .type cpu_88fr531_name, #object | ||
580 | cpu_88fr531_name: | ||
581 | .asciz "Feroceon 88FR531-vd" | ||
582 | .size cpu_88fr531_name, . - cpu_88fr531_name | ||
583 | |||
584 | .type cpu_88fr571_name, #object | ||
585 | cpu_88fr571_name: | ||
586 | .asciz "Feroceon 88FR571-vd" | ||
587 | .size cpu_88fr571_name, . - cpu_88fr571_name | ||
588 | |||
589 | .type cpu_88fr131_name, #object | ||
590 | cpu_88fr131_name: | ||
591 | .asciz "Feroceon 88FR131" | ||
592 | .size cpu_88fr131_name, . - cpu_88fr131_name | ||
593 | 552 | ||
594 | .align | 553 | .align |
595 | 554 | ||
596 | .section ".proc.info.init", #alloc, #execinstr | 555 | .section ".proc.info.init", #alloc, #execinstr |
597 | 556 | ||
598 | #ifdef CONFIG_CPU_FEROCEON_OLD_ID | 557 | .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req |
599 | .type __feroceon_old_id_proc_info,#object | 558 | .type __\name\()_proc_info,#object |
600 | __feroceon_old_id_proc_info: | 559 | __\name\()_proc_info: |
601 | .long 0x41009260 | 560 | .long \cpu_val |
602 | .long 0xff00fff0 | 561 | .long \cpu_mask |
603 | .long PMD_TYPE_SECT | \ | ||
604 | PMD_SECT_BUFFERABLE | \ | ||
605 | PMD_SECT_CACHEABLE | \ | ||
606 | PMD_BIT4 | \ | ||
607 | PMD_SECT_AP_WRITE | \ | ||
608 | PMD_SECT_AP_READ | ||
609 | .long PMD_TYPE_SECT | \ | ||
610 | PMD_BIT4 | \ | ||
611 | PMD_SECT_AP_WRITE | \ | ||
612 | PMD_SECT_AP_READ | ||
613 | b __feroceon_setup | ||
614 | .long cpu_arch_name | ||
615 | .long cpu_elf_name | ||
616 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
617 | .long cpu_feroceon_name | ||
618 | .long feroceon_processor_functions | ||
619 | .long v4wbi_tlb_fns | ||
620 | .long feroceon_user_fns | ||
621 | .long feroceon_cache_fns | ||
622 | .size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info | ||
623 | #endif | ||
624 | |||
625 | .type __88fr531_proc_info,#object | ||
626 | __88fr531_proc_info: | ||
627 | .long 0x56055310 | ||
628 | .long 0xfffffff0 | ||
629 | .long PMD_TYPE_SECT | \ | 562 | .long PMD_TYPE_SECT | \ |
630 | PMD_SECT_BUFFERABLE | \ | 563 | PMD_SECT_BUFFERABLE | \ |
631 | PMD_SECT_CACHEABLE | \ | 564 | PMD_SECT_CACHEABLE | \ |
@@ -640,59 +573,22 @@ __88fr531_proc_info: | |||
640 | .long cpu_arch_name | 573 | .long cpu_arch_name |
641 | .long cpu_elf_name | 574 | .long cpu_elf_name |
642 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 575 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP |
643 | .long cpu_88fr531_name | 576 | .long \cpu_name |
644 | .long feroceon_processor_functions | 577 | .long feroceon_processor_functions |
645 | .long v4wbi_tlb_fns | 578 | .long v4wbi_tlb_fns |
646 | .long feroceon_user_fns | 579 | .long feroceon_user_fns |
647 | .long feroceon_cache_fns | 580 | .long \cache |
648 | .size __88fr531_proc_info, . - __88fr531_proc_info | 581 | .size __\name\()_proc_info, . - __\name\()_proc_info |
582 | .endm | ||
649 | 583 | ||
650 | .type __88fr571_proc_info,#object | 584 | #ifdef CONFIG_CPU_FEROCEON_OLD_ID |
651 | __88fr571_proc_info: | 585 | feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \ |
652 | .long 0x56155710 | 586 | cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns |
653 | .long 0xfffffff0 | 587 | #endif |
654 | .long PMD_TYPE_SECT | \ | ||
655 | PMD_SECT_BUFFERABLE | \ | ||
656 | PMD_SECT_CACHEABLE | \ | ||
657 | PMD_BIT4 | \ | ||
658 | PMD_SECT_AP_WRITE | \ | ||
659 | PMD_SECT_AP_READ | ||
660 | .long PMD_TYPE_SECT | \ | ||
661 | PMD_BIT4 | \ | ||
662 | PMD_SECT_AP_WRITE | \ | ||
663 | PMD_SECT_AP_READ | ||
664 | b __feroceon_setup | ||
665 | .long cpu_arch_name | ||
666 | .long cpu_elf_name | ||
667 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
668 | .long cpu_88fr571_name | ||
669 | .long feroceon_processor_functions | ||
670 | .long v4wbi_tlb_fns | ||
671 | .long feroceon_user_fns | ||
672 | .long feroceon_range_cache_fns | ||
673 | .size __88fr571_proc_info, . - __88fr571_proc_info | ||
674 | 588 | ||
675 | .type __88fr131_proc_info,#object | 589 | feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \ |
676 | __88fr131_proc_info: | 590 | cache=feroceon_cache_fns |
677 | .long 0x56251310 | 591 | feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \ |
678 | .long 0xfffffff0 | 592 | cache=feroceon_range_cache_fns |
679 | .long PMD_TYPE_SECT | \ | 593 | feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \ |
680 | PMD_SECT_BUFFERABLE | \ | 594 | cache=feroceon_range_cache_fns |
681 | PMD_SECT_CACHEABLE | \ | ||
682 | PMD_BIT4 | \ | ||
683 | PMD_SECT_AP_WRITE | \ | ||
684 | PMD_SECT_AP_READ | ||
685 | .long PMD_TYPE_SECT | \ | ||
686 | PMD_BIT4 | \ | ||
687 | PMD_SECT_AP_WRITE | \ | ||
688 | PMD_SECT_AP_READ | ||
689 | b __feroceon_setup | ||
690 | .long cpu_arch_name | ||
691 | .long cpu_elf_name | ||
692 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
693 | .long cpu_88fr131_name | ||
694 | .long feroceon_processor_functions | ||
695 | .long v4wbi_tlb_fns | ||
696 | .long feroceon_user_fns | ||
697 | .long feroceon_range_cache_fns | ||
698 | .size __88fr131_proc_info, . - __88fr131_proc_info | ||
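The range_alias trick above lets the second Feroceon table be generated by the same macro: each invocation emits a feroceon_range_* alias for an unchanged function, so when `define_cache_functions feroceon_range` is expanded every slot name resolves, either to one of the aliases or to one of the genuinely different range implementations. For example, `range_alias coherent_kern_range` emits:

	.globl	feroceon_range_coherent_kern_range
	.type	feroceon_range_coherent_kern_range, %function
	.equ	feroceon_range_coherent_kern_range, feroceon_coherent_kern_range

The three functions that are not aliased (feroceon_range_flush_kern_dcache_area, feroceon_range_dma_map_area and feroceon_range_dma_flush_range) are then picked up directly by the macro, exactly as in the removed feroceon_range_cache_fns table.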
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 34261f9486b9..307a4def8d3a 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -254,3 +254,71 @@ | |||
254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier | 255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier |
256 | .endm | 256 | .endm |
257 | |||
258 | .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0 | ||
259 | .type \name\()_processor_functions, #object | ||
260 | .align 2 | ||
261 | ENTRY(\name\()_processor_functions) | ||
262 | .word \dabort | ||
263 | .word \pabort | ||
264 | .word cpu_\name\()_proc_init | ||
265 | .word cpu_\name\()_proc_fin | ||
266 | .word cpu_\name\()_reset | ||
267 | .word cpu_\name\()_do_idle | ||
268 | .word cpu_\name\()_dcache_clean_area | ||
269 | .word cpu_\name\()_switch_mm | ||
270 | |||
271 | .if \nommu | ||
272 | .word 0 | ||
273 | .else | ||
274 | .word cpu_\name\()_set_pte_ext | ||
275 | .endif | ||
276 | |||
277 | .if \suspend | ||
278 | .word cpu_\name\()_suspend_size | ||
279 | #ifdef CONFIG_PM_SLEEP | ||
280 | .word cpu_\name\()_do_suspend | ||
281 | .word cpu_\name\()_do_resume | ||
282 | #else | ||
283 | .word 0 | ||
284 | .word 0 | ||
285 | #endif | ||
286 | .else | ||
287 | .word 0 | ||
288 | .word 0 | ||
289 | .word 0 | ||
290 | .endif | ||
291 | |||
292 | .size \name\()_processor_functions, . - \name\()_processor_functions | ||
293 | .endm | ||
294 | |||
295 | .macro define_cache_functions name:req | ||
296 | .align 2 | ||
297 | .type \name\()_cache_fns, #object | ||
298 | ENTRY(\name\()_cache_fns) | ||
299 | .long \name\()_flush_icache_all | ||
300 | .long \name\()_flush_kern_cache_all | ||
301 | .long \name\()_flush_user_cache_all | ||
302 | .long \name\()_flush_user_cache_range | ||
303 | .long \name\()_coherent_kern_range | ||
304 | .long \name\()_coherent_user_range | ||
305 | .long \name\()_flush_kern_dcache_area | ||
306 | .long \name\()_dma_map_area | ||
307 | .long \name\()_dma_unmap_area | ||
308 | .long \name\()_dma_flush_range | ||
309 | .size \name\()_cache_fns, . - \name\()_cache_fns | ||
310 | .endm | ||
311 | |||
312 | .macro define_tlb_functions name:req, flags_up:req, flags_smp | ||
313 | .type \name\()_tlb_fns, #object | ||
314 | ENTRY(\name\()_tlb_fns) | ||
315 | .long \name\()_flush_user_tlb_range | ||
316 | .long \name\()_flush_kern_tlb_range | ||
317 | .ifnb \flags_smp | ||
318 | ALT_SMP(.long \flags_smp ) | ||
319 | ALT_UP(.long \flags_up ) | ||
320 | .else | ||
321 | .long \flags_up | ||
322 | .endif | ||
323 | .size \name\()_tlb_fns, . - \name\()_tlb_fns | ||
324 | .endm | ||
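None of the hunks shown here invoke define_tlb_functions (the tlb-*.S conversions are not in this excerpt); with hypothetical names, `define_tlb_functions foo, FOO_TLB_FLAGS_UP, FOO_TLB_FLAGS_SMP` would expand to:

	.type	foo_tlb_fns, #object		@ "foo" and FOO_TLB_FLAGS_* are
ENTRY(foo_tlb_fns)				@ placeholder names for illustration
	.long	foo_flush_user_tlb_range
	.long	foo_flush_kern_tlb_range
	ALT_SMP(.long FOO_TLB_FLAGS_SMP)	@ flags_smp given: SMP/UP alternative,
	ALT_UP(.long FOO_TLB_FLAGS_UP)		@ patched at runtime
	.size	foo_tlb_fns, . - foo_tlb_fns

With the optional flags_smp argument omitted, the .ifnb branch falls through to a single `.long flags_up`.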
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9d4f2ae63370..db52b0fb14a0 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -93,6 +93,17 @@ ENTRY(cpu_mohawk_do_idle) | |||
93 | mov pc, lr | 93 | mov pc, lr |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * flush_icache_all() | ||
97 | * | ||
98 | * Unconditionally clean and invalidate the entire icache. | ||
99 | */ | ||
100 | ENTRY(mohawk_flush_icache_all) | ||
101 | mov r0, #0 | ||
102 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
103 | mov pc, lr | ||
104 | ENDPROC(mohawk_flush_icache_all) | ||
105 | |||
106 | /* | ||
96 | * flush_user_cache_all() | 107 | * flush_user_cache_all() |
97 | * | 108 | * |
98 | * Clean and invalidate all cache entries in a particular | 109 | * Clean and invalidate all cache entries in a particular |
@@ -288,16 +299,8 @@ ENTRY(mohawk_dma_unmap_area) | |||
288 | mov pc, lr | 299 | mov pc, lr |
289 | ENDPROC(mohawk_dma_unmap_area) | 300 | ENDPROC(mohawk_dma_unmap_area) |
290 | 301 | ||
291 | ENTRY(mohawk_cache_fns) | 302 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
292 | .long mohawk_flush_kern_cache_all | 303 | define_cache_functions mohawk |
293 | .long mohawk_flush_user_cache_all | ||
294 | .long mohawk_flush_user_cache_range | ||
295 | .long mohawk_coherent_kern_range | ||
296 | .long mohawk_coherent_user_range | ||
297 | .long mohawk_flush_kern_dcache_area | ||
298 | .long mohawk_dma_map_area | ||
299 | .long mohawk_dma_unmap_area | ||
300 | .long mohawk_dma_flush_range | ||
301 | 304 | ||
302 | ENTRY(cpu_mohawk_dcache_clean_area) | 305 | ENTRY(cpu_mohawk_dcache_clean_area) |
303 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 306 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -373,42 +376,14 @@ mohawk_crval: | |||
373 | 376 | ||
374 | __INITDATA | 377 | __INITDATA |
375 | 378 | ||
376 | /* | 379 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
377 | * Purpose : Function pointers used to access above functions - all calls | 380 | define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort |
378 | * come through these | ||
379 | */ | ||
380 | .type mohawk_processor_functions, #object | ||
381 | mohawk_processor_functions: | ||
382 | .word v5t_early_abort | ||
383 | .word legacy_pabort | ||
384 | .word cpu_mohawk_proc_init | ||
385 | .word cpu_mohawk_proc_fin | ||
386 | .word cpu_mohawk_reset | ||
387 | .word cpu_mohawk_do_idle | ||
388 | .word cpu_mohawk_dcache_clean_area | ||
389 | .word cpu_mohawk_switch_mm | ||
390 | .word cpu_mohawk_set_pte_ext | ||
391 | .word 0 | ||
392 | .word 0 | ||
393 | .word 0 | ||
394 | .size mohawk_processor_functions, . - mohawk_processor_functions | ||
395 | 381 | ||
396 | .section ".rodata" | 382 | .section ".rodata" |
397 | 383 | ||
398 | .type cpu_arch_name, #object | 384 | string cpu_arch_name, "armv5te" |
399 | cpu_arch_name: | 385 | string cpu_elf_name, "v5" |
400 | .asciz "armv5te" | 386 | string cpu_mohawk_name, "Marvell 88SV331x" |
401 | .size cpu_arch_name, . - cpu_arch_name | ||
402 | |||
403 | .type cpu_elf_name, #object | ||
404 | cpu_elf_name: | ||
405 | .asciz "v5" | ||
406 | .size cpu_elf_name, . - cpu_elf_name | ||
407 | |||
408 | .type cpu_mohawk_name, #object | ||
409 | cpu_mohawk_name: | ||
410 | .asciz "Marvell 88SV331x" | ||
411 | .size cpu_mohawk_name, . - cpu_mohawk_name | ||
412 | 387 | ||
413 | .align | 388 | .align |
414 | 389 | ||
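The new mohawk_flush_icache_all above is needed because the old open-coded mohawk_cache_fns table had no flush_icache_all slot, while define_cache_functions emits one as the first entry of struct cpu_cache_fns; the expansion therefore begins:

ENTRY(mohawk_cache_fns)
	.long	mohawk_flush_icache_all		@ newly added in this patch
	.long	mohawk_flush_kern_cache_all
	.long	mohawk_flush_user_cache_all
	@ ... remaining slots follow the same \name\()_* pattern ...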
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 46f09ed16b98..d50ada26edd6 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
@@ -187,43 +187,14 @@ sa110_crval: | |||
187 | 187 | ||
188 | __INITDATA | 188 | __INITDATA |
189 | 189 | ||
190 | /* | 190 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
191 | * Purpose : Function pointers used to access above functions - all calls | 191 | define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort |
192 | * come through these | ||
193 | */ | ||
194 | |||
195 | .type sa110_processor_functions, #object | ||
196 | ENTRY(sa110_processor_functions) | ||
197 | .word v4_early_abort | ||
198 | .word legacy_pabort | ||
199 | .word cpu_sa110_proc_init | ||
200 | .word cpu_sa110_proc_fin | ||
201 | .word cpu_sa110_reset | ||
202 | .word cpu_sa110_do_idle | ||
203 | .word cpu_sa110_dcache_clean_area | ||
204 | .word cpu_sa110_switch_mm | ||
205 | .word cpu_sa110_set_pte_ext | ||
206 | .word 0 | ||
207 | .word 0 | ||
208 | .word 0 | ||
209 | .size sa110_processor_functions, . - sa110_processor_functions | ||
210 | 192 | ||
211 | .section ".rodata" | 193 | .section ".rodata" |
212 | 194 | ||
213 | .type cpu_arch_name, #object | 195 | string cpu_arch_name, "armv4" |
214 | cpu_arch_name: | 196 | string cpu_elf_name, "v4" |
215 | .asciz "armv4" | 197 | string cpu_sa110_name, "StrongARM-110" |
216 | .size cpu_arch_name, . - cpu_arch_name | ||
217 | |||
218 | .type cpu_elf_name, #object | ||
219 | cpu_elf_name: | ||
220 | .asciz "v4" | ||
221 | .size cpu_elf_name, . - cpu_elf_name | ||
222 | |||
223 | .type cpu_sa110_name, #object | ||
224 | cpu_sa110_name: | ||
225 | .asciz "StrongARM-110" | ||
226 | .size cpu_sa110_name, . - cpu_sa110_name | ||
227 | 198 | ||
228 | .align | 199 | .align |
229 | 200 | ||
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 184a9c997e36..07219c2ae114 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -34,7 +34,7 @@ | |||
34 | */ | 34 | */ |
35 | #define DCACHELINESIZE 32 | 35 | #define DCACHELINESIZE 32 |
36 | 36 | ||
37 | __INIT | 37 | .section .text |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * cpu_sa1100_proc_init() | 40 | * cpu_sa1100_proc_init() |
@@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init) | |||
45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland | 45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland |
46 | mov pc, lr | 46 | mov pc, lr |
47 | 47 | ||
48 | .section .text | ||
49 | |||
50 | /* | 48 | /* |
51 | * cpu_sa1100_proc_fin() | 49 | * cpu_sa1100_proc_fin() |
52 | * | 50 | * |
@@ -200,9 +198,6 @@ ENTRY(cpu_sa1100_do_resume) | |||
200 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | 198 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE |
201 | b cpu_resume_mmu | 199 | b cpu_resume_mmu |
202 | ENDPROC(cpu_sa1100_do_resume) | 200 | ENDPROC(cpu_sa1100_do_resume) |
203 | #else | ||
204 | #define cpu_sa1100_do_suspend 0 | ||
205 | #define cpu_sa1100_do_resume 0 | ||
206 | #endif | 201 | #endif |
207 | 202 | ||
208 | __CPUINIT | 203 | __CPUINIT |
@@ -236,59 +231,28 @@ sa1100_crval: | |||
236 | __INITDATA | 231 | __INITDATA |
237 | 232 | ||
238 | /* | 233 | /* |
239 | * Purpose : Function pointers used to access above functions - all calls | ||
240 | * come through these | ||
241 | */ | ||
242 | |||
243 | /* | ||
244 | * SA1100 and SA1110 share the same function calls | 234 | * SA1100 and SA1110 share the same function calls |
245 | */ | 235 | */ |
246 | .type sa1100_processor_functions, #object | ||
247 | ENTRY(sa1100_processor_functions) | ||
248 | .word v4_early_abort | ||
249 | .word legacy_pabort | ||
250 | .word cpu_sa1100_proc_init | ||
251 | .word cpu_sa1100_proc_fin | ||
252 | .word cpu_sa1100_reset | ||
253 | .word cpu_sa1100_do_idle | ||
254 | .word cpu_sa1100_dcache_clean_area | ||
255 | .word cpu_sa1100_switch_mm | ||
256 | .word cpu_sa1100_set_pte_ext | ||
257 | .word cpu_sa1100_suspend_size | ||
258 | .word cpu_sa1100_do_suspend | ||
259 | .word cpu_sa1100_do_resume | ||
260 | .size sa1100_processor_functions, . - sa1100_processor_functions | ||
261 | 236 | ||
262 | .section ".rodata" | 237 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
263 | 238 | define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1 | |
264 | .type cpu_arch_name, #object | ||
265 | cpu_arch_name: | ||
266 | .asciz "armv4" | ||
267 | .size cpu_arch_name, . - cpu_arch_name | ||
268 | 239 | ||
269 | .type cpu_elf_name, #object | 240 | .section ".rodata" |
270 | cpu_elf_name: | ||
271 | .asciz "v4" | ||
272 | .size cpu_elf_name, . - cpu_elf_name | ||
273 | |||
274 | .type cpu_sa1100_name, #object | ||
275 | cpu_sa1100_name: | ||
276 | .asciz "StrongARM-1100" | ||
277 | .size cpu_sa1100_name, . - cpu_sa1100_name | ||
278 | 241 | ||
279 | .type cpu_sa1110_name, #object | 242 | string cpu_arch_name, "armv4" |
280 | cpu_sa1110_name: | 243 | string cpu_elf_name, "v4" |
281 | .asciz "StrongARM-1110" | 244 | string cpu_sa1100_name, "StrongARM-1100" |
282 | .size cpu_sa1110_name, . - cpu_sa1110_name | 245 | string cpu_sa1110_name, "StrongARM-1110" |
283 | 246 | ||
284 | .align | 247 | .align |
285 | 248 | ||
286 | .section ".proc.info.init", #alloc, #execinstr | 249 | .section ".proc.info.init", #alloc, #execinstr |
287 | 250 | ||
288 | .type __sa1100_proc_info,#object | 251 | .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req |
289 | __sa1100_proc_info: | 252 | .type __\name\()_proc_info,#object |
290 | .long 0x4401a110 | 253 | __\name\()_proc_info: |
291 | .long 0xfffffff0 | 254 | .long \cpu_val |
255 | .long \cpu_mask | ||
292 | .long PMD_TYPE_SECT | \ | 256 | .long PMD_TYPE_SECT | \ |
293 | PMD_SECT_BUFFERABLE | \ | 257 | PMD_SECT_BUFFERABLE | \ |
294 | PMD_SECT_CACHEABLE | \ | 258 | PMD_SECT_CACHEABLE | \ |
@@ -301,32 +265,13 @@ __sa1100_proc_info: | |||
301 | .long cpu_arch_name | 265 | .long cpu_arch_name |
302 | .long cpu_elf_name | 266 | .long cpu_elf_name |
303 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT | 267 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT |
304 | .long cpu_sa1100_name | 268 | .long \cpu_name |
305 | .long sa1100_processor_functions | 269 | .long sa1100_processor_functions |
306 | .long v4wb_tlb_fns | 270 | .long v4wb_tlb_fns |
307 | .long v4_mc_user_fns | 271 | .long v4_mc_user_fns |
308 | .long v4wb_cache_fns | 272 | .long v4wb_cache_fns |
309 | .size __sa1100_proc_info, . - __sa1100_proc_info | 273 | .size __\name\()_proc_info, . - __\name\()_proc_info |
274 | .endm | ||
310 | 275 | ||
311 | .type __sa1110_proc_info,#object | 276 | sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name |
312 | __sa1110_proc_info: | 277 | sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name |
313 | .long 0x6901b110 | ||
314 | .long 0xfffffff0 | ||
315 | .long PMD_TYPE_SECT | \ | ||
316 | PMD_SECT_BUFFERABLE | \ | ||
317 | PMD_SECT_CACHEABLE | \ | ||
318 | PMD_SECT_AP_WRITE | \ | ||
319 | PMD_SECT_AP_READ | ||
320 | .long PMD_TYPE_SECT | \ | ||
321 | PMD_SECT_AP_WRITE | \ | ||
322 | PMD_SECT_AP_READ | ||
323 | b __sa1100_setup | ||
324 | .long cpu_arch_name | ||
325 | .long cpu_elf_name | ||
326 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT | ||
327 | .long cpu_sa1110_name | ||
328 | .long sa1100_processor_functions | ||
329 | .long v4wb_tlb_fns | ||
330 | .long v4_mc_user_fns | ||
331 | .long v4wb_cache_fns | ||
332 | .size __sa1110_proc_info, . - __sa1110_proc_info | ||
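With suspend=1 the macro also replaces the old `#define cpu_sa1100_do_suspend 0` / `#define cpu_sa1100_do_resume 0` fallbacks removed earlier in this file: the CONFIG_PM_SLEEP decision now happens inside the expansion. `define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1` ends with:

	.word	cpu_sa1100_set_pte_ext
	.word	cpu_sa1100_suspend_size
#ifdef CONFIG_PM_SLEEP
	.word	cpu_sa1100_do_suspend
	.word	cpu_sa1100_do_resume
#else
	.word	0				@ suspend/resume not built
	.word	0
#endif
	.size	sa1100_processor_functions, . - sa1100_processor_functions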
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 1d2b8451bf25..219138d2f158 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -56,6 +56,11 @@ ENTRY(cpu_v6_proc_fin) | |||
56 | */ | 56 | */ |
57 | .align 5 | 57 | .align 5 |
58 | ENTRY(cpu_v6_reset) | 58 | ENTRY(cpu_v6_reset) |
59 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register | ||
60 | bic r1, r1, #0x1 @ ...............m | ||
61 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU | ||
62 | mov r1, #0 | ||
63 | mcr p15, 0, r1, c7, c5, 4 @ ISB | ||
59 | mov pc, r0 | 64 | mov pc, r0 |
60 | 65 | ||
61 | /* | 66 | /* |
@@ -164,16 +169,9 @@ ENDPROC(cpu_v6_do_resume) | |||
164 | cpu_resume_l1_flags: | 169 | cpu_resume_l1_flags: |
165 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | 170 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) |
166 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | 171 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) |
167 | #else | ||
168 | #define cpu_v6_do_suspend 0 | ||
169 | #define cpu_v6_do_resume 0 | ||
170 | #endif | 172 | #endif |
171 | 173 | ||
172 | 174 | string cpu_v6_name, "ARMv6-compatible processor" | |
173 | .type cpu_v6_name, #object | ||
174 | cpu_v6_name: | ||
175 | .asciz "ARMv6-compatible processor" | ||
176 | .size cpu_v6_name, . - cpu_v6_name | ||
177 | 175 | ||
178 | .align | 176 | .align |
179 | 177 | ||
@@ -239,33 +237,13 @@ v6_crval: | |||
239 | 237 | ||
240 | __INITDATA | 238 | __INITDATA |
241 | 239 | ||
242 | .type v6_processor_functions, #object | 240 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
243 | ENTRY(v6_processor_functions) | 241 | define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1 |
244 | .word v6_early_abort | ||
245 | .word v6_pabort | ||
246 | .word cpu_v6_proc_init | ||
247 | .word cpu_v6_proc_fin | ||
248 | .word cpu_v6_reset | ||
249 | .word cpu_v6_do_idle | ||
250 | .word cpu_v6_dcache_clean_area | ||
251 | .word cpu_v6_switch_mm | ||
252 | .word cpu_v6_set_pte_ext | ||
253 | .word cpu_v6_suspend_size | ||
254 | .word cpu_v6_do_suspend | ||
255 | .word cpu_v6_do_resume | ||
256 | .size v6_processor_functions, . - v6_processor_functions | ||
257 | 242 | ||
258 | .section ".rodata" | 243 | .section ".rodata" |
259 | 244 | ||
260 | .type cpu_arch_name, #object | 245 | string cpu_arch_name, "armv6" |
261 | cpu_arch_name: | 246 | string cpu_elf_name, "v6" |
262 | .asciz "armv6" | ||
263 | .size cpu_arch_name, . - cpu_arch_name | ||
264 | |||
265 | .type cpu_elf_name, #object | ||
266 | cpu_elf_name: | ||
267 | .asciz "v6" | ||
268 | .size cpu_elf_name, . - cpu_elf_name | ||
269 | .align | 247 | .align |
270 | 248 | ||
271 | .section ".proc.info.init", #alloc, #execinstr | 249 | .section ".proc.info.init", #alloc, #execinstr |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 089c0b5e454f..a30e78542ccf 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -58,9 +58,16 @@ ENDPROC(cpu_v7_proc_fin) | |||
58 | * to what would be the reset vector. | 58 | * to what would be the reset vector. |
59 | * | 59 | * |
60 | * - loc - location to jump to for soft reset | 60 | * - loc - location to jump to for soft reset |
61 | * | ||
62 | * This code must be executed using a flat identity mapping with | ||
63 | * caches disabled. | ||
61 | */ | 64 | */ |
62 | .align 5 | 65 | .align 5 |
63 | ENTRY(cpu_v7_reset) | 66 | ENTRY(cpu_v7_reset) |
67 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register | ||
68 | bic r1, r1, #0x1 @ ...............m | ||
69 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU | ||
70 | isb | ||
64 | mov pc, r0 | 71 | mov pc, r0 |
65 | ENDPROC(cpu_v7_reset) | 72 | ENDPROC(cpu_v7_reset) |
66 | 73 | ||
@@ -173,8 +180,7 @@ ENTRY(cpu_v7_set_pte_ext) | |||
173 | mov pc, lr | 180 | mov pc, lr |
174 | ENDPROC(cpu_v7_set_pte_ext) | 181 | ENDPROC(cpu_v7_set_pte_ext) |
175 | 182 | ||
176 | cpu_v7_name: | 183 | string cpu_v7_name, "ARMv7 Processor" |
177 | .ascii "ARMv7 Processor" | ||
178 | .align | 184 | .align |
179 | 185 | ||
180 | /* | 186 | /* |
@@ -257,9 +263,6 @@ ENDPROC(cpu_v7_do_resume) | |||
257 | cpu_resume_l1_flags: | 263 | cpu_resume_l1_flags: |
258 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | 264 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) |
259 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | 265 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) |
260 | #else | ||
261 | #define cpu_v7_do_suspend 0 | ||
262 | #define cpu_v7_do_resume 0 | ||
263 | #endif | 266 | #endif |
264 | 267 | ||
265 | __CPUINIT | 268 | __CPUINIT |
@@ -279,13 +282,20 @@ cpu_resume_l1_flags: | |||
279 | * It is assumed that: | 282 | * It is assumed that: |
280 | * - cache type register is implemented | 283 | * - cache type register is implemented |
281 | */ | 284 | */ |
285 | __v7_ca5mp_setup: | ||
282 | __v7_ca9mp_setup: | 286 | __v7_ca9mp_setup: |
287 | mov r10, #(1 << 0) @ TLB ops broadcasting | ||
288 | b 1f | ||
289 | __v7_ca15mp_setup: | ||
290 | mov r10, #0 | ||
291 | 1: | ||
283 | #ifdef CONFIG_SMP | 292 | #ifdef CONFIG_SMP |
284 | ALT_SMP(mrc p15, 0, r0, c1, c0, 1) | 293 | ALT_SMP(mrc p15, 0, r0, c1, c0, 1) |
285 | ALT_UP(mov r0, #(1 << 6)) @ fake it for UP | 294 | ALT_UP(mov r0, #(1 << 6)) @ fake it for UP |
286 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? | 295 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? |
287 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and | 296 | orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode |
288 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting | 297 | orreq r0, r0, r10 @ Enable CPU-specific SMP bits |
298 | mcreq p15, 0, r0, c1, c0, 1 | ||
289 | #endif | 299 | #endif |
290 | __v7_setup: | 300 | __v7_setup: |
291 | adr r12, __v7_setup_stack @ the local stack | 301 | adr r12, __v7_setup_stack @ the local stack |
@@ -411,94 +421,75 @@ __v7_setup_stack: | |||
411 | 421 | ||
412 | __INITDATA | 422 | __INITDATA |
413 | 423 | ||
414 | .type v7_processor_functions, #object | 424 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
415 | ENTRY(v7_processor_functions) | 425 | define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 |
416 | .word v7_early_abort | ||
417 | .word v7_pabort | ||
418 | .word cpu_v7_proc_init | ||
419 | .word cpu_v7_proc_fin | ||
420 | .word cpu_v7_reset | ||
421 | .word cpu_v7_do_idle | ||
422 | .word cpu_v7_dcache_clean_area | ||
423 | .word cpu_v7_switch_mm | ||
424 | .word cpu_v7_set_pte_ext | ||
425 | .word cpu_v7_suspend_size | ||
426 | .word cpu_v7_do_suspend | ||
427 | .word cpu_v7_do_resume | ||
428 | .size v7_processor_functions, . - v7_processor_functions | ||
429 | 426 | ||
430 | .section ".rodata" | 427 | .section ".rodata" |
431 | 428 | ||
432 | .type cpu_arch_name, #object | 429 | string cpu_arch_name, "armv7" |
433 | cpu_arch_name: | 430 | string cpu_elf_name, "v7" |
434 | .asciz "armv7" | ||
435 | .size cpu_arch_name, . - cpu_arch_name | ||
436 | |||
437 | .type cpu_elf_name, #object | ||
438 | cpu_elf_name: | ||
439 | .asciz "v7" | ||
440 | .size cpu_elf_name, . - cpu_elf_name | ||
441 | .align | 431 | .align |
442 | 432 | ||
443 | .section ".proc.info.init", #alloc, #execinstr | 433 | .section ".proc.info.init", #alloc, #execinstr |
444 | 434 | ||
445 | .type __v7_ca9mp_proc_info, #object | 435 | /* |
446 | __v7_ca9mp_proc_info: | 436 | * Standard v7 proc info content |
447 | .long 0x410fc090 @ Required ID value | 437 | */ |
448 | .long 0xff0ffff0 @ Mask for ID | 438 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 |
449 | ALT_SMP(.long \ | 439 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
450 | PMD_TYPE_SECT | \ | 440 | PMD_FLAGS_SMP | \mm_mmuflags) |
451 | PMD_SECT_AP_WRITE | \ | 441 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
452 | PMD_SECT_AP_READ | \ | 442 | PMD_FLAGS_UP | \mm_mmuflags) |
453 | PMD_FLAGS_SMP) | 443 | .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ |
454 | ALT_UP(.long \ | 444 | PMD_SECT_AP_READ | \io_mmuflags |
455 | PMD_TYPE_SECT | \ | 445 | W(b) \initfunc |
456 | PMD_SECT_AP_WRITE | \ | ||
457 | PMD_SECT_AP_READ | \ | ||
458 | PMD_FLAGS_UP) | ||
459 | .long PMD_TYPE_SECT | \ | ||
460 | PMD_SECT_XN | \ | ||
461 | PMD_SECT_AP_WRITE | \ | ||
462 | PMD_SECT_AP_READ | ||
463 | W(b) __v7_ca9mp_setup | ||
464 | .long cpu_arch_name | 446 | .long cpu_arch_name |
465 | .long cpu_elf_name | 447 | .long cpu_elf_name |
466 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | 448 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ |
449 | HWCAP_EDSP | HWCAP_TLS | \hwcaps | ||
467 | .long cpu_v7_name | 450 | .long cpu_v7_name |
468 | .long v7_processor_functions | 451 | .long v7_processor_functions |
469 | .long v7wbi_tlb_fns | 452 | .long v7wbi_tlb_fns |
470 | .long v6_user_fns | 453 | .long v6_user_fns |
471 | .long v7_cache_fns | 454 | .long v7_cache_fns |
455 | .endm | ||
456 | |||
457 | /* | ||
458 | * ARM Ltd. Cortex A5 processor. | ||
459 | */ | ||
460 | .type __v7_ca5mp_proc_info, #object | ||
461 | __v7_ca5mp_proc_info: | ||
462 | .long 0x410fc050 | ||
463 | .long 0xff0ffff0 | ||
464 | __v7_proc __v7_ca5mp_setup | ||
465 | .size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info | ||
466 | |||
467 | /* | ||
468 | * ARM Ltd. Cortex A9 processor. | ||
469 | */ | ||
470 | .type __v7_ca9mp_proc_info, #object | ||
471 | __v7_ca9mp_proc_info: | ||
472 | .long 0x410fc090 | ||
473 | .long 0xff0ffff0 | ||
474 | __v7_proc __v7_ca9mp_setup | ||
472 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | 475 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info |
473 | 476 | ||
474 | /* | 477 | /* |
478 | * ARM Ltd. Cortex A15 processor. | ||
479 | */ | ||
480 | .type __v7_ca15mp_proc_info, #object | ||
481 | __v7_ca15mp_proc_info: | ||
482 | .long 0x410fc0f0 | ||
483 | .long 0xff0ffff0 | ||
484 | __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV | ||
485 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info | ||
486 | |||
487 | /* | ||
475 | * Match any ARMv7 processor core. | 488 | * Match any ARMv7 processor core. |
476 | */ | 489 | */ |
477 | .type __v7_proc_info, #object | 490 | .type __v7_proc_info, #object |
478 | __v7_proc_info: | 491 | __v7_proc_info: |
479 | .long 0x000f0000 @ Required ID value | 492 | .long 0x000f0000 @ Required ID value |
480 | .long 0x000f0000 @ Mask for ID | 493 | .long 0x000f0000 @ Mask for ID |
481 | ALT_SMP(.long \ | 494 | __v7_proc __v7_setup |
482 | PMD_TYPE_SECT | \ | ||
483 | PMD_SECT_AP_WRITE | \ | ||
484 | PMD_SECT_AP_READ | \ | ||
485 | PMD_FLAGS_SMP) | ||
486 | ALT_UP(.long \ | ||
487 | PMD_TYPE_SECT | \ | ||
488 | PMD_SECT_AP_WRITE | \ | ||
489 | PMD_SECT_AP_READ | \ | ||
490 | PMD_FLAGS_UP) | ||
491 | .long PMD_TYPE_SECT | \ | ||
492 | PMD_SECT_XN | \ | ||
493 | PMD_SECT_AP_WRITE | \ | ||
494 | PMD_SECT_AP_READ | ||
495 | W(b) __v7_setup | ||
496 | .long cpu_arch_name | ||
497 | .long cpu_elf_name | ||
498 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | ||
499 | .long cpu_v7_name | ||
500 | .long v7_processor_functions | ||
501 | .long v7wbi_tlb_fns | ||
502 | .long v6_user_fns | ||
503 | .long v7_cache_fns | ||
504 | .size __v7_proc_info, . - __v7_proc_info | 495 | .size __v7_proc_info, . - __v7_proc_info |
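The __v7_proc helper reduces each per-core proc_info record to its ID value, mask and setup entry point; for example, the Cortex-A15 entry `__v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV` expands (with the default mm_mmuflags/io_mmuflags of 0) roughly to:

	ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
		PMD_FLAGS_SMP)
	ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
		PMD_FLAGS_UP)
	.long	PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ
	W(b)	__v7_ca15mp_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
		HWCAP_EDSP | HWCAP_TLS | HWCAP_IDIV
	.long	cpu_v7_name
	.long	v7_processor_functions
	.long	v7wbi_tlb_fns
	.long	v6_user_fns
	.long	v7_cache_fns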
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 596213699f37..64f1fc7edf0a 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -335,17 +335,8 @@ ENTRY(xsc3_dma_unmap_area) | |||
335 | mov pc, lr | 335 | mov pc, lr |
336 | ENDPROC(xsc3_dma_unmap_area) | 336 | ENDPROC(xsc3_dma_unmap_area) |
337 | 337 | ||
338 | ENTRY(xsc3_cache_fns) | 338 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
339 | .long xsc3_flush_icache_all | 339 | define_cache_functions xsc3 |
340 | .long xsc3_flush_kern_cache_all | ||
341 | .long xsc3_flush_user_cache_all | ||
342 | .long xsc3_flush_user_cache_range | ||
343 | .long xsc3_coherent_kern_range | ||
344 | .long xsc3_coherent_user_range | ||
345 | .long xsc3_flush_kern_dcache_area | ||
346 | .long xsc3_dma_map_area | ||
347 | .long xsc3_dma_unmap_area | ||
348 | .long xsc3_dma_flush_range | ||
349 | 340 | ||
350 | ENTRY(cpu_xsc3_dcache_clean_area) | 341 | ENTRY(cpu_xsc3_dcache_clean_area) |
351 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 342 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
@@ -454,9 +445,6 @@ ENTRY(cpu_xsc3_do_resume) | |||
454 | ldr r3, =0x542e @ section flags | 445 | ldr r3, =0x542e @ section flags |
455 | b cpu_resume_mmu | 446 | b cpu_resume_mmu |
456 | ENDPROC(cpu_xsc3_do_resume) | 447 | ENDPROC(cpu_xsc3_do_resume) |
457 | #else | ||
458 | #define cpu_xsc3_do_suspend 0 | ||
459 | #define cpu_xsc3_do_resume 0 | ||
460 | #endif | 448 | #endif |
461 | 449 | ||
462 | __CPUINIT | 450 | __CPUINIT |
@@ -503,52 +491,24 @@ xsc3_crval: | |||
503 | 491 | ||
504 | __INITDATA | 492 | __INITDATA |
505 | 493 | ||
506 | /* | 494 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
507 | * Purpose : Function pointers used to access above functions - all calls | 495 | define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 |
508 | * come through these | ||
509 | */ | ||
510 | |||
511 | .type xsc3_processor_functions, #object | ||
512 | ENTRY(xsc3_processor_functions) | ||
513 | .word v5t_early_abort | ||
514 | .word legacy_pabort | ||
515 | .word cpu_xsc3_proc_init | ||
516 | .word cpu_xsc3_proc_fin | ||
517 | .word cpu_xsc3_reset | ||
518 | .word cpu_xsc3_do_idle | ||
519 | .word cpu_xsc3_dcache_clean_area | ||
520 | .word cpu_xsc3_switch_mm | ||
521 | .word cpu_xsc3_set_pte_ext | ||
522 | .word cpu_xsc3_suspend_size | ||
523 | .word cpu_xsc3_do_suspend | ||
524 | .word cpu_xsc3_do_resume | ||
525 | .size xsc3_processor_functions, . - xsc3_processor_functions | ||
526 | 496 | ||
527 | .section ".rodata" | 497 | .section ".rodata" |
528 | 498 | ||
529 | .type cpu_arch_name, #object | 499 | string cpu_arch_name, "armv5te" |
530 | cpu_arch_name: | 500 | string cpu_elf_name, "v5" |
531 | .asciz "armv5te" | 501 | string cpu_xsc3_name, "XScale-V3 based processor" |
532 | .size cpu_arch_name, . - cpu_arch_name | ||
533 | |||
534 | .type cpu_elf_name, #object | ||
535 | cpu_elf_name: | ||
536 | .asciz "v5" | ||
537 | .size cpu_elf_name, . - cpu_elf_name | ||
538 | |||
539 | .type cpu_xsc3_name, #object | ||
540 | cpu_xsc3_name: | ||
541 | .asciz "XScale-V3 based processor" | ||
542 | .size cpu_xsc3_name, . - cpu_xsc3_name | ||
543 | 502 | ||
544 | .align | 503 | .align |
545 | 504 | ||
546 | .section ".proc.info.init", #alloc, #execinstr | 505 | .section ".proc.info.init", #alloc, #execinstr |
547 | 506 | ||
548 | .type __xsc3_proc_info,#object | 507 | .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req |
549 | __xsc3_proc_info: | 508 | .type __\name\()_proc_info,#object |
550 | .long 0x69056000 | 509 | __\name\()_proc_info: |
551 | .long 0xffffe000 | 510 | .long \cpu_val |
511 | .long \cpu_mask | ||
552 | .long PMD_TYPE_SECT | \ | 512 | .long PMD_TYPE_SECT | \ |
553 | PMD_SECT_BUFFERABLE | \ | 513 | PMD_SECT_BUFFERABLE | \ |
554 | PMD_SECT_CACHEABLE | \ | 514 | PMD_SECT_CACHEABLE | \ |
@@ -566,29 +526,10 @@ __xsc3_proc_info: | |||
566 | .long v4wbi_tlb_fns | 526 | .long v4wbi_tlb_fns |
567 | .long xsc3_mc_user_fns | 527 | .long xsc3_mc_user_fns |
568 | .long xsc3_cache_fns | 528 | .long xsc3_cache_fns |
569 | .size __xsc3_proc_info, . - __xsc3_proc_info | 529 | .size __\name\()_proc_info, . - __\name\()_proc_info |
530 | .endm | ||
570 | 531 | ||
571 | /* Note: PXA935 changed its implementor ID from Intel to Marvell */ | 532 | xsc3_proc_info xsc3, 0x69056000, 0xffffe000 |
572 | 533 | ||
573 | .type __xsc3_pxa935_proc_info,#object | 534 | /* Note: PXA935 changed its implementor ID from Intel to Marvell */ |
574 | __xsc3_pxa935_proc_info: | 535 | xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000 |
575 | .long 0x56056000 | ||
576 | .long 0xffffe000 | ||
577 | .long PMD_TYPE_SECT | \ | ||
578 | PMD_SECT_BUFFERABLE | \ | ||
579 | PMD_SECT_CACHEABLE | \ | ||
580 | PMD_SECT_AP_WRITE | \ | ||
581 | PMD_SECT_AP_READ | ||
582 | .long PMD_TYPE_SECT | \ | ||
583 | PMD_SECT_AP_WRITE | \ | ||
584 | PMD_SECT_AP_READ | ||
585 | b __xsc3_setup | ||
586 | .long cpu_arch_name | ||
587 | .long cpu_elf_name | ||
588 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
589 | .long cpu_xsc3_name | ||
590 | .long xsc3_processor_functions | ||
591 | .long v4wbi_tlb_fns | ||
592 | .long xsc3_mc_user_fns | ||
593 | .long xsc3_cache_fns | ||
594 | .size __xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info | ||
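The two xsc3_proc_info invocations above produce records that differ only in the CPU ID and mask words; everything else (setup entry, HWCAPs, cpu_xsc3_name, xsc3_processor_functions, v4wbi_tlb_fns, xsc3_mc_user_fns, xsc3_cache_fns) is shared, which is exactly what the removed __xsc3_proc_info and __xsc3_pxa935_proc_info duplicated by hand. The PXA935 record begins:

__xsc3_pxa935_proc_info:
	.long	0x56056000			@ Marvell implementor ID
	.long	0xffffe000
	@ ... remainder identical to the __xsc3_proc_info expansion ...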
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 42af97664c9d..fbc06e55b87a 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -390,12 +390,12 @@ ENDPROC(xscale_dma_map_area) | |||
390 | * - size - size of region | 390 | * - size - size of region |
391 | * - dir - DMA direction | 391 | * - dir - DMA direction |
392 | */ | 392 | */ |
393 | ENTRY(xscale_dma_a0_map_area) | 393 | ENTRY(xscale_80200_A0_A1_dma_map_area) |
394 | add r1, r1, r0 | 394 | add r1, r1, r0 |
395 | teq r2, #DMA_TO_DEVICE | 395 | teq r2, #DMA_TO_DEVICE |
396 | beq xscale_dma_clean_range | 396 | beq xscale_dma_clean_range |
397 | b xscale_dma_flush_range | 397 | b xscale_dma_flush_range |
398 | ENDPROC(xscale_dma_a0_map_area) | 398 | ENDPROC(xscale_80200_A0_A1_dma_map_area) |
399 | 399 | ||
400 | /* | 400 | /* |
401 | * dma_unmap_area(start, size, dir) | 401 | * dma_unmap_area(start, size, dir) |
@@ -407,17 +407,8 @@ ENTRY(xscale_dma_unmap_area) | |||
407 | mov pc, lr | 407 | mov pc, lr |
408 | ENDPROC(xscale_dma_unmap_area) | 408 | ENDPROC(xscale_dma_unmap_area) |
409 | 409 | ||
410 | ENTRY(xscale_cache_fns) | 410 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
411 | .long xscale_flush_icache_all | 411 | define_cache_functions xscale |
412 | .long xscale_flush_kern_cache_all | ||
413 | .long xscale_flush_user_cache_all | ||
414 | .long xscale_flush_user_cache_range | ||
415 | .long xscale_coherent_kern_range | ||
416 | .long xscale_coherent_user_range | ||
417 | .long xscale_flush_kern_dcache_area | ||
418 | .long xscale_dma_map_area | ||
419 | .long xscale_dma_unmap_area | ||
420 | .long xscale_dma_flush_range | ||
421 | 412 | ||
422 | /* | 413 | /* |
423 | * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't | 414 | * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't |
@@ -432,16 +423,28 @@ ENTRY(xscale_cache_fns) | |||
432 | * revision January 22, 2003, available at: | 423 | * revision January 22, 2003, available at: |
433 | * http://www.intel.com/design/iio/specupdt/273415.htm | 424 | * http://www.intel.com/design/iio/specupdt/273415.htm |
434 | */ | 425 | */ |
435 | ENTRY(xscale_80200_A0_A1_cache_fns) | 426 | .macro a0_alias basename |
436 | .long xscale_flush_kern_cache_all | 427 | .globl xscale_80200_A0_A1_\basename |
437 | .long xscale_flush_user_cache_all | 428 | .type xscale_80200_A0_A1_\basename , %function |
438 | .long xscale_flush_user_cache_range | 429 | .equ xscale_80200_A0_A1_\basename , xscale_\basename |
439 | .long xscale_coherent_kern_range | 430 | .endm |
440 | .long xscale_coherent_user_range | 431 | |
441 | .long xscale_flush_kern_dcache_area | 432 | /* |
442 | .long xscale_dma_a0_map_area | 433 | * Most of the cache functions are unchanged for these processor revisions. |
443 | .long xscale_dma_unmap_area | 434 | * Export suitable alias symbols for the unchanged functions: |
444 | .long xscale_dma_flush_range | 435 | */ |
436 | a0_alias flush_icache_all | ||
437 | a0_alias flush_user_cache_all | ||
438 | a0_alias flush_kern_cache_all | ||
439 | a0_alias flush_user_cache_range | ||
440 | a0_alias coherent_kern_range | ||
441 | a0_alias coherent_user_range | ||
442 | a0_alias flush_kern_dcache_area | ||
443 | a0_alias dma_flush_range | ||
444 | a0_alias dma_unmap_area | ||
445 | |||
446 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | ||
447 | define_cache_functions xscale_80200_A0_A1 | ||
445 | 448 | ||
446 | ENTRY(cpu_xscale_dcache_clean_area) | 449 | ENTRY(cpu_xscale_dcache_clean_area) |
447 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 450 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -551,9 +554,6 @@ ENTRY(cpu_xscale_do_resume) | |||
551 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | 554 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE |
552 | b cpu_resume_mmu | 555 | b cpu_resume_mmu |
553 | ENDPROC(cpu_xscale_do_resume) | 556 | ENDPROC(cpu_xscale_do_resume) |
554 | #else | ||
555 | #define cpu_xscale_do_suspend 0 | ||
556 | #define cpu_xscale_do_resume 0 | ||
557 | #endif | 557 | #endif |
558 | 558 | ||
559 | __CPUINIT | 559 | __CPUINIT |
@@ -587,432 +587,74 @@ xscale_crval: | |||
587 | 587 | ||
588 | __INITDATA | 588 | __INITDATA |
589 | 589 | ||
590 | /* | 590 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
591 | * Purpose : Function pointers used to access above functions - all calls | 591 | define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 |
592 | * come through these | ||
593 | */ | ||
594 | |||
595 | .type xscale_processor_functions, #object | ||
596 | ENTRY(xscale_processor_functions) | ||
597 | .word v5t_early_abort | ||
598 | .word legacy_pabort | ||
599 | .word cpu_xscale_proc_init | ||
600 | .word cpu_xscale_proc_fin | ||
601 | .word cpu_xscale_reset | ||
602 | .word cpu_xscale_do_idle | ||
603 | .word cpu_xscale_dcache_clean_area | ||
604 | .word cpu_xscale_switch_mm | ||
605 | .word cpu_xscale_set_pte_ext | ||
606 | .word cpu_xscale_suspend_size | ||
607 | .word cpu_xscale_do_suspend | ||
608 | .word cpu_xscale_do_resume | ||
609 | .size xscale_processor_functions, . - xscale_processor_functions | ||
610 | 592 | ||
611 | .section ".rodata" | 593 | .section ".rodata" |
612 | 594 | ||
613 | .type cpu_arch_name, #object | 595 | string cpu_arch_name, "armv5te" |
614 | cpu_arch_name: | 596 | string cpu_elf_name, "v5" |
615 | .asciz "armv5te" | 597 | |
616 | .size cpu_arch_name, . - cpu_arch_name | 598 | string cpu_80200_A0_A1_name, "XScale-80200 A0/A1" |
617 | 599 | string cpu_80200_name, "XScale-80200" | |
618 | .type cpu_elf_name, #object | 600 | string cpu_80219_name, "XScale-80219" |
619 | cpu_elf_name: | 601 | string cpu_8032x_name, "XScale-IOP8032x Family" |
620 | .asciz "v5" | 602 | string cpu_8033x_name, "XScale-IOP8033x Family" |
621 | .size cpu_elf_name, . - cpu_elf_name | 603 | string cpu_pxa250_name, "XScale-PXA250" |
622 | 604 | string cpu_pxa210_name, "XScale-PXA210" | |
623 | .type cpu_80200_A0_A1_name, #object | 605 | string cpu_ixp42x_name, "XScale-IXP42x Family" |
624 | cpu_80200_A0_A1_name: | 606 | string cpu_ixp43x_name, "XScale-IXP43x Family" |
625 | .asciz "XScale-80200 A0/A1" | 607 | string cpu_ixp46x_name, "XScale-IXP46x Family" |
626 | .size cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name | 608 | string cpu_ixp2400_name, "XScale-IXP2400" |
627 | 609 | string cpu_ixp2800_name, "XScale-IXP2800" | |
628 | .type cpu_80200_name, #object | 610 | string cpu_pxa255_name, "XScale-PXA255" |
629 | cpu_80200_name: | 611 | string cpu_pxa270_name, "XScale-PXA270" |
630 | .asciz "XScale-80200" | ||
631 | .size cpu_80200_name, . - cpu_80200_name | ||
632 | |||
633 | .type cpu_80219_name, #object | ||
634 | cpu_80219_name: | ||
635 | .asciz "XScale-80219" | ||
636 | .size cpu_80219_name, . - cpu_80219_name | ||
637 | |||
638 | .type cpu_8032x_name, #object | ||
639 | cpu_8032x_name: | ||
640 | .asciz "XScale-IOP8032x Family" | ||
641 | .size cpu_8032x_name, . - cpu_8032x_name | ||
642 | |||
643 | .type cpu_8033x_name, #object | ||
644 | cpu_8033x_name: | ||
645 | .asciz "XScale-IOP8033x Family" | ||
646 | .size cpu_8033x_name, . - cpu_8033x_name | ||
647 | |||
648 | .type cpu_pxa250_name, #object | ||
649 | cpu_pxa250_name: | ||
650 | .asciz "XScale-PXA250" | ||
651 | .size cpu_pxa250_name, . - cpu_pxa250_name | ||
652 | |||
653 | .type cpu_pxa210_name, #object | ||
654 | cpu_pxa210_name: | ||
655 | .asciz "XScale-PXA210" | ||
656 | .size cpu_pxa210_name, . - cpu_pxa210_name | ||
657 | |||
658 | .type cpu_ixp42x_name, #object | ||
659 | cpu_ixp42x_name: | ||
660 | .asciz "XScale-IXP42x Family" | ||
661 | .size cpu_ixp42x_name, . - cpu_ixp42x_name | ||
662 | |||
663 | .type cpu_ixp43x_name, #object | ||
664 | cpu_ixp43x_name: | ||
665 | .asciz "XScale-IXP43x Family" | ||
666 | .size cpu_ixp43x_name, . - cpu_ixp43x_name | ||
667 | |||
668 | .type cpu_ixp46x_name, #object | ||
669 | cpu_ixp46x_name: | ||
670 | .asciz "XScale-IXP46x Family" | ||
671 | .size cpu_ixp46x_name, . - cpu_ixp46x_name | ||
672 | |||
673 | .type cpu_ixp2400_name, #object | ||
674 | cpu_ixp2400_name: | ||
675 | .asciz "XScale-IXP2400" | ||
676 | .size cpu_ixp2400_name, . - cpu_ixp2400_name | ||
677 | |||
678 | .type cpu_ixp2800_name, #object | ||
679 | cpu_ixp2800_name: | ||
680 | .asciz "XScale-IXP2800" | ||
681 | .size cpu_ixp2800_name, . - cpu_ixp2800_name | ||
682 | |||
683 | .type cpu_pxa255_name, #object | ||
684 | cpu_pxa255_name: | ||
685 | .asciz "XScale-PXA255" | ||
686 | .size cpu_pxa255_name, . - cpu_pxa255_name | ||
687 | |||
688 | .type cpu_pxa270_name, #object | ||
689 | cpu_pxa270_name: | ||
690 | .asciz "XScale-PXA270" | ||
691 | .size cpu_pxa270_name, . - cpu_pxa270_name | ||
692 | 612 | ||
693 | .align | 613 | .align |
694 | 614 | ||
695 | .section ".proc.info.init", #alloc, #execinstr | 615 | .section ".proc.info.init", #alloc, #execinstr |
696 | 616 | ||
697 | .type __80200_A0_A1_proc_info,#object | 617 | .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache |
698 | __80200_A0_A1_proc_info: | 618 | .type __\name\()_proc_info,#object |
699 | .long 0x69052000 | 619 | __\name\()_proc_info: |
700 | .long 0xfffffffe | 620 | .long \cpu_val |
701 | .long PMD_TYPE_SECT | \ | 621 | .long \cpu_mask |
702 | PMD_SECT_BUFFERABLE | \ | 622 | .long PMD_TYPE_SECT | \ |
703 | PMD_SECT_CACHEABLE | \ | ||
704 | PMD_SECT_AP_WRITE | \ | ||
705 | PMD_SECT_AP_READ | ||
706 | .long PMD_TYPE_SECT | \ | ||
707 | PMD_SECT_AP_WRITE | \ | ||
708 | PMD_SECT_AP_READ | ||
709 | b __xscale_setup | ||
710 | .long cpu_arch_name | ||
711 | .long cpu_elf_name | ||
712 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
713 | .long cpu_80200_name | ||
714 | .long xscale_processor_functions | ||
715 | .long v4wbi_tlb_fns | ||
716 | .long xscale_mc_user_fns | ||
717 | .long xscale_80200_A0_A1_cache_fns | ||
718 | .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info | ||
719 | |||
720 | .type __80200_proc_info,#object | ||
721 | __80200_proc_info: | ||
722 | .long 0x69052000 | ||
723 | .long 0xfffffff0 | ||
724 | .long PMD_TYPE_SECT | \ | ||
725 | PMD_SECT_BUFFERABLE | \ | 623 | PMD_SECT_BUFFERABLE | \ |
726 | PMD_SECT_CACHEABLE | \ | 624 | PMD_SECT_CACHEABLE | \ |
727 | PMD_SECT_AP_WRITE | \ | 625 | PMD_SECT_AP_WRITE | \ |
728 | PMD_SECT_AP_READ | 626 | PMD_SECT_AP_READ |
729 | .long PMD_TYPE_SECT | \ | 627 | .long PMD_TYPE_SECT | \ |
730 | PMD_SECT_AP_WRITE | \ | 628 | PMD_SECT_AP_WRITE | \ |
731 | PMD_SECT_AP_READ | 629 | PMD_SECT_AP_READ |
732 | b __xscale_setup | 630 | b __xscale_setup |
733 | .long cpu_arch_name | 631 | .long cpu_arch_name |
734 | .long cpu_elf_name | 632 | .long cpu_elf_name |
735 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 633 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP |
736 | .long cpu_80200_name | 634 | .long \cpu_name |
737 | .long xscale_processor_functions | 635 | .long xscale_processor_functions |
738 | .long v4wbi_tlb_fns | 636 | .long v4wbi_tlb_fns |
739 | .long xscale_mc_user_fns | 637 | .long xscale_mc_user_fns |
740 | .long xscale_cache_fns | 638 | .ifb \cache |
741 | .size __80200_proc_info, . - __80200_proc_info | 639 | .long xscale_cache_fns |
742 | 640 | .else | |
743 | .type __80219_proc_info,#object | 641 | .long \cache |
744 | __80219_proc_info: | 642 | .endif |
745 | .long 0x69052e20 | 643 | .size __\name\()_proc_info, . - __\name\()_proc_info |
746 | .long 0xffffffe0 | 644 | .endm |
747 | .long PMD_TYPE_SECT | \ | 645 | |
748 | PMD_SECT_BUFFERABLE | \ | 646 | xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \ |
749 | PMD_SECT_CACHEABLE | \ | 647 | cache=xscale_80200_A0_A1_cache_fns |
750 | PMD_SECT_AP_WRITE | \ | 648 | xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name |
751 | PMD_SECT_AP_READ | 649 | xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name |
752 | .long PMD_TYPE_SECT | \ | 650 | xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name |
753 | PMD_SECT_AP_WRITE | \ | 651 | xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name |
754 | PMD_SECT_AP_READ | 652 | xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name |
755 | b __xscale_setup | 653 | xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name |
756 | .long cpu_arch_name | 654 | xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name |
757 | .long cpu_elf_name | 655 | xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name |
758 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 656 | xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name |
759 | .long cpu_80219_name | 657 | xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name |
760 | .long xscale_processor_functions | 658 | xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name |
761 | .long v4wbi_tlb_fns | 659 | xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name |
762 | .long xscale_mc_user_fns | 660 | xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name |
763 | .long xscale_cache_fns | ||
764 | .size __80219_proc_info, . - __80219_proc_info | ||
765 | |||
766 | .type __8032x_proc_info,#object | ||
767 | __8032x_proc_info: | ||
768 | .long 0x69052420 | ||
769 | .long 0xfffff7e0 | ||
770 | .long PMD_TYPE_SECT | \ | ||
771 | PMD_SECT_BUFFERABLE | \ | ||
772 | PMD_SECT_CACHEABLE | \ | ||
773 | PMD_SECT_AP_WRITE | \ | ||
774 | PMD_SECT_AP_READ | ||
775 | .long PMD_TYPE_SECT | \ | ||
776 | PMD_SECT_AP_WRITE | \ | ||
777 | PMD_SECT_AP_READ | ||
778 | b __xscale_setup | ||
779 | .long cpu_arch_name | ||
780 | .long cpu_elf_name | ||
781 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
782 | .long cpu_8032x_name | ||
783 | .long xscale_processor_functions | ||
784 | .long v4wbi_tlb_fns | ||
785 | .long xscale_mc_user_fns | ||
786 | .long xscale_cache_fns | ||
787 | .size __8032x_proc_info, . - __8032x_proc_info | ||
788 | |||
789 | .type __8033x_proc_info,#object | ||
790 | __8033x_proc_info: | ||
791 | .long 0x69054010 | ||
792 | .long 0xfffffd30 | ||
793 | .long PMD_TYPE_SECT | \ | ||
794 | PMD_SECT_BUFFERABLE | \ | ||
795 | PMD_SECT_CACHEABLE | \ | ||
796 | PMD_SECT_AP_WRITE | \ | ||
797 | PMD_SECT_AP_READ | ||
798 | .long PMD_TYPE_SECT | \ | ||
799 | PMD_SECT_AP_WRITE | \ | ||
800 | PMD_SECT_AP_READ | ||
801 | b __xscale_setup | ||
802 | .long cpu_arch_name | ||
803 | .long cpu_elf_name | ||
804 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
805 | .long cpu_8033x_name | ||
806 | .long xscale_processor_functions | ||
807 | .long v4wbi_tlb_fns | ||
808 | .long xscale_mc_user_fns | ||
809 | .long xscale_cache_fns | ||
810 | .size __8033x_proc_info, . - __8033x_proc_info | ||
811 | |||
812 | .type __pxa250_proc_info,#object | ||
813 | __pxa250_proc_info: | ||
814 | .long 0x69052100 | ||
815 | .long 0xfffff7f0 | ||
816 | .long PMD_TYPE_SECT | \ | ||
817 | PMD_SECT_BUFFERABLE | \ | ||
818 | PMD_SECT_CACHEABLE | \ | ||
819 | PMD_SECT_AP_WRITE | \ | ||
820 | PMD_SECT_AP_READ | ||
821 | .long PMD_TYPE_SECT | \ | ||
822 | PMD_SECT_AP_WRITE | \ | ||
823 | PMD_SECT_AP_READ | ||
824 | b __xscale_setup | ||
825 | .long cpu_arch_name | ||
826 | .long cpu_elf_name | ||
827 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
828 | .long cpu_pxa250_name | ||
829 | .long xscale_processor_functions | ||
830 | .long v4wbi_tlb_fns | ||
831 | .long xscale_mc_user_fns | ||
832 | .long xscale_cache_fns | ||
833 | .size __pxa250_proc_info, . - __pxa250_proc_info | ||
834 | |||
835 | .type __pxa210_proc_info,#object | ||
836 | __pxa210_proc_info: | ||
837 | .long 0x69052120 | ||
838 | .long 0xfffff3f0 | ||
839 | .long PMD_TYPE_SECT | \ | ||
840 | PMD_SECT_BUFFERABLE | \ | ||
841 | PMD_SECT_CACHEABLE | \ | ||
842 | PMD_SECT_AP_WRITE | \ | ||
843 | PMD_SECT_AP_READ | ||
844 | .long PMD_TYPE_SECT | \ | ||
845 | PMD_SECT_AP_WRITE | \ | ||
846 | PMD_SECT_AP_READ | ||
847 | b __xscale_setup | ||
848 | .long cpu_arch_name | ||
849 | .long cpu_elf_name | ||
850 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
851 | .long cpu_pxa210_name | ||
852 | .long xscale_processor_functions | ||
853 | .long v4wbi_tlb_fns | ||
854 | .long xscale_mc_user_fns | ||
855 | .long xscale_cache_fns | ||
856 | .size __pxa210_proc_info, . - __pxa210_proc_info | ||
857 | |||
858 | .type __ixp2400_proc_info, #object | ||
859 | __ixp2400_proc_info: | ||
860 | .long 0x69054190 | ||
861 | .long 0xfffffff0 | ||
862 | .long PMD_TYPE_SECT | \ | ||
863 | PMD_SECT_BUFFERABLE | \ | ||
864 | PMD_SECT_CACHEABLE | \ | ||
865 | PMD_SECT_AP_WRITE | \ | ||
866 | PMD_SECT_AP_READ | ||
867 | .long PMD_TYPE_SECT | \ | ||
868 | PMD_SECT_AP_WRITE | \ | ||
869 | PMD_SECT_AP_READ | ||
870 | b __xscale_setup | ||
871 | .long cpu_arch_name | ||
872 | .long cpu_elf_name | ||
873 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
874 | .long cpu_ixp2400_name | ||
875 | .long xscale_processor_functions | ||
876 | .long v4wbi_tlb_fns | ||
877 | .long xscale_mc_user_fns | ||
878 | .long xscale_cache_fns | ||
879 | .size __ixp2400_proc_info, . - __ixp2400_proc_info | ||
880 | |||
881 | .type __ixp2800_proc_info, #object | ||
882 | __ixp2800_proc_info: | ||
883 | .long 0x690541a0 | ||
884 | .long 0xfffffff0 | ||
885 | .long PMD_TYPE_SECT | \ | ||
886 | PMD_SECT_BUFFERABLE | \ | ||
887 | PMD_SECT_CACHEABLE | \ | ||
888 | PMD_SECT_AP_WRITE | \ | ||
889 | PMD_SECT_AP_READ | ||
890 | .long PMD_TYPE_SECT | \ | ||
891 | PMD_SECT_AP_WRITE | \ | ||
892 | PMD_SECT_AP_READ | ||
893 | b __xscale_setup | ||
894 | .long cpu_arch_name | ||
895 | .long cpu_elf_name | ||
896 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
897 | .long cpu_ixp2800_name | ||
898 | .long xscale_processor_functions | ||
899 | .long v4wbi_tlb_fns | ||
900 | .long xscale_mc_user_fns | ||
901 | .long xscale_cache_fns | ||
902 | .size __ixp2800_proc_info, . - __ixp2800_proc_info | ||
903 | |||
904 | .type __ixp42x_proc_info, #object | ||
905 | __ixp42x_proc_info: | ||
906 | .long 0x690541c0 | ||
907 | .long 0xffffffc0 | ||
908 | .long PMD_TYPE_SECT | \ | ||
909 | PMD_SECT_BUFFERABLE | \ | ||
910 | PMD_SECT_CACHEABLE | \ | ||
911 | PMD_SECT_AP_WRITE | \ | ||
912 | PMD_SECT_AP_READ | ||
913 | .long PMD_TYPE_SECT | \ | ||
914 | PMD_SECT_AP_WRITE | \ | ||
915 | PMD_SECT_AP_READ | ||
916 | b __xscale_setup | ||
917 | .long cpu_arch_name | ||
918 | .long cpu_elf_name | ||
919 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
920 | .long cpu_ixp42x_name | ||
921 | .long xscale_processor_functions | ||
922 | .long v4wbi_tlb_fns | ||
923 | .long xscale_mc_user_fns | ||
924 | .long xscale_cache_fns | ||
925 | .size __ixp42x_proc_info, . - __ixp42x_proc_info | ||
926 | |||
927 | .type __ixp43x_proc_info, #object | ||
928 | __ixp43x_proc_info: | ||
929 | .long 0x69054040 | ||
930 | .long 0xfffffff0 | ||
931 | .long PMD_TYPE_SECT | \ | ||
932 | PMD_SECT_BUFFERABLE | \ | ||
933 | PMD_SECT_CACHEABLE | \ | ||
934 | PMD_SECT_AP_WRITE | \ | ||
935 | PMD_SECT_AP_READ | ||
936 | .long PMD_TYPE_SECT | \ | ||
937 | PMD_SECT_AP_WRITE | \ | ||
938 | PMD_SECT_AP_READ | ||
939 | b __xscale_setup | ||
940 | .long cpu_arch_name | ||
941 | .long cpu_elf_name | ||
942 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
943 | .long cpu_ixp43x_name | ||
944 | .long xscale_processor_functions | ||
945 | .long v4wbi_tlb_fns | ||
946 | .long xscale_mc_user_fns | ||
947 | .long xscale_cache_fns | ||
948 | .size __ixp43x_proc_info, . - __ixp43x_proc_info | ||
949 | |||
950 | .type __ixp46x_proc_info, #object | ||
951 | __ixp46x_proc_info: | ||
952 | .long 0x69054200 | ||
953 | .long 0xffffff00 | ||
954 | .long PMD_TYPE_SECT | \ | ||
955 | PMD_SECT_BUFFERABLE | \ | ||
956 | PMD_SECT_CACHEABLE | \ | ||
957 | PMD_SECT_AP_WRITE | \ | ||
958 | PMD_SECT_AP_READ | ||
959 | .long PMD_TYPE_SECT | \ | ||
960 | PMD_SECT_AP_WRITE | \ | ||
961 | PMD_SECT_AP_READ | ||
962 | b __xscale_setup | ||
963 | .long cpu_arch_name | ||
964 | .long cpu_elf_name | ||
965 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
966 | .long cpu_ixp46x_name | ||
967 | .long xscale_processor_functions | ||
968 | .long v4wbi_tlb_fns | ||
969 | .long xscale_mc_user_fns | ||
970 | .long xscale_cache_fns | ||
971 | .size __ixp46x_proc_info, . - __ixp46x_proc_info | ||
972 | |||
973 | .type __pxa255_proc_info,#object | ||
974 | __pxa255_proc_info: | ||
975 | .long 0x69052d00 | ||
976 | .long 0xfffffff0 | ||
977 | .long PMD_TYPE_SECT | \ | ||
978 | PMD_SECT_BUFFERABLE | \ | ||
979 | PMD_SECT_CACHEABLE | \ | ||
980 | PMD_SECT_AP_WRITE | \ | ||
981 | PMD_SECT_AP_READ | ||
982 | .long PMD_TYPE_SECT | \ | ||
983 | PMD_SECT_AP_WRITE | \ | ||
984 | PMD_SECT_AP_READ | ||
985 | b __xscale_setup | ||
986 | .long cpu_arch_name | ||
987 | .long cpu_elf_name | ||
988 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
989 | .long cpu_pxa255_name | ||
990 | .long xscale_processor_functions | ||
991 | .long v4wbi_tlb_fns | ||
992 | .long xscale_mc_user_fns | ||
993 | .long xscale_cache_fns | ||
994 | .size __pxa255_proc_info, . - __pxa255_proc_info | ||
995 | |||
996 | .type __pxa270_proc_info,#object | ||
997 | __pxa270_proc_info: | ||
998 | .long 0x69054110 | ||
999 | .long 0xfffffff0 | ||
1000 | .long PMD_TYPE_SECT | \ | ||
1001 | PMD_SECT_BUFFERABLE | \ | ||
1002 | PMD_SECT_CACHEABLE | \ | ||
1003 | PMD_SECT_AP_WRITE | \ | ||
1004 | PMD_SECT_AP_READ | ||
1005 | .long PMD_TYPE_SECT | \ | ||
1006 | PMD_SECT_AP_WRITE | \ | ||
1007 | PMD_SECT_AP_READ | ||
1008 | b __xscale_setup | ||
1009 | .long cpu_arch_name | ||
1010 | .long cpu_elf_name | ||
1011 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
1012 | .long cpu_pxa270_name | ||
1013 | .long xscale_processor_functions | ||
1014 | .long v4wbi_tlb_fns | ||
1015 | .long xscale_mc_user_fns | ||
1016 | .long xscale_cache_fns | ||
1017 | .size __pxa270_proc_info, . - __pxa270_proc_info | ||
1018 | |||
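Note: each xscale_proc_info line above expands to one record in the .proc.info.init section, carrying the CPUID match value and mask, the MMU section flags, a branch to __xscale_setup, and the pointers to the name strings and function tables that the removed per-CPU blocks spelled out by hand. The boot-time CPU lookup is assumed to select a record by masked comparison against the ID register, roughly:

	/* Sketch of the assumed match rule (the real lookup is done in
	 * assembly by __lookup_processor_type); names here are illustrative. */
	static int proc_info_matches(unsigned int midr,
				     unsigned int cpu_val, unsigned int cpu_mask)
	{
		return (midr & cpu_mask) == cpu_val;
	}

	/* Example from the table above: the pxa255 entry uses cpu_val 0x69052d00
	 * and cpu_mask 0xfffffff0, so any ID of the form 0x69052d0x selects it. */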
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S index 9694f1f6f485..d3ddcf9a76ca 100644 --- a/arch/arm/mm/tlb-fa.S +++ b/arch/arm/mm/tlb-fa.S | |||
@@ -46,7 +46,6 @@ ENTRY(fa_flush_user_tlb_range) | |||
46 | add r0, r0, #PAGE_SZ | 46 | add r0, r0, #PAGE_SZ |
47 | cmp r0, r1 | 47 | cmp r0, r1 |
48 | blo 1b | 48 | blo 1b |
49 | mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB | ||
50 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier | 49 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier |
51 | mov pc, lr | 50 | mov pc, lr |
52 | 51 | ||
@@ -60,16 +59,11 @@ ENTRY(fa_flush_kern_tlb_range) | |||
60 | add r0, r0, #PAGE_SZ | 59 | add r0, r0, #PAGE_SZ |
61 | cmp r0, r1 | 60 | cmp r0, r1 |
62 | blo 1b | 61 | blo 1b |
63 | mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB | ||
64 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier | 62 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier |
65 | mcr p15, 0, r3, c7, c5, 4 @ prefetch flush | 63 | mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) |
66 | mov pc, lr | 64 | mov pc, lr |
67 | 65 | ||
68 | __INITDATA | 66 | __INITDATA |
69 | 67 | ||
70 | .type fa_tlb_fns, #object | 68 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
71 | ENTRY(fa_tlb_fns) | 69 | define_tlb_functions fa, fa_tlb_flags |
72 | .long fa_flush_user_tlb_range | ||
73 | .long fa_flush_kern_tlb_range | ||
74 | .long fa_tlb_flags | ||
75 | .size fa_tlb_fns, . - fa_tlb_fns | ||
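Note: this and the following tlb-*.S conversions replace each hand-built table with define_tlb_functions, which emits the struct cpu_tlb_fns layout named in the new comment. A simplified sketch of that layout (paraphrased from <asm/tlbflush.h>, vma argument type elided):

	/* Sketch only: the three members match the three removed .long lines. */
	struct cpu_tlb_fns_sketch {
		void (*flush_user_range)(unsigned long start, unsigned long end,
					 void *vma);
		void (*flush_kern_range)(unsigned long start, unsigned long end);
		unsigned long tlb_flags;		/* e.g. fa_tlb_flags */
	};

The v7 conversion further down additionally passes flags_smp=v7wbi_tlb_flags_smp, so the macro can emit the ALT_SMP/ALT_UP flag words that the open-coded v7wbi table carried.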
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S index c10786ec8e0a..d253995ec4ca 100644 --- a/arch/arm/mm/tlb-v3.S +++ b/arch/arm/mm/tlb-v3.S | |||
@@ -44,9 +44,5 @@ ENTRY(v3_flush_kern_tlb_range) | |||
44 | 44 | ||
45 | __INITDATA | 45 | __INITDATA |
46 | 46 | ||
47 | .type v3_tlb_fns, #object | 47 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
48 | ENTRY(v3_tlb_fns) | 48 | define_tlb_functions v3, v3_tlb_flags |
49 | .long v3_flush_user_tlb_range | ||
50 | .long v3_flush_kern_tlb_range | ||
51 | .long v3_tlb_flags | ||
52 | .size v3_tlb_fns, . - v3_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S index d6c94457c2b9..17a025ade573 100644 --- a/arch/arm/mm/tlb-v4.S +++ b/arch/arm/mm/tlb-v4.S | |||
@@ -57,9 +57,5 @@ ENTRY(v4_flush_user_tlb_range) | |||
57 | 57 | ||
58 | __INITDATA | 58 | __INITDATA |
59 | 59 | ||
60 | .type v4_tlb_fns, #object | 60 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
61 | ENTRY(v4_tlb_fns) | 61 | define_tlb_functions v4, v4_tlb_flags |
62 | .long v4_flush_user_tlb_range | ||
63 | .long v4_flush_kern_tlb_range | ||
64 | .long v4_tlb_flags | ||
65 | .size v4_tlb_fns, . - v4_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S index cb829ca7845d..c04598fa4d4a 100644 --- a/arch/arm/mm/tlb-v4wb.S +++ b/arch/arm/mm/tlb-v4wb.S | |||
@@ -69,9 +69,5 @@ ENTRY(v4wb_flush_kern_tlb_range) | |||
69 | 69 | ||
70 | __INITDATA | 70 | __INITDATA |
71 | 71 | ||
72 | .type v4wb_tlb_fns, #object | 72 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
73 | ENTRY(v4wb_tlb_fns) | 73 | define_tlb_functions v4wb, v4wb_tlb_flags |
74 | .long v4wb_flush_user_tlb_range | ||
75 | .long v4wb_flush_kern_tlb_range | ||
76 | .long v4wb_tlb_flags | ||
77 | .size v4wb_tlb_fns, . - v4wb_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S index 60cfc4a25dd5..1f6062b6c1c1 100644 --- a/arch/arm/mm/tlb-v4wbi.S +++ b/arch/arm/mm/tlb-v4wbi.S | |||
@@ -60,9 +60,5 @@ ENTRY(v4wbi_flush_kern_tlb_range) | |||
60 | 60 | ||
61 | __INITDATA | 61 | __INITDATA |
62 | 62 | ||
63 | .type v4wbi_tlb_fns, #object | 63 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
64 | ENTRY(v4wbi_tlb_fns) | 64 | define_tlb_functions v4wbi, v4wbi_tlb_flags |
65 | .long v4wbi_flush_user_tlb_range | ||
66 | .long v4wbi_flush_kern_tlb_range | ||
67 | .long v4wbi_tlb_flags | ||
68 | .size v4wbi_tlb_fns, . - v4wbi_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S index 73d7d89b04c4..eca07f550a0b 100644 --- a/arch/arm/mm/tlb-v6.S +++ b/arch/arm/mm/tlb-v6.S | |||
@@ -54,7 +54,6 @@ ENTRY(v6wbi_flush_user_tlb_range) | |||
54 | add r0, r0, #PAGE_SZ | 54 | add r0, r0, #PAGE_SZ |
55 | cmp r0, r1 | 55 | cmp r0, r1 |
56 | blo 1b | 56 | blo 1b |
57 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB | ||
58 | mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier | 57 | mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier |
59 | mov pc, lr | 58 | mov pc, lr |
60 | 59 | ||
@@ -83,16 +82,11 @@ ENTRY(v6wbi_flush_kern_tlb_range) | |||
83 | add r0, r0, #PAGE_SZ | 82 | add r0, r0, #PAGE_SZ |
84 | cmp r0, r1 | 83 | cmp r0, r1 |
85 | blo 1b | 84 | blo 1b |
86 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
87 | mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier | 85 | mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier |
88 | mcr p15, 0, r2, c7, c5, 4 @ prefetch flush | 86 | mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) |
89 | mov pc, lr | 87 | mov pc, lr |
90 | 88 | ||
91 | __INIT | 89 | __INIT |
92 | 90 | ||
93 | .type v6wbi_tlb_fns, #object | 91 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
94 | ENTRY(v6wbi_tlb_fns) | 92 | define_tlb_functions v6wbi, v6wbi_tlb_flags |
95 | .long v6wbi_flush_user_tlb_range | ||
96 | .long v6wbi_flush_kern_tlb_range | ||
97 | .long v6wbi_tlb_flags | ||
98 | .size v6wbi_tlb_fns, . - v6wbi_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 53cd5b454673..845f461f8ec1 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S | |||
@@ -48,9 +48,6 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
48 | add r0, r0, #PAGE_SZ | 48 | add r0, r0, #PAGE_SZ |
49 | cmp r0, r1 | 49 | cmp r0, r1 |
50 | blo 1b | 50 | blo 1b |
51 | mov ip, #0 | ||
52 | ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable | ||
53 | ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB | ||
54 | dsb | 51 | dsb |
55 | mov pc, lr | 52 | mov pc, lr |
56 | ENDPROC(v7wbi_flush_user_tlb_range) | 53 | ENDPROC(v7wbi_flush_user_tlb_range) |
@@ -75,9 +72,6 @@ ENTRY(v7wbi_flush_kern_tlb_range) | |||
75 | add r0, r0, #PAGE_SZ | 72 | add r0, r0, #PAGE_SZ |
76 | cmp r0, r1 | 73 | cmp r0, r1 |
77 | blo 1b | 74 | blo 1b |
78 | mov r2, #0 | ||
79 | ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable | ||
80 | ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB | ||
81 | dsb | 75 | dsb |
82 | isb | 76 | isb |
83 | mov pc, lr | 77 | mov pc, lr |
@@ -85,10 +79,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range) | |||
85 | 79 | ||
86 | __INIT | 80 | __INIT |
87 | 81 | ||
88 | .type v7wbi_tlb_fns, #object | 82 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
89 | ENTRY(v7wbi_tlb_fns) | 83 | define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp |
90 | .long v7wbi_flush_user_tlb_range | ||
91 | .long v7wbi_flush_kern_tlb_range | ||
92 | ALT_SMP(.long v7wbi_tlb_flags_smp) | ||
93 | ALT_UP(.long v7wbi_tlb_flags_up) | ||
94 | .size v7wbi_tlb_fns, . - v7wbi_tlb_fns | ||
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S index 2e49e71b1b98..066d464d322d 100644 --- a/arch/arm/plat-mxc/include/mach/entry-macro.S +++ b/arch/arm/plat-mxc/include/mach/entry-macro.S | |||
@@ -78,7 +78,3 @@ | |||
78 | movs \irqnr, \irqnr | 78 | movs \irqnr, \irqnr |
79 | #endif | 79 | #endif |
80 | .endm | 80 | .endm |
81 | |||
82 | @ irq priority table (not used) | ||
83 | .macro irq_prio_table | ||
84 | .endm | ||
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 6af3d0b1f8d0..363c91e44efb 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
@@ -394,20 +394,15 @@ void omap3_sram_restore_context(void) | |||
394 | } | 394 | } |
395 | #endif /* CONFIG_PM */ | 395 | #endif /* CONFIG_PM */ |
396 | 396 | ||
397 | static int __init omap34xx_sram_init(void) | 397 | #endif /* CONFIG_ARCH_OMAP3 */ |
398 | { | 398 | |
399 | _omap3_sram_configure_core_dpll = | ||
400 | omap_sram_push(omap3_sram_configure_core_dpll, | ||
401 | omap3_sram_configure_core_dpll_sz); | ||
402 | omap_push_sram_idle(); | ||
403 | return 0; | ||
404 | } | ||
405 | #else | ||
406 | static inline int omap34xx_sram_init(void) | 399 | static inline int omap34xx_sram_init(void) |
407 | { | 400 | { |
401 | #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) | ||
402 | omap3_sram_restore_context(); | ||
403 | #endif | ||
408 | return 0; | 404 | return 0; |
409 | } | 405 | } |
410 | #endif | ||
411 | 406 | ||
412 | int __init omap_sram_init(void) | 407 | int __init omap_sram_init(void) |
413 | { | 408 | { |
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index fd7032f84ae7..c56612569b40 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S | |||
@@ -41,31 +41,6 @@ | |||
41 | 41 | ||
42 | .text | 42 | .text |
43 | 43 | ||
44 | /* s3c_cpu_save | ||
45 | * | ||
46 | * entry: | ||
47 | * r1 = v:p offset | ||
48 | */ | ||
49 | |||
50 | ENTRY(s3c_cpu_save) | ||
51 | stmfd sp!, { r4 - r12, lr } | ||
52 | ldr r3, =resume_with_mmu | ||
53 | bl cpu_suspend | ||
54 | |||
55 | @@ jump to final code to send system to sleep | ||
56 | ldr r0, =pm_cpu_sleep | ||
57 | @@ldr pc, [ r0 ] | ||
58 | ldr r0, [ r0 ] | ||
59 | mov pc, r0 | ||
60 | |||
61 | @@ return to the caller, after having the MMU | ||
62 | @@ turned on, this restores the last bits from the | ||
63 | @@ stack | ||
64 | resume_with_mmu: | ||
65 | ldmfd sp!, { r4 - r12, pc } | ||
66 | |||
67 | .ltorg | ||
68 | |||
69 | /* sleep magic, to allow the bootloader to check for a valid | 44 | /* sleep magic, to allow the bootloader to check for a valid |
70 | * image to resume to. Must be the first word before the | 45 | * image to resume to. Must be the first word before the |
71 | * s3c_cpu_resume entry. | 46 | * s3c_cpu_resume entry. |
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 7fb6f6be8c81..f6749916d194 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h | |||
@@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow; | |||
42 | /* per-cpu sleep functions */ | 42 | /* per-cpu sleep functions */ |
43 | 43 | ||
44 | extern void (*pm_cpu_prep)(void); | 44 | extern void (*pm_cpu_prep)(void); |
45 | extern void (*pm_cpu_sleep)(void); | 45 | extern int (*pm_cpu_sleep)(unsigned long); |
46 | 46 | ||
47 | /* Flags for PM Control */ | 47 | /* Flags for PM Control */ |
48 | 48 | ||
@@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */ | |||
52 | 52 | ||
53 | /* from sleep.S */ | 53 | /* from sleep.S */ |
54 | 54 | ||
55 | extern int s3c_cpu_save(unsigned long *saveblk, long); | ||
56 | extern void s3c_cpu_resume(void); | 55 | extern void s3c_cpu_resume(void); |
57 | 56 | ||
58 | extern void s3c2410_cpu_suspend(void); | 57 | extern int s3c2410_cpu_suspend(unsigned long); |
59 | 58 | ||
60 | /* sleep save info */ | 59 | /* sleep save info */ |
61 | 60 | ||
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 5c0a440d6e16..5fa1742d019b 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | 21 | ||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/suspend.h> | ||
23 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
24 | #include <mach/map.h> | 25 | #include <mach/map.h> |
25 | 26 | ||
@@ -231,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start, | |||
231 | 232 | ||
232 | 233 | ||
233 | void (*pm_cpu_prep)(void); | 234 | void (*pm_cpu_prep)(void); |
234 | void (*pm_cpu_sleep)(void); | 235 | int (*pm_cpu_sleep)(unsigned long); |
235 | 236 | ||
236 | #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) | 237 | #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) |
237 | 238 | ||
@@ -294,15 +295,11 @@ static int s3c_pm_enter(suspend_state_t state) | |||
294 | 295 | ||
295 | s3c_pm_arch_stop_clocks(); | 296 | s3c_pm_arch_stop_clocks(); |
296 | 297 | ||
297 | /* s3c_cpu_save will also act as our return point from when | 298 | /* this will also act as our return point from when |
298 | * we resume as it saves its own register state and restores it | 299 | * we resume as it saves its own register state and restores it |
299 | * during the resume. */ | 300 | * during the resume. */ |
300 | 301 | ||
301 | s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 302 | cpu_suspend(0, pm_cpu_sleep); |
302 | |||
303 | /* restore the cpu state using the kernel's cpu init code. */ | ||
304 | |||
305 | cpu_init(); | ||
306 | 303 | ||
307 | /* restore the system state */ | 304 | /* restore the system state */ |
308 | 305 | ||
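Note: with s3c_cpu_save removed and pm_cpu_sleep retyped to int (*)(unsigned long), the platform sleep hook is now called as a cpu_suspend() finisher: it runs after the generic code has saved the CPU context and is expected to enter the low-power state rather than return. A hedged sketch of what a platform now registers (function names here are hypothetical):

	#include <asm/suspend.h>
	#include <plat/pm.h>		/* for pm_cpu_sleep */

	/* Hypothetical finisher: invoked via cpu_suspend(0, pm_cpu_sleep) from
	 * s3c_pm_enter().  Entering sleep should not return; returning is
	 * treated as a failure to suspend. */
	static int example_cpu_sleep(unsigned long arg)
	{
		/* program the PM hardware and stop the core here */
		return 1;		/* reached only if sleep could not be entered */
	}

	static void example_pm_init(void)
	{
		pm_cpu_sleep = example_cpu_sleep;
	}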
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 9897dcfc16d6..2d30c7f6edd3 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S | |||
@@ -77,27 +77,27 @@ ENTRY(vfp_support_entry) | |||
77 | bne look_for_VFP_exceptions @ VFP is already enabled | 77 | bne look_for_VFP_exceptions @ VFP is already enabled |
78 | 78 | ||
79 | DBGSTR1 "enable %x", r10 | 79 | DBGSTR1 "enable %x", r10 |
80 | ldr r3, last_VFP_context_address | 80 | ldr r3, vfp_current_hw_state_address |
81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set | 81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set |
82 | ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer | 82 | ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer |
83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled | 83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled |
84 | cmp r4, r10 | 84 | cmp r4, r10 @ this thread owns the hw context? |
85 | beq check_for_exception @ we are returning to the same | 85 | #ifndef CONFIG_SMP |
86 | @ process, so the registers are | 86 | @ For UP, checking that this thread owns the hw context is |
87 | @ still there. In this case, we do | 87 | @ sufficient to determine that the hardware state is valid. |
88 | @ not want to drop a pending exception. | 88 | beq vfp_hw_state_valid |
89 | |||
90 | @ On UP, we lazily save the VFP context. As a different | ||
91 | @ thread wants ownership of the VFP hardware, save the old | ||
92 | @ state if there was a previous (valid) owner. | ||
89 | 93 | ||
90 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending | 94 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending |
91 | @ exceptions, so we can get at the | 95 | @ exceptions, so we can get at the |
92 | @ rest of it | 96 | @ rest of it |
93 | 97 | ||
94 | #ifndef CONFIG_SMP | ||
95 | @ Save out the current registers to the old thread state | ||
96 | @ No need for SMP since this is not done lazily | ||
97 | |||
98 | DBGSTR1 "save old state %p", r4 | 98 | DBGSTR1 "save old state %p", r4 |
99 | cmp r4, #0 | 99 | cmp r4, #0 @ if the vfp_current_hw_state is NULL |
100 | beq no_old_VFP_process | 100 | beq vfp_reload_hw @ then the hw state needs reloading |
101 | VFPFSTMIA r4, r5 @ save the working registers | 101 | VFPFSTMIA r4, r5 @ save the working registers |
102 | VFPFMRX r5, FPSCR @ current status | 102 | VFPFMRX r5, FPSCR @ current status |
103 | #ifndef CONFIG_CPU_FEROCEON | 103 | #ifndef CONFIG_CPU_FEROCEON |
@@ -110,13 +110,35 @@ ENTRY(vfp_support_entry) | |||
110 | 1: | 110 | 1: |
111 | #endif | 111 | #endif |
112 | stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 | 112 | stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 |
113 | @ and point r4 at the word at the | 113 | vfp_reload_hw: |
114 | @ start of the register dump | 114 | |
115 | #else | ||
116 | @ For SMP, if this thread does not own the hw context, then we | ||
117 | @ need to reload it. No need to save the old state as on SMP, | ||
118 | @ we always save the state when we switch away from a thread. | ||
119 | bne vfp_reload_hw | ||
120 | |||
121 | @ This thread has ownership of the current hardware context. | ||
122 | @ However, it may have been migrated to another CPU, in which | ||
123 | @ case the saved state is newer than the hardware context. | ||
124 | @ Check this by looking at the CPU number which the state was | ||
125 | @ last loaded onto. | ||
126 | ldr ip, [r10, #VFP_CPU] | ||
127 | teq ip, r11 | ||
128 | beq vfp_hw_state_valid | ||
129 | |||
130 | vfp_reload_hw: | ||
131 | @ We're loading this thread's state into the VFP hardware. Update | ||
132 | @ the CPU number which contains the most up to date VFP context. | ||
133 | str r11, [r10, #VFP_CPU] | ||
134 | |||
135 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending | ||
136 | @ exceptions, so we can get at the | ||
137 | @ rest of it | ||
115 | #endif | 138 | #endif |
116 | 139 | ||
117 | no_old_VFP_process: | ||
118 | DBGSTR1 "load state %p", r10 | 140 | DBGSTR1 "load state %p", r10 |
119 | str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer | 141 | str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer |
120 | @ Load the saved state back into the VFP | 142 | @ Load the saved state back into the VFP |
121 | VFPFLDMIA r10, r5 @ reload the working registers while | 143 | VFPFLDMIA r10, r5 @ reload the working registers while |
122 | @ FPEXC is in a safe state | 144 | @ FPEXC is in a safe state |
@@ -132,7 +154,8 @@ no_old_VFP_process: | |||
132 | #endif | 154 | #endif |
133 | VFPFMXR FPSCR, r5 @ restore status | 155 | VFPFMXR FPSCR, r5 @ restore status |
134 | 156 | ||
135 | check_for_exception: | 157 | @ The context stored in the VFP hardware is up to date with this thread |
158 | vfp_hw_state_valid: | ||
136 | tst r1, #FPEXC_EX | 159 | tst r1, #FPEXC_EX |
137 | bne process_exception @ might as well handle the pending | 160 | bne process_exception @ might as well handle the pending |
138 | @ exception before retrying branch | 161 | @ exception before retrying branch |
@@ -207,8 +230,8 @@ ENTRY(vfp_save_state) | |||
207 | ENDPROC(vfp_save_state) | 230 | ENDPROC(vfp_save_state) |
208 | 231 | ||
209 | .align | 232 | .align |
210 | last_VFP_context_address: | 233 | vfp_current_hw_state_address: |
211 | .word last_VFP_context | 234 | .word vfp_current_hw_state |
212 | 235 | ||
213 | .macro tbl_branch, base, tmp, shift | 236 | .macro tbl_branch, base, tmp, shift |
214 | #ifdef CONFIG_THUMB2_KERNEL | 237 | #ifdef CONFIG_THUMB2_KERNEL |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f25e7ec89416..79bcb4316930 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -33,7 +33,6 @@ void vfp_support_entry(void); | |||
33 | void vfp_null_entry(void); | 33 | void vfp_null_entry(void); |
34 | 34 | ||
35 | void (*vfp_vector)(void) = vfp_null_entry; | 35 | void (*vfp_vector)(void) = vfp_null_entry; |
36 | union vfp_state *last_VFP_context[NR_CPUS]; | ||
37 | 36 | ||
38 | /* | 37 | /* |
39 | * Dual-use variable. | 38 | * Dual-use variable. |
@@ -43,6 +42,46 @@ union vfp_state *last_VFP_context[NR_CPUS]; | |||
43 | unsigned int VFP_arch; | 42 | unsigned int VFP_arch; |
44 | 43 | ||
45 | /* | 44 | /* |
45 | * The pointer to the vfpstate structure of the thread which currently | ||
46 | * owns the context held in the VFP hardware, or NULL if the hardware | ||
47 | * context is invalid. | ||
48 | * | ||
49 | * For UP, this is sufficient to tell which thread owns the VFP context. | ||
50 | * However, for SMP, we also need to check the CPU number stored in the | ||
51 | * saved state too to catch migrations. | ||
52 | */ | ||
53 | union vfp_state *vfp_current_hw_state[NR_CPUS]; | ||
54 | |||
55 | /* | ||
56 | * Is 'thread's most up to date state stored in this CPU's hardware? | ||
57 | * Must be called from non-preemptible context. | ||
58 | */ | ||
59 | static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) | ||
60 | { | ||
61 | #ifdef CONFIG_SMP | ||
62 | if (thread->vfpstate.hard.cpu != cpu) | ||
63 | return false; | ||
64 | #endif | ||
65 | return vfp_current_hw_state[cpu] == &thread->vfpstate; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Force a reload of the VFP context from the thread structure. We do | ||
70 | * this by ensuring that access to the VFP hardware is disabled, and | ||
71 | * clear vfp_current_hw_state. Must be called from non-preemptible context. | ||
72 | */ | ||
73 | static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) | ||
74 | { | ||
75 | if (vfp_state_in_hw(cpu, thread)) { | ||
76 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | ||
77 | vfp_current_hw_state[cpu] = NULL; | ||
78 | } | ||
79 | #ifdef CONFIG_SMP | ||
80 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
81 | #endif | ||
82 | } | ||
83 | |||
84 | /* | ||
46 | * Per-thread VFP initialization. | 85 | * Per-thread VFP initialization. |
47 | */ | 86 | */ |
48 | static void vfp_thread_flush(struct thread_info *thread) | 87 | static void vfp_thread_flush(struct thread_info *thread) |
@@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread) | |||
50 | union vfp_state *vfp = &thread->vfpstate; | 89 | union vfp_state *vfp = &thread->vfpstate; |
51 | unsigned int cpu; | 90 | unsigned int cpu; |
52 | 91 | ||
53 | memset(vfp, 0, sizeof(union vfp_state)); | ||
54 | |||
55 | vfp->hard.fpexc = FPEXC_EN; | ||
56 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | ||
57 | |||
58 | /* | 92 | /* |
59 | * Disable VFP to ensure we initialize it first. We must ensure | 93 | * Disable VFP to ensure we initialize it first. We must ensure |
60 | * that the modification of last_VFP_context[] and hardware disable | 94 | * that the modification of vfp_current_hw_state[] and hardware |
61 | * are done for the same CPU and without preemption. | 95 | * disable are done for the same CPU and without preemption. |
96 | * | ||
97 | * Do this first to ensure that preemption won't overwrite our | ||
98 | * state saving should access to the VFP be enabled at this point. | ||
62 | */ | 99 | */ |
63 | cpu = get_cpu(); | 100 | cpu = get_cpu(); |
64 | if (last_VFP_context[cpu] == vfp) | 101 | if (vfp_current_hw_state[cpu] == vfp) |
65 | last_VFP_context[cpu] = NULL; | 102 | vfp_current_hw_state[cpu] = NULL; |
66 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 103 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
67 | put_cpu(); | 104 | put_cpu(); |
105 | |||
106 | memset(vfp, 0, sizeof(union vfp_state)); | ||
107 | |||
108 | vfp->hard.fpexc = FPEXC_EN; | ||
109 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | ||
110 | #ifdef CONFIG_SMP | ||
111 | vfp->hard.cpu = NR_CPUS; | ||
112 | #endif | ||
68 | } | 113 | } |
69 | 114 | ||
70 | static void vfp_thread_exit(struct thread_info *thread) | 115 | static void vfp_thread_exit(struct thread_info *thread) |
@@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread) | |||
73 | union vfp_state *vfp = &thread->vfpstate; | 118 | union vfp_state *vfp = &thread->vfpstate; |
74 | unsigned int cpu = get_cpu(); | 119 | unsigned int cpu = get_cpu(); |
75 | 120 | ||
76 | if (last_VFP_context[cpu] == vfp) | 121 | if (vfp_current_hw_state[cpu] == vfp) |
77 | last_VFP_context[cpu] = NULL; | 122 | vfp_current_hw_state[cpu] = NULL; |
78 | put_cpu(); | 123 | put_cpu(); |
79 | } | 124 | } |
80 | 125 | ||
@@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread) | |||
84 | 129 | ||
85 | vfp_sync_hwstate(parent); | 130 | vfp_sync_hwstate(parent); |
86 | thread->vfpstate = parent->vfpstate; | 131 | thread->vfpstate = parent->vfpstate; |
132 | #ifdef CONFIG_SMP | ||
133 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
134 | #endif | ||
87 | } | 135 | } |
88 | 136 | ||
89 | /* | 137 | /* |
@@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | |||
129 | * case the thread migrates to a different CPU. The | 177 | * case the thread migrates to a different CPU. The |
130 | * restoring is done lazily. | 178 | * restoring is done lazily. |
131 | */ | 179 | */ |
132 | if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { | 180 | if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) |
133 | vfp_save_state(last_VFP_context[cpu], fpexc); | 181 | vfp_save_state(vfp_current_hw_state[cpu], fpexc); |
134 | last_VFP_context[cpu]->hard.cpu = cpu; | ||
135 | } | ||
136 | /* | ||
137 | * Thread migration, just force the reloading of the | ||
138 | * state on the new CPU in case the VFP registers | ||
139 | * contain stale data. | ||
140 | */ | ||
141 | if (thread->vfpstate.hard.cpu != cpu) | ||
142 | last_VFP_context[cpu] = NULL; | ||
143 | #endif | 182 | #endif |
144 | 183 | ||
145 | /* | 184 | /* |
@@ -415,7 +454,7 @@ static int vfp_pm_suspend(void) | |||
415 | } | 454 | } |
416 | 455 | ||
417 | /* clear any information we had about last context state */ | 456 | /* clear any information we had about last context state */ |
418 | memset(last_VFP_context, 0, sizeof(last_VFP_context)); | 457 | memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); |
419 | 458 | ||
420 | return 0; | 459 | return 0; |
421 | } | 460 | } |
@@ -443,15 +482,15 @@ static void vfp_pm_init(void) | |||
443 | static inline void vfp_pm_init(void) { } | 482 | static inline void vfp_pm_init(void) { } |
444 | #endif /* CONFIG_PM */ | 483 | #endif /* CONFIG_PM */ |
445 | 484 | ||
485 | /* | ||
486 | * Ensure that the VFP state stored in 'thread->vfpstate' is up to date | ||
487 | * with the hardware state. | ||
488 | */ | ||
446 | void vfp_sync_hwstate(struct thread_info *thread) | 489 | void vfp_sync_hwstate(struct thread_info *thread) |
447 | { | 490 | { |
448 | unsigned int cpu = get_cpu(); | 491 | unsigned int cpu = get_cpu(); |
449 | 492 | ||
450 | /* | 493 | if (vfp_state_in_hw(cpu, thread)) { |
451 | * If the thread we're interested in is the current owner of the | ||
452 | * hardware VFP state, then we need to save its state. | ||
453 | */ | ||
454 | if (last_VFP_context[cpu] == &thread->vfpstate) { | ||
455 | u32 fpexc = fmrx(FPEXC); | 494 | u32 fpexc = fmrx(FPEXC); |
456 | 495 | ||
457 | /* | 496 | /* |
@@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread) | |||
465 | put_cpu(); | 504 | put_cpu(); |
466 | } | 505 | } |
467 | 506 | ||
507 | /* Ensure that the thread reloads the hardware VFP state on the next use. */ | ||
468 | void vfp_flush_hwstate(struct thread_info *thread) | 508 | void vfp_flush_hwstate(struct thread_info *thread) |
469 | { | 509 | { |
470 | unsigned int cpu = get_cpu(); | 510 | unsigned int cpu = get_cpu(); |
471 | 511 | ||
472 | /* | 512 | vfp_force_reload(cpu, thread); |
473 | * If the thread we're interested in is the current owner of the | ||
474 | * hardware VFP state, then we need to save its state. | ||
475 | */ | ||
476 | if (last_VFP_context[cpu] == &thread->vfpstate) { | ||
477 | u32 fpexc = fmrx(FPEXC); | ||
478 | 513 | ||
479 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | ||
480 | |||
481 | /* | ||
482 | * Set the context to NULL to force a reload the next time | ||
483 | * the thread uses the VFP. | ||
484 | */ | ||
485 | last_VFP_context[cpu] = NULL; | ||
486 | } | ||
487 | |||
488 | #ifdef CONFIG_SMP | ||
489 | /* | ||
490 | * For SMP we still have to take care of the case where the thread | ||
491 | * migrates to another CPU and then back to the original CPU on which | ||
492 | * the last VFP user is still the same thread. Mark the thread VFP | ||
493 | * state as belonging to a non-existent CPU so that the saved one will | ||
494 | * be reloaded in the above case. | ||
495 | */ | ||
496 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
497 | #endif | ||
498 | put_cpu(); | 514 | put_cpu(); |
499 | } | 515 | } |
500 | 516 | ||
@@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action, | |||
513 | void *hcpu) | 529 | void *hcpu) |
514 | { | 530 | { |
515 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { | 531 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { |
516 | unsigned int cpu = (long)hcpu; | 532 | vfp_force_reload((long)hcpu, current_thread_info()); |
517 | last_VFP_context[cpu] = NULL; | ||
518 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 533 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
519 | vfp_enable(NULL); | 534 | vfp_enable(NULL); |
520 | return NOTIFY_OK; | 535 | return NOTIFY_OK; |
@@ -582,7 +597,6 @@ static int __init vfp_init(void) | |||
582 | elf_hwcap |= HWCAP_VFPv3D16; | 597 | elf_hwcap |= HWCAP_VFPv3D16; |
583 | } | 598 | } |
584 | #endif | 599 | #endif |
585 | #ifdef CONFIG_NEON | ||
586 | /* | 600 | /* |
587 | * Check for the presence of the Advanced SIMD | 601 | * Check for the presence of the Advanced SIMD |
588 | * load/store instructions, integer and single | 602 | * load/store instructions, integer and single |
@@ -590,10 +604,13 @@ static int __init vfp_init(void) | |||
590 | * for NEON if the hardware has the MVFR registers. | 604 | * for NEON if the hardware has the MVFR registers. |
591 | */ | 605 | */ |
592 | if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { | 606 | if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { |
607 | #ifdef CONFIG_NEON | ||
593 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) | 608 | if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) |
594 | elf_hwcap |= HWCAP_NEON; | 609 | elf_hwcap |= HWCAP_NEON; |
595 | } | ||
596 | #endif | 610 | #endif |
611 | if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) | ||
612 | elf_hwcap |= HWCAP_VFPv4; | ||
613 | } | ||
597 | } | 614 | } |
598 | return 0; | 615 | return 0; |
599 | } | 616 | } |
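Note: the vfp_current_hw_state rework keeps vfp_sync_hwstate()/vfp_flush_hwstate() as the interface for code that needs a coherent view of a thread's saved VFP registers (the ptrace VFP regset code follows this pattern). The expected usage, sketched with a hypothetical helper in kernel context:

	/* Hypothetical example of the calling pattern. */
	static void example_set_fpscr(struct thread_info *thread, u32 fpscr)
	{
		vfp_sync_hwstate(thread);		/* copy live hw state into vfpstate */
		thread->vfpstate.hard.fpscr = fpscr;	/* edit the software copy */
		vfp_flush_hwstate(thread);		/* force a reload on next VFP use */
	}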