Diffstat (limited to 'arch/arm')
185 files changed, 2403 insertions, 1863 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9adc278a22ab..84fda2bebd7a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -37,6 +37,9 @@ config ARM | |||
37 | Europe. There is an ARM Linux project with a web page at | 37 | Europe. There is an ARM Linux project with a web page at |
38 | <http://www.arm.linux.org.uk/>. | 38 | <http://www.arm.linux.org.uk/>. |
39 | 39 | ||
40 | config ARM_HAS_SG_CHAIN | ||
41 | bool | ||
42 | |||
40 | config HAVE_PWM | 43 | config HAVE_PWM |
41 | bool | 44 | bool |
42 | 45 | ||
@@ -1346,7 +1349,6 @@ config SMP_ON_UP | |||
1346 | 1349 | ||
1347 | config HAVE_ARM_SCU | 1350 | config HAVE_ARM_SCU |
1348 | bool | 1351 | bool |
1349 | depends on SMP | ||
1350 | help | 1352 | help |
1351 | This option enables support for the ARM system coherency unit | 1353 | This option enables support for the ARM system coherency unit |
1352 | 1354 | ||
@@ -1715,17 +1717,34 @@ config ZBOOT_ROM | |||
1715 | Say Y here if you intend to execute your compressed kernel image | 1717 | Say Y here if you intend to execute your compressed kernel image |
1716 | (zImage) directly from ROM or flash. If unsure, say N. | 1718 | (zImage) directly from ROM or flash. If unsure, say N. |
1717 | 1719 | ||
1720 | choice | ||
1721 | prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)" | ||
1722 | depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL | ||
1723 | default ZBOOT_ROM_NONE | ||
1724 | help | ||
1725 | Include experimental SD/MMC loading code in the ROM-able zImage. | ||
1726 | With this enabled it is possible to write the ROM-able zImage | ||
1727 | kernel image to an MMC or SD card and boot the kernel straight | ||
1728 | from the reset vector. At reset the processor Mask ROM will load | ||
1729 | the first part of the ROM-able zImage which in turn loads the | ||
1730 | rest of the kernel image to RAM. | ||
1731 | |||
1732 | config ZBOOT_ROM_NONE | ||
1733 | bool "No SD/MMC loader in zImage (EXPERIMENTAL)" | ||
1734 | help | ||
1735 | Do not load the image from SD or MMC | ||
1736 | |||
1718 | config ZBOOT_ROM_MMCIF | 1737 | config ZBOOT_ROM_MMCIF |
1719 | bool "Include MMCIF loader in zImage (EXPERIMENTAL)" | 1738 | bool "Include MMCIF loader in zImage (EXPERIMENTAL)" |
1720 | depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL | ||
1721 | help | 1739 | help |
1722 | Say Y here to include experimental MMCIF loading code in the | 1740 | Load image from MMCIF hardware block. |
1723 | ROM-able zImage. With this enabled it is possible to write the | 1741 | |
1724 | the ROM-able zImage kernel image to an MMC card and boot the | 1742 | config ZBOOT_ROM_SH_MOBILE_SDHI |
1725 | kernel straight from the reset vector. At reset the processor | 1743 | bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)" |
1726 | Mask ROM will load the first part of the the ROM-able zImage | 1744 | help |
1727 | which in turn loads the rest the kernel image to RAM using the | 1745 | Load image from SDHI hardware block |
1728 | MMCIF hardware block. | 1746 | |
1747 | endchoice | ||
1729 | 1748 | ||
1730 | config CMDLINE | 1749 | config CMDLINE |
1731 | string "Default kernel command string" | 1750 | string "Default kernel command string" |
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index 23aad0722303..0c74a6fab952 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -6,13 +6,19 @@ | |||
6 | 6 | ||
7 | OBJS = | 7 | OBJS = |
8 | 8 | ||
9 | # Ensure that mmcif loader code appears early in the image | 9 | # Ensure that MMCIF loader code appears early in the image |
10 | # to minimise the number of blocks that have to be read in | 10 | # to minimise the number of blocks that have to be read in |
11 | # order to load it. | 11 | # order to load it. |
12 | ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y) | 12 | ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y) |
13 | ifeq ($(CONFIG_ARCH_SH7372),y) | ||
14 | OBJS += mmcif-sh7372.o | 13 | OBJS += mmcif-sh7372.o |
15 | endif | 14 | endif |
15 | |||
16 | # Ensure that SDHI loader code appears early in the image | ||
17 | # to minimise the number of blocks that have to be read in | ||
18 | # order to load it. | ||
19 | ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y) | ||
20 | OBJS += sdhi-shmobile.o | ||
21 | OBJS += sdhi-sh7372.o | ||
16 | endif | 22 | endif |
17 | 23 | ||
18 | AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) | 24 | AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET) |
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S index c943d2e7da9d..fe3719b516fd 100644 --- a/arch/arm/boot/compressed/head-shmobile.S +++ b/arch/arm/boot/compressed/head-shmobile.S | |||
@@ -25,14 +25,14 @@ | |||
25 | /* load board-specific initialization code */ | 25 | /* load board-specific initialization code */ |
26 | #include <mach/zboot.h> | 26 | #include <mach/zboot.h> |
27 | 27 | ||
28 | #ifdef CONFIG_ZBOOT_ROM_MMCIF | 28 | #if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI) |
29 | /* Load image from MMC */ | 29 | /* Load image from MMC/SD */ |
30 | adr sp, __tmp_stack + 128 | 30 | adr sp, __tmp_stack + 256 |
31 | ldr r0, __image_start | 31 | ldr r0, __image_start |
32 | ldr r1, __image_end | 32 | ldr r1, __image_end |
33 | subs r1, r1, r0 | 33 | subs r1, r1, r0 |
34 | ldr r0, __load_base | 34 | ldr r0, __load_base |
35 | bl mmcif_loader | 35 | bl mmc_loader |
36 | 36 | ||
37 | /* Jump to loaded code */ | 37 | /* Jump to loaded code */ |
38 | ldr r0, __loaded | 38 | ldr r0, __loaded |
@@ -51,9 +51,9 @@ __loaded: | |||
51 | .long __continue | 51 | .long __continue |
52 | .align | 52 | .align |
53 | __tmp_stack: | 53 | __tmp_stack: |
54 | .space 128 | 54 | .space 256 |
55 | __continue: | 55 | __continue: |
56 | #endif /* CONFIG_ZBOOT_ROM_MMCIF */ | 56 | #endif /* CONFIG_ZBOOT_ROM_MMCIF || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */ |
57 | 57 | ||
58 | b 1f | 58 | b 1f |
59 | __atags:@ tag #1 | 59 | __atags:@ tag #1 |
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S index 942fad97e447..e95a5989602a 100644 --- a/arch/arm/boot/compressed/head.S +++ b/arch/arm/boot/compressed/head.S | |||
@@ -353,7 +353,8 @@ not_relocated: mov r0, #0 | |||
353 | mov r0, #0 @ must be zero | 353 | mov r0, #0 @ must be zero |
354 | mov r1, r7 @ restore architecture number | 354 | mov r1, r7 @ restore architecture number |
355 | mov r2, r8 @ restore atags pointer | 355 | mov r2, r8 @ restore atags pointer |
356 | mov pc, r4 @ call kernel | 356 | ARM( mov pc, r4 ) @ call kernel |
357 | THUMB( bx r4 ) @ entry point is always ARM | ||
357 | 358 | ||
358 | .align 2 | 359 | .align 2 |
359 | .type LC0, #object | 360 | .type LC0, #object |
@@ -597,6 +598,8 @@ __common_mmu_cache_on: | |||
597 | sub pc, lr, r0, lsr #32 @ properly flush pipeline | 598 | sub pc, lr, r0, lsr #32 @ properly flush pipeline |
598 | #endif | 599 | #endif |
599 | 600 | ||
601 | #define PROC_ENTRY_SIZE (4*5) | ||
602 | |||
600 | /* | 603 | /* |
601 | * Here follow the relocatable cache support functions for the | 604 | * Here follow the relocatable cache support functions for the |
602 | * various processors. This is a generic hook for locating an | 605 | * various processors. This is a generic hook for locating an |
@@ -624,7 +627,7 @@ call_cache_fn: adr r12, proc_types | |||
624 | ARM( addeq pc, r12, r3 ) @ call cache function | 627 | ARM( addeq pc, r12, r3 ) @ call cache function |
625 | THUMB( addeq r12, r3 ) | 628 | THUMB( addeq r12, r3 ) |
626 | THUMB( moveq pc, r12 ) @ call cache function | 629 | THUMB( moveq pc, r12 ) @ call cache function |
627 | add r12, r12, #4*5 | 630 | add r12, r12, #PROC_ENTRY_SIZE |
628 | b 1b | 631 | b 1b |
629 | 632 | ||
630 | /* | 633 | /* |
@@ -794,6 +797,16 @@ proc_types: | |||
794 | 797 | ||
795 | .size proc_types, . - proc_types | 798 | .size proc_types, . - proc_types |
796 | 799 | ||
800 | /* | ||
801 | * If you get a "non-constant expression in ".if" statement" | ||
802 | * error from the assembler on this line, check that you have | ||
803 | * not accidentally written a "b" instruction where you should | ||
804 | * have written W(b). | ||
805 | */ | ||
806 | .if (. - proc_types) % PROC_ENTRY_SIZE != 0 | ||
807 | .error "The size of one or more proc_types entries is wrong." | ||
808 | .endif | ||
809 | |||
797 | /* | 810 | /* |
798 | * Turn off the Cache and MMU. ARMv3 does not support | 811 | * Turn off the Cache and MMU. ARMv3 does not support |
799 | * reading the control register, but ARMv4 does. | 812 | * reading the control register, but ARMv4 does. |
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c index 7453c8337b83..b6f61d9a5a1b 100644 --- a/arch/arm/boot/compressed/mmcif-sh7372.c +++ b/arch/arm/boot/compressed/mmcif-sh7372.c | |||
@@ -40,7 +40,7 @@ | |||
40 | * to an MMC card | 40 | * to an MMC card |
41 | * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1 | 41 | * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1 |
42 | */ | 42 | */ |
43 | asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len) | 43 | asmlinkage void mmc_loader(unsigned char *buf, unsigned long len) |
44 | { | 44 | { |
45 | mmc_init_progress(); | 45 | mmc_init_progress(); |
46 | mmc_update_progress(MMC_PROGRESS_ENTER); | 46 | mmc_update_progress(MMC_PROGRESS_ENTER); |
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c new file mode 100644 index 000000000000..d403a8b24d7f --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-sh7372.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * SuperH Mobile SDHI | ||
3 | * | ||
4 | * Copyright (C) 2010 Magnus Damm | ||
5 | * Copyright (C) 2010 Kuninori Morimoto | ||
6 | * Copyright (C) 2010 Simon Horman | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | * Parts inspired by u-boot | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <mach/mmc.h> | ||
17 | #include <linux/mmc/boot.h> | ||
18 | #include <linux/mmc/tmio.h> | ||
19 | |||
20 | #include "sdhi-shmobile.h" | ||
21 | |||
22 | #define PORT179CR 0xe60520b3 | ||
23 | #define PORT180CR 0xe60520b4 | ||
24 | #define PORT181CR 0xe60520b5 | ||
25 | #define PORT182CR 0xe60520b6 | ||
26 | #define PORT183CR 0xe60520b7 | ||
27 | #define PORT184CR 0xe60520b8 | ||
28 | |||
29 | #define SMSTPCR3 0xe615013c | ||
30 | |||
31 | #define CR_INPUT_ENABLE 0x10 | ||
32 | #define CR_FUNCTION1 0x01 | ||
33 | |||
34 | #define SDHI1_BASE (void __iomem *)0xe6860000 | ||
35 | #define SDHI_BASE SDHI1_BASE | ||
36 | |||
37 | /* SuperH Mobile SDHI loader | ||
38 | * | ||
39 | * loads the zImage from an SD card starting from block 0 | ||
40 | * on physical partition 1 | ||
41 | * | ||
42 | * The image must start with a vrl4 header and | ||
43 | * the zImage must start at offset 512 of the image. That is, | ||
44 | * at block 1 (=byte 512) of physical partition 1 | ||
45 | * | ||
46 | * Use the following line to write the vrl4 formatted zImage | ||
47 | * to an SD card | ||
48 | * # dd if=vrl4.out of=/dev/sdx bs=512 | ||
49 | */ | ||
50 | asmlinkage void mmc_loader(unsigned short *buf, unsigned long len) | ||
51 | { | ||
52 | int high_capacity; | ||
53 | |||
54 | mmc_init_progress(); | ||
55 | |||
56 | mmc_update_progress(MMC_PROGRESS_ENTER); | ||
57 | /* Initialise SDHI1 */ | ||
58 | /* PORT184CR: GPIO_FN_SDHICMD1 Control */ | ||
59 | __raw_writeb(CR_FUNCTION1, PORT184CR); | ||
60 | /* PORT179CR: GPIO_FN_SDHICLK1 Control */ | ||
61 | __raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR); | ||
62 | /* PORT181CR: GPIO_FN_SDHID1_3 Control */ | ||
63 | __raw_writeb(CR_FUNCTION1, PORT183CR); | ||
64 | /* PORT182CR: GPIO_FN_SDHID1_2 Control */ | ||
65 | __raw_writeb(CR_FUNCTION1, PORT182CR); | ||
66 | /* PORT183CR: GPIO_FN_SDHID1_1 Control */ | ||
67 | __raw_writeb(CR_FUNCTION1, PORT181CR); | ||
68 | /* PORT180CR: GPIO_FN_SDHID1_0 Control */ | ||
69 | __raw_writeb(CR_FUNCTION1, PORT180CR); | ||
70 | |||
71 | /* Enable clock to SDHI1 hardware block */ | ||
72 | __raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3); | ||
73 | |||
74 | /* setup SDHI hardware */ | ||
75 | mmc_update_progress(MMC_PROGRESS_INIT); | ||
76 | high_capacity = sdhi_boot_init(SDHI_BASE); | ||
77 | if (high_capacity < 0) | ||
78 | goto err; | ||
79 | |||
80 | mmc_update_progress(MMC_PROGRESS_LOAD); | ||
81 | /* load kernel */ | ||
82 | if (sdhi_boot_do_read(SDHI_BASE, high_capacity, | ||
83 | 0, /* Kernel is at block 1 */ | ||
84 | (len + TMIO_BBS - 1) / TMIO_BBS, buf)) | ||
85 | goto err; | ||
86 | |||
87 | /* Disable clock to SDHI1 hardware block */ | ||
88 | __raw_writel(__raw_readl(SMSTPCR3) | (1 << 13), SMSTPCR3); | ||
89 | |||
90 | mmc_update_progress(MMC_PROGRESS_DONE); | ||
91 | |||
92 | return; | ||
93 | err: | ||
94 | for(;;); | ||
95 | } | ||
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c new file mode 100644 index 000000000000..bd3d46980955 --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.c | |||
@@ -0,0 +1,449 @@ | |||
1 | /* | ||
2 | * SuperH Mobile SDHI | ||
3 | * | ||
4 | * Copyright (C) 2010 Magnus Damm | ||
5 | * Copyright (C) 2010 Kuninori Morimoto | ||
6 | * Copyright (C) 2010 Simon Horman | ||
7 | * | ||
8 | * This file is subject to the terms and conditions of the GNU General Public | ||
9 | * License. See the file "COPYING" in the main directory of this archive | ||
10 | * for more details. | ||
11 | * | ||
12 | * Parts inspired by u-boot | ||
13 | */ | ||
14 | |||
15 | #include <linux/io.h> | ||
16 | #include <linux/mmc/host.h> | ||
17 | #include <linux/mmc/core.h> | ||
18 | #include <linux/mmc/mmc.h> | ||
19 | #include <linux/mmc/sd.h> | ||
20 | #include <linux/mmc/tmio.h> | ||
21 | #include <mach/sdhi.h> | ||
22 | |||
23 | #define OCR_FASTBOOT (1<<29) | ||
24 | #define OCR_HCS (1<<30) | ||
25 | #define OCR_BUSY (1<<31) | ||
26 | |||
27 | #define RESP_CMD12 0x00000030 | ||
28 | |||
29 | static inline u16 sd_ctrl_read16(void __iomem *base, int addr) | ||
30 | { | ||
31 | return __raw_readw(base + addr); | ||
32 | } | ||
33 | |||
34 | static inline u32 sd_ctrl_read32(void __iomem *base, int addr) | ||
35 | { | ||
36 | return __raw_readw(base + addr) | | ||
37 | __raw_readw(base + addr + 2) << 16; | ||
38 | } | ||
39 | |||
40 | static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val) | ||
41 | { | ||
42 | __raw_writew(val, base + addr); | ||
43 | } | ||
44 | |||
45 | static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val) | ||
46 | { | ||
47 | __raw_writew(val, base + addr); | ||
48 | __raw_writew(val >> 16, base + addr + 2); | ||
49 | } | ||
50 | |||
51 | #define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL | \ | ||
52 | TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT | \ | ||
53 | TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN | \ | ||
54 | TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS | \ | ||
55 | TMIO_STAT_ILL_FUNC) | ||
56 | |||
57 | static int sdhi_intr(void __iomem *base) | ||
58 | { | ||
59 | unsigned long state = sd_ctrl_read32(base, CTL_STATUS); | ||
60 | |||
61 | if (state & ALL_ERROR) { | ||
62 | sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR); | ||
63 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
64 | ALL_ERROR | | ||
65 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
66 | return -EINVAL; | ||
67 | } | ||
68 | if (state & TMIO_STAT_CMDRESPEND) { | ||
69 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); | ||
70 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
71 | TMIO_STAT_CMDRESPEND | | ||
72 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
73 | return 0; | ||
74 | } | ||
75 | if (state & TMIO_STAT_RXRDY) { | ||
76 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY); | ||
77 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
78 | TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN | | ||
79 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
80 | return 0; | ||
81 | } | ||
82 | if (state & TMIO_STAT_DATAEND) { | ||
83 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND); | ||
84 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
85 | TMIO_STAT_DATAEND | | ||
86 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | return -EAGAIN; | ||
91 | } | ||
92 | |||
93 | static int sdhi_boot_wait_resp_end(void __iomem *base) | ||
94 | { | ||
95 | int err = -EAGAIN, timeout = 10000000; | ||
96 | |||
97 | while (timeout--) { | ||
98 | err = sdhi_intr(base); | ||
99 | if (err != -EAGAIN) | ||
100 | break; | ||
101 | udelay(1); | ||
102 | } | ||
103 | |||
104 | return err; | ||
105 | } | ||
106 | |||
107 | /* SDHI_CLK_CTRL */ | ||
108 | #define CLK_MMC_ENABLE (1 << 8) | ||
109 | #define CLK_MMC_INIT (1 << 6) /* clk / 256 */ | ||
110 | |||
111 | static void sdhi_boot_mmc_clk_stop(void __iomem *base) | ||
112 | { | ||
113 | sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000); | ||
114 | msleep(10); | ||
115 | sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE & | ||
116 | sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); | ||
117 | msleep(10); | ||
118 | } | ||
119 | |||
120 | static void sdhi_boot_mmc_clk_start(void __iomem *base) | ||
121 | { | ||
122 | sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE | | ||
123 | sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL)); | ||
124 | msleep(10); | ||
125 | sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE); | ||
126 | msleep(10); | ||
127 | } | ||
128 | |||
129 | static void sdhi_boot_reset(void __iomem *base) | ||
130 | { | ||
131 | sd_ctrl_write16(base, CTL_RESET_SD, 0x0000); | ||
132 | msleep(10); | ||
133 | sd_ctrl_write16(base, CTL_RESET_SD, 0x0001); | ||
134 | msleep(10); | ||
135 | } | ||
136 | |||
137 | /* Set MMC clock / power. | ||
138 | * Note: This controller uses a simple divider scheme therefore it cannot | ||
139 | * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as | ||
140 | * MMC wont run that fast, it has to be clocked at 12MHz which is the next | ||
141 | * slowest setting. | ||
142 | */ | ||
143 | static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios) | ||
144 | { | ||
145 | if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY) | ||
146 | return -EBUSY; | ||
147 | |||
148 | if (ios->clock) | ||
149 | sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, | ||
150 | ios->clock | CLK_MMC_ENABLE); | ||
151 | |||
152 | /* Power sequence - OFF -> ON -> UP */ | ||
153 | switch (ios->power_mode) { | ||
154 | case MMC_POWER_OFF: /* power down SD bus */ | ||
155 | sdhi_boot_mmc_clk_stop(base); | ||
156 | break; | ||
157 | case MMC_POWER_ON: /* power up SD bus */ | ||
158 | break; | ||
159 | case MMC_POWER_UP: /* start bus clock */ | ||
160 | sdhi_boot_mmc_clk_start(base); | ||
161 | break; | ||
162 | } | ||
163 | |||
164 | switch (ios->bus_width) { | ||
165 | case MMC_BUS_WIDTH_1: | ||
166 | sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0); | ||
167 | break; | ||
168 | case MMC_BUS_WIDTH_4: | ||
169 | sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0); | ||
170 | break; | ||
171 | } | ||
172 | |||
173 | /* Let things settle. delay taken from winCE driver */ | ||
174 | udelay(140); | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | /* These are the bitmasks the tmio chip requires to implement the MMC response | ||
180 | * types. Note that R1 and R6 are the same in this scheme. */ | ||
181 | #define RESP_NONE 0x0300 | ||
182 | #define RESP_R1 0x0400 | ||
183 | #define RESP_R1B 0x0500 | ||
184 | #define RESP_R2 0x0600 | ||
185 | #define RESP_R3 0x0700 | ||
186 | #define DATA_PRESENT 0x0800 | ||
187 | #define TRANSFER_READ 0x1000 | ||
188 | |||
189 | static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd) | ||
190 | { | ||
191 | int err, c = cmd->opcode; | ||
192 | |||
193 | switch (mmc_resp_type(cmd)) { | ||
194 | case MMC_RSP_NONE: c |= RESP_NONE; break; | ||
195 | case MMC_RSP_R1: c |= RESP_R1; break; | ||
196 | case MMC_RSP_R1B: c |= RESP_R1B; break; | ||
197 | case MMC_RSP_R2: c |= RESP_R2; break; | ||
198 | case MMC_RSP_R3: c |= RESP_R3; break; | ||
199 | default: | ||
200 | return -EINVAL; | ||
201 | } | ||
202 | |||
203 | /* No interrupts so this may not be cleared */ | ||
204 | sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND); | ||
205 | |||
206 | sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND | | ||
207 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
208 | sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg); | ||
209 | sd_ctrl_write16(base, CTL_SD_CMD, c); | ||
210 | |||
211 | |||
212 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
213 | ~(TMIO_STAT_CMDRESPEND | ALL_ERROR) & | ||
214 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
215 | |||
216 | err = sdhi_boot_wait_resp_end(base); | ||
217 | if (err) | ||
218 | return err; | ||
219 | |||
220 | cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity, | ||
226 | unsigned long block, unsigned short *buf) | ||
227 | { | ||
228 | int err, i; | ||
229 | |||
230 | /* CMD17 - Read */ | ||
231 | { | ||
232 | struct mmc_command cmd; | ||
233 | |||
234 | cmd.opcode = MMC_READ_SINGLE_BLOCK | \ | ||
235 | TRANSFER_READ | DATA_PRESENT; | ||
236 | if (high_capacity) | ||
237 | cmd.arg = block; | ||
238 | else | ||
239 | cmd.arg = block * TMIO_BBS; | ||
240 | cmd.flags = MMC_RSP_R1; | ||
241 | err = sdhi_boot_request(base, &cmd); | ||
242 | if (err) | ||
243 | return err; | ||
244 | } | ||
245 | |||
246 | sd_ctrl_write32(base, CTL_IRQ_MASK, | ||
247 | ~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY | | ||
248 | TMIO_STAT_TXUNDERRUN) & | ||
249 | sd_ctrl_read32(base, CTL_IRQ_MASK)); | ||
250 | err = sdhi_boot_wait_resp_end(base); | ||
251 | if (err) | ||
252 | return err; | ||
253 | |||
254 | sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS); | ||
255 | for (i = 0; i < TMIO_BBS / sizeof(*buf); i++) | ||
256 | *buf++ = sd_ctrl_read16(base, RESP_CMD12); | ||
257 | |||
258 | err = sdhi_boot_wait_resp_end(base); | ||
259 | if (err) | ||
260 | return err; | ||
261 | |||
262 | return 0; | ||
263 | } | ||
264 | |||
265 | int sdhi_boot_do_read(void __iomem *base, int high_capacity, | ||
266 | unsigned long offset, unsigned short count, | ||
267 | unsigned short *buf) | ||
268 | { | ||
269 | unsigned long i; | ||
270 | int err = 0; | ||
271 | |||
272 | for (i = 0; i < count; i++) { | ||
273 | err = sdhi_boot_do_read_single(base, high_capacity, offset + i, | ||
274 | buf + (i * TMIO_BBS / | ||
275 | sizeof(*buf))); | ||
276 | if (err) | ||
277 | return err; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | #define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34) | ||
284 | |||
285 | int sdhi_boot_init(void __iomem *base) | ||
286 | { | ||
287 | bool sd_v2 = false, sd_v1_0 = false; | ||
288 | unsigned short cid; | ||
289 | int err, high_capacity = 0; | ||
290 | |||
291 | sdhi_boot_mmc_clk_stop(base); | ||
292 | sdhi_boot_reset(base); | ||
293 | |||
294 | /* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */ | ||
295 | { | ||
296 | struct mmc_ios ios; | ||
297 | ios.power_mode = MMC_POWER_ON; | ||
298 | ios.bus_width = MMC_BUS_WIDTH_1; | ||
299 | ios.clock = CLK_MMC_INIT; | ||
300 | err = sdhi_boot_mmc_set_ios(base, &ios); | ||
301 | if (err) | ||
302 | return err; | ||
303 | } | ||
304 | |||
305 | /* CMD0 */ | ||
306 | { | ||
307 | struct mmc_command cmd; | ||
308 | msleep(1); | ||
309 | cmd.opcode = MMC_GO_IDLE_STATE; | ||
310 | cmd.arg = 0; | ||
311 | cmd.flags = MMC_RSP_NONE; | ||
312 | err = sdhi_boot_request(base, &cmd); | ||
313 | if (err) | ||
314 | return err; | ||
315 | msleep(2); | ||
316 | } | ||
317 | |||
318 | /* CMD8 - Test for SD version 2 */ | ||
319 | { | ||
320 | struct mmc_command cmd; | ||
321 | cmd.opcode = SD_SEND_IF_COND; | ||
322 | cmd.arg = (VOLTAGES != 0) << 8 | 0xaa; | ||
323 | cmd.flags = MMC_RSP_R1; | ||
324 | err = sdhi_boot_request(base, &cmd); /* Ignore error */ | ||
325 | if ((cmd.resp[0] & 0xff) == 0xaa) | ||
326 | sd_v2 = true; | ||
327 | } | ||
328 | |||
329 | /* CMD55 - Get OCR (SD) */ | ||
330 | { | ||
331 | int timeout = 1000; | ||
332 | struct mmc_command cmd; | ||
333 | |||
334 | cmd.arg = 0; | ||
335 | |||
336 | do { | ||
337 | cmd.opcode = MMC_APP_CMD; | ||
338 | cmd.flags = MMC_RSP_R1; | ||
339 | cmd.arg = 0; | ||
340 | err = sdhi_boot_request(base, &cmd); | ||
341 | if (err) | ||
342 | break; | ||
343 | |||
344 | cmd.opcode = SD_APP_OP_COND; | ||
345 | cmd.flags = MMC_RSP_R3; | ||
346 | cmd.arg = (VOLTAGES & 0xff8000); | ||
347 | if (sd_v2) | ||
348 | cmd.arg |= OCR_HCS; | ||
349 | cmd.arg |= OCR_FASTBOOT; | ||
350 | err = sdhi_boot_request(base, &cmd); | ||
351 | if (err) | ||
352 | break; | ||
353 | |||
354 | msleep(1); | ||
355 | } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); | ||
356 | |||
357 | if (!err && timeout) { | ||
358 | if (!sd_v2) | ||
359 | sd_v1_0 = true; | ||
360 | high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | /* CMD1 - Get OCR (MMC) */ | ||
365 | if (!sd_v2 && !sd_v1_0) { | ||
366 | int timeout = 1000; | ||
367 | struct mmc_command cmd; | ||
368 | |||
369 | do { | ||
370 | cmd.opcode = MMC_SEND_OP_COND; | ||
371 | cmd.arg = VOLTAGES | OCR_HCS; | ||
372 | cmd.flags = MMC_RSP_R3; | ||
373 | err = sdhi_boot_request(base, &cmd); | ||
374 | if (err) | ||
375 | return err; | ||
376 | |||
377 | msleep(1); | ||
378 | } while((!(cmd.resp[0] & OCR_BUSY)) && --timeout); | ||
379 | |||
380 | if (!timeout) | ||
381 | return -EAGAIN; | ||
382 | |||
383 | high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS; | ||
384 | } | ||
385 | |||
386 | /* CMD2 - Get CID */ | ||
387 | { | ||
388 | struct mmc_command cmd; | ||
389 | cmd.opcode = MMC_ALL_SEND_CID; | ||
390 | cmd.arg = 0; | ||
391 | cmd.flags = MMC_RSP_R2; | ||
392 | err = sdhi_boot_request(base, &cmd); | ||
393 | if (err) | ||
394 | return err; | ||
395 | } | ||
396 | |||
397 | /* CMD3 | ||
398 | * MMC: Set the relative address | ||
399 | * SD: Get the relative address | ||
400 | * Also puts the card into the standby state | ||
401 | */ | ||
402 | { | ||
403 | struct mmc_command cmd; | ||
404 | cmd.opcode = MMC_SET_RELATIVE_ADDR; | ||
405 | cmd.arg = 0; | ||
406 | cmd.flags = MMC_RSP_R1; | ||
407 | err = sdhi_boot_request(base, &cmd); | ||
408 | if (err) | ||
409 | return err; | ||
410 | cid = cmd.resp[0] >> 16; | ||
411 | } | ||
412 | |||
413 | /* CMD9 - Get CSD */ | ||
414 | { | ||
415 | struct mmc_command cmd; | ||
416 | cmd.opcode = MMC_SEND_CSD; | ||
417 | cmd.arg = cid << 16; | ||
418 | cmd.flags = MMC_RSP_R2; | ||
419 | err = sdhi_boot_request(base, &cmd); | ||
420 | if (err) | ||
421 | return err; | ||
422 | } | ||
423 | |||
424 | /* CMD7 - Select the card */ | ||
425 | { | ||
426 | struct mmc_command cmd; | ||
427 | cmd.opcode = MMC_SELECT_CARD; | ||
428 | //cmd.arg = rca << 16; | ||
429 | cmd.arg = cid << 16; | ||
430 | //cmd.flags = MMC_RSP_R1B; | ||
431 | cmd.flags = MMC_RSP_R1; | ||
432 | err = sdhi_boot_request(base, &cmd); | ||
433 | if (err) | ||
434 | return err; | ||
435 | } | ||
436 | |||
437 | /* CMD16 - Set the block size */ | ||
438 | { | ||
439 | struct mmc_command cmd; | ||
440 | cmd.opcode = MMC_SET_BLOCKLEN; | ||
441 | cmd.arg = TMIO_BBS; | ||
442 | cmd.flags = MMC_RSP_R1; | ||
443 | err = sdhi_boot_request(base, &cmd); | ||
444 | if (err) | ||
445 | return err; | ||
446 | } | ||
447 | |||
448 | return high_capacity; | ||
449 | } | ||
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h new file mode 100644 index 000000000000..92eaa09f985e --- /dev/null +++ b/arch/arm/boot/compressed/sdhi-shmobile.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef SDHI_MOBILE_H | ||
2 | #define SDHI_MOBILE_H | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | |||
6 | int sdhi_boot_do_read(void __iomem *base, int high_capacity, | ||
7 | unsigned long offset, unsigned short count, | ||
8 | unsigned short *buf); | ||
9 | int sdhi_boot_init(void __iomem *base); | ||
10 | |||
11 | #endif | ||
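A minimal sketch of how a board-specific loader is expected to use the two entry points declared above, modelled on sdhi-sh7372.c earlier in this patch. The load_zimage() name and the sdhi_base/dest/len parameters are illustrative only; TMIO_BBS (the 512-byte block size) comes from <linux/mmc/tmio.h>, as in the rest of the patch:

#include <linux/mmc/tmio.h>
#include "sdhi-shmobile.h"

/* Hypothetical board loader: initialise the card, then read the zImage. */
static int load_zimage(void __iomem *sdhi_base, unsigned short *dest,
		       unsigned long len)
{
	/* sdhi_boot_init() returns 1 for high-capacity cards, 0 otherwise, <0 on error */
	int high_capacity = sdhi_boot_init(sdhi_base);

	if (high_capacity < 0)
		return high_capacity;

	/* offset 0, as in sdhi-sh7372.c above ("Kernel is at block 1") */
	return sdhi_boot_do_read(sdhi_base, high_capacity, 0,
				 (len + TMIO_BBS - 1) / TMIO_BBS, dest);
}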
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in index ea80abe78844..4e728834a1b9 100644 --- a/arch/arm/boot/compressed/vmlinux.lds.in +++ b/arch/arm/boot/compressed/vmlinux.lds.in | |||
@@ -33,20 +33,24 @@ SECTIONS | |||
33 | *(.text.*) | 33 | *(.text.*) |
34 | *(.fixup) | 34 | *(.fixup) |
35 | *(.gnu.warning) | 35 | *(.gnu.warning) |
36 | *(.glue_7t) | ||
37 | *(.glue_7) | ||
38 | } | ||
39 | .rodata : { | ||
36 | *(.rodata) | 40 | *(.rodata) |
37 | *(.rodata.*) | 41 | *(.rodata.*) |
38 | *(.glue_7) | 42 | } |
39 | *(.glue_7t) | 43 | .piggydata : { |
40 | *(.piggydata) | 44 | *(.piggydata) |
41 | . = ALIGN(4); | ||
42 | } | 45 | } |
43 | 46 | ||
47 | . = ALIGN(4); | ||
44 | _etext = .; | 48 | _etext = .; |
45 | 49 | ||
50 | .got.plt : { *(.got.plt) } | ||
46 | _got_start = .; | 51 | _got_start = .; |
47 | .got : { *(.got) } | 52 | .got : { *(.got) } |
48 | _got_end = .; | 53 | _got_end = .; |
49 | .got.plt : { *(.got.plt) } | ||
50 | _edata = .; | 54 | _edata = .; |
51 | 55 | ||
52 | . = BSS_START; | 56 | . = BSS_START; |
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c index e5681636626f..595ecd290ebf 100644 --- a/arch/arm/common/dmabounce.c +++ b/arch/arm/common/dmabounce.c | |||
@@ -79,6 +79,8 @@ struct dmabounce_device_info { | |||
79 | struct dmabounce_pool large; | 79 | struct dmabounce_pool large; |
80 | 80 | ||
81 | rwlock_t lock; | 81 | rwlock_t lock; |
82 | |||
83 | int (*needs_bounce)(struct device *, dma_addr_t, size_t); | ||
82 | }; | 84 | }; |
83 | 85 | ||
84 | #ifdef STATS | 86 | #ifdef STATS |
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev, | |||
210 | if (!dev || !dev->archdata.dmabounce) | 212 | if (!dev || !dev->archdata.dmabounce) |
211 | return NULL; | 213 | return NULL; |
212 | if (dma_mapping_error(dev, dma_addr)) { | 214 | if (dma_mapping_error(dev, dma_addr)) { |
213 | if (dev) | 215 | dev_err(dev, "Trying to %s invalid mapping\n", where); |
214 | dev_err(dev, "Trying to %s invalid mapping\n", where); | ||
215 | else | ||
216 | pr_err("unknown device: Trying to %s invalid mapping\n", where); | ||
217 | return NULL; | 216 | return NULL; |
218 | } | 217 | } |
219 | return find_safe_buffer(dev->archdata.dmabounce, dma_addr); | 218 | return find_safe_buffer(dev->archdata.dmabounce, dma_addr); |
220 | } | 219 | } |
221 | 220 | ||
222 | static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, | 221 | static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) |
223 | enum dma_data_direction dir) | ||
224 | { | 222 | { |
225 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; | 223 | if (!dev || !dev->archdata.dmabounce) |
226 | dma_addr_t dma_addr; | 224 | return 0; |
227 | int needs_bounce = 0; | ||
228 | |||
229 | if (device_info) | ||
230 | DO_STATS ( device_info->map_op_count++ ); | ||
231 | |||
232 | dma_addr = virt_to_dma(dev, ptr); | ||
233 | 225 | ||
234 | if (dev->dma_mask) { | 226 | if (dev->dma_mask) { |
235 | unsigned long mask = *dev->dma_mask; | 227 | unsigned long limit, mask = *dev->dma_mask; |
236 | unsigned long limit; | ||
237 | 228 | ||
238 | limit = (mask + 1) & ~mask; | 229 | limit = (mask + 1) & ~mask; |
239 | if (limit && size > limit) { | 230 | if (limit && size > limit) { |
240 | dev_err(dev, "DMA mapping too big (requested %#x " | 231 | dev_err(dev, "DMA mapping too big (requested %#x " |
241 | "mask %#Lx)\n", size, *dev->dma_mask); | 232 | "mask %#Lx)\n", size, *dev->dma_mask); |
242 | return ~0; | 233 | return -E2BIG; |
243 | } | 234 | } |
244 | 235 | ||
245 | /* | 236 | /* Figure out if we need to bounce from the DMA mask. */ |
246 | * Figure out if we need to bounce from the DMA mask. | 237 | if ((dma_addr | (dma_addr + size - 1)) & ~mask) |
247 | */ | 238 | return 1; |
248 | needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; | ||
249 | } | 239 | } |
250 | 240 | ||
251 | if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { | 241 | return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); |
252 | struct safe_buffer *buf; | 242 | } |
253 | 243 | ||
254 | buf = alloc_safe_buffer(device_info, ptr, size, dir); | 244 | static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, |
255 | if (buf == 0) { | 245 | enum dma_data_direction dir) |
256 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", | 246 | { |
257 | __func__, ptr); | 247 | struct dmabounce_device_info *device_info = dev->archdata.dmabounce; |
258 | return 0; | 248 | struct safe_buffer *buf; |
259 | } | ||
260 | 249 | ||
261 | dev_dbg(dev, | 250 | if (device_info) |
262 | "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 251 | DO_STATS ( device_info->map_op_count++ ); |
263 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | ||
264 | buf->safe, buf->safe_dma_addr); | ||
265 | 252 | ||
266 | if ((dir == DMA_TO_DEVICE) || | 253 | buf = alloc_safe_buffer(device_info, ptr, size, dir); |
267 | (dir == DMA_BIDIRECTIONAL)) { | 254 | if (buf == NULL) { |
268 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", | 255 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", |
269 | __func__, ptr, buf->safe, size); | 256 | __func__, ptr); |
270 | memcpy(buf->safe, ptr, size); | 257 | return ~0; |
271 | } | 258 | } |
272 | ptr = buf->safe; | ||
273 | 259 | ||
274 | dma_addr = buf->safe_dma_addr; | 260 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", |
275 | } else { | 261 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), |
276 | /* | 262 | buf->safe, buf->safe_dma_addr); |
277 | * We don't need to sync the DMA buffer since | 263 | |
278 | * it was allocated via the coherent allocators. | 264 | if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { |
279 | */ | 265 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", |
280 | __dma_single_cpu_to_dev(ptr, size, dir); | 266 | __func__, ptr, buf->safe, size); |
267 | memcpy(buf->safe, ptr, size); | ||
281 | } | 268 | } |
282 | 269 | ||
283 | return dma_addr; | 270 | return buf->safe_dma_addr; |
284 | } | 271 | } |
285 | 272 | ||
286 | static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, | 273 | static inline void unmap_single(struct device *dev, struct safe_buffer *buf, |
287 | size_t size, enum dma_data_direction dir) | 274 | size_t size, enum dma_data_direction dir) |
288 | { | 275 | { |
289 | struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap"); | 276 | BUG_ON(buf->size != size); |
290 | 277 | BUG_ON(buf->direction != dir); | |
291 | if (buf) { | ||
292 | BUG_ON(buf->size != size); | ||
293 | BUG_ON(buf->direction != dir); | ||
294 | 278 | ||
295 | dev_dbg(dev, | 279 | dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", |
296 | "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", | 280 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), |
297 | __func__, buf->ptr, virt_to_dma(dev, buf->ptr), | 281 | buf->safe, buf->safe_dma_addr); |
298 | buf->safe, buf->safe_dma_addr); | ||
299 | 282 | ||
300 | DO_STATS(dev->archdata.dmabounce->bounce_count++); | 283 | DO_STATS(dev->archdata.dmabounce->bounce_count++); |
301 | 284 | ||
302 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { | 285 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { |
303 | void *ptr = buf->ptr; | 286 | void *ptr = buf->ptr; |
304 | 287 | ||
305 | dev_dbg(dev, | 288 | dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", |
306 | "%s: copy back safe %p to unsafe %p size %d\n", | 289 | __func__, buf->safe, ptr, size); |
307 | __func__, buf->safe, ptr, size); | 290 | memcpy(ptr, buf->safe, size); |
308 | memcpy(ptr, buf->safe, size); | ||
309 | 291 | ||
310 | /* | 292 | /* |
311 | * Since we may have written to a page cache page, | 293 | * Since we may have written to a page cache page, |
312 | * we need to ensure that the data will be coherent | 294 | * we need to ensure that the data will be coherent |
313 | * with user mappings. | 295 | * with user mappings. |
314 | */ | 296 | */ |
315 | __cpuc_flush_dcache_area(ptr, size); | 297 | __cpuc_flush_dcache_area(ptr, size); |
316 | } | ||
317 | free_safe_buffer(dev->archdata.dmabounce, buf); | ||
318 | } else { | ||
319 | __dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir); | ||
320 | } | 298 | } |
299 | free_safe_buffer(dev->archdata.dmabounce, buf); | ||
321 | } | 300 | } |
322 | 301 | ||
323 | /* ************************************************** */ | 302 | /* ************************************************** */ |
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr, | |||
328 | * substitute the safe buffer for the unsafe one. | 307 | * substitute the safe buffer for the unsafe one. |
329 | * (basically move the buffer from an unsafe area to a safe one) | 308 | * (basically move the buffer from an unsafe area to a safe one) |
330 | */ | 309 | */ |
331 | dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size, | ||
332 | enum dma_data_direction dir) | ||
333 | { | ||
334 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | ||
335 | __func__, ptr, size, dir); | ||
336 | |||
337 | BUG_ON(!valid_dma_direction(dir)); | ||
338 | |||
339 | return map_single(dev, ptr, size, dir); | ||
340 | } | ||
341 | EXPORT_SYMBOL(__dma_map_single); | ||
342 | |||
343 | /* | ||
344 | * see if a mapped address was really a "safe" buffer and if so, copy | ||
345 | * the data from the safe buffer back to the unsafe buffer and free up | ||
346 | * the safe buffer. (basically return things back to the way they | ||
347 | * should be) | ||
348 | */ | ||
349 | void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | ||
350 | enum dma_data_direction dir) | ||
351 | { | ||
352 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | ||
353 | __func__, (void *) dma_addr, size, dir); | ||
354 | |||
355 | unmap_single(dev, dma_addr, size, dir); | ||
356 | } | ||
357 | EXPORT_SYMBOL(__dma_unmap_single); | ||
358 | |||
359 | dma_addr_t __dma_map_page(struct device *dev, struct page *page, | 310 | dma_addr_t __dma_map_page(struct device *dev, struct page *page, |
360 | unsigned long offset, size_t size, enum dma_data_direction dir) | 311 | unsigned long offset, size_t size, enum dma_data_direction dir) |
361 | { | 312 | { |
313 | dma_addr_t dma_addr; | ||
314 | int ret; | ||
315 | |||
362 | dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", | 316 | dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", |
363 | __func__, page, offset, size, dir); | 317 | __func__, page, offset, size, dir); |
364 | 318 | ||
365 | BUG_ON(!valid_dma_direction(dir)); | 319 | dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; |
320 | |||
321 | ret = needs_bounce(dev, dma_addr, size); | ||
322 | if (ret < 0) | ||
323 | return ~0; | ||
324 | |||
325 | if (ret == 0) { | ||
326 | __dma_page_cpu_to_dev(page, offset, size, dir); | ||
327 | return dma_addr; | ||
328 | } | ||
366 | 329 | ||
367 | if (PageHighMem(page)) { | 330 | if (PageHighMem(page)) { |
368 | dev_err(dev, "DMA buffer bouncing of HIGHMEM pages " | 331 | dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); |
369 | "is not supported\n"); | ||
370 | return ~0; | 332 | return ~0; |
371 | } | 333 | } |
372 | 334 | ||
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page); | |||
383 | void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, | 345 | void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, |
384 | enum dma_data_direction dir) | 346 | enum dma_data_direction dir) |
385 | { | 347 | { |
386 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | 348 | struct safe_buffer *buf; |
387 | __func__, (void *) dma_addr, size, dir); | 349 | |
350 | dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", | ||
351 | __func__, dma_addr, size, dir); | ||
352 | |||
353 | buf = find_safe_buffer_dev(dev, dma_addr, __func__); | ||
354 | if (!buf) { | ||
355 | __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), | ||
356 | dma_addr & ~PAGE_MASK, size, dir); | ||
357 | return; | ||
358 | } | ||
388 | 359 | ||
389 | unmap_single(dev, dma_addr, size, dir); | 360 | unmap_single(dev, buf, size, dir); |
390 | } | 361 | } |
391 | EXPORT_SYMBOL(__dma_unmap_page); | 362 | EXPORT_SYMBOL(__dma_unmap_page); |
392 | 363 | ||
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, | |||
461 | } | 432 | } |
462 | 433 | ||
463 | int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | 434 | int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, |
464 | unsigned long large_buffer_size) | 435 | unsigned long large_buffer_size, |
436 | int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) | ||
465 | { | 437 | { |
466 | struct dmabounce_device_info *device_info; | 438 | struct dmabounce_device_info *device_info; |
467 | int ret; | 439 | int ret; |
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | |||
497 | device_info->dev = dev; | 469 | device_info->dev = dev; |
498 | INIT_LIST_HEAD(&device_info->safe_buffers); | 470 | INIT_LIST_HEAD(&device_info->safe_buffers); |
499 | rwlock_init(&device_info->lock); | 471 | rwlock_init(&device_info->lock); |
472 | device_info->needs_bounce = needs_bounce_fn; | ||
500 | 473 | ||
501 | #ifdef STATS | 474 | #ifdef STATS |
502 | device_info->total_allocs = 0; | 475 | device_info->total_allocs = 0; |
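The net effect of the dmabounce.c changes above is that the bounce decision becomes a per-device callback stored in dmabounce_device_info and passed through the extra argument to dmabounce_register_dev(); the old global dma_needs_bounce() hook goes away. A minimal sketch of the new registration pattern, assuming a hypothetical platform with a 64MB DMA window at bus address 0 (the real callers, it8152 and sa1111, are updated later in this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>	/* the ARM asm/dma-mapping.h declares dmabounce_register_dev() */

/* Hypothetical check: bounce any buffer that ends beyond a 64MB window. */
static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > (64UL << 20);
}

static int example_register(struct device *dev)
{
	/* 2048/4096 byte small/large pools, matching the it8152 caller below */
	return dmabounce_register_dev(dev, 2048, 4096, example_needs_bounce);
}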
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 4ddd0a6ac7ff..7bdd91766d65 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, | |||
179 | { | 179 | { |
180 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); | 180 | void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); |
181 | unsigned int shift = (d->irq % 4) * 8; | 181 | unsigned int shift = (d->irq % 4) * 8; |
182 | unsigned int cpu = cpumask_first(mask_val); | 182 | unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); |
183 | u32 val, mask, bit; | 183 | u32 val, mask, bit; |
184 | 184 | ||
185 | if (cpu >= 8) | 185 | if (cpu >= 8 || cpu >= nr_cpu_ids) |
186 | return -EINVAL; | 186 | return -EINVAL; |
187 | 187 | ||
188 | mask = 0xff << shift; | 188 | mask = 0xff << shift; |
189 | bit = 1 << (cpu + shift); | 189 | bit = 1 << (cpu + shift); |
190 | 190 | ||
191 | spin_lock(&irq_controller_lock); | 191 | spin_lock(&irq_controller_lock); |
192 | d->node = cpu; | ||
193 | val = readl_relaxed(reg) & ~mask; | 192 | val = readl_relaxed(reg) & ~mask; |
194 | writel_relaxed(val | bit, reg); | 193 | writel_relaxed(val | bit, reg); |
195 | spin_unlock(&irq_controller_lock); | 194 | spin_unlock(&irq_controller_lock); |
196 | 195 | ||
197 | return 0; | 196 | return IRQ_SET_MASK_OK; |
198 | } | 197 | } |
199 | #endif | 198 | #endif |
200 | 199 | ||
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 7a21927c52e1..14ad62e16dd1 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c | |||
@@ -243,6 +243,12 @@ static struct resource it8152_mem = { | |||
243 | * ITE8152 chip can address up to 64MByte, so all the devices | 243 | * ITE8152 chip can address up to 64MByte, so all the devices |
244 | * connected to ITE8152 (PCI and USB) should have limited DMA window | 244 | * connected to ITE8152 (PCI and USB) should have limited DMA window |
245 | */ | 245 | */ |
246 | static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
247 | { | ||
248 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
249 | __func__, dma_addr, size); | ||
250 | return (dma_addr + size - PHYS_OFFSET) >= SZ_64M; | ||
251 | } | ||
246 | 252 | ||
247 | /* | 253 | /* |
248 | * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all | 254 | * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all |
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev) | |||
254 | if (dev->dma_mask) | 260 | if (dev->dma_mask) |
255 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 261 | *dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
256 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; | 262 | dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; |
257 | dmabounce_register_dev(dev, 2048, 4096); | 263 | dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce); |
258 | } | 264 | } |
259 | return 0; | 265 | return 0; |
260 | } | 266 | } |
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev) | |||
267 | return 0; | 273 | return 0; |
268 | } | 274 | } |
269 | 275 | ||
270 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
271 | { | ||
272 | dev_dbg(dev, "%s: dma_addr %08x, size %08x\n", | ||
273 | __func__, dma_addr, size); | ||
274 | return (dev->bus == &pci_bus_type) && | ||
275 | ((dma_addr + size - PHYS_OFFSET) >= SZ_64M); | ||
276 | } | ||
277 | |||
278 | int dma_set_coherent_mask(struct device *dev, u64 mask) | 276 | int dma_set_coherent_mask(struct device *dev, u64 mask) |
279 | { | 277 | { |
280 | if (mask >= PHYS_OFFSET + SZ_64M - 1) | 278 | if (mask >= PHYS_OFFSET + SZ_64M - 1) |
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 9c49a46a2b7a..0569de6acfba 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c | |||
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac, | |||
579 | 579 | ||
580 | sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; | 580 | sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2]; |
581 | } | 581 | } |
582 | #endif | ||
582 | 583 | ||
584 | #ifdef CONFIG_DMABOUNCE | ||
585 | /* | ||
586 | * According to the "Intel StrongARM SA-1111 Microprocessor Companion | ||
587 | * Chip Specification Update" (June 2000), erratum #7, there is a | ||
588 | * significant bug in the SA1111 SDRAM shared memory controller. For | ||
589 | * an access to a region of memory above 1MB relative to the bank base, | ||
590 | * it is important that address bit 10 _NOT_ be asserted. Depending | ||
591 | * on the configuration of the RAM, bit 10 may correspond to one | ||
592 | * of several different (processor-relative) address bits. | ||
593 | * | ||
594 | * This routine only identifies whether or not a given DMA address | ||
595 | * is susceptible to the bug. | ||
596 | * | ||
597 | * This should only get called for sa1111_device types due to the | ||
598 | * way we configure our device dma_masks. | ||
599 | */ | ||
600 | static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) | ||
601 | { | ||
602 | /* | ||
603 | * Section 4.6 of the "Intel StrongARM SA-1111 Development Module | ||
604 | * User's Guide" mentions that jumpers R51 and R52 control the | ||
605 | * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or | ||
606 | * SDRAM bank 1 on Neponset). The default configuration selects | ||
607 | * Assabet, so any address in bank 1 is necessarily invalid. | ||
608 | */ | ||
609 | return (machine_is_assabet() || machine_is_pfs168()) && | ||
610 | (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); | ||
611 | } | ||
583 | #endif | 612 | #endif |
584 | 613 | ||
585 | static void sa1111_dev_release(struct device *_dev) | 614 | static void sa1111_dev_release(struct device *_dev) |
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent, | |||
644 | dev->dev.dma_mask = &dev->dma_mask; | 673 | dev->dev.dma_mask = &dev->dma_mask; |
645 | 674 | ||
646 | if (dev->dma_mask != 0xffffffffUL) { | 675 | if (dev->dma_mask != 0xffffffffUL) { |
647 | ret = dmabounce_register_dev(&dev->dev, 1024, 4096); | 676 | ret = dmabounce_register_dev(&dev->dev, 1024, 4096, |
677 | sa1111_needs_bounce); | ||
648 | if (ret) { | 678 | if (ret) { |
649 | dev_err(&dev->dev, "SA1111: Failed to register" | 679 | dev_err(&dev->dev, "SA1111: Failed to register" |
650 | " with dmabounce\n"); | 680 | " with dmabounce\n"); |
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip) | |||
818 | kfree(sachip); | 848 | kfree(sachip); |
819 | } | 849 | } |
820 | 850 | ||
821 | /* | ||
822 | * According to the "Intel StrongARM SA-1111 Microprocessor Companion | ||
823 | * Chip Specification Update" (June 2000), erratum #7, there is a | ||
824 | * significant bug in the SA1111 SDRAM shared memory controller. If | ||
825 | * an access to a region of memory above 1MB relative to the bank base, | ||
826 | * it is important that address bit 10 _NOT_ be asserted. Depending | ||
827 | * on the configuration of the RAM, bit 10 may correspond to one | ||
828 | * of several different (processor-relative) address bits. | ||
829 | * | ||
830 | * This routine only identifies whether or not a given DMA address | ||
831 | * is susceptible to the bug. | ||
832 | * | ||
833 | * This should only get called for sa1111_device types due to the | ||
834 | * way we configure our device dma_masks. | ||
835 | */ | ||
836 | int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) | ||
837 | { | ||
838 | /* | ||
839 | * Section 4.6 of the "Intel StrongARM SA-1111 Development Module | ||
840 | * User's Guide" mentions that jumpers R51 and R52 control the | ||
841 | * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or | ||
842 | * SDRAM bank 1 on Neponset). The default configuration selects | ||
843 | * Assabet, so any address in bank 1 is necessarily invalid. | ||
844 | */ | ||
845 | return ((machine_is_assabet() || machine_is_pfs168()) && | ||
846 | (addr >= 0xc8000000 || (addr + size) >= 0xc8000000)); | ||
847 | } | ||
848 | |||
849 | struct sa1111_save_data { | 851 | struct sa1111_save_data { |
850 | unsigned int skcr; | 852 | unsigned int skcr; |
851 | unsigned int skpcr; | 853 | unsigned int skpcr; |
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index bc2d2d75f706..65c3f2474f5e 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h | |||
@@ -13,6 +13,9 @@ | |||
13 | * Do not include any C declarations in this file - it is included by | 13 | * Do not include any C declarations in this file - it is included by |
14 | * assembler source. | 14 | * assembler source. |
15 | */ | 15 | */ |
16 | #ifndef __ASM_ASSEMBLER_H__ | ||
17 | #define __ASM_ASSEMBLER_H__ | ||
18 | |||
16 | #ifndef __ASSEMBLY__ | 19 | #ifndef __ASSEMBLY__ |
17 | #error "Only include this from assembly code" | 20 | #error "Only include this from assembly code" |
18 | #endif | 21 | #endif |
@@ -290,3 +293,4 @@ | |||
290 | .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f | 293 | .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f |
291 | usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort | 294 | usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort |
292 | .endm | 295 | .endm |
296 | #endif /* __ASM_ASSEMBLER_H__ */ | ||
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index b4892a06442c..f4280593dfa3 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h | |||
@@ -26,8 +26,8 @@ | |||
26 | #include <linux/compiler.h> | 26 | #include <linux/compiler.h> |
27 | #include <asm/system.h> | 27 | #include <asm/system.h> |
28 | 28 | ||
29 | #define smp_mb__before_clear_bit() mb() | 29 | #define smp_mb__before_clear_bit() smp_mb() |
30 | #define smp_mb__after_clear_bit() mb() | 30 | #define smp_mb__after_clear_bit() smp_mb() |
31 | 31 | ||
32 | /* | 32 | /* |
33 | * These functions are the basis of our bit ops. | 33 | * These functions are the basis of our bit ops. |
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h index 4fff837363ed..7a21d0bf7134 100644 --- a/arch/arm/include/asm/dma-mapping.h +++ b/arch/arm/include/asm/dma-mapping.h | |||
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
115 | ___dma_page_dev_to_cpu(page, off, size, dir); | 115 | ___dma_page_dev_to_cpu(page, off, size, dir); |
116 | } | 116 | } |
117 | 117 | ||
118 | /* | 118 | extern int dma_supported(struct device *, u64); |
119 | * Return whether the given device DMA address mask can be supported | 119 | extern int dma_set_mask(struct device *, u64); |
120 | * properly. For example, if your device can only drive the low 24-bits | ||
121 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
122 | * to this function. | ||
123 | * | ||
124 | * FIXME: This should really be a platform specific issue - we should | ||
125 | * return false if GFP_DMA allocations may not satisfy the supplied 'mask'. | ||
126 | */ | ||
127 | static inline int dma_supported(struct device *dev, u64 mask) | ||
128 | { | ||
129 | if (mask < ISA_DMA_THRESHOLD) | ||
130 | return 0; | ||
131 | return 1; | ||
132 | } | ||
133 | |||
134 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | ||
135 | { | ||
136 | #ifdef CONFIG_DMABOUNCE | ||
137 | if (dev->archdata.dmabounce) { | ||
138 | if (dma_mask >= ISA_DMA_THRESHOLD) | ||
139 | return 0; | ||
140 | else | ||
141 | return -EIO; | ||
142 | } | ||
143 | #endif | ||
144 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
145 | return -EIO; | ||
146 | |||
147 | *dev->dma_mask = dma_mask; | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | 120 | ||
152 | /* | 121 | /* |
153 | * DMA errors are defined by all-bits-set in the DMA address. | 122 | * DMA errors are defined by all-bits-set in the DMA address. |
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *, | |||
256 | * @dev: valid struct device pointer | 225 | * @dev: valid struct device pointer |
257 | * @small_buf_size: size of buffers to use with small buffer pool | 226 | * @small_buf_size: size of buffers to use with small buffer pool |
258 | * @large_buf_size: size of buffers to use with large buffer pool (can be 0) | 227 | * @large_buf_size: size of buffers to use with large buffer pool (can be 0) |
228 | * @needs_bounce_fn: called to determine whether buffer needs bouncing | ||
259 | * | 229 | * |
260 | * This function should be called by low-level platform code to register | 230 | * This function should be called by low-level platform code to register |
261 | * a device as requiring DMA buffer bouncing. The function will allocate | 231 | * a device as requiring DMA buffer bouncing. The function will allocate |
262 | * appropriate DMA pools for the device. | 232 | * appropriate DMA pools for the device. |
263 | * | ||
264 | */ | 233 | */ |
265 | extern int dmabounce_register_dev(struct device *, unsigned long, | 234 | extern int dmabounce_register_dev(struct device *, unsigned long, |
266 | unsigned long); | 235 | unsigned long, int (*)(struct device *, dma_addr_t, size_t)); |
267 | 236 | ||
268 | /** | 237 | /** |
269 | * dmabounce_unregister_dev | 238 | * dmabounce_unregister_dev |
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long, | |||
277 | */ | 246 | */ |
278 | extern void dmabounce_unregister_dev(struct device *); | 247 | extern void dmabounce_unregister_dev(struct device *); |
279 | 248 | ||
280 | /** | ||
281 | * dma_needs_bounce | ||
282 | * | ||
283 | * @dev: valid struct device pointer | ||
284 | * @dma_handle: dma_handle of unbounced buffer | ||
285 | * @size: size of region being mapped | ||
286 | * | ||
287 | * Platforms that utilize the dmabounce mechanism must implement | ||
288 | * this function. | ||
289 | * | ||
290 | * The dmabounce routines call this function whenever a dma-mapping | ||
291 | * is requested to determine whether a given buffer needs to be bounced | ||
292 | * or not. The function must return 0 if the buffer is OK for | ||
293 | * DMA access and 1 if the buffer needs to be bounced. | ||
294 | * | ||
295 | */ | ||
296 | extern int dma_needs_bounce(struct device*, dma_addr_t, size_t); | ||
297 | |||
298 | /* | 249 | /* |
299 | * The DMA API, implemented by dmabounce.c. See below for descriptions. | 250 | * The DMA API, implemented by dmabounce.c. See below for descriptions. |
300 | */ | 251 | */ |
301 | extern dma_addr_t __dma_map_single(struct device *, void *, size_t, | ||
302 | enum dma_data_direction); | ||
303 | extern void __dma_unmap_single(struct device *, dma_addr_t, size_t, | ||
304 | enum dma_data_direction); | ||
305 | extern dma_addr_t __dma_map_page(struct device *, struct page *, | 252 | extern dma_addr_t __dma_map_page(struct device *, struct page *, |
306 | unsigned long, size_t, enum dma_data_direction); | 253 | unsigned long, size_t, enum dma_data_direction); |
307 | extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, | 254 | extern void __dma_unmap_page(struct device *, dma_addr_t, size_t, |
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr, | |||
328 | } | 275 | } |
329 | 276 | ||
330 | 277 | ||
331 | static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr, | ||
332 | size_t size, enum dma_data_direction dir) | ||
333 | { | ||
334 | __dma_single_cpu_to_dev(cpu_addr, size, dir); | ||
335 | return virt_to_dma(dev, cpu_addr); | ||
336 | } | ||
337 | |||
338 | static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, | 278 | static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, |
339 | unsigned long offset, size_t size, enum dma_data_direction dir) | 279 | unsigned long offset, size_t size, enum dma_data_direction dir) |
340 | { | 280 | { |
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page, | |||
342 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; | 282 | return pfn_to_dma(dev, page_to_pfn(page)) + offset; |
343 | } | 283 | } |
344 | 284 | ||
345 | static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle, | ||
346 | size_t size, enum dma_data_direction dir) | ||
347 | { | ||
348 | __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir); | ||
349 | } | ||
350 | |||
351 | static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, | 285 | static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, |
352 | size_t size, enum dma_data_direction dir) | 286 | size_t size, enum dma_data_direction dir) |
353 | { | 287 | { |
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle, | |||
373 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, | 307 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, |
374 | size_t size, enum dma_data_direction dir) | 308 | size_t size, enum dma_data_direction dir) |
375 | { | 309 | { |
310 | unsigned long offset; | ||
311 | struct page *page; | ||
376 | dma_addr_t addr; | 312 | dma_addr_t addr; |
377 | 313 | ||
314 | BUG_ON(!virt_addr_valid(cpu_addr)); | ||
315 | BUG_ON(!virt_addr_valid(cpu_addr + size - 1)); | ||
378 | BUG_ON(!valid_dma_direction(dir)); | 316 | BUG_ON(!valid_dma_direction(dir)); |
379 | 317 | ||
380 | addr = __dma_map_single(dev, cpu_addr, size, dir); | 318 | page = virt_to_page(cpu_addr); |
381 | debug_dma_map_page(dev, virt_to_page(cpu_addr), | 319 | offset = (unsigned long)cpu_addr & ~PAGE_MASK; |
382 | (unsigned long)cpu_addr & ~PAGE_MASK, size, | 320 | addr = __dma_map_page(dev, page, offset, size, dir); |
383 | dir, addr, true); | 321 | debug_dma_map_page(dev, page, offset, size, dir, addr, true); |
384 | 322 | ||
385 | return addr; | 323 | return addr; |
386 | } | 324 | } |
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle, | |||
430 | size_t size, enum dma_data_direction dir) | 368 | size_t size, enum dma_data_direction dir) |
431 | { | 369 | { |
432 | debug_dma_unmap_page(dev, handle, size, dir, true); | 370 | debug_dma_unmap_page(dev, handle, size, dir, true); |
433 | __dma_unmap_single(dev, handle, size, dir); | 371 | __dma_unmap_page(dev, handle, size, dir); |
434 | } | 372 | } |
435 | 373 | ||
436 | /** | 374 | /** |
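
The hunks above drop the global dma_needs_bounce() hook in favour of a per-device callback passed as the new fourth argument of dmabounce_register_dev(). A minimal sketch of how platform code might adopt the new signature; the 64MB window, example_needs_bounce() and example_register_dma() are illustrative assumptions, not part of this patch:

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	/* Return non-zero if the buffer lies outside the device's DMA window. */
	static int example_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
	{
		return (addr + size) > 0x04000000;	/* assumed 64MB DMA limit */
	}

	static int example_register_dma(struct device *dev)
	{
		/* 2KB small-buffer pool, 4KB large-buffer pool, plus the bounce test */
		return dmabounce_register_dev(dev, 2048, 4096, example_needs_bounce);
	}
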
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S index ec0bbf79c71f..2f1e2098dfe7 100644 --- a/arch/arm/include/asm/entry-macro-multi.S +++ b/arch/arm/include/asm/entry-macro-multi.S | |||
@@ -1,9 +1,11 @@ | |||
1 | #include <asm/assembler.h> | ||
2 | |||
1 | /* | 3 | /* |
2 | * Interrupt handling. Preserves r7, r8, r9 | 4 | * Interrupt handling. Preserves r7, r8, r9 |
3 | */ | 5 | */ |
4 | .macro arch_irq_handler_default | 6 | .macro arch_irq_handler_default |
5 | get_irqnr_preamble r5, lr | 7 | get_irqnr_preamble r6, lr |
6 | 1: get_irqnr_and_base r0, r6, r5, lr | 8 | 1: get_irqnr_and_base r0, r2, r6, lr |
7 | movne r1, sp | 9 | movne r1, sp |
8 | @ | 10 | @ |
9 | @ routine called with r0 = irq number, r1 = struct pt_regs * | 11 | @ routine called with r0 = irq number, r1 = struct pt_regs * |
@@ -15,17 +17,17 @@ | |||
15 | /* | 17 | /* |
16 | * XXX | 18 | * XXX |
17 | * | 19 | * |
18 | * this macro assumes that irqstat (r6) and base (r5) are | 20 | * this macro assumes that irqstat (r2) and base (r6) are |
19 | * preserved from get_irqnr_and_base above | 21 | * preserved from get_irqnr_and_base above |
20 | */ | 22 | */ |
21 | ALT_SMP(test_for_ipi r0, r6, r5, lr) | 23 | ALT_SMP(test_for_ipi r0, r2, r6, lr) |
22 | ALT_UP_B(9997f) | 24 | ALT_UP_B(9997f) |
23 | movne r1, sp | 25 | movne r1, sp |
24 | adrne lr, BSYM(1b) | 26 | adrne lr, BSYM(1b) |
25 | bne do_IPI | 27 | bne do_IPI |
26 | 28 | ||
27 | #ifdef CONFIG_LOCAL_TIMERS | 29 | #ifdef CONFIG_LOCAL_TIMERS |
28 | test_for_ltirq r0, r6, r5, lr | 30 | test_for_ltirq r0, r2, r6, lr |
29 | movne r0, sp | 31 | movne r0, sp |
30 | adrne lr, BSYM(1b) | 32 | adrne lr, BSYM(1b) |
31 | bne do_local_timer | 33 | bne do_local_timer |
@@ -38,7 +40,7 @@ | |||
38 | .align 5 | 40 | .align 5 |
39 | .global \symbol_name | 41 | .global \symbol_name |
40 | \symbol_name: | 42 | \symbol_name: |
41 | mov r4, lr | 43 | mov r8, lr |
42 | arch_irq_handler_default | 44 | arch_irq_handler_default |
43 | mov pc, r4 | 45 | mov pc, r8 |
44 | .endm | 46 | .endm |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index af44a8fb3480..b8de516e600e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -204,18 +204,6 @@ static inline unsigned long __phys_to_virt(unsigned long x) | |||
204 | #endif | 204 | #endif |
205 | 205 | ||
206 | /* | 206 | /* |
207 | * The DMA mask corresponding to the maximum bus address allocatable | ||
208 | * using GFP_DMA. The default here places no restriction on DMA | ||
209 | * allocations. This must be the smallest DMA mask in the system, | ||
210 | * so a successful GFP_DMA allocation will always satisfy this. | ||
211 | */ | ||
212 | #ifndef ARM_DMA_ZONE_SIZE | ||
213 | #define ISA_DMA_THRESHOLD (0xffffffffULL) | ||
214 | #else | ||
215 | #define ISA_DMA_THRESHOLD (PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1) | ||
216 | #endif | ||
217 | |||
218 | /* | ||
219 | * PFNs are used to describe any physical page; this means | 207 | * PFNs are used to describe any physical page; this means |
220 | * PFN 0 == physical address 0. | 208 | * PFN 0 == physical address 0. |
221 | * | 209 | * |
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h index 7544ce6b481a..67c70a31a1be 100644 --- a/arch/arm/include/asm/pmu.h +++ b/arch/arm/include/asm/pmu.h | |||
@@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device); | |||
52 | * a cookie. | 52 | * a cookie. |
53 | */ | 53 | */ |
54 | extern int | 54 | extern int |
55 | release_pmu(struct platform_device *pdev); | 55 | release_pmu(enum arm_pmu_type type); |
56 | 56 | ||
57 | /** | 57 | /** |
58 | * init_pmu() - Initialise the PMU. | 58 | * init_pmu() - Initialise the PMU. |
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 8ec535e11fd7..633d1cb84d87 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h | |||
@@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); | |||
82 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); | 82 | extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); |
83 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); | 83 | extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); |
84 | #else | 84 | #else |
85 | #define cpu_proc_init() processor._proc_init() | 85 | #define cpu_proc_init processor._proc_init |
86 | #define cpu_proc_fin() processor._proc_fin() | 86 | #define cpu_proc_fin processor._proc_fin |
87 | #define cpu_reset(addr) processor.reset(addr) | 87 | #define cpu_reset processor.reset |
88 | #define cpu_do_idle() processor._do_idle() | 88 | #define cpu_do_idle processor._do_idle |
89 | #define cpu_dcache_clean_area(addr,sz) processor.dcache_clean_area(addr,sz) | 89 | #define cpu_dcache_clean_area processor.dcache_clean_area |
90 | #define cpu_set_pte_ext(ptep,pte,ext) processor.set_pte_ext(ptep,pte,ext) | 90 | #define cpu_set_pte_ext processor.set_pte_ext |
91 | #define cpu_do_switch_mm(pgd,mm) processor.switch_mm(pgd,mm) | 91 | #define cpu_do_switch_mm processor.switch_mm |
92 | #endif | 92 | #endif |
93 | 93 | ||
94 | extern void cpu_resume(void); | 94 | extern void cpu_resume(void); |
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h index 2f87870d9347..cefdb8f898a1 100644 --- a/arch/arm/include/asm/scatterlist.h +++ b/arch/arm/include/asm/scatterlist.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASMARM_SCATTERLIST_H | 1 | #ifndef _ASMARM_SCATTERLIST_H |
2 | #define _ASMARM_SCATTERLIST_H | 2 | #define _ASMARM_SCATTERLIST_H |
3 | 3 | ||
4 | #ifdef CONFIG_ARM_HAS_SG_CHAIN | ||
5 | #define ARCH_HAS_SG_CHAIN | ||
6 | #endif | ||
7 | |||
4 | #include <asm/memory.h> | 8 | #include <asm/memory.h> |
5 | #include <asm/types.h> | 9 | #include <asm/types.h> |
6 | #include <asm-generic/scatterlist.h> | 10 | #include <asm-generic/scatterlist.h> |
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index ee2ad8ae07af..915696dd9c7c 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h | |||
@@ -187,12 +187,16 @@ struct tagtable { | |||
187 | 187 | ||
188 | #define __tag __used __attribute__((__section__(".taglist.init"))) | 188 | #define __tag __used __attribute__((__section__(".taglist.init"))) |
189 | #define __tagtable(tag, fn) \ | 189 | #define __tagtable(tag, fn) \ |
190 | static struct tagtable __tagtable_##fn __tag = { tag, fn } | 190 | static const struct tagtable __tagtable_##fn __tag = { tag, fn } |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * Memory map description | 193 | * Memory map description |
194 | */ | 194 | */ |
195 | #define NR_BANKS 8 | 195 | #ifdef CONFIG_ARCH_EP93XX |
196 | # define NR_BANKS 16 | ||
197 | #else | ||
198 | # define NR_BANKS 8 | ||
199 | #endif | ||
196 | 200 | ||
197 | struct membank { | 201 | struct membank { |
198 | phys_addr_t start; | 202 | phys_addr_t start; |
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h new file mode 100644 index 000000000000..b0e4e1a02318 --- /dev/null +++ b/arch/arm/include/asm/suspend.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef __ASM_ARM_SUSPEND_H | ||
2 | #define __ASM_ARM_SUSPEND_H | ||
3 | |||
4 | #include <asm/memory.h> | ||
5 | #include <asm/tlbflush.h> | ||
6 | |||
7 | extern void cpu_resume(void); | ||
8 | |||
9 | /* | ||
10 | * Hide the first two arguments to __cpu_suspend - these are an implementation | ||
11 | * detail which platform code shouldn't have to know about. | ||
12 | */ | ||
13 | static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | ||
14 | { | ||
15 | extern int __cpu_suspend(int, long, unsigned long, | ||
16 | int (*)(unsigned long)); | ||
17 | int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn); | ||
18 | flush_tlb_all(); | ||
19 | return ret; | ||
20 | } | ||
21 | |||
22 | #endif | ||
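
The new cpu_suspend() wrapper hides the v:p offset passed to __cpu_suspend. A hedged sketch of how a platform's PM code might call it; example_enter_lowpower() and EXAMPLE_SLEEP_MODE stand in for SoC-specific pieces that this patch does not define:

	#include <linux/kernel.h>
	#include <asm/suspend.h>

	#define EXAMPLE_SLEEP_MODE	0			/* placeholder mode value */

	extern void example_enter_lowpower(unsigned long mode);	/* hypothetical SoC hook */

	/* Suspend finisher: does not return on a successful power-down. */
	static int example_finish_suspend(unsigned long mode)
	{
		example_enter_lowpower(mode);
		return 1;			/* only reached if the suspend was aborted */
	}

	static void example_pm_enter(void)
	{
		if (cpu_suspend(EXAMPLE_SLEEP_MODE, example_finish_suspend))
			pr_warn("suspend aborted, resuming normally\n");
	}
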
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h index 5929ef5d927a..8578d726ad78 100644 --- a/arch/arm/include/asm/tcm.h +++ b/arch/arm/include/asm/tcm.h | |||
@@ -27,5 +27,7 @@ | |||
27 | 27 | ||
28 | void *tcm_alloc(size_t len); | 28 | void *tcm_alloc(size_t len); |
29 | void tcm_free(void *addr, size_t len); | 29 | void tcm_free(void *addr, size_t len); |
30 | bool tcm_dtcm_present(void); | ||
31 | bool tcm_itcm_present(void); | ||
30 | 32 | ||
31 | #endif | 33 | #endif |
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index f90756dc16dc..5b29a6673625 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h | |||
@@ -3,6 +3,9 @@ | |||
3 | 3 | ||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | 5 | ||
6 | struct pt_regs; | ||
7 | struct task_struct; | ||
8 | |||
6 | struct undef_hook { | 9 | struct undef_hook { |
7 | struct list_head node; | 10 | struct list_head node; |
8 | u32 instr_mask; | 11 | u32 instr_mask; |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 927522cfc12e..16baba2e4369 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -59,6 +59,9 @@ int main(void) | |||
59 | DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); | 59 | DEFINE(TI_TP_VALUE, offsetof(struct thread_info, tp_value)); |
60 | DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); | 60 | DEFINE(TI_FPSTATE, offsetof(struct thread_info, fpstate)); |
61 | DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); | 61 | DEFINE(TI_VFPSTATE, offsetof(struct thread_info, vfpstate)); |
62 | #ifdef CONFIG_SMP | ||
63 | DEFINE(VFP_CPU, offsetof(union vfp_state, hard.cpu)); | ||
64 | #endif | ||
62 | #ifdef CONFIG_ARM_THUMBEE | 65 | #ifdef CONFIG_ARM_THUMBEE |
63 | DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); | 66 | DEFINE(TI_THUMBEE_STATE, offsetof(struct thread_info, thumbee_state)); |
64 | #endif | 67 | #endif |
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 90c62cd51ca9..fa02a22a4c4b 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S | |||
@@ -29,21 +29,53 @@ | |||
29 | #include <asm/entry-macro-multi.S> | 29 | #include <asm/entry-macro-multi.S> |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Interrupt handling. Preserves r7, r8, r9 | 32 | * Interrupt handling. |
33 | */ | 33 | */ |
34 | .macro irq_handler | 34 | .macro irq_handler |
35 | #ifdef CONFIG_MULTI_IRQ_HANDLER | 35 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
36 | ldr r5, =handle_arch_irq | 36 | ldr r1, =handle_arch_irq |
37 | mov r0, sp | 37 | mov r0, sp |
38 | ldr r5, [r5] | 38 | ldr r1, [r1] |
39 | adr lr, BSYM(9997f) | 39 | adr lr, BSYM(9997f) |
40 | teq r5, #0 | 40 | teq r1, #0 |
41 | movne pc, r5 | 41 | movne pc, r1 |
42 | #endif | 42 | #endif |
43 | arch_irq_handler_default | 43 | arch_irq_handler_default |
44 | 9997: | 44 | 9997: |
45 | .endm | 45 | .endm |
46 | 46 | ||
47 | .macro pabt_helper | ||
48 | @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5 | ||
49 | #ifdef MULTI_PABORT | ||
50 | ldr ip, .LCprocfns | ||
51 | mov lr, pc | ||
52 | ldr pc, [ip, #PROCESSOR_PABT_FUNC] | ||
53 | #else | ||
54 | bl CPU_PABORT_HANDLER | ||
55 | #endif | ||
56 | .endm | ||
57 | |||
58 | .macro dabt_helper | ||
59 | |||
60 | @ | ||
61 | @ Call the processor-specific abort handler: | ||
62 | @ | ||
63 | @ r2 - pt_regs | ||
64 | @ r4 - aborted context pc | ||
65 | @ r5 - aborted context psr | ||
66 | @ | ||
67 | @ The abort handler must return the aborted address in r0, and | ||
68 | @ the fault status register in r1. r9 must be preserved. | ||
69 | @ | ||
70 | #ifdef MULTI_DABORT | ||
71 | ldr ip, .LCprocfns | ||
72 | mov lr, pc | ||
73 | ldr pc, [ip, #PROCESSOR_DABT_FUNC] | ||
74 | #else | ||
75 | bl CPU_DABORT_HANDLER | ||
76 | #endif | ||
77 | .endm | ||
78 | |||
47 | #ifdef CONFIG_KPROBES | 79 | #ifdef CONFIG_KPROBES |
48 | .section .kprobes.text,"ax",%progbits | 80 | .section .kprobes.text,"ax",%progbits |
49 | #else | 81 | #else |
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid) | |||
126 | SPFIX( subeq sp, sp, #4 ) | 158 | SPFIX( subeq sp, sp, #4 ) |
127 | stmia sp, {r1 - r12} | 159 | stmia sp, {r1 - r12} |
128 | 160 | ||
129 | ldmia r0, {r1 - r3} | 161 | ldmia r0, {r3 - r5} |
130 | add r5, sp, #S_SP - 4 @ here for interlock avoidance | 162 | add r7, sp, #S_SP - 4 @ here for interlock avoidance |
131 | mov r4, #-1 @ "" "" "" "" | 163 | mov r6, #-1 @ "" "" "" "" |
132 | add r0, sp, #(S_FRAME_SIZE + \stack_hole - 4) | 164 | add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4) |
133 | SPFIX( addeq r0, r0, #4 ) | 165 | SPFIX( addeq r2, r2, #4 ) |
134 | str r1, [sp, #-4]! @ save the "real" r0 copied | 166 | str r3, [sp, #-4]! @ save the "real" r0 copied |
135 | @ from the exception stack | 167 | @ from the exception stack |
136 | 168 | ||
137 | mov r1, lr | 169 | mov r3, lr |
138 | 170 | ||
139 | @ | 171 | @ |
140 | @ We are now ready to fill in the remaining blanks on the stack: | 172 | @ We are now ready to fill in the remaining blanks on the stack: |
141 | @ | 173 | @ |
142 | @ r0 - sp_svc | 174 | @ r2 - sp_svc |
143 | @ r1 - lr_svc | 175 | @ r3 - lr_svc |
144 | @ r2 - lr_<exception>, already fixed up for correct return/restart | 176 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
145 | @ r3 - spsr_<exception> | 177 | @ r5 - spsr_<exception> |
146 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | 178 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) |
147 | @ | 179 | @ |
148 | stmia r5, {r0 - r4} | 180 | stmia r7, {r2 - r6} |
181 | |||
182 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
183 | bl trace_hardirqs_off | ||
184 | #endif | ||
149 | .endm | 185 | .endm |
150 | 186 | ||
151 | .align 5 | 187 | .align 5 |
152 | __dabt_svc: | 188 | __dabt_svc: |
153 | svc_entry | 189 | svc_entry |
154 | |||
155 | @ | ||
156 | @ get ready to re-enable interrupts if appropriate | ||
157 | @ | ||
158 | mrs r9, cpsr | ||
159 | tst r3, #PSR_I_BIT | ||
160 | biceq r9, r9, #PSR_I_BIT | ||
161 | |||
162 | @ | ||
163 | @ Call the processor-specific abort handler: | ||
164 | @ | ||
165 | @ r2 - aborted context pc | ||
166 | @ r3 - aborted context cpsr | ||
167 | @ | ||
168 | @ The abort handler must return the aborted address in r0, and | ||
169 | @ the fault status register in r1. r9 must be preserved. | ||
170 | @ | ||
171 | #ifdef MULTI_DABORT | ||
172 | ldr r4, .LCprocfns | ||
173 | mov lr, pc | ||
174 | ldr pc, [r4, #PROCESSOR_DABT_FUNC] | ||
175 | #else | ||
176 | bl CPU_DABORT_HANDLER | ||
177 | #endif | ||
178 | |||
179 | @ | ||
180 | @ set desired IRQ state, then call main handler | ||
181 | @ | ||
182 | debug_entry r1 | ||
183 | msr cpsr_c, r9 | ||
184 | mov r2, sp | 190 | mov r2, sp |
185 | bl do_DataAbort | 191 | dabt_helper |
186 | 192 | ||
187 | @ | 193 | @ |
188 | @ IRQs off again before pulling preserved data off the stack | 194 | @ IRQs off again before pulling preserved data off the stack |
189 | @ | 195 | @ |
190 | disable_irq_notrace | 196 | disable_irq_notrace |
191 | 197 | ||
192 | @ | 198 | #ifdef CONFIG_TRACE_IRQFLAGS |
193 | @ restore SPSR and restart the instruction | 199 | tst r5, #PSR_I_BIT |
194 | @ | 200 | bleq trace_hardirqs_on |
195 | ldr r2, [sp, #S_PSR] | 201 | tst r5, #PSR_I_BIT |
196 | svc_exit r2 @ return from exception | 202 | blne trace_hardirqs_off |
203 | #endif | ||
204 | svc_exit r5 @ return from exception | ||
197 | UNWIND(.fnend ) | 205 | UNWIND(.fnend ) |
198 | ENDPROC(__dabt_svc) | 206 | ENDPROC(__dabt_svc) |
199 | 207 | ||
200 | .align 5 | 208 | .align 5 |
201 | __irq_svc: | 209 | __irq_svc: |
202 | svc_entry | 210 | svc_entry |
211 | irq_handler | ||
203 | 212 | ||
204 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
205 | bl trace_hardirqs_off | ||
206 | #endif | ||
207 | #ifdef CONFIG_PREEMPT | 213 | #ifdef CONFIG_PREEMPT |
208 | get_thread_info tsk | 214 | get_thread_info tsk |
209 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count | 215 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count |
210 | add r7, r8, #1 @ increment it | ||
211 | str r7, [tsk, #TI_PREEMPT] | ||
212 | #endif | ||
213 | |||
214 | irq_handler | ||
215 | #ifdef CONFIG_PREEMPT | ||
216 | str r8, [tsk, #TI_PREEMPT] @ restore preempt count | ||
217 | ldr r0, [tsk, #TI_FLAGS] @ get flags | 216 | ldr r0, [tsk, #TI_FLAGS] @ get flags |
218 | teq r8, #0 @ if preempt count != 0 | 217 | teq r8, #0 @ if preempt count != 0 |
219 | movne r0, #0 @ force flags to 0 | 218 | movne r0, #0 @ force flags to 0 |
220 | tst r0, #_TIF_NEED_RESCHED | 219 | tst r0, #_TIF_NEED_RESCHED |
221 | blne svc_preempt | 220 | blne svc_preempt |
222 | #endif | 221 | #endif |
223 | ldr r4, [sp, #S_PSR] @ irqs are already disabled | 222 | |
224 | #ifdef CONFIG_TRACE_IRQFLAGS | 223 | #ifdef CONFIG_TRACE_IRQFLAGS |
225 | tst r4, #PSR_I_BIT | 224 | @ The parent context IRQs must have been enabled to get here in |
226 | bleq trace_hardirqs_on | 225 | @ the first place, so there's no point checking the PSR I bit. |
226 | bl trace_hardirqs_on | ||
227 | #endif | 227 | #endif |
228 | svc_exit r4 @ return from exception | 228 | svc_exit r5 @ return from exception |
229 | UNWIND(.fnend ) | 229 | UNWIND(.fnend ) |
230 | ENDPROC(__irq_svc) | 230 | ENDPROC(__irq_svc) |
231 | 231 | ||
@@ -251,7 +251,6 @@ __und_svc: | |||
251 | #else | 251 | #else |
252 | svc_entry | 252 | svc_entry |
253 | #endif | 253 | #endif |
254 | |||
255 | @ | 254 | @ |
256 | @ call emulation code, which returns using r9 if it has emulated | 255 | @ call emulation code, which returns using r9 if it has emulated |
257 | @ the instruction, or the more conventional lr if we are to treat | 256 | @ the instruction, or the more conventional lr if we are to treat |
@@ -260,15 +259,16 @@ __und_svc: | |||
260 | @ r0 - instruction | 259 | @ r0 - instruction |
261 | @ | 260 | @ |
262 | #ifndef CONFIG_THUMB2_KERNEL | 261 | #ifndef CONFIG_THUMB2_KERNEL |
263 | ldr r0, [r2, #-4] | 262 | ldr r0, [r4, #-4] |
264 | #else | 263 | #else |
265 | ldrh r0, [r2, #-2] @ Thumb instruction at LR - 2 | 264 | ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2 |
266 | and r9, r0, #0xf800 | 265 | and r9, r0, #0xf800 |
267 | cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 | 266 | cmp r9, #0xe800 @ 32-bit instruction if xx >= 0 |
268 | ldrhhs r9, [r2] @ bottom 16 bits | 267 | ldrhhs r9, [r4] @ bottom 16 bits |
269 | orrhs r0, r9, r0, lsl #16 | 268 | orrhs r0, r9, r0, lsl #16 |
270 | #endif | 269 | #endif |
271 | adr r9, BSYM(1f) | 270 | adr r9, BSYM(1f) |
271 | mov r2, r4 | ||
272 | bl call_fpe | 272 | bl call_fpe |
273 | 273 | ||
274 | mov r0, sp @ struct pt_regs *regs | 274 | mov r0, sp @ struct pt_regs *regs |
@@ -282,45 +282,35 @@ __und_svc: | |||
282 | @ | 282 | @ |
283 | @ restore SPSR and restart the instruction | 283 | @ restore SPSR and restart the instruction |
284 | @ | 284 | @ |
285 | ldr r2, [sp, #S_PSR] @ Get SVC cpsr | 285 | ldr r5, [sp, #S_PSR] @ Get SVC cpsr |
286 | svc_exit r2 @ return from exception | 286 | #ifdef CONFIG_TRACE_IRQFLAGS |
287 | tst r5, #PSR_I_BIT | ||
288 | bleq trace_hardirqs_on | ||
289 | tst r5, #PSR_I_BIT | ||
290 | blne trace_hardirqs_off | ||
291 | #endif | ||
292 | svc_exit r5 @ return from exception | ||
287 | UNWIND(.fnend ) | 293 | UNWIND(.fnend ) |
288 | ENDPROC(__und_svc) | 294 | ENDPROC(__und_svc) |
289 | 295 | ||
290 | .align 5 | 296 | .align 5 |
291 | __pabt_svc: | 297 | __pabt_svc: |
292 | svc_entry | 298 | svc_entry |
293 | |||
294 | @ | ||
295 | @ re-enable interrupts if appropriate | ||
296 | @ | ||
297 | mrs r9, cpsr | ||
298 | tst r3, #PSR_I_BIT | ||
299 | biceq r9, r9, #PSR_I_BIT | ||
300 | |||
301 | mov r0, r2 @ pass address of aborted instruction. | ||
302 | #ifdef MULTI_PABORT | ||
303 | ldr r4, .LCprocfns | ||
304 | mov lr, pc | ||
305 | ldr pc, [r4, #PROCESSOR_PABT_FUNC] | ||
306 | #else | ||
307 | bl CPU_PABORT_HANDLER | ||
308 | #endif | ||
309 | debug_entry r1 | ||
310 | msr cpsr_c, r9 @ Maybe enable interrupts | ||
311 | mov r2, sp @ regs | 299 | mov r2, sp @ regs |
312 | bl do_PrefetchAbort @ call abort handler | 300 | pabt_helper |
313 | 301 | ||
314 | @ | 302 | @ |
315 | @ IRQs off again before pulling preserved data off the stack | 303 | @ IRQs off again before pulling preserved data off the stack |
316 | @ | 304 | @ |
317 | disable_irq_notrace | 305 | disable_irq_notrace |
318 | 306 | ||
319 | @ | 307 | #ifdef CONFIG_TRACE_IRQFLAGS |
320 | @ restore SPSR and restart the instruction | 308 | tst r5, #PSR_I_BIT |
321 | @ | 309 | bleq trace_hardirqs_on |
322 | ldr r2, [sp, #S_PSR] | 310 | tst r5, #PSR_I_BIT |
323 | svc_exit r2 @ return from exception | 311 | blne trace_hardirqs_off |
312 | #endif | ||
313 | svc_exit r5 @ return from exception | ||
324 | UNWIND(.fnend ) | 314 | UNWIND(.fnend ) |
325 | ENDPROC(__pabt_svc) | 315 | ENDPROC(__pabt_svc) |
326 | 316 | ||
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc) | |||
351 | ARM( stmib sp, {r1 - r12} ) | 341 | ARM( stmib sp, {r1 - r12} ) |
352 | THUMB( stmia sp, {r0 - r12} ) | 342 | THUMB( stmia sp, {r0 - r12} ) |
353 | 343 | ||
354 | ldmia r0, {r1 - r3} | 344 | ldmia r0, {r3 - r5} |
355 | add r0, sp, #S_PC @ here for interlock avoidance | 345 | add r0, sp, #S_PC @ here for interlock avoidance |
356 | mov r4, #-1 @ "" "" "" "" | 346 | mov r6, #-1 @ "" "" "" "" |
357 | 347 | ||
358 | str r1, [sp] @ save the "real" r0 copied | 348 | str r3, [sp] @ save the "real" r0 copied |
359 | @ from the exception stack | 349 | @ from the exception stack |
360 | 350 | ||
361 | @ | 351 | @ |
362 | @ We are now ready to fill in the remaining blanks on the stack: | 352 | @ We are now ready to fill in the remaining blanks on the stack: |
363 | @ | 353 | @ |
364 | @ r2 - lr_<exception>, already fixed up for correct return/restart | 354 | @ r4 - lr_<exception>, already fixed up for correct return/restart |
365 | @ r3 - spsr_<exception> | 355 | @ r5 - spsr_<exception> |
366 | @ r4 - orig_r0 (see pt_regs definition in ptrace.h) | 356 | @ r6 - orig_r0 (see pt_regs definition in ptrace.h) |
367 | @ | 357 | @ |
368 | @ Also, separately save sp_usr and lr_usr | 358 | @ Also, separately save sp_usr and lr_usr |
369 | @ | 359 | @ |
370 | stmia r0, {r2 - r4} | 360 | stmia r0, {r4 - r6} |
371 | ARM( stmdb r0, {sp, lr}^ ) | 361 | ARM( stmdb r0, {sp, lr}^ ) |
372 | THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) | 362 | THUMB( store_user_sp_lr r0, r1, S_SP - S_PC ) |
373 | 363 | ||
@@ -380,6 +370,10 @@ ENDPROC(__pabt_svc) | |||
380 | @ Clear FP to mark the first stack frame | 370 | @ Clear FP to mark the first stack frame |
381 | @ | 371 | @ |
382 | zero_fp | 372 | zero_fp |
373 | |||
374 | #ifdef CONFIG_IRQSOFF_TRACER | ||
375 | bl trace_hardirqs_off | ||
376 | #endif | ||
383 | .endm | 377 | .endm |
384 | 378 | ||
385 | .macro kuser_cmpxchg_check | 379 | .macro kuser_cmpxchg_check |
@@ -391,7 +385,7 @@ ENDPROC(__pabt_svc) | |||
391 | @ if it was interrupted in a critical region. Here we | 385 | @ if it was interrupted in a critical region. Here we |
392 | @ perform a quick test inline since it should be false | 386 | @ perform a quick test inline since it should be false |
393 | @ 99.9999% of the time. The rest is done out of line. | 387 | @ 99.9999% of the time. The rest is done out of line. |
394 | cmp r2, #TASK_SIZE | 388 | cmp r4, #TASK_SIZE |
395 | blhs kuser_cmpxchg_fixup | 389 | blhs kuser_cmpxchg_fixup |
396 | #endif | 390 | #endif |
397 | #endif | 391 | #endif |
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc) | |||
401 | __dabt_usr: | 395 | __dabt_usr: |
402 | usr_entry | 396 | usr_entry |
403 | kuser_cmpxchg_check | 397 | kuser_cmpxchg_check |
404 | |||
405 | @ | ||
406 | @ Call the processor-specific abort handler: | ||
407 | @ | ||
408 | @ r2 - aborted context pc | ||
409 | @ r3 - aborted context cpsr | ||
410 | @ | ||
411 | @ The abort handler must return the aborted address in r0, and | ||
412 | @ the fault status register in r1. | ||
413 | @ | ||
414 | #ifdef MULTI_DABORT | ||
415 | ldr r4, .LCprocfns | ||
416 | mov lr, pc | ||
417 | ldr pc, [r4, #PROCESSOR_DABT_FUNC] | ||
418 | #else | ||
419 | bl CPU_DABORT_HANDLER | ||
420 | #endif | ||
421 | |||
422 | @ | ||
423 | @ IRQs on, then call the main handler | ||
424 | @ | ||
425 | debug_entry r1 | ||
426 | enable_irq | ||
427 | mov r2, sp | 398 | mov r2, sp |
428 | adr lr, BSYM(ret_from_exception) | 399 | dabt_helper |
429 | b do_DataAbort | 400 | b ret_from_exception |
430 | UNWIND(.fnend ) | 401 | UNWIND(.fnend ) |
431 | ENDPROC(__dabt_usr) | 402 | ENDPROC(__dabt_usr) |
432 | 403 | ||
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr) | |||
434 | __irq_usr: | 405 | __irq_usr: |
435 | usr_entry | 406 | usr_entry |
436 | kuser_cmpxchg_check | 407 | kuser_cmpxchg_check |
437 | |||
438 | #ifdef CONFIG_IRQSOFF_TRACER | ||
439 | bl trace_hardirqs_off | ||
440 | #endif | ||
441 | |||
442 | get_thread_info tsk | ||
443 | #ifdef CONFIG_PREEMPT | ||
444 | ldr r8, [tsk, #TI_PREEMPT] @ get preempt count | ||
445 | add r7, r8, #1 @ increment it | ||
446 | str r7, [tsk, #TI_PREEMPT] | ||
447 | #endif | ||
448 | |||
449 | irq_handler | 408 | irq_handler |
450 | #ifdef CONFIG_PREEMPT | 409 | get_thread_info tsk |
451 | ldr r0, [tsk, #TI_PREEMPT] | ||
452 | str r8, [tsk, #TI_PREEMPT] | ||
453 | teq r0, r7 | ||
454 | ARM( strne r0, [r0, -r0] ) | ||
455 | THUMB( movne r0, #0 ) | ||
456 | THUMB( strne r0, [r0] ) | ||
457 | #endif | ||
458 | |||
459 | mov why, #0 | 410 | mov why, #0 |
460 | b ret_to_user_from_irq | 411 | b ret_to_user_from_irq |
461 | UNWIND(.fnend ) | 412 | UNWIND(.fnend ) |
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr) | |||
467 | __und_usr: | 418 | __und_usr: |
468 | usr_entry | 419 | usr_entry |
469 | 420 | ||
421 | mov r2, r4 | ||
422 | mov r3, r5 | ||
423 | |||
470 | @ | 424 | @ |
471 | @ fall through to the emulation code, which returns using r9 if | 425 | @ fall through to the emulation code, which returns using r9 if |
472 | @ it has emulated the instruction, or the more conventional lr | 426 | @ it has emulated the instruction, or the more conventional lr |
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown) | |||
682 | .align 5 | 636 | .align 5 |
683 | __pabt_usr: | 637 | __pabt_usr: |
684 | usr_entry | 638 | usr_entry |
685 | |||
686 | mov r0, r2 @ pass address of aborted instruction. | ||
687 | #ifdef MULTI_PABORT | ||
688 | ldr r4, .LCprocfns | ||
689 | mov lr, pc | ||
690 | ldr pc, [r4, #PROCESSOR_PABT_FUNC] | ||
691 | #else | ||
692 | bl CPU_PABORT_HANDLER | ||
693 | #endif | ||
694 | debug_entry r1 | ||
695 | enable_irq @ Enable interrupts | ||
696 | mov r2, sp @ regs | 639 | mov r2, sp @ regs |
697 | bl do_PrefetchAbort @ call abort handler | 640 | pabt_helper |
698 | UNWIND(.fnend ) | 641 | UNWIND(.fnend ) |
699 | /* fall through */ | 642 | /* fall through */ |
700 | /* | 643 | /* |
@@ -927,13 +870,13 @@ __kuser_cmpxchg: @ 0xffff0fc0 | |||
927 | .text | 870 | .text |
928 | kuser_cmpxchg_fixup: | 871 | kuser_cmpxchg_fixup: |
929 | @ Called from kuser_cmpxchg_check macro. | 872 | @ Called from kuser_cmpxchg_check macro. |
930 | @ r2 = address of interrupted insn (must be preserved). | 873 | @ r4 = address of interrupted insn (must be preserved). |
931 | @ sp = saved regs. r7 and r8 are clobbered. | 874 | @ sp = saved regs. r7 and r8 are clobbered. |
932 | @ 1b = first critical insn, 2b = last critical insn. | 875 | @ 1b = first critical insn, 2b = last critical insn. |
933 | @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b. | 876 | @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b. |
934 | mov r7, #0xffff0fff | 877 | mov r7, #0xffff0fff |
935 | sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) | 878 | sub r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg))) |
936 | subs r8, r2, r7 | 879 | subs r8, r4, r7 |
937 | rsbcss r8, r8, #(2b - 1b) | 880 | rsbcss r8, r8, #(2b - 1b) |
938 | strcs r7, [sp, #S_PC] | 881 | strcs r7, [sp, #S_PC] |
939 | mov pc, lr | 882 | mov pc, lr |
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 051166c2a932..4d6ad8348e89 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S | |||
@@ -165,25 +165,6 @@ | |||
165 | .endm | 165 | .endm |
166 | #endif /* !CONFIG_THUMB2_KERNEL */ | 166 | #endif /* !CONFIG_THUMB2_KERNEL */ |
167 | 167 | ||
168 | @ | ||
169 | @ Debug exceptions are taken as prefetch or data aborts. | ||
170 | @ We must disable preemption during the handler so that | ||
171 | @ we can access the debug registers safely. | ||
172 | @ | ||
173 | .macro debug_entry, fsr | ||
174 | #if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT) | ||
175 | ldr r4, =0x40f @ mask out fsr.fs | ||
176 | and r5, r4, \fsr | ||
177 | cmp r5, #2 @ debug exception | ||
178 | bne 1f | ||
179 | get_thread_info r10 | ||
180 | ldr r6, [r10, #TI_PREEMPT] @ get preempt count | ||
181 | add r11, r6, #1 @ increment it | ||
182 | str r11, [r10, #TI_PREEMPT] | ||
183 | 1: | ||
184 | #endif | ||
185 | .endm | ||
186 | |||
187 | /* | 168 | /* |
188 | * These are the registers used in the syscall handler, and allow us to | 169 | * These are the registers used in the syscall handler, and allow us to |
189 | * have in theory up to 7 arguments to a function - r0 to r6. | 170 | * have in theory up to 7 arguments to a function - r0 to r6. |
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index 6b1e0ad9ec3b..d46f25968bec 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -32,8 +32,16 @@ | |||
32 | * numbers for r1. | 32 | * numbers for r1. |
33 | * | 33 | * |
34 | */ | 34 | */ |
35 | .arm | ||
36 | |||
35 | __HEAD | 37 | __HEAD |
36 | ENTRY(stext) | 38 | ENTRY(stext) |
39 | |||
40 | THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. | ||
41 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, | ||
42 | THUMB( .thumb ) @ switch to Thumb now. | ||
43 | THUMB(1: ) | ||
44 | |||
37 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode | 45 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode |
38 | @ and irqs disabled | 46 | @ and irqs disabled |
39 | #ifndef CONFIG_CPU_CP15 | 47 | #ifndef CONFIG_CPU_CP15 |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 278c1b0ebb2e..742b6108a001 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -71,8 +71,16 @@ | |||
71 | * crap here - that's what the boot loader (or in extreme, well justified | 71 | * crap here - that's what the boot loader (or in extreme, well justified |
72 | * circumstances, zImage) is for. | 72 | * circumstances, zImage) is for. |
73 | */ | 73 | */ |
74 | .arm | ||
75 | |||
74 | __HEAD | 76 | __HEAD |
75 | ENTRY(stext) | 77 | ENTRY(stext) |
78 | |||
79 | THUMB( adr r9, BSYM(1f) ) @ Kernel is always entered in ARM. | ||
80 | THUMB( bx r9 ) @ If this is a Thumb-2 kernel, | ||
81 | THUMB( .thumb ) @ switch to Thumb now. | ||
82 | THUMB(1: ) | ||
83 | |||
76 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode | 84 | setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode |
77 | @ and irqs disabled | 85 | @ and irqs disabled |
78 | mrc p15, 0, r9, c0, c0 @ get processor id | 86 | mrc p15, 0, r9, c0, c0 @ get processor id |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 87acc25d7a3e..a927ca1f5566 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -796,7 +796,7 @@ unlock: | |||
796 | 796 | ||
797 | /* | 797 | /* |
798 | * Called from either the Data Abort Handler [watchpoint] or the | 798 | * Called from either the Data Abort Handler [watchpoint] or the |
799 | * Prefetch Abort Handler [breakpoint] with preemption disabled. | 799 | * Prefetch Abort Handler [breakpoint] with interrupts disabled. |
800 | */ | 800 | */ |
801 | static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | 801 | static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, |
802 | struct pt_regs *regs) | 802 | struct pt_regs *regs) |
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
804 | int ret = 0; | 804 | int ret = 0; |
805 | u32 dscr; | 805 | u32 dscr; |
806 | 806 | ||
807 | /* We must be called with preemption disabled. */ | 807 | preempt_disable(); |
808 | WARN_ON(preemptible()); | 808 | |
809 | if (interrupts_enabled(regs)) | ||
810 | local_irq_enable(); | ||
809 | 811 | ||
810 | /* We only handle watchpoints and hardware breakpoints. */ | 812 | /* We only handle watchpoints and hardware breakpoints. */ |
811 | ARM_DBG_READ(c1, 0, dscr); | 813 | ARM_DBG_READ(c1, 0, dscr); |
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
824 | ret = 1; /* Unhandled fault. */ | 826 | ret = 1; /* Unhandled fault. */ |
825 | } | 827 | } |
826 | 828 | ||
827 | /* | ||
828 | * Re-enable preemption after it was disabled in the | ||
829 | * low-level exception handling code. | ||
830 | */ | ||
831 | preempt_enable(); | 829 | preempt_enable(); |
832 | 830 | ||
833 | return ret; | 831 | return ret; |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 83bbad03fcc6..0f928a131af8 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void) | |||
131 | 131 | ||
132 | #ifdef CONFIG_HOTPLUG_CPU | 132 | #ifdef CONFIG_HOTPLUG_CPU |
133 | 133 | ||
134 | static bool migrate_one_irq(struct irq_data *d) | 134 | static bool migrate_one_irq(struct irq_desc *desc) |
135 | { | 135 | { |
136 | unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); | 136 | struct irq_data *d = irq_desc_get_irq_data(desc); |
137 | const struct cpumask *affinity = d->affinity; | ||
138 | struct irq_chip *c; | ||
137 | bool ret = false; | 139 | bool ret = false; |
138 | 140 | ||
139 | if (cpu >= nr_cpu_ids) { | 141 | /* |
140 | cpu = cpumask_any(cpu_online_mask); | 142 | * If this is a per-CPU interrupt, or the affinity does not |
143 | * include this CPU, then we have nothing to do. | ||
144 | */ | ||
145 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
146 | return false; | ||
147 | |||
148 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
149 | affinity = cpu_online_mask; | ||
141 | ret = true; | 150 | ret = true; |
142 | } | 151 | } |
143 | 152 | ||
144 | pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); | 153 | c = irq_data_get_irq_chip(d); |
145 | 154 | if (c->irq_set_affinity) | |
146 | d->chip->irq_set_affinity(d, cpumask_of(cpu), true); | 155 | c->irq_set_affinity(d, affinity, true); |
156 | else | ||
157 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
147 | 158 | ||
148 | return ret; | 159 | return ret; |
149 | } | 160 | } |
150 | 161 | ||
151 | /* | 162 | /* |
152 | * The CPU has been marked offline. Migrate IRQs off this CPU. If | 163 | * The current CPU has been marked offline. Migrate IRQs off this CPU. |
153 | * the affinity settings do not allow other CPUs, force them onto any | 164 | * If the affinity settings do not allow other CPUs, force them onto any |
154 | * available CPU. | 165 | * available CPU. |
166 | * | ||
167 | * Note: we must iterate over all IRQs, whether they have an attached | ||
168 | * action structure or not, as we need to get chained interrupts too. | ||
155 | */ | 169 | */ |
156 | void migrate_irqs(void) | 170 | void migrate_irqs(void) |
157 | { | 171 | { |
158 | unsigned int i, cpu = smp_processor_id(); | 172 | unsigned int i; |
159 | struct irq_desc *desc; | 173 | struct irq_desc *desc; |
160 | unsigned long flags; | 174 | unsigned long flags; |
161 | 175 | ||
162 | local_irq_save(flags); | 176 | local_irq_save(flags); |
163 | 177 | ||
164 | for_each_irq_desc(i, desc) { | 178 | for_each_irq_desc(i, desc) { |
165 | struct irq_data *d = &desc->irq_data; | ||
166 | bool affinity_broken = false; | 179 | bool affinity_broken = false; |
167 | 180 | ||
168 | raw_spin_lock(&desc->lock); | 181 | if (!desc) |
169 | do { | 182 | continue; |
170 | if (desc->action == NULL) | ||
171 | break; | ||
172 | |||
173 | if (d->node != cpu) | ||
174 | break; | ||
175 | 183 | ||
176 | affinity_broken = migrate_one_irq(d); | 184 | raw_spin_lock(&desc->lock); |
177 | } while (0); | 185 | affinity_broken = migrate_one_irq(desc); |
178 | raw_spin_unlock(&desc->lock); | 186 | raw_spin_unlock(&desc->lock); |
179 | 187 | ||
180 | if (affinity_broken && printk_ratelimit()) | 188 | if (affinity_broken && printk_ratelimit()) |
181 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); | 189 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, |
190 | smp_processor_id()); | ||
182 | } | 191 | } |
183 | 192 | ||
184 | local_irq_restore(flags); | 193 | local_irq_restore(flags); |
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index fee7c36349eb..016d6a0830a3 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c | |||
@@ -193,8 +193,17 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, | |||
193 | offset -= 0x02000000; | 193 | offset -= 0x02000000; |
194 | offset += sym->st_value - loc; | 194 | offset += sym->st_value - loc; |
195 | 195 | ||
196 | /* only Thumb addresses allowed (no interworking) */ | 196 | /* |
197 | if (!(offset & 1) || | 197 | * For function symbols, only Thumb addresses are |
198 | * allowed (no interworking). | ||
199 | * | ||
200 | * For non-function symbols, the destination | ||
201 | * has no specific ARM/Thumb disposition, so | ||
202 | * the branch is resolved under the assumption | ||
203 | * that interworking is not required. | ||
204 | */ | ||
205 | if ((ELF32_ST_TYPE(sym->st_info) == STT_FUNC && | ||
206 | !(offset & 1)) || | ||
198 | offset <= (s32)0xff000000 || | 207 | offset <= (s32)0xff000000 || |
199 | offset >= (s32)0x01000000) { | 208 | offset >= (s32)0x01000000) { |
200 | pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", | 209 | pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n", |
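
For readability, the range check introduced above can be condensed into a standalone helper; this is only a restatement of the hunk under the ELF STT_FUNC convention (Thumb function addresses carry bit 0 set), not new kernel code:

	#include <linux/elf.h>
	#include <linux/types.h>

	/* Returns true when a Thumb branch relocation cannot be applied directly. */
	static bool thm_call_out_of_range(const Elf32_Sym *sym, s32 offset)
	{
		/* ARM-state function target: needs interworking, which is not supported. */
		if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC && !(offset & 1))
			return true;
		/* Beyond the +/-16MB reach of a 32-bit Thumb-2 branch. */
		return offset <= (s32)0xff000000 || offset >= (s32)0x01000000;
	}
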
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d53c0abc4dd3..8d8507858e5c 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void) | |||
435 | if (irq >= 0) | 435 | if (irq >= 0) |
436 | free_irq(irq, NULL); | 436 | free_irq(irq, NULL); |
437 | } | 437 | } |
438 | release_pmu(pmu_device); | 438 | release_pmu(ARM_PMU_DEVICE_CPU); |
439 | pmu_device = NULL; | 439 | pmu_device = NULL; |
440 | } | 440 | } |
441 | 441 | ||
@@ -454,7 +454,7 @@ armpmu_release_hardware(void) | |||
454 | } | 454 | } |
455 | armpmu->stop(); | 455 | armpmu->stop(); |
456 | 456 | ||
457 | release_pmu(pmu_device); | 457 | release_pmu(ARM_PMU_DEVICE_CPU); |
458 | pmu_device = NULL; | 458 | pmu_device = NULL; |
459 | } | 459 | } |
460 | 460 | ||
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event) | |||
583 | static void armpmu_enable(struct pmu *pmu) | 583 | static void armpmu_enable(struct pmu *pmu) |
584 | { | 584 | { |
585 | /* Enable all of the perf events on hardware. */ | 585 | /* Enable all of the perf events on hardware. */ |
586 | int idx; | 586 | int idx, enabled = 0; |
587 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 587 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
588 | 588 | ||
589 | if (!armpmu) | 589 | if (!armpmu) |
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu) | |||
596 | continue; | 596 | continue; |
597 | 597 | ||
598 | armpmu->enable(&event->hw, idx); | 598 | armpmu->enable(&event->hw, idx); |
599 | enabled = 1; | ||
599 | } | 600 | } |
600 | 601 | ||
601 | armpmu->start(); | 602 | if (enabled) |
603 | armpmu->start(); | ||
602 | } | 604 | } |
603 | 605 | ||
604 | static void armpmu_disable(struct pmu *pmu) | 606 | static void armpmu_disable(struct pmu *pmu) |
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index 2c79eec19262..2b70709376c3 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/of_device.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | 22 | ||
22 | #include <asm/pmu.h> | 23 | #include <asm/pmu.h> |
@@ -25,36 +26,88 @@ static volatile long pmu_lock; | |||
25 | 26 | ||
26 | static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; | 27 | static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; |
27 | 28 | ||
28 | static int __devinit pmu_device_probe(struct platform_device *pdev) | 29 | static int __devinit pmu_register(struct platform_device *pdev, |
30 | enum arm_pmu_type type) | ||
29 | { | 31 | { |
30 | 32 | if (type < 0 || type >= ARM_NUM_PMU_DEVICES) { | |
31 | if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) { | ||
32 | pr_warning("received registration request for unknown " | 33 | pr_warning("received registration request for unknown " |
33 | "device %d\n", pdev->id); | 34 | "device %d\n", type); |
34 | return -EINVAL; | 35 | return -EINVAL; |
35 | } | 36 | } |
36 | 37 | ||
37 | if (pmu_devices[pdev->id]) | 38 | if (pmu_devices[type]) { |
38 | pr_warning("registering new PMU device type %d overwrites " | 39 | pr_warning("rejecting duplicate registration of PMU device " |
39 | "previous registration!\n", pdev->id); | 40 | "type %d.", type); |
40 | else | 41 | return -ENOSPC; |
41 | pr_info("registered new PMU device of type %d\n", | 42 | } |
42 | pdev->id); | ||
43 | 43 | ||
44 | pmu_devices[pdev->id] = pdev; | 44 | pr_info("registered new PMU device of type %d\n", type); |
45 | pmu_devices[type] = pdev; | ||
45 | return 0; | 46 | return 0; |
46 | } | 47 | } |
47 | 48 | ||
48 | static struct platform_driver pmu_driver = { | 49 | #define OF_MATCH_PMU(_name, _type) { \ |
50 | .compatible = _name, \ | ||
51 | .data = (void *)_type, \ | ||
52 | } | ||
53 | |||
54 | #define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU) | ||
55 | |||
56 | static struct of_device_id armpmu_of_device_ids[] = { | ||
57 | OF_MATCH_CPU("arm,cortex-a9-pmu"), | ||
58 | OF_MATCH_CPU("arm,cortex-a8-pmu"), | ||
59 | OF_MATCH_CPU("arm,arm1136-pmu"), | ||
60 | OF_MATCH_CPU("arm,arm1176-pmu"), | ||
61 | {}, | ||
62 | }; | ||
63 | |||
64 | #define PLAT_MATCH_PMU(_name, _type) { \ | ||
65 | .name = _name, \ | ||
66 | .driver_data = _type, \ | ||
67 | } | ||
68 | |||
69 | #define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU) | ||
70 | |||
71 | static struct platform_device_id armpmu_plat_device_ids[] = { | ||
72 | PLAT_MATCH_CPU("arm-pmu"), | ||
73 | {}, | ||
74 | }; | ||
75 | |||
76 | enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) | ||
77 | { | ||
78 | const struct of_device_id *of_id; | ||
79 | const struct platform_device_id *pdev_id; | ||
80 | |||
81 | /* provided by of_device_id table */ | ||
82 | if (pdev->dev.of_node) { | ||
83 | of_id = of_match_device(armpmu_of_device_ids, &pdev->dev); | ||
84 | BUG_ON(!of_id); | ||
85 | return (enum arm_pmu_type)of_id->data; | ||
86 | } | ||
87 | |||
88 | /* Provided by platform_device_id table */ | ||
89 | pdev_id = platform_get_device_id(pdev); | ||
90 | BUG_ON(!pdev_id); | ||
91 | return pdev_id->driver_data; | ||
92 | } | ||
93 | |||
94 | static int __devinit armpmu_device_probe(struct platform_device *pdev) | ||
95 | { | ||
96 | return pmu_register(pdev, armpmu_device_type(pdev)); | ||
97 | } | ||
98 | |||
99 | static struct platform_driver armpmu_driver = { | ||
49 | .driver = { | 100 | .driver = { |
50 | .name = "arm-pmu", | 101 | .name = "arm-pmu", |
102 | .of_match_table = armpmu_of_device_ids, | ||
51 | }, | 103 | }, |
52 | .probe = pmu_device_probe, | 104 | .probe = armpmu_device_probe, |
105 | .id_table = armpmu_plat_device_ids, | ||
53 | }; | 106 | }; |
54 | 107 | ||
55 | static int __init register_pmu_driver(void) | 108 | static int __init register_pmu_driver(void) |
56 | { | 109 | { |
57 | return platform_driver_register(&pmu_driver); | 110 | return platform_driver_register(&armpmu_driver); |
58 | } | 111 | } |
59 | device_initcall(register_pmu_driver); | 112 | device_initcall(register_pmu_driver); |
60 | 113 | ||
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device) | |||
77 | EXPORT_SYMBOL_GPL(reserve_pmu); | 130 | EXPORT_SYMBOL_GPL(reserve_pmu); |
78 | 131 | ||
79 | int | 132 | int |
80 | release_pmu(struct platform_device *pdev) | 133 | release_pmu(enum arm_pmu_type device) |
81 | { | 134 | { |
82 | if (WARN_ON(pdev != pmu_devices[pdev->id])) | 135 | if (WARN_ON(!pmu_devices[device])) |
83 | return -EINVAL; | 136 | return -EINVAL; |
84 | clear_bit_unlock(pdev->id, &pmu_lock); | 137 | clear_bit_unlock(device, &pmu_lock); |
85 | return 0; | 138 | return 0; |
86 | } | 139 | } |
87 | EXPORT_SYMBOL_GPL(release_pmu); | 140 | EXPORT_SYMBOL_GPL(release_pmu); |
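
With the id_table and OF match table in place, non-DT boards can keep registering the CPU PMU as an "arm-pmu" platform device, while DT platforms match a node such as compatible = "arm,cortex-a9-pmu" instead. A sketch of such board code under assumed values (the IRQ number 100 is purely a placeholder):

	#include <linux/init.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <asm/pmu.h>

	static struct resource example_pmu_resource = {
		.start	= 100,			/* placeholder PMU IRQ */
		.end	= 100,
		.flags	= IORESOURCE_IRQ,
	};

	static struct platform_device example_pmu_device = {
		.name		= "arm-pmu",
		.id		= ARM_PMU_DEVICE_CPU,
		.num_resources	= 1,
		.resource	= &example_pmu_resource,
	};

	static int __init example_pmu_init(void)
	{
		return platform_device_register(&example_pmu_device);
	}
	device_initcall(example_pmu_init);
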
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ed11fb08b05a..9c3278f37796 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup); | |||
73 | #endif | 73 | #endif |
74 | 74 | ||
75 | extern void paging_init(struct machine_desc *desc); | 75 | extern void paging_init(struct machine_desc *desc); |
76 | extern void sanity_check_meminfo(void); | ||
76 | extern void reboot_setup(char *str); | 77 | extern void reboot_setup(char *str); |
77 | 78 | ||
78 | unsigned int processor_id; | 79 | unsigned int processor_id; |
@@ -342,54 +343,6 @@ static void __init feat_v6_fixup(void) | |||
342 | elf_hwcap &= ~HWCAP_TLS; | 343 | elf_hwcap &= ~HWCAP_TLS; |
343 | } | 344 | } |
344 | 345 | ||
345 | static void __init setup_processor(void) | ||
346 | { | ||
347 | struct proc_info_list *list; | ||
348 | |||
349 | /* | ||
350 | * locate processor in the list of supported processor | ||
351 | * types. The linker builds this table for us from the | ||
352 | * entries in arch/arm/mm/proc-*.S | ||
353 | */ | ||
354 | list = lookup_processor_type(read_cpuid_id()); | ||
355 | if (!list) { | ||
356 | printk("CPU configuration botched (ID %08x), unable " | ||
357 | "to continue.\n", read_cpuid_id()); | ||
358 | while (1); | ||
359 | } | ||
360 | |||
361 | cpu_name = list->cpu_name; | ||
362 | |||
363 | #ifdef MULTI_CPU | ||
364 | processor = *list->proc; | ||
365 | #endif | ||
366 | #ifdef MULTI_TLB | ||
367 | cpu_tlb = *list->tlb; | ||
368 | #endif | ||
369 | #ifdef MULTI_USER | ||
370 | cpu_user = *list->user; | ||
371 | #endif | ||
372 | #ifdef MULTI_CACHE | ||
373 | cpu_cache = *list->cache; | ||
374 | #endif | ||
375 | |||
376 | printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", | ||
377 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, | ||
378 | proc_arch[cpu_architecture()], cr_alignment); | ||
379 | |||
380 | sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); | ||
381 | sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); | ||
382 | elf_hwcap = list->elf_hwcap; | ||
383 | #ifndef CONFIG_ARM_THUMB | ||
384 | elf_hwcap &= ~HWCAP_THUMB; | ||
385 | #endif | ||
386 | |||
387 | feat_v6_fixup(); | ||
388 | |||
389 | cacheid_init(); | ||
390 | cpu_proc_init(); | ||
391 | } | ||
392 | |||
393 | /* | 346 | /* |
394 | * cpu_init - initialise one CPU. | 347 | * cpu_init - initialise one CPU. |
395 | * | 348 | * |
@@ -405,6 +358,8 @@ void cpu_init(void) | |||
405 | BUG(); | 358 | BUG(); |
406 | } | 359 | } |
407 | 360 | ||
361 | cpu_proc_init(); | ||
362 | |||
408 | /* | 363 | /* |
409 | * Define the placement constraint for the inline asm directive below. | 364 | * Define the placement constraint for the inline asm directive below. |
410 | * In Thumb-2, msr with an immediate value is not allowed. | 365 | * In Thumb-2, msr with an immediate value is not allowed. |
@@ -441,6 +396,54 @@ void cpu_init(void) | |||
441 | : "r14"); | 396 | : "r14"); |
442 | } | 397 | } |
443 | 398 | ||
399 | static void __init setup_processor(void) | ||
400 | { | ||
401 | struct proc_info_list *list; | ||
402 | |||
403 | /* | ||
404 | * locate processor in the list of supported processor | ||
405 | * types. The linker builds this table for us from the | ||
406 | * entries in arch/arm/mm/proc-*.S | ||
407 | */ | ||
408 | list = lookup_processor_type(read_cpuid_id()); | ||
409 | if (!list) { | ||
410 | printk("CPU configuration botched (ID %08x), unable " | ||
411 | "to continue.\n", read_cpuid_id()); | ||
412 | while (1); | ||
413 | } | ||
414 | |||
415 | cpu_name = list->cpu_name; | ||
416 | |||
417 | #ifdef MULTI_CPU | ||
418 | processor = *list->proc; | ||
419 | #endif | ||
420 | #ifdef MULTI_TLB | ||
421 | cpu_tlb = *list->tlb; | ||
422 | #endif | ||
423 | #ifdef MULTI_USER | ||
424 | cpu_user = *list->user; | ||
425 | #endif | ||
426 | #ifdef MULTI_CACHE | ||
427 | cpu_cache = *list->cache; | ||
428 | #endif | ||
429 | |||
430 | printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", | ||
431 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, | ||
432 | proc_arch[cpu_architecture()], cr_alignment); | ||
433 | |||
434 | sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS); | ||
435 | sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS); | ||
436 | elf_hwcap = list->elf_hwcap; | ||
437 | #ifndef CONFIG_ARM_THUMB | ||
438 | elf_hwcap &= ~HWCAP_THUMB; | ||
439 | #endif | ||
440 | |||
441 | feat_v6_fixup(); | ||
442 | |||
443 | cacheid_init(); | ||
444 | cpu_init(); | ||
445 | } | ||
446 | |||
444 | void __init dump_machine_table(void) | 447 | void __init dump_machine_table(void) |
445 | { | 448 | { |
446 | struct machine_desc *p; | 449 | struct machine_desc *p; |
@@ -900,6 +903,7 @@ void __init setup_arch(char **cmdline_p) | |||
900 | 903 | ||
901 | parse_early_param(); | 904 | parse_early_param(); |
902 | 905 | ||
906 | sanity_check_meminfo(); | ||
903 | arm_memblock_init(&meminfo, mdesc); | 907 | arm_memblock_init(&meminfo, mdesc); |
904 | 908 | ||
905 | paging_init(mdesc); | 909 | paging_init(mdesc); |
@@ -913,7 +917,6 @@ void __init setup_arch(char **cmdline_p) | |||
913 | #endif | 917 | #endif |
914 | reserve_crashkernel(); | 918 | reserve_crashkernel(); |
915 | 919 | ||
916 | cpu_init(); | ||
917 | tcm_init(); | 920 | tcm_init(); |
918 | 921 | ||
919 | #ifdef CONFIG_MULTI_IRQ_HANDLER | 922 | #ifdef CONFIG_MULTI_IRQ_HANDLER |
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S index 6398ead9d1c0..dc902f2c6845 100644 --- a/arch/arm/kernel/sleep.S +++ b/arch/arm/kernel/sleep.S | |||
@@ -10,64 +10,61 @@ | |||
10 | /* | 10 | /* |
11 | * Save CPU state for a suspend | 11 | * Save CPU state for a suspend |
12 | * r1 = v:p offset | 12 | * r1 = v:p offset |
13 | * r3 = virtual return function | 13 | * r2 = suspend function arg0 |
14 | * Note: sp is decremented to allocate space for CPU state on stack | 14 | * r3 = suspend function |
15 | * r0-r3,r9,r10,lr corrupted | ||
16 | */ | 15 | */ |
17 | ENTRY(cpu_suspend) | 16 | ENTRY(__cpu_suspend) |
18 | mov r9, lr | 17 | stmfd sp!, {r4 - r11, lr} |
19 | #ifdef MULTI_CPU | 18 | #ifdef MULTI_CPU |
20 | ldr r10, =processor | 19 | ldr r10, =processor |
21 | mov r2, sp @ current virtual SP | 20 | ldr r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state |
22 | ldr r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state | ||
23 | ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function | 21 | ldr ip, [r10, #CPU_DO_RESUME] @ virtual resume function |
24 | sub sp, sp, r0 @ allocate CPU state on stack | 22 | #else |
25 | mov r0, sp @ save pointer | 23 | ldr r5, =cpu_suspend_size |
24 | ldr ip, =cpu_do_resume | ||
25 | #endif | ||
26 | mov r6, sp @ current virtual SP | ||
27 | sub sp, sp, r5 @ allocate CPU state on stack | ||
28 | mov r0, sp @ save pointer to CPU save block | ||
26 | add ip, ip, r1 @ convert resume fn to phys | 29 | add ip, ip, r1 @ convert resume fn to phys |
27 | stmfd sp!, {r1, r2, r3, ip} @ save v:p, virt SP, retfn, phys resume fn | 30 | stmfd sp!, {r1, r6, ip} @ save v:p, virt SP, phys resume fn |
28 | ldr r3, =sleep_save_sp | 31 | ldr r5, =sleep_save_sp |
29 | add r2, sp, r1 @ convert SP to phys | 32 | add r6, sp, r1 @ convert SP to phys |
33 | stmfd sp!, {r2, r3} @ save suspend func arg and pointer | ||
30 | #ifdef CONFIG_SMP | 34 | #ifdef CONFIG_SMP |
31 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) | 35 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) |
32 | ALT_UP(mov lr, #0) | 36 | ALT_UP(mov lr, #0) |
33 | and lr, lr, #15 | 37 | and lr, lr, #15 |
34 | str r2, [r3, lr, lsl #2] @ save phys SP | 38 | str r6, [r5, lr, lsl #2] @ save phys SP |
35 | #else | 39 | #else |
36 | str r2, [r3] @ save phys SP | 40 | str r6, [r5] @ save phys SP |
37 | #endif | 41 | #endif |
42 | #ifdef MULTI_CPU | ||
38 | mov lr, pc | 43 | mov lr, pc |
39 | ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state | 44 | ldr pc, [r10, #CPU_DO_SUSPEND] @ save CPU state |
40 | #else | 45 | #else |
41 | mov r2, sp @ current virtual SP | ||
42 | ldr r0, =cpu_suspend_size | ||
43 | sub sp, sp, r0 @ allocate CPU state on stack | ||
44 | mov r0, sp @ save pointer | ||
45 | stmfd sp!, {r1, r2, r3} @ save v:p, virt SP, return fn | ||
46 | ldr r3, =sleep_save_sp | ||
47 | add r2, sp, r1 @ convert SP to phys | ||
48 | #ifdef CONFIG_SMP | ||
49 | ALT_SMP(mrc p15, 0, lr, c0, c0, 5) | ||
50 | ALT_UP(mov lr, #0) | ||
51 | and lr, lr, #15 | ||
52 | str r2, [r3, lr, lsl #2] @ save phys SP | ||
53 | #else | ||
54 | str r2, [r3] @ save phys SP | ||
55 | #endif | ||
56 | bl cpu_do_suspend | 46 | bl cpu_do_suspend |
57 | #endif | 47 | #endif |
58 | 48 | ||
59 | @ flush data cache | 49 | @ flush data cache |
60 | #ifdef MULTI_CACHE | 50 | #ifdef MULTI_CACHE |
61 | ldr r10, =cpu_cache | 51 | ldr r10, =cpu_cache |
62 | mov lr, r9 | 52 | mov lr, pc |
63 | ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] | 53 | ldr pc, [r10, #CACHE_FLUSH_KERN_ALL] |
64 | #else | 54 | #else |
65 | mov lr, r9 | 55 | bl __cpuc_flush_kern_all |
66 | b __cpuc_flush_kern_all | ||
67 | #endif | 56 | #endif |
68 | ENDPROC(cpu_suspend) | 57 | adr lr, BSYM(cpu_suspend_abort) |
58 | ldmfd sp!, {r0, pc} @ call suspend fn | ||
59 | ENDPROC(__cpu_suspend) | ||
69 | .ltorg | 60 | .ltorg |
70 | 61 | ||
62 | cpu_suspend_abort: | ||
63 | ldmia sp!, {r1 - r3} @ pop v:p, virt SP, phys resume fn | ||
64 | mov sp, r2 | ||
65 | ldmfd sp!, {r4 - r11, pc} | ||
66 | ENDPROC(cpu_suspend_abort) | ||
67 | |||
71 | /* | 68 | /* |
72 | * r0 = control register value | 69 | * r0 = control register value |
73 | * r1 = v:p offset (preserved by cpu_do_resume) | 70 | * r1 = v:p offset (preserved by cpu_do_resume) |
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on) | |||
97 | cpu_resume_after_mmu: | 94 | cpu_resume_after_mmu: |
98 | str r5, [r2, r4, lsl #2] @ restore old mapping | 95 | str r5, [r2, r4, lsl #2] @ restore old mapping |
99 | mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache | 96 | mcr p15, 0, r0, c1, c0, 0 @ turn on D-cache |
100 | mov pc, lr | 97 | bl cpu_init @ restore the und/abt/irq banked regs |
98 | mov r0, #0 @ return zero on success | ||
99 | ldmfd sp!, {r4 - r11, pc} | ||
101 | ENDPROC(cpu_resume_after_mmu) | 100 | ENDPROC(cpu_resume_after_mmu) |
102 | 101 | ||
103 | /* | 102 | /* |
@@ -120,20 +119,11 @@ ENTRY(cpu_resume) | |||
120 | ldr r0, sleep_save_sp @ stack phys addr | 119 | ldr r0, sleep_save_sp @ stack phys addr |
121 | #endif | 120 | #endif |
122 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off | 121 | setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off |
123 | #ifdef MULTI_CPU | 122 | @ load v:p, stack, resume fn |
124 | @ load v:p, stack, return fn, resume fn | 123 | ARM( ldmia r0!, {r1, sp, pc} ) |
125 | ARM( ldmia r0!, {r1, sp, lr, pc} ) | 124 | THUMB( ldmia r0!, {r1, r2, r3} ) |
126 | THUMB( ldmia r0!, {r1, r2, r3, r4} ) | ||
127 | THUMB( mov sp, r2 ) | 125 | THUMB( mov sp, r2 ) |
128 | THUMB( mov lr, r3 ) | 126 | THUMB( bx r3 ) |
129 | THUMB( bx r4 ) | ||
130 | #else | ||
131 | @ load v:p, stack, return fn | ||
132 | ARM( ldmia r0!, {r1, sp, lr} ) | ||
133 | THUMB( ldmia r0!, {r1, r2, lr} ) | ||
134 | THUMB( mov sp, r2 ) | ||
135 | b cpu_do_resume | ||
136 | #endif | ||
137 | ENDPROC(cpu_resume) | 127 | ENDPROC(cpu_resume) |
138 | 128 | ||
139 | sleep_save_sp: | 129 | sleep_save_sp: |
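The sleep.S rework above replaces cpu_suspend with __cpu_suspend: the caller's r4-r11/lr go on the stack, a suspend-function argument and pointer are pushed alongside the v:p offset and saved SP, and resume now comes back through cpu_resume_after_mmu, which re-runs cpu_init() and returns 0, while a finisher that returns falls into cpu_suspend_abort. A rough C-level sketch of that calling contract (cpu_suspend_model and example_finisher are invented names, not kernel symbols):

#include <stdio.h>

/*
 * Sketch only: models the calling contract of the new __cpu_suspend in
 * plain C.  The finisher gets one argument; if it returns, the suspend
 * is treated as aborted and its return value is handed back, while a
 * real resume would come back through the saved state with 0.
 */
static int example_finisher(unsigned long arg)
{
	printf("finisher(%lu): pretending low-power entry failed\n", arg);
	return 1;			/* non-zero: suspend aborted */
}

static int cpu_suspend_model(unsigned long arg, int (*fn)(unsigned long))
{
	/* register save, cache flush and the resume path are elided */
	return fn(arg);			/* 0 would mean "resumed" */
}

int main(void)
{
	printf("suspend returned %d\n", cpu_suspend_model(42, example_finisher));
	return 0;
}
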
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 344e52b16c8c..167e3cbe1f2f 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -318,9 +318,13 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |||
318 | smp_store_cpu_info(cpu); | 318 | smp_store_cpu_info(cpu); |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * OK, now it's safe to let the boot CPU continue | 321 | * OK, now it's safe to let the boot CPU continue. Wait for |
322 | * the CPU migration code to notice that the CPU is online | ||
323 | * before we continue. | ||
322 | */ | 324 | */ |
323 | set_cpu_online(cpu, true); | 325 | set_cpu_online(cpu, true); |
326 | while (!cpu_active(cpu)) | ||
327 | cpu_relax(); | ||
324 | 328 | ||
325 | /* | 329 | /* |
326 | * OK, it's off to the idle thread for us | 330 | * OK, it's off to the idle thread for us |
@@ -361,8 +365,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
361 | */ | 365 | */ |
362 | if (max_cpus > ncores) | 366 | if (max_cpus > ncores) |
363 | max_cpus = ncores; | 367 | max_cpus = ncores; |
364 | 368 | if (ncores > 1 && max_cpus) { | |
365 | if (max_cpus > 1) { | ||
366 | /* | 369 | /* |
367 | * Enable the local timer or broadcast device for the | 370 | * Enable the local timer or broadcast device for the |
368 | * boot CPU, but only if we have more than one CPU. | 371 | * boot CPU, but only if we have more than one CPU. |
@@ -370,6 +373,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
370 | percpu_timer_setup(); | 373 | percpu_timer_setup(); |
371 | 374 | ||
372 | /* | 375 | /* |
376 | * Initialise the present map, which describes the set of CPUs | ||
377 | * actually populated at the present time. A platform should | ||
378 | * re-initialize the map in platform_smp_prepare_cpus() if | ||
379 | * present != possible (e.g. physical hotplug). | ||
380 | */ | ||
381 | init_cpu_present(&cpu_possible_map); | ||
382 | |||
383 | /* | ||
373 | * Initialise the SCU if there are more than one CPU | 384 | * Initialise the SCU if there are more than one CPU |
374 | * and let them know where to start. | 385 | * and let them know where to start. |
375 | */ | 386 | */ |
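The smp.c hunks make a freshly booted secondary CPU wait until the migration code has marked it active before heading into the idle loop, and seed the present map from the possible map so that only physical-hotplug platforms need to override it. A minimal user-space model of the online/active handshake, using C11 atomics and a thread in place of the kernel's cpumasks and secondary CPU (all names below are invented):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool cpu_online_flag;	/* stands in for set_cpu_online()  */
static atomic_bool cpu_active_flag;	/* stands in for the active cpumask */

static void *secondary(void *unused)
{
	(void)unused;
	/* Like secondary_start_kernel(): publish "online", then spin
	 * until the boot side has marked this CPU "active". */
	atomic_store(&cpu_online_flag, true);
	while (!atomic_load(&cpu_active_flag))
		sched_yield();		/* stands in for cpu_relax() */
	printf("secondary: online and active, entering idle loop\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	/* Boot side: notice the CPU is online, then mark it active, as
	 * the CPU migration notifier does in the kernel. */
	while (!atomic_load(&cpu_online_flag))
		sched_yield();
	atomic_store(&cpu_active_flag, true);
	pthread_join(t, NULL);
	return 0;
}

Build with -pthread; the point of the ordering is that the secondary never enters the scheduler before migration is ready for it.
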
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index a1e757c3439b..79ed5e7f204a 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #define SCU_INVALIDATE 0x0c | 20 | #define SCU_INVALIDATE 0x0c |
21 | #define SCU_FPGA_REVISION 0x10 | 21 | #define SCU_FPGA_REVISION 0x10 |
22 | 22 | ||
23 | #ifdef CONFIG_SMP | ||
23 | /* | 24 | /* |
24 | * Get the number of CPU cores from the SCU configuration | 25 | * Get the number of CPU cores from the SCU configuration |
25 | */ | 26 | */ |
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base) | |||
50 | */ | 51 | */ |
51 | flush_cache_all(); | 52 | flush_cache_all(); |
52 | } | 53 | } |
54 | #endif | ||
53 | 55 | ||
54 | /* | 56 | /* |
55 | * Set the executing CPUs power mode as defined. This will be in | 57 | * Set the executing CPUs power mode as defined. This will be in |
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 60636f499cb3..2c277d40cee6 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c | |||
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void) | |||
115 | twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); | 115 | twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); |
116 | 116 | ||
117 | printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, | 117 | printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, |
118 | (twd_timer_rate / 1000000) % 100); | 118 | (twd_timer_rate / 10000) % 100); |
119 | } | 119 | } |
120 | } | 120 | } |
121 | 121 | ||
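The smp_twd.c change fixes the fractional part of the calibrated rate printout: the old expression reused the whole-MHz value, so the digits after the decimal point were just the MHz count modulo 100. Dividing by 10000 yields hundredths of a MHz. A standalone check with a made-up rate:

#include <stdio.h>

int main(void)
{
	unsigned long twd_timer_rate = 399360000UL;	/* 399.36 MHz, made up */

	/* old: fractional digits came from the whole-MHz value again */
	printf("old: %lu.%02luMHz\n", twd_timer_rate / 1000000,
	       (twd_timer_rate / 1000000) % 100);	/* prints 399.99MHz */
	/* new: hundredths of a MHz */
	printf("new: %lu.%02luMHz\n", twd_timer_rate / 1000000,
	       (twd_timer_rate / 10000) % 100);		/* prints 399.36MHz */
	return 0;
}
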
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c index f5cf660eefcc..30e302d33e0a 100644 --- a/arch/arm/kernel/tcm.c +++ b/arch/arm/kernel/tcm.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include "tcm.h" | 19 | #include "tcm.h" |
20 | 20 | ||
21 | static struct gen_pool *tcm_pool; | 21 | static struct gen_pool *tcm_pool; |
22 | static bool dtcm_present; | ||
23 | static bool itcm_present; | ||
22 | 24 | ||
23 | /* TCM section definitions from the linker */ | 25 | /* TCM section definitions from the linker */ |
24 | extern char __itcm_start, __sitcm_text, __eitcm_text; | 26 | extern char __itcm_start, __sitcm_text, __eitcm_text; |
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len) | |||
90 | } | 92 | } |
91 | EXPORT_SYMBOL(tcm_free); | 93 | EXPORT_SYMBOL(tcm_free); |
92 | 94 | ||
95 | bool tcm_dtcm_present(void) | ||
96 | { | ||
97 | return dtcm_present; | ||
98 | } | ||
99 | EXPORT_SYMBOL(tcm_dtcm_present); | ||
100 | |||
101 | bool tcm_itcm_present(void) | ||
102 | { | ||
103 | return itcm_present; | ||
104 | } | ||
105 | EXPORT_SYMBOL(tcm_itcm_present); | ||
106 | |||
93 | static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, | 107 | static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, |
94 | u32 *offset) | 108 | u32 *offset) |
95 | { | 109 | { |
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks, | |||
134 | (tcm_region & 1) ? "" : "not "); | 148 | (tcm_region & 1) ? "" : "not "); |
135 | } | 149 | } |
136 | 150 | ||
151 | /* Not much fun you can do with a size 0 bank */ | ||
152 | if (tcm_size == 0) | ||
153 | return 0; | ||
154 | |||
137 | /* Force move the TCM bank to where we want it, enable */ | 155 | /* Force move the TCM bank to where we want it, enable */ |
138 | tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; | 156 | tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1; |
139 | 157 | ||
@@ -165,12 +183,20 @@ void __init tcm_init(void) | |||
165 | u32 tcm_status = read_cpuid_tcmstatus(); | 183 | u32 tcm_status = read_cpuid_tcmstatus(); |
166 | u8 dtcm_banks = (tcm_status >> 16) & 0x03; | 184 | u8 dtcm_banks = (tcm_status >> 16) & 0x03; |
167 | u8 itcm_banks = (tcm_status & 0x03); | 185 | u8 itcm_banks = (tcm_status & 0x03); |
186 | size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data; | ||
187 | size_t itcm_code_sz = &__eitcm_text - &__sitcm_text; | ||
168 | char *start; | 188 | char *start; |
169 | char *end; | 189 | char *end; |
170 | char *ram; | 190 | char *ram; |
171 | int ret; | 191 | int ret; |
172 | int i; | 192 | int i; |
173 | 193 | ||
194 | /* Values greater than 2 for D/ITCM banks are "reserved" */ | ||
195 | if (dtcm_banks > 2) | ||
196 | dtcm_banks = 0; | ||
197 | if (itcm_banks > 2) | ||
198 | itcm_banks = 0; | ||
199 | |||
174 | /* Setup DTCM if present */ | 200 | /* Setup DTCM if present */ |
175 | if (dtcm_banks > 0) { | 201 | if (dtcm_banks > 0) { |
176 | for (i = 0; i < dtcm_banks; i++) { | 202 | for (i = 0; i < dtcm_banks; i++) { |
@@ -178,6 +204,13 @@ void __init tcm_init(void) | |||
178 | if (ret) | 204 | if (ret) |
179 | return; | 205 | return; |
180 | } | 206 | } |
207 | /* This means you compiled more code than fits into DTCM */ | ||
208 | if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) { | ||
209 | pr_info("CPU DTCM: %u bytes of code compiled to " | ||
210 | "DTCM but only %lu bytes of DTCM present\n", | ||
211 | dtcm_code_sz, (dtcm_end - DTCM_OFFSET)); | ||
212 | goto no_dtcm; | ||
213 | } | ||
181 | dtcm_res.end = dtcm_end - 1; | 214 | dtcm_res.end = dtcm_end - 1; |
182 | request_resource(&iomem_resource, &dtcm_res); | 215 | request_resource(&iomem_resource, &dtcm_res); |
183 | dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; | 216 | dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET; |
@@ -186,12 +219,16 @@ void __init tcm_init(void) | |||
186 | start = &__sdtcm_data; | 219 | start = &__sdtcm_data; |
187 | end = &__edtcm_data; | 220 | end = &__edtcm_data; |
188 | ram = &__dtcm_start; | 221 | ram = &__dtcm_start; |
189 | /* This means you compiled more code than fits into DTCM */ | 222 | memcpy(start, ram, dtcm_code_sz); |
190 | BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET)); | 223 | pr_debug("CPU DTCM: copied data from %p - %p\n", |
191 | memcpy(start, ram, (end-start)); | 224 | start, end); |
192 | pr_debug("CPU DTCM: copied data from %p - %p\n", start, end); | 225 | dtcm_present = true; |
226 | } else if (dtcm_code_sz) { | ||
227 | pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no " | ||
228 | "DTCM banks present in CPU\n", dtcm_code_sz); | ||
193 | } | 229 | } |
194 | 230 | ||
231 | no_dtcm: | ||
195 | /* Setup ITCM if present */ | 232 | /* Setup ITCM if present */ |
196 | if (itcm_banks > 0) { | 233 | if (itcm_banks > 0) { |
197 | for (i = 0; i < itcm_banks; i++) { | 234 | for (i = 0; i < itcm_banks; i++) { |
@@ -199,6 +236,13 @@ void __init tcm_init(void) | |||
199 | if (ret) | 236 | if (ret) |
200 | return; | 237 | return; |
201 | } | 238 | } |
239 | /* This means you compiled more code than fits into ITCM */ | ||
240 | if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) { | ||
241 | pr_info("CPU ITCM: %u bytes of code compiled to " | ||
242 | "ITCM but only %lu bytes of ITCM present\n", | ||
243 | itcm_code_sz, (itcm_end - ITCM_OFFSET)); | ||
244 | return; | ||
245 | } | ||
202 | itcm_res.end = itcm_end - 1; | 246 | itcm_res.end = itcm_end - 1; |
203 | request_resource(&iomem_resource, &itcm_res); | 247 | request_resource(&iomem_resource, &itcm_res); |
204 | itcm_iomap[0].length = itcm_end - ITCM_OFFSET; | 248 | itcm_iomap[0].length = itcm_end - ITCM_OFFSET; |
@@ -207,10 +251,13 @@ void __init tcm_init(void) | |||
207 | start = &__sitcm_text; | 251 | start = &__sitcm_text; |
208 | end = &__eitcm_text; | 252 | end = &__eitcm_text; |
209 | ram = &__itcm_start; | 253 | ram = &__itcm_start; |
210 | /* This means you compiled more code than fits into ITCM */ | 254 | memcpy(start, ram, itcm_code_sz); |
211 | BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); | 255 | pr_debug("CPU ITCM: copied code from %p - %p\n", |
212 | memcpy(start, ram, (end-start)); | 256 | start, end); |
213 | pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); | 257 | itcm_present = true; |
258 | } else if (itcm_code_sz) { | ||
259 | pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no " | ||
260 | "ITCM banks present in CPU\n", itcm_code_sz); | ||
214 | } | 261 | } |
215 | } | 262 | } |
216 | 263 | ||
@@ -221,7 +268,6 @@ void __init tcm_init(void) | |||
221 | */ | 268 | */ |
222 | static int __init setup_tcm_pool(void) | 269 | static int __init setup_tcm_pool(void) |
223 | { | 270 | { |
224 | u32 tcm_status = read_cpuid_tcmstatus(); | ||
225 | u32 dtcm_pool_start = (u32) &__edtcm_data; | 271 | u32 dtcm_pool_start = (u32) &__edtcm_data; |
226 | u32 itcm_pool_start = (u32) &__eitcm_text; | 272 | u32 itcm_pool_start = (u32) &__eitcm_text; |
227 | int ret; | 273 | int ret; |
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void) | |||
236 | pr_debug("Setting up TCM memory pool\n"); | 282 | pr_debug("Setting up TCM memory pool\n"); |
237 | 283 | ||
238 | /* Add the rest of DTCM to the TCM pool */ | 284 | /* Add the rest of DTCM to the TCM pool */ |
239 | if (tcm_status & (0x03 << 16)) { | 285 | if (dtcm_present) { |
240 | if (dtcm_pool_start < dtcm_end) { | 286 | if (dtcm_pool_start < dtcm_end) { |
241 | ret = gen_pool_add(tcm_pool, dtcm_pool_start, | 287 | ret = gen_pool_add(tcm_pool, dtcm_pool_start, |
242 | dtcm_end - dtcm_pool_start, -1); | 288 | dtcm_end - dtcm_pool_start, -1); |
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void) | |||
253 | } | 299 | } |
254 | 300 | ||
255 | /* Add the rest of ITCM to the TCM pool */ | 301 | /* Add the rest of ITCM to the TCM pool */ |
256 | if (tcm_status & 0x03) { | 302 | if (itcm_present) { |
257 | if (itcm_pool_start < itcm_end) { | 303 | if (itcm_pool_start < itcm_end) { |
258 | ret = gen_pool_add(tcm_pool, itcm_pool_start, | 304 | ret = gen_pool_add(tcm_pool, itcm_pool_start, |
259 | itcm_end - itcm_pool_start, -1); | 305 | itcm_end - itcm_pool_start, -1); |
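The tcm.c rework records DTCM/ITCM presence in flags, treats bank counts above 2 as reserved, skips zero-sized banks, and downgrades the old BUG_ON into a pr_info plus graceful bail-out when the code compiled for TCM does not fit the banks actually present. A user-space sketch of that size check (the bank size and compiled size below are invented):

#include <stdio.h>

#define DTCM_OFFSET	0x00000000u

int main(void)
{
	unsigned int dtcm_end = DTCM_OFFSET + 8192;	/* pretend 8 KiB bank      */
	size_t dtcm_code_sz = 16384;			/* pretend 16 KiB compiled */
	int dtcm_present = 0;

	/* Same shape as the reworked tcm_init(): report and bail out
	 * instead of BUG_ON() when the image does not fit the bank. */
	if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET))
		printf("CPU DTCM: %zu bytes compiled to DTCM but only %u bytes present\n",
		       dtcm_code_sz, dtcm_end - DTCM_OFFSET);
	else
		dtcm_present = 1;		/* the memcpy() would run here */

	printf("DTCM usable: %s\n", dtcm_present ? "yes" : "no");
	return 0;
}
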
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index e5287f21badc..bf977f8514f6 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4; | |||
38 | 38 | ||
39 | SECTIONS | 39 | SECTIONS |
40 | { | 40 | { |
41 | #ifdef CONFIG_XIP_KERNEL | ||
42 | . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); | ||
43 | #else | ||
44 | . = PAGE_OFFSET + TEXT_OFFSET; | ||
45 | #endif | ||
46 | |||
47 | .init : { /* Init code and data */ | ||
48 | _stext = .; | ||
49 | _sinittext = .; | ||
50 | HEAD_TEXT | ||
51 | INIT_TEXT | ||
52 | ARM_EXIT_KEEP(EXIT_TEXT) | ||
53 | _einittext = .; | ||
54 | ARM_CPU_DISCARD(PROC_INFO) | ||
55 | __arch_info_begin = .; | ||
56 | *(.arch.info.init) | ||
57 | __arch_info_end = .; | ||
58 | __tagtable_begin = .; | ||
59 | *(.taglist.init) | ||
60 | __tagtable_end = .; | ||
61 | #ifdef CONFIG_SMP_ON_UP | ||
62 | __smpalt_begin = .; | ||
63 | *(.alt.smp.init) | ||
64 | __smpalt_end = .; | ||
65 | #endif | ||
66 | |||
67 | __pv_table_begin = .; | ||
68 | *(.pv_table) | ||
69 | __pv_table_end = .; | ||
70 | |||
71 | INIT_SETUP(16) | ||
72 | |||
73 | INIT_CALLS | ||
74 | CON_INITCALL | ||
75 | SECURITY_INITCALL | ||
76 | INIT_RAM_FS | ||
77 | |||
78 | #ifndef CONFIG_XIP_KERNEL | ||
79 | __init_begin = _stext; | ||
80 | INIT_DATA | ||
81 | ARM_EXIT_KEEP(EXIT_DATA) | ||
82 | #endif | ||
83 | } | ||
84 | |||
85 | PERCPU_SECTION(32) | ||
86 | |||
87 | #ifndef CONFIG_XIP_KERNEL | ||
88 | . = ALIGN(PAGE_SIZE); | ||
89 | __init_end = .; | ||
90 | #endif | ||
91 | |||
92 | /* | 41 | /* |
93 | * unwind exit sections must be discarded before the rest of the | 42 | * unwind exit sections must be discarded before the rest of the |
94 | * unwind sections get included. | 43 | * unwind sections get included. |
@@ -106,10 +55,22 @@ SECTIONS | |||
106 | *(.fixup) | 55 | *(.fixup) |
107 | *(__ex_table) | 56 | *(__ex_table) |
108 | #endif | 57 | #endif |
58 | #ifndef CONFIG_SMP_ON_UP | ||
59 | *(.alt.smp.init) | ||
60 | #endif | ||
109 | } | 61 | } |
110 | 62 | ||
63 | #ifdef CONFIG_XIP_KERNEL | ||
64 | . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR); | ||
65 | #else | ||
66 | . = PAGE_OFFSET + TEXT_OFFSET; | ||
67 | #endif | ||
68 | .head.text : { | ||
69 | _text = .; | ||
70 | HEAD_TEXT | ||
71 | } | ||
111 | .text : { /* Real text segment */ | 72 | .text : { /* Real text segment */ |
112 | _text = .; /* Text and read-only data */ | 73 | _stext = .; /* Text and read-only data */ |
113 | __exception_text_start = .; | 74 | __exception_text_start = .; |
114 | *(.exception.text) | 75 | *(.exception.text) |
115 | __exception_text_end = .; | 76 | __exception_text_end = .; |
@@ -122,8 +83,6 @@ SECTIONS | |||
122 | *(.fixup) | 83 | *(.fixup) |
123 | #endif | 84 | #endif |
124 | *(.gnu.warning) | 85 | *(.gnu.warning) |
125 | *(.rodata) | ||
126 | *(.rodata.*) | ||
127 | *(.glue_7) | 86 | *(.glue_7) |
128 | *(.glue_7t) | 87 | *(.glue_7t) |
129 | . = ALIGN(4); | 88 | . = ALIGN(4); |
@@ -152,10 +111,63 @@ SECTIONS | |||
152 | 111 | ||
153 | _etext = .; /* End of text and rodata section */ | 112 | _etext = .; /* End of text and rodata section */ |
154 | 113 | ||
114 | #ifndef CONFIG_XIP_KERNEL | ||
115 | . = ALIGN(PAGE_SIZE); | ||
116 | __init_begin = .; | ||
117 | #endif | ||
118 | |||
119 | INIT_TEXT_SECTION(8) | ||
120 | .exit.text : { | ||
121 | ARM_EXIT_KEEP(EXIT_TEXT) | ||
122 | } | ||
123 | .init.proc.info : { | ||
124 | ARM_CPU_DISCARD(PROC_INFO) | ||
125 | } | ||
126 | .init.arch.info : { | ||
127 | __arch_info_begin = .; | ||
128 | *(.arch.info.init) | ||
129 | __arch_info_end = .; | ||
130 | } | ||
131 | .init.tagtable : { | ||
132 | __tagtable_begin = .; | ||
133 | *(.taglist.init) | ||
134 | __tagtable_end = .; | ||
135 | } | ||
136 | #ifdef CONFIG_SMP_ON_UP | ||
137 | .init.smpalt : { | ||
138 | __smpalt_begin = .; | ||
139 | *(.alt.smp.init) | ||
140 | __smpalt_end = .; | ||
141 | } | ||
142 | #endif | ||
143 | .init.pv_table : { | ||
144 | __pv_table_begin = .; | ||
145 | *(.pv_table) | ||
146 | __pv_table_end = .; | ||
147 | } | ||
148 | .init.data : { | ||
149 | #ifndef CONFIG_XIP_KERNEL | ||
150 | INIT_DATA | ||
151 | #endif | ||
152 | INIT_SETUP(16) | ||
153 | INIT_CALLS | ||
154 | CON_INITCALL | ||
155 | SECURITY_INITCALL | ||
156 | INIT_RAM_FS | ||
157 | } | ||
158 | #ifndef CONFIG_XIP_KERNEL | ||
159 | .exit.data : { | ||
160 | ARM_EXIT_KEEP(EXIT_DATA) | ||
161 | } | ||
162 | #endif | ||
163 | |||
164 | PERCPU_SECTION(32) | ||
165 | |||
155 | #ifdef CONFIG_XIP_KERNEL | 166 | #ifdef CONFIG_XIP_KERNEL |
156 | __data_loc = ALIGN(4); /* location in binary */ | 167 | __data_loc = ALIGN(4); /* location in binary */ |
157 | . = PAGE_OFFSET + TEXT_OFFSET; | 168 | . = PAGE_OFFSET + TEXT_OFFSET; |
158 | #else | 169 | #else |
170 | __init_end = .; | ||
159 | . = ALIGN(THREAD_SIZE); | 171 | . = ALIGN(THREAD_SIZE); |
160 | __data_loc = .; | 172 | __data_loc = .; |
161 | #endif | 173 | #endif |
@@ -270,12 +282,6 @@ SECTIONS | |||
270 | 282 | ||
271 | /* Default discards */ | 283 | /* Default discards */ |
272 | DISCARDS | 284 | DISCARDS |
273 | |||
274 | #ifndef CONFIG_SMP_ON_UP | ||
275 | /DISCARD/ : { | ||
276 | *(.alt.smp.init) | ||
277 | } | ||
278 | #endif | ||
279 | } | 285 | } |
280 | 286 | ||
281 | /* | 287 | /* |
diff --git a/arch/arm/mach-at91/at91cap9.c b/arch/arm/mach-at91/at91cap9.c index 17fae4a42ab5..f1013d08bb57 100644 --- a/arch/arm/mach-at91/at91cap9.c +++ b/arch/arm/mach-at91/at91cap9.c | |||
@@ -223,15 +223,15 @@ static struct clk *periph_clocks[] __initdata = { | |||
223 | }; | 223 | }; |
224 | 224 | ||
225 | static struct clk_lookup periph_clocks_lookups[] = { | 225 | static struct clk_lookup periph_clocks_lookups[] = { |
226 | CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), | 226 | CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), |
227 | CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), | 227 | CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), |
228 | CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), | 228 | CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), |
229 | CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), | 229 | CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), |
230 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), | 230 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), |
231 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), | 231 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), |
232 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk), | 232 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb_clk), |
233 | CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk), | 233 | CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), |
234 | CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk), | 234 | CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), |
235 | }; | 235 | }; |
236 | 236 | ||
237 | static struct clk_lookup usart_clocks_lookups[] = { | 237 | static struct clk_lookup usart_clocks_lookups[] = { |
diff --git a/arch/arm/mach-at91/at91cap9_devices.c b/arch/arm/mach-at91/at91cap9_devices.c index cd850ed6f335..dba0d8d8a4bd 100644 --- a/arch/arm/mach-at91/at91cap9_devices.c +++ b/arch/arm/mach-at91/at91cap9_devices.c | |||
@@ -1220,7 +1220,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1220 | { | 1220 | { |
1221 | if (portnr < ATMEL_MAX_UART) { | 1221 | if (portnr < ATMEL_MAX_UART) { |
1222 | atmel_default_console_device = at91_uarts[portnr]; | 1222 | atmel_default_console_device = at91_uarts[portnr]; |
1223 | at91cap9_set_console_clock(portnr); | 1223 | at91cap9_set_console_clock(at91_uarts[portnr]->id); |
1224 | } | 1224 | } |
1225 | } | 1225 | } |
1226 | 1226 | ||
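This hunk, and the matching ones in the other at91*_devices.c files below, pass the UART's hardware id to *_set_console_clock() instead of the board's port number, since the clock table is indexed by hardware id and the two numberings can differ. A toy illustration (all ids, names and the clock table are invented):

#include <stdio.h>

struct uart_dev {
	int id;				/* SoC hardware id = clock index */
	const char *name;
};

static struct uart_dev dbgu   = { 0, "dbgu"   };
static struct uart_dev usart2 = { 3, "usart2" };

/* Board maps port 0 to USART2, port 1 to the DBGU (invented layout). */
static struct uart_dev *at91_uarts[] = { &usart2, &dbgu };

static const char *clk_by_hw_id[] = {
	"mck", "usart0_clk", "usart1_clk", "usart2_clk"
};

int main(void)
{
	unsigned int portnr = 0;

	printf("old: port %u -> %s (wrong clock)\n",
	       portnr, clk_by_hw_id[portnr]);
	printf("new: port %u -> %s\n",
	       portnr, clk_by_hw_id[at91_uarts[portnr]->id]);
	return 0;
}
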
diff --git a/arch/arm/mach-at91/at91rm9200.c b/arch/arm/mach-at91/at91rm9200.c index b228ce9e21a1..83a1a3fee554 100644 --- a/arch/arm/mach-at91/at91rm9200.c +++ b/arch/arm/mach-at91/at91rm9200.c | |||
@@ -199,9 +199,9 @@ static struct clk_lookup periph_clocks_lookups[] = { | |||
199 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk), | 199 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk), |
200 | CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk), | 200 | CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk), |
201 | CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk), | 201 | CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk), |
202 | CLKDEV_CON_DEV_ID("ssc", "ssc.0", &ssc0_clk), | 202 | CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc0_clk), |
203 | CLKDEV_CON_DEV_ID("ssc", "ssc.1", &ssc1_clk), | 203 | CLKDEV_CON_DEV_ID("pclk", "ssc.1", &ssc1_clk), |
204 | CLKDEV_CON_DEV_ID("ssc", "ssc.2", &ssc2_clk), | 204 | CLKDEV_CON_DEV_ID("pclk", "ssc.2", &ssc2_clk), |
205 | }; | 205 | }; |
206 | 206 | ||
207 | static struct clk_lookup usart_clocks_lookups[] = { | 207 | static struct clk_lookup usart_clocks_lookups[] = { |
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index a0ba475be04c..7227755ffec6 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c | |||
@@ -1135,7 +1135,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1135 | { | 1135 | { |
1136 | if (portnr < ATMEL_MAX_UART) { | 1136 | if (portnr < ATMEL_MAX_UART) { |
1137 | atmel_default_console_device = at91_uarts[portnr]; | 1137 | atmel_default_console_device = at91_uarts[portnr]; |
1138 | at91rm9200_set_console_clock(portnr); | 1138 | at91rm9200_set_console_clock(at91_uarts[portnr]->id); |
1139 | } | 1139 | } |
1140 | } | 1140 | } |
1141 | 1141 | ||
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 1fdeb9058a76..39f81f47b4ba 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c | |||
@@ -1173,7 +1173,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1173 | { | 1173 | { |
1174 | if (portnr < ATMEL_MAX_UART) { | 1174 | if (portnr < ATMEL_MAX_UART) { |
1175 | atmel_default_console_device = at91_uarts[portnr]; | 1175 | atmel_default_console_device = at91_uarts[portnr]; |
1176 | at91sam9260_set_console_clock(portnr); | 1176 | at91sam9260_set_console_clock(at91_uarts[portnr]->id); |
1177 | } | 1177 | } |
1178 | } | 1178 | } |
1179 | 1179 | ||
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 3eb4538fceeb..5004bf0a05f2 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c | |||
@@ -1013,7 +1013,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1013 | { | 1013 | { |
1014 | if (portnr < ATMEL_MAX_UART) { | 1014 | if (portnr < ATMEL_MAX_UART) { |
1015 | atmel_default_console_device = at91_uarts[portnr]; | 1015 | atmel_default_console_device = at91_uarts[portnr]; |
1016 | at91sam9261_set_console_clock(portnr); | 1016 | at91sam9261_set_console_clock(at91_uarts[portnr]->id); |
1017 | } | 1017 | } |
1018 | } | 1018 | } |
1019 | 1019 | ||
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index ffe081b77ed0..a050f41fc860 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c | |||
@@ -1395,7 +1395,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1395 | { | 1395 | { |
1396 | if (portnr < ATMEL_MAX_UART) { | 1396 | if (portnr < ATMEL_MAX_UART) { |
1397 | atmel_default_console_device = at91_uarts[portnr]; | 1397 | atmel_default_console_device = at91_uarts[portnr]; |
1398 | at91sam9263_set_console_clock(portnr); | 1398 | at91sam9263_set_console_clock(at91_uarts[portnr]->id); |
1399 | } | 1399 | } |
1400 | } | 1400 | } |
1401 | 1401 | ||
diff --git a/arch/arm/mach-at91/at91sam9g45.c b/arch/arm/mach-at91/at91sam9g45.c index 2bb6ff9af1c7..11e214121b23 100644 --- a/arch/arm/mach-at91/at91sam9g45.c +++ b/arch/arm/mach-at91/at91sam9g45.c | |||
@@ -217,11 +217,11 @@ static struct clk *periph_clocks[] __initdata = { | |||
217 | static struct clk_lookup periph_clocks_lookups[] = { | 217 | static struct clk_lookup periph_clocks_lookups[] = { |
218 | /* One additional fake clock for ohci */ | 218 | /* One additional fake clock for ohci */ |
219 | CLKDEV_CON_ID("ohci_clk", &uhphs_clk), | 219 | CLKDEV_CON_ID("ohci_clk", &uhphs_clk), |
220 | CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci.0", &uhphs_clk), | 220 | CLKDEV_CON_DEV_ID("ehci_clk", "atmel-ehci", &uhphs_clk), |
221 | CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), | 221 | CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), |
222 | CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), | 222 | CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), |
223 | CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.0", &mmc0_clk), | 223 | CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.0", &mmc0_clk), |
224 | CLKDEV_CON_DEV_ID("mci_clk", "at91_mci.1", &mmc1_clk), | 224 | CLKDEV_CON_DEV_ID("mci_clk", "atmel_mci.1", &mmc1_clk), |
225 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), | 225 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.0", &spi0_clk), |
226 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), | 226 | CLKDEV_CON_DEV_ID("spi_clk", "atmel_spi.1", &spi1_clk), |
227 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk), | 227 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tcb0_clk), |
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c index 05674865bc21..600bffb01edb 100644 --- a/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/arch/arm/mach-at91/at91sam9g45_devices.c | |||
@@ -1550,7 +1550,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1550 | { | 1550 | { |
1551 | if (portnr < ATMEL_MAX_UART) { | 1551 | if (portnr < ATMEL_MAX_UART) { |
1552 | atmel_default_console_device = at91_uarts[portnr]; | 1552 | atmel_default_console_device = at91_uarts[portnr]; |
1553 | at91sam9g45_set_console_clock(portnr); | 1553 | at91sam9g45_set_console_clock(at91_uarts[portnr]->id); |
1554 | } | 1554 | } |
1555 | } | 1555 | } |
1556 | 1556 | ||
diff --git a/arch/arm/mach-at91/at91sam9rl.c b/arch/arm/mach-at91/at91sam9rl.c index 1a40f16b66c8..29dff18ed130 100644 --- a/arch/arm/mach-at91/at91sam9rl.c +++ b/arch/arm/mach-at91/at91sam9rl.c | |||
@@ -191,8 +191,8 @@ static struct clk *periph_clocks[] __initdata = { | |||
191 | }; | 191 | }; |
192 | 192 | ||
193 | static struct clk_lookup periph_clocks_lookups[] = { | 193 | static struct clk_lookup periph_clocks_lookups[] = { |
194 | CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc.0", &utmi_clk), | 194 | CLKDEV_CON_DEV_ID("hclk", "atmel_usba_udc", &utmi_clk), |
195 | CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc.0", &udphs_clk), | 195 | CLKDEV_CON_DEV_ID("pclk", "atmel_usba_udc", &udphs_clk), |
196 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), | 196 | CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk), |
197 | CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), | 197 | CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk), |
198 | CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), | 198 | CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk), |
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index c296045f2b6a..aacb19dc9225 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c | |||
@@ -1168,7 +1168,7 @@ void __init at91_set_serial_console(unsigned portnr) | |||
1168 | { | 1168 | { |
1169 | if (portnr < ATMEL_MAX_UART) { | 1169 | if (portnr < ATMEL_MAX_UART) { |
1170 | atmel_default_console_device = at91_uarts[portnr]; | 1170 | atmel_default_console_device = at91_uarts[portnr]; |
1171 | at91sam9rl_set_console_clock(portnr); | 1171 | at91sam9rl_set_console_clock(at91_uarts[portnr]->id); |
1172 | } | 1172 | } |
1173 | } | 1173 | } |
1174 | 1174 | ||
diff --git a/arch/arm/mach-at91/board-cap9adk.c b/arch/arm/mach-at91/board-cap9adk.c index 1904fdf87613..cdb65d483250 100644 --- a/arch/arm/mach-at91/board-cap9adk.c +++ b/arch/arm/mach-at91/board-cap9adk.c | |||
@@ -215,7 +215,7 @@ static void __init cap9adk_add_device_nand(void) | |||
215 | csa = at91_sys_read(AT91_MATRIX_EBICSA); | 215 | csa = at91_sys_read(AT91_MATRIX_EBICSA); |
216 | at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V); | 216 | at91_sys_write(AT91_MATRIX_EBICSA, csa | AT91_MATRIX_EBI_VDDIOMSEL_3_3V); |
217 | 217 | ||
218 | cap9adk_nand_data.bus_width_16 = !board_have_nand_8bit(); | 218 | cap9adk_nand_data.bus_width_16 = board_have_nand_16bit(); |
219 | /* setup bus-width (8 or 16) */ | 219 | /* setup bus-width (8 or 16) */ |
220 | if (cap9adk_nand_data.bus_width_16) | 220 | if (cap9adk_nand_data.bus_width_16) |
221 | cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16; | 221 | cap9adk_nand_smc_config.mode |= AT91_SMC_DBW_16; |
diff --git a/arch/arm/mach-at91/board-sam9260ek.c b/arch/arm/mach-at91/board-sam9260ek.c index d600dc123227..5c240743c5b7 100644 --- a/arch/arm/mach-at91/board-sam9260ek.c +++ b/arch/arm/mach-at91/board-sam9260ek.c | |||
@@ -214,7 +214,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { | |||
214 | 214 | ||
215 | static void __init ek_add_device_nand(void) | 215 | static void __init ek_add_device_nand(void) |
216 | { | 216 | { |
217 | ek_nand_data.bus_width_16 = !board_have_nand_8bit(); | 217 | ek_nand_data.bus_width_16 = board_have_nand_16bit(); |
218 | /* setup bus-width (8 or 16) */ | 218 | /* setup bus-width (8 or 16) */ |
219 | if (ek_nand_data.bus_width_16) | 219 | if (ek_nand_data.bus_width_16) |
220 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; | 220 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; |
diff --git a/arch/arm/mach-at91/board-sam9261ek.c b/arch/arm/mach-at91/board-sam9261ek.c index f897f84d43dc..b60c22b6e241 100644 --- a/arch/arm/mach-at91/board-sam9261ek.c +++ b/arch/arm/mach-at91/board-sam9261ek.c | |||
@@ -220,7 +220,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { | |||
220 | 220 | ||
221 | static void __init ek_add_device_nand(void) | 221 | static void __init ek_add_device_nand(void) |
222 | { | 222 | { |
223 | ek_nand_data.bus_width_16 = !board_have_nand_8bit(); | 223 | ek_nand_data.bus_width_16 = board_have_nand_16bit(); |
224 | /* setup bus-width (8 or 16) */ | 224 | /* setup bus-width (8 or 16) */ |
225 | if (ek_nand_data.bus_width_16) | 225 | if (ek_nand_data.bus_width_16) |
226 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; | 226 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; |
diff --git a/arch/arm/mach-at91/board-sam9263ek.c b/arch/arm/mach-at91/board-sam9263ek.c index 605b26f40a4c..9bbdc92ea194 100644 --- a/arch/arm/mach-at91/board-sam9263ek.c +++ b/arch/arm/mach-at91/board-sam9263ek.c | |||
@@ -221,7 +221,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { | |||
221 | 221 | ||
222 | static void __init ek_add_device_nand(void) | 222 | static void __init ek_add_device_nand(void) |
223 | { | 223 | { |
224 | ek_nand_data.bus_width_16 = !board_have_nand_8bit(); | 224 | ek_nand_data.bus_width_16 = board_have_nand_16bit(); |
225 | /* setup bus-width (8 or 16) */ | 225 | /* setup bus-width (8 or 16) */ |
226 | if (ek_nand_data.bus_width_16) | 226 | if (ek_nand_data.bus_width_16) |
227 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; | 227 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; |
diff --git a/arch/arm/mach-at91/board-sam9g20ek.c b/arch/arm/mach-at91/board-sam9g20ek.c index 7624cf0d006b..1325a50101a8 100644 --- a/arch/arm/mach-at91/board-sam9g20ek.c +++ b/arch/arm/mach-at91/board-sam9g20ek.c | |||
@@ -198,7 +198,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { | |||
198 | 198 | ||
199 | static void __init ek_add_device_nand(void) | 199 | static void __init ek_add_device_nand(void) |
200 | { | 200 | { |
201 | ek_nand_data.bus_width_16 = !board_have_nand_8bit(); | 201 | ek_nand_data.bus_width_16 = board_have_nand_16bit(); |
202 | /* setup bus-width (8 or 16) */ | 202 | /* setup bus-width (8 or 16) */ |
203 | if (ek_nand_data.bus_width_16) | 203 | if (ek_nand_data.bus_width_16) |
204 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; | 204 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; |
diff --git a/arch/arm/mach-at91/board-sam9m10g45ek.c b/arch/arm/mach-at91/board-sam9m10g45ek.c index 063c95d0e8f0..33eaa135f248 100644 --- a/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/arch/arm/mach-at91/board-sam9m10g45ek.c | |||
@@ -178,7 +178,7 @@ static struct sam9_smc_config __initdata ek_nand_smc_config = { | |||
178 | 178 | ||
179 | static void __init ek_add_device_nand(void) | 179 | static void __init ek_add_device_nand(void) |
180 | { | 180 | { |
181 | ek_nand_data.bus_width_16 = !board_have_nand_8bit(); | 181 | ek_nand_data.bus_width_16 = board_have_nand_16bit(); |
182 | /* setup bus-width (8 or 16) */ | 182 | /* setup bus-width (8 or 16) */ |
183 | if (ek_nand_data.bus_width_16) | 183 | if (ek_nand_data.bus_width_16) |
184 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; | 184 | ek_nand_smc_config.mode |= AT91_SMC_DBW_16; |
diff --git a/arch/arm/mach-at91/include/mach/system_rev.h b/arch/arm/mach-at91/include/mach/system_rev.h index b855ee75f72c..8f4866045b41 100644 --- a/arch/arm/mach-at91/include/mach/system_rev.h +++ b/arch/arm/mach-at91/include/mach/system_rev.h | |||
@@ -13,13 +13,13 @@ | |||
13 | * the 16-31 bit are reserved for at91 generic information | 13 | * the 16-31 bit are reserved for at91 generic information |
14 | * | 14 | * |
15 | * bit 31: | 15 | * bit 31: |
16 | * 0 => nand 16 bit | 16 | * 0 => nand 8 bit |
17 | * 1 => nand 8 bit | 17 | * 1 => nand 16 bit |
18 | */ | 18 | */ |
19 | #define BOARD_HAVE_NAND_8BIT (1 << 31) | 19 | #define BOARD_HAVE_NAND_16BIT (1 << 31) |
20 | static int inline board_have_nand_8bit(void) | 20 | static inline int board_have_nand_16bit(void) |
21 | { | 21 | { |
22 | return system_rev & BOARD_HAVE_NAND_8BIT; | 22 | return system_rev & BOARD_HAVE_NAND_16BIT; |
23 | } | 23 | } |
24 | 24 | ||
25 | #endif /* __ARCH_SYSTEM_REV_H__ */ | 25 | #endif /* __ARCH_SYSTEM_REV_H__ */ |
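system_rev.h now encodes bit 31 as "NAND is 16-bit", which is why the board files above switch from !board_have_nand_8bit() to board_have_nand_16bit(). A standalone sketch of the helper with a faked system_rev value:

#include <stdio.h>

#define BOARD_HAVE_NAND_16BIT	(1u << 31)

/* In the kernel, system_rev comes from the boot tags; faked here. */
static unsigned int system_rev = BOARD_HAVE_NAND_16BIT;

static inline int board_have_nand_16bit(void)
{
	return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
}

int main(void)
{
	printf("NAND bus width: %d bits\n", board_have_nand_16bit() ? 16 : 8);
	return 0;
}
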
diff --git a/arch/arm/mach-bcmring/include/mach/entry-macro.S b/arch/arm/mach-bcmring/include/mach/entry-macro.S index 7d393ca010ac..94c950d783ba 100644 --- a/arch/arm/mach-bcmring/include/mach/entry-macro.S +++ b/arch/arm/mach-bcmring/include/mach/entry-macro.S | |||
@@ -80,7 +80,3 @@ | |||
80 | 80 | ||
81 | .macro arch_ret_to_user, tmp1, tmp2 | 81 | .macro arch_ret_to_user, tmp1, tmp2 |
82 | .endm | 82 | .endm |
83 | |||
84 | .macro irq_prio_table | ||
85 | .endm | ||
86 | |||
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c index c67f684ee3e5..09a87e61ffcf 100644 --- a/arch/arm/mach-davinci/board-dm365-evm.c +++ b/arch/arm/mach-davinci/board-dm365-evm.c | |||
@@ -520,7 +520,7 @@ fail: | |||
520 | */ | 520 | */ |
521 | if (have_imager()) { | 521 | if (have_imager()) { |
522 | label = "HD imager"; | 522 | label = "HD imager"; |
523 | mux |= 1; | 523 | mux |= 2; |
524 | 524 | ||
525 | /* externally mux MMC1/ENET/AIC33 to imager */ | 525 | /* externally mux MMC1/ENET/AIC33 to imager */ |
526 | mux |= BIT(6) | BIT(5) | BIT(3); | 526 | mux |= BIT(6) | BIT(5) | BIT(3); |
@@ -540,7 +540,7 @@ fail: | |||
540 | resets &= ~BIT(1); | 540 | resets &= ~BIT(1); |
541 | 541 | ||
542 | if (have_tvp7002()) { | 542 | if (have_tvp7002()) { |
543 | mux |= 2; | 543 | mux |= 1; |
544 | resets &= ~BIT(2); | 544 | resets &= ~BIT(2); |
545 | label = "tvp7002 HD"; | 545 | label = "tvp7002 HD"; |
546 | } else { | 546 | } else { |
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c index e7221398e5af..cafbe13a82a5 100644 --- a/arch/arm/mach-davinci/gpio.c +++ b/arch/arm/mach-davinci/gpio.c | |||
@@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
254 | { | 254 | { |
255 | struct davinci_gpio_regs __iomem *g; | 255 | struct davinci_gpio_regs __iomem *g; |
256 | u32 mask = 0xffff; | 256 | u32 mask = 0xffff; |
257 | struct davinci_gpio_controller *d; | ||
257 | 258 | ||
258 | g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc); | 259 | d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc); |
260 | g = (struct davinci_gpio_regs __iomem *)d->regs; | ||
259 | 261 | ||
260 | /* we only care about one bank */ | 262 | /* we only care about one bank */ |
261 | if (irq & 1) | 263 | if (irq & 1) |
@@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc) | |||
274 | if (!status) | 276 | if (!status) |
275 | break; | 277 | break; |
276 | __raw_writel(status, &g->intstat); | 278 | __raw_writel(status, &g->intstat); |
277 | if (irq & 1) | ||
278 | status >>= 16; | ||
279 | 279 | ||
280 | /* now demux them to the right lowlevel handler */ | 280 | /* now demux them to the right lowlevel handler */ |
281 | n = (int)irq_get_handler_data(irq); | 281 | n = d->irq_base; |
282 | if (irq & 1) { | ||
283 | n += 16; | ||
284 | status >>= 16; | ||
285 | } | ||
286 | |||
282 | while (status) { | 287 | while (status) { |
283 | res = ffs(status); | 288 | res = ffs(status); |
284 | n += res; | 289 | n += res; |
@@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void) | |||
424 | 429 | ||
425 | /* set up all irqs in this bank */ | 430 | /* set up all irqs in this bank */ |
426 | irq_set_chained_handler(bank_irq, gpio_irq_handler); | 431 | irq_set_chained_handler(bank_irq, gpio_irq_handler); |
427 | irq_set_handler_data(bank_irq, (__force void *)g); | 432 | |
433 | /* | ||
434 | * Each chip handles 32 gpios, and each irq bank consists of 16 | ||
435 | * gpio irqs. Pass the irq bank's corresponding controller to | ||
436 | * the chained irq handler. | ||
437 | */ | ||
438 | irq_set_handler_data(bank_irq, &chips[gpio / 32]); | ||
428 | 439 | ||
429 | for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { | 440 | for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { |
430 | irq_set_chip(irq, &gpio_irqchip); | 441 | irq_set_chip(irq, &gpio_irqchip); |
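The davinci gpio.c change hands the chained handler its controller instead of a raw register pointer, and derives the demuxed IRQ number from the controller's irq_base, adding 16 for odd bank IRQs, which cover the upper half of the 32-bit status word. A user-space model of that demux loop (irq_base, bank_irq and the status value are invented):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int irq_base = 64;		/* first GPIO IRQ of this chip */
	unsigned int bank_irq = 7;		/* odd bank IRQ => upper 16 GPIOs */
	unsigned int status = 0x00050000;	/* GPIOs 16 and 18 pending */
	unsigned int n = irq_base;

	if (bank_irq & 1) {
		n += 16;
		status >>= 16;
	}
	/* Same walk as gpio_irq_handler(): peel off pending bits lowest
	 * first and hand each one to its per-GPIO IRQ. */
	while (status) {
		int res = ffs(status);

		n += res;
		printf("demux to IRQ %u (gpio %u of this chip)\n",
		       n - 1, n - 1 - irq_base);
		status >>= res;
	}
	return 0;
}
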
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S index fbdebc7cb409..e14c0dc0e12c 100644 --- a/arch/arm/mach-davinci/include/mach/entry-macro.S +++ b/arch/arm/mach-davinci/include/mach/entry-macro.S | |||
@@ -46,6 +46,3 @@ | |||
46 | #endif | 46 | #endif |
47 | 1002: | 47 | 1002: |
48 | .endm | 48 | .endm |
49 | |||
50 | .macro irq_prio_table | ||
51 | .endm | ||
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c index bfe68ec4e1a6..952dc126c390 100644 --- a/arch/arm/mach-davinci/irq.c +++ b/arch/arm/mach-davinci/irq.c | |||
@@ -52,8 +52,14 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num) | |||
52 | struct irq_chip_type *ct; | 52 | struct irq_chip_type *ct; |
53 | 53 | ||
54 | gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); | 54 | gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); |
55 | if (!gc) { | ||
56 | pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", | ||
57 | __func__, irq_start); | ||
58 | return; | ||
59 | } | ||
60 | |||
55 | ct = gc->chip_types; | 61 | ct = gc->chip_types; |
56 | ct->chip.irq_ack = irq_gc_ack; | 62 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
57 | ct->chip.irq_mask = irq_gc_mask_clr_bit; | 63 | ct->chip.irq_mask = irq_gc_mask_clr_bit; |
58 | ct->chip.irq_unmask = irq_gc_mask_set_bit; | 64 | ct->chip.irq_unmask = irq_gc_mask_set_bit; |
59 | 65 | ||
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c index 1d4b65fd673e..6659a0d137a3 100644 --- a/arch/arm/mach-ep93xx/core.c +++ b/arch/arm/mach-ep93xx/core.c | |||
@@ -251,9 +251,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev, | |||
251 | unsigned int mcr; | 251 | unsigned int mcr; |
252 | 252 | ||
253 | mcr = 0; | 253 | mcr = 0; |
254 | if (!(mctrl & TIOCM_RTS)) | 254 | if (mctrl & TIOCM_RTS) |
255 | mcr |= 2; | 255 | mcr |= 2; |
256 | if (!(mctrl & TIOCM_DTR)) | 256 | if (mctrl & TIOCM_DTR) |
257 | mcr |= 1; | 257 | mcr |= 1; |
258 | 258 | ||
259 | __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); | 259 | __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); |
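The ep93xx fix inverts the RTS/DTR sense: the UART MCR bits are now set when the corresponding modem-control lines are asserted rather than deasserted. A small sketch of the corrected mapping, using the standard TIOCM bit values:

#include <stdio.h>

#define TIOCM_DTR	0x002
#define TIOCM_RTS	0x004

/* Corrected mapping: MCR bits set when the lines are asserted. */
static unsigned int mctrl_to_mcr(unsigned int mctrl)
{
	unsigned int mcr = 0;

	if (mctrl & TIOCM_RTS)
		mcr |= 2;
	if (mctrl & TIOCM_DTR)
		mcr |= 1;
	return mcr;
}

int main(void)
{
	printf("RTS+DTR asserted -> MCR %#x\n",
	       mctrl_to_mcr(TIOCM_RTS | TIOCM_DTR));
	printf("lines deasserted -> MCR %#x\n", mctrl_to_mcr(0));
	return 0;
}
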
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c index 9babe4473e88..bfd621460abf 100644 --- a/arch/arm/mach-exynos4/cpu.c +++ b/arch/arm/mach-exynos4/cpu.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <plat/sdhci.h> | 23 | #include <plat/sdhci.h> |
24 | #include <plat/devs.h> | 24 | #include <plat/devs.h> |
25 | #include <plat/fimc-core.h> | 25 | #include <plat/fimc-core.h> |
26 | #include <plat/iic-core.h> | ||
26 | 27 | ||
27 | #include <mach/regs-irq.h> | 28 | #include <mach/regs-irq.h> |
28 | 29 | ||
@@ -132,6 +133,11 @@ void __init exynos4_map_io(void) | |||
132 | s3c_fimc_setname(1, "exynos4-fimc"); | 133 | s3c_fimc_setname(1, "exynos4-fimc"); |
133 | s3c_fimc_setname(2, "exynos4-fimc"); | 134 | s3c_fimc_setname(2, "exynos4-fimc"); |
134 | s3c_fimc_setname(3, "exynos4-fimc"); | 135 | s3c_fimc_setname(3, "exynos4-fimc"); |
136 | |||
137 | /* The I2C bus controllers are directly compatible with s3c2440 */ | ||
138 | s3c_i2c0_setname("s3c2440-i2c"); | ||
139 | s3c_i2c1_setname("s3c2440-i2c"); | ||
140 | s3c_i2c2_setname("s3c2440-i2c"); | ||
135 | } | 141 | } |
136 | 142 | ||
137 | void __init exynos4_init_clocks(int xtal) | 143 | void __init exynos4_init_clocks(int xtal) |
diff --git a/arch/arm/mach-exynos4/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c index 1eed5f9f7bd3..983069a53239 100644 --- a/arch/arm/mach-exynos4/dev-audio.c +++ b/arch/arm/mach-exynos4/dev-audio.c | |||
@@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = { | |||
330 | 330 | ||
331 | static int exynos4_spdif_cfg_gpio(struct platform_device *pdev) | 331 | static int exynos4_spdif_cfg_gpio(struct platform_device *pdev) |
332 | { | 332 | { |
333 | s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3)); | 333 | s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4)); |
334 | 334 | ||
335 | return 0; | 335 | return 0; |
336 | } | 336 | } |
diff --git a/arch/arm/mach-exynos4/headsmp.S b/arch/arm/mach-exynos4/headsmp.S index 6c6cfc50c46b..3cdeb3647542 100644 --- a/arch/arm/mach-exynos4/headsmp.S +++ b/arch/arm/mach-exynos4/headsmp.S | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | 15 | ||
16 | __INIT | 16 | __CPUINIT |
17 | 17 | ||
18 | /* | 18 | /* |
19 | * exynos4 specific entry point for secondary CPUs. This provides | 19 | * exynos4 specific entry point for secondary CPUs. This provides |
diff --git a/arch/arm/mach-exynos4/init.c b/arch/arm/mach-exynos4/init.c index cf91f50e43ab..a8a83e3881a4 100644 --- a/arch/arm/mach-exynos4/init.c +++ b/arch/arm/mach-exynos4/init.c | |||
@@ -35,6 +35,7 @@ void __init exynos4_common_init_uarts(struct s3c2410_uartcfg *cfg, int no) | |||
35 | tcfg->clocks = exynos4_serial_clocks; | 35 | tcfg->clocks = exynos4_serial_clocks; |
36 | tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks); | 36 | tcfg->clocks_size = ARRAY_SIZE(exynos4_serial_clocks); |
37 | } | 37 | } |
38 | tcfg->flags |= NO_NEED_CHECK_CLKSRC; | ||
38 | } | 39 | } |
39 | 40 | ||
40 | s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); | 41 | s3c24xx_init_uartdevs("s5pv210-uart", s5p_uart_resources, cfg, no); |
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c index 152676471b67..edd814110da8 100644 --- a/arch/arm/mach-exynos4/mach-smdkv310.c +++ b/arch/arm/mach-exynos4/mach-smdkv310.c | |||
@@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = { | |||
78 | }; | 78 | }; |
79 | 79 | ||
80 | static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = { | 80 | static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = { |
81 | .cd_type = S3C_SDHCI_CD_GPIO, | 81 | .cd_type = S3C_SDHCI_CD_INTERNAL, |
82 | .ext_cd_gpio = EXYNOS4_GPK0(2), | ||
83 | .ext_cd_gpio_invert = 1, | ||
84 | .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, | 82 | .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, |
85 | #ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT | 83 | #ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT |
86 | .max_width = 8, | 84 | .max_width = 8, |
@@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = { | |||
96 | }; | 94 | }; |
97 | 95 | ||
98 | static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = { | 96 | static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = { |
99 | .cd_type = S3C_SDHCI_CD_GPIO, | 97 | .cd_type = S3C_SDHCI_CD_INTERNAL, |
100 | .ext_cd_gpio = EXYNOS4_GPK2(2), | ||
101 | .ext_cd_gpio_invert = 1, | ||
102 | .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, | 98 | .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, |
103 | #ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT | 99 | #ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT |
104 | .max_width = 8, | 100 | .max_width = 8, |
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c index c5e65a02be8d..b68d5bdf04cf 100644 --- a/arch/arm/mach-exynos4/platsmp.c +++ b/arch/arm/mach-exynos4/platsmp.c | |||
@@ -154,14 +154,6 @@ void __init smp_init_cpus(void) | |||
154 | 154 | ||
155 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 155 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
156 | { | 156 | { |
157 | int i; | ||
158 | |||
159 | /* | ||
160 | * Initialise the present map, which describes the set of CPUs | ||
161 | * actually populated at the present time. | ||
162 | */ | ||
163 | for (i = 0; i < max_cpus; i++) | ||
164 | set_cpu_present(i, true); | ||
165 | 157 | ||
166 | scu_enable(scu_base_addr()); | 158 | scu_enable(scu_base_addr()); |
167 | 159 | ||
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c index 8755ca8dd48d..533c28f758ca 100644 --- a/arch/arm/mach-exynos4/pm.c +++ b/arch/arm/mach-exynos4/pm.c | |||
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = { | |||
280 | SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), | 280 | SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), |
281 | }; | 281 | }; |
282 | 282 | ||
283 | void exynos4_cpu_suspend(void) | 283 | static int exynos4_cpu_suspend(unsigned long arg) |
284 | { | 284 | { |
285 | unsigned long tmp; | 285 | unsigned long tmp; |
286 | unsigned long mask = 0xFFFFFFFF; | 286 | unsigned long mask = 0xFFFFFFFF; |
diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S index 6b62425417a6..0984078f1eba 100644 --- a/arch/arm/mach-exynos4/sleep.S +++ b/arch/arm/mach-exynos4/sleep.S | |||
@@ -33,28 +33,6 @@ | |||
33 | .text | 33 | .text |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * s3c_cpu_save | ||
37 | * | ||
38 | * entry: | ||
39 | * r1 = v:p offset | ||
40 | */ | ||
41 | |||
42 | ENTRY(s3c_cpu_save) | ||
43 | |||
44 | stmfd sp!, { r3 - r12, lr } | ||
45 | ldr r3, =resume_with_mmu | ||
46 | bl cpu_suspend | ||
47 | |||
48 | ldr r0, =pm_cpu_sleep | ||
49 | ldr r0, [ r0 ] | ||
50 | mov pc, r0 | ||
51 | |||
52 | resume_with_mmu: | ||
53 | ldmfd sp!, { r3 - r12, pc } | ||
54 | |||
55 | .ltorg | ||
56 | |||
57 | /* | ||
58 | * sleep magic, to allow the bootloader to check for a valid | 36 | * sleep magic, to allow the bootloader to check for a valid |
59 | * image to resume to. Must be the first word before the | 37 | * image to resume to. Must be the first word before the |
60 | * s3c_cpu_resume entry. | 38 | * s3c_cpu_resume entry. |
diff --git a/arch/arm/mach-h720x/Kconfig b/arch/arm/mach-h720x/Kconfig index 9b6982efbd22..abf356c02343 100644 --- a/arch/arm/mach-h720x/Kconfig +++ b/arch/arm/mach-h720x/Kconfig | |||
@@ -6,12 +6,14 @@ config ARCH_H7201 | |||
6 | bool "gms30c7201" | 6 | bool "gms30c7201" |
7 | depends on ARCH_H720X | 7 | depends on ARCH_H720X |
8 | select CPU_H7201 | 8 | select CPU_H7201 |
9 | select ZONE_DMA | ||
9 | help | 10 | help |
10 | Say Y here if you are using the Hynix GMS30C7201 Reference Board | 11 | Say Y here if you are using the Hynix GMS30C7201 Reference Board |
11 | 12 | ||
12 | config ARCH_H7202 | 13 | config ARCH_H7202 |
13 | bool "hms30c7202" | 14 | bool "hms30c7202" |
14 | select CPU_H7202 | 15 | select CPU_H7202 |
16 | select ZONE_DMA | ||
15 | depends on ARCH_H720X | 17 | depends on ARCH_H720X |
16 | help | 18 | help |
17 | Say Y here if you are using the Hynix HMS30C7202 Reference Board | 19 | Say Y here if you are using the Hynix HMS30C7202 Reference Board |
diff --git a/arch/arm/mach-h720x/include/mach/entry-macro.S b/arch/arm/mach-h720x/include/mach/entry-macro.S index 6d3b917c4a18..c3948e5ba4a0 100644 --- a/arch/arm/mach-h720x/include/mach/entry-macro.S +++ b/arch/arm/mach-h720x/include/mach/entry-macro.S | |||
@@ -57,9 +57,6 @@ | |||
57 | tst \irqstat, #1 @ bit 0 should be set | 57 | tst \irqstat, #1 @ bit 0 should be set |
58 | .endm | 58 | .endm |
59 | 59 | ||
60 | .macro irq_prio_table | ||
61 | .endm | ||
62 | |||
63 | #else | 60 | #else |
64 | #error hynix processor selection mismatch | 61 | #error hynix processor selection mismatch |
65 | #endif | 62 | #endif |
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index e9a589395723..e2e98bbb6413 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c | |||
@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r | |||
316 | } | 316 | } |
317 | 317 | ||
318 | 318 | ||
319 | static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
320 | { | ||
321 | return (dma_addr + size) >= SZ_64M; | ||
322 | } | ||
323 | |||
319 | /* | 324 | /* |
320 | * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. | 325 | * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. |
321 | */ | 326 | */ |
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev) | |||
324 | if(dev->bus == &pci_bus_type) { | 329 | if(dev->bus == &pci_bus_type) { |
325 | *dev->dma_mask = SZ_64M - 1; | 330 | *dev->dma_mask = SZ_64M - 1; |
326 | dev->coherent_dma_mask = SZ_64M - 1; | 331 | dev->coherent_dma_mask = SZ_64M - 1; |
327 | dmabounce_register_dev(dev, 2048, 4096); | 332 | dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); |
328 | } | 333 | } |
329 | return 0; | 334 | return 0; |
330 | } | 335 | } |
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev) | |||
337 | return 0; | 342 | return 0; |
338 | } | 343 | } |
339 | 344 | ||
340 | int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) | ||
341 | { | ||
342 | return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M); | ||
343 | } | ||
344 | |||
345 | void __init ixp4xx_pci_preinit(void) | 345 | void __init ixp4xx_pci_preinit(void) |
346 | { | 346 | { |
347 | unsigned long cpuid = read_cpuid_id(); | 347 | unsigned long cpuid = read_cpuid_id(); |
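dmabounce now takes a per-device needs_bounce callback at registration time, so the old global dma_needs_bounce() hook goes away; the PCI-bus check effectively moves to registration (only PCI devices call dmabounce_register_dev here) and the callback only tests whether the transfer crosses the 64 MB window. A standalone version of the predicate with example addresses:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long long dma_addr_t;
#define SZ_64M	(64ULL * 1024 * 1024)

/* Same test as ixp4xx_needs_bounce(): bounce any transfer that ends at
 * or beyond the 64 MB PCI window. */
static bool needs_bounce(dma_addr_t dma_addr, size_t size)
{
	return (dma_addr + size) >= SZ_64M;
}

int main(void)
{
	printf("0x3ff0000 + 0x1000 -> %s\n",
	       needs_bounce(0x3ff0000, 0x1000) ? "bounce" : "direct");
	printf("0x3ffff00 + 0x200  -> %s\n",
	       needs_bounce(0x3ffff00, 0x200) ? "bounce" : "direct");
	return 0;
}
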
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 74ed81a3cb1a..07772575d7ab 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c | |||
@@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void) | |||
419 | /* | 419 | /* |
420 | * clocksource | 420 | * clocksource |
421 | */ | 421 | */ |
422 | |||
423 | static cycle_t ixp4xx_clocksource_read(struct clocksource *c) | ||
424 | { | ||
425 | return *IXP4XX_OSTS; | ||
426 | } | ||
427 | |||
422 | unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; | 428 | unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; |
423 | EXPORT_SYMBOL(ixp4xx_timer_freq); | 429 | EXPORT_SYMBOL(ixp4xx_timer_freq); |
424 | static void __init ixp4xx_clocksource_init(void) | 430 | static void __init ixp4xx_clocksource_init(void) |
425 | { | 431 | { |
426 | init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq); | 432 | init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq); |
427 | 433 | ||
428 | clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32, | 434 | clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32, |
429 | clocksource_mmio_readl_up); | 435 | ixp4xx_clocksource_read); |
430 | } | 436 | } |
431 | 437 | ||
432 | /* | 438 | /* |
diff --git a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S index 870227c96602..b725f6c93975 100644 --- a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S +++ b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S | |||
@@ -41,7 +41,3 @@ | |||
41 | rsb \irqnr, \irqnr, #31 | 41 | rsb \irqnr, \irqnr, #31 |
42 | teq \irqstat, #0 | 42 | teq \irqstat, #0 |
43 | .endm | 43 | .endm |
44 | |||
45 | .macro irq_prio_table | ||
46 | .endm | ||
47 | |||
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c index 72b4e7631583..ab9f999106c7 100644 --- a/arch/arm/mach-mmp/pxa168.c +++ b/arch/arm/mach-mmp/pxa168.c | |||
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0); | |||
79 | static APBC_CLK(ssp5, PXA168_SSP5, 4, 0); | 79 | static APBC_CLK(ssp5, PXA168_SSP5, 4, 0); |
80 | static APBC_CLK(keypad, PXA168_KPC, 0, 32000); | 80 | static APBC_CLK(keypad, PXA168_KPC, 0, 32000); |
81 | 81 | ||
82 | static APMU_CLK(nand, NAND, 0x01db, 208000000); | 82 | static APMU_CLK(nand, NAND, 0x19b, 156000000); |
83 | static APMU_CLK(lcd, LCD, 0x7f, 312000000); | 83 | static APMU_CLK(lcd, LCD, 0x7f, 312000000); |
84 | 84 | ||
85 | /* device and clock bindings */ | 85 | /* device and clock bindings */ |
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c index 8f92ccd26edf..1464607aa60d 100644 --- a/arch/arm/mach-mmp/pxa910.c +++ b/arch/arm/mach-mmp/pxa910.c | |||
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000); | |||
110 | static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000); | 110 | static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000); |
111 | static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000); | 111 | static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000); |
112 | 112 | ||
113 | static APMU_CLK(nand, NAND, 0x01db, 208000000); | 113 | static APMU_CLK(nand, NAND, 0x19b, 156000000); |
114 | static APMU_CLK(u2o, USB, 0x1b, 480000000); | 114 | static APMU_CLK(u2o, USB, 0x1b, 480000000); |
115 | 115 | ||
116 | /* device and clock bindings */ | 116 | /* device and clock bindings */ |
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c index 2034098cf015..315b9f365329 100644 --- a/arch/arm/mach-msm/platsmp.c +++ b/arch/arm/mach-msm/platsmp.c | |||
@@ -157,12 +157,4 @@ void __init smp_init_cpus(void) | |||
157 | 157 | ||
158 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 158 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
159 | { | 159 | { |
160 | int i; | ||
161 | |||
162 | /* | ||
163 | * Initialise the present map, which describes the set of CPUs | ||
164 | * actually populated at the present time. | ||
165 | */ | ||
166 | for (i = 0; i < max_cpus; i++) | ||
167 | set_cpu_present(i, true); | ||
168 | } | 160 | } |
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c index 38b95e949d13..63621f152c98 100644 --- a/arch/arm/mach-msm/timer.c +++ b/arch/arm/mach-msm/timer.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <linux/io.h> | 23 | #include <linux/io.h> |
24 | 24 | ||
25 | #include <asm/mach/time.h> | 25 | #include <asm/mach/time.h> |
26 | #include <asm/hardware/gic.h> | ||
27 | |||
26 | #include <mach/msm_iomap.h> | 28 | #include <mach/msm_iomap.h> |
27 | #include <mach/cpu.h> | 29 | #include <mach/cpu.h> |
28 | 30 | ||
@@ -55,10 +57,12 @@ enum timer_location { | |||
55 | #if defined(CONFIG_ARCH_QSD8X50) | 57 | #if defined(CONFIG_ARCH_QSD8X50) |
56 | #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ | 58 | #define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ |
57 | #define MSM_DGT_SHIFT (0) | 59 | #define MSM_DGT_SHIFT (0) |
58 | #elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \ | 60 | #elif defined(CONFIG_ARCH_MSM7X30) |
59 | defined(CONFIG_ARCH_MSM8960) | ||
60 | #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */ | 61 | #define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */ |
61 | #define MSM_DGT_SHIFT (0) | 62 | #define MSM_DGT_SHIFT (0) |
63 | #elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960) | ||
64 | #define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */ | ||
65 | #define MSM_DGT_SHIFT (0) | ||
62 | #else | 66 | #else |
63 | #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */ | 67 | #define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */ |
64 | #define MSM_DGT_SHIFT (5) | 68 | #define MSM_DGT_SHIFT (5) |
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs) | |||
100 | { | 104 | { |
101 | struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); | 105 | struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); |
102 | 106 | ||
103 | return readl(clk->global_counter); | 107 | /* |
108 | * Shift timer count down by a constant due to unreliable lower bits | ||
109 | * on some targets. | ||
110 | */ | ||
111 | return readl(clk->global_counter) >> clk->shift; | ||
104 | } | 112 | } |
105 | 113 | ||
106 | static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) | 114 | static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) |
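The shifted read and the per-SoC constants above fit together as follows: on targets where the low-order DGT bits are unreliable, the clocksource reports the hardware count shifted down by MSM_DGT_SHIFT, so the usable tick rate is the raw clock divided by 2^MSM_DGT_SHIFT, which is what the "19.2 MHz or 600 KHz after shift" comment in the default branch describes. A tiny self-contained check of that arithmetic (names are local to this sketch):

#define SKETCH_DGT_HZ		19200000	/* raw DGT input clock */
#define SKETCH_DGT_SHIFT	5		/* bits dropped by the read path */

static unsigned long sketch_effective_hz(void)
{
	/* 19200000 >> 5 == 600000, i.e. the "600 KHz after shift" case */
	return SKETCH_DGT_HZ >> SKETCH_DGT_SHIFT;
}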
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile index af98117043d2..5b114d1558c8 100644 --- a/arch/arm/mach-omap1/Makefile +++ b/arch/arm/mach-omap1/Makefile | |||
@@ -4,14 +4,14 @@ | |||
4 | 4 | ||
5 | # Common support | 5 | # Common support |
6 | obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o | 6 | obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o |
7 | obj-y += clock.o clock_data.o opp_data.o reset.o | 7 | obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o |
8 | 8 | ||
9 | obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o | 9 | obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o |
10 | 10 | ||
11 | obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o | 11 | obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o |
12 | 12 | ||
13 | # Power Management | 13 | # Power Management |
14 | obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o | 14 | obj-$(CONFIG_PM) += pm.o sleep.o |
15 | 15 | ||
16 | # DSP | 16 | # DSP |
17 | obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o | 17 | obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o |
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index de88c9297b68..f49ce85d2448 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c | |||
@@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = { | |||
215 | .delay = 9, | 215 | .delay = 9, |
216 | }; | 216 | }; |
217 | 217 | ||
218 | static struct platform_device ams_delta_kp_device __initdata = { | 218 | static struct platform_device ams_delta_kp_device = { |
219 | .name = "omap-keypad", | 219 | .name = "omap-keypad", |
220 | .id = -1, | 220 | .id = -1, |
221 | .dev = { | 221 | .dev = { |
@@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = { | |||
225 | .resource = ams_delta_kp_resources, | 225 | .resource = ams_delta_kp_resources, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | static struct platform_device ams_delta_lcd_device __initdata = { | 228 | static struct platform_device ams_delta_lcd_device = { |
229 | .name = "lcd_ams_delta", | 229 | .name = "lcd_ams_delta", |
230 | .id = -1, | 230 | .id = -1, |
231 | }; | 231 | }; |
232 | 232 | ||
233 | static struct platform_device ams_delta_led_device __initdata = { | 233 | static struct platform_device ams_delta_led_device = { |
234 | .name = "ams-delta-led", | 234 | .name = "ams-delta-led", |
235 | .id = -1 | 235 | .id = -1 |
236 | }; | 236 | }; |
@@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = { | |||
267 | .power = ams_delta_camera_power, | 267 | .power = ams_delta_camera_power, |
268 | }; | 268 | }; |
269 | 269 | ||
270 | static struct platform_device ams_delta_camera_device __initdata = { | 270 | static struct platform_device ams_delta_camera_device = { |
271 | .name = "soc-camera-pdrv", | 271 | .name = "soc-camera-pdrv", |
272 | .id = 0, | 272 | .id = 0, |
273 | .dev = { | 273 | .dev = { |
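Dropping __initdata above fixes a use-after-free in the making: a platform_device passed to platform_device_register() stays linked into the driver core long after boot, so it must not sit in memory that free_initmem() reclaims. A hedged sketch of the corrected shape (the device name is reused from the hunk, the init function is illustrative):

static struct platform_device sketch_led_device = {	/* no __initdata */
	.name	= "ams-delta-led",
	.id	= -1,
};

static int __init sketch_board_init(void)
{
	/* Only the registration call itself may be discarded after init */
	return platform_device_register(&sketch_led_device);
}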
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c index 04c4b04cf54e..364137c2042c 100644 --- a/arch/arm/mach-omap1/gpio15xx.c +++ b/arch/arm/mach-omap1/gpio15xx.c | |||
@@ -41,7 +41,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = { | |||
41 | .bank_stride = 1, | 41 | .bank_stride = 1, |
42 | }; | 42 | }; |
43 | 43 | ||
44 | static struct __initdata platform_device omap15xx_mpu_gpio = { | 44 | static struct platform_device omap15xx_mpu_gpio = { |
45 | .name = "omap_gpio", | 45 | .name = "omap_gpio", |
46 | .id = 0, | 46 | .id = 0, |
47 | .dev = { | 47 | .dev = { |
@@ -70,7 +70,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = { | |||
70 | .bank_width = 16, | 70 | .bank_width = 16, |
71 | }; | 71 | }; |
72 | 72 | ||
73 | static struct __initdata platform_device omap15xx_gpio = { | 73 | static struct platform_device omap15xx_gpio = { |
74 | .name = "omap_gpio", | 74 | .name = "omap_gpio", |
75 | .id = 1, | 75 | .id = 1, |
76 | .dev = { | 76 | .dev = { |
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c index 5dd0d4c82b24..293a246e2824 100644 --- a/arch/arm/mach-omap1/gpio16xx.c +++ b/arch/arm/mach-omap1/gpio16xx.c | |||
@@ -44,7 +44,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = { | |||
44 | .bank_stride = 1, | 44 | .bank_stride = 1, |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static struct __initdata platform_device omap16xx_mpu_gpio = { | 47 | static struct platform_device omap16xx_mpu_gpio = { |
48 | .name = "omap_gpio", | 48 | .name = "omap_gpio", |
49 | .id = 0, | 49 | .id = 0, |
50 | .dev = { | 50 | .dev = { |
@@ -73,7 +73,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = { | |||
73 | .bank_width = 16, | 73 | .bank_width = 16, |
74 | }; | 74 | }; |
75 | 75 | ||
76 | static struct __initdata platform_device omap16xx_gpio1 = { | 76 | static struct platform_device omap16xx_gpio1 = { |
77 | .name = "omap_gpio", | 77 | .name = "omap_gpio", |
78 | .id = 1, | 78 | .id = 1, |
79 | .dev = { | 79 | .dev = { |
@@ -102,7 +102,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = { | |||
102 | .bank_width = 16, | 102 | .bank_width = 16, |
103 | }; | 103 | }; |
104 | 104 | ||
105 | static struct __initdata platform_device omap16xx_gpio2 = { | 105 | static struct platform_device omap16xx_gpio2 = { |
106 | .name = "omap_gpio", | 106 | .name = "omap_gpio", |
107 | .id = 2, | 107 | .id = 2, |
108 | .dev = { | 108 | .dev = { |
@@ -131,7 +131,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = { | |||
131 | .bank_width = 16, | 131 | .bank_width = 16, |
132 | }; | 132 | }; |
133 | 133 | ||
134 | static struct __initdata platform_device omap16xx_gpio3 = { | 134 | static struct platform_device omap16xx_gpio3 = { |
135 | .name = "omap_gpio", | 135 | .name = "omap_gpio", |
136 | .id = 3, | 136 | .id = 3, |
137 | .dev = { | 137 | .dev = { |
@@ -160,7 +160,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = { | |||
160 | .bank_width = 16, | 160 | .bank_width = 16, |
161 | }; | 161 | }; |
162 | 162 | ||
163 | static struct __initdata platform_device omap16xx_gpio4 = { | 163 | static struct platform_device omap16xx_gpio4 = { |
164 | .name = "omap_gpio", | 164 | .name = "omap_gpio", |
165 | .id = 4, | 165 | .id = 4, |
166 | .dev = { | 166 | .dev = { |
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c index 1204c8b871af..c6ad248d63a6 100644 --- a/arch/arm/mach-omap1/gpio7xx.c +++ b/arch/arm/mach-omap1/gpio7xx.c | |||
@@ -46,7 +46,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = { | |||
46 | .bank_stride = 2, | 46 | .bank_stride = 2, |
47 | }; | 47 | }; |
48 | 48 | ||
49 | static struct __initdata platform_device omap7xx_mpu_gpio = { | 49 | static struct platform_device omap7xx_mpu_gpio = { |
50 | .name = "omap_gpio", | 50 | .name = "omap_gpio", |
51 | .id = 0, | 51 | .id = 0, |
52 | .dev = { | 52 | .dev = { |
@@ -75,7 +75,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = { | |||
75 | .bank_width = 32, | 75 | .bank_width = 32, |
76 | }; | 76 | }; |
77 | 77 | ||
78 | static struct __initdata platform_device omap7xx_gpio1 = { | 78 | static struct platform_device omap7xx_gpio1 = { |
79 | .name = "omap_gpio", | 79 | .name = "omap_gpio", |
80 | .id = 1, | 80 | .id = 1, |
81 | .dev = { | 81 | .dev = { |
@@ -104,7 +104,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = { | |||
104 | .bank_width = 32, | 104 | .bank_width = 32, |
105 | }; | 105 | }; |
106 | 106 | ||
107 | static struct __initdata platform_device omap7xx_gpio2 = { | 107 | static struct platform_device omap7xx_gpio2 = { |
108 | .name = "omap_gpio", | 108 | .name = "omap_gpio", |
109 | .id = 2, | 109 | .id = 2, |
110 | .dev = { | 110 | .dev = { |
@@ -133,7 +133,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = { | |||
133 | .bank_width = 32, | 133 | .bank_width = 32, |
134 | }; | 134 | }; |
135 | 135 | ||
136 | static struct __initdata platform_device omap7xx_gpio3 = { | 136 | static struct platform_device omap7xx_gpio3 = { |
137 | .name = "omap_gpio", | 137 | .name = "omap_gpio", |
138 | .id = 3, | 138 | .id = 3, |
139 | .dev = { | 139 | .dev = { |
@@ -162,7 +162,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = { | |||
162 | .bank_width = 32, | 162 | .bank_width = 32, |
163 | }; | 163 | }; |
164 | 164 | ||
165 | static struct __initdata platform_device omap7xx_gpio4 = { | 165 | static struct platform_device omap7xx_gpio4 = { |
166 | .name = "omap_gpio", | 166 | .name = "omap_gpio", |
167 | .id = 4, | 167 | .id = 4, |
168 | .dev = { | 168 | .dev = { |
@@ -191,7 +191,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = { | |||
191 | .bank_width = 32, | 191 | .bank_width = 32, |
192 | }; | 192 | }; |
193 | 193 | ||
194 | static struct __initdata platform_device omap7xx_gpio5 = { | 194 | static struct platform_device omap7xx_gpio5 = { |
195 | .name = "omap_gpio", | 195 | .name = "omap_gpio", |
196 | .id = 5, | 196 | .id = 5, |
197 | .dev = { | 197 | .dev = { |
@@ -220,7 +220,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = { | |||
220 | .bank_width = 32, | 220 | .bank_width = 32, |
221 | }; | 221 | }; |
222 | 222 | ||
223 | static struct __initdata platform_device omap7xx_gpio6 = { | 223 | static struct platform_device omap7xx_gpio6 = { |
224 | .name = "omap_gpio", | 224 | .name = "omap_gpio", |
225 | .id = 6, | 225 | .id = 6, |
226 | .dev = { | 226 | .dev = { |
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c index fe31d933f0ed..334fb8871bc3 100644 --- a/arch/arm/mach-omap1/pm_bus.c +++ b/arch/arm/mach-omap1/pm_bus.c | |||
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = { | |||
56 | USE_PLATFORM_PM_SLEEP_OPS | 56 | USE_PLATFORM_PM_SLEEP_OPS |
57 | }, | 57 | }, |
58 | }; | 58 | }; |
59 | #define OMAP1_PWR_DOMAIN (&default_power_domain) | ||
60 | #else | ||
61 | #define OMAP1_PWR_DOMAIN NULL | ||
62 | #endif /* CONFIG_PM_RUNTIME */ | ||
59 | 63 | ||
60 | static struct pm_clk_notifier_block platform_bus_notifier = { | 64 | static struct pm_clk_notifier_block platform_bus_notifier = { |
61 | .pwr_domain = &default_power_domain, | 65 | .pwr_domain = OMAP1_PWR_DOMAIN, |
62 | .con_ids = { "ick", "fck", NULL, }, | 66 | .con_ids = { "ick", "fck", NULL, }, |
63 | }; | 67 | }; |
64 | 68 | ||
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void) | |||
72 | return 0; | 76 | return 0; |
73 | } | 77 | } |
74 | core_initcall(omap1_pm_runtime_init); | 78 | core_initcall(omap1_pm_runtime_init); |
75 | #endif /* CONFIG_PM_RUNTIME */ | 79 | |
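Together with the omap1 Makefile hunk earlier in this series (pm_bus.o moves from obj-$(CONFIG_PM) to obj-y), the change above keeps the notifier usable in every configuration: only the power-domain pointer varies with CONFIG_PM_RUNTIME. Sketched in isolation, with names following the hunk:

#ifdef CONFIG_PM_RUNTIME
#define SKETCH_PWR_DOMAIN	(&default_power_domain)
#else
#define SKETCH_PWR_DOMAIN	NULL	/* no runtime PM: clock handling only */
#endif

static struct pm_clk_notifier_block sketch_bus_notifier = {
	.pwr_domain	= SKETCH_PWR_DOMAIN,
	.con_ids	= { "ick", "fck", NULL, },
};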
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 2a0bb4818cae..23f71d40883e 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c | |||
@@ -84,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = { | |||
84 | 84 | ||
85 | static struct omap_nand_platform_data pandora_nand_data = { | 85 | static struct omap_nand_platform_data pandora_nand_data = { |
86 | .cs = 0, | 86 | .cs = 0, |
87 | .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */ | 87 | .devsize = NAND_BUSWIDTH_16, |
88 | .xfer_type = NAND_OMAP_PREFETCH_DMA, | ||
88 | .parts = omap3pandora_nand_partitions, | 89 | .parts = omap3pandora_nand_partitions, |
89 | .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), | 90 | .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), |
90 | }; | 91 | }; |
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index 990366726c58..88bd6f7705f0 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c | |||
@@ -558,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module = | |||
558 | .subdev_board_info = &rx51_si4713_board_info, | 558 | .subdev_board_info = &rx51_si4713_board_info, |
559 | }; | 559 | }; |
560 | 560 | ||
561 | static struct platform_device rx51_si4713_dev __initdata_or_module = { | 561 | static struct platform_device rx51_si4713_dev = { |
562 | .name = "radio-si4713", | 562 | .name = "radio-si4713", |
563 | .id = -1, | 563 | .id = -1, |
564 | .dev = { | 564 | .dev = { |
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c index da53ba3917ca..aab884fecc55 100644 --- a/arch/arm/mach-omap2/control.c +++ b/arch/arm/mach-omap2/control.c | |||
@@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void) | |||
286 | scratchpad_contents.boot_config_ptr = 0x0; | 286 | scratchpad_contents.boot_config_ptr = 0x0; |
287 | if (cpu_is_omap3630()) | 287 | if (cpu_is_omap3630()) |
288 | scratchpad_contents.public_restore_ptr = | 288 | scratchpad_contents.public_restore_ptr = |
289 | virt_to_phys(get_omap3630_restore_pointer()); | 289 | virt_to_phys(omap3_restore_3630); |
290 | else if (omap_rev() != OMAP3430_REV_ES3_0 && | 290 | else if (omap_rev() != OMAP3430_REV_ES3_0 && |
291 | omap_rev() != OMAP3430_REV_ES3_1) | 291 | omap_rev() != OMAP3430_REV_ES3_1) |
292 | scratchpad_contents.public_restore_ptr = | 292 | scratchpad_contents.public_restore_ptr = |
293 | virt_to_phys(get_restore_pointer()); | 293 | virt_to_phys(omap3_restore); |
294 | else | 294 | else |
295 | scratchpad_contents.public_restore_ptr = | 295 | scratchpad_contents.public_restore_ptr = |
296 | virt_to_phys(get_es3_restore_pointer()); | 296 | virt_to_phys(omap3_restore_es3); |
297 | |||
297 | if (omap_type() == OMAP2_DEVICE_TYPE_GP) | 298 | if (omap_type() == OMAP2_DEVICE_TYPE_GP) |
298 | scratchpad_contents.secure_ram_restore_ptr = 0x0; | 299 | scratchpad_contents.secure_ram_restore_ptr = 0x0; |
299 | else | 300 | else |
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h index a016c8b59e00..d4ef75d5a382 100644 --- a/arch/arm/mach-omap2/control.h +++ b/arch/arm/mach-omap2/control.h | |||
@@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset); | |||
386 | 386 | ||
387 | extern void omap3_save_scratchpad_contents(void); | 387 | extern void omap3_save_scratchpad_contents(void); |
388 | extern void omap3_clear_scratchpad_contents(void); | 388 | extern void omap3_clear_scratchpad_contents(void); |
389 | extern u32 *get_restore_pointer(void); | 389 | extern void omap3_restore(void); |
390 | extern u32 *get_es3_restore_pointer(void); | 390 | extern void omap3_restore_es3(void); |
391 | extern u32 *get_omap3630_restore_pointer(void); | 391 | extern void omap3_restore_3630(void); |
392 | extern u32 omap3_arm_context[128]; | 392 | extern u32 omap3_arm_context[128]; |
393 | extern void omap3_control_save_context(void); | 393 | extern void omap3_control_save_context(void); |
394 | extern void omap3_control_restore_context(void); | 394 | extern void omap3_control_restore_context(void); |
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S index a48690b90990..ceb8b7e593d7 100644 --- a/arch/arm/mach-omap2/include/mach/entry-macro.S +++ b/arch/arm/mach-omap2/include/mach/entry-macro.S | |||
@@ -165,6 +165,3 @@ | |||
165 | #endif | 165 | #endif |
166 | 166 | ||
167 | #endif /* MULTI_OMAP2 */ | 167 | #endif /* MULTI_OMAP2 */ |
168 | |||
169 | .macro irq_prio_table | ||
170 | .endm | ||
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index ecfe93c4b585..ce65e9329c7b 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c | |||
@@ -125,14 +125,6 @@ void __init smp_init_cpus(void) | |||
125 | 125 | ||
126 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 126 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
127 | { | 127 | { |
128 | int i; | ||
129 | |||
130 | /* | ||
131 | * Initialise the present map, which describes the set of CPUs | ||
132 | * actually populated at the present time. | ||
133 | */ | ||
134 | for (i = 0; i < max_cpus; i++) | ||
135 | set_cpu_present(i, true); | ||
136 | 128 | ||
137 | /* | 129 | /* |
138 | * Initialise the SCU and wake up the secondary core using | 130 | * Initialise the SCU and wake up the secondary core using |
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index a5a83b358ddd..e01da45c0537 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c | |||
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir; | |||
189 | 189 | ||
190 | static int pm_dbg_init_done; | 190 | static int pm_dbg_init_done; |
191 | 191 | ||
192 | static int __init pm_dbg_init(void); | 192 | static int pm_dbg_init(void); |
193 | 193 | ||
194 | enum { | 194 | enum { |
195 | DEBUG_FILE_COUNTERS = 0, | 195 | DEBUG_FILE_COUNTERS = 0, |
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val) | |||
595 | 595 | ||
596 | DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); | 596 | DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); |
597 | 597 | ||
598 | static int __init pm_dbg_init(void) | 598 | static int pm_dbg_init(void) |
599 | { | 599 | { |
600 | int i; | 600 | int i; |
601 | struct dentry *d; | 601 | struct dentry *d; |
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 45bcfce77352..04ee56646126 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h | |||
@@ -88,18 +88,28 @@ extern int pm_dbg_regset_init(int reg_set); | |||
88 | #define pm_dbg_regset_init(reg_set) do {} while (0); | 88 | #define pm_dbg_regset_init(reg_set) do {} while (0); |
89 | #endif /* CONFIG_PM_DEBUG */ | 89 | #endif /* CONFIG_PM_DEBUG */ |
90 | 90 | ||
91 | /* 24xx */ | ||
91 | extern void omap24xx_idle_loop_suspend(void); | 92 | extern void omap24xx_idle_loop_suspend(void); |
93 | extern unsigned int omap24xx_idle_loop_suspend_sz; | ||
92 | 94 | ||
93 | extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, | 95 | extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, |
94 | void __iomem *sdrc_power); | 96 | void __iomem *sdrc_power); |
95 | extern void omap34xx_cpu_suspend(u32 *addr, int save_state); | 97 | extern unsigned int omap24xx_cpu_suspend_sz; |
96 | extern int save_secure_ram_context(u32 *addr); | ||
97 | extern void omap3_save_scratchpad_contents(void); | ||
98 | 98 | ||
99 | extern unsigned int omap24xx_idle_loop_suspend_sz; | 99 | /* 3xxx */ |
100 | extern void omap34xx_cpu_suspend(int save_state); | ||
101 | |||
102 | /* omap3_do_wfi function pointer and size, for copy to SRAM */ | ||
103 | extern void omap3_do_wfi(void); | ||
104 | extern unsigned int omap3_do_wfi_sz; | ||
105 | /* ... and its pointer from SRAM after copy */ | ||
106 | extern void (*omap3_do_wfi_sram)(void); | ||
107 | |||
108 | /* save_secure_ram_context function pointer and size, for copy to SRAM */ | ||
109 | extern int save_secure_ram_context(u32 *addr); | ||
100 | extern unsigned int save_secure_ram_context_sz; | 110 | extern unsigned int save_secure_ram_context_sz; |
101 | extern unsigned int omap24xx_cpu_suspend_sz; | 111 | |
102 | extern unsigned int omap34xx_cpu_suspend_sz; | 112 | extern void omap3_save_scratchpad_contents(void); |
103 | 113 | ||
104 | #define PM_RTA_ERRATUM_i608 (1 << 0) | 114 | #define PM_RTA_ERRATUM_i608 (1 << 0) |
105 | #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) | 115 | #define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index c155c9d1c82c..b77d82665abb 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include <linux/console.h> | 31 | #include <linux/console.h> |
32 | #include <trace/events/power.h> | 32 | #include <trace/events/power.h> |
33 | 33 | ||
34 | #include <asm/suspend.h> | ||
35 | |||
34 | #include <plat/sram.h> | 36 | #include <plat/sram.h> |
35 | #include "clockdomain.h" | 37 | #include "clockdomain.h" |
36 | #include "powerdomain.h" | 38 | #include "powerdomain.h" |
@@ -40,8 +42,6 @@ | |||
40 | #include <plat/gpmc.h> | 42 | #include <plat/gpmc.h> |
41 | #include <plat/dma.h> | 43 | #include <plat/dma.h> |
42 | 44 | ||
43 | #include <asm/tlbflush.h> | ||
44 | |||
45 | #include "cm2xxx_3xxx.h" | 45 | #include "cm2xxx_3xxx.h" |
46 | #include "cm-regbits-34xx.h" | 46 | #include "cm-regbits-34xx.h" |
47 | #include "prm-regbits-34xx.h" | 47 | #include "prm-regbits-34xx.h" |
@@ -64,11 +64,6 @@ static inline bool is_suspending(void) | |||
64 | } | 64 | } |
65 | #endif | 65 | #endif |
66 | 66 | ||
67 | /* Scratchpad offsets */ | ||
68 | #define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4 | ||
69 | #define OMAP343X_TABLE_VALUE_OFFSET 0xc0 | ||
70 | #define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8 | ||
71 | |||
72 | /* pm34xx errata defined in pm.h */ | 67 | /* pm34xx errata defined in pm.h */ |
73 | u16 pm34xx_errata; | 68 | u16 pm34xx_errata; |
74 | 69 | ||
@@ -83,9 +78,8 @@ struct power_state { | |||
83 | 78 | ||
84 | static LIST_HEAD(pwrst_list); | 79 | static LIST_HEAD(pwrst_list); |
85 | 80 | ||
86 | static void (*_omap_sram_idle)(u32 *addr, int save_state); | ||
87 | |||
88 | static int (*_omap_save_secure_sram)(u32 *addr); | 81 | static int (*_omap_save_secure_sram)(u32 *addr); |
82 | void (*omap3_do_wfi_sram)(void); | ||
89 | 83 | ||
90 | static struct powerdomain *mpu_pwrdm, *neon_pwrdm; | 84 | static struct powerdomain *mpu_pwrdm, *neon_pwrdm; |
91 | static struct powerdomain *core_pwrdm, *per_pwrdm; | 85 | static struct powerdomain *core_pwrdm, *per_pwrdm; |
@@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) | |||
312 | return IRQ_HANDLED; | 306 | return IRQ_HANDLED; |
313 | } | 307 | } |
314 | 308 | ||
315 | /* Function to restore the table entry that was modified for enabling MMU */ | 309 | static void omap34xx_save_context(u32 *save) |
316 | static void restore_table_entry(void) | ||
317 | { | 310 | { |
318 | void __iomem *scratchpad_address; | 311 | u32 val; |
319 | u32 previous_value, control_reg_value; | ||
320 | u32 *address; | ||
321 | 312 | ||
322 | scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); | 313 | /* Read Auxiliary Control Register */ |
314 | asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val)); | ||
315 | *save++ = 1; | ||
316 | *save++ = val; | ||
323 | 317 | ||
324 | /* Get address of entry that was modified */ | 318 | /* Read L2 AUX ctrl register */ |
325 | address = (u32 *)__raw_readl(scratchpad_address + | 319 | asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val)); |
326 | OMAP343X_TABLE_ADDRESS_OFFSET); | 320 | *save++ = 1; |
327 | /* Get the previous value which needs to be restored */ | 321 | *save++ = val; |
328 | previous_value = __raw_readl(scratchpad_address + | 322 | } |
329 | OMAP343X_TABLE_VALUE_OFFSET); | 323 | |
330 | address = __va(address); | 324 | static int omap34xx_do_sram_idle(unsigned long save_state) |
331 | *address = previous_value; | 325 | { |
332 | flush_tlb_all(); | 326 | omap34xx_cpu_suspend(save_state); |
333 | control_reg_value = __raw_readl(scratchpad_address | 327 | return 0; |
334 | + OMAP343X_CONTROL_REG_VALUE_OFFSET); | ||
335 | /* This will enable caches and prediction */ | ||
336 | set_cr(control_reg_value); | ||
337 | } | 328 | } |
338 | 329 | ||
339 | void omap_sram_idle(void) | 330 | void omap_sram_idle(void) |
@@ -352,9 +343,6 @@ void omap_sram_idle(void) | |||
352 | int core_prev_state, per_prev_state; | 343 | int core_prev_state, per_prev_state; |
353 | u32 sdrc_pwr = 0; | 344 | u32 sdrc_pwr = 0; |
354 | 345 | ||
355 | if (!_omap_sram_idle) | ||
356 | return; | ||
357 | |||
358 | pwrdm_clear_all_prev_pwrst(mpu_pwrdm); | 346 | pwrdm_clear_all_prev_pwrst(mpu_pwrdm); |
359 | pwrdm_clear_all_prev_pwrst(neon_pwrdm); | 347 | pwrdm_clear_all_prev_pwrst(neon_pwrdm); |
360 | pwrdm_clear_all_prev_pwrst(core_pwrdm); | 348 | pwrdm_clear_all_prev_pwrst(core_pwrdm); |
@@ -432,12 +420,16 @@ void omap_sram_idle(void) | |||
432 | sdrc_pwr = sdrc_read_reg(SDRC_POWER); | 420 | sdrc_pwr = sdrc_read_reg(SDRC_POWER); |
433 | 421 | ||
434 | /* | 422 | /* |
435 | * omap3_arm_context is the location where ARM registers | 423 | * omap3_arm_context is the location where some ARM context |
436 | * get saved. The restore path then reads from this | 424 | * gets saved. The rest is placed on the stack, and restored |
437 | * location and restores them back. | 425 | * from there before resuming. |
438 | */ | 426 | */ |
439 | _omap_sram_idle(omap3_arm_context, save_state); | 427 | if (save_state) |
440 | cpu_init(); | 428 | omap34xx_save_context(omap3_arm_context); |
429 | if (save_state == 1 || save_state == 3) | ||
430 | cpu_suspend(save_state, omap34xx_do_sram_idle); | ||
431 | else | ||
432 | omap34xx_do_sram_idle(save_state); | ||
441 | 433 | ||
442 | /* Restore normal SDRC POWER settings */ | 434 | /* Restore normal SDRC POWER settings */ |
443 | if (omap_rev() >= OMAP3430_REV_ES3_0 && | 435 | if (omap_rev() >= OMAP3430_REV_ES3_0 && |
@@ -445,10 +437,6 @@ void omap_sram_idle(void) | |||
445 | core_next_state == PWRDM_POWER_OFF) | 437 | core_next_state == PWRDM_POWER_OFF) |
446 | sdrc_write_reg(sdrc_pwr, SDRC_POWER); | 438 | sdrc_write_reg(sdrc_pwr, SDRC_POWER); |
447 | 439 | ||
448 | /* Restore table entry modified during MMU restoration */ | ||
449 | if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF) | ||
450 | restore_table_entry(); | ||
451 | |||
452 | /* CORE */ | 440 | /* CORE */ |
453 | if (core_next_state < PWRDM_POWER_ON) { | 441 | if (core_next_state < PWRDM_POWER_ON) { |
454 | core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); | 442 | core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); |
@@ -852,10 +840,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused) | |||
852 | return 0; | 840 | return 0; |
853 | } | 841 | } |
854 | 842 | ||
843 | /* | ||
844 | * Push functions to SRAM | ||
845 | * | ||
846 | * The minimum set of functions is pushed to SRAM for execution: | ||
847 | * - omap3_do_wfi for erratum i581 WA, | ||
848 | * - save_secure_ram_context for security extensions. | ||
849 | */ | ||
855 | void omap_push_sram_idle(void) | 850 | void omap_push_sram_idle(void) |
856 | { | 851 | { |
857 | _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, | 852 | omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz); |
858 | omap34xx_cpu_suspend_sz); | 853 | |
859 | if (omap_type() != OMAP2_DEVICE_TYPE_GP) | 854 | if (omap_type() != OMAP2_DEVICE_TYPE_GP) |
860 | _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, | 855 | _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, |
861 | save_secure_ram_context_sz); | 856 | save_secure_ram_context_sz); |
@@ -920,7 +915,6 @@ static int __init omap3_pm_init(void) | |||
920 | per_clkdm = clkdm_lookup("per_clkdm"); | 915 | per_clkdm = clkdm_lookup("per_clkdm"); |
921 | core_clkdm = clkdm_lookup("core_clkdm"); | 916 | core_clkdm = clkdm_lookup("core_clkdm"); |
922 | 917 | ||
923 | omap_push_sram_idle(); | ||
924 | #ifdef CONFIG_SUSPEND | 918 | #ifdef CONFIG_SUSPEND |
925 | suspend_set_ops(&omap_pm_ops); | 919 | suspend_set_ops(&omap_pm_ops); |
926 | #endif /* CONFIG_SUSPEND */ | 920 | #endif /* CONFIG_SUSPEND */ |
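The pm34xx.c rework above converts OMAP3 idle/suspend to the generic ARM entry point: CPU context that used to be stashed by hand in sleep34xx.S is now saved either in C (omap34xx_save_context) or by cpu_suspend(), and the remaining assembly is reached through a "finisher" callback. A minimal sketch of that calling pattern, mirroring the omap34xx_do_sram_idle() wrapper added by the hunk:

static int sketch_do_sram_idle(unsigned long save_state)
{
	omap34xx_cpu_suspend(save_state);	/* enter the low-power state */
	return 0;
}

static void sketch_enter_idle(int save_state)
{
	if (save_state == 1 || save_state == 3)
		/* Context will be lost: let the core save/restore CPU state */
		cpu_suspend(save_state, sketch_do_sram_idle);
	else
		/* Retention/no loss: call the finisher directly */
		sketch_do_sram_idle(save_state);
}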
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S index 63f10669571a..f2ea1bd1c691 100644 --- a/arch/arm/mach-omap2/sleep34xx.S +++ b/arch/arm/mach-omap2/sleep34xx.S | |||
@@ -74,46 +74,6 @@ | |||
74 | * API functions | 74 | * API functions |
75 | */ | 75 | */ |
76 | 76 | ||
77 | /* | ||
78 | * The "get_*restore_pointer" functions are used to provide a | ||
79 | * physical restore address where the ROM code jumps while waking | ||
80 | * up from MPU OFF/OSWR state. | ||
81 | * The restore pointer is stored into the scratchpad. | ||
82 | */ | ||
83 | |||
84 | .text | ||
85 | /* Function call to get the restore pointer for resume from OFF */ | ||
86 | ENTRY(get_restore_pointer) | ||
87 | stmfd sp!, {lr} @ save registers on stack | ||
88 | adr r0, restore | ||
89 | ldmfd sp!, {pc} @ restore regs and return | ||
90 | ENDPROC(get_restore_pointer) | ||
91 | .align | ||
92 | ENTRY(get_restore_pointer_sz) | ||
93 | .word . - get_restore_pointer | ||
94 | |||
95 | .text | ||
96 | /* Function call to get the restore pointer for 3630 resume from OFF */ | ||
97 | ENTRY(get_omap3630_restore_pointer) | ||
98 | stmfd sp!, {lr} @ save registers on stack | ||
99 | adr r0, restore_3630 | ||
100 | ldmfd sp!, {pc} @ restore regs and return | ||
101 | ENDPROC(get_omap3630_restore_pointer) | ||
102 | .align | ||
103 | ENTRY(get_omap3630_restore_pointer_sz) | ||
104 | .word . - get_omap3630_restore_pointer | ||
105 | |||
106 | .text | ||
107 | /* Function call to get the restore pointer for ES3 to resume from OFF */ | ||
108 | ENTRY(get_es3_restore_pointer) | ||
109 | stmfd sp!, {lr} @ save registers on stack | ||
110 | adr r0, restore_es3 | ||
111 | ldmfd sp!, {pc} @ restore regs and return | ||
112 | ENDPROC(get_es3_restore_pointer) | ||
113 | .align | ||
114 | ENTRY(get_es3_restore_pointer_sz) | ||
115 | .word . - get_es3_restore_pointer | ||
116 | |||
117 | .text | 77 | .text |
118 | /* | 78 | /* |
119 | * L2 cache needs to be toggled for stable OFF mode functionality on 3630. | 79 | * L2 cache needs to be toggled for stable OFF mode functionality on 3630. |
@@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore) | |||
133 | /* Function to call rom code to save secure ram context */ | 93 | /* Function to call rom code to save secure ram context */ |
134 | .align 3 | 94 | .align 3 |
135 | ENTRY(save_secure_ram_context) | 95 | ENTRY(save_secure_ram_context) |
136 | stmfd sp!, {r1-r12, lr} @ save registers on stack | 96 | stmfd sp!, {r4 - r11, lr} @ save registers on stack |
137 | adr r3, api_params @ r3 points to parameters | 97 | adr r3, api_params @ r3 points to parameters |
138 | str r0, [r3,#0x4] @ r0 has sdram address | 98 | str r0, [r3,#0x4] @ r0 has sdram address |
139 | ldr r12, high_mask | 99 | ldr r12, high_mask |
@@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context) | |||
152 | nop | 112 | nop |
153 | nop | 113 | nop |
154 | nop | 114 | nop |
155 | ldmfd sp!, {r1-r12, pc} | 115 | ldmfd sp!, {r4 - r11, pc} |
156 | .align | 116 | .align |
157 | sram_phy_addr_mask: | 117 | sram_phy_addr_mask: |
158 | .word SRAM_BASE_P | 118 | .word SRAM_BASE_P |
@@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz) | |||
179 | * | 139 | * |
180 | * | 140 | * |
181 | * Notes: | 141 | * Notes: |
182 | * - this code gets copied to internal SRAM at boot and after wake-up | 142 | * - only the minimum set of functions gets copied to internal SRAM at boot |
183 | * from OFF mode. The execution pointer in SRAM is _omap_sram_idle. | 143 | * and after wake-up from OFF mode, cf. omap_push_sram_idle. The function |
144 | * pointers in SDRAM or SRAM are called depending on the desired low power | ||
145 | * target state. | ||
184 | * - when the OMAP wakes up it continues at different execution points | 146 | * - when the OMAP wakes up it continues at different execution points |
185 | * depending on the low power mode (non-OFF vs OFF modes), | 147 | * depending on the low power mode (non-OFF vs OFF modes), |
186 | * cf. 'Resume path for xxx mode' comments. | 148 | * cf. 'Resume path for xxx mode' comments. |
187 | */ | 149 | */ |
188 | .align 3 | 150 | .align 3 |
189 | ENTRY(omap34xx_cpu_suspend) | 151 | ENTRY(omap34xx_cpu_suspend) |
190 | stmfd sp!, {r0-r12, lr} @ save registers on stack | 152 | stmfd sp!, {r4 - r11, lr} @ save registers on stack |
191 | 153 | ||
192 | /* | 154 | /* |
193 | * r0 contains CPU context save/restore pointer in sdram | 155 | * r0 contains information about saving context: |
194 | * r1 contains information about saving context: | ||
195 | * 0 - No context lost | 156 | * 0 - No context lost |
196 | * 1 - Only L1 and logic lost | 157 | * 1 - Only L1 and logic lost |
197 | * 2 - Only L2 lost (Even L1 is retained we clean it along with L2) | 158 | * 2 - Only L2 lost (Even L1 is retained we clean it along with L2) |
198 | * 3 - Both L1 and L2 lost and logic lost | 159 | * 3 - Both L1 and L2 lost and logic lost |
199 | */ | 160 | */ |
200 | 161 | ||
201 | /* Directly jump to WFI is the context save is not required */ | 162 | /* |
202 | cmp r1, #0x0 | 163 | * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi) |
203 | beq omap3_do_wfi | 164 | * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram) |
165 | */ | ||
166 | ldr r4, omap3_do_wfi_sram_addr | ||
167 | ldr r5, [r4] | ||
168 | cmp r0, #0x0 @ If no context save required, | ||
169 | bxeq r5 @ jump to the WFI code in SRAM | ||
170 | |||
204 | 171 | ||
205 | /* Otherwise fall through to the save context code */ | 172 | /* Otherwise fall through to the save context code */ |
206 | save_context_wfi: | 173 | save_context_wfi: |
207 | mov r8, r0 @ Store SDRAM address in r8 | ||
208 | mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register | ||
209 | mov r4, #0x1 @ Number of parameters for restore call | ||
210 | stmia r8!, {r4-r5} @ Push parameters for restore call | ||
211 | mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register | ||
212 | stmia r8!, {r4-r5} @ Push parameters for restore call | ||
213 | |||
214 | /* Check what that target sleep state is from r1 */ | ||
215 | cmp r1, #0x2 @ Only L2 lost, no need to save context | ||
216 | beq clean_caches | ||
217 | |||
218 | l1_logic_lost: | ||
219 | mov r4, sp @ Store sp | ||
220 | mrs r5, spsr @ Store spsr | ||
221 | mov r6, lr @ Store lr | ||
222 | stmia r8!, {r4-r6} | ||
223 | |||
224 | mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register | ||
225 | mrc p15, 0, r5, c2, c0, 0 @ TTBR0 | ||
226 | mrc p15, 0, r6, c2, c0, 1 @ TTBR1 | ||
227 | mrc p15, 0, r7, c2, c0, 2 @ TTBCR | ||
228 | stmia r8!, {r4-r7} | ||
229 | |||
230 | mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register | ||
231 | mrc p15, 0, r5, c10, c2, 0 @ PRRR | ||
232 | mrc p15, 0, r6, c10, c2, 1 @ NMRR | ||
233 | stmia r8!,{r4-r6} | ||
234 | |||
235 | mrc p15, 0, r4, c13, c0, 1 @ Context ID | ||
236 | mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID | ||
237 | mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address | ||
238 | mrs r7, cpsr @ Store current cpsr | ||
239 | stmia r8!, {r4-r7} | ||
240 | |||
241 | mrc p15, 0, r4, c1, c0, 0 @ save control register | ||
242 | stmia r8!, {r4} | ||
243 | |||
244 | clean_caches: | ||
245 | /* | 174 | /* |
246 | * jump out to kernel flush routine | 175 | * jump out to kernel flush routine |
247 | * - reuse that code is better | 176 | * - reuse that code is better |
@@ -284,7 +213,32 @@ clean_caches: | |||
284 | THUMB( nop ) | 213 | THUMB( nop ) |
285 | .arm | 214 | .arm |
286 | 215 | ||
287 | omap3_do_wfi: | 216 | b omap3_do_wfi |
217 | |||
218 | /* | ||
219 | * Local variables | ||
220 | */ | ||
221 | omap3_do_wfi_sram_addr: | ||
222 | .word omap3_do_wfi_sram | ||
223 | kernel_flush: | ||
224 | .word v7_flush_dcache_all | ||
225 | |||
226 | /* =================================== | ||
227 | * == WFI instruction => Enter idle == | ||
228 | * =================================== | ||
229 | */ | ||
230 | |||
231 | /* | ||
232 | * Do WFI instruction | ||
233 | * Includes the resume path for non-OFF modes | ||
234 | * | ||
235 | * This code gets copied to internal SRAM and is accessible | ||
236 | * from both SDRAM and SRAM: | ||
237 | * - executed from SRAM for non-off modes (omap3_do_wfi_sram), | ||
238 | * - executed from SDRAM for OFF mode (omap3_do_wfi). | ||
239 | */ | ||
240 | .align 3 | ||
241 | ENTRY(omap3_do_wfi) | ||
288 | ldr r4, sdrc_power @ read the SDRC_POWER register | 242 | ldr r4, sdrc_power @ read the SDRC_POWER register |
289 | ldr r5, [r4] @ read the contents of SDRC_POWER | 243 | ldr r5, [r4] @ read the contents of SDRC_POWER |
290 | orr r5, r5, #0x40 @ enable self refresh on idle req | 244 | orr r5, r5, #0x40 @ enable self refresh on idle req |
@@ -316,8 +270,86 @@ omap3_do_wfi: | |||
316 | nop | 270 | nop |
317 | nop | 271 | nop |
318 | nop | 272 | nop |
319 | bl wait_sdrc_ok | ||
320 | 273 | ||
274 | /* | ||
275 | * This function implements the erratum ID i581 WA: | ||
276 | * SDRC state restore before accessing the SDRAM | ||
277 | * | ||
278 | * Only used at return from non-OFF mode. For OFF | ||
279 | * mode the ROM code configures the SDRC and | ||
280 | * the DPLL before calling the restore code directly | ||
281 | * from DDR. | ||
282 | */ | ||
283 | |||
284 | /* Make sure SDRC accesses are ok */ | ||
285 | wait_sdrc_ok: | ||
286 | |||
287 | /* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */ | ||
288 | ldr r4, cm_idlest_ckgen | ||
289 | wait_dpll3_lock: | ||
290 | ldr r5, [r4] | ||
291 | tst r5, #1 | ||
292 | beq wait_dpll3_lock | ||
293 | |||
294 | ldr r4, cm_idlest1_core | ||
295 | wait_sdrc_ready: | ||
296 | ldr r5, [r4] | ||
297 | tst r5, #0x2 | ||
298 | bne wait_sdrc_ready | ||
299 | /* allow DLL powerdown upon hw idle req */ | ||
300 | ldr r4, sdrc_power | ||
301 | ldr r5, [r4] | ||
302 | bic r5, r5, #0x40 | ||
303 | str r5, [r4] | ||
304 | |||
305 | /* | ||
306 | * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a | ||
307 | * base instead. | ||
308 | * Be careful not to clobber r7 when maintaining this code. | ||
309 | */ | ||
310 | |||
311 | is_dll_in_lock_mode: | ||
312 | /* Is dll in lock mode? */ | ||
313 | ldr r4, sdrc_dlla_ctrl | ||
314 | ldr r5, [r4] | ||
315 | tst r5, #0x4 | ||
316 | bne exit_nonoff_modes @ Return if locked | ||
317 | /* wait till dll locks */ | ||
318 | adr r7, kick_counter | ||
319 | wait_dll_lock_timed: | ||
320 | ldr r4, wait_dll_lock_counter | ||
321 | add r4, r4, #1 | ||
322 | str r4, [r7, #wait_dll_lock_counter - kick_counter] | ||
323 | ldr r4, sdrc_dlla_status | ||
324 | /* Wait 20uS for lock */ | ||
325 | mov r6, #8 | ||
326 | wait_dll_lock: | ||
327 | subs r6, r6, #0x1 | ||
328 | beq kick_dll | ||
329 | ldr r5, [r4] | ||
330 | and r5, r5, #0x4 | ||
331 | cmp r5, #0x4 | ||
332 | bne wait_dll_lock | ||
333 | b exit_nonoff_modes @ Return when locked | ||
334 | |||
335 | /* disable/reenable DLL if not locked */ | ||
336 | kick_dll: | ||
337 | ldr r4, sdrc_dlla_ctrl | ||
338 | ldr r5, [r4] | ||
339 | mov r6, r5 | ||
340 | bic r6, #(1<<3) @ disable dll | ||
341 | str r6, [r4] | ||
342 | dsb | ||
343 | orr r6, r6, #(1<<3) @ enable dll | ||
344 | str r6, [r4] | ||
345 | dsb | ||
346 | ldr r4, kick_counter | ||
347 | add r4, r4, #1 | ||
348 | str r4, [r7] @ kick_counter | ||
349 | b wait_dll_lock_timed | ||
350 | |||
351 | exit_nonoff_modes: | ||
352 | /* Re-enable C-bit if needed */ | ||
321 | mrc p15, 0, r0, c1, c0, 0 | 353 | mrc p15, 0, r0, c1, c0, 0 |
322 | tst r0, #(1 << 2) @ Check C bit enabled? | 354 | tst r0, #(1 << 2) @ Check C bit enabled? |
323 | orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared | 355 | orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared |
@@ -329,7 +361,32 @@ omap3_do_wfi: | |||
329 | * == Exit point from non-OFF modes == | 361 | * == Exit point from non-OFF modes == |
330 | * =================================== | 362 | * =================================== |
331 | */ | 363 | */ |
332 | ldmfd sp!, {r0-r12, pc} @ restore regs and return | 364 | ldmfd sp!, {r4 - r11, pc} @ restore regs and return |
365 | |||
366 | /* | ||
367 | * Local variables | ||
368 | */ | ||
369 | sdrc_power: | ||
370 | .word SDRC_POWER_V | ||
371 | cm_idlest1_core: | ||
372 | .word CM_IDLEST1_CORE_V | ||
373 | cm_idlest_ckgen: | ||
374 | .word CM_IDLEST_CKGEN_V | ||
375 | sdrc_dlla_status: | ||
376 | .word SDRC_DLLA_STATUS_V | ||
377 | sdrc_dlla_ctrl: | ||
378 | .word SDRC_DLLA_CTRL_V | ||
379 | /* | ||
380 | * When exporting to userspace while the counters are in SRAM, | ||
381 | * these 2 words need to be at the end to facilitate retrieval! | ||
382 | */ | ||
383 | kick_counter: | ||
384 | .word 0 | ||
385 | wait_dll_lock_counter: | ||
386 | .word 0 | ||
387 | |||
388 | ENTRY(omap3_do_wfi_sz) | ||
389 | .word . - omap3_do_wfi | ||
333 | 390 | ||
334 | 391 | ||
335 | /* | 392 | /* |
@@ -346,13 +403,17 @@ omap3_do_wfi: | |||
346 | * restore_es3: applies to 34xx >= ES3.0 | 403 | * restore_es3: applies to 34xx >= ES3.0 |
347 | * restore_3630: applies to 36xx | 404 | * restore_3630: applies to 36xx |
348 | * restore: common code for 3xxx | 405 | * restore: common code for 3xxx |
406 | * | ||
407 | * Note: when back from CORE and MPU OFF mode we are running | ||
408 | * from SDRAM, without MMU, without the caches and prediction. | ||
409 | * Also the SRAM content has been cleared. | ||
349 | */ | 410 | */ |
350 | restore_es3: | 411 | ENTRY(omap3_restore_es3) |
351 | ldr r5, pm_prepwstst_core_p | 412 | ldr r5, pm_prepwstst_core_p |
352 | ldr r4, [r5] | 413 | ldr r4, [r5] |
353 | and r4, r4, #0x3 | 414 | and r4, r4, #0x3 |
354 | cmp r4, #0x0 @ Check if previous power state of CORE is OFF | 415 | cmp r4, #0x0 @ Check if previous power state of CORE is OFF |
355 | bne restore | 416 | bne omap3_restore @ Fall through to OMAP3 common code |
356 | adr r0, es3_sdrc_fix | 417 | adr r0, es3_sdrc_fix |
357 | ldr r1, sram_base | 418 | ldr r1, sram_base |
358 | ldr r2, es3_sdrc_fix_sz | 419 | ldr r2, es3_sdrc_fix_sz |
@@ -364,35 +425,32 @@ copy_to_sram: | |||
364 | bne copy_to_sram | 425 | bne copy_to_sram |
365 | ldr r1, sram_base | 426 | ldr r1, sram_base |
366 | blx r1 | 427 | blx r1 |
367 | b restore | 428 | b omap3_restore @ Fall through to OMAP3 common code |
429 | ENDPROC(omap3_restore_es3) | ||
368 | 430 | ||
369 | restore_3630: | 431 | ENTRY(omap3_restore_3630) |
370 | ldr r1, pm_prepwstst_core_p | 432 | ldr r1, pm_prepwstst_core_p |
371 | ldr r2, [r1] | 433 | ldr r2, [r1] |
372 | and r2, r2, #0x3 | 434 | and r2, r2, #0x3 |
373 | cmp r2, #0x0 @ Check if previous power state of CORE is OFF | 435 | cmp r2, #0x0 @ Check if previous power state of CORE is OFF |
374 | bne restore | 436 | bne omap3_restore @ Fall through to OMAP3 common code |
375 | /* Disable RTA before giving control */ | 437 | /* Disable RTA before giving control */ |
376 | ldr r1, control_mem_rta | 438 | ldr r1, control_mem_rta |
377 | mov r2, #OMAP36XX_RTA_DISABLE | 439 | mov r2, #OMAP36XX_RTA_DISABLE |
378 | str r2, [r1] | 440 | str r2, [r1] |
441 | ENDPROC(omap3_restore_3630) | ||
379 | 442 | ||
380 | /* Fall through to common code for the remaining logic */ | 443 | /* Fall through to common code for the remaining logic */ |
381 | 444 | ||
382 | restore: | 445 | ENTRY(omap3_restore) |
383 | /* | 446 | /* |
384 | * Check what was the reason for mpu reset and store the reason in r9: | 447 | * Read the pwstctrl register to check the reason for mpu reset. |
385 | * 0 - No context lost | 448 | * This tells us what was lost. |
386 | * 1 - Only L1 and logic lost | ||
387 | * 2 - Only L2 lost - In this case, we wont be here | ||
388 | * 3 - Both L1 and L2 lost | ||
389 | */ | 449 | */ |
390 | ldr r1, pm_pwstctrl_mpu | 450 | ldr r1, pm_pwstctrl_mpu |
391 | ldr r2, [r1] | 451 | ldr r2, [r1] |
392 | and r2, r2, #0x3 | 452 | and r2, r2, #0x3 |
393 | cmp r2, #0x0 @ Check if target power state was OFF or RET | 453 | cmp r2, #0x0 @ Check if target power state was OFF or RET |
394 | moveq r9, #0x3 @ MPU OFF => L1 and L2 lost | ||
395 | movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation | ||
396 | bne logic_l1_restore | 454 | bne logic_l1_restore |
397 | 455 | ||
398 | ldr r0, l2dis_3630 | 456 | ldr r0, l2dis_3630 |
@@ -471,115 +529,39 @@ logic_l1_restore: | |||
471 | orr r1, r1, #2 @ re-enable L2 cache | 529 | orr r1, r1, #2 @ re-enable L2 cache |
472 | mcr p15, 0, r1, c1, c0, 1 | 530 | mcr p15, 0, r1, c1, c0, 1 |
473 | skipl2reen: | 531 | skipl2reen: |
474 | mov r1, #0 | ||
475 | /* | ||
476 | * Invalidate all instruction caches to PoU | ||
477 | * and flush branch target cache | ||
478 | */ | ||
479 | mcr p15, 0, r1, c7, c5, 0 | ||
480 | 532 | ||
481 | ldr r4, scratchpad_base | 533 | /* Now branch to the common CPU resume function */ |
482 | ldr r3, [r4,#0xBC] | 534 | b cpu_resume |
483 | adds r3, r3, #16 | 535 | ENDPROC(omap3_restore) |
484 | 536 | ||
485 | ldmia r3!, {r4-r6} | 537 | .ltorg |
486 | mov sp, r4 @ Restore sp | ||
487 | msr spsr_cxsf, r5 @ Restore spsr | ||
488 | mov lr, r6 @ Restore lr | ||
489 | |||
490 | ldmia r3!, {r4-r7} | ||
491 | mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register | ||
492 | mcr p15, 0, r5, c2, c0, 0 @ TTBR0 | ||
493 | mcr p15, 0, r6, c2, c0, 1 @ TTBR1 | ||
494 | mcr p15, 0, r7, c2, c0, 2 @ TTBCR | ||
495 | |||
496 | ldmia r3!,{r4-r6} | ||
497 | mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register | ||
498 | mcr p15, 0, r5, c10, c2, 0 @ PRRR | ||
499 | mcr p15, 0, r6, c10, c2, 1 @ NMRR | ||
500 | |||
501 | |||
502 | ldmia r3!,{r4-r7} | ||
503 | mcr p15, 0, r4, c13, c0, 1 @ Context ID | ||
504 | mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID | ||
505 | mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address | ||
506 | msr cpsr, r7 @ store cpsr | ||
507 | |||
508 | /* Enabling MMU here */ | ||
509 | mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl | ||
510 | /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */ | ||
511 | and r7, #0x7 | ||
512 | cmp r7, #0x0 | ||
513 | beq usettbr0 | ||
514 | ttbr_error: | ||
515 | /* | ||
516 | * More work needs to be done to support N[0:2] value other than 0 | ||
517 | * So looping here so that the error can be detected | ||
518 | */ | ||
519 | b ttbr_error | ||
520 | usettbr0: | ||
521 | mrc p15, 0, r2, c2, c0, 0 | ||
522 | ldr r5, ttbrbit_mask | ||
523 | and r2, r5 | ||
524 | mov r4, pc | ||
525 | ldr r5, table_index_mask | ||
526 | and r4, r5 @ r4 = 31 to 20 bits of pc | ||
527 | /* Extract the value to be written to table entry */ | ||
528 | ldr r1, table_entry | ||
529 | /* r1 has the value to be written to table entry*/ | ||
530 | add r1, r1, r4 | ||
531 | /* Getting the address of table entry to modify */ | ||
532 | lsr r4, #18 | ||
533 | /* r2 has the location which needs to be modified */ | ||
534 | add r2, r4 | ||
535 | /* Storing previous entry of location being modified */ | ||
536 | ldr r5, scratchpad_base | ||
537 | ldr r4, [r2] | ||
538 | str r4, [r5, #0xC0] | ||
539 | /* Modify the table entry */ | ||
540 | str r1, [r2] | ||
541 | /* | ||
542 | * Storing address of entry being modified | ||
543 | * - will be restored after enabling MMU | ||
544 | */ | ||
545 | ldr r5, scratchpad_base | ||
546 | str r2, [r5, #0xC4] | ||
547 | |||
548 | mov r0, #0 | ||
549 | mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer | ||
550 | mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array | ||
551 | mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB | ||
552 | mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB | ||
553 | /* | ||
554 | * Restore control register. This enables the MMU. | ||
555 | * The caches and prediction are not enabled here, they | ||
556 | * will be enabled after restoring the MMU table entry. | ||
557 | */ | ||
558 | ldmia r3!, {r4} | ||
559 | /* Store previous value of control register in scratchpad */ | ||
560 | str r4, [r5, #0xC8] | ||
561 | ldr r2, cache_pred_disable_mask | ||
562 | and r4, r2 | ||
563 | mcr p15, 0, r4, c1, c0, 0 | ||
564 | dsb | ||
565 | isb | ||
566 | ldr r0, =restoremmu_on | ||
567 | bx r0 | ||
568 | 538 | ||
569 | /* | 539 | /* |
570 | * ============================== | 540 | * Local variables |
571 | * == Exit point from OFF mode == | ||
572 | * ============================== | ||
573 | */ | 541 | */ |
574 | restoremmu_on: | 542 | pm_prepwstst_core_p: |
575 | ldmfd sp!, {r0-r12, pc} @ restore regs and return | 543 | .word PM_PREPWSTST_CORE_P |
576 | 544 | pm_pwstctrl_mpu: | |
545 | .word PM_PWSTCTRL_MPU_P | ||
546 | scratchpad_base: | ||
547 | .word SCRATCHPAD_BASE_P | ||
548 | sram_base: | ||
549 | .word SRAM_BASE_P + 0x8000 | ||
550 | control_stat: | ||
551 | .word CONTROL_STAT | ||
552 | control_mem_rta: | ||
553 | .word CONTROL_MEM_RTA_CTRL | ||
554 | l2dis_3630: | ||
555 | .word 0 | ||
577 | 556 | ||
578 | /* | 557 | /* |
579 | * Internal functions | 558 | * Internal functions |
580 | */ | 559 | */ |
581 | 560 | ||
582 | /* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */ | 561 | /* |
562 | * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 | ||
563 | * Copied to and run from SRAM in order to reconfigure the SDRC parameters. | ||
564 | */ | ||
583 | .text | 565 | .text |
584 | .align 3 | 566 | .align 3 |
585 | ENTRY(es3_sdrc_fix) | 567 | ENTRY(es3_sdrc_fix) |
@@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix) | |||
609 | str r5, [r4] @ kick off refreshes | 591 | str r5, [r4] @ kick off refreshes |
610 | bx lr | 592 | bx lr |
611 | 593 | ||
594 | /* | ||
595 | * Local variables | ||
596 | */ | ||
612 | .align | 597 | .align |
613 | sdrc_syscfg: | 598 | sdrc_syscfg: |
614 | .word SDRC_SYSCONFIG_P | 599 | .word SDRC_SYSCONFIG_P |
@@ -627,128 +612,3 @@ sdrc_manual_1: | |||
627 | ENDPROC(es3_sdrc_fix) | 612 | ENDPROC(es3_sdrc_fix) |
628 | ENTRY(es3_sdrc_fix_sz) | 613 | ENTRY(es3_sdrc_fix_sz) |
629 | .word . - es3_sdrc_fix | 614 | .word . - es3_sdrc_fix |
630 | |||
631 | /* | ||
632 | * This function implements the erratum ID i581 WA: | ||
633 | * SDRC state restore before accessing the SDRAM | ||
634 | * | ||
635 | * Only used at return from non-OFF mode. For OFF | ||
636 | * mode the ROM code configures the SDRC and | ||
637 | * the DPLL before calling the restore code directly | ||
638 | * from DDR. | ||
639 | */ | ||
640 | |||
641 | /* Make sure SDRC accesses are ok */ | ||
642 | wait_sdrc_ok: | ||
643 | |||
644 | /* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */ | ||
645 | ldr r4, cm_idlest_ckgen | ||
646 | wait_dpll3_lock: | ||
647 | ldr r5, [r4] | ||
648 | tst r5, #1 | ||
649 | beq wait_dpll3_lock | ||
650 | |||
651 | ldr r4, cm_idlest1_core | ||
652 | wait_sdrc_ready: | ||
653 | ldr r5, [r4] | ||
654 | tst r5, #0x2 | ||
655 | bne wait_sdrc_ready | ||
656 | /* allow DLL powerdown upon hw idle req */ | ||
657 | ldr r4, sdrc_power | ||
658 | ldr r5, [r4] | ||
659 | bic r5, r5, #0x40 | ||
660 | str r5, [r4] | ||
661 | |||
662 | /* | ||
663 | * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a | ||
664 | * base instead. | ||
665 | * Be careful not to clobber r7 when maintaing this code. | ||
666 | */ | ||
667 | |||
668 | is_dll_in_lock_mode: | ||
669 | /* Is dll in lock mode? */ | ||
670 | ldr r4, sdrc_dlla_ctrl | ||
671 | ldr r5, [r4] | ||
672 | tst r5, #0x4 | ||
673 | bxne lr @ Return if locked | ||
674 | /* wait till dll locks */ | ||
675 | adr r7, kick_counter | ||
676 | wait_dll_lock_timed: | ||
677 | ldr r4, wait_dll_lock_counter | ||
678 | add r4, r4, #1 | ||
679 | str r4, [r7, #wait_dll_lock_counter - kick_counter] | ||
680 | ldr r4, sdrc_dlla_status | ||
681 | /* Wait 20uS for lock */ | ||
682 | mov r6, #8 | ||
683 | wait_dll_lock: | ||
684 | subs r6, r6, #0x1 | ||
685 | beq kick_dll | ||
686 | ldr r5, [r4] | ||
687 | and r5, r5, #0x4 | ||
688 | cmp r5, #0x4 | ||
689 | bne wait_dll_lock | ||
690 | bx lr @ Return when locked | ||
691 | |||
692 | /* disable/reenable DLL if not locked */ | ||
693 | kick_dll: | ||
694 | ldr r4, sdrc_dlla_ctrl | ||
695 | ldr r5, [r4] | ||
696 | mov r6, r5 | ||
697 | bic r6, #(1<<3) @ disable dll | ||
698 | str r6, [r4] | ||
699 | dsb | ||
700 | orr r6, r6, #(1<<3) @ enable dll | ||
701 | str r6, [r4] | ||
702 | dsb | ||
703 | ldr r4, kick_counter | ||
704 | add r4, r4, #1 | ||
705 | str r4, [r7] @ kick_counter | ||
706 | b wait_dll_lock_timed | ||
707 | |||
708 | .align | ||
709 | cm_idlest1_core: | ||
710 | .word CM_IDLEST1_CORE_V | ||
711 | cm_idlest_ckgen: | ||
712 | .word CM_IDLEST_CKGEN_V | ||
713 | sdrc_dlla_status: | ||
714 | .word SDRC_DLLA_STATUS_V | ||
715 | sdrc_dlla_ctrl: | ||
716 | .word SDRC_DLLA_CTRL_V | ||
717 | pm_prepwstst_core_p: | ||
718 | .word PM_PREPWSTST_CORE_P | ||
719 | pm_pwstctrl_mpu: | ||
720 | .word PM_PWSTCTRL_MPU_P | ||
721 | scratchpad_base: | ||
722 | .word SCRATCHPAD_BASE_P | ||
723 | sram_base: | ||
724 | .word SRAM_BASE_P + 0x8000 | ||
725 | sdrc_power: | ||
726 | .word SDRC_POWER_V | ||
727 | ttbrbit_mask: | ||
728 | .word 0xFFFFC000 | ||
729 | table_index_mask: | ||
730 | .word 0xFFF00000 | ||
731 | table_entry: | ||
732 | .word 0x00000C02 | ||
733 | cache_pred_disable_mask: | ||
734 | .word 0xFFFFE7FB | ||
735 | control_stat: | ||
736 | .word CONTROL_STAT | ||
737 | control_mem_rta: | ||
738 | .word CONTROL_MEM_RTA_CTRL | ||
739 | kernel_flush: | ||
740 | .word v7_flush_dcache_all | ||
741 | l2dis_3630: | ||
742 | .word 0 | ||
743 | /* | ||
744 | * When exporting to userspace while the counters are in SRAM, | ||
745 | * these 2 words need to be at the end to facilitate retrival! | ||
746 | */ | ||
747 | kick_counter: | ||
748 | .word 0 | ||
749 | wait_dll_lock_counter: | ||
750 | .word 0 | ||
751 | ENDPROC(omap34xx_cpu_suspend) | ||
752 | |||
753 | ENTRY(omap34xx_cpu_suspend_sz) | ||
754 | .word . - omap34xx_cpu_suspend | ||
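The ".word . - omap3_do_wfi" size symbol emitted after the new omap3_do_wfi above plays the same role the removed omap34xx_cpu_suspend_sz played for the old, much larger routine: it tells the C side how many bytes to copy into SRAM. A hedged sketch of how such a start/size pair is consumed, with omap_sram_push() assumed from plat/sram.h as in omap_push_sram_idle():

extern void omap3_do_wfi(void);			/* start of the routine */
extern unsigned int omap3_do_wfi_sz;		/* ".word . - omap3_do_wfi" */

static void (*sketch_do_wfi_sram)(void);

static void sketch_push_idle_code(void)
{
	/* Copy omap3_do_wfi_sz bytes into SRAM and keep the SRAM alias */
	sketch_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
}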
diff --git a/arch/arm/mach-pnx4008/include/mach/entry-macro.S b/arch/arm/mach-pnx4008/include/mach/entry-macro.S index 8003037578ed..db7eeebf30d7 100644 --- a/arch/arm/mach-pnx4008/include/mach/entry-macro.S +++ b/arch/arm/mach-pnx4008/include/mach/entry-macro.S | |||
@@ -120,8 +120,3 @@ | |||
120 | 1003: | 120 | 1003: |
121 | .endm | 121 | .endm |
122 | 122 | ||
123 | |||
124 | .macro irq_prio_table | ||
125 | .endm | ||
126 | |||
127 | |||
diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h index f15afe012995..51558bcee999 100644 --- a/arch/arm/mach-pxa/include/mach/pm.h +++ b/arch/arm/mach-pxa/include/mach/pm.h | |||
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns { | |||
22 | extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; | 22 | extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns; |
23 | 23 | ||
24 | /* sleep.S */ | 24 | /* sleep.S */ |
25 | extern void pxa25x_cpu_suspend(unsigned int, long); | 25 | extern int pxa25x_finish_suspend(unsigned long); |
26 | extern void pxa27x_cpu_suspend(unsigned int, long); | 26 | extern int pxa27x_finish_suspend(unsigned long); |
27 | 27 | ||
28 | extern int pxa_pm_enter(suspend_state_t state); | 28 | extern int pxa_pm_enter(suspend_state_t state); |
29 | extern int pxa_pm_prepare(void); | 29 | extern int pxa_pm_prepare(void); |
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c index 87ae3129f4f7..b27544bcafcb 100644 --- a/arch/arm/mach-pxa/mfp-pxa2xx.c +++ b/arch/arm/mach-pxa/mfp-pxa2xx.c | |||
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void) | |||
347 | if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && | 347 | if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) && |
348 | (GPDR(i) & GPIO_bit(i))) { | 348 | (GPDR(i) & GPIO_bit(i))) { |
349 | if (GPLR(i) & GPIO_bit(i)) | 349 | if (GPLR(i) & GPIO_bit(i)) |
350 | PGSR(i) |= GPIO_bit(i); | 350 | PGSR(gpio_to_bank(i)) |= GPIO_bit(i); |
351 | else | 351 | else |
352 | PGSR(i) &= ~GPIO_bit(i); | 352 | PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i); |
353 | } | 353 | } |
354 | } | 354 | } |
355 | 355 | ||
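For context on the one-line change above: PGSR, the power-manager GPIO sleep-state register, exists once per bank of 32 GPIOs and is indexed by bank number, whereas the GPLR()/GPDR() accessors in the same loop take the GPIO number directly. Passing the raw GPIO number to PGSR() therefore selects the wrong register for essentially every GPIO, which is what the switch to gpio_to_bank(i) fixes. The sketch below only illustrates that relationship; gpio_to_bank() and GPIO_bit() follow the usual PXA definitions, and PGSR() is assumed to come from the PXA register headers.

    /* One PGSR register covers 32 GPIOs; the bit picks the GPIO within its bank. */
    #define gpio_to_bank(gpio)      ((gpio) >> 5)
    #define GPIO_bit(gpio)          (1 << ((gpio) & 0x1f))

    static void example_keep_output_high(int gpio)
    {
            /* Latch this output high while the chip sleeps. */
            PGSR(gpio_to_bank(gpio)) |= GPIO_bit(gpio);
    }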
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index 65f24f0b77e8..5a5329bc33f1 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/i2c-gpio.h> | 33 | #include <linux/i2c-gpio.h> |
34 | 34 | ||
35 | #include <asm/mach-types.h> | 35 | #include <asm/mach-types.h> |
36 | #include <asm/suspend.h> | ||
36 | #include <asm/mach/arch.h> | 37 | #include <asm/mach/arch.h> |
37 | #include <asm/mach/map.h> | 38 | #include <asm/mach/map.h> |
38 | 39 | ||
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c index 51e1583265b2..37178a8559b1 100644 --- a/arch/arm/mach-pxa/pm.c +++ b/arch/arm/mach-pxa/pm.c | |||
@@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state) | |||
42 | 42 | ||
43 | /* *** go zzz *** */ | 43 | /* *** go zzz *** */ |
44 | pxa_cpu_pm_fns->enter(state); | 44 | pxa_cpu_pm_fns->enter(state); |
45 | cpu_init(); | ||
46 | 45 | ||
47 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { | 46 | if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) { |
48 | /* after sleeping, validate the checksum */ | 47 | /* after sleeping, validate the checksum */ |
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fed363cec9c6..9c434d21a271 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | 26 | ||
27 | #include <asm/mach/map.h> | 27 | #include <asm/mach/map.h> |
28 | #include <asm/suspend.h> | ||
28 | #include <mach/hardware.h> | 29 | #include <mach/hardware.h> |
29 | #include <mach/irqs.h> | 30 | #include <mach/irqs.h> |
30 | #include <mach/gpio.h> | 31 | #include <mach/gpio.h> |
@@ -244,7 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state) | |||
244 | 245 | ||
245 | switch (state) { | 246 | switch (state) { |
246 | case PM_SUSPEND_MEM: | 247 | case PM_SUSPEND_MEM: |
247 | pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 248 | cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend); |
248 | break; | 249 | break; |
249 | } | 250 | } |
250 | } | 251 | } |
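The pxa25x change above is representative of the conversion applied throughout this series: the old pxa25x_cpu_suspend(mode, v:p offset) entry point, which saved registers and invoked the low-level suspend path itself, becomes a plain finisher handed to the generic cpu_suspend() helper, matching the int (*)(unsigned long) prototypes now declared in mach/pm.h. A minimal sketch of the resulting shape, with hypothetical example_* names and cpu_do_idle() standing in for the SoC-specific sleep entry:

    #include <asm/suspend.h>        /* cpu_suspend() */
    #include <asm/proc-fns.h>       /* cpu_do_idle(), used here only as a stand-in */

    static int example_finish_suspend(unsigned long pwrmode)
    {
            /*
             * CPU context has already been saved by cpu_suspend().  Program
             * the power controller with 'pwrmode' and stop the core; on a
             * successful suspend this function never returns, and execution
             * resumes through cpu_resume() instead.
             */
            cpu_do_idle();
            return 0;               /* reached only if the suspend fell through */
    }

    static void example_pm_enter(void)
    {
            cpu_suspend(0, example_finish_suspend);
            /* execution continues here after wake-up */
    }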
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 2fecbec58d88..9d2400b5f503 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/mach/map.h> | 24 | #include <asm/mach/map.h> |
25 | #include <mach/hardware.h> | 25 | #include <mach/hardware.h> |
26 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
27 | #include <asm/suspend.h> | ||
27 | #include <mach/irqs.h> | 28 | #include <mach/irqs.h> |
28 | #include <mach/gpio.h> | 29 | #include <mach/gpio.h> |
29 | #include <mach/pxa27x.h> | 30 | #include <mach/pxa27x.h> |
@@ -284,6 +285,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save) | |||
284 | void pxa27x_cpu_pm_enter(suspend_state_t state) | 285 | void pxa27x_cpu_pm_enter(suspend_state_t state) |
285 | { | 286 | { |
286 | extern void pxa_cpu_standby(void); | 287 | extern void pxa_cpu_standby(void); |
288 | #ifndef CONFIG_IWMMXT | ||
289 | u64 acc0; | ||
290 | |||
291 | asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); | ||
292 | #endif | ||
287 | 293 | ||
288 | /* ensure voltage-change sequencer not initiated, which hangs */ | 294 | /* ensure voltage-change sequencer not initiated, which hangs */ |
289 | PCFR &= ~PCFR_FVC; | 295 | PCFR &= ~PCFR_FVC; |
@@ -299,7 +305,10 @@ void pxa27x_cpu_pm_enter(suspend_state_t state) | |||
299 | pxa_cpu_standby(); | 305 | pxa_cpu_standby(); |
300 | break; | 306 | break; |
301 | case PM_SUSPEND_MEM: | 307 | case PM_SUSPEND_MEM: |
302 | pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 308 | cpu_suspend(pwrmode, pxa27x_finish_suspend); |
309 | #ifndef CONFIG_IWMMXT | ||
310 | asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); | ||
311 | #endif | ||
303 | break; | 312 | break; |
304 | } | 313 | } |
305 | } | 314 | } |
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c index 8521d7d6f1da..ef1c56a67afc 100644 --- a/arch/arm/mach-pxa/pxa3xx.c +++ b/arch/arm/mach-pxa/pxa3xx.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/i2c/pxa-i2c.h> | 24 | #include <linux/i2c/pxa-i2c.h> |
25 | 25 | ||
26 | #include <asm/mach/map.h> | 26 | #include <asm/mach/map.h> |
27 | #include <asm/suspend.h> | ||
27 | #include <mach/hardware.h> | 28 | #include <mach/hardware.h> |
28 | #include <mach/gpio.h> | 29 | #include <mach/gpio.h> |
29 | #include <mach/pxa3xx-regs.h> | 30 | #include <mach/pxa3xx-regs.h> |
@@ -141,8 +142,13 @@ static void pxa3xx_cpu_pm_suspend(void) | |||
141 | { | 142 | { |
142 | volatile unsigned long *p = (volatile void *)0xc0000000; | 143 | volatile unsigned long *p = (volatile void *)0xc0000000; |
143 | unsigned long saved_data = *p; | 144 | unsigned long saved_data = *p; |
145 | #ifndef CONFIG_IWMMXT | ||
146 | u64 acc0; | ||
144 | 147 | ||
145 | extern void pxa3xx_cpu_suspend(long); | 148 | asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0)); |
149 | #endif | ||
150 | |||
151 | extern int pxa3xx_finish_suspend(unsigned long); | ||
146 | 152 | ||
147 | /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ | 153 | /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */ |
148 | CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); | 154 | CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM); |
@@ -162,11 +168,15 @@ static void pxa3xx_cpu_pm_suspend(void) | |||
162 | /* overwrite with the resume address */ | 168 | /* overwrite with the resume address */ |
163 | *p = virt_to_phys(cpu_resume); | 169 | *p = virt_to_phys(cpu_resume); |
164 | 170 | ||
165 | pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); | 171 | cpu_suspend(0, pxa3xx_finish_suspend); |
166 | 172 | ||
167 | *p = saved_data; | 173 | *p = saved_data; |
168 | 174 | ||
169 | AD3ER = 0; | 175 | AD3ER = 0; |
176 | |||
177 | #ifndef CONFIG_IWMMXT | ||
178 | asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0)); | ||
179 | #endif | ||
170 | } | 180 | } |
171 | 181 | ||
172 | static void pxa3xx_cpu_pm_enter(suspend_state_t state) | 182 | static void pxa3xx_cpu_pm_enter(suspend_state_t state) |
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c index d130f77b6d11..2f37d43f51b6 100644 --- a/arch/arm/mach-pxa/raumfeld.c +++ b/arch/arm/mach-pxa/raumfeld.c | |||
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = { | |||
573 | .xres = 480, | 573 | .xres = 480, |
574 | .yres = 272, | 574 | .yres = 272, |
575 | .bpp = 16, | 575 | .bpp = 16, |
576 | .hsync_len = 4, | 576 | .hsync_len = 41, |
577 | .left_margin = 2, | 577 | .left_margin = 2, |
578 | .right_margin = 1, | 578 | .right_margin = 1, |
579 | .vsync_len = 1, | 579 | .vsync_len = 10, |
580 | .upper_margin = 3, | 580 | .upper_margin = 3, |
581 | .lower_margin = 1, | 581 | .lower_margin = 1, |
582 | .sync = 0, | 582 | .sync = 0, |
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void) | |||
596 | { | 596 | { |
597 | int ret; | 597 | int ret; |
598 | 598 | ||
599 | pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); | ||
600 | |||
601 | /* Earlier devices had the backlight regulator controlled | ||
602 | * via PWM, later versions use another controller for that */ | ||
603 | if ((system_rev & 0xff) < 2) { | ||
604 | mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; | ||
605 | pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); | ||
606 | platform_device_register(&raumfeld_pwm_backlight_device); | ||
607 | } else | ||
608 | platform_device_register(&raumfeld_lt3593_device); | ||
609 | |||
610 | ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); | 599 | ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable"); |
611 | if (ret < 0) | 600 | if (ret < 0) |
612 | pr_warning("Unable to request GPIO_TFT_VA_EN\n"); | 601 | pr_warning("Unable to request GPIO_TFT_VA_EN\n"); |
613 | else | 602 | else |
614 | gpio_direction_output(GPIO_TFT_VA_EN, 1); | 603 | gpio_direction_output(GPIO_TFT_VA_EN, 1); |
615 | 604 | ||
605 | msleep(100); | ||
606 | |||
616 | ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable"); | 607 | ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable"); |
617 | if (ret < 0) | 608 | if (ret < 0) |
618 | pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n"); | 609 | pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n"); |
619 | else | 610 | else |
620 | gpio_direction_output(GPIO_DISPLAY_ENABLE, 1); | 611 | gpio_direction_output(GPIO_DISPLAY_ENABLE, 1); |
621 | 612 | ||
613 | /* Hardware revision 2 has the backlight regulator controlled | ||
614 | * by an LT3593, earlier and later devices use PWM for that. */ | ||
615 | if ((system_rev & 0xff) == 2) { | ||
616 | platform_device_register(&raumfeld_lt3593_device); | ||
617 | } else { | ||
618 | mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT; | ||
619 | pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1); | ||
620 | platform_device_register(&raumfeld_pwm_backlight_device); | ||
621 | } | ||
622 | |||
623 | pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info); | ||
622 | platform_device_register(&pxa3xx_device_gcu); | 624 | platform_device_register(&pxa3xx_device_gcu); |
623 | } | 625 | } |
624 | 626 | ||
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = { | |||
657 | 659 | ||
658 | #define SPI_AK4104 \ | 660 | #define SPI_AK4104 \ |
659 | { \ | 661 | { \ |
660 | .modalias = "ak4104", \ | 662 | .modalias = "ak4104-codec", \ |
661 | .max_speed_hz = 10000, \ | 663 | .max_speed_hz = 10000, \ |
662 | .bus_num = 0, \ | 664 | .bus_num = 0, \ |
663 | .chip_select = 0, \ | 665 | .chip_select = 0, \ |
664 | .controller_data = (void *) GPIO_SPDIF_CS, \ | 666 | .controller_data = (void *) GPIO_SPDIF_CS, \ |
665 | } | 667 | } |
666 | 668 | ||
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S index 6f5368899d84..1e544be9905d 100644 --- a/arch/arm/mach-pxa/sleep.S +++ b/arch/arm/mach-pxa/sleep.S | |||
@@ -24,20 +24,9 @@ | |||
24 | 24 | ||
25 | #ifdef CONFIG_PXA3xx | 25 | #ifdef CONFIG_PXA3xx |
26 | /* | 26 | /* |
27 | * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4) | 27 | * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4) |
28 | * | ||
29 | * r0 = v:p offset | ||
30 | */ | 28 | */ |
31 | ENTRY(pxa3xx_cpu_suspend) | 29 | ENTRY(pxa3xx_finish_suspend) |
32 | |||
33 | #ifndef CONFIG_IWMMXT | ||
34 | mra r2, r3, acc0 | ||
35 | #endif | ||
36 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | ||
37 | mov r1, r0 | ||
38 | ldr r3, =pxa_cpu_resume @ resume function | ||
39 | bl cpu_suspend | ||
40 | |||
41 | mov r0, #0x06 @ S2D3C4 mode | 30 | mov r0, #0x06 @ S2D3C4 mode |
42 | mcr p14, 0, r0, c7, c0, 0 @ enter sleep | 31 | mcr p14, 0, r0, c7, c0, 0 @ enter sleep |
43 | 32 | ||
@@ -46,28 +35,18 @@ ENTRY(pxa3xx_cpu_suspend) | |||
46 | 35 | ||
47 | #ifdef CONFIG_PXA27x | 36 | #ifdef CONFIG_PXA27x |
48 | /* | 37 | /* |
49 | * pxa27x_cpu_suspend() | 38 | * pxa27x_finish_suspend() |
50 | * | 39 | * |
51 | * Forces CPU into sleep state. | 40 | * Forces CPU into sleep state. |
52 | * | 41 | * |
53 | * r0 = value for PWRMODE M field for desired sleep state | 42 | * r0 = value for PWRMODE M field for desired sleep state |
54 | * r1 = v:p offset | ||
55 | */ | 43 | */ |
56 | ENTRY(pxa27x_cpu_suspend) | 44 | ENTRY(pxa27x_finish_suspend) |
57 | |||
58 | #ifndef CONFIG_IWMMXT | ||
59 | mra r2, r3, acc0 | ||
60 | #endif | ||
61 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | ||
62 | mov r4, r0 @ save sleep mode | ||
63 | ldr r3, =pxa_cpu_resume @ resume function | ||
64 | bl cpu_suspend | ||
65 | |||
66 | @ Put the processor to sleep | 45 | @ Put the processor to sleep |
67 | @ (also workaround for sighting 28071) | 46 | @ (also workaround for sighting 28071) |
68 | 47 | ||
69 | @ prepare value for sleep mode | 48 | @ prepare value for sleep mode |
70 | mov r1, r4 @ sleep mode | 49 | mov r1, r0 @ sleep mode |
71 | 50 | ||
72 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) | 51 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) |
73 | mov r2, #UNCACHED_PHYS_0 | 52 | mov r2, #UNCACHED_PHYS_0 |
@@ -99,21 +78,16 @@ ENTRY(pxa27x_cpu_suspend) | |||
99 | 78 | ||
100 | #ifdef CONFIG_PXA25x | 79 | #ifdef CONFIG_PXA25x |
101 | /* | 80 | /* |
102 | * pxa25x_cpu_suspend() | 81 | * pxa25x_finish_suspend() |
103 | * | 82 | * |
104 | * Forces CPU into sleep state. | 83 | * Forces CPU into sleep state. |
105 | * | 84 | * |
106 | * r0 = value for PWRMODE M field for desired sleep state | 85 | * r0 = value for PWRMODE M field for desired sleep state |
107 | * r1 = v:p offset | ||
108 | */ | 86 | */ |
109 | 87 | ||
110 | ENTRY(pxa25x_cpu_suspend) | 88 | ENTRY(pxa25x_finish_suspend) |
111 | stmfd sp!, {r2 - r12, lr} @ save registers on stack | ||
112 | mov r4, r0 @ save sleep mode | ||
113 | ldr r3, =pxa_cpu_resume @ resume function | ||
114 | bl cpu_suspend | ||
115 | @ prepare value for sleep mode | 89 | @ prepare value for sleep mode |
116 | mov r1, r4 @ sleep mode | 90 | mov r1, r0 @ sleep mode |
117 | 91 | ||
118 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) | 92 | @ prepare pointer to physical address 0 (virtual mapping in generic.c) |
119 | mov r2, #UNCACHED_PHYS_0 | 93 | mov r2, #UNCACHED_PHYS_0 |
@@ -195,16 +169,3 @@ pxa_cpu_do_suspend: | |||
195 | mcr p14, 0, r1, c7, c0, 0 @ PWRMODE | 169 | mcr p14, 0, r1, c7, c0, 0 @ PWRMODE |
196 | 170 | ||
197 | 20: b 20b @ loop waiting for sleep | 171 | 20: b 20b @ loop waiting for sleep |
198 | |||
199 | /* | ||
200 | * pxa_cpu_resume() | ||
201 | * | ||
202 | * entry point from bootloader into kernel during resume | ||
203 | */ | ||
204 | .align 5 | ||
205 | pxa_cpu_resume: | ||
206 | ldmfd sp!, {r2, r3} | ||
207 | #ifndef CONFIG_IWMMXT | ||
208 | mar acc0, r2, r3 | ||
209 | #endif | ||
210 | ldmfd sp!, {r4 - r12, pc} @ return to caller | ||
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c index 7fe74067d85f..094279aefe9c 100644 --- a/arch/arm/mach-pxa/spitz_pm.c +++ b/arch/arm/mach-pxa/spitz_pm.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/gpio.h> | ||
17 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
18 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
19 | #include <linux/apm-emulation.h> | 20 | #include <linux/apm-emulation.h> |
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 00363c7ac182..9b99cc164de5 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/can/platform/mcp251x.h> | 31 | #include <linux/can/platform/mcp251x.h> |
32 | 32 | ||
33 | #include <asm/mach-types.h> | 33 | #include <asm/mach-types.h> |
34 | #include <asm/suspend.h> | ||
34 | #include <asm/mach/arch.h> | 35 | #include <asm/mach/arch.h> |
35 | #include <asm/mach/map.h> | 36 | #include <asm/mach/map.h> |
36 | 37 | ||
@@ -676,7 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = { | |||
676 | static void zeus_power_off(void) | 677 | static void zeus_power_off(void) |
677 | { | 678 | { |
678 | local_irq_disable(); | 679 | local_irq_disable(); |
679 | pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 680 | cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend); |
680 | } | 681 | } |
681 | #else | 682 | #else |
682 | #define zeus_power_off NULL | 683 | #define zeus_power_off NULL |
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig index b9a9805e4828..dba6d0c1fc17 100644 --- a/arch/arm/mach-realview/Kconfig +++ b/arch/arm/mach-realview/Kconfig | |||
@@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176 | |||
50 | bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" | 50 | bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S" |
51 | select CPU_V6 | 51 | select CPU_V6 |
52 | select ARM_GIC | 52 | select ARM_GIC |
53 | select HAVE_TCM | ||
53 | help | 54 | help |
54 | Include support for the ARM(R) RealView(R) Platform Baseboard for | 55 | Include support for the ARM(R) RealView(R) Platform Baseboard for |
55 | ARM1176JZF-S. | 56 | ARM1176JZF-S. |
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c index 963bf0d8119a..4ae943bafa92 100644 --- a/arch/arm/mach-realview/platsmp.c +++ b/arch/arm/mach-realview/platsmp.c | |||
@@ -68,14 +68,6 @@ void __init smp_init_cpus(void) | |||
68 | 68 | ||
69 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 69 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
70 | { | 70 | { |
71 | int i; | ||
72 | |||
73 | /* | ||
74 | * Initialise the present map, which describes the set of CPUs | ||
75 | * actually populated at the present time. | ||
76 | */ | ||
77 | for (i = 0; i < max_cpus; i++) | ||
78 | set_cpu_present(i, true); | ||
79 | 71 | ||
80 | scu_enable(scu_base_addr()); | 72 | scu_enable(scu_base_addr()); |
81 | 73 | ||
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c index 752b13a7b3db..f4077efa51fa 100644 --- a/arch/arm/mach-s3c2412/pm.c +++ b/arch/arm/mach-s3c2412/pm.c | |||
@@ -37,12 +37,10 @@ | |||
37 | 37 | ||
38 | extern void s3c2412_sleep_enter(void); | 38 | extern void s3c2412_sleep_enter(void); |
39 | 39 | ||
40 | static void s3c2412_cpu_suspend(void) | 40 | static int s3c2412_cpu_suspend(unsigned long arg) |
41 | { | 41 | { |
42 | unsigned long tmp; | 42 | unsigned long tmp; |
43 | 43 | ||
44 | flush_cache_all(); | ||
45 | |||
46 | /* set our standby method to sleep */ | 44 | /* set our standby method to sleep */ |
47 | 45 | ||
48 | tmp = __raw_readl(S3C2412_PWRCFG); | 46 | tmp = __raw_readl(S3C2412_PWRCFG); |
@@ -50,6 +48,8 @@ static void s3c2412_cpu_suspend(void) | |||
50 | __raw_writel(tmp, S3C2412_PWRCFG); | 48 | __raw_writel(tmp, S3C2412_PWRCFG); |
51 | 49 | ||
52 | s3c2412_sleep_enter(); | 50 | s3c2412_sleep_enter(); |
51 | |||
52 | panic("sleep resumed to originator?"); | ||
53 | } | 53 | } |
54 | 54 | ||
55 | static void s3c2412_pm_prepare(void) | 55 | static void s3c2412_pm_prepare(void) |
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c index 41db2b21e213..9ec54f1d8e75 100644 --- a/arch/arm/mach-s3c2416/pm.c +++ b/arch/arm/mach-s3c2416/pm.c | |||
@@ -24,10 +24,8 @@ | |||
24 | 24 | ||
25 | extern void s3c2412_sleep_enter(void); | 25 | extern void s3c2412_sleep_enter(void); |
26 | 26 | ||
27 | static void s3c2416_cpu_suspend(void) | 27 | static int s3c2416_cpu_suspend(unsigned long arg) |
28 | { | 28 | { |
29 | flush_cache_all(); | ||
30 | |||
31 | /* enable wakeup sources regardless of battery state */ | 29 | /* enable wakeup sources regardless of battery state */ |
32 | __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); | 30 | __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG); |
33 | 31 | ||
@@ -35,6 +33,8 @@ static void s3c2416_cpu_suspend(void) | |||
35 | __raw_writel(0x2BED, S3C2443_PWRMODE); | 33 | __raw_writel(0x2BED, S3C2443_PWRMODE); |
36 | 34 | ||
37 | s3c2412_sleep_enter(); | 35 | s3c2412_sleep_enter(); |
36 | |||
37 | panic("sleep resumed to originator?"); | ||
38 | } | 38 | } |
39 | 39 | ||
40 | static void s3c2416_pm_prepare(void) | 40 | static void s3c2416_pm_prepare(void) |
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c index dd3120df09fe..fc2dc0b3d4fe 100644 --- a/arch/arm/mach-s3c2440/mach-mini2440.c +++ b/arch/arm/mach-s3c2440/mach-mini2440.c | |||
@@ -552,7 +552,7 @@ struct mini2440_features_t { | |||
552 | struct platform_device *optional[8]; | 552 | struct platform_device *optional[8]; |
553 | }; | 553 | }; |
554 | 554 | ||
555 | static void mini2440_parse_features( | 555 | static void __init mini2440_parse_features( |
556 | struct mini2440_features_t * features, | 556 | struct mini2440_features_t * features, |
557 | const char * features_str ) | 557 | const char * features_str ) |
558 | { | 558 | { |
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c index 82db072cb836..5e6b42089eb4 100644 --- a/arch/arm/mach-s3c64xx/dev-spi.c +++ b/arch/arm/mach-s3c64xx/dev-spi.c | |||
@@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = { | |||
88 | .cfg_gpio = s3c64xx_spi_cfg_gpio, | 88 | .cfg_gpio = s3c64xx_spi_cfg_gpio, |
89 | .fifo_lvl_mask = 0x7f, | 89 | .fifo_lvl_mask = 0x7f, |
90 | .rx_lvl_offset = 13, | 90 | .rx_lvl_offset = 13, |
91 | .tx_st_done = 21, | ||
91 | }; | 92 | }; |
92 | 93 | ||
93 | static u64 spi_dmamask = DMA_BIT_MASK(32); | 94 | static u64 spi_dmamask = DMA_BIT_MASK(32); |
@@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = { | |||
132 | .cfg_gpio = s3c64xx_spi_cfg_gpio, | 133 | .cfg_gpio = s3c64xx_spi_cfg_gpio, |
133 | .fifo_lvl_mask = 0x7f, | 134 | .fifo_lvl_mask = 0x7f, |
134 | .rx_lvl_offset = 13, | 135 | .rx_lvl_offset = 13, |
136 | .tx_st_done = 21, | ||
135 | }; | 137 | }; |
136 | 138 | ||
137 | struct platform_device s3c64xx_device_spi1 = { | 139 | struct platform_device s3c64xx_device_spi1 = { |
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index b197171e7d03..204bfafe4bfc 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c | |||
@@ -113,7 +113,7 @@ found: | |||
113 | return chan; | 113 | return chan; |
114 | } | 114 | } |
115 | 115 | ||
116 | int s3c2410_dma_config(unsigned int channel, int xferunit) | 116 | int s3c2410_dma_config(enum dma_ch channel, int xferunit) |
117 | { | 117 | { |
118 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 118 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
119 | 119 | ||
@@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan) | |||
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | 299 | ||
300 | int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) | 300 | int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) |
301 | { | 301 | { |
302 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 302 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
303 | 303 | ||
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); | |||
331 | * | 331 | * |
332 | */ | 332 | */ |
333 | 333 | ||
334 | int s3c2410_dma_enqueue(unsigned int channel, void *id, | 334 | int s3c2410_dma_enqueue(enum dma_ch channel, void *id, |
335 | dma_addr_t data, int size) | 335 | dma_addr_t data, int size) |
336 | { | 336 | { |
337 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 337 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
@@ -415,7 +415,7 @@ err_buff: | |||
415 | EXPORT_SYMBOL(s3c2410_dma_enqueue); | 415 | EXPORT_SYMBOL(s3c2410_dma_enqueue); |
416 | 416 | ||
417 | 417 | ||
418 | int s3c2410_dma_devconfig(unsigned int channel, | 418 | int s3c2410_dma_devconfig(enum dma_ch channel, |
419 | enum s3c2410_dmasrc source, | 419 | enum s3c2410_dmasrc source, |
420 | unsigned long devaddr) | 420 | unsigned long devaddr) |
421 | { | 421 | { |
@@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel, | |||
463 | EXPORT_SYMBOL(s3c2410_dma_devconfig); | 463 | EXPORT_SYMBOL(s3c2410_dma_devconfig); |
464 | 464 | ||
465 | 465 | ||
466 | int s3c2410_dma_getposition(unsigned int channel, | 466 | int s3c2410_dma_getposition(enum dma_ch channel, |
467 | dma_addr_t *src, dma_addr_t *dst) | 467 | dma_addr_t *src, dma_addr_t *dst) |
468 | { | 468 | { |
469 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 469 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition); | |||
487 | * get control of an dma channel | 487 | * get control of an dma channel |
488 | */ | 488 | */ |
489 | 489 | ||
490 | int s3c2410_dma_request(unsigned int channel, | 490 | int s3c2410_dma_request(enum dma_ch channel, |
491 | struct s3c2410_dma_client *client, | 491 | struct s3c2410_dma_client *client, |
492 | void *dev) | 492 | void *dev) |
493 | { | 493 | { |
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); | |||
533 | * allowed to go through. | 533 | * allowed to go through. |
534 | */ | 534 | */ |
535 | 535 | ||
536 | int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) | 536 | int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) |
537 | { | 537 | { |
538 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 538 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
539 | unsigned long flags; | 539 | unsigned long flags; |
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c index bc1c470b7de6..8bad64370689 100644 --- a/arch/arm/mach-s3c64xx/pm.c +++ b/arch/arm/mach-s3c64xx/pm.c | |||
@@ -112,7 +112,7 @@ void s3c_pm_save_core(void) | |||
112 | * this. | 112 | * this. |
113 | */ | 113 | */ |
114 | 114 | ||
115 | static void s3c64xx_cpu_suspend(void) | 115 | static int s3c64xx_cpu_suspend(unsigned long arg) |
116 | { | 116 | { |
117 | unsigned long tmp; | 117 | unsigned long tmp; |
118 | 118 | ||
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S index 1f87732b2320..34313f9c8792 100644 --- a/arch/arm/mach-s3c64xx/sleep.S +++ b/arch/arm/mach-s3c64xx/sleep.S | |||
@@ -25,29 +25,6 @@ | |||
25 | 25 | ||
26 | .text | 26 | .text |
27 | 27 | ||
28 | /* s3c_cpu_save | ||
29 | * | ||
30 | * Save enough processor state to allow the restart of the pm.c | ||
31 | * code after resume. | ||
32 | * | ||
33 | * entry: | ||
34 | * r1 = v:p offset | ||
35 | */ | ||
36 | |||
37 | ENTRY(s3c_cpu_save) | ||
38 | stmfd sp!, { r4 - r12, lr } | ||
39 | ldr r3, =resume_with_mmu | ||
40 | bl cpu_suspend | ||
41 | |||
42 | @@ call final suspend code | ||
43 | ldr r0, =pm_cpu_sleep | ||
44 | ldr pc, [r0] | ||
45 | |||
46 | @@ return to the caller, after the MMU is turned on. | ||
47 | @@ restore the last bits of the stack and return. | ||
48 | resume_with_mmu: | ||
49 | ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save | ||
50 | |||
51 | /* Sleep magic, the word before the resume entry point so that the | 28 | /* Sleep magic, the word before the resume entry point so that the |
52 | * bootloader can check for a resumeable image. */ | 29 | * bootloader can check for a resumeable image. */ |
53 | 30 | ||
diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c index e78ee18c76e3..ac825e826326 100644 --- a/arch/arm/mach-s5p64x0/dev-spi.c +++ b/arch/arm/mach-s5p64x0/dev-spi.c | |||
@@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = { | |||
112 | .cfg_gpio = s5p6440_spi_cfg_gpio, | 112 | .cfg_gpio = s5p6440_spi_cfg_gpio, |
113 | .fifo_lvl_mask = 0x1ff, | 113 | .fifo_lvl_mask = 0x1ff, |
114 | .rx_lvl_offset = 15, | 114 | .rx_lvl_offset = 15, |
115 | .tx_st_done = 25, | ||
115 | }; | 116 | }; |
116 | 117 | ||
117 | static struct s3c64xx_spi_info s5p6450_spi0_pdata = { | 118 | static struct s3c64xx_spi_info s5p6450_spi0_pdata = { |
118 | .cfg_gpio = s5p6450_spi_cfg_gpio, | 119 | .cfg_gpio = s5p6450_spi_cfg_gpio, |
119 | .fifo_lvl_mask = 0x1ff, | 120 | .fifo_lvl_mask = 0x1ff, |
120 | .rx_lvl_offset = 15, | 121 | .rx_lvl_offset = 15, |
122 | .tx_st_done = 25, | ||
121 | }; | 123 | }; |
122 | 124 | ||
123 | static u64 spi_dmamask = DMA_BIT_MASK(32); | 125 | static u64 spi_dmamask = DMA_BIT_MASK(32); |
@@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = { | |||
160 | .cfg_gpio = s5p6440_spi_cfg_gpio, | 162 | .cfg_gpio = s5p6440_spi_cfg_gpio, |
161 | .fifo_lvl_mask = 0x7f, | 163 | .fifo_lvl_mask = 0x7f, |
162 | .rx_lvl_offset = 15, | 164 | .rx_lvl_offset = 15, |
165 | .tx_st_done = 25, | ||
163 | }; | 166 | }; |
164 | 167 | ||
165 | static struct s3c64xx_spi_info s5p6450_spi1_pdata = { | 168 | static struct s3c64xx_spi_info s5p6450_spi1_pdata = { |
166 | .cfg_gpio = s5p6450_spi_cfg_gpio, | 169 | .cfg_gpio = s5p6450_spi_cfg_gpio, |
167 | .fifo_lvl_mask = 0x7f, | 170 | .fifo_lvl_mask = 0x7f, |
168 | .rx_lvl_offset = 15, | 171 | .rx_lvl_offset = 15, |
172 | .tx_st_done = 25, | ||
169 | }; | 173 | }; |
170 | 174 | ||
171 | struct platform_device s5p64x0_device_spi1 = { | 175 | struct platform_device s5p64x0_device_spi1 = { |
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c index 57b19794d9bb..e5d6c4dceb56 100644 --- a/arch/arm/mach-s5pc100/dev-spi.c +++ b/arch/arm/mach-s5pc100/dev-spi.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <mach/dma.h> | 15 | #include <mach/dma.h> |
16 | #include <mach/map.h> | 16 | #include <mach/map.h> |
17 | #include <mach/spi-clocks.h> | 17 | #include <mach/spi-clocks.h> |
18 | #include <mach/irqs.h> | ||
18 | 19 | ||
19 | #include <plat/s3c64xx-spi.h> | 20 | #include <plat/s3c64xx-spi.h> |
20 | #include <plat/gpio-cfg.h> | 21 | #include <plat/gpio-cfg.h> |
@@ -90,6 +91,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = { | |||
90 | .fifo_lvl_mask = 0x7f, | 91 | .fifo_lvl_mask = 0x7f, |
91 | .rx_lvl_offset = 13, | 92 | .rx_lvl_offset = 13, |
92 | .high_speed = 1, | 93 | .high_speed = 1, |
94 | .tx_st_done = 21, | ||
93 | }; | 95 | }; |
94 | 96 | ||
95 | static u64 spi_dmamask = DMA_BIT_MASK(32); | 97 | static u64 spi_dmamask = DMA_BIT_MASK(32); |
@@ -134,6 +136,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = { | |||
134 | .fifo_lvl_mask = 0x7f, | 136 | .fifo_lvl_mask = 0x7f, |
135 | .rx_lvl_offset = 13, | 137 | .rx_lvl_offset = 13, |
136 | .high_speed = 1, | 138 | .high_speed = 1, |
139 | .tx_st_done = 21, | ||
137 | }; | 140 | }; |
138 | 141 | ||
139 | struct platform_device s5pc100_device_spi1 = { | 142 | struct platform_device s5pc100_device_spi1 = { |
@@ -176,6 +179,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = { | |||
176 | .fifo_lvl_mask = 0x7f, | 179 | .fifo_lvl_mask = 0x7f, |
177 | .rx_lvl_offset = 13, | 180 | .rx_lvl_offset = 13, |
178 | .high_speed = 1, | 181 | .high_speed = 1, |
182 | .tx_st_done = 21, | ||
179 | }; | 183 | }; |
180 | 184 | ||
181 | struct platform_device s5pc100_device_spi2 = { | 185 | struct platform_device s5pc100_device_spi2 = { |
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c index e3249a47e3b1..eaf9a7bff7a0 100644 --- a/arch/arm/mach-s5pv210/dev-spi.c +++ b/arch/arm/mach-s5pv210/dev-spi.c | |||
@@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = { | |||
85 | .fifo_lvl_mask = 0x1ff, | 85 | .fifo_lvl_mask = 0x1ff, |
86 | .rx_lvl_offset = 15, | 86 | .rx_lvl_offset = 15, |
87 | .high_speed = 1, | 87 | .high_speed = 1, |
88 | .tx_st_done = 25, | ||
88 | }; | 89 | }; |
89 | 90 | ||
90 | static u64 spi_dmamask = DMA_BIT_MASK(32); | 91 | static u64 spi_dmamask = DMA_BIT_MASK(32); |
@@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = { | |||
129 | .fifo_lvl_mask = 0x7f, | 130 | .fifo_lvl_mask = 0x7f, |
130 | .rx_lvl_offset = 15, | 131 | .rx_lvl_offset = 15, |
131 | .high_speed = 1, | 132 | .high_speed = 1, |
133 | .tx_st_done = 25, | ||
132 | }; | 134 | }; |
133 | 135 | ||
134 | struct platform_device s5pv210_device_spi1 = { | 136 | struct platform_device s5pv210_device_spi1 = { |
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c index 24febae3d4c0..309e388a8a83 100644 --- a/arch/arm/mach-s5pv210/pm.c +++ b/arch/arm/mach-s5pv210/pm.c | |||
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = { | |||
88 | SAVE_ITEM(S3C2410_TCNTO(0)), | 88 | SAVE_ITEM(S3C2410_TCNTO(0)), |
89 | }; | 89 | }; |
90 | 90 | ||
91 | void s5pv210_cpu_suspend(void) | 91 | void s5pv210_cpu_suspend(unsigned long arg) |
92 | { | 92 | { |
93 | unsigned long tmp; | 93 | unsigned long tmp; |
94 | 94 | ||
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S index a3d649466fb1..e3452ccd4b08 100644 --- a/arch/arm/mach-s5pv210/sleep.S +++ b/arch/arm/mach-s5pv210/sleep.S | |||
@@ -32,27 +32,6 @@ | |||
32 | 32 | ||
33 | .text | 33 | .text |
34 | 34 | ||
35 | /* s3c_cpu_save | ||
36 | * | ||
37 | * entry: | ||
38 | * r1 = v:p offset | ||
39 | */ | ||
40 | |||
41 | ENTRY(s3c_cpu_save) | ||
42 | |||
43 | stmfd sp!, { r3 - r12, lr } | ||
44 | ldr r3, =resume_with_mmu | ||
45 | bl cpu_suspend | ||
46 | |||
47 | ldr r0, =pm_cpu_sleep | ||
48 | ldr r0, [ r0 ] | ||
49 | mov pc, r0 | ||
50 | |||
51 | resume_with_mmu: | ||
52 | ldmfd sp!, { r3 - r12, pc } | ||
53 | |||
54 | .ltorg | ||
55 | |||
56 | /* sleep magic, to allow the bootloader to check for a valid | 35 | /* sleep magic, to allow the bootloader to check for a valid |
57 | * image to resume to. Must be the first word before the | 36 | * image to resume to. Must be the first word before the |
58 | * s3c_cpu_resume entry. | 37 | * s3c_cpu_resume entry. |
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c index c4661aab22fb..bf85b8b259d5 100644 --- a/arch/arm/mach-sa1100/pm.c +++ b/arch/arm/mach-sa1100/pm.c | |||
@@ -29,10 +29,11 @@ | |||
29 | 29 | ||
30 | #include <mach/hardware.h> | 30 | #include <mach/hardware.h> |
31 | #include <asm/memory.h> | 31 | #include <asm/memory.h> |
32 | #include <asm/suspend.h> | ||
32 | #include <asm/system.h> | 33 | #include <asm/system.h> |
33 | #include <asm/mach/time.h> | 34 | #include <asm/mach/time.h> |
34 | 35 | ||
35 | extern void sa1100_cpu_suspend(long); | 36 | extern int sa1100_finish_suspend(unsigned long); |
36 | 37 | ||
37 | #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x | 38 | #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x |
38 | #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] | 39 | #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x] |
@@ -75,9 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state) | |||
75 | PSPR = virt_to_phys(cpu_resume); | 76 | PSPR = virt_to_phys(cpu_resume); |
76 | 77 | ||
77 | /* go zzz */ | 78 | /* go zzz */ |
78 | sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET); | 79 | cpu_suspend(0, sa1100_finish_suspend); |
79 | |||
80 | cpu_init(); | ||
81 | 80 | ||
82 | /* | 81 | /* |
83 | * Ensure not to come back here if it wasn't intended | 82 | * Ensure not to come back here if it wasn't intended |
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S index 04f2a618d4ef..e8223315b442 100644 --- a/arch/arm/mach-sa1100/sleep.S +++ b/arch/arm/mach-sa1100/sleep.S | |||
@@ -22,18 +22,13 @@ | |||
22 | 22 | ||
23 | .text | 23 | .text |
24 | /* | 24 | /* |
25 | * sa1100_cpu_suspend() | 25 | * sa1100_finish_suspend() |
26 | * | 26 | * |
27 | * Causes sa11x0 to enter sleep state | 27 | * Causes sa11x0 to enter sleep state |
28 | * | 28 | * |
29 | */ | 29 | */ |
30 | 30 | ||
31 | ENTRY(sa1100_cpu_suspend) | 31 | ENTRY(sa1100_finish_suspend) |
32 | stmfd sp!, {r4 - r12, lr} @ save registers on stack | ||
33 | mov r1, r0 | ||
34 | ldr r3, =sa1100_cpu_resume @ return function | ||
35 | bl cpu_suspend | ||
36 | |||
37 | @ disable clock switching | 32 | @ disable clock switching |
38 | mcr p15, 0, r1, c15, c2, 2 | 33 | mcr p15, 0, r1, c15, c2, 2 |
39 | 34 | ||
@@ -139,13 +134,3 @@ sa1110_sdram_controller_fix: | |||
139 | str r13, [r12] | 134 | str r13, [r12] |
140 | 135 | ||
141 | 20: b 20b @ loop waiting for sleep | 136 | 20: b 20b @ loop waiting for sleep |
142 | |||
143 | /* | ||
144 | * cpu_sa1100_resume() | ||
145 | * | ||
146 | * entry point from bootloader into kernel during resume | ||
147 | */ | ||
148 | .align 5 | ||
149 | sa1100_cpu_resume: | ||
150 | mcr p15, 0, r1, c15, c1, 2 @ enable clock switching | ||
151 | ldmfd sp!, {r4 - r12, pc} @ return to caller | ||
diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S index e2853c0a3333..0bb6cc626eb7 100644 --- a/arch/arm/mach-shark/include/mach/entry-macro.S +++ b/arch/arm/mach-shark/include/mach/entry-macro.S | |||
@@ -11,17 +11,17 @@ | |||
11 | .endm | 11 | .endm |
12 | 12 | ||
13 | .macro get_irqnr_preamble, base, tmp | 13 | .macro get_irqnr_preamble, base, tmp |
14 | mov \base, #0xe0000000 | ||
14 | .endm | 15 | .endm |
15 | 16 | ||
16 | .macro arch_ret_to_user, tmp1, tmp2 | 17 | .macro arch_ret_to_user, tmp1, tmp2 |
17 | .endm | 18 | .endm |
18 | 19 | ||
19 | .macro get_irqnr_and_base, irqnr, irqstat, base, tmp | 20 | .macro get_irqnr_and_base, irqnr, irqstat, base, tmp |
20 | mov r4, #0xe0000000 | ||
21 | 21 | ||
22 | mov \irqstat, #0x0C | 22 | mov \irqstat, #0x0C |
23 | strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */ | 23 | strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */ |
24 | ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7 | 24 | ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7 |
25 | and \irqstat, \irqnr, #0x80 | 25 | and \irqstat, \irqnr, #0x80 |
26 | teq \irqstat, #0 | 26 | teq \irqstat, #0 |
27 | beq 43f | 27 | beq 43f |
@@ -29,8 +29,8 @@ | |||
29 | teq \irqnr, #2 | 29 | teq \irqnr, #2 |
30 | bne 44f | 30 | bne 44f |
31 | 43: mov \irqstat, #0x0C | 31 | 43: mov \irqstat, #0x0C |
32 | strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ | 32 | strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */ |
33 | ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8 | 33 | ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8 |
34 | and \irqstat, \irqnr, #0x80 | 34 | and \irqstat, \irqnr, #0x80 |
35 | teq \irqstat, #0 | 35 | teq \irqstat, #0 |
36 | beq 44f | 36 | beq 44f |
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index 1e2aba23e0d6..ce5c2513c6ce 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c | |||
@@ -381,7 +381,7 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state) | |||
381 | gpio_set_value(GPIO_PORT114, state); | 381 | gpio_set_value(GPIO_PORT114, state); |
382 | } | 382 | } |
383 | 383 | ||
384 | static struct sh_mobile_sdhi_info sh_sdhi1_platdata = { | 384 | static struct sh_mobile_sdhi_info sh_sdhi1_info = { |
385 | .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, | 385 | .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, |
386 | .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, | 386 | .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ, |
387 | .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, | 387 | .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, |
@@ -413,7 +413,7 @@ static struct platform_device sdhi1_device = { | |||
413 | .name = "sh_mobile_sdhi", | 413 | .name = "sh_mobile_sdhi", |
414 | .id = 1, | 414 | .id = 1, |
415 | .dev = { | 415 | .dev = { |
416 | .platform_data = &sh_sdhi1_platdata, | 416 | .platform_data = &sh_sdhi1_info, |
417 | }, | 417 | }, |
418 | .num_resources = ARRAY_SIZE(sdhi1_resources), | 418 | .num_resources = ARRAY_SIZE(sdhi1_resources), |
419 | .resource = sdhi1_resources, | 419 | .resource = sdhi1_resources, |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index f6b687f61c28..803bc6edfca4 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
@@ -913,7 +913,7 @@ static struct i2c_board_info imx074_info = { | |||
913 | I2C_BOARD_INFO("imx074", 0x1a), | 913 | I2C_BOARD_INFO("imx074", 0x1a), |
914 | }; | 914 | }; |
915 | 915 | ||
916 | struct soc_camera_link imx074_link = { | 916 | static struct soc_camera_link imx074_link = { |
917 | .bus_id = 0, | 917 | .bus_id = 0, |
918 | .board_info = &imx074_info, | 918 | .board_info = &imx074_info, |
919 | .i2c_adapter_id = 0, | 919 | .i2c_adapter_id = 0, |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 7e1d37584321..3802f2afabef 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c | |||
@@ -1287,9 +1287,9 @@ static struct platform_device *mackerel_devices[] __initdata = { | |||
1287 | &nor_flash_device, | 1287 | &nor_flash_device, |
1288 | &smc911x_device, | 1288 | &smc911x_device, |
1289 | &lcdc_device, | 1289 | &lcdc_device, |
1290 | &usbhs0_device, | ||
1291 | &usb1_host_device, | 1290 | &usb1_host_device, |
1292 | &usbhs1_device, | 1291 | &usbhs1_device, |
1292 | &usbhs0_device, | ||
1293 | &leds_device, | 1293 | &leds_device, |
1294 | &fsi_device, | 1294 | &fsi_device, |
1295 | &fsi_ak4643_device, | 1295 | &fsi_ak4643_device, |
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h new file mode 100644 index 000000000000..4a81b01f1e8f --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h | |||
@@ -0,0 +1,21 @@ | |||
1 | #ifndef SDHI_SH7372_H | ||
2 | #define SDHI_SH7372_H | ||
3 | |||
4 | #define SDGENCNTA 0xfe40009c | ||
5 | |||
6 | /* The countdown of SDGENCNTA is controlled by | ||
7 | * ZB3D2CLK, which runs at 149.5 MHz. | ||
8 | * That is 149.5 ticks/us. Approximate this as 150 ticks/us. | ||
9 | */ | ||
10 | static void udelay(int us) | ||
11 | { | ||
12 | __raw_writel(us * 150, SDGENCNTA); | ||
13 | while(__raw_readl(SDGENCNTA)) ; | ||
14 | } | ||
15 | |||
16 | static void msleep(int ms) | ||
17 | { | ||
18 | udelay(ms * 1000); | ||
19 | } | ||
20 | |||
21 | #endif | ||
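The helpers in this new header are plain busy-wait delays: udelay() loads the SDGENCNTA down-counter (approximated at 150 ticks per microsecond) and spins until it reaches zero, and msleep() is simply udelay() scaled by 1000, presumably for a context in which the normal kernel delay routines cannot be used. A tiny, hypothetical usage illustration:

    #include <mach/sdhi.h>  /* selects sdhi-sh7372.h on SH7372 */

    static void example_wait_for_card(void)
    {
            udelay(200);    /* spin for ~200 us on the SDGENCNTA countdown */
            msleep(5);      /* ~5 ms, implemented as udelay(5 * 1000) */
    }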
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h new file mode 100644 index 000000000000..0ec9e69f2c3b --- /dev/null +++ b/arch/arm/mach-shmobile/include/mach/sdhi.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef SDHI_H | ||
2 | #define SDHI_H | ||
3 | |||
4 | /************************************************** | ||
5 | * | ||
6 | * CPU specific settings | ||
7 | * | ||
8 | **************************************************/ | ||
9 | |||
10 | #ifdef CONFIG_ARCH_SH7372 | ||
11 | #include "mach/sdhi-sh7372.h" | ||
12 | #else | ||
13 | #error "unsupported CPU." | ||
14 | #endif | ||
15 | |||
16 | #endif /* SDHI_H */ | ||
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c index f3888feb1c68..66f980625a33 100644 --- a/arch/arm/mach-shmobile/platsmp.c +++ b/arch/arm/mach-shmobile/platsmp.c | |||
@@ -64,10 +64,5 @@ void __init smp_init_cpus(void) | |||
64 | 64 | ||
65 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 65 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
66 | { | 66 | { |
67 | int i; | ||
68 | |||
69 | for (i = 0; i < max_cpus; i++) | ||
70 | set_cpu_present(i, true); | ||
71 | |||
72 | shmobile_smp_prepare_cpus(); | 67 | shmobile_smp_prepare_cpus(); |
73 | } | 68 | } |
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c index b8ae3c978dee..1a594dce8fbc 100644 --- a/arch/arm/mach-tegra/platsmp.c +++ b/arch/arm/mach-tegra/platsmp.c | |||
@@ -129,14 +129,6 @@ void __init smp_init_cpus(void) | |||
129 | 129 | ||
130 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 130 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
131 | { | 131 | { |
132 | int i; | ||
133 | |||
134 | /* | ||
135 | * Initialise the present map, which describes the set of CPUs | ||
136 | * actually populated at the present time. | ||
137 | */ | ||
138 | for (i = 0; i < max_cpus; i++) | ||
139 | set_cpu_present(i, true); | ||
140 | 132 | ||
141 | scu_enable(scu_base); | 133 | scu_enable(scu_base); |
142 | } | 134 | } |
diff --git a/arch/arm/mach-ux500/board-mop500-pins.c b/arch/arm/mach-ux500/board-mop500-pins.c index fd4cf1ca5efd..70cdbd60596a 100644 --- a/arch/arm/mach-ux500/board-mop500-pins.c +++ b/arch/arm/mach-ux500/board-mop500-pins.c | |||
@@ -110,10 +110,18 @@ static pin_cfg_t mop500_pins_common[] = { | |||
110 | GPIO168_KP_O0, | 110 | GPIO168_KP_O0, |
111 | 111 | ||
112 | /* UART */ | 112 | /* UART */ |
113 | GPIO0_U0_CTSn | PIN_INPUT_PULLUP, | 113 | /* uart-0 pins gpio configuration should be |
114 | GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, | 114 | * kept intact to prevent a glitch on the tx line |
115 | GPIO2_U0_RXD | PIN_INPUT_PULLUP, | 115 | * when the tty dev is opened. Later these pins |
116 | GPIO3_U0_TXD | PIN_OUTPUT_HIGH, | 116 | * are configured for uart via mop500_pins_uart0 |
117 | * | ||
118 | * This will be replaced with the uart configuration | ||
119 | * once the issue is solved. | ||
120 | */ | ||
121 | GPIO0_GPIO | PIN_INPUT_PULLUP, | ||
122 | GPIO1_GPIO | PIN_OUTPUT_HIGH, | ||
123 | GPIO2_GPIO | PIN_INPUT_PULLUP, | ||
124 | GPIO3_GPIO | PIN_OUTPUT_HIGH, | ||
117 | 125 | ||
118 | GPIO29_U2_RXD | PIN_INPUT_PULLUP, | 126 | GPIO29_U2_RXD | PIN_INPUT_PULLUP, |
119 | GPIO30_U2_TXD | PIN_OUTPUT_HIGH, | 127 | GPIO30_U2_TXD | PIN_OUTPUT_HIGH, |
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index bb26f40493e6..2a08c07dec6d 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c | |||
@@ -27,18 +27,21 @@ | |||
27 | #include <linux/leds-lp5521.h> | 27 | #include <linux/leds-lp5521.h> |
28 | #include <linux/input.h> | 28 | #include <linux/input.h> |
29 | #include <linux/gpio_keys.h> | 29 | #include <linux/gpio_keys.h> |
30 | #include <linux/delay.h> | ||
30 | 31 | ||
31 | #include <asm/mach-types.h> | 32 | #include <asm/mach-types.h> |
32 | #include <asm/mach/arch.h> | 33 | #include <asm/mach/arch.h> |
33 | 34 | ||
34 | #include <plat/i2c.h> | 35 | #include <plat/i2c.h> |
35 | #include <plat/ste_dma40.h> | 36 | #include <plat/ste_dma40.h> |
37 | #include <plat/pincfg.h> | ||
36 | 38 | ||
37 | #include <mach/hardware.h> | 39 | #include <mach/hardware.h> |
38 | #include <mach/setup.h> | 40 | #include <mach/setup.h> |
39 | #include <mach/devices.h> | 41 | #include <mach/devices.h> |
40 | #include <mach/irqs.h> | 42 | #include <mach/irqs.h> |
41 | 43 | ||
44 | #include "pins-db8500.h" | ||
42 | #include "ste-dma40-db8500.h" | 45 | #include "ste-dma40-db8500.h" |
43 | #include "devices-db8500.h" | 46 | #include "devices-db8500.h" |
44 | #include "board-mop500.h" | 47 | #include "board-mop500.h" |
@@ -393,12 +396,63 @@ static struct stedma40_chan_cfg uart2_dma_cfg_tx = { | |||
393 | }; | 396 | }; |
394 | #endif | 397 | #endif |
395 | 398 | ||
399 | |||
400 | static pin_cfg_t mop500_pins_uart0[] = { | ||
401 | GPIO0_U0_CTSn | PIN_INPUT_PULLUP, | ||
402 | GPIO1_U0_RTSn | PIN_OUTPUT_HIGH, | ||
403 | GPIO2_U0_RXD | PIN_INPUT_PULLUP, | ||
404 | GPIO3_U0_TXD | PIN_OUTPUT_HIGH, | ||
405 | }; | ||
406 | |||
407 | #define PRCC_K_SOFTRST_SET 0x18 | ||
408 | #define PRCC_K_SOFTRST_CLEAR 0x1C | ||
409 | static void ux500_uart0_reset(void) | ||
410 | { | ||
411 | void __iomem *prcc_rst_set, *prcc_rst_clr; | ||
412 | |||
413 | prcc_rst_set = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + | ||
414 | PRCC_K_SOFTRST_SET); | ||
415 | prcc_rst_clr = (void __iomem *)IO_ADDRESS(U8500_CLKRST1_BASE + | ||
416 | PRCC_K_SOFTRST_CLEAR); | ||
417 | |||
418 | /* Activate soft reset PRCC_K_SOFTRST_CLEAR */ | ||
419 | writel((readl(prcc_rst_clr) | 0x1), prcc_rst_clr); | ||
420 | udelay(1); | ||
421 | |||
422 | /* Release soft reset PRCC_K_SOFTRST_SET */ | ||
423 | writel((readl(prcc_rst_set) | 0x1), prcc_rst_set); | ||
424 | udelay(1); | ||
425 | } | ||
426 | |||
427 | static void ux500_uart0_init(void) | ||
428 | { | ||
429 | int ret; | ||
430 | |||
431 | ret = nmk_config_pins(mop500_pins_uart0, | ||
432 | ARRAY_SIZE(mop500_pins_uart0)); | ||
433 | if (ret < 0) | ||
434 | pr_err("pl011: uart pins_enable failed\n"); | ||
435 | } | ||
436 | |||
437 | static void ux500_uart0_exit(void) | ||
438 | { | ||
439 | int ret; | ||
440 | |||
441 | ret = nmk_config_pins_sleep(mop500_pins_uart0, | ||
442 | ARRAY_SIZE(mop500_pins_uart0)); | ||
443 | if (ret < 0) | ||
444 | pr_err("pl011: uart pins_disable failed\n"); | ||
445 | } | ||
446 | |||
396 | static struct amba_pl011_data uart0_plat = { | 447 | static struct amba_pl011_data uart0_plat = { |
397 | #ifdef CONFIG_STE_DMA40 | 448 | #ifdef CONFIG_STE_DMA40 |
398 | .dma_filter = stedma40_filter, | 449 | .dma_filter = stedma40_filter, |
399 | .dma_rx_param = &uart0_dma_cfg_rx, | 450 | .dma_rx_param = &uart0_dma_cfg_rx, |
400 | .dma_tx_param = &uart0_dma_cfg_tx, | 451 | .dma_tx_param = &uart0_dma_cfg_tx, |
401 | #endif | 452 | #endif |
453 | .init = ux500_uart0_init, | ||
454 | .exit = ux500_uart0_exit, | ||
455 | .reset = ux500_uart0_reset, | ||
402 | }; | 456 | }; |
403 | 457 | ||
404 | static struct amba_pl011_data uart1_plat = { | 458 | static struct amba_pl011_data uart1_plat = { |
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c index 0c527fe2cebb..a33df5f4c27a 100644 --- a/arch/arm/mach-ux500/platsmp.c +++ b/arch/arm/mach-ux500/platsmp.c | |||
@@ -172,14 +172,6 @@ void __init smp_init_cpus(void) | |||
172 | 172 | ||
173 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) | 173 | void __init platform_smp_prepare_cpus(unsigned int max_cpus) |
174 | { | 174 | { |
175 | int i; | ||
176 | |||
177 | /* | ||
178 | * Initialise the present map, which describes the set of CPUs | ||
179 | * actually populated at the present time. | ||
180 | */ | ||
181 | for (i = 0; i < max_cpus; i++) | ||
182 | set_cpu_present(i, true); | ||
183 | 175 | ||
184 | scu_enable(scu_base_addr()); | 176 | scu_enable(scu_base_addr()); |
185 | wakeup_secondary(); | 177 | wakeup_secondary(); |
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c index 765a71ff7f3b..bfd32f52c2db 100644 --- a/arch/arm/mach-vexpress/ct-ca9x4.c +++ b/arch/arm/mach-vexpress/ct-ca9x4.c | |||
@@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void) | |||
229 | 229 | ||
230 | static void ct_ca9x4_smp_enable(unsigned int max_cpus) | 230 | static void ct_ca9x4_smp_enable(unsigned int max_cpus) |
231 | { | 231 | { |
232 | int i; | ||
233 | for (i = 0; i < max_cpus; i++) | ||
234 | set_cpu_present(i, true); | ||
235 | |||
236 | scu_enable(MMIO_P2V(A9_MPCORE_SCU)); | 232 | scu_enable(MMIO_P2V(A9_MPCORE_SCU)); |
237 | } | 233 | } |
238 | #endif | 234 | #endif |
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c index 245140c0df10..642de0408f25 100644 --- a/arch/arm/mach-vt8500/irq.c +++ b/arch/arm/mach-vt8500/irq.c | |||
@@ -39,9 +39,10 @@ | |||
39 | static void __iomem *ic_regbase; | 39 | static void __iomem *ic_regbase; |
40 | static void __iomem *sic_regbase; | 40 | static void __iomem *sic_regbase; |
41 | 41 | ||
42 | static void vt8500_irq_mask(unsigned int irq) | 42 | static void vt8500_irq_mask(struct irq_data *d) |
43 | { | 43 | { |
44 | void __iomem *base = ic_regbase; | 44 | void __iomem *base = ic_regbase; |
45 | unsigned irq = d->irq; | ||
45 | u8 edge; | 46 | u8 edge; |
46 | 47 | ||
47 | if (irq >= 64) { | 48 | if (irq >= 64) { |
@@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq) | |||
64 | } | 65 | } |
65 | } | 66 | } |
66 | 67 | ||
67 | static void vt8500_irq_unmask(unsigned int irq) | 68 | static void vt8500_irq_unmask(struct irq_data *d) |
68 | { | 69 | { |
69 | void __iomem *base = ic_regbase; | 70 | void __iomem *base = ic_regbase; |
71 | unsigned irq = d->irq; | ||
70 | u8 dctr; | 72 | u8 dctr; |
71 | 73 | ||
72 | if (irq >= 64) { | 74 | if (irq >= 64) { |
@@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq) | |||
78 | writeb(dctr, base + VT8500_IC_DCTR + irq); | 80 | writeb(dctr, base + VT8500_IC_DCTR + irq); |
79 | } | 81 | } |
80 | 82 | ||
81 | static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) | 83 | static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type) |
82 | { | 84 | { |
83 | void __iomem *base = ic_regbase; | 85 | void __iomem *base = ic_regbase; |
84 | unsigned int orig_irq = irq; | 86 | unsigned irq = d->irq; |
87 | unsigned orig_irq = irq; | ||
85 | u8 dctr; | 88 | u8 dctr; |
86 | 89 | ||
87 | if (irq >= 64) { | 90 | if (irq >= 64) { |
@@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type) | |||
114 | } | 117 | } |
115 | 118 | ||
116 | static struct irq_chip vt8500_irq_chip = { | 119 | static struct irq_chip vt8500_irq_chip = { |
117 | .name = "vt8500", | 120 | .name = "vt8500", |
118 | .ack = vt8500_irq_mask, | 121 | .irq_ack = vt8500_irq_mask, |
119 | .mask = vt8500_irq_mask, | 122 | .irq_mask = vt8500_irq_mask, |
120 | .unmask = vt8500_irq_unmask, | 123 | .irq_unmask = vt8500_irq_unmask, |
121 | .set_type = vt8500_irq_set_type, | 124 | .irq_set_type = vt8500_irq_set_type, |
122 | }; | 125 | }; |
123 | 126 | ||
124 | void __init vt8500_init_irq(void) | 127 | void __init vt8500_init_irq(void) |
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 4f18f9e87bae..54473cd4aba9 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v4_early_abort | 4 | * Function: v4_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | * Note: we read user space. This means we might cause a data | 13 | * Note: we read user space. This means we might cause a data |
@@ -21,10 +18,8 @@ | |||
21 | ENTRY(v4_early_abort) | 18 | ENTRY(v4_early_abort) |
22 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 19 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
23 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 20 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
24 | ldr r3, [r2] @ read aborted ARM instruction | 21 | ldr r3, [r4] @ read aborted ARM instruction |
25 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 22 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
26 | tst r3, #1 << 20 @ L = 1 -> write? | 23 | tst r3, #1 << 20 @ L = 1 -> write? |
27 | orreq r1, r1, #1 << 11 @ yes. | 24 | orreq r1, r1, #1 << 11 @ yes. |
28 | mov pc, lr | 25 | b do_DataAbort |
29 | |||
30 | |||
diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index b6282548f922..9da704e7b86e 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v4t_early_abort | 5 | * Function: v4t_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -22,9 +19,9 @@ | |||
22 | ENTRY(v4t_early_abort) | 19 | ENTRY(v4t_early_abort) |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | do_thumb_abort | 22 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
26 | ldreq r3, [r2] @ read aborted ARM instruction | 23 | ldreq r3, [r4] @ read aborted ARM instruction |
27 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 24 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
28 | tst r3, #1 << 20 @ check write | 25 | tst r3, #1 << 20 @ check write |
29 | orreq r1, r1, #1 << 11 | 26 | orreq r1, r1, #1 << 11 |
30 | mov pc, lr | 27 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 02251b526c0d..a0908d4653a3 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v5t_early_abort | 5 | * Function: v5t_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -22,10 +19,10 @@ | |||
22 | ENTRY(v5t_early_abort) | 19 | ENTRY(v5t_early_abort) |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | do_thumb_abort | 22 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
26 | ldreq r3, [r2] @ read aborted ARM instruction | 23 | ldreq r3, [r4] @ read aborted ARM instruction |
27 | bic r1, r1, #1 << 11 @ clear bits 11 of FSR | 24 | bic r1, r1, #1 << 11 @ clear bits 11 of FSR |
28 | do_ldrd_abort | 25 | do_ldrd_abort tmp=ip, insn=r3 |
29 | tst r3, #1 << 20 @ check write | 26 | tst r3, #1 << 20 @ check write |
30 | orreq r1, r1, #1 << 11 | 27 | orreq r1, r1, #1 << 11 |
31 | mov pc, lr | 28 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bce68d601c8b..4006b7a61264 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v5tj_early_abort | 5 | * Function: v5tj_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort) | |||
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 22 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
26 | tst r3, #PSR_J_BIT @ Java? | 23 | tst r5, #PSR_J_BIT @ Java? |
27 | movne pc, lr | 24 | bne do_DataAbort |
28 | do_thumb_abort | 25 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
29 | ldreq r3, [r2] @ read aborted ARM instruction | 26 | ldreq r3, [r4] @ read aborted ARM instruction |
30 | do_ldrd_abort | 27 | do_ldrd_abort tmp=ip, insn=r3 |
31 | tst r3, #1 << 20 @ L = 0 -> write | 28 | tst r3, #1 << 20 @ L = 0 -> write |
32 | orreq r1, r1, #1 << 11 @ yes. | 29 | orreq r1, r1, #1 << 11 @ yes. |
33 | mov pc, lr | 30 | b do_DataAbort |
34 | |||
35 | |||
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 1478aa522144..ff1f7cc11f87 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v6_early_abort | 5 | * Function: v6_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -33,16 +30,14 @@ ENTRY(v6_early_abort) | |||
33 | * The test below covers all the write situations, including Java bytecodes | 30 | * The test below covers all the write situations, including Java bytecodes |
34 | */ | 31 | */ |
35 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR | 32 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR |
36 | tst r3, #PSR_J_BIT @ Java? | 33 | tst r5, #PSR_J_BIT @ Java? |
37 | movne pc, lr | 34 | bne do_DataAbort |
38 | do_thumb_abort | 35 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
39 | ldreq r3, [r2] @ read aborted ARM instruction | 36 | ldreq r3, [r4] @ read aborted ARM instruction |
40 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 37 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
41 | reveq r3, r3 | 38 | reveq r3, r3 |
42 | #endif | 39 | #endif |
43 | do_ldrd_abort | 40 | do_ldrd_abort tmp=ip, insn=r3 |
44 | tst r3, #1 << 20 @ L = 0 -> write | 41 | tst r3, #1 << 20 @ L = 0 -> write |
45 | orreq r1, r1, #1 << 11 @ yes. | 42 | orreq r1, r1, #1 << 11 @ yes. |
46 | mov pc, lr | 43 | b do_DataAbort |
47 | |||
48 | |||
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index ec88b157d3bb..703375277ba6 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v7_early_abort | 4 | * Function: v7_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | */ | 13 | */ |
@@ -37,18 +34,18 @@ ENTRY(v7_early_abort) | |||
37 | ldr r3, =0x40d @ On permission fault | 34 | ldr r3, =0x40d @ On permission fault |
38 | and r3, r1, r3 | 35 | and r3, r1, r3 |
39 | cmp r3, #0x0d | 36 | cmp r3, #0x0d |
40 | movne pc, lr | 37 | bne do_DataAbort |
41 | 38 | ||
42 | mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR | 39 | mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR |
43 | isb | 40 | isb |
44 | mrc p15, 0, r2, c7, c4, 0 @ Read the PAR | 41 | mrc p15, 0, ip, c7, c4, 0 @ Read the PAR |
45 | and r3, r2, #0x7b @ On translation fault | 42 | and r3, ip, #0x7b @ On translation fault |
46 | cmp r3, #0x0b | 43 | cmp r3, #0x0b |
47 | movne pc, lr | 44 | bne do_DataAbort |
48 | bic r1, r1, #0xf @ Fix up FSR FS[5:0] | 45 | bic r1, r1, #0xf @ Fix up FSR FS[5:0] |
49 | and r2, r2, #0x7e | 46 | and ip, ip, #0x7e |
50 | orr r1, r1, r2, LSR #1 | 47 | orr r1, r1, ip, LSR #1 |
51 | #endif | 48 | #endif |
52 | 49 | ||
53 | mov pc, lr | 50 | b do_DataAbort |
54 | ENDPROC(v7_early_abort) | 51 | ENDPROC(v7_early_abort) |
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 9fb7b0e25ea1..f3982580c273 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v4t_late_abort | 4 | * Function: v4t_late_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4-r5, r10-r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | * Note: we read user space. This means we might cause a data | 13 | * Note: we read user space. This means we might cause a data |
@@ -18,7 +15,7 @@ | |||
18 | * picture. Unfortunately, this does happen. We live with it. | 15 | * picture. Unfortunately, this does happen. We live with it. |
19 | */ | 16 | */ |
20 | ENTRY(v4t_late_abort) | 17 | ENTRY(v4t_late_abort) |
21 | tst r3, #PSR_T_BIT @ check for thumb mode | 18 | tst r5, #PSR_T_BIT @ check for thumb mode |
22 | #ifdef CONFIG_CPU_CP15_MMU | 19 | #ifdef CONFIG_CPU_CP15_MMU |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
@@ -28,7 +25,7 @@ ENTRY(v4t_late_abort) | |||
28 | mov r1, #0 | 25 | mov r1, #0 |
29 | #endif | 26 | #endif |
30 | bne .data_thumb_abort | 27 | bne .data_thumb_abort |
31 | ldr r8, [r2] @ read arm instruction | 28 | ldr r8, [r4] @ read arm instruction |
32 | tst r8, #1 << 20 @ L = 1 -> write? | 29 | tst r8, #1 << 20 @ L = 1 -> write? |
33 | orreq r1, r1, #1 << 11 @ yes. | 30 | orreq r1, r1, #1 << 11 @ yes. |
34 | and r7, r8, #15 << 24 | 31 | and r7, r8, #15 << 24 |
@@ -47,86 +44,84 @@ ENTRY(v4t_late_abort) | |||
47 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> | 44 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> |
48 | /* a */ b .data_unknown | 45 | /* a */ b .data_unknown |
49 | /* b */ b .data_unknown | 46 | /* b */ b .data_unknown |
50 | /* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m | 47 | /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m |
51 | /* d */ mov pc, lr @ ldc rd, [rn, #m] | 48 | /* d */ b do_DataAbort @ ldc rd, [rn, #m] |
52 | /* e */ b .data_unknown | 49 | /* e */ b .data_unknown |
53 | /* f */ | 50 | /* f */ |
54 | .data_unknown: @ Part of jumptable | 51 | .data_unknown: @ Part of jumptable |
55 | mov r0, r2 | 52 | mov r0, r4 |
56 | mov r1, r8 | 53 | mov r1, r8 |
57 | mov r2, sp | 54 | b baddataabort |
58 | bl baddataabort | ||
59 | b ret_from_exception | ||
60 | 55 | ||
61 | .data_arm_ldmstm: | 56 | .data_arm_ldmstm: |
62 | tst r8, #1 << 21 @ check writeback bit | 57 | tst r8, #1 << 21 @ check writeback bit |
63 | moveq pc, lr @ no writeback -> no fixup | 58 | beq do_DataAbort @ no writeback -> no fixup |
64 | mov r7, #0x11 | 59 | mov r7, #0x11 |
65 | orr r7, r7, #0x1100 | 60 | orr r7, r7, #0x1100 |
66 | and r6, r8, r7 | 61 | and r6, r8, r7 |
67 | and r2, r8, r7, lsl #1 | 62 | and r9, r8, r7, lsl #1 |
68 | add r6, r6, r2, lsr #1 | 63 | add r6, r6, r9, lsr #1 |
69 | and r2, r8, r7, lsl #2 | 64 | and r9, r8, r7, lsl #2 |
70 | add r6, r6, r2, lsr #2 | 65 | add r6, r6, r9, lsr #2 |
71 | and r2, r8, r7, lsl #3 | 66 | and r9, r8, r7, lsl #3 |
72 | add r6, r6, r2, lsr #3 | 67 | add r6, r6, r9, lsr #3 |
73 | add r6, r6, r6, lsr #8 | 68 | add r6, r6, r6, lsr #8 |
74 | add r6, r6, r6, lsr #4 | 69 | add r6, r6, r6, lsr #4 |
75 | and r6, r6, #15 @ r6 = no. of registers to transfer. | 70 | and r6, r6, #15 @ r6 = no. of registers to transfer. |
76 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 71 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
77 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 72 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
78 | tst r8, #1 << 23 @ Check U bit | 73 | tst r8, #1 << 23 @ Check U bit |
79 | subne r7, r7, r6, lsl #2 @ Undo increment | 74 | subne r7, r7, r6, lsl #2 @ Undo increment |
80 | addeq r7, r7, r6, lsl #2 @ Undo decrement | 75 | addeq r7, r7, r6, lsl #2 @ Undo decrement |
81 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 76 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
82 | mov pc, lr | 77 | b do_DataAbort |
83 | 78 | ||
84 | .data_arm_lateldrhpre: | 79 | .data_arm_lateldrhpre: |
85 | tst r8, #1 << 21 @ Check writeback bit | 80 | tst r8, #1 << 21 @ Check writeback bit |
86 | moveq pc, lr @ No writeback -> no fixup | 81 | beq do_DataAbort @ No writeback -> no fixup |
87 | .data_arm_lateldrhpost: | 82 | .data_arm_lateldrhpost: |
88 | and r5, r8, #0x00f @ get Rm / low nibble of immediate value | 83 | and r9, r8, #0x00f @ get Rm / low nibble of immediate value |
89 | tst r8, #1 << 22 @ if (immediate offset) | 84 | tst r8, #1 << 22 @ if (immediate offset) |
90 | andne r6, r8, #0xf00 @ { immediate high nibble | 85 | andne r6, r8, #0xf00 @ { immediate high nibble |
91 | orrne r6, r5, r6, lsr #4 @ combine nibbles } else | 86 | orrne r6, r9, r6, lsr #4 @ combine nibbles } else |
92 | ldreq r6, [sp, r5, lsl #2] @ { load Rm value } | 87 | ldreq r6, [r2, r9, lsl #2] @ { load Rm value } |
93 | .data_arm_apply_r6_and_rn: | 88 | .data_arm_apply_r6_and_rn: |
94 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 89 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
95 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 90 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
96 | tst r8, #1 << 23 @ Check U bit | 91 | tst r8, #1 << 23 @ Check U bit |
97 | subne r7, r7, r6 @ Undo incrmenet | 92 | subne r7, r7, r6 @ Undo incrmenet |
98 | addeq r7, r7, r6 @ Undo decrement | 93 | addeq r7, r7, r6 @ Undo decrement |
99 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 94 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
100 | mov pc, lr | 95 | b do_DataAbort |
101 | 96 | ||
102 | .data_arm_lateldrpreconst: | 97 | .data_arm_lateldrpreconst: |
103 | tst r8, #1 << 21 @ check writeback bit | 98 | tst r8, #1 << 21 @ check writeback bit |
104 | moveq pc, lr @ no writeback -> no fixup | 99 | beq do_DataAbort @ no writeback -> no fixup |
105 | .data_arm_lateldrpostconst: | 100 | .data_arm_lateldrpostconst: |
106 | movs r2, r8, lsl #20 @ Get offset | 101 | movs r6, r8, lsl #20 @ Get offset |
107 | moveq pc, lr @ zero -> no fixup | 102 | beq do_DataAbort @ zero -> no fixup |
108 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 103 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
109 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 104 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
110 | tst r8, #1 << 23 @ Check U bit | 105 | tst r8, #1 << 23 @ Check U bit |
111 | subne r7, r7, r2, lsr #20 @ Undo increment | 106 | subne r7, r7, r6, lsr #20 @ Undo increment |
112 | addeq r7, r7, r2, lsr #20 @ Undo decrement | 107 | addeq r7, r7, r6, lsr #20 @ Undo decrement |
113 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 108 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
114 | mov pc, lr | 109 | b do_DataAbort |
115 | 110 | ||
116 | .data_arm_lateldrprereg: | 111 | .data_arm_lateldrprereg: |
117 | tst r8, #1 << 21 @ check writeback bit | 112 | tst r8, #1 << 21 @ check writeback bit |
118 | moveq pc, lr @ no writeback -> no fixup | 113 | beq do_DataAbort @ no writeback -> no fixup |
119 | .data_arm_lateldrpostreg: | 114 | .data_arm_lateldrpostreg: |
120 | and r7, r8, #15 @ Extract 'm' from instruction | 115 | and r7, r8, #15 @ Extract 'm' from instruction |
121 | ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' | 116 | ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' |
122 | mov r5, r8, lsr #7 @ get shift count | 117 | mov r9, r8, lsr #7 @ get shift count |
123 | ands r5, r5, #31 | 118 | ands r9, r9, #31 |
124 | and r7, r8, #0x70 @ get shift type | 119 | and r7, r8, #0x70 @ get shift type |
125 | orreq r7, r7, #8 @ shift count = 0 | 120 | orreq r7, r7, #8 @ shift count = 0 |
126 | add pc, pc, r7 | 121 | add pc, pc, r7 |
127 | nop | 122 | nop |
128 | 123 | ||
129 | mov r6, r6, lsl r5 @ 0: LSL #!0 | 124 | mov r6, r6, lsl r9 @ 0: LSL #!0 |
130 | b .data_arm_apply_r6_and_rn | 125 | b .data_arm_apply_r6_and_rn |
131 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 | 126 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 |
132 | nop | 127 | nop |
@@ -134,7 +129,7 @@ ENTRY(v4t_late_abort) | |||
134 | nop | 129 | nop |
135 | b .data_unknown @ 3: MUL? | 130 | b .data_unknown @ 3: MUL? |
136 | nop | 131 | nop |
137 | mov r6, r6, lsr r5 @ 4: LSR #!0 | 132 | mov r6, r6, lsr r9 @ 4: LSR #!0 |
138 | b .data_arm_apply_r6_and_rn | 133 | b .data_arm_apply_r6_and_rn |
139 | mov r6, r6, lsr #32 @ 5: LSR #32 | 134 | mov r6, r6, lsr #32 @ 5: LSR #32 |
140 | b .data_arm_apply_r6_and_rn | 135 | b .data_arm_apply_r6_and_rn |
@@ -142,7 +137,7 @@ ENTRY(v4t_late_abort) | |||
142 | nop | 137 | nop |
143 | b .data_unknown @ 7: MUL? | 138 | b .data_unknown @ 7: MUL? |
144 | nop | 139 | nop |
145 | mov r6, r6, asr r5 @ 8: ASR #!0 | 140 | mov r6, r6, asr r9 @ 8: ASR #!0 |
146 | b .data_arm_apply_r6_and_rn | 141 | b .data_arm_apply_r6_and_rn |
147 | mov r6, r6, asr #32 @ 9: ASR #32 | 142 | mov r6, r6, asr #32 @ 9: ASR #32 |
148 | b .data_arm_apply_r6_and_rn | 143 | b .data_arm_apply_r6_and_rn |
@@ -150,7 +145,7 @@ ENTRY(v4t_late_abort) | |||
150 | nop | 145 | nop |
151 | b .data_unknown @ B: MUL? | 146 | b .data_unknown @ B: MUL? |
152 | nop | 147 | nop |
153 | mov r6, r6, ror r5 @ C: ROR #!0 | 148 | mov r6, r6, ror r9 @ C: ROR #!0 |
154 | b .data_arm_apply_r6_and_rn | 149 | b .data_arm_apply_r6_and_rn |
155 | mov r6, r6, rrx @ D: RRX | 150 | mov r6, r6, rrx @ D: RRX |
156 | b .data_arm_apply_r6_and_rn | 151 | b .data_arm_apply_r6_and_rn |
@@ -159,7 +154,7 @@ ENTRY(v4t_late_abort) | |||
159 | b .data_unknown @ F: MUL? | 154 | b .data_unknown @ F: MUL? |
160 | 155 | ||
161 | .data_thumb_abort: | 156 | .data_thumb_abort: |
162 | ldrh r8, [r2] @ read instruction | 157 | ldrh r8, [r4] @ read instruction |
163 | tst r8, #1 << 11 @ L = 1 -> write? | 158 | tst r8, #1 << 11 @ L = 1 -> write? |
164 | orreq r1, r1, #1 << 8 @ yes | 159 | orreq r1, r1, #1 << 8 @ yes |
165 | and r7, r8, #15 << 12 | 160 | and r7, r8, #15 << 12 |
@@ -172,10 +167,10 @@ ENTRY(v4t_late_abort) | |||
172 | /* 3 */ b .data_unknown | 167 | /* 3 */ b .data_unknown |
173 | /* 4 */ b .data_unknown | 168 | /* 4 */ b .data_unknown |
174 | /* 5 */ b .data_thumb_reg | 169 | /* 5 */ b .data_thumb_reg |
175 | /* 6 */ mov pc, lr | 170 | /* 6 */ b do_DataAbort |
176 | /* 7 */ mov pc, lr | 171 | /* 7 */ b do_DataAbort |
177 | /* 8 */ mov pc, lr | 172 | /* 8 */ b do_DataAbort |
178 | /* 9 */ mov pc, lr | 173 | /* 9 */ b do_DataAbort |
179 | /* A */ b .data_unknown | 174 | /* A */ b .data_unknown |
180 | /* B */ b .data_thumb_pushpop | 175 | /* B */ b .data_thumb_pushpop |
181 | /* C */ b .data_thumb_ldmstm | 176 | /* C */ b .data_thumb_ldmstm |
@@ -185,41 +180,41 @@ ENTRY(v4t_late_abort) | |||
185 | 180 | ||
186 | .data_thumb_reg: | 181 | .data_thumb_reg: |
187 | tst r8, #1 << 9 | 182 | tst r8, #1 << 9 |
188 | moveq pc, lr | 183 | beq do_DataAbort |
189 | tst r8, #1 << 10 @ If 'S' (signed) bit is set | 184 | tst r8, #1 << 10 @ If 'S' (signed) bit is set |
190 | movne r1, #0 @ it must be a load instr | 185 | movne r1, #0 @ it must be a load instr |
191 | mov pc, lr | 186 | b do_DataAbort |
192 | 187 | ||
193 | .data_thumb_pushpop: | 188 | .data_thumb_pushpop: |
194 | tst r8, #1 << 10 | 189 | tst r8, #1 << 10 |
195 | beq .data_unknown | 190 | beq .data_unknown |
196 | and r6, r8, #0x55 @ hweight8(r8) + R bit | 191 | and r6, r8, #0x55 @ hweight8(r8) + R bit |
197 | and r2, r8, #0xaa | 192 | and r9, r8, #0xaa |
198 | add r6, r6, r2, lsr #1 | 193 | add r6, r6, r9, lsr #1 |
199 | and r2, r6, #0xcc | 194 | and r9, r6, #0xcc |
200 | and r6, r6, #0x33 | 195 | and r6, r6, #0x33 |
201 | add r6, r6, r2, lsr #2 | 196 | add r6, r6, r9, lsr #2 |
202 | movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) | 197 | movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) |
203 | adc r6, r6, r6, lsr #4 @ high + low nibble + R bit | 198 | adc r6, r6, r6, lsr #4 @ high + low nibble + R bit |
204 | and r6, r6, #15 @ number of regs to transfer | 199 | and r6, r6, #15 @ number of regs to transfer |
205 | ldr r7, [sp, #13 << 2] | 200 | ldr r7, [r2, #13 << 2] |
206 | tst r8, #1 << 11 | 201 | tst r8, #1 << 11 |
207 | addeq r7, r7, r6, lsl #2 @ increment SP if PUSH | 202 | addeq r7, r7, r6, lsl #2 @ increment SP if PUSH |
208 | subne r7, r7, r6, lsl #2 @ decrement SP if POP | 203 | subne r7, r7, r6, lsl #2 @ decrement SP if POP |
209 | str r7, [sp, #13 << 2] | 204 | str r7, [r2, #13 << 2] |
210 | mov pc, lr | 205 | b do_DataAbort |
211 | 206 | ||
212 | .data_thumb_ldmstm: | 207 | .data_thumb_ldmstm: |
213 | and r6, r8, #0x55 @ hweight8(r8) | 208 | and r6, r8, #0x55 @ hweight8(r8) |
214 | and r2, r8, #0xaa | 209 | and r9, r8, #0xaa |
215 | add r6, r6, r2, lsr #1 | 210 | add r6, r6, r9, lsr #1 |
216 | and r2, r6, #0xcc | 211 | and r9, r6, #0xcc |
217 | and r6, r6, #0x33 | 212 | and r6, r6, #0x33 |
218 | add r6, r6, r2, lsr #2 | 213 | add r6, r6, r9, lsr #2 |
219 | add r6, r6, r6, lsr #4 | 214 | add r6, r6, r6, lsr #4 |
220 | and r5, r8, #7 << 8 | 215 | and r9, r8, #7 << 8 |
221 | ldr r7, [sp, r5, lsr #6] | 216 | ldr r7, [r2, r9, lsr #6] |
222 | and r6, r6, #15 @ number of regs to transfer | 217 | and r6, r6, #15 @ number of regs to transfer |
223 | sub r7, r7, r6, lsl #2 @ always decrement | 218 | sub r7, r7, r6, lsl #2 @ always decrement |
224 | str r7, [sp, r5, lsr #6] | 219 | str r7, [r2, r9, lsr #6] |
225 | mov pc, lr | 220 | b do_DataAbort |
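The .data_arm_ldmstm fixup above counts the set bits of the 16-bit LDM/STM register list with a masked-add trick so the base register writeback can be undone by count * 4 bytes. A C rendering of the same computation, for readers who prefer it to the shifted 0x1111 masks (as in the assembly, the result is taken modulo 16):

    #include <stdint.h>

    /* Sketch of the register-count calculation used by the LDM/STM fixup. */
    static unsigned int ldm_register_count(uint16_t reglist)
    {
        unsigned int n;

        /* sum the four bits of each nibble, nibble by nibble */
        n  =  reglist       & 0x1111;
        n += (reglist >> 1) & 0x1111;
        n += (reglist >> 2) & 0x1111;
        n += (reglist >> 3) & 0x1111;
        /* fold the four nibble counts together */
        n += n >> 8;
        n += n >> 4;
        return n & 15;   /* masked to 4 bits, exactly as the assembly does */
    }
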
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index d7cb1bfa51a4..52162d59407a 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S | |||
@@ -9,34 +9,32 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | .macro do_thumb_abort | 12 | .macro do_thumb_abort, fsr, pc, psr, tmp |
13 | tst r3, #PSR_T_BIT | 13 | tst \psr, #PSR_T_BIT |
14 | beq not_thumb | 14 | beq not_thumb |
15 | ldrh r3, [r2] @ Read aborted Thumb instruction | 15 | ldrh \tmp, [\pc] @ Read aborted Thumb instruction |
16 | and r3, r3, # 0xfe00 @ Mask opcode field | 16 | and \tmp, \tmp, # 0xfe00 @ Mask opcode field |
17 | cmp r3, # 0x5600 @ Is it ldrsb? | 17 | cmp \tmp, # 0x5600 @ Is it ldrsb? |
18 | orreq r3, r3, #1 << 11 @ Set L-bit if yes | 18 | orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes |
19 | tst r3, #1 << 11 @ L = 0 -> write | 19 | tst \tmp, #1 << 11 @ L = 0 -> write |
20 | orreq r1, r1, #1 << 11 @ yes. | 20 | orreq \psr, \psr, #1 << 11 @ yes. |
21 | mov pc, lr | 21 | b do_DataAbort |
22 | not_thumb: | 22 | not_thumb: |
23 | .endm | 23 | .endm |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * We check for the following insturction encoding for LDRD. | 26 | * We check for the following instruction encoding for LDRD. |
27 | * | 27 | * |
28 | * [27:25] == 0 | 28 | * [27:25] == 000 |
29 | * [7:4] == 1101 | 29 | * [7:4] == 1101 |
30 | * [20] == 0 | 30 | * [20] == 0 |
31 | */ | 31 | */ |
32 | .macro do_ldrd_abort | 32 | .macro do_ldrd_abort, tmp, insn |
33 | tst r3, #0x0e000000 @ [27:25] == 0 | 33 | tst \insn, #0x0e100000 @ [27:25,20] == 0 |
34 | bne not_ldrd | 34 | bne not_ldrd |
35 | and r2, r3, #0x000000f0 @ [7:4] == 1101 | 35 | and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 |
36 | cmp r2, #0x000000d0 | 36 | cmp \tmp, #0x000000d0 |
37 | bne not_ldrd | 37 | beq do_DataAbort |
38 | tst r3, #1 << 20 @ [20] == 0 | ||
39 | moveq pc, lr | ||
40 | not_ldrd: | 38 | not_ldrd: |
41 | .endm | 39 | .endm |
42 | 40 | ||
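The reworked do_ldrd_abort macro folds the old three-step test into one mask: bits [27:25] and [20] must all be zero and bits [7:4] must be 1101 for the instruction to be treated as LDRD. The equivalent check in C:

    #include <stdint.h>
    #include <stdbool.h>

    /* Sketch of the LDRD encoding test performed by do_ldrd_abort. */
    static bool insn_is_ldrd(uint32_t insn)
    {
        if (insn & 0x0e100000)                     /* [27:25] != 000 or [20] != 0 */
            return false;
        return (insn & 0x000000f0) == 0x000000d0;  /* [7:4] == 1101 */
    }
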
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 625e580945b5..119cb479c2ab 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S | |||
@@ -3,11 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: nommu_early_abort | 4 | * Function: nommu_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = 0 (abort address) | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = 0 (FSR) | ||
11 | * | 11 | * |
12 | * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. | 12 | * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. |
13 | * Just fill zero into the registers. | 13 | * Just fill zero into the registers. |
@@ -16,5 +16,5 @@ | |||
16 | ENTRY(nommu_early_abort) | 16 | ENTRY(nommu_early_abort) |
17 | mov r0, #0 @ clear r0, r1 (no FSR/FAR) | 17 | mov r0, #0 @ clear r0, r1 (no FSR/FAR) |
18 | mov r1, #0 | 18 | mov r1, #0 |
19 | mov pc, lr | 19 | b do_DataAbort |
20 | ENDPROC(nommu_early_abort) | 20 | ENDPROC(nommu_early_abort) |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 724ba3bce72c..be7c638b648b 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
727 | int isize = 4; | 727 | int isize = 4; |
728 | int thumb2_32b = 0; | 728 | int thumb2_32b = 0; |
729 | 729 | ||
730 | if (interrupts_enabled(regs)) | ||
731 | local_irq_enable(); | ||
732 | |||
730 | instrptr = instruction_pointer(regs); | 733 | instrptr = instruction_pointer(regs); |
731 | 734 | ||
732 | fs = get_fs(); | 735 | fs = get_fs(); |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index ef59099a5463..44c086710d2b 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -120,17 +120,22 @@ static void l2x0_cache_sync(void) | |||
120 | spin_unlock_irqrestore(&l2x0_lock, flags); | 120 | spin_unlock_irqrestore(&l2x0_lock, flags); |
121 | } | 121 | } |
122 | 122 | ||
123 | static void l2x0_flush_all(void) | 123 | static void __l2x0_flush_all(void) |
124 | { | 124 | { |
125 | unsigned long flags; | ||
126 | |||
127 | /* clean all ways */ | ||
128 | spin_lock_irqsave(&l2x0_lock, flags); | ||
129 | debug_writel(0x03); | 125 | debug_writel(0x03); |
130 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); | 126 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); |
131 | cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); | 127 | cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); |
132 | cache_sync(); | 128 | cache_sync(); |
133 | debug_writel(0x00); | 129 | debug_writel(0x00); |
130 | } | ||
131 | |||
132 | static void l2x0_flush_all(void) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | /* clean all ways */ | ||
137 | spin_lock_irqsave(&l2x0_lock, flags); | ||
138 | __l2x0_flush_all(); | ||
134 | spin_unlock_irqrestore(&l2x0_lock, flags); | 139 | spin_unlock_irqrestore(&l2x0_lock, flags); |
135 | } | 140 | } |
136 | 141 | ||
@@ -266,7 +271,9 @@ static void l2x0_disable(void) | |||
266 | unsigned long flags; | 271 | unsigned long flags; |
267 | 272 | ||
268 | spin_lock_irqsave(&l2x0_lock, flags); | 273 | spin_lock_irqsave(&l2x0_lock, flags); |
269 | writel(0, l2x0_base + L2X0_CTRL); | 274 | __l2x0_flush_all(); |
275 | writel_relaxed(0, l2x0_base + L2X0_CTRL); | ||
276 | dsb(); | ||
270 | spin_unlock_irqrestore(&l2x0_lock, flags); | 277 | spin_unlock_irqrestore(&l2x0_lock, flags); |
271 | } | 278 | } |
272 | 279 | ||
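With __l2x0_flush_all() split out, l2x0_disable() can now clean and invalidate every way and clear the control register without dropping l2x0_lock in between. A hedged usage sketch, assuming outer_disable() in asm/outercache.h is wired to the l2x0 .disable hook as on mainline of this era:

    #include <asm/cacheflush.h>
    #include <asm/outercache.h>

    /*
     * Hypothetical platform low-power entry path: with the change above,
     * one outer_disable() call flushes all L2 ways and turns the
     * controller off atomically with respect to other L2 maintenance.
     */
    static void example_platform_enter_lowpower(void)
    {
        flush_cache_all();   /* L1 first */
        outer_disable();     /* flush + disable L2, now done under l2x0_lock */
    }
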
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index bdba6c65c901..63cca0097130 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to, KM_USER1); |
43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
44 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); | ||
45 | kunmap_atomic(kto, KM_USER1); | 44 | kunmap_atomic(kto, KM_USER1); |
46 | kunmap_atomic(kfrom, KM_USER0); | 45 | kunmap_atomic(kfrom, KM_USER0); |
47 | } | 46 | } |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093cee09a..0a0a1e7c20d2 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -25,9 +25,11 @@ | |||
25 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/sizes.h> | 26 | #include <asm/sizes.h> |
27 | 27 | ||
28 | #include "mm.h" | ||
29 | |||
28 | static u64 get_coherent_dma_mask(struct device *dev) | 30 | static u64 get_coherent_dma_mask(struct device *dev) |
29 | { | 31 | { |
30 | u64 mask = ISA_DMA_THRESHOLD; | 32 | u64 mask = (u64)arm_dma_limit; |
31 | 33 | ||
32 | if (dev) { | 34 | if (dev) { |
33 | mask = dev->coherent_dma_mask; | 35 | mask = dev->coherent_dma_mask; |
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
41 | return 0; | 43 | return 0; |
42 | } | 44 | } |
43 | 45 | ||
44 | if ((~mask) & ISA_DMA_THRESHOLD) { | 46 | if ((~mask) & (u64)arm_dma_limit) { |
45 | dev_warn(dev, "coherent DMA mask %#llx is smaller " | 47 | dev_warn(dev, "coherent DMA mask %#llx is smaller " |
46 | "than system GFP_DMA mask %#llx\n", | 48 | "than system GFP_DMA mask %#llx\n", |
47 | mask, (unsigned long long)ISA_DMA_THRESHOLD); | 49 | mask, (u64)arm_dma_limit); |
48 | return 0; | 50 | return 0; |
49 | } | 51 | } |
50 | } | 52 | } |
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
657 | } | 659 | } |
658 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 660 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
659 | 661 | ||
662 | /* | ||
663 | * Return whether the given device DMA address mask can be supported | ||
664 | * properly. For example, if your device can only drive the low 24-bits | ||
665 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
666 | * to this function. | ||
667 | */ | ||
668 | int dma_supported(struct device *dev, u64 mask) | ||
669 | { | ||
670 | if (mask < (u64)arm_dma_limit) | ||
671 | return 0; | ||
672 | return 1; | ||
673 | } | ||
674 | EXPORT_SYMBOL(dma_supported); | ||
675 | |||
676 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
677 | { | ||
678 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
679 | return -EIO; | ||
680 | |||
681 | #ifndef CONFIG_DMABOUNCE | ||
682 | *dev->dma_mask = dma_mask; | ||
683 | #endif | ||
684 | |||
685 | return 0; | ||
686 | } | ||
687 | EXPORT_SYMBOL(dma_set_mask); | ||
688 | |||
660 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 689 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
661 | 690 | ||
662 | static int __init dma_debug_do_init(void) | 691 | static int __init dma_debug_do_init(void) |
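dma_supported() above is a straight comparison against arm_dma_limit, and dma_set_mask() records the mask unless DMABOUNCE manages it. A sketch of how a driver probe would use the exported helpers; "foo" is a hypothetical device, not one touched by this series:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>

    /* Hypothetical probe fragment exercising the newly exported helpers. */
    static int foo_probe_dma(struct device *dev)
    {
        /* Fails with -EIO if a 32-bit mask cannot cover arm_dma_limit. */
        return dma_set_mask(dev, DMA_BIT_MASK(32));
    }
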
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d88fd3b..55657c222d7c 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
94 | 94 | ||
95 | pud = pud_offset(pgd, addr); | 95 | pud = pud_offset(pgd, addr); |
96 | if (PTRS_PER_PUD != 1) | 96 | if (PTRS_PER_PUD != 1) |
97 | printk(", *pud=%08lx", pud_val(*pud)); | 97 | printk(", *pud=%08llx", (long long)pud_val(*pud)); |
98 | 98 | ||
99 | if (pud_none(*pud)) | 99 | if (pud_none(*pud)) |
100 | break; | 100 | break; |
@@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
285 | tsk = current; | 285 | tsk = current; |
286 | mm = tsk->mm; | 286 | mm = tsk->mm; |
287 | 287 | ||
288 | /* Enable interrupts if they were enabled in the parent context. */ | ||
289 | if (interrupts_enabled(regs)) | ||
290 | local_irq_enable(); | ||
291 | |||
288 | /* | 292 | /* |
289 | * If we're in an interrupt or have no user | 293 | * If we're in an interrupt or have no user |
290 | * context, we must not take the fault.. | 294 | * context, we must not take the fault.. |
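do_page_fault() now re-enables interrupts when the faulting context had them enabled, matching the do_alignment() change earlier in this series. interrupts_enabled() boils down to a test on the saved CPSR; a sketch of the check, assuming the usual asm/ptrace.h definition:

    #include <linux/irqflags.h>
    #include <asm/ptrace.h>

    /* Sketch, assuming interrupts_enabled(regs) tests PSR_I_BIT in the saved cpsr. */
    static inline void fault_reenable_irqs(struct pt_regs *regs)
    {
        if (!(regs->ARM_cpsr & PSR_I_BIT))   /* IRQs were on in the parent context */
            local_irq_enable();
    }
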
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c40a21..e5ab4362322f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -212,6 +212,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn, | |||
212 | } | 212 | } |
213 | 213 | ||
214 | #ifdef CONFIG_ZONE_DMA | 214 | #ifdef CONFIG_ZONE_DMA |
215 | /* | ||
216 | * The DMA mask corresponding to the maximum bus address allocatable | ||
217 | * using GFP_DMA. The default here places no restriction on DMA | ||
218 | * allocations. This must be the smallest DMA mask in the system, | ||
219 | * so a successful GFP_DMA allocation will always satisfy this. | ||
220 | */ | ||
221 | u32 arm_dma_limit; | ||
222 | |||
215 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 223 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
216 | unsigned long dma_size) | 224 | unsigned long dma_size) |
217 | { | 225 | { |
@@ -278,6 +286,8 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | |||
278 | */ | 286 | */ |
279 | arm_adjust_dma_zone(zone_size, zhole_size, | 287 | arm_adjust_dma_zone(zone_size, zhole_size, |
280 | ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); | 288 | ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); |
289 | |||
290 | arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1; | ||
281 | #endif | 291 | #endif |
282 | 292 | ||
283 | free_area_init_node(0, zone_size, min, zhole_size); | 293 | free_area_init_node(0, zone_size, min, zhole_size); |
@@ -422,6 +432,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) | |||
422 | return pages; | 432 | return pages; |
423 | } | 433 | } |
424 | 434 | ||
435 | /* | ||
436 | * Poison init memory with an undefined instruction (ARM) or a branch to an | ||
437 | * undefined instruction (Thumb). | ||
438 | */ | ||
439 | static inline void poison_init_mem(void *s, size_t count) | ||
440 | { | ||
441 | u32 *p = (u32 *)s; | ||
442 | while ((count = count - 4)) | ||
443 | *p++ = 0xe7fddef0; | ||
444 | } | ||
445 | |||
425 | static inline void | 446 | static inline void |
426 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) | 447 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
427 | { | 448 | { |
@@ -639,8 +660,8 @@ void __init mem_init(void) | |||
639 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | 660 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" |
640 | #endif | 661 | #endif |
641 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | 662 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" |
642 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
643 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | 663 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" |
664 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
644 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" | 665 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" |
645 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", | 666 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", |
646 | 667 | ||
@@ -662,8 +683,8 @@ void __init mem_init(void) | |||
662 | #endif | 683 | #endif |
663 | MLM(MODULES_VADDR, MODULES_END), | 684 | MLM(MODULES_VADDR, MODULES_END), |
664 | 685 | ||
665 | MLK_ROUNDUP(__init_begin, __init_end), | ||
666 | MLK_ROUNDUP(_text, _etext), | 686 | MLK_ROUNDUP(_text, _etext), |
687 | MLK_ROUNDUP(__init_begin, __init_end), | ||
667 | MLK_ROUNDUP(_sdata, _edata), | 688 | MLK_ROUNDUP(_sdata, _edata), |
668 | MLK_ROUNDUP(__bss_start, __bss_stop)); | 689 | MLK_ROUNDUP(__bss_start, __bss_stop)); |
669 | 690 | ||
@@ -704,11 +725,13 @@ void free_initmem(void) | |||
704 | #ifdef CONFIG_HAVE_TCM | 725 | #ifdef CONFIG_HAVE_TCM |
705 | extern char __tcm_start, __tcm_end; | 726 | extern char __tcm_start, __tcm_end; |
706 | 727 | ||
728 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); | ||
707 | totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), | 729 | totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), |
708 | __phys_to_pfn(__pa(&__tcm_end)), | 730 | __phys_to_pfn(__pa(&__tcm_end)), |
709 | "TCM link"); | 731 | "TCM link"); |
710 | #endif | 732 | #endif |
711 | 733 | ||
734 | poison_init_mem(__init_begin, __init_end - __init_begin); | ||
712 | if (!machine_is_integrator() && !machine_is_cintegrator()) | 735 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
713 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), | 736 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), |
714 | __phys_to_pfn(__pa(__init_end)), | 737 | __phys_to_pfn(__pa(__init_end)), |
@@ -721,10 +744,12 @@ static int keep_initrd; | |||
721 | 744 | ||
722 | void free_initrd_mem(unsigned long start, unsigned long end) | 745 | void free_initrd_mem(unsigned long start, unsigned long end) |
723 | { | 746 | { |
724 | if (!keep_initrd) | 747 | if (!keep_initrd) { |
748 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | ||
725 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), | 749 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), |
726 | __phys_to_pfn(__pa(end)), | 750 | __phys_to_pfn(__pa(end)), |
727 | "initrd"); | 751 | "initrd"); |
752 | } | ||
728 | } | 753 | } |
729 | 754 | ||
730 | static int __init keepinitrd_setup(char *__unused) | 755 | static int __init keepinitrd_setup(char *__unused) |
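arm_dma_limit records the highest bus address a GFP_DMA allocation can return, so the mask comparison in dma_supported() has something concrete to test against. A worked example with hypothetical platform values (PHYS_OFFSET and the DMA zone size are board-specific and not taken from this series):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t phys_offset   = 0x20000000;   /* hypothetical PHYS_OFFSET */
        uint32_t dma_zone_size = 64 << 20;     /* hypothetical 64 MiB DMA zone */
        uint32_t arm_dma_limit = phys_offset + dma_zone_size - 1;

        /* Prints 0x23ffffff: a 24-bit device mask would be refused. */
        printf("arm_dma_limit = %#x\n", (unsigned int)arm_dma_limit);
        return 0;
    }
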
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5b3d7d543659..010566799c80 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
23 | 23 | ||
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifdef CONFIG_ZONE_DMA | ||
27 | extern u32 arm_dma_limit; | ||
28 | #else | ||
29 | #define arm_dma_limit ((u32)~0) | ||
30 | #endif | ||
31 | |||
26 | void __init bootmem_init(void); | 32 | void __init bootmem_init(void); |
27 | void arm_mm_memblock_reserve(void); | 33 | void arm_mm_memblock_reserve(void); |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 9d9e736c2b4f..594d677b92c8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc); | |||
759 | 759 | ||
760 | static phys_addr_t lowmem_limit __initdata = 0; | 760 | static phys_addr_t lowmem_limit __initdata = 0; |
761 | 761 | ||
762 | static void __init sanity_check_meminfo(void) | 762 | void __init sanity_check_meminfo(void) |
763 | { | 763 | { |
764 | int i, j, highmem = 0; | 764 | int i, j, highmem = 0; |
765 | 765 | ||
@@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1032 | { | 1032 | { |
1033 | void *zero_page; | 1033 | void *zero_page; |
1034 | 1034 | ||
1035 | memblock_set_current_limit(lowmem_limit); | ||
1036 | |||
1035 | build_mem_type_table(); | 1037 | build_mem_type_table(); |
1036 | sanity_check_meminfo(); | ||
1037 | prepare_page_table(); | 1038 | prepare_page_table(); |
1038 | map_lowmem(); | 1039 | map_lowmem(); |
1039 | devicemaps_init(mdesc); | 1040 | devicemaps_init(mdesc); |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 687d02319a41..941a98c9e8aa 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void) | |||
27 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); | 27 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); |
28 | } | 28 | } |
29 | 29 | ||
30 | void __init sanity_check_meminfo(void) | ||
31 | { | ||
32 | } | ||
33 | |||
30 | /* | 34 | /* |
31 | * paging_init() sets up the page tables, initialises the zone memory | 35 | * paging_init() sets up the page tables, initialises the zone memory |
32 | * maps, and sets up the zero page, bad page and bad page tables. | 36 | * maps, and sets up the zero page, bad page and bad page tables. |
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 87970eba88ea..8bbff025269a 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S | |||
@@ -4,16 +4,18 @@ | |||
4 | /* | 4 | /* |
5 | * Function: legacy_pabort | 5 | * Function: legacy_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = Simulated IFSR with section translation fault status | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | .align 5 | 16 | .align 5 |
16 | ENTRY(legacy_pabort) | 17 | ENTRY(legacy_pabort) |
18 | mov r0, r4 | ||
17 | mov r1, #5 | 19 | mov r1, #5 |
18 | mov pc, lr | 20 | b do_PrefetchAbort |
19 | ENDPROC(legacy_pabort) | 21 | ENDPROC(legacy_pabort) |
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index 06e3d1ef2115..9627646ce783 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S | |||
@@ -4,16 +4,18 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v6_pabort | 5 | * Function: v6_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = IFSR | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | .align 5 | 16 | .align 5 |
16 | ENTRY(v6_pabort) | 17 | ENTRY(v6_pabort) |
18 | mov r0, r4 | ||
17 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | 19 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR |
18 | mov pc, lr | 20 | b do_PrefetchAbort |
19 | ENDPROC(v6_pabort) | 21 | ENDPROC(v6_pabort) |
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index a8b3b300a18d..875761f44f3b 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S | |||
@@ -2,12 +2,13 @@ | |||
2 | #include <asm/assembler.h> | 2 | #include <asm/assembler.h> |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Function: v6_pabort | 5 | * Function: v7_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = IFSR | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
@@ -16,5 +17,5 @@ | |||
16 | ENTRY(v7_pabort) | 17 | ENTRY(v7_pabort) |
17 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR | 18 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR |
18 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | 19 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR |
19 | mov pc, lr | 20 | b do_PrefetchAbort |
20 | ENDPROC(v7_pabort) | 21 | ENDPROC(v7_pabort) |
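All three prefetch abort stubs now load r0 with the faulting address and r1 with the IFSR, then branch straight to do_PrefetchAbort; the legacy stub, which has no IFSR, reports the fixed status 5 that the removed comment described as a simulated section translation fault. A sketch of what that status decodes to, assuming the usual FSR FS[10,3:0] layout:

    #include <stdbool.h>

    /* Sketch: status 5 with FS[4] clear decodes as a section translation fault. */
    static bool ifsr_is_section_translation_fault(unsigned int ifsr)
    {
        return (ifsr & 0x40f) == 5;   /* bit 10 = FS[4], bits 3:0 = FS[3:0] */
    }
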
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 5f79dc4ce3fb..50e3543d03bf 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
@@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area) | |||
29 | /* | 29 | /* |
30 | * Function: arm6_7_data_abort () | 30 | * Function: arm6_7_data_abort () |
31 | * | 31 | * |
32 | * Params : r2 = address of aborted instruction | 32 | * Params : r2 = pt_regs |
33 | * : sp = pointer to registers | 33 | * : r4 = aborted context pc |
34 | * : r5 = aborted context psr | ||
34 | * | 35 | * |
35 | * Purpose : obtain information about current aborted instruction | 36 | * Purpose : obtain information about current aborted instruction |
36 | * | 37 | * |
37 | * Returns : r0 = address of abort | 38 | * Returns : r4-r5, r10-r11, r13 preserved |
38 | * : r1 = FSR | ||
39 | */ | 39 | */ |
40 | 40 | ||
41 | ENTRY(cpu_arm7_data_abort) | 41 | ENTRY(cpu_arm7_data_abort) |
42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
44 | ldr r8, [r2] @ read arm instruction | 44 | ldr r8, [r4] @ read arm instruction |
45 | tst r8, #1 << 20 @ L = 0 -> write? | 45 | tst r8, #1 << 20 @ L = 0 -> write? |
46 | orreq r1, r1, #1 << 11 @ yes. | 46 | orreq r1, r1, #1 << 11 @ yes. |
47 | and r7, r8, #15 << 24 | 47 | and r7, r8, #15 << 24 |
@@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort) | |||
49 | nop | 49 | nop |
50 | 50 | ||
51 | /* 0 */ b .data_unknown | 51 | /* 0 */ b .data_unknown |
52 | /* 1 */ mov pc, lr @ swp | 52 | /* 1 */ b do_DataAbort @ swp |
53 | /* 2 */ b .data_unknown | 53 | /* 2 */ b .data_unknown |
54 | /* 3 */ b .data_unknown | 54 | /* 3 */ b .data_unknown |
55 | /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m | 55 | /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m |
@@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort) | |||
60 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> | 60 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> |
61 | /* a */ b .data_unknown | 61 | /* a */ b .data_unknown |
62 | /* b */ b .data_unknown | 62 | /* b */ b .data_unknown |
63 | /* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m | 63 | /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m |
64 | /* d */ mov pc, lr @ ldc rd, [rn, #m] | 64 | /* d */ b do_DataAbort @ ldc rd, [rn, #m] |
65 | /* e */ b .data_unknown | 65 | /* e */ b .data_unknown |
66 | /* f */ | 66 | /* f */ |
67 | .data_unknown: @ Part of jumptable | 67 | .data_unknown: @ Part of jumptable |
68 | mov r0, r2 | 68 | mov r0, r4 |
69 | mov r1, r8 | 69 | mov r1, r8 |
70 | mov r2, sp | 70 | b baddataabort |
71 | bl baddataabort | ||
72 | b ret_from_exception | ||
73 | 71 | ||
74 | ENTRY(cpu_arm6_data_abort) | 72 | ENTRY(cpu_arm6_data_abort) |
75 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 73 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
76 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 74 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
77 | ldr r8, [r2] @ read arm instruction | 75 | ldr r8, [r4] @ read arm instruction |
78 | tst r8, #1 << 20 @ L = 0 -> write? | 76 | tst r8, #1 << 20 @ L = 0 -> write? |
79 | orreq r1, r1, #1 << 11 @ yes. | 77 | orreq r1, r1, #1 << 11 @ yes. |
80 | and r7, r8, #14 << 24 | 78 | and r7, r8, #14 << 24 |
81 | teq r7, #8 << 24 @ was it ldm/stm | 79 | teq r7, #8 << 24 @ was it ldm/stm |
82 | movne pc, lr | 80 | bne do_DataAbort |
83 | 81 | ||
84 | .data_arm_ldmstm: | 82 | .data_arm_ldmstm: |
85 | tst r8, #1 << 21 @ check writeback bit | 83 | tst r8, #1 << 21 @ check writeback bit |
86 | moveq pc, lr @ no writeback -> no fixup | 84 | beq do_DataAbort @ no writeback -> no fixup |
87 | mov r7, #0x11 | 85 | mov r7, #0x11 |
88 | orr r7, r7, #0x1100 | 86 | orr r7, r7, #0x1100 |
89 | and r6, r8, r7 | 87 | and r6, r8, r7 |
90 | and r2, r8, r7, lsl #1 | 88 | and r9, r8, r7, lsl #1 |
91 | add r6, r6, r2, lsr #1 | 89 | add r6, r6, r9, lsr #1 |
92 | and r2, r8, r7, lsl #2 | 90 | and r9, r8, r7, lsl #2 |
93 | add r6, r6, r2, lsr #2 | 91 | add r6, r6, r9, lsr #2 |
94 | and r2, r8, r7, lsl #3 | 92 | and r9, r8, r7, lsl #3 |
95 | add r6, r6, r2, lsr #3 | 93 | add r6, r6, r9, lsr #3 |
96 | add r6, r6, r6, lsr #8 | 94 | add r6, r6, r6, lsr #8 |
97 | add r6, r6, r6, lsr #4 | 95 | add r6, r6, r6, lsr #4 |
98 | and r6, r6, #15 @ r6 = no. of registers to transfer. | 96 | and r6, r6, #15 @ r6 = no. of registers to transfer. |
99 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 97 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
100 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 98 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
101 | tst r8, #1 << 23 @ Check U bit | 99 | tst r8, #1 << 23 @ Check U bit |
102 | subne r7, r7, r6, lsl #2 @ Undo increment | 100 | subne r7, r7, r6, lsl #2 @ Undo increment |
103 | addeq r7, r7, r6, lsl #2 @ Undo decrement | 101 | addeq r7, r7, r6, lsl #2 @ Undo decrement |
104 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 102 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
105 | mov pc, lr | 103 | b do_DataAbort |
106 | 104 | ||
107 | .data_arm_apply_r6_and_rn: | 105 | .data_arm_apply_r6_and_rn: |
108 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 106 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
109 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 107 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
110 | tst r8, #1 << 23 @ Check U bit | 108 | tst r8, #1 << 23 @ Check U bit |
111 | subne r7, r7, r6 @ Undo incrmenet | 109 | subne r7, r7, r6 @ Undo incrmenet |
112 | addeq r7, r7, r6 @ Undo decrement | 110 | addeq r7, r7, r6 @ Undo decrement |
113 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 111 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
114 | mov pc, lr | 112 | b do_DataAbort |
115 | 113 | ||
116 | .data_arm_lateldrpreconst: | 114 | .data_arm_lateldrpreconst: |
117 | tst r8, #1 << 21 @ check writeback bit | 115 | tst r8, #1 << 21 @ check writeback bit |
118 | moveq pc, lr @ no writeback -> no fixup | 116 | beq do_DataAbort @ no writeback -> no fixup |
119 | .data_arm_lateldrpostconst: | 117 | .data_arm_lateldrpostconst: |
120 | movs r2, r8, lsl #20 @ Get offset | 118 | movs r6, r8, lsl #20 @ Get offset |
121 | moveq pc, lr @ zero -> no fixup | 119 | beq do_DataAbort @ zero -> no fixup |
122 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 120 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
123 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 121 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
124 | tst r8, #1 << 23 @ Check U bit | 122 | tst r8, #1 << 23 @ Check U bit |
125 | subne r7, r7, r2, lsr #20 @ Undo increment | 123 | subne r7, r7, r6, lsr #20 @ Undo increment |
126 | addeq r7, r7, r2, lsr #20 @ Undo decrement | 124 | addeq r7, r7, r6, lsr #20 @ Undo decrement |
127 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 125 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
128 | mov pc, lr | 126 | b do_DataAbort |
129 | 127 | ||
130 | .data_arm_lateldrprereg: | 128 | .data_arm_lateldrprereg: |
131 | tst r8, #1 << 21 @ check writeback bit | 129 | tst r8, #1 << 21 @ check writeback bit |
132 | moveq pc, lr @ no writeback -> no fixup | 130 | beq do_DataAbort @ no writeback -> no fixup |
133 | .data_arm_lateldrpostreg: | 131 | .data_arm_lateldrpostreg: |
134 | and r7, r8, #15 @ Extract 'm' from instruction | 132 | and r7, r8, #15 @ Extract 'm' from instruction |
135 | ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' | 133 | ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' |
136 | mov r5, r8, lsr #7 @ get shift count | 134 | mov r9, r8, lsr #7 @ get shift count |
137 | ands r5, r5, #31 | 135 | ands r9, r9, #31 |
138 | and r7, r8, #0x70 @ get shift type | 136 | and r7, r8, #0x70 @ get shift type |
139 | orreq r7, r7, #8 @ shift count = 0 | 137 | orreq r7, r7, #8 @ shift count = 0 |
140 | add pc, pc, r7 | 138 | add pc, pc, r7 |
141 | nop | 139 | nop |
142 | 140 | ||
143 | mov r6, r6, lsl r5 @ 0: LSL #!0 | 141 | mov r6, r6, lsl r9 @ 0: LSL #!0 |
144 | b .data_arm_apply_r6_and_rn | 142 | b .data_arm_apply_r6_and_rn |
145 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 | 143 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 |
146 | nop | 144 | nop |
@@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort) | |||
148 | nop | 146 | nop |
149 | b .data_unknown @ 3: MUL? | 147 | b .data_unknown @ 3: MUL? |
150 | nop | 148 | nop |
151 | mov r6, r6, lsr r5 @ 4: LSR #!0 | 149 | mov r6, r6, lsr r9 @ 4: LSR #!0 |
152 | b .data_arm_apply_r6_and_rn | 150 | b .data_arm_apply_r6_and_rn |
153 | mov r6, r6, lsr #32 @ 5: LSR #32 | 151 | mov r6, r6, lsr #32 @ 5: LSR #32 |
154 | b .data_arm_apply_r6_and_rn | 152 | b .data_arm_apply_r6_and_rn |
@@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort) | |||
156 | nop | 154 | nop |
157 | b .data_unknown @ 7: MUL? | 155 | b .data_unknown @ 7: MUL? |
158 | nop | 156 | nop |
159 | mov r6, r6, asr r5 @ 8: ASR #!0 | 157 | mov r6, r6, asr r9 @ 8: ASR #!0 |
160 | b .data_arm_apply_r6_and_rn | 158 | b .data_arm_apply_r6_and_rn |
161 | mov r6, r6, asr #32 @ 9: ASR #32 | 159 | mov r6, r6, asr #32 @ 9: ASR #32 |
162 | b .data_arm_apply_r6_and_rn | 160 | b .data_arm_apply_r6_and_rn |
@@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort) | |||
164 | nop | 162 | nop |
165 | b .data_unknown @ B: MUL? | 163 | b .data_unknown @ B: MUL? |
166 | nop | 164 | nop |
167 | mov r6, r6, ror r5 @ C: ROR #!0 | 165 | mov r6, r6, ror r9 @ C: ROR #!0 |
168 | b .data_arm_apply_r6_and_rn | 166 | b .data_arm_apply_r6_and_rn |
169 | mov r6, r6, rrx @ D: RRX | 167 | mov r6, r6, rrx @ D: RRX |
170 | b .data_arm_apply_r6_and_rn | 168 | b .data_arm_apply_r6_and_rn |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 184a9c997e36..e9c47271732d 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -34,7 +34,7 @@ | |||
34 | */ | 34 | */ |
35 | #define DCACHELINESIZE 32 | 35 | #define DCACHELINESIZE 32 |
36 | 36 | ||
37 | __INIT | 37 | .section .text |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * cpu_sa1100_proc_init() | 40 | * cpu_sa1100_proc_init() |
@@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init) | |||
45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland | 45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland |
46 | mov pc, lr | 46 | mov pc, lr |
47 | 47 | ||
48 | .section .text | ||
49 | |||
50 | /* | 48 | /* |
51 | * cpu_sa1100_proc_fin() | 49 | * cpu_sa1100_proc_fin() |
52 | * | 50 | * |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 3c3867850a30..089c0b5e454f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -210,19 +210,21 @@ cpu_v7_name: | |||
210 | 210 | ||
211 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ | 211 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ |
212 | .globl cpu_v7_suspend_size | 212 | .globl cpu_v7_suspend_size |
213 | .equ cpu_v7_suspend_size, 4 * 8 | 213 | .equ cpu_v7_suspend_size, 4 * 9 |
214 | #ifdef CONFIG_PM_SLEEP | 214 | #ifdef CONFIG_PM_SLEEP |
215 | ENTRY(cpu_v7_do_suspend) | 215 | ENTRY(cpu_v7_do_suspend) |
216 | stmfd sp!, {r4 - r11, lr} | 216 | stmfd sp!, {r4 - r11, lr} |
217 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 217 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
218 | mrc p15, 0, r5, c13, c0, 1 @ Context ID | 218 | mrc p15, 0, r5, c13, c0, 1 @ Context ID |
219 | mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID | ||
220 | stmia r0!, {r4 - r6} | ||
219 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 221 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
220 | mrc p15, 0, r7, c2, c0, 0 @ TTB 0 | 222 | mrc p15, 0, r7, c2, c0, 0 @ TTB 0 |
221 | mrc p15, 0, r8, c2, c0, 1 @ TTB 1 | 223 | mrc p15, 0, r8, c2, c0, 1 @ TTB 1 |
222 | mrc p15, 0, r9, c1, c0, 0 @ Control register | 224 | mrc p15, 0, r9, c1, c0, 0 @ Control register |
223 | mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register | 225 | mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register |
224 | mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control | 226 | mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control |
225 | stmia r0, {r4 - r11} | 227 | stmia r0, {r6 - r11} |
226 | ldmfd sp!, {r4 - r11, pc} | 228 | ldmfd sp!, {r4 - r11, pc} |
227 | ENDPROC(cpu_v7_do_suspend) | 229 | ENDPROC(cpu_v7_do_suspend) |
228 | 230 | ||
@@ -230,9 +232,11 @@ ENTRY(cpu_v7_do_resume) | |||
230 | mov ip, #0 | 232 | mov ip, #0 |
231 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | 233 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs |
232 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | 234 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
233 | ldmia r0, {r4 - r11} | 235 | ldmia r0!, {r4 - r6} |
234 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 236 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
235 | mcr p15, 0, r5, c13, c0, 1 @ Context ID | 237 | mcr p15, 0, r5, c13, c0, 1 @ Context ID |
238 | mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID | ||
239 | ldmia r0, {r6 - r11} | ||
236 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 240 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
237 | mcr p15, 0, r7, c2, c0, 0 @ TTB 0 | 241 | mcr p15, 0, r7, c2, c0, 0 @ TTB 0 |
238 | mcr p15, 0, r8, c2, c0, 1 @ TTB 1 | 242 | mcr p15, 0, r8, c2, c0, 1 @ TTB 1 |
@@ -418,9 +422,9 @@ ENTRY(v7_processor_functions) | |||
418 | .word cpu_v7_dcache_clean_area | 422 | .word cpu_v7_dcache_clean_area |
419 | .word cpu_v7_switch_mm | 423 | .word cpu_v7_switch_mm |
420 | .word cpu_v7_set_pte_ext | 424 | .word cpu_v7_set_pte_ext |
421 | .word 0 | 425 | .word cpu_v7_suspend_size |
422 | .word 0 | 426 | .word cpu_v7_do_suspend |
423 | .word 0 | 427 | .word cpu_v7_do_resume |
424 | .size v7_processor_functions, . - v7_processor_functions | 428 | .size v7_processor_functions, . - v7_processor_functions |
425 | 429 | ||
426 | .section ".rodata" | 430 | .section ".rodata" |
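cpu_v7_suspend_size grows from 8 to 9 words because the user read-only thread ID register (c13, c0, 3) is now saved with the rest of the CP15 state, and v7_processor_functions finally exports the suspend hooks instead of zero placeholders. A layout sketch of the save area, in the order the stmia/ldmia sequences above store and reload it (field names are descriptive only):

    #include <linux/types.h>

    /* Sketch of the 9-word cpu_v7 suspend buffer layout. */
    struct v7_suspend_state {
        u32 fcse_pid;     /* c13, c0, 0 */
        u32 context_id;   /* c13, c0, 1 */
        u32 tpidruro;     /* c13, c0, 3  (new in this change) */
        u32 domain;       /* c3,  c0, 0 */
        u32 ttb0;         /* c2,  c0, 0 */
        u32 ttb1;         /* c2,  c0, 1 */
        u32 sctlr;        /* c1,  c0, 0 */
        u32 actlr;        /* c1,  c0, 1 */
        u32 cpacr;        /* c1,  c0, 2 */
    };
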
diff --git a/arch/arm/plat-iop/cp6.c b/arch/arm/plat-iop/cp6.c index 9612a87e2a88..bab73e2c79db 100644 --- a/arch/arm/plat-iop/cp6.c +++ b/arch/arm/plat-iop/cp6.c | |||
@@ -18,6 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <asm/traps.h> | 20 | #include <asm/traps.h> |
21 | #include <asm/ptrace.h> | ||
21 | 22 | ||
22 | static int cp6_trap(struct pt_regs *regs, unsigned int instr) | 23 | static int cp6_trap(struct pt_regs *regs, unsigned int instr) |
23 | { | 24 | { |
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S index 2e49e71b1b98..066d464d322d 100644 --- a/arch/arm/plat-mxc/include/mach/entry-macro.S +++ b/arch/arm/plat-mxc/include/mach/entry-macro.S | |||
@@ -78,7 +78,3 @@ | |||
78 | movs \irqnr, \irqnr | 78 | movs \irqnr, \irqnr |
79 | #endif | 79 | #endif |
80 | .endm | 80 | .endm |
81 | |||
82 | @ irq priority table (not used) | ||
83 | .macro irq_prio_table | ||
84 | .endm | ||
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c index a37b8eb65b76..49fc0df0c21f 100644 --- a/arch/arm/plat-omap/omap_device.c +++ b/arch/arm/plat-omap/omap_device.c | |||
@@ -84,6 +84,7 @@ | |||
84 | #include <linux/io.h> | 84 | #include <linux/io.h> |
85 | #include <linux/clk.h> | 85 | #include <linux/clk.h> |
86 | #include <linux/clkdev.h> | 86 | #include <linux/clkdev.h> |
87 | #include <linux/pm_runtime.h> | ||
87 | 88 | ||
88 | #include <plat/omap_device.h> | 89 | #include <plat/omap_device.h> |
89 | #include <plat/omap_hwmod.h> | 90 | #include <plat/omap_hwmod.h> |
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od) | |||
539 | static int _od_runtime_suspend(struct device *dev) | 540 | static int _od_runtime_suspend(struct device *dev) |
540 | { | 541 | { |
541 | struct platform_device *pdev = to_platform_device(dev); | 542 | struct platform_device *pdev = to_platform_device(dev); |
543 | int ret; | ||
544 | |||
545 | ret = pm_generic_runtime_suspend(dev); | ||
546 | |||
547 | if (!ret) | ||
548 | omap_device_idle(pdev); | ||
549 | |||
550 | return ret; | ||
551 | } | ||
542 | 552 | ||
543 | return omap_device_idle(pdev); | 553 | static int _od_runtime_idle(struct device *dev) |
554 | { | ||
555 | return pm_generic_runtime_idle(dev); | ||
544 | } | 556 | } |
545 | 557 | ||
546 | static int _od_runtime_resume(struct device *dev) | 558 | static int _od_runtime_resume(struct device *dev) |
547 | { | 559 | { |
548 | struct platform_device *pdev = to_platform_device(dev); | 560 | struct platform_device *pdev = to_platform_device(dev); |
549 | 561 | ||
550 | return omap_device_enable(pdev); | 562 | omap_device_enable(pdev); |
563 | |||
564 | return pm_generic_runtime_resume(dev); | ||
551 | } | 565 | } |
552 | 566 | ||
553 | static struct dev_power_domain omap_device_power_domain = { | 567 | static struct dev_power_domain omap_device_power_domain = { |
554 | .ops = { | 568 | .ops = { |
555 | .runtime_suspend = _od_runtime_suspend, | 569 | .runtime_suspend = _od_runtime_suspend, |
570 | .runtime_idle = _od_runtime_idle, | ||
556 | .runtime_resume = _od_runtime_resume, | 571 | .runtime_resume = _od_runtime_resume, |
557 | USE_PLATFORM_PM_SLEEP_OPS | 572 | USE_PLATFORM_PM_SLEEP_OPS |
558 | } | 573 | } |
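
With omap_device_power_domain now chaining the generic runtime PM callbacks around omap_device_enable()/omap_device_idle(), a driver bound to such a platform_device sticks to the standard runtime PM calls and never touches hwmods directly. A minimal hypothetical consumer; the driver name, probe contents and missing error/remove handling are placeholders:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_probe(struct platform_device *pdev)
    {
    	pm_runtime_enable(&pdev->dev);

    	/* Resumes via _od_runtime_resume() -> omap_device_enable() */
    	pm_runtime_get_sync(&pdev->dev);

    	/* ... program the hardware here ... */

    	/* Idles via _od_runtime_suspend() -> omap_device_idle() */
    	pm_runtime_put_sync(&pdev->dev);
    	return 0;
    }

    static struct platform_driver example_driver = {
    	.probe	= example_probe,
    	.driver	= {
    		.name	= "example-omap-ip",	/* hypothetical device name */
    	},
    };

    static int __init example_init(void)
    {
    	return platform_driver_register(&example_driver);
    }
    module_init(example_init);

    MODULE_LICENSE("GPL");
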
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c index 6af3d0b1f8d0..363c91e44efb 100644 --- a/arch/arm/plat-omap/sram.c +++ b/arch/arm/plat-omap/sram.c | |||
@@ -394,20 +394,15 @@ void omap3_sram_restore_context(void) | |||
394 | } | 394 | } |
395 | #endif /* CONFIG_PM */ | 395 | #endif /* CONFIG_PM */ |
396 | 396 | ||
397 | static int __init omap34xx_sram_init(void) | 397 | #endif /* CONFIG_ARCH_OMAP3 */ |
398 | { | 398 | |
399 | _omap3_sram_configure_core_dpll = | ||
400 | omap_sram_push(omap3_sram_configure_core_dpll, | ||
401 | omap3_sram_configure_core_dpll_sz); | ||
402 | omap_push_sram_idle(); | ||
403 | return 0; | ||
404 | } | ||
405 | #else | ||
406 | static inline int omap34xx_sram_init(void) | 399 | static inline int omap34xx_sram_init(void) |
407 | { | 400 | { |
401 | #if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM) | ||
402 | omap3_sram_restore_context(); | ||
403 | #endif | ||
408 | return 0; | 404 | return 0; |
409 | } | 405 | } |
410 | #endif | ||
411 | 406 | ||
412 | int __init omap_sram_init(void) | 407 | int __init omap_sram_init(void) |
413 | { | 408 | { |
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c index 5b4fffab1eb4..41ab97ebe4cf 100644 --- a/arch/arm/plat-orion/gpio.c +++ b/arch/arm/plat-orion/gpio.c | |||
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio, | |||
432 | ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; | 432 | ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; |
433 | ct->regs.ack = GPIO_EDGE_CAUSE_OFF; | 433 | ct->regs.ack = GPIO_EDGE_CAUSE_OFF; |
434 | ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; | 434 | ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; |
435 | ct->chip.irq_ack = irq_gc_ack; | 435 | ct->chip.irq_ack = irq_gc_ack_clr_bit; |
436 | ct->chip.irq_mask = irq_gc_mask_clr_bit; | 436 | ct->chip.irq_mask = irq_gc_mask_clr_bit; |
437 | ct->chip.irq_unmask = irq_gc_mask_set_bit; | 437 | ct->chip.irq_unmask = irq_gc_mask_set_bit; |
438 | ct->chip.irq_set_type = gpio_irq_set_type; | 438 | ct->chip.irq_set_type = gpio_irq_set_type; |
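
The generic-chip ack helper split matters here because irq_gc_ack_set_bit() writes the bit mask to the ack register (write-1-to-clear semantics) while irq_gc_ack_clr_bit() writes the complement (write-0-to-clear), which is what the Orion edge-cause register needs; the Samsung controllers further down use the set-bit flavour. A small sketch, not kernel code, of how a hypothetical generic-chip setup would pick between them:

    #include <linux/irq.h>
    #include <linux/types.h>

    /*
     * Sketch: choose the ack helper for an irq_chip_type according to how
     * the controller clears its cause/pending register.
     */
    static void example_pick_ack(struct irq_chip_type *ct, bool write_one_to_clear)
    {
    	if (write_one_to_clear)
    		ct->chip.irq_ack = irq_gc_ack_set_bit;	/* writes mask */
    	else
    		ct->chip.irq_ack = irq_gc_ack_clr_bit;	/* writes ~mask */
    }
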
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c index 48ebb9479b61..a11dc3670505 100644 --- a/arch/arm/plat-pxa/gpio.c +++ b/arch/arm/plat-pxa/gpio.c | |||
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c) | |||
50 | return container_of(c, struct pxa_gpio_chip, chip)->regbase; | 50 | return container_of(c, struct pxa_gpio_chip, chip)->regbase; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio) | 53 | static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio) |
54 | { | 54 | { |
55 | return &pxa_gpio_chips[gpio_to_bank(gpio)]; | 55 | return &pxa_gpio_chips[gpio_to_bank(gpio)]; |
56 | } | 56 | } |
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type) | |||
161 | int gpio = irq_to_gpio(d->irq); | 161 | int gpio = irq_to_gpio(d->irq); |
162 | unsigned long gpdr, mask = GPIO_bit(gpio); | 162 | unsigned long gpdr, mask = GPIO_bit(gpio); |
163 | 163 | ||
164 | c = gpio_to_chip(gpio); | 164 | c = gpio_to_pxachip(gpio); |
165 | 165 | ||
166 | if (type == IRQ_TYPE_PROBE) { | 166 | if (type == IRQ_TYPE_PROBE) { |
167 | /* Don't mess with enabled GPIOs using preconfigured edges or | 167 | /* Don't mess with enabled GPIOs using preconfigured edges or |
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc) | |||
230 | static void pxa_ack_muxed_gpio(struct irq_data *d) | 230 | static void pxa_ack_muxed_gpio(struct irq_data *d) |
231 | { | 231 | { |
232 | int gpio = irq_to_gpio(d->irq); | 232 | int gpio = irq_to_gpio(d->irq); |
233 | struct pxa_gpio_chip *c = gpio_to_chip(gpio); | 233 | struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); |
234 | 234 | ||
235 | __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET); | 235 | __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET); |
236 | } | 236 | } |
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d) | |||
238 | static void pxa_mask_muxed_gpio(struct irq_data *d) | 238 | static void pxa_mask_muxed_gpio(struct irq_data *d) |
239 | { | 239 | { |
240 | int gpio = irq_to_gpio(d->irq); | 240 | int gpio = irq_to_gpio(d->irq); |
241 | struct pxa_gpio_chip *c = gpio_to_chip(gpio); | 241 | struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); |
242 | uint32_t grer, gfer; | 242 | uint32_t grer, gfer; |
243 | 243 | ||
244 | c->irq_mask &= ~GPIO_bit(gpio); | 244 | c->irq_mask &= ~GPIO_bit(gpio); |
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d) | |||
252 | static void pxa_unmask_muxed_gpio(struct irq_data *d) | 252 | static void pxa_unmask_muxed_gpio(struct irq_data *d) |
253 | { | 253 | { |
254 | int gpio = irq_to_gpio(d->irq); | 254 | int gpio = irq_to_gpio(d->irq); |
255 | struct pxa_gpio_chip *c = gpio_to_chip(gpio); | 255 | struct pxa_gpio_chip *c = gpio_to_pxachip(gpio); |
256 | 256 | ||
257 | c->irq_mask |= GPIO_bit(gpio); | 257 | c->irq_mask |= GPIO_bit(gpio); |
258 | update_edge_detect(c); | 258 | update_edge_detect(c); |
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c index 2abf9660bc6c..539bd0e3defd 100644 --- a/arch/arm/plat-s3c24xx/dma.c +++ b/arch/arm/plat-s3c24xx/dma.c | |||
@@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel); | |||
712 | * get control of a dma channel | 712 | * get control of a dma channel |
713 | */ | 713 | */ |
714 | 714 | ||
715 | int s3c2410_dma_request(unsigned int channel, | 715 | int s3c2410_dma_request(enum dma_ch channel, |
716 | struct s3c2410_dma_client *client, | 716 | struct s3c2410_dma_client *client, |
717 | void *dev) | 717 | void *dev) |
718 | { | 718 | { |
@@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request); | |||
783 | * allowed to go through. | 783 | * allowed to go through. |
784 | */ | 784 | */ |
785 | 785 | ||
786 | int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) | 786 | int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client) |
787 | { | 787 | { |
788 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 788 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
789 | unsigned long flags; | 789 | unsigned long flags; |
@@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan) | |||
974 | } | 974 | } |
975 | 975 | ||
976 | int | 976 | int |
977 | s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) | 977 | s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op) |
978 | { | 978 | { |
979 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 979 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
980 | 980 | ||
@@ -1021,23 +1021,19 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl); | |||
1021 | * xfersize: size of unit in bytes (1,2,4) | 1021 | * xfersize: size of unit in bytes (1,2,4) |
1022 | */ | 1022 | */ |
1023 | 1023 | ||
1024 | int s3c2410_dma_config(unsigned int channel, | 1024 | int s3c2410_dma_config(enum dma_ch channel, |
1025 | int xferunit) | 1025 | int xferunit) |
1026 | { | 1026 | { |
1027 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 1027 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
1028 | unsigned int dcon; | 1028 | unsigned int dcon; |
1029 | 1029 | ||
1030 | pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n", | 1030 | pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit); |
1031 | __func__, channel, xferunit, dcon); | ||
1032 | 1031 | ||
1033 | if (chan == NULL) | 1032 | if (chan == NULL) |
1034 | return -EINVAL; | 1033 | return -EINVAL; |
1035 | 1034 | ||
1036 | pr_debug("%s: Initial dcon is %08x\n", __func__, dcon); | ||
1037 | |||
1038 | dcon = chan->dcon & dma_sel.dcon_mask; | 1035 | dcon = chan->dcon & dma_sel.dcon_mask; |
1039 | 1036 | pr_debug("%s: dcon is %08x\n", __func__, dcon); | |
1040 | pr_debug("%s: New dcon is %08x\n", __func__, dcon); | ||
1041 | 1037 | ||
1042 | switch (chan->req_ch) { | 1038 | switch (chan->req_ch) { |
1043 | case DMACH_I2S_IN: | 1039 | case DMACH_I2S_IN: |
@@ -1104,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config); | |||
1104 | * devaddr: physical address of the source | 1100 | * devaddr: physical address of the source |
1105 | */ | 1101 | */ |
1106 | 1102 | ||
1107 | int s3c2410_dma_devconfig(unsigned int channel, | 1103 | int s3c2410_dma_devconfig(enum dma_ch channel, |
1108 | enum s3c2410_dmasrc source, | 1104 | enum s3c2410_dmasrc source, |
1109 | unsigned long devaddr) | 1105 | unsigned long devaddr) |
1110 | { | 1106 | { |
@@ -1177,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig); | |||
1177 | * returns the current transfer points for the dma source and destination | 1173 | * returns the current transfer points for the dma source and destination |
1178 | */ | 1174 | */ |
1179 | 1175 | ||
1180 | int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) | 1176 | int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst) |
1181 | { | 1177 | { |
1182 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 1178 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
1183 | 1179 | ||
@@ -1235,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) | |||
1235 | /* restore channel's hardware configuration */ | 1231 | /* restore channel's hardware configuration */ |
1236 | 1232 | ||
1237 | if (!cp->in_use) | 1233 | if (!cp->in_use) |
1238 | return 0; | 1234 | return; |
1239 | 1235 | ||
1240 | printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); | 1236 | printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); |
1241 | 1237 | ||
@@ -1246,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp) | |||
1246 | 1242 | ||
1247 | if (cp->map != NULL) | 1243 | if (cp->map != NULL) |
1248 | dma_sel.select(cp, cp->map); | 1244 | dma_sel.select(cp, cp->map); |
1249 | |||
1250 | return 0; | ||
1251 | } | 1245 | } |
1252 | 1246 | ||
1253 | static void s3c2410_dma_resume(void) | 1247 | static void s3c2410_dma_resume(void) |
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S index fd7032f84ae7..c56612569b40 100644 --- a/arch/arm/plat-s3c24xx/sleep.S +++ b/arch/arm/plat-s3c24xx/sleep.S | |||
@@ -41,31 +41,6 @@ | |||
41 | 41 | ||
42 | .text | 42 | .text |
43 | 43 | ||
44 | /* s3c_cpu_save | ||
45 | * | ||
46 | * entry: | ||
47 | * r1 = v:p offset | ||
48 | */ | ||
49 | |||
50 | ENTRY(s3c_cpu_save) | ||
51 | stmfd sp!, { r4 - r12, lr } | ||
52 | ldr r3, =resume_with_mmu | ||
53 | bl cpu_suspend | ||
54 | |||
55 | @@ jump to final code to send system to sleep | ||
56 | ldr r0, =pm_cpu_sleep | ||
57 | @@ldr pc, [ r0 ] | ||
58 | ldr r0, [ r0 ] | ||
59 | mov pc, r0 | ||
60 | |||
61 | @@ return to the caller, after having the MMU | ||
62 | @@ turned on, this restores the last bits from the | ||
63 | @@ stack | ||
64 | resume_with_mmu: | ||
65 | ldmfd sp!, { r4 - r12, pc } | ||
66 | |||
67 | .ltorg | ||
68 | |||
69 | /* sleep magic, to allow the bootloader to check for a valid | 44 | /* sleep magic, to allow the bootloader to check for a valid |
70 | * image to resume to. Must be the first word before the | 45 | * image to resume to. Must be the first word before the |
71 | * s3c_cpu_resume entry. | 46 | * s3c_cpu_resume entry. |
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index 135abda31c9a..327ab9f662e8 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c | |||
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) | |||
152 | if (!gc) | 152 | if (!gc) |
153 | return -ENOMEM; | 153 | return -ENOMEM; |
154 | ct = gc->chip_types; | 154 | ct = gc->chip_types; |
155 | ct->chip.irq_ack = irq_gc_ack; | 155 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
156 | ct->chip.irq_mask = irq_gc_mask_set_bit; | 156 | ct->chip.irq_mask = irq_gc_mask_set_bit; |
157 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | 157 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; |
158 | ct->chip.irq_set_type = s5p_gpioint_set_type, | 158 | ct->chip.irq_set_type = s5p_gpioint_set_type, |
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c index 899a8cc011ff..612934c48b0d 100644 --- a/arch/arm/plat-s5p/s5p-time.c +++ b/arch/arm/plat-s5p/s5p-time.c | |||
@@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void) | |||
370 | 370 | ||
371 | clock_rate = clk_get_rate(tin_source); | 371 | clock_rate = clk_get_rate(tin_source); |
372 | 372 | ||
373 | init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); | ||
374 | |||
375 | s5p_time_setup(timer_source.source_id, TCNT_MAX); | 373 | s5p_time_setup(timer_source.source_id, TCNT_MAX); |
376 | s5p_time_start(timer_source.source_id, PERIODIC); | 374 | s5p_time_start(timer_source.source_id, PERIODIC); |
377 | 375 | ||
376 | init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate); | ||
377 | |||
378 | if (clocksource_register_hz(&time_clocksource, clock_rate)) | 378 | if (clocksource_register_hz(&time_clocksource, clock_rate)) |
379 | panic("%s: can't register clocksource\n", time_clocksource.name); | 379 | panic("%s: can't register clocksource\n", time_clocksource.name); |
380 | } | 380 | } |
diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c index cb459dd95459..6143aa147688 100644 --- a/arch/arm/plat-samsung/dma.c +++ b/arch/arm/plat-samsung/dma.c | |||
@@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel) | |||
41 | * irq? | 41 | * irq? |
42 | */ | 42 | */ |
43 | 43 | ||
44 | int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) | 44 | int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn) |
45 | { | 45 | { |
46 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 46 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
47 | 47 | ||
@@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) | |||
56 | } | 56 | } |
57 | EXPORT_SYMBOL(s3c2410_dma_set_opfn); | 57 | EXPORT_SYMBOL(s3c2410_dma_set_opfn); |
58 | 58 | ||
59 | int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) | 59 | int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn) |
60 | { | 60 | { |
61 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 61 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
62 | 62 | ||
@@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) | |||
71 | } | 71 | } |
72 | EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); | 72 | EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); |
73 | 73 | ||
74 | int s3c2410_dma_setflags(unsigned int channel, unsigned int flags) | 74 | int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags) |
75 | { | 75 | { |
76 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); | 76 | struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); |
77 | 77 | ||
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h index 4af108ff4112..e3b31c26ac3e 100644 --- a/arch/arm/plat-samsung/include/plat/devs.h +++ b/arch/arm/plat-samsung/include/plat/devs.h | |||
@@ -12,6 +12,10 @@ | |||
12 | * it under the terms of the GNU General Public License version 2 as | 12 | * it under the terms of the GNU General Public License version 2 as |
13 | * published by the Free Software Foundation. | 13 | * published by the Free Software Foundation. |
14 | */ | 14 | */ |
15 | |||
16 | #ifndef __PLAT_DEVS_H | ||
17 | #define __PLAT_DEVS_H __FILE__ | ||
18 | |||
15 | #include <linux/platform_device.h> | 19 | #include <linux/platform_device.h> |
16 | 20 | ||
17 | struct s3c24xx_uart_resources { | 21 | struct s3c24xx_uart_resources { |
@@ -159,3 +163,5 @@ extern struct platform_device s3c_device_ac97; | |||
159 | */ | 163 | */ |
160 | extern void *s3c_set_platdata(void *pd, size_t pdsize, | 164 | extern void *s3c_set_platdata(void *pd, size_t pdsize, |
161 | struct platform_device *pdev); | 165 | struct platform_device *pdev); |
166 | |||
167 | #endif /* __PLAT_DEVS_H */ | ||
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h index 2e8f8c6560d7..8c273b7a6f56 100644 --- a/arch/arm/plat-samsung/include/plat/dma.h +++ b/arch/arm/plat-samsung/include/plat/dma.h | |||
@@ -42,6 +42,7 @@ struct s3c2410_dma_client { | |||
42 | }; | 42 | }; |
43 | 43 | ||
44 | struct s3c2410_dma_chan; | 44 | struct s3c2410_dma_chan; |
45 | enum dma_ch; | ||
45 | 46 | ||
46 | /* s3c2410_dma_cbfn_t | 47 | /* s3c2410_dma_cbfn_t |
47 | * | 48 | * |
@@ -62,7 +63,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *, | |||
62 | * request a dma channel exclusively | 63 | * request a dma channel exclusively |
63 | */ | 64 | */ |
64 | 65 | ||
65 | extern int s3c2410_dma_request(unsigned int channel, | 66 | extern int s3c2410_dma_request(enum dma_ch channel, |
66 | struct s3c2410_dma_client *, void *dev); | 67 | struct s3c2410_dma_client *, void *dev); |
67 | 68 | ||
68 | 69 | ||
@@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel, | |||
71 | * change the state of the dma channel | 72 | * change the state of the dma channel |
72 | */ | 73 | */ |
73 | 74 | ||
74 | extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op); | 75 | extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op); |
75 | 76 | ||
76 | /* s3c2410_dma_setflags | 77 | /* s3c2410_dma_setflags |
77 | * | 78 | * |
78 | * set the channel's flags to a given state | 79 | * set the channel's flags to a given state |
79 | */ | 80 | */ |
80 | 81 | ||
81 | extern int s3c2410_dma_setflags(unsigned int channel, | 82 | extern int s3c2410_dma_setflags(enum dma_ch channel, |
82 | unsigned int flags); | 83 | unsigned int flags); |
83 | 84 | ||
84 | /* s3c2410_dma_free | 85 | /* s3c2410_dma_free |
@@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel, | |||
86 | * free the dma channel (will also abort any outstanding operations) | 87 | * free the dma channel (will also abort any outstanding operations) |
87 | */ | 88 | */ |
88 | 89 | ||
89 | extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); | 90 | extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *); |
90 | 91 | ||
91 | /* s3c2410_dma_enqueue | 92 | /* s3c2410_dma_enqueue |
92 | * | 93 | * |
@@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *); | |||
95 | * drained before the buffer is given to the DMA system. | 96 | * drained before the buffer is given to the DMA system. |
96 | */ | 97 | */ |
97 | 98 | ||
98 | extern int s3c2410_dma_enqueue(unsigned int channel, void *id, | 99 | extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id, |
99 | dma_addr_t data, int size); | 100 | dma_addr_t data, int size); |
100 | 101 | ||
101 | /* s3c2410_dma_config | 102 | /* s3c2410_dma_config |
@@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id, | |||
103 | * configure the dma channel | 104 | * configure the dma channel |
104 | */ | 105 | */ |
105 | 106 | ||
106 | extern int s3c2410_dma_config(unsigned int channel, int xferunit); | 107 | extern int s3c2410_dma_config(enum dma_ch channel, int xferunit); |
107 | 108 | ||
108 | /* s3c2410_dma_devconfig | 109 | /* s3c2410_dma_devconfig |
109 | * | 110 | * |
110 | * configure the device we're talking to | 111 | * configure the device we're talking to |
111 | */ | 112 | */ |
112 | 113 | ||
113 | extern int s3c2410_dma_devconfig(unsigned int channel, | 114 | extern int s3c2410_dma_devconfig(enum dma_ch channel, |
114 | enum s3c2410_dmasrc source, unsigned long devaddr); | 115 | enum s3c2410_dmasrc source, unsigned long devaddr); |
115 | 116 | ||
116 | /* s3c2410_dma_getposition | 117 | /* s3c2410_dma_getposition |
@@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel, | |||
118 | * get the position that the dma transfer is currently at | 119 | * get the position that the dma transfer is currently at |
119 | */ | 120 | */ |
120 | 121 | ||
121 | extern int s3c2410_dma_getposition(unsigned int channel, | 122 | extern int s3c2410_dma_getposition(enum dma_ch channel, |
122 | dma_addr_t *src, dma_addr_t *dest); | 123 | dma_addr_t *src, dma_addr_t *dest); |
123 | 124 | ||
124 | extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn); | 125 | extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn); |
125 | extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn); | 126 | extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); |
126 | 127 | ||
127 | 128 | ||
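
Since the prototypes above now take enum dma_ch, callers name the channel symbolically and the compiler can catch mismatches. A hypothetical s3c24xx client using the typed API; the DMACH_I2S_IN channel, the 4-byte transfer unit and the FIFO address parameter are illustrative choices, not taken from this patch:

    #include <mach/dma.h>		/* enum dma_ch / DMACH_* on this platform */
    #include <plat/dma.h>

    static struct s3c2410_dma_client example_client = {
    	.name = "example-dma-user",
    };

    /* Sketch: claim and configure a channel through the enum dma_ch API. */
    static int example_dma_setup(unsigned long dev_fifo)
    {
    	int ret;

    	ret = s3c2410_dma_request(DMACH_I2S_IN, &example_client, NULL);
    	if (ret < 0)
    		return ret;

    	s3c2410_dma_config(DMACH_I2S_IN, 4);	/* 4-byte transfer units */
    	s3c2410_dma_devconfig(DMACH_I2S_IN, S3C2410_DMASRC_HW, dev_fifo);

    	return 0;
    }
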
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h index 7fb6f6be8c81..f6749916d194 100644 --- a/arch/arm/plat-samsung/include/plat/pm.h +++ b/arch/arm/plat-samsung/include/plat/pm.h | |||
@@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow; | |||
42 | /* per-cpu sleep functions */ | 42 | /* per-cpu sleep functions */ |
43 | 43 | ||
44 | extern void (*pm_cpu_prep)(void); | 44 | extern void (*pm_cpu_prep)(void); |
45 | extern void (*pm_cpu_sleep)(void); | 45 | extern int (*pm_cpu_sleep)(unsigned long); |
46 | 46 | ||
47 | /* Flags for PM Control */ | 47 | /* Flags for PM Control */ |
48 | 48 | ||
@@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */ | |||
52 | 52 | ||
53 | /* from sleep.S */ | 53 | /* from sleep.S */ |
54 | 54 | ||
55 | extern int s3c_cpu_save(unsigned long *saveblk, long); | ||
56 | extern void s3c_cpu_resume(void); | 55 | extern void s3c_cpu_resume(void); |
57 | 56 | ||
58 | extern void s3c2410_cpu_suspend(void); | 57 | extern int s3c2410_cpu_suspend(unsigned long); |
59 | 58 | ||
60 | /* sleep save info */ | 59 | /* sleep save info */ |
61 | 60 | ||
diff --git a/arch/arm/plat-samsung/include/plat/regs-serial.h b/arch/arm/plat-samsung/include/plat/regs-serial.h index c151c5f94a87..116edfe120b9 100644 --- a/arch/arm/plat-samsung/include/plat/regs-serial.h +++ b/arch/arm/plat-samsung/include/plat/regs-serial.h | |||
@@ -224,6 +224,8 @@ | |||
224 | #define S5PV210_UFSTAT_RXMASK (255<<0) | 224 | #define S5PV210_UFSTAT_RXMASK (255<<0) |
225 | #define S5PV210_UFSTAT_RXSHIFT (0) | 225 | #define S5PV210_UFSTAT_RXSHIFT (0) |
226 | 226 | ||
227 | #define NO_NEED_CHECK_CLKSRC 1 | ||
228 | |||
227 | #ifndef __ASSEMBLY__ | 229 | #ifndef __ASSEMBLY__ |
228 | 230 | ||
229 | /* struct s3c24xx_uart_clksrc | 231 | /* struct s3c24xx_uart_clksrc |
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h index 0ffe34a21554..4c16fa3621bb 100644 --- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h +++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h | |||
@@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo { | |||
39 | * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6 | 39 | * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6 |
40 | * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number | 40 | * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number |
41 | * @high_speed: If the controller supports HIGH_SPEED_EN bit | 41 | * @high_speed: If the controller supports HIGH_SPEED_EN bit |
42 | * @tx_st_done: Depends on tx fifo_lvl field | ||
42 | */ | 43 | */ |
43 | struct s3c64xx_spi_info { | 44 | struct s3c64xx_spi_info { |
44 | int src_clk_nr; | 45 | int src_clk_nr; |
@@ -53,6 +54,7 @@ struct s3c64xx_spi_info { | |||
53 | int fifo_lvl_mask; | 54 | int fifo_lvl_mask; |
54 | int rx_lvl_offset; | 55 | int rx_lvl_offset; |
55 | int high_speed; | 56 | int high_speed; |
57 | int tx_st_done; | ||
56 | }; | 58 | }; |
57 | 59 | ||
58 | /** | 60 | /** |
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c index 32582c0958e3..657405c481d0 100644 --- a/arch/arm/plat-samsung/irq-uart.c +++ b/arch/arm/plat-samsung/irq-uart.c | |||
@@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq) | |||
54 | 54 | ||
55 | gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, | 55 | gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base, |
56 | handle_level_irq); | 56 | handle_level_irq); |
57 | |||
58 | if (!gc) { | ||
59 | pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n", | ||
60 | __func__, uirq->base_irq); | ||
61 | return; | ||
62 | } | ||
63 | |||
57 | ct = gc->chip_types; | 64 | ct = gc->chip_types; |
58 | ct->chip.irq_ack = irq_gc_ack; | 65 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
59 | ct->chip.irq_mask = irq_gc_mask_set_bit; | 66 | ct->chip.irq_mask = irq_gc_mask_set_bit; |
60 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | 67 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; |
61 | ct->regs.ack = S3C64XX_UINTP; | 68 | ct->regs.ack = S3C64XX_UINTP; |
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c index a607546ddbd0..f714d060370d 100644 --- a/arch/arm/plat-samsung/irq-vic-timer.c +++ b/arch/arm/plat-samsung/irq-vic-timer.c | |||
@@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq) | |||
54 | 54 | ||
55 | s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, | 55 | s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq, |
56 | S3C64XX_TINT_CSTAT, handle_level_irq); | 56 | S3C64XX_TINT_CSTAT, handle_level_irq); |
57 | |||
58 | if (!s3c_tgc) { | ||
59 | pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n", | ||
60 | __func__, timer_irq); | ||
61 | return; | ||
62 | } | ||
63 | |||
57 | ct = s3c_tgc->chip_types; | 64 | ct = s3c_tgc->chip_types; |
58 | ct->chip.irq_mask = irq_gc_mask_clr_bit; | 65 | ct->chip.irq_mask = irq_gc_mask_clr_bit; |
59 | ct->chip.irq_unmask = irq_gc_mask_set_bit; | 66 | ct->chip.irq_unmask = irq_gc_mask_set_bit; |
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 5c0a440d6e16..5fa1742d019b 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/io.h> | 20 | #include <linux/io.h> |
21 | 21 | ||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/suspend.h> | ||
23 | #include <mach/hardware.h> | 24 | #include <mach/hardware.h> |
24 | #include <mach/map.h> | 25 | #include <mach/map.h> |
25 | 26 | ||
@@ -231,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start, | |||
231 | 232 | ||
232 | 233 | ||
233 | void (*pm_cpu_prep)(void); | 234 | void (*pm_cpu_prep)(void); |
234 | void (*pm_cpu_sleep)(void); | 235 | int (*pm_cpu_sleep)(unsigned long); |
235 | 236 | ||
236 | #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) | 237 | #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) |
237 | 238 | ||
@@ -294,15 +295,11 @@ static int s3c_pm_enter(suspend_state_t state) | |||
294 | 295 | ||
295 | s3c_pm_arch_stop_clocks(); | 296 | s3c_pm_arch_stop_clocks(); |
296 | 297 | ||
297 | /* s3c_cpu_save will also act as our return point from when | 298 | /* this will also act as our return point from when |
298 | * we resume as it saves its own register state and restores it | 299 | * we resume as it saves its own register state and restores it |
299 | * during the resume. */ | 300 | * during the resume. */ |
300 | 301 | ||
301 | s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET); | 302 | cpu_suspend(0, pm_cpu_sleep); |
302 | |||
303 | /* restore the cpu state using the kernel's cpu init code. */ | ||
304 | |||
305 | cpu_init(); | ||
306 | 303 | ||
307 | /* restore the system state */ | 304 | /* restore the system state */ |
308 | 305 | ||
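
pm_cpu_sleep is now used as the finisher passed to cpu_suspend(): it takes an unsigned long argument and, if the hardware refuses to sleep, simply returns so that the suspend is unwound, replacing the old path through s3c_cpu_save() and cpu_init(). A hedged sketch of the shape a machine-specific hook takes under the new int (*)(unsigned long) signature; the function body and names are placeholders, not real S3C code:

    #include <linux/init.h>
    #include <plat/pm.h>

    /* Hypothetical finisher matching the new pm_cpu_sleep prototype. */
    static int example_cpu_sleep(unsigned long arg)
    {
    	/*
    	 * Trigger the SoC sleep mechanism here (placeholder).  If the
    	 * system does not actually power down, returning nonzero lets
    	 * cpu_suspend() unwind and s3c_pm_enter() carry on as a resume.
    	 */
    	return 1;
    }

    static void __init example_pm_init(void)
    {
    	pm_cpu_sleep = example_cpu_sleep;	/* later: cpu_suspend(0, pm_cpu_sleep) */
    }
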
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S index 9897dcfc16d6..2d30c7f6edd3 100644 --- a/arch/arm/vfp/vfphw.S +++ b/arch/arm/vfp/vfphw.S | |||
@@ -77,27 +77,27 @@ ENTRY(vfp_support_entry) | |||
77 | bne look_for_VFP_exceptions @ VFP is already enabled | 77 | bne look_for_VFP_exceptions @ VFP is already enabled |
78 | 78 | ||
79 | DBGSTR1 "enable %x", r10 | 79 | DBGSTR1 "enable %x", r10 |
80 | ldr r3, last_VFP_context_address | 80 | ldr r3, vfp_current_hw_state_address |
81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set | 81 | orr r1, r1, #FPEXC_EN @ user FPEXC has the enable bit set |
82 | ldr r4, [r3, r11, lsl #2] @ last_VFP_context pointer | 82 | ldr r4, [r3, r11, lsl #2] @ vfp_current_hw_state pointer |
83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled | 83 | bic r5, r1, #FPEXC_EX @ make sure exceptions are disabled |
84 | cmp r4, r10 | 84 | cmp r4, r10 @ this thread owns the hw context? |
85 | beq check_for_exception @ we are returning to the same | 85 | #ifndef CONFIG_SMP |
86 | @ process, so the registers are | 86 | @ For UP, checking that this thread owns the hw context is |
87 | @ still there. In this case, we do | 87 | @ sufficient to determine that the hardware state is valid. |
88 | @ not want to drop a pending exception. | 88 | beq vfp_hw_state_valid |
89 | |||
90 | @ On UP, we lazily save the VFP context. As a different | ||
91 | @ thread wants ownership of the VFP hardware, save the old | ||
92 | @ state if there was a previous (valid) owner. | ||
89 | 93 | ||
90 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending | 94 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending |
91 | @ exceptions, so we can get at the | 95 | @ exceptions, so we can get at the |
92 | @ rest of it | 96 | @ rest of it |
93 | 97 | ||
94 | #ifndef CONFIG_SMP | ||
95 | @ Save out the current registers to the old thread state | ||
96 | @ No need for SMP since this is not done lazily | ||
97 | |||
98 | DBGSTR1 "save old state %p", r4 | 98 | DBGSTR1 "save old state %p", r4 |
99 | cmp r4, #0 | 99 | cmp r4, #0 @ if the vfp_current_hw_state is NULL |
100 | beq no_old_VFP_process | 100 | beq vfp_reload_hw @ then the hw state needs reloading |
101 | VFPFSTMIA r4, r5 @ save the working registers | 101 | VFPFSTMIA r4, r5 @ save the working registers |
102 | VFPFMRX r5, FPSCR @ current status | 102 | VFPFMRX r5, FPSCR @ current status |
103 | #ifndef CONFIG_CPU_FEROCEON | 103 | #ifndef CONFIG_CPU_FEROCEON |
@@ -110,13 +110,35 @@ ENTRY(vfp_support_entry) | |||
110 | 1: | 110 | 1: |
111 | #endif | 111 | #endif |
112 | stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 | 112 | stmia r4, {r1, r5, r6, r8} @ save FPEXC, FPSCR, FPINST, FPINST2 |
113 | @ and point r4 at the word at the | 113 | vfp_reload_hw: |
114 | @ start of the register dump | 114 | |
115 | #else | ||
116 | @ For SMP, if this thread does not own the hw context, then we | ||
117 | @ need to reload it. No need to save the old state as on SMP, | ||
118 | @ we always save the state when we switch away from a thread. | ||
119 | bne vfp_reload_hw | ||
120 | |||
121 | @ This thread has ownership of the current hardware context. | ||
122 | @ However, it may have been migrated to another CPU, in which | ||
123 | @ case the saved state is newer than the hardware context. | ||
124 | @ Check this by looking at the CPU number which the state was | ||
125 | @ last loaded onto. | ||
126 | ldr ip, [r10, #VFP_CPU] | ||
127 | teq ip, r11 | ||
128 | beq vfp_hw_state_valid | ||
129 | |||
130 | vfp_reload_hw: | ||
131 | @ We're loading this thread's state into the VFP hardware. Update | ||
132 | @ the CPU number which contains the most up to date VFP context. | ||
133 | str r11, [r10, #VFP_CPU] | ||
134 | |||
135 | VFPFMXR FPEXC, r5 @ enable VFP, disable any pending | ||
136 | @ exceptions, so we can get at the | ||
137 | @ rest of it | ||
115 | #endif | 138 | #endif |
116 | 139 | ||
117 | no_old_VFP_process: | ||
118 | DBGSTR1 "load state %p", r10 | 140 | DBGSTR1 "load state %p", r10 |
119 | str r10, [r3, r11, lsl #2] @ update the last_VFP_context pointer | 141 | str r10, [r3, r11, lsl #2] @ update the vfp_current_hw_state pointer |
120 | @ Load the saved state back into the VFP | 142 | @ Load the saved state back into the VFP |
121 | VFPFLDMIA r10, r5 @ reload the working registers while | 143 | VFPFLDMIA r10, r5 @ reload the working registers while |
122 | @ FPEXC is in a safe state | 144 | @ FPEXC is in a safe state |
@@ -132,7 +154,8 @@ no_old_VFP_process: | |||
132 | #endif | 154 | #endif |
133 | VFPFMXR FPSCR, r5 @ restore status | 155 | VFPFMXR FPSCR, r5 @ restore status |
134 | 156 | ||
135 | check_for_exception: | 157 | @ The context stored in the VFP hardware is up to date with this thread |
158 | vfp_hw_state_valid: | ||
136 | tst r1, #FPEXC_EX | 159 | tst r1, #FPEXC_EX |
137 | bne process_exception @ might as well handle the pending | 160 | bne process_exception @ might as well handle the pending |
138 | @ exception before retrying branch | 161 | @ exception before retrying branch |
@@ -207,8 +230,8 @@ ENTRY(vfp_save_state) | |||
207 | ENDPROC(vfp_save_state) | 230 | ENDPROC(vfp_save_state) |
208 | 231 | ||
209 | .align | 232 | .align |
210 | last_VFP_context_address: | 233 | vfp_current_hw_state_address: |
211 | .word last_VFP_context | 234 | .word vfp_current_hw_state |
212 | 235 | ||
213 | .macro tbl_branch, base, tmp, shift | 236 | .macro tbl_branch, base, tmp, shift |
214 | #ifdef CONFIG_THUMB2_KERNEL | 237 | #ifdef CONFIG_THUMB2_KERNEL |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index f25e7ec89416..0a96f71f0abd 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -33,7 +33,6 @@ void vfp_support_entry(void); | |||
33 | void vfp_null_entry(void); | 33 | void vfp_null_entry(void); |
34 | 34 | ||
35 | void (*vfp_vector)(void) = vfp_null_entry; | 35 | void (*vfp_vector)(void) = vfp_null_entry; |
36 | union vfp_state *last_VFP_context[NR_CPUS]; | ||
37 | 36 | ||
38 | /* | 37 | /* |
39 | * Dual-use variable. | 38 | * Dual-use variable. |
@@ -43,6 +42,46 @@ union vfp_state *last_VFP_context[NR_CPUS]; | |||
43 | unsigned int VFP_arch; | 42 | unsigned int VFP_arch; |
44 | 43 | ||
45 | /* | 44 | /* |
45 | * The pointer to the vfpstate structure of the thread which currently | ||
46 | * owns the context held in the VFP hardware, or NULL if the hardware | ||
47 | * context is invalid. | ||
48 | * | ||
49 | * For UP, this is sufficient to tell which thread owns the VFP context. | ||
50 | * However, for SMP, we also need to check the CPU number stored in the | ||
51 | * saved state too to catch migrations. | ||
52 | */ | ||
53 | union vfp_state *vfp_current_hw_state[NR_CPUS]; | ||
54 | |||
55 | /* | ||
56 | * Is 'thread's most up to date state stored in this CPU's hardware? | ||
57 | * Must be called from non-preemptible context. | ||
58 | */ | ||
59 | static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread) | ||
60 | { | ||
61 | #ifdef CONFIG_SMP | ||
62 | if (thread->vfpstate.hard.cpu != cpu) | ||
63 | return false; | ||
64 | #endif | ||
65 | return vfp_current_hw_state[cpu] == &thread->vfpstate; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Force a reload of the VFP context from the thread structure. We do | ||
70 | * this by ensuring that access to the VFP hardware is disabled, and | ||
71 | * clear last_VFP_context. Must be called from non-preemptible context. | ||
72 | */ | ||
73 | static void vfp_force_reload(unsigned int cpu, struct thread_info *thread) | ||
74 | { | ||
75 | if (vfp_state_in_hw(cpu, thread)) { | ||
76 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | ||
77 | vfp_current_hw_state[cpu] = NULL; | ||
78 | } | ||
79 | #ifdef CONFIG_SMP | ||
80 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
81 | #endif | ||
82 | } | ||
83 | |||
84 | /* | ||
46 | * Per-thread VFP initialization. | 85 | * Per-thread VFP initialization. |
47 | */ | 86 | */ |
48 | static void vfp_thread_flush(struct thread_info *thread) | 87 | static void vfp_thread_flush(struct thread_info *thread) |
@@ -50,21 +89,27 @@ static void vfp_thread_flush(struct thread_info *thread) | |||
50 | union vfp_state *vfp = &thread->vfpstate; | 89 | union vfp_state *vfp = &thread->vfpstate; |
51 | unsigned int cpu; | 90 | unsigned int cpu; |
52 | 91 | ||
53 | memset(vfp, 0, sizeof(union vfp_state)); | ||
54 | |||
55 | vfp->hard.fpexc = FPEXC_EN; | ||
56 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | ||
57 | |||
58 | /* | 92 | /* |
59 | * Disable VFP to ensure we initialize it first. We must ensure | 93 | * Disable VFP to ensure we initialize it first. We must ensure |
60 | * that the modification of last_VFP_context[] and hardware disable | 94 | * that the modification of vfp_current_hw_state[] and hardware |
61 | * are done for the same CPU and without preemption. | 95 | * disable are done for the same CPU and without preemption. |
96 | * | ||
97 | * Do this first to ensure that preemption won't overwrite our | ||
98 | * state saving should access to the VFP be enabled at this point. | ||
62 | */ | 99 | */ |
63 | cpu = get_cpu(); | 100 | cpu = get_cpu(); |
64 | if (last_VFP_context[cpu] == vfp) | 101 | if (vfp_current_hw_state[cpu] == vfp) |
65 | last_VFP_context[cpu] = NULL; | 102 | vfp_current_hw_state[cpu] = NULL; |
66 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); | 103 | fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN); |
67 | put_cpu(); | 104 | put_cpu(); |
105 | |||
106 | memset(vfp, 0, sizeof(union vfp_state)); | ||
107 | |||
108 | vfp->hard.fpexc = FPEXC_EN; | ||
109 | vfp->hard.fpscr = FPSCR_ROUND_NEAREST; | ||
110 | #ifdef CONFIG_SMP | ||
111 | vfp->hard.cpu = NR_CPUS; | ||
112 | #endif | ||
68 | } | 113 | } |
69 | 114 | ||
70 | static void vfp_thread_exit(struct thread_info *thread) | 115 | static void vfp_thread_exit(struct thread_info *thread) |
@@ -73,8 +118,8 @@ static void vfp_thread_exit(struct thread_info *thread) | |||
73 | union vfp_state *vfp = &thread->vfpstate; | 118 | union vfp_state *vfp = &thread->vfpstate; |
74 | unsigned int cpu = get_cpu(); | 119 | unsigned int cpu = get_cpu(); |
75 | 120 | ||
76 | if (last_VFP_context[cpu] == vfp) | 121 | if (vfp_current_hw_state[cpu] == vfp) |
77 | last_VFP_context[cpu] = NULL; | 122 | vfp_current_hw_state[cpu] = NULL; |
78 | put_cpu(); | 123 | put_cpu(); |
79 | } | 124 | } |
80 | 125 | ||
@@ -84,6 +129,9 @@ static void vfp_thread_copy(struct thread_info *thread) | |||
84 | 129 | ||
85 | vfp_sync_hwstate(parent); | 130 | vfp_sync_hwstate(parent); |
86 | thread->vfpstate = parent->vfpstate; | 131 | thread->vfpstate = parent->vfpstate; |
132 | #ifdef CONFIG_SMP | ||
133 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
134 | #endif | ||
87 | } | 135 | } |
88 | 136 | ||
89 | /* | 137 | /* |
@@ -129,17 +177,8 @@ static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v) | |||
129 | * case the thread migrates to a different CPU. The | 177 | * case the thread migrates to a different CPU. The |
130 | * restoring is done lazily. | 178 | * restoring is done lazily. |
131 | */ | 179 | */ |
132 | if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) { | 180 | if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) |
133 | vfp_save_state(last_VFP_context[cpu], fpexc); | 181 | vfp_save_state(vfp_current_hw_state[cpu], fpexc); |
134 | last_VFP_context[cpu]->hard.cpu = cpu; | ||
135 | } | ||
136 | /* | ||
137 | * Thread migration, just force the reloading of the | ||
138 | * state on the new CPU in case the VFP registers | ||
139 | * contain stale data. | ||
140 | */ | ||
141 | if (thread->vfpstate.hard.cpu != cpu) | ||
142 | last_VFP_context[cpu] = NULL; | ||
143 | #endif | 182 | #endif |
144 | 183 | ||
145 | /* | 184 | /* |
@@ -415,7 +454,7 @@ static int vfp_pm_suspend(void) | |||
415 | } | 454 | } |
416 | 455 | ||
417 | /* clear any information we had about last context state */ | 456 | /* clear any information we had about last context state */ |
418 | memset(last_VFP_context, 0, sizeof(last_VFP_context)); | 457 | memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state)); |
419 | 458 | ||
420 | return 0; | 459 | return 0; |
421 | } | 460 | } |
@@ -443,15 +482,15 @@ static void vfp_pm_init(void) | |||
443 | static inline void vfp_pm_init(void) { } | 482 | static inline void vfp_pm_init(void) { } |
444 | #endif /* CONFIG_PM */ | 483 | #endif /* CONFIG_PM */ |
445 | 484 | ||
485 | /* | ||
486 | * Ensure that the VFP state stored in 'thread->vfpstate' is up to date | ||
487 | * with the hardware state. | ||
488 | */ | ||
446 | void vfp_sync_hwstate(struct thread_info *thread) | 489 | void vfp_sync_hwstate(struct thread_info *thread) |
447 | { | 490 | { |
448 | unsigned int cpu = get_cpu(); | 491 | unsigned int cpu = get_cpu(); |
449 | 492 | ||
450 | /* | 493 | if (vfp_state_in_hw(cpu, thread)) { |
451 | * If the thread we're interested in is the current owner of the | ||
452 | * hardware VFP state, then we need to save its state. | ||
453 | */ | ||
454 | if (last_VFP_context[cpu] == &thread->vfpstate) { | ||
455 | u32 fpexc = fmrx(FPEXC); | 494 | u32 fpexc = fmrx(FPEXC); |
456 | 495 | ||
457 | /* | 496 | /* |
@@ -465,36 +504,13 @@ void vfp_sync_hwstate(struct thread_info *thread) | |||
465 | put_cpu(); | 504 | put_cpu(); |
466 | } | 505 | } |
467 | 506 | ||
507 | /* Ensure that the thread reloads the hardware VFP state on the next use. */ | ||
468 | void vfp_flush_hwstate(struct thread_info *thread) | 508 | void vfp_flush_hwstate(struct thread_info *thread) |
469 | { | 509 | { |
470 | unsigned int cpu = get_cpu(); | 510 | unsigned int cpu = get_cpu(); |
471 | 511 | ||
472 | /* | 512 | vfp_force_reload(cpu, thread); |
473 | * If the thread we're interested in is the current owner of the | ||
474 | * hardware VFP state, then we need to save its state. | ||
475 | */ | ||
476 | if (last_VFP_context[cpu] == &thread->vfpstate) { | ||
477 | u32 fpexc = fmrx(FPEXC); | ||
478 | |||
479 | fmxr(FPEXC, fpexc & ~FPEXC_EN); | ||
480 | |||
481 | /* | ||
482 | * Set the context to NULL to force a reload the next time | ||
483 | * the thread uses the VFP. | ||
484 | */ | ||
485 | last_VFP_context[cpu] = NULL; | ||
486 | } | ||
487 | 513 | ||
488 | #ifdef CONFIG_SMP | ||
489 | /* | ||
490 | * For SMP we still have to take care of the case where the thread | ||
491 | * migrates to another CPU and then back to the original CPU on which | ||
492 | * the last VFP user is still the same thread. Mark the thread VFP | ||
493 | * state as belonging to a non-existent CPU so that the saved one will | ||
494 | * be reloaded in the above case. | ||
495 | */ | ||
496 | thread->vfpstate.hard.cpu = NR_CPUS; | ||
497 | #endif | ||
498 | put_cpu(); | 514 | put_cpu(); |
499 | } | 515 | } |
500 | 516 | ||
@@ -513,8 +529,7 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action, | |||
513 | void *hcpu) | 529 | void *hcpu) |
514 | { | 530 | { |
515 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { | 531 | if (action == CPU_DYING || action == CPU_DYING_FROZEN) { |
516 | unsigned int cpu = (long)hcpu; | 532 | vfp_force_reload((long)hcpu, current_thread_info()); |
517 | last_VFP_context[cpu] = NULL; | ||
518 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) | 533 | } else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) |
519 | vfp_enable(NULL); | 534 | vfp_enable(NULL); |
520 | return NOTIFY_OK; | 535 | return NOTIFY_OK; |